text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
---
_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
---
# Applied Machine Learning: Module 3 (Evaluation)
## Evaluation for Classification
### Preamble
```
%matplotlib notebook
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
dataset = load_digits()
X, y = dataset.data, dataset.target
for class_name, class_count in zip(dataset.target_names, np.bincount(dataset.target)):
print(class_name,class_count)
# Creating a dataset with imbalanced binary classes:
# Negative class (0) is 'not digit 1'
# Positive class (1) is 'digit 1'
y_binary_imbalanced = y.copy()
y_binary_imbalanced[y_binary_imbalanced != 1] = 0
print('Original labels:\t', y[1:30])
print('New binary labels:\t', y_binary_imbalanced[1:30])
np.bincount(y_binary_imbalanced) # Negative class (0) is the most frequent class
X_train, X_test, y_train, y_test = train_test_split(X, y_binary_imbalanced, random_state=0)
# Accuracy of Support Vector Machine classifier
from sklearn.svm import SVC
svm = SVC(kernel='rbf', C=1).fit(X_train, y_train)
svm.score(X_test, y_test)
```
### Dummy Classifiers
DummyClassifier is a classifier that makes predictions using simple rules, which can be useful as a baseline for comparison against actual classifiers, especially with imbalanced classes.
```
from sklearn.dummy import DummyClassifier
# Negative class (0) is most frequent
dummy_majority = DummyClassifier(strategy = 'most_frequent').fit(X_train, y_train)
# Therefore the dummy 'most_frequent' classifier always predicts class 0
y_dummy_predictions = dummy_majority.predict(X_test)
y_dummy_predictions
dummy_majority.score(X_test, y_test)
svm = SVC(kernel='linear', C=1).fit(X_train, y_train)
svm.score(X_test, y_test)
```
### Confusion matrices
#### Binary (two-class) confusion matrix
```
from sklearn.metrics import confusion_matrix
# Negative class (0) is most frequent
dummy_majority = DummyClassifier(strategy = 'most_frequent').fit(X_train, y_train)
y_majority_predicted = dummy_majority.predict(X_test)
confusion = confusion_matrix(y_test, y_majority_predicted)
print('Most frequent class (dummy classifier)\n', confusion)
# produces random predictions w/ same class proportion as training set
dummy_classprop = DummyClassifier(strategy='stratified').fit(X_train, y_train)
y_classprop_predicted = dummy_classprop.predict(X_test)
confusion = confusion_matrix(y_test, y_classprop_predicted)
print('Random class-proportional prediction (dummy classifier)\n', confusion)
svm = SVC(kernel='linear', C=1).fit(X_train, y_train)
svm_predicted = svm.predict(X_test)
confusion = confusion_matrix(y_test, svm_predicted)
print('Support vector machine classifier (linear kernel, C=1)\n', confusion)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression().fit(X_train, y_train)
lr_predicted = lr.predict(X_test)
confusion = confusion_matrix(y_test, lr_predicted)
print('Logistic regression classifier (default settings)\n', confusion)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=2).fit(X_train, y_train)
tree_predicted = dt.predict(X_test)
confusion = confusion_matrix(y_test, tree_predicted)
print('Decision tree classifier (max_depth = 2)\n', confusion)
```
### Evaluation metrics for binary classification
```
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Accuracy = (TP + TN) / (TP + TN + FP + FN)
# Precision = TP / (TP + FP)
# Recall = TP / (TP + FN)  Also known as sensitivity, or True Positive Rate
# F1 = 2 * (Precision * Recall) / (Precision + Recall)
print('Accuracy: {:.2f}'.format(accuracy_score(y_test, tree_predicted)))
print('Precision: {:.2f}'.format(precision_score(y_test, tree_predicted)))
print('Recall: {:.2f}'.format(recall_score(y_test, tree_predicted)))
print('F1: {:.2f}'.format(f1_score(y_test, tree_predicted)))
# Combined report with all above metrics
from sklearn.metrics import classification_report
print(classification_report(y_test, tree_predicted, target_names=['not 1', '1']))
print('Random class-proportional (dummy)\n',
classification_report(y_test, y_classprop_predicted, target_names=['not 1', '1']))
print('SVM\n',
classification_report(y_test, svm_predicted, target_names = ['not 1', '1']))
print('Logistic regression\n',
classification_report(y_test, lr_predicted, target_names = ['not 1', '1']))
print('Decision tree\n',
classification_report(y_test, tree_predicted, target_names = ['not 1', '1']))
```
### Decision functions
```
X_train, X_test, y_train, y_test = train_test_split(X, y_binary_imbalanced, random_state=0)
y_scores_lr = lr.fit(X_train, y_train).decision_function(X_test)
y_score_list = list(zip(y_test[0:20], y_scores_lr[0:20]))
# show the decision_function scores for first 20 instances
y_score_list
X_train, X_test, y_train, y_test = train_test_split(X, y_binary_imbalanced, random_state=0)
y_proba_lr = lr.fit(X_train, y_train).predict_proba(X_test)
y_proba_list = list(zip(y_test[0:20], y_proba_lr[0:20,1]))
# show the probability of positive class for first 20 instances
y_proba_list
```
### Precision-recall curves
```
from sklearn.metrics import precision_recall_curve

precision, recall, thresholds = precision_recall_curve(y_test, y_scores_lr)
# Index of the threshold nearest zero (the default decision boundary of decision_function).
closest_zero = np.argmin(np.abs(thresholds))
closest_zero_p = precision[closest_zero]
closest_zero_r = recall[closest_zero]

plt.figure()
plt.xlim([0.0, 1.01])
plt.ylim([0.0, 1.01])
plt.plot(precision, recall, label='Precision-Recall Curve')
plt.plot(closest_zero_p, closest_zero_r, 'o', markersize=12, fillstyle='none', c='r', mew=3)
plt.xlabel('Precision', fontsize=16)
plt.ylabel('Recall', fontsize=16)
# plt.axes() creates a NEW Axes in modern matplotlib; gca() targets the current one.
plt.gca().set_aspect('equal')
plt.show()
```
### ROC curves, Area-Under-Curve (AUC)
```
from sklearn.metrics import roc_curve, auc

X_train, X_test, y_train, y_test = train_test_split(X, y_binary_imbalanced, random_state=0)

# ROC curve for the logistic-regression decision scores.
y_score_lr = lr.fit(X_train, y_train).decision_function(X_test)
fpr_lr, tpr_lr, _ = roc_curve(y_test, y_score_lr)
roc_auc_lr = auc(fpr_lr, tpr_lr)

plt.figure()
plt.xlim([-0.01, 1.00])
plt.ylim([-0.01, 1.01])
plt.plot(fpr_lr, tpr_lr, lw=3, label='LogRegr ROC curve (area = {:0.2f})'.format(roc_auc_lr))
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curve (1-of-10 digits classifier)', fontsize=16)
plt.legend(loc='lower right', fontsize=13)
plt.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')  # chance-level diagonal
# plt.axes() creates a NEW Axes in modern matplotlib; gca() targets the current one.
plt.gca().set_aspect('equal')
plt.show()

from matplotlib import cm

X_train, X_test, y_train, y_test = train_test_split(X, y_binary_imbalanced, random_state=0)

plt.figure()
plt.xlim([-0.01, 1.00])
plt.ylim([-0.01, 1.01])
# Compare RBF-kernel SVMs across gamma: accuracy can look similar while AUC differs.
for g in [0.01, 0.1, 0.20, 1]:
    svm = SVC(gamma=g).fit(X_train, y_train)
    y_score_svm = svm.decision_function(X_test)
    fpr_svm, tpr_svm, _ = roc_curve(y_test, y_score_svm)
    roc_auc_svm = auc(fpr_svm, tpr_svm)
    accuracy_svm = svm.score(X_test, y_test)
    print("gamma = {:.2f} accuracy = {:.2f} AUC = {:.2f}".format(g, accuracy_svm,
                                                                 roc_auc_svm))
    plt.plot(fpr_svm, tpr_svm, lw=3, alpha=0.7,
             label='SVM (gamma = {:0.2f}, area = {:0.2f})'.format(g, roc_auc_svm))

plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate (Recall)', fontsize=16)
plt.plot([0, 1], [0, 1], color='k', lw=0.5, linestyle='--')
plt.legend(loc="lower right", fontsize=11)
plt.title('ROC curve: (1-of-10 digits classifier)', fontsize=16)
plt.gca().set_aspect('equal')
plt.show()
```
### Evaluation measures for multi-class classification
#### Multi-class confusion matrix
```
dataset = load_digits()
X, y = dataset.data, dataset.target
X_train_mc, X_test_mc, y_train_mc, y_test_mc = train_test_split(X, y, random_state=0)
svm = SVC(kernel = 'linear').fit(X_train_mc, y_train_mc)
svm_predicted_mc = svm.predict(X_test_mc)
confusion_mc = confusion_matrix(y_test_mc, svm_predicted_mc)
df_cm = pd.DataFrame(confusion_mc,
index = [i for i in range(0,10)], columns = [i for i in range(0,10)])
plt.figure(figsize=(5.5,4))
sns.heatmap(df_cm, annot=True)
plt.title('SVM Linear Kernel \nAccuracy:{0:.3f}'.format(accuracy_score(y_test_mc,
svm_predicted_mc)))
plt.ylabel('True label')
plt.xlabel('Predicted label')
svm = SVC(kernel = 'rbf').fit(X_train_mc, y_train_mc)
svm_predicted_mc = svm.predict(X_test_mc)
confusion_mc = confusion_matrix(y_test_mc, svm_predicted_mc)
df_cm = pd.DataFrame(confusion_mc, index = [i for i in range(0,10)],
columns = [i for i in range(0,10)])
plt.figure(figsize = (5.5,4))
sns.heatmap(df_cm, annot=True)
plt.title('SVM RBF Kernel \nAccuracy:{0:.3f}'.format(accuracy_score(y_test_mc,
svm_predicted_mc)))
plt.ylabel('True label')
plt.xlabel('Predicted label');
```
#### Multi-class classification report
```
print(classification_report(y_test_mc, svm_predicted_mc))
```
#### Micro- vs. macro-averaged metrics
```
print('Micro-averaged precision = {:.2f} (treat instances equally)'
.format(precision_score(y_test_mc, svm_predicted_mc, average = 'micro')))
print('Macro-averaged precision = {:.2f} (treat classes equally)'
.format(precision_score(y_test_mc, svm_predicted_mc, average = 'macro')))
print('Micro-averaged f1 = {:.2f} (treat instances equally)'
.format(f1_score(y_test_mc, svm_predicted_mc, average = 'micro')))
print('Macro-averaged f1 = {:.2f} (treat classes equally)'
.format(f1_score(y_test_mc, svm_predicted_mc, average = 'macro')))
```
### Regression evaluation metrics
```
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.dummy import DummyRegressor
diabetes = datasets.load_diabetes()
X = diabetes.data[:, None, 6]
y = diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
lm = LinearRegression().fit(X_train, y_train)
lm_dummy_mean = DummyRegressor(strategy = 'mean').fit(X_train, y_train)
y_predict = lm.predict(X_test)
y_predict_dummy_mean = lm_dummy_mean.predict(X_test)
print('Linear model, coefficients: ', lm.coef_)
print("Mean squared error (dummy): {:.2f}".format(mean_squared_error(y_test,
y_predict_dummy_mean)))
print("Mean squared error (linear model): {:.2f}".format(mean_squared_error(y_test, y_predict)))
print("r2_score (dummy): {:.2f}".format(r2_score(y_test, y_predict_dummy_mean)))
print("r2_score (linear model): {:.2f}".format(r2_score(y_test, y_predict)))
# Plot outputs
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_predict, color='green', linewidth=2)
plt.plot(X_test, y_predict_dummy_mean, color='red', linestyle = 'dashed',
linewidth=2, label = 'dummy')
plt.show()
```
### Model selection using evaluation metrics
#### Cross-validation example
```
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
dataset = load_digits()
# again, making this a binary problem with 'digit 1' as positive class
# and 'not 1' as negative class
X, y = dataset.data, dataset.target == 1
clf = SVC(kernel='linear', C=1)
# accuracy is the default scoring metric
print('Cross-validation (accuracy)', cross_val_score(clf, X, y, cv=5))
# use AUC as scoring metric
print('Cross-validation (AUC)', cross_val_score(clf, X, y, cv=5, scoring = 'roc_auc'))
# use recall as scoring metric
print('Cross-validation (recall)', cross_val_score(clf, X, y, cv=5, scoring = 'recall'))
```
#### Grid search example
```
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
dataset = load_digits()
X, y = dataset.data, dataset.target == 1
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = SVC(kernel='rbf')
grid_values = {'gamma': [0.001, 0.01, 0.05, 0.1, 1, 10, 100]}
# default metric to optimize over grid parameters: accuracy
grid_clf_acc = GridSearchCV(clf, param_grid = grid_values)
grid_clf_acc.fit(X_train, y_train)
y_decision_fn_scores_acc = grid_clf_acc.decision_function(X_test)
print('Grid best parameter (max. accuracy): ', grid_clf_acc.best_params_)
print('Grid best score (accuracy): ', grid_clf_acc.best_score_)
# alternative metric to optimize over grid parameters: AUC
grid_clf_auc = GridSearchCV(clf, param_grid = grid_values, scoring = 'roc_auc')
grid_clf_auc.fit(X_train, y_train)
y_decision_fn_scores_auc = grid_clf_auc.decision_function(X_test)
print('Test set AUC: ', roc_auc_score(y_test, y_decision_fn_scores_auc))
print('Grid best parameter (max. AUC): ', grid_clf_auc.best_params_)
print('Grid best score (AUC): ', grid_clf_auc.best_score_)
```
#### Evaluation metrics supported for model selection
```
# sklearn.metrics.scorer was made private in 0.22 and SCORERS itself removed in 1.3;
# get_scorer_names() is the supported way to list valid `scoring` strings.
from sklearn.metrics import get_scorer_names
print(sorted(get_scorer_names()))
```
### Two-feature classification example using the digits dataset
#### Optimizing a classifier using different evaluation metrics
```
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from adspy_shared_utilities import plot_class_regions_for_classifier_subplot
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
dataset = load_digits()
X, y = dataset.data, dataset.target == 1
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Create a two-feature input vector matching the example plot above
# We jitter the points (add a small amount of random noise) in case there are areas
# in feature space where many instances have the same features.
# We jitter the two selected pixel features so overlapping instances are visible.
jitter_delta = 0.25
X_twovar_train = X_train[:, [20, 59]] + np.random.rand(X_train.shape[0], 2) - jitter_delta
X_twovar_test = X_test[:, [20, 59]] + np.random.rand(X_test.shape[0], 2) - jitter_delta

clf = SVC(kernel='linear').fit(X_twovar_train, y_train)
grid_values = {'class_weight': ['balanced', {1: 2}, {1: 3}, {1: 4}, {1: 5}, {1: 10}, {1: 20}, {1: 50}]}

plt.figure(figsize=(9, 6))
# Re-run the grid search once per scoring metric; the winning class_weight differs by metric.
for i, eval_metric in enumerate(('precision', 'recall', 'f1', 'roc_auc')):
    grid_clf_custom = GridSearchCV(clf, param_grid=grid_values, scoring=eval_metric)
    grid_clf_custom.fit(X_twovar_train, y_train)
    print('Grid best parameter (max. {0}): {1}'
          .format(eval_metric, grid_clf_custom.best_params_))
    print('Grid best score ({0}): {1}'
          .format(eval_metric, grid_clf_custom.best_score_))
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    plot_class_regions_for_classifier_subplot(grid_clf_custom, X_twovar_test, y_test, None,
                                              None, None, plt.subplot(2, 2, i + 1))
    plt.title(eval_metric + '-oriented SVC')
plt.tight_layout()
plt.show()
```
#### Precision-recall curve for the default SVC classifier (with balanced class weights)
```
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from adspy_shared_utilities import plot_class_regions_for_classifier
from sklearn.svm import SVC
dataset = load_digits()
X, y = dataset.data, dataset.target == 1
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# create a two-feature input vector matching the example plot above
jitter_delta = 0.25
X_twovar_train = X_train[:,[20,59]]+ np.random.rand(X_train.shape[0], 2) - jitter_delta
X_twovar_test = X_test[:,[20,59]] + np.random.rand(X_test.shape[0], 2) - jitter_delta
clf = SVC(kernel='linear', class_weight='balanced').fit(X_twovar_train, y_train)
y_scores = clf.decision_function(X_twovar_test)
precision, recall, thresholds = precision_recall_curve(y_test, y_scores)
closest_zero = np.argmin(np.abs(thresholds))
closest_zero_p = precision[closest_zero]
closest_zero_r = recall[closest_zero]
plot_class_regions_for_classifier(clf, X_twovar_test, y_test)
plt.title("SVC, class_weight = 'balanced', optimized for accuracy")
plt.show()
plt.figure()
plt.xlim([0.0, 1.01])
plt.ylim([0.0, 1.01])
plt.title("Precision-recall curve: SVC, class_weight = 'balanced'")
plt.plot(precision, recall, label='Precision-Recall Curve')
plt.plot(closest_zero_p, closest_zero_r, 'o', markersize=12, fillstyle='none', c='r', mew=3)
plt.xlabel('Precision', fontsize=16)
plt.ylabel('Recall', fontsize=16)
# plt.axes() creates a NEW Axes in modern matplotlib; gca() targets the current one.
plt.gca().set_aspect('equal')
plt.show()
print('At zero threshold, precision: {:.2f}, recall: {:.2f}'
.format(closest_zero_p, closest_zero_r))
```
| github_jupyter |
# CME 193 - Lecture 5 - Pandas
Before we get started, you may want to make sure that you have the following packages installed in whatever environment you're using: `pandas`
```bash
conda install pandas
```
Pandas is a package for working with tabular data.
We'll also cover dictionaries and lambda functions today.
At the end of class, we'll have a longer exercise than usual in a supplemental notebook.
# Pandas
[Pandas](https://pandas.pydata.org/) is a Python library for dealing with data. The main thing you'll hear people talk about is the DataFrame object (inspired by R), which is designed to hold tabular data.
## Difference between a DataFrame and NumPy Array
Pandas DataFrames and NumPy arrays both have similarities to Python lists.
* Numpy arrays are designed to contain data of one type (e.g. Int, Float, ...)
* DataFrames can contain different types of data (Int, Float, String, ...)
* Usually each column has the same type
Both arrays and DataFrames are optimized for storage/performance beyond Python lists
Pandas is also powerful for working with missing data, working with time series data, for reading and writing your data, for reshaping, grouping, merging your data, ...
## Key Features
* File I/O - integrations with multiple file formats
* Working with missing data (.dropna(), pd.isnull())
* Normal table operations: merging and joining, groupby functionality, reshaping via stack, and pivot_tables,
* Time series-specific functionality:
* date range generation and frequency conversion, moving window statistics/regressions, date shifting and lagging, etc.
* Built in Matplotlib integration
## Other Strengths
* Strong community, support, and documentation
* Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects
* Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data
* Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects Intelligent label-based slicing, fancy indexing, and subsetting of large data sets
## Python/Pandas vs. R
* R is a language dedicated to statistics. Python is a general-purpose language with statistics modules.
* R has more statistical analysis features than Python, and specialized syntaxes.
However, when it comes to building complex analysis pipelines that mix statistics with e.g. image analysis, text mining, or control of a physical experiment, the richness of Python is an invaluable asset.
# Getting Started
[Here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html) is a link to the documentation for DataFrames
```
import pandas as pd
import numpy as np
```
## Objects and Basic Creation
| Name | Dimensions | Description |
| ------:| -----------:|----------|
| ```pd.Series``` | 1 | 1D labeled homogeneously-typed array |
| ```pd.DataFrame``` | 2| General 2D labeled, size-mutable tabular structure |
| ```pd.Panel``` | 3 | General 3D labeled, also size-mutable array (deprecated and removed in pandas 1.0 — use a MultiIndex DataFrame instead) |
# Series
## What are they?
- Series is a one-dimensional labeled array capable of holding any data type (integers, strings, floating point numbers, Python objects, etc.). The axis labels are collectively referred to as the index.
- Basic method to create a series:
```python
s = pd.Series(data, index = index) ```
- Data Can be many things:
* A Python Dictionary
* An ndarray (or reg. list)
* A scalar
- The passed index is a list of axis labels (which varies on what data is)
Think "Series = Vector + labels"
```
first_series = pd.Series([1,2,4,8,16,32,64])
print(type(first_series))
print(first_series)
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
print(s)
print('-'*50)
print(s.index)
```
If data is a dictionary and an index is passed, the values in data corresponding to the labels in the index will be pulled out; otherwise, an index is constructed from the sorted keys of the dict.
```
d = {'a': [0., 0], 'b': {'1':1.}, 'c': 2.}
pd.Series(d)
```
### Side Note: Dictionaries
We've seen Python lists. Dictionaries are just another built-in Python data structure. Dictionaries consist of key-value pairs, and are constructed using
```python
D = { key : value, ...}```
```
# Dictionaries map keys to values; literals use { key: value, ... }.
D = {1: 5, 2: 6}
# Iterate over (key, value) pairs with .items().
for key, value in D.items():
    print("key : %d value : %d" % (key, value))
print("\n\n")
D = {'a': 1, 'b': 2}
D['c'] = 3  # assigning to a new key inserts the pair
# Iterating a dict yields its keys; index to fetch each value.
for key in D:
    value = D[key]
    print("key : %s value : %d" % (key, value))
```
### Back to Pandas series...
You can create a series from a scalar, but need to specify indices
```
pd.Series(5, index = ['a', 'b', 'c'])
```
You can index and slice series like you would numpy arrays/python lists
```
end_string = '\n' + '-'*50 + '\n'
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
print(s)
print(s[0], end = end_string)
# slicing
print(s[:3], end =end_string)
# conditional max - index with booleans
print(s[ s > s.mean()], end = end_string)
# elementwise function - vectorization
print(np.exp(s), end = end_string)
```
Series are also like dictionaries - you can access values using index labels
```
print(s, end = end_string)
print(s['a'], end = end_string)
s['e'] = 12 # set element using index label
print(s, end = end_string)
print('e' in s, end = end_string) # check for index label
print(s.get('f', None), end = end_string) # get item with index 'f' - if no such item return None
print(s.get('e', None), end = end_string)
```
### Series Attributes:
- Get the index :
```python
s.index ```
- Get the values :
``` python
s.values ```
- Find the shape :
``` python
s.shape ```
### Series Iteration
```
# Series.iteritems() was removed in pandas 2.0; items() is the long-standing equivalent.
for idx, val in s.items():
    print(idx, val)
```
Sort by index or by value
```
print(s.sort_index(), end = end_string)
print(s.sort_values(), end = end_string)
```
Find counts of unique values
```
s = pd.Series([0,0,0,1,1,1,2,2,2,2])
sct = s.value_counts() # what is the type of sct?
print(sct)
```
You can do just about anything you can do with a numpy array
- Series.mean()
- Series.median()
- Series.mode()
- Series.nsmallest(num)
- Series.max ...
```
print(s.min(),end = end_string)
print(s.max(), end = end_string)
```
# DataFrame
- DataFrame is a 2-dimensional labeled data structure with columns of potentially different types. You can think of it like a spreadsheet or SQL table, or a dict of Series objects. It is generally the most commonly used pandas object.
- You can create a DataFrame from:
- Dict of 1D ndarrays, lists, dicts, or Series
- 2-D numpy array
- A list of dictionaries
- A Series
- Another Dataframe
``` python
df = pd.DataFrame(data, index = index, columns = columns)
```
- ```index``` / ```columns``` is a list of the row / column labels. If you pass an index and/or columns, you are guaranteeing the index and/or columns of the df.
- If you do not pass anything in, the input will be constructed by "common sense" rules
[**pandas.DataFrame**](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html)
# DataFrame Creation From dict of series or dicts
- The index of the resulting DataFrame will be the union of the indices of the various Series. If there are any nested dicts, these will be first converted to Series.
- If no columns are passed, the columns will be the sorted list of dict keys.
```
# Create a dictionary of series
d = {'one': pd.Series([1,2,3], index = ['a', 'b', 'c']),
'two': pd.Series(list(range(4)), index = ['a','b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df, end = end_string)
d= {'one': {'a': 1, 'b': 2, 'c':3},
'two': pd.Series(list(range(4)), index = ['a','b', 'c', 'd'])}
# Columns are dictionary keys, indices and values obtained from series
df = pd.DataFrame(d)
# Notice how it fills the column one with NaN for d
print(df, end = end_string)
d = {'one': pd.Series([1,2,3], index = ['a', 'b', 'c']),
'two': pd.Series(list(range(4)), index = ['a','b', 'c', 'd'])}
print(pd.DataFrame(d, index = ['d', 'b', 'a']), end = end_string)
print(pd.DataFrame(d, index = ['d', 'b', 'a'], columns = ['two', 'three']),
end = end_string)
# Accessing attributes
print(df.index, end = end_string)
print(df.columns,end = end_string)
print(df.shape)
```
# From dict of ndarray / lists
- The ndarrays must all be the same length.
- If an index is passed, it must clearly also be the same length as the arrays. If no index is passed, the result will be range(n), where n is the array length.
```
d = {'one' : [1., 2., 3., 4.], 'two' : [4., 3., 2., 1.]}
pd.DataFrame(d)
```
# From a list of dicts
```
# Build 100 rows; each row is a dict of five random integer columns
# (a dictionary comprehension builds one row).
data = []
for i in range(100):
    data.append({'Column' + str(j): np.random.randint(100) for j in range(5)})
data[:5]
# Creation from a list of dicts
df = pd.DataFrame(data)
print(df.head(), end = end_string)
# Only certain columns
df = pd.DataFrame(data, columns = ['Column0', 'Column1'])
print(df.head(), end = end_string)
```
## Attributes
- ``` df.index ``` : the row index of df
- ``` df.columns ``` : the columns of df
- ``` df.shape ``` : the shape of the df
- ``` df.values ``` : numpy array of values
```
# Adding and accessing columns
d = {'one': pd.Series([1,2,3], index = ['a', 'b', 'c']),
'two': pd.Series(range(4), index = ['a','b', 'c', 'd'])}
df = pd.DataFrame(d)
# multiply
df['three'] = df['one']*df['two']
# Create a boolean flag
df['flag'] = df['one'] > 2
print(df.head())
# inserting column in specified location, with values
df.insert(1, 'bar', df['one'][:2])
print(df.head())
# Deleting Columns: pop removes the column from df and returns it as a Series
three = df.pop('three')
print(df.head(), end = end_string)
# Propagation of values: assigning a scalar broadcasts it down the whole column
df['foo'] = 'bar'
print(df, end = end_string)
```
## Indexing and Selection
- Main methods: ``` [], loc, iloc ``` (an older fourth accessor, ``ix``, is deprecated and removed in recent pandas)
| Operation | Syntax | Result |
|----|----------------------| ---------------------------|
| Select Column | df[col] | Series |
| Select Row by Label | df.loc[label] | Series |
| Select Row by Integer Location | df.iloc[idx] | Series |
| Slice rows | df[5:10] | DataFrame |
| Select rows by boolean | df[mask] | DataFrame |
- Note all the operations below are valid on series as well restricted to one dimension
## Simplest form Of Indexing: []
- Series: selecting a label: s[label]
- DataFrame: selection single or multiple columns:
``` python
df['col'] or df[['col1', 'col2']] ```
- DataFrame: slicing the rows:
``` python
df['rowlabel1': 'rowlabel2'] ```
or
``` python
df[boolean_mask] ```
```
# Lets create a data frame
pd.options.display.max_rows = 4
dates = pd.date_range('1/1/2000', periods=8)
df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C','D'])
df
# column 'A
df['A']
# multiple column
df[['A', 'B']]
# slice by rows
df['2000-01-01': '2000-01-04']
# boolean mask
df[df['A'] > df['B']].head()
### You can also access a column by df.colname
df.A
# Assign via []
df['A'] = df['B'].values
df.head()
df.A
```
## Selecting by label .loc
- is primarily label based, but may also be used with a boolean array.
- .loc will raise KeyError when the items are not found
- Allowed inputs:
1. A single label
2. A list of labels
3. A boolean array
```
## Selection by label .loc
df.loc['2000-01-01']
# Accessing all greater than a date
df.loc['2000-01-01':, ['A', 'B']]
# Get columns for which value is greater than 0 on certain day, get all rows
df.loc[:, df.loc['2000-01-01'] > 0]
```
## Selecting by position
- The .iloc attribute is the primary access method. The following are valid input:
- An integer
- A list of integers
- A slice
- A boolean array
```
df1 = pd.DataFrame(np.random.randn(6,4),
index=list(range(0,12,2)), columns=list(range(0,12,3)))
df1
# rows 0-2
df1.iloc[:3]
# rows 1:4 and columns 2 : 4
df1.iloc[1:5, 2:4]
# select via integer list
df1.iloc[[1,3,5], [1,3]]
# selecting via integer mask
boolean_mask = df1.iloc[:, 1] > 0.0
df1.iloc[boolean_mask.values,1]
```
# Merging DataFrames
- Pandas has full-featured, very high performance, in memory join operations that are very similar to SQL and R
- The documentation is https://pandas.pydata.org/pandas-docs/stable/merging.html#database-style-dataframe-joining-merging
- Pandas provides a single function, merge, as the entry point for all standard database join operations between DataFrame objects:
``` python
pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=True) ```
```
# Example of merge
left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [4, 2]})
right = pd.DataFrame({'key': ['bar', 'zoo'], 'rval': [4, 5]})
print("left: ",left,"right: ",right, sep=end_string)
merged = pd.merge(left, right)
print(merged)
merged = pd.merge(left, right, how="outer")
print(merged)
merged = pd.merge(left, right, how="left")
print(merged)
merged = pd.merge(left, right, how="right")
print(merged)
```
## Function Application
- Row or Column-wise Function Application: Applies function along input axis of DataFrame
```python
df.apply(func, axis = 0) ```
- Elementwise: apply the function to every element in the df
```python
df.applymap(func) ```
- Note, ``` applymap ``` is equivalent to the ``` map ``` function on lists.
- Note, ``` Series ``` objects support ``` .map ``` instead of ``` applymap ```
```
## APPLY EXAMPLES
df1 = pd.DataFrame(np.random.randn(6,4), index=list(range(0,12,2)), columns=list('abcd'))
df1
# Apply to each column
df1.apply(np.mean)
# Apply to each row
df1.apply(np.mean, axis = 1)
```
### Side note: lambda functions
lambda functions allow you to specify a function without giving it a separate declaration. For example, the function
```python
lambda x: (x - x.mean())/x.std()
```
is equivalent to the function
```python
def normalize(x):
return (x - x.mean())/x.std()
```
You'll often see lambda functions used in list comprehensions, or in methods (like `map()`, `apply()`, or `applymap()`) that take a function as input.
```
# # Use lambda functions to normalize columns
df1 = df1.apply(lambda x: (x - x.mean())/ x.std(), axis = 0)
df1
## APPLY EXAMPLES CONT
# Create DF with 1000 rows and 3 columns filled with random entries
tsdf = pd.DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=pd.date_range('1/1/2000', periods=1000))
tsdf.head()
# Can get trickier, say I wanted to find where the maximum dates occured for each column of the df:
tsdf.apply(lambda x: x.idxmax())
## APPLYMAP EXAMPLES
# NOTE(review): DataFrame.applymap is deprecated since pandas 2.1 (renamed DataFrame.map);
# kept here for compatibility with older pandas. Applies the lambda elementwise.
tmp = tsdf.applymap(lambda x: x - 1)
print(tmp.head())
```
# I/O Functions
- There are loads of input output features. The highlights most useful to you are likely:
- ``` pd.read_csv ``` / ``` pd.to_csv ```
- ``` pd.read_excel ``` / ``` pd.to_excel ```
- ``` pd.read_sql ``` / ``` pd.to_sql ```
- ``` pd.read_pickle ``` / ``` pd.to_pickle ```
Documentation:
* [Pandas Import-Output Functions](https://pandas.pydata.org/pandas-docs/stable/io.html)
### Loading data from CSV
Here are the first several lines of `iris.csv`:
```
sepal_length,sepal_width,petal_length,petal_width,name
5.1,3.5,1.4,0.2,setosa
4.9,3.0,1.4,0.2,setosa
4.7,3.2,1.3,0.2,setosa
4.6,3.1,1.5,0.2,setosa
5.0,3.6,1.4,0.2,setosa
5.4,3.9,1.7,0.4,setosa
```
```
import pandas as pd
# Can use df.info to find out information about the df
data = pd.read_csv('./data/iris.csv')
data.info()
# describe and summarize the dataframe
data.describe()
```
## The split/apply combo (groupby)
- pandas objects can be split on any of their axes. The abstract definition of grouping is to provide a mapping of labels to group names:
- Syntax:
- ``` groups = df.groupby(key) ```
- ``` groups = df.groupby(key, axis = 1) ```
- ``` groups = df.groupby([key1, key2], axis = 1) ```
### A picture
- The group by concept is that we want to apply the same function on subsets of the dataframe, based on some key we use to split the DataFrame into subsets
- This idea is referred to as the "split-apply-combine" operation:
- Split the data into groups based on some criteria
- Apply a function to each group independently
- Combine the results

```
df = pd.DataFrame({'key':['A','B','C','A','B','C','A','B','C'],
'data': [0, 5, 10, 5, 10, 15, 10, 15, 20]})
df
groupby_key = df.groupby('key')
sums = groupby_key.aggregate(np.sum)
sums
```
# Plotting data
- The plot method on Series and DataFrame is just a wrapper on matplotlib plt.plot()
- Many available plots:
- โbarโ or โbarhโ for bar plots
- โhistโ for histogram
- โboxโ for boxplot
- โkdeโ or 'density' for density plots โข โareaโ for area plots
- โscatterโ for scatter plots
- โhexbinโ for hexagonal bin plots โข โpieโ for pie plots
- There are several more complex plotting functions in pandas.tools.plotting that take a Series or DataFrame as an argument. These include:
- Scatter matrices
- Andrews Curves
- Autocorrelation
- Bootstrap Plot
```
import matplotlib.pyplot as plt
data.plot();
## Quick example - Random walks
df = pd.DataFrame(np.random.randn(1000, 4), index =pd.date_range('1/1/2000', periods=1000), columns=list('ABCD'))
df = df.cumsum()
df.plot()
plt.show()
plt.figure()
df.iloc[5].plot(kind = 'bar')
plt.axhline(0, color = 'k')
data = pd.read_csv('./data/iris.csv')
ax = data.groupby('name') \
.get_group('setosa') \
.boxplot(column=["sepal_length","sepal_width"], return_type='axes')
```
# Exercise
Work through the supplement notebook, posted online.
| github_jupyter |
```
import spacy
from spacy import displacy
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
nltk.download('wordnet')
from nltk.corpus import stopwords
import re
from nltk.stem import PorterStemmer
nltk.download('stopwords')
sp = spacy.load("en_core_web_sm")
stop_words = set(stopwords.words('english'))
words_relations ={'winner': ['win' , "winning" ,'won', 'wins'],
'occupant':["occupant"],
'position played on team / speciality':['role' , 'position on team ', 'position' , 'speciality' , "play"],
'participating team':['participating' ,'participating team'],
'father':['father'],
'league':['league'],
'country of citizenship': ['citizenship' , 'nationality', 'race', 'located' , "locate"],
'work in': ['part , member','instance' , "work" , "working"],
'country': ['country'],
'sports season of league or competition': ["season"],
'owned by':["owned" ,'own', 'owner'],
'field of work':['field', 'job', "occupation" , 'do'],
'developer':['develop', "developing" , 'develops'],
'head of government':['head of government'],
'participant of' :['participant' , 'contribute'],
'located in or next to body of water':["located in or next to body of water"],
'sibling' : ["sibling"],
'taxon rank':['taxon rank'],
'position held' :['position held'],
'subsidiary':["subsidiary"],
'architect' :["architect"],
'language of work or name':['language'],
'publisher':["publisher"],
'residence':["residence"]}
# Turn a natural-language question into a (subject, relation, object) query
# triple using spaCy NER plus a keyword-to-relation lookup (words_relations).
question = "For which club Mike Phelan work for ?"
# Strip punctuation that would confuse tokenization/NER.
question = re.sub('[!?#&()/;=@[\]^_`{|}~]', "", question)
question
# Run spaCy NER and collect (entity text, entity label) pairs.
text1= sp(question)
results =[]
for word in text1.ents:
    results.append((word.text,word.label_))
displacy.render(sp(str(question)), jupyter=True, style='ent')
results
sub = 'null' # search in results
obj = 'null' # search in results
# search in POS tags
# PERSON/EVENT entities become the subject; any other entity type becomes
# the object. NOTE(review): if several entities share a category, the last
# one seen wins; if none are found, 'null' is kept as a placeholder.
for tup in results :
    if tup[1] in ["PERSON" ,"EVENT"]:
        sub = tup[0]
    else :
        obj = tup[0]
# Tokens already consumed by the subject/object are excluded from relation search.
diff = sub.split(" ") + obj.lower().split(" ")
word_tokens = word_tokenize(question)
# Drop stopwords, then drop subject/object tokens, then stem what remains.
filtered_sentence = [w for w in word_tokens if not w.lower() in stop_words]
spec = [ k for k in filtered_sentence if k not in diff]
st = PorterStemmer()
ques = [st.stem(word) for word in spec]
# Match stemmed tokens against the keyword lists in words_relations; the
# last matching relation key wins.
rel = "null"
for k ,v in words_relations.items():
    for ele in ques:
        if ele in v:
            rel = k
rel
query_elements = [sub , rel , obj]
query_elements
```
| github_jupyter |
<a href="https://colab.research.google.com/github/SLCFLAB/Data-Science-Python/blob/main/Day%202/2_1.%20numpy%26pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Numpy & Pandas basic
### Reference
https://numpy.org/doc/stable/reference/routines.html
https://pandas.pydata.org/docs/reference/
https://github.com/nhkim55/bigdata_fintech_python (๋น
๋ฐ์ดํฐ ํํ
ํฌ ๊ณผ์ )
## numpy.array
* numpy = numerical python
* **array**๋ฅผ ์ฌ์ฉํ์ฌ ๋ฒกํฐ, ํ๋ ฌ ์ฐ์ฐ ์๋ ๋น ๋ฆ
* ์ ์ฅ์ด ํจ์จ์
```
import numpy as np
# one dimensional array
a = np.array([1,2,3])
print(a)
# two dimensional array
b = np.array([[1, 2], [3, 4]])
print(b)
# minimum dimensions
d = np.array([1, 2, 3, 4, 5], ndmin = 2)
print(d)
# dtype parameter
e = np.array([1, 2, 3], dtype = float) #complex
print(e)
```
### Array attributes
ndarray.shape, ndarray.size, ndarray.ndim, ndarray.dtype, reshape function, np.arange
```
x1 = np.array([[1,2,3],[4,5,6]])
print(x1.shape)
print(x1.size) # elements ๊ฐฏ์
x3 = np.arange(24)
print(x3)
x4 = x3.reshape(2,4,3) #๊ฐ์ฅ ๋ฐ๊นฅ์ชฝ๋ถํฐ ์๊ฐ
print(x4)
x4.ndim
x4.dtype
```
### zeros, ones
```
print(np.ones(5))
print(np.zeros(5))
print(np.ones([2,2], dtype=int))
```
### arange, linspace function
The arange function creates sequences of numbers. It is analogous to `range`, but returns an array instead of a list.
linspace function returns evenly spaced numbers over a specified interval
```
np.arange(10, 20, 2)
np.linspace(2.0, 4.0, 5)
```
### Array manipulation
```
import numpy as np
from numpy.random import randn
arr1 = np.array(randn(4, 4)*10, dtype = np.int8) #randn: ์ ๊ท๋ถํฌ ์ํ
print(arr1)
arr1.tolist() # Turn arr1 to a list
arr1.flatten() # Make a 1D array, arr1์ ๋ณํ์ง์์
arr1.sum() #๋ชจ๋ ์์์ ํฉ
arr1.sum(axis=0) #ํ ๋ฐฉํฅ ์งํ
arr1.sum(axis=1) #์ด ๋ฐฉํฅ ์งํ
arr1.cumsum(axis=0)
arr1.mean(axis=0)
```
### Arithmetic operations
```
z1 = np.array([10,20,30,40,50])
z2 = np.arange(5)
print(z1)
print(z2)
# addition
z_add = z1 + z2
print(z_add)
# subtraction
z_sub = z1 - z2
print(z_sub)
# multiplication - elementwise product
z_mult = z1 * z2
print(z_mult)
# division
z_div = z1/2
print(z_div)
# comparision operator
z1 < 35
np.cos(z1) #universal function
```
### Indexing and Slicing
```
arr = np.arange(1,17)
arr = arr.reshape(1,4,4)
#arr = arr.reshape(1,-1,4)
#arr = arr.reshape(1,4,-1)
print(arr)
print('shape: ', arr.shape)
print('data type: ', arr.dtype)
print('number of dimensions: ', arr.ndim)
#index
print(arr[0, 1])
print(arr[-1, -1])
#slicing
arr[:,:,1:3:]
```
### np.array_split
```
arr = np.array([1, 2, 3, 4, 5, 6])
newarr = np.array_split(arr, 3)
print(newarr)
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15], [16, 17, 18]])
print(np.array_split(arr, 3))
print(np.array_split(arr, 3, axis=1))
```
### np.where
np.where(์กฐ๊ฑด์): ์กฐ๊ฑด ๋ง์กฑํ๋ **์ธ๋ฑ์ค** ๋ฐํ
```
a = np.arange(4, 20, 3)
b = a.reshape(2,-1)
print(a)
print(b)
np.where(a%2==0) #์ง์์ธ ์์์ ์ธ๋ฑ์ค๋ฅผ ๋ฐํ
np.where(b%2==0) #axis=0(ํ)๊ธฐ์ค, axis=1(์ด) ๊ธฐ์ค -> (0,0) (0,2) (1,1)
```
## Pandas
```
import pandas as pd
```
### Series
```
ser1 = pd.Series([1, 2, 3, 4])
print(ser1)
# Create a pandas Index
idx = pd.Index(["New York", "Los Angeles", "Chicago",
"Houston", "Philadelphia", "Phoenix", "San Antonio",
"San Diego", "Dallas"])
print(idx)
pops = pd.Series([8550, 3972, 2721, 2296, 1567, np.nan, 1470, 1395, 1300],
index=idx, name="Population")
print(pops)
```
### Dataframe
```
pd.DataFrame({'Yes': [50, 21], 'No': [131, 2]}) #์๋ฌธ์๋ก ํ๋ฉด ์๋ฌ
pd.DataFrame({'Bob': ['I liked it.', 'It was awful.'],
'Sue': ['Pretty good.', 'Bland.']},
index=['Product A', 'Product B'])
```
### ๋ฐ์ดํฐ ์ค๋ช
ํ๊ธฐ
```
# ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํฉ๋๋ค.
import pandas as pd
# ๋ฐ์ดํฐ URL
url = 'https://raw.githubusercontent.com/chrisalbon/simulated_datasets/master/titanic.csv'
# ๋ฐ์ดํฐ๋ฅผ ์ ์ฌํฉ๋๋ค.
dataframe = pd.read_csv(url)
# ๋ ๊ฐ์ ํ์ ํ์ธํฉ๋๋ค.
dataframe.head(2)
dataframe.tail(3)
# ์ฐจ์์ ํ์ธํฉ๋๋ค.
dataframe.shape
# ํต๊ณ๊ฐ์ ํ์ธํฉ๋๋ค.
dataframe.describe()
```
### ํ์ํ๊ธฐ
iloc๊ณผ loc์ ์ฐจ์ด
* **iloc**: integer location์ ์ฝ์ด๋ก, ๋ฐ์ดํฐ ํ๋ ์์ ํ์ด๋ ์นผ๋ผ์ ์์๋ฅผ ๋ํ๋ด๋ ์ ์๋ก ํน์ ๊ฐ์ ์ถ์ถํด์ค๋ ๋ฐฉ๋ฒ
* '0๋ฒ ํ, 2๋ฒ ์นผ๋ผ' -> df.iloc[0,2]
* iloc๋ ์ปดํจํฐ๊ฐ ์ฝ๊ธฐ ์ข์ ๋ฐฉ๋ฒ์ผ๋ก(์ซ์๋ก) ๋ฐ์ดํฐ๊ฐ ์๋ ์์น(์์)์ ์ ๊ทผ
* **loc**๋ ์นผ๋ผ๋ช
์ ์ง์ ์ ๊ฑฐ๋ ํน์ ์กฐ๊ฑด์์ ์จ์ค์ผ๋ก์จ ์ฌ๋์ด ์ฝ๊ธฐ ์ข์ ๋ฐฉ๋ฒ์ผ๋ก ๋ฐ์ดํฐ์ ์ ๊ทผํ๋ ๋ฐฉ๋ฒ
```
# ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํฉ๋๋ค.
import pandas as pd
# ๋ฐ์ดํฐ URL
url = 'https://raw.githubusercontent.com/chrisalbon/simulated_datasets/master/titanic.csv'
# ๋ฐ์ดํฐ๋ฅผ ์ ์ฌํฉ๋๋ค.
dataframe = pd.read_csv(url)
# ์ฒซ ๋ฒ์งธ ํ์ ์ ํํฉ๋๋ค.
dataframe.iloc[0]
# ์ธ ๊ฐ์ ํ์ ์ ํํฉ๋๋ค.
dataframe.iloc[1:4]
# ๋ค ๊ฐ์ ํ์ ์ ํํฉ๋๋ค.
dataframe.loc[1:4]
# ๋ค ๊ฐ์ ํ์ ์ ํํฉ๋๋ค.
dataframe.iloc[:4]
# ์ธ๋ฑ์ค๋ฅผ ์ค์ ํฉ๋๋ค.
dataframe = dataframe.set_index(dataframe['Name'])
# ํ์ ํ์ธํฉ๋๋ค.
dataframe.loc['Allen, Miss Elisabeth Walton']
# 'Allison, Miss Helen Loraine' ์ด์ ๊น์ง Age ์ด๊ณผ Sex ์ด๋ง ์ ํํฉ๋๋ค.
dataframe.loc[:'Allison, Miss Helen Loraine', 'Age':'Sex']
# dataframe[:2]์ ๋์ผํฉ๋๋ค.
dataframe[:'Allison, Miss Helen Loraine']
dataframe['Name']
dataframe[['Age', 'Sex']].head(2)
```
### ์กฐ๊ฑด์ ๋ฐ๋ผ ํ ์ ํ
```
# ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํฉ๋๋ค.
import pandas as pd
# ๋ฐ์ดํฐ URL
url = 'https://raw.githubusercontent.com/chrisalbon/simulated_datasets/master/titanic.csv'
# ๋ฐ์ดํฐ๋ฅผ ์ ์ฌํฉ๋๋ค.
dataframe = pd.read_csv(url)
# โsexโ ์ด์ด โfemaleโ์ธ ํ ์ค ์ฒ์ ๋ ๊ฐ๋ฅผ ์ถ๋ ฅํฉ๋๋ค.
dataframe[dataframe['Sex'] == 'female'].head(2)
# ํ์ ํํฐ๋งํฉ๋๋ค.
dataframe[(dataframe['Sex'] == 'female') & (dataframe['Age'] >= 65)]
# Name ์ด์ Allison์ด ํฌํจ๋ ํ๋ง ์ฐพ๊ธฐ
dataframe['Name'].str.find('Allison')
```
### ์ต์๊ฐ, ์ต๋๊ฐ, ํฉ, ํ๊ท ๊ณ์ฐ ๋ฐ ๊ฐ์ ์ธ๊ธฐ
```
# ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํฉ๋๋ค.
import pandas as pd
# ๋ฐ์ดํฐ URL
url = 'https://raw.githubusercontent.com/chrisalbon/simulated_datasets/master/titanic.csv'
# ๋ฐ์ดํฐ๋ฅผ ์ ์ฌํฉ๋๋ค.
dataframe = pd.read_csv(url)
# ํต๊ณ๊ฐ์ ๊ณ์ฐํฉ๋๋ค.
print('์ต๋๊ฐ:', dataframe['Age'].max())
print('์ต์๊ฐ:', dataframe['Age'].min())
print('ํ๊ท :', dataframe['Age'].mean())
print('ํฉ:', dataframe['Age'].sum())
print('์นด์ดํธ:', dataframe['Age'].count())
# ์นด์ดํธ๋ฅผ ์ถ๋ ฅํฉ๋๋ค.
dataframe.count()
```
### Dataframe sort, group by
#### sort_index: ์ธ๋ฑ์ค ๋ฐ๋ผ ์ ๋ ฌ
```
df = pd.DataFrame(np.round(np.random.randn(7, 3) * 10),
columns=["AAA", "BBB", "CCC"],
index=list("defcabg"))
df
df.sort_index() #๊ธฐ๋ณธ: ์ค๋ฆ์ฐจ์
df.sort_index(axis=1, ascending=False) # Sorting columns by index, opposite order (ccc,bbb,aaa)
```
#### sort_values: ๊ฐ์ ๊ธฐ์ค์ผ๋ก ์ ๋ ฌ
```
df.sort_values(by='AAA') # According to contents of AAA
df.sort_values(by=['BBB', 'CCC']) # Arrange first by BBB, breaking ties with CCC
```
#### groupby()
๊ฐ์ ๊ฐ์ ํ๋๋ก ๋ฌถ์ด **ํต๊ณ ๊ฒฐ๊ณผ(ํ๊ท , max, min, ...)** ๋ฅผ ์ป๊ธฐ ์ํด ์ฌ์ฉ
```
df = pd.DataFrame({
'city': ['๋ถ์ฐ', '๋ถ์ฐ', '๋ถ์ฐ', '๋ถ์ฐ', '์์ธ', '์์ธ', '์์ธ'],
'fruits': ['apple', 'orange', 'banana', 'banana', 'apple', 'apple', 'banana'],
'price': [100, 200, 250, 300, 150, 200, 400],
'quantity': [1, 2, 3, 4, 5, 6, 7]
})
df
```
groupby๋ฅผ ์ฌ์ฉํ๋ฉด ๊ธฐ๋ณธ์ผ๋ก ๊ทธ๋ฃน ๋ผ๋ฒจ์ด index๊ฐ ๋๋๋ฐ, index๋ฅผ ์ฌ์ฉํ๊ณ ์ถ์ ์์ ๊ฒฝ์ฐ์๋ as_index=False ๋ฅผ ์ค์ ํ๋ฉด ๋ฉ๋๋ค.
```
df.groupby('city', as_index=False).mean()
df.groupby(['city', 'fruits'], as_index=False).mean()
```
### ๋ชจ๋ ์ด ์์์ ํจ์ ์ ์ฉ
```
# ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํฉ๋๋ค.
import pandas as pd
# ๋ฐ์ดํฐ URL
url = 'https://raw.githubusercontent.com/chrisalbon/simulated_datasets/master/titanic.csv'
# ๋ฐ์ดํฐ๋ฅผ ์ ์ฌํฉ๋๋ค.
dataframe = pd.read_csv(url)
# ํจ์๋ฅผ ๋ง๋ญ๋๋ค.
def uppercase(x):
return x.upper()
# ํจ์๋ฅผ ์ ์ฉํ๊ณ ๋ ๊ฐ์ ํ์ ์ถ๋ ฅํฉ๋๋ค.
dataframe['Name'].apply(uppercase)[0:2]
# Survived ์ด์ 1์ Live๋ก, 0์ Dead๋ก ๋ฐ๊ฟ๋๋ค.
dataframe['Survived'].map({1:'Live', 0:'Dead'})[:5]
# ํจ์์ ๋งค๊ฐ๋ณ์(age)๋ฅผ apply ๋ฉ์๋๋ฅผ ํธ์ถํ ๋ ์ ๋ฌํ ์ ์์ต๋๋ค.
dataframe['Age'].apply(lambda x, age: x < age, age=30)[:5]
```
### ๋ฐ์ดํฐํ๋ ์ ์ฐ๊ฒฐ
```
# ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํฉ๋๋ค.
import pandas as pd
# ๋ฐ์ดํฐํ๋ ์์ ๋ง๋ญ๋๋ค.
data_a = {'id': ['1', '2', '3'],
'first': ['Alex', 'Amy', 'Allen'],
'last': ['Anderson', 'Ackerman', 'Ali']}
dataframe_a = pd.DataFrame(data_a, columns = ['id', 'first', 'last'])
# ๋ฐ์ดํฐํ๋ ์์ ๋ง๋ญ๋๋ค.
data_b = {'id': ['4', '5', '6'],
'first': ['Billy', 'Brian', 'Bran'],
'last': ['Bonder', 'Black', 'Balwner']}
dataframe_b = pd.DataFrame(data_b, columns = ['id', 'first', 'last'])
# ํ ๋ฐฉํฅ์ผ๋ก ๋ฐ์ดํฐํ๋ ์์ ์ฐ๊ฒฐํฉ๋๋ค.
pd.concat([dataframe_a, dataframe_b], axis=0)
dataframe_a
dataframe_b
# ์ด ๋ฐฉํฅ์ผ๋ก ๋ฐ์ดํฐํ๋ ์์ ์ฐ๊ฒฐํฉ๋๋ค.
pd.concat([dataframe_a, dataframe_b], axis=1)
# ํ์ ๋ง๋ญ๋๋ค.
row = pd.Series([10, 'Chris', 'Chillon'], index=['id', 'first', 'last'])
# ํ์ ์ถ๊ฐํฉ๋๋ค.
dataframe_a.append(row, ignore_index=True)
```
## ๊ฒฐ์ธก์น ๋ค๋ฃจ๊ธฐ (dropna, fillna)
```
data_frame = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], [np.nan, np.nan, np.nan, 5],\
[3, 4, np.nan, 1], [3, 4, 0, 1]], columns=list('ABCD'))
data_frame
data_frame.dropna()
data_frame.dropna(axis=1)
#A, B์ด์ NaN์ด ํฌํจ๋ ํ์ ์ญ์
data_frame.dropna(subset=['A','B'])
#2ํ๊ณผ 4ํ์ NaN์ด ํฌํจ๋ ์ด์ ์ญ์
data_frame.dropna(axis=1, subset=[2, 4])
#๋ชจ๋ NaN์ 0์ผ๋ก ์นํํ๋ค
data_frame.fillna(0)
data_frame.isnull()
#NaN์ ๊ฐ์ ์ด์ ๋ฐ๋ก ์์ ํ ๊ฐ์ผ๋ก ๋์ฒด
data_frame.fillna(method='ffill')
#NaN์ ๊ฐ์ ์ด์ ๋ฐ๋ก ์๋ ํ ๊ฐ์ผ๋ก ๋์ฒด
data_frame.fillna(method='bfill')
```
| github_jupyter |
REF_top-10-0-10943-stacking-mice-and-brutal-force
Reference:
- https://www.kaggle.com/agehsbarg/top-10-0-10943-stacking-mice-and-brutal-force
## Import PKGs
```
import os
import time
import numpy as np
import pandas as pd
import datetime
import random
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import lightgbm as lgb
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
```
## Folders
```
CWD = os.getcwd()
input_folder = os.path.join(CWD, 'input')
output_folder = os.path.join(CWD, 'output')
model_folder = os.path.join(CWD, 'model')
train_csv = os.path.join(input_folder, 'train.csv')
test_csv = os.path.join(input_folder, 'test.csv')
sample_submission_csv = os.path.join(input_folder, 'sample_submission.csv')
submission_csv = os.path.join(output_folder, 'submission.csv')
print(train_csv)
print(test_csv)
print(sample_submission_csv)
print(submission_csv)
```
## Load Data
```
train_df = pd.read_csv(train_csv)
test_df = pd.read_csv(test_csv)
display(train_df.shape, train_df.head(2))
display(test_df.shape, test_df.head(2))
# display(train_df.columns)
# display(test_df.columns)
```
## cleaning data(remove outliers)
```
# As suggested by many participants, we remove several outliers:
# cheap-but-high-quality and huge-but-cheap houses distort the price fit.
train_df.drop(train_df[(train_df['OverallQual']<5) & (train_df['SalePrice']>200000)].index, inplace=True)
train_df.drop(train_df[(train_df['GrLivArea']>4000) & (train_df['SalePrice']<300000)].index, inplace=True)
# Re-number rows so later positional concatenation stays aligned.
train_df.reset_index(drop=True, inplace=True)
# Some of the non-numeric predictors are stored as numbers; we convert them
# into strings so they are treated as categoricals by the dummy encoding.
train_df['MSSubClass'] = train_df['MSSubClass'].apply(str)
train_df['YrSold'] = train_df['YrSold'].astype(str)
train_df['MoSold'] = train_df['MoSold'].astype(str)
```
## Function to fill in missings
```
# Here we create funtion which fills all the missing values
# Pay attention that some of the missing values of numeric predictors first are filled in with zeros and then
# small values are filled in with median/average (and indicator variables are created to account for such change:
# for each variable we create which are equal to one);
def fill_missings(res):
    """Fill missing values of the housing DataFrame and return it.

    Categorical columns get either the literal placeholder 'missing', a
    fixed sensible category ('None'/'Typ'), or the column mode. The numeric
    columns in ``flist`` are filled with 0, after which several zero-valued
    entries are replaced by small positive constants (np.exp(...)) so that
    a later log-transform of those columns stays well-behaved.

    NOTE(review): mutates *res* in place and also returns it.
    """
    # --- categorical columns: placeholder / fixed value / mode ---
    res['Alley'] = res['Alley'].fillna('missing')
    res['PoolQC'] = res['PoolQC'].fillna(res['PoolQC'].mode()[0])
    res['MasVnrType'] = res['MasVnrType'].fillna('None')
    res['BsmtQual'] = res['BsmtQual'].fillna(res['BsmtQual'].mode()[0])
    res['BsmtCond'] = res['BsmtCond'].fillna(res['BsmtCond'].mode()[0])
    res['FireplaceQu'] = res['FireplaceQu'].fillna(res['FireplaceQu'].mode()[0])
    res['GarageType'] = res['GarageType'].fillna('missing')
    res['GarageFinish'] = res['GarageFinish'].fillna(res['GarageFinish'].mode()[0])
    res['GarageQual'] = res['GarageQual'].fillna(res['GarageQual'].mode()[0])
    res['GarageCond'] = res['GarageCond'].fillna('missing')
    res['Fence'] = res['Fence'].fillna('missing')
    res['Street'] = res['Street'].fillna('missing')
    res['LotShape'] = res['LotShape'].fillna('missing')
    res['LandContour'] = res['LandContour'].fillna('missing')
    res['BsmtExposure'] = res['BsmtExposure'].fillna(res['BsmtExposure'].mode()[0])
    res['BsmtFinType1'] = res['BsmtFinType1'].fillna('missing')
    res['BsmtFinType2'] = res['BsmtFinType2'].fillna('missing')
    res['CentralAir'] = res['CentralAir'].fillna('missing')
    res['Electrical'] = res['Electrical'].fillna(res['Electrical'].mode()[0])
    res['MiscFeature'] = res['MiscFeature'].fillna('missing')
    res['MSZoning'] = res['MSZoning'].fillna(res['MSZoning'].mode()[0])
    res['Utilities'] = res['Utilities'].fillna('missing')
    res['Exterior1st'] = res['Exterior1st'].fillna(res['Exterior1st'].mode()[0])
    res['Exterior2nd'] = res['Exterior2nd'].fillna(res['Exterior2nd'].mode()[0])
    res['KitchenQual'] = res['KitchenQual'].fillna(res['KitchenQual'].mode()[0])
    res["Functional"] = res["Functional"].fillna("Typ")
    res['SaleType'] = res['SaleType'].fillna(res['SaleType'].mode()[0])
    res['SaleCondition'] = res['SaleCondition'].fillna('missing')
    # --- numeric columns: missing means "feature absent", so fill with 0 ---
    flist = ['LotFrontage','LotArea','MasVnrArea','BsmtFinSF1','BsmtFinSF2','BsmtUnfSF',
             'TotalBsmtSF','1stFlrSF','2ndFlrSF','LowQualFinSF','GrLivArea',
             'BsmtFullBath','BsmtHalfBath','FullBath','HalfBath','BedroomAbvGr','KitchenAbvGr',
             'TotRmsAbvGrd','Fireplaces','GarageCars','GarageArea','WoodDeckSF','OpenPorchSF',
             'EnclosedPorch','3SsnPorch','ScreenPorch','PoolArea','MiscVal']
    for fl in flist:
        res[fl] = res[fl].fillna(0)
    # Replace zeros in selected area columns with small positive constants
    # (presumably near the column's typical log-scale value — TODO confirm)
    # so the subsequent log features don't collapse at zero.
    res['TotalBsmtSF'] = res['TotalBsmtSF'].apply(lambda x: np.exp(6) if x <= 0.0 else x)
    res['2ndFlrSF'] = res['2ndFlrSF'].apply(lambda x: np.exp(6.5) if x <= 0.0 else x)
    res['GarageArea'] = res['GarageArea'].apply(lambda x: np.exp(6) if x <= 0.0 else x)
    res['GarageCars'] = res['GarageCars'].apply(lambda x: 0 if x <= 0.0 else x)
    res['LotFrontage'] = res['LotFrontage'].apply(lambda x: np.exp(4.2) if x <= 0.0 else x)
    res['MasVnrArea'] = res['MasVnrArea'].apply(lambda x: np.exp(4) if x <= 0.0 else x)
    res['BsmtFinSF1'] = res['BsmtFinSF1'].apply(lambda x: np.exp(6.5) if x <= 0.0 else x)
    return res
```
## Filling in missing values, re-coding ordinal variables
```
# Running function to fill in missings
train_df = fill_missings(train_df)
train_df['TotalSF'] = train_df['TotalBsmtSF'] + train_df['1stFlrSF'] + train_df['2ndFlrSF']
# Working with ordinal predictors
def QualToInt(x):
    """Map an ordinal quality label to an integer code (best = 0).

    Ex -> 0, Gd -> 1, TA -> 2, Fa -> 3, 'missing' -> 4; any other value
    (e.g. 'Po') -> 5.
    """
    codes = {'Ex': 0, 'Gd': 1, 'TA': 2, 'Fa': 3, 'missing': 4}
    return codes.get(x, 5)
train_df['ExterQual'] = train_df['ExterQual'].apply(QualToInt)
train_df['ExterCond'] = train_df['ExterCond'].apply(QualToInt)
train_df['KitchenQual'] = train_df['KitchenQual'].apply(QualToInt)
train_df['HeatingQC'] = train_df['HeatingQC'].apply(QualToInt)
train_df['BsmtQual'] = train_df['BsmtQual'].apply(QualToInt)
train_df['BsmtCond'] = train_df['BsmtCond'].apply(QualToInt)
train_df['FireplaceQu'] = train_df['FireplaceQu'].apply(QualToInt)
train_df['GarageQual'] = train_df['GarageQual'].apply(QualToInt)
train_df['PoolQC'] = train_df['PoolQC'].apply(QualToInt)
def SlopeToInt(x):
    """Encode LandSlope: Gtl -> 0, Mod -> 1, Sev -> 2, anything else -> 3."""
    codes = {'Gtl': 0, 'Mod': 1, 'Sev': 2}
    return codes.get(x, 3)
train_df['LandSlope'] = train_df['LandSlope'].apply(SlopeToInt)
train_df['CentralAir'] = train_df['CentralAir'].apply( lambda x: 0 if x == 'N' else 1)
train_df['Street'] = train_df['Street'].apply( lambda x: 0 if x == 'Pave' else 1)
train_df['PavedDrive'] = train_df['PavedDrive'].apply( lambda x: 0 if x == 'Y' else 1)
def GFinishToInt(x):
    """Encode GarageFinish: Fin -> 0, RFn -> 1, Unf -> 2, anything else -> 3."""
    codes = {'Fin': 0, 'RFn': 1, 'Unf': 2}
    return codes.get(x, 3)
train_df['GarageFinish'] = train_df['GarageFinish'].apply(GFinishToInt)
def BsmtExposureToInt(x):
    """Encode BsmtExposure: Gd -> 0, Av -> 1, Mn -> 2, No -> 3, else -> 4."""
    codes = {'Gd': 0, 'Av': 1, 'Mn': 2, 'No': 3}
    return codes.get(x, 4)
train_df['BsmtExposure'] = train_df['BsmtExposure'].apply(BsmtExposureToInt)
def FunctionalToInt(x):
    """Encode Functional: Typ -> 0, Min1/Min2 -> 1, anything else -> 2."""
    if x == 'Typ':
        return 0
    if x in ('Min1', 'Min2'):
        return 1
    return 2
train_df['Functional_int'] = train_df['Functional'].apply(FunctionalToInt)
def HouseStyleToInt(x):
    """Encode HouseStyle as an ordinal integer; unknown values map to 8.

    Fix: the original compared against ' 2.5Fin' (stray leading space), a
    value that can never occur, so the dataset category '2.5Fin' always
    fell through to the else branch (8). '2.5Fin' now correctly maps to 7.
    """
    codes = {
        '1.5Unf': 0,
        'SFoyer': 1,
        '1.5Fin': 2,
        '2.5Unf': 3,
        'SLvl': 4,
        '1Story': 5,
        '2Story': 6,
        '2.5Fin': 7,  # was unreachable due to the leading-space typo
    }
    return codes.get(x, 8)
train_df['HouseStyle_int'] = train_df['HouseStyle'].apply(HouseStyleToInt)
train_df['HouseStyle_1st'] = 1*(train_df['HouseStyle'] == '1Story')
train_df['HouseStyle_2st'] = 1*(train_df['HouseStyle'] == '2Story')
train_df['HouseStyle_15st'] = 1*(train_df['HouseStyle'] == '1.5Fin')
def FoundationToInt(x):
    """Encode Foundation quality: PConc -> 3, CBlock -> 2, BrkTil -> 1, else -> 0."""
    codes = {'PConc': 3, 'CBlock': 2, 'BrkTil': 1}
    return codes.get(x, 0)
train_df['Foundation_int'] = train_df['Foundation'].apply(FoundationToInt)
def MasVnrTypeToInt(x):
    """Encode MasVnrType: Stone -> 3, BrkFace -> 2, BrkCmn -> 1, else (incl. 'None') -> 0."""
    codes = {'Stone': 3, 'BrkFace': 2, 'BrkCmn': 1}
    return codes.get(x, 0)
train_df['MasVnrType_int'] = train_df['MasVnrType'].apply(MasVnrTypeToInt)
def BsmtFinType1ToInt(x):
    """Encode BsmtFinType1 quality, best = 6 (GLQ) down to 0 for anything else."""
    codes = {'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec': 3, 'LwQ': 2, 'Unf': 1}
    return codes.get(x, 0)
train_df['BsmtFinType1_int'] = train_df['BsmtFinType1'].apply(BsmtFinType1ToInt)
train_df['BsmtFinType1_Unf'] = 1*(train_df['BsmtFinType1'] == 'Unf')
train_df['HasWoodDeck'] = (train_df['WoodDeckSF'] == 0) * 1
train_df['HasOpenPorch'] = (train_df['OpenPorchSF'] == 0) * 1
train_df['HasEnclosedPorch'] = (train_df['EnclosedPorch'] == 0) * 1
train_df['Has3SsnPorch'] = (train_df['3SsnPorch'] == 0) * 1
train_df['HasScreenPorch'] = (train_df['ScreenPorch'] == 0) * 1
train_df['YearsSinceRemodel'] = train_df['YrSold'].astype(int) - train_df['YearRemodAdd'].astype(int)
train_df['Total_Home_Quality'] = train_df['OverallQual'] + train_df['OverallCond']
```
## Adding log-transformed predictors to raw data
```
def addlogs(res, ls):
    """Return a copy of *res* with a '<name>_log' column appended for every
    column name in *ls*, holding log(1.01 + value); the 0.01 offset keeps
    zero entries finite. The input DataFrame is left untouched.
    """
    out = res.copy()
    for col in ls:
        out[col + '_log'] = np.log(1.01 + out[col].values)
    return out
loglist = ['LotFrontage','LotArea','MasVnrArea','BsmtFinSF1','BsmtFinSF2','BsmtUnfSF',
'TotalBsmtSF','1stFlrSF','2ndFlrSF','LowQualFinSF','GrLivArea',
'BsmtFullBath','BsmtHalfBath','FullBath','HalfBath','BedroomAbvGr','KitchenAbvGr',
'TotRmsAbvGrd','Fireplaces','GarageCars','GarageArea','WoodDeckSF','OpenPorchSF',
'EnclosedPorch','3SsnPorch','ScreenPorch','PoolArea','MiscVal','YearRemodAdd','TotalSF']
train_df = addlogs(train_df, loglist)
```
## Creating dataset for training: adding dummies, adding numeric predictors
```
def getdummies(res, ls):
    """One-hot encode the columns of *res* named in *ls*.

    Each column is label-encoded to integers, then one-hot expanded; the
    indicator columns are named '<col><i>'. Returns (dummies_df, decoder)
    where *decoder* is a list of [LabelEncoder, OneHotEncoder] pairs, one
    per column, reusable on new data (see getdummies_transform).

    NOTE(review): the output frame starts from a placeholder column 'A'
    (empty/NaN); downstream code drops it by deleting the first array
    column after np.array(df).
    """
    def encode(encode_df):
        # Fit label + one-hot encoders on this column and return both the
        # dummy frame and the fitted encoders so the mapping is reproducible.
        encode_df = np.array(encode_df)
        enc = OneHotEncoder()
        le = LabelEncoder()
        le.fit(encode_df)
        res1 = le.transform(encode_df).reshape(-1, 1)
        enc.fit(res1)
        return pd.DataFrame(enc.transform(res1).toarray()), le, enc
    decoder = []
    outres = pd.DataFrame({'A' : []})
    for l in ls:
        cat, le, enc = encode(res[l])
        # Prefix indicator columns with the source column name.
        cat.columns = [l+str(x) for x in cat.columns]
        outres.reset_index(drop=True, inplace=True)
        outres = pd.concat([outres, cat], axis = 1)
        decoder.append([le,enc])
    return (outres, decoder)
catpredlist = ['MSSubClass','MSZoning','LotShape','LandContour','LotConfig',
'Neighborhood','Condition1','Condition2','BldgType',
'RoofStyle','RoofMatl','Exterior1st','Exterior2nd',
'BsmtFinType2','Heating','HouseStyle','Foundation','MasVnrType','BsmtFinType1',
'Electrical','Functional','GarageType','Alley','Utilities',
'GarageCond','Fence','MiscFeature','SaleType','SaleCondition','LandSlope','CentralAir',
'GarageFinish','BsmtExposure','Street']
# Applying function to get dummies
# Saving decoder - function which can be used to transform new data
res = getdummies(train_df[catpredlist],catpredlist)
df = res[0]
decoder = res[1]
# Adding real valued features
floatpredlist = ['LotFrontage_log',
'LotArea_log',
'MasVnrArea_log','BsmtFinSF1_log','BsmtFinSF2_log','BsmtUnfSF_log',
'TotalBsmtSF_log','1stFlrSF_log','2ndFlrSF_log','LowQualFinSF_log','GrLivArea_log',
'BsmtFullBath_log','BsmtHalfBath_log','FullBath_log','HalfBath_log','BedroomAbvGr_log','KitchenAbvGr_log',
'TotRmsAbvGrd_log','Fireplaces_log','GarageCars_log','GarageArea_log',
'PoolArea_log','MiscVal_log',
'YearRemodAdd','TotalSF_log','OverallQual','OverallCond','ExterQual','ExterCond','KitchenQual',
'HeatingQC','BsmtQual','BsmtCond','FireplaceQu','GarageQual','PoolQC','PavedDrive',
'HasWoodDeck', 'HasOpenPorch','HasEnclosedPorch', 'Has3SsnPorch', 'HasScreenPorch']
df = pd.concat([df,train_df[floatpredlist]],axis=1)
```
## Creating dataset for training: using function which creates squared predictors and adding them to the dataset
```
def addSquared(res, ls):
    """Return a copy of *res* with a squared column '<name>_sq' appended for
    every column name in *ls*. The input DataFrame is left untouched.
    """
    out = res.copy()
    for col in ls:
        out[col + '_sq'] = (out[col] * out[col]).values
    return out
sqpredlist = ['YearRemodAdd', 'LotFrontage_log',
'TotalBsmtSF_log', '1stFlrSF_log', '2ndFlrSF_log', 'GrLivArea_log',
'GarageCars_log', 'GarageArea_log',
'OverallQual','ExterQual','BsmtQual','GarageQual','FireplaceQu','KitchenQual']
df = addSquared(df, sqpredlist)
```
## Converting data to numpy array
```
# Design matrix: drop array column 0, which is the all-NaN placeholder
# column 'A' created by getdummies.
X = np.array(df)
X = np.delete(X, 0, axis=1)
# Target: log(1 + SalePrice), matching the log-scale features.
y = np.log(1+np.array(train_df['SalePrice']))
```
## Modelling
- 20-fold cross-validation (nF = 20 in the code below).
- Stacking: on each run of cross-validation I fit 5 models (l2, l1, GBR, ENet and LGB).
- Then we make 5 predictions using these models on left-out fold and add geometric mean of these predictions.
- Finally, use lasso on these six predictors to forecast values on the left-out fold.
- Save all the models (in total we have 20*6=120 models).
```
# 20-fold CV: on each fold, fit five base models, then fit a lasso
# "stacker" on their held-out predictions plus their geometric mean.
nF = 20
kf = KFold(n_splits=nF, random_state=241, shuffle=True)
# Per-model RMSE accumulators. NOTE(review): several train_errors_* /
# ens lists and pred_all are declared but never filled in this loop.
test_errors_l2 = []
train_errors_l2 = []
test_errors_l1 = []
train_errors_l1 = []
test_errors_GBR = []
train_errors_GBR = []
test_errors_ENet = []
test_errors_LGB = []
test_errors_stack = []
test_errors_ens = []
train_errors_ens = []
models = []
pred_all = []
ifold = 1
for train_index, test_index in kf.split(X):
    print('fold: ',ifold)
    ifold = ifold + 1
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # ridge
    l2Regr = Ridge(alpha=9.0, fit_intercept = True)
    l2Regr.fit(X_train, y_train)
    pred_train_l2 = l2Regr.predict(X_train)
    pred_test_l2 = l2Regr.predict(X_test)
    # lasso (robust-scaled input)
    l1Regr = make_pipeline(RobustScaler(), Lasso(alpha = 0.0003, random_state=1, max_iter=50000))
    l1Regr.fit(X_train, y_train)
    pred_train_l1 = l1Regr.predict(X_train)
    pred_test_l1 = l1Regr.predict(X_test)
    # GBR (huber loss for robustness to residual outliers)
    myGBR = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.02,
                                      max_depth=4, max_features='sqrt',
                                      min_samples_leaf=15, min_samples_split=50,
                                      loss='huber', random_state = 5)
    myGBR.fit(X_train,y_train)
    pred_train_GBR = myGBR.predict(X_train)
    pred_test_GBR = myGBR.predict(X_test)
    # ENet
    ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=4.0, l1_ratio=0.005, random_state=3))
    ENet.fit(X_train, y_train)
    pred_train_ENet = ENet.predict(X_train)
    pred_test_ENet = ENet.predict(X_test)
    # LGB
    myLGB = lgb.LGBMRegressor(objective='regression',num_leaves=5,
                              learning_rate=0.05, n_estimators=600,
                              max_bin = 50, bagging_fraction = 0.6,
                              bagging_freq = 5, feature_fraction = 0.25,
                              feature_fraction_seed=9, bagging_seed=9,
                              min_data_in_leaf = 6, min_sum_hessian_in_leaf = 11)
    myLGB.fit(X_train, y_train)
    pred_train_LGB = myLGB.predict(X_train)
    pred_test_LGB = myLGB.predict(X_test)
    # Stacking: base-model predictions on the held-out fold become the
    # stacker's features (placeholder column 'A' is dropped below).
    stackedset = pd.DataFrame({'A' : []})
    stackedset = pd.concat([stackedset,pd.DataFrame(pred_test_l2)],axis=1)
    stackedset = pd.concat([stackedset,pd.DataFrame(pred_test_l1)],axis=1)
    stackedset = pd.concat([stackedset,pd.DataFrame(pred_test_GBR)],axis=1)
    stackedset = pd.concat([stackedset,pd.DataFrame(pred_test_ENet)],axis=1)
    stackedset = pd.concat([stackedset,pd.DataFrame(pred_test_LGB)],axis=1)
    # Sixth feature: geometric mean of the five predictions.
    prod = (pred_test_l2*pred_test_l1*pred_test_GBR*pred_test_ENet*pred_test_LGB) ** (1.0/5.0)
    stackedset = pd.concat([stackedset,pd.DataFrame(prod)],axis=1)
    Xstack = np.array(stackedset)
    Xstack = np.delete(Xstack, 0, axis=1)
    # NOTE(review): the stacker lasso is fit on the SAME fold it is scored
    # on, so test_errors_stack is optimistically biased.
    l1_staked = Lasso(alpha = 0.0001,fit_intercept = True)
    l1_staked.fit(Xstack, y_test)
    pred_test_stack = l1_staked.predict(Xstack)
    # Keep all six fitted models for scoring the competition test set.
    models.append([l2Regr,l1Regr,myGBR,ENet,myLGB,l1_staked])
    test_errors_l2.append(np.square(pred_test_l2 - y_test).mean() ** 0.5)
    test_errors_l1.append(np.square(pred_test_l1 - y_test).mean() ** 0.5)
    test_errors_GBR.append(np.square(pred_test_GBR - y_test).mean() ** 0.5)
    test_errors_ENet.append(np.square(pred_test_ENet - y_test).mean() ** 0.5)
    test_errors_LGB.append(np.square(pred_test_LGB - y_test).mean() ** 0.5)
    test_errors_stack.append(np.square(pred_test_stack - y_test).mean() ** 0.5)
```
## Output of test-set errors (mean cross-validation RMSE per model; the stacked error should be the lowest)
```
print(np.mean(test_errors_l2))
print(np.mean(test_errors_l1))
print(np.mean(test_errors_GBR))
print(np.mean(test_errors_ENet))
print(np.mean(test_errors_LGB))
print(np.mean(test_errors_stack))
```
## Scoring: predictions on the test set
```
scoredata = pd.read_csv(test_csv)
scoredata['MSSubClass'] = scoredata['MSSubClass'].apply(str)
scoredata['YrSold'] = scoredata['YrSold'].astype(str)
scoredata['MoSold'] = scoredata['MoSold'].astype(str)
```
## Filling in missing values, re-coding ordinal variables
```
scoredata = fill_missings(scoredata)
scoredata['ExterQual'] = scoredata['ExterQual'].apply(QualToInt)
scoredata['ExterCond'] = scoredata['ExterCond'].apply(QualToInt)
scoredata['KitchenQual'] = scoredata['KitchenQual'].apply(QualToInt)
scoredata['HeatingQC'] = scoredata['HeatingQC'].apply(QualToInt)
scoredata['BsmtQual'] = scoredata['BsmtQual'].apply(QualToInt)
scoredata['BsmtCond'] = scoredata['BsmtCond'].apply(QualToInt)
scoredata['FireplaceQu'] = scoredata['FireplaceQu'].apply(QualToInt)
scoredata['GarageQual'] = scoredata['GarageQual'].apply(QualToInt)
scoredata['PoolQC'] = scoredata['PoolQC'].apply(QualToInt)
scoredata['LandSlope'] = scoredata['LandSlope'].apply(SlopeToInt)
scoredata['CentralAir'] = scoredata['CentralAir'].apply( lambda x: 0 if x == 'N' else 1)
scoredata['Street'] = scoredata['Street'].apply( lambda x: 0 if x == 'Grvl' else 1)
scoredata['GarageFinish'] = scoredata['GarageFinish'].apply(GFinishToInt)
scoredata['BsmtExposure'] = scoredata['BsmtExposure'].apply(BsmtExposureToInt)
scoredata['TotalSF'] = scoredata['TotalBsmtSF'] + scoredata['1stFlrSF'] + scoredata['2ndFlrSF']
scoredata['TotalSF'] = scoredata['TotalSF'].fillna(0)
scoredata['Functional_int'] = scoredata['Functional'].apply(FunctionalToInt)
scoredata['HouseStyle_int'] = scoredata['HouseStyle'].apply(HouseStyleToInt)
scoredata['HouseStyle_1st'] = 1*(scoredata['HouseStyle'] == '1Story')
scoredata['HouseStyle_2st'] = 1*(scoredata['HouseStyle'] == '2Story')
scoredata['HouseStyle_15st'] = 1*(scoredata['HouseStyle'] == '1.5Fin')
scoredata['Foundation_int'] = scoredata['Foundation'].apply(FoundationToInt)
scoredata['MasVnrType_int'] = scoredata['MasVnrType'].apply(MasVnrTypeToInt)
scoredata['BsmtFinType1_int'] = scoredata['BsmtFinType1'].apply(BsmtFinType1ToInt)
scoredata['BsmtFinType1_Unf'] = 1*(scoredata['BsmtFinType1'] == 'Unf')
scoredata['PavedDrive'] = scoredata['PavedDrive'].apply( lambda x: 0 if x == 'Y' else 1)
scoredata['HasWoodDeck'] = (scoredata['WoodDeckSF'] == 0) * 1
scoredata['HasOpenPorch'] = (scoredata['OpenPorchSF'] == 0) * 1
scoredata['HasEnclosedPorch'] = (scoredata['EnclosedPorch'] == 0) * 1
scoredata['Has3SsnPorch'] = (scoredata['3SsnPorch'] == 0) * 1
scoredata['HasScreenPorch'] = (scoredata['ScreenPorch'] == 0) * 1
scoredata['Total_Home_Quality'] = scoredata['OverallQual'] + scoredata['OverallCond']
```
## Changing newly appeared values for some predictors
```
scoredata['MSSubClass'] = scoredata['MSSubClass'].apply(lambda x: '20' if x == '150' else x)
scoredata['MSZoning'] = scoredata['MSZoning'].apply(lambda x: 'RL' if x == 'missing' else x)
scoredata['Utilities'] = scoredata['Utilities'].apply(lambda x: 'AllPub' if x == 'missing' else x)
scoredata['Exterior1st'] = scoredata['Exterior1st'].apply(lambda x: 'VinylSd' if x == 'missing' else x)
scoredata['Exterior2nd'] = scoredata['Exterior2nd'].apply(lambda x: 'VinylSd' if x == 'missing' else x)
scoredata['Functional'] = scoredata['Functional'].apply(lambda x: 'Typ' if x == 'missing' else x)
scoredata['SaleType'] = scoredata['SaleType'].apply(lambda x: 'WD' if x == 'missing' else x)
scoredata['SaleCondition'] = scoredata['SaleCondition'].apply(lambda x: 'Normal' if x == 'missing' else x)
```
## Adding log-transformed predictors to raw data
```
scoredata = addlogs(scoredata, loglist)
```
## Creating dataset for training: dummies, adding numeric variables, adding squared predictors
```
def getdummies_transform(res, ls, decoder):
    """One-hot encode the columns `ls` of `res` using the pre-fitted
    (label-encoder, one-hot-encoder) pairs in `decoder`.

    Returns a DataFrame whose first column is a dummy all-NaN column 'A'
    (downstream code deletes it) followed by the encoded columns, named
    '<col><index>'.
    """

    def encode(encode_df, le_df, enc_df):
        # Label-encode to integer ids, then expand the ids to one-hot columns.
        ids = le_df.transform(np.array(encode_df)).reshape(-1, 1)
        return pd.DataFrame(enc_df.transform(ids).toarray())

    # Placeholder frame so the first pd.concat has something to attach to.
    outres = pd.DataFrame({'A' : []})
    for col, pair in zip(ls, decoder):
        le, enc = pair[0], pair[1]
        cat = encode(res[col], le, enc)
        cat.columns = ['{}{}'.format(col, c) for c in cat.columns]
        outres.reset_index(drop=True, inplace=True)
        outres = pd.concat([outres, cat], axis=1)
    return outres
# Build the scoring design matrix: one-hot categoricals (with the encoders
# fitted on training data), then raw numeric predictors, then squared terms.
df_scores = getdummies_transform(scoredata, catpredlist, decoder)
df_scores = pd.concat([df_scores,scoredata[floatpredlist]],axis=1)
df_scores = addSquared(df_scores, sqpredlist)
```
## Converting data into numpy array
```
X_score = np.array(df_scores)
# Drop the placeholder first column ('A') seeded by getdummies_transform.
X_score = np.delete(X_score, 0, axis=1)
```
## Scoring data
```
# Score the submission set with every fold's model bundle and combine the
# folds with a geometric mean (predictions are in log-price space).
M = X_score.shape[0]
scores_fin = 1+np.zeros(M)
for md in models:
    # One bundle per CV fold. NOTE(review): the exact estimator types are
    # defined in the training section — confirm this ordering matches it.
    l2 = md[0]
    l1 = md[1]
    GBR = md[2]
    ENet = md[3]
    LGB = md[4]
    l1_stacked = md[5]
    # Base-model predictions on the scoring matrix.
    l2_scores = l2.predict(X_score)
    l1_scores = l1.predict(X_score)
    GBR_scores = GBR.predict(X_score)
    ENet_scores = ENet.predict(X_score)
    LGB_scores = LGB.predict(X_score)
    # Assemble the stacker's design matrix: the five base predictions plus
    # their geometric mean. The dummy 'A' column is removed below.
    stackedsets = pd.DataFrame({'A' : []})
    stackedsets = pd.concat([stackedsets,pd.DataFrame(l2_scores)],axis=1)
    stackedsets = pd.concat([stackedsets,pd.DataFrame(l1_scores)],axis=1)
    stackedsets = pd.concat([stackedsets,pd.DataFrame(GBR_scores)],axis=1)
    stackedsets = pd.concat([stackedsets,pd.DataFrame(ENet_scores)],axis=1)
    stackedsets = pd.concat([stackedsets,pd.DataFrame(LGB_scores)],axis=1)
    prod = (l2_scores*l1_scores*GBR_scores*ENet_scores*LGB_scores) ** (1.0/5.0)
    stackedsets = pd.concat([stackedsets,pd.DataFrame(prod)],axis=1)
    Xstacks = np.array(stackedsets)
    Xstacks = np.delete(Xstacks, 0, axis=1)
    # Accumulate per-fold stacked predictions multiplicatively ...
    scores_fin = scores_fin * l1_stacked.predict(Xstacks)
# ... and take the nF-th root: geometric mean over the folds.
scores_fin = scores_fin ** (1/nF)
# Blend with an externally produced SVM solution (its SalePrice is taken
# to log space first so both factors are comparable).
svm_solution = pd.read_csv('../input/svm-solution-32/svm_solution_32.csv')
svm_solution_ln = np.log(svm_solution['SalePrice'])
# Averaging stacked and SVM predictions (geometric mean of the two).
fin_score = np.sqrt(scores_fin * svm_solution_ln)
Id = scoredata['Id']
# Back-transform from log space; the -1 mirrors a log1p-style transform
# applied on the training side — presumably log(1 + price); confirm there.
fin_score = pd.DataFrame({'SalePrice': np.exp(fin_score)-1})
fin_data = pd.concat([Id,fin_score],axis=1)
```
## Brutal approach to deal with predictions close to outer range
```
# Heuristic tail adjustment: shrink the lowest ~0.42% of predictions by 23%
# and inflate the top 1% by 10%, pushing predictions toward the outer range.
q1 = fin_data['SalePrice'].quantile(0.0042)
q2 = fin_data['SalePrice'].quantile(0.99)
fin_data['SalePrice'] = fin_data['SalePrice'].apply(lambda x: x if x > q1 else x*0.77)
fin_data['SalePrice'] = fin_data['SalePrice'].apply(lambda x: x if x < q2 else x*1.1)
```
## Writing dataset for submission
```
fin_data.to_csv('House_Prices_submit.csv', sep=',', index = False)
```
| github_jupyter |
Below is code with a link to a happy or sad dataset which contains 80 images, 40 happy and 40 sad.
Create a convolutional neural network that trains to 100% accuracy on these images, which cancels training upon hitting training accuracy of >.999
Hint -- it will work best with 3 convolutional layers.
```
import tensorflow as tf
import os
import zipfile
from os import path, getcwd, chdir

# DO NOT CHANGE THE LINE BELOW. If you are developing in a local
# environment, then grab happy-or-sad.zip from the Coursera Jupyter Notebook
# and place it inside a local folder and edit the path to that location
# NOTE(review): this rebinding shadows the `path` name imported from os above.
path = f"{getcwd()}/../tmp2/happy-or-sad.zip"

# Extract the dataset to /tmp/h-or-s (consumed by flow_from_directory below).
zip_ref = zipfile.ZipFile(path, 'r')
zip_ref.extractall("/tmp/h-or-s")
zip_ref.close()
# GRADED FUNCTION: train_happy_sad_model
def train_happy_sad_model():
# Please write your code only where you are indicated.
# please do not remove # model fitting inline comments.
DESIRED_ACCURACY = 0.999
class MyCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if (logs.get('acc') > DESIRED_ACCURACY):
print("\n Reuired 99% accuracy reached, so training cancelled!")
self.model.stop_training = True
callbacks = MyCallback()
# This Code Block should Define and Compile the Model. Please assume the images are 150 X 150 in your implementation.
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
from tensorflow.keras.optimizers import RMSprop
model.compile(optimizer=RMSprop(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
# This code block should create an instance of an ImageDataGenerator called train_datagen
# And a train_generator by calling train_datagen.flow_from_directory
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255)
# Please use a target_size of 150 X 150.
train_generator = train_datagen.flow_from_directory(
'/tmp/h-or-s',
target_size=(150, 150),
batch_size=64,
class_mode='binary'
)
# Expected output: 'Found 80 images belonging to 2 classes'
# This code block should call model.fit_generator and train for
# a number of epochs.
# model fitting
history = model.fit_generator(
train_generator, epochs=20, callbacks=[callbacks]
)
# model fitting
return history.history['acc'][-1]
# The expected output: "Reached 99.9% accuracy so cancelling training!"
train_happy_sad_model()
```
| github_jupyter |
# Intuition for the Maximum Mean Discrepancy two-sample test
_Thomas Viehmann_
This note sketches the intuition behind [A. Gretton et al.: A Kernel Two-Sample Test. JMLR 2012](http://www.gatsby.ucl.ac.uk/~gretton/mmd/mmd.htm).
Given a (high-dimensional) space $\mathbb{R}^d$ and iid samples $X_i \in \mathbb{R}^d, i=1,...,N_X$ sampled from $X \sim P_X$ and $Y_i \in \mathbb{R}^d, i=1,...,N_Y$ sampled from $Y \sim P_Y$, we may wish to make a statistical test of the hypothesis that $P_X$ is different from $P_Y$.
For simplicity, we stay in the classical, frequentist scheme.
So let us import the world:
```
import torch
%matplotlib inline
from matplotlib import pyplot
import tqdm
```
And then we can sample a few points (to facilitate plotting, we'll operate in 1-d, so not that high dimensional, but hey.)
```
# Two Beta distributions with different parameters — so the null hypothesis
# (identical distributions) is false by construction in this demo.
dist_x = torch.distributions.Beta(2, 5)
dist_y = torch.distributions.Beta(5, 5)
# 15 i.i.d. samples from each.
x = dist_x.sample((15,))
y = dist_y.sample((15,))
# Plotting grid that extends beyond the Beta support [0, 1].
z = torch.linspace(-0.5, 1.5, 100)
pyplot.scatter(x, torch.zeros_like(x), marker='+')
raw_density_x = dist_x.log_prob(z).exp()
raw_density_y = dist_y.log_prob(z).exp()
# log_prob is NaN outside the support; replace those with density 0 for plotting.
density_x = torch.where(raw_density_x.isnan(), torch.tensor(0.0), raw_density_x)
density_y = torch.where(raw_density_y.isnan(), torch.tensor(0.0), raw_density_y)
pyplot.plot(z, density_x)
pyplot.plot(z, density_y)
```
One technique for estimating (the density of) $P_X$ (with $P_Y$ being analogous) is kernel density estimation. Here we use a kernel, for example the Gaussian or Radial Basis Function (RBF — but note that sometimes RBF is considered a more general class) kernel $K(x,y) = C(\sigma, d) \exp\left(- \frac{|x-y|^2}{2\sigma^2}\right)$, for some fixed standard deviation $\sigma > 0$. We will not worry about the choice of $\sigma$ for now.
We can estimate $P_X$ as $\hat P_X(x) = \frac{1}{N_x} \sum_{i=1}^N K(X_i, x)$.
```
sigma = 0.1
raw_hat_p_x = torch.exp(-((x[None] - z[:, None])**2)/(2*sigma**2)).sum(1)
hat_p_x = (raw_hat_p_x / raw_hat_p_x.sum() / (z[1]-z[0]))
pyplot.plot(z, hat_p_x)
pyplot.plot(z, density_x)
```
Given our two samples, $X_i$ and $Y_i$, we may do this for both, so we have two probability densities $\hat P_X$ and $\hat P_Y$. Now, even if $P_X = P_Y$, we would not expect exact equality $\hat P_X = \hat P_Y$ due to the sampling and estimation error. But we would expect that they are somehow _close_.
To measure this closeness, we can look at our kernel again. It turns out that this is a so-called [reproducing kernel](https://en.wikipedia.org/wiki/Reproducing_kernel_Hilbert_space): that is, if we define the two functions $K_x, K_y : \mathbb{R}^d \rightarrow \mathbb{R}$, $K_x(y) := K(x,y) =: K_y(x)$, we can define a scalar product on the Hilbert space spanned by such functions by setting $\langle K_x, K_y \rangle_{\mathcal{H}} := K(x,y)$; this space is called a reproducing kernel Hilbert space in the literature. It turns out that for any function $f \in \mathcal{H}$, we have $f(x) = \langle f, K_x \rangle_\mathcal{H}$.
Clearly, our estimated distributions $\hat P_X$ and $\hat P_Y$ are in $\mathcal{H}$, so we can measure closeness in the norm of $\mathcal{H}$.
This leads to the maximum mean discrepancy test statistic $\widehat{MMD} = \| \hat P_X - \hat P_Y \|_{\mathcal{H}}^2$. By the above representation, we can write
$$
\begin{aligned}
\widehat{MMD} &= \| \hat P_X - \hat P_Y \|_{\mathcal{H}}^2 \\
&= \langle \hat P_X - \hat P_Y, \hat P_X - \hat P_Y \rangle_{\mathcal{H}} \\
&= \langle \hat P_X , \hat P_X \rangle_{\mathcal{H}} + \langle \hat P_Y, \hat P_Y \rangle_{\mathcal{H}}
- 2 \langle \hat P_X , \hat P_Y \rangle_{\mathcal{H}} \\
&= \sum_{i=1}^{N_X} \sum_{j=1}^{N_X} K(X_i, X_j) + \sum_{i=1}^{N_Y} \sum_{j=1}^{N_Y} K(Y_i, Y_j)
- 2 \sum_{i=1}^{N_X} \sum_{j=1}^{N_Y} K(X_i, Y_j).
\end{aligned}
$$
This is relatively straightforward to compute.
```
def mmd(x, y, sigma):
    """Kernel MMD^2 estimate between samples x [n, d] and y [m, d] using an
    RBF kernel with bandwidth sigma.

    Compare kernel MMD paper and code:
    A. Gretton et al.: A kernel two-sample test, JMLR 13 (2012)
    http://www.gatsby.ucl.ac.uk/~gretton/mmd/mmd.htm
    """
    n, d = x.shape
    m, d2 = y.shape
    assert d == d2
    # One Gram matrix over the pooled (detached) sample, with a small
    # diagonal jitter of 1e-5 as in the original implementation.
    pooled = torch.cat([x.detach(), y.detach()], dim=0)
    sq_dists = torch.cdist(pooled, pooled, p=2.0) ** 2
    gram = torch.exp(sq_dists * (-1 / (2 * sigma ** 2))) + torch.eye(n + m) * 1e-5
    xx = gram[:n, :n].sum()
    yy = gram[n:, n:].sum()
    xy = gram[:n, n:].sum()
    # The diagonals (each entry ~1) are deliberately kept; the scaling matches
    # (3) in Gretton et al. Note their reference code uses the biased (and
    # differently scaled) MMD. sigma should already be "squared-in" per the
    # Gretton et al. median heuristic.
    return xx / (n * (n - 1)) + yy / (m * (m - 1)) - 2 * xy / (n * m)
```
Gretton et. al. recommend to set the parameter $\sigma$ to the median distance between points $\sigma = \mathrm{Median(|Z_i - Z_j|)/2}$ where $Z_\cdot$ is the combined sample of $X_\cdot$ and $Y_\cdot$. We stick with this recommendation without further questioning. In practice we might take the median of a reasonable subset (e.g. 100 samples).
```
# Median heuristic for the bandwidth (Gretton et al.): half the median of the
# pairwise distances over the pooled sample; the first 100 distances stand in
# for a random subset here.
dists = torch.pdist(torch.cat([x, y], dim=0)[:,None])
sigma = dists[:100].median()/2
our_mmd = mmd(x[:, None], y[:, None], sigma)
our_mmd
```
## Detecting different distributions
But now we need to make precise when $\hat P_X$ and $\hat P_Y$ are far enough apart for us to say with some confidence that $P_X$ and $P_Y$ are distinct.
Clearly, one option, in particular for evaluation of a detector, is to take the Receiver Operating Characteristic point of view and consider what any threshold for $\widehat{MMD}$ means in terms of true positives and false positives. This is what we do in our experiments.
## Bootstrapping
The other route is to try to give a threshold for a given confidence level or equivalently to convert $\widehat{MMD}$ values (given the sample sizes) into $p$-values. We could try to derive expressions for this (and earlier papers of the same group of authors do, see the link above), but the conceptually easiest way is to sample from the distribution $\widehat{MMD}$ under the null-hypothesis using bootstrapping. In this technique, we approximate sampling from the null-hypothesis by shuffling between the $X_\cdot$ and $Y_\cdot$, so that both the $x$ and $y$ argument come from the same distribution. If one sample is sufficiently large (e.g. the training sample), we might compute the thresholds just on that, too.
*Note*: To do this efficiently, it is recommended to use custom CPU or GPU kernels.
```
# Permutation bootstrap of the null distribution: shuffle the pooled sample so
# both "groups" come from the same mixture, then recompute the MMD statistic.
N_X = len(x)
N_Y = len(y)
xy = torch.cat([x, y], dim=0)[:, None].double()
mmds = []
for i in tqdm.tqdm(range(1000)):
    xy = xy[torch.randperm(len(xy))]
    mmds.append(mmd(xy[:N_X], xy[N_X:], sigma).item())
mmds = torch.tensor(mmds)
pyplot.hist(mmds.numpy(), bins=20)
```
With the empirical distribution, we can compute the threshold for a given probability $p$.
```
torch.quantile(mmds, 0.95) # threshold for 5% significance
```
Or we may assign a $p$-value to our observation:
```
(our_mmd < mmds).float().mean()
```
This concludes our short tour of the MMD statistic and the derived two-sample test.
I hope you enjoyed it, don't hesitate to E-Mail <tv@mathinf.eu> for questions and comments.
| github_jupyter |
# [Angle closure Glaucoma Evaluation Challenge](https://age.grand-challenge.org/Details/)
## Scleral spur localization Baseline (RCNN)
- To keep model training stable, images with coordinate == -1, were removed.
- For real inference, you MIGHT keep all images in val_file_path file.
## requirement install
```
!pip install xlrd
!pip install tqdm
!pip install pycocotools
```
## Zip File Extract
Assume `Training100.zip` and `Validation_ASOCT_Image.zip` are stored @ `./AGE_challenge Baseline/datasets/`
```
!unzip -q ../datasets/Training100.zip -d ../datasets/
!unzip -q ../datasets/Validation_ASOCT_Image.zip -d ../datasets/
```
# Explore Data
```
import numpy as np
import csv
import matplotlib.pyplot as plt
import cv2
import os, shutil
import pprint
import coco_parser
import json
import pandas as pd
%matplotlib inline
# Dataset layout: raw AS-OCT images plus an Excel sheet of scleral-spur
# coordinates; convert the sheet to CSV, then read it into a list of rows.
data_root_path = "../datasets/Training100/"
xlsx_file_path = os.path.join(data_root_path, "Training100_Location.xlsx")
image_path = os.path.join(data_root_path, "ASOCT_Image")
label_file_path = os.path.join(data_root_path, "train_loc.csv")
# COCO-style output folders/files consumed by the detection baseline.
train_file_path = os.path.join(data_root_path, "train2017")
val_file_path = os.path.join(data_root_path, "val2017")
json_path = os.path.join(data_root_path, "annotations")
train_json_path = os.path.join(json_path, "instances_train2017.json")
val_json_path = os.path.join(json_path, "instances_val2017.json")

xlsx_file = pd.read_excel(xlsx_file_path)
xlsx_file.to_csv(label_file_path,
                 index=False, columns=['ASOCT_Name', 'X1', 'Y1', 'X2', 'Y2'])

# data_list[0] is the header; each other row is
# [file_name, left_x, left_y, right_x, right_y] (all strings from csv).
data_list = []
with open(label_file_path,'r') as f:
    lines=csv.reader(f)
    for key, line in enumerate(lines):
        data_list.append(line)
pprint.pprint(data_list[:2])
plt.figure(figsize=(8, 5))
# Show one labelled sample: the image with both landmarks, plus a 200x200
# binary mask centred on the left landmark.
file_name, l_x, l_y, r_x, r_y = data_list[1]
# [:,:,::-1] flips BGR (cv2) to RGB for matplotlib.
img = cv2.imread(os.path.join(image_path, file_name))[:,:,::-1]
binary_mask = np.zeros((img.shape[0], img.shape[1]))
l_x, l_y = int(float(l_x)), int(float(l_y))
binary_mask[l_y - 100 : l_y + 100, l_x - 100 : l_x + 100] = 1
plt.figure(figsize=(8,8))
plt.subplot(2,1,1)
plt.imshow(img)
plt.scatter(float(l_x), float(l_y), c='r')
plt.scatter(float(r_x), float(r_y), c='r')
plt.subplot(2,1,2)
plt.imshow(binary_mask)
```
# Train/Val split
```
def train_val_split(data_list, train_ratio=0.8, shuffle_seed=42):
    """Split label rows into train/val lists, holding out whole testees.

    Rows are grouped by the testee id (the part of the file name before the
    first '-'), so no testee appears in both splits.

    Bug fix: `shuffle_seed` was accepted but never used, so the split was
    nondeterministic. We now seed a local RandomState with it, and sort the
    testee ids so the draw does not depend on set iteration order.

    Args:
        data_list: header row followed by rows whose first field is the file name.
        train_ratio: fraction of testees kept for training.
        shuffle_seed: seed for the held-out-testee draw.

    Returns:
        (train_list, val_list) of label rows (header excluded).
    """
    # Sorted for a stable ordering; a set alone varies with PYTHONHASHSEED.
    testee_list = sorted(set(line[0].split("-")[0] for line in data_list[1:]))
    rng = np.random.RandomState(shuffle_seed)
    # NOTE: int() truncates, so float rounding of (1 - train_ratio) can yield
    # one fewer held-out testee than the exact fraction — kept as original.
    val_testee_idx = set(rng.choice(
        testee_list, int(len(testee_list) * (1 - train_ratio)), replace=False))
    train_list = []
    val_list = []
    for line in data_list[1:]:
        file_name = line[0]
        if file_name.split("-")[0] in val_testee_idx:
            val_list.append(line)
        else:
            train_list.append(line)
    return train_list, val_list
# Apply the split and report the resulting sizes.
train_data_list, val_data_list = train_val_split(data_list)
print(len(train_data_list))
print(len(val_data_list))
def center_split(img, l_point, r_point):
    """Cut an (H, W, C) image into left and right H x H square crops.

    img: 3D nparray
    l_point/r_point: (x, y) landmarks. The left landmark keeps its original
    coordinates (the left crop shares the image origin); the right landmark's
    x is shifted by -(W - H) into the right crop's frame.
    """
    nrow, ncol, ch = img.shape
    left_img = img[:, :nrow, :]
    right_img = img[:, -nrow:, :]
    # Left point unchanged; translate the right point into the right crop.
    shifted_r_point = (r_point[0] - (ncol - nrow), r_point[1])
    return left_img, right_img, l_point, shifted_r_point
# Split each training image into left/right halves, save them, and record
# the per-half landmark as [file_name, x, y].
split_train_list = []
if os.path.exists(train_file_path):
    shutil.rmtree(train_file_path)  # start from a clean output folder
os.mkdir(train_file_path)
for item in train_data_list:
    file_name, l_x, l_y, r_x, r_y = item
    img = cv2.imread(os.path.join(image_path, file_name))
    # split
    left_img, right_img, l_point, r_point = center_split(img, (float(l_x), float(l_y)), (float(r_x), float(r_y)))
    cv2.imwrite(os.path.join( train_file_path, file_name.split(".")[0]+'_left.jpg'), left_img)
    cv2.imwrite(os.path.join( train_file_path, file_name.split(".")[0]+'_right.jpg'), right_img)
    split_train_list.append([file_name.split(".")[0]+'_left.jpg', *l_point])
    split_train_list.append([file_name.split(".")[0]+'_right.jpg', *r_point])
plt.figure(figsize=(15,5))
# Sanity-check one split image: landmark overlay plus its 200x200 mask.
file_name, l_x, l_y = split_train_list[1]
img = cv2.imread(os.path.join(train_file_path, file_name))[:,:,::-1]
binary_mask = np.zeros((img.shape[0], img.shape[1]))
l_x, l_y = int(float(l_x)), int(float(l_y))
binary_mask[l_y - 100 : l_y + 100, l_x - 100 : l_x + 100] = 1
plt.figure(figsize=(8,8))
plt.subplot(1,2,1)
plt.imshow(img)
plt.scatter(float(l_x), float(l_y), c='r')
plt.subplot(1,2,2)
plt.imshow(binary_mask)
# Same left/right split for the validation images.
split_val_list = []
if os.path.exists(val_file_path):
    shutil.rmtree(val_file_path)  # start from a clean output folder
os.mkdir(val_file_path)
for item in val_data_list:
    file_name, l_x, l_y, r_x, r_y = item
    img = cv2.imread(os.path.join(image_path, file_name))
    # split
    left_img, right_img, l_point, r_point = center_split(img, (float(l_x), float(l_y)), (float(r_x), float(r_y)))
    cv2.imwrite(os.path.join( val_file_path, file_name.split(".")[0]+'_left.jpg'), left_img)
    cv2.imwrite(os.path.join( val_file_path, file_name.split(".")[0]+'_right.jpg'), right_img)
    split_val_list.append([file_name.split(".")[0]+'_left.jpg', *l_point])
    split_val_list.append([file_name.split(".")[0]+'_right.jpg', *r_point])
# Remove -1 in get_coco_dict()
# Convert the split file lists into COCO-format annotation dicts; each
# landmark becomes a box of box_range (presumably +/-100 px — confirm in
# coco_parser). Rows with coordinate == -1 are dropped inside get_coco_dict().
coco_train_dict = coco_parser.get_coco_dict(train_file_path, split_train_list, box_range=100)
coco_val_dict = coco_parser.get_coco_dict(val_file_path, split_val_list, box_range=100)
if not os.path.exists(json_path):
    os.mkdir(json_path)
with open(train_json_path, 'w+') as output_json_file:
    json.dump(coco_train_dict, output_json_file)
with open(val_json_path, 'w+') as output_json_file:
    json.dump(coco_val_dict, output_json_file)
```
| github_jupyter |
## 13) More NumPy Plus Linear Algebra Fundamentals
Related references:
- https://jakevdp.github.io/PythonDataScienceHandbook/02.04-computation-on-arrays-aggregates.html
- https://jakevdp.github.io/PythonDataScienceHandbook/02.05-computation-on-arrays-broadcasting.html
- [Feature Engineering for Machine Learning](https://search.lib.umich.edu/catalog/record/016260792)
- [The Manga Guide to Linear Algebra](https://www.safaribooksonline.com/library/view/the-manga-guide/9781457166730/)
- [Introduction to Linear Algebra by Gilbert Strang](http://math.mit.edu/~gs/linearalgebra/)
- [Advanced Engineering Mathematics by Erwin Kreyszig](https://search.lib.umich.edu/catalog/record/016256884)
## First, reminder to submit evaluations
You have until midnight tonight! As of Tuesday, 12/27 of you have completed the quiz I set up to self-report taking it, earning participation points.
## Second, let's discuss the individual project
Details posted on Canvas and [Github](project_instructions.ipynb).
## The simplicity of NumPy math
As we've discussed, Numpy allows us to perform math with arrays without writing loops, speeding programs and programming.
As always, array sizes must be compatible. Binary operations are performed on an element-by-element basis:
```
import numpy as np

# Element-by-element addition of two same-shaped arrays.
a = np.array([0, 1, 2])
b = np.array([5, 5, 5])
print(a + b)
```
### Broadcasting: How NumPy will make compatible arrays
We can also perform these operations with a scalar; NumPy will "broadcast" it to the correct size for the binary operation. In the case below, it will treat `5` as the ndarray `[5, 5, 5]` while never actually creating such an array.
```
print(a + 5)
```
A visual to describe broadcasting:

The light boxes represent the broadcasted values: again, this extra memory is not actually allocated in the course of the operation, but it can be useful conceptually to imagine that it is.
As shown in the visual above, broadcasting can also be done in higher dimensions:
```
# Broadcasting in 2-D: the 1-D array `a` stretches across each row of m.
m = np.ones((3, 3))
m + a
print(a)
# Reshaping b to a (3, 1) column broadcasts against (3,) to give a 3x3 result.
print(b.reshape((3, 1)))
a + b.reshape((3, 1))
```
### More examples of NumPy's math knowledge
```
# Demonstrate NumPy's elementwise math and aggregation functions on a plain list.
x = [1, 2, 4, 10]
print("x =", x)
print("e^x =", np.exp(x))
print("2^x =", np.exp2(x))
print("3^x =", np.power(3, x))
print("ln(x) =", np.log(x))
print("log2(x) =", np.log2(x))
print("log10(x) =", np.log10(x))
print("sum(x) =", np.sum(x))
print("min(x) =", np.min(x))
# Bug fix: this line previously printed np.sum(x) under the "max" label.
print("max(x) =", np.max(x))
print("mean(x) =", np.mean(x))
print("std(x) =", np.std(x))
```
### What about math with NaNs?
NaN = not a number, and you can specify NaN with np.nan.
```
# Insert a NaN into a random matrix; NaN propagates through arithmetic,
# so the corresponding entry of m + n.T is also NaN.
m = np.random.random((3, 4))
n = np.random.random((4, 3))
m[2, 3] = np.nan
print(m)
m + n.T
```
Let's check if these other functions work with `np.nan`:
```
# Elementwise functions keep NaN in place, while plain aggregations
# (sum/min/max/mean/std) return nan when any element is nan.
print("m =", m)
print("e^m =", np.exp(m))
print("2^m =", np.exp2(m))
print("3^m =", np.power(3, m))
print("ln(m) =", np.log(m))
print("log2(m) =", np.log2(m))
print("log10(m) =", np.log10(m))
print("sum(m) =", np.sum(m))
print("min(m) =", np.min(m))
print("max(m) =", np.max(m))
print("mean(m) =", np.mean(m))
print("std(m) =", np.std(m))
```
Not all did, but there are "NaN=safe" versions of functions! That is, they ignore the NaNs and carry on.
|Function Name | NaN-safe Version | Description |
|-------------------|---------------------|--------------------------------------------------|
| ``np.sum`` | ``np.nansum`` | Compute sum of elements |
| ``np.prod`` | ``np.nanprod`` | Compute product of elements |
| ``np.mean`` | ``np.nanmean`` | Compute mean of elements |
| ``np.std`` | ``np.nanstd`` | Compute standard deviation |
| ``np.var`` | ``np.nanvar`` | Compute variance |
| ``np.min`` | ``np.nanmin`` | Find minimum value |
| ``np.max`` | ``np.nanmax`` | Find maximum value |
| ``np.argmin`` | ``np.nanargmin`` | Find index of minimum value |
| ``np.argmax`` | ``np.nanargmax`` | Find index of maximum value |
| ``np.median`` | ``np.nanmedian`` | Compute median of elements |
| ``np.percentile`` | ``np.nanpercentile``| Compute rank-based statistics of elements |
| ``np.any`` | N/A | Evaluate whether any elements are true (see note)|
| ``np.all`` | N/A | Evaluate whether all elements are true (see note)|
| N/A | ``np.isnan`` | Test for NaN; returns a boolean array |
*Note*: NaN, positive infinity and negative infinity evaluate to True because these are not equal to zero.
```
# The nan* variants ignore NaN entries instead of propagating them.
print("sum(m) =", np.nansum(m))
print("min(m) =", np.nanmin(m))
print("max(m) =", np.nanmax(m))
print("mean(m) =", np.nanmean(m))
print("std(m) =", np.nanstd(m))
```
These are a few examples, but just ask the Internet if there is anything you need and you'll get an answer, even if that is to use `scipy.special` as we had to for `erfc`. Let's focus on a particular kind of math NumPy knows well: linear algebra.
## Linear algebra
### Overview

This and other comics from [The Manga Guide to Linear Algebra](https://www.safaribooksonline.com/library/view/the-manga-guide/9781457166730/)
Importantly, they are great for solving linear equations, especially systems with the same number of unknowns as independent equations. They are great for turning problems into forms that are easily solved by computers!

### Fundamentals
#### Inverse Functions


#### Linear Transformations
Let $x_i$ and $x_j$ be two arbitrary elements of the set $X$, $c$ be any real number, and $f$ be a function from $X$ to $Y$. $f$ is called a *linear transformation* from $X$ to $Y$ if is satisfies both:
1. $f(x_i) + f(x_j) = f(x_i + x_j)$
1. $cf(x_i) = f(cx_i)$

## Matrices







### Matrix Addition

```
# Elementwise subtraction of two 1x2 matrices.
a = np.array([[10, 10]])
b = np.array([[3, 6]])
print(a, b)
print(a - b)
```
### Scalar Multiplication

```
c = np.arange(1, 7).reshape((3, 2))
print(c)
print(10 * c)
```
### Matrix Multiplication


```
d = np.array([[8, -3], [2, 1]])
e = np.array([[3, 1], [1, 2]])
print(d)
print(e)
print(d * e)
print(np.multiply(e, d))
np.matmul(d, e)
np.matmul(e, d)
```
#### Cautions
1. In general $ \mathbf{AB} \neq \mathbf{BA}$
1. $\mathbf{AB} = 0 $ does not necessarily imply that $\mathbf{A} = 0 $ or $\mathbf{B} = 0 $ or $\mathbf{BA} = 0 $
1. $\mathbf{AC} = \mathbf{AD} $ does not necessarily imply that $\mathbf{C} = \mathbf{D} $, even when $\mathbf{A} \neq 0 $
(more about this later)
```
f = np.array([[1, 1], [2, 2]])
g = np.array([[-1, 1], [1, -1]])
print(f)
print(g)
np.matmul(f, g)
np.matmul(g, f)
h = np.array([[2, 1], [2, 2]])
k = np.array([[3, 0], [1, 3]])
print(f)
print(h)
print(k)
print(np.matmul(f, h))
print(np.matmul(f, k))
```
#### Multiplication properties of numbers that do hold true for matrices
1. $\mathbf{A}(\mathbf{BC}) = (\mathbf{AB})\mathbf{C} = \mathbf{ABC}$
1. $k\mathbf{AB} = \mathbf{A}k\mathbf{B}$
1. $(\mathbf{A} + \mathbf{B})\mathbf{C} = \mathbf{AC} + \mathbf{BC}$
1. $\mathbf{C}(\mathbf{A} + \mathbf{B}) = \mathbf{CA} + \mathbf{CB}$
### Inner product of vectors
When a $1 \times m$ matrix (aka a row vector) is multiplied by an $m \times 1$ matrix (aka a column vector), we get a $1 \times 1$ product, called the *inner product* or *dot product*, denoted $\mathbf{a} \bullet \mathbf{b}$.
```
np.dot([4, -1, 5], [2, 5, 8])
```
The power of matrix multiplication is in linear transformations. That topic will come after we finish matrices and talk about vectors.
### Special Matrices
An $n \times n$ matrix is called a square matrix. All others are rectangular matrices.

We already covered 1 and 2. An interesting tidbit: the transpose of a product equals the product of the transposed factors, taken in reverse order:
$$(\mathbf{A} \mathbf{B})^T = \mathbf{B}^T \mathbf{A}^T $$
Would you wager a guess as to:
- What a symmetric matrix is?
- A skew-symmetric matrix?
- What property must they have?
**Answers:**
- $\mathbf{A}^T = \mathbf{A}$
- $\mathbf{A}^T = -\mathbf{A}$
- They must be square matrices
How about upper and lower triangular matrices?
Upper triangular matrices have 0 for all elements below the diagonal, and lower triangular matrices have 0 for all elements above the diagonal.

A nice shortcut available for diagonal matrices:
```
f = np.diagflat([[2, 3]])
print(f)
np.linalg.matrix_power(f, 3)
```

#### What is the identity matrix and why is it called that?
```
eye = np.eye(2, dtype=int)
print(d)
print(eye)
np.matmul(d, eye)
np.matmul(eye, d)
```

If the product of two square matrices is an identity matrix, then the two factor matrices are inverses of each other. This means that
$ \left( \begin{array}{ccc}
x_{11} & x_{12} \\
x_{21} & x_{22} \end{array} \right) $ is an inverse matrix to
$ \left( \begin{array}{ccc}
1 & 2 \\
3 & 4 \end{array} \right) $ if
$$ \left( \begin{array}{ccc}
1 & 2 \\
3 & 4 \end{array} \right)
\left( \begin{array}{ccc}
x_{11} & x_{12} \\
x_{21} & x_{22} \end{array} \right)
= \left( \begin{array}{ccc}
1 & 0 \\
0 & 1 \end{array} \right) $$



*Note:* the example above has one solution. Singular cases (definition below) have none or infinite solutions.

Now, using Gaussian elimination (e.g. the sweeping method) find, the inverse matrix of
$ \left( \begin{array}{ccc}
3 & 1 \\
1 & 2 \end{array} \right) $
```
g = np.array([[3, 1], [1, 2]])
h = np.linalg.inv(g)
print(h)
# checking our work
np.matmul(g, h)
np.matmul(h, g)
```



If **A** has an inverse, than **A** is a *nonsingular matrix*. Similarly, if **A** has no inverse, then **A** is called a *singular matrix*.
If **A** has an inverse, the inverse is unique.
### Calculating determinants
From: https://www.mathsisfun.com/algebra/matrix-determinant.html

```
k = np.array([[4, 6], [3, 8]])
np.linalg.det(k)
```

```
m = np.array([[6, 1, 1], [4, -2, 5], [2, 8, 7]])
print(m)
np.linalg.det(m)
```

#### Fun fact about determinants:
For any $n \times n$ matrices **A** and **B**, det(**AB**) = det(**BA**) = det(**A**) det(**B**).
## Next up: Vectors and Linear Transformations!
| github_jupyter |
# NNabla Models Finetuning Tutorial
Here we demonstrate how to perform finetuning using nnabla's pre-trained models.
## Load the model
Loading the model is very simple. All you need is just 2 lines.
```
# Loads ImageNet-pretrained ResNet18; weights are downloaded on first use.
from nnabla.models.imagenet import ResNet18
model = ResNet18()
```
You can choose other ResNet models such as `ResNet34`, `ResNet50`, by specifying the model's name as an argument. Of course, you can choose other pretrained models as well. See the [Docs](https://nnabla.readthedocs.io/en/latest/python/api/models/imagenet.html).
**NOTE**: If you use the `ResNet18` for the first time, nnabla will automatically download the weights from `https://nnabla.org` and it may take up to a few minutes.
## Dataset
In this tutorial, we use [Caltech101](http://www.vision.caltech.edu/Image_Datasets/Caltech101/) as the dataset for finetuning.
Caltech101 consists of more than 9,000 object images in total and each image belongs to one of 101 distinct categories or "clutter" category. We use images from 101 categories for simple classification.
We have a script named `caltech101_data.py` which can automatically download the dataset and store it in `nnabla_data`.
If you have your own dataset and `DataIterator` which can load your data, you can use it instead.
```
# caltech101_data.py downloads Caltech101 and defines data_iterator_caltech101.
run caltech101_data.py
batch_size = 32 # we set batch_size = 32
all_data = data_iterator_caltech101(batch_size)
```
Since there is no separate data for training and validation in caltech101, we need to *manually* split it up.
Here, we will split the dataset as the following way; **80% for training, and 20% for validation.**
```
num_samples = all_data.size
num_train_samples = int(0.8 * num_samples) # Take 80% for training, and the rest for validation.
num_class = 101
data_iterator_train = all_data.slice(
rng=None, slice_start=0, slice_end=num_train_samples)
data_iterator_valid = all_data.slice(
rng=None, slice_start=num_train_samples, slice_end=num_samples)
```
Now we have model and data!
### Optional: Check the image in the dataset
Let's take a look at what kind of images are included in the dataset. You can get images by `DataIterator`'s method, `next`
```
import matplotlib.pyplot as plt
%matplotlib inline
images, labels = data_iterator_train.next()
sample_image, sample_label = images[0], labels[0]
plt.imshow(sample_image.transpose(1,2,0))
plt.show()
print("image_shape: {}".format(sample_image.shape))
print("label_id: {}".format(sample_label))
```
### Preparing Graph Construction
Let's start with importing basic modules.
```
import nnabla as nn
# Optional: If you want to use GPU
from nnabla.ext_utils import get_extension_context
ctx = get_extension_context("cudnn")
nn.set_default_context(ctx)
ext = nn.ext_utils.import_extension_module("cudnn")
```
### Create input Variables for the Network
Now we are going to create the input variables.
```
channels, image_height, image_width = sample_image.shape # use info from the image we got
# input variables for the validation network
image_valid = nn.Variable((batch_size, channels, image_height, image_width))
label_valid = nn.Variable((batch_size, 1))
input_image_valid = {"image": image_valid, "label": label_valid}
# input variables for the training network
image_train = nn.Variable((batch_size, channels, image_height, image_width))
label_train = nn.Variable((batch_size, 1))
input_image_train = {"image": image_train, "label": label_train}
```
### Create the training graph using the pretrained model
If you take a look at the [Model's API Reference](https://nnabla.readthedocs.io/en/latest/python/api/models/imagenet.html), you can find `use_up_to` option. Specifying one of the pre-defined strings when calling the model, the computation graph will be constructed up to the layer you specify. For example, in case of `ResNet18`, you can choose one of the following as the last layer of the graph.
- 'classifier' (default): The output of the final affine layer for classification.
- 'pool': The output of the final global average pooling.
- 'lastconv': The input of the final global average pooling without ReLU activation..
- 'lastconv+relu': Network up to 'lastconv' followed by ReLU activation.
For finetuning, it is common to replace only the upper layers with the new (not trained) ones and re-use the lower layers with their pretrained weights. Also, pretrained models have been trained on a classification task on ImageNet, which has 1000 categories, so the output of the `classifier` layer has the output shape `(batch_size, 1000)` that wouldn't fit our current dataset.
For this reason, here we construct the graph up to the `pool` layer, which corresponds to the `global average pooling` layer in the original graph, and connect it to the additional affine (fully-connected) layer for 101-way classification. For finetuning, it is common to train only the weights for the newly added layers (in this case, the last affine layer), but in this tutorial, we will update the weights for *all* layers in the graph. Also, when creating a training graph, you need to set `training=True`.
```
import nnabla.parametric_functions as PF
# Reuse the pretrained backbone up to global average pooling and attach a
# fresh affine head for 101-way classification. training=True builds the
# training-mode graph.
y_train = model(image_train, force_global_pooling=True, use_up_to="pool", training=True)
with nn.parameter_scope("finetuning_fc"):
    pred_train = PF.affine(y_train, 101)  # adding the new affine layer to the graph
```
**NOTE**: You need to specify `force_global_pooling=True` when the input shape is different from what the model expects. You can check the model's default input shape by typing `model.input_shape`.
### Create the validation graph using the model
Creating the validation graph is almost the same. You simply need to change `training` flag to `False`.
```
# Validation graph: identical topology to the training graph, but with
# training=False (inference-mode behavior).
y_valid = model(image_valid,
                force_global_pooling=True, use_up_to="pool", training=False)
with nn.parameter_scope("finetuning_fc"):
    pred_valid = PF.affine(y_valid, 101)
# Keep the prediction buffer alive across forward(clear_buffer=True) calls.
pred_valid.persistent = True
```
### Define the functions for computing Loss and Categorical Error
```
import nnabla.functions as F
def loss_function(pred, label):
    """
    Return the mean softmax cross-entropy between `pred` logits and `label`.
    """
    loss = F.mean(F.softmax_cross_entropy(pred, label))
    return loss
# Loss and top-1 error nodes for both the validation and training graphs.
loss_valid = loss_function(pred_valid, label_valid)
top_1_error_valid = F.mean(F.top_n_error(pred_valid, label_valid))
loss_train = loss_function(pred_train, label_train)
top_1_error_train = F.mean(F.top_n_error(pred_train, label_train))
```
### Prepare the solver
```
import nnabla.solvers as S
# Momentum SGD (lr = 0.01) over *all* parameters: backbone + new head.
solver = S.Momentum(0.01)  # you can choose other solvers as well
solver.set_parameters(nn.get_parameters())
```
### Some setting for iteration
```
num_epoch = 10  # arbitrary
# Iterations per epoch and in total; number of full validation batches.
one_epoch = data_iterator_train.size // batch_size
max_iter = num_epoch * one_epoch
val_iter = data_iterator_valid.size // batch_size
```
### Performance before finetuning
Let's see how *well* the model works. Note that all the weights are pretrained on ImageNet except for the last affine layer.
First, prepare a function to show us the model's performance,
```
def run_validation(pred_valid, loss_valid, top_1_error_valid,
                   input_image_valid, data_iterator_valid,
                   with_visualized=False, num_visualized=3):
    """Run the whole validation set through the graph.

    Returns (mean top-1 error, mean loss) over the validation iterator.
    If with_visualized is True, also plots `num_visualized` consecutive
    samples from the last batch with their true/predicted label ids.
    """
    assert num_visualized < pred_valid.shape[0], "too many images to plot."
    # Number of full batches (batch size = pred_valid.shape[0]).
    val_iter = data_iterator_valid.size // pred_valid.shape[0]
    ve = 0.     # accumulated top-1 error
    vloss = 0.  # accumulated loss
    for j in range(val_iter):
        v_image, v_label = data_iterator_valid.next()
        input_image_valid["image"].d = v_image
        input_image_valid["label"].d = v_label
        # Evaluate loss and error in a single forward pass.
        nn.forward_all([loss_valid, top_1_error_valid], clear_no_need_grad=True)
        vloss += loss_valid.d.copy()
        ve += top_1_error_valid.d.copy()
    vloss /= val_iter
    ve /= val_iter
    if with_visualized:
        ind = 1
        # Pick a random window of `num_visualized` samples from the last batch.
        random_start = np.random.randint(pred_valid.shape[0] - num_visualized)
        fig = plt.figure(figsize=(12., 12.))
        for n in range(random_start, random_start + num_visualized):
            sample_image, sample_label = v_image[n], v_label[n]
            ax = fig.add_subplot(1, num_visualized, ind)
            # assumes v_image is (batch, channel, height, width) -- CHW to HWC
            ax.imshow(sample_image.transpose(1,2,0))
            with nn.auto_forward():
                predicted_id = np.argmax(F.softmax(pred_valid)[n].d)
            result = "true label_id: {} - predicted as {}".format(str(sample_label[0]), str(predicted_id))
            ax.set_title(result)
            ind += 1
        fig.show()
    return ve, vloss
_, _ = run_validation(pred_valid, loss_valid, top_1_error_valid, input_image_valid, data_iterator_valid, with_visualized=True)
```
As you can see, the model fails to classify images properly. Now, let's begin the finetuning and see how performance improves.
### Start Finetuning
Let's prepare the monitor for training.
```
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
# Log training/validation loss and error every 200 iterations to "tmp.monitor".
monitor = Monitor("tmp.monitor")
monitor_loss = MonitorSeries("Training loss", monitor, interval=200)
monitor_err = MonitorSeries("Training error", monitor, interval=200)
monitor_vloss = MonitorSeries("Test loss", monitor, interval=200)
monitor_verr = MonitorSeries("Test error", monitor, interval=200)
# Training-loop
for i in range(max_iter):
    image, label = data_iterator_train.next()
    input_image_train["image"].d = image
    input_image_train["label"].d = label
    nn.forward_all([loss_train, top_1_error_train], clear_no_need_grad=True)
    monitor_loss.add(i, loss_train.d.copy())
    monitor_err.add(i, top_1_error_train.d.copy())
    solver.zero_grad()
    loss_train.backward(clear_buffer=True)
    # update parameters: weight decay, then the momentum-SGD step
    solver.weight_decay(3e-4)
    solver.update()
    # Periodically evaluate on the validation set.
    if i % 200 == 0:
        ve, vloss = run_validation(pred_valid, loss_valid, top_1_error_valid,
                                   input_image_valid, data_iterator_valid,
                                   with_visualized=False, num_visualized=3)
        monitor_vloss.add(i, vloss)
        monitor_verr.add(i, ve)
```
As you can see, the loss and the error rate are decreasing as the finetuning progresses.
Let's see the classification result after finetuning.
```
# Re-run validation with visualization to inspect post-finetuning predictions.
_, _ = run_validation(pred_valid, loss_valid, top_1_error_valid, input_image_valid, data_iterator_valid, with_visualized=True)
```
You can see now the model is able to classify the image properly.
# Finetuning more
We have a convenient script named `finetuning.py`. By using this, you can try finetuning with different models **even on your original dataset**.
To do this, you need to prepare your own dataset and do some preprocessing. We will explain how to do this in the following.
## Prepare your dataset
Suppose you have a lot of images which can be used for image classification. You need to organize your data in a certain manner. Here, we will explain that with another dataset, [Stanford Dogs Dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/).
First, visit the official page and download `images.tar` (here is the [direct link](http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar)). Next, untar the archive and then you will see a directory named `Images`. Inside that directory, there are many subdirectories, and each subdirectory stores images which belong to one category. For example, the directory `n02099712-Labrador_retriever` contains Labrador retriever images only. So if you want to use your own dataset, you need to organize your images and directories in the same way, as shown below:
```
parent_directory
├── subdirectory_for_category_A
│   ├── image_0.jpg
│   ├── image_1.jpg
│   ├── image_2.jpg
│   └── ...
├── subdirectory_for_category_B
│   ├── image_0.jpg
│   └── ...
├── subdirectory_for_category_C
│   ├── image_0.jpg
│   └── ...
├── subdirectory_for_category_D
│   ├── image_0.jpg
│   └── ...
└── ...
```
The number of images in each category can vary; the counts do not have to be exactly the same. Once you arrange your dataset this way, you're good to go!
## Create image classification dataset using NNabla CLI
Now that you have prepared and organized your dataset, the only thing left to do is to create a `.csv` file which will be used by `finetuning.py`. To do so, you can use NNabla's [Python Command Line Interface](https://nnabla.readthedocs.io/en/latest/python/command_line_interface.html#create-image-classification-dataset). Just type the following.
```
nnabla_cli create_image_classification_dataset -i <path to parent directory> -o <output directory which contains "preprocessed" images> -c <number of channels> -w <width> -g <height> -m <padding or trimming> -s <whether apply shuffle or not> -f1 <name of the output csv file for training data> -f2 <name of the output csv file for test data> -r2 <ratio(%) of test data to training data>
```
If you do that on Stanford Dogs Dataset,
```
nnabla_cli create_image_classification_dataset -i Images -o arranged_images -c 3 -w 128 -g 128 -m padding -s true -f1 stanford_dog_train.csv -f2 stanford_dog_test.csv -r2 20
```
Note that output `.csv` file will be stored in the same directory you specified with -o option. For more information, please check the [docs](https://nnabla.readthedocs.io/en/latest/python/command_line_interface.html#create-image-classification-dataset).
After executing the command above, you can start finetuning on your dataset.
## Run finetuning
All you need is just to type one line.
```
python finetuning.py --model <model name> --train-csv <.csv file containing training data> --test-csv <.csv file containing test data>
```
It will execute finetuning on your dataset!
```
run finetuning.py --model ResNet34 --epoch 10 --train-csv ~/nnabla_data/stanford_dog_arranged/stanford_dog_train.csv --test-csv ~/nnabla_data/stanford_dog_arranged/stanford_dog_test.csv --shuffle True
```
## An example of how to use finetuning's result for inference
Once the finetuning is finished, let's use the result for inference! The script above saved the parameters every certain number of iterations that you specified. So now call the same model you trained, and this time use the finetuned parameters in the following way.
```
from nnabla.models.imagenet import ResNet34
import nnabla as nn
param_path = "params_XXX.h5"  # specify the path to the saved parameter (.h5)
model = ResNet34()
batch_size = 1  # just for inference
# Prepend the batch dimension to the model's expected (C, H, W) input shape.
input_shape = (batch_size, ) + model.input_shape
```
Then define an input Variable and a network for inference. Note that you need to construct the network exactly the same way as done in the finetuning script (layer configuration, parameter names, and so on).
```
x = nn.Variable(input_shape)  # input Variable
# Rebuild exactly the topology used during finetuning (backbone up to the
# pooling layer plus the 120-way affine head) so saved parameters match by name.
pooled = model(x, use_up_to="pool", training=False)
with nn.parameter_scope("finetuning"):
    with nn.parameter_scope("last_fc"):
        pred = PF.affine(pooled, 120)
```
Load the parameters which you finetuned above. You can use `nn.load_parameters()` to load them. Once you call this, the parameters stored in `params.h5` will be loaded into the global scope. You can check that the parameters are different before and after `nn.load_parameters()` by using `nn.get_parameters()`.
```
# Loading overwrites the pretrained weights in the global parameter scope
# with the finetuned ones before running inference.
nn.load_parameters(param_path)  # load the finetuned parameters
pred.forward()
```
| github_jupyter |
<small><i>June 2016 - This notebook was created by [Oriol Pujol Vila](http://www.maia.ub.es/~oriol). Source and [license](./LICENSE.txt) info are in the folder.</i></small>
# Backpropagation
```
#pip install tqdm
```
## Basic scheme
Consider the problem up to this point. Let us recall the three basic components of the algorithm.
+ **The model class**
+ **The loss function**
+ **The optimization algorithm**
Let us consider the following graph representation of the problem:
<img src = "./images/pipeline1.png" width = "200">
The model function is $f(x,\omega)$, where $\omega$ are the parameters to optimise. For example, in the case of a linear model we may have $f(x,\omega) = \sum_i \omega_i x_i = \omega^T x$. The loss function is represented with $\mathcal{L}(y,t)$, where $y=f(x,\omega)$ and $t$ is the target value. Remember that the loss function models the dissimilarity between the output of the model and the true value to predict. The last element of the learning algorithm is the optimisation algorithm. In our case is an algorithm with the goal of minimizing the dissimilarity between target value and the model output, i.e.
$$\underset{\omega}{\text{minimize}} \quad \mathcal{L}(f(x,\omega),t)$$
Let us refactor the code using OOP in order to take into account these three elements:
```
class model:
    """Model skeleton: evaluates f(x, w) for the learning pipeline."""
    def __init__(self):
        pass
    def forward(self,x):
        #Takes a data point and evaluates f(x,w)
        pass
class loss:
    """Loss skeleton: measures the dissimilarity L(y, t)."""
    def __init__(self):
        pass
    def evaluate(self,x,t):
        #Evaluates the loss function L(y,t)
        pass
class optimize:
    """Optimizer skeleton: searches for parameters minimizing the loss."""
    def __init__(self):
        pass
    def run(self, data, target, model, loss):
        #Takes a loss function and a model and finds the optimal parameters for a specific data set
        pass
```
We know one way of optimising algorithms in front of large scale data sets, i.e. stochastic subgradient methods. In short this is an iterative algorithm that updates the unknown variables proportional to minus the gradient magnitude at each iteration.
Let us fill in the gaps for a classical linear regression where the model is $f(x,\omega) = \omega^Tx$,
```
import numpy as np
class model:
    """Linear model f(x, w) = w^T x."""

    def __init__(self, w):
        # Parameter vector of the linear model.
        self.w = w

    def forward(self, x):
        """Evaluate the linear model on a data point (or row-matrix of points)."""
        prediction = np.dot(self.w, x.T)
        return prediction
```
using a least squares loss function $\mathcal{L}(y,t) = (t-y)^2$,
```
class loss:
    """Least-squares loss L(y, t) = (t - y)^2."""

    def __init__(self):
        pass

    def evaluate(self, model, x, t):
        """Run the model on x and return the squared error against target t."""
        residual = t - model.forward(x)
        return residual * residual
```
using stochastic gradient descend,
```
class optimize:
    """Pseudocode sketch of stochastic gradient descent.

    NOTE(review): intentionally incomplete -- the update line multiplies by a
    placeholder *string* and `eta` is undefined here; `xrange` is also
    Python 2 only. The runnable version appears later in the notebook.
    """
    def __init__(self,t):
        # number of SGD iterations
        self.num_iter = t
    def run(self, data, target, model, loss):
        #Takes a loss function and a model and finds the optimal parameters for a specific data set
        N_samples = data.shape[0]
        for t in xrange(self.num_iter):
            #Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx,:]
            yi = target[idx]
            #Step 2.- update the parameters to optimise
            model.w = model.w - eta * "gradient_L_omega(xi,yi)"
```
The gradient with respect to the parameter to optimise is $$\nabla_\omega\mathcal{L(f(x,\omega),t)} = (\frac{\partial \mathcal{L}}{\partial \omega_1},\frac{\partial \mathcal{L}}{\partial \omega_2}, \dots, \frac{\partial \mathcal{L}}{\partial \omega_d}) = \frac{\partial \mathcal{L}}{\partial \bar{\omega}}$$.
Recall the graph representation
<img src = "./images/pipeline1.png" width = "200">
and consider the chain rule
$$\frac{\partial \mathcal{L}}{\partial \bar{\omega}} = \frac{\partial \mathcal{L}}{\partial y}\frac{\partial y}{\partial \bar{\omega}}$$
Observe that the complete differentiation is easier this way.
Given that $\mathcal{L}(y,t) = (t-y)^2$, then
$$\frac{\partial \mathcal{L}}{\partial y} = -2(t-y)$$
Given that $y = \omega^Tx$, then
$$ \frac{\partial y}{\partial \bar{\omega}} = x$$
## The flow of the chain rule
Let us try to understand visually what the chain rule is about. This will constitute the basis of the backpropagation algorithm, that will be next used to train deep learning techniques and the basis of **automatic differentiation**, a programatically optimum way for computing differentials.
```
class optimize:
    """Plain stochastic gradient descent for a single-layer model.

    The parameter update applies the chain rule explicitly:
    dL/dw = dL/dy * dy/dw, supplied by `loss.gradient` and `model.gradient`.
    """
    def __init__(self):
        pass

    def run(self, data, target, model, loss, num_iter, eta):
        """Fit model.w on (data, target) by SGD.

        data     : (N, d) sample matrix;  target : length-N targets
        model    : exposes `w`, `forward(x)` and `gradient(x)`
        loss     : exposes `gradient(model, x, t)`
        num_iter : number of SGD steps;   eta : learning rate
        """
        N_samples = data.shape[0]
        # range: `xrange` does not exist in Python 3 (range is equivalent here).
        for t in range(num_iter):
            # Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx, :]
            yi = target[idx]
            # Step 2.- update the parameters to optimise (chain rule)
            model.w = model.w - eta * loss.gradient(model, xi, yi) * model.gradient(xi)
```
Thus we need to add the gradient terms to the model and the loss.
```
import numpy as np
class model:
    """Linear model with bias: f(x, w) = w^T x + b.

    The bias is stored as the last entry of self.w.
    """

    def __init__(self, d):
        # d weights plus one bias term, all initialised to zero.
        self.w = np.zeros((1, d + 1))

    def forward(self, x):
        """Evaluate f(x, w) = w^T x + b on a data point."""
        weights, bias = self.w[0, :-1], self.w[0, -1]
        return np.dot(weights, x.T) + bias

    def gradient(self, x):
        """Gradient of the output w.r.t. the parameters: (x, 1)."""
        return np.concatenate((x, np.array([1])))

    def gradient_x(self, x):
        """Gradient of the output w.r.t. the input: the weights (bias excluded)."""
        return self.w[0, :-1]
class loss:
    """Least-squares loss L(y, t) = (t - y)^2 with dL/dy = -2 (t - y)."""

    def __init__(self):
        self.y = 0  # model output from the most recent call

    def evaluate(self, model, x, t):
        """Run the model on x and return the squared error against target t."""
        self.y = model.forward(x)
        residual = t - self.y
        return residual * residual

    def gradient(self, model, x, t):
        """Derivative of the loss w.r.t. the model output y."""
        self.y = model.forward(x)
        return -2. * (t - self.y)
```
Let us put all the pieces together and solve a toy problem
```
import numpy as np
#Example dataset: two 2-D Gaussian blobs, the second shifted to (3, 3),
#labelled -1 and +1 respectively.
N_samples_per_class = 1000
d_dimensions = 2
x = np.vstack((np.random.randn(N_samples_per_class, d_dimensions),np.random.randn(N_samples_per_class, d_dimensions)+np.array([3,3])))
y = np.vstack((-1.*np.ones((N_samples_per_class,1)),1.*np.ones((N_samples_per_class,1))))
%matplotlib inline
import matplotlib.pyplot as plt
# Scatter the two classes.
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1,color='pink')
# Dense grid over the plane, used to visualize the decision function.
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1,color='pink')
# Evaluate the (still untrained, all-zero) model on the grid and overlay it.
f=model(d_dimensions)
Z = f.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
```
Let us add some control to check how it works. We will compute the loss every 1000 iterations, just for checking purposes and add a plot method for showing convergence.
```
import matplotlib.pyplot as plt
class optimize:
    """Stochastic gradient descent with periodic loss logging.

    Every 1000 iterations the loss over (up to) the first 1000 training
    samples is recorded in ``self.l`` so convergence can be plotted.
    """
    def __init__(self):
        pass

    def run(self, data, target, model, loss, num_iter, eta):
        """Optimize ``model``'s parameters on ``(data, target)`` by SGD.

        data     : (N, d) samples;  target : (N, 1) targets
        model    : exposes `w`, `forward(x)` and `gradient(x)`
        loss     : exposes `evaluate(model, x, t)` and `gradient(model, x, t)`
        num_iter : number of SGD steps;  eta : learning rate
        """
        # int(...) is required: np.ceil returns a float, which numpy rejects
        # as an array shape.  True division + ceil also fixes the Python 2
        # floor division, which could under-allocate the log for num_iter
        # that is not a multiple of 1000.
        self.l = np.zeros((int(np.ceil(num_iter / 1000)), 1))
        N_samples = data.shape[0]
        print(N_samples)  # Python 3 print function (was a Python 2 print statement)
        i = 0
        for t in range(num_iter):  # range: xrange does not exist in Python 3
            # Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx, :]
            yi = target[idx]
            # Record the loss on the first <=1000 samples every 1000 steps.
            if t % 1000 == 0:
                self.l[i] = np.sum(loss.evaluate(model, data[:1000, :], target[:1000, 0]))
                i = i + 1
            # Step 2.- update the parameters to optimise
            model.w = model.w - eta * loss.gradient(model, xi, yi) * model.gradient(xi)

    def plot(self):
        """Plot the logged loss curve."""
        plt.plot(self.l)
```
Now let us run the method.
```
# Train the linear model with squared loss for 100k SGD steps, then plot
# the logged convergence curve.
num_iter = 100000
eta = 0.001  #optimization step/learning rate
f = model(d_dimensions)
L = loss()
opt = optimize()
opt.run(x,y,f,L,num_iter,eta)
opt.plot()
```
Let us visually check the results
```
# Dense grid over the plane for visualizing the learned decision function.
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
# Scatter the two classes, then overlay model values and the 0-level contour.
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1,color='pink')
Z = f.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
```
## Just for fun.
Let us create a new loss function more adequate for classification purposes. The hinge loss
$\mathcal{L}(y,t) = \max(0, 1-t\cdot y)$
```
#display
class loss_hinge:
    """Hinge loss L(y, t) = max(0, 1 - t*y), suited to classification."""

    def __init__(self):
        self.y = 0  # model output from the most recent call

    def evaluate(self, model, x, t):
        """Run the model on x and return the hinge loss against target t."""
        self.y = model.forward(x)
        margin = 1 - t * self.y
        return np.maximum(0, margin)

    def gradient(self, model, x, t):
        """(Sub)derivative of the hinge loss w.r.t. the model output y."""
        self.y = model.forward(x)
        margin = 1 - t * self.y
        # -t inside the margin, 0 once the sample is correctly classified.
        return -t if margin > 0 else 0
# Train with the hinge loss, plot convergence, then visualize the boundary.
num_iter = 10000
eta = 0.01  #optimization step/learning rate
f = model(d_dimensions)
L = loss_hinge()
opt = optimize()
opt.run(x,y,f,L,num_iter,eta)
opt.plot()
# Grid + scatter + decision-function overlay (same recipe as above).
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1,color='pink')
Z = f.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
```
## Even more fun
We still do not have a perfect result. But we could modify the data set so that our classifier solves the problem perfectly. Remember that the gradient can be seen as the change we must make to a certain variable in order to maximize some cost function. What if the variable is the data? This would tell us how we should change the data to obtain a perfect classification.
Let us create a new optimizer in order to update data.
<div class = "alert alert-info" style="border-radius:10px">**EXERCISE: ** Discuss how can we change the problem to get a perfect classification. Fill the blank in the code to check it.</div>
```
# your code
from IPython.display import clear_output, Image, display
%matplotlib inline
class optimize_data:
    """Exercise: gradient descent on the *data* instead of the weights.

    NOTE(review): the update line is intentionally left blank for the
    exercise (`##### YOUR CODE HERE`), so this cell does not run as-is;
    `xrange` is also Python 2 only.
    """
    def __init__(self):
        pass
    def run(self, data, target, model, loss, num_iter, eta):
        #Moves the data points so the (fixed) model classifies them better
        N_samples = data.shape[0]
        for t in xrange(num_iter):
            #Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx,:]
            yi = target[idx,:]
            #Step 2.- update the data point (not the model parameters)
            data[idx,:] = data[idx,:] - eta * ##### YOUR CODE HERE ##############
        return data
```
Let us train a classifier first.
```
# Train a hinge-loss linear classifier to completion first; the data-moving
# exercise below keeps this classifier fixed.
num_iter = 100000
eta = 0.001  #optimization step/learning rate
f = model(d_dimensions)
L = loss_hinge()
opt = optimize()
opt.run(x,y,f,L,num_iter,eta)
```
Now we check the results and see that the result is good but does not classify all data perfectly.
```
# Visualize the trained classifier before modifying the data.
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1,color='pink')
Z = f.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
```
And now, modify the data so that the trained classifier classifies all data points.
```
# Gradient-descend on a copy of the *data* (classifier f stays fixed),
# then plot the shifted points against the unchanged decision boundary.
num_iter = 500000
eta = 0.05  #optimization step/learning rate
x_orig = x.copy()
Lh = loss_hinge()
opt = optimize_data()
x_mod = opt.run(x_orig,y,f,Lh,num_iter,eta)
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
idx = y==1
plt.scatter(x_mod[idx.ravel(),0],x_mod[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x_mod[idx.ravel(),0],x_mod[idx.ravel(),1],alpha=0.4,color='pink')
Z = f.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
```
# Deep models
Deep models are defined as the composition or stacking of functions. For example, consider the following graph
<img src="./images/pipeline2.png" width = "300">
where
$$y_2 = f_2(f_1(x)).$$
Observe that we are just changing the model while the loss and the optimization function remain the same.
Different to the other case the parameters are arranged in layers. Thus the computation of the gradient with respect to the parameters is a little more involved. However, we will use standard chain rule. In this case we want to find
$$\frac{\partial \mathcal{L}}{\partial \bar{\omega_2}} = \frac{\partial \mathcal{L}}{\partial y_2}\frac{\partial y_2}{\partial \bar{\omega_2}}$$
and
$$\frac{\partial \mathcal{L}}{\partial \bar{\omega_1}} = \frac{\partial \mathcal{L}}{\partial y_2}\frac{\partial y_2}{\partial \bar{y_1}}\frac{\partial \bar{y_1}}{\partial \bar{\omega_1}}$$
Observe that this decomposition allows to decouple each layer in the following terms
$$\frac{\partial \mathcal{L}}{\partial \bar{\omega_1}} = \frac{\partial \mathcal{L}}{\partial \bar{y_1}}\frac{\partial \bar{y_1}}{\partial \bar{\omega_1}}$$
In general for $N$ layers the update of the parameters of the $m$-th layer is written as
$$\frac{\partial \mathcal{L}}{\partial \bar{\omega_m}} = \frac{\partial \mathcal{L}}{\partial \bar{y_m}}\frac{\partial \bar{y_m}}{\partial \bar{\omega_m}} = \frac{\partial \mathcal{L}}{\partial \bar{y_N}}\prod\limits_{i=m}^{N-1}\frac{\partial \bar{y_{i+1}}}{\partial \bar{y_i}} \frac{\partial \bar{y_m}}{\partial \bar{\omega_m}}$$
Thus we have to define for each layer two gradients:
+ *the gradient with respect to the parameters* is the one used for updating the parameters
+ *the gradient with respect to the layer input* is the one needed to move backward the gradient of the loss and will be used for updating lower layers.
This is what it is called **backpropagation**.
```
import numpy as np
class layer:
    """Abstract layer: subclasses implement forward/backward/gradient."""
    def __init__(self):
        pass
    def forward(self): # evaluate the layer
        pass
    def backward(self): # gradient with respect to the inputs
        pass
    def gradient(self): # gradient with respect to the parameters
        pass
class reluLayer(layer):  # this is a linear layer with relu activation
    """Fully-connected layer (no bias) followed by a ReLU non-linearity."""
    def __init__(self, input_dim, n_neurons):
        self.x = np.zeros((1, input_dim))   # last input seen by forward()
        self.z = np.zeros((1, n_neurons))   # pre-activation cache
        self.w = np.random.randn(input_dim, n_neurons)
        self.y = np.zeros((1, n_neurons))   # post-activation cache
    def forward(self, x):  # evaluate the layer
        # Bug fix: the input must be cached for gradient(); the original never
        # assigned self.x, so gradient() always saw the initial zeros and the
        # weights could never be updated.  atleast_2d keeps the (1, d) shape
        # gradient() expects for the 1-D sample path used during training.
        self.x = np.atleast_2d(x)
        self.z = np.dot(x, self.w)
        self.y = np.maximum(0, self.z)
        return self.y
    def backward(self):  # gradient with respect to the inputs
        # Weights masked by the ReLU activation pattern of the last forward.
        return np.select([self.z > 0], [self.w], default=0).T
    def gradient(self):  # gradient with respect to the parameters
        dydz = np.where(self.z > 0, 1., 0.)
        return np.dot(dydz[:, np.newaxis], self.x).T
class model:
    """Feed-forward network: an ordered stack of layers."""

    def __init__(self):
        self.architecture = []  # layers, applied in insertion order
        self.y_ = []            # output of the most recent forward pass

    def addLayer(self, layer):
        """Append `layer` to the top of the stack."""
        self.architecture.append(layer)

    def forward(self, x):
        """Feed x through every layer in order and return the final output."""
        self.y_ = x
        for stage in self.architecture:
            self.y_ = stage.forward(self.y_)
        return self.y_
class optimize:
    """Stochastic gradient descent with backpropagation through layers."""
    def __init__(self):
        pass

    def run(self, data, target, model, loss, num_iter, eta):
        """Fit a layered `model` to (data, target) by SGD.

        data     : (N, d) samples;  target : (N, 1) targets
        model    : exposes `architecture`, a list of layers
        loss     : exposes `gradient(model, x, t)`
        num_iter : number of SGD steps;  eta : learning rate
        """
        N_samples = data.shape[0]
        for t in range(num_iter):  # range: xrange does not exist in Python 3
            # Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx, :]
            yi = target[idx, :]
            # Step 2.- backpropagate the loss gradient and update each layer.
            dLdx = loss.gradient(model, xi, yi)[:, np.newaxis]
            for layer in reversed(model.architecture):
                # Stage the update so backward() still sees the old weights.
                tmp = layer.w - eta * np.dot(layer.gradient(), dLdx.T)
                dLdx = np.dot(dLdx, layer.backward())
                layer.w = tmp
# Build and train a 5-layer ReLU network, then visualize its decision surface.
num_iter = 10000
eta = 0.01  #optimization step/learning rate
nn = model()
nn.addLayer(reluLayer(2,10))
nn.addLayer(reluLayer(10,100))
nn.addLayer(reluLayer(100,30))
nn.addLayer(reluLayer(30,10))
nn.addLayer(reluLayer(10,1))
L = loss()
opt = optimize()
# NOTE: optimize.run returns None, so x_mod is None here.
x_mod = opt.run(x,y,nn,L,num_iter,eta)
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.3)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.3,color='green')
Z = nn.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
```
# All together now
```
class loss:
    """Least-squares loss L(y, t) = (t - y)^2 with dL/dy = -2 (t - y)."""

    def __init__(self):
        self.y = 0  # model output from the most recent call

    def evaluate(self, model, x, t):
        """Run the model on x and return the squared error against target t."""
        self.y = model.forward(x)
        residual = t - self.y
        return residual * residual

    def gradient(self, model, x, t):
        """Derivative of the loss w.r.t. the model output y."""
        self.y = model.forward(x)
        return -2. * (t - self.y)
class reluLayer(layer):  # this is a linear layer with relu activation
    """Fully-connected layer (no bias) followed by a ReLU non-linearity."""
    def __init__(self, input_dim, n_neurons):
        self.x = np.zeros((1, input_dim))   # last input seen by forward()
        self.z = np.zeros((1, n_neurons))   # pre-activation cache
        self.w = np.random.randn(input_dim, n_neurons)  # Normal random initialization
        self.y = np.zeros((1, n_neurons))   # post-activation cache
    def forward(self, x):  # evaluate the layer
        # Bug fix: the input must be cached for gradient(); the original never
        # assigned self.x, so gradient() always saw the initial zeros and the
        # weights could never be updated.
        self.x = np.atleast_2d(x)
        self.z = np.dot(x, self.w)
        self.y = np.maximum(0, self.z)
        return self.y
    def backward(self):  # gradient with respect to the inputs
        # Weights masked by the ReLU activation pattern of the last forward.
        return np.select([self.z > 0], [self.w], default=0).T
    def gradient(self):  # gradient with respect to the parameters
        dydz = np.where(self.z > 0, 1., 0.)
        return np.dot(dydz[:, np.newaxis], self.x).T
class model:
    """Feed-forward network: an ordered stack of layers."""

    def __init__(self):
        self.architecture = []  # layers, applied in insertion order
        self.y_ = []            # output of the most recent forward pass

    def addLayer(self, layer):
        """Append `layer` to the top of the stack."""
        self.architecture.append(layer)

    def forward(self, x):
        """Feed x through every layer in order and return the final output."""
        self.y_ = x
        for stage in self.architecture:
            self.y_ = stage.forward(self.y_)
        return self.y_
class optimize:
    """Stochastic gradient descent with backpropagation through layers."""
    def __init__(self):
        pass

    def run(self, data, target, model, loss, num_iter, eta):
        """Fit a layered `model` to (data, target) by SGD.

        data     : (N, d) samples;  target : (N, 1) targets
        model    : exposes `architecture`, a list of layers
        loss     : exposes `gradient(model, x, t)`
        num_iter : number of SGD steps;  eta : learning rate
        """
        N_samples = data.shape[0]
        for t in range(num_iter):  # range: xrange does not exist in Python 3
            # Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx, :]
            yi = target[idx, :]
            # Step 2.- backpropagate the loss gradient and update each layer.
            dLdx = loss.gradient(model, xi, yi)[:, np.newaxis]
            for layer in reversed(model.architecture):
                # Bug fix (and consistency with the earlier optimize class):
                # stage the new weights in a temporary and commit them only
                # *after* backward() has run, since backward() reads layer.w.
                # The original assigned layer.w first and therefore
                # backpropagated through the already-updated weights.
                tmp = layer.w - eta * np.dot(layer.gradient(), dLdx.T)
                dLdx = np.dot(dLdx, layer.backward())
                layer.w = tmp
```
# All together now ... with bias
```
import numpy as np
#Example dataset (regenerated): two 2-D Gaussian blobs labelled -1 / +1.
N_samples_per_class = 1000
d_dimensions = 2
x = np.vstack((np.random.randn(N_samples_per_class, d_dimensions),np.random.randn(N_samples_per_class, d_dimensions)+np.array([3,3])))
y = np.vstack((-1.*np.ones((N_samples_per_class,1)),1.*np.ones((N_samples_per_class,1))))
%matplotlib inline
import matplotlib.pyplot as plt
# Scatter the two classes.
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.1,color='pink')
import numpy as np
import tqdm
class loss:
    """Least-squares loss L(y, t) = (t - y)^2 with dL/dy = -2 (t - y)."""

    def __init__(self):
        self.y = 0  # model output from the most recent call

    def evaluate(self, model, x, t):
        """Run the model on x and return the squared error against target t."""
        self.y = model.forward(x)
        residual = t - self.y
        return residual * residual

    def gradient(self, model, x, t):
        """Derivative of the loss w.r.t. the model output y."""
        self.y = model.forward(x)
        return -2. * (t - self.y)
class reluLayer(layer):  #this is a linear layer (with bias) plus relu activation
    """Fully-connected layer with bias followed by ReLU.

    The bias is folded into `w` as an extra last row; forward() appends a
    constant-1 column to the input (cached as `self.xext`) to match.
    """
    def __init__(self,input_dim,n_neurons):
        self.z = np.zeros((1,n_neurons))  # pre-activation cache
        self.w = np.random.randn(input_dim+1,n_neurons)
        #self.w = self.w/np.dot(np.ones((input_dim+1,1)),np.sum(self.w*self.w,axis=0)[np.newaxis,:])
        self.y = np.zeros((1,n_neurons))  # post-activation cache
    def forward(self,x): # evaluate the layer
        # Append the constant-1 bias column; assumes x is 2-D (batch, dim).
        self.xext = np.concatenate((x,np.ones((x.shape[0],1))),axis=1)
        self.z = np.dot(self.xext, self.w)
        self.y = np.maximum(0,self.z)
        return self.y
    def backward(self): # gradient with respect to the inputs
        # Mask the weights by the ReLU activation pattern of the last forward;
        # drop the bias row since it does not feed back to the input.
        dydz = np.tile(np.where(self.z>0.,1.,0.),(self.w.shape[0],1))
        tw = dydz*self.w
        return tw[:-1,:]
    def gradient(self): # gradient with respect to the parameters
        # Outer product of the ReLU mask and the bias-extended input.
        dydz = np.where(self.z>0.,1.,0.)
        return np.dot(dydz.T,self.xext)
class linearLayer(layer):
    """Affine (fully-connected) layer with a bias term and no activation."""

    def __init__(self, input_dim, n_neurons):
        # The extra row in w holds the bias, fed by a constant-1 input column.
        self.w = np.random.randn(input_dim + 1, n_neurons)
        self.y = np.zeros((1, n_neurons))

    def forward(self, x):
        """Append the bias column to x and apply the affine map."""
        ones = np.ones((x.shape[0], 1))
        self.xext = np.concatenate((x, ones), axis=1)
        self.y = np.dot(self.xext, self.w)
        return self.y

    def backward(self):
        """Gradient w.r.t. the inputs: the weights without the bias row."""
        return self.w[:-1, :]

    def gradient(self):
        """Gradient w.r.t. the parameters: the bias-extended input."""
        return self.xext
class model:
    """Feed-forward network: an ordered stack of layers."""

    def __init__(self):
        self.architecture = []  # layers, applied in insertion order
        self.y_ = []            # output of the most recent forward pass

    def addLayer(self, layer):
        """Append `layer` to the top of the stack."""
        self.architecture.append(layer)

    def forward(self, x):
        """Feed x through every layer in order and return the final output."""
        self.y_ = x
        for stage in self.architecture:
            self.y_ = stage.forward(self.y_)
        return self.y_
class optimize:
    """SGD with backprop, a tqdm progress bar, and periodic loss logging."""
    def __init__(self, debug = False, plot_convergence = 10.):
        self.debug_ = debug
        self.pc = plot_convergence  # log the full-dataset loss every `pc` steps

    def run(self, data, target, model, loss, num_iter, eta):
        """Fit a layered `model` to (data, target) by SGD, logging the loss.

        data     : (N, d) samples;  target : (N, 1) targets
        model    : exposes `architecture`, a list of layers
        loss     : exposes `evaluate(...)` and `gradient(...)`
        num_iter : number of SGD steps;  eta : learning rate
        """
        # int(...) is required: np.ceil returns a float, which numpy rejects
        # as an array shape.
        self.l = np.zeros((int(np.ceil((num_iter * 1.0) / self.pc)), 1))
        print(self.l.shape)  # Python 3 print function (was a Python 2 print statement)
        i = 0
        N_samples = data.shape[0]
        for t in tqdm.tqdm(range(num_iter)):  # range: xrange is Python 2 only
            # Step 1.- take a sample x at random from the training set
            idx = np.random.randint(N_samples)
            xi = data[idx, :]
            xi = xi[np.newaxis, :]  # keep the batch dimension: (1, d)
            yi = target[idx, :]
            # Record the loss over the whole dataset every `pc` iterations.
            if (t * 1.) % self.pc == 0:
                self.l[i] = np.sum(loss.evaluate(model, data, target))
                i = i + 1
            # Step 2.- backpropagate the loss gradient and update each layer.
            dLdx = loss.gradient(model, xi, yi)
            for layer in reversed(model.architecture):
                if self.debug_:
                    # Shape tracing for debugging the backprop bookkeeping.
                    print("xi" + str(xi.shape))
                    print("g" + str(layer.gradient().shape))
                    print("dLdx" + str(dLdx.shape))
                    print("w" + str(layer.w.shape))
                    print("b" + str(layer.backward().shape))
                # Stage the update so backward() still sees the old weights.
                tmp = layer.w - eta * np.dot(dLdx, layer.gradient()).T
                dLdx = np.dot(dLdx, layer.backward().T)
                layer.w = tmp

    def plot(self):
        """Plot the logged loss curve."""
        plt.plot(self.l)
# Train a small bias-enabled network (2 ReLU layers + linear output),
# plot convergence, output histogram, and the decision surface.
num_iter = 100000
eta = 0.001  #optimization step/learning rate
nn = model()
nn.addLayer(reluLayer(2,5))
nn.addLayer(reluLayer(5,5))
nn.addLayer(linearLayer(5,1))
L = loss()
opt = optimize(plot_convergence=100)
# NOTE: optimize.run returns None, so x_mod is None here.
x_mod = opt.run(x,y,nn,L,num_iter,eta)
opt.plot()
np.sum(L.evaluate(nn,x,y))
plt.hist(nn.forward(x))
delta = 0.05
xx = np.arange(-5.0, 8.0, delta)
yy = np.arange(-5.0, 8.0, delta)
XX, YY = np.meshgrid(xx, yy)
Xf = XX.flatten()
Yf = YY.flatten()
sz=XX.shape
test_data = np.concatenate([Xf[:,np.newaxis],Yf[:,np.newaxis]],axis=1);
idx = y==1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.3)
idx = y==-1
plt.scatter(x[idx.ravel(),0],x[idx.ravel(),1],alpha=0.3,color='green')
Z = nn.forward(test_data)
Z.shape=sz
plt.imshow(Z, interpolation='bilinear', origin='lower', extent=(-6,8,-6,8),alpha=0.3, vmin=-15, vmax=15)
plt.contour(XX,YY,Z,[0])
fig = plt.gcf()
fig.set_size_inches(9,9)
def test_forward():
    """Forward pass through one ReLU layer: a positive pre-activation passes
    through unchanged, a negative pre-activation is clamped to zero."""
    net = model()
    net.addLayer(reluLayer(2,1))
    # weights [1, 2] with bias term 0, stored as a (3, 1) column
    net.architecture[0].w = np.array([[1.],[2.],[0.]])
    sample = np.array([[-0.1,0.1]])
    # -0.1*1 + 0.1*2 + 0 = 0.1 > 0, so the ReLU is active
    assert net.forward(sample)==0.1, net.forward(sample)
    net = model()
    net.addLayer(reluLayer(2,1))
    net.architecture[0].w = np.array([[1.],[2.],[0.]])
    sample = np.array([[0.1,-0.1]])
    # 0.1*1 - 0.1*2 + 0 = -0.1 < 0, so the ReLU clamps to zero
    assert net.forward(sample)==0., net.forward(sample)
def test_gradient():
    """Check per-layer gradient() and backward() of a one-layer ReLU net
    against hand-computed expected values."""
    nn = model()
    nn.addLayer(reluLayer(2,1))
    # weights [1, 2] with bias 0, as a (3, 1) column
    w = np.ndarray((3,1), buffer=np.array([1.,2.,0.]))
    nn.architecture[0].w = w
    x = np.ndarray((1,2), buffer=np.array([-0.1,0.1]))
    l = loss()
    # expected layer.gradient(): the input augmented with the bias term
    eg = np.ndarray((1,3), buffer=np.array([-0.1,0.1,1.]))
    # expected layer.backward(): the (non-bias) weights
    eb = np.ndarray((2,1), buffer=np.array([1.,2.]))
    #Step 2.- update the parameters to optimise
    # NOTE(review): this uses the notebook-global `y` (dataset labels) as the
    # target, unlike test_loss which uses a scalar t=1. — looks like it should
    # be a scalar target; confirm. The result dLdx is never asserted, so the
    # checks below do not depend on it.
    dLdx= l.gradient(nn,x,y)
    for layer in reversed(nn.architecture):
        assert np.all(np.equal(layer.gradient(),eg)), layer.gradient()
        assert np.all(np.equal(layer.backward(),eb)), layer.backward()
def test_loss():
    """Squared-error loss and its gradient on a one-layer ReLU net."""
    net = model()
    net.addLayer(reluLayer(2,1))
    # weights [1, 2] with bias 0, as a (3, 1) column
    net.architecture[0].w = np.array([[1.],[2.],[0.]])
    criterion = loss()
    sample = np.array([[-0.1,0.1]])
    target = 1.
    # forward output is 0.1, so (1 - 0.1)^2 = 0.81
    assert criterion.evaluate(net,sample,target)==0.81, "Expected 0.81, returned: "+str(criterion.evaluate(net,sample,target))
    # d/dy (t - y)^2 = -2 (t - y) = -1.8
    assert criterion.gradient(net,sample,target)==-1.8, "Expected -1.8, returned: "+str(criterion.gradient(net,sample,target))
# Run the unit tests defined above; each raises AssertionError on failure.
test_forward()
test_loss()
test_gradient()
```
## Bonus: Behavior in general directed acyclic graph structures
In general, we may have a workflow that involves a directed acyclic graph (DAG). If that is the case, we find two new elements where the gradient is involved: splitting nodes and joining nodes.
Consider a splitting node. How will backpropagation work in that case?
<img src="./images/split.png" width="150">
Looking at the picture we realise that
$$(y_1,y_2)=f(x,\omega) = (f_1(x,\omega),f_2(x,\omega)).$$
Remember that in backpropagation we need to define two derivatives, the update derivative $\frac{\partial \mathcal{L}}{\partial \omega}$ and the backpropagation derivative $\frac{\partial \mathcal{L}}{\partial x}$.
Remember that at any layer we are able to define these two quantities as soon as we have $\frac{\partial \mathcal{L}}{\partial \bar{y}} = (\frac{\partial \mathcal{L}}{\partial y_1}, \frac{\partial \mathcal{L}}{\partial y_2})$.
Again we use the differentiation chain rule,
$$\frac{\partial \mathcal{L}}{\partial \omega} = \frac{\partial \mathcal{L}}{\partial \bar{y}}^T\frac{\partial \bar{y}}{\partial \omega}$$
As stated in the former lines
$$\frac{\partial \mathcal{L}}{\partial \bar{y}} = (\frac{\partial \mathcal{L}}{\partial y_1},\frac{\partial \mathcal{L}}{\partial y_2})^T$$
and
$$\frac{\partial \bar{y}}{\partial \omega} = (\frac{\partial y_1}{\partial \omega},\frac{\partial y_2}{\partial \omega})^T$$
Thus,
$$\frac{\partial \mathcal{L}}{\partial \omega} = (\frac{\partial \mathcal{L}}{\partial y_1},\frac{\partial \mathcal{L}}{\partial y_2})^T \left(\begin{matrix}\frac{\partial y_1}{\partial \omega}\\ \frac{\partial y_2}{\partial \omega}\end{matrix}\right) = $$
$$= \frac{\partial \mathcal{L}}{\partial y_1} \frac{\partial y_1}{\partial \omega} + \frac{\partial \mathcal{L}}{\partial y_2}\frac{\partial y_2}{\partial \omega}$$
Observe that the influence of both gradients backpropagating through the graph is added.
| github_jupyter |
# Thompson Sampling for Linearly Constrained Bandits
## Plots for Regret and Violation
```
import numpy as np
from matplotlib import pyplot as plt
```
# Load Data
```
# Load a saved bandit experiment (N runs of horizon T) and build cumulative
# reward / regret / violation curves for the two algorithms.
results_dir = 'results/'
filename = 'edX_eta0.50_T50000_N16'
#filename = 'coupon_purchase_eta0.25_T10000_N16'
file_ext = '.npy'
#data = np.load('results/edX_eta0.30_T10000_N16.npy', allow_pickle=True)[()]
#data = np.load('results/edX_eta0.80_T10000_N16.npy', allow_pickle=True)[()]
# The .npy file stores a pickled dict; [()] unwraps the 0-d object array.
data = np.load( results_dir + filename + file_ext, allow_pickle=True )[()]
# Dots would confuse image-file suffix handling, so replace them for output names.
filename = filename.replace('.','_')
image_format = '.png'
T = data['T']  # horizon (number of rounds)
N = data['N']  # number of independent runs
target_success_prob = data['constraint']
# NOTE(review): "stationay" is a typo for "stationary"; the name is kept
# as-is because later cells may reference it.
stationay_opt_reward = data['stationary_opt_reward']#0.0139
# Per-run cumulative constraint / optimal-reward baselines, shape (N, T).
cum_constraint = np.tile( target_success_prob * np.arange( 0, T, 1 ), [ N, 1 ] )
cum_opt_reward = np.tile( stationay_opt_reward * np.arange( 0, T, 1 ), [ N, 1 ] )
lincon_kl_ucb_cum_reward = np.cumsum( data['lincon_kl_ucb_reward_values'], axis = 1 )
lincon_ts_cum_reward = np.cumsum( data['lincon_ts_reward_values'], axis = 1 )
# Violation: positive part of (required cumulative successes - achieved ones).
lincon_kl_ucb_cum_violation = np.maximum( 0.0, cum_constraint - np.cumsum( data['lincon_kl_ucb_reward_events'], axis=1 ) )
lincon_ts_cum_violation = np.maximum( 0.0, cum_constraint - np.cumsum( data['lincon_ts_reward_events'], axis=1 ) )
# Regret: positive part of (optimal cumulative reward - achieved reward).
lincon_kl_ucb_cum_regret = np.maximum(0.0, cum_opt_reward - lincon_kl_ucb_cum_reward)
lincon_ts_cum_regret = np.maximum(0.0, cum_opt_reward - lincon_ts_cum_reward)
```
# Plot Results
```
# Plot violation, regret, reward, and reward/violation curves averaged over
# the N runs, saving each figure next to the results.
plt.rcParams.update({'font.size': 30,
                     'lines.linewidth' : 3,
                     'lines.markersize': 20})
#------------------------------------
# Expected Violation
#------------------------------------
plt.figure(figsize=[8, 6])
plt.grid(False)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.ticklabel_format(style='sci', axis='x', scilimits=(3,3))
plt.xlim([0, T])
#plt.ylim([0, 5000])
x_ticks = np.arange(0, T)
plt.plot(x_ticks, np.mean( lincon_kl_ucb_cum_violation, axis=0))
plt.plot(x_ticks, np.mean( lincon_ts_cum_violation, axis=0))
plt.legend(['LinCon-KL-UCB', 'LinConTS'], loc='upper left', fontsize=20)
plt.xlabel('T')
plt.ylabel('Violation')
plt.savefig( results_dir + filename + '_VIOLATION' + image_format, bbox_inches='tight')
plt.show()
#------------------------------------
# Expected Regret
#------------------------------------
plt.figure(figsize=[8, 6])
plt.grid(False)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.ticklabel_format(style='sci', axis='x', scilimits=(3,3))
plt.xlim([0, T])
#plt.ylim([0, 2000])
x_ticks = np.arange(0, T)
plt.plot(x_ticks, np.mean( lincon_kl_ucb_cum_regret, axis=0 ) )
plt.plot(x_ticks, np.mean( lincon_ts_cum_regret, axis=0 ) )
plt.legend(['LinCon-KL-UCB', 'LinConTS'], loc='upper left', fontsize=20)
plt.xlabel('T')
plt.ylabel('Regret')
plt.savefig( results_dir + filename + '_REGRET' + image_format, bbox_inches='tight')
plt.show()
plt.rcParams.update({'font.size': 30,
                     'lines.linewidth' : 3,
                     'lines.markersize': 20})
#------------------------------------
# Expected Reward
#------------------------------------
plt.figure(figsize=[8, 6])
plt.grid(False)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.ticklabel_format(style='sci', axis='x', scilimits=(3,3))
plt.xlim([0, T])
#plt.ylim([0, 2000])
x_ticks = np.arange(0, T)
plt.plot(x_ticks, np.mean( lincon_kl_ucb_cum_reward, axis=0 ) )
plt.plot(x_ticks, np.mean( lincon_ts_cum_reward, axis=0 ) )
plt.legend(['LinCon-KL-UCB', 'LinConTS'], loc='upper left', fontsize=20)
plt.xlabel('T')
plt.ylabel('Cumulative Reward ')
plt.savefig( results_dir + filename + '_REWARD' + image_format, bbox_inches='tight')
plt.show()
#------------------------------------
# Expected Reward / Expected Violation
#------------------------------------
plt.figure(figsize=[8, 6])
plt.grid(False)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.ticklabel_format(style='sci', axis='x', scilimits=(3,3))
plt.xlim([0, T])
offset = 0
x_ticks = np.arange(offset, T)
# NOTE(review): the mean violation can be zero at early t, so this ratio may
# emit divide-by-zero warnings / inf values at the start of the curve.
plt.plot(x_ticks, np.divide( np.mean( lincon_kl_ucb_cum_reward, axis=0),
                             np.mean( lincon_kl_ucb_cum_violation, axis=0)) )
plt.plot(x_ticks, np.divide( np.mean( lincon_ts_cum_reward, axis=0),
                             np.mean( lincon_ts_cum_violation, axis=0) ) )
plt.legend(['LinCon-KL-UCB', 'LinConTS'], loc='upper left', fontsize=20)
plt.xlabel('T')
plt.ylabel('Cum. Reward / Violation')
plt.savefig( results_dir + filename + '_REWARD_VIO' + image_format, bbox_inches='tight')
plt.show()
```
| github_jupyter |
# [Titanic Data Set](https://www.kaggle.com/c/titanic/data)
<img src="../images/titanic.jpeg">
### Data Set Information:
The titanic data frame describes the survival status of individual passengers on the Titanic.
The titanic data frame does not contain information for the crew, but it does contain actual and estimated ages for almost 80% of the passengers.
### Sources:
Hind, Philip. Encyclopedia Titanica. Online-only resource. Retrieved 01Feb2012 from
http://www.encyclopedia-titanica.org/
### Attribute Information:
survival: Survival
PassengerId: Unique Id of a passenger.
pclass: Ticket class
sex: Sex
Age: Age in years
sibsp: # of siblings / spouses aboard the Titanic
parch: # of parents / children aboard the Titanic
ticket: Ticket number
fare: Passenger fare
cabin: Cabin number
embarked: Port of Embarkation
train_df.describe()
## Exploratory data analysis
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Getting the Data
df = pd.read_csv("../datasets/titanic/train.csv")
# Quick look: first rows, numeric summaries, and dtypes/null counts.
df.head()
df.describe()
df.info()
```
- El conjunto de entrenamiento tiene 891 ejemplos y 11 caracterรญsticas + la variable objetivo (Survived).
- 2 de las features son float, 5 son int y 5 son objetos(string).
### Desbalanceo de las clases
```
# Class balance of the target variable.
sns.countplot(x='Survived', data=df)
```
### Visualizaciรณn de los datos
```
# Mean survival rate per ticket class.
sns.barplot(x='Pclass', y='Survived', data=df, ci=None)
```
Los pasajeros de primera clase, sobrevivieron mรกs
```
# Mean survival rate per sex.
sns.barplot(x = 'Sex', y='Survived', data=df, ci=None)
```
Las mujeres sobrevivieron mรกs
```
# Mean survival rate by number of siblings/spouses aboard.
sns.barplot(x="SibSp", y="Survived", data=df, ci=None)
```
Los pasajeros con uno o dos acompaรฑantes sobrevivieron mรกs
```
# Mean survival rate by number of parents/children aboard.
sns.barplot(x="Parch", y="Survived", data=df, ci=None)
```
Los pasajeros con 1-3 hijos sobrevivieron mรกs
```
# Age density, split by survival outcome.
age = sns.FacetGrid(df, hue="Survived",aspect=2)
age.map(sns.kdeplot,'Age',shade= True)
age.set(xlim=(0, df['Age'].max()))
age.add_legend()
```
Los pasajeros jรณvenes sobrevivieron mรกs
```
# Fare density, split by survival outcome (x-axis clipped at 200 for readability).
fare = sns.FacetGrid(df, hue="Survived",aspect=2)
fare.map(sns.kdeplot,'Fare',shade= True)
fare.set(xlim=(0, 200))
fare.add_legend()
```
Los pasajeros que pagaron mรกs, sobrevivieron mรกs
## Preprocesamiento
### Valores nulos
```
# Summarize missing values: absolute count and percentage per column.
total = df.isnull().sum().sort_values(ascending=False)
percent_1 = df.isnull().sum()/df.isnull().count()*100
percent_2 = (round(percent_1, 1)).sort_values(ascending=False)
missing_data = pd.concat([total, percent_2], axis=1, keys=['Total', '%'])
missing_data.head(5)
```
- La feature `Embarked` tiene solo 2 valores nulos, por lo que se pueden completar fรกcilmente.
- La feature `Age` se presenta mรกs complicada, ya que tiene 177 valores nulos.
- La `Cabin` necesita mรกs investigaciรณn, pero parece que podrรญamos querer eliminarla del conjunto de datos, ya que falta el 77%.
**Embarked**
Como solo tiene 2 valores nulos, los rellenaremos con el mรกs comรบn
```
from sklearn.impute import SimpleImputer
imp = SimpleImputer(strategy='most_frequent')
# Fit on the single column: fit_transform(df) would impute (and return) EVERY
# column of the frame, not just 'Embarked', making the assignment wrong.
# SimpleImputer returns a 2-D array, so ravel() flattens it for the column.
df['Embarked'] = imp.fit_transform(df[['Embarked']]).ravel()
df["Embarked"].isnull().sum()
```
**Age**
En este caso crearemos una matriz que contenga nรบmeros aleatorios, que se calculen en funciรณn del valor de la media de la edad y la desviaciรณn estรกndar.
```
df.Age.hist()
# Fill missing ages with uniform random integers drawn from [mean-std, mean+std).
mean = df["Age"].mean()
std = df["Age"].std()
is_null = df["Age"].isnull().sum()
# compute random numbers between the mean, std and is_null
# NOTE(review): no RNG seed is set here, so the imputation is not reproducible
# across runs — consider np.random.seed(...) if repeatability matters.
rand_age = np.random.randint(mean - std, mean + std, size = is_null)
# fill NaN values in Age column with random values generated
age_slice = df["Age"].copy()
age_slice[np.isnan(age_slice)] = rand_age
df["Age"] = age_slice
df["Age"] = df["Age"].astype(int)
df["Age"].isnull().sum()
```
**Cabin**
```
# Inspect the distinct cabin codes (first letter encodes the deck).
df.Cabin.unique()
```
Vemos que la variable `Cabin` empieza por una letra que, investigando, representa la cubierta en la que se alojaban los pasajeros. Como puede ser interesante, podemos quedarnos solo con la letra y rellenar con otra letra inventada los valores que faltan para quitarnos los nulos
<img src="../images/titanic_cutaway_diagram.png">
```
# Replace missing cabins with the placeholder 'U' (unknown), then derive the
# deck from the cabin code's first letter.
df['Cabin'] = df['Cabin'].fillna("U")
df['Deck'] = df['Cabin'].map(lambda x: x[0])
# sns.catplot("Survived", col="Deck", col_wrap=3,
#                    data=titanic[titanic.Deck != 'U'], kind="count")
sns.barplot(x="Deck", y="Survived", data=df, ci=None, order=['A', 'B', 'C', 'D', 'E', 'F', 'G', 'U'])
```
Ahora podemos borrar el feature `Cabin` ya que es redundante con `Deck`
```
# we can now drop the cabin feature (Deck carries the useful information)
df = df.drop(['Cabin'], axis=1)
df["Deck"].isnull().sum()
```
## Detecciรณn de outliers
```
# Look for outliers in the two numeric features with boxplots.
df.head()
df.describe()
sns.boxplot(x='Age', data=df)
sns.boxplot(x='Fare', data=df)
```
Como veรญamos en la teorรญa:
> Un valor atรญpico (outlier) es un valor de una variable muy distante a otras observaciones de la misma variable
- Errores en los instrumentos de medida
- Picos aleatorios en una variable
- La distribuciรณn tiene una cola muy โpesadaโ (heavily-tailed distribution)
- **Cuidado con hacer asunciones sobre la normalidad de la distribuciรณn**
```
# Check the shape of both distributions before deciding on outlier handling.
df.Age.hist()
df.Fare.hist()
```
## Distintos รณrdenes de magnitud
```
# Peek at the frame before rescaling the numeric columns.
df.head()
```
Las dos variables numรฉricas del dataset son `Age` y `Fare`. Ambas estรกn en distintos รณrdenes de magnitud, asรญ que vamos a normalizarlos
```
from sklearn.preprocessing import MinMaxScaler
# Normalizer(norm='l1') rescales each ROW so that |Age| + |Fare| sums to 1 —
# it does not put the two features on a common scale. To normalize the
# magnitudes of the features themselves, rescale each COLUMN to [0, 1]
# with MinMaxScaler instead.
scaler = MinMaxScaler()
ageAndFare = df[["Age", "Fare"]]
ageAndFare = scaler.fit_transform(ageAndFare)
ageAndFare = pd.DataFrame(ageAndFare, columns = ["age", "fare"])
df['NAge'] = ageAndFare[['age']]
df['NFare'] = ageAndFare[['fare']]
df.head()
```
## Datos categรณricos
```
# Peek at the frame before encoding the categorical columns.
df.head()
```
Tenemos como datos categóricos: `Sex` & `Deck`
```
from sklearn.preprocessing import LabelEncoder
# Encode the categorical columns as integer labels; a separate encoder is
# kept per column so each mapping can be inverted independently.
le = LabelEncoder()
df['Deck'] = le.fit_transform(df['Deck'])
df.head()
le_sex = LabelEncoder()
df['Sex'] = le_sex.fit_transform(df['Sex'])
df.head()
```
## Selecciรณn de variables
```
# Peek at the frame before selecting the model features.
df.head()
```
Primero eliminamos las variables de identificadores, ya que no aportan nada al modelo
```
# Drop identifier columns and the raw (unscaled) Age/Fare, keeping the
# normalized NAge/NFare instead. pandas 2.0 removed the positional `axis`
# argument of DataFrame.drop, so it must be passed by keyword.
df.drop(['PassengerId', 'Name', 'Ticket', 'Fare', 'Age'], axis=1, inplace =True)
df.head()
# Pairwise correlations between the remaining features.
sns.heatmap(df.corr(), annot=True, cbar=True)
```
# Entrenando los modelos
```
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
X = df.drop("Survived", axis=1)
y = df["Survived"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
## Logistic Regression
```
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
# Hold-out evaluation of logistic regression.
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# 10-fold cross-validation on the training split for a more stable estimate.
from sklearn.model_selection import cross_val_score
logreg = LogisticRegression(solver='liblinear')
scores = cross_val_score(logreg, X_train, y_train, cv=10, scoring = "accuracy")
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
```
## Naรฏve Bayes
```
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix
# Hold-out evaluation of Gaussian naive Bayes.
gaussian = GaussianNB()
gaussian.fit(X_train, y_train)
y_pred = gaussian.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# 10-fold cross-validation on the training split.
from sklearn.model_selection import cross_val_score
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, y_train, cv=10, scoring = "accuracy")
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
```
### Decision tree
```
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
# Hold-out evaluation of an (unpruned) decision tree.
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# 10-fold cross-validation on the training split.
from sklearn.model_selection import cross_val_score
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, y_train, cv=10, scoring = "accuracy")
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
```
## Random Forest
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
# Hold-out evaluation of a 100-tree random forest.
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, y_train)
y_pred = random_forest.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# 10-fold cross-validation on the training split.
from sklearn.model_selection import cross_val_score
rf = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(rf, X_train, y_train, cv=10, scoring = "accuracy")
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
# Feature importances from the hold-out forest, sorted descending.
importances = pd.DataFrame({'feature':X_train.columns,'importance':np.round(random_forest.feature_importances_,3)})
importances = importances.sort_values('importance',ascending=False).set_index('feature')
importances.head(15)
importances.plot.bar()
```
### Support Vector Machine
```
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import accuracy_score, confusion_matrix
# Hold-out evaluation of a linear SVM.
linear_svc = LinearSVC(max_iter=1000000)
linear_svc.fit(X_train, y_train)
y_pred = linear_svc.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
from sklearn.model_selection import cross_val_score
# Use the same max_iter as the hold-out model above: the default (1000)
# often fails to converge on this data, so the CV scores would not be
# comparable with the hold-out accuracy.
linear_svc = LinearSVC(max_iter=1000000)
scores = cross_val_score(linear_svc, X_train, y_train, cv=10, scoring = "accuracy")
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
```
## K Nearest Neighbor
```
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
# Hold-out evaluation of k-NN with k=3.
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# 10-fold cross-validation on the training split.
from sklearn.model_selection import cross_val_score
knn = KNeighborsClassifier(n_neighbors = 3)
scores = cross_val_score(knn, X_train, y_train, cv=10, scoring = "accuracy")
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
# experimenting with different n values
# Sweep k from 1 to 25 and plot hold-out accuracy for each.
k_range = list(range(1,26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k for KNN')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for Values of k of k-Nearest-Neighbors')
plt.show()
```
| github_jupyter |
# Getting started with Azure ML Data Prep SDK
Copyright (c) Microsoft Corporation. All rights reserved.<br>
Licensed under the MIT License.
Wonder how you can make the most of the Azure ML Data Prep SDK? In this "Getting Started" guide, we'll demonstrate how to do your normal data wrangling with this SDK and showcase a few highlights that make this SDK shine. Using a sample of this [Kaggle crime dataset](https://www.kaggle.com/currie32/crimes-in-chicago/home) as an example, we'll cover how to:
* [Read in data](#Read)
* [Profile your data](#Profile)
* [Append rows](#Append)
* [Apply common data science transforms](#Data-science-transforms)
* [Summarize](#Summarize)
* [Join](#Join)
* [Filter](#Filter)
* [Replace](#Replace)
* [Consume your cleaned dataset](#Consume)
* [Explore advanced features](#Explore)
```
from IPython.display import display
from os import path
from tempfile import mkdtemp
import pandas as pd
import azureml.dataprep as dprep
# Paths for datasets
file_crime_dirty = '../../data/crime-dirty.csv'
file_crime_spring = '../../data/crime-spring.csv'
file_crime_winter = '../../data/crime-winter.csv'
file_aldermen = '../../data/chicago-aldermen-2015.csv'
# Seed
# Fixed seed so take_sample() below is reproducible.
RAND_SEED = 7251
```
<a id="Read"></a>
## Read in data
Azure ML Data Prep supports many different file reading formats (i.e. CSV, Excel, Parquet) and the ability to infer column types automatically. To see how powerful the `auto_read_file` capability is, let's take a peek at the `dirty-crime.csv`:
```
# Plain CSV read (no delimiter/header inference) to show its limitations.
dprep.read_csv(path=file_crime_dirty).head(7)
```
A common occurrence in many datasets is to have a column of values with commas; in our case, the last column represents location in the form of longitude-latitude pair. The default CSV reader interprets this comma as a delimiter and thus splits the data into two columns. Furthermore, it incorrectly reads in the header as the column name. Normally, we would need to `skip` the header and specify the delimiter as `|`, but our `auto_read_file` eliminates that work:
```
# auto_read_file infers the delimiter, header handling, and column types.
crime_dirty = dprep.auto_read_file(path=file_crime_dirty)
crime_dirty.head(5)
```
__Advanced features:__ if you'd like to specify the file type and adjust how you want to read files in, you can see the list of our specialized file readers and how to use them [here](../../how-to-guides/data-ingestion.ipynb).
<a id="Profile"></a>
## Profile your data
Let's understand what our data looks like. Azure ML Data Prep facilitates this process by offering data profiles that help us glimpse into column types and column summary statistics. Notice that our auto file reader automatically guessed the column type:
```
# Column-level profile: inferred types and summary statistics.
crime_dirty.get_profile()
```
<a id="Append"></a>
## Append rows
What if your data is split across multiple files? We support the ability to append multiple datasets column-wise and row-wise. Here, we demonstrate how you can coalesce datasets row-wise:
```
# Datasets with the same schema as crime_dirty
crime_winter = dprep.auto_read_file(path=file_crime_winter)
crime_spring = dprep.auto_read_file(path=file_crime_spring)
# Row-wise concatenation of the three seasonal datasets.
crime = (crime_dirty.append_rows(dataflows=[crime_winter, crime_spring]))
# Show a reproducible 25% sample (seeded above).
crime.take_sample(probability=0.25, seed=RAND_SEED).head(5)
```
__Advanced features:__ you can learn how to append column-wise and how to deal with appending data with different schemas [here](../../how-to-guides/append-columns-and-rows.ipynb).
<a id="Data-science-transforms"></a>
## Apply common data science transforms
Azure ML Data Prep supports almost all common data science transforms found in other industry-standard data science libraries. Here, we'll explore the ability to `summarize`, `join`, `filter`, and `replace`.
__Advanced features:__
* We also provide "smart" transforms not found in pandas that use machine learning to [derive new columns](../../how-to-guides/derive-column-by-example.ipynb), [split columns](../../how-to-guides/split-column-by-example.ipynb), and [fuzzy grouping](../../how-to-guides/fuzzy-group.ipynb).
* Finally, we also help featurize your dataset to prepare it for machine learning; learn more about our featurizers like [one-hot encoder](../../how-to-guides/one-hot-encoder.ipynb), [label encoder](../../how-to-guides/label-encoder.ipynb), [min-max scaler](../../how-to-guides/min-max-scaler.ipynb), and [random (train-test) split](../../how-to-guides/random-split.ipynb).
* Our complete list of example Notebooks for transforms can be found in our [How-to Guides](../../how-to-guides).
<a id="Summarize"></a>
### Summarize
Let's see which wards had the most crimes in our sample dataset:
```
# Count crimes per ward (COUNT of 'ID' grouped by 'Ward') ...
crime_summary = (crime
    .summarize(
        summary_columns=[
            dprep.SummaryColumnsValue(
                column_id='ID',
                summary_column_name='total_ward_crimes',
                summary_function=dprep.SummaryFunction.COUNT
            )
        ],
        group_by_columns=['Ward']
    )
)
# ... and show the wards with the most crimes first.
(crime_summary
    .sort(sort_order=[('total_ward_crimes', True)])
    .head(5)
)
```
<a id="Join"></a>
### Join
Let's annotate each observation with more information about the ward where the crime occurred. Let's do so by joining `crime` with a dataset which lists the current aldermen for each ward:
```
aldermen = dprep.auto_read_file(path=file_aldermen)
aldermen.head(5)
# Join crime records with ward/alderman information on the 'Ward' column.
crime.join(
    left_dataflow=crime,
    right_dataflow=aldermen,
    join_key_pairs=[
        ('Ward', 'Ward')
    ]
).head(5)
```
__Advanced features:__ [Learn more](../../how-to-guides/join.ipynb) about how you can do all variants of `join`, like inner-, left-, right-, anti-, and semi-joins.
<a id="Filter"></a>
### Filter
Let's look at theft crimes:
```
# Keep only theft crimes.
theft = crime.filter(crime['Primary Type'] == 'THEFT')
theft.head(5)
```
<a id="Replace"></a>
### Replace
Notice that our `theft` dataset has empty strings in column `Location`. Let's replace those with a missing value:
```
# Treat empty strings in 'Location' as missing values.
theft_replaced = (theft
    .replace_na(
        columns=['Location'],
        use_empty_string_as_na=True
    )
)
theft_replaced.head(5)
```
__Advanced features:__ [Learn more](../../how-to-guides/replace-fill-error.ipynb) about more advanced `replace` and `fill` capabilities.
<a id="Consume"></a>
## Consume your cleaned dataset
Azure ML Data Prep allows you to "choose your own adventure" once you're done wrangling. You can:
1. Write to a pandas dataframe
2. Execute on Spark
3. Consume directly in Azure Machine Learning models
In this quickstart guide, we'll show how you can export to a pandas dataframe.
__Advanced features:__
* One of the beautiful features of Azure ML Data Prep is that you only need to write your code once and choose whether to scale up or out.
* You can directly consume your new DataFlow in model builders like Azure Machine Learning's [automated machine learning](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/dataprep/auto-ml-dataprep.ipynb).
```
# Materialize the cleaned Dataflow as a pandas DataFrame.
theft_replaced.to_pandas_dataframe()
```
<a id="Explore"></a>
## Explore advanced features
Congratulations on finishing your introduction to the Azure ML Data Prep SDK! If you'd like more detailed tutorials on how to construct machine learning datasets or dive deeper into all of its functionality, you can find more information in our detailed notebooks [here](https://github.com/Microsoft/PendletonDocs). There, we cover topics including how to:
* [Cache your Dataflow to speed up your iterations](../../how-to-guides/cache.ipynb)
* [Add your custom Python transforms](../../how-to-guides/custom-python-transforms.ipynb)
* [Impute missing values](../../how-to-guides/impute-missing-values.ipynb)
* [Sample your data](../../how-to-guides/subsetting-sampling.ipynb)
* [Reference and link between Dataflows](../../how-to-guides/join.ipynb)
| github_jupyter |
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import matplotlib as mpl
from importlib import reload
import IPython
# Thin lines and hidden top/right spines for a minimal plot style.
mpl.rcParams['lines.linewidth'] = 0.25
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.linewidth'] = 0.25
```
Set up experiment directory and settings
```
import torch, argparse, os, shutil, inspect, json, numpy
import netdissect
from netdissect.easydict import EasyDict
from netdissect import experiment
from netdissect.experiment import resfile
from netdissect import pbar, nethook, renormalize, parallelfolder, pidfile
from netdissect import upsample, tally, imgviz, imgsave, bargraph, show
# choices are alexnet, vgg16, or resnet152.
args = EasyDict(model='vgg16', dataset='places', seg='netpqc', layer=None)
# Cached results are keyed by model/dataset/segmenter combination.
resdir = 'results/%s-%s-%s' % (args.model, args.dataset, args.seg)
experiment.set_result_dir(resdir)
```
load classifier model and dataset
```
# Load the classifier, hook the layer to dissect, and load its dataset.
model = experiment.load_model(args)
layername = experiment.instrumented_layername(args)
model.retain_layer(layername)
dataset = experiment.load_dataset(args)
upfn = experiment.make_upfn(args, dataset, model, layername)
sample_size = len(dataset)
print('Inspecting layer %s of model %s on %s' % (layername, args.model, args.dataset))
# Classifier labels
from urllib.request import urlopen
from netdissect import renormalize
# Quantile level used later to threshold unit activations for visualization.
percent_level=0.995
classlabels = dataset.classes
renorm = renormalize.renormalizer(dataset, mode='zc')
pbar.descnext('rq')
def compute_samples(batch, *args):
    """Forward a batch and return the retained layer's activations as rows:
    one row per spatial location (after upsampling), one column per channel."""
    inputs = batch.cuda()
    model(inputs)  # the forward pass populates the retained layer
    raw_acts = model.retained_layer(layername)
    upsampled = upfn(raw_acts)
    n_channels = raw_acts.shape[1]
    return upsampled.permute(0, 2, 3, 1).contiguous().view(-1, n_channels)
# Tally per-channel activation quantiles over the dataset (cached on disk).
rq = tally.tally_quantile(compute_samples, dataset,
    sample_size=sample_size,
    r=8192,
    num_workers=100,
    pin_memory=True,
    cachefile=resfile('rq.npz'))
from netdissect import imgviz
# Visualizer thresholds each unit at its `percent_level` quantile.
iv = imgviz.ImageVisualizer((100, 100), source=dataset, quantiles=rq, level=rq.quantiles(percent_level))
model.retain_layer(layername)
# Sanity check on a single image: ground-truth vs predicted label.
image_index = 0
out = model(dataset[image_index][0][None,...].cuda())
print('gt', classlabels[dataset[image_index][1]])
print('pred', classlabels[out.max(1)[1][0]])
display(renormalize.as_image(dataset[image_index][0], source=dataset))
# Inspect the shape of the retained activation tensor.
model.retained_layer(layername).shape
# Resolve the numeric class id of the attack target class by name.
target_class = 'bedroom'
# target_class_id =
for class_id, classlabel in enumerate(classlabels):
    if classlabel == target_class:
        target_class_id = class_id
        print(target_class_id, classlabel)
        break
# Collect `num_images` dataset indices whose ground truth AND prediction are
# both different from the target class (candidates for a targeted attack).
num_images = 1000
image_index = 0
good_indices = []
filtering_source_class = False
# NOTE(review): this loop has no bound on image_index — if the dataset has
# fewer than num_images qualifying images it will run past the end.
while (True):
    gt_label = classlabels[dataset.images[image_index][1]]
    # Optionally restrict candidates to a single source class.
    if filtering_source_class and gt_label != 'ski_resort':
        image_index += 1
        continue
    out = model(dataset[image_index][0][None,...].cuda())
    pred_label = classlabels[out.max(1)[1][0]]
    if gt_label != target_class and pred_label != target_class:
        good_indices.append(image_index)
    else:
        print('image {:d} gt {:s} pred {:s}'.format(image_index, gt_label, pred_label))
    image_index += 1
    if len(good_indices) == num_images:
        print('get {:d} images from {:d} candidates'.format(num_images, image_index))
        break
import json, urllib.request
import json, urllib.request
# Fetch published dissection metadata: per-unit labels and the units whose
# ablation most affects each class.  (Duplicate import line kept as-is.)
unit_names = json.load(urllib.request.urlopen('http://dissect.csail.mit.edu/results/vgg16-places-netpqc-conv5_3-10/report.json'))
data = json.load(urllib.request.urlopen('http://dissect.csail.mit.edu/results/vgg16-places-netpqc-conv5_3-10/ttv_unit_ablation.json'))
units = data[target_class]
unit_ids = []
unit_acc = []
for unit in units:
    unit_id = unit['unit']
    acc = unit['val_acc']
    unit_ids.append(unit_id)
    unit_acc.append(acc)
    label = unit_names['units'][unit_id]['label']
    # NOTE(review): acts_mean_average is not defined in this chunk — it must
    # come from a cell executed earlier in the notebook session; confirm.
    print(unit_id, acc, label, acts_mean_average[unit_id])
dataset.images
print(unit_ids)
# NOTE(review): acts_mean is assigned only inside the visualization loop
# below, so this print relies on notebook execution order.
print(acts_mean)
# For the first 10 attacked images: reload the saved original/adversarial
# pair, compare the retained-layer activations, and display per-unit
# masked images and heatmaps before/after the attack.
for good_index in good_indices[:10]:
    result_path = os.path.join(results_dir, 'image_{:d}_target_{:s}.pkl'.format(good_index, target_class))
    data = pickle_load(result_path)
    image_id = data['image_id']
    target_id = data['target_id']
    ori_image = data['ori']
    adv_image = data['adv']
    # Activations on the original image.
    image_ori = dataset[image_id][0]
    out = model(image_ori[None,...].cuda())
    acts_ori = model.retained_layer(layername).cpu()
    # Activations on the adversarial image.
    adv = renormalize.as_tensor(adv_image, source='pt', mode='imagenet')[None,...]
    pred_adv = model(adv.cuda())
    acts_adv = model.retained_layer(layername).cpu()
    # Per-channel mean activation change caused by the attack.
    acts_mean = (acts_adv - acts_ori).mean(dim=(2, 3)).numpy()[0]
    # Perturbation image, rescaled around 0.5 for display.
    diff_image = adv_image - ori_image
    diff_image = diff_image/abs(diff_image).max()+0.5
    img = renormalize.as_image(diff_image, source='pt')
    # all_image = np.concatenate([ori_image, adv_image, diff_image], axis=1)
    display(renormalize.as_image(ori_image, source='pt'))
    display(renormalize.as_image(adv_image, source='pt'))
    display(renormalize.as_image(diff_image, source='pt'))
    # for u_idx, u in enumerate(unit_ids):
    #     print(u_idx, u)
    display(show.blocks(
        [[[
            'unit {:03d} vac drop {:.3f} diff_mean {:.3f}'.format(u, unit_acc[u_idx], acts_mean_average[u]),
            '{:s} {:3.3f} diff = {:.3f}'.format(unit_names['units'][u]['label'], unit_names['units'][u]['iou'], acts_mean[u]),
            [iv.masked_image(image_ori, acts_ori, (0, u))],
            [iv.heatmap(acts_ori, (0, u), mode='nearest')],
            [iv.masked_image(adv, acts_adv, (0, u))],
            [iv.heatmap(acts_adv, (0, u), mode='nearest')],
        ]
        for u_idx, u in enumerate(unit_ids)]
        ],
    ))
# Persist the selected candidate indices for reuse in later sessions.
json.dump(good_indices, open('ski_resort_to_bedroom.json', 'w'))
import foolbox
import torch
import torchvision.models as models
import numpy as np
# import cv2
print(foolbox.__version__)
from foolbox.criteria import TargetClass, TargetClassProbability
import numpy as np
import os
import pickle
from netdissect import pbar
def mkdir(path):
    """Create the directory `path` (including missing parents) if it does not exist.

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`
    # and creates intermediate directories in one call.
    os.makedirs(path, exist_ok=True)
def pickle_load(file_name):
    """Deserialize and return the object stored in `file_name`."""
    with open(file_name, 'rb') as handle:
        return pickle.load(handle)
def pickle_save(file_name, data):
    """Write *data* to *file_name* using the highest available pickle protocol."""
    with open(file_name, 'wb') as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
# test_stop = 1
# ImageNet normalization constants used by foolbox preprocessing.
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)) #0.475, 0.441, 0.408
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
# Wrap the 365-class scene classifier for foolbox; inputs are [0,1] images.
fmodel = foolbox.models.PyTorchModel(model, bounds=(0, 1), num_classes=365, preprocessing=(mean, std))
results_dir = 'results/adv/vgg16/{:s}_images'.format(target_class)
print(results_dir)
mkdir(results_dir)
print('target id {:d}, class {:s}'.format(target_class_id, target_class))
for good_index in pbar(good_indices):
    result_path = os.path.join(results_dir, 'image_{:d}_target_{:s}.pkl'.format(good_index, target_class))
    # Skip images whose attack result was already cached on disk.
    if os.path.isfile(result_path):
        continue
    image = dataset[good_index][0]
    image = renormalize.as_tensor(image, source=dataset, mode='pt').numpy()
    pred = np.argmax(fmodel.forward_one(image))
    # print('predicted class', pred, classlabels[pred])
    # Targeted Carlini-Wagner L2 attack toward target_class_id.
    attack = foolbox.attacks.CarliniWagnerL2Attack(fmodel, criterion=TargetClass(target_class_id))
    adversarial = attack(image, pred)
    adv_label = np.argmax(fmodel.forward_one(adversarial))
    # print('adversarial class', adv_label, classlabels[adv_label])
    ori_image = torch.from_numpy(image).float()
    adv_image = torch.from_numpy(adversarial).float()
    # Cache both clean and adversarial tensors so later cells can reload them.
    pickle_save(result_path, {'image_id': good_index, 'target_id': target_class_id, 'ori': ori_image, 'adv': adv_image})
    if good_index % 50 == 0:
        # NOTE(review): good_index is a dataset index, not a loop counter, so
        # this progress line fires irregularly and the '{}/{}' ratio is
        # misleading -- confirm intent.
        print('process {:d}/{:d}'.format(good_index, len(good_indices)))
        print('predicted class', pred, classlabels[pred])
        print('adversarial class', adv_label, classlabels[adv_label])
# if good_index +1 >= test_stop:
#     break
```
Visualize activations for a single layer of a single image.
```
import pickle
import numpy as np
def pickle_load(file_name):
    """Deserialize and return the object pickled in *file_name*."""
    with open(file_name, 'rb') as handle:
        return pickle.load(handle)
# loading results
results_dir = 'results/adv/vgg16/{:s}_images'.format(target_class)
os.makedirs(results_dir, exist_ok=True)
# test_stop = 70
# target_class = 'ski_resort'
# Per-unit mean absolute activation change, accumulated over all images.
acts_mean_abs_all = []
for good_index in good_indices:
    result_path = os.path.join(results_dir, 'image_{:d}_target_{:s}.pkl'.format(good_index, target_class))
    data = pickle_load(result_path)
    image_id = data['image_id']
    target_id = data['target_id']
    ori_image = data['ori']
    adv_image = data['adv']
    # Forward the clean image; retained_layer() then holds its activations.
    pred_ori = model(dataset[image_id][0][None,...].cuda())
    image_ori = dataset[image_id][0]
    # out = model(image_ori[None,...].cuda())
    acts_ori = model.retained_layer(layername).cpu()
    adv = renormalize.as_tensor(adv_image, source='pt', mode='imagenet')[None,...]
    pred_adv = model(adv.cuda())
    acts_adv = model.retained_layer(layername).cpu()
    # Mean |activation change| per unit, spatial dimensions reduced.
    acts_mean_abs = (acts_adv - acts_ori).abs().mean(dim=(2, 3)).numpy()[0]
    acts_mean_abs_all.append(acts_mean_abs[..., np.newaxis])
    # if good_index >= test_stop:
    #     break
# (units, images) matrix of mean absolute changes, averaged over images.
acts_mean_abs_all = np.concatenate(acts_mean_abs_all, axis=1)
acts_mean_average = np.mean(acts_mean_abs_all, axis=1)
# Ten units most changed by the attack, most affected first.
sort_ids = np.argsort(acts_mean_average)[::-1][:10]
print(acts_mean_average[sort_ids])
print(sort_ids)
def Diff(li1, li2):
    """Return the elements of li1 absent from li2 (set semantics: no order, no duplicates)."""
    return list(set(li1).difference(li2))
# Compare the average activation change of the tracked units against the rest.
print(acts_mean_average[unit_ids])
print(unit_ids)
print(np.mean(acts_mean_average))
print(np.mean(acts_mean_average[unit_ids]))
# print(np.mean(acts_mean_average[unit_ids]))
# Units of this 512-unit layer that are not in unit_ids.
remain_ids = Diff(range(512), unit_ids)
print(np.mean(acts_mean_average[remain_ids]))
_ = model(dataset[image_index][0][None,...].cuda())
image_ori = dataset[image_index][0]
acts_ori = model.retained_layer(layername).cpu()
display(show.blocks(
    [[['unit {0:03d}'.format(u), '{:s} {:f}'.format(data['images'][u]['label'], data['images'][u]['iou']),
       [iv.masked_image(image_ori, acts_ori, (0, u))],
       [iv.heatmap(acts_ori, (0, u), mode='nearest')]]
      # NOTE(review): `acts` is not defined in this cell; it presumably
      # carries over from an earlier cell -- confirm it matches acts_ori.
      for u in range(min(acts.shape[1], 32)) if data['images'][u]['iou'] > iou_threshold]
    ],
))
```
## Perform adversarial attacks
```
import foolbox
import torch
import torchvision.models as models
import numpy as np
import cv2
print(foolbox.__version__)
```
## Adversarial attack a pre-trained PyTorch model
```
%%time
from foolbox.criteria import TargetClass, TargetClassProbability
image_index=0
label = dataset[image_index][1]
image = dataset[image_index][0]
# Convert to a [0,1]-range numpy image for foolbox.
image = renormalize.as_tensor(image, source=dataset, mode='pt').numpy()
# ImageNet normalization constants used by foolbox preprocessing.
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)) #0.475, 0.441, 0.408
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
fmodel = foolbox.models.PyTorchModel(model, bounds=(0, 1), num_classes=365, preprocessing=(mean, std))
print(image.max(), image.min(), image.shape, image.dtype)
print('label', label, classlabels[label])
pred = np.argmax(fmodel.forward_one(image))
print('predicted class', pred, classlabels[pred])
# attack = foolbox.attacks.DeepFoolAttack(fmodel)#, criterion=TargetClass(100)) #LBFGSAttack FGSM
# Targeted Carlini-Wagner L2 attack toward class 100.
attack = foolbox.attacks.CarliniWagnerL2Attack(fmodel, criterion=TargetClass(100)) #LBFGSAttack FGSM
adversarial = attack(image, pred)
adv_label = np.argmax(fmodel.forward_one(adversarial))
print('adversarial class', adv_label, classlabels[adv_label])
# visualize results
from PIL import Image
ori_image = torch.from_numpy(image).float()
adv_image = torch.from_numpy(adversarial).float()
diff_image = adv_image - ori_image
# Scale the perturbation into [0, 1] for display (0.5 = no change).
diff_image = diff_image/abs(diff_image).max()*0.5+0.5
img = renormalize.as_image(diff_image, source='pt')
display(renormalize.as_image(ori_image, source='pt'))
display(renormalize.as_image(adv_image, source='pt'))
display(renormalize.as_image(diff_image, source='pt'))
from netdissect import imgviz
# Re-normalize the adversarial image and capture its activations.
adv = renormalize.as_tensor(adv_image, source='pt', mode='imagenet')[None,...]
_ = model(adv.cuda())
acts_adv = model.retained_layer(layername).cpu()
iou_threshold = 0.025
display_units = 32
# How to order units for display: 'id', 'iou', 'diff_n' (largest activation
# drop), or 'diff_p' (largest activation increase under the attack).
sort_method = 'diff_p'
num_units = acts_adv.shape[1]
# BUG FIX: the original compared strings with `is` (e.g. `sort_method is
# 'id'`), which only works by accident of CPython string interning and emits
# a SyntaxWarning on modern Python; use `==`, and `elif` since the cases are
# mutually exclusive.
if sort_method == 'id':
    sort_ids = range(min(num_units, display_units))
elif sort_method == 'iou':
    ious = [data['images'][i]['iou'] for i in range(num_units)]
    sort_ids = np.argsort(ious)[::-1][:display_units]
elif sort_method == 'diff_n':
    # acts_mean_abs = (acts_adv - acts_ori).abs().mean(dim=(2, 3)).numpy()[0]
    # Per-unit mean activation change; most-decreased units first.
    acts_mean = (acts_adv - acts_ori).mean(dim=(2, 3)).numpy()[0]
    sort_ids = np.argsort(acts_mean)[:display_units]
elif sort_method == 'diff_p':
    # Per-unit mean activation change; most-increased units first.
    acts_mean = (acts_adv - acts_ori).mean(dim=(2, 3)).numpy()[0]
    sort_ids = np.argsort(acts_mean)[::-1][:display_units]
# print(ious[sort_ids[0]])
print(sort_ids)
# Clean vs. adversarial masked images and heatmaps for the sorted units.
# NOTE(review): this display block appears twice verbatim below; likely an
# accidental duplication of the notebook cell.
display(show.blocks(
    [[[
        'unit {0:03d}'.format(u),
        '{:s} {:3.3f} diff = {:3.3f}'.format(data['images'][u]['label'],
                                             data['images'][u]['iou'], acts_mean[u]),
        [iv.masked_image(image_ori, acts_ori, (0, u))],
        [iv.heatmap(acts_ori, (0, u), mode='nearest')],
        [iv.masked_image(adv, acts_adv, (0, u))],
        [iv.heatmap(acts_adv, (0, u), mode='nearest')],
        ]
        for u in sort_ids if data['images'][u]['iou'] > iou_threshold]
    ],
))
display(show.blocks(
    [[[
        'unit {0:03d}'.format(u),
        '{:s} {:3.3f} diff = {:3.3f}'.format(data['images'][u]['label'],
                                             data['images'][u]['iou'], acts_mean[u]),
        [iv.masked_image(image_ori, acts_ori, (0, u))],
        [iv.heatmap(acts_ori, (0, u), mode='nearest')],
        [iv.masked_image(adv, acts_adv, (0, u))],
        [iv.heatmap(acts_adv, (0, u), mode='nearest')],
        ]
        for u in sort_ids if data['images'][u]['iou'] > iou_threshold]
    ],
))
```
## Collect quantile statistics
First, unconditional quantiles over the activations. We will upsample them to 56x56 to match with segmentations later.
```
pbar.descnext('rq')
def compute_samples(batch, *args):
    """Return per-pixel activation samples (pixels x units) for a batch."""
    image_batch = batch.cuda()
    _ = model(image_batch)
    acts = model.retained_layer(layername)
    # Upsample activations so they align with segmentations later.
    hacts = upfn(acts)
    return hacts.permute(0, 2, 3, 1).contiguous().view(-1, acts.shape[1])
# Unconditional running quantiles of unit activations over the dataset.
rq = tally.tally_quantile(compute_samples, dataset,
                          sample_size=sample_size,
                          r=8192,
                          num_workers=100,
                          pin_memory=True,
                          cachefile=resfile('rq.npz'))
def compute_conditional_samples(batch, *args):
    """Return activation samples grouped by predicted segmentation concept."""
    image_batch = batch.cuda()
    _ = model(image_batch)
    acts = model.retained_layer(layername)
    seg = segmodel.segment_batch(renorm(image_batch), downsample=4)
    hacts = upfn(acts)
    return tally.conditional_samples(hacts, seg)
# Activation quantiles conditioned on segmentation concept.
condq = tally.tally_conditional_quantile(compute_conditional_samples,
                                         dataset,
                                         batch_size=1, num_workers=30, pin_memory=True,
                                         sample_size=sample_size, cachefile=resfile('condq.npz'))
```
## Visualize Units
Collect topk stats first.
```
pbar.descnext('topk')
def compute_image_max(batch, *args):
    """Return each unit's spatial max activation for every image in the batch."""
    image_batch = batch.cuda()
    _ = model(image_batch)
    acts = model.retained_layer(layername)
    # Flatten the spatial dimensions, then take the max per unit.
    acts = acts.view(acts.shape[0], acts.shape[1], -1)
    acts = acts.max(2)[0]
    return acts
# Top-k images per unit, ranked by max activation (cached on disk).
topk = tally.tally_topk(compute_image_max, dataset, sample_size=sample_size,
                        batch_size=50, num_workers=30, pin_memory=True,
                        cachefile=resfile('topk.npz'))
```
Then we just need to run through and visualize the images.
```
pbar.descnext('unit_images')
# Visualizer thresholds each unit at its percent_level activation quantile.
iv = imgviz.ImageVisualizer((100, 100), source=dataset, quantiles=rq,
                            level=rq.quantiles(percent_level))
def compute_acts(image_batch):
    """Forward a batch and return the retained layer's activations."""
    image_batch = image_batch.cuda()
    _ = model(image_batch)
    acts_batch = model.retained_layer(layername)
    return acts_batch
# Masked top-10 images for every unit (cached on disk).
unit_images = iv.masked_images_for_topk(
    compute_acts, dataset, topk, k=10, num_workers=30, pin_memory=True,
    cachefile=resfile('top10images.npz'))
for u in [10, 20, 30, 40]:
    print('unit %d' % u)
    display(unit_images[u])
```
## Label Units
Collect 99.5 quantile stats.
```
# Use the segmodel for segmentations. With broden, we could use ground truth instead.
def compute_conditional_indicator(batch, *args):
    """Return binarized (above the 99.5% level) activations grouped by concept."""
    image_batch = batch.cuda()
    seg = segmodel.segment_batch(renorm(image_batch), downsample=4)
    _ = model(image_batch)
    acts = model.retained_layer(layername)
    hacts = upfn(acts)
    iacts = (hacts > level_at_995).float() # indicator
    return tally.conditional_samples(iacts, seg)
pbar.descnext('condi995')
condi995 = tally.tally_conditional_mean(compute_conditional_indicator,
                                        dataset, sample_size=sample_size,
                                        num_workers=3, pin_memory=True,
                                        cachefile=resfile('condi995.npz'))
# IoU between each unit's indicator and each segmentation concept.
iou_995 = tally.iou_from_conditional_indicator_mean(condi995)
# Best (concept, label, category, iou) per unit at the fixed 99.5% threshold.
unit_label_995 = [
    (concept.item(), seglabels[concept], segcatlabels[concept], bestiou.item())
    for (bestiou, concept) in zip(*iou_995.max(0))]
label_list = [label for concept, label, labelcat, iou in unit_label_995 if iou > 0.04]
display(IPython.display.SVG(experiment.graph_conceptlist(label_list)))
len(label_list)
from netdissect import experiment
labelcat_list = [labelcat for concept, label, labelcat, iou in unit_label_995 if iou > 0.04]
display(IPython.display.SVG(experiment.graph_conceptcatlist(labelcat_list)))
# NOTE(review): unit_label_adaptive is not defined until a later cell, so
# this bare expression fails when the notebook runs top-to-bottom.
unit_label_adaptive
```
Show a few units with their labels
```
# Show a few sample units with their fixed-threshold labels and IoUs.
for u in [10, 20, 30, 40]:
    print('unit %d, label %s, iou %.3f' % (u, unit_label_995[u][1], unit_label_995[u][3]))
    display(unit_images[u])
```
Investigate secondary labels
```
# Disabled exploration: find a secondary label for each unit.
if False:
    # Pairwise concept correlation, used to skip correlated secondary labels.
    seg_cor = experiment.load_concept_correlation(args, segmodel, seglabels)
    # Units ordered by best IoU, descending.
    sorted_unit_label_995 = sorted([(unit, concept, label, iou)
                                    for unit, (concept, label, labelcat, iou) in enumerate(unit_label_995)
                                    ], key=lambda x: -x[-1])
if False:
    count = 0
    double_count = 0
    multilabels = {}
    for unit, concept, label, iou in sorted_unit_label_995:
        if iou < 0.02:
            continue
        labels = [(label, iou)]
        # Take at most one secondary concept: the highest-IoU concept that is
        # not the primary, not correlated with it, and has IoU >= 0.02.
        for c2 in iou_995[:, unit].sort(0, descending=True)[1]:
            if c2 == concept or seg_cor[c2, concept] > 0:
                continue
            if iou_995[c2, unit] < 0.02:
                break
            labels.append((seglabels[c2], iou_995[c2, unit]))
            break
        multilabels[unit] = labels
        count += 1
        double_count += 1 if len(labels) > 1 else 0
        print('unit %d: %s' % (unit, ', '.join(['%s: iou %.3f' % r for r in labels])))
        if len(labels) > 1 and label == 'bed':
            display(unit_images[unit])
    print('%d doubles out of %d (%.2f)' % (double_count, count, float(double_count) / count))
```
# Adaptive labeling of units
using conditional quantiles and IQR
```
# Disabled first attempt at adaptive (per-unit threshold) labeling.
if False:
    # Candidate quantile cutoffs, log-spaced near 1.0.
    cutoff_candidates = 1 - torch.logspace(-3, math.log10(0.15), 50)
    # Quantile position of zero activation for each of the 256 units.
    unit_quantile_zero = rq.normalize(torch.zeros(256))
    unit_quantile_mask = cutoff_candidates[None,:] <= unit_quantile_zero[:,None]
    iqr_candidates = tally.iqr_from_conditional_quantile(condq, cutoff=cutoff_candidates)
    iou_candidates = tally.iou_from_conditional_quantile(condq, cutoff=cutoff_candidates)
    # Ignore records for which unit is zeroed
    iqr_candidates[unit_quantile_mask[:,None,:].expand(iqr_candidates.shape)] = 0
    best_adaptive_iqr, best_iqr_choice = iqr_candidates.max(2)
    # Obtain the iou at the max-iqr threshold
    iou_at_best_iqr = iou_candidates.gather(2, best_iqr_choice[...,None])[...,0]
    # Ignore records for which the max-iqr is achieved at 50-50 (typically "painted", or "building")
    masked_iou_at_best_iqr = iou_at_best_iqr.clone()
    # masked_iou_at_best_iqr[best_iqr_choice == len(cutoff_candidates) - 1] = 0.0
    masked_iou_at_best_iqr
    best_adaptive_iou, best_adaptive_match = masked_iou_at_best_iqr.max(1)
    for u in range(256):
        print(u, best_adaptive_match[u].item(),
              seglabels[best_adaptive_match[u]],
              best_adaptive_iou[u].item(),
              1 - cutoff_candidates[best_iqr_choice[u, best_adaptive_match[u]]].item())
        print(unit_label_995[u])
        display(unit_images[u])
# Get the best entity based on iqr
cutoff_candidates = 1 - torch.logspace(-3, math.log10(0.5), 50)
unit_quantile_zero = rq.normalize(torch.zeros(256))
# True where a cutoff falls at or below the unit's zero-activation quantile.
unit_quantile_mask = cutoff_candidates[None,:] <= unit_quantile_zero[:,None]
iqr_candidates = tally.iqr_from_conditional_quantile(condq, cutoff=cutoff_candidates)
iou_candidates = tally.iou_from_conditional_quantile(condq, cutoff=cutoff_candidates)
best_adaptive_iqr, best_iqr_choice = iqr_candidates.max(2)
# This is needed for good results:
unmasked_iqr_candidates = iqr_candidates.clone()
# large_concepts = (best_iqr_choice == len(cutoff_candidates) - 1)
# iqr_candidates[large_concepts[:,:,None].expand(iqr_candidates.shape)] = 0
# Also ignore thresholds past zero relu threshold
iqr_candidates[unit_quantile_mask[:,None,:].expand(iqr_candidates.shape)] = 0
best_adaptive_iqr, best_iqr_choice = iqr_candidates.max(2)
# Get rid of cases where the max iqr is at the lowest threshold
if True:
    max_at_low_quantile_mask = (unit_quantile_mask[:,None,:]
                                .expand(best_iqr_choice.shape + (unit_quantile_mask.shape[1],))
                                .gather(2, (best_iqr_choice[:,:,None] + 1).clamp(0, 49)))[...,0]
    iqr_candidates[max_at_low_quantile_mask[...,None].expand(iqr_candidates.shape)] = 0
    best_adaptive_iqr, best_iqr_choice = iqr_candidates.max(2)
# IQR and IoU evaluated at each unit/concept's best threshold.
iqr_at_best_threshold = iqr_candidates.gather(2, best_iqr_choice[...,None])[...,0]
iou_at_best_iqr = iou_candidates.gather(2, best_iqr_choice[...,None])[...,0]
best_adaptive_iqr, best_adaptive_match = iqr_at_best_threshold.max(1)
best_adaptive_iou = iou_at_best_iqr.gather(1, best_adaptive_match[...,None])[...,0]
unit_label_adaptive = []
for u in range(256):
    unit_label_adaptive.append((
        best_adaptive_match[u].item(),
        seglabels[best_adaptive_match[u]],
        segcatlabels[best_adaptive_match[u]],
        best_adaptive_iou[u].item()
    ))
    # Only display units whose adaptive label differs from the fixed one.
    if unit_label_995[u][1] == seglabels[best_adaptive_match[u]]:
        continue
    print('adaptive', u, best_adaptive_match[u].item(),
          seglabels[best_adaptive_match[u]],
          best_adaptive_iou[u].item(),
          1 - cutoff_candidates[best_iqr_choice[u, best_adaptive_match[u]]].item())
    print('fixed 99.5%', unit_label_995[u])
    display(unit_images[u])
# Shape inspections left from debugging.
best_iqr_choice.shape
unit_quantile_mask.shape
max_at_low_quantile_mask = (unit_quantile_mask[:,None,:]
                            .expand(best_iqr_choice.shape + (unit_quantile_mask.shape[1],))
                            .gather(2, (best_iqr_choice[:,:,None] + 1).clamp(0, 49)))[...,0]
max_at_low_quantile_mask.shape
best_iqr_choice.shape
# Example IQR-vs-threshold curves for particular (unit, concept) pairs.
plt.plot(cutoff_candidates.numpy(), iqr_candidates[65,4].numpy(), linewidth=2)
plt.xlabel('quantile threshold for unit')
plt.ylabel('information quality ratio')
plt.plot(cutoff_candidates.numpy(), iqr_candidates[3, 2].numpy(), linewidth=2)
plt.xlabel('quantile threshold for unit')
plt.ylabel('information quality ratio')
plt.title('segments vs unit at various thresholds')
plt.plot(cutoff_candidates.numpy(), iqr_candidates[3, 12].numpy(), linewidth=2)
plt.xlabel('quantile threshold for unit 254')
plt.ylabel('information quality ratio')
plt.title('Signboard segments vs unit 254 at various thresholds')
from netdissect import experiment
# Concept-category distribution under the adaptive labeling.
labelcat_list = [labelcat for concept, label, labelcat, iou in unit_label_adaptive]# if iou > 0.02]
display(IPython.display.SVG(experiment.graph_conceptcatlist(labelcat_list)))
# Spot checks left from debugging.
best_iqr_choice[22,1], cutoff_candidates[best_iqr_choice[22,1]]
unit_quantile_zero[22]
unit_quantile_zero = rq.normalize(torch.zeros(256))
cutoff_candidates[None,:] > unit_quantile_zero[:,None]
```
# Intervention experiment
Part 2.
# Linear Discriminant Analysis
LDA of concepts -> single class. This will give us a baseline.
```
focus_class = 'church-outdoor'
focus_classnum = classlabels.index(focus_class)
# Concept covariance over images of the focus class (cached on disk) ...
rcov_in_class = experiment.concept_covariance(
    args, segmodel, seglabels, sample_size=5000,
    filter_class=lambda x: x == focus_classnum,
    cachefile=experiment.sharedfile('lda-%s/%s-rcov.npz' % (args.seg, focus_class)))
# ... and over images of every other class.
rcov_out_of_class = experiment.concept_covariance(
    args, segmodel, seglabels, sample_size=5000,
    filter_class=(lambda x: x != focus_classnum),
    cachefile=experiment.sharedfile('lda-%s/%s-negate-rcov.npz' % (args.seg, focus_class)))
import copy
def rcov_scaled_to_unit_mean(rcov):
    """Return a copy of *rcov* rescaled so each variable's mean becomes 1.

    Variables with zero mean get scale 0 (avoids inf from division by zero).
    BUG FIX: copy.copy makes a shallow copy, so the original code's in-place
    `*=` updates mutated the caller's _mean and cmom2 tensors; out-of-place
    multiplication leaves the input statistics untouched.
    """
    rcov = copy.copy(rcov)
    scale = 1 / rcov._mean
    scale[rcov._mean == 0] = 0
    rcov._mean = rcov._mean * scale
    rcov.cmom2 = rcov.cmom2 * scale[:, None] * scale[None, :]
    return rcov
def rcov_scaled_to_unit_std(rcov):
    """Return a copy of *rcov* rescaled so each variable has unit variance.

    Variables with zero standard deviation get scale 0.
    BUG FIX: copy.copy makes a shallow copy, so the original code's in-place
    `*=` updates mutated the caller's _mean and cmom2 tensors; out-of-place
    multiplication leaves the input statistics untouched.
    """
    rcov = copy.copy(rcov)
    std = rcov.covariance().diag().sqrt()
    scale = std.reciprocal()
    scale[std == 0] = 0
    rcov._mean = rcov._mean * scale
    rcov.cmom2 = rcov.cmom2 * scale[:, None] * scale[None, :]
    return rcov
# rcov_in_class = rcov_scaled_to_unit_std(rcov_in_class)
# rcov_out_of_class = rcov_scaled_to_unit_std(rcov_out_of_class)
from netdissect import lda
reload(lda)
# LDA direction separating in-class from out-of-class concept statistics;
# the prior reflects 1 focus class out of 365.
trans = lda.lda_transform_from_covariances([rcov_in_class, rcov_out_of_class], shrinkage=0.1,
                                           prior=[1/365.0, 364/365.0])
# Top 20 concepts with the largest LDA weight for the focus class.
for c in trans[:,0].sort(0)[1][-20:].flip(0):
    print(seglabels[c], c.item(), trans[c, 0].item(), rcov_in_class.mean()[c].item())
```
## Salience of concept to class
Here we compute the mutual information between each visual concept and each scene class.
(We binarize the visual concept by thresholding at some number of pixels in the image; then we compute mutual information between this binary variable and each scene category. For each scene-concept pair we choose the threshold that maximizes mutual information.)
Listed below are the top 3 visual concepts with highest mutual information to each class.
```
#salience = experiment.load_salience_matrix(args, segmodel, classlabels, seglabels)
# (class x concept) mutual-information matrix (cached on disk).
salience = experiment.load_class_concept_correlation(args, segmodel, classlabels, seglabels)
for cls in [100, 200, 300]: # range(len(classlabels)):
    print(classlabels[cls])
    # Top 5 concepts by mutual information with this class.
    for mi, concept in list(zip(*salience[cls].sort(0)))[:-5-1:-1]:
        print(mi.item(), concept.item(), seglabels[concept])
```
Here we print the salience information the other way: for each visual concept, we list the top scene categories with highest mutual information to that visual concept.
```
# Reverse view: for each concept, the top 5 classes by mutual information.
for concept in [5, 10, 15, 20, 25]:
    print(seglabels[concept])
    for mi, cls in list(zip(*salience[:,concept].sort(0)))[:-5-1:-1]:
        print(mi.item(), cls.item(), classlabels[cls])
```
## Per-class topk
visualization over subsets
```
pbar.descnext('topk in each class')
def compute_image_max_per_class(batch, class_batch, index_batch, *args):
    """Yield (class, per-image unit maxima, image indices) for each class present in the batch."""
    classes = class_batch.bincount().nonzero()
    image_batch = batch.cuda()
    _ = model(image_batch)
    acts = model.retained_layer(layername)
    # Flatten spatial dimensions and take the per-unit max per image.
    acts = acts.view(acts.shape[0], acts.shape[1], -1)
    acts = acts.max(2)[0].cpu()
    for cls in classes:
        mask = (class_batch == cls)
        yield (cls.item(), acts[mask], index_batch[mask])
# Top-k images per unit, computed separately within each scene class.
topk_perclass = tally.tally_conditional_topk(compute_image_max_per_class, dataset,
                                             sample_size=sample_size,
                                             batch_size=50, num_workers=30, pin_memory=True,
                                             cachefile=resfile('topk_perclass.npz'))
```
# Visualization of class accuracy drop
Plotting per-class accuracy drop versus salience (mutual information) ordering
```
# Which classes are most salient to each concept?
def align_yaxis(ax1, ax2):
    """Align zeros of the two axes, zooming them out by same ratio"""
    axes = [ax1, ax2]
    extrema = [ax.get_ylim() for ax in axes]
    # Fraction of each axis lying below its top limit, measured from zero.
    tops = [hi / (hi - lo) for lo, hi in extrema]
    # Order so that axes[0] is the axis with the lower relative top.
    if tops[0] > tops[1]:
        axes.reverse()
        extrema.reverse()
        tops.reverse()
    # Combined relative span once the two zero points coincide.
    tot_span = tops[1] + 1 - tops[0]
    lo0, hi0 = extrema[0]
    lo1, hi1 = extrema[1]
    # Stretch the first axis upward and the second axis downward.
    axes[0].set_ylim(lo0, lo0 + tot_span * (hi0 - lo0))
    axes[1].set_ylim(hi1 - tot_span * (hi1 - lo1), hi1)
def plot_intervention_classes(concept, acc_diff, count=None, title=None):
    """Bar-plot class/concept mutual information with accuracy drop overlaid.

    concept -- segmentation concept index
    acc_diff -- per-class accuracy change (ablated minus baseline)
    count -- number of most-salient classes to show (None = all)
    title -- optional plot title
    Reads the notebook globals salience, classlabels, and seglabels.
    """
    # Classes ordered by mutual information with each concept, descending.
    most_salient_classes = salience.sort(0)[1].flip(0)
    # concept = seglabels.index('bed')
    dpi = 100
    # f, (a1, a0) = plt.subplots(2, 1, gridspec_kw = {'height_ratios':[1, 2]}, dpi=dpi)
    f, a1 = plt.subplots(1, 1, dpi=dpi, figsize=(10, 5))
    x = []
    labels = []
    mutual_info = []
    accuracy_diff = []
    for cls in most_salient_classes[:,concept][:count]:
        # pbar.print(classlabels[cls], salience[cls, concept])
        mutual_info.append(salience[cls, concept])
        labels.append(classlabels[cls])
        x.append(len(x))
        accuracy_diff.append(acc_diff[cls])
    a1.bar(x, mutual_info)
    a1.set_ylabel('Concept-class mutual information')
    a1.set_xlabel('Classes ordered by correlation with concept "%s"' % seglabels[concept])
    # Only label the ticks when there is room to read them.
    if len(x) < 60:
        a1.set_xticks(x)
        a1.set_xticklabels([label.replace('_', ' ') for label in labels], rotation='vertical')
    a2 = a1.twinx()
    # Negate so a drop in accuracy plots as a positive line height.
    a2.plot(x, [-a for a in accuracy_diff], linewidth=2, color='orange')
    a2.spines["right"].set_visible(True)
    a2.set_ylabel('Class accuracy drop')
    if title is None:
        title = 'Effect of zeroing detector for %s' % (seglabels[concept])
    a2.set_title(title)
    align_yaxis(a2, a1)
    plt.show()
```
# Ablation of single concept detectors.
Question: when we ablate a single unit, how does it affect accuracy of each output class?
```
pbar.descnext('baseline_acc')
# Per-class accuracy of the unablated model (cached on disk).
baseline_accuracy = experiment.test_perclass_accuracy(model, dataset,
                                                      cachefile=resfile('acc_baseline.npy'))
pbar.print('baseline acc', baseline_accuracy.mean().item())
```
Above: recall which concepts are present and seem to have one or more units specific to that concept.
Below, pull out units to probe corresponding to the top 12 concepts.
```
ablation_size = 5
# Ablate groups of units sharing a concept and measure per-class accuracy.
for label, group in experiment.get_top_label_unit_groups(unit_label_995,
                                                         size=ablation_size, num=5, min_iou=0.02):
    concept = group[0][1]
    pbar.descnext('test %s' % label)
    # NOTE(review): group tuples are unpacked as (unit, iou, concept) here but
    # as (unit, concept, iou) below -- one of these orders must be wrong;
    # confirm against get_top_label_unit_groups.
    ablation_accuracy = experiment.test_perclass_accuracy(model, dataset,
                                                          layername=layername,
                                                          ablated_units=[unit for unit, iou, concept in group],
                                                          cachefile=resfile('acc_ablate_%d_%s.npy' % (ablation_size, label)))
    pbar.print('ablate %s units of %s(%d) acc %.3f %s' %
               (len(group), label, concept, ablation_accuracy.mean().item(),
                args.model) )
    pbar.print(', '.join(['unit %d: iou %.3f' % (unit, iou)
                          for unit, concept, iou in group]))
    for unit, _, _ in group:
        print('unit %d: %s' % (unit, ', '.join(['%s: iou %.3f' % r for r in multilabels[unit]])))
    unit = group[0][0]
    display(unit_images[unit])
    # Which classes are most damaged?
    acc_diff = ablation_accuracy - baseline_accuracy
    for cls in acc_diff.sort(0)[1][:5]:
        pbar.print('%s(%d) (mi %.4f): acc %.2f -> %.2f' % (
            classlabels[cls], cls,
            salience[cls, concept],
            baseline_accuracy[cls], ablation_accuracy[cls]))
        # display(iv.masked_image_for_conditional_topk(compute_acts, dataset, topk_perclass, cls.item(), unit))
    plot_intervention_classes(seglabels.index(label), acc_diff,
                              title='Effect of zeroing 5 units (%s %s detectors)' % (args.model, label))
    plot_intervention_classes(seglabels.index(label), acc_diff,
                              count=50,
                              title='Effect of zeroing 5 units (%s %s detectors)' % (args.model, label))
    pbar.print()
multilabels.keys()
```
# Ablation of single units.
Question: when we ablate a single unit, how does it affect accuracy of each output class?
```
# Units ordered by the IoU of their best concept, top 300.
top_iou_units = sorted([(unit, label, iou)
                        for unit, (concept, label, labelcat, iou) in enumerate(unit_label_995)],
                       key=lambda x: -x[-1])[:300]
# Currently restricted to unit 48 only.
for unit, label, iou in [r for r in top_iou_units if r[0] == 48]:
    pbar.descnext('test unit %d' % unit)
    ablation_accuracy = experiment.test_perclass_accuracy(model, dataset,
                                                          layername=layername,
                                                          ablated_units=[unit],
                                                          cachefile=resfile('acc_ablate_unit_%d.npy' % (unit)))
    pbar.print('ablate unit %d (%s iou %.3f) acc %.3f %s' %
               (unit, label, iou, ablation_accuracy.mean().item(),
                args.model) )
    display(unit_images[unit])
    # Which classes are most damaged?
    acc_diff = ablation_accuracy - baseline_accuracy
    for cls in acc_diff.sort(0)[1][:10]:
        # NOTE(review): `concept` is not assigned in this cell; it carries over
        # from an earlier cell, so this salience lookup may use a stale value.
        pbar.print('%s(%d) (mi %.4f): acc %.2f -> %.2f' % (
            classlabels[cls], cls,
            salience[cls, concept],
            baseline_accuracy[cls], ablation_accuracy[cls]))
        display(iv.masked_image_for_conditional_topk(compute_acts, dataset, topk_perclass, cls.item(), unit))
    plot_intervention_classes(seglabels.index(label), acc_diff,
                              title='Effect of zeroing unit %d (%s %s detector, iou %.3f)' % (unit, args.model, label, iou))
    plot_intervention_classes(seglabels.index(label), acc_diff,
                              count=50,
                              title='Effect of zeroing unit %d (%s %s detector, iou %.3f)' % (unit, args.model, label, iou))
    pbar.print()
```
## Load all single-unit ablation perclass accuracy matrix
```
# Per-class accuracy after ablating each single unit, loaded from cache.
single_unit_ablation_acc = torch.zeros(num_units, len(classlabels))
for unit in range(num_units):
    single_unit_ablation_acc[unit] = experiment.test_perclass_accuracy(model, dataset,
                                                                       layername=layername,
                                                                       ablated_units=[unit],
                                                                       cachefile=resfile('acc_ablate_unit_%d.npy' % (unit)))
# Accuracy change per (unit, class); negative means the ablation hurt.
ablation_delta = single_unit_ablation_acc - baseline_accuracy
ablation_delta.max(0)[0].mean(), ablation_delta.min(0)[0].mean()
```
# Focus on single discriminative class
```
# Focus the analysis on one scene class.
focus_class = 'mosque-outdoor'
clsnum = dataset.classes.index(focus_class)
clsnum
```
Recall on church images is 41%.
```
# Baseline accuracy on the focus class, and its LDA concept weights.
baseline_accuracy[clsnum]
discrimination = experiment.load_lda_vector(focus_class, args, segmodel, classlabels, seglabels, shrinkage=0.1)
```
Here are the top concepts that are most salient to churches, just by mutual information.
```
# Concepts most discriminative for the focus class, by LDA weight.
for concept in discrimination.sort(0)[1].flip(0)[:20]:
    print(seglabels[concept], discrimination[concept].item())
# Units whose single-unit ablation hurts the focus class most.
for unit in ablation_delta[:,clsnum].sort(0)[1][:10]:
    concept, label, labelcat, iou = unit_label_995[unit]
    damage = ablation_delta[unit, clsnum]
    print('unit %d (%s, iou %.3f) causes damage %.3f' % (unit, label, iou, damage))
    display(unit_images[unit])
    display(iv.masked_image_for_conditional_topk(compute_acts, dataset, topk_perclass, clsnum, unit.item()))
unit_label_995
```
# Visualize units
```
def plot_twin(triples, count=None, title=None, dpi=100, barlabel=None, linelabel=None,
              label_ticks=True, figsize=(10, 5)):
    """Plot (bar, line, label) triples as bars with a twin-axis line overlay.

    triples -- sequence of (bar_value, line_value, tick_label)
    count -- number of triples to show after sorting (None = all)
    Bars are sorted descending by bar value; the line shares the x positions.
    """
    # Indices of triples ordered by descending (bar, line, label) value.
    ordering = [i for t, i in sorted((t, i) for i, t in enumerate(triples))[::-1]]
    x = []
    labels = []
    bars = []
    lines = []
    for i in ordering[:count]:
        x.append(len(x))
        bars.append(triples[i][0])
        lines.append(triples[i][1])
        labels.append(triples[i][2])
    f, a1 = plt.subplots(1, 1, dpi=dpi, figsize=figsize)
    f.patch.set_facecolor('white')
    a1.bar(x, bars)
    if barlabel is not None:
        a1.set_ylabel(barlabel)
        a1.set_xlabel('Ordered by %s' % barlabel)
    if label_ticks:
        a1.set_xticks(x)
        a1.set_xticklabels([label.replace('_', ' ') for label in labels], rotation='vertical')
    a2 = a1.twinx()
    a2.plot(x, lines, linewidth=2, color='orange')
    a2.spines["right"].set_visible(True)
    if linelabel is not None:
        a2.set_ylabel(linelabel)
    if title:
        a2.set_title(title)
    align_yaxis(a2, a1)
    plt.show()
# Switch the focus class and reload its LDA concept weights.
focus_class = dataset.classes[28]
clsnum = dataset.classes.index(focus_class)
discrimination = experiment.load_lda_vector(focus_class, args, segmodel, classlabels, seglabels, shrinkage=0.1)
clsnum
def plot_intervention_units(clsnum, ablation_delta, unit_label_995, discrimination, count=None,
                            figsize=(20, 5), label_ticks=True):
    """Plot per-unit concept salience (bars) against ablation damage (line).

    clsnum -- class index whose accuracy damage is plotted
    ablation_delta -- (unit, class) accuracy change from single-unit ablation
    discrimination -- per-concept LDA weight column for the class
    Reads the notebook globals iou_995, seglabels, and focus_class (titles).
    """
    triples = []
    for unit in range(256):
        # Bardata is salience of most-salient unit concept
        # Linedata is damage done by the unit
        # Method 1.
        # matching_concepts = [iou_995[:, unit].max(0)[1]]
        # Concepts this unit detects with IoU > 0.02 (fall back to its best).
        matching_concepts = (iou_995[:, unit] > 0.02).nonzero()[:,0]
        if len(matching_concepts) == 0:
            matching_concepts = [iou_995[:, unit].max(0)[1]]
        # relevance, relevant_concept = (0.0, 0)
        # else:
        relevance, relevant_concept = max([(discrimination[c, 0], c) for c in matching_concepts])
        relevance = relevance.item()
        bar = relevance
        label = '%s (%d)' % (seglabels[relevant_concept], unit)
        # Negate so damage (an accuracy drop) plots as a positive line.
        line = -ablation_delta[unit, clsnum].item()
        triples.append((bar, line, label))
    plot_twin(triples, count=count, figsize=figsize, label_ticks=label_ticks,
              title="Can we predict how much a unit will damage %s classification accuracy?" % focus_class,
              barlabel="salience of unit concept to %s (bars)" % focus_class,
              linelabel="damage to accuracy of %s (line)" % focus_class)
plot_intervention_units(clsnum, ablation_delta, unit_label_995, discrimination, count=256, figsize=(50, 5))
# Spot-check the concept-selection logic for a single unit.
unit = 11
matching_concepts = (iou_995[:, unit] > 0.02).nonzero()[:,0]
relevance, relevant_concept = max([(discrimination[c, 0], c) for c in matching_concepts])
seglabels[relevant_concept], relevance
# Repeat the salience-vs-damage plot for every tenth class.
for clsnum in range(0, len(classlabels), 10):
    focus_class = dataset.classes[clsnum]
    print(focus_class)
    # clsnum = dataset.classes.index(focus_class)
    discrimination = experiment.load_lda_vector(focus_class, args, segmodel, classlabels, seglabels, shrinkage=0.1)
    plot_intervention_units(clsnum, ablation_delta, unit_label_995, discrimination, count=256, label_ticks=False)
    plot_intervention_units(clsnum, ablation_delta, unit_label_995, discrimination, count=80, label_ticks=True)
# Make a table of concepts that are most discriminative per class.
discriminate_matrix = torch.zeros(len(classlabels), len(seglabels))
for clsnum in range(len(classlabels)):
    focus_class = dataset.classes[clsnum]
    d = experiment.load_lda_vector(focus_class, args, segmodel, classlabels, seglabels, shrinkage=0.1)
    discriminate_matrix[clsnum] = d[:,0]
# (class x unit) accuracy change caused by zeroing each single unit.
unit_damage_matrix = torch.zeros(len(classlabels), 256)
for unit in range(256):
    ablation_accuracy = experiment.test_perclass_accuracy(model, dataset,
                                                          layername=layername,
                                                          ablated_units=[unit],
                                                          cachefile=resfile('acc_ablate_unit_%d.npy' % (unit)))
    acc_diff = ablation_accuracy - baseline_accuracy
    unit_damage_matrix[:,unit] = acc_diff
```
Finding:
On average, zeroing a unit that detects the most discriminative concept for a class (for the 153 classes for which there is a unit for the most discriminative concept) damages accuracy of classification of class by an average of 4.1%, whereas zeroing other units damages accuracy of that class only by an average of 0.05%.
```
# Idea: for each unit, salience maybe should be given by the most salient high-iou concept detected by the unit.
relevant_units = []
all_other_units = []
counted_classes = 0
for clsnum in range(len(classlabels)):
    # Most discriminative concept for this class.
    segnum = discriminate_matrix[clsnum].max(0)[1]
    # print('Most relevant to %s is %s' % (classlabels[clsnum], seglabels[segnum]))
    # Units whose best concept matches it with IoU above 0.03.
    units = [unit
             for unit, (s, _, _, iou) in enumerate(unit_label_995)
             if s == segnum
             if iou > 0.03
             ]
    # Skip classes with no unit for their most discriminative concept.
    if not len(units):
        continue
    counted_classes += 1
    other_units = [u for u in range(256) if u not in units]
    # print(', '.join(str(r) for r in units))
    relevant_units.extend(unit_damage_matrix[clsnum, units].numpy().tolist())
    all_other_units.extend(unit_damage_matrix[clsnum, other_units].numpy().tolist())
print('Counted %d classes' % counted_classes)
print('Of the %d most relevant units, average damage is %.3g' %
      (len(relevant_units), torch.tensor(relevant_units).mean().item()))
print('Of the %d most other units, average damage is %.3g' %
      (len(all_other_units), torch.tensor(all_other_units).mean().item()))
```
Second experiment: compare ablation of the most-relevant concept detectors, where the most-relevant concept is counted among only those concepts that exist.
```
relevant_units = []
all_other_units = []
iou_floor = 0.03
for clsnum in range(len(classlabels)):
    # segnum = discriminate_matrix[clsnum].max(0)[1]
    # even if there is not a unit for the most discriminate feature, find unit
    # for the most discriminate feature for which there is a unit.
    units = sorted([(-discriminate_matrix[clsnum, s], s, unit)
                    for unit, (s, _, _, iou) in enumerate(unit_label_995)
                    if iou > iou_floor
                    ])
    # Most discriminative concept that actually has a confident unit.
    segnum = units[0][1]
    units = [unit
             for unit, (s, _, _, iou) in enumerate(unit_label_995)
             if s == segnum
             and iou > iou_floor
             ]
    other_units = [u for u in range(256) if u not in units]
    # print('Most relevant to %s is %s (%d units)' % (classlabels[clsnum], seglabels[segnum], len(units)))
    # print(', '.join(str(r) for r in units))
    relevant_units.extend(unit_damage_matrix[clsnum, units].numpy().tolist())
    all_other_units.extend(unit_damage_matrix[clsnum, other_units].numpy().tolist())
print('Of the %d most relevant units, average damage is %.3g' %
      (len(relevant_units), torch.tensor(relevant_units).mean().item()))
print('Of the %d most other units, average damage is %.3g' %
      (len(all_other_units), torch.tensor(all_other_units).mean().item()))
print('Ratio %.3f' % (
    torch.tensor(relevant_units).mean().item() / torch.tensor(all_other_units).mean().item()))
```
Third idea: for each class, order units according to the salience of the detected concept, and then average the impacts.
```
# Third idea: for each class, order units by the LDA salience of the concept
# they detect, then average ablation damage at each salience rank.
all_sorted_damage = torch.zeros(len(classlabels), 256)
all_unit_concepts = set(u[1] for u in unit_label_995)
for clsnum in range(len(classlabels)):
    unit_sorter = sorted([(-discriminate_matrix[clsnum, s], s, unit)
                  for unit, (s, _, _, iou) in enumerate(unit_label_995)
                 ])
    unit_order = [u[-1] for u in unit_sorter]
    # Must be a tensor so `unit_concept == s` below yields a boolean mask:
    # a plain Python list compared to a scalar is always False, and
    # `sorted_damage[False]` is an empty slice, which silently skipped the
    # per-concept averaging entirely.
    unit_concept = torch.tensor([u[1] for u in unit_sorter])
    sorted_damage = unit_damage_matrix[clsnum, unit_order]
    # Units with the same concept could be listed in any order, so average
    # their contributions.
    for s in all_unit_concepts:
        mask = unit_concept == s
        sorted_damage[mask] = sorted_damage[mask].mean()
    all_sorted_damage[clsnum] = sorted_damage
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.bar(range(51), (-all_sorted_damage.mean(0).numpy()[:50] * 100).tolist() +
       [-all_sorted_damage.mean(0).numpy()[50:].mean() * 100])
a1.set_title('Effect of removing an object detector unit on classification accuracy of a scene class')
a1.set_ylabel('Damage to classification accuracy of scene class\nwhen a single unit is zeroed, percent')
a1.set_xlabel('Units ordered by (LDA-determined) salience of detected object to the affected scene class')
a1.set_xticks([0, 9, 19, 29, 39, 50])
a1.set_xticklabels(['1', '10', '20', '30', '40', '>50'])
```
Adaptive case
```
# Adaptive case: same rank-ordered damage analysis using the
# adaptive-threshold unit labels.
all_sorted_damage = torch.zeros(len(classlabels), 256)
all_unit_concepts = set(u[1] for u in unit_label_adaptive)
for clsnum in range(len(classlabels)):
    unit_sorter = sorted([(-discriminate_matrix[clsnum, s], s, unit)
                  for unit, (s, _, _, iou) in enumerate(unit_label_adaptive)
                 ])
    unit_order = [u[-1] for u in unit_sorter]
    # Must be a tensor so `unit_concept == s` below yields a boolean mask:
    # a plain Python list compared to a scalar is always False, and
    # `sorted_damage[False]` is an empty slice, which silently skipped the
    # per-concept averaging entirely.
    unit_concept = torch.tensor([u[1] for u in unit_sorter])
    sorted_damage = unit_damage_matrix[clsnum, unit_order]
    # Units with the same concept could be listed in any order, so average
    # their contributions.
    for s in all_unit_concepts:
        mask = unit_concept == s
        sorted_damage[mask] = sorted_damage[mask].mean()
    all_sorted_damage[clsnum] = sorted_damage
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.bar(range(51), (-all_sorted_damage.mean(0).numpy()[:50] * 100).tolist() +
       [-all_sorted_damage.mean(0).numpy()[50:].mean() * 100])
a1.set_title('Effect of removing an object detector unit on classification accuracy of a scene class')
a1.set_ylabel('Damage to classification accuracy of scene class\nwhen a single unit is zeroed, percent')
a1.set_xlabel('Units ordered by (LDA-determined) salience of detected object to the affected scene class')
a1.set_xticks([0, 9, 19, 29, 39, 50])
a1.set_xticklabels(['1', '10', '20', '30', '40', '>50'])
```
Fourth idea: scatterplot. Salience rank of a unit on the x axis, and classification accuracy damage on the y axis.
```
# Fourth idea: scatterplot with salience rank of a unit on the x axis and
# classification-accuracy damage on the y axis.
all_sorted_damage = torch.zeros(len(classlabels), 256)
all_unit_concepts = set(u[1] for u in unit_label_995)
yvals = []
xvals = []
for clsnum in range(len(classlabels)):
    unit_sorter = sorted([(-discriminate_matrix[clsnum, s], s, unit)
                  for unit, (s, _, _, iou) in enumerate(unit_label_995)
                 ])
    unit_order = [u[-1] for u in unit_sorter]
    # Must be a tensor so `unit_concept == s` below yields a boolean mask:
    # a plain Python list compared to a scalar is always False, and
    # `rank_order[False]` is an empty slice, which silently skipped the
    # rank-averaging for tied concepts.
    unit_concept = torch.tensor([u[1] for u in unit_sorter])
    sorted_damage = unit_damage_matrix[clsnum, unit_order]
    rank_order = torch.arange(len(sorted_damage), dtype=torch.float)
    # Units with the same concept could be listed in any order, so give them
    # all their mean rank.
    for s in all_unit_concepts:
        mask = unit_concept == s
        rank_order[mask] = rank_order[mask].mean()
    xvals.extend(rank_order.numpy().tolist())
    yvals.extend(sorted_damage.numpy().tolist())
    all_sorted_damage[clsnum] = sorted_damage
import random
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(30, 5))
# Small random jitter spreads coincident points so density is visible.
a1.scatter([x + random.random() for x in xvals],
           [y + random.random() * 0.01 for y in yvals], s=0.5, alpha=0.2)
a1.set_xlabel('Units ordered by salience of detected object to the affected scene class using LDA')
[(u, iou) for u, (s, label, labelcat, iou) in enumerate(unit_label_995) if label == 'bed']
```
Fifth idea, similar to the "Effect" graph, but here the x axis is purely determined by LDA and has nothing to do with the network being tested.
```
# Fifth idea: bucket each unit's damage purely by the LDA rank of the concept
# it detects.  Concepts whose LDA score is exactly zero all tie, so their mass
# is spread uniformly across the whole tied span of ranks.
total_sorted_damage = torch.zeros(len(classlabels), len(seglabels))
count_sorted_damage = torch.zeros(len(classlabels), len(seglabels))
for clsnum in range(len(classlabels)):
    dscore, drank = (-discriminate_matrix[clsnum]).sort(0)
    # Rank positions whose score is zero form one big tie.
    tied_ranks = (dscore == 0).nonzero()[:, 0].numpy().tolist()
    # concept id -> list of rank positions it occupies (whole tie if zero).
    rankmap = {seg.item(): tied_ranks if rank in tied_ranks else [rank]
               for rank, seg in enumerate(drank)}
    for u, (seg, label, labelcat, iou) in enumerate(unit_label_995):
        ranks = rankmap[seg]
        damage = unit_damage_matrix[clsnum, u]
        # Spread this unit's damage (and count) evenly over its rank span.
        total_sorted_damage[clsnum, ranks] += (damage / len(ranks))
        count_sorted_damage[clsnum, ranks] += (1.0 / len(ranks))
avg_sorted_damage = (total_sorted_damage.sum(0) / count_sorted_damage.sum(0))
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.bar(range(len(avg_sorted_damage)), -avg_sorted_damage.numpy() * 100)
f.show()
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.bar(range(len(count_sorted_damage.sum(0))), count_sorted_damage.sum(0).numpy())
f.show()
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.bar(range(51), (-avg_sorted_damage.numpy()[:50] * 100).tolist() +
       [-avg_sorted_damage.numpy()[50:].mean() * 100])
a1.set_ylabel('Damage to classification accuracy of scene class\nwhen a single unit is zeroed, percent')
a1.set_xlabel('Which object detector is zeroed, identified by dissection, ordered by LDA salience of the object to the scene')
a1.set_xticks([0, 9, 19, 29, 39, 50])
a1.set_xticklabels(['1', '10', '20', '30', '40', '>50'])
```
Sixth idea, put average rank by LDA on the y axis, and put rank by intervention impact on the x axis.
```
# Sixth idea: x axis ranks units by intervention damage, y axis shows the
# average LDA rank of the concept each unit detects.  Units with identical
# damage values tie and share their rank positions.
lda_rank_for_unit = torch.zeros(len(classlabels), 256)
lda_count_for_unit = torch.zeros(len(classlabels), 256)
for clsnum in range(len(classlabels)):
    damage, damrank = (unit_damage_matrix[clsnum]).sort(0)
    # unit id -> every rank position holding the same damage value (ties).
    rankmap = {damrank[r].item(): (damage == d).nonzero()[:, 0].numpy().tolist()
               for r, d in enumerate(damage)}
    dscore, drank = (-discriminate_matrix[clsnum]).sort(0)
    # TODO: handle zero rank
    # zerorank = (dscore == 0).nonzero()[:, 0].numpy().mean()
    # concept id -> its LDA rank for this class.
    srankmap = {s.item(): r for r, s in enumerate(drank)}
    for u, (s, label, labelcat, iou) in enumerate(unit_label_995):
        unit_ranks = rankmap[u]
        concept_rank = srankmap[s]
        # Spread this unit's LDA rank (and count) evenly over its tied span.
        lda_rank_for_unit[clsnum, unit_ranks] += (concept_rank / len(unit_ranks))
        lda_count_for_unit[clsnum, unit_ranks] += (1.0 / len(unit_ranks))
avg_lda_rank_for_unit = (lda_rank_for_unit.sum(0) / lda_count_for_unit.sum(0))
avg_lda_rank_for_unit
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.plot(range(51), (avg_lda_rank_for_unit.numpy()[:50]).tolist() +
        [avg_lda_rank_for_unit.numpy()[50:].mean()])
a1.set_ylabel('Average rank of object detected by unit,\nordered by salience to class')
a1.set_xlabel('Which unit zeroed, ordered by damage caused to the scene class')
a1.set_xticks([0, 9, 19, 29, 39, 50])
a1.set_xticklabels(['1', '10', '20', '30', '40', '>50'])
```
Seventh idea, LDA coefficient on y axis
```
# Seventh idea: y axis shows the (max-normalized) LDA coefficient of the
# concept a unit detects, x axis ranks units by the damage their ablation causes.
lda_weight_for_unit = torch.zeros(len(classlabels), 256)
lda_count_for_unit = torch.zeros(len(classlabels), 256)
for clsnum in range(len(classlabels)):
    damage, damrank = (unit_damage_matrix[clsnum]).sort(0)
    # unit id -> all rank positions sharing the same damage value (ties).
    rankmap = {damrank[r].item(): (damage == d).nonzero()[:, 0].numpy().tolist() for r, d in enumerate(damage)}
    # Normalize a fresh tensor: the previous in-place `ldavec /= ldavec.max()`
    # operated on a row VIEW and silently mutated discriminate_matrix itself,
    # corrupting the raw LDA scores for any other cell (or re-run) reading them.
    ldavec = discriminate_matrix[clsnum] / discriminate_matrix[clsnum].max()
    dscore, drank = (-ldavec).sort(0)
    dscore = -dscore
    # TODO: handle zero rank
    # zerorank = (dscore == 0).nonzero()[:, 0].numpy().mean()
    # concept id -> its normalized LDA coefficient for this class.
    srankmap = {s.item(): sc.item() for s, sc in zip(drank, dscore)}
    for u, (s, label, labelcat, iou) in enumerate(unit_label_995):
        ur = rankmap[u]
        sr = srankmap[s]
        # Spread this unit's LDA weight (and count) evenly over its tied span.
        lda_weight_for_unit[clsnum, ur] += (sr / len(ur))
        lda_count_for_unit[clsnum, ur] += (1.0 / len(ur))
avg_lda_weight_for_unit = (lda_weight_for_unit.sum(0) / lda_count_for_unit.sum(0))
lda_weight_for_unit.mean()
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.plot(range(51), (avg_lda_weight_for_unit.numpy()[:50]).tolist() +
        [avg_lda_weight_for_unit.numpy()[50:].mean()], linewidth=2,
        label="LDA salience of objects detected by units with largest causal effect on class accuracy.")
a1.plot(range(51), 51 * [lda_weight_for_unit.mean().item()], linewidth=2, alpha=0.7,
        label="Mean LDA salience for random units %.2g. (Object with maximum salience is 1.0.)" %
        lda_weight_for_unit.mean().item())
a1.set_ylabel('Average LDA salience of object detected by zeroed unit')
a1.set_xlabel('Which unit zeroed, ordered by damage caused to the scene class')
a1.set_xticks([0, 9, 19, 29, 39, 50])
a1.set_xticklabels(['1', '10', '20', '30', '40', '>50'])
f.legend()
```
Repeat the experiment for adaptive case.
```
# Seventh idea repeated for the adaptive-threshold unit labels.
lda_weight_for_unit = torch.zeros(len(classlabels), 256)
lda_count_for_unit = torch.zeros(len(classlabels), 256)
for clsnum in range(len(classlabels)):
    damage, damrank = (unit_damage_matrix[clsnum]).sort(0)
    # unit id -> all rank positions sharing the same damage value (ties).
    rankmap = {damrank[r].item(): (damage == d).nonzero()[:, 0].numpy().tolist() for r, d in enumerate(damage)}
    # Normalize a fresh tensor: the previous in-place `ldavec /= ldavec.max()`
    # operated on a row VIEW and silently mutated discriminate_matrix itself,
    # corrupting the raw LDA scores for any other cell (or re-run) reading them.
    ldavec = discriminate_matrix[clsnum] / discriminate_matrix[clsnum].max()
    dscore, drank = (-ldavec).sort(0)
    dscore = -dscore
    # TODO: handle zero rank
    # zerorank = (dscore == 0).nonzero()[:, 0].numpy().mean()
    # concept id -> its normalized LDA coefficient for this class.
    srankmap = {s.item(): sc.item() for s, sc in zip(drank, dscore)}
    for u, (s, label, labelcat, iou) in enumerate(unit_label_adaptive):
        ur = rankmap[u]
        sr = srankmap[s]
        # Spread this unit's LDA weight (and count) evenly over its tied span.
        lda_weight_for_unit[clsnum, ur] += (sr / len(ur))
        lda_count_for_unit[clsnum, ur] += (1.0 / len(ur))
avg_lda_weight_for_unit_adaptive = (lda_weight_for_unit.sum(0) / lda_count_for_unit.sum(0))
f, a1 = plt.subplots(1, 1, dpi=200, figsize=(10, 5))
a1.plot(range(51), (avg_lda_weight_for_unit_adaptive.numpy()[:50]).tolist() +
        [avg_lda_weight_for_unit_adaptive.numpy()[50:].mean()], linewidth=2,
        label="LDA salience of objects detected by units with largest causal effect on class accuracy.")
a1.plot(range(51), 51 * [lda_weight_for_unit.mean().item()], linewidth=2, alpha=0.7,
        label="Mean LDA salience for objects detected by random units")
a1.set_ylabel('Average LDA salience of object detected by zeroed unit')
a1.set_xlabel('Which unit zeroed, ordered by damage caused to the scene class')
a1.set_xticks([0, 9, 19, 29, 39, 50])
a1.set_xticklabels(['1', '10', '20', '30', '40', '>50'])
f.legend()
```
| github_jupyter |
```
%%html
<style>
div.output_stderr{
display:none
}
</style>
<a id='top'></a>
```
# Operation of parameter based functions
* Documentation for *.yml and run_parameters functions in ../src/mini_pipelines_toolbox.py.
### source code link:
##### (private) source repository: https://github.com/dlanier/minipipelines.git
### Spreadsheet Transformations:
1. [Subset Expression Based on Phenotype](#select_subtype)
2. [Intersection](#get_common_samples)
3. [Subset Genes](#select_genes)
4. [Union](#merge)
5. [Group Then Apply a Function](#cluster_stats)
### Basic Transformations:
1. [Transpose](#transpose)
2. [Numerical Transformation](#numerical_transformation)
3. [Numerical Details](#stats)
```
import os
import sys
import pandas as pd
import knpackage.toolbox as kn
sys.path.insert(1, '../src')
import mini_pipelines_toolbox as mpt
data_dir = '../data/spreadsheets'
run_data_dir = '../data/run_files'
results_dir = os.path.join(os.path.abspath('../test'), 'run_dir/results')
if not os.path.isdir(results_dir):
    # Create the directory first; only report success after it exists.
    os.makedirs(results_dir)
    print('Created without overwrite:', results_dir)
def clear_results():
    """Delete every file currently in results_dir so each demo starts clean."""
    results_files = [f for f in os.listdir(results_dir) if os.path.isfile(os.path.join(results_dir, f))]
    for rf in results_files:
        os.remove(os.path.join(results_dir, rf))
os.listdir('../test/run_dir/')
os.listdir(data_dir)
```
# Spreadsheet Transformations
<a id='select_subtype'></a>
## Subset Expression Based on Phenotype [[top]](#top)
### run_select_subtype_df(run_parameters)
TEST_6_select_categorical.yml
From a genes x samples spreadsheet and a samples x phenotypes spreadsheet, return both spreadsheets with
only the samples corresponding to a category in a phenotype and write to output files names specified.
#### Required Keys:
method: select_subtype_df
spreadsheet_file_name: ../data/spreadsheets/transform_5_spreadsheet.tsv
phenotype_file_name: ../data/spreadsheets/spreadsheet_Two.txt
phenotype_id: "grade"
select_category: "grade 3"
results_directory: ./run_dir/results
```
# Demo: subset a genes x samples spreadsheet by a phenotype category.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_6_select_categorical.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_select_subtype_df(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
<a id='get_common_samples'></a>
## Intersection [[top]](#top)
### run_common_samples_df(run_parameters)
TEST_2_common_samples.yml
find common sample names in two spreadsheets, write to output files names specified
#### Required Keys:
method: common_samples_df
spreadsheet_1_file_name: ../data/spreadsheets/spreadsheet_One.txt
spreadsheet_2_file_name: ../data/spreadsheets/spreadsheet_Two.txt
results_directory: ./run_dir/results
```
# Demo: intersect the sample names of two spreadsheets.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_2_common_samples.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_common_samples_df(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
<a id="select_genes"></a>
## Subset Genes [[Top]](#top)
### run_select_genes(run_parameters)
TEST_4_select_genes.yml
Spreadsheet with only those genes selected from an input list.
#### Required Keys:
method: select_genes_df
spreadsheet_file_name: ../data/spreadsheets/gene_samples_1.tsv
gene_list_file_name: ../data/spreadsheets/gene_samples_1_list.txt
results_directory: ./run_dir/results
```
# Demo: keep only the genes named in an input list.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_4_select_genes.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_select_genes(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
<a id='merge'></a>
## Union [[top]](#top)
### run_merge_df(run_parameters)
TEST_3_merge.yml
Combine two spreadsheets into one with all samples and phenotypes and write to output file name specified
#### Required Keys:
method: merge_df
spreadsheet_1_file_name: ../data/spreadsheets/spreadsheet_One.txt
spreadsheet_2_file_name: ../data/spreadsheets/spreadsheet_Two.txt
results_directory: ./run_dir/results
```
# Demo: union of two spreadsheets into one combined output.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_3_merge.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_merge_df(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
<a id='cluster_stats'></a>
## Group Then Apply a Function [[top]](#top)
### run_cluster_statistics_df(run_parameters)
TEST_5_cluster_averages.yml
Dataframe of averages for each category in a genes x samples dataframe with a samples classification dictionary.
#### Required Keys:
method: cluster_statistics_df
centroid_statistic: 'median' # ['std', 'median', 'mean']
spreadsheet_file_name: ../data/spreadsheets/gene_samples_small.tsv
sample_labels_file_name: ../data/spreadsheets/gene_samples_small_labels.tsv
results_directory: ./run_dir/results
```
# Demo: group samples by label and apply a statistic per group.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_5_cluster_averages.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_cluster_statistics_df(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
# Basic transformations
<a id='transpose'></a>
## Transpose [[top]](#top)
### run_transpose(run_parameters)
TEST_1_transpose.yml
transpose a single spreadsheet and write to output file name specified
#### Required Keys:
method: transpose_df
spreadsheet_name_full_path: ../data/spreadsheets/spreadsheet_One.txt
results_directory: ./run_dir/results
```
# Demo: transpose a single spreadsheet.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_1_transpose.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_transpose(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
<a id='numerical_transformation'></a>
## Numerical Transformation [[top]](#top)
### run_spreadsheet_numerical_transform(run_parameters)
TEST_7_numerical_transform.yml
#### Required Keys:
method: numeric_transform
spreadsheet_name_full_path: ../data/spreadsheets/spreadsheet_A_.G.tsv
results_directory: ./run_dir/results
numeric_function: abs # [abs, z_transform, log_transform, threshold]
#### Method Specific Keys:
z_transform_axis: 1
z_transform_ddof: 0
log_transform_log_base: e # e for natural log or a positive number
log_transform_log_offset: 0
threshold_cut_off: 0.5
threshold_substitution_value: 0
threshold_scope: SUB_BELOW # [SUB_BELOW, SUB_ABOVE]
```
# Demo: apply a numerical transform (abs / z / log / threshold) to a spreadsheet.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_7_numerical_transform.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_spreadsheet_numerical_transform(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
<a id='stats'></a>
## Numerical Details [[top]](#top)
### run_stats_df(run_parameters)
TEST_8_stat_value.yml
#### Required Keys:
method: stats_df
spreadsheet_file_name: ../data/spreadsheets/gene_samples_1.tsv
stats_function: sum # ['min', 'max', 'mean', 'median', 'variation', 'std_deviation', 'sum']
direction_reference: columns # ['columns', 'rows', 'all']
results_directory: ./run_dir/results
```
# Demo: compute a summary statistic over rows/columns of a spreadsheet.
clear_results()
run_parameters = kn.get_run_parameters(run_data_dir, 'TEST_8_stat_value.yml')
run_parameters['results_directory'] = results_dir
print('Run Parameters:')
for key, value in run_parameters.items():
    print('%30s: %s' % (key, value))
mpt.run_stats_df(run_parameters)
print('Result Files:')
os.listdir(results_dir)
```
| github_jupyter |
#Convective Cell Identification & TRAcking (CITRA) using Doppler Weather Radar Images
The cell below installs the Tesseract-OCR model and the Google Drive Mount sequence.
**NOTE**: - When the below cell is run, it pops up a link to request access to your Google Drive. Open that link and grant access. Then copy the access code and paste it in the mentioned area in the installation sequence.
```
!pip install pytesseract
!sudo apt install tesseract-ocr
from google.colab import drive
drive.mount('/content/drive')
"""Mesoscale Convective System Identification and Tracking using Doppler Weather Radar Images"""
"""by A Niranjan"""
"""Algorithm for plotting the Convective_System sequence with a folder of consecutive images given as input"""
#Importing the necessary libraries for the project
"""
LIBRARY : USE
1. cv2: OpenSourced Computer Vision Library for image recognition.
2. Numpy: Popular Numerical Python library for various matrix calculations.
3. time: Simulation time analysis.
4. os: OS file handling.
5. pytesseract: An opensource text recognition library for image matrix to string conversions.
6. imutils: Image Utilities for basic image processing functions.
7. contours: Potential area boundary formations.
8. skimage: OpenSource Measure analysis and image recognition Library for segmentation, geometric transformations and color space manipulation.
9. argparse: For passing arguments to the algorithm from terminal.
10. scipy.spatial: Displacement analysis.
"""
import cv2 as cv
import numpy as np
import time,os
import pytesseract
import imutils
from imutils import contours
from skimage import measure
import argparse
from scipy.spatial import distance
import glob
```
##os.chdir('Your_Data_path_from_Google_Drive')
```
# Work out of the Colab root directory.
os.chdir('/content')
# Point pytesseract at the system tesseract binary installed earlier.
pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract'
#Function for Contour formation with respect to Single Strength bar images.
def formcontour_Single1(x):
    """Locate the largest reflectivity patch in a single-strength-bar radar
    frame, outline it, and publish measurements through the image-1 globals.

    x: BGR radar frame (numpy array) captured from the GIF.
    Returns 'Successful'; measured values live in the globals declared below.
    """
    #Initializing time sequence (kept for optional runtime diagnostics)
    start = time.time()
    #Globals consumed later by the tracking loop (image-1 perspective)
    global dresolution_1, Date_1, Time_1, Area_1, cX1, cY1, approx1
    img_main = x
    #Cropping the image down to the radar field
    # img_radar = x[43:752,6:759]
    img_radar = x[183:755,4:600]
    #Conversion to gray scale followed by a Gaussian blur to smooth the patches
    gray = cv.cvtColor(img_radar, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (11, 11), 0)
    #Thresholding the image for potential area identification
    thresh = cv.threshold(blurred, 200, 255, cv.THRESH_BINARY)[1]
    thresh = cv.erode(thresh, None, iterations=1)
    thresh = cv.dilate(thresh, None, iterations=1)
    #Connected component analysis to find the major patch formations
    labels = measure.label(thresh, connectivity = 2, background=0)
    mask = np.zeros(thresh.shape, dtype="uint8")
    # Loop over the unique connected components
    for label in np.unique(labels):
        # The background label is ignored
        if label == 0:
            continue
        # Build a mask for this component and count its pixels
        labelMask = np.zeros(thresh.shape, dtype="uint8")
        labelMask[labels == label] = 255
        numPixels = cv.countNonZero(labelMask)
        # Keep only components larger than 150 connected pixels
        if numPixels > 150:
            mask = cv.add(mask, labelMask)
    # pytesseract only accepts RGB input, so every OCR crop is converted from
    # OpenCV's BGR colour space first; some crops are resized for recognition.
    # Date_1 = pytesseract.image_to_string((cv.cvtColor(img_main[14:39, 3:137],cv.COLOR_BGR2RGB)))
    Date_1 = pytesseract.image_to_string((cv.cvtColor(img_main[13:37,5:135],cv.COLOR_BGR2RGB)))
    # Time_1 = pytesseract.image_to_string(cv.resize((cv.cvtColor(img_main[14:43, 233:333],cv.COLOR_BGR2RGB)),(150,30)))
    Time_1 = pytesseract.image_to_string(cv.resize((cv.cvtColor(img_main[15:35,230:305],cv.COLOR_BGR2RGB)),(150,30)))
    #Latitude and longitude detection (retained for optional diagnostics)
    lat = pytesseract.image_to_string((cv.resize((cv.cvtColor((img_main[20:39,794:848]),cv.COLOR_BGR2RGB)),(69,25))))
    long = pytesseract.image_to_string((cv.resize((cv.cvtColor((img_main[22:39,863:925]),cv.COLOR_BGR2RGB)),(310,75))))
    #Kilometres-per-pixel resolution; OCR of the scale text proved unreliable,
    #so the known value for this radar product is hard-coded.
    dresolution_1 = 0.8
    #Contours of the retained patches (OpenCV 3.x three-value signature).
    #Named `cnts` so the `imutils.contours` module import is not shadowed.
    _, cnts, _ = cv.findContours(mask,cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    #All contour areas, used to single out the largest patch
    cntArea = [cv.contourArea(c) for c in cnts]
    for cnt in cnts:
        #Only the largest contour area is considered for plotting
        if(cv.contourArea(cnt) == max(cntArea)):
            epsilon = 0.01*cv.arcLength(cnt,True)
            #Polygonal structure approximation
            approx1 = cv.approxPolyDP(cnt,epsilon,True)
            #Drawing the contour
            cv.drawContours(img_radar,[approx1],0,(0,0,255),2)
            #Total number of pixels enclosed by the contour
            TotalPixels = cv.contourArea(cnt)
            #Area in km^2: the /30 factor calibrates the 3-layer colour
            #spectrum of the image against kilometre squares.
            Area_1 = ((TotalPixels/30)*dresolution_1)
            #Centroid of the contour from image moments
            X = cv.moments(cnt)
            cX1 = int(X["m10"] / X["m00"])
            cY1 = int(X["m01"] / X["m00"])
            #Line from the radar origin to the contour centroid
            cv.line(img_radar, (int(299), int(288)), (int(cX1), int(cY1)),(0,255,0), 2)
            #Points on the contour centroid and the radar origin
            cv.circle(img_radar, (cX1, cY1), 3, (0, 0, 0), -5)
            cv.circle(img_radar, (299,288),3,(0,0,0),-5)
            #Distance between the contour centroid and the radar origin.
            #Fixed: was measured from (363,329), a leftover of the older crop,
            #while the origin drawn above for this crop is (299,288).
            D = distance.euclidean((299, 288), (cX1, cY1))
            DistanceFromRadar = round(D,2)
    return 'Successful'
def getRadarStats_Single1(x):
    """Run the single-bar contour pass on image 1 and relay its status."""
    if formcontour_Single1(x) == 'Successful':
        return 'Successful'
    print('No Anomaly in 1st Image\n')
#Function for Contour formation with respect to Single Strength bar images.
def formcontour_Single2(x):
    """Locate the largest reflectivity patch in the second single-strength-bar
    radar frame, outline it on img_radar2, and publish measurements through
    the image-2 globals.

    x: BGR radar frame (numpy array) captured from the GIF.
    Returns 'Successful'; measured values live in the globals declared below.
    """
    #Initializing time sequence (kept for optional runtime diagnostics)
    start = time.time()
    #Globals consumed later by the tracking loop (image-2 perspective)
    global dresolution_2, Date_2, Time_2, Area_2, cX2, cY2, img_radar2
    img_main = x
    #Cropping the image down to the radar field
    # img_radar2 = x[43:752,6:759]
    img_radar2 = x[183:755,4:600]
    #Conversion to gray scale followed by a Gaussian blur to smooth the patches
    gray = cv.cvtColor(img_radar2, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (11, 11), 0)
    #Thresholding the image for potential area identification
    thresh = cv.threshold(blurred, 200, 255, cv.THRESH_BINARY)[1]
    thresh = cv.erode(thresh, None, iterations=1)
    thresh = cv.dilate(thresh, None, iterations=1)
    #Connected component analysis to find the major patch formations
    labels = measure.label(thresh, connectivity = 2, background=0)
    mask = np.zeros(thresh.shape, dtype="uint8")
    # Loop over the unique connected components
    for label in np.unique(labels):
        # The background label is ignored
        if label == 0:
            continue
        # Build a mask for this component and count its pixels
        labelMask = np.zeros(thresh.shape, dtype="uint8")
        labelMask[labels == label] = 255
        numPixels = cv.countNonZero(labelMask)
        # Keep only components larger than 150 connected pixels
        if numPixels > 150:
            mask = cv.add(mask, labelMask)
    # pytesseract only accepts RGB input, so every OCR crop is converted from
    # OpenCV's BGR colour space first; some crops are resized for recognition.
    # Date_2 = pytesseract.image_to_string((cv.cvtColor(img_main[14:39, 3:137],cv.COLOR_BGR2RGB)))
    Date_2 = pytesseract.image_to_string((cv.cvtColor(img_main[13:37,5:135],cv.COLOR_BGR2RGB)))
    # Time_2 = pytesseract.image_to_string(cv.resize((cv.cvtColor(img_main[14:43, 233:333],cv.COLOR_BGR2RGB)),(150,30)))
    Time_2 = pytesseract.image_to_string(cv.resize((cv.cvtColor(img_main[15:35,230:305],cv.COLOR_BGR2RGB)),(150,30)))
    #Latitude and longitude detection (retained for optional diagnostics)
    lat = pytesseract.image_to_string((cv.resize((cv.cvtColor((img_main[20:39,794:848]),cv.COLOR_BGR2RGB)),(69,25))))
    long = pytesseract.image_to_string((cv.resize((cv.cvtColor((img_main[22:39,863:925]),cv.COLOR_BGR2RGB)),(310,75))))
    #Kilometres-per-pixel resolution; OCR of the scale text proved unreliable,
    #so the known value for this radar product is hard-coded.
    dresolution_2 = 0.8
    #Contours of the retained patches (OpenCV 3.x three-value signature).
    #Named `cnts` so the `imutils.contours` module import is not shadowed.
    _, cnts, _ = cv.findContours(mask,cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    #All contour areas, used to single out the largest patch
    cntArea = [cv.contourArea(c) for c in cnts]
    for cnt in cnts:
        #Only the largest contour area is considered for plotting
        if(cv.contourArea(cnt) == max(cntArea)):
            epsilon = 0.01*cv.arcLength(cnt,True)
            #Polygonal structure approximation
            approx = cv.approxPolyDP(cnt,epsilon,True)
            #Drawing the contour
            img_radar2 = cv.drawContours(img_radar2,[approx],0,(0,0,255),2)
            #Total number of pixels enclosed by the contour
            TotalPixels = cv.contourArea(cnt)
            #Area in km^2: the /30 factor calibrates the 3-layer colour
            #spectrum of the image against kilometre squares.
            Area_2 = ((TotalPixels/30)*dresolution_2)
            #Centroid of the contour from image moments
            X = cv.moments(cnt)
            cX2 = int(X["m10"] / X["m00"])
            cY2 = int(X["m01"] / X["m00"])
            #Line from the radar origin to the contour centroid
            img_radar2 = cv.line(img_radar2, (int(299), int(288)), (int(cX2), int(cY2)),(0,255,0), 2)
            #Points on the contour centroid and the radar origin
            img_radar2 = cv.circle(img_radar2, (cX2, cY2), 3, (0, 0, 0), -5)
            img_radar2 = cv.circle(img_radar2, (299,288),3,(0,0,0),-5)
            #Distance between the contour centroid and the radar origin.
            #Fixed: was measured from (363,329), a leftover of the older crop,
            #while the origin drawn above for this crop is (299,288).
            D = distance.euclidean((299, 288), (cX2, cY2))
            DistanceFromRadar = round(D,2)
    return 'Successful'
#Function for Radar statistics of Single strength bar image-2
def getRadarStats_Single2(x):
    """Run the single-bar contour pass on image 2 and relay its status."""
    if formcontour_Single2(x) == 'Successful':
        return 'Successful'
    print('No Anomaly in 2nd Image')
#Combined function for RADAR information determination and tracking.
#Dispatches on the image dimensions: single strength bar vs double strength bar layouts.
def FinalRadarStats(image1,image2):
    """Populate radar1_info / radar2_info by running the detector that matches each image layout."""
    global radar1_info, radar2_info
    shape1, shape2 = image1.shape, image2.shape
    if shape1 == (770, 1078, 3):
        radar1_info = getRadarStats_Single1(image1)
    elif shape1 == (720, 1082, 3):
        radar1_info = getRadarStats_Double1(image1)
    if shape2 == (770, 1078, 3):
        radar2_info = getRadarStats_Single2(image2)
    elif shape2 == (720, 1082, 3):
        radar2_info = getRadarStats_Double2(image2)
#Initializing the timer sequence
start = time.time()
#list to store file names
file_names = []
#List to store all the anomaly centroids
anomaly_centroids = []
#adding all the file names to the list
for i in glob.glob('*.gif*'):
    file_names.append(i)
#Reading all the .gif images in the folder and capturing the images for image processing.
#Consecutive frame pairs (i, i+1) are processed together so the anomaly can be tracked.
for i in range(0,len(file_names)-1):
    gif1 = cv.VideoCapture(file_names[i])
    ret1,image1 = gif1.read()
    image1 = cv.resize(image1,(1078,770))
    gif2 = cv.VideoCapture(file_names[i+1])
    ret2,image2 = gif2.read()
    image2 = cv.resize(image2,(1078,770))
    #Calling the main function for image processing; it sets the globals
    #radar1_info/radar2_info plus cX1,cY1 / cX2,cY2, Time_1/Time_2 and Area_1/Area_2.
    FinalRadarStats(image1,image2)
    if(radar1_info == 'Successful' and radar2_info == 'Successful'):
        # if(file_names[i] == file_names[0]):
        #     global initialanomaly_Time
        #     initialanomaly_Time = Time_1
        #     print('Initial Anomaly Time'+ initialanomaly_Time)
        # if(file_names[i]==file_names[-1]):
        #     global finalanomaly_Time
        #     finalanomaly_Time = Time_2
        #     print('Final Anomaly Time'+ finalanomaly_Time)
        #Determining the time & area difference between the consecutive anomalies from the global variables declared in the functions
        time_difference = round((abs(get_sec(Time_1)-get_sec(Time_2)))/60,2)
        area_difference = round((abs(Area_1 - Area_2)),2)
        #Displacement of the anomaly in km (pixel distance scaled by the radar resolution)
        distance_travelled = round( (distance.euclidean((cX1,cY1),(cX2,cY2)))*dresolution_2 ,2)
        # Treat the two detections as the same anomaly only when the frames are
        # less than 70 minutes apart, the area changed by less than 40 km.sq
        # and the centroid moved less than 100 km.
        if(time_difference < 70 and area_difference < 40 and distance_travelled < 100):
            #Drawing the relation between consecutive frames
            # img_radar2 = cv.drawContours(img_radar2,[approx1],0,(0,255,0),2)
            img_radar2 = cv.line(img_radar2, (int(cX1), int(cY1)), (int(cX2), int(cY2)),(0,255,0), 2)
            img_radar2 = cv.circle(img_radar2, (cX1, cY1), 3, (0, 0, 0), -5)
            img_radar2 = cv.circle(img_radar2, (cX2, cY2), 3, (0, 0, 0), -5)
            #Adding the centroid values to the lists
            # print('Coordinates1: ','[',cX1,cY1,']','\t','Coordinates2','[',cX2,cY2,']','\n')
            anomaly_centroids.append([cX1,cY1])
            anomaly_centroids.append([cX2,cY2])
        else:
            print("Anomaly could not be traced from ", file_names[i],'&',file_names[i+1])
    else:
        print('Anomaly correlation not found\n')
    # NOTE(review): this increment is a no-op — the for-loop rebinds i on every
    # iteration, so the statement never skips a pair.
    i += 1
#Declaration of the final centroid list to store only exact values which are necessary
final_anomaly_centroids = []
#Loop to remove duplicates and storing only the consecutive anomaly centroids
for i in anomaly_centroids:
    if i not in final_anomaly_centroids:
        final_anomaly_centroids.append(i)
#Plotting line map to all the consecutive centroids of the anomaly
for x in range(0,len(final_anomaly_centroids)-1):
    for y in range(0,1):  # runs once with y == 0; indices y and y+1 pick the x/y coordinates
        #Plotting the path only if the scaled displacement is less than 40 km
        if( (round((distance.euclidean((int(final_anomaly_centroids[x][y]),int(final_anomaly_centroids[x][y+1])),(int(final_anomaly_centroids[x+1][y]),int(final_anomaly_centroids[x+1][y+1]) ) )),2)*0.7) < 40 ):
            img_radar2 = cv.line(img_radar2, (int(final_anomaly_centroids[x][y]), int(final_anomaly_centroids[x][y+1])), (int(final_anomaly_centroids[x+1][y]), int(final_anomaly_centroids[x+1][y+1])),(0,255,0), 2)
            img_radar2 = cv.circle(img_radar2, (final_anomaly_centroids[x][y], final_anomaly_centroids[x][y+1]), 3, (0, 0, 0), -5)
            img_radar2 = cv.circle(img_radar2, (final_anomaly_centroids[x+1][y], final_anomaly_centroids[x+1][y+1]), 3, (0, 0, 0), -5)
        else: print(final_anomaly_centroids[x],'\n')
#Ending the time sequence
end = time.time()
print(f"Runtime {end - start} Seconds")
#Displaying the traced anomaly
cv2_imshow(img_radar2)
# cv.imwrite(r'C:\Users\niran\Desktop\Radar_Image_Processing\RadarData\GIF_Images\2017\April\13th\Stage8\TracedAnomaly.jpeg',img_radar2)
cv.waitKey(0)
#Printing the path to which the plotted image is going to be saved
# destination = args["folder"]
# print(destination)
# """Writing the final plotted image to the destination file. It is going to be the same directory from which images are given as input to the algorithm"""
# cv.imwrite(args['folder']+'\TestResult.jpeg',img_radar2)
```
| github_jupyter |
# Morphing basis animations
Let's make something cool:

```
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
%matplotlib inline
import sys
try:
from madminer.morphing import Morpher
except ModuleNotFoundError:
sys.path.append('../..')
from madminer.morphing import Morpher
```
## Settings
```
n_runs = 10          # number of independent animated GIFs to produce
n_resolution = 200   # grid resolution per axis for the weight heatmap
n_frames = 8 * 25    # total animation frames (8 s at 25 fps)
randomness1 = 0.3    # jitter strength for trajectory endpoint set 1
randomness2 = 0.6    # jitter strength for trajectory endpoint set 2
```
## Preparation
```
# Morpher over two parameters, each with powers up to 4 on the range [-1, 1].
morpher = Morpher(parameter_max_power=[4,4],
                  parameter_range=[(-1.,1.), (-1.,1.)])
_ = morpher.find_components(max_overall_power=4)

# Regular evaluation grid over the 2-D parameter space, flattened into an
# (n_resolution**2, 2) array of theta points.
xi = np.linspace(-1.,1.,n_resolution)
yi = np.linspace(-1.,1.,n_resolution)
xx, yy = np.meshgrid(xi, yi)
xx = xx.reshape((-1,1))
yy = yy.reshape((-1,1))
theta_evaluation = np.hstack([xx, yy])
```
## Basis evaluation
```
def evaluate_basis(basis=None):
    """Evaluate sqrt(sum of squared morphing weights) over the theta grid.

    If no basis is supplied, an optimized one is computed first.
    Returns an (n_resolution, n_resolution) array.
    """
    if basis is None:
        basis = morpher.optimize_basis(n_bases=1)
    # Weight-vector norm at every grid point.
    norms = [
        np.sum(w * w) ** 0.5
        for w in (morpher.calculate_morphing_weights(point, basis)
                  for point in theta_evaluation)
    ]
    return np.asarray(norms).reshape((n_resolution, n_resolution))
```
## Basis point trajectories
```
def initialize_trajectories():
    """Draw random trajectory endpoints, phases and oscillation orders.

    Populates the globals theta1, theta2, theta_phase and theta_order that
    calculate_basis() interpolates between for every animation frame.
    """
    global theta1, theta2, theta_phase, theta_order
    # Fixed anchor positions for the benchmark points.
    theta_fix = np.array([[ 0.        ,  0.        ],
       [-0.8318245 ,  0.85645093],
       [-0.82002127, -0.85191237],
       [ 0.76870769, -0.81272456],
       [ 0.7819962 ,  0.86242685],
       [-0.57243257,  0.37755934],
       [-0.29730939,  0.74563426],
       [ 0.13777926,  0.35254704],
       [ 0.46330191,  0.51783982],
       [ 0.64649576, -0.01232633],
       [ 0.16629182, -0.29365045],
       [ 0.39752054, -0.64235507],
       [-0.19238158, -0.59962178],
       [-0.30730345, -0.09697784],
       [-0.70631846, -0.18913046]])
    n_benchmarks = theta_fix.shape[0]
    # Integer oscillation orders in {1, 2, 3} per benchmark and coordinate.
    theta_order = np.random.randint(1, 4, (n_benchmarks, 2))
    # Endpoint set 1: anchors jittered towards random points.
    # FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    random_points = -1. + 2.*np.random.rand(n_benchmarks, 2)
    random_point_weights = randomness1 * np.random.rand(n_benchmarks, 2) / theta_order.astype(float)
    theta1 = (1.-random_point_weights)*theta_fix + random_point_weights*random_points
    # Endpoint set 2: independent jitter with larger amplitude.
    random_points = -1. + 2.*np.random.rand(n_benchmarks, 2)
    random_point_weights = randomness2 * np.random.rand(n_benchmarks, 2) / theta_order.astype(float)
    theta2 = (1.-random_point_weights)*theta_fix + random_point_weights*random_points
    # Random starting phase per benchmark and coordinate.
    theta_phase = 2.*np.pi*np.random.rand(n_benchmarks, 2)
    #theta_phase[:,1] = theta_phase[:,0] + np.random.normal(np.pi/2, np.pi/4, n_benchmarks)
def calculate_basis(t):
    """Return benchmark positions at frame t, oscillating between theta1/theta2."""
    midpoint = 0.5 * (theta1 + theta2)
    amplitude = 0.5 * (theta1 - theta2)
    phase = 2. * np.pi * theta_order * t / n_frames + theta_phase
    return midpoint + amplitude * np.sin(phase)
```
## Animation
```
def initialize_animation():
    """Reset the random trajectories and evaluate the basis at frame 0."""
    global basis, squared_weights
    initialize_trajectories()
    basis = calculate_basis(0)
    squared_weights = evaluate_basis(basis)
def update(t):
    """Advance the animation to frame t: move the points, refresh the heatmap."""
    global basis, squared_weights
    basis = calculate_basis(t)
    squared_weights = evaluate_basis(basis)
    scatter.set_offsets(basis)
    # pcolormesh wants one value per face, hence the [:-1, :-1] crop.
    pcm.set_array(squared_weights[:-1, :-1].ravel())
    frame_no = t + 1
    if frame_no % 10 == 0:
        print(' Frame', frame_no, '/', n_frames)
def make_animation(i):
    """Render one full animation (n_frames frames) and save it as a GIF."""
    global pcm, scatter
    fig = plt.figure(figsize=(5.,4.))
    ax = plt.gca()
    # Log-scale heatmap of the weight norm, clipped to [0.5, 200].
    pcm = ax.pcolormesh(xi, yi, squared_weights,
                        norm=matplotlib.colors.LogNorm(vmin=.5, vmax=200.),
                        cmap='viridis_r')
    cbar = fig.colorbar(pcm, ax=ax, extend='both')
    scatter = plt.scatter(basis[:,0], basis[:,1], s=40., c='black')
    plt.xlabel(r'$\theta_0$')
    plt.ylabel(r'$\theta_1$')
    cbar.set_label(r'$\sqrt{\sum w_i^2}$')
    plt.xlim(-1.,1.)
    plt.ylim(-1.,1.)
    plt.tight_layout()
    # 40 ms per frame -> 25 fps; GIF output requires ImageMagick.
    anim = FuncAnimation(fig, update, frames=np.arange(0, n_frames), interval=40)
    anim.save('animations/run_' + str(i) + '.gif', dpi=120, writer='imagemagick')
    #anim.save('animations/run_' + str(i) + '.avi')
```
## Main loop
```
# Each run draws fresh random trajectories, then renders and saves one GIF.
for i in range(n_runs):
    print('Run', i + 1, '/', n_runs)
    initialize_animation()
    make_animation(i)
```
| github_jupyter |
```
import wave
import struct
import os
from scipy import signal
import numpy as np
import tensorflow as tf
# from tensorflow.python.ops import variable_scope as vs
tf.reset_default_graph()
path = r'C:\Users\xujiahao\Desktop\MIR-1K_for_MIREX\trainwav'  # folder containing the training wav files
fname1 = 'left.wav'   # output file: separated accompaniment (left channel)
fname2 = 'right.wav'  # output file: separated vocals (right channel)
nframes = 96000       # samples per clip: 6 s at 16 kHz mono
framerate = 16000     # sample rate in Hz
sampwidth = 2         # bytes per sample (16-bit PCM)
batch_size = 1
num_steps = 189       # STFT time frames per clip
step_num = 513        # frequency bins per frame (nperseg/2 + 1 for nperseg=1024)
rnn_hidden1_size = 200 # hidden sizes may be chosen freely
rnn_hidden2_size = 100
rnn_hiddenL_size = 200
state_size = 200      # equals rnn_hiddenL_size, the width of the last hidden layer
y_size = step_num*2   # two mask columns of step_num bins each
soft=1e-4             # smoothing constant for the soft time-frequency mask
#็ๆ้ณ้ขๆไปถ
def Generate_Wav(fname, wave_data, width, rate):
    """Write *wave_data* to *fname* as a mono PCM wav file.

    :param fname: output file path
    :param wave_data: iterable of sample values; each is converted to int16
    :param width: sample width in bytes
    :param rate: sample rate in Hz
    :raises struct.error: if a sample does not fit into a signed 16-bit int
    """
    samples = [int(v) for v in wave_data]
    # 'with' guarantees the wav header is finalized and the handle released
    # even if a write fails (the original leaked the file on error).
    with wave.open(fname, 'wb') as wf:
        wf.setnchannels(1)
        wf.setsampwidth(width)
        wf.setframerate(rate)
        # Pack all samples in a single call instead of one write per sample.
        wf.writeframes(struct.pack('<%dh' % len(samples), *samples))
#ๅฏน่ฏปๅ
ฅๆไปถ้ขๅค็
def init_handle(filename):
    """Preprocess a stereo wav file into STFT features and mask labels.

    The clip is trimmed or zero-padded to ``nframes`` samples per channel.
    Channel 0 (left) is the accompaniment, channel 1 (right) the vocals.

    Returns (theta, X, Y):
      theta - phase spectrogram of the mixed signal,
      X     - mixture magnitude spectrogram (frames x bins),
      Y     - the two ratio-mask label matrices stacked side by side.
    """
    wav = wave.open(filename)
    # getparams() -> (nchannels, sampwidth, framerate, nframes, comptype, compname)
    total_frames = wav.getparams()[3]
    raw = wav.readframes(total_frames)
    wav.close()
    # Interleaved 16-bit stereo -> 2 x N array (row 0 = left, row 1 = right).
    samples = np.frombuffer(raw, dtype=np.short).reshape(-1, 2).T
    if len(samples[0]) >= nframes:
        wave_left = samples[0, :nframes]
        wave_right = samples[1, :nframes]
    else:
        pad = np.zeros(nframes - len(samples[0]))
        wave_left = np.append(samples[0], pad)
        wave_right = np.append(samples[1], pad)
    # Short-time Fourier transform of each channel.
    _, _, Z_left1 = signal.stft(wave_left, fs=framerate, nperseg=1024, noverlap=512)
    _, _, Z_right1 = signal.stft(wave_right, fs=framerate, nperseg=1024, noverlap=512)
    theta = np.angle(Z_left1 + Z_right1)   # phase spectrogram of the mixture
    left_mag = abs(Z_left1.T)              # accompaniment magnitude (frames x bins)
    right_mag = abs(Z_right1.T)            # vocal magnitude (frames x bins)
    # Smoothed ideal ratio masks used as training labels.
    y1 = (left_mag + 0.1) / (left_mag + right_mag + 0.1)
    y2 = (right_mag + 0.1) / (left_mag + right_mag + 0.1)
    X = abs(Z_left1.T + Z_right1.T)        # mixture magnitude spectrogram
    Y = np.column_stack((y1, y2))
    return theta, X, Y
#ๅฏน้ขๆตๆฐๆฎ่ฟ่กๅค็
def out_handle(theta, X, prediction):
    """Turn the network prediction into two wav files via soft masking.

    :param theta: phase spectrogram of the mixture
    :param X: mixture magnitude spectrogram (frames x bins)
    :param prediction: network output, two masks stacked along the bin axis
    """
    mixture = X.T
    pred = prediction.T
    music_raw = pred[:step_num, :]
    voice_raw = pred[step_num:, :]
    # Smoothed soft time-frequency masks.
    denom = music_raw + voice_raw + soft
    s1 = (music_raw + soft) / denom
    s2 = (voice_raw + soft) / denom
    mag_left = mixture * s1    # magnitude spectrogram of the music channel
    mag_right = mixture * s2   # magnitude spectrogram of the vocal channel
    # Re-attach the mixture phase to each masked magnitude.
    Z_left = mag_left * np.cos(theta) + 1j * mag_left * np.sin(theta)
    Z_right = mag_right * np.cos(theta) + 1j * mag_right * np.sin(theta)
    _, data_left = signal.istft(Z_left, framerate, nperseg=1024, noverlap=512)
    _, data_right = signal.istft(Z_right, framerate, nperseg=1024, noverlap=512)
    Generate_Wav(fname1, data_left, sampwidth, framerate)
    Generate_Wav(fname2, data_right, sampwidth, framerate)
class AudioRNN:
    """Three-layer vanilla RNN (TensorFlow 1 graph mode) predicting soft masks.

    Input: one clip's magnitude spectrogram [num_steps, step_num].
    Output: sigmoid activations of size y_size = 2*step_num
    (mask columns for accompaniment and vocals).
    """
    def __init__(self):
        # Placeholder for a single clip; reshaped to a batch of one for dynamic_rnn.
        self.x1 = tf.placeholder(tf.float32, [num_steps, step_num], name='input_placeholder')
        self.x = tf.reshape(self.x1, [batch_size, num_steps, step_num])
        self.y = tf.placeholder(tf.float32, [num_steps,y_size], name='output_placeholder')
        self.lr = tf.Variable(0.01,dtype=tf.float32)  # learning rate variable
    def RNNLayer(self):
        """Build the stacked RNN plus a sigmoid output projection.

        Returns (logits, predictions); AUTO_REUSE lets train() and test()
        build the graph twice while sharing the same variables.
        """
        rnn_layers = [tf.nn.rnn_cell.BasicRNNCell(num_units=size, reuse=tf.AUTO_REUSE) for size in [rnn_hidden1_size, rnn_hidden2_size, rnn_hiddenL_size]]
        rnn_multi = tf.contrib.rnn.MultiRNNCell(rnn_layers)
        initial_state = rnn_multi.zero_state(batch_size, dtype=tf.float32)
        #with tf.variable_scope("scope", reuse=None):
        outputs, last_states = tf.nn.dynamic_rnn(cell=rnn_multi,inputs=self.x, initial_state=initial_state, dtype=tf.float32)
        outputs = tf.reshape(outputs, [-1, state_size])
        with tf.variable_scope("sigmoid1", reuse=tf.AUTO_REUSE):
            W1 = tf.get_variable('W', [state_size, y_size])  # output projection weights
            b1 = tf.get_variable('b', [y_size], initializer=tf.constant_initializer(0.0))
            #logits1 = tf.reshape(tf.matmul(outputs, W) + b,[num_steps,y_size])
            logits = tf.matmul(outputs, W1) + b1
            predictions = tf.nn.sigmoid(logits)
        return logits, predictions
    # Build the training graph, run the session and save a checkpoint.
    def train(self, tf_save_path):
        """Train on every wav under the module-level *path* and save to *tf_save_path*."""
        epoch = 3
        logits,_ = self.RNNLayer()
        losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=self.y)
        total_loss = tf.reduce_mean(losses)
        train_step = tf.train.AdadeltaOptimizer(self.lr).minimize(total_loss)  # optimizer step minimizing the loss
        #print(losses,total_loss,train_step)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())  # initialize all variables
            # training loop
            for i in range(epoch):
                sess.run(tf.assign(self.lr,0.5))  # learning rate per epoch; e.g. 0.5*(0.9 ** i) would add decay
                for filename in os.listdir(path):
                    filename = path+"/"+filename
                    _, train_X, train_Y = init_handle(filename)
                    # train_X1 = np.reshape(train_X,(batch_size, num_steps, step_num))  # reshape to the 3-D tensor dynamic_rnn expects
                    to, _ = sess.run([total_loss,train_step],feed_dict={self.x1:train_X, self.y:train_Y})
                # if(i%10==0):
                print("step"+str(i)+"\t"+"loss:",to)
            save_path = saver.save(sess, tf_save_path)
            # writer = tf.summary.FileWriter("./summary", tf.get_default_graph())
            # writer.close()
            print("Model saved in file: %s" %save_path)
        # sess.close()
    def test(self, testfile, tfsavepath):
        """Restore the checkpoint and separate *testfile* into left/right wavs."""
        # vs.get_variable_scope().reuse_variables()
        _, predictions = self.RNNLayer()
        test_theta, test_X, _ = init_handle(testfile)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, tfsavepath)
            prediction = sess.run(predictions,feed_dict={self.x1:test_X})
            out_handle(test_theta, test_X, prediction)
            print(prediction.shape)
import pyaudio
import tkinter as tk
from PIL import ImageTk, Image
from tkinter import filedialog
class Control:
    """Tkinter control panel wiring the GUI buttons to the AudioRNN model."""
    def __init__(self, RNN, savepath, testfile):
        # RNN: AudioRNN instance; savepath: checkpoint path; testfile: default wav.
        self.RNN = RNN
        self.savepath = savepath
        self.testfile = testfile
        self.root = tk.Tk()
        self.root.title("ๆงๅถ้ขๆฟ")  # set the main-window title ("control panel"; mojibake in this file)
        self.root.geometry('500x352')
        self.canvas = tk.Canvas(self.root, height=352, width=500)  # create the canvas
        self.imgpath = 'kehuan.jpg'
        self.img = Image.open(self.imgpath)
        self.image_file = ImageTk.PhotoImage(self.img)  # load the background image
        self.canvas.create_image(0,0, anchor='nw', image=self.image_file)  # place the image on the canvas
        self.canvas.pack(side='top')
        # Action buttons: train / choose file / separate / play music / play vocals.
        self.trainBtn = tk.Button(self.canvas, command = self.TrainBtn, text = "ๅผๅง่ฎญ็ป")
        self.getfileBtn = tk.Button(self.canvas, command = self.SelectFile, text = "้ๆฉ้ณ้ขๆไปถ")
        self.separateBtn = tk.Button(self.canvas, command = self.SeparateBtn, text = "ๅผๅงๅ็ฆป")
        self.leftBtn = tk.Button(self.canvas, command = self.LeftBtn, text = "ๆญๆพ่ๆฏ้ณไน")
        self.rightBtn = tk.Button(self.canvas, command = self.RightBtn, text = "ๆญๆพไบบๅฃฐ")
        # self.p = tk.StringVar()
        # self.p.set("ๆญๆพๆ็คบ๏ผ")
        self.trainlabel = tk.Label(self.canvas)
        self.text = tk.Text(self.canvas)  # shows the selected audio file path
        self.separatelabel = tk.Label(self.canvas, text="่ฏทๅ็กฎ่ฎคๅ็ฆป้ป่ฎค้ณ้ขๆๅทฒ็ป้ๅฅฝ็๏ผ", foreground="red")
        self.playlabel = tk.Label(self.canvas, text="ๆญๆพๆ็คบ๏ผ", foreground="red")
    def gui_arrang(self):
        """Place all widgets on the canvas and start the Tk main loop."""
        self.canvas.create_window(100, 50, width=80, height=30,window=self.trainBtn)
        self.canvas.create_window(100, 130, width=80, height=30,window=self.getfileBtn)
        self.canvas.create_window(100, 210, width=80, height=30,window=self.separateBtn)
        self.canvas.create_window(150, 290, width=80, height=30,window=self.leftBtn)
        self.canvas.create_window(350, 290, width=80, height=30,window=self.rightBtn)
        self.canvas.create_window(300, 50, width=300, height=30, window=self.trainlabel)
        self.canvas.create_window(300, 130, width=300, height=30, window=self.text)
        self.canvas.create_window(300, 210, width=300, height=30, window=self.separatelabel)
        self.canvas.create_window(250, 300, width=80, height=30, window=self.playlabel)
        # Tell the user whether a trained checkpoint already exists on disk.
        if os.path.exists(self.savepath + '.meta'):
            self.trainlabel.config(text="ๅทฒๆๆจกๅ๏ผๅฏ็ดๆฅๅ็ฆป๏ผ", foreground="red")
        else:
            self.trainlabel.config(text="่ฏทๅ่ฎญ็ปๆจกๅ๏ผ", foreground="red")
        self.text.tag_config("tag1", foreground="red", offset=-7)
        self.text.insert(tk.INSERT, self.testfile, "tag1")
        self.text.config(state=tk.DISABLED)
        self.root.mainloop()
    def TrainBtn(self):
        """Train the model and update the status label before/after."""
        self.trainlabel.config(text="ๆญฃๅจ่ฎญ็ป๏ผ่ฏท็จ็ญ๏ผ", foreground="red")
        self.RNN.train(self.savepath)
        self.trainlabel.config(text="ๅฎๆ่ฎญ็ป๏ผ", foreground="red")
    def SelectFile(self):
        """Open a file dialog and show the chosen wav path in the text box."""
        self.testfile = tk.filedialog.askopenfilename()
        self.text.tag_config("tag1", foreground="red")
        if self.testfile != '':
            self.text.config(state=tk.NORMAL)
            self.text.delete(1.0, tk.END)
            self.text.insert(tk.INSERT, self.testfile, "tag1")
            self.text.config(state=tk.DISABLED)
        else:
            self.text.config(state=tk.NORMAL)
            self.text.delete(1.0, tk.END)
            self.text.insert(tk.INSERT, "ๆจๆช้ๆฉๆไปถ๏ผ", "tag1")
            self.text.config(state=tk.DISABLED)
    def SeparateBtn(self):
        """Run the trained model on the selected file."""
        self.separatelabel.config(text="ๆญฃๅจๅ็ฆป๏ผ่ฏท็จ็ญ๏ผ", foreground="red")
        self.RNN.test(self.testfile, self.savepath)
        self.separatelabel.config(text="ๅฎๆๅ็ฆป๏ผ", foreground="red")
    def LeftBtn(self):
        """Play the separated accompaniment file."""
        self.playlabel.config(text="ๆญฃๅจๆญๆพ๏ผ", foreground="red")
        filename = 'left.wav'
        self.PlayMusic(filename)
    def RightBtn(self):
        """Play the separated vocal file."""
        filename = 'right.wav'
        self.PlayMusic(filename)
    def PlayMusic(self, filename):
        """Blocking wav playback through PyAudio."""
        self.playlabel.config(text="ๆญฃๅจๆญๆพ๏ผ", foreground="red")
        wf = wave.open(filename,'rb')
        pms = wf.getparams()
        nchannels1, sampwidth1, framerate1, nframes1 = pms[:4]
        p = pyaudio.PyAudio()
        stream=p.open(format = p.get_format_from_width(sampwidth1), channels = nchannels1, rate = framerate1, output = True)
        data=wf.readframes(nframes1)
        stream.write(data)
        stream.stop_stream()  # stop the data stream
        stream.close()
        p.terminate()  # close PyAudio
        self.playlabel.config(text="ๆญๆพ็ปๆ๏ผ", foreground="red")
        print("ๆญๆพ็ปๆ๏ผ")
def main(_):
    """Entry point: build the RNN model and hand it to the GUI controller."""
    savepath = 'AudioRNN.ckpt'
    testfile = r'C:\Users\xujiahao\Desktop\MIR-1K_for_MIREX\Wavfile\amy_7_03.wav'
    rnn_model = AudioRNN()
    panel = Control(rnn_model, savepath, testfile)
    panel.gui_arrang()
    # RNN.train(savepath)
    # RNN.test(testfile, savepath)

if __name__ == '__main__':
    main(0)
import tkinter as tk
from PIL import ImageTk, Image
from tkinter import filedialog
import wave
import pyaudio
import pygame
# path = r'C:\Users\xujiahao\Desktop\MIR-1K_for_MIREX\trainwav' #ๆไปถๅคน็ฎๅฝ
# print(len(os.listdir(path)))
class Control:
    """Earlier GUI prototype: same layout, but train/separate are print stubs."""
    def __init__(self, RNN, savepath, testfile):
        self.RNN = RNN
        self.savepath = savepath
        self.testfile = testfile
        self.root = tk.Tk()
        self.root.title("ๆงๅถ้ขๆฟ")  # set the main-window title
        self.root.geometry('500x352')
        self.canvas = tk.Canvas(self.root, height=352, width=500)  # create the canvas
        self.imgpath = 'kehuan.jpg'
        self.img = Image.open(self.imgpath)
        self.image_file = ImageTk.PhotoImage(self.img)  # load the background image
        self.canvas.create_image(0,0, anchor='nw', image=self.image_file)  # place the image on the canvas
        self.canvas.pack(side='top')
        self.trainBtn = tk.Button(self.canvas, command = self.TrainBtn, text = "ๅผๅง่ฎญ็ป")
        self.getfileBtn = tk.Button(self.canvas, command = self.SelectFile, text = "้ๆฉ่ฏญ้ขๆไปถ")
        self.testBtn = tk.Button(self.canvas, command = self.TestBtn, text = "ๅผๅงๅ็ฆป")
        self.leftBtn = tk.Button(self.canvas, command = self.LeftBtn, text = "ๆญๆพ่ๆฏ้ณไน")
        self.rightBtn = tk.Button(self.canvas, command = self.RightBtn, text = "ๆญๆพไบบๅฃฐ")
        self.label = tk.Label(self.canvas)
        self.text = tk.Text(self.canvas)
    def gui_arrang(self):
        """Place all widgets on the canvas and start the Tk main loop."""
        self.canvas.create_window(100, 50, width=80, height=30,window=self.trainBtn)
        self.canvas.create_window(100, 130, width=80, height=30,window=self.getfileBtn)
        self.canvas.create_window(100, 210, width=80, height=30,window=self.testBtn)
        self.canvas.create_window(150, 290, width=80, height=30,window=self.leftBtn)
        self.canvas.create_window(350, 290, width=80, height=30,window=self.rightBtn)
        self.canvas.create_window(300, 50, width=300, height=30, window=self.label)
        self.canvas.create_window(300, 130, width=300, height=30, window=self.text)
        self.text.tag_config("tag1", foreground="red", offset=-7)
        self.text.insert(tk.INSERT, self.testfile, "tag1")
        self.text.config(state=tk.DISABLED)
        self.root.mainloop()
    def TrainBtn(self):
        # Stub: just prints the injected model object.
        print(self.RNN)
    def SelectFile(self):
        """Open a file dialog and show the chosen path in the text box."""
        self.testfile = tk.filedialog.askopenfilename()
        self.text.tag_config("tag1", foreground="red")
        if self.testfile != '':
            self.text.config(state=tk.NORMAL)
            self.text.delete(1.0, tk.END)
            self.text.insert(tk.INSERT, self.testfile, "tag1")
            self.text.config(state=tk.DISABLED)
        else:
            self.text.config(state=tk.NORMAL)
            self.text.delete(1.0, tk.END)
            self.text.insert(tk.INSERT, "ๆจๆช้ๆฉๆไปถ", "tag1")
            self.text.config(state=tk.DISABLED)
    def TestBtn(self):
        # Stub: just prints the checkpoint path.
        print(self.savepath)
    def LeftBtn(self):
        """Play the accompaniment file."""
        fname1 = 'left.wav'
        self.PlayMusic(fname1)
        print(1)
    def RightBtn(self):
        """Play the vocal file."""
        fname2 = 'right.wav'
        self.PlayMusic(fname2)
        print(2)
    def PlayMusic(self, filename):
        """Blocking wav playback through PyAudio."""
        wf = wave.open(filename,'rb')
        pms = wf.getparams()
        nchannels1, sampwidth1, framerate1, nframes1 = pms[:4]
        p = pyaudio.PyAudio()
        stream=p.open(format = p.get_format_from_width(sampwidth1), channels = nchannels1, rate = framerate1, output = True)
        # while True:
        data=wf.readframes(nframes1)
        # if data=="":
        #     break
        stream.write(data)
        stream.stop_stream()  # stop the data stream
        stream.close()
        p.terminate()  # close PyAudio
        print("ๆญๆพ็ปๆ๏ผ")
    # def PlayMusic(self, filename, loops=0, start=0.0, value=0.5):
    #     flag = True
    #     pygame.mixer.init()  # initialize the mixer module
    #     track = pygame.mixer.music.load(filename)
    #     pygame.mixer.music.play()
    #     pygame.mixer.music.set_volume(value)  # playback volume in the range 0.0 to 1.0
    #     pygame.mixer.music.play(loops=loops, start=start)  # loops/start set the repeat count and start offset
    #     # pygame.mixer.music.stop()  # stop playback
    #     # music = pygame.mixer.Sound(filename)
    #     # music.play()
    #     # music.stop()

# Smoke test: launch the GUI with placeholder arguments.
C = Control(1,2,"wer")
C.gui_arrang()
```
| github_jupyter |
# Automated Machine Learning
#### Forecasting away from training data
## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Data](#Data)
4. [Prepare remote compute and data](#prepare_remote)
5. [Create the configuration and train a forecaster](#train)
6. [Forecasting from the trained model](#forecasting)
7. [Forecasting away from training data](#forecasting_away)
## Introduction
This notebook demonstrates the full interface of the `forecast()` function.
The best known and most frequent usage of `forecast` enables forecasting on test sets that immediately follow the training data.
However, in many use cases it is necessary to continue using the model for some time before retraining it. This happens especially in **high frequency forecasting** when forecasts need to be made more frequently than the model can be retrained. Examples are in Internet of Things and predictive cloud resource scaling.
Here we show how to use the `forecast()` function when a time gap exists between training data and prediction period.
Terminology:
* forecast origin: the last period when the target value is known
* forecast periods(s): the period(s) for which the value of the target is desired.
* lookback: how many past periods (before forecast origin) the model function depends on. The larger of number of lags and length of rolling window.
* prediction context: `lookback` periods immediately preceding the forecast origin

## Setup
Please make sure you have followed the `configuration.ipynb` notebook so that your ML workspace information is saved in the config file.
```
import os
import pandas as pd
import numpy as np
import logging
import warnings
import azureml.core
from azureml.core.dataset import Dataset
from pandas.tseries.frequencies import to_offset
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
# Squash warning messages for cleaner output in the notebook
warnings.showwarning = lambda *args, **kwargs: None
# Compact numpy printing for readable notebook output.
np.set_printoptions(precision=4, suppress=True, linewidth=120)
```
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
```
print("This notebook was created using version 1.38.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
from azureml.core.workspace import Workspace
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig
# Load the workspace saved by configuration.ipynb.
ws = Workspace.from_config()

# choose a name for the run history container in the workspace
experiment_name = "automl-forecast-function-demo"

experiment = Experiment(ws, experiment_name)

# Summarize the workspace/experiment details in a small table.
output = {}
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["SKU"] = ws.sku
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Run History Name"] = experiment_name
# FIX: None (not -1) means "no truncation" — negative values were deprecated
# in pandas 1.0 and raise a ValueError in pandas >= 2.0.
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
```
## Data
For the demonstration purposes we will generate the data artificially and use them for the forecasting.
```
# Column-name constants shared by the data generator and later cells.
TIME_COLUMN_NAME = "date"
TIME_SERIES_ID_COLUMN_NAME = "time_series_id"
TARGET_COLUMN_NAME = "y"
def get_timeseries(
    train_len: int,
    test_len: int,
    time_column_name: str,
    target_column_name: str,
    time_series_id_column_name: str,
    time_series_number: int = 1,
    freq: str = "H",
):
    """Generate synthetic time series and split them into train/test sets.

    Each series is a noisy upward ramp (shifted by 5 per series) with an
    external predictor column and a constant series-id column.

    :param train_len: number of training rows per series.
    :param test_len: number of test rows per series.
    :param time_column_name: desired name of the timestamp column.
    :param target_column_name: desired name of the target column.
    :param time_series_id_column_name: desired name of the series-id column.
    :param time_series_number: how many series to generate.
    :param freq: pandas offset alias for the sampling frequency
        (see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html).
    :returns: tuple (X_train, y_train, X_test, y_test); the target column is
        popped out of the feature frames and returned as numpy arrays.
    """
    total_len = train_len + test_len
    train_parts = []  # type: List[pd.DataFrame]
    test_parts = []  # type: List[pd.DataFrame]
    for series_idx in range(time_series_number):
        frame = pd.DataFrame(
            {
                time_column_name: pd.date_range(
                    start="2000-01-01", periods=total_len, freq=freq
                ),
                target_column_name: np.arange(total_len).astype(float)
                + np.random.rand(total_len)
                + series_idx * 5,
                "ext_predictor": np.asarray(range(42, 42 + total_len)),
                time_series_id_column_name: np.repeat(
                    "ts{}".format(series_idx), total_len
                ),
            }
        )
        train_parts.append(frame.iloc[:train_len])
        test_parts.append(frame.iloc[train_len:])
    X_train = pd.concat(train_parts)
    y_train = X_train.pop(target_column_name).values
    X_test = pd.concat(test_parts)
    y_test = X_test.pop(target_column_name).values
    return X_train, y_train, X_test, y_test
n_test_periods = 6    # forecast horizon: 6 hourly periods per series
n_train_periods = 30  # training window: 30 hourly periods per series

# Two synthetic hourly series split into train/test parts.
X_train, y_train, X_test, y_test = get_timeseries(
    train_len=n_train_periods,
    test_len=n_test_periods,
    time_column_name=TIME_COLUMN_NAME,
    target_column_name=TARGET_COLUMN_NAME,
    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,
    time_series_number=2,
)
```
Let's see what the training data looks like.
```
X_train.tail()

# plot the example time series
import matplotlib.pyplot as plt

# Re-attach the target to the features for saving/plotting.
whole_data = X_train.copy()
target_label = TARGET_COLUMN_NAME
whole_data[target_label] = y_train

# Use the declared column-name constants instead of re-hardcoding the
# strings so a rename stays consistent across the notebook.
for g in whole_data.groupby(TIME_SERIES_ID_COLUMN_NAME):
    plt.plot(g[1][TIME_COLUMN_NAME].values, g[1][TARGET_COLUMN_NAME].values, label=g[0])
plt.legend()
plt.show()
```
### Prepare remote compute and data. <a id="prepare_remote"></a>
The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation.
```
# We need to save the artificial data and then upload them to the default workspace datastore.
DATA_PATH = "fc_fn_data"
DATA_PATH_X = "{}/data_train.csv".format(DATA_PATH)

if not os.path.isdir("data"):
    os.mkdir("data")
pd.DataFrame(whole_data).to_csv("data/data_train.csv", index=False)

# Upload saved data to the default data store and wrap it in a lazily
# evaluated tabular dataset for training.
ds = ws.get_default_datastore()
ds.upload(src_dir="./data", target_path=DATA_PATH, overwrite=True, show_progress=True)
train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X))
```
You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
amlcompute_cluster_name = "fcfn-cluster"

# Verify that the cluster does not exist already; reuse it when it does.
try:
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print("Found existing cluster, use it.")
except ComputeTargetException:
    # Provision a new AmlCompute cluster (requires permission to create compute).
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_DS12_V2", max_nodes=6
    )
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)

# Block until provisioning finishes.
compute_target.wait_for_completion(show_output=True)
```
## Create the configuration and train a forecaster <a id="train"></a>
First generate the configuration, in which we:
* Set metadata columns: target, time column and time-series id column names.
* Validate our data using cross validation with rolling window method.
* Set normalized root mean squared error as a metric to select the best model.
* Set early termination to True, so the iterations through the models will stop when no improvements in accuracy score will be made.
* Set limitations on the length of experiment run to 15 minutes.
* Finally, we set the task to be forecasting.
* We apply the lag lead operator to the target value i.e. we use the previous values as a predictor for the future ones.
* [Optional] Forecast frequency parameter (freq) represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.
```
from azureml.automl.core.forecasting_parameters import ForecastingParameters

# Use the previous 1-3 hourly target values as lag features.
lags = [1, 2, 3]
forecast_horizon = n_test_periods
forecasting_parameters = ForecastingParameters(
    time_column_name=TIME_COLUMN_NAME,
    forecast_horizon=forecast_horizon,
    time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME],
    target_lags=lags,
    freq="H",  # Set the forecast frequency to be hourly
)
```
Run the model selection and training process. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.
```
from azureml.core.workspace import Workspace
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig
automl_config = AutoMLConfig(
task="forecasting",
debug_log="automl_forecasting_function.log",
primary_metric="normalized_root_mean_squared_error",
experiment_timeout_hours=0.25,
enable_early_stopping=True,
training_data=train_data,
compute_target=compute_target,
n_cross_validations=3,
verbosity=logging.INFO,
max_concurrent_iterations=4,
max_cores_per_iteration=-1,
label_column_name=target_label,
forecasting_parameters=forecasting_parameters,
)
remote_run = experiment.submit(automl_config, show_output=False)
remote_run.wait_for_completion()
# Retrieve the best model to use it further.
_, fitted_model = remote_run.get_output()
```
## Forecasting from the trained model <a id="forecasting"></a>
In this section we will review the `forecast` interface for two main scenarios: forecasting right after the training data, and the more complex interface for forecasting when there is a gap (in the time sense) between training and testing data.
### X_train is directly followed by the X_test
Let's first consider the case when the prediction period immediately follows the training data. This is typical in scenarios where we have the time to retrain the model every time we wish to forecast. Forecasts that are made on daily and slower cadence typically fall into this category. Retraining the model every time benefits the accuracy because the most recent data is often the most informative.

We use `X_test` as a **forecast request** to generate the predictions.
#### Typical path: X_test is known, forecast all upcoming periods
```
# The data set contains hourly data, the training set ends at 01/02/2000 at 05:00
# These are predictions we are asking the model to make (does not contain the target column y),
# for 6 periods beginning with 2000-01-02 06:00, which immediately follows the training data
X_test
y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test)
# xy_nogap contains the predictions in the _automl_target_col column.
# Those same numbers are output in y_pred_no_gap
xy_nogap
```
#### Confidence intervals
The forecasting model may be used to produce prediction intervals by running ```forecast_quantiles()```.
This method accepts the same parameters as forecast().
```
quantiles = fitted_model.forecast_quantiles(X_test)
quantiles
```
#### Distribution forecasts
Often the figure of interest is not just the point prediction, but the prediction at some quantile of the distribution.
This arises when the forecast is used to control some kind of inventory, for example of grocery items or virtual machines for a cloud service. In such case, the control point is usually something like "we want the item to be in stock and not run out 99% of the time". This is called a "service level". Here is how you get quantile forecasts.
```
# specify which quantiles you would like
fitted_model.quantiles = [0.01, 0.5, 0.95]
# use forecast_quantiles function, not the forecast() one
y_pred_quantiles = fitted_model.forecast_quantiles(X_test)
# quantile forecasts returned in a Dataframe along with the time and time series id columns
y_pred_quantiles
```
#### Destination-date forecast: "just do something"
In some scenarios, the X_test is not known. The forecast is likely to be weak, because it is missing contemporaneous predictors, which we will need to impute. If you still wish to predict forward under the assumption that the last known values will be carried forward, you can forecast out to "destination date". The destination date still needs to fit within the forecast horizon from training.
```
# We will take the destination date as a last date in the test set.
dest = max(X_test[TIME_COLUMN_NAME])
y_pred_dest, xy_dest = fitted_model.forecast(forecast_destination=dest)
# This form also shows how we imputed the predictors which were not given. (Not so well! Use with caution!)
xy_dest
```
## Forecasting away from training data <a id="forecasting_away"></a>
Suppose we trained a model, some time passed, and now we want to apply the model without re-training. If the model "looks back" -- uses previous values of the target -- then we somehow need to provide those values to the model.

The notion of forecast origin comes into play: the forecast origin is **the last period for which we have seen the target value**. This applies per time-series, so each time-series can have a different forecast origin.
The part of data before the forecast origin is the **prediction context**. To provide the context values the model needs when it looks back, we pass definite values in `y_test` (aligned with corresponding times in `X_test`).
```
# generate the same kind of test data we trained on,
# but now make the train set much longer, so that the test set will be in the future
X_context, y_context, X_away, y_away = get_timeseries(
train_len=42, # train data was 30 steps long
test_len=4,
time_column_name=TIME_COLUMN_NAME,
target_column_name=TARGET_COLUMN_NAME,
time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,
time_series_number=2,
)
# end of the data we trained on
print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())
# start of the data we want to predict on
print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())
```
There is a gap of 12 hours between end of training and beginning of `X_away`. (It looks like 13 because all timestamps point to the start of the one hour periods.) Using only `X_away` will fail without adding context data for the model to consume.
```
try:
y_pred_away, xy_away = fitted_model.forecast(X_away)
xy_away
except Exception as e:
print(e)
```
How should we read that error message? The forecast origin is at the last time the model saw an actual value of `y` (the target). That was at the end of the training data! The model is attempting to forecast from the end of training data. But the requested forecast periods are past the forecast horizon. We need to provide a definite `y` value to establish the forecast origin.
We will use this helper function to take the required amount of context from the data preceding the testing data. Its definition is intentionally simplified to keep the idea clear.
```
def make_forecasting_query(
    fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback
):
    """
    Build a forecast request (X_pred, y_pred) from the full dataset.

    This function will take the full dataset, and create the query
    to predict all values of the time series from the `forecast_origin`
    forward for the next `horizon` time units. Context from the previous
    `lookback` time units is included with its known target values; the
    future target values are set to NaN so the model treats them as the
    quantities to be forecast.

    fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y.
    time_column_name: string which column (must be in fulldata) is the time axis
    target_column_name: string which column (must be in fulldata) is to be forecast
    forecast_origin: datetime type the last time we (pretend to) have target values
    horizon: timedelta how far forward, in time units (not periods)
    lookback: timedelta how far back does the model look

    Example:
    ```
    forecast_origin = pd.to_datetime("2012-09-01") + pd.DateOffset(days=5) # forecast 5 days after end of training
    print(forecast_origin)
    X_query, y_query = make_forecasting_query(data,
                       forecast_origin = forecast_origin,
                       horizon = pd.DateOffset(days=7),  # 7 days into the future
                       lookback = pd.DateOffset(days=1), # model has lag 1 period (day)
                       )
    ```
    """
    # .copy() so that popping the target column below mutates our own frame,
    # not a view of `fulldata` (avoids SettingWithCopyWarning / silent aliasing).
    X_past = fulldata[
        (fulldata[time_column_name] > forecast_origin - lookback)
        & (fulldata[time_column_name] <= forecast_origin)
    ].copy()
    X_future = fulldata[
        (fulldata[time_column_name] > forecast_origin)
        & (fulldata[time_column_name] <= forecast_origin + horizon)
    ].copy()
    # np.float and np.NaN were removed from NumPy (1.24 / 2.0); use the
    # builtin float and np.nan instead.
    y_past = X_past.pop(target_column_name).values.astype(float)
    y_future = X_future.pop(target_column_name).values.astype(float)
    # Now take y_future and turn it into question marks
    y_query = y_future.copy().astype(float)  # because sometimes life hands you an int
    y_query.fill(np.nan)
    print("X_past is " + str(X_past.shape) + " - shaped")
    print("X_future is " + str(X_future.shape) + " - shaped")
    print("y_past is " + str(y_past.shape) + " - shaped")
    print("y_query is " + str(y_query.shape) + " - shaped")
    X_pred = pd.concat([X_past, X_future])
    y_pred = np.concatenate([y_past, y_query])
    return X_pred, y_pred
```
Let's see where the context data ends - it ends, by construction, just before the testing data starts.
```
print(
X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(
["min", "max", "count"]
)
)
print(
X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg(
["min", "max", "count"]
)
)
X_context.tail(5)
# Since the length of the lookback is 3,
# we need to add 3 periods from the context to the request
# so that the model has the data it needs
# Put the X and y back together for a while.
# They like each other and it makes them happy.
X_context[TARGET_COLUMN_NAME] = y_context
X_away[TARGET_COLUMN_NAME] = y_away
fulldata = pd.concat([X_context, X_away])
# forecast origin is the last point of data, which is one 1-hr period before test
forecast_origin = X_away[TIME_COLUMN_NAME].min() - pd.DateOffset(hours=1)
# it is indeed the last point of the context
assert forecast_origin == X_context[TIME_COLUMN_NAME].max()
print("Forecast origin: " + str(forecast_origin))
# the model uses lags and rolling windows to look back in time
n_lookback_periods = max(lags)
lookback = pd.DateOffset(hours=n_lookback_periods)
horizon = pd.DateOffset(hours=forecast_horizon)
# now make the forecast query from context (refer to figure)
X_pred, y_pred = make_forecasting_query(
fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback
)
# show the forecast request aligned
X_show = X_pred.copy()
X_show[TARGET_COLUMN_NAME] = y_pred
X_show
```
Note that the forecast origin is at 17:00 for both time-series, and periods from 18:00 are to be forecast.
```
# Now everything works
y_pred_away, xy_away = fitted_model.forecast(X_pred, y_pred)
# show the forecast aligned
X_show = xy_away.reset_index()
# without the generated features
X_show[["date", "time_series_id", "ext_predictor", "_automl_target_col"]]
# prediction is in _automl_target_col
```
## Forecasting farther than the forecast horizon <a id="recursive forecasting"></a>
When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future.
To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.

Internally, we apply the forecaster in an iterative manner and finish the forecast task in two iterations. In the first iteration, we apply the forecaster and get the prediction for the first forecast-horizon periods (y_pred1). In the second iteration, y_pred1 is used as the context to produce the prediction for the next forecast-horizon periods (y_pred2). The combination of (y_pred1 and y_pred2) gives the results for the total forecast periods.
A caveat: forecast accuracy will likely be worse the farther we predict into the future since errors are compounded with recursive application of the forecaster.


```
# generate the same kind of test data we trained on, but with a single time-series and test period twice as long
# as the forecast_horizon.
_, _, X_test_long, y_test_long = get_timeseries(
train_len=n_train_periods,
test_len=forecast_horizon * 2,
time_column_name=TIME_COLUMN_NAME,
target_column_name=TARGET_COLUMN_NAME,
time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME,
time_series_number=1,
)
print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min())
print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max())
# forecast() function will invoke the recursive forecast method internally.
y_pred_long, X_trans_long = fitted_model.forecast(X_test_long)
y_pred_long
# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following.
y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon])
y_pred_all, _ = fitted_model.forecast(
X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan)))
)
np.array_equal(y_pred_all, y_pred_long)
```
#### Confidence interval and distributional forecasts
AutoML cannot currently estimate forecast errors beyond the forecast horizon set during training, so the `forecast_quantiles()` function will return missing values for quantiles not equal to 0.5 beyond the forecast horizon.
```
fitted_model.forecast_quantiles(X_test_long)
```
Similarly to the simple scenarios illustrated above, forecasting farther than the forecast horizon in other scenarios like 'multiple time-series', 'Destination-date forecast', and 'forecast away from the training data' is also automatically handled by the `forecast()` function.
| github_jupyter |
```
import os
import numpy as np
import pandas as pd
path = "../data/partial_files/"
elements_list = ["players_info", "match_info", "players_lanes", "player_laning_stats",
"player_flair_stats", "champion_bans", "champion_picks",
"player_combat_stats", "player_objective_stats", "players_champions"]
files_list = os.listdir(path)
```
# Concatenate the dataframes found for each element
```
frames = {}
for element in elements_list:
to_concat = []
for file in files_list:
if element in file:
to_concat.append(pd.read_pickle(path+file))
frames[element] = pd.concat(to_concat, ignore_index=True)
```
# Check the data for duplicates or weird values
```
frames.keys()
frames["match_info"].duplicated().sum()
frames["players_info"].duplicated().sum()
frames["players_champions"].head()
frames["players_lanes"].head()
frames["player_laning_stats"].duplicated().sum()
frames["player_combat_stats"].duplicated().sum()
frames["player_objective_stats"].duplicated().sum()
frames["player_flair_stats"].duplicated().sum()
frames["champion_bans"]["champion"] = frames["champion_bans"]["champion"].replace(-1, "no ban")
frames["champion_picks"].duplicated().sum()
```
# Clean the data from duplicates and check dtypes and store the cleaned data to our data folder
```
final_path = "../data/"
```
**players_info**
```
frames["players_info"].dtypes
frames["players_info"] = frames["players_info"].drop_duplicates()
frames["players_info"].to_csv(final_path+"players_info.csv", index=False)
```
**match_info**
```
frames["match_info"].dtypes
frames["match_info"].to_csv(final_path+"match_info.csv", index=False)
```
**champion_statistics**
```
frames["champion_bans"].to_csv(final_path+"champion_bans.csv", index=False)
frames["champion_picks"].to_csv(final_path+"champion_picks.csv", index=False)
```
**player_statistics**
```
frames["players_lanes"].to_csv(final_path+"player_lanes.csv", index=False)
frames["players_champions"].to_csv(final_path+"players_champions.csv", index=False)
# in game
# drop duplicates
frames["player_laning_stats"] = frames["player_laning_stats"].drop_duplicates()
frames["player_combat_stats"] = frames["player_combat_stats"].drop_duplicates()
frames["player_objective_stats"] = frames["player_objective_stats"].drop_duplicates()
frames["player_flair_stats"] = frames["player_flair_stats"].drop_duplicates()
# store files
frames["player_laning_stats"].to_csv(final_path+"player_laning_stats.csv", index=False)
frames["player_combat_stats"].to_csv(final_path+"player_combat_stats.csv", index=False)
frames["player_objective_stats"].to_csv(final_path+"player_objective_stats.csv", index=False)
frames["player_flair_stats"].to_csv(final_path+"player_flair_stats.csv", index=False)
```
**Merge the data**
```
shared = ["match_id", "account_id", "region", "champion", "lane", "won"]
complete_df = (pd.merge(frames["player_laning_stats"], frames["player_combat_stats"], on=shared, how="left")
.merge(frames["player_objective_stats"], on=shared, how="left")
.merge(frames["player_flair_stats"], on=shared, how="left")
.fillna(0))
complete_df.head()
complete_df.to_pickle("../data/merged_stats.pkl", protocol=4)
```
| github_jupyter |
# Bouts of Sleep from a month-long recording of WT C57BL/6 mice
### First set up the working environment
```
import numpy as np # calculations
import pandas as pd # dataframes and IO
import matplotlib.pyplot as plt # plotting
# show graphs/figures in notebooks
%matplotlib inline
import seaborn as sns # statistical plots and analysis
sns.set(style="ticks") # styling
sns.set_context("talk")
```
### Then import .CSV text file with the 1 month of activity data for 2 mice (with ISO-8601 encoding for the timepoints) - used for Figure4.
#### Then apply all the commands to remove unwanted columns and adjust the time to match environmental/Zeitgeber time
```
ts_pre = pd.read_csv('../PIRdata/1monthPIRsleep.csv', parse_dates=[0],index_col=[0])
ts_pre.pop('PIR2') # remove unwarnted/empty columns
ts_pre.pop('PIR4')
ts_pre.pop('PIR5')
ts_pre.pop('PIR6')
ts_pre.pop('Device')
ts_sH = pd.DataFrame.tshift(ts_pre,-7, freq='H', axis=0) # shift back 7 hours
# then shift this back 1 minute, and cut out one month of activity from slightly longer file
ts = pd.DataFrame.tshift(ts_sH,-1, freq='T', axis=0).truncate(before='2014-08-14 00:00:00.000000',
after='2014-09-14 00:00:00.000000')
# show end of new timeseries (ts)
ts.tail(4)
```
### Next we define some functions that allow us to look at the length of bouts of sleep and when they occur
```
def sleepscan(a, bins):
    """Flag bouts of sleep: 1 where the trailing `bins` samples are all zero.

    A rolling sum over `bins` samples equals zero exactly when every sample
    in the window is zero (activity counts are non-negative), which is the
    operational definition of sleep used here (e.g. 4 x 10 s = 40 s of
    immobility).
    """
    window_totals = a.rolling(bins).sum()
    # The first bins-1 windows are NaN; NaN == 0 is False, so those
    # positions come out as 0 (awake), matching the original behaviour.
    return window_totals.eq(0).astype(int)
def sleep_count(val):
    """Running counter of consecutive sleep bins.

    Increments on a sleep bin (val == 1), resets to 0 on a wake bin
    (val == 0), and returns the current run length. Any other value leaves
    the counter unchanged. State lives on the function object itself
    (sleep_count.count), so reset it to 0 before scanning a new trace.
    """
    if val == 1:
        sleep_count.count += 1
    elif val == 0:
        sleep_count.count = 0
    return sleep_count.count

sleep_count.count = 0  # static variable: current run length of sleep bins
def bouts_sleep(data, dataLD, ID='ID'):
    """Extract individual sleep bouts from a consecutive-sleep-bin counter.

    :param data: pandas Series (time-indexed) of running sleep-bin counts,
        as produced by `sleepscan(...).apply(sleep_count)` -- 1 marks the
        first bin of a bout, values >= 1 an ongoing bout, 0 a wake bin.
    :param dataLD: pandas Series aligned with `data`; 0 means lights off
        (dark), anything else means lights on.
    :param ID: identifier stored in the output (e.g. the PIR channel name).
    :return: DataFrame indexed by bout start time, with the end time, bout
        length (in 10 s bins), light/dark flag at bout onset, and ID.
    """
    length_bins = []
    bout_start_time = []
    bout_end_time = []
    bout_LD = []
    n = data.shape[0]
    for i in range(n):
        # Positional access via .iloc: the Series is datetime-indexed, so
        # integer-key access (data[i]) is deprecated in modern pandas.
        val = data.iloc[i]
        if val == 1:  # first bin of a sleep bout
            bout_start_time.append(data.index[i])
            bout_LD.append('D' if dataLD.iloc[i] == 0 else 'L')
        elif i != 0 and data.iloc[i - 1] >= 1 and val == 0:
            # wake bin after a bout: the previous bin was the bout's last.
            # (i != 0 is tested FIRST so data.iloc[-1] is never touched;
            # the original short-circuit order read the last element at i=0)
            bout_end_time.append(data.index[i - 1])
            length_bins.append(data.iloc[i - 1])
        if val >= 1 and i == n - 1:
            # Record ends mid-bout: close the bout at the final bin. This is
            # a separate `if` (not elif) so a bout that *starts* on the very
            # last bin is also closed; the original elif chain missed that
            # case and produced mismatched column lengths in the DataFrame.
            bout_end_time.append(data.index[i])
            length_bins.append(val)
    final_frame = pd.DataFrame(data={'Start_Time': bout_start_time,
                                     'End_Time': bout_end_time,
                                     'Length of bout, 10s bins': length_bins,
                                     'Light or Dark': bout_LD,
                                     'ID': ID}).set_index('Start_Time')
    return final_frame
```
### Add new columns to a copy of the 'ts' dataframe, with 2 steps:
### 1. using the 'sleepscan' function to define sleep
### 2. using the 'sleep_count' function for counting the periods of sleep (number of consecutive 10s bins) in the data produced by step 1
```
sc = ts.assign(sleep1 =sleepscan(ts['PIR1'],4).apply(sleep_count),
sleep2 =sleepscan(ts['PIR3'],4).apply(sleep_count))
sc.head(20)
```
### Now run the analysis of bouts of sleep for the "sleep1" column
### Using the results of 'sleep-count' to find the start and end times of each bout, along with the length of the bout in 10s bins and if the bout started in light (L) or darkness (D)
```
# create a new dataframe, and run the 'bouts_sleep' function
# on the first column containing counts of sleep based on 40s or more of immobility
sbouts_1 =bouts_sleep(sc['sleep1'], sc['LDR'], ID='PIR1')
```
### Save the resulting data and have a look at the first few lines
```
sbouts_1.to_csv('../PIRdata/bouts/wildtype_1212_SleepBouts_1month.csv')
sbouts_1.head()
```
### Add columns for the hour in which each bout starts or finishes (from UTC timestamps)
```
sbouts_1['start-hour']=sbouts_1.index.hour
sbouts_1['end-hour']=sbouts_1.End_Time.dt.hour
# Have a look at a week
sbouts_1['Length of bout, 10s bins'].truncate(before='2014-08-14 00:00:00.000000',
after='2014-08-21 00:00:00.000000') .plot()
```
### Have a look at the distribution of bout lengths
```
sns.kdeplot(sbouts_1['Length of bout, 10s bins']/6, bw='scott',label='sleep bout length, min', shade=True, color='black')
```
## An example of counting the number of bouts of sleep of different lengths,
#### Dividing data into bins of 0-1 min, 1-10 min, 10-60 min and 60 min+
```
binsSleepLength = sbouts_1.groupby(['ID', pd.cut(sbouts_1['Length of bout, 10s bins'],[0,6,60,360,8640])]).count() #.aggregate([np.mean,st.sem])
binsSleepLength.fillna(0, inplace=True)
#binsActLength['Bout Length, 10s bins'].groupby(level=[2,0]) #.mean().to_clipboard()
binsSleepLength['Length of bout, 10s bins'] #.to_clipboard()
```
## or carry out this counting on bouts starting in darkness or in the light
```
sbouts_1D =sbouts_1.loc[sbouts_1['Light or Dark']=='D']
binsSleepLengthD = sbouts_1D.groupby(['ID', pd.cut(sbouts_1D['Length of bout, 10s bins'],[0,6,60,360,8640])]).count()
binsSleepLengthD.fillna(0, inplace=True)
binsSleepLengthD['Length of bout, 10s bins'] #.to_clipboard()
sbouts_1L =sbouts_1.loc[sbouts_1['Light or Dark']=='L']
binsSleepLengthL = sbouts_1L.groupby(['ID', pd.cut(sbouts_1L['Length of bout, 10s bins'],[0,6,60,360,8640])]).count()
binsSleepLengthL.fillna(0, inplace=True)
binsSleepLengthL['Length of bout, 10s bins'] #.to_clipboard()
```
### and plot the length of bouts of sleep (starting in light or darkness) against the time of day (hour) they start
```
plt.scatter(y= sbouts_1D['Length of bout, 10s bins']/6,x=sbouts_1D['start-hour'], color='black')
plt.scatter(y= sbouts_1L['Length of bout, 10s bins']/6,x=sbouts_1L['start-hour'], color='lightgrey')
plt.ylabel('bout length, minutes')
plt.xlabel('Hour of Day, ZT')
plt.xlim(0,24)
plt.ylim(0,200)
plt.show()
```
| github_jupyter |
# Proof of concept of new "composable" ADMM formulation
3/30/21
This notebook is a proof of concept and understanding of the new ADMM formulation, based on grouping quadratic terms and linear constraints in with the global equality constraint.
```
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
from time import time
import seaborn as sns
import cvxpy as cvx
sns.set_style('darkgrid')
import sys
sys.path.append('..')
from osd import Problem
from osd.components import GaussNoise, SmoothSecondDifference, PiecewiseConstant, SparseFirstDiffConvex, LaplaceNoise, Blank
from osd.generators import proj_l2_d2, make_pwc_data
from osd.utilities import progress
```
## Problem data generation
```
T = 1000
X_real = np.zeros((3, T))
X_real[0] = 0.1 * np.random.randn(T)
X_real[1] = proj_l2_d2(np.random.randn(T), theta=5e2) * 2
X_real[2] = make_pwc_data(T, segments=4)
y = np.sum(X_real, axis=0)
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(14, 7))
ax[0].set_title('Smooth component')
ax[0].plot(X_real[1])
ax[1].set_title('PWC component')
ax[1].plot(X_real[2])
ax[2].set_title('Observed signal')
ax[2].plot(y, linewidth=1, marker='.')
# ax[2].plot(signal1 + signal2, label='true signal minus noise', ls='--')
plt.tight_layout()
plt.show()
```
## Example 1: Quadratically smooth, plus Gaussian noise
```
y = np.sum(X_real[:2], axis=0)
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(14, 7))
ax[0].set_title('Noise component')
ax[0].plot(X_real[0])
ax[1].set_title('Smooth component')
ax[1].plot(X_real[1])
ax[2].set_title('Observed signal')
ax[2].plot(y, linewidth=1, marker='.')
# ax[2].plot(signal1 + signal2, label='true signal minus noise', ls='--')
plt.tight_layout()
plt.show()
```
### Solve with CVXPY + MOSEK
```
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
components = [c1, c2]
problem = Problem(y, components)
problem.weights.value = [c.theta for c in problem.components]
problem.decompose(admm=False, solver='MOSEK')
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true')
ax[k].plot(est, label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
ax[k].legend()
plt.tight_layout()
```
### Solve with new ADMM formulation
This problem is comprised of quadratic terms and linear constraints, so everything goes in the z-update.
We have
$$ g(z,\tilde{z};\theta) = \left\lVert z_1 \right\rVert_2^2 + \theta\left\lVert \tilde{z}_2\right\rVert_2^2, $$
with
$$ \mathbf{dom}\,g = \left\{z,\tilde{z}\mid\sum_{k} z_k = y, D^2 z_2 = \tilde{z}_2\right\}.$$
Let $\hat{z} = \left[ \begin{matrix} z_1^T & z_2^T & \tilde{z}_2^T\end{matrix}\right]^T$. Then $g$ can be rewritten as
$$ g(\hat{z};P) = \hat{z}^T P \hat{z},$$
where $P\in\mathbf{R}^{(3T-2)\times(3T-2)}$ is a diagonal matrix with the first T entries equal to 1, the second T entris equal to 0, and the final T-1 entries equal to $\theta$.
```
# Initialization
X = np.zeros((K, T))
X[0] = y
X_tilde = np.zeros(T-2)
Z = np.copy(X)
Z_tilde = np.copy(X_tilde)
U = np.zeros_like(X)
U_tile = np.zeros_like(X_tilde)
d = np.zeros(3 * T - 2)
d[:T] = 1
d[2*T:] = 1e2
P = np.diag(d)
I = np.eye(T)
D = np.diff(I, axis=0, n=2)
F_1 = np.block([np.eye(T), np.eye(T), np.zeros((T, T-2))])
F_2 = np.block([np.zeros((T-2, T)), D, -1 * np.eye(T-2)])
F = np.block([[F_1], [F_2]])
A = np.block([
[P + np.eye(3*T - 2), F.T],
[F, np.zeros((F.shape[0], F.shape[0]))]
])
A.shape
v = np.random.randn(3 * T - 2)
g = np.block([y, np.zeros(T - 2)])
vp = np.block([v, g])
out_test = np.linalg.solve(A, vp)
x = cvx.Variable(3 * T - 2)
objective = cvx.Minimize(
cvx.sum_squares(x[:T]) + 1e2 * cvx.sum_squares(x[2*T:]) + 0.5 * cvx.sum_squares(x - v)
)
constraints = [
x[:T] + x[T:2*T] == y,
cvx.diff(x[T:2*T], k=2) == x[2*T:]
]
problem = cvx.Problem(objective, constraints)
problem.solve()
(cvx.sum_squares(out_test[:T]) + 1e2 * cvx.sum_squares(out_test[2*T:3*T-2]) + 0.5 * cvx.sum_squares(out_test[:3*T-2] - v)).value
plt.plot(x.value)
plt.plot(out_test[:3*T-1])
np.alltrue([
np.alltrue(np.isclose(out_test[:T] + out_test[T:2*T], y)),
np.alltrue(np.isclose(x.value[:T] + x.value[T:2*T], y))
])
np.alltrue([
np.alltrue(np.isclose(np.diff(out_test[T:2*T], n=2), out_test[2*T:3*T-2])),
np.alltrue(np.isclose(np.diff(x.value[T:2*T], n=2), x.value[2*T:]))
])
from scipy import sparse as sp
A_s = sp.csc_matrix(A)
A_s
%timeit sp.csc_matrix(A)
%timeit out_test = np.linalg.solve(A, vp)
%timeit A_factored = sp.linalg.splu(A_s)
A_factored = sp.linalg.splu(A_s)
%timeit sparse_test = A_factored.solve(vp)
A_factored = sp.linalg.splu(A_s)
sparse_test = A_factored.solve(vp)
plt.plot(x.value)
plt.plot(out_test[:3*T-1])
plt.plot(sparse_test[:3*T-1])
def make_consensus_prob(y, X, U):
    """Build the parameterized CVXPY consensus (z-update) problem.

    Minimizes || Z - (X + U) ||_F subject to the global equality constraint
    (the first K-1 component rows must sum to the data y) and the coupling
    constraint tying row 3 to the first difference of row 2. X, U and y are
    CVXPY Parameters so the same compiled problem can be re-solved with new
    values on every ADMM iteration.
    """
    Z_var = cvx.Variable(X.shape)
    X_param = cvx.Parameter(X.shape, value=X, name='X')
    U_param = cvx.Parameter(X.shape, value=U, name='U')
    y_param = cvx.Parameter(len(y), value=y, name='y')
    residual = Z_var - (X_param + U_param)
    constraints = [
        cvx.sum(Z_var[:-1, :], axis=0) == y_param,
        cvx.diff(Z_var[2, :], k=1) == Z_var[3, :-1],
    ]
    return cvx.Problem(cvx.Minimize(cvx.norm(residual, 'fro')), constraints)
def calc_obj(y, X, components, use_ix):
    """
    Calculate the current objective value of the problem
    :param y: numpy array containing problem data
    :param X: current estimate of decomposed signal components from ADMM
    :param components: list of osd.components class objects
    :param use_ix: the known index set (Boolean array)
    :return: the scalar problem objective value
    """
    X_tilde = make_estimate(y, X, use_ix)
    total = 0
    for component, x_k in zip(components, X_tilde):
        raw_cost = component.cost(x_k)
        try:
            # convex components return a CVXPY expression; extract the scalar
            raw_cost = raw_cost.value.item()
        except AttributeError:
            pass  # non-convex components already return a plain number
        total += component.theta * raw_cost
    return total
def make_estimate(y, X, use_ix):
    """
    After any given iteration of the ADMM algorithm, generate an estimate that
    is feasible with respect to the global equality constraint by making x0
    equal to the residual between the input data y and the rest of the
    component estimates
    :param y: numpy array containing problem data
    :param X: current estimate of decomposed signal components from ADMM
    :param use_ix: the known index set (Boolean array)
    :return: a copy of X with the first component replaced by the residuals
    """
    X_tilde = np.copy(X)
    # Index y with use_ix too: the original used the full y, which raises a
    # broadcasting error whenever use_ix is not all-True (missing data).
    X_tilde[0, use_ix] = y[use_ix] - np.sum(X[1:, use_ix], axis=0)
    X_tilde[0, ~use_ix] = 0
    return X_tilde
def run_admm(data, components, num_iter=50, rho=1., use_ix=None, verbose=True,
             randomize_start=False, X_init=None):
    """
    Serial implementation of SD ADMM algorithm.
    :param data: numpy array containing problem data
    :param components: list of osd.components class objects
    :param num_iter: (int) the number of ADMM iterations to perform
    :param rho: (float) the ADMM learning rate
    :param use_ix: (None or Boolean array) the set of known index values
    :param verbose: (Boolean) print progress to screen
    :param randomize_start: (Boolean) Randomize initialization of components
    :param X_init: (None or 2d array) optional warm-start value for X,
        shape (len(components), len(data))
    :return: dict with the best estimate found ('X'), its scaled dual
        variable ('u'), the iteration it occurred at ('it'), the residual
        and objective histories, and the best objective value
    """
    y = data
    T = len(data)
    K = len(components)
    if use_ix is None:
        use_ix = np.ones_like(data, dtype=bool)
    if X_init is None:
        X = np.zeros((K, T))
        if not randomize_start:
            # feasible start: first component absorbs all observed data
            X[0, use_ix] = y[use_ix]
        else:
            X[1:, :] = np.random.randn(K - 1, T)
            X[0, use_ix] = y[use_ix] - np.sum(X[1:, use_ix], axis=0)
    elif X_init.shape == (K, T):
        X = np.copy(X_init)
    else:
        m1 = 'A initial value was given for X that does not match the problem shape.'
        print(m1)
        return
    Z = np.copy(X)
    U = np.zeros_like(X)
    residuals = []
    obj_vals = []
    ti = time()
    consensus_problem = make_consensus_prob(y, X, U)
    # BUG FIX: was `prob.parameters()` -- `prob` is not defined in this scope
    parameters = {p.name(): p for p in consensus_problem.parameters()}
    best = {
        'X': None,
        'u': None,
        'it': None,
        'obj_val': np.inf
    }
    try:
        for it in range(num_iter):
            if verbose:
                td = time() - ti
                progress(it, num_iter, '{:.2f} sec'.format(td))
            # x-update: apply proximal operators for each signal class
            for k in range(K):
                prox = components[k].prox_op
                theta = components[k].theta
                X[k, :] = prox(Z[k, :] - U[k, :], theta, rho)
            # z-update (consensus step): re-solve the parameterized problem
            parameters['X'].value = X
            parameters['U'].value = U
            consensus_problem.solve(solver='MOSEK')
            Z = consensus_problem.variables()[0].value
            # u-update: scaled dual variable
            U += X - Z
            # mean-square-error of the global equality constraint
            resid = np.sum(X[:-1, use_ix], axis=0) - y[use_ix]
            mse = np.sum(np.power(resid, 2)) / resid.size
            residuals.append(mse)
            obj_val = calc_obj(y, X, components, use_ix)
            obj_vals.append(obj_val)
            if obj_val < best['obj_val']:
                X_tilde = make_estimate(y, X, use_ix)
                # BUG FIX: copy U -- it is mutated in place every iteration,
                # so storing a reference would silently track the *final*
                # dual variable instead of the best iteration's
                best = {
                    'X': X_tilde,
                    'u': U.copy(),
                    'it': it,
                    'obj_val': obj_val
                }
    except Exception as e:
        # BUG FIX: was `except error`, which names the local residual array,
        # not an exception class, and itself raises a TypeError when hit
        print("something failed :(")
        print(e)
    if verbose:
        td = time() - ti
        progress(it + 1, num_iter, '{:.2f} sec\n'.format(td))
    outdict = {
        'X': best['X'],
        'u': best['u'],
        'it': best['it'],
        'residuals': residuals,
        'obj_vals': obj_vals,
        'best_obj': best['obj_val']
    }
    return outdict
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
c3 = Blank()
c4 = LaplaceNoise()
results = run_admm(y, [c1, c2, c3, c4], rho=5, num_iter=500)
plt.plot(results['obj_vals'])
plt.axvline(results['it'], color='red', ls='--')
plt.yscale('log')
K = 3
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
if k > 0:
true = X_real[k]
est = results['X'][k]
ax[k].plot(true, label='true')
ax[k].plot(est, label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
else:
ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
ax[k].plot(np.sum(results['X'][1:-1], axis=0), label='estimated')
ax[k].set_title('Composed Signal')
ax[k].legend()
plt.tight_layout()
plt.plot(results['X'][2])
```
## Cardinality-constrained formulation
### Solve with ADMM
```
# Components for the cardinality-constrained decomposition: Gaussian-noise
# residual, a smooth trend, and a piecewise-constant component limited to
# 4 segments.
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
c3 = PiecewiseConstant(num_segments=4)
components = [c1, c2, c3]
problem = Problem(y, components)
# Solve via ADMM; the convexity of each component is checked in a later cell.
problem.decompose(admm=True, rho=1, num_iter=100)
# Objective per iteration (log scale); red dashed line marks the best iterate.
plt.plot(problem.admm_result['obj_vals'])
plt.axvline(problem.admm_result['it'], color='red', ls='--')
plt.yscale('log')
# Visualize the decomposition: one panel per component, with the composed
# signal (observed vs. true vs. estimated) in the top panel.
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for idx in range(K):
    panel = ax[idx]
    if idx == 0:
        panel.plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
        panel.plot(np.sum(X_real[1:], axis=0), label='true')
        panel.plot(np.sum(problem.estimates[1:], axis=0), label='estimated')
        panel.set_title('Composed Signal')
    else:
        actual = X_real[idx]
        fitted = problem.estimates[idx]
        panel.plot(actual, label='true')
        # Shift the estimate to the true component's mean before comparing.
        panel.plot(fitted - np.average(fitted) + np.average(actual), label='estimated (mean adj.)')
        panel.set_title('Component {}'.format(idx))
    panel.legend()
plt.tight_layout()
```
### Solve with CVXPY
```
problem.decompose(admm=False)
print([c.is_convex for c in problem.components])
```
## $\ell_1$-penalized formulation
### Solve with ADMM
```
# Components for the l1-penalized formulation: Gaussian-noise residual,
# a smooth trend, and a sparse-first-difference (convex) component.
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
c3 = SparseFirstDiffConvex(theta=1e0)
components = [c1, c2, c3]
problem = Problem(y, components)
# Solve via ADMM with the same settings as the cardinality-constrained case.
problem.decompose(admm=True, rho=1, num_iter=100)
# Objective per iteration (log scale); red dashed line marks the best iterate.
plt.plot(problem.admm_result['obj_vals'])
plt.axvline(problem.admm_result['it'], color='red', ls='--')
plt.yscale('log')
# Plot each estimated component against its true counterpart; panel 0 holds
# the composed signal (observed / true / estimated sums).
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
    if k > 0:
        true = X_real[k]
        est = problem.estimates[k]
        ax[k].plot(true, label='true')
        # Offsets between components are compared at a common mean.
        ax[k].plot(est - np.average(est) + np.average(true), label='estimated (mean adj.)')
        ax[k].set_title('Component {}'.format(k))
    else:
        ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
        ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
        ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated')
        ax[k].set_title('Composed Signal')
    ax[k].legend()
plt.tight_layout()
```
### Solve with CVXPY
```
# Load each component's penalty weight (theta) into the CVXPY problem, then
# solve the convex formulation directly with MOSEK instead of ADMM.
problem.weights.value = [c.theta for c in problem.components]
problem.decompose(admm=False, solver='MOSEK')
# Same plotting layout as the ADMM cells above: one panel per component,
# composed signal in panel 0.
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
    if k > 0:
        true = X_real[k]
        est = problem.estimates[k]
        ax[k].plot(true, label='true')
        # Offsets between components are compared at a common mean.
        ax[k].plot(est - np.average(est) + np.average(true), label='estimated (mean adj.)')
        ax[k].set_title('Component {}'.format(k))
    else:
        ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
        ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
        ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated')
        ax[k].set_title('Composed Signal')
    ax[k].legend()
plt.tight_layout()
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_parent" href="https://github.com/giswqs/geemap/tree/master/tutorials/FeatureCollection/us_census_data.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_parent" href="https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/tutorials/FeatureCollection/us_census_data.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_parent" href="https://colab.research.google.com/github/giswqs/geemap/blob/master/tutorials/FeatureCollection/us_census_data.ipynb"><img width=26px src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
# U.S. Census Data
The United States Census Bureau Topologically Integrated Geographic Encoding and Referencing (TIGER) dataset contains the 2018 boundaries for the primary governmental divisions of the United States. In addition to the fifty states, the Census Bureau treats the District of Columbia, Puerto Rico, and each of the island areas (American Samoa, the Commonwealth of the Northern Mariana Islands, Guam, and the U.S. Virgin Islands) as the statistical equivalents of States for the purpose of data presentation. Each feature represents a state or state equivalent.
For full technical details on all TIGER 2018 products, see the [TIGER technical documentation](https://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2018/TGRSHP2018_TechDoc.pdf).
* [TIGER: US Census States](https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_States): `ee.FeatureCollection("TIGER/2018/States")`
* [TIGER: US Census Counties](https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_Counties): `ee.FeatureCollection("TIGER/2018/Counties")`
* [TIGER: US Census Tracts](https://developers.google.com/earth-engine/datasets/catalog/TIGER_2010_Tracts_DP1): `ee.FeatureCollection("TIGER/2010/Tracts_DP1")`
* [TIGER: US Census Blocks](https://developers.google.com/earth-engine/datasets/catalog/TIGER_2010_Blocks): `ee.FeatureCollection("TIGER/2010/Blocks")`
* [TIGER: US Census Roads](https://developers.google.com/earth-engine/datasets/catalog/TIGER_2016_Roads): `ee.FeatureCollection("TIGER/2016/Roads")`
* [TIGER: US Census 5-digit ZIP Code](https://developers.google.com/earth-engine/datasets/catalog/TIGER_2010_ZCTA5): `ee.FeatureCollection("TIGER/2010/ZCTA5")`
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess

try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

# Checks whether this notebook is running on Google Colab
try:
    import google.colab
    import geemap.foliumap as emap
except ImportError:
    # Not running on Colab (google.colab is unavailable), so use the
    # ipyleaflet-based interface. A targeted ImportError catch replaces the
    # original bare `except:`, which would also have swallowed
    # KeyboardInterrupt and unrelated errors.
    import geemap as emap

# Authenticates and initializes Earth Engine
import ee
try:
    ee.Initialize()
except Exception:
    # No cached credentials yet: run the interactive auth flow once,
    # then initialize again.
    ee.Authenticate()
    ee.Initialize()
```
## TIGER: US Census States
https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_States

### Displaying data
```
Map = emap.Map(center=[40, -100], zoom=4)
states = ee.FeatureCollection('TIGER/2018/States')
Map.centerObject(states, 4)
Map.addLayer(states, {}, 'US States')
Map.addLayerControl() #This line is not needed for ipyleaflet-based Map
Map
```
### Displaying vector as raster
```
Map = emap.Map(center=[40, -100], zoom=4)
states = ee.FeatureCollection('TIGER/2018/States')
image = ee.Image().paint(states, 0, 2)
Map.centerObject(states, 4)
Map.addLayer(image, {}, 'US States')
Map.addLayerControl()
Map
```
### Select by attribute
#### Select one single state
```
Map = emap.Map(center=[40, -100], zoom=4)
tn = ee.FeatureCollection('TIGER/2018/States') \
.filter(ee.Filter.eq("NAME", 'Tennessee'))
Map.centerObject(tn, 6)
Map.addLayer(tn, {}, 'Tennessee')
Map.addLayerControl()
Map
tn = ee.FeatureCollection('TIGER/2018/States') \
.filter(ee.Filter.eq("NAME", 'Tennessee')) \
.first()
props = tn.toDictionary().getInfo()
print(props)
```
#### Select multiple states
```
Map = emap.Map(center=[40, -100], zoom=4)
selected = ee.FeatureCollection('TIGER/2018/States') \
.filter(ee.Filter.inList("NAME", ['Tennessee', 'Alabama', 'Georgia']))
Map.centerObject(selected, 6)
Map.addLayer(selected, {}, 'Selected states')
Map.addLayerControl()
Map
```
#### Printing all values of a column
```
states = ee.FeatureCollection('TIGER/2018/States').sort('ALAND', False)
names = states.aggregate_array("STUSPS").getInfo()
print(names)
areas = states.aggregate_array("ALAND").getInfo()
print(areas)
import matplotlib.pyplot as plt
%matplotlib notebook
plt.bar(names, areas)
plt.show()
```
#### Descriptive statistics of a column
For example, we can calculate the total land area of all states:
```
states = ee.FeatureCollection('TIGER/2018/States')
area_m2 = states.aggregate_sum("ALAND").getInfo()
area_km2 = area_m2 / 1000000
print("Total land area: ", area_km2, " km2")
states = ee.FeatureCollection('TIGER/2018/States')
stats = states.aggregate_stats("ALAND").getInfo()
print(stats)
```
#### Add a new column to the attribute table
```
states = ee.FeatureCollection('TIGER/2018/States').sort('ALAND', False)
states = states.map(lambda x: x.set('AreaKm2', x.area().divide(1000000).toLong()))
first = states.first().toDictionary().getInfo()
print(first)
```
#### Set symbology based on column values
```
Map = emap.Map(center=[40, -100], zoom=4)
states = ee.FeatureCollection('TIGER/2018/States')
visParams = {
'palette': ['purple', 'blue', 'green', 'yellow', 'orange', 'red'],
'min': 500000000.0,
'max': 5e+11,
'opacity': 0.8,
}
image = ee.Image().float().paint(states, 'ALAND')
Map.addLayer(image, visParams, 'TIGER/2018/States')
Map.addLayerControl()
Map
```
#### Download attribute table as a CSV
```
states = ee.FeatureCollection('TIGER/2018/States')
url = states.getDownloadURL(filetype="csv", selectors=['NAME', 'ALAND', 'REGION', 'STATEFP', 'STUSPS'], filename="states")
print(url)
```
#### Formatting the output
```
first = states.first()
props = first.propertyNames().getInfo()
print(props)
props = states.first().toDictionary(props).getInfo()
print(props)
for key, value in props.items():
print("{}: {}".format(key, value))
```
#### Download data as shapefile to Google Drive
```
# function for converting GeometryCollection to Polygon/MultiPolygon
def filter_polygons(ftr):
    """Reduce a feature's GeometryCollection to only its Polygon parts.

    Each sub-geometry is tagged with its type, the non-polygons are
    filtered out, and the remaining polygons are merged back into a single
    feature that keeps the original feature's properties.
    """
    sub_geometries = ftr.geometry().geometries()
    tagged = sub_geometries.map(
        lambda geo: ee.Feature(ee.Geometry(geo)).set('geoType', ee.Geometry(geo).type()))
    polygon_geometry = ee.FeatureCollection(tagged) \
        .filter(ee.Filter.eq('geoType', 'Polygon')) \
        .geometry()
    return ee.Feature(polygon_geometry).copyProperties(ftr)
states = ee.FeatureCollection('TIGER/2018/States')
new_states = states.map(filter_polygons)
col_names = states.first().propertyNames().getInfo()
print("Column names: ", col_names)
url = new_states.getDownloadURL("shp", col_names, 'states');
print(url)
desc = 'states'
# Set configuration parameters for output vector
task_config = {
'folder': 'gee-data', # output Google Drive folder
'fileFormat': 'SHP',
'selectors': col_names # a list of properties/attributes to be exported
}
print('Exporting {}'.format(desc))
task = ee.batch.Export.table.toDrive(new_states, desc, **task_config)
task.start()
```
## TIGER: US Census Blocks
https://developers.google.com/earth-engine/datasets/catalog/TIGER_2010_Blocks

```
Map = emap.Map(center=[40, -100], zoom=4)
dataset = ee.FeatureCollection('TIGER/2010/Blocks') \
.filter(ee.Filter.eq('statefp10', '47'))
pop = dataset.aggregate_sum('pop10')
print("The number of census blocks: ", dataset.size().getInfo())
print("Total population: ", pop.getInfo())
Map.setCenter(-86.79, 35.87, 6)
Map.addLayer(dataset, {}, "Census Block", False)
visParams = {
'min': 0.0,
'max': 700.0,
'palette': ['black', 'brown', 'yellow', 'orange', 'red']
}
image = ee.Image().float().paint(dataset, 'pop10')
Map.setCenter(-73.99172, 40.74101, 13)
Map.addLayer(image, visParams, 'TIGER/2010/Blocks')
Map.addLayerControl()
Map
```
## TIGER: US Census Counties 2018
https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_Counties

```
Map = emap.Map(center=[40, -100], zoom=4)
Map.setCenter(-110, 40, 5)
states = ee.FeatureCollection('TIGER/2018/States')
# .filter(ee.Filter.eq('STUSPS', 'TN'))
# // Turn the strings into numbers
states = states.map(lambda f: f.set('STATEFP', ee.Number.parse(f.get('STATEFP'))))
state_image = ee.Image().float().paint(states, 'STATEFP')
visParams = {
'palette': ['purple', 'blue', 'green', 'yellow', 'orange', 'red'],
'min': 0,
'max': 50,
'opacity': 0.8,
};
counties = ee.FeatureCollection('TIGER/2016/Counties')
# print(counties.first().propertyNames().getInfo())
image = ee.Image().paint(states, 0, 2)
# Map.setCenter(-99.844, 37.649, 4)
# Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
Map.addLayer(state_image, visParams, 'TIGER/2016/States');
Map.addLayer(ee.Image().paint(counties, 0, 1), {}, 'TIGER/2016/Counties')
Map.addLayerControl()
Map
```
## TIGER: US Census Tracts
https://developers.google.com/earth-engine/datasets/catalog/TIGER_2010_Tracts_DP1
http://magic.lib.uconn.edu/magic_2/vector/37800/demogprofilehousect_37800_0000_2010_s100_census_1_t.htm

```
Map = emap.Map(center=[40, -100], zoom=4)
dataset = ee.FeatureCollection('TIGER/2010/Tracts_DP1')
visParams = {
    'min': 0,
    'max': 4000,
    'opacity': 0.8,
    'palette': ['#ece7f2', '#d0d1e6', '#a6bddb', '#74a9cf', '#3690c0', '#0570b0', '#045a8d', '#023858']
}
# print(dataset.first().propertyNames().getInfo())
# Turn the strings into numbers so 'dp0010001' can be painted below.
# FIX: the parsed value must be written back to 'dp0010001' (the property
# painted by Image.paint); the original stored it under 'shape_area',
# leaving the painted property unconverted.
dataset = dataset.map(lambda f: f.set('dp0010001', ee.Number.parse(f.get('dp0010001'))))
# Map.setCenter(-103.882, 43.036, 8)
image = ee.Image().float().paint(dataset, 'dp0010001')
Map.addLayer(image, visParams, 'TIGER/2010/Tracts_DP1')
Map.addLayerControl()
Map
```
## TIGER: US Census Roads
https://developers.google.com/earth-engine/datasets/catalog/TIGER_2016_Roads

```
Map = emap.Map(center=[40, -100], zoom=4)
fc = ee.FeatureCollection('TIGER/2016/Roads')
Map.setCenter(-73.9596, 40.7688, 12)
Map.addLayer(fc, {}, 'Census roads')
Map.addLayerControl()
Map
```
| github_jupyter |
# Pytorch Tutorial
### 4. Saving and Loading Models and Their States
- Saving and loading model parameters
- Using ```torchvision.models```
Set up torch and torchvision.
```
import torch, torchvision
import torch.nn as nn
import torch.optim as optim
```
## Using ```torchvision.models```
Models provided by ```torchvision.models``` are:
- AlexNet
- VGG
- ResNet
- SqueezeNet
- DenseNet
- Inception v3
Refer to the [torchvision documentation](https://pytorch.org/docs/stable/torchvision/models.html) for details.
Here we load the AlexNet model structure.
```
alex = torchvision.models.alexnet()
alex
```
Model parameters are initialized randomly.
```
alex.state_dict()
```
We can also load pretrained models with the optional argument ```pretrained=True```.
```
alex_pretrained = torchvision.models.alexnet(pretrained=True)
alex_pretrained.state_dict()
```
Finetuning pretrained networks can be done by modifying the ```requires_grad``` parameter.
Here we switch on the gradients only for the last fully-connected layer.
```
# Freeze every pretrained parameter so gradients flow only through layers
# created afterwards.
for param in alex_pretrained.parameters():
    param.requires_grad = False
# Parameters of newly created modules have requires_grad=True by default
alex_pretrained.classifier[6] = nn.Linear(in_features=4096, out_features=1000, bias=True)
# Confirm that only the replaced final layer is trainable.
for name, param in alex_pretrained.named_parameters():
    print(f'{name: <20}: {param.requires_grad}')
```
## Saving and Loading Parameters
Saving the current parameters is implemented internally using pickle.
There are two ways:
1. **Saving the entire model**
Here we save the model instance itself, including model structure and parameter values. Suitable for later inference after training is done. May be unstable or even break on future releases or in other projects.
2. **Saving only the parameter values** (recommended)
Suitable for resuming training later. You may also save the optimizer state. Also ```state_dict``` is easier to manipulate and load into other models.
```
# Start from a pretrained model and an optimizer whose states will be saved.
model = torchvision.models.alexnet(pretrained=True)
optimizer = optim.Adam(model.parameters(), lr=5e-3)
# Method 1: Saving the entire model
# Pickles the full model object (structure + weights); fragile across
# releases and refactors, as discussed above.
PATH = 'alexnet_pretrained_1'
torch.save(model, PATH)
replica = torch.load(PATH)
print(type(replica))
# Method 2: Saving only the parameters
# Recommended: save a checkpoint dict holding the model and optimizer
# state_dicts plus any bookkeeping (here, an epoch counter).
PATH = 'alexnet_pretrained_2'
state = {
    'epoch': 30,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict()
}
torch.save(state, PATH)
```
Load back states to a fresh instance of ```AlexNet``` and ```optim.Adam```.
```
model = torchvision.models.alexnet()
optimizer = optim.Adam(model.parameters())
old_state = torch.load(PATH)
model.load_state_dict(old_state['state_dict'])
optimizer.load_state_dict(old_state['optimizer'])
optimizer.state_dict()['param_groups']
```
| github_jupyter |
# Get started with the Estimator primitive
Learn how to set up and use the Estimator primitive program.
## Overview
The Estimator primitive lets you efficiently calculate and interpret expectation values of quantum operators required for many algorithms. You can specify a list of circuits and observables, then evaluate expectation values and variances for a given parameter input.
## Prepare the environment
1. Follow the steps in the [getting started guide](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/getting_started.html) to get your quantum service instance ready to use.
2. You'll need at least one circuit to submit to the program. Our examples all have circuits in them, but if you want to submit your own circuit, you can use Qiskit to create one. To learn how to create circuits by using Qiskit, see the [Circuit basics tutorial](https://qiskit.org/documentation/tutorials/circuits/01_circuit_basics.html).
3. Create a list of observables. Observables let you define the properties of the circuit that are relevant to your problem and enable you to efficiently measure their expectation value. For simplicity, you can use the [PauliSumOp class](https://qiskit.org/documentation/stubs/qiskit.opflow.primitive_ops.html#module-qiskit.opflow.primitive_ops) in Qiskit to define them, as illustrated in the example below.
## Start a session
With Qiskit Runtime primitives, we introduce the concept of a session or a factory that allows you to define a job as a collection of iterative calls to the quantum computer. When you start a session, it caches the data you send so it doesn't have to be transmitted to the Quantum Datacenter on each iteration.
### Specify program inputs
The Estimator takes in the following arguments:
- **circuits**: a list of (parameterized) circuits that you want to investigate.
- **observables**: a list of observables to measure the expectation values.
- **parameters**: a list of parameters for the parameterized circuits. It should be omitted if the circuits provided are not parameterized.
- **skip_transpilation**: circuit transpilation is skipped if set to `True`. Default value is `False`.
- **service**: the `QiskitRuntimeService` instance to run the program on. If not specified, the default saved account for `QiskitRuntimeService` is initialized.
- **options**: Runtime options dictionary that control the execution environment.
- **backend**: The backend to run on. This option is required if you are running on [IBM Quantum](https://quantum-computing.ibm.com/). However, if you are running on [IBM Cloud](https://cloud.ibm.com/quantum), you can choose not to specify the backend, in which case the least busy backend is used.
You can find more details in [the Estimator API reference](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.Estimator.html).
Example:
```
from qiskit_ibm_runtime import QiskitRuntimeService, Estimator
from qiskit.circuit.library import RealAmplitudes
from qiskit.quantum_info import SparsePauliOp
service = QiskitRuntimeService()
psi1 = RealAmplitudes(num_qubits=2, reps=2)
psi2 = RealAmplitudes(num_qubits=2, reps=3)
H1 = SparsePauliOp.from_list([("II", 1), ("IZ", 2), ("XI", 3)])
H2 = SparsePauliOp.from_list([("IZ", 1)])
H3 = SparsePauliOp.from_list([("ZI", 1), ("ZZ", 1)])
```
## Write to & read from a session
Running a job and returning the results are done by writing to and reading from the session. After the results are returned, the session is automatically closed.
### Run the job & print results
Run the job, specifying your previously defined inputs and options. Use `circuit_indices`, `observable_indices`, and `parameter_values` to use a specific parameter and observable with the specified circuit.
For example, this line `psi1_H23_result = estimator(circuit_indices=[0, 0], observable_indices=[1, 2], parameter_values=[theta1]*2)` specifies the following:
- Return the value for using observable `H2` and parameter `theta1` with the circuit `psi1`.
- Return the value for using observable `H3` and parameter `theta1` with the circuit `psi1`.
```
with Estimator(
circuits=[psi1, psi2],
observables=[H1, H2, H3],
service=service,
options={ "backend": "ibmq_qasm_simulator" }
) as estimator:
theta1 = [0, 1, 1, 2, 3, 5]
theta2 = [0, 1, 1, 2, 3, 5, 8, 13]
theta3 = [1, 2, 3, 4, 5, 6]
# calculate [ <psi1(theta1)|H1|psi1(theta1)> ]
psi1_H1_result = estimator(circuit_indices=[0], observable_indices=[0], parameter_values=[theta1])
print(psi1_H1_result)
# calculate [ <psi1(theta1)|H2|psi1(theta1)>, <psi1(theta1)|H3|psi1(theta1)> ]
psi1_H23_result = estimator(circuit_indices=[0, 0], observable_indices=[1, 2], parameter_values=[theta1]*2)
print(psi1_H23_result)
# calculate [ <psi2(theta2)|H2|psi2(theta2)> ]
# Note that you don't need to specify the labels "circuit_indices", "observable_indices", or "parameter_values", as long as they are specified in that order.
psi2_H2_result = estimator([1], [1], [theta2])
print(psi2_H2_result)
# calculate [ <psi1(theta1)|H1|psi1(theta1)>, <psi1(theta3)|H1|psi1(theta3)> ]
psi1_H1_result2 = estimator([0, 0], [0, 0], [theta1, theta3])
print(psi1_H1_result2)
# calculate [ <psi1(theta1)|H1|psi1(theta1)>,
# <psi2(theta2)|H2|psi2(theta2)>,
# <psi1(theta3)|H3|psi1(theta3)> ]
psi12_H23_result = estimator([0, 1, 0], [0, 1, 2], [theta1, theta2, theta3])
print(psi12_H23_result)
```
The results align with the parameter - circuit - observable tuples specified previously. For example, the first result: `EstimatorResult(values=array([1.55273438]), metadata=[{'variance': 8.897655487060547, 'shots': 1024}])` is the output of the parameter labeled `theta1` and observable `H1` being sent to the first circuit.
| github_jupyter |
# Titanic Survival Prediction
1. [Import Libraries](#heading1)<br>
2. [Read Data](#heading2)<br>
3. [Data Cleaning & Feature Engineering](#heading3)<br>
4. [Exploratory Data Analysis](#heading4)<br>
5. [Model Building & Evaluation](#heading5)<br>
5.1 [Logistic Regression](#subheading1)<br>
5.2 [Gaussian Naive Bayes](#subheading2)<br>
5.3 [Linear Discriminant Analysis (LDA)](#subheading3)<br>
5.4 [k Nearest Neighbors (kNN)](#subheading4)<br>
5.5 [Support Vector Machine (SVM)](#subheading5)<br>
5.6 [Decision Tree](#subheading6)<br>
5.7 [Random Forest](#subheading7)<br>
5.8 [XGBoost](#subheading8)<br>
5.9 [Model Stacking](#subheading9)<br>
5.10 [Result Comparison](#subheading10)<br>
6. [Conclusion](#heading6)<br>
## 1. Import Libraries <a id="heading1"></a>
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
import xgboost as xgb
# Set seed value for reproducing the same results
seed = 101
```
## 2. Read Data <a id="heading2"></a>
```
train_data = pd.read_csv("train.csv")
test_data = pd.read_csv("test.csv")
# Train data preview
train_data.head()
# Test data preview
test_data.head()
```
We can see that the 'Survived' column is missing in the test set. We have to predict that label for each passenger in the test data.
```
# Summary of train data
train_data.info()
# Summary of test data
test_data.info()
```
## 3. Data Cleaning & Feature Engineering <a id="heading3"></a>
```
# Train data descriptive statistics
train_data.describe()
# Test data descriptive statistics
test_data.describe()
```
For both train and test datasets, the statistics for 'Fare' column seem a bit strange. The minimum fare is 0 and the maximum is around 512, with 75% of values less than 31.5 and the mean being 35.6. We need to analyze this further to see if there are any outliers.
For this purpose, we can make use of a boxplot. It will help us understand the variation in the 'Fare' values by visually displaying the distribution of the data points.
```
plt.subplots(figsize=(7, 5))
plt.boxplot(train_data['Fare'])
plt.title('Boxplot of Fare')
plt.show()
```
It seems like there are a few extreme data points. Let's explore this further.
```
# Retrieve rows with Fare greater than 500
train_data[train_data['Fare']>500]
```
Since all of the passengers have the same ticket number, we can conclude that the fare was calculated for the entire group and not each individual. Hence, we will not discard these rows.
To standardize the fare calculation across all passengers in the dataset, the obvious step would be to divide fare by the number of people on the same ticket and get the individual fare. But factors such as reduced fares for children, missing values, etc., will further complicate things. Therefore, we will leave it as it is. For an in-depth understanding of the titanic dataset (particularly fare calculation), you can explore [Encyclopedia Titanica](https://www.encyclopedia-titanica.org/).
Before we proceed further, we also need to analyze passengers who had 0 fare.
```
# Retrieve rows with Fare equal to 0
train_data[train_data['Fare']==0]
```
Again, it looks like there are no data errors; just some passengers who got a free ride for whatever reason (visit Encyclopedia Titanica if you're interested to find out why).
Next, we will check for missing values.
```
# Number of missing values in each column in train data
train_data.isnull().sum()
# Number of missing values in each column in test data
test_data.isnull().sum()
```
First, let's deal with the missing 'Age' values. For that purpose, we will first extract title of each passenger from their name.
```
def extract_title(df):
    """Return a Series of honorific titles parsed from the 'Name' column.

    Names follow the pattern 'Surname, Title. Given names', so the title is
    the text between the first comma and the following period.
    """
    def _title_of(full_name):
        after_comma = full_name.split(',')[1]
        return after_comma.split('.')[0].strip()

    return df['Name'].apply(_title_of)
# Count of each title in train data
train_data['Title'] = extract_title(train_data)
train_data['Title'].value_counts()
# Count of each title in test data
test_data['Title'] = extract_title(test_data)
test_data['Title'].value_counts()
```
Since there are many titles with very few counts, we will map them to main categories (titles that are more frequently occurring).
```
# Function to map titles to main categories
def map_title(df):
    """Collapse rare honorifics into broader title categories.

    Returns a Series aligned with ``df`` where each raw title becomes one of
    'Officer', 'Royalty', 'Mr', 'Mrs', 'Miss', or 'Master'. Titles absent
    from the mapping become NaN (standard ``Series.map`` behavior).
    """
    category_members = {
        'Officer': ('Capt', 'Col', 'Major', 'Dr', 'Rev'),
        'Royalty': ('Jonkheer', 'Don', 'Sir', 'the Countess', 'Dona', 'Lady'),
        'Mrs': ('Mme', 'Ms', 'Mrs'),
        'Miss': ('Mlle', 'Miss'),
        'Mr': ('Mr',),
        'Master': ('Master',),
    }
    # Invert the category table into a flat title -> category lookup.
    lookup = {title: category
              for category, titles in category_members.items()
              for title in titles}
    return df['Title'].map(lookup)
# Count of each title in train data after mapping
train_data['Title'] = map_title(train_data)
train_data['Title'].value_counts()
# Count of each title in test data after mapping
test_data['Title'] = map_title(test_data)
test_data['Title'].value_counts()
```
Now that we have extracted titles from names, we can group data by title and impute missing age values using the median age of each category. We will also group by 'Pclass' as it will help in accurately calculating the median age within each class.<br>
Note: We are using median value instead of mean because extreme values (or outliers) have a lot more impact on mean than median.
```
# Group train data by 'Pclass', 'Title' and calculate the median age
train_data.groupby(['Pclass', 'Title']).median()['Age']
```
One thing to note here is that unlike the 'Master' title, there is no separate category for young female passengers. If we go back and look at the original dataset, we will realize that the 'Miss' title includes both young and adult females. We can somewhat solve this by identifying passengers with 'Miss' title having 1 or 2 value in the 'Parch' column. This way we can retrieve passengers who are most likely, young females (there's also a small chance that the retrieved passenger is a female adult because the 'Parch' column not only reveals the number of parents but also the number of children).
```
# Function to identify passengers who have the title 'Miss' and, 1 or 2 value in the 'Parch' column
def is_young(df):
    """Flag passengers who are likely young females.

    Returns a list of 0/1 ints, one per row: 1 when the passenger's title is
    'Miss' and the 'Parch' value is 1 or 2 (suggesting a child travelling
    with parents), else 0.
    """
    # Vectorized replacement for the original row-by-row loop with df.loc
    # lookups: a boolean mask cast to 0/1, returned as a plain list to keep
    # the original return type.
    mask = (df['Title'] == 'Miss') & df['Parch'].isin([1, 2])
    return mask.astype(int).tolist()
# Group train data by 'Pclass', 'Title', 'Is_Young(Miss)' and calculate the median age
train_data['Is_Young(Miss)'] = is_young(train_data)
grouped_age = train_data.groupby(['Pclass', 'Title', 'Is_Young(Miss)']).median()['Age']
grouped_age
```
This looks better as we can now guess the missing age values more accurately than before. We will apply this function to the test data as well.
```
test_data['Is_Young(Miss)'] = is_young(test_data)
```
Next, we will impute the missing age values according to the grouped data shown above.
```
# Fill missing age values in train and test data
train_data.set_index(['Pclass', 'Title', 'Is_Young(Miss)'], drop=False, inplace=True)
train_data['Age'].fillna(grouped_age, inplace=True)
train_data.reset_index(drop=True, inplace=True)
test_data.set_index(['Pclass', 'Title', 'Is_Young(Miss)'], drop=False, inplace=True)
test_data['Age'].fillna(grouped_age, inplace=True)
test_data.reset_index(drop=True, inplace=True)
```
A very important thing that needs to be addressed is that I've only used the train data to calculate the median ages for replacing missing values in both train and test datasets. Many people, especially those participating in data science competitions, use test data as well for preprocessing purposes. This may help people improve their model's test accuracy and rank higher in competitions, but it is considered a major mistake in real world applications (known as **data leakage**). Models built using this approach do not generalize too well to the new/unseen data and give results that are a lot poorer than expected. Hence, test data should never be used for data preprocessing and should only be used for testing purposes.
For replacing the missing 'Fare' value in test data, we will simply group the train data by 'Pclass' and repeat the same steps as above.
```
# Group train data by 'Pclass' and calculate the median fare
grouped_fare = train_data.groupby('Pclass').median()['Fare']
grouped_fare
# Fill the missing fare value in test data
test_data.set_index('Pclass', drop=False, inplace=True)
test_data['Fare'].fillna(grouped_fare, inplace=True)
test_data.reset_index(drop=True, inplace=True)
```
Finally, we will drop all of the unnecessary rows and columns:
* Name: We've extracted the information that we needed (i.e. Title) and don't need this column anymore
* Cabin: Majority of the values are missing so we will drop the entire column
* Embarked: Only 2 values are missing in train data so we can just remove those 2 entire rows
* Ticket: Doesn't seem to provide any useful information so we will drop the entire column
* Is_Young(Miss): Purpose of creating this column has been fulfilled and we don't need it anymore
```
# Drop unnecessary rows and columns
train_data.drop(columns=['Name', 'Cabin', 'Ticket', 'Is_Young(Miss)'], inplace=True)
test_data.drop(columns=['Name', 'Cabin', 'Ticket', 'Is_Young(Miss)'], inplace=True)
train_data.dropna(subset=['Embarked'], inplace=True)
```
It is always good to verify that there are no remaining missing values.
```
# Missing values in train data after data cleaning
train_data.isnull().sum()
# Missing values in test data after data cleaning
test_data.isnull().sum()
```
## 4. Exploratory Data Analysis <a id="heading4"></a>
In this section, we will try to find some interesting insights using visual methods.
First, we will look at the class distribution.
```
plt.subplots(figsize=(7, 5))
sns.countplot(x='Survived', data=train_data)
plt.title('Class Distribution')
plt.show()
```
We can clearly see that the classes are slightly imbalanced since majority of the passengers did not survive. In scenarios like this, the same ratio is expected in test data so we don't need to worry about the imbalanced classes.
Next, let's find out the ratio of survivors with respect to other variables (i.e. 'Sex', 'Pclass', 'Embarked', 'Title').
```
plt.subplots(figsize=(7, 5))
sns.barplot(x='Sex', y='Survived', data=train_data, ci=None)
plt.title('Ratio of survivors based on sex')
plt.show()
plt.subplots(figsize=(7, 5))
sns.barplot(x='Pclass', y='Survived', data=train_data, ci=None)
plt.title('Ratio of survivors based on ticket class')
plt.show()
plt.subplots(figsize=(7, 5))
sns.barplot(x='Embarked', y='Survived', data=train_data, ci=None)
plt.title('Ratio of survivors based on port of embarkation')
plt.show()
plt.subplots(figsize=(7, 5))
sns.barplot(x='Title', y='Survived', data=train_data, ci=None)
plt.title('Ratio of survivors based on title')
plt.show()
```
Based on these visualizations, we can conclude the following:
* Females had a way higher survival rate than males
* Lower ticket class (with 3 being the lowest) means less chance of survival
* Passengers who embarked from port 'C' had slightly more chances of survival
* Passengers with the title 'Mr' and 'Officer' had really low chances of survival as compared to other passengers
Note: The accuracy of these findings also depends on other factors such as the frequency distribution within each categorical variable. For example, if there is only 1 female in the entire dataset and she survived, then the survival rate of females will be 100% which cannot be considered a concrete finding. Hence, depending on the type of problem being solved, further data analysis should be done if required.
Next, we will compute the pairwise correlation of different variables, focusing mainly on how different features correlate with the target variable 'Survived'. But first, we need to convert all of the categorical variables into numeric data type.
To convert 'Sex' variable into numeric format, we will simply encode male with 1 and female with 0.
```
# Encode 'Sex' variable values
le = LabelEncoder()
train_data['Sex'] = le.fit_transform(train_data['Sex'])
test_data['Sex'] = le.transform(test_data['Sex'])
```
For 'Embarked' and 'Title' variables, we will use dummy variables to represent different values.
```
# Convert 'Embarked' and 'Title' into dummy variables
train_data = pd.get_dummies(train_data, columns=['Embarked', 'Title'])
test_data = pd.get_dummies(test_data, columns=['Embarked', 'Title'])
```
This is how the dataset looks like after conversion:
```
train_data.head()
```
Finally, we can calculate the correlation.
```
# Pairwise correlation of columns
corr = train_data.corr()
corr
```
Let's convert this into a visualization for better comprehension.
```
# Generate a mask for the upper triangle.
# Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `bool` is the documented replacement for boolean dtypes.
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(10, 8))
# Draw the heatmap with the mask
sns.heatmap(corr, mask=mask, cmap='RdBu_r', linewidths=.5, cbar_kws={'shrink': .7})
plt.show()
```
If we just focus on the 'Survived' variable, we will notice that:
* It has a comparatively strong negative correlation with 'Pclass', 'Sex' and 'Title_Mr'
* It has a comparatively strong positive correlation with 'Fare', 'Embarked_C', 'Title_Miss' and 'Title_Mrs'
## 5. Model Building & Evaluation <a id="heading5"></a>
Before we can start building the machine learning models, we need to apply feature scaling to standardize the independent variables within a particular range. This is required because some machine learning algorithms (such as kNN) tend to give more weightage to features with high magnitudes than features with low magnitudes, regardless of the unit of the values. To bring all features to the same level of magnitudes, we need to apply feature scaling.
In this case, we will use the MinMaxScaler to scale each feature to a (0, 1) range.
```
# Apply feature scaling using MinMaxScaler
scaler = MinMaxScaler()
train_data.iloc[:, 2:] = scaler.fit_transform(train_data.iloc[:, 2:])
test_data.iloc[:, 1:] = scaler.transform(test_data.iloc[:, 1:])
```
This is how the dataset looks like after feature scaling (remember, we only need to scale predictor variables):
```
train_data.head()
```
Next, we will split our train and test datasets with respect to predictor (X) and response (y) variables.
```
X_train, X_test, y_train = train_data.iloc[:, 2:], test_data.iloc[:, 1:], train_data['Survived']
```
The 'y_test' is not provided in this dataset. For getting the test scores, we will have to submit our predictions online. To make the entire process a bit smoother, we will write a function that takes in model predictions and generates a file in the required format to submit online.
```
# Function to generate submission file to get test score
def submission(preds):
    """Write predictions to 'submission.csv' in the competition format.

    Parameters
    ----------
    preds : array-like
        Predicted 'Survived' labels, row-aligned with the notebook-global
        ``test_data`` DataFrame.

    Side effects: writes 'submission.csv' in the working directory.
    """
    # Build the output on a copy so the global test_data is not mutated
    # (the original version appended a 'Survived' column in place, which
    # leaked state between model runs).
    predictions = test_data[['PassengerId']].copy()
    predictions['Survived'] = preds
    predictions.to_csv('submission.csv', index=False)
```
Now, we can finally start building machine learning models to predict which of the passengers survived.
### 5.1 Logistic Regression <a id="subheading1"></a>
Important parameters that we will tune:
* penalty: Used to specify the norm used in the penalization
* C: Inverse of regularization strength
For hyperparameter tuning, we will use grid search cross validation over the specified parameter values. We will repeat 5-fold cross validation 10 times so that we can further improve the model performance and reduce overfitting. This will lead to better results for test/unseen data.
```
# Classification model
logreg = LogisticRegression()
# Parameters to tune
params = [{'penalty': ['l1', 'l2', 'elasticnet', 'none'],
'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}]
# Hyperparameter tuning using GridSearchCV
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=seed)
lr_clf = GridSearchCV(logreg, params, cv=cv, n_jobs=-1)
lr_clf.fit(X_train, y_train)
# Best parameters
lr_clf.best_params_
# Train score
lr_clf.best_score_
```
The train accuracy is 82.7%.
```
# Test score
y_preds = lr_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 76.8%
### 5.2 Gaussian Naive Bayes <a id="subheading2"></a>
Using default parameters.
```
# Classification model
gnb = GaussianNB()
gnb.fit(X_train, y_train)
# Test score
y_preds = gnb.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 75.1%.
### 5.3 Linear Discriminant Analysis (LDA) <a id="subheading3"></a>
Using default parameters.
```
# Classification model
lda = LinearDiscriminantAnalysis()
lda.fit(X_train, y_train)
# Test score
y_preds = lda.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 77.5%.
### 5.4 k Nearest Neighbors (kNN) <a id="subheading4"></a>
Important parameters that we will tune:
* n_neighbors: Number of neighbors to use
* p: For choosing between manhattan distance and euclidean distance metrics
```
# Classification model
knn = KNeighborsClassifier()
# Parameters to tune
params = [{'n_neighbors': range(1, 21),
'p': [1, 2]}]
# Hyperparameter tuning using GridSearchCV
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=seed)
knn_clf = GridSearchCV(knn, params, cv=cv, n_jobs=-1)
knn_clf.fit(X_train, y_train)
# Best parameters
knn_clf.best_params_
# Train score
knn_clf.best_score_
```
The train accuracy is 82.1%.
```
# Test score
y_preds = knn_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 77.3%.
### 5.5 Support Vector Machine (SVM) <a id="subheading5"></a>
Important parameters that we will tune:
* C: Penalty parameter for determining the trade-off between setting a larger margin and lowering misclassification
* kernel: Specifies the kernel type to be used in the algorithm
```
# Classification model
svm = SVC(max_iter=10000)
# Parameters to tune
params = [{'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}]
# Hyperparameter tuning using GridSearchCV
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=seed)
svm_clf = GridSearchCV(svm, params, cv=cv, n_jobs=-1)
svm_clf.fit(X_train, y_train)
# Best parameters
svm_clf.best_params_
# Train score
svm_clf.best_score_
```
The train accuracy is 82.8%.
```
# Test score
y_preds = svm_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 77.8%.
### 5.6 Decision Tree <a id="subheading6"></a>
Important parameters that we will tune:
* max_depth: Maximum depth of the tree
* min_samples_split: Minimum number of samples required to split an internal node
* max_features: Number of features to consider when looking for the best split
```
# Classification model
dt = DecisionTreeClassifier(random_state=seed)
# Parameters to tune
params = [{'max_depth': [5, 7, 10, None],
'min_samples_split': [2, 5, 10],
'max_features': ['sqrt', 5, 7, 10]}]
# Hyperparameter tuning using GridSearchCV
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=seed)
dt_clf = GridSearchCV(dt, params, cv=cv, n_jobs=-1)
dt_clf.fit(X_train, y_train)
# Best parameters
dt_clf.best_params_
# Train score
dt_clf.best_score_
```
The train accuracy is 81.6%.
```
# Test score
y_preds = dt_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 78%.
### 5.7 Random Forest <a id="subheading7"></a>
Important parameters that we will tune:
* n_estimators: Number of trees in the forest
* max_depth: Maximum depth of the tree
* min_samples_split: Minimum number of samples required to split an internal node
* max_features: Number of features to consider when looking for the best split
```
# Note: This cell will take a while to run depending on the available processing power
# Classification model
rf = RandomForestClassifier(random_state=seed)
# Parameters to tune
params = [{'n_estimators': range(50, 550, 50),
'max_depth': [5, 7, 10, None],
'min_samples_split': [2, 5, 10],
'max_features': ['sqrt', 5, 7, 10]}]
# Hyperparameter tuning using GridSearchCV
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=5, random_state=seed)
rf_clf = GridSearchCV(rf, params, cv=cv, n_jobs=-1)
rf_clf.fit(X_train, y_train)
# Best parameters
rf_clf.best_params_
# Train score
rf_clf.best_score_
```
The train accuracy is 83.7%.
```
# Test score
y_preds = rf_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 77%.
### 5.8 XGBoost <a id="subheading8"></a>
Important parameters that we will tune:
* max_depth: Maximum depth of the tree
* learning_rate: Controls the contribution of each tree
* n_estimators: Number of trees
```
# Note: This cell will take a while to run depending on the available processing power
# Classification model
xgboost = xgb.XGBClassifier(random_state=seed)
# Parameters to tune
params = [{'max_depth': [3, 5, 10],
'learning_rate': [0.01, 0.02, 0.03, 0.05, 0.07, 0.1],
'n_estimators': range(100, 1100, 100)}]
# Hyperparameter tuning using GridSearchCV
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=5, random_state=seed)
xgb_clf = GridSearchCV(xgboost, params, cv=cv, n_jobs=-1)
xgb_clf.fit(X_train, y_train)
# Best parameters
xgb_clf.best_params_
# Train score
xgb_clf.best_score_
```
The train accuracy is 82.9%.
```
# Test score
y_preds = xgb_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 76.8%.
### 5.9 Model Stacking <a id="subheading9"></a>
In this part, we will stack all of our best performing models using the stacking classifier. Predictions generated by various models will be optimally combined to form a new set of predictions. (Note: The new predictions may not always give better result than the individual models).
Using default parameters.
```
# Models that we will input to stacking classifier
base_estimators = list()
base_estimators.append(('lda', lda))
base_estimators.append(('knn', knn_clf.best_estimator_))
base_estimators.append(('svm', svm_clf.best_estimator_))
base_estimators.append(('dt', dt_clf.best_estimator_))
base_estimators.append(('rf', rf_clf.best_estimator_))
# Stacking classifier
stacking_clf = StackingClassifier(estimators=base_estimators, final_estimator=LogisticRegression(), cv=5, n_jobs=-1)
stacking_clf.fit(X_train, y_train)
# Test score
y_preds = stacking_clf.predict(X_test)
submission(y_preds)
```
After submission, the test accuracy is found to be 78%.
### 5.10 Result Comparison <a id="subheading10"></a>
| Model | Train Accuracy (%) | Test Accuracy (%) |
| ----- | ------------------ | ----------------- |
| Logistic Regression | 82.7 | 76.8 |
| Gaussian Naive Bayes | N/A | 75.1 |
| Linear Discriminant Analysis | N/A | 77.5 |
| k Nearest Neighbors | 82.1 | 77.3 |
| Support Vector Machine | 82.8| 77.8 |
| Decision Tree | 81.6 | 78 |
| Random Forest | 83.7 | 77 |
| XGBoost | 82.9 | 76.8 |
| Model Stacking | N/A | 78 |
Looking at the above table, we can observe the following:
* Random Forest gave the highest train accuracy of 83.7%
* Decision Tree and Stacking Classifier performed best for test/unseen data with an accuracy of 78%
* Most of the models performed very similarly in terms of test accuracy
* Due to the small dataset size, all models have (slightly) overfitted the train data, giving lower test scores than expected
## 6. Conclusion <a id="heading6"></a>
This notebook gave a brief overview of how different steps are performed in a data science project life cycle. We started by reading in the dataset, preprocessing it, exploring it to find useful insights, and finally built various machine learning models and evaluated them. The main objective of this project was to analyze the titanic dataset and predict whether a passenger will survive or not, based on various input features. To further build and improve upon this project, a lot of techniques could be tried.
Innovative ways of feature engineering like combining the 'SibSp' and 'Parch' features, or applying different data preprocessing methods such as binning the 'Age' column could be tried to help improve the overall performance. One technique that will surely improve the scores is to further hypertune the models. Due to limited time and processing power available, we only performed grid search over a few combinations of parameters' values (we also skipped many parameters and used their default value). The extra time spent on tuning the parameters usually leads to better results.
Additionally, there are other options for trying and improving the prediction accuracy such as applying feature selection techniques or building deep learning models (e.g. neural networks). Part of a job of data scientists is to be creative, keep experimenting and try figuring out new ways of improving upon their work. The 'titanic survival prediction' task is no exception.
| github_jupyter |
## 0.Import Packages
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import os
import glob
import seaborn as sns
```
## 1. Load Dataset
```
dir = 'refined_dataset'
listdir = os.listdir(dir)
print(listdir)
print("The number of dataset :", len(listdir))
num = ['B05', 'B07', 'B18', 'B33', 'B34', 'B46', 'B47', 'B48']
for i in range(len(listdir)):
vector = np.zeros((1,3))
path = os.path.join(os.getcwd(), 'refined_dataset/', num[i] + '_discharge_soh.csv')
csv = pd.read_csv(path)
df = pd.DataFrame(csv)
vec = df[['cycle', 'capacity', 'SOH']]
globals()['data_{}'.format(num[i])] = vec
data = pd.read_csv('refined_dataset/B05_discharge_soh.csv')
df = pd.DataFrame(data)
df
data_B05 ## result
for i in range(len(listdir)):
print("Shape of data :", np.shape(globals()['data_{}'.format(num[i])]))
```
## 3. Visualization of SoH
```
for i in range(len(listdir)) :
dff = globals()['data_{}'.format(num[i])]
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(dff['cycle'], dff['SOH'])
# plt.plot(dff['cycle'], len(dff['cycle'])*[0.7], color = 'red')
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('cycle', fontsize = 15)
plt.title('Discharge_' + num[i], fontsize = 15)
plt.savefig('fig/refined_SoH_' + num[i] + '.jpg')
plt.show()
```
## 3-1. group_A
```
# Group A
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(data_B05['cycle'], data_B05['SOH'],label='B05')
plt.scatter(data_B07['cycle'], data_B07['SOH'],label='B07')
plt.scatter(data_B18['cycle'], data_B18['SOH'],label='B18')
plt.legend(prop={'size': 16})
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('Discharge cycle', fontsize = 15)
plt.title('SoH of group A', fontsize = 15)
plt.savefig('fig/A_group.jpg')
plt.show()
# Group B
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(data_B33['cycle'], data_B33['SOH'],label='B33')
plt.scatter(data_B34['cycle'], data_B34['SOH'],label='B34')
plt.legend(prop={'size': 16})
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('Discharge cycle', fontsize = 15)
plt.title('SoH of group B', fontsize = 15)
plt.savefig('fig/B_group.jpg')
plt.show()
# Group C
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(data_B46['cycle'], data_B46['SOH'],label='B46')
plt.scatter(data_B47['cycle'], data_B47['SOH'],label='B47')
plt.scatter(data_B48['cycle'], data_B48['SOH'],label='B48')
plt.legend(prop={'size': 16})
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('Discharge cycle', fontsize = 15)
plt.title('SoH of group C', fontsize = 15)
plt.savefig('fig/C_group.jpg')
plt.show()
```
| github_jupyter |
```
# !wget https://f000.backblazeb2.com/file/malay-dataset/knowledge-graph/kelm/train_X
# !wget https://f000.backblazeb2.com/file/malay-dataset/knowledge-graph/kelm/train_Y
import os
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'mesolitica-tpu.json'
from tqdm import tqdm
import re
def cleaning(string):
    """Normalise whitespace: newlines/tabs become spaces, runs of spaces collapse, ends trimmed."""
    flattened = string.replace('\n', ' ').replace('\t', ' ')
    return re.sub(r'[ ]+', ' ', flattened).strip()
import tensorflow as tf
import tensorflow_datasets as tfds
from t5.data import preprocessors as prep
import functools
import t5
import gin
import sentencepiece as spm
from glob import glob
import os
gin.parse_config_file('pretrained_models_base_operative_config.gin')
vocab = 'sp10m.cased.ms-en.model'
sp = spm.SentencePieceProcessor()
sp.Load(vocab)
with open('train_X') as fopen:
train_X = fopen.read().split('\n')
with open('train_Y') as fopen:
train_Y = fopen.read().split('\n')
with tf.io.gfile.GFile('knowledge-graph.tsv', "w") as outfile:
    for i in tqdm(range(len(train_X))):
        # Bug fix: the original tested `len(train_X)` / `len(train_Y)`
        # (the whole lists, always truthy) instead of the current pair,
        # so empty lines were never skipped. Check the elements instead.
        if len(train_X[i]) and len(train_Y[i]):
            l = cleaning(train_X[i])
            r = cleaning(train_Y[i])
            outfile.write("%s\t%s\n" % (l, r))
def knowledge_graph_dataset(split, shuffle_files=False):
    """Build a tf.data pipeline of {'question', 'answer'} dicts from the local TSV."""
    del shuffle_files  # single local file; the flag is ignored
    lines = tf.data.TextLineDataset(
        [
            'knowledge-graph.tsv'
        ]
    )
    parse_row = functools.partial(
        tf.io.decode_csv,
        record_defaults=['', ''],
        field_delim='\t',
        use_quote_delim=False,
    )
    parsed = lines.map(parse_row, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Pair the two decoded columns with their field names.
    return parsed.map(lambda *cols: dict(zip(['question', 'answer'], cols)))
def knowledge_graph_preprocessor(ds):
    """Prefix every question with the task tag and expose inputs/targets keys."""
    def add_task_prefix(ex):
        return {
            'inputs': tf.strings.join(['grafik pengetahuan: ', ex['question']]),
            'targets': ex['answer'],
        }
    return ds.map(
        add_task_prefix,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
t5.data.TaskRegistry.remove('knowledge_graph_dataset')
t5.data.TaskRegistry.add(
'knowledge_graph_dataset',
dataset_fn = knowledge_graph_dataset,
splits = ['train'],
text_preprocessor = [knowledge_graph_preprocessor],
sentencepiece_model_path = vocab,
metric_fns = [t5.evaluation.metrics.accuracy],
)
nq_task = t5.data.TaskRegistry.get("knowledge_graph_dataset")
ds = nq_task.get_dataset(split='knowledge-graph.tsv', sequence_length={"inputs": 1024, "targets": 1024})
r = tfds.as_numpy(ds)
next(r)
from google.cloud import storage
client = storage.Client()
bucket = client.bucket('mesolitica-tpu-general')
blob = bucket.blob('t5-data/knowledge-graph-train.tsv')
blob.upload_from_filename('knowledge-graph.tsv')
os.remove('knowledge-graph.tsv')
```
| github_jupyter |
# Experiment Collection #03
This notebook contains experiments regarding the use of a penalty term and enabling charging from the grid. These experiments are with the stochastic environment.
## 1. Basic Setup
```
# Jupyter setup
%load_ext autoreload
%autoreload 2
%config IPCompleter.greedy=True
import ray
ray.shutdown()
import ray
import ray.rllib
import ray.tune
import solara.envs.creator
## Initialising ray (starts background process for distributed computing)
ray.shutdown()
ray.init(logging_level="WARNING", object_store_memory= 25 * 10**9)
# Adding environment creator function to ray
ray.tune.registry.register_env("battery_control", solara.envs.creator.create_env)
# Output format of figures
OUT_FORMAT = ".svg"
```
## 2. Experiment Definition
```
from solara.constants import PROJECT_PATH
import solara.utils.rllib
EXPERIMENT_NAME = "experiment_03_penalty_grid_ausgrid_stochastic"
# RL environment configuration
ENV_CONFIG = {
'general': {
'type': 'battery_control.BatteryControlEnv',
'infeasible_control_penalty': ray.tune.grid_search([False, True]),
'grid_charging': ray.tune.grid_search([True, False]),
'logging_level': "RAY", # if using RLlib, set to 'RAY'
},
'components': {
'battery': {
'type': 'LithiumIonBattery',
'size': 10,
'chemistry': 'NMC',
'time_step_len': 1,
},
'solar': {
'type': 'DataPV',
'data_path': PROJECT_PATH + "/data/ausgrid/processed/house2_solar_gen.txt",
'fixed_sample_num': None, #200,
},
'load': {
'type': 'DataLoad',
'data_path': PROJECT_PATH + "/data/ausgrid/processed/house2_combined_load.txt",
'fixed_sample_num': None, #200,
},
'grid': {
'type': 'PeakGrid',
'peak_threshold': 1.0,
},
},
}
# RL agent configuration
AGENT_CONFIG = {
"env": "battery_control",
"env_config": ENV_CONFIG,
"gamma": 0.9999999,
"lr": 5e-5,
"model": {
"fcnet_hiddens": [256, 256, 256, 256],
"fcnet_activation": "relu",
"post_fcnet_activation": "tanh",
},
# Utilities settings
"framework": "torch",
"log_level": "WARNING",
#"num_workers": 9,
#"num_gpus": 1,
"callbacks": solara.utils.rllib.InfoCallback,
"seed" : ray.tune.randint(0, 10000000),
}
# Full experiment configuration including RL algorithm type
EXPERIMENT_CONFIG = {
"run_or_experiment": "PPO",
"config": AGENT_CONFIG,
"stop": {"training_iteration": 200},
"name": EXPERIMENT_NAME,
"local_dir": "./tmp/tune/",
"log_to_file": True,
"checkpoint_freq": 10,
}
# Other settings
PLOT_DIR = PROJECT_PATH + "/figures/experiments/"
# Parallelisation Setup
if False:
    num_workers = 4
    gpu_count = 1
    reserved_capacity = 0.01  # Driver GPU
    num_gpus_per_worker = (gpu_count - reserved_capacity) / num_workers
    AGENT_CONFIG["num_workers"] = num_workers
    # Bug fix: the original assigned the undefined name `num_gpus`,
    # which would raise NameError if this branch were ever enabled.
    AGENT_CONFIG["num_gpus"] = num_gpus_per_worker
    AGENT_CONFIG["num_envs_per_worker"] = 8
#AGENT_CONFIG["num_gpus"] = 1
#AGENT_CONFIG["num_envs_per_worker"]= 8
AGENT_CONFIG["num_workers"] = 10
AGENT_CONFIG["num_gpus"] = 1
#AGENT_CONFIG["remote_worker_envs"]= True
```
## 3. Running Experiment
```
# Setting visualisation in notebook
reporter = ray.tune.JupyterNotebookReporter(overwrite=True)
reporter.add_metric_column("custom_metrics/cost_mean")
reporter.add_metric_column("custom_metrics/power_diff_mean")
# Running experiment
analysis = ray.tune.run(
progress_reporter=reporter,
**EXPERIMENT_CONFIG,
resume=True,
trials=2
)
```
## 4. Visualisation
```
import os
import solara.plot.pyplot
import matplotlib.pyplot as plt
exp_path = EXPERIMENT_CONFIG["local_dir"] + EXPERIMENT_CONFIG["name"] + "/"
#exp_path = "./tmp/tune/PPO/"
state_files = [filename for filename in os.listdir(exp_path) if "experiment_state" in filename ]
last_state_file = sorted(state_files, reverse=True)[0]
analysis = ray.tune.ExperimentAnalysis(experiment_checkpoint_path=exp_path + last_state_file)
trials = analysis.fetch_trial_dataframes()
trials = {key: trials[key] for key in sorted(trials.keys())} # Sort trials
```
# 4.1 Training Progress Plots
```
# Creating helper function for plotting
import numpy as np
def plot_trials(trials,
                necessary_cond=None,
                other_conditions=None,
                experiment_name="default_experiment",
                plot_name="plot_00_default",
                plot_dir="./figures",
                optimal_value=None,
                figsize=(6, 3)):
    """Plot mean episode cost over training iterations for selected trials.

    Parameters
    ----------
    trials : dict
        Maps trial name to its progress dataframe; each dataframe must
        contain the column 'custom_metrics/cost_mean'.
    necessary_cond : (label, substring) pair or None
        Only trials whose name contains the substring are plotted.
    other_conditions : iterable of (label, substring) pairs or None
        Builds each legend label: "<label>" when the substring occurs in
        the trial name, otherwise "no <label>".
    experiment_name, plot_name, plot_dir : str
        Components of the saved figure's file name.
    optimal_value : float or None
        If given, drawn as a grey horizontal "Optimal control" line.
    figsize : tuple
        Figure size forwarded to the plotting setup.

    Side effects: saves the figure to
    ``plot_dir + experiment_name + "_" + plot_name + OUT_FORMAT``
    (note: OUT_FORMAT is a notebook-global).
    """
    solara.plot.pyplot.default_setup(figsize=figsize)
    # Bug fix: the documented default `other_conditions=None` crashed on
    # `list(None)`; treat None as "no extra conditions".
    other_conditions = list(other_conditions) if other_conditions is not None else []
    for trial_name, trial_data in trials.items():
        if necessary_cond is None or necessary_cond[1] in trial_name:
            label = ""
            for i, (cond_label, condition) in enumerate(other_conditions):
                if condition in trial_name:
                    label += cond_label
                else:
                    label += "no " + cond_label
                if i < len(other_conditions) - 1:
                    label += ", "
            label = label.capitalize()
            trace_len = 200
            x_values = np.arange(1, trace_len + 1)
            ticks_gap = 25
            x_ticks = [1] + list(np.arange(ticks_gap, trace_len + 1, ticks_gap)) + [trace_len]
            trace = trial_data["custom_metrics/cost_mean"][0:trace_len]
            plt.plot(x_values, trace, label=label)
    if optimal_value is not None:
        plt.plot(x_values, np.ones(len(x_values)) * optimal_value, label="Optimal control", color="grey")
    plt.semilogy()
    plt.legend()
    plt.xlabel("Training iteration")
    plt.ylabel("Average cost per episode (\$)")
    plt.xticks(x_ticks)
    plt.savefig(fname=plot_dir + experiment_name + "_" + plot_name + OUT_FORMAT)
plot_trials(trials,
necessary_cond=["grid charging", "grid_charging=False"],
other_conditions=[["penalty","infeasible_control_penalty=True"]],
experiment_name=EXPERIMENT_NAME,
plot_dir=PLOT_DIR,
plot_name="plot_01_penalty")
plot_trials(trials,
necessary_cond=["penalty","infeasible_control_penalty=False"],
other_conditions=[["grid charging", "grid_charging=True"]],
experiment_name=EXPERIMENT_NAME,
plot_dir=PLOT_DIR,
plot_name="plot_02_grid_charging_no_penalty")
plot_trials(trials,
necessary_cond=["penalty","infeasible_control_penalty=True"],
other_conditions=[["grid charging", "grid_charging=True"]],
experiment_name=EXPERIMENT_NAME,
plot_dir=PLOT_DIR,
plot_name="plot_03_grid_charging_with_penalty")
other_conditions = [["penalty","infeasible_control_penalty=True"],
["grid charging", "grid_charging=True"],]
plot_trials(trials, necessary_cond=None, other_conditions=other_conditions,
experiment_name=EXPERIMENT_NAME,
plot_dir=PLOT_DIR,
plot_name="plot_04_all_trials",
optimal_value=3.3682)
```
# 4.2 Policy Plots
```
# Helper functions
def get_episode_data_from_checkpoint(exp_path: str, iteration_num: int):
    """Replay episodes with the policy restored from the given checkpoint(s).

    Parameters
    ----------
    exp_path : str
        Trial directory containing the saved checkpoints; also used as the
        key into the experiment analysis configs.
    iteration_num : int or list
        Checkpoint iteration(s) to evaluate; forwarded as ``check_range``
        (callers in this notebook pass both an int and a two-element list).

    Returns
    -------
    The episode data for a single checkpoint, or the full list when
    several checkpoints were evaluated.
    """
    # NOTE(review): relies on the notebook-global `analysis` object built
    # in section 4 — confirm it is in scope before calling.
    trial_agent_config = analysis.get_all_configs()[exp_path]
    # Remove some unnecessary configs that may stop re-loading
    trial_agent_config.pop("callbacks")
    trial_agent_config.pop("num_gpus")
    agent = ray.rllib.agents.ppo.PPOTrainer(config=trial_agent_config)
    check_range = iteration_num
    episodes_data = solara.utils.rllib.run_episodes_from_checkpoints(agent=agent,
                                                                     check_save_path=exp_path,
                                                                     check_range=check_range)
    # Unwrap when only one checkpoint was requested.
    if len(episodes_data) == 1:
        return episodes_data[0]
    else:
        return episodes_data
def get_experiment_path(trials, grid_charging=True, penalty=True):
    """Return the first trial path whose name matches both flag settings."""
    grid_tag = "grid_charging={}".format(grid_charging)
    penalty_tag = "infeasible_control_penalty={}".format(penalty)
    matching = [path for path in trials if grid_tag in path and penalty_tag in path]
    # Raises IndexError when no trial matches, same as the original lookup.
    return matching[0]
# Plotting configuration
## Lines to draw in policy plot
POLICY_PLOT_CONF = {
    "selected_keys": ['load', 'pv_gen', 'energy_cont', 'net_load',
                      'charging_power', 'cost', 'price_threshold',
                      'actions'],
    "y_min": -1.3,
    "y_max": 1.4,
    "show_grid": False,
}
#import matplotlib
#matplotlib.use("pgf")
#matplotlib.rcParams.update({
#    "pgf.texsystem": "pdflatex",
#    'font.family': 'serif',
#    'text.usetex': True,
#    'pgf.rcfonts': False,
#})

# The four (grid_charging, penalty) experiment variants and the file-name
# suffix each iteration-150 policy plot is saved under. Refactored from four
# copy-pasted plot/savefig stanzas into one data-driven loop.
_POLICY_RUNS = [
    (False, False, "_plot_05_policy_iter150_no_grid_no_penalty_failure"),
    (False, True, "_plot_06_policy_iter150_no_grid_with_penalty"),
    (True, False, "_plot_07_policy_iter150_grid_no_penalty_failure"),
    (True, True, "_plot_08_policy_iter150_grid_with_penalty"),
]
for _grid, _penalty, _suffix in _POLICY_RUNS:
    exp_path = get_experiment_path(trials, grid_charging=_grid, penalty=_penalty)
    episode_data = get_episode_data_from_checkpoint(exp_path, iteration_num=150)
    solara.plot.pyplot.plot_episode(episode_data, title=None, **POLICY_PLOT_CONF)
    plt.savefig(fname=PLOT_DIR + EXPERIMENT_NAME + _suffix + OUT_FORMAT,
                bbox_inches='tight')

# Widget
# Interactive browser over iterations 1..151 of the grid+penalty experiment.
exp_path = get_experiment_path(trials, grid_charging=True, penalty=True)
episode_data = get_episode_data_from_checkpoint(exp_path, iteration_num=[1, 151])
import solara.plot.widgets
solara.plot.widgets.InteractiveEpisodes(episode_data,
    initial_visibility=POLICY_PLOT_CONF["selected_keys"])
```
| github_jupyter |
# Neural Machine Translation
Welcome to your first programming assignment for this week!
* You will build a Neural Machine Translation (NMT) model to translate human-readable dates ("25th of June, 2009") into machine-readable dates ("2009-06-25").
* You will do this using an attention model, one of the most sophisticated sequence-to-sequence models.
This notebook was produced together with NVIDIA's Deep Learning Institute.
## Table of Contents
- [Packages](#0)
- [1 - Translating Human Readable Dates Into Machine Readable Dates](#1)
- [1.1 - Dataset](#1-1)
- [2 - Neural Machine Translation with Attention](#2)
- [2.1 - Attention Mechanism](#2-1)
- [Exercise 1 - one_step_attention](#ex-1)
- [Exercise 2 - modelf](#ex-2)
- [Exercise 3 - Compile the Model](#ex-3)
- [3 - Visualizing Attention (Optional / Ungraded)](#3)
- [3.1 - Getting the Attention Weights From the Network](#3-1)
<a name='0'></a>
## Packages
```
from tensorflow.keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply
from tensorflow.keras.layers import RepeatVector, Dense, Activation, Lambda
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import load_model, Model
import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
from faker import Faker
import random
from tqdm import tqdm
from babel.dates import format_date
from nmt_utils import *
import matplotlib.pyplot as plt
%matplotlib inline
```
<a name='1'></a>
## 1 - Translating Human Readable Dates Into Machine Readable Dates
* The model you will build here could be used to translate from one language to another, such as translating from English to Hindi.
* However, language translation requires massive datasets and usually takes days of training on GPUs.
* To give you a place to experiment with these models without using massive datasets, we will perform a simpler "date translation" task.
* The network will input a date written in a variety of possible formats (*e.g. "the 29th of August 1958", "03/30/1968", "24 JUNE 1987"*)
* The network will translate them into standardized, machine readable dates (*e.g. "1958-08-29", "1968-03-30", "1987-06-24"*).
* We will have the network learn to output dates in the common machine-readable format YYYY-MM-DD.
<!--
Take a look at [nmt_utils.py](./nmt_utils.py) to see all the formatting. Count and figure out how the formats work, you will need this knowledge later. -->
<a name='1-1'></a>
### 1.1 - Dataset
We will train the model on a dataset of 10,000 human readable dates and their equivalent, standardized, machine readable dates. Let's run the following cells to load the dataset and print some examples.
```
# Number of (human-readable, machine-readable) date pairs to generate.
m = 10000
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)
# Peek at the first few generated examples.
dataset[:10]
```
You've loaded:
- `dataset`: a list of tuples of (human readable date, machine readable date).
- `human_vocab`: a python dictionary mapping all characters used in the human readable dates to an integer-valued index.
- `machine_vocab`: a python dictionary mapping all characters used in machine readable dates to an integer-valued index.
- **Note**: These indices are not necessarily consistent with `human_vocab`.
- `inv_machine_vocab`: the inverse dictionary of `machine_vocab`, mapping from indices back to characters.
Let's preprocess the data and map the raw text data into the index values.
- We will set Tx=30
- We assume Tx is the maximum length of the human readable date.
- If we get a longer input, we would have to truncate it.
- We will set Ty=10
- "YYYY-MM-DD" is 10 characters long.
```
# Display the input-character vocabulary, then check which devices TF can see.
human_vocab
tf.config.list_physical_devices()
Tx = 30  # max length of a human-readable date; longer inputs are truncated
Ty = 10  # length of the machine-readable output "YYYY-MM-DD"
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
```
You now have:
- `X`: a processed version of the human readable dates in the training set.
- Each character in X is replaced by an index (integer) mapped to the character using `human_vocab`.
- Each date is padded to ensure a length of $T_x$ using a special character (< pad >).
- `X.shape = (m, Tx)` where m is the number of training examples in a batch.
- `Y`: a processed version of the machine readable dates in the training set.
- Each character is replaced by the index (integer) it is mapped to in `machine_vocab`.
- `Y.shape = (m, Ty)`.
- `Xoh`: one-hot version of `X`
 - Each index in `X` is converted to the one-hot representation (if the index is 2, the one-hot version has the index position 2 set to 1, and the remaining positions are 0).
- `Xoh.shape = (m, Tx, len(human_vocab))`
- `Yoh`: one-hot version of `Y`
- Each index in `Y` is converted to the one-hot representation.
- `Yoh.shape = (m, Ty, len(machine_vocab))`.
- `len(machine_vocab) = 11` since there are 10 numeric digits (0 to 9) and the `-` symbol.
* Let's also look at some examples of preprocessed training examples.
* Feel free to play with `index` in the cell below to navigate the dataset and see how source/target dates are preprocessed.
```
# Change `index` to inspect how other source/target date pairs are preprocessed.
index = 0
print("Source date:", dataset[index][0])
print("Target date:", dataset[index][1])
print()
print("Source after preprocessing (indices):", X[index])
print("Target after preprocessing (indices):", Y[index])
print()
print("Source after preprocessing (one-hot):", Xoh[index])
print("Target after preprocessing (one-hot):", Yoh[index])
```
<a name='2'></a>
## 2 - Neural Machine Translation with Attention
* If you had to translate a book's paragraph from French to English, you would not read the whole paragraph, then close the book and translate.
* Even during the translation process, you would read/re-read and focus on the parts of the French paragraph corresponding to the parts of the English you are writing down.
* The attention mechanism tells a Neural Machine Translation model where it should pay attention to at any step.
<a name='2-1'></a>
### 2.1 - Attention Mechanism
In this part, you will implement the attention mechanism presented in the lecture videos.
* Here is a figure to remind you how the model works.
* The diagram on the left shows the attention model.
* The diagram on the right shows what one "attention" step does to calculate the attention variables $\alpha^{\langle t, t' \rangle}$.
* The attention variables $\alpha^{\langle t, t' \rangle}$ are used to compute the context variable $context^{\langle t \rangle}$ for each timestep in the output ($t=1, \ldots, T_y$).
<table>
<td>
<img src="images/attn_model.png" style="width:500;height:500px;"> <br>
</td>
<td>
<img src="images/attn_mechanism.png" style="width:500;height:500px;"> <br>
</td>
</table>
<caption><center> **Figure 1**: Neural machine translation with attention</center></caption>
Here are some properties of the model that you may notice:
#### Pre-attention and Post-attention LSTMs on both sides of the attention mechanism
- There are two separate LSTMs in this model (see diagram on the left): pre-attention and post-attention LSTMs.
- *Pre-attention* Bi-LSTM: the one at the bottom of the picture; it is a Bi-directional LSTM and comes *before* the attention mechanism.
- The attention mechanism is shown in the middle of the left-hand diagram.
- The pre-attention Bi-LSTM goes through $T_x$ time steps
- *Post-attention* LSTM: at the top of the diagram comes *after* the attention mechanism.
- The post-attention LSTM goes through $T_y$ time steps.
- The post-attention LSTM passes the hidden state $s^{\langle t \rangle}$ and cell state $c^{\langle t \rangle}$ from one time step to the next.
#### An LSTM has both a hidden state and cell state
* In the lecture videos, we were using only a basic RNN for the post-attention sequence model
* This means that the state captured by the RNN was outputting only the hidden state $s^{\langle t\rangle}$.
* In this assignment, we are using an LSTM instead of a basic RNN.
* So the LSTM has both the hidden state $s^{\langle t\rangle}$ and the cell state $c^{\langle t\rangle}$.
#### Each time step does not use predictions from the previous time step
* Unlike previous text generation examples earlier in the course, in this model, the post-attention LSTM at time $t$ does not take the previous time step's prediction $y^{\langle t-1 \rangle}$ as input.
* The post-attention LSTM at time 't' only takes the hidden state $s^{\langle t\rangle}$ and cell state $c^{\langle t\rangle}$ as input.
* We have designed the model this way because unlike language generation (where adjacent characters are highly correlated) there isn't as strong a dependency between the previous character and the next character in a YYYY-MM-DD date.
#### Concatenation of hidden states from the forward and backward pre-attention LSTMs
- $\overrightarrow{a}^{\langle t \rangle}$: hidden state of the forward-direction, pre-attention LSTM.
- $\overleftarrow{a}^{\langle t \rangle}$: hidden state of the backward-direction, pre-attention LSTM.
- $a^{\langle t \rangle} = [\overrightarrow{a}^{\langle t \rangle}, \overleftarrow{a}^{\langle t \rangle}]$: the concatenation of the activations of both the forward-direction $\overrightarrow{a}^{\langle t \rangle}$ and backward-directions $\overleftarrow{a}^{\langle t \rangle}$ of the pre-attention Bi-LSTM.
#### Computing "energies" $e^{\langle t, t' \rangle}$ as a function of $s^{\langle t-1 \rangle}$ and $a^{\langle t' \rangle}$
- Recall in the lesson videos "Attention Model", at time 6:45 to 8:16, the definition of "e" as a function of $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$.
- "e" is called the "energies" variable.
- $s^{\langle t-1 \rangle}$ is the hidden state of the post-attention LSTM
- $a^{\langle t' \rangle}$ is the hidden state of the pre-attention LSTM.
- $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ are fed into a simple neural network, which learns the function to output $e^{\langle t, t' \rangle}$.
- $e^{\langle t, t' \rangle}$ is then used when computing the attention $\alpha^{\langle t, t' \rangle}$ that $y^{\langle t \rangle}$ should pay to $a^{\langle t' \rangle}$.
- The diagram on the right of figure 1 uses a `RepeatVector` node to copy $s^{\langle t-1 \rangle}$'s value $T_x$ times.
- Then it uses `Concatenation` to concatenate $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$.
- The concatenation of $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ is fed into a "Dense" layer, which computes $e^{\langle t, t' \rangle}$.
- $e^{\langle t, t' \rangle}$ is then passed through a softmax to compute $\alpha^{\langle t, t' \rangle}$.
- Note that the diagram doesn't explicitly show variable $e^{\langle t, t' \rangle}$, but $e^{\langle t, t' \rangle}$ is above the Dense layer and below the Softmax layer in the diagram in the right half of figure 1.
- We'll explain how to use `RepeatVector` and `Concatenation` in Keras below.
#### Implementation Details
Let's implement this neural translator. You will start by implementing two functions: `one_step_attention()` and `model()`.
#### one_step_attention
* The inputs to the one_step_attention at time step $t$ are:
- $[a^{<1>},a^{<2>}, ..., a^{<T_x>}]$: all hidden states of the pre-attention Bi-LSTM.
- $s^{<t-1>}$: the previous hidden state of the post-attention LSTM
* one_step_attention computes:
- $[\alpha^{<t,1>},\alpha^{<t,2>}, ..., \alpha^{<t,T_x>}]$: the attention weights
- $context^{ \langle t \rangle }$: the context vector:
$$context^{<t>} = \sum_{t' = 1}^{T_x} \alpha^{<t,t'>}a^{<t'>}\tag{1}$$
##### Clarifying 'context' and 'c'
- In the lecture videos, the context was denoted $c^{\langle t \rangle}$
- In the assignment, we are calling the context $context^{\langle t \rangle}$.
- This is to avoid confusion with the post-attention LSTM's internal memory cell variable, which is also denoted $c^{\langle t \rangle}$.
<a name='ex-1'></a>
### Exercise 1 - one_step_attention
Implement `one_step_attention()`.
* The function `model()` will call the layers in `one_step_attention()` $T_y$ times using a for-loop.
* It is important that all $T_y$ copies have the same weights.
* It should not reinitialize the weights every time.
* In other words, all $T_y$ steps should have shared weights.
* Here's how you can implement layers with shareable weights in Keras:
1. Define the layer objects in a variable scope that is outside of the `one_step_attention` function. For example, defining the objects as global variables would work.
- Note that defining these variables inside the scope of the function `model` would technically work, since `model` will then call the `one_step_attention` function. For the purposes of making grading and troubleshooting easier, we are defining these as global variables. Note that the automatic grader will expect these to be global variables as well.
2. Call these objects when propagating the input.
* We have defined the layers you need as global variables.
* Please run the following cells to create them.
* Please note that the automatic grader expects these global variables with the given variable names. For grading purposes, please do not rename the global variables.
* Please check the Keras documentation to learn more about these layers. The layers are functions. Below are examples of how to call these functions.
* [RepeatVector()](https://www.tensorflow.org/api_docs/python/tf/keras/layers/RepeatVector)
```Python
var_repeated = repeat_layer(var1)
```
* [Concatenate()](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate)
```Python
concatenated_vars = concatenate_layer([var1,var2,var3])
```
* [Dense()](https://keras.io/layers/core/#dense)
```Python
var_out = dense_layer(var_in)
```
* [Activation()](https://keras.io/layers/core/#activation)
```Python
activation = activation_layer(var_in)
```
* [Dot()](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dot)
```Python
dot_product = dot_layer([var1,var2])
```
```
# Defined shared layers as global variables
# Created ONCE so that all Ty attention steps share the same weights.
repeator = RepeatVector(Tx)  # copies s_prev across the Tx input time steps
concatenator = Concatenate(axis=-1)  # joins a<t'> with the repeated s_prev
densor1 = Dense(10, activation = "tanh")  # hidden layer of the small "energy" network
densor2 = Dense(1, activation = "relu")  # outputs the scalar energy e<t,t'>
activator = Activation(softmax, name='attention_weights') # We are using a custom softmax(axis = 1) loaded in this notebook
dotor = Dot(axes = 1)  # attention-weighted sum of the a<t'>
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: one_step_attention

def one_step_attention(a, s_prev):
    """
    Performs one step of attention: Outputs a context vector computed as a dot product of the attention weights
    "alphas" and the hidden states "a" of the Bi-LSTM.

    Arguments:
    a -- hidden state output of the Bi-LSTM, numpy-array of shape (m, Tx, 2*n_a)
    s_prev -- previous hidden state of the (post-attention) LSTM, numpy-array of shape (m, n_s)

    Returns:
    context -- context vector, input of the next (post-attention) LSTM cell
    """
    ### START CODE HERE ###
    # Broadcast the decoder state to (m, Tx, n_s) so it can be paired with every a<t'>.
    repeated_state = repeator(s_prev)
    # Join the encoder states with the repeated decoder state ('a' listed first by convention).
    joined = concatenator([a, repeated_state])
    # Small two-layer "energy" network: tanh hidden layer, then a single relu unit.
    hidden_energies = densor1(joined)
    raw_energies = densor2(hidden_energies)
    # Softmax over the Tx axis turns raw energies into attention weights alphas.
    attention_weights = activator(raw_energies)
    # Attention-weighted sum of the a<t'>; fed to the next post-attention LSTM cell.
    context = dotor([attention_weights, a])
    ### END CODE HERE ###

    return context
# UNIT TEST
def one_step_attention_test(target):
    """Sanity-check the output type, shape and value range of one_step_attention."""
    m, Tx, n_a, n_s = 10, 30, 32, 64

    #np.random.seed(10)
    # NOTE: uniform(1, 0) draws from the same unit interval as uniform(0, 1).
    a = np.random.uniform(1, 0, (m, Tx, 2 * n_a)).astype(np.float32)
    s_prev = np.random.uniform(1, 0, (m, n_s)).astype(np.float32) * 1
    context = target(a, s_prev)

    assert type(context) == tf.python.framework.ops.EagerTensor, "Unexpected type. It should be a Tensor"
    assert tuple(context.shape) == (m, 1, n_s), "Unexpected output shape"
    # The context is a convex combination of values in (0, 1), so it stays in (0, 1).
    assert np.all(context.numpy() > 0), "All output values must be > 0 in this example"
    assert np.all(context.numpy() < 1), "All output values must be < 1 in this example"
    #assert np.allclose(context[0][0][0:5].numpy(), [0.50877404, 0.57160693, 0.45448175, 0.50074816, 0.53651875]), "Unexpected values in the result"
    print("\033[92mAll tests passed!")

one_step_attention_test(one_step_attention)
```
<a name='ex-2'></a>
### Exercise 2 - modelf
Implement `modelf()` as explained in figure 1 and the instructions:
* `modelf` first runs the input through a Bi-LSTM to get $[a^{<1>},a^{<2>}, ..., a^{<T_x>}]$.
* Then, `modelf` calls `one_step_attention()` $T_y$ times using a `for` loop. At each iteration of this loop:
- It gives the computed context vector $context^{<t>}$ to the post-attention LSTM.
- It runs the output of the post-attention LSTM through a dense layer with softmax activation.
- The softmax generates a prediction $\hat{y}^{<t>}$.
Again, we have defined global layers that will share weights to be used in `modelf()`.
```
n_a = 32 # number of units for the pre-attention, bi-directional LSTM's hidden state 'a'
n_s = 64 # number of units for the post-attention LSTM's hidden state "s"

# Please note, this is the post attention LSTM cell.
# Shared across all Ty decoding steps; return_state=True so s and c can be fed forward.
post_activation_LSTM_cell = LSTM(n_s, return_state = True) # Please do not modify this global variable.
output_layer = Dense(len(machine_vocab), activation=softmax)  # per-step character prediction
```
Now you can use these layers $T_y$ times in a `for` loop to generate the outputs, and their parameters will not be reinitialized. You will have to carry out the following steps:
1. Propagate the input `X` into a bi-directional LSTM.
* [Bidirectional](https://keras.io/layers/wrappers/#bidirectional)
* [LSTM](https://keras.io/layers/recurrent/#lstm)
* Remember that we want the LSTM to return a full sequence instead of just the last hidden state.
Sample code:
```Python
sequence_of_hidden_states = Bidirectional(LSTM(units=..., return_sequences=...))(the_input_X)
```
2. Iterate for $t = 0, \cdots, T_y-1$:
1. Call `one_step_attention()`, passing in the sequence of hidden states $[a^{\langle 1 \rangle},a^{\langle 2 \rangle}, ..., a^{ \langle T_x \rangle}]$ from the pre-attention bi-directional LSTM, and the previous hidden state $s^{<t-1>}$ from the post-attention LSTM to calculate the context vector $context^{<t>}$.
2. Give $context^{<t>}$ to the post-attention LSTM cell.
- Remember to pass in the previous hidden-state $s^{\langle t-1\rangle}$ and cell-states $c^{\langle t-1\rangle}$ of this LSTM
* This outputs the new hidden state $s^{<t>}$ and the new cell state $c^{<t>}$.
Sample code:
```Python
next_hidden_state, _ , next_cell_state =
post_activation_LSTM_cell(inputs=..., initial_state=[prev_hidden_state, prev_cell_state])
```
Please note that the layer is actually the "post attention LSTM cell". For the purposes of passing the automatic grader, please do not modify the naming of this global variable. This will be fixed when we deploy updates to the automatic grader.
3. Apply a dense, softmax layer to $s^{<t>}$, get the output.
Sample code:
```Python
output = output_layer(inputs=...)
```
4. Save the output by adding it to the list of outputs.
3. Create your Keras model instance.
* It should have three inputs:
* `X`, the one-hot encoded inputs to the model, of shape ($T_{x}, humanVocabSize)$
* $s^{\langle 0 \rangle}$, the initial hidden state of the post-attention LSTM
* $c^{\langle 0 \rangle}$, the initial cell state of the post-attention LSTM
* The output is the list of outputs.
Sample code
```Python
model = Model(inputs=[...,...,...], outputs=...)
```
```
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: model

def modelf(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size):
    """
    Build the attention-based neural machine translation model.

    Arguments:
    Tx -- length of the input sequence
    Ty -- length of the output sequence
    n_a -- hidden state size of the Bi-LSTM
    n_s -- hidden state size of the post-attention LSTM
    human_vocab_size -- size of the python dictionary "human_vocab"
    machine_vocab_size -- size of the python dictionary "machine_vocab"

    Returns:
    model -- Keras model instance
    """
    # Model inputs: the one-hot source sequence plus the decoder's initial
    # hidden state s0 and initial cell state c0.
    X = Input(shape=(Tx, human_vocab_size))
    s0 = Input(shape=(n_s,), name='s0')
    c0 = Input(shape=(n_s,), name='c0')
    s, c = s0, c0

    outputs = []  # collects the Ty per-step softmax predictions

    ### START CODE HERE ###
    # Step 1: encode the whole input with a bi-directional LSTM, keeping the full sequence.
    a = Bidirectional(LSTM(n_a, return_sequences=True))(X)

    # Step 2: decode one output character at a time, sharing weights across steps.
    for step in range(Ty):
        # Step 2.A: attention over the encoder states, conditioned on the previous s.
        context = one_step_attention(a, s)
        # Step 2.B: one step of the shared post-attention LSTM cell,
        # seeded with the previous hidden and cell states.
        s, _, c = post_activation_LSTM_cell(context, initial_state=[s, c])
        # Step 2.C: softmax over the machine vocabulary for this output position.
        out = output_layer(s)
        # Step 2.D: keep this step's prediction.
        outputs.append(out)

    # Step 3: assemble the Keras model over the three inputs and the output list.
    model = Model(inputs=[X, s0, c0], outputs=outputs)
    ### END CODE HERE ###

    return model
# UNIT TEST
from test_utils import *

def modelf_test(target):
    # Build a small model and compare its layer-by-layer summary
    # (layer type, output shape, parameter count, extras) to the expected table.
    # NOTE(review): uses the notebook-global Ty rather than a local one; m is unused.
    m = 10
    Tx = 30
    n_a = 32
    n_s = 64
    len_human_vocab = 37
    len_machine_vocab = 11
    model = target(Tx, Ty, n_a, n_s, len_human_vocab, len_machine_vocab)
    print(summary(model))
    expected_summary = [['InputLayer', [(None, 30, 37)], 0],
                        ['InputLayer', [(None, 64)], 0],
                        ['Bidirectional', (None, 30, 64), 17920],
                        ['RepeatVector', (None, 30, 64), 0, 30],
                        ['Concatenate', (None, 30, 128), 0],
                        ['Dense', (None, 30, 10), 1290, 'tanh'],
                        ['Dense', (None, 30, 1), 11, 'relu'],
                        ['Activation', (None, 30, 1), 0],
                        ['Dot', (None, 1, 64), 0],
                        ['InputLayer', [(None, 64)], 0],
                        ['LSTM',[(None, 64), (None, 64), (None, 64)], 33024,[(None, 1, 64), (None, 64), (None, 64)],'tanh'],
                        ['Dense', (None, 11), 715, 'softmax']]
    # comparator raises if the actual summary diverges from the expected one.
    comparator(summary(model), expected_summary)

modelf_test(modelf)
```
Run the following cell to create your model.
```
# Instantiate the attention model for our date-translation task.
model = modelf(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
```
#### Troubleshooting Note
* If you are getting repeated errors after an initially incorrect implementation of "model", but believe that you have corrected the error, you may still see error messages when building your model.
* A solution is to save and restart your kernel (or shutdown then restart your notebook), and re-run the cells.
Let's get a summary of the model to check if it matches the expected output.
```
# Print the architecture; totals should match the expected 52,960 parameters.
model.summary()
```
**Expected Output**:
Here is the summary you should see
<table>
<tr>
<td>
**Total params:**
</td>
<td>
52,960
</td>
</tr>
<tr>
<td>
**Trainable params:**
</td>
<td>
52,960
</td>
</tr>
<tr>
<td>
**Non-trainable params:**
</td>
<td>
0
</td>
</tr>
<tr>
<td>
**bidirectional_1's output shape **
</td>
<td>
(None, 30, 64)
</td>
</tr>
<tr>
<td>
**repeat_vector_1's output shape **
</td>
<td>
(None, 30, 64)
</td>
</tr>
<tr>
<td>
**concatenate_1's output shape **
</td>
<td>
(None, 30, 128)
</td>
</tr>
<tr>
<td>
**attention_weights's output shape **
</td>
<td>
(None, 30, 1)
</td>
</tr>
<tr>
<td>
**dot_1's output shape **
</td>
<td>
(None, 1, 64)
</td>
</tr>
<tr>
<td>
**dense_3's output shape **
</td>
<td>
(None, 11)
</td>
</tr>
</table>
<a name='ex-3'></a>
### Exercise 3 - Compile the Model
* After creating your model in Keras, you need to compile it and define the loss function, optimizer and metrics you want to use.
* Loss function: 'categorical_crossentropy'.
* Optimizer: [Adam](https://keras.io/optimizers/#adam) [optimizer](https://keras.io/optimizers/#usage-of-optimizers)
- learning rate = 0.005
- $\beta_1 = 0.9$
- $\beta_2 = 0.999$
- decay = 0.01
* metric: 'accuracy'
Sample code
```Python
optimizer = Adam(lr=..., beta_1=..., beta_2=..., decay=...)
model.compile(optimizer=..., loss=..., metrics=[...])
```
```
### START CODE HERE ### (≈ 2 lines)
# `lr`/`decay` are the legacy Adam argument names; the unit tests below assert on them.
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
### END CODE HERE ###

# UNIT TESTS
assert opt.lr == 0.005, "Set the lr parameter to 0.005"
assert opt.beta_1 == 0.9, "Set the beta_1 parameter to 0.9"
assert opt.beta_2 == 0.999, "Set the beta_2 parameter to 0.999"
assert opt.decay == 0.01, "Set the decay parameter to 0.01"
assert model.loss == "categorical_crossentropy", "Wrong loss. Use 'categorical_crossentropy'"
assert model.optimizer == opt, "Use the optimizer that you have instantiated"
assert model.compiled_metrics._user_metrics[0] == 'accuracy', "set metrics to ['accuracy']"
print("\033[92mAll tests passed!")
```
#### Define inputs and outputs, and fit the model
The last step is to define all your inputs and outputs to fit the model:
- You have input X of shape $(m = 10000, T_x = 30)$ containing the training examples.
- You need to create `s0` and `c0` to initialize your `post_attention_LSTM_cell` with zeros.
- Given the `model()` you coded, you need the "outputs" to be a list of $T_y = 10$ elements, each of shape (m, len(machine_vocab)).
    - The element `outputs[j]` holds the one-hot true labels for the $j^{th}$ output character across all training examples.
    - `outputs[j][i]` is the one-hot true label of the $j^{th}$ character of the $i^{th}$ training example (`X[i]`).
```
# Zero initial hidden/cell states for the post-attention LSTM, one row per example.
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
# Turn Yoh of shape (m, Ty, vocab) into a list of Ty arrays of shape (m, vocab),
# matching the model's list-of-outputs structure.
outputs = list(Yoh.swapaxes(0,1))
```
Let's now fit the model and run it for one epoch.
```
# One training epoch; reports the loss plus a per-position accuracy for each output step.
model.fit([Xoh, s0, c0], outputs, epochs=1, batch_size=100)
```
While training you can see the loss as well as the accuracy on each of the 10 positions of the output. The table below gives you an example of what the accuracies could be if the batch had 2 examples:
<img src="images/table.png" style="width:700;height:200px;"> <br>
<caption><center>Thus, `dense_2_acc_8: 0.89` means that you are predicting the 7th character of the output correctly 89% of the time in the current batch of data. </center></caption>
We have run this model for longer, and saved the weights. Run the next cell to load our weights. (By training a model for several minutes, you should be able to obtain a model of similar accuracy, but loading our model will save you time.)
```
# Load pre-trained weights (trained for longer) instead of training from scratch.
model.load_weights('models/model.h5')
```
You can now see the results on new examples.
```
# Translate a few hand-written example dates with the trained model.
EXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001']
s00 = np.zeros((1, n_s))  # initial decoder hidden state for a batch of 1
c00 = np.zeros((1, n_s))  # initial decoder cell state for a batch of 1
for example in EXAMPLES:
    # Encode the string as a (Tx, len(human_vocab)) one-hot matrix.
    # Fix: the original applied swapaxes(0, 1) twice in a row, which
    # cancels itself out — both calls are removed.
    source = string_to_int(example, Tx, human_vocab)
    #print(source)
    source = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), source)))
    # Add the batch dimension: (1, Tx, len(human_vocab)).
    source = np.expand_dims(source, axis=0)
    prediction = model.predict([source, s00, c00])
    # predict returns a list of Ty arrays; argmax picks the most likely
    # character index at each output position.
    prediction = np.argmax(prediction, axis=-1)
    output = [inv_machine_vocab[int(i)] for i in prediction]
    print("source:", example)
    print("output:", ''.join(output),"\n")
def translate_date(sentence):
    """Translate a human-readable date string and print the model's output.

    Relies on the notebook globals `model`, `Tx`, `human_vocab`,
    `inv_machine_vocab`, `s00` and `c00` defined above.
    """
    source = string_to_int(sentence, Tx, human_vocab)
    # One-hot encode the character indices: shape (Tx, len(human_vocab)).
    # NOTE: the original chained .swapaxes(0,1) with np.swapaxes(source, 0, 1),
    # a no-op pair; both redundant swaps are removed here.
    source = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), source)))
    source = np.expand_dims(source, axis=0)  # add batch dim -> (1, Tx, vocab)
    prediction = model.predict([source, s00, c00])
    prediction = np.argmax(prediction, axis=-1)  # most likely char per step
    output = [inv_machine_vocab[int(i)] for i in prediction]
    print("source:", sentence)
    print("output:", ''.join(output), "\n")
example = "4th of july 2001"
translate_date(example)
```
You can also change these examples to test with your own examples. The next part will give you a better sense of what the attention mechanism is doing--i.e., what part of the input the network is paying attention to when generating a particular output character.
<a name='3'></a>
## 3 - Visualizing Attention (Optional / Ungraded)
Since the problem has a fixed output length of 10, it is also possible to carry out this task using 10 different softmax units to generate the 10 characters of the output. But one advantage of the attention model is that each part of the output (such as the month) knows it needs to depend only on a small part of the input (the characters in the input giving the month). We can visualize which part of the input each part of the output is looking at.
Consider the task of translating "Saturday 9 May 2018" to "2018-05-09". If we visualize the computed $\alpha^{\langle t, t' \rangle}$ we get this:
<img src="images/date_attention.png" style="width:600;height:300px;"> <br>
<caption><center> **Figure 8**: Full Attention Map</center></caption>
Notice how the output ignores the "Saturday" portion of the input. None of the output timesteps are paying much attention to that portion of the input. We also see that 9 has been translated as 09 and May has been correctly translated into 05, with the output paying attention to the parts of the input it needs to to make the translation. The year mostly requires it to pay attention to the input's "18" in order to generate "2018."
<a name='3-1'></a>
### 3.1 - Getting the Attention Weights From the Network
Lets now visualize the attention values in your network. We'll propagate an example through the network, then visualize the values of $\alpha^{\langle t, t' \rangle}$.
To figure out where the attention values are located, let's start by printing a summary of the model.
```
model.summary()
```
Navigate through the output of `model.summary()` above. You can see that the layer named `attention_weights` outputs the `alphas` of shape (m, 30, 1) before `dot_2` computes the context vector for every time step $t = 0, \ldots, T_y-1$. Let's get the attention weights from this layer.
The function `attention_map()` pulls out the attention values from your model and plots them.
**Note**: We are aware that you might run into an error running the cell below despite a valid implementation for Exercise 2 - `modelf` above. If you get the error kindly report it on this [Topic](https://discourse.deeplearning.ai/t/error-in-optional-ungraded-part-of-neural-machine-translation-w3a1/1096) on [Discourse](https://discourse.deeplearning.ai) as it'll help us improve our content.
If you haven't joined our Discourse community you can do so by clicking on the link: http://bit.ly/dls-discourse
And don't worry about the error, it will not affect the grading for this assignment.
```
attention_map = plot_attention_map(model, human_vocab, inv_machine_vocab, "Tuesday 09 Oct 1993", num = 7, n_s = 64);
```
On the generated plot you can observe the values of the attention weights for each character of the predicted output. Examine this plot and check that the places where the network is paying attention makes sense to you.
In the date translation application, you will observe that most of the time attention helps predict the year, and doesn't have much impact on predicting the day or month.
### Congratulations!
You have come to the end of this assignment
#### Here's what you should remember
- Machine translation models can be used to map from one sequence to another. They are useful not just for translating human languages (like French->English) but also for tasks like date format translation.
- An attention mechanism allows a network to focus on the most relevant parts of the input when producing a specific part of the output.
- A network using an attention mechanism can translate from inputs of length $T_x$ to outputs of length $T_y$, where $T_x$ and $T_y$ can be different.
- You can visualize attention weights $\alpha^{\langle t,t' \rangle}$ to see what the network is paying attention to while generating each output.
Congratulations on finishing this assignment! You are now able to implement an attention model and use it to learn complex mappings from one sequence to another.
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/text/word2vec">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/word2vec.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/word2vec.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/word2vec.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
# Word2Vec
Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.
Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and
[Distributed
Representations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.
These papers proposed two methods for learning representations of words:
* **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.
* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.
You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/).
## Skip-gram and Negative Sampling
While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`.
Consider the following sentence of 8 words.
> The wide road shimmered in the hot sun.
The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes.
Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.

The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w<sub>1</sub>, w<sub>2</sub>, ... w<sub>T</sub>*, the objective can be written as the average log probability

where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.

where *v* and *v<sup>'<sup>* are target and context vector representations of words and *W* is vocabulary size.
Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (10<sup>5</sup>-10<sup>7</sup>) terms.
The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling.
The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *P<sub>n</sub>(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples.
A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are a few potential negative samples (when `window_size` is 2).
```
(hot, shimmered)
(wide, hot)
(wide, sun)
```
In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial.
## Setup
```
!pip install tqdm
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
```
### Vectorize an example sentence
Consider the following sentence:
`The wide road shimmered in the hot sun.`
Tokenize the sentence:
```
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
```
Create a vocabulary to save mappings from tokens to integer indices.
```
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
```
Create an inverse vocabulary to save mappings from integer indices to tokens.
```
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
```
Vectorize your sentence.
```
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
```
### Generate skip-grams from one sentence
The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.
Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
```
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
```
Take a look at few positive skip-grams.
```
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
```
### Negative sampling for one skip-gram
The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as true class to exclude it from being sampled.
Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
```
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
```
### Construct one training example
For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
```
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
```
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
```
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
```
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
```
print(f"target :", target)
print(f"context :", context )
print(f"label :", label )
```
### Summary
This picture summarizes the procedure of generating training example from a sentence.

## Compile all steps into one function
### Skip-gram Sampling table
A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality.
The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
```
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
```
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling.
Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective.
### Generate training data
Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
```
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
    """Generate skip-gram pairs with negative sampling for a list of sequences.

    Args:
        sequences: iterable of int-encoded sentences.
        window_size: words on each side of a target considered context.
        num_ns: number of negative samples per positive context word.
        vocab_size: vocabulary size to sample candidate words from.
        seed: seed for the candidate sampler (reproducibility).

    Returns:
        Tuple of lists ``(targets, contexts, labels)``, one entry per
        training example.
    """
    # Elements of each training example are appended to these lists.
    targets, contexts, labels = [], [], []
    # Sampling table: probability of keeping the i-th most frequent word
    # (Zipf-based subsampling of very common words).
    sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
    # Iterate over all sequences (sentences) in the dataset.
    for sequence in tqdm.tqdm(sequences):
        # Generate positive skip-gram pairs for one sequence (sentence).
        positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
            sequence,
            vocabulary_size=vocab_size,
            sampling_table=sampling_table,
            window_size=window_size,
            negative_samples=0)
        # Produce one training example (1 positive + num_ns negatives) per
        # positive skip-gram pair.
        for target_word, context_word in positive_skip_grams:
            context_class = tf.expand_dims(
                tf.constant([context_word], dtype="int64"), 1)
            negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
                true_classes=context_class,
                num_true=1,
                num_sampled=num_ns,
                unique=True,
                range_max=vocab_size,
                # Bug fix: use the `seed` parameter instead of the module-level
                # SEED constant, which silently ignored the argument.
                seed=seed,
                name="negative_sampling")
            # Build context and label vectors (for one target word).
            negative_sampling_candidates = tf.expand_dims(
                negative_sampling_candidates, 1)
            context = tf.concat([context_class, negative_sampling_candidates], 0)
            label = tf.constant([1] + [0] * num_ns, dtype="int64")
            # Append each element of the training example to the global lists.
            targets.append(target_word)
            contexts.append(context)
            labels.append(label)
    return targets, contexts, labels
```
## Prepare training data for Word2Vec
With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences!
### Download text corpus
You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
```
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
```
Read text from the file and take a look at the first few lines.
```
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
```
Use the non empty lines to construct a `tf.data.TextLineDataset` object for next steps.
```
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
```
### Vectorize sentences from the corpus
You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
```
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
    """Lowercase the text and strip all punctuation (for TextVectorization)."""
    punctuation_pattern = '[%s]' % re.escape(string.punctuation)
    lowered = tf.strings.lower(input_data)
    return tf.strings.regex_replace(lowered, punctuation_pattern, '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
```
Call `adapt` on the text dataset to create vocabulary.
```
vectorize_layer.adapt(text_ds.batch(1024))
```
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
```
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
```
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
```
def vectorize_text(text):
    """Map a raw string tensor through `vectorize_layer` to an int sequence."""
    expanded = tf.expand_dims(text, -1)
    vectorized = vectorize_layer(expanded)
    return tf.squeeze(vectorized)
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
```
### Obtain sequences from the dataset
You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples.
Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
```
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
```
Take a look at few examples from `sequences`.
```
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
```
### Generate training examples from sequences
`sequences` is now a list of int encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. Length of target, contexts and labels should be same, representing the total number of training examples.
```
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
```
### Configure the dataset for performance
To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
```
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
```
Add `cache()` and `prefetch()` to improve performance.
```
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
```
## Model and Training
The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset.
### Subclassed Word2Vec Model
Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:
* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.
* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.
* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.
* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.
With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result.
Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
```
class Word2Vec(Model):
    """Skip-gram negative-sampling Word2Vec as a subclassed Keras model."""

    def __init__(self, vocab_size, embedding_dim):
        super(Word2Vec, self).__init__()
        # Embedding looked up when a word appears as the *target*; its weights
        # are the word vectors exported later via the 'w2v_embedding' name.
        self.target_embedding = Embedding(vocab_size,
                                          embedding_dim,
                                          input_length=1,
                                          name="w2v_embedding", )
        # Separate embedding for words appearing as (positive or negative)
        # context. NOTE(review): relies on the module-level `num_ns` global
        # rather than a constructor argument — confirm it is defined first.
        self.context_embedding = Embedding(vocab_size,
                                           embedding_dim,
                                           input_length=num_ns+1)
        # Dot product between context and target embedding axes.
        self.dots = Dot(axes=(3,2))
        # Flattens the dot products into per-example logits.
        self.flatten = Flatten()

    def call(self, pair):
        """Return logits for a (target, context) pair batch."""
        target, context = pair
        we = self.target_embedding(target)
        ce = self.context_embedding(context)
        dots = self.dots([ce, we])
        return self.flatten(dots)
```
### Define loss function and compile model
For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:
``` python
def custom_loss(x_logit, y_true):
    """Negative-sampling loss: element-wise sigmoid cross-entropy on logits."""
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=x_logit)
```
It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
```
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
```
Also define a callback to log training statistics for tensorboard.
```
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
```
Train the model with `dataset` prepared above for some number of epochs.
```
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
```
Tensorboard now shows the Word2Vec model's accuracy and loss.
```python
%tensorboard --logdir logs
```

## Embedding lookup and analysis
Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
```
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
```
Create and save the vectors and metadata file.
```
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0: continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
```
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
```
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception as e:
pass
```
## Next steps
This tutorial has shown you how to implement a skip-gram Word2Vec model with negative sampling from scratch and visualize the obtained word embeddings.
* To learn more about word vectors and their mathematical representations, refer to these [notes](https://web.stanford.edu/class/cs224n/readings/cs224n-2019-notes01-wordvecs1.pdf).
* To learn more about advanced text processing, read the [Transformer model for language understanding](https://www.tensorflow.org/tutorials/text/transformer) tutorial.
* If you're interested in pre-trained embedding models, you may also be interested in [Exploring the TF-Hub CORD-19 Swivel Embeddings](https://www.tensorflow.org/hub/tutorials/cord_19_embeddings_keras), or the [Multilingual Universal Sentence Encoder](https://www.tensorflow.org/hub/tutorials/cross_lingual_similarity_with_tf_hub_multilingual_universal_encoder)
* You may also like to train the model on a new dataset (there are many available in [TensorFlow Datasets](https://www.tensorflow.org/datasets)).
| github_jupyter |
```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
with open('../Malaya-Dataset/dependency/gsd-ud-train.conllu.txt') as fopen:
corpus = fopen.read().split('\n')
with open('../Malaya-Dataset/dependency/gsd-ud-test.conllu.txt') as fopen:
corpus.extend(fopen.read().split('\n'))
with open('../Malaya-Dataset/dependency/gsd-ud-dev.conllu.txt') as fopen:
corpus.extend(fopen.read().split('\n'))
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from bert import modeling
import tensorflow as tf
import numpy as np
import unicodedata
import six
from functools import partial
SPIECE_UNDERLINE = 'โ'
def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False):
    """Normalize raw text: collapse whitespace, unify backtick quotes to
    double quotes, optionally strip combining accents and lowercase."""
    if remove_space:
        text = ' '.join(inputs.strip().split())
    else:
        text = inputs
    text = text.replace("``", '"').replace("''", '"')
    # Python 2 compatibility: work on unicode internally.
    if six.PY2 and isinstance(text, str):
        text = text.decode('utf-8')
    if not keep_accents:
        # NFKD decomposition separates base characters from their accents,
        # which are then dropped as combining marks.
        decomposed = unicodedata.normalize('NFKD', text)
        text = ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
    if lower:
        text = text.lower()
    return text
def encode_pieces(sp_model, text, return_unicode=True, sample=False):
    """Tokenize `text` into sentencepiece subword pieces.

    Args:
        sp_model: a loaded sentencepiece ``SentencePieceProcessor``.
        text: input string.
        return_unicode: Python-2-only flag — decode byte pieces back to unicode.
        sample: use stochastic subword sampling instead of the best split.

    Returns:
        A list of subword piece strings.
    """
    # return_unicode is used only for py2
    # note(zhiliny): in some systems, sentencepiece only accepts str for py2
    if six.PY2 and isinstance(text, unicode):
        text = text.encode('utf-8')
    if not sample:
        pieces = sp_model.EncodeAsPieces(text)
    else:
        # Sampling parameters: nbest_size=64, alpha=0.1.
        pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
    new_pieces = []
    for piece in pieces:
        # Split pieces that end in "<digit>,": re-encode the numeric part so
        # the trailing comma becomes its own piece (XLNet-style preprocessing).
        if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
            cur_pieces = sp_model.EncodeAsPieces(
                piece[:-1].replace(SPIECE_UNDERLINE, ''))
            # Keep the word-boundary marker consistent with the original piece:
            # drop a spurious leading marker the re-encoding may have added.
            if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                if len(cur_pieces[0]) == 1:
                    cur_pieces = cur_pieces[1:]
                else:
                    cur_pieces[0] = cur_pieces[0][1:]
            cur_pieces.append(piece[-1])
            new_pieces.extend(cur_pieces)
        else:
            new_pieces.append(piece)
    # note(zhiliny): convert back to unicode for py2
    if six.PY2 and return_unicode:
        ret_pieces = []
        for piece in new_pieces:
            if isinstance(piece, str):
                piece = piece.decode('utf-8')
            ret_pieces.append(piece)
        new_pieces = ret_pieces
    return new_pieces
def encode_ids(sp_model, text, sample=False):
    """Encode `text` straight to sentencepiece vocabulary ids.

    Thin wrapper: tokenize with ``encode_pieces`` (byte pieces, no py2
    unicode conversion) and map every piece through ``sp_model.PieceToId``.
    """
    subwords = encode_pieces(sp_model, text, return_unicode=False, sample=sample)
    return [sp_model.PieceToId(p) for p in subwords]
import sentencepiece as spm
sp_model = spm.SentencePieceProcessor()
sp_model.Load('sp10m.cased.bert.model')
with open('sp10m.cased.bert.vocab') as fopen:
v = fopen.read().split('\n')[:-1]
v = [i.split('\t') for i in v]
v = {i[0]: i[1] for i in v}
class Tokenizer:
    """Minimal BERT-style tokenizer facade over the module-level sentencepiece model.

    Implements only the three methods the BERT preprocessing code calls;
    all lookups delegate to the module-level ``sp_model``.
    """

    def __init__(self, v):
        # Vocabulary dict kept for API compatibility; lookups actually go
        # through `sp_model` directly.
        self.vocab = v

    def tokenize(self, string):
        """Split `string` into sentencepiece subword pieces."""
        return encode_pieces(sp_model, string, return_unicode=False, sample=False)

    def convert_tokens_to_ids(self, tokens):
        """Map subword pieces to vocabulary ids."""
        return [sp_model.PieceToId(token) for token in tokens]

    def convert_ids_to_tokens(self, ids):
        """Map vocabulary ids back to subword pieces."""
        return [sp_model.IdToPiece(idx) for idx in ids]
tag2idx = {'PAD': 0, 'X': 1}
tag_idx = 2
def process_corpus(corpus, until = None):
    """Parse CoNLL-U lines into BERT-ready dependency-parsing training rows.

    `corpus` is a list of raw lines from .conllu files: comment lines start
    with '#', token rows are tab-separated (col 1 = word form, col 3 = UPOS,
    col 6 = head index, col 7 = dependency relation), and a blank line ends a
    sentence. New relation labels are added to the module-level `tag2idx`
    (hence the `global` declaration). Each finished sentence is wrapped in
    [CLS]/[SEP] and subword-tokenised with the module-level `tokenizer`;
    continuation subwords get label 1 ('X') and head 0, and head indices are
    shifted by +1 to make room for the [CLS] position.

    NOTE(review): `until` is accepted but never used. The trailing `[:-1]`
    slices drop the last collected sentence — presumably to discard a
    partial sentence at end-of-file; confirm against the data files.

    Returns:
        (subword_sentences, token_id_rows, head_rows, label_rows,
         pos_rows, word-to-first-subword index rows), each missing the
        final collected sentence.
    """
    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
    sentences, words, depends, labels, pos, sequences = [], [], [], [], [], []
    temp_sentence, temp_word, temp_depend, temp_label, temp_pos = [], [], [], [], []
    first_time = True
    for sentence in corpus:
        try:
            if len(sentence):
                # Skip CoNLL-U comment lines.
                if sentence[0] == '#':
                    continue
                # Print the first data row once, as a sanity check.
                if first_time:
                    print(sentence)
                    first_time = False
                sentence = sentence.split('\t')
                # Register unseen dependency relations in the shared tag map.
                if sentence[7] not in tag2idx:
                    tag2idx[sentence[7]] = tag_idx
                    tag_idx += 1
                temp_word.append(sentence[1])
                # +1 shift: position 0 will be the [CLS] token.
                temp_depend.append(int(sentence[6]) + 1)
                temp_label.append(tag2idx[sentence[7]])
                temp_sentence.append(sentence[1])
                temp_pos.append(sentence[3])
            else:
                # Blank line: sentence boundary. Drop degenerate sentences.
                if len(temp_sentence) < 2 or len(temp_word) != len(temp_label):
                    temp_word = []
                    temp_depend = []
                    temp_label = []
                    temp_sentence = []
                    temp_pos = []
                    continue
                bert_tokens = ['[CLS]']
                labels_ = [0]
                depends_ = [0]
                seq_ = []
                for no, orig_token in enumerate(temp_word):
                    # Word-level label/head go on the first subword;
                    # continuation subwords get label 1 ('X') and head 0.
                    labels_.append(temp_label[no])
                    depends_.append(temp_depend[no])
                    t = tokenizer.tokenize(orig_token)
                    bert_tokens.extend(t)
                    labels_.extend([1] * (len(t) - 1))
                    depends_.extend([0] * (len(t) - 1))
                    seq_.append(no + 1)
                bert_tokens.append('[SEP]')
                labels_.append(0)
                depends_.append(0)
                words.append(tokenizer.convert_tokens_to_ids(bert_tokens))
                depends.append(depends_)
                labels.append(labels_)
                sentences.append(bert_tokens)
                pos.append(temp_pos)
                sequences.append(seq_)
                temp_word = []
                temp_depend = []
                temp_label = []
                temp_sentence = []
                temp_pos = []
        except Exception as e:
            # Best-effort parsing: log and skip malformed rows.
            print(e, sentence)
    return sentences[:-1], words[:-1], depends[:-1], labels[:-1], pos[:-1], sequences[:-1]
sentences, words, depends, labels, _, _ = process_corpus(corpus)
import json
with open('../Malaya-Dataset/dependency/augmented-dependency.json') as fopen:
augmented = json.load(fopen)
text_augmented, depends_augmented, labels_augmented = [], [], []
for a in augmented:
text_augmented.extend(a[0])
depends_augmented.extend(a[1])
labels_augmented.extend((np.array(a[2]) + 1).tolist())
def parse_XY(texts, depends, labels):
    """Convert augmented whitespace-tokenised sentences into BERT training rows.

    For each sentence: wrap in [CLS]/[SEP], subword-tokenize every word with
    the module-level `tokenizer`, keep the word-level label/head on the first
    subword, and pad continuation subwords with label 1 ('X') and head 0 —
    mirroring the scheme used in `process_corpus`.

    Returns:
        (token_id_rows, split_sentences, head_rows, label_rows).
    """
    outside, sentences, outside_depends, outside_labels = [], [], [], []
    for no, text in enumerate(texts):
        temp_depend = depends[no]
        temp_label = labels[no]
        s = text.split()
        sentences.append(s)
        bert_tokens = ['[CLS]']
        labels_ = [0]
        depends_ = [0]
        # NOTE(review): the inner `no` shadows the outer loop variable; harmless
        # here because the outer `no` is re-bound by enumerate each iteration.
        for no, orig_token in enumerate(s):
            labels_.append(temp_label[no])
            depends_.append(temp_depend[no])
            t = tokenizer.tokenize(orig_token)
            bert_tokens.extend(t)
            labels_.extend([1] * (len(t) - 1))
            depends_.extend([0] * (len(t) - 1))
        bert_tokens.append('[SEP]')
        labels_.append(0)
        depends_.append(0)
        outside.append(tokenizer.convert_tokens_to_ids(bert_tokens))
        outside_depends.append(depends_)
        outside_labels.append(labels_)
    return outside, sentences, outside_depends, outside_labels
outside, _, outside_depends, outside_labels = parse_XY(text_augmented,
depends_augmented,
labels_augmented)
words.extend(outside)
depends.extend(outside_depends)
labels.extend(outside_labels)
idx2tag = {v:k for k, v in tag2idx.items()}
from sklearn.model_selection import train_test_split
words_train, words_test, depends_train, depends_test, labels_train, labels_test \
= train_test_split(words, depends, labels, test_size = 0.2)
len(words_train), len(words_test)
train_X = words_train
train_Y = labels_train
train_depends = depends_train
test_X = words_test
test_Y = labels_test
test_depends = depends_test
BERT_INIT_CHKPNT = 'bert-base-v3/model.ckpt'
BERT_CONFIG = 'bert-base-v3/config.json'
epoch = 30
batch_size = 32
warmup_proportion = 0.1
num_train_steps = int(len(train_X) / batch_size * epoch)
num_warmup_steps = int(num_train_steps * warmup_proportion)
bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)
class BiAAttention:
    """Biaffine attention between decoder and encoder state sequences.

    ``forward`` returns a (batch, num_labels, len_decoder, len_encoder)
    score tensor combining a bilinear interaction with per-side linear
    terms. TF1-style: variables are created eagerly in ``__init__``, so
    instances must be built inside the graph they are used in.
    """
    def __init__(self, input_size_encoder, input_size_decoder, num_labels):
        self.input_size_encoder = input_size_encoder
        self.input_size_decoder = input_size_decoder
        self.num_labels = num_labels
        # Linear score terms for decoder / encoder states.
        self.W_d = tf.get_variable("W_d", shape=[self.num_labels, self.input_size_decoder],
                                initializer=tf.contrib.layers.xavier_initializer())
        self.W_e = tf.get_variable("W_e", shape=[self.num_labels, self.input_size_encoder],
                                initializer=tf.contrib.layers.xavier_initializer())
        # Bilinear interaction tensor.
        self.U = tf.get_variable("U", shape=[self.num_labels, self.input_size_decoder, self.input_size_encoder],
                                initializer=tf.contrib.layers.xavier_initializer())
    def forward(self, input_d, input_e, mask_d=None, mask_e=None):
        """Score every decoder/encoder position pair; masks zero out padded pairs."""
        batch = tf.shape(input_d)[0]
        length_decoder = tf.shape(input_d)[1]
        length_encoder = tf.shape(input_e)[1]
        # Linear terms, broadcast across the opposite sequence axis.
        out_d = tf.expand_dims(tf.matmul(self.W_d, tf.transpose(input_d, [0, 2, 1])), 3)
        out_e = tf.expand_dims(tf.matmul(self.W_e, tf.transpose(input_e, [0, 2, 1])), 2)
        # Bilinear term: (d U) e' per label.
        output = tf.matmul(tf.expand_dims(input_d, 1), self.U)
        output = tf.matmul(output, tf.transpose(tf.expand_dims(input_e, 1), [0, 1, 3, 2]))
        output = output + out_d + out_e
        if mask_d is not None:
            # Zero scores where either side is padding.
            d = tf.expand_dims(tf.expand_dims(mask_d, 1), 3)
            e = tf.expand_dims(tf.expand_dims(mask_e, 1), 2)
            output = output * d * e
        return output
class BiLinear:
    """Bilinear-style scoring layer over (left, right) feature pairs.

    Maps each left/right feature pair to ``out_features`` scores via a
    3-D tensor ``U`` plus two linear terms. TF1-style: variables are
    created eagerly in ``__init__``.
    """
    def __init__(self, left_features, right_features, out_features):
        self.left_features = left_features
        self.right_features = right_features
        self.out_features = out_features
        self.U = tf.get_variable("U-bi", shape=[out_features, left_features, right_features],
                                initializer=tf.contrib.layers.xavier_initializer())
        self.W_l = tf.get_variable("Wl", shape=[out_features, left_features],
                                initializer=tf.contrib.layers.xavier_initializer())
        self.W_r = tf.get_variable("Wr", shape=[out_features, right_features],
                                initializer=tf.contrib.layers.xavier_initializer())
    def forward(self, input_left, input_right):
        """Score each pair; output keeps input shape with last dim = out_features."""
        left_size = tf.shape(input_left)
        output_shape = tf.concat([left_size[:-1], [self.out_features]], axis = 0)
        # Flatten all leading dims so the products below are plain matmuls.
        batch = tf.cast(tf.reduce_prod(left_size[:-1]), tf.int32)
        input_left = tf.reshape(input_left, (batch, self.left_features))
        input_right = tf.reshape(input_right, (batch, self.right_features))
        # NOTE(review): the "bilinear" term below sums over the right-feature
        # axis instead of contracting with `input_right` — i.e. input_right
        # only enters through the linear W_r term. Confirm this is intended.
        tiled = tf.tile(tf.expand_dims(input_left, axis = 0), (self.out_features,1,1))
        output = tf.transpose(tf.reduce_sum(tf.matmul(tiled, self.U), axis = 2))
        output = output + tf.matmul(input_left, tf.transpose(self.W_l))\
        + tf.matmul(input_right, tf.transpose(self.W_r))
        return tf.reshape(output, output_shape)
class Model:
    """BERT-based biaffine dependency parser (TF1 graph mode).

    Builds placeholders, a BERT encoder, biaffine arc scoring and bilinear
    dependency-type scoring, plus a CRF loss over types that is toggled at
    feed time via the boolean `switch` placeholder. Exposed tensors:
    `heads_seq` (predicted heads), `tags_seq`/`logits` (predicted types),
    `cost`, `optimizer`, `accuracy` (types), `accuracy_depends` (heads).
    Depends on module-level `bert_config`, `tag2idx`, `num_train_steps`,
    `num_warmup_steps`.
    """
    def __init__(
        self,
        learning_rate,
        hidden_size_word,
        training = True,
        cov = 0.0):
        # NOTE(review): `cov` is accepted but never used below.
        self.words = tf.placeholder(tf.int32, (None, None))  # token ids (batch, seq); 0 = padding
        self.heads = tf.placeholder(tf.int32, (None, None))  # gold head index per token
        self.types = tf.placeholder(tf.int32, (None, None))  # gold dependency-type id per token
        self.switch = tf.placeholder(tf.bool, None)          # True => add the CRF loss to the cost
        self.mask = tf.cast(tf.math.not_equal(self.words, 0), tf.float32)
        self.maxlen = tf.shape(self.words)[1]
        self.lengths = tf.count_nonzero(self.words, 1)
        mask = self.mask
        heads = self.heads
        types = self.types
        # Projections into arc (head/child) and type (head/child) spaces.
        self.arc_h = tf.layers.Dense(hidden_size_word)
        self.arc_c = tf.layers.Dense(hidden_size_word)
        self.attention = BiAAttention(hidden_size_word, hidden_size_word, 1)
        self.type_h = tf.layers.Dense(hidden_size_word)
        self.type_c = tf.layers.Dense(hidden_size_word)
        self.bilinear = BiLinear(hidden_size_word, hidden_size_word, len(tag2idx))
        model = modeling.BertModel(
            config=bert_config,
            is_training=training,
            input_ids=self.words,
            use_one_hot_embeddings=False)
        output_layer = model.get_sequence_output()
        arc_h = tf.nn.elu(self.arc_h(output_layer))
        arc_c = tf.nn.elu(self.arc_c(output_layer))
        type_h = tf.nn.elu(self.type_h(output_layer))
        type_c = tf.nn.elu(self.type_c(output_layer))
        # Arc scores: squeeze the single-label axis -> (batch, len, len).
        out_arc = tf.squeeze(self.attention.forward(arc_h, arc_c, mask_d=self.mask,
                                                    mask_e=self.mask), axis = 1)
        self.out_arc = out_arc
        batch = tf.shape(out_arc)[0]
        max_len = tf.shape(out_arc)[1]
        sec_max_len = tf.shape(out_arc)[2]
        batch_index = tf.range(0, batch)
        # Decoding: forbid self-loops (diagonal) and padded positions with -inf.
        decode_arc = out_arc + tf.linalg.diag(tf.fill([max_len], -np.inf))
        minus_mask = tf.expand_dims(tf.cast(1 - mask, tf.bool), axis = 2)
        minus_mask = tf.tile(minus_mask, [1, 1, sec_max_len])
        decode_arc = tf.where(minus_mask, tf.fill(tf.shape(decode_arc), -np.inf), decode_arc)
        self.decode_arc = decode_arc
        # Greedy head prediction per token.
        self.heads_seq = tf.argmax(decode_arc, axis = 1)
        self.heads_seq = tf.identity(self.heads_seq, name = 'heads_seq')
        # Gather type-head states at the *predicted* heads (inference path).
        t = tf.cast(tf.transpose(self.heads_seq), tf.int32)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                       tf.expand_dims(t, axis = 0)], axis = 0))
        type_h = tf.gather_nd(type_h, concatenated)
        out_type = self.bilinear.forward(type_h, type_c)
        self.tags_seq = tf.argmax(out_type, axis = 2)
        self.tags_seq = tf.identity(self.tags_seq, name = 'tags_seq')
        # CRF likelihood/decoding over the type scores.
        log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
            out_type, self.types, self.lengths
        )
        crf_loss = tf.reduce_mean(-log_likelihood)
        self.logits, _ = tf.contrib.crf.crf_decode(
            out_type, transition_params, self.lengths
        )
        self.logits = tf.identity(self.logits, name = 'logits')
        batch = tf.shape(out_arc)[0]
        max_len = tf.shape(out_arc)[1]
        batch_index = tf.range(0, batch)
        # Gather type-head states at the *gold* heads (training path).
        # NOTE(review): this gathers from `type_h`, which was already
        # overwritten by the predicted-head gather above — confirm intended.
        t = tf.transpose(heads)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                       tf.expand_dims(t, axis = 0)], axis = 0))
        type_h = tf.gather_nd(type_h, concatenated)
        out_type = self.bilinear.forward(type_h, type_c)
        # Mask padding with a large negative before log-softmax.
        minus_inf = -1e8
        minus_mask = (1 - mask) * minus_inf
        out_arc = out_arc + tf.expand_dims(minus_mask, axis = 2) + tf.expand_dims(minus_mask, axis = 1)
        loss_arc = tf.nn.log_softmax(out_arc, dim=1)
        loss_type = tf.nn.log_softmax(out_type, dim=2)
        loss_arc = loss_arc * tf.expand_dims(mask, axis = 2) * tf.expand_dims(mask, axis = 1)
        loss_type = loss_type * tf.expand_dims(mask, axis = 2)
        # Real tokens minus one position per sentence (presumably the [CLS] —
        # TODO confirm).
        num = tf.reduce_sum(mask) - tf.cast(batch, tf.float32)
        child_index = tf.tile(tf.expand_dims(tf.range(0, max_len), 1), [1, batch])
        # Pick out log-probabilities of the gold arcs ...
        t = tf.transpose(heads)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                       tf.expand_dims(t, axis = 0),
                                       tf.expand_dims(child_index, axis = 0)], axis = 0))
        loss_arc = tf.gather_nd(loss_arc, concatenated)
        loss_arc = tf.transpose(loss_arc, [1, 0])
        # ... and of the gold types.
        t = tf.transpose(types)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                       tf.expand_dims(child_index, axis = 0),
                                       tf.expand_dims(t, axis = 0)], axis = 0))
        loss_type = tf.gather_nd(loss_type, concatenated)
        loss_type = tf.transpose(loss_type, [1, 0])
        # Negative log-likelihood of arcs + types; optionally plus CRF loss.
        cost = (tf.reduce_sum(-loss_arc) / num) + (tf.reduce_sum(-loss_type) / num)
        self.cost = tf.cond(self.switch, lambda: cost + crf_loss, lambda: cost)
        self.optimizer = optimization.create_optimizer(self.cost, learning_rate,
                                                       num_train_steps, num_warmup_steps, False)
        # Type-tagging accuracy over real (non-padded) positions.
        mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)
        self.prediction = tf.boolean_mask(self.logits, mask)
        mask_label = tf.boolean_mask(self.types, mask)
        correct_pred = tf.equal(tf.cast(self.prediction, tf.int32), mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # Head-attachment accuracy over real positions.
        self.prediction = tf.cast(tf.boolean_mask(self.heads_seq, mask), tf.int32)
        mask_label = tf.boolean_mask(self.heads, mask)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy_depends = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.reset_default_graph()
sess = tf.InteractiveSession()
learning_rate = 2e-5
hidden_size_word = 128
model = Model(learning_rate, hidden_size_word)
sess.run(tf.global_variables_initializer())
var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'bert')
saver = tf.train.Saver(var_list = var_lists)
saver.restore(sess, BERT_INIT_CHKPNT)
from tensorflow.keras.preprocessing.sequence import pad_sequences
batch_x = train_X[:5]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = train_Y[:5]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = train_depends[:5]
batch_depends = pad_sequences(batch_depends,padding='post')
sess.run([model.accuracy, model.accuracy_depends, model.cost],
feed_dict = {model.words: batch_x,
model.types: batch_y,
model.heads: batch_depends,
model.switch: False})
sess.run([model.accuracy, model.accuracy_depends, model.cost],
feed_dict = {model.words: batch_x,
model.types: batch_y,
model.heads: batch_depends,
model.switch: True})
tags_seq, heads = sess.run(
[model.logits, model.heads_seq],
feed_dict = {
model.words: batch_x,
},
)
tags_seq[0], heads[0], batch_depends[0]
from tqdm import tqdm
epoch = 20
for e in range(epoch):
train_acc, train_loss = [], []
test_acc, test_loss = [], []
train_acc_depends, test_acc_depends = [], []
pbar = tqdm(
range(0, len(train_X), batch_size), desc = 'train minibatch loop'
)
for i in pbar:
index = min(i + batch_size, len(train_X))
batch_x = train_X[i: index]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = train_Y[i: index]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = train_depends[i: index]
batch_depends = pad_sequences(batch_depends,padding='post')
acc_depends, acc, cost, _ = sess.run(
[model.accuracy_depends, model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.words: batch_x,
model.types: batch_y,
model.heads: batch_depends,
model.switch: False
},
)
train_loss.append(cost)
train_acc.append(acc)
train_acc_depends.append(acc_depends)
pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'test minibatch loop'
)
for i in pbar:
index = min(i + batch_size, len(test_X))
batch_x = test_X[i: index]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = test_Y[i: index]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = test_depends[i: index]
batch_depends = pad_sequences(batch_depends,padding='post')
acc_depends, acc, cost = sess.run(
[model.accuracy_depends, model.accuracy, model.cost],
feed_dict = {
model.words: batch_x,
model.types: batch_y,
model.heads: batch_depends,
model.switch: False
},
)
test_loss.append(cost)
test_acc.append(acc)
test_acc_depends.append(acc_depends)
pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)
print(
'epoch: %d, training loss: %f, training acc: %f, training depends: %f, valid loss: %f, valid acc: %f, valid depends: %f\n'
% (e, np.mean(train_loss),
np.mean(train_acc),
np.mean(train_acc_depends),
np.mean(test_loss),
np.mean(test_acc),
np.mean(test_acc_depends)
))
from tqdm import tqdm
epoch = 5
for e in range(epoch):
train_acc, train_loss = [], []
test_acc, test_loss = [], []
train_acc_depends, test_acc_depends = [], []
pbar = tqdm(
range(0, len(train_X), batch_size), desc = 'train minibatch loop'
)
for i in pbar:
index = min(i + batch_size, len(train_X))
batch_x = train_X[i: index]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = train_Y[i: index]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = train_depends[i: index]
batch_depends = pad_sequences(batch_depends,padding='post')
acc_depends, acc, cost, _ = sess.run(
[model.accuracy_depends, model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.words: batch_x,
model.types: batch_y,
model.heads: batch_depends,
model.switch: True
},
)
train_loss.append(cost)
train_acc.append(acc)
train_acc_depends.append(acc_depends)
pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'test minibatch loop'
)
for i in pbar:
index = min(i + batch_size, len(test_X))
batch_x = test_X[i: index]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = test_Y[i: index]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = test_depends[i: index]
batch_depends = pad_sequences(batch_depends,padding='post')
acc_depends, acc, cost = sess.run(
[model.accuracy_depends, model.accuracy, model.cost],
feed_dict = {
model.words: batch_x,
model.types: batch_y,
model.heads: batch_depends,
model.switch: True
},
)
test_loss.append(cost)
test_acc.append(acc)
test_acc_depends.append(acc_depends)
pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)
print(
'epoch: %d, training loss: %f, training acc: %f, training depends: %f, valid loss: %f, valid acc: %f, valid depends: %f\n'
% (e, np.mean(train_loss),
np.mean(train_acc),
np.mean(train_acc_depends),
np.mean(test_loss),
np.mean(test_acc),
np.mean(test_acc_depends)
))
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'bert-base-dependency/model.ckpt')
tf.reset_default_graph()
sess = tf.InteractiveSession()
learning_rate = 2e-5
hidden_size_word = 128
model = Model(learning_rate, hidden_size_word, training = False)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(sess, 'bert-base-dependency/model.ckpt')
def pred2label(pred):
    """Map a batch of integer tag predictions to their string tags.

    Relies on the module-level ``idx2tag`` lookup table; returns a list of
    tag-string lists, one per input row.
    """
    return [[idx2tag[tag_id] for tag_id in row] for row in pred]
def evaluate(heads_pred, types_pred, heads, types, lengths,
             symbolic_root=False, symbolic_end=False):
    """Compute attachment scores for a batch of dependency predictions.

    Args:
        heads_pred: (batch, seq) predicted head indices.
        types_pred: (batch, seq) predicted dependency-type ids.
        heads: (batch, seq) gold head indices.
        types: (batch, seq) gold dependency-type ids.
        lengths: per-sample number of positions to score.
        symbolic_root: skip position 0 (an artificial root token).
        symbolic_end: skip the final scored position (an artificial end token).

    Returns:
        (unlabeled accuracy, labeled accuracy, root-attachment accuracy).
    """
    n_samples, _ = heads_pred.shape
    first = 1 if symbolic_root else 0
    tail = 1 if symbolic_end else 0
    arc_hits = 0.
    label_hits = 0.
    seen = 0.
    root_hits = 0.
    root_seen = 0.
    for b in range(n_samples):
        for j in range(first, lengths[b] - tail):
            seen += 1
            if heads[b, j] == heads_pred[b, j]:
                arc_hits += 1
                # A labeled hit requires the arc to be correct as well.
                if types[b, j] == types_pred[b, j]:
                    label_hits += 1
            if heads[b, j] == 0:
                root_seen += 1
                if heads_pred[b, j] == 0:
                    root_hits += 1
    return arc_hits / seen, label_hits / seen, root_hits / root_seen
arcs, types, roots = [], [], []
real_Y, predict_Y = [], []
for i in tqdm(range(0, len(test_X), batch_size)):
index = min(i + batch_size, len(test_X))
batch_x = test_X[i: index]
batch_x = pad_sequences(batch_x,padding='post')
batch_y = test_Y[i: index]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = test_depends[i: index]
batch_depends = pad_sequences(batch_depends,padding='post')
tags_seq, heads = sess.run(
[model.logits, model.heads_seq],
feed_dict = {
model.words: batch_x,
},
)
arc_accuracy, type_accuracy, root_accuracy = evaluate(heads - 1, tags_seq, batch_depends - 1, batch_y,
np.count_nonzero(batch_x, axis = 1))
arcs.append(arc_accuracy)
types.append(type_accuracy)
roots.append(root_accuracy)
predicted = pred2label(tags_seq)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
temp_real_Y = []
for r in real_Y:
temp_real_Y.extend(r)
temp_predict_Y = []
for r in predict_Y:
temp_predict_Y.extend(r)
from sklearn.metrics import classification_report
print(classification_report(temp_real_Y, temp_predict_Y, digits = 5))
print('arc accuracy:', np.mean(arcs))
print('types accuracy:', np.mean(types))
print('root accuracy:', np.mean(roots))
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'Placeholder' in n.name
or '_seq' in n.name
or 'alphas' in n.name
or 'logits' in n.name
or 'self/Softmax' in n.name)
and 'Adam' not in n.name
and 'beta' not in n.name
and 'global_step' not in n.name
and 'adam' not in n.name
and 'gradients/bert' not in n.name
]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in `model_dir` into a standalone GraphDef.

    Restores the checkpoint's meta-graph, converts all variables reachable
    from `output_node_names` (a comma-separated string) into constants, and
    writes the result to `<checkpoint dir>/frozen_model.pb`.

    Raises:
        AssertionError: if `model_dir` does not exist.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    # Write the frozen graph next to the checkpoint files.
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('bert-base-dependency', strings)
string = 'husein makan ayam'
import re
def entities_textcleaning(string, lowering = False):
    """Clean text for entity recognition, POS tagging and dependency parsing.

    Strips every character outside ``A-Za-z0-9-/()`` and space, collapses
    whitespace, optionally lowercases the working copy, and title-cases
    fully-uppercase words.

    Returns:
        (original_tokens, processed_tokens) — two parallel token lists.
    """
    cleaned = re.sub(r'[^A-Za-z0-9\-\/() ]+', ' ', string)
    cleaned = re.sub(r'[ ]+', ' ', cleaned).strip()
    originals = cleaned.split()
    if lowering:
        cleaned = cleaned.lower()
    pairs = []
    for idx, token in enumerate(cleaned.split()):
        if not len(token):
            continue
        fixed = token.title() if token.isupper() else token
        pairs.append((originals[idx], fixed))
    return [pair[0] for pair in pairs], [pair[1] for pair in pairs]
def parse_X(left):
    """Wrap a word list in [CLS]/[SEP] and subword-tokenize it.

    Uses the module-level `tokenizer`.

    Returns:
        (token_ids, subword_tokens).
    """
    pieces = ['[CLS]']
    for word in left:
        pieces.extend(tokenizer.tokenize(word))
    pieces.append('[SEP]')
    return tokenizer.convert_tokens_to_ids(pieces), pieces
sequence = entities_textcleaning(string)[1]
parsed_sequence, bert_sequence = parse_X(sequence)
def merge_sentencepiece_tokens_tagging(x, y):
    """Collapse sentencepiece subwords back into words, one tag per word.

    `x` is the subword sequence (with '[CLS]'/'[SEP]' markers), `y` the
    per-subword tags. A subword that does not start with the sentencepiece
    word marker 'โ' is glued onto the previous entry; the merged word keeps
    the tag of its first subword. Returns (words, tags) with the special
    markers dropped and the word markers stripped.
    """
    special = ['[CLS]', '[SEP]']
    paired = []
    total = len(x)
    pos = 0
    while pos < total:
        token, label = x[pos], y[pos]
        if not token.startswith('โ') and token not in special:
            # Continuation subword: pull the previous entry back out and keep
            # appending pieces until the next word-initial piece or marker.
            glued, first_label = paired.pop()
            glued_labels = [first_label]
            while (
                not token.startswith('โ')
                and token not in special
            ):
                glued = glued + token.replace('โ', '')
                glued_labels.append(label)
                pos = pos + 1
                token, label = x[pos], y[pos]
            # The merged word keeps the tag of its first subword.
            paired.append((glued, glued_labels[0]))
        else:
            paired.append((token, label))
            pos = pos + 1
    merged_words = [
        pair[0].replace('โ', '')
        for pair in paired
        if pair[0] not in special
    ]
    merged_labels = [pair[1] for pair in paired if pair[0] not in special]
    return merged_words, merged_labels
def load_graph(frozen_graph_filename):
    """Load a frozen TensorFlow GraphDef from disk into a fresh tf.Graph.

    Note: tensors imported this way are namespaced under 'import/', e.g.
    'import/logits:0'.
    """
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    return graph
g = load_graph('bert-base-dependency/frozen_model.pb')
x = g.get_tensor_by_name('import/Placeholder:0')
heads_seq = g.get_tensor_by_name('import/heads_seq:0')
tags_seq = g.get_tensor_by_name('import/logits:0')
test_sess = tf.InteractiveSession(graph = g)
h, t = test_sess.run([heads_seq, tags_seq],
feed_dict = {
x: [parsed_sequence],
},
)
h = h[0] - 1
t = [idx2tag[d] for d in t[0]]
merged_h = merge_sentencepiece_tokens_tagging(bert_sequence, h)
merged_t = merge_sentencepiece_tokens_tagging(bert_sequence, t)
print(list(zip(merged_h[0], merged_h[1])))
print(list(zip(merged_t[0], merged_t[1])))
import boto3
bucketName = 'huseinhouse-storage'
Key = 'bert-base-dependency/frozen_model.pb'
outPutname = "v34/dependency/bert-base-dependency.pb"
s3 = boto3.client('s3')
s3.upload_file(Key,bucketName,outPutname)
```
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/5.1_Text_classification_examples_in_SparkML_SparkNLP.ipynb)
# Text Classification with Spark NLP
```
import os
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
# Install pyspark
! pip install --ignore-installed -q pyspark==2.4.4
! pip install --ignore-installed -q spark-nlp==2.5.1
import os
import sys
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from sparknlp.annotator import *
from sparknlp.common import *
from sparknlp.base import *
import pandas as pd
import sparknlp
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
! wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/data/news_category_train.csv
! wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/data/news_category_test.csv
# newsDF = spark.read.parquet("data/news_category.parquet") >> if it is a parquet
newsDF = spark.read \
.option("header", True) \
.csv("news_category_train.csv")
newsDF.show(truncate=50)
newsDF.show(truncate=50)
newsDF.take(2)
from pyspark.sql.functions import col
newsDF.groupBy("category") \
.count() \
.orderBy(col("count").desc()) \
.show()
```
## Building Classification Pipeline
### LogReg with CountVectorizer
Tokenizer: Tokenization
stopwordsRemover: Remove Stop Words
countVectors: Count vectors ("document-term vectors")
```
from pyspark.ml.feature import CountVectorizer, HashingTF, IDF, OneHotEncoder, StringIndexer, VectorAssembler, SQLTransformer
%%time
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
stemmer = Stemmer() \
.setInputCols(["cleanTokens"]) \
.setOutputCol("stem")
finisher = Finisher() \
.setInputCols(["stem"]) \
.setOutputCols(["token_features"]) \
.setOutputAsArray(True) \
.setCleanAnnotations(False)
countVectors = CountVectorizer(inputCol="token_features", outputCol="features", vocabSize=10000, minDF=5)
label_stringIdx = StringIndexer(inputCol = "category", outputCol = "label")
nlp_pipeline = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
stemmer,
finisher,
countVectors,
label_stringIdx])
nlp_model = nlp_pipeline.fit(newsDF)
processed = nlp_model.transform(newsDF)
processed.count()
processed.select('description','token_features').show(truncate=50)
processed.select('token_features').take(2)
processed.select('features').take(2)
processed.select('description','features','label').show()
# set seed for reproducibility
(trainingData, testData) = processed.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
trainingData.printSchema()
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0)
lrModel = lr.fit(trainingData)
predictions = lrModel.transform(testData)
predictions.filter(predictions['prediction'] == 0) \
.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
evaluator.evaluate(predictions)
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score

# Evaluate the CountVectorizer + LogisticRegression model.
# BUG FIX: this cell previously read from `predictions_tf`, which is only
# created later in the TF-IDF section — running top-to-bottom raised a
# NameError. At this point only `predictions` (the CountVectorizer model
# output from the cell above) exists.
y_true = predictions.select("label")
y_true = y_true.toPandas()
y_pred = predictions.select("prediction")
y_pred = y_pred.toPandas()
y_pred.prediction.value_counts()
cnf_matrix = confusion_matrix(list(y_true.label.astype(int)), list(y_pred.prediction.astype(int)))
cnf_matrix
print(classification_report(y_true.label, y_pred.prediction))
print(accuracy_score(y_true.label, y_pred.prediction))
```
### LogReg with TFIDF
```
from pyspark.ml.feature import HashingTF, IDF
hashingTF = HashingTF(inputCol="token_features", outputCol="rawFeatures", numFeatures=10000)
idf = IDF(inputCol="rawFeatures", outputCol="features", minDocFreq=5) #minDocFreq: remove sparse terms
nlp_pipeline_tf = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
stemmer,
finisher,
hashingTF,
idf,
label_stringIdx])
nlp_model_tf = nlp_pipeline_tf.fit(newsDF)
processed_tf = nlp_model_tf.transform(newsDF)
processed_tf.count()
# set seed for reproducibility
processed_tf.select('description','features','label').show()
(trainingData, testData) = processed_tf.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
lrModel_tf = lr.fit(trainingData)
predictions_tf = lrModel_tf.transform(testData)
predictions_tf.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
y_true = predictions_tf.select("label")
y_true = y_true.toPandas()
y_pred = predictions_tf.select("prediction")
y_pred = y_pred.toPandas()
print(classification_report(y_true.label, y_pred.prediction))
print(accuracy_score(y_true.label, y_pred.prediction))
```
### Random Forest with TFIDF
```
from pyspark.ml.classification import RandomForestClassifier
rf = RandomForestClassifier(labelCol="label", \
featuresCol="features", \
numTrees = 100, \
maxDepth = 4, \
maxBins = 32)
# Train model with Training Data
rfModel = rf.fit(trainingData)
predictions_rf = rfModel.transform(testData)
predictions_rf.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
y_true = predictions_rf.select("label")
y_true = y_true.toPandas()
y_pred = predictions_rf.select("prediction")
y_pred = y_pred.toPandas()
print(classification_report(y_true.label, y_pred.prediction))
print(accuracy_score(y_true.label, y_pred.prediction))
```
## LogReg with Spark NLP Glove Word Embeddings
```
# --- Spark NLP preprocessing + GloVe word embeddings -> LogReg pipeline ---
# Raw text column "description" -> Spark NLP Document annotation.
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
# Strips punctuation / non-word characters from tokens.
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
# Default pretrained WordEmbeddingsModel (GloVe 100d) over the cleaned tokens.
glove_embeddings = WordEmbeddingsModel().pretrained() \
.setInputCols(["document",'cleanTokens'])\
.setOutputCol("embeddings")\
.setCaseSensitive(False)
# Average the per-token vectors into one vector per document.
embeddingsSentence = SentenceEmbeddings() \
.setInputCols(["document", "embeddings"]) \
.setOutputCol("sentence_embeddings") \
.setPoolingStrategy("AVERAGE")
# Convert Spark NLP annotations into Spark ML Vectors for the classifier.
embeddings_finisher = EmbeddingsFinisher() \
.setInputCols(["sentence_embeddings"]) \
.setOutputCols(["finished_sentence_embeddings"]) \
.setOutputAsVector(True)\
.setCleanAnnotations(False)
# Explode the (single-element) array of vectors into a "features" column.
explodeVectors = SQLTransformer(statement=
"SELECT EXPLODE(finished_sentence_embeddings) AS features, * FROM __THIS__")
# Map the string category to a numeric "label" column.
label_stringIdx = StringIndexer(inputCol = "category", outputCol = "label")
nlp_pipeline_w2v = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
glove_embeddings,
embeddingsSentence,
embeddings_finisher,
explodeVectors,
label_stringIdx])
nlp_model_w2v = nlp_pipeline_w2v.fit(newsDF)
processed_w2v = nlp_model_w2v.transform(newsDF)
processed_w2v.count()
# NOTE(review): the finisher writes 'finished_sentence_embeddings'; a column
# named 'finished_embeddings' does not exist here -- the next two selects
# presumably fail. Confirm against the notebook output.
processed_w2v.select('finished_embeddings').take(1)
processed_w2v.select("finished_embeddings").show(1)
processed_w2v.select('finished_sentence_embeddings').take(1)
# IF SQLTransformer IS NOT USED INSIDE THE PIPELINE, WE CAN EXPLODE OUTSIDE
from pyspark.sql.functions import explode
# processed_w2v= processed_w2v.withColumn("features", explode(processed_w2v.finished_sentence_embeddings))
processed_w2v.select("features").take(1)
processed_w2v.select("features").take(1)
processed_w2v.select('description','features','label').show()
# set seed for reproducibility
(trainingData, testData) = processed_w2v.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
from pyspark.sql.functions import udf
# Drop test rows whose embedding vector is all zeros (no token matched the
# pretrained vocabulary) -- they carry no signal for the classifier.
@udf("long")
def num_nonzeros(v):
return v.numNonzeros()
testData = testData.where(num_nonzeros("features") != 0)
testData.count()
# NOTE(review): `lr` is the LogisticRegression defined in a later cell of the
# original notebook -- cells were executed out of order; confirm.
lrModel_w2v = lr.fit(trainingData)
predictions_w2v = lrModel_w2v.transform(testData)
predictions_w2v.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
# Evaluate with sklearn on the driver.
y_true = predictions_w2v.select("label")
y_true = y_true.toPandas()
y_pred = predictions_w2v.select("prediction")
y_pred = y_pred.toPandas()
print(classification_report(y_true.label, y_pred.prediction))
print(accuracy_score(y_true.label, y_pred.prediction))
processed_w2v.select('description','cleanTokens.result').show(truncate=50)
```
## LogReg with Spark NLP Bert Embeddings
```
# --- Same pipeline but with pretrained BERT token embeddings ---
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
# Pooling layer 0 = the last layer's output of bert_base_cased.
bert_embeddings = BertEmbeddings\
.pretrained('bert_base_cased', 'en') \
.setInputCols(["document",'cleanTokens'])\
.setOutputCol("bert")\
.setCaseSensitive(False)\
.setPoolingLayer(0)
# Average token vectors -> one sentence vector per document.
embeddingsSentence = SentenceEmbeddings() \
.setInputCols(["document", "bert"]) \
.setOutputCol("sentence_embeddings") \
.setPoolingStrategy("AVERAGE")
embeddings_finisher = EmbeddingsFinisher() \
.setInputCols(["sentence_embeddings"]) \
.setOutputCols(["finished_sentence_embeddings"]) \
.setOutputAsVector(True)\
.setCleanAnnotations(False)
label_stringIdx = StringIndexer(inputCol = "category", outputCol = "label")
nlp_pipeline_bert = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
bert_embeddings,
embeddingsSentence,
embeddings_finisher,
label_stringIdx])
nlp_model_bert = nlp_pipeline_bert.fit(newsDF)
processed_bert = nlp_model_bert.transform(newsDF)
processed_bert.count()
# No SQLTransformer in this pipeline, so explode outside of it.
from pyspark.sql.functions import explode
processed_bert= processed_bert.withColumn("features", explode(processed_bert.finished_sentence_embeddings))
processed_bert.select('description','features','label').show()
# set seed for reproducibility
(trainingData, testData) = processed_bert.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
lrModel = lr.fit(trainingData)
from pyspark.sql.functions import udf
# Filter out test rows with an all-zero embedding vector.
@udf("long")
def num_nonzeros(v):
return v.numNonzeros()
testData = testData.where(num_nonzeros("features") != 0)
predictions = lrModel.transform(testData)
predictions.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
# sklearn-side evaluation of the Spark predictions.
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import pandas as pd
df = predictions.select('description','category','label','prediction').toPandas()
print(classification_report(df.label, df.prediction))
print(accuracy_score(df.label, df.prediction))
```
## LogReg with ELMO Embeddings
```
# --- Same pipeline with locally-cached ELMO embeddings; here the raw
# DataFrame is split first and each split transformed separately ---
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
# Loaded from a local cache path; "word_emb" = the context-independent layer.
elmo_embeddings = ElmoEmbeddings.load('/Users/vkocaman/cache_pretrained/elmo_en_2.4.0_2.4_1580488815299')\
.setPoolingLayer("word_emb")\
.setInputCols(["document",'cleanTokens'])\
.setOutputCol("elmo")
embeddingsSentence = SentenceEmbeddings() \
.setInputCols(["document", "elmo"]) \
.setOutputCol("sentence_embeddings") \
.setPoolingStrategy("AVERAGE")
embeddings_finisher = EmbeddingsFinisher() \
.setInputCols(["sentence_embeddings"]) \
.setOutputCols(["finished_sentence_embeddings"]) \
.setOutputAsVector(True)\
.setCleanAnnotations(False)
label_stringIdx = StringIndexer(inputCol = "category", outputCol = "label")
nlp_pipeline_elmo = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
elmo_embeddings,
embeddingsSentence,
embeddings_finisher,
label_stringIdx])
nlp_model_elmo = nlp_pipeline_elmo.fit(newsDF)
processed_elmo = nlp_model_elmo.transform(newsDF)
processed_elmo.count()
# Split the RAW data, then transform train and test independently.
(trainingData, testData) = newsDF.randomSplit([0.7, 0.3], seed = 100)
processed_trainingData = nlp_model_elmo.transform(trainingData)
processed_trainingData.count()
processed_testData = nlp_model_elmo.transform(testData)
processed_testData.count()
processed_trainingData.columns
processed_testData= processed_testData.withColumn("features", explode(processed_testData.finished_sentence_embeddings))
processed_trainingData= processed_trainingData.withColumn("features", explode(processed_trainingData.finished_sentence_embeddings))
from pyspark.sql.functions import udf
# Drop all-zero embedding rows from the test split.
@udf("long")
def num_nonzeros(v):
return v.numNonzeros()
processed_testData = processed_testData.where(num_nonzeros("features") != 0)
# %%time below is a Jupyter cell magic (timing of the training cell).
%%time
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
lrModel = lr.fit(processed_trainingData)
processed_trainingData.columns
predictions = lrModel.transform(processed_testData)
predictions.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
# sklearn-side evaluation.
df = predictions.select('description','category','label','prediction').toPandas()
df.shape
df.head()
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(df.label, df.prediction))
print(accuracy_score(df.label, df.prediction))
```
## LogReg with Universal Sentence Encoder
```
# --- Universal Sentence Encoder: sentence-level embeddings directly from the
# document, so no tokenizer / normalizer / pooling stages are needed ---
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
useEmbeddings = UniversalSentenceEncoder.load('/Users/vkocaman/cache_pretrained/tfhub_use_en_2.4.0_2.4_1580582893733')\
.setInputCols("document")\
.setOutputCol("use_embeddings")
embeddings_finisher = EmbeddingsFinisher() \
.setInputCols(["use_embeddings"]) \
.setOutputCols(["finished_use_embeddings"]) \
.setOutputAsVector(True)\
.setCleanAnnotations(False)
label_stringIdx = StringIndexer(inputCol = "category", outputCol = "label")
use_pipeline = Pipeline(
stages=[
document_assembler,
useEmbeddings,
embeddings_finisher,
label_stringIdx]
)
use_df = use_pipeline.fit(newsDF).transform(newsDF)
use_df.select('finished_use_embeddings').show(3)
# Explode the single-element vector array into a "features" column.
from pyspark.sql.functions import explode
use_df= use_df.withColumn("features", explode(use_df.finished_use_embeddings))
use_df.show(2)
# set seed for reproducibility
(trainingData, testData) = use_df.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import pandas as pd
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
lrModel = lr.fit(trainingData)
predictions = lrModel.transform(testData)
# Peek at the most confident predictions for class 0.
predictions.filter(predictions['prediction'] == 0) \
.select("description","category","probability","label","prediction") \
.orderBy("probability", ascending=False) \
.show(n = 10, truncate = 30)
df = predictions.select('description','category','label','prediction').toPandas()
#df['result'] = df['result'].apply(lambda x: x[0])
df.head()
print(classification_report(df.label, df.prediction))
print(accuracy_score(df.label, df.prediction))
```
### train on entire dataset
```
# Retrain LogisticRegression on the ENTIRE USE-embedded dataset and evaluate
# on the held-out parquet test file.
lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
lrModel = lr.fit(use_df)
test_df = spark.read.parquet("data/news_category_test.parquet")
test_df = use_pipeline.fit(test_df).transform(test_df)
test_df= test_df.withColumn("features", explode(test_df.finished_use_embeddings))
test_df.show(2)
predictions = lrModel.transform(test_df)
df = predictions.select('description','category','label','prediction').toPandas()
# NOTE(review): this hard-codes the id each category got from the
# StringIndexer fit on the TRAINING data -- the indexer refit on test_df may
# assign different ids; verify the mapping against lrModel's training labels.
df['label'] = df.category.replace({'World':2.0,
'Sports':3.0,
'Business':0.0,
'Sci/Tech':1.0})
df.head()
print(classification_report(df.label, df.prediction))
print(accuracy_score(df.label, df.prediction))
```
## Spark NLP Licensed DocClassifier
```
# --- Licensed Spark NLP DocumentLogRegClassifierApproach: the classifier is
# itself a pipeline stage, trained on stemmed clean tokens ---
from sparknlp_jsl.annotator import *
# set seed for reproducibility
(trainingData, testData) = newsDF.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
stemmer = Stemmer() \
.setInputCols(["cleanTokens"]) \
.setOutputCol("stem")
# Trains directly against the string "category" column; emits annotations.
logreg = DocumentLogRegClassifierApproach()\
.setInputCols(["stem"])\
.setLabelCol("category")\
.setOutputCol("prediction")
nlp_pipeline = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
stemmer,
logreg])
nlp_model = nlp_pipeline.fit(trainingData)
processed = nlp_model.transform(testData)
processed.count()
processed.select('description','category','prediction.result').show(truncate=50)
processed.select('description','prediction.result').show(truncate=50)
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import pandas as pd
# NOTE: the same toPandas() conversion is repeated three times below --
# notebook exploration cells; only the last one feeds the metrics.
df = processed.select('description','category','prediction.result').toPandas()
df.head()
df.result[0][0]
df = processed.select('description','category','prediction.result').toPandas()
# 'result' is a single-element annotation list; unwrap to the string label.
df['result'] = df['result'].apply(lambda x: x[0])
df.head()
df = processed.select('description','category','prediction.result').toPandas()
df['result'] = df['result'].apply(lambda x: x[0])
print(classification_report(df.category, df.result))
print(accuracy_score(df.category, df.result))
```
# ClassifierDL
```
# actual content is inside description column
document = DocumentAssembler()\
.setInputCol("description")\
.setOutputCol("document")
use = UniversalSentenceEncoder.load('/Users/vkocaman/cache_pretrained/tfhub_use_en_2.4.4_2.4_1583158595769')\
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
# the classes/labels/categories are in category column
classsifierdl = ClassifierDLApproach()\
.setInputCols(["sentence_embeddings"])\
.setOutputCol("class")\
.setLabelColumn("category")\
.setMaxEpochs(5)\
.setEnableOutputLogs(True)
pipeline = Pipeline(
stages = [
document,
use,
classsifierdl
])
# set seed for reproducibility
(trainingData, testData) = newsDF.randomSplit([0.7, 0.3], seed = 100)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
pipelineModel = pipeline.fit(trainingData)
from sklearn.metrics import classification_report, accuracy_score
df = pipelineModel.transform(testDataset).select('category','description',"class.result").toPandas()
df['result'] = df['result'].apply(lambda x: x[0])
print(classification_report(df.category, df.result))
print(accuracy_score(df.category, df.result))
```
## Loading the trained classifier from disk
```
# Reload the ClassifierDL model trained earlier from disk.
classsifierdlmodel = ClassifierDLModel.load('classifierDL_model_20200317_5e')
import sparknlp
sparknlp.__path__
# (Removed a dangling `.setInputCols(...)...setEnableOutputLogs(True)` chain
# left over from a copied ClassifierDLApproach cell -- it had no receiver
# object and was a SyntaxError at top level.)
# Reload the raw training CSV for reference counts.
trainDataset = spark.read \
.option("header", True) \
.csv("data/news_category_train.csv")
trainDataset.count()
trainingData.count()
# Build an inference pipeline around the reloaded ClassifierDLModel and try
# it on single strings via LightPipeline.
document = DocumentAssembler()\
.setInputCol("description")\
.setOutputCol("document")
sentence = SentenceDetector()\
.setInputCols(['document'])\
.setOutputCol('sentence')
use = UniversalSentenceEncoder.load('/Users/vkocaman/cache_pretrained/tfhub_use_en_2.4.4_2.4_1583158595769')\
.setInputCols(["sentence"])\
.setOutputCol("sentence_embeddings")
classsifierdlmodel = ClassifierDLModel.load('classifierDL_model_20200317_5e')
pipeline = Pipeline(
stages = [
document,
sentence,
use,
classsifierdlmodel
])
# All stages are pretrained, so fitting on 1 row is just a formality to get a
# PipelineModel.
pipeline.fit(testData.limit(1)).transform(testData.limit(10)).select('category','description',"class.result").show(10, truncate=50)
# NOTE(review): `testDataset` is not defined in this section (the split above
# produced `testData`); confirm which DataFrame was intended.
lm = LightPipeline(pipeline.fit(testDataset.limit(1)))
lm.annotate('In its first two years, the UK dedicated card companies have surge')
text='''
Fearing the fate of Italy, the centre-right government has threatened to be merciless with those who flout tough restrictions. As of Wednesday it will also include all shops being closed across Greece, with the exception of supermarkets. Banks, pharmacies, pet-stores, mobile phone stores, opticians, bakers, mini-markets, couriers and food delivery outlets are among the few that will also be allowed to remain open.
'''
lm = LightPipeline(pipeline.fit(testDataset.limit(1)))
lm.annotate(text)
```
# Classifier DL + Glove + Basic text processing
```
# Quick sanity check of lemmatization + glove over 1000 training rows.
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
lemma = LemmatizerModel.pretrained('lemma_antbnc') \
.setInputCols(["token"]) \
.setOutputCol("lemma")
# NOTE(review): `document_assembler` and `glove_embeddings` here refer to
# objects created in other cells (their canonical definitions appear just
# below) -- notebook cells executed out of order; confirm which instances
# were live when this ran.
lemma_pipeline = Pipeline(
stages=[document_assembler,
tokenizer,
lemma,
glove_embeddings])
lemma_pipeline.fit(trainingData.limit(1000)).transform(trainingData.limit(1000)).show(truncate=30)
# --- Stages for ClassifierDL on GloVe: full text prep + lemmatization ---
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
lemma = LemmatizerModel.pretrained('lemma_antbnc') \
.setInputCols(["cleanTokens"]) \
.setOutputCol("lemma")
# GloVe over the lemmas (not the raw tokens).
glove_embeddings = WordEmbeddingsModel().pretrained() \
.setInputCols(["document",'lemma'])\
.setOutputCol("embeddings")\
.setCaseSensitive(False)
embeddingsSentence = SentenceEmbeddings() \
.setInputCols(["document", "embeddings"]) \
.setOutputCol("sentence_embeddings") \
.setPoolingStrategy("AVERAGE")
classsifierdl = ClassifierDLApproach()\
.setInputCols(["sentence_embeddings"])\
.setOutputCol("class")\
.setLabelColumn("category")\
.setMaxEpochs(10)\
.setEnableOutputLogs(True)
clf_pipeline = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
lemma,
glove_embeddings,
embeddingsSentence,
classsifierdl])
!rm -rf classifier_dl_pipeline_glove
clf_pipelineModel.save('classifier_dl_pipeline_glove')
clf_pipelineModel = clf_pipeline.fit(trainingData)
df = clf_pipelineModel.transform(testDataset).select('category','description',"class.result").toPandas()
df['result'] = df['result'].apply(lambda x: x[0])
print(classification_report(df.category, df.result))
print(accuracy_score(df.category, df.result))
!cd data && ls -l
import pandas as pd
import
news_df = newsDF.toPandas()
news_df.head()
news_df.to_csv('data/news_dataset.csv', index=False)
# --- Standalone text-prep pipeline (no classifier) saved for reuse ---
document_assembler = DocumentAssembler() \
.setInputCol("description") \
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized")
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("normalized")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
lemma = LemmatizerModel.pretrained('lemma_antbnc') \
.setInputCols(["cleanTokens"]) \
.setOutputCol("lemma")
glove_embeddings = WordEmbeddingsModel().pretrained() \
.setInputCols(["document",'lemma'])\
.setOutputCol("embeddings")\
.setCaseSensitive(False)
# `embeddingsSentence` is the SentenceEmbeddings stage defined earlier above.
txt_pipeline = Pipeline(
stages=[document_assembler,
tokenizer,
normalizer,
stopwords_cleaner,
lemma,
glove_embeddings,
embeddingsSentence])
# Fitting on 1 row is enough: every stage is pretrained / stateless.
txt_pipelineModel = txt_pipeline.fit(testData.limit(1))
txt_pipelineModel.save('text_prep_pipeline_glove')
df.head()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/satyajitghana/TSAI-DeepNLP-END2.0/blob/main/05_NLP_Augment/SSTModel.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# --- SST sentiment-classification notebook: environment + imports ---
! nvidia-smi
! pip install pytorch-lightning --quiet
! pip install OmegaConf --quiet
! pip install nlpaug --quiet
! pip install gdown==3.13.0
import copy
import torch
import torchtext
import pytorch_lightning as pl
from torch import nn
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from pytorch_lightning.metrics.functional import accuracy
from torchtext.utils import download_from_url, extract_archive
from torchtext.data.utils import get_tokenizer
# Legacy torchtext "experimental" transform API (pre-0.10 style).
from torchtext.experimental.functional import sequential_transforms, ngrams_func, totensor, vocab_func
from torchtext.vocab import build_vocab_from_iterator
import torchtext.experimental.functional as text_f
import nlpaug.augmenter.char as nac
import nlpaug.augmenter.word as naw
import nlpaug.augmenter.sentence as nas
import nlpaug.flow as nafc
from nlpaug.util import Action
import random
import gdown
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from pathlib import Path
from omegaconf import OmegaConf
from zipfile import ZipFile
from typing import Optional, Tuple, Any, Dict, List
import seaborn as sns
import matplotlib.pyplot as plt
# Plot styling for the notebook.
sns.set()
plt.style.use("dark_background")
class StanfordSentimentTreeBank(Dataset):
"""The Standford Sentiment Tree Bank Dataset
Stanford Sentiment Treebank V1.0
This is the dataset of the paper:
Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank
Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher Manning, Andrew Ng and Christopher Potts
Conference on Empirical Methods in Natural Language Processing (EMNLP 2013)
If you use this dataset in your research, please cite the above paper.
@incollection{SocherEtAl2013:RNTN,
title = {{Parsing With Compositional Vector Grammars}},
author = {Richard Socher and Alex Perelygin and Jean Wu and Jason Chuang and Christopher Manning and Andrew Ng and Christopher Potts},
booktitle = {{EMNLP}},
year = {2013}
}
"""
# Original Stanford archive (unused for download; kept for provenance).
ORIG_URL = "http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip"
DATASET_NAME = "StanfordSentimentTreeBank"
# Pre-augmented copy of the dataset hosted on Google Drive.
URL = 'https://drive.google.com/uc?id=1urNi0Rtp9XkvkxxeKytjl1WoYNYUEoPI'
OUTPUT = 'sst_dataset.zip'
# NOTE(review): `label_transforms` and `use_transformed_dataset` parameters
# are accepted but never used in this implementation.
def __init__(self, root, vocab=None, text_transforms=None, label_transforms=None, split='train', ngrams=1, use_transformed_dataset=True):
"""Initiate text-classification dataset.
Args:
root: directory where the zipped dataset is cached/downloaded.
vocab: Vocabulary object used for dataset; built from the train
split when None.
text_transforms: extra token-level transforms applied after
tokenization and before vocab lookup.
split: 'train' or 'test'.
ngrams: n-gram size fed to ngrams_func.
"""
super(self.__class__, self).__init__()
if split not in ['train', 'test']:
raise ValueError(f'split must be either ["train", "test"] unknown split {split}')
self.vocab = vocab
# Download (cached) and unpack the CSV; populates self.dataset_train /
# self.dataset used below.
gdown.cached_download(self.URL, Path(root) / self.OUTPUT)
self.generate_sst_dataset(split, Path(root) / self.OUTPUT)
tokenizer = get_tokenizer("basic_english")
# the text transform can only work at the sentence level
# the rest of tokenization and vocab is done by this class
self.text_transform = sequential_transforms(tokenizer, text_f.ngrams_func(ngrams))
# Old torchtext signature: build_vocab_from_iterator(iterator, num_lines).
def build_vocab(data, transforms):
def apply_transforms(data):
for line in data:
yield transforms(line)
return build_vocab_from_iterator(apply_transforms(data), len(data))
if self.vocab is None:
# vocab is always built on the train dataset
self.vocab = build_vocab(self.dataset_train["phrase"], self.text_transform)
# User transforms run between tokenization and vocab lookup, so they see
# token lists (e.g. random_deletion / random_swap augmentation).
if text_transforms is not None:
self.text_transform = sequential_transforms(
self.text_transform, text_transforms, text_f.vocab_func(self.vocab), text_f.totensor(dtype=torch.long)
)
else:
self.text_transform = sequential_transforms(
self.text_transform, text_f.vocab_func(self.vocab), text_f.totensor(dtype=torch.long)
)
# NOTE(review): this casts the raw float sentiment straight to long
# (truncating toward 0); `discretize_label` below is never applied --
# confirm the CSV's sentiment_values are already integer class ids.
self.label_transform = sequential_transforms(text_f.totensor(dtype=torch.long))
def generate_sst_dataset(self, split, dataset_file):
# Loads the augmented CSV and materializes the requested split.
with ZipFile(dataset_file) as datasetzip:
with datasetzip.open('sst_dataset/sst_dataset_augmented.csv') as f:
dataset = pd.read_csv(f, index_col=0)
self.dataset_orig = dataset.copy()
# splitset_label 1/3 = train(+dev); 2 = test.
dataset_train_raw = dataset[dataset['splitset_label'].isin([1, 3])]
# Train split = cleaned phrases plus two augmented variants
# (synonym replacement and back-translation), stacked row-wise.
self.dataset_train = pd.concat([
dataset_train_raw[['phrase_cleaned', 'sentiment_values']].rename(columns={"phrase_cleaned": 'phrase'}),
dataset_train_raw[['synonym_sentences', 'sentiment_values']].rename(columns={"synonym_sentences": 'phrase'}),
dataset_train_raw[['backtranslated', 'sentiment_values']].rename(columns={"backtranslated": 'phrase'}),
], ignore_index=True)
if split == 'train':
self.dataset = self.dataset_train.copy()
else:
self.dataset = dataset[dataset['splitset_label'].isin([2])] \
[['phrase_cleaned', 'sentiment_values']] \
.rename(columns={"phrase_cleaned": 'phrase'}) \
.reset_index(drop=True)
# Maps a continuous sentiment in [0,1] to one of 5 class buckets.
# NOTE(review): defined but not referenced anywhere in this class.
@staticmethod
def discretize_label(label):
if label <= 0.2: return 0
if label <= 0.4: return 1
if label <= 0.6: return 2
if label <= 0.8: return 3
return 4
def __getitem__(self, idx):
# Returns (label_tensor, token_id_tensor) -- label first.
# print(f'text: {self.dataset["sentence"].iloc[idx]}, label: {self.dataset["sentiment_values"].iloc[idx]}')
text = self.text_transform(self.dataset['phrase'].iloc[idx])
label = self.label_transform(self.dataset['sentiment_values'].iloc[idx])
# print(f't_text: {text} {text.shape}, t_label: {label}')
return label, text
def __len__(self):
return len(self.dataset)
@staticmethod
def get_labels():
return ['very negative', 'negative', 'neutral', 'positive', 'very positive']
def get_vocab(self):
return self.vocab
# Collate function for DataLoader: pads sequences in the batch and also
# returns the original lengths (for pack_padded_sequence downstream).
@property
def collator_fn(self):
def collate_fn(batch):
pad_idx = self.get_vocab()['<pad>']
labels, sequences = zip(*batch)
labels = torch.stack(labels)
lengths = torch.LongTensor([len(sequence) for sequence in sequences])
# print('before padding: ', sequences[40])
sequences = torch.nn.utils.rnn.pad_sequence(sequences,
padding_value = pad_idx,
batch_first=True
)
# print('after padding: ', sequences[40])
return labels, sequences, lengths
return collate_fn
class SSTDataModule(pl.LightningDataModule):
"""
DataModule for SST, train, val, test splits and transforms
"""
name = "stanford_sentiment_treebank"
def __init__(
self,
data_dir: str = '.',
val_split: int = 1000,
num_workers: int = 2,
batch_size: int = 64,
*args,
**kwargs,
):
"""
Args:
data_dir: where to save/load the data
val_split: how many of the training samples to reserve for
validation (currently unused -- see setup())
num_workers: how many workers to use for loading data
batch_size: desired batch size.
"""
super().__init__(*args, **kwargs)
self.data_dir = data_dir
self.val_split = val_split
self.num_workers = num_workers
self.batch_size = batch_size
self.dataset_train = ...
self.dataset_val = ...
self.dataset_test = ...
# The Dataset class, kept as an attribute so it could be swapped out.
self.SST = StanfordSentimentTreeBank
def prepare_data(self):
"""Downloads/caches the SST archive into `data_dir`."""
self.SST(self.data_dir)
def setup(self, stage: Optional[str] = None):
"""Split the train and valid dataset"""
train_trans, test_trans = self.default_transforms
train_dataset = self.SST(self.data_dir, split='train', **train_trans)
test_dataset = self.SST(self.data_dir, split='test', **test_trans)
train_length = len(train_dataset)
self.raw_dataset_train = train_dataset
self.raw_dataset_test = test_dataset
# NOTE: random_split into train/val is disabled; the test split doubles
# as the validation set below.
# self.dataset_train, self.dataset_val = random_split(train_dataset, [train_length - self.val_split, self.val_split])
self.dataset_train = train_dataset
self.dataset_test = test_dataset
def train_dataloader(self):
"""SST train set loader (shuffled, padded via the dataset's collator)."""
loader = DataLoader(
self.dataset_train,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.collator_fn
)
return loader
def val_dataloader(self):
"""Validation loader -- currently the SST test split (see setup())."""
loader = DataLoader(
self.dataset_test,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.collator_fn
)
return loader
def test_dataloader(self):
"""SST test set loader."""
loader = DataLoader(
self.dataset_test,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.collator_fn
)
return loader
def get_vocab(self):
return self.raw_dataset_train.get_vocab()
# Token-level augmentation is applied to the TRAIN split only.
@property
def default_transforms(self):
train_transforms = {
'text_transforms': text_f.sequential_transforms(
random_deletion,
random_swap
),
'label_transforms': None
}
test_transforms = {
'text_transforms': None,
'label_transforms': None
}
return train_transforms, test_transforms
@property
def collator_fn(self):
return self.raw_dataset_train.collator_fn
def random_deletion(words, p=0.1):
    """Drop each token of *words* independently with probability *p*.

    A single-token input is returned unchanged, and if every token gets
    dropped, one token is sampled back so the result is never empty.
    """
    if len(words) == 1:  # return if single word
        return words
    kept = [w for w in words if random.uniform(0, 1) > p]
    if kept:
        return kept
    return [random.choice(words)]  # nothing left: sample a random word
def random_swap(sentence, n=3, p=0.1):
    """Swap up to *n* random token pairs of *sentence*, in place.

    Each candidate swap is skipped with probability *p*; the two indices
    are drawn with replacement, so a swap may be a no-op.  Returns the
    (possibly mutated) input list.
    """
    indices = range(len(sentence))
    attempts = min(n, len(sentence))
    for _ in range(attempts):
        if random.uniform(0, 1) > p:
            i, j = random.choices(indices, k=2)
            sentence[i], sentence[j] = sentence[j], sentence[i]
    return sentence
# LSTM sentence classifier: Embedding -> LSTM -> projection MLP -> linear head.
class SSTModel(pl.LightningModule):
def __init__(self, hparams, *args, **kwargs):
super().__init__()
self.save_hyperparameters(hparams)
self.num_classes = self.hparams.output_dim
# input_dim = vocab size.
self.embedding = nn.Embedding(self.hparams.input_dim, self.hparams.embedding_dim)
self.lstm = nn.LSTM(
self.hparams.embedding_dim,
self.hparams.hidden_dim,
num_layers=self.hparams.num_layers,
dropout=self.hparams.dropout,
batch_first=True
)
# Projection block applied to the final hidden state before the head.
self.proj_layer = nn.Sequential(
nn.Linear(self.hparams.hidden_dim, self.hparams.hidden_dim),
nn.BatchNorm1d(self.hparams.hidden_dim),
nn.ReLU(),
nn.Dropout(self.hparams.dropout),
)
self.fc = nn.Linear(self.hparams.hidden_dim, self.num_classes)
self.loss = nn.CrossEntropyLoss()
# Zero-initialized (h, c) pair for the LSTM.
# NOTE(review): the parameter is named sequence_length but it fills the
# second (batch) dimension of the state tensors -- confirm callers pass
# the batch size here.
def init_state(self, sequence_length):
return (torch.zeros(self.hparams.num_layers, sequence_length, self.hparams.hidden_dim).to(self.device),
torch.zeros(self.hparams.num_layers, sequence_length, self.hparams.hidden_dim).to(self.device))
def forward(self, text, text_length, prev_state=None):
# [batch size, sentence length] => [batch size, sentence len, embedding size]
embedded = self.embedding(text)
# packs the input for faster forward pass in RNN
packed = torch.nn.utils.rnn.pack_padded_sequence(
embedded, text_length.to('cpu'),
enforce_sorted=False,
batch_first=True
)
# [batch size sentence len, embedding size] =>
# output: [batch size, sentence len, hidden size]
# hidden: [batch size, 1, hidden size]
packed_output, curr_state = self.lstm(packed, prev_state)
hidden_state, cell_state = curr_state
# print('hidden state shape: ', hidden_state.shape)
# print('cell')
# unpack packed sequence
# unpacked, unpacked_len = torch.nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)
# print('unpacked: ', unpacked.shape)
# [batch size, sentence len, hidden size] => [batch size, num classes]
# output = self.proj_layer(unpacked[:, -1])
# Classify from the LAST layer's final hidden state.
output = self.proj_layer(hidden_state[-1])
# print('output shape: ', output.shape)
output = self.fc(output)
return output, curr_state
# Common loss/accuracy computation shared by train/val/test steps.
def shared_step(self, batch, batch_idx):
label, text, text_length = batch
logits, in_state = self(text, text_length)
loss = self.loss(logits, label)
pred = torch.argmax(F.log_softmax(logits, dim=1), dim=1)
acc = accuracy(pred, label)
metric = {'loss': loss, 'acc': acc}
return metric
def training_step(self, batch, batch_idx):
metrics = self.shared_step(batch, batch_idx)
log_metrics = {'train_loss': metrics['loss'], 'train_acc': metrics['acc']}
self.log_dict(log_metrics, prog_bar=True)
return metrics
def validation_step(self, batch, batch_idx):
metrics = self.shared_step(batch, batch_idx)
return metrics
def validation_epoch_end(self, outputs):
# Average per-batch metrics over the epoch.
acc = torch.stack([x['acc'] for x in outputs]).mean()
loss = torch.stack([x['loss'] for x in outputs]).mean()
log_metrics = {'val_loss': loss, 'val_acc': acc}
self.log_dict(log_metrics, prog_bar=True)
return log_metrics
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs):
accuracy = torch.stack([x['acc'] for x in outputs]).mean()
# 'hp_metric' feeds TensorBoard's hyperparameter view.
self.log('hp_metric', accuracy)
self.log_dict({'test_acc': accuracy}, prog_bar=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
# Reduce LR when the training loss plateaus.
lr_scheduler = {
'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True),
'monitor': 'train_loss',
'name': 'scheduler'
}
return [optimizer], [lr_scheduler]
```
**Sanity Checking**
```
# --- Sanity-check one batch, then train the model ---
sst_dataset = SSTDataModule(batch_size=128)
sst_dataset.setup()
loader = sst_dataset.train_dataloader()
batch = next(iter(loader))
label, text, text_length = batch
text.size(0)
label.shape, text.shape, text_length.shape
text[0]
# Hyperparameters; input_dim is tied to the built vocab size.
hparams = OmegaConf.create({
'input_dim': len(sst_dataset.get_vocab()),
'embedding_dim': 128,
'num_layers': 2,
'hidden_dim': 64,
'dropout': 0.5,
'output_dim': len(StanfordSentimentTreeBank.get_labels()),
'lr': 5e-4,
'epochs': 30,
'use_lr_finder': False
})
sst_model = SSTModel(hparams)
# Smoke-test a forward pass on the CPU batch.
output, (h, c) = sst_model(text, text_length)
output.shape
# Fresh model instance for the actual training run.
sst_model = SSTModel(hparams)
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
# Keep the 3 best checkpoints by validation loss.
checkpoint_callback = ModelCheckpoint(
monitor='val_loss',
save_top_k=3,
mode='min'
)
lr_monitor = LearningRateMonitor(logging_interval='step')
# reload_dataloaders_every_epoch so the random augmentation re-rolls per epoch.
trainer = pl.Trainer(gpus=1, max_epochs=hparams.epochs, callbacks=[lr_monitor, checkpoint_callback], progress_bar_refresh_rate=1, reload_dataloaders_every_epoch=True)
# Optional LR range test before fitting.
if hparams.use_lr_finder:
# Run learning rate finder
lr_finder = trainer.tuner.lr_find(sst_model, sst_dataset, max_lr=5)
# Plot with
fig = lr_finder.plot(suggest=True)
fig.show()
# Pick point based on plot, or get suggestion
new_lr = lr_finder.suggestion()
print(f'lr finder suggested lr: {new_lr}')
# update hparams of the model
sst_model.hparams.lr = new_lr
trainer.fit(sst_model, sst_dataset)
trainer.test()
# TensorBoard inside the notebook.
%load_ext tensorboard
%tensorboard --logdir lightning_logs/
```
## Model Diagnosis
```
# Grab one test batch for qualitative error analysis below.
loader = sst_dataset.test_dataloader()
batch = next(iter(loader))
label, text, text_length = batch
label.shape, text.shape, text_length.shape
def k_missclassified(batch, model, datamodule, k=10):
    """Print up to *k* sentences from *batch* that *model* misclassified,
    together with their gold and predicted label names."""
    model.eval()
    with torch.no_grad():
        label, text, text_length = batch
        logits, in_state = model(text, text_length)
        pred = torch.argmax(F.log_softmax(logits, dim=1), dim=1)
        acc = accuracy(pred, label)
        wrong = pred != label
        vocab = datamodule.get_vocab()
        label_names = datamodule.dataset_train.get_labels()
        samples = zip(text.numpy()[wrong][:k],
                      label.numpy()[wrong][:k],
                      pred.numpy()[wrong][:k])
        for ids, gold, guess in samples:
            # Rebuild the sentence from token ids, hiding padding.
            sentence = ' '.join(vocab.itos[x] for x in ids).replace(" <pad>", "")
            print('sentence: ', sentence)
            print(f'label: {label_names[gold]}, predicted: {label_names[guess]}')
            print('\n')
# Show a few misclassified examples from the current test batch.
k_missclassified(batch, sst_model, sst_dataset)
def k_correctclassified(batch, model, datamodule, k=10):
    """Print up to *k* examples from *batch* that *model* classifies correctly.

    Args:
        batch: (label, text, text_length) tensors as produced by a dataloader.
        model: a trained model callable as model(text, text_length) -> (logits, state).
        datamodule: object exposing get_vocab() and dataset_train.get_labels().
        k: maximum number of correctly classified examples to print.
    """
    model.eval()
    with torch.no_grad():
        label, text, text_length = batch
        logits, _ = model(text, text_length)  # final hidden state unused here
        # argmax of the raw logits equals argmax of the (log-)softmax.
        pred = torch.argmax(logits, dim=1)
        # Mask of correctly predicted samples (the original misleadingly named
        # this miss_idx), as numpy for indexing the numpy views below.
        correct_idx = (pred == label).numpy()
        vocab = datamodule.get_vocab()
        label_names = datamodule.dataset_train.get_labels()
        for t, l, p in zip(text.numpy()[correct_idx][:k], label.numpy()[correct_idx][:k], pred.numpy()[correct_idx][:k]):
            # Re-assemble the token ids into a readable sentence, dropping padding.
            sentence = ' '.join(vocab.itos[x] for x in t).replace(" <pad>", "")
            print('sentence: ', sentence)
            print(f'label: {label_names[l]}, predicted: {label_names[p]}')
            print('\n')
```
## Misc Stuff
```
# Scratch cell: inspect the working directory and sync logs with Google Drive.
! ls
ls lightning_logs/version_0
from google.colab import drive
drive.mount('/gdrive')
! ls /gdrive/MyDrive/END2.0/05_NLP_Augment/
# One-off commands for copying/cleaning logs and uploading runs to
# tensorboard.dev; left commented so they are not re-run accidentally.
# ! cp -r /gdrive/MyDrive/END2.0/05_NLP_Augment/lightning_logs .
# ! cp -r lightning_logs /gdrive/MyDrive/END2.0/05_NLP_Augment/
# drive.flush_and_unmount()
# ! rm -r lightning_logs
# ! du -sh *
# ! tensorboard dev upload --logdir lightning_logs \
# --name "END2 05_NLP_Augment - Satyajit" \
# --description "Experiments on NLP Augmentation on SST Dataset"
```
| github_jupyter |
```
import os
# Select the GPU before TensorFlow is imported.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import numpy as np
import tensorflow as tf
import json
# Load the BPE-tokenized parallel corpus (lists of subword ids).
with open('dataset-bpe.json') as fopen:
    data = json.load(fopen)
train_X = data['train_X']
train_Y = data['train_Y']
test_X = data['test_X']
test_Y = data['test_Y']
# Special token ids used by the decoder.
EOS = 2
GO = 1
vocab_size = 32000
# Append the EOS id (2) to every target sequence.
train_Y = [i + [2] for i in train_Y]
test_Y = [i + [2] for i in test_Y]
from tensor2tensor.utils import beam_search
def pad_second_dim(x, desired_size):
    """Zero-pad a rank-3 tensor along axis 1 until it has `desired_size` steps."""
    shape = tf.shape(x)
    # Zero block with the same batch (axis 0) and feature (axis 2) extents,
    # and exactly the number of missing steps on axis 1.
    pad_shape = tf.stack([shape[0], desired_size - shape[1], shape[2]], 0)
    zeros = tf.tile([[[0.0]]], pad_shape)
    return tf.concat([x, zeros], 1)
class Translator:
    """TF1 seq2seq graph: bidirectional-RNN encoder, multi-layer RNN decoder,
    masked sequence loss, and a beam_search decode op (beam width 1)."""

    def __init__(self, size_layer, num_layers, embedded_size, learning_rate):
        # Factory for plain RNN cells; reuse=True shares variables when the
        # graph is built a second time for inference.
        def cells(size_layer, reuse=False):
            return tf.nn.rnn_cell.BasicRNNCell(size_layer,reuse=reuse)
        self.X = tf.placeholder(tf.int32, [None, None])  # source token ids
        self.Y = tf.placeholder(tf.int32, [None, None])  # target token ids
        # Sequence lengths inferred from non-zero (non-padding) entries.
        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype = tf.int32)
        self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype = tf.int32)
        batch_size = tf.shape(self.X)[0]
        # One embedding table shared by encoder and decoder.
        embeddings = tf.Variable(tf.random_uniform([vocab_size, embedded_size], -1, 1))
        def forward(x, y, reuse = False):
            # Build encoder + decoder; returns per-timestep vocabulary logits.
            X_seq_len = tf.count_nonzero(x, 1, dtype = tf.int32)
            Y_seq_len = tf.count_nonzero(y, 1, dtype = tf.int32)
            with tf.variable_scope('model',reuse=reuse):
                encoder_embedded = tf.nn.embedding_lookup(embeddings, x)
                decoder_embedded = tf.nn.embedding_lookup(embeddings, y)
                # Stack of bidirectional layers, each half-width so the
                # fw/bw concatenation matches size_layer.
                for n in range(num_layers):
                    (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
                        cell_fw = cells(size_layer // 2),
                        cell_bw = cells(size_layer // 2),
                        inputs = encoder_embedded,
                        sequence_length = X_seq_len,
                        dtype = tf.float32,
                        scope = 'bidirectional_rnn_%d'%(n))
                    encoder_embedded = tf.concat((out_fw, out_bw), 2)
            # Final fw/bw states concatenated and replicated to seed every
            # decoder layer.
            bi_state = tf.concat((state_fw,state_bw), -1)
            last_state = tuple([bi_state] * num_layers)
            with tf.variable_scope("decoder",reuse=reuse):
                rnn_cells_dec = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)])
                outputs, _ = tf.nn.dynamic_rnn(rnn_cells_dec, decoder_embedded,
                    sequence_length=Y_seq_len,
                    initial_state = last_state,
                    dtype = tf.float32)
                # Project decoder outputs to vocabulary logits.
                return tf.layers.dense(outputs,vocab_size)
        # --- Training graph ---
        # Decoder input is GO prepended to a right-shifted sequence.
        # NOTE(review): `main` is sliced from self.X (the source) rather than
        # self.Y; standard teacher forcing would shift the target - confirm
        # this is intended.
        main = tf.strided_slice(self.X, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        self.training_logits = forward(self.X, decoder_input, reuse = False)
        # Trim then zero-pad the time axis so logits line up with the targets.
        self.training_logits = self.training_logits[:, :tf.reduce_max(self.Y_seq_len)]
        self.training_logits = pad_second_dim(self.training_logits, tf.reduce_max(self.Y_seq_len))
        # Mask out padding positions in the loss and the accuracy metric.
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
            targets = self.Y,
            weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        # Token-level accuracy over the non-padded positions only.
        y_t = tf.argmax(self.training_logits,axis=2)
        y_t = tf.cast(y_t, tf.int32)
        self.prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(self.Y, masks)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # --- Inference graph (beam_search with beam width 1, i.e. greedy) ---
        initial_ids = tf.fill([batch_size], GO)
        def symbols_to_logits(ids):
            # Return logits for the next step only, reusing trained variables.
            x = tf.contrib.seq2seq.tile_batch(self.X, 1)
            logits = forward(x, ids, reuse = True)
            return logits[:, tf.shape(ids)[1]-1, :]
        final_ids, final_probs, _ = beam_search.beam_search(
            symbols_to_logits,
            initial_ids,
            1,
            tf.reduce_max(self.X_seq_len),
            vocab_size,
            0.0,
            eos_id = EOS)
        self.fast_result = final_ids
# Hyperparameters for the seq2seq translator.
size_layer = 512
num_layers = 2
embedded_size = 256
learning_rate = 1e-3
batch_size = 128
epoch = 20
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Translator(size_layer, num_layers, embedded_size, learning_rate)
sess.run(tf.global_variables_initializer())
pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
# Smoke-test the graph on the first 10 training pairs before the real run.
batch_x = pad_sequences(train_X[:10], padding='post')
batch_y = pad_sequences(train_Y[:10], padding='post')
sess.run([model.fast_result, model.cost, model.accuracy],
    feed_dict = {model.X: batch_x, model.Y: batch_y})
import tqdm
for e in range(epoch):
    # Training pass over minibatches (runs the optimizer op).
    pbar = tqdm.tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_loss, train_acc, test_loss, test_acc = [], [], [], []
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        batch_x = pad_sequences(train_X[i : index], padding='post')
        batch_y = pad_sequences(train_Y[i : index], padding='post')
        feed = {model.X: batch_x,
            model.Y: batch_y}
        accuracy, loss, _ = sess.run([model.accuracy,model.cost,model.optimizer],
            feed_dict = feed)
        train_loss.append(loss)
        train_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    # Evaluation pass (no optimizer op, so the weights are not updated).
    pbar = tqdm.tqdm(
        range(0, len(test_X), batch_size), desc = 'minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = pad_sequences(test_X[i : index], padding='post')
        batch_y = pad_sequences(test_Y[i : index], padding='post')
        feed = {model.X: batch_x,
            model.Y: batch_y,}
        accuracy, loss = sess.run([model.accuracy,model.cost],
            feed_dict = feed)
        test_loss.append(loss)
        test_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    print('epoch %d, training avg loss %f, training avg acc %f'%(e+1,
        np.mean(train_loss),np.mean(train_acc)))
    print('epoch %d, testing avg loss %f, testing avg acc %f'%(e+1,
        np.mean(test_loss),np.mean(test_acc)))
from tensor2tensor.utils import bleu_hook
# Decode the test set with the fast_result op and score against references.
results = []
for i in tqdm.tqdm(range(0, len(test_X), batch_size)):
    index = min(i + batch_size, len(test_X))
    batch_x = pad_sequences(test_X[i : index], padding='post')
    feed = {model.X: batch_x}
    p = sess.run(model.fast_result,feed_dict = feed)[:,0,:]
    result = []
    for row in p:
        # Drop ids <= 3 (presumably the PAD/GO/EOS/UNK specials - confirm
        # against the tokenizer's vocabulary).
        result.append([i for i in row if i > 3])
    results.extend(result)
rights = []
for r in test_Y:
    rights.append([i for i in r if i > 3])
bleu_hook.compute_bleu(reference_corpus = rights,
    translation_corpus = results)
```
| github_jupyter |
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import sampler
from tqdm import tnrange, tqdm_notebook, tqdm
import skorch
import torchvision.datasets as dset
import torchvision.transforms as T
import torchvision.models as models
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
import numpy as np
import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
from Lung_dataset import ILDDataset
USE_GPU = True
dtype = torch.float32 # we will be using float throughout this tutorial
if USE_GPU and torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
# Constant to control how frequently we print train loss
print_every = 100
print('using device:', device)
# Dataset split sizes used elsewhere in the notebook.
NUM_TOTAL = 1968
NUM_TRAIN = 1783
#add path as absolute path for root dir
im_size = 128
lung_dataset_train = ILDDataset(csv_file=r'C:/Users/Akrofi/Desktop/CS 231/Project/train_labels.csv',
    root_dir=r'C:/Users/Akrofi/Desktop/CS 231/Project/train',mask=True, train=True, resize=im_size)#, transform=transform)
lung_dataset_validation = ILDDataset(csv_file=r'C:/Users/Akrofi/Desktop/CS 231/Project/train_labels.csv',
    root_dir=r'C:/Users/Akrofi/Desktop/CS 231/Project/validation',mask=True, train=False, resize=im_size)#, transform=transform)
#add path as absolute path for root dir
lung_dataset_test = ILDDataset(csv_file=r'C:/Users/Akrofi/Desktop/CS 231/Project/test_labels.csv',
    root_dir=r'C:/Users/Akrofi/Desktop/CS 231/Project/test', mask=True, train=False, resize=im_size)#, transform=transform)
# Inverse-frequency weights for the three classes (presumably class
# proportions 35/18/47 - confirm against the label CSV).
weight0= 1/35
weight1= 1/18
weight2= 1/47
weights = []
# Assign each training sample its class weight for the weighted sampler.
for i in tnrange(1783):
    sample = lung_dataset_train[i]
    if sample[1] == 0:
        weights.append(weight0)
    elif sample[1] == 1:
        weights.append(weight1)
    else:
        weights.append(weight2)
# NOTE(review): rebinding `sampler` shadows the imported module, and
# torch.cuda.DoubleTensor requires CUDA even when device is 'cpu' - confirm.
sampler = sampler.WeightedRandomSampler(torch.cuda.DoubleTensor(weights), len(weights) )
loader_train = DataLoader(lung_dataset_train, batch_size= 16,
    sampler = sampler)
loader_val = DataLoader(lung_dataset_validation, batch_size=16,
    sampler = torch.utils.data.sampler.SubsetRandomSampler(range(190)))
loader_test = DataLoader(lung_dataset_test, batch_size=16, sampler = torch.utils.data.sampler.SubsetRandomSampler(range(370)))
#show datasample
# NOTE(review): index 374 lies outside the test sampler's range(370);
# verify the test set really has >= 375 samples.
sample = lung_dataset_test[374]
plt.imshow(sample[0], cmap='gray')
plt.show()
print("label: " + str(sample[1]))
def flatten(x):
    """Collapse every dimension of *x* after the batch axis into one vector."""
    batch_size = x.shape[0]
    # Merge the trailing C * H * W values into a single feature vector per sample.
    return x.view(batch_size, -1)
class Flatten(nn.Module):
    """Module wrapper that flattens each sample to a 1-D feature vector."""

    def forward(self, x):
        # Same computation as the module-level flatten(): keep the batch
        # dimension and merge all remaining dimensions.
        return x.view(x.shape[0], -1)
def get_model(freeze=False):
    """
    Build a pretrained ResNet-50 adapted to 1-channel input and 3 output classes.

    Args:
        freeze: if True, freeze the backbone parameters so only the fc head
            (created after the freeze loop) remains trainable.
    """
    num_classes = 3
    model = models.resnet50(pretrained=True)
    # Replace the average pool with a 1x1/stride-1 pool, leaving a larger
    # feature map for the fc layer (hence the 32768 input features below).
    model.avgpool = nn.AvgPool2d(1, stride=1)
    # Accept single-channel (grayscale) input instead of RGB.
    # NOTE(review): this randomly initialized conv1 is created *before* the
    # freeze loop, so with freeze=True it stays frozen at its random
    # initialization - confirm this is intended.
    model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=1, bias=False)
    if(freeze==True):
        for param in model.parameters():
            param.requires_grad = False
    # New classification head; created after the freeze loop so it is trainable.
    model.fc = nn.Linear(32768, num_classes)
    return model
##### From CS 231N
def check_accuracy(loader, model, train=False, val = False):
    """Evaluate `model` on `loader`.

    Returns:
        (accuracy, per-batch predicted class ids, per-batch true class ids).
    """
    predictedStore = []  # per-batch lists of predicted class ids
    solutionStore = []   # per-batch lists of ground-truth class ids
    if loader.dataset.train and train == True:
        print('Checking accuracy on training set')
    elif val:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    num_correct = 0
    num_samples = 0
    model.eval() # set model to evaluation mode
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU
            # Add the single (grayscale) channel dimension expected by conv1.
            x.unsqueeze_(1)
            y = y.to(device=device, dtype=torch.long)
            scores = model(x)
            # Predicted class = argmax over the class scores.
            _, preds = scores.max(1)
            predictedStore.append(preds.view(-1,).cpu().numpy().tolist())
            solutionStore.append(y.data.view(-1,).cpu().numpy().tolist())
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)
        acc = float(num_correct) / num_samples
        print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
    return acc,predictedStore,solutionStore
##### From CS 231N
def train(model, optimizer, epochs=1, overfit=False):
    """
    Train `model` on loader_train, keeping the epoch with the best
    validation accuracy.

    Inputs:
    - model: A PyTorch Module giving the model to train.
    - optimizer: An Optimizer object we will use to train the model
    - epochs: (Optional) A Python integer giving the number of epochs to train for
    - overfit: (Optional) If True, also report training-set accuracy each epoch.

    Returns: (best_model, predStoreTrain, solStoreTrain, predStoreVal, solStoreVal)
    taken from the epoch with the highest validation accuracy.
    """
    model = model.to(device=device)  # move the model parameters to CPU/GPU
    # Class-weighted loss to counter the class imbalance. Build the weight
    # tensor and criterion once, not once per minibatch as before.
    class_weights = torch.tensor([1/35, 1/18, 1/47], device=device, dtype=dtype)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    best_acc = 0
    # Seed the "best so far" state so the return statement cannot raise
    # NameError when validation accuracy never exceeds 0 (a latent bug in
    # the original, which also crashed whenever overfit=False and an epoch
    # improved, because predictedStoreTrain was never assigned).
    best_model = model
    predStoreTrain, solStoreTrain, predStoreVal, solStoreVal = [], [], [], []
    for e in range(epochs):
        for t, (x, y) in enumerate(loader_train):
            model.train()  # put model to training mode
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            # Add the single grayscale channel dimension expected by conv1.
            x.unsqueeze_(1)
            scores = model(x)
            loss = criterion(scores, y)
            # Zero out all of the gradients for the variables which the
            # optimizer will update.
            optimizer.zero_grad()
            # Backwards pass: compute the gradient of the loss with respect
            # to each parameter of the model.
            loss.backward()
            # Actually update the parameters of the model using the gradients
            # computed by the backwards pass.
            optimizer.step()
            print('Iteration %d, loss = %.4f' % (t, loss.item()))
        # Default to empty stores so the best-epoch bookkeeping below is
        # well-defined even when overfit=False.
        predictedStoreTrain, solutionStoreTrain = [], []
        if overfit:
            _, predictedStoreTrain, solutionStoreTrain = check_accuracy(loader_train, model, train=True)
        acc, predictedStoreVal, solutionStoreVal = check_accuracy(loader_val, model, val=True)
        if acc > best_acc:
            # Save the best model and the predictions from this epoch.
            best_acc = acc
            best_model = model
            predStoreTrain = predictedStoreTrain
            solStoreTrain = solutionStoreTrain
            predStoreVal = predictedStoreVal
            solStoreVal = solutionStoreVal
        print()
    return best_model, predStoreTrain, solStoreTrain, predStoreVal, solStoreVal
# From CS 231N
model1 = get_model(freeze = True)
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model1.parameters()), lr=0.0001, weight_decay= 1e-5)
best_model,predStoreTrain,solStoreTrain,predStoreVal,solStoreVal = train(model1, optimizer, epochs= 5, overfit=True)
#Check Test set
acc,predStoreTest,solStoreTest = check_accuracy(loader_test, best_model)
# From SciKit Documentation Page
import itertools
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: square confusion matrix as returned by sklearn's confusion_matrix.
        classes: axis tick labels, one per class, in matrix order.
        normalize: if True, convert each row to fractions of its row sum.
        title: figure title.
        cmap: matplotlib colormap for the image.
    """
    if normalize:
        # Row-normalize so each row (true class) sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Fractions with 2 decimals when normalized, raw integer counts otherwise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # Annotate each cell, switching text color for dark backgrounds.
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
##Actual Plots
# Flatten the per-batch prediction/label lists into single 1-D arrays.
y_pred_train = np.concatenate([np.array(i) for i in predStoreTrain])
y_train = np.concatenate([np.array(i) for i in solStoreTrain])
y_pred_val = np.concatenate([np.array(i) for i in predStoreVal])
y_val = np.concatenate([np.array(i) for i in solStoreVal])
y_pred_test = np.concatenate([np.array(i) for i in predStoreTest])
y_test = np.concatenate([np.array(i) for i in solStoreTest])
cnf_matrix1 = confusion_matrix(y_train, y_pred_train)
cnf_matrix2 = confusion_matrix(y_val, y_pred_val)
cnf_matrix3 = confusion_matrix(y_test, y_pred_test)
class_names = [0, 1, 2]
np.set_printoptions(precision=2)
# Plot raw-count confusion matrices. The original titles claimed the plots
# were "Normalized" even though normalize was left at its default False;
# the titles below describe what is actually drawn.
plt.figure()
plot_confusion_matrix(cnf_matrix1, classes=class_names,
                      title='Confusion matrix - Train')
plt.figure()
plot_confusion_matrix(cnf_matrix2, classes=class_names,
                      title='Confusion matrix - Val')
plt.figure()
plot_confusion_matrix(cnf_matrix3, classes=class_names,
                      title='Confusion matrix - Test')
plt.show()
# From SciKit Documentation Page
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# One-hot encode labels/predictions so per-class PR curves can be computed.
y_test = label_binarize(y_test, classes=[0, 1, 2])
y_pred_test = label_binarize(y_pred_test, classes=[0, 1, 2])
y_val = label_binarize(y_val, classes=[0, 1, 2])
y_pred_val = label_binarize(y_pred_val, classes=[0, 1, 2])
precision = dict()
recall = dict()
average_precision = dict()
n_classes = 3
# Per-class precision/recall on the validation set.
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(y_val[:, i],
                                                        y_pred_val[:, i])
    average_precision[i] = average_precision_score(y_val[:, i], y_pred_val[:, i])
# Compute the micro-averaged precision-recall curve and its area.
precision["micro"], recall["micro"], _ = precision_recall_curve(y_val.ravel(),
                                                                y_pred_val.ravel())
average_precision["micro"] = average_precision_score(y_val, y_pred_val,
                                                     average="micro")
# Plot the Precision-Recall curve for class 0.
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot the Precision-Recall curve for each class plus the micro-average.
plt.clf()
plt.plot(recall["micro"], precision["micro"],
         label='micro-average Precision-recall curve (area = {0:0.2f})'
               ''.format(average_precision["micro"]))
for i in range(n_classes):
    plt.plot(recall[i], precision[i],
             label='Precision-recall curve of class {0} (area = {1:0.2f})'
                   ''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
print(precision)
print(recall)
```
| github_jupyter |
KEGG
====
KEGG (<http://www.kegg.jp/>) is a database resource for understanding
high-level functions and utilities of the biological system, such as the
cell, the organism and the ecosystem, from molecular-level information,
especially large-scale molecular datasets generated by genome sequencing
and other high-throughput experimental technologies.
Please note that the KEGG parser implementation in Biopython is
incomplete. While the KEGG website indicates many flat file formats,
only parsers and writers for compound, enzyme, and map are currently
implemented. However, a generic parser is implemented to handle the
other formats.
Parsing KEGG records
--------------------
Parsing a KEGG record is as simple as using any other file format parser
in Biopython. (Before running the following codes, please open
http://rest.kegg.jp/get/ec:5.4.2.2 with your web browser and save it as
ec\_5.4.2.2.txt.)
```
# Fetch the enzyme flat-file record (EC 5.4.2.2) from the KEGG REST service
# to a local file (shell magic).
!wget http://rest.kegg.jp/get/ec:5.4.2.2 -O ec_5.4.2.2.txt
from Bio.KEGG import Enzyme
# Enzyme.parse yields one record object per entry in the flat file.
records = Enzyme.parse(open("ec_5.4.2.2.txt"))
record = list(records)[0]
record.classname
record.entry
```
The following section will show how to download the above enzyme using
the KEGG api as well as how to use the generic parser with data that
does not have a custom parser implemented.
Querying the KEGG API
---------------------
Biopython has full support for querying the KEGG API. Querying
all KEGG endpoints is supported; all methods documented by KEGG
(<http://www.kegg.jp/kegg/rest/keggapi.html>) are supported. The
interface has some validation of queries which follow rules defined on
the KEGG site. However, invalid queries which return a 400 or 404 must
be handled by the user.
First, here is how to extend the above example by downloading the
relevant enzyme and passing it through the Enzyme parser.
```
from Bio.KEGG import REST
from Bio.KEGG import Enzyme
# Download the same enzyme record through the KEGG API wrapper.
request = REST.kegg_get("ec:5.4.2.2")
# Use context managers so the file handles are closed deterministically;
# the original open(...).write(...) / Enzyme.parse(open(...)) left the
# handles to be closed by the garbage collector.
with open("ec_5.4.2.2.txt", 'w') as out_handle:
    out_handle.write(request.read().decode("utf-8"))
with open("ec_5.4.2.2.txt") as in_handle:
    records = Enzyme.parse(in_handle)
    record = list(records)[0]
record.classname
record.entry
```
Now, here's a more realistic example which shows a combination of
querying the KEGG API. This will demonstrate how to extract a unique set
of all human pathway gene symbols which relate to DNA repair. The steps
that need to be taken to do so are as follows. First, we need to get a
list of all human pathways. Secondly, we need to filter those for ones
which relate to "repair". Lastly, we need to get a list of all the gene
symbols in all repair pathways.
```
from Bio.KEGG import REST
# List all human (hsa) pathways.
# NOTE(review): the .decode() calls below assume .read() returned bytes -
# confirm against the installed Biopython version.
human_pathways = REST.kegg_list("pathway", "hsa").read()
human_pathways.decode("utf-8").split("\n")[0:5]
# Filter all human pathways for repair pathways
repair_pathways = []
for line in human_pathways.decode("utf-8").rstrip().split("\n"):
    # Each listing line is "<entry>\t<description>".
    entry, description = line.split("\t")
    if "repair" in description:
        repair_pathways.append(entry)
repair_pathways
# Get the genes for pathways and add them to a list
repair_genes = []
for pathway in repair_pathways:
    pathway_file = REST.kegg_get(pathway).read() # query and read each pathway
    # iterate through each KEGG pathway file, keeping track of which section
    # of the file we're in, only read the gene in each pathway
    current_section = None
    for line in pathway_file.decode("utf-8").rstrip().split("\n"):
        section = line[:12].strip() # section names are within 12 columns
        if not section == "":
            current_section = section
        if current_section == "GENE":
            # GENE lines look like "<id>  <symbol>; <description>".
            gene_identifiers, gene_description = line[12:].split("; ")
            gene_id, gene_symbol = gene_identifiers.split()
            # Deduplicate symbols while preserving first-seen order.
            if not gene_symbol in repair_genes:
                repair_genes.append(gene_symbol)
print("There are %d repair pathways and %d repair genes. The genes are:" % \
    (len(repair_pathways), len(repair_genes)))
print(", ".join(repair_genes))
```
The KEGG API wrapper is compatible with all endpoints. Usage is
essentially replacing all slashes in the url with commas and using that
list as arguments to the corresponding method in the KEGG module. Here
are a few examples from the api documentation
(<http://www.kegg.jp/kegg/docs/keggapi.html>).
/list/hsa:10458+ece:Z5100 -> REST.kegg_list(["hsa:10458", "ece:Z5100"])
/find/compound/300-310/mol_weight -> REST.kegg_find("compound", "300-310", "mol_weight")
/get/hsa:10458+ece:Z5100/aaseq -> REST.kegg_get(["hsa:10458", "ece:Z5100"], "aaseq")
| github_jupyter |
Copyright 2020 DeepMind Technologies Limited.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
[https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# Alchemy Environment and Analysis
This is an introduction to the environment described in the paper [**Alchemy: A structured task distribution for meta-reinforcement learning**](https://arxiv.org/pdf/2102.02926). Please see the paper for full details.
## Installation
Follow the instructions in the README.md to install dm_alchemy and dependencies. Run in jupyter or colab with a [local runtime](https://research.google.com/colaboratory/local-runtimes.html). If you use a virtual environment ensure that the python kernel for this virtual environment is being used for example by running `python -m ipykernel install --user --name=dm_alchemy` inside your virtual environment and selecting the kernel `dm_alchemy` in jupyter.
## 3d Environment
We create an instance of the 3d environment and inspect it.
```
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# dm_alchemy bundles the 3d (docker-served) environment plus the symbolic
# version of the task and the analysis helpers used throughout this notebook.
import dm_alchemy
from dm_alchemy import io
from dm_alchemy import symbolic_alchemy
from dm_alchemy import symbolic_alchemy_bots
from dm_alchemy import symbolic_alchemy_trackers
from dm_alchemy import symbolic_alchemy_wrapper
from dm_alchemy.encode import chemistries_proto_conversion
from dm_alchemy.encode import symbolic_actions_proto_conversion
from dm_alchemy.encode import symbolic_actions_pb2
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import unity_python_conversion
from dm_alchemy.types import utils
# Render at 240x200 rather than the 96x72 used for agent experiments so the
# image observations are easier to interpret by eye.
width, height = 240, 200
level_name = 'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck'
seed = 1023
settings = dm_alchemy.EnvironmentSettings(
    seed=seed, level_name=level_name, width=width, height=height)
# Connects to the docker container serving the 3d environment (see README).
env = dm_alchemy.load_from_docker(settings)
```
We can see that observations consist of:
* RGB_INTERLEAVED - This is the image that the agent can see. For our experiments we set the resolution to 96x72 but here we use 240x200 so that the image is easier to interpret.
* ACCELERATION, HAND_FORCE, HAND_IS_HOLDING, HAND_DISTANCE - These are proprioceptive observations.
* events - This is a variable sized array of protobufs containing information from the environment which can be used for analysis but should not be used by the agent.
```
env.observation_spec()
```
The actions available to the agent consist of looking left or right, up or down, moving left or right and backward or forward and actions for grabbing an object and moving it around.
```
env.action_spec()
```
Let's start a new episode and look at the image observations.
```
timestep = env.reset()
plt.figure()
plt.imshow(timestep.observation['RGB_INTERLEAVED'])
```
With this seed we have started far away from the table, let's take a few actions to get closer to it.
```
# Move closer to the table, then adjust the gaze along the left/right and
# down/up axes until the table is in view, and show the resulting frame.
for _ in range(38):
    timestep = env.step({'MOVE_BACK_FORWARD': 1.0})
for _ in range(6):
    timestep = env.step({'LOOK_LEFT_RIGHT': -1.0})
for _ in range(3):
    timestep = env.step({'LOOK_DOWN_UP': 1.0})
plt.figure()
plt.imshow(timestep.observation['RGB_INTERLEAVED'])
```
Now we can see the potions of different colours which can be used to transform stones to increase their value before placing them in the big white cauldron to get reward.
## Symbolic Environment
We can represent the same challenge in a symbolic form which eliminates the complex perceptual challenges, more difficult action space and longer timescales of the 3d environment.
```
env = symbolic_alchemy.get_symbolic_alchemy_level(level_name, seed=314)
```
Observations from the symbolic environment are concatenated together into a single array of length 39 with 5 dimensions for each stone:
* 3 perceptual features - colour, size and roundness. These can take values -1, 0 or 1 e.g. for small, medium and large.
* 1 reward. This takes values -1, -1/3, 1/3, 1 which correspond to actual rewards -3, -1, 1, 15.
* 1 dimension which shows if the stone has been used in the cauldron. This is 0 if the stone still exists 1 otherwise. If the stone is put into the cauldron all the other dimensions are set to default values.
2 dimensions for each potion:
* 1 for colour. This takes values -1, -2/3, -1/3, 0, 1/3, 2/3 for colours red, green, orange, yellow, pink, turquoise.
* 1 dimension which shows if the potion has been used by placing a stone in it. This is 0 if the potion is still full, 1 otherwise. If the potion is used the colour dimension is set to a default value.
```
env.observation_spec()
```
There are 40 possible actions in the symbolic environment which are selected using an int between 0 and 39.
These are:
* 0 represents doing nothing
* The remaining integers represent putting a stone into a potion or into the cauldron, i.e. s * (num potion slots (= 12) + 1) + 1 represents putting the stone in slot s (from 0 - 2) into the cauldron and s * (num potion slots (= 12) + 1) + 2 + p represents putting stone s into the potion in slot p (from 0 - 11). For example putting stone 1 into potion 5 is represented by action 20.
```
env.action_spec()
```
For example with this seed we start with stone 0 being purple, small and halfway between round and pointy and its reward indicator shows a -1. Potion 0 is turquoise.
```
timestep = env.reset()
timestep.observation['symbolic_obs']
```
If we put stone 0 into potion 0 and look at the resulting observation we can see that stone 0 has become purple, large and halfway between round and pointy and its reward indicator shows -3. From this an agent which understands the task and the generative process can deduce things like:
* The turquoise potion makes stones large.
* The pink potion makes stones small (because it is always paired with the turquoise potion).
* The blue, small, halfway between round and pointy stone has maximum value (because it is in the opposite corner to the stone we have observed to have the minimum possible value).
```
timestep = env.step(2)
timestep.observation['symbolic_obs']
```
## Combined environment
We can create an environment which is a combination of the 3d environment and the symbolic environment by using a wrapper around the 3d environment which listens to the events output and uses them to initialise and take actions in a symbolic environment to keep it in sync with the 3d environment. In this setting the agent takes actions in the 3d environment and receives observations from the 3d environment but can also optionally receive observations that are only produced by the symbolic environment.
```
seed = 1023
settings = dm_alchemy.EnvironmentSettings(
    seed=seed, level_name=level_name, width=width, height=height)
env3d = dm_alchemy.load_from_docker(settings)
# Wrap the 3d environment: the wrapper listens to the 3d env's event output
# and mirrors it in a synchronised symbolic environment, optionally exposing
# the symbolic observation alongside the pixels.
env = symbolic_alchemy_wrapper.SymbolicAlchemyWrapper(
    env3d, level_name=level_name, see_symbolic_observation=True)
```
Using the same seed as before we create the environment and go and look at the table.
```
timestep = env.reset()
plt.figure()
plt.imshow(timestep.observation['RGB_INTERLEAVED'])
# Same navigation as before (slightly different step counts) to bring the
# table into view in the wrapped environment.
for _ in range(37):
    timestep = env.step({'MOVE_BACK_FORWARD': 1.0})
for _ in range(4):
    timestep = env.step({'LOOK_LEFT_RIGHT': -1.0})
for _ in range(3):
    timestep = env.step({'LOOK_DOWN_UP': 1.0})
plt.figure()
plt.imshow(timestep.observation['RGB_INTERLEAVED'])
```
Although a few potions are occluded, if we look at the picture above we can see that the symbolic observations match. i.e. we have:
* 2 stones which are blue, small with medium roundness
* 1 stone which is midway between blue and purple, large and pointy.
The potion colours also match.
```
print('Stones:')
# Stones occupy the first 15 entries of the observation: 5 values per stone
# (3 perceptual features, reward indicator, used-in-cauldron flag).
for i in range(0, 15, 5):
    print(timestep.observation['symbolic_obs'][i:i + 5])
print('Potions:')
# Potions occupy entries 15-38: 2 values per potion (colour, used flag).
for i in range(15, 39, 2):
    # Colour is encoded as one of -1, -2/3, ..., 2/3; rescale to an integer
    # index 0-5 and map it back to a perceived potion colour name.
    potion_index = int(round(timestep.observation['symbolic_obs'][i] * 3 + 3))
    potion = stones_and_potions.perceived_potion_from_index(potion_index)
    print(unity_python_conversion.POTION_COLOUR_AT_PERCEIVED_POTION[potion])
```
## Additional Observations
With the symbolic or combined environment we can get additional observations of the ground truth chemistry or a belief state over the chemistry.
### Ground Truth Chemistry
The ground truth chemistry observation consists of 4 parts:
* Graph - this consists of 12 entries for the 12 edges on the graph, each entry is set to 0 if there is no edge or 1 if there is an edge.
* The potion map - this is a 1 hot vector over the 6 possible assignments of potion colour pairs to latent space dimensions (called the dimension map) and then a 3 dimensional direction map (i.e. we assume a canonical direction for each potion colour pair and this value will be 1 if this is the direction in latent space or 0 if it is the reverse, e.g. if the red potion increases the value of the latent variable and the green potion decreases it this entry is set to 1, if the green potion increases the value of the latent variable and the red potion decreases this is set to 0).
* Stone map - this is another 3 dimensional direction map (e.g. if a large size stone has a higher latent variable than a small size stone then the entry is set to 1, otherwise it is set to 0 etc).
* Rotation - this is a 1 hot vector over the 4 possible rotations (no rotation or 45 degrees around x, y or z).
In total this is a 28 dimensional observation (12 + 6 + 3 + 3 + 4).
```
seed = 789
# Request an extra observation named 'input_chem' that contains the ground
# truth chemistry of the episode.
env = symbolic_alchemy.get_symbolic_alchemy_level(
    level_name, seed=seed,
    see_chemistries={
        'input_chem': utils.ChemistrySeen(
            content=utils.ElementContent.GROUND_TRUTH)
    })
env.observation_spec()
timestep = env.reset()
input_chem = timestep.observation['input_chem']
# The 28-dim chemistry observation decomposes as: 12 graph edges, 6 + 3 for
# the potion map, 3 for the stone map and a 4-way rotation one-hot.
print('Graph:', input_chem[:12])
print('Potion map dimension map:', input_chem[12:18])
print('Potion map direction map:', input_chem[18:21])
print('Stone map direction map:', input_chem[21:24])
print('Rotation:', input_chem[24:])
```
### Belief state
We can also maintain a belief state over the underlying chemistry. This is a probability distribution over all possible chemistries which could be present in the current episode.
The belief state is updated to perfectly incorporate all information available to the agent both through:
* Infinite training on the task distribution.
* Observations within the current episode.
The first form of information means that with no other information the belief state starts as a prior distribution over all possible chemistries which accurately reflects the generative distribution.
The second form of information means that the belief state is correctly updated to eliminate chemistries which are inconsistent with observations during the episode and rescale the resulting distribution.
As soon as the agent is presented with a set of stones, it can rule out some chemistries. For example, some rotations will not include the stones that are present and the reward indicator on each stone can eliminate some of the possible stone maps.
As the agent takes actions in the environment the belief state is further updated. For example, if a small stone becomes large when put into a green potion all chemistries but those where red and green potions are mapped to the size variable and the green potion increases the size can be eliminated.
The belief state observation is a relaxation of the ground truth observation in which the values from the ground truth observation are used if the belief state assigns all probability to that possibility.
* The graph - if the presence of an edge is unknown then the corresponding entry is set to 0.5.
* The potion map - for the dimension map if the correct assignment is unknown then all possibilities are set to 0.5, for the direction map if the true direction for that dimension is unknown then we set the entry to 0.5.
* The stone map - this is another direction map so this works the same way as in the potion map.
* Rotation - if multiple rotations are possible then the entry corresponding to each possible rotation is set to 0.5.
```
seed = 22
# As above, but the extra observation is the belief state over chemistries.
# precomputed=level_name points at precomputed belief-state data for this
# level — presumably to avoid recomputing the distribution from scratch.
env = symbolic_alchemy.get_symbolic_alchemy_level(
    level_name, seed=seed,
    see_chemistries={
        'input_chem': utils.ChemistrySeen(
            content=utils.ElementContent.BELIEF_STATE,
            precomputed=level_name)
    })
```
Before any actions are taken the presence of each edge in the graph is unknown and nothing is known about the potion map.
However, the stones present narrow down the possible rotations and stone maps. The stones present are:
* blue, small, medium roundness with reward 1
* purple, small, medium roundness with reward 1
* blue, large, medium roundness with reward -1
There cannot be medium roundness stones in the unrotated chemistry or the chemistry which is rotated around the roundness axis so these are ruled out. The chemistry which is rotated around the colour axis can also be ruled out because the first and second stones differ only in colour and have the same reward. The chemistry which is rotated around the colour axis has 1 latent variable aligned with change in colour so 2 stones which are the same size and roundness but different colour must have different reward. Therefore, the rotation is known to be around the size axis.
Since the second and third stones differ only in size and the small stone has higher reward we can also determine in the stone map that increasing size decreases the value of the associated latent variable.
```
timestep = env.reset()
print('Stones:')
# 5 observation entries per stone (features, reward, used flag).
for i in range(0, 15, 5):
    print(timestep.observation['symbolic_obs'][i:i + 5])
# Before any action: unknown graph edges and potion-map entries show the
# "unknown" value 0.5, while the stones on the table already constrain the
# rotation and part of the stone map (see discussion above).
input_chem = timestep.observation['input_chem']
print('Graph:', input_chem[:12])
print('Potion map dimension map:', input_chem[12:18])
print('Potion map direction map:', input_chem[18:21])
print('Stone map direction map:', input_chem[21:24])
print('Rotation:', input_chem[24:])
```
When one of the stones is transformed by a potion the belief state updates to reflect that the corresponding edge in the graph is known to exist.
The potion dimension maps are narrowed down to 2 possibilities since this action has shown us the assignment of one pair of potions to one latent space axis but the other 2 potion pairs could be assigned to either of the remaining 2 latent space axes.
The potion direction map is updated on one dimension to reflect whether the potion used changed the associated latent variable in the canonical direction assigned to the pair of potions (e.g. that for the green and red pair that green is positive and red is negative).
```
# Action 3 = putting stone 0 into potion 1 (action = s * 13 + 2 + p).
timestep = env.step(3)
print('Stones:')
for i in range(0, 15, 5):
    print(timestep.observation['symbolic_obs'][i:i + 5])
# After the transformation the belief state knows one graph edge exists and
# has narrowed the potion dimension and direction maps accordingly.
input_chem = timestep.observation['input_chem']
print('Graph:', input_chem[:12])
print('Potion map dimension map:', input_chem[12:18])
print('Potion map direction map:', input_chem[18:21])
print('Stone map direction map:', input_chem[21:24])
print('Rotation:', input_chem[24:])
```
## Symbolic Alchemy Trackers
We can add trackers to the symbolic environment which have callbacks which are executed when the environment is reset or a step is taken.
We provide implementations of trackers which:
* Keep a record of the symbolic actions that occurred in a matrix
* Keep track of the stones and potions generated each trial
* Keep track of the number of potions and stones used
* Keep track of the score achieved
* Update the belief state
* Track the average value of stones put into the cauldron
* Track the average improvement of the value of stones before they are put into the cauldron
* Track the frequency of putting negative stones into the cauldron
* Track the frequency of leaving stones with a specified reward (e.g. +1) on the table at the end of a trial
* Track the frequency of actions where a stone is put into a potion which has no effect on the stone
We initialise the environment with the same seed as above and add a few of the above trackers. We then put the first stone into the first 2 potions and then into the cauldron and then end the trial by taking the "no operation" action for the rest of the trial.
```
seed = 22
env = symbolic_alchemy.get_symbolic_alchemy_level(level_name, seed=seed)
# Register trackers before reset so their callbacks observe the whole episode.
env.add_trackers({
    symbolic_alchemy_trackers.ItemGeneratedTracker.NAME:
        symbolic_alchemy_trackers.ItemGeneratedTracker(),
    symbolic_alchemy_trackers.ItemsUsedTracker.NAME:
        symbolic_alchemy_trackers.ItemsUsedTracker(),
    symbolic_alchemy_trackers.ScoreTracker.NAME:
        symbolic_alchemy_trackers.ScoreTracker(env._reward_weights),
})
timestep = env.reset()
# Put stone 0 through the first two potions, then into the cauldron.
timestep = env.step_slot_based_action(utils.SlotBasedAction(
    stone_ind=0, potion_ind=0))
timestep = env.step_slot_based_action(utils.SlotBasedAction(
    stone_ind=0, potion_ind=1))
timestep = env.step_slot_based_action(utils.SlotBasedAction(
    stone_ind=0, cauldron=True))
# Take no-op actions until the current trial ends.
_ = env.end_trial()
```
The returns from the trackers show the potions and stones generated and show that 1 stone and 2 potions were used and that the score achieved was -1 (because that was the value of the stone put into the cauldron).
```
episode_returns = env.episode_returns()
print(episode_returns['items_generated'].trials[0])
print(episode_returns['items_used']['per_trial'][0])
print(episode_returns['score']['per_trial'][0])
```
## Symbolic Bots
We provide the following hand-coded policies for acting with the symbolic environment:
* Random action bot - this bot randomly selects any stone which does not have the maximum possible value and puts it into randomly selected potions until either all potions are used or all stones have the maximum value. Then it puts any positive stones into the cauldron.
* Search oracle bot - this bot is given the chemistry for the episode and exhaustively searches all possible actions to maximise the reward it will obtain.
* Ideal observer bot - this bot maintains a belief state (as described above) and exhaustively searches over all possible actions and all possible outcomes of those actions using the belief state to track the probability of each outcome to maximise the expected reward it will obtain in the current trial.
* Replay bot - this bot takes a record of actions taken and takes them again. This can be used to replay the actions an agent or another bot took to perform additional tracking.
```
env = symbolic_alchemy.get_symbolic_alchemy_level(level_name)
env.add_trackers({
    symbolic_alchemy_trackers.ScoreTracker.NAME:
        symbolic_alchemy_trackers.ScoreTracker(env._reward_weights)
})
# Run one full episode with the hand-coded random-action policy and report
# the score achieved in each trial.
bot = symbolic_alchemy_bots.RandomActionBot(env._reward_weights, env)
bot_returns = bot.run_episode()
bot_returns['score']['per_trial']
```
## Evaluation Episodes
We have released a set of 1000 episodes on which we evaluated our agents. Each episode consists of a chemistry and a set of stones and potions generated at the start of each trial.
```
chems = chemistries_proto_conversion.load_chemistries_and_items(
'chemistries/perceptual_mapping_randomized_with_random_bottleneck/chemistries')
```
To run symbolic alchemy on these evaluation episodes the function `get_symbolic_alchemy_fixed` can be used.
```
# Build a symbolic environment that replays a fixed evaluation episode
# (chemistry plus the items generated each trial) instead of sampling one.
env = symbolic_alchemy.get_symbolic_alchemy_fixed(
    chemistry=chems[0][0], episode_items=chems[0][1])
env.add_trackers({
    symbolic_alchemy_trackers.ItemGeneratedTracker.NAME:
        symbolic_alchemy_trackers.ItemGeneratedTracker()})
env.reset()
env.end_trial()
# Sanity check: the items generated in trial 0 and the chemistry must match
# what is stored for this evaluation episode.
first_trial_items = env.episode_returns()[symbolic_alchemy_trackers.ItemGeneratedTracker.NAME].trials[0]
assert first_trial_items == chems[0][1].trials[0]
assert env._chemistry == chems[0][0]
```
To run the 3d environment on the evaluation episodes we have included them as named levels alchemy/evaluation_episodes/{episode_number}.
```
# The evaluation episodes are also exposed as named levels for the 3d env.
settings = dm_alchemy.EnvironmentSettings(
    seed=seed, level_name='alchemy/evaluation_episodes/0', width=width, height=height)
env3d = dm_alchemy.load_from_docker(settings)
env = symbolic_alchemy_wrapper.SymbolicAlchemyWrapper(
    env3d, level_name=level_name, see_symbolic_observation=True)
env.env_symbolic.add_trackers({
    symbolic_alchemy_trackers.ItemGeneratedTracker.NAME:
        symbolic_alchemy_trackers.ItemGeneratedTracker()})
env.reset()
# We need to take a step or 2 to ensure that the trial has started.
for _ in range(2):
    env.step({})
# Let the symbolic environment trial end now so we can test the items generated were as expected.
env.env_symbolic.end_trial()
first_trial_items = env.env_symbolic.episode_returns()[symbolic_alchemy_trackers.ItemGeneratedTracker.NAME].trials[0]
# Compare as sets of latent items — presumably because the slot ordering of
# items from the 3d env can differ from the stored episode; confirm.
first_trial_potions = {p.latent_potion() for p in first_trial_items.potions}
first_trial_stones = {s.latent_stone() for s in first_trial_items.stones}
chem_first_trial_potions = {p.latent_potion() for p in chems[0][1].trials[0].potions}
chem_first_trial_stones = {s.latent_stone() for s in chems[0][1].trials[0].stones}
assert first_trial_potions == chem_first_trial_potions
assert first_trial_stones == chem_first_trial_stones
assert env.env_symbolic._chemistry == chems[0][0]
```
Whilst you can run the ideal observer and search oracle yourself these bots (particularly the ideal observer) are slow on full size trials of 12 potions and 3 stones, due to the size of the exhaustive search performed. However, we have included in the release the symbolic actions taken on the evaluation episodes. So you can perform analysis and, for example, compare agent actions to those of the ideal observer.
```
# Load the ideal observer's recorded symbolic actions for the evaluation set.
serialized = io.read_proto('agent_events/ideal_observer')
proto = symbolic_actions_pb2.EvaluationSetEvents.FromString(serialized)
ideal_observer_events = symbolic_actions_proto_conversion.proto_to_evaluation_set_events(proto)
env = symbolic_alchemy.get_symbolic_alchemy_fixed(
    chemistry=chems[0][0], episode_items=chems[0][1])
env.add_trackers({
    symbolic_alchemy_trackers.ScoreTracker.NAME:
        symbolic_alchemy_trackers.ScoreTracker(env._reward_weights)})
# Replay the recorded actions on evaluation episode 0 and report the
# per-trial score obtained.
bot = symbolic_alchemy_bots.ReplayBot(ideal_observer_events[0], env)
episode_results = bot.run_episode()
episode_results['score']['per_trial']
```
## Plotting performance
We can compare agent's performance on the 1000 evaluation episodes to the ideal observer, search oracle and random action bot. New agents can also be added to these performance plots by simply running on the 1000 evaluation episodes with `AddMatrixEventTracker` added to the environment. The events for 3 task settings are included in the repository. Events for agents run on other settings can be downloaded at https://storage.googleapis.com/dm-alchemy/agent_events.tar.gz.
```
# Map each agent name to its recorded-events file and the colour used for its
# histogram. Keeping the colour alongside the file avoids repeating the list
# of agents (and risking the two lists drifting apart) when plotting below.
agent_specs = [('ideal_observer', 'agent_events/ideal_observer', 'blue'),
               ('search_oracle', 'agent_events/search_oracle', 'red'),
               ('baseline', 'agent_events/baseline', 'orange'),
               ('belief_state_predict', 'agent_events/belief_state_predict', 'yellow'),
               ('ground_truth_predict', 'agent_events/ground_truth_predict', 'purple')]
loaded_events = {}
colours = {}
for name, events_file, colour in agent_specs:
    serialized = io.read_proto(events_file)
    proto = symbolic_actions_pb2.EvaluationSetEvents.FromString(serialized)
    loaded_events[name] = symbolic_actions_proto_conversion.proto_to_evaluation_set_events(proto)
    colours[name] = colour
# The random action bot is cheap enough to run live on all evaluation
# episodes, recording its actions with an AddMatrixEventTracker.
random_action_events = []
for chem, items in chems:
    env = symbolic_alchemy.get_symbolic_alchemy_fixed(
        chemistry=chem, episode_items=items)
    env.add_trackers({
        symbolic_alchemy_trackers.AddMatrixEventTracker.NAME:
            symbolic_alchemy_trackers.AddMatrixEventTracker()})
    bot = symbolic_alchemy_bots.RandomActionBot(env._reward_weights, env)
    episode_results = bot.run_episode()
    random_action_events.append(episode_results['matrix_event']['event_tracker'])
loaded_events['random_action'] = random_action_events
colours['random_action'] = 'green'
# Replay each agent's recorded actions with a ScoreTracker attached to
# compute the per-episode reward, then plot one histogram per agent.
plt.figure()
for name, events in loaded_events.items():
    scores = []
    for (chem, items), ep_events in zip(chems, events):
        env = symbolic_alchemy.get_symbolic_alchemy_fixed(
            chemistry=chem, episode_items=items)
        env.add_trackers({
            symbolic_alchemy_trackers.ScoreTracker.NAME:
                symbolic_alchemy_trackers.ScoreTracker(env._reward_weights)})
        bot = symbolic_alchemy_bots.ReplayBot(ep_events, env)
        episode_results = bot.run_episode()
        scores.append(np.sum(episode_results['score']['per_trial']))
    sns.histplot(scores, label=name, bins=12, kde=True, edgecolor=None,
                 stat='density', color=colours[name])
plt.xlabel('Episode reward')
plt.ylabel('Proportion of episodes')
plt.title('Histogram of strategies rewards')
plt.legend()
plt.show()
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Forecasting using the Energy Demand Dataset**_
## Contents
1. [Introduction](#introduction)
1. [Setup](#setup)
1. [Data and Forecasting Configurations](#data)
1. [Train](#train)
1. [Generate and Evaluate the Forecast](#forecast)
Advanced Forecasting
1. [Advanced Training](#advanced_training)
1. [Advanced Results](#advanced_results)
# Introduction<a id="introduction"></a>
In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is to predict the energy demand for the next 48 hours based on historic time-series data.
If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.
In this notebook you will learn how to:
1. Creating an Experiment using an existing Workspace
1. Configure AutoML using 'AutoMLConfig'
1. Train the model using AmlCompute
1. Explore the engineered features and results
1. Generate the forecast and compute the out-of-sample accuracy metrics
1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features
1. Run and explore the forecast with lagging features
# Setup<a id="setup"></a>
```
import logging
import os
import warnings
from datetime import datetime

from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

# Squash warning messages for cleaner output in the notebook.
# Use the supported filter API instead of monkey-patching
# warnings.showwarning, which bypasses the warnings filtering machinery and
# is easily clobbered by any other code that resets the hook.
warnings.filterwarnings('ignore')

# Imported after the filter is installed so import-time warnings from the
# azureml SDK are suppressed as well.
import azureml.core
from azureml.core import Experiment, Workspace, Dataset
from azureml.train.automl import AutoMLConfig
```
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
```
print("This notebook was created using version 1.32.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
# Connect to the AzureML workspace described by the local config file.
ws = Workspace.from_config()

# choose a name for the run history container in the workspace
experiment_name = 'automl-forecasting-energydemand'
# # project folder
# project_folder = './sample_projects/automl-forecasting-energy-demand'
experiment = Experiment(ws, experiment_name)

# Summarise the workspace / experiment configuration as a one-row table.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name
# Use None rather than -1 to mean "no column width limit": -1 is deprecated
# since pandas 1.0 and rejected by pandas 2.x.
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```
## Create or Attach existing AmlCompute
A compute target is required to execute a remote Automated ML run.
[Azure Machine Learning Compute](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) is a managed-compute infrastructure that allows the user to easily create a single or multi-node compute. In this tutorial, you create AmlCompute as your training compute resource.
#### Creation of AmlCompute takes approximately 5 minutes.
If the AmlCompute with that name is already in your workspace this code will skip the creation process.
As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your cluster.
amlcompute_cluster_name = "energy-cluster"
# Verify that cluster does not exist already; reuse it if it does, otherwise
# provision a new AmlCompute cluster (creation takes ~5 minutes).
try:
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',
                                                           max_nodes=6)
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
# Block until the cluster is provisioned and ready to accept runs.
compute_target.wait_for_completion(show_output=True)
```
# Data<a id="data"></a>
We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency.
With Azure Machine Learning datasets you can keep a single copy of data in your storage, easily access data during model training, share data and collaborate with other users. Below, we will upload the dataset and create a [tabular dataset](https://docs.microsoft.com/bs-latn-ba/azure/machine-learning/service/how-to-create-register-datasets#dataset-types) to be used for training and prediction.
Let's set up what we know about the dataset.
<b>Target column</b> is what we want to forecast.<br></br>
<b>Time column</b> is the time axis along which to predict.
The other columns, "temp" and "precip", are implicitly designated as features.
```
target_column_name = 'demand'
time_column_name = 'timeStamp'
dataset = Dataset.Tabular.from_delimited_files(path = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv").with_timestamp_columns(fine_grain_timestamp=time_column_name)
dataset.take(5).to_pandas_dataframe().reset_index(drop=True)
```
The NYC Energy dataset is missing energy demand values for all datetimes later than August 10th, 2017 5AM. Below, we trim the rows containing these missing values from the end of the dataset.
```
# Cut off the end of the dataset due to large number of nan values
# NOTE(review): the text above says demand values are missing after
# August 10th, 2017 5AM, but this cutoff is October 10th (month=10) —
# confirm which boundary is intended.
dataset = dataset.time_before(datetime(2017, 10, 10, 5))
```
## Split the data into train and test sets
The first split we make is into train and test sets. Note that we are splitting on time. Data before and including August 8th, 2017 5AM will be used for training, and data after will be used for testing.
```
# split into train based on time
# Training data: everything up to and including 2017-08-08 05:00.
train = dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)
train.to_pandas_dataframe().reset_index(drop=True).sort_values(time_column_name).tail(5)
# split into test based on time
# Test data: the following 48 hours, 2017-08-08 06:00 to 2017-08-10 05:00.
test = dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))
test.to_pandas_dataframe().reset_index(drop=True).head(5)
```
### Setting the maximum forecast horizon
The forecast horizon is the number of periods into the future that the model should predict. It is generally recommended that users set forecast horizons to less than 100 time periods (i.e. less than 100 hours in the NYC energy example). Furthermore, **AutoML's memory use and computation time increase in proportion to the length of the horizon**, so consider carefully how this value is set. If a long horizon forecast really is necessary, consider aggregating the series to a coarser time scale.
Learn more about forecast horizons in our [Auto-train a time-series forecast model](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-auto-train-forecast#configure-and-run-experiment) guide.
In this example, we set the horizon to 48 hours.
```
forecast_horizon = 48
```
## Forecasting Parameters
To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.
|Property|Description|
|-|-|
|**time_column_name**|The name of your time column.|
|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|
|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.
# Train<a id="train"></a>
Instantiate an AutoMLConfig object. This config defines the settings and data used to run the experiment. We can provide extra configurations within 'automl_settings', for this forecasting task we add the forecasting parameters to hold all the additional forecasting parameters.
|Property|Description|
|-|-|
|**task**|forecasting|
|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|
|**blocked_models**|Models in blocked_models won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|
|**experiment_timeout_hours**|Maximum amount of time in hours that the experiment take before it terminates.|
|**training_data**|The training data to be used within the experiment.|
|**label_column_name**|The name of the label column.|
|**compute_target**|The remote compute for training.|
|**n_cross_validations**|Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way.|
|**enable_early_stopping**|Flag to enable early termination if the score is not improving in the short term.|
|**forecasting_parameters**|A class holds all the forecasting related parameters.|
This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results.
```
from azureml.automl.core.forecasting_parameters import ForecastingParameters
# Forecast-specific settings: the time axis column, the horizon to predict,
# and the series frequency ('H' = hourly, a pandas offset alias).
forecasting_parameters = ForecastingParameters(
    time_column_name=time_column_name,
    forecast_horizon=forecast_horizon,
    freq='H' # Set the forecast frequency to be hourly
)
# AutoML configuration for the forecasting task; see the parameter table above.
automl_config = AutoMLConfig(task='forecasting',
                             primary_metric='normalized_root_mean_squared_error',
                             blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'],
                             experiment_timeout_hours=0.3,
                             training_data=train,
                             label_column_name=target_column_name,
                             compute_target=compute_target,
                             enable_early_stopping=True,
                             n_cross_validations=3,
                             verbosity=logging.INFO,
                             forecasting_parameters=forecasting_parameters)
```
Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while.
One may specify `show_output = True` to print currently running iterations to the console.
```
# Submit the AutoML experiment to the remote compute and block until it finishes.
remote_run = experiment.submit(automl_config, show_output=False)
remote_run.wait_for_completion()
```
## Retrieve the Best Model
Below we select the best model from all the training iterations using get_output method.
```
# Select the best run and fitted pipeline from all training iterations.
best_run, fitted_model = remote_run.get_output()
fitted_model.steps
```
## Featurization
You can access the engineered feature names generated in time-series featurization.
```
# List the engineered feature names produced by time-series featurization.
fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()
```
### View featurization summary
You can also see what featurization steps were performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:
+ Raw feature name
+ Number of engineered features formed out of this raw feature
+ Type detected
+ If feature was dropped
+ List of feature transformations for the raw feature
```
# Get the featurization summary as a list of JSON
featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()
# View the featurization summary as a pandas dataframe
# (one row per raw feature: type detected, dropped flag, transformations).
pd.DataFrame.from_records(featurization_summary)
```
# Forecasting<a id="forecast"></a>
Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.
The inference will run on a remote compute. In this example, it will re-use the training compute.
```
# Separate experiment used for batch inference on the test dataset.
test_experiment = Experiment(ws, experiment_name + "_inference")
```
### Retrieving forecasts from the model
We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and executed on the remote compute.
```
from run_forecast import run_remote_inference
# Submit the test set to the best model on the (reused) training compute.
remote_run_infer = run_remote_inference(test_experiment=test_experiment,
                                        compute_target=compute_target,
                                        train_run=best_run,
                                        test_dataset=test,
                                        target_column_name=target_column_name)
remote_run_infer.wait_for_completion(show_output=False)
# download the inference output file to the local machine
remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')
```
### Evaluate
To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, including the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).
```
# load forecast data frame
fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])
fcst_df.head()
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from matplotlib import pyplot as plt
# use automl metrics module to score all scalar regression metrics at once
scores = scoring.score_regression(
    y_test=fcst_df[target_column_name],
    y_pred=fcst_df['predicted'],
    metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
print("[Test data scores]\n")
for key, value in scores.items():
    print('{}: {:.3f}'.format(key, value))
# Plot outputs
%matplotlib inline
# Actual-vs-predicted scatter; the ground truth (green) lies on the diagonal.
test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')
test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
```
# Advanced Training <a id="advanced_training"></a>
We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation.
### Using lags and rolling window features
Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `forecast_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.
This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the iteration_timeout_minutes parameter value to get results.
```
# Lag and rolling-window featurization: the model is no longer horizon-less,
# so forecast_horizon now also affects training, not just CV splitting.
advanced_forecasting_parameters = ForecastingParameters(
    time_column_name=time_column_name, forecast_horizon=forecast_horizon,
    target_lags=12, target_rolling_window_size=4
)
# NOTE(review): 'ExtremeRandomTrees' appears twice in this blocked_models list.
automl_config = AutoMLConfig(task='forecasting',
                             primary_metric='normalized_root_mean_squared_error',
                             blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor','ExtremeRandomTrees', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases.
                             experiment_timeout_hours=0.3,
                             training_data=train,
                             label_column_name=target_column_name,
                             compute_target=compute_target,
                             enable_early_stopping = True,
                             n_cross_validations=3,
                             verbosity=logging.INFO,
                             forecasting_parameters=advanced_forecasting_parameters)
```
We now start a new remote run, this time with lag and rolling window featurization. AutoML applies featurizations in the setup stage, prior to iterating over ML models. The full training set is featurized first, followed by featurization of each of the CV splits. Lag and rolling window features introduce additional complexity, so the run will take longer than in the previous example that lacked these featurizations.
```
# Submit the advanced (lags + rolling window) configuration and wait for it.
advanced_remote_run = experiment.submit(automl_config, show_output=False)
advanced_remote_run.wait_for_completion()
```
### Retrieve the Best Model
```
# Best run and fitted pipeline from the advanced training run.
best_run_lags, fitted_model_lags = advanced_remote_run.get_output()
```
# Advanced Results<a id="advanced_results"></a>
Here we run inference with the advanced model that uses lag and rolling-window features, and evaluate its forecasts in the same way as for the simpler model above.
```
# Batch inference and evaluation for the advanced (lag-featurized) model,
# mirroring the earlier evaluation of the simple model.
test_experiment_advanced = Experiment(ws, experiment_name + "_inference_advanced")
advanced_remote_run_infer = run_remote_inference(test_experiment=test_experiment_advanced,
                                                 compute_target=compute_target,
                                                 train_run=best_run_lags,
                                                 test_dataset=test,
                                                 target_column_name=target_column_name,
                                                 inference_folder='./forecast_advanced')
advanced_remote_run_infer.wait_for_completion(show_output=False)
# download the inference output file to the local machine
advanced_remote_run_infer.download_file('outputs/predictions.csv', 'predictions_advanced.csv')
fcst_adv_df = pd.read_csv('predictions_advanced.csv', parse_dates=[time_column_name])
fcst_adv_df.head()
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from matplotlib import pyplot as plt
# use automl metrics module
scores = scoring.score_regression(
    y_test=fcst_adv_df[target_column_name],
    y_pred=fcst_adv_df['predicted'],
    metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
print("[Test data scores]\n")
for key, value in scores.items():
    print('{}: {:.3f}'.format(key, value))
# Plot outputs
%matplotlib inline
# Actual-vs-predicted scatter; the ground truth (green) lies on the diagonal.
test_pred = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df['predicted'], color='b')
test_test = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
```
| github_jupyter |
```
# Imports and experiment configuration for the KS sparse-regression study.
%pylab inline
import numpy as np
from scipy.integrate import odeint
import itertools
from Oracle_Training import *
import json
from SparseARD import*
np.random.seed(0)  # fixed seed for reproducible subsampling and noise
retrain = False  # if False, load previously saved results instead of retraining
noise_percent = 0.1  # additive noise level, as a percentage of std(u)
n_trials = 10  # number of random-subsampling trials
n_sample = 2500  # number of (space, time) points sampled per trial
tol = 1e-8 # tolerance for ARD algorithm
verbose = True
# ETDRK4 solver for the Kuramoto-Sivashinsky equation.
# Translated into Python from AK Kassam and LN Trefethen, July 2002
from scipy.fftpack import fft, ifft, fftfreq
# Spatial grid and IC
n = 512   # number of spatial grid points
m = 1024  # target number of stored snapshots
x = 32*np.pi*np.linspace(0,1,n+1)[:-1]  # periodic grid on [0, 32*pi)
u = np.cos(x/8)*(1+np.sin(x/16))        # initial condition
v = fft(u)                              # solution in Fourier space
# Precompute various ETDRK4 scalar quantities:
h = 0.01  # time step
k = np.array(np.concatenate([np.arange(n/2),np.array([0]),-np.arange(1,n/2)[::-1]]))/16  # wavenumbers
L = (k**2-k**4)  # linear part of KS in Fourier space
E = np.exp(h*L)
E2 = np.exp(h*L/2)
M = 16  # number of points for the complex contour integral
r = np.exp(1j*np.pi*(np.arange(M)+0.5)/M)  # roots of unity along the contour
LR = h*np.vstack([L for _ in range(M)]) + np.hstack([np.reshape(r, (M,1)) for _ in range(n)])
LR = LR.T
# ETDRK4 coefficient functions evaluated by contour-integral averaging
# (avoids cancellation errors for small |h*L|).
Q = h*np.real(np.mean((np.exp(LR/2)-1)/LR, axis = 1))
f1 = h*np.real(np.mean((-4-LR+np.exp(LR)*(4-3*LR+LR**2))/LR**3, axis = 1))
f2 = h*np.real(np.mean((2+LR+np.exp(LR)*(-2+LR))/LR**3, axis = 1))
f3 = h*np.real(np.mean((-4-3*LR-LR**2+np.exp(LR)*(4-LR))/LR**3, axis = 1))
uu = [u]; tt= [0]
# Store roughly m snapshots: save every jplt-th step.
tmax = 150; jmax = int(tmax/h); jplt=floor(tmax/m/h)
g = -0.5*1j*k  # multiplier for the nonlinear term -(1/2) d/dx (u^2) in Fourier space
# Main ETDRK4 time-stepping loop (fourth-order exponential time differencing).
for j in range(1,jmax+1):
    t = j*h
    Nv = g*fft(np.real(ifft(v))**2)
    a = E2*v+Q*Nv
    Na = g*fft(np.real(ifft(a))**2)
    b = E2*v+Q*Na
    Nb = g*fft(np.real(ifft(b))**2)
    c = E2*a+Q*(2*Nb-Nv)
    Nc = g*fft(np.real(ifft(c))**2)
    v = E*v+Nv*f1+2*(Na+Nb)*f2+Nc*f3
    if j%jplt == 0:
        # Transform back to physical space and store a snapshot.
        u = np.real(ifft(v))
        uu.append(u)
        tt.append(t)
# Assemble the solution as a (space, time) array; note n, m are rebound here.
u = np.stack(uu).T; t = np.array(tt); dx = x[1]-x[0]; dt = t[1]-t[0]
n,m = u.shape
pcolor(u)
# Add Gaussian noise scaled by std(u); noise_percent is in percent,
# hence the factor of 0.01.
un = u + 0.01*noise_percent*np.std(u)*np.random.randn(*u.shape)
# Build the sparse regression problem y = Theta @ xi with spatial derivatives
# up to order D=4 and polynomial terms up to degree P=4.
y, Theta, rhs_des = build_linear_system(un, dt, dx, D=4, P=4)
# Ground-truth support for the KS equation: the uu_x, u_xx and u_xxxx terms
# (coefficient value 1 here; sign convention presumably absorbed by
# build_linear_system -- TODO confirm against Oracle_Training).
xi_true = np.zeros((Theta.shape[1],1))
xi_true[[i for i, s in enumerate(rhs_des) if 'uu_{x}' == s]] = 1
xi_true[[i for i, s in enumerate(rhs_des) if 'u_{xx}' == s]] = 1
xi_true[[i for i, s in enumerate(rhs_des) if 'u_{xxxx}' == s]] = 1
xi = xi_true
if retrain:
    # Per-method accumulators; one entry per trial.
    L1_err = {'ARD' : [], 'ARDr' : [], 'ARDvi' : [], 'mSTSBL' : [], 'lSTSBL' : [], 'dSTSBL' : []}
    L2_err = {'ARD' : [], 'ARDr' : [], 'ARDvi' : [], 'mSTSBL' : [], 'lSTSBL' : [], 'dSTSBL' : []}
    Added = {'ARD' : [], 'ARDr' : [], 'ARDvi' : [], 'mSTSBL' : [], 'lSTSBL' : [], 'dSTSBL' : []}
    Missed = {'ARD' : [], 'ARDr' : [], 'ARDvi' : [], 'mSTSBL' : [], 'lSTSBL' : [], 'dSTSBL' : []}
    Xi = {'ARD' : [], 'ARDr' : [], 'ARDvi' : [], 'mSTSBL' : [], 'lSTSBL' : [], 'dSTSBL' : []}
    for trial in range(n_trials):
        print('Trial', trial+1, 'of', str(n_trials)+'.')
        # Random subsample of n_sample rows (space-time points) without replacement.
        sample = np.random.choice(n*m, n_sample, replace = False)
        Theta_sample = Theta[sample, :]
        y_sample = y[sample,:]
        # Test each method
        print('Training ARD.')
        gamma_ARD,sigma2_ARD,mu_xi_ARD,_ = SBL([Theta_sample,y_sample], \
                                               sigma2=1, \
                                               estimate_sigma=True, \
                                               maxit=500, \
                                               verbose=False, \
                                               tol=tol)
        # The plain ARD fit seeds each of the variants trained below.
        ARD_results = gamma_ARD,sigma2_ARD,mu_xi_ARD
        print('Training ARDr.')
        # Candidate regularization weights, scaled by ||y||^2.
        lams = [np.linalg.norm(y)**2*l for l in [0,1e-1,1e0,1e1,1e2,1e3,1e4]]
        gamma_ARDr, sigma2_ARDr, mu_xi_ARDr = Train_ARDr(Theta_sample,y_sample,xi_true,\
                                                         ARD_results=ARD_results,\
                                                         lams = lams,\
                                                         verbose=verbose)
        print('Training ARDvi.')
        alphas = [1,2,4,8,16,32,64,128,256]
        gamma_ARDvi, sigma2_ARDvi, mu_xi_ARDvi = Train_ARDvi(Theta_sample,y_sample,xi_true,\
                                                             ARD_results=ARD_results, \
                                                             alphas=alphas,\
                                                             verbose=verbose)
        print('Training STSBL.')
        taus = [0,1e-5,1e-4,1e-3,1e-2,1e-1,1]
        gamma_STSBL, sigma2_STSBL, mu_xi_STSBL = Train_M_STSBL(Theta_sample,y_sample,xi_true,\
                                                               ARD_results=ARD_results,\
                                                               taus=taus,\
                                                               verbose=verbose)
        print('Training lSTSBL.')
        taus = [np.inf,1e-8,1e-6,1e-4,1e-3,1e-2,1e-1,1,2,5,10]
        gamma_lSTSBL, sigma2_lSTSBL, mu_xi_lSTSBL = Train_L_STSBL(Theta_sample,y_sample,xi_true,\
                                                                  ARD_results=ARD_results, \
                                                                  taus=taus,\
                                                                  verbose=verbose)
        print('Training dSTSBL.')
        taus = [0,1e-2,1e-1,1,1e1,1e2,1e3]
        gamma_dSTSBL, sigma2_dSTSBL, mu_xi_dSTSBL = Train_MAP_STSBL(Theta_sample,y_sample,xi_true,\
                                                                    ARD_results=ARD_results,\
                                                                    taus=taus,\
                                                                    verbose=verbose)
        # Record predictions
        Xi['ARD'].append(list(mu_xi_ARD.flatten()))
        Xi['ARDr'].append(list(mu_xi_ARDr.flatten()))
        Xi['ARDvi'].append(list(mu_xi_ARDvi.flatten()))
        Xi['mSTSBL'].append(list(mu_xi_STSBL.flatten()))
        Xi['lSTSBL'].append(list(mu_xi_lSTSBL.flatten()))
        Xi['dSTSBL'].append(list(mu_xi_dSTSBL.flatten()))
        # Record errors
        L1_err['ARD'].append(np.linalg.norm(xi - mu_xi_ARD, 1))
        L1_err['ARDr'].append(np.linalg.norm(xi - mu_xi_ARDr, 1))
        L1_err['ARDvi'].append(np.linalg.norm(xi - mu_xi_ARDvi, 1))
        L1_err['mSTSBL'].append(np.linalg.norm(xi - mu_xi_STSBL, 1))
        L1_err['lSTSBL'].append(np.linalg.norm(xi - mu_xi_lSTSBL, 1))
        L1_err['dSTSBL'].append(np.linalg.norm(xi - mu_xi_dSTSBL, 1))
        L2_err['ARD'].append(np.linalg.norm(xi - mu_xi_ARD, 2))
        L2_err['ARDr'].append(np.linalg.norm(xi - mu_xi_ARDr, 2))
        L2_err['ARDvi'].append(np.linalg.norm(xi - mu_xi_ARDvi, 2))
        L2_err['mSTSBL'].append(np.linalg.norm(xi - mu_xi_STSBL, 2))
        L2_err['lSTSBL'].append(np.linalg.norm(xi - mu_xi_lSTSBL, 2))
        L2_err['dSTSBL'].append(np.linalg.norm(xi - mu_xi_dSTSBL, 2))
        # Sparsity errors: terms spuriously added to / missing from the model.
        _,added,missed = sparsity_err(mu_xi_ARD,xi)
        Added['ARD'].append(added)
        Missed['ARD'].append(missed)
        _,added,missed = sparsity_err(mu_xi_ARDr,xi)
        Added['ARDr'].append(added)
        Missed['ARDr'].append(missed)
        _,added,missed = sparsity_err(mu_xi_ARDvi,xi)
        Added['ARDvi'].append(added)
        Missed['ARDvi'].append(missed)
        _,added,missed = sparsity_err(mu_xi_STSBL,xi)
        Added['mSTSBL'].append(added)
        Missed['mSTSBL'].append(missed)
        _,added,missed = sparsity_err(mu_xi_lSTSBL,xi)
        Added['lSTSBL'].append(added)
        Missed['lSTSBL'].append(missed)
        _,added,missed = sparsity_err(mu_xi_dSTSBL,xi)
        Added['dSTSBL'].append(added)
        Missed['dSTSBL'].append(missed)
if retrain:
    # Persist this run's results so future sessions can skip retraining.
    with open('./saved_results/KS_results.json','w') as f:
        json.dump({'L2_err': L2_err, \
                   'L1_err': L1_err, \
                   'Added' : Added, \
                   'Missed' : Missed, \
                   'Xi': Xi}, f)
else:
    # Load previously saved results from disk.
    with open('./saved_results/KS_results.json','r') as f:
        results = json.load(f)
    L2_err = results['L2_err']
    L1_err = results['L1_err']
    Added = results['Added']
    Missed = results['Missed']
    Xi = results['Xi']
# Summary figure: one box plot per metric (l2, l1, Added, Missed),
# with one box per method.
figure(figsize = (15,4))
lw = 2
label_fs = 16
whis = [0,100]  # whiskers span the full range of the data
showfliers = False
subplot(1,4,1)
title(r'$\ell^2$ Error', fontsize = 16)
bp1 = boxplot([L2_err[key] for key in L2_err.keys()], positions = np.arange(6), \
              patch_artist=True, whis=whis, showfliers=showfliers)
# NOTE(review): `whisker` and `cap` are unpacked but never used in these
# styling loops; only the boxes are recolored.
for box, whisker,cap in zip(bp1['boxes'], bp1['whiskers'], bp1['caps']):
    box.set(color='blue', linewidth=lw)
    box.set(facecolor = 'w' )
yticks(fontsize = 12)
xticks(np.arange(6), ['ARD','ARDr','ARDvi','M-ST','L-ST','MAP-ST'], fontsize = 12, rotation=45)
for median in bp1['medians']:
    median.set(color='k', linestyle = '--', linewidth=1.5)
##########################################################################################
##########################################################################################
##########################################################################################
subplot(1,4,2)
title(r'$\ell^1$ Error', fontsize = 16)
bp2 = boxplot([L1_err[key] for key in L1_err.keys()], positions = np.arange(6), patch_artist=True, whis=whis, showfliers=showfliers)
for box, whisker, cap in zip(bp2['boxes'], bp2['whiskers'], bp2['caps']):
    box.set(color='red', linewidth=lw)
    box.set(facecolor = 'w' )
yticks(fontsize = 12)
xticks(np.arange(6), ['ARD','ARDr','ARDvi','M-ST','L-ST','MAP-ST'], fontsize = 12, rotation=45)
for median in bp2['medians']:
    median.set(color='k', linestyle = '--', linewidth=1.5)
##########################################################################################
##########################################################################################
##########################################################################################
subplot(1,4,3)
title(r'Added', fontsize = 16)
bp3 = boxplot([Added[key] for key in Added.keys()], positions = np.arange(6), patch_artist=True, whis=whis, showfliers=showfliers)
for box, whisker, cap, median in zip(bp3['boxes'], bp3['whiskers'], bp3['caps'], bp3['medians']):
    box.set(color='green', linewidth=lw)
    box.set(facecolor = 'w' )
yticks(fontsize = 12)
xticks(np.arange(6), ['ARD','ARDr','ARDvi','M-ST','L-ST','MAP-ST'], fontsize = 12, rotation=45)
for median in bp3['medians']:
    median.set(color='k', linestyle = '--', linewidth=1.5)
##########################################################################################
##########################################################################################
##########################################################################################
subplot(1,4,4)
title(r'Missed', fontsize = 16)
bp4 = boxplot([Missed[key] for key in Missed.keys()], positions = np.arange(6), patch_artist=True, whis=whis, showfliers=showfliers)
for box, whisker, cap, median in zip(bp4['boxes'], bp4['whiskers'], bp4['caps'], bp4['medians']):
    box.set(color='c', linewidth=lw)
    box.set(facecolor = 'w' )
yticks(fontsize = 12)
xticks(np.arange(6), ['ARD','ARDr','ARDvi','M-ST','L-ST','MAP-ST'], fontsize = 12, rotation=45)
for median in bp4['medians']:
    median.set(color='k', linestyle = '--', linewidth=1.5)
```
| github_jupyter |
# SYMPAIS Torus Demo
[](https://colab.research.google.com/github/ethanluoyc/sympais/blob/master/notebooks/torus_demo.ipynb)
This notebook provides a visual illustration of the SYMPAIS algorithm.
## Setup
```
# Detect whether this notebook is running inside Google Colab.
try:
    import google.colab
    IN_COLAB = True
except:
    IN_COLAB = False
GIT_TOKEN = ""  # optional token for installing sympais from a private repo
if IN_COLAB:
    !pip install -U pip setuptools wheel
    if GIT_TOKEN:
        !pip install git+https://{GIT_TOKEN}@github.com/ethanluoyc/sympais.git#egg=sympais
    else:
        !pip install git+https://github.com/ethanluoyc/sympais.git#egg=sympais
if IN_COLAB:
    # Download the RealPaver binary used by the 'realpaver' initializer
    # and place it on PATH.
    !curl -L "https://drive.google.com/uc?export=download&id=1_Im0Ot5TjkzaWfid657AV_gyMpnPuVRa" -o realpaver
    !chmod u+x realpaver
    !cp realpaver /usr/local/bin
import jax
import jax.numpy as jnp
from sympais import tasks
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import numpy as onp
import math
%load_ext autoreload
%autoreload 2
%matplotlib inline
# Configure z3
import z3
z3.set_option("smt.arith.random_initial_value", True)
z3.set_option("auto_config", False)
z3.set_option("smt.phase_selection", 5)
z3.set_option("smt.random_seed", 42)
```
## Implementation
### SYMPAIS
Here we show how to implement SYMPAIS from the different components described in the paper.
```
from typing import Optional
import jax
import jax.numpy as jnp
import numpy as onp
from sympais import constraint
from sympais import logger as logger_lib
from sympais import tasks
from sympais.infer import importance
from sympais.infer import utils
from sympais.initializer import Initializer
from sympais.methods.importance import ProposalBuilder
from sympais.methods.importance import RandomWalkMetropolisKernel
from sympais.methods.importance import refine_domains
from sympais.methods.importance import sample_chain
from sympais.methods.importance import WindowedScaleAdaptor
def run(task: tasks.Task,
        seed: int,
        num_samples: int = int(1e6),
        num_proposals: int = 100,
        num_samples_per_iter: int = 5,
        proposal_scale_multiplier: float = 0.5,
        rmh_scale: float = 1.0,
        init: str = "z3",
        tune: bool = True,
        num_warmup_steps: int = 500,
        window_size: int = 100,
        resample: bool = True,
        proposal_std_num_samples: int = 100,
        logger: Optional[logger_lib.Logger] = None):
    """Run SYMPAIS on `task` and return diagnostics plus the estimate.

    Args:
        task: Task providing the input profile, path constraints and domains.
        seed: Seed for the JAX PRNG.
        num_samples: Total sampling budget, including warm-up samples.
        num_proposals: Number of parallel MCMC chains / mixture components.
        num_samples_per_iter: Samples drawn per mixture component per iteration.
        proposal_scale_multiplier: Scale multiplier for the IS proposals.
        rmh_scale: Initial step scale of the random-walk Metropolis kernel.
        init: Initialization strategy (e.g. "z3" or "realpaver").
        tune: Whether to adapt the kernel scale during warm-up.
        num_warmup_steps: Number of warm-up steps; 0 disables warm-up.
        window_size: Window length used by the scale adaptor.
        resample: Whether the initializer resamples diverse solutions.
        logger: Optional logger; receives the running mean each iteration.

    Returns:
        Dict with per-iteration PI-MAIS states/extras, the constraint and
        target-log-prob callables, the initial chain state, the proposal
        builder, and the final probability estimate under key "prob".
    """
    profile = task.profile
    pcs = task.constraints
    # Build callable for 1_PC(x)
    constraint_fn = constraint.build_constraint_fn(pcs, task.domains)
    # Build the unnormalized density \bar{p}(x) = 1_PC(x) p(x)
    target_log_prob_fn = constraint.build_target_log_prob_fn(
        task.profile, task.domains, constraint_fn)
    domains = task.domains
    # Find a coarse approximation of the solution space.
    # This is used both by the RMH kernel for making proposals and
    # by the IS proposal for proposing from truncated distributions.
    refined_domains = refine_domains(pcs, domains)
    key = jax.random.PRNGKey(seed)
    key, subkey = jax.random.split(key)
    # The proposal builder is a callable that
    # constructs importance sampling proposal distribution q(x)
    # for performing MIS at every iteration of PIMAIS
    proposal_builder = ProposalBuilder(profile, refined_domains,
                                       proposal_scale_multiplier,
                                       proposal_std_num_samples, subkey)
    # The initializer finds initial feasible solution to bootstrap
    # the MCMC chains
    # NOTE(review): `subkey` is reused here after being passed to
    # ProposalBuilder above -- confirm this key sharing is intentional.
    initializer_ = Initializer(profile, pcs, domains, init, resample)
    initial_chain_state = initializer_(num_proposals, subkey)
    # Construct a RMH transition kernel
    kernel = RandomWalkMetropolisKernel(target_log_prob_fn,
                                        jnp.ones(num_proposals) * rmh_scale,
                                        refined_domains)
    kernel.step = jax.jit(kernel.step)
    key, subkey = jax.random.split(key)
    # Initialize kernel parameters and run warmup
    # and optional parameter adaptation
    params = kernel.init()
    key, subkey = jax.random.split(key)
    if num_warmup_steps < 1:
        print("Not running warmup")
        chain_state = initial_chain_state
    else:
        if tune:
            print("Tuning the kernel")
            params, chain_state, _ = WindowedScaleAdaptor(kernel, window_size)(
                subkey, params, initial_chain_state, num_warmup_steps)
        else:
            print("Not tuning the kernel")
            chain_state, (_, extra) = sample_chain(kernel, params, subkey,
                                                   initial_chain_state,
                                                   num_warmup_steps)
        print("Finished warm-up")
    # Compute the number of iterations given total sampling budget
    num_samples_warmup = num_proposals * num_warmup_steps
    num_iterations = (
        # 1) subtract the samples used during warmup
        (num_samples - num_samples_warmup) //
        # 2) For each PI-MAIS iteration, we sample from each mixture component
        # `num_samples_per_iter` samples, plus an additional sample used by
        # each chain for making a single step of transition
        ((num_proposals + 1) * num_samples_per_iter))
    # Initialize the state for running PI-MAIS.
    state = importance.pimais_init(chain_state)
    @jax.jit
    def pimais_step_fn(params, rng, state):
        # One PI-MAIS iteration: advance each chain one RMH step, then draw
        # IS samples from the chain-centered mixture proposal.
        kernel_fn = lambda key, state: kernel.step(params, key, state)
        return importance.pimais_step(rng, target_log_prob_fn, kernel_fn,
                                      proposal_builder, num_samples_per_iter, state)
    # Start running the PI-MAIS iterations
    rngs = jax.random.split(key, num_iterations)
    states = []
    extras = []
    for idx in range(num_iterations):
        # tic = time.time()
        state, extra = pimais_step_fn(params, rngs[idx], state)
        states.append(state)
        extras.append(extra)
        # Make sure async dispatch is accounted for in measuring running time.
        utils.block_until_ready((state, extra))
        # toc = time.time()
        # print("Time elapsed", toc - tic, "Mean", state.Ztot)
        if logger is not None:
            logger.write({"mean": state.Ztot})
    print("Final estimated probability {}".format(state.Ztot))
    # Collect some intermediate results for post-processing and viz.
    output = {
        "pimais_states": jax.tree_multimap(lambda *x: jnp.stack(x, 0), *states),
        "pimais_extras": jax.tree_multimap(lambda *x: jnp.stack(x, 0), *extras),
        "constraint_fn": constraint_fn,
        "target_log_prob_fn": target_log_prob_fn,
        "initial_chain_state": initial_chain_state,
        "proposal_builder": proposal_builder,
        "prob": state.Ztot,
    }
    return output
```
### Visualization
Some helper functions for visualization
```
def _plot_proposals(axes, initial_proposal_state, proposal_states, proposal_fn):
    """Draw proposal-density snapshots at iterations 0, 10 and 100.

    Each snapshot is rendered on the corresponding axis in ``axes``:
    the density shading, the chain locations, and the torus boundaries.
    """
    snapshots = (0, 10, 100)
    for ax, step in zip(axes, snapshots):
        if step > 0:
            state = jax.tree_map(lambda leaf: leaf[step - 1],
                                 proposal_states.proposal_state)
        else:
            state = initial_proposal_state
        plot_proposal_dist(ax, proposal_fn(state))
        plot_proposal_state(ax, state)
        _plot_torus_boundaries(ax)
        ax.set_xlim(-5, 5)
        ax.set_ylim(-5, 5)
        ax.set(title=f"$t = {step}$")
def _plot_torus_boundaries(ax):
    """Overlay dashed circles marking the inner (r=2) and outer (r=4) boundaries."""
    shared = dict(fill=False, linestyle="--", linewidth=.5, edgecolor="black")
    ax.add_artist(plt.Circle((0, 0), 2, label="inner", **shared))
    ax.add_artist(plt.Circle((0, 0), 4, **shared))
def _fixup_axes(ax):
    """Apply the shared limits, labels and title used by the torus plots."""
    ax.set(xlim=(-5, 5), ylim=(-5, 5),
           xlabel="$x$", ylabel="$y$", title="$z = 0$")
def plot_proposal_dist(ax, proposal, resolution=100):
    """Shade the proposal density on the z = 0 slice of the domain.

    Evaluates ``proposal.log_prob`` on a ``resolution`` x ``resolution``
    grid over [-5, 5]^2 (with z fixed at 0), averages the density over the
    mixture axis, and renders it as a filled contour plot.
    """
    grid = jnp.linspace(-5, 5, resolution)
    xx, yy = jnp.meshgrid(grid, grid)
    points = {
        "x": xx.reshape(-1, 1),
        "y": yy.reshape(-1, 1),
        "z": jnp.zeros((resolution * resolution, 1)),
    }
    logp = proposal.log_prob(points)
    density = jnp.mean(jnp.exp(logp), -1).reshape((resolution, resolution))
    ax.contourf(xx, yy, density, cmap="Blues")
def plot_proposal_state(ax, proposal_state, s=5, linewidth=0.5):
    """Scatter the chain locations (x, y) as small white circles."""
    marker_style = dict(marker="o", c="white", edgecolors="black",
                        linewidth=linewidth, s=s, alpha=0.75)
    ax.scatter(proposal_state["x"], proposal_state["y"], **marker_style)
# Run SYMPAIS on the torus task with warm-up disabled so that the full
# proposal-adaptation trajectory is visible in the plots below.
output = run(
    tasks.Torus(),
    seed=0,
    init="z3", num_warmup_steps=0,
    proposal_scale_multiplier=1.0
)
```
## Visualizing the input distribution and corresponding optimal proposal distribution
```
# Top row: independent input profile. Left: input distribution p(x);
# right: the optimal proposal (p restricted to the constraint region),
# both shown on the z = 0 slice.
task = tasks.Torus()
constraint_fn = constraint.build_constraint_fn(task.constraints, task.domains)
target_log_prob_fn = constraint.build_target_log_prob_fn(task.profile, task.domains,
                                                         constraint_fn)
fig, ax = plt.subplots(2, 2, sharey=True)
fig.set_size_inches(6, 6.)
resolution = 100
x = jnp.linspace(-5, 5, resolution)
y = jnp.linspace(-5, 5, resolution)
xx, yy = jnp.meshgrid(x, y)
logp = (
    task.profile.log_prob(
        {
            "x": xx.reshape(-1, 1),
            "y": yy.reshape(-1, 1),
            "z": jnp.zeros((resolution * resolution, 1)),
        }
    )
    .reshape((resolution, resolution))
)
ax[0, 0].contourf(xx, yy, jnp.exp(logp), cmap="Blues")
_plot_torus_boundaries(ax[0, 0])
_fixup_axes(ax[0, 0])
logp_gt = target_log_prob_fn(
    {
        "x": xx.reshape(-1, 1),
        "y": yy.reshape(-1, 1),
        "z": jnp.zeros((resolution * resolution, 1)),
    }
)
p_gt = jnp.exp(logp_gt).reshape((resolution, resolution))
# Mask zero-density cells so the infeasible region renders blank.
p_gt = onp.ma.masked_where(p_gt == 0.0, p_gt)
ax[0, 1].contourf(xx, yy, p_gt, cmap="Blues")
_plot_torus_boundaries(ax[0, 1])
_fixup_axes(ax[0, 1])
for a in ax.flat:
    a.set_aspect(1)
ax[0, 1].set_ylabel("")
ax[0, 0].set_title("$p(x)$")
ax[0, 1].set_title("$q^*({x})$")
# task = tasks.torus(profile_type="correlated")
# Bottom row: same two plots for the correlated input profile.
task = tasks.Torus(profile_type="correlated")
constraint_fn = constraint.build_constraint_fn(task.constraints, task.domains)
target_log_prob_fn = constraint.build_target_log_prob_fn(
    task.profile, task.domains, constraint_fn)
logp = task.profile.log_prob(
    {
        "x": xx.reshape(-1, 1),
        "y": yy.reshape(-1, 1),
        "z": jnp.zeros((resolution * resolution, 1)),
    }
).reshape((resolution, resolution))
ax[1, 0].contourf(xx, yy, jnp.exp(logp), cmap="Blues")
_plot_torus_boundaries(ax[1, 0])
_fixup_axes(ax[1, 0])
logp_gt = target_log_prob_fn(
    {
        "x": xx.reshape(-1, 1),
        "y": yy.reshape(-1, 1),
        "z": jnp.zeros((resolution * resolution, 1)),
    }
)
p_gt = jnp.exp(logp_gt).reshape((resolution, resolution))
p_gt = onp.ma.masked_where(p_gt == 0.0, p_gt)
ax[1, 1].contourf(xx, yy, p_gt, cmap="Blues")
_plot_torus_boundaries(ax[1, 1])
_fixup_axes(ax[1, 1])
for a in ax.flat:
    a.set_aspect(1)
ax[1, 1].set_ylabel("")
ax[1, 1].set_title("")
ax[1, 0].set_title("")
# Reduce tick clutter on the shared axes.
ax[1,1].xaxis.set_major_locator(matplotlib.ticker.FixedLocator([-4, 0, 4]))
ax[0,0].yaxis.set_major_locator(matplotlib.ticker.FixedLocator([-4, 0, 4]))
ax[1,0].xaxis.set_major_locator(matplotlib.ticker.FixedLocator([-4, 0, 4]))
ax[1,0].yaxis.set_major_locator(matplotlib.ticker.FixedLocator([-4, 0, 4]))
for a in ax[0]:
    a.xaxis.set_visible(False)
```
## Example trajectory of SYMPAIS
```
# Shorter-budget run (1e5 samples) used for the trajectory plots below.
output = run(
    tasks.Torus(),
    seed=0,
    init="z3",
    num_warmup_steps=0,
    num_samples=int(1e5),
    proposal_scale_multiplier=1.0)
def plot_trajectory(output):
    """Plot proposal snapshots at t = 0, 10, 100, 1000 for a SYMPAIS run.

    Each panel shows the proposal density, the chain locations, and (for
    t > 0) a dashed line tracing the first chain's path up to time t.
    """
    fig, axes = plt.subplots(1, 4,
                             figsize=matplotlib.figure.figaspect(0.25),
                             sharey=True)
    for i, t in enumerate([0, 10, 100, 1000]):
        if t == 0:
            proposal_state = output["initial_chain_state"]
        else:
            # Slice out the chain states at iteration t from the stacked history.
            proposal_state = jax.tree_map(
                lambda x: x[t - 1], output["pimais_states"].proposal_state
            )
        proposal_dist = output['proposal_builder'](proposal_state)
        ax = axes.flat[i]
        plot_proposal_dist(ax, proposal_dist)
        # plot_proposal_state(ax, proposal_state)
        ax.scatter(
            proposal_state['x'],
            proposal_state['y'],
            marker="o",
            c="white",
            edgecolors="black",
            alpha=0.75,
        )
        _plot_torus_boundaries(ax)
        if t > 0:
            # Dashed trace of chain 0 up to iteration t.
            ax.plot(
                output["pimais_states"].proposal_state["x"][:t, 0],
                output["pimais_states"].proposal_state["y"][:t, 0],
                color="#F2528D",
                linestyle="dashed",
                alpha=0.75,
                linewidth=2,
            )
        ax.set_xlim(-5, 5)
        ax.set_ylim(-5, 5)
        ax.set(title=f"$t = {t}$")
    for ax in axes.flat:
        ax.set_aspect(1)
        ax.set_xlabel("$x$")
        ax.xaxis.set_major_locator(matplotlib.ticker.FixedLocator([-5, 0, 5]))
        ax.yaxis.set_major_locator(matplotlib.ticker.FixedLocator([-5, 0, 5]))
    # ="#F27781",
    # for a in axes[0]:
    #     a.set_xticks([])
    #     a.set_xlabel("")
    axes[0].set(ylabel="$y$")
    # axes[1,0].set(ylabel="$y$")
    plt.subplots_adjust(left=.2, hspace=.3, top=.9, bottom=.15, right=.95)
plot_trajectory(output)
```
### Interactive visualization of the SYMPAIS trajectory
We also wrote an interactive widget to visualize the SYMPAIS trajectory.
```
from ipywidgets import widgets
def make_interactive_plot(output):
@widgets.interact(
T=(0, output["pimais_states"].proposal_state['x'].shape[0], 1)
)
def update(T):
fig, ax = plt.subplots(figsize=(4,4))
inner_circle = plt.Circle(
(0, 0),
2,
fill=False,
linestyle="--",
linewidth=1,
edgecolor="black",
label="inner",
)
outer_circle = plt.Circle(
(0, 0), 4, fill=False,
linestyle="--", linewidth=1, edgecolor="black"
)
ax.add_artist(inner_circle)
ax.add_artist(outer_circle)
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set(xlabel="$x$", ylabel="$y$", title="$z = 0$")
if T == 0:
proposal_state = output['initial_chain_state']
else:
proposal_state = jax.tree_map(
lambda x: x[T - 1], output["pimais_states"].proposal_state
)
proposal_dist = output['proposal_builder'](proposal_state)
plot_proposal_dist(ax, proposal_dist)
ax.scatter(
proposal_state['x'],
proposal_state['y'],
marker="o",
c="white",
edgecolors="black",
alpha=0.75,
)
plt.show()
return update
make_interactive_plot(output);
```
## Effect of Initialization
In this section, we compare the different initialization settings, which is described in our
optimization section.
```
# Compare initialization strategies on the correlated torus task:
# z3 (single solution), RealPaver (diverse solutions), RealPaver + resampling.
task = tasks.Torus(profile_type="correlated")
num_warmup_steps = 0
num_samples = int(1e5)
seed = 0
z3_init_output = run(task,
                     seed=seed,
                     num_samples=num_samples,
                     init='z3',
                     num_warmup_steps=num_warmup_steps)
rp_init_output = run(task,
                     seed=seed,
                     num_samples=num_samples,
                     init='realpaver',
                     num_warmup_steps=num_warmup_steps,
                     resample=False)
rp_resample_output = run(task,
                         seed=seed,
                         num_samples=num_samples,
                         init='realpaver',
                         num_warmup_steps=num_warmup_steps,
                         resample=True)
fig, axes = plt.subplots(3, 3, sharey=True)
fig.set_size_inches(6, 6)
# One row of snapshots (t = 0, 10, 100) per initialization strategy.
for ax, o in zip(axes, (z3_init_output, rp_init_output, rp_resample_output)):
    _plot_proposals(
        ax,
        o["initial_chain_state"],
        o["pimais_states"],
        o['proposal_builder']
    )
for ax in axes[1:, :].flat:
    ax.set(title="")
for ax in axes[2:, :].flat:
    ax.set(xlabel="$x$")
    ax.set(title="")
axes[0, 0].set_ylabel("Single Solution \n $ y $")
axes[1, 0].set_ylabel("Diverse Solution \n $ y $")
axes[2, 0].set_ylabel("Re-sample \n $ y $")
# for ax in axes.flat:
#     ax.set_aspect(1.0)
plt.setp(axes, aspect=1.0)
# Hide ticks on all but the bottom row to reduce clutter.
for ax in axes[0:2].flat:
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(left=0.2, bottom=0.15, right=0.975, wspace=0.05)
# fig.savefig("images/pimais_init.pdf")
```
| github_jupyter |
**This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/introduction).**
---
As a warm-up, you'll review some machine learning fundamentals and submit your initial results to a Kaggle competition.
# Setup
The questions below will give you feedback on your work. Run the following cell to set up the feedback system.
```
# Set up code checking
import os
if not os.path.exists("../input/train.csv"):
    # Symlink the competition data into the paths the exercise expects.
    os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
    os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex1 import *
print("Setup Complete")
```
You will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course) to predict home prices in Iowa using 79 explanatory variables describing (almost) every aspect of the homes.

Run the next code cell without changes to load the training and validation features in `X_train` and `X_valid`, along with the prediction targets in `y_train` and `y_valid`. The test features are loaded in `X_test`. (_If you need to review **features** and **prediction targets**, please check out [this short tutorial](https://www.kaggle.com/dansbecker/your-first-machine-learning-model). To read about model **validation**, look [here](https://www.kaggle.com/dansbecker/model-validation). Alternatively, if you'd prefer to look through a full course to review all of these topics, start [here](https://www.kaggle.com/learn/machine-learning).)_
```
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
X_full = pd.read_csv('../input/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/test.csv', index_col='Id')
# Obtain target and predictors
y = X_full.SalePrice
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = X_full[features].copy()
X_test = X_test_full[features].copy()
# Break off validation set from training data
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,
random_state=0)
```
Use the next cell to print the first several rows of the data. It's a nice way to get an overview of the data you will use in your price prediction model.
```
X_train.head()
```
The next code cell defines five different random forest models. Run this code cell without changes. (_To review **random forests**, look [here](https://www.kaggle.com/dansbecker/random-forests)._)
```
from sklearn.ensemble import RandomForestRegressor
# Define the models
model_1 = RandomForestRegressor(n_estimators=50, random_state=0)
model_2 = RandomForestRegressor(n_estimators=100, random_state=0)
model_3 = RandomForestRegressor(n_estimators=100, criterion='mae', random_state=0)
model_4 = RandomForestRegressor(n_estimators=200, min_samples_split=20, random_state=0)
model_5 = RandomForestRegressor(n_estimators=100, max_depth=7, random_state=0)
models = [model_1, model_2, model_3, model_4, model_5]
```
To select the best model out of the five, we define a function `score_model()` below. This function returns the mean absolute error (MAE) from the validation set. Recall that the best model will obtain the lowest MAE. (_To review **mean absolute error**, look [here](https://www.kaggle.com/dansbecker/model-validation).)_
Run the code cell without changes.
```
from sklearn.metrics import mean_absolute_error
# Function for comparing different models
def score_model(model, X_t=X_train, X_v=X_valid, y_t=y_train, y_v=y_valid):
    """Fit *model* on the training split and return its validation-set MAE.

    The defaults bind to the module-level train/validation splits, so the
    function can be called with just a model when comparing candidates.
    """
    fitted = model.fit(X_t, y_t)
    predictions = fitted.predict(X_v)
    return mean_absolute_error(y_v, predictions)
# Score every candidate model and report its validation MAE.
for model_number, candidate in enumerate(models, start=1):
    print("Model %d MAE: %d" % (model_number, score_model(candidate)))
```
# Step 1: Evaluate several models
Use the above results to fill in the line below. Which model is the best model? Your answer should be one of `model_1`, `model_2`, `model_3`, `model_4`, or `model_5`.
```
# Fill in the best model
best_model = model_3
# Check your answer
step_1.check()
# Lines below will give you a hint or solution code
#step_1.hint()
#step_1.solution()
```
# Step 2: Generate test predictions
Great. You know how to evaluate what makes an accurate model. Now it's time to go through the modeling process and make predictions. In the line below, create a Random Forest model with the variable name `my_model`.
```
# Define a model
my_model = model_3 # Your code here
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#step_2.hint()
#step_2.solution()
```
Run the next code cell without changes. The code fits the model to the training and validation data, and then generates test predictions that are saved to a CSV file. These test predictions can be submitted directly to the competition!
```
# Fit the model to the training data
my_model.fit(X, y)
# Generate test predictions
preds_test = my_model.predict(X_test)
# Save predictions in format used for competition scoring
output = pd.DataFrame({'Id': X_test.index,
'SalePrice': preds_test})
output.to_csv('submission.csv', index=False)
```
# Submit your results
Once you have successfully completed Step 2, you're ready to submit your results to the leaderboard! First, you'll need to join the competition if you haven't already. So open a new window by clicking on [this link](https://www.kaggle.com/c/home-data-for-ml-course). Then click on the **Join Competition** button.

Next, follow the instructions below:
1. Begin by clicking on the blue **Save Version** button in the top right corner of the window. This will generate a pop-up window.
2. Ensure that the **Save and Run All** option is selected, and then click on the blue **Save** button.
3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the blue **Submit** button to submit your results to the leaderboard.
You have now successfully submitted to the competition!
If you want to keep working to improve your performance, select the blue **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work.
# Keep going
You've made your first model. But how can you quickly make it better?
Learn how to improve your competition results by incorporating columns with **[missing values](https://www.kaggle.com/alexisbcook/missing-values)**.
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161289) to chat with other Learners.*
| github_jupyter |
```
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import roc_curve, auc
import pandas as pd
import time
from scipy import interp
from sklearn.preprocessing import FunctionTransformer
from sklearn.ensemble import RandomForestClassifier
import scipy.stats as stats
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.manifold import TSNE
estimators = [('reduce_dim', PCA()),
('clf', SVC())]
pipe = Pipeline(estimators)
pipe
from sklearn.pipeline import make_pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import Binarizer
make_pipeline(Binarizer(), MultinomialNB())
pipe.steps[0]
pipe.named_steps['reduce_dim']
pipe.set_params(clf__C=10)
pipe.named_steps.reduce_dim is pipe.named_steps['reduce_dim']
from sklearn.model_selection import GridSearchCV
# Both hyper-parameter lists must live in one dict literal; the original
# split the call across two statements (`dict(reduce_dim__n_components=...)`
# followed by a dangling `clf__C=[...])`), which is a SyntaxError.
param_grid = dict(reduce_dim__n_components=[2, 5, 10],
                  clf__C=[0.1, 10, 100])
grid_search = GridSearchCV(pipe, param_grid=param_grid)
grid_search
```
**Run grid search to find params for dimensionality reduction, then do a logistic regression**
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
pipe = Pipeline([
('reduce_dim', PCA()),
('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2,4,8]
C_OPTIONS = [1,10,100,1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
}
]
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
print(mean_scores)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
plt.show()
```
<h2> Now do a more informative example </h2>
Example from : https://tomaugspurger.github.io/scalable-ml-01.html
```
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
#Let's make a learning curve
# invent a classification problem
X, y = samples_generator.make_classification(
n_features=100, n_informative=5, n_redundant=10, n_classes=4,
n_clusters_per_class=2)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    title : string
        Title for the chart.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # learning_curve refits a clone of the estimator for every
    # (train size, CV split) pair and returns per-split scores.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Aggregate across CV splits (axis=1) -> mean and std per train size.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    # Return the pyplot module so callers can .show() or keep customizing.
    return plt
digits = load_digits()
X, y = digits.data, digits.target
title = 'Learning Curves (Naive Bayes)'
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01),
cv=cv, n_jobs=4)
X, y = samples_generator.make_classification(
n_features=1000, n_informative=10, n_redundant=10, n_classes=4,
n_clusters_per_class=1,
n_samples=100)
print(X.shape)
cv = ShuffleSplit(n_splits=50, test_size=0.05, random_state=0)
my_plot = plot_learning_curve(estimator, title, X, y,
cv=cv, n_jobs=4)
my_plot.show()
# Small toy matrix for sanity-checking the normalization defined below.
X_toy = np.array([[1, 2, 3],
                  [10, 50, 15],
                  [100, 500, 150]])
a = np.array([])
# Python 3 print calls (the original used Python 2 print statements,
# which are a SyntaxError in this otherwise-Python-3 notebook).
print(X_toy)
print(np.mean(X_toy, axis=0))
def pqn_normalize(X):
    """Probabilistic quotient normalization.

    Each feature (column) is first scaled by its mean across samples; the
    per-sample median of those quotients is taken as that sample's dilution
    factor, and every row of X is divided by its own factor.
    """
    column_means = np.mean(X, axis=0)
    quotients = np.divide(X, column_means)
    sample_dilution = np.median(quotients, axis=1)
    # Reshape the factors to a column vector so division broadcasts row-wise.
    return np.divide(X, sample_dilution[:, np.newaxis])
pqn_normalize(X_toy)
X, y = samples_generator.make_classification(
n_features=100, n_informative=10, n_redundant=10, n_classes=2,
n_clusters_per_class=1,
n_samples=20)
pipe = Pipeline([
('pqn_normalize', FunctionTransformer(pqn_normalize)),
('rf_clf', RandomForestClassifier())
])
param_grid = dict(rf_clf__n_estimators = np.int_(
np.round(
np.linspace(50, 2000, 10)
))
)
n_iter = 5
test_size = 0.3
random_state = 1
cv = StratifiedShuffleSplit(n_splits=n_iter, test_size=test_size, random_state=random_state)
#param_grid = {rf_clf__n_estimators=stats.uniform()}
grid_search = GridSearchCV(pipe, param_grid=param_grid,
scoring='roc_auc',
cv=cv)
np.int_(np.array([10.2, 11.5]))
# Python 3 print call (the original used Python 2 statement syntax).
print(y)
grid_search.fit(X, y)
pd.DataFrame(grid_search.cv_results_)
# Now plot a learning curve with given parameters
cv_learn_curve = StratifiedShuffleSplit(
n_splits=25, test_size=0.3, random_state=random_state)
my_plt = plot_learning_curve(grid_search.best_estimator_,
'Learning curve (RF) Chosen vis gridsearchCV',
X, y, cv=cv, n_jobs=4)
my_plt.show()
# Get an ROC curve plot from the best estimator
# Get an ROC curve plot from the best estimator
def roc_curve_cv(X, y, clf, cross_val,
                 path='/home/irockafe/Desktop/roc.pdf',
                 save=False, plot=True):
    """Cross-validated ROC analysis for a binary classifier.

    For each train/test split from *cross_val*, fits *clf*, scores the
    held-out fold, and interpolates the true-positive rate onto a shared
    false-positive-rate grid so the folds can be averaged.

    Parameters
    ----------
    X, y : array-like
        Feature matrix and binary labels.
    clf : estimator
        Classifier exposing fit() and predict_proba().
    cross_val : CV splitter
        Must expose split(), n_splits and test_size.
    path : str
        Destination for the figure when *save* is True.
    save, plot : bool
        Whether to save / draw the mean ROC curve.

    Returns
    -------
    (plt, tpr_list, auc_list, mean_fpr)
    """
    t1 = time.time()
    # collect vals for the ROC curves
    tpr_list = []
    mean_fpr = np.linspace(0, 1, 100)
    auc_list = []
    # Get the false-positive and true-positive rate
    for i, (train, test) in enumerate(cross_val.split(X, y)):
        clf.fit(X[train], y[train])
        y_pred = clf.predict_proba(X[test])[:, 1]
        # get fpr, tpr
        fpr, tpr, thresholds = roc_curve(y[test], y_pred)
        roc_auc = auc(fpr, tpr)
        # Interpolate onto the shared FPR grid so folds can be averaged.
        # np.interp replaces the deprecated scipy.interp (they are aliases).
        tpr_list.append(np.interp(mean_fpr, fpr, tpr))
        tpr_list[-1][0] = 0.0
        auc_list.append(roc_auc)
        if (i % 10 == 0):
            # Python 3 print call (original used Python 2 statement syntax).
            print('{perc}% done! {time}s elapsed'.format(
                perc=100 * float(i) / cross_val.n_splits,
                time=(time.time() - t1)))
    # get mean tpr and fpr
    mean_tpr = np.mean(tpr_list, axis=0)
    # make sure it ends up at 1.0
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(auc_list)
    if plot:
        # plot mean auc
        plt.plot(mean_fpr, mean_tpr, label='Mean ROC - AUC = %0.2f $\pm$ %0.2f' % (mean_auc,
                 std_auc),
                 lw=5, color='b')
        # plot luck-line
        plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
                 label='Luck', alpha=0.5)
        # plot 1-std band around the mean curve
        std_tpr = np.std(tpr_list, axis=0)
        tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
        tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
        plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2,
                         label=r'$\pm$ 1 stdev')
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC curve, {iters} iterations of {cv} cross validation'.format(
            iters=cross_val.n_splits,
            cv='{train}:{test}'.format(test=cross_val.test_size,
                                       train=(1 - cross_val.test_size)))
        )
        plt.legend(loc="lower right")
    if save:
        plt.savefig(path, format='pdf')
    return plt, tpr_list, auc_list, mean_fpr
roc_cv = StratifiedShuffleSplit(
    n_splits=5, test_size=0.3, random_state=random_state)
# Python 3 print call (the original used Python 2 statement syntax).
print(roc_cv)
plt, tpr_list, auc_list, mean_fpr = roc_curve_cv(X, y, grid_search.best_estimator_, cross_val=roc_cv)
```
<h2> Great, looks like everything works okay. Let's clean it up a bit </h2>
```
def rf_gridsearch(X, y):
    """Grid-search a PQN-normalize -> RandomForest pipeline on roc_auc.

    The original cell commented out only the first line of the
    ``param_grid = dict(`` assignment, leaving its continuation lines live
    (a SyntaxError) and ``param_grid`` undefined; the grid definition is
    restored here so the function is self-contained.
    """
    # TODO How to require pqn_normalize to be a function accessible here?
    # Make your gridsearch pipeline
    pipe = Pipeline([
        ('pqn_normalize', FunctionTransformer(pqn_normalize)),
        ('rf_clf', RandomForestClassifier())
    ])
    # 10 forest sizes evenly spaced between 50 and 2000 trees.
    param_grid = dict(rf_clf__n_estimators=np.int_(
        np.round(
            np.linspace(50, 2000, 10)
        ))
    )
    n_splits = 15
    test_size = 0.3
    random_state = 1
    cv = StratifiedShuffleSplit(n_splits=n_splits,
                                test_size=test_size,
                                random_state=random_state)
    grid_search = GridSearchCV(pipe, param_grid=param_grid,
                               scoring='roc_auc',
                               cv=cv,
                               n_jobs=4)
    grid_search.fit(X, y)
    return grid_search
grid = rf_gridsearch(X, y)
```
<h2> Duh. Don't cross-validate the number of estimators - always better with more (asymptotically) </h2> Could do a few other things instead
```
pd.DataFrame(grid.cv_results_)
```
<h2> Try to use PCA -> t-SNE </h2>
See if this improves performance
```
X, y = samples_generator.make_classification(
n_features=100, n_informative=10, n_redundant=10, n_classes=2,
n_clusters_per_class=1,
n_samples=20)
pipe = Pipeline([
('pqn_normalize', FunctionTransformer(pqn_normalize)),
('pca', PCA()),
('tsne', TSNE()),
('rf_clf', RandomForestClassifier())
])
param_grid = dict(rf_clf__n_estimators = np.int_(
np.round(
np.linspace(50, 2000, 10))),
pca__n_components = [10, 25, 50, 75, 100],
)
n_iter = 5
test_size = 0.3
random_state = 1
cv = StratifiedShuffleSplit(n_splits=n_iter, test_size=test_size, random_state=random_state)
#param_grid = {rf_clf__n_estimators=stats.uniform()}
grid_search = GridSearchCV(pipe, param_grid=param_grid,
scoring='roc_auc',
cv=cv)
X, y = samples_generator.make_classification(
n_features=100, n_informative=90, n_redundant=10, n_classes=3,
n_clusters_per_class=1,
n_samples=60)
pca = PCA(n_components=50)
X_pca = pca.fit_transform(X)
print('PCA shape', X_pca.shape, np.sum(pca.explained_variance_ratio_))
tsne = TSNE()
X_pca_tsne = tsne.fit_transform(X_pca)
print('tsne shape', X_pca_tsne.shape)
def plt_scatter(X, y, title):
    # Scatter the first two columns of X (e.g. a 2-D embedding such as the
    # PCA / t-SNE outputs above), colouring points by class label y, then
    # display the titled figure.
    plt.scatter(x=[i[0] for i in X],
                y=[i[1] for i in X],
                c=y)
    plt.title(title)
    plt.show()
plt_scatter(X_pca, y, 'pca')
plt_scatter(X_pca_tsne, y, 'Tsne(pca)')
```
| github_jupyter |
## Integración y procesamiento de los datos
Primeramente importaremos todas las librerías que vamos a necesitar para el procesamiento de los datos: pandas para el manejo de data frames, matplotlib para generar las gráficas, scipy para crear clústeres jerárquicos y sklearn para hacer clústeres.
Luego importamos los data sets y creamos un data frame por cada data set, y uno adicional para poner los estadísticos que caracterizan nuestros datos.
Son dos data sets: el primero es la respuesta de un grupo de individuos a nuestra encuesta; el segundo sirve para conocer los metadatos de la encuesta, es decir, qué pregunta se relaciona con cuál categoría y subcategoría.
```
import pandas as pd
from numpy.core.numeric import NaN
#import matplotlib.pyplot as plt
#import scipy.cluster.hierarchy as sch
#from sklearn.cluster import AgglomerativeClustering
df = pd.read_csv("STCI.csv")
sub = pd.read_csv("PreguntaCategoria.csv", index_col="NumeroPregunta")
# Single accumulator frame for the per-individual derived statistics
# (the original assigned this twice in a row; the duplicate is removed).
preproc = pd.DataFrame()
```
A continuación agrupamos las preguntas por categorías; luego, por cada categoría, vamos a obtener el valor de la misma, el porcentaje del total que se podía obtener y una media por individuo, y se guarda en preproc.
```
categoria = sub.groupby(['Categoria'])
preproc["Nombre"] = df["Nombre"]
for i in categoria:
preproc[i[0]] = df[list(categoria.get_group(i[0])["Pregunta"])].sum(axis=1)
preproc["Porcentaje " + i[0]] =(df[list(categoria.get_group(i[0])["Pregunta"])].sum(axis=1)-20)/60*100
preproc["Media " + i[0]] = df[list(categoria.get_group(i[0])["Pregunta"])].mean(axis=1)
preproc
```
A continuacion se calcula de similar forma el promedio por subcategorรญa a cada individuo
```
subcategoria = sub.groupby(['SubCategoria'])
for i in subcategoria:
preproc[i[0]] = df[list(subcategoria.get_group(i[0])["Pregunta"])].mean(axis=1)
preproc
```
Ahora obtenemos en un df los resultados por cada dato obtenido como promedio de todos los individuos
```
preproc.loc[:, "Alegria":]
result = (preproc.loc[:, "Alegria":].sum()/df["Nombre"].count()).to_dict()
result
```
Tambien es interesante conocer la moda y el porcentaje de la moda en el total por cada categorรญa
```
moda = {
"Alegria" : {
"valor": "-",
"porcentaje": "-"
},
"Seriedad" : {
"valor": "-",
"porcentaje": "-"
},
"Mal humor": {
"valor": "-",
"porcentaje": "-"
}
}
# Fill in the mode and its percentage per category; the three copy-pasted
# if-blocks of the original are collapsed into one loop over the category
# names (behavior is unchanged).
for _categoria in ("Alegria", "Seriedad", "Mal humor"):
    _media = preproc["Media " + _categoria].round()
    if not preproc["Media " + _categoria].empty and not pd.isna(_media.mode(dropna=False)[0]):
        moda[_categoria]["valor"] = _media.mode(dropna=False)[0]
        moda[_categoria]["porcentaje"] = _media.value_counts(normalize=True).max() * 100
moda
```
Ademรกs puede ser interesante calcular las correlaciones entre categorias y subcatetgorias en nuestra poblaciรณn
```
corr = preproc.iloc[:,[0,3,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]].corr()
corr
```
Luego guardamos todos estos resultaddos en descripcion de nuestra problacion, donde accedemos en busca de informaciรณn al respecto
```
descr = {
"descripcion": preproc,
"resultado": result,
"moda": moda,
"correlacion por categoria y subcategoria": corr
}
descr
```
Luego, se puede en caso de necesitarlo hacer una agrupaciรณn de los datos con un oar cualquiera de variables, ya sea entre las categpรญas o si se hacce una nueva encuesta entre la variable dada por la encuesta y otra variable dada lo mismo por una encuesta como por alguna categorรญa o sub categoria
primero hacemos el clรบster jerรกrquico, y lo graficamos, luego un clรบster por aglomeraciรณn y tambien lo graficamos
```
#subcat = preproc.iloc[:,[0,9]]
#denograma = sch.dendrogram(sch.linkage(subcat, method='ward'))
#plt.title('Dendograma')
#plt.xlabel('Individuo')
#plt.ylabel('Distancias')
#plt.show()
#hc = AgglomerativeClustering(n_clusters = 4, affinity= 'euclidean', linkage='ward')
#y_hc = hc.fit_predict(subcat)
#y_hc
#plt.scatter(subcat.iloc[:,0][y_hc == 0], subcat.iloc[:,1][y_hc == 0], s=80, c='red', label = 'Cluster 1')
#plt.scatter(subcat.iloc[:,0][y_hc == 1], subcat.iloc[:,1][y_hc == 1], s=80, c='blue', label = 'Cluster 2')
#plt.scatter(subcat.iloc[:,0][y_hc == 2], subcat.iloc[:,1][y_hc == 2], s=80, c='green', label = 'Cluster 3')
#plt.scatter(subcat.iloc[:,0][y_hc == 3], subcat.iloc[:,1][y_hc == 3], s=80, c='magenta', label = 'Cluster 4')
#plt.title("Clusters")
#plt.xlabel("Subcategoria x")
#plt.ylabel("Categiria y")
#plt.legend()
#plt.show()
```
### Resultado final a devolver
```
general = preproc.loc[:, "Alegria": "Media Seriedad"]
general.to_csv("Resultados en el grupo.csv")
general
personal = preproc[(preproc["Nombre"] == 50)]
personal.to_csv("Resultados en la persona" + "50" +".csv", sep=',')
personal
descripcion = {"Medias" : result, "Modas": moda}
print("Medias: ")
for d in result:
print(d +": "+ str(round(result[d],2)))
print("Modas")
for d in moda:
print(d +": ")
for m in moda[d]:
print(m + ": " + str(round(moda[d][m],2)))
```
| github_jupyter |
```
#loading libraries
import pandas as pd
import string
import seaborn as sns
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
import xgboost as xgb
from sklearn.ensemble import forest
from sklearn import tree
from sklearn import linear_model
from sklearn import svm
from sklearn.model_selection import cross_val_score
import statistics as stats
from sklearn.feature_extraction.text import TfidfVectorizer
from skopt import BayesSearchCV
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('punkt')
# !pip install scikit-optimize
```
# reading the data
```
#reading the data
df = pd.read_csv("train.csv")
#dummy variables
id = df["id"]
location = df["location"]
keyword = df["keyword"]
```
# **Data Preprocessing**
**renaming the text column**
```
df = df.rename(columns={"text":"tweet"})
df["original tweet"] = df["tweet"]
df.head()
```
***Data Cleaning***
**Removing irrelevant features**
```
#removing irrelevant features
df = df.drop(columns=["id","location","keyword"],axis=1)
df.head()
```
**Checking for missing values**
```
#Dropping variables that has more than 60 percent missing values
#Checking the percentage missing values by columns
missing_column = (df.isna().sum()/len(df))*100
print(missing_column)
```
**Checking for duplicate values**
```
duplicate = df.duplicated().sum()
print(duplicate)#we can see that there are duplicates values
```
**dropping duplicates**
```
df = df.drop_duplicates()
sns.countplot(df["target"])
#We can see the target column is balanced.
```
***Cleaning the tweet column***
**Removing Hyper Links**
By observing the data we can see that some text contains external links ("http://"..) which are irrelevant
```
df["tweet"] = df['tweet'].str.replace('http\S+|www.\S+', '', case=False)
df.head()
```
**Removing Punctuation And Changing The Special Characters To The Usual Alphabet Letters**
Raw data contain punctuation,Hyper Link,special character.These value can hamper the performance of model so before applying any text Vectorization first we need to convert raw data into meaningful data which is also called as text preprocessing .
```
#Removing punctuation
df["tweet"] = df['tweet'].str.replace('[{}]'.format(string.punctuation), '')
#Changing the special characters to the usual alphabet letters
df['tweet'] = df["tweet"].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
df.head()
```
**Removing numbers from dataframe**
Removing numbers from the text like โ1,2,3,4,5โฆโ We will remove numbers because numbers doesnโt give much importance to get the main words.
```
df['tweet'] = df['tweet'].str.replace('\d+', '')
```
**Tokenizing data**
We use the method word_tokenize() to split a sentence into words. The output of word tokenization can be converted to DataFrame for better text understanding in machine learning applications. It can also be provided as input for further text cleaning steps such as numeric character removal or stop words removal. Machine learning models need numeric data to be trained and make a prediction. Word tokenization becomes a crucial part of the text (string) to numeric data conversion.
```
df["tweet"] = [word_tokenize(word) for word in df["tweet"]]
df.head()
```
**Conveting text to Lower Case**
The model might treat a word at the beginning of a sentence, written with a capital letter, differently from the same word appearing later in the sentence without a capital letter. This can reduce accuracy, whereas lowercasing all words is a better trade-off: to the computer, the letter 'A' differs from the letter 'a'.
```
df["tweet"] = [[word.lower() for word in words ] for words in df["tweet"]]
df.head()
```
**Removing stop words from texts**
Removing stopwords can potentially help improve the performance as
there are fewer and only meaningful tokens left.
Thus, it could increase classification accuracy.
```
stop_words = stopwords.words('english')
df["tweet"] = [[words for words in word if words not in stop_words] for word in df["tweet"]]
df.head()
```
**Lematization**
Lemmatization usually aims to remove word endings. It helps in returning the base or dictionary form of a word, which is known as the lemma.
<!-- congrats yey you found the easter egg (hahaha)
text me to claim your reward -->
```
lem = nltk.WordNetLemmatizer()
df["tweet"] = [[lem.lemmatize(lema,"v") for lema in i]for i in df["tweet"]]
```
# Modelling
**Declaring Inpendent and Dependent Variable**
```
x = df["tweet"]
y = df["target"]
```
**Splitting the data**
Now the data is clean we will be Spltting the dataframe into training and testing sample of 80% and 20% respectively.
```
# BUG FIX: the original chained assignment
#   x_train, x_test, y_train, y_test = train_test_split = train_test_split(...)
# also rebound the *function name* train_test_split to the returned tuple,
# so any later call to train_test_split(...) would raise TypeError.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
print(x_train.shape)
print(x_test.shape)
```
**Converting text to numeric**
We cannot work with text directly when using machine learning algorithms. Instead, we need to convert the text to numbers.
Computers donโt understand text and only understand and process numbers.
When applying TfidfVectorizer,CountVectorization etc on text they expect an array of string that has not been tokenized. So if you pass him an array of arrays of tokenz, it crashes.We will be passing a tokenized text to the vectorizer, to deal with this We need to pass a dummy fuction to tokenizer and preprocessor parameter.
Count vectorization
```
# # Creating a dummy fuction so it can be passed to the (tokenizer and preprocessor) parameter
# def dummy(doc):
# return doc
# cv = CountVectorizer(
# tokenizer=dummy,
# preprocessor=dummy,
# min_df = 0.000167
# )
# x_train = cv.fit_transform(x_train)
# x_test = cv.transform(x_test)
```
Tfidfvectorizer
```
def dummy(doc):
    """Identity pass-through used as TfidfVectorizer's tokenizer and
    preprocessor so it accepts the already-tokenized tweets unchanged."""
    return doc
tfidf = TfidfVectorizer(
tokenizer=dummy,
preprocessor=dummy,
min_df = 0.000167
)
x_train = tfidf.fit_transform(x_train)
x_test = tfidf.transform(x_test)
```
Using Cross Validation to find the algorithm that gives the best performance
```
xg = xgb.XGBClassifier()
fo = forest.RandomForestClassifier()
tr = tree.DecisionTreeClassifier()
lo = linear_model.LogisticRegression()
sv = svm.SVC()
xgb_score = cross_val_score(xg,x_train,y_train,cv=5)
ran_score = cross_val_score(fo,x_train,y_train,cv=5)
dtree_score = cross_val_score(tr,x_train,y_train,cv=5)
log_score = cross_val_score(lo,x_train,y_train,cv=5)
svm_score = cross_val_score(sv,x_train,y_train,cv=5)
# This Dataframe outputs the average score for each algorithms
df_score = pd.DataFrame({"model":["xgboost","RandomForestClassifier","DecisionTreeClassifier","LogisticRegression","Support vector machine"],"score":[stats.mean(xgb_score),stats.mean(ran_score),stats.mean(dtree_score),stats.mean(log_score),stats.mean(svm_score)]})
df_score
# We can see that Support vector machine classifier gave the best score
```
**Hyperparameter Tuning Using Using BayesSearchCV**
Finding the hyperparameter values of a learning algorithm that produces the best result
```
#Checking initial model score
# Baseline SVC with default hyperparameters, trained on the vectorized text.
initial_model = svm.SVC()
initial_model = initial_model.fit(x_train,y_train)
# Mean accuracy on the held-out test split.
original_score = initial_model.score(x_test,y_test)
print(f'Original Score = {original_score}')
# Count vectorization score = Score = 0.7906976744186046
# Tfidf score = Score = 0.7933554817275748
#We see Tfidf gives the best score..so tfidf will be used for vectorization
```
Optimizing parameters
```
# Finding the best parameter
# optimize_model = svm.SVC()
# param = {'C': [0.1,1, 10, 52,100], 'gamma': ('auto','scale'),'kernel': ['linear','rbf', 'poly', 'sigmoid']}
# search = BayesSearchCV(optimize_model,param,scoring="accuracy")
# search = search.fit(x_train,y_train)
# print(search.best_params_)
#best_param = C=1.0,gamma='scale',kernel='rbf'
```
Optimized model
```
# Final model using the best hyperparameters found by BayesSearchCV above:
# C=1.0, gamma='scale', kernel='rbf'.
# BUGFIX: the previous version passed gamma=1.0, which contradicts the
# recorded best_param gamma='scale' from the search cell.
model = svm.SVC(C=1.0, gamma='scale', kernel='rbf')
model = model.fit(x_train, y_train)
# Mean accuracy on the held-out test split.
score = model.score(x_test, y_test)
print(f'model Score = {score}')
```
# Predicting which Tweets are about real disasters and which ones are not
**Reading the data**
```
# Load the pre-cleaned test set (tweets to classify).
test = pd.read_csv("clean_test.csv")
#dummy variables
# Keep the ids for the submission file and the raw tweet text for vectorization.
tweet_id = test["id"]
tweets = test["tweet"]
print(tweets.shape)
```
**converting tweets to numeric using Tfidf vectorizer**
```
# Re-use the TF-IDF vectorizer fitted on the training data (transform only,
# never fit, so the feature space matches what the model was trained on).
tweets = tfidf.transform(tweets)
```
**Predicting test data**
```
# Predict a disaster/non-disaster label for every tweet and assemble the
# submission table (tweet id + predicted target).
pred = model.predict(tweets)
new_df = pd.DataFrame({"id": tweet_id, "target": pred})
new_df.head()
```
saving to csv
```
# Write the submission file.
# BUGFIX: DataFrame.to_csv returns None when given a path, so the previous
# `print(new_df.to_csv(...))` always printed `None`.
new_df.to_csv("disaster_pred.csv", index=False)
print("Saved predictions to disaster_pred.csv")
```
```
```
| github_jupyter |
# Training Unet & Attention Unet
## Dependencies
Install, load, and initialize all required dependencies for this experiment.
### Install Dependencies
```
import sys
!{sys.executable} -m pip install -q -e ../../utils/
```
### Import Dependencies
# System libraries
```
from __future__ import absolute_import, division, print_function
import logging, os, sys
# Enable logging
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO, stream=sys.stdout)
# Re-import packages if they change
%load_ext autoreload
%autoreload 2
# Recursion Depth
# NOTE(review): 1e9 is far beyond any realistic stack depth and can mask
# runaway recursion -- confirm this limit is really needed.
sys.setrecursionlimit(1000000000)
# Initialize tqdm to always use the notebook progress bar
# NOTE(review): tqdm.tqdm_notebook is deprecated in newer tqdm releases
# (use tqdm.notebook.tqdm) -- verify against the pinned tqdm version.
import tqdm
tqdm.tqdm = tqdm.tqdm_notebook
# Third-party libraries
import comet_ml
import numpy as np
import pandas as pd
import nilearn.plotting as nip
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np  # NOTE(review): duplicate of the numpy import above
import torch
import collections
%matplotlib inline
plt.rcParams["figure.figsize"] = (12,6)
%config InlineBackend.figure_format='retina' # adapt plots for retina displays
import git
import time
# Project utils
import aneurysm_utils
from aneurysm_utils import evaluation, training,preprocessing
# Pick the project root depending on where the notebook is running.
# NOTE(review): ROOT stays undefined if neither path matches -- the
# Environment construction below would then raise NameError.
if "workspace" in os.getcwd():
    ROOT = "/workspace" # local
elif "/group/cake" in os.getcwd():
    ROOT = "/group/cake" # Jupyter Lab
```
### Initialize Environment
```
env = aneurysm_utils.Environment(project="our-git-project", root_folder=ROOT)
env.cached_data["comet_key"] = "" # Add comet key here to track experiments
env.print_info()
```
## Load Data
Download, explore, and prepare all required data for the experiment in this section.
```
# Dataset selection / splitting options (consumed by the load/split cells below).
dataset_params = {
    "prediction": "mask",
    "mri_data_selection": "",
    "balance_data": False,
    "seed": 1,
    "resample_voxel_dim": (1.2, 1.2, 1.2)
}
# Intensity preprocessing switches (applied in the preprocessing cell below).
preprocessing_params = {
    'min_max_normalize': True,
    'mean_std_normalize': False,
    'smooth_img': False, # can contain a number: smoothing factor
    'intensity_segmentation': False
}
```
### Load Meta Data
```
from aneurysm_utils.data_collection import load_aneurysm_dataset

# Load the aneurysm metadata table (presumably one row per scan -- verify).
df = load_aneurysm_dataset(
    env,
    mri_data_selection=dataset_params["mri_data_selection"],
    random_state=dataset_params["seed"]
)
df.head()
```
### Load & Split MRI Data
```
# Load MRI images and split into train, test, and validation
from aneurysm_utils.data_collection import split_mri_images

train_data, test_data, val_data, _ = split_mri_images(
    env,
    df,
    prediction=dataset_params["prediction"],
    encode_labels=False,
    random_state=dataset_params["seed"],
    balance_data=dataset_params["balance_data"],
    resample_voxel_dim=dataset_params["resample_voxel_dim"]
)
# Each split unpacks to (images, labels, participant ids).
mri_imgs_train, labels_train,train_participants = train_data
mri_imgs_test, labels_test,test_participants = test_data
mri_imgs_val, labels_val,val_participants = val_data

from aneurysm_utils import preprocessing
# check_mri_shapes reports the shape distribution and returns the most
# common volume shape; odd-sized volumes are dropped in the next cell.
most_commen_shape = preprocessing.check_mri_shapes(mri_imgs_train)
```
## Transform & Preprocess Data
```
# Keep only volumes (and their masks) that match the most common shape so
# they can later be stacked into uniform arrays.
size = most_commen_shape

def _keep_matching_shape(images, labels, expected_shape):
    # Filter image/label pairs in lockstep by the image's shape.
    keep = [i for i, img in enumerate(images) if img.shape == expected_shape]
    return [images[i] for i in keep], [labels[i] for i in keep]

mri_imgs_train, labels_train = _keep_matching_shape(mri_imgs_train, labels_train, size)
mri_imgs_test, labels_test = _keep_matching_shape(mri_imgs_test, labels_test, size)
mri_imgs_val, labels_val = _keep_matching_shape(mri_imgs_val, labels_val, size)

mri_imgs_train[0].shape
preprocessing.check_mri_shapes(mri_imgs_train)
print(np.unique(labels_val[0], return_counts=True))

from aneurysm_utils import preprocessing

patch_size = 64
size_of_train = len(mri_imgs_train)
size_of_test = len(mri_imgs_test)
size_of_val = len(mri_imgs_val)

# preprocess all lists as one to have a working mean_std_normalization
mri_imgs = mri_imgs_train + mri_imgs_test + mri_imgs_val
mri_imgs = preprocessing.preprocess(env, mri_imgs, preprocessing_params)

# Split back into train/test/val and cut each volume into patches.
mri_imgs_train = np.asarray(mri_imgs[:size_of_train])
mri_imgs_train = preprocessing.patch_list(mri_imgs_train, patch_size)
mri_imgs_test = np.asarray(mri_imgs[size_of_train : size_of_train + size_of_test])
mri_imgs_test = preprocessing.patch_list(mri_imgs_test, patch_size)
mri_imgs_val = np.asarray(mri_imgs[size_of_train + size_of_test :])
mri_imgs_val = preprocessing.patch_list(mri_imgs_val, patch_size)

# Patch the segmentation masks the same way.
# BUGFIX: patch_list was called unqualified here (NameError); it lives in
# the preprocessing module, as used above.  Also removed an unused
# `x, y, h = labels_train[0].shape` unpacking.
labels_train = preprocessing.patch_list(labels_train, patch_size)
labels_test = preprocessing.patch_list(labels_test, patch_size)
labels_val = preprocessing.patch_list(labels_val, patch_size)
```
### Optional: View image
```
# Index of the training patch to inspect interactively.
idx = 0
# Interactive volume viewer (nilearn widget); identity affine since the
# patches carry no spatial metadata.
nip.view_img(
    nib.Nifti1Image(mri_imgs_train[idx], np.eye(4)),
    symmetric_cmap=False,
    cmap="Greys_r",
    bg_img=False,
    black_bg=True,
    threshold=1e-03,
    draw_cross=False
)
# BUGFIX: was `mri_train[0]`, an undefined name; the list is mri_imgs_train.
evaluation.plot_slices(mri_imgs_train[idx])
# Convert lists of patches to numpy arrays for the training pipeline.
mri_imgs_train = np.array(mri_imgs_train)
labels_train = np.array(labels_train)
mri_imgs_val = np.array(mri_imgs_val)
labels_val = np.array(labels_val)
mri_imgs_test = np.array(mri_imgs_test)
labels_test = np.array(labels_test)
# After this cell: ~3.3 GB of memory in use.
```
## Train Model
Implementation, configuration, and evaluation of the experiment.
### Train Deep Model 3D data
```
from comet_ml import Optimizer

# Data handed to the training routine: (patches, masks) per split.
artifacts = {
    "train_data": (mri_imgs_train, labels_train),
    "val_data": (mri_imgs_val, labels_val),
    "test_data": (mri_imgs_test, labels_test)
}

# Training configuration for the 3D U-Net segmentation run.
params = {
    "batch_size": 6,
    "epochs": 45,
    "es_patience": 3, # None = deactivate early stopping
    "model_name": 'Unet3D_Oktay',
    "optimizer_momentum": 0.9,
    "optimizer":'Adam',
    "scheduler": 'ReduceLROnPlateau',
    "criterion": "DiceCELoss",
    "sampler": None, #'ImbalancedDatasetSampler2',
    "shuffle_train_set": True,
    "save_models":True,
    "debug": False,
    "criterion_weights": 1,
    "learning_rate": 1e-4,
    "use_cuda":True,
    "feature_scale": 2,
}
params.update(dataset_params)
params.update(preprocessing_params)

# Comet.ml Bayesian hyperparameter search over the learning rate only.
config = {
    # We pick the Bayes algorithm:
    "algorithm": "bayes",
    # Declare your hyperparameters in the Vizier-inspired format:
    "parameters": {
        "learning_rate": {"type": "float", "scalingType": "loguniform", "min": 1e-4, "max": 1e-3},
    },
    # Declare what we will be optimizing, and how:
    "spec": {"metric": "train_bal_acc", "objective": "maximize"}, #test balance accuracy
}
opt = Optimizer(config, api_key=env.cached_data["comet_key"])

# ~3.3 GB of memory in use at this point.
import gc
gc.collect()

# Finally, get experiments, and train your models:
for comet_exp in opt.get_experiments(project_name=env.project + "-" + params["prediction"]):
    print(comet_exp)
    # Fresh copy per trial so each experiment sees its own learning rate.
    # (A no-op bare `comet_exp.params` expression and a redundant inner
    # `import gc` were removed from this loop.)
    param_copy = params.copy()
    param_copy["learning_rate"] = comet_exp.get_parameter("learning_rate")
    exp = env.create_experiment(
        params["prediction"] + "-pytorch-" + params["model_name"], comet_exp
    ) #params["selected_label"] + "-hyperopt-" + params["model_name"]
    exp.run(training.train_pytorch_model, param_copy, artifacts)
    time.sleep(3)
    # Drop references and force a GC pass to release memory between trials.
    del exp
    gc.collect()
```
| github_jupyter |
# Quick Start
Below is a simple demo of interaction with the environment of the VM scheduling scenario.
```
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload

# VM-scheduling environment: 8638 ticks at 5 minutes each (~30 days),
# snapshotting every tick.
env = Env(scenario="vm_scheduling", topology="azure.2019.10k", start_tick=0, durations=8638, snapshot_resolution=1)

metrics: object = None
decision_payload: DecisionPayload = None
is_done: bool = False

# Step with a None action every time (i.e. ignore every VM request)
# until the simulation ends, then print the final metrics.
while not is_done:
    action: AllocateAction = None
    metrics, decision_payload, is_done = env.step(action)

print(metrics)
```
# Environment of the VM scheduling
To initialize an environment, you need to specify the values of several parameters:
- **scenario**: The target scenario of this Env.
- `vm_scheduling` denotes for the virtual machine scheduling.
- **topology**: The target topology of this Env. As shown below, you can get the predefined topology list by calling `get_topologies(scenario='vm_scheduling')`
- **start_tick**: The start tick of this Env. In vm_scheduling scenario, 1 tick represents as 5 minutes in real-time.
- In the demo above, `start_tick=0` indicates a simulation start from the beginning of the given topology.
- **durations**: The duration of this Env, in the unit of tick/5 minutes.
- In the demo above, `durations=8638` indicates a simulation length of roughly 30 days (30d * 24h * 60m / 5). It is also the maximum length of this topology.
- **snapshot_resolution**: The time granularity of maintaining the snapshots of the environments, in the unit of tick/5 minutes.
- In the demo above, `snapshot_resolution=1` indicates that a snapshot will be created and saved every tick during the simulation.
You can get all available scenarios and topologies by calling:
```
from maro.simulator.utils import get_scenarios, get_topologies
from pprint import pprint
from typing import List

# Query the built-in scenario and topology catalogs.
scenarios: List[str] = get_scenarios()
topologies: List[str] = get_topologies(scenario='vm_scheduling')

# NOTE(review): these f-strings contain no placeholders; plain strings
# would do (pprint also quotes them in the output).
pprint(f'The available scenarios in MARO:')
pprint(scenarios)
print()
pprint(f'The predefined topologies in VM Scheduling:')
pprint(topologies)
```
Once you created an instance of the environment, you can easily access the real-time information of this environment, like:
```
from maro.backends.frame import SnapshotList
from maro.simulator import Env
from pprint import pprint

# Initialize an Env for vm_scheduling scenario.
env = Env(scenario="vm_scheduling", topology="azure.2019.10k", start_tick=0, durations=8638, snapshot_resolution=1)

# The current tick (0 here since the simulation has not stepped yet).
tick: int = env.tick
print(f"The current tick: {tick}.")

# The current frame index, which indicates the index of current frame in the snapshot-list.
frame_index: int = env.frame_index
print(f"The current frame index: {frame_index}.")

# The whole snapshot-list of the environment; snapshots are taken in the
# granularity of the given snapshot_resolution.
# The example of how to use the snapshot will be shown later.
snapshot_list: SnapshotList = env.snapshot_list
print(f"There will be {len(snapshot_list)} snapshots in total.")

# The summary information of the environment (node/attribute schema etc.).
summary: dict = env.summary
print(f"\nEnv Summary:")
pprint(summary)

# The metrics of the environment.
metrics: dict = env.metrics
print(f"\nEnv Metrics:")
pprint(metrics)
```
# Interaction with the environment
Before starting interaction with the environment, we need to know **DecisionPayload** and **Action** first.
## DecisionPayload
Once the environment need the agent's response to promote the simulation, it will throw an **PendingDecision** event with the **DecisionPayload**. In the scenario of vm_scheduling, the information of `DecisionPayload` is listed as below:
- **valid_pms** (List[int]): The list of the PM ID that is considered as valid (its CPU and memory resource is enough for the incoming VM request).
- **vm_id** (int): The VM ID of the incoming VM request(VM request that is waiting for the allocation).
- **vm_cpu_cores_requirement** (int): The CPU cores that is requested by the incoming VM request.
- **vm_memory_requirement** (int): The memory resource that is requested by the incoming VM request.
- **remaining_buffer_time** (int): The remaining buffer time for the VM allocation. The VM request will be treated as failed when the `remaining_buffer_time` is spent. The initial buffer time budget can be set in the `config.yml`.
## Action
Once get a **PendingDecision** event from the environment, the agent should respond with an `Action`. Valid `Action` includes:
- **None**. It means do nothing but ignore this VM request.
- **AllocateAction**. It includes:
- vm_id (int): The ID of the VM that is waiting for the allocation.
- pm_id (int): The ID of the PM where the VM is scheduled to allocate to.
- **PostponeAction**. It includes:
- vm_id (int): The ID of the VM that is waiting for the allocation.
- postpone_step (int): The number of times that the allocation to be postponed. The unit is `DELAY_DURATION`. 1 means delay 1 `DELAY_DURATION`, which can be set in the `config.yml`.
## Generate random actions
The demo code in the Quick Start part has shown an interaction mode that doing nothing(responding with `None` action). Here we read the detailed information about the `DecisionPayload` and randomly choose an available PM.
```
import random
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload, PostponeAction

# Initialize an Env for vm_scheduling scenario
env = Env(scenario="vm_scheduling", topology="azure.2019.10k", start_tick=0, durations=8638, snapshot_resolution=1)

metrics: object = None
decision_event: DecisionPayload = None
is_done: bool = False
action: AllocateAction = None

# Start the env with a None Action
metrics, decision_event, is_done = env.step(None)

# Random policy: allocate each request to a uniformly chosen valid PM, or
# postpone by one DELAY_DURATION when no PM can host it.
while not is_done:
    candidate_pms = decision_event.valid_pms
    if candidate_pms:
        chosen_pm = candidate_pms[random.randint(0, len(candidate_pms) - 1)]
        action = AllocateAction(vm_id=decision_event.vm_id, pm_id=chosen_pm)
    else:
        action = PostponeAction(vm_id=decision_event.vm_id, postpone_step=1)
    metrics, decision_event, is_done = env.step(action)

print(f"[Random] Topology: azure.2019.10k. Total ticks: 8638. Start tick: 0")
print(metrics)
```
# Get the environment observation
You can also implement other strategies or build models to take action. At this time, real-time information and historical records of the environment are very important for making good decisions. In this case, the environment snapshot list is exactly what you need.
The information in the snapshot list is indexed by 3 dimensions:
- A frame index (list). (int / List[int]) Empty indicates for all time slides till now.
- A PM id (list). (int / List[int]) Empty indicates for all PMs.
- An Attribute name (list). (str / List[str]) You can get all available attributes in env.summary as shown before.
The return value from the snapshot list is a `numpy.ndarray` with shape (`num_frame` * `num_pms` * `num_attribute`, ).
More detailed introduction to the snapshot list is [here](https://maro.readthedocs.io/en/latest/key_components/data_model.html#advanced-features).
```
from pprint import pprint
from maro.simulator import Env

# Initialize an Env for vm_scheduling scenario
env = Env(scenario="vm_scheduling", topology="azure.2019.10k", start_tick=0, durations=8638, snapshot_resolution=1)

# To get the attribute list that can be accessed in snapshot_list
pprint(env.summary['node_detail'], depth=2)
print()
# The attribute list of PM nodes
pprint(env.summary['node_detail']['pms'])

import random  # BUGFIX: used below but was not imported in this cell
from pprint import pprint
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload, PostponeAction

env = Env(scenario="vm_scheduling", topology="azure.2019.10k", start_tick=0, durations=8638, snapshot_resolution=1)

metrics: object = None
decision_event: DecisionPayload = None
is_done: bool = False
action: AllocateAction = None

metrics, decision_event, is_done = env.step(None)

while not is_done:
    # This demo is used to show how to retrieve the information from the snapshot,
    # we terminate at 2000th tick and see the output of the environment.
    if env.frame_index >= 2000 and len(decision_event.valid_pms) > 0:
        # Get current state information of the first 10 valid PMs.
        # Snapshot indexing is frame(s) : node id(s) : attribute name(s).
        valid_pm_info = env.snapshot_list["pms"][
            env.frame_index:decision_event.valid_pms[:10]:["cpu_cores_capacity", "cpu_cores_allocated"]
        ].reshape(-1, 2)
        # Calculate to get the remaining cpu cores.
        cpu_cores_remaining = valid_pm_info[:, 0] - valid_pm_info[:, 1]
        # Show current state information of the first 10 valid PMs.
        print("For the first 10 valid PMs:")
        print(f"cpu core capacity: {valid_pm_info[:, 0]}")
        print(f"cpu core allocated: {valid_pm_info[:, 1]}")
        print(f"cpu core remaining: {cpu_cores_remaining}")
        # Get the historical cpu utilization of the first valid PM in the recent 10 ticks.
        past_10_frames = [x for x in range(env.frame_index - 10, env.frame_index)]
        cpu_utilization_series = env.snapshot_list["pms"][
            past_10_frames:decision_event.valid_pms[0]:"cpu_utilization"
        ]
        # Show the historical information of the first valid PM.
        print("For the first valid PM:")
        print(f"Recent cpu utilization series is: {cpu_utilization_series}")
        break
    valid_pm_num: int = len(decision_event.valid_pms)
    if valid_pm_num <= 0:
        # No valid PM now, postpone.
        action: PostponeAction = PostponeAction(
            vm_id=decision_event.vm_id,
            postpone_step=1
        )
    else:
        # Randomly choose an available PM.
        random_idx = random.randint(0, valid_pm_num - 1)
        pm_id = decision_event.valid_pms[random_idx]
        action: AllocateAction = AllocateAction(
            vm_id=decision_event.vm_id,
            pm_id=pm_id
        )
    metrics, decision_event, is_done = env.step(action)
```
| github_jupyter |
```
# Re-import packages automatically when their source changes (dev aid).
%load_ext autoreload
%autoreload 2

import higlass
import higlass.tilesets
from higlass.client import Track, View
```
## Synced heatmaps
```
from higlass.client import View, Track
import higlass

t1 = Track(track_type='top-axis', position='top')
# Remote heatmap served from the public higlass.io tileset server.
t2 = Track(track_type='heatmap', position='center',
           tileset_uuid='CQMd6V_cRw6iCI_-Unl3PQ',
           server="http://higlass.io/api/v1/")

# the entire viewport has a width of 12 so a width of 6 for
# each view means they take up half the width
view1 = View([t1, t2], width=6)
view2 = View([t1, t2], width=6, x=6)

# Lock the two views together: same location, zoom level, and value scale
# for the heatmap track in each view.
display, server, viewconf = higlass.display(
    [view1, view2],
    location_syncs = [[view1, view2]],
    zoom_syncs = [[view1, view2]],
    value_scale_syncs = [[(view1, t2), (view2, t2)]])
display
```
## Value scale syncing
```
from higlass.client import View, Track
import higlass

t1 = Track(track_type='top-axis', position='top')
# Same remote heatmap as above, with a capped color scale.
t2 = Track(track_type='heatmap', position='center',
           tileset_uuid='CQMd6V_cRw6iCI_-Unl3PQ',
           server="http://higlass.io/api/v1/",
           height=250,
           options={ 'valueScaleMax': 0.5 })

# the entire viewport has a width of 12 so a width of 6 for
# each view means they take up half the width
view1 = View([t1, t2], width=6)
view2 = View([t1, t2], width=6, x=6)

# Only the value scales are synced here; location and zoom stay independent.
display, server, viewconf = higlass.display(
    [view1, view2],
    value_scale_syncs = [[(view1, t2), (view2, t2)]])
display
```
## Remote heatmaps
```
from higlass.utils import hg_cmap

# Shared definition for a remote heatmap track; copies below override fields.
track_def = {
    "track_type": 'heatmap',
    "position": 'center',
    "tileset_uuid": 'CQMd6V_cRw6iCI_-Unl3PQ',
    "server": "http://higlass.io/api/v1/",
    "height": 210,
    "options": {}
}
t1 = Track(**track_def)
t2 = Track(**{ **track_def, "tileset_uuid": "QvdMEvccQuOxKTEjrVL3wA" })
# Divided track (t1 / t2) rendered with a diverging colormap.
t3 = (t1 / t2).change_attributes(
    options={
        'colorRange': hg_cmap('coolwarm'),
        'valueScaleMin': 0.1,
        'valueScaleMax': 10,
    })

# Show all three views over the same genomic interval.
domain = [7e7, 8e7]
v1 = View([t1], x=0, width=4, initialXDomain=domain)
v2 = View([t3], x=4, width=4, initialXDomain=domain)
v3 = View([t2], x=8, width=4, initialXDomain=domain)
display, server, viewconf = higlass.display([v1, v2, v3])
display
# BUGFIX: a stray bare identifier `QvdMEvccQuOxKTEjrVL3wA` (a pasted tileset
# uuid) was here and would raise NameError; removed.
viewconf
```
## Remote bigWig tiles
```
# Serve a remote bigWig file through a local tileset wrapper.
ts1 = higlass.tilesets.bigwig(
    'http://hgdownload.cse.ucsc.edu/goldenpath/hg19/encodeDCC/wgEncodeSydhTfbs/wgEncodeSydhTfbsGm12878InputStdSig.bigWig')
view1 = View([Track('horizontal-bar', tileset=ts1)])
display, server, viewconf = higlass.display([view1])
display
```
## Local cooler files
This section describes how to load cooler files that are on the same filesystem as the Jupyter notebook. If the Jupyter kernel is not running on localhost, see the section on [Remote Jupyter notebooks](https://docs-python.higlass.io/getting_started.html#remote-jupyter-notebook) in the docs.
```
# Serve a local multi-resolution cooler file as a heatmap tileset.
ts1 = higlass.tilesets.cooler('../test/data/Dixon2012-J1-NcoI-R1-filtered.100kb.multires.cool')
tr1 = Track('heatmap', tileset=ts1)
view1 = View([tr1])
display, server, viewconf = higlass.display([view1])
display
```
## Local bigWig files (with chromsizes)
```
# hg19 chromosome sizes (name, length in bp) used to give the local bigWig
# track genomic coordinates and chromosome labels.
# NOTE(review): chr20 is listed before chr19; higlass lays chromosomes out
# in list order, so confirm this ordering is intentional.
chromsizes = [
    ('chr1', 249250621),
    ('chr2', 243199373),
    ('chr3', 198022430),
    ('chr4', 191154276),
    ('chr5', 180915260),
    ('chr6', 171115067),
    ('chr7', 159138663),
    ('chr8', 146364022),
    ('chr9', 141213431),
    ('chr10', 135534747),
    ('chr11', 135006516),
    ('chr12', 133851895),
    ('chr13', 115169878),
    ('chr14', 107349540),
    ('chr15', 102531392),
    ('chr16', 90354753),
    ('chr17', 81195210),
    ('chr18', 78077248),
    ('chr20', 63025520),
    ('chr19', 59128983),
    ('chr21', 48129895),
    ('chr22', 51304566),
    ('chrX', 155270560),
    ('chrY', 59373566),
    ('chrM', 16571),
]
bigwig_fp = '../test/data/wgEncodeCaltechRnaSeqHuvecR1x75dTh1014IlnaPlusSignalRep2.bigWig'

# Tilesets: the signal track plus chromosome labels from the same sizes list.
ts = higlass.tilesets.bigwig(bigwig_fp, chromsizes=chromsizes)
cs = higlass.tilesets.ChromSizes(chromsizes)
view1 = View([
    Track('top-axis'),
    Track('horizontal-bar', tileset=ts),
    Track('horizontal-chromosome-labels', position='top', tileset=cs)
])
display, server, viewconf = higlass.display([view1])
display
```
## Local bedlike data
```
from higlass.client import View, Track
from higlass.inline_tiles import bedtiles
import higlass

# Two BED records: (chrom, start, end, name, score, strand).
# NOTE(review): both rows are named 'item #1'; the second was possibly
# meant to be 'item #2' -- display data only.
bed = [['chr1', 1000, 2000, 'item #1', '.', '+'],
       ['chr2', 3000, 3500, 'item #1', '.', '-']]
chroms = [['chr1', 2100], ['chr2', 4000]]
# Pack the records as inline tiles so no server-side file is needed.
data = bedtiles(bed, chroms)
track = Track(track_type='bedlike', position='top',
              height=50, data=data, options={"minusStrandColor": "red"})
d,s,v = higlass.display([[track]])
d
```
## Custom data
```
import numpy as np

# Synthetic 2-D test surface (eggholder-style function) evaluated on a
# dim x dim integer grid; used below as an in-memory heatmap dataset.
dim = 2000
I, J = np.indices((dim, dim))
shifted = J + 47
data = (
    -shifted * np.sin(np.sqrt(np.abs(I / 2 + shifted)))
    - I * np.sin(np.sqrt(np.abs(I - shifted)))
)
import clodius.tiles.npmatrix
from higlass.tilesets import Tileset

# Wrap the in-memory matrix as a higlass tileset: clodius serves the tile
# metadata and tile data directly from the numpy array.
ts = Tileset(
    tileset_info=lambda: clodius.tiles.npmatrix.tileset_info(data),
    tiles=lambda tids: clodius.tiles.npmatrix.tiles_wrapper(data, tids)
)
display, server, viewconf = higlass.display([
    View([
        Track(track_type='top-axis', position='top'),
        Track(track_type='left-axis', position='left'),
        Track(track_type='heatmap',
              position='center',
              tileset=ts,
              height=250,
              options={ 'valueScaleMax': 0.5 }),
    ])
])
display
%%time
import requests
import json

# Register a remote bigWig with the local higlass server by URL, then
# fetch a few tiles to verify the round trip.
url = 'http://localhost:{}/api/v1/register_url/'.format(server.port)
print('url:', url)
ret = requests.post(url,
    json={
        'fileUrl': 'http://hgdownload.cse.ucsc.edu/goldenpath/hg19/encodeDCC/wgEncodeSydhTfbs/wgEncodeSydhTfbsGm12878InputStdSig.bigWig',
        #'fileUrl': 'http://localhost:8111/wgEncodeCaltechRnaSeqHuvecR1x75dTh1014IlnaPlusSignalRep2.bigWig',
        'filetype': "bigwig"
    })
print("content:", ret.content)
# The server responds with a uid for the registered tileset.
uid = json.loads(ret.content)['uid']
# Request three zoom-level-2 tiles for that tileset.
url = 'http://localhost:{}/api/v1/tiles/?d={}.2.0&d={}.2.1&d={}.2.2'.format(server.port, uid, uid, uid)
#url = "http://localhost:8111/wgEncodeCaltechRnaSeqHuvecR1x75dTh1014IlnaPlusSignalRep2.bigWig"
req = requests.get(url)
print('req', req.content)
"""
url = 'http://localhost:{}/api/v1/tileset_info/?d={}'.format(server.port, uid)
print('url:', url);
req = requests.get(url)
print('req', req.content)
"""
```
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Solution Notebook
## Problem: Find the single different char between two strings.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
## Constraints
* Can we assume the strings are ASCII?
* Yes
* Is case important?
* The strings are lower case
* Can we assume the inputs are valid?
* No, check for None
* Can we assume this fits memory?
* Yes
## Test Cases
* None input -> TypeError
* 'abcd', 'abcde' -> 'e'
* 'aaabbcdd', 'abdbacade' -> 'e'
## Algorithm
### Dictionary
* Keep a dictionary of seen values in s
* Loop through t, decrementing the seen values
* If the char is not there or if the decrement results in a negative value, return the char
Complexity:
* Time: O(m+n), where m and n are the lengths of s, t
* Space: O(h), for the dict, where h is the unique chars in s
### XOR
* XOR the two strings, which will isolate the differing char
Complexity:
* Time: O(m+n), where m and n are the lengths of s, t
* Space: O(1)
## Code
```
class Solution(object):
    """Find the single extra character present in str2 but missing from str1."""

    def find_diff(self, str1, str2):
        """Return the extra char in str2 via character counting.

        Counts the chars of str1 with a Counter, then consumes them while
        scanning str2; the first char with no remaining budget is the extra
        one.  Time: O(m+n); Space: O(h) for the h unique chars in str1.
        Raises TypeError on None input; returns None if no extra char exists.
        """
        from collections import Counter
        if str1 is None or str2 is None:
            raise TypeError('str1 or str2 cannot be None')
        seen = Counter(str1)
        for char in str2:
            if seen[char] == 0:
                # Never present in str1 (Counter defaults to 0) or used up.
                return char
            seen[char] -= 1
        return None

    def find_diff_xor(self, str1, str2):
        """Return the extra char by XOR-folding both strings.

        Every char appearing an even number of times across both strings
        cancels, leaving the code point of the single extra char.
        Time: O(m+n); Space: O(1).
        Raises TypeError on None input.
        """
        if str1 is None or str2 is None:
            raise TypeError('str1 or str2 cannot be None')
        result = 0
        for char in str1:
            result ^= ord(char)
        for char in str2:
            result ^= ord(char)
        return chr(result)
```
## Unit Test
```
%%writefile test_str_diff.py
from nose.tools import assert_equal, assert_raises
# NOTE(review): nose is unmaintained; consider migrating to pytest.  The
# written file never imports Solution itself -- it relies on the class
# already being in the namespace when executed via `%run -i` below.


class TestFindDiff(object):

    def test_find_diff(self):
        solution = Solution()
        # NOTE(review): this passes because calling find_diff with a single
        # argument raises TypeError for the missing positional parameter,
        # not via the explicit None check -- confirm intent.
        assert_raises(TypeError, solution.find_diff, None)
        assert_equal(solution.find_diff('abcd', 'abcde'), 'e')
        assert_equal(solution.find_diff('aaabbcdd', 'abdbacade'), 'e')
        print('Success: test_find_diff')


def main():
    test = TestFindDiff()
    test.test_find_diff()


if __name__ == '__main__':
    main()

%run -i test_str_diff.py
```
| github_jupyter |
Similar to fiducial drift correction, 3D imaging based on astigmatism is implemented in B-Store in separate parts:
1. the `CalibrateAstigmatism` processor that is used to launch the interactive calibration, and
2. a `ComputeTrajectories` class that describes the algorithm for fitting smoothed curves to the beads' x- and y-widths.
After fitting the calibration curves, CalibrateAstigmatism provides a function known as `calibrationCurve` that takes a set of localizations as inputs and computes their axial positions as a result.
```
# Be sure not to use the %pylab inline option
# (the astigmatism calibration below needs an interactive matplotlib window).
%pylab

from bstore import processors as proc
from pathlib import Path
import pandas as pd
```
# Load the test data
The test data for this example is in the [B-Store test files repository](https://github.com/kmdouglass/bstore_test_files). Download or clone this repository, and set the variable below to point to */processor_test_files/sequence-as-stack-Beads-AS-Exp_Localizations.csv*
```
pathToData = Path('../../bstore_test_files/processor_test_files/sequence-as-stack-Beads-AS-Exp_Localizations.csv')
# Load the test data
with open(str(pathToData), 'r') as f:
df = pd.read_csv(f)
df.describe()
```
This dataset contains localizations from six fluorescent beads that are scanned axially and with a cylindrical lens in the imaging path. The dataset is synthetic and comes from the [2016 SMLM Challenge](http://bigwww.epfl.ch/smlm/challenge2016/index.html); for convenience, the localizations have already been computed from the z-stacks.
The known z-position is in the column named **z [nm]**. The PSF widths in x and y are in the columns named **sigma_x [nm]** and **sigma_y [nm]** respectively.
Let's plot the localizations' x- and y-positions:
```
plt.scatter(df['x [nm]'], -df['y [nm]'], s=2)
plt.xlabel('x-position')
plt.ylabel('y-position')
plt.axis('equal')
plt.grid(True)
plt.show()
```
# Axial calibrations
If you have already worked through the Fiducial Drift Correction notebook, then this part will look familiar. We start by defining a `CalibrateAstigmatism` processor with a few custom parameters. Then, we feed it with our localization file. A window appears that shows a 2D histogram of the density of localizations, allowing us to manually select the beads. We can select any number of regions we like by clicking and dragging a rectangle around the regions.
After a region is drawn, **press the space bar to add it to the processor**. You may then select another region in the same manner. To finish searching for beads, simply close the window.
Try selecting the bead at \\( x = 4.3 \, \mu m \\), \\( y = 7 \, \mu m \\) and closing the window afterward.
```
# coordCols = ['x', 'y'] by default
ca = proc.CalibrateAstigmatism(coordCols=['x [nm]', 'y [nm]'],
sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'],
zCol='z [nm]')
ca.astigmatismComputer.smoothingWindowSize=20
ca.astigmatismComputer.smoothingFilterSize=3
processed_df = ca(df)
processed_df.describe()
```
The processed DataFrame is actually the same as the input; no changes were made. However, if all went well, the CalibrateAstigmatism processor has fit splines to the PSF widths as a function of z and computed the calibration curve.
Before discussing what the `CalibrateAstigmatism` processor did, let's go over how it was used. First, we create the processor by setting some of its optional parameters:
```
ca = proc.CalibrateAstigmatism(coordCols=['x [nm]', 'y [nm]'],
sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'],
zCol='z [nm]')
```
`coordCols` contains the names of the columns of the x- and y-positions of the localizations and `sigmaCols` contains the names of the columns containing the widths of the PSF in the x- and y-directions. Finally, `zCol` is the name of the known z-position of the beads. (This is known because it is controlled during the acquisition of the image stack of the fluorescent beads.) Some of the other optional parameters we could set are
1. `startz` : where the z-fitting should begin
2. `stopz` : where the z-fitting should end
3. `astigmatismComputer` : this is a `ComputeTrajectories` object for calculating the spline fits to the individual bead trajectories in z. You can write your own algorithm for fitting z-trajectories and feed it to the processor using this parameter.
4. `interactiveSearch` : a True/False value that determines whether a window is displayed to allow the user to select regions containing beads. You would set this to False if you already found beads but want to refit them using some different spline fitting parameters of the astigmatismComputer.
5. `wobbleComputer` : this is another `ComputeTrajectories` object that calculates the PSF's centroid's position as a function of z, also known as wobble.
Next, we adjust some of the smoothing spline parameters. These parameters are not part of the `CalibrateAstigmatism` processor; rather they belong to the `DefaultAstigmatismComputer` which belongs to the processor. The `DefaultAstigmatismComputer` is simply a type of `ComputeTrajectories` object for computing astigmatism calibration curves.
```
ca.astigmatismComputer.smoothingWindowSize=20
ca.astigmatismComputer.smoothingFilterSize=3
```
`smoothingWindowSize` is the size of the moving window that is used to weight the points in the trajectory during the spline fitting; `smoothingFilterSize` is the standard deviation of the Gaussian weighting function.
Finally, we perform the calibration by calling the processor on the DataFrame:
```
processed_df = ca(df)
```
### Plot the calibration curves and bead localizations
Just like with the FiducialDriftCorrect processor, we can plot the individual localizations belonging to the bead we selected as a function of z, as well as the average smoothing spline.
```
ca.astigmatismComputer.plotBeads()
```
You should see a single window appear containing two plots. The top is a plot of the PSF width in x vs. z, and the bottom is a plot of the PSF width in y vs. z. The data points are the individual localizations and the curves are the two splines that fit to each trajectory.
# Modifying the bead fits
## Changing which beads are used in the average trajectory
Let's now rerun the processor. This time, select at least two regions containing beads. (I selected the same one as before and another at \\(x = 9.3 \, \mu m \\) and \\(y = 2.7 \, \mu m\\).)
```
processed_df = ca(df)
ca.astigmatismComputer.plotBeads()
```
By selecting multiple beads, we tell B-Store to compute the average of the individually-fitted splines. This average spline is displayed in the plots as the solid, continuous curve plotted over the data points. If, for some reason, one of the beads was noisy or simply not good, then the average spline may not accurately represent the astigmatism present in the system. We can request that the `DefaultAstigmatismComputer` use only certain beads by setting its `useTrajectories` parameter.
```
# Recompute the average spline without selecting beads first
ca.interactiveSearch = False
ca.astigmatismComputer.useTrajectories = [1] # Use only bead number 1
_ = ca(df) # underscore means don't bother capturing the output
ca.astigmatismComputer.plotBeads()
```
Now the points belonging to bead number 0 will appear in gray; this indicates that they were not used in the fit. If you look closely, you will also see that the spline has changed very slightly and fits only the localizations belonging to bead number 1.
If you decide that you really do want to use all the beads, we can indicate this by setting `useTrajectories` to the empty list (`[]`).
```
ca.astigmatismComputer.useTrajectories = [] # Use all beads
_ = ca(df)
ca.astigmatismComputer.plotBeads()
```
## Changing the fit range
You may also find that the full axial range in the data contains regions that are noisy or not well fit. We can select a smaller axial region to fit using the `startz` and `stopz` parameters of the `DefaultAstigmatismComputer`.
```
ca.astigmatismComputer.startz = -300
ca.astigmatismComputer.stopz = 300
_ = ca(df)
ca.astigmatismComputer.plotBeads()
```
You should now see gray x's corresponding to data points that are outside the fitting range. You should also see that the average spline now only covers the range \\( \left[ -300 \, nm, 300 \, nm \right] \\).
You will also see a notice that startz and stopz parameters of the wobble computer were updated as well. Its startz and stopz parameters are always synchronized with the astigmatism computer to ensure that all fits are performed on the same range.
## Changing the spline smoothing parameters
Similarly, we can change the smoothing parameters of the cubic spline after we have already selected beads.
```
ca.astigmatismComputer.reset()
ca.astigmatismComputer.smoothingWindowSize = 50
ca.astigmatismComputer.smoothingFilterSize = 25
_ = ca(df)
ca.astigmatismComputer.plotBeads()
ca.astigmatismComputer.reset()
_ = ca(df)
ca.astigmatismComputer.plotBeads()
```
## Adjust the wobble curves
Wobble is the x- and y-position of the PSF centroid as a function of the axial position. The trajectories of the beads' centroid in x and y as a function z is a wobble curve and may be used to correct errors made by false assumptions about the aberrations present in the PSF. (See [Carlini et al., PLoS One 2015](http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0142949) for more information.)
The wobble computer is tuned in much the same way as the astigmatism computer. If you don't want to correct for wobble, then simply ignore this feature.
```
ca.wobbleComputer.plotBeads()
ca.wobbleComputer.smoothingWindowSize = 10
ca.wobbleComputer.smoothingFilterSize = 2
# The following are locked to the value of startz and stopz from the
# astigmatism computer and therefore do not do anything.
ca.wobbleComputer.startz = -300 # Does nothing!
ca.wobbleComputer.stopz = 300 # Does nothing!
_ = ca(df)
ca.wobbleComputer.plotBeads()
```
# Using the calibrations to axially localize a dataset
Once calibrated, the `CalibrateAstigmatism` processor contains a property called `calibrationCurves` that holds the spline fits to \\( W_x \\) vs. \\(z\\) and \\( W_y \\) vs. \\(z\\), where \\( W_x \\) and \\( W_y \\) are the PSF widths in x and y, respectively. These fits are functions, which means that they accept a single number (the z-coordinate) as an input and produce the width in x and y as outputs.
```
ca.calibrationCurves
```
We can use these functions in B-Store's `ComputeZPosition` processor. To initialize the processor, we need to specify which functions to use and, optionally, the names of the columns containing the z-positions and PSF widths.
```
cz = proc.ComputeZPosition(ca.calibrationCurves,
zCol='z [nm]',
sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'])
```
Though we didn't specify it above, the ComputeZPosition also accepts a parameter called `fittype` that takes one of two values: `diff` (the default value) and `huang`. `diff` computes the calibration curve by first sampling the two spline fits, taking their difference, and then reinterpolating to produce a monotonic calibration curve that transforms \\(W_x - W_y\\) into \\( z \\). In general, it is very fast. The `huang` method computes the z-position by minimizing an objective function related to the distance between the experimental and calibrated PSF widths. This method was used in the first astigmatic STORM paper [Huang, et al. Science 319, 810-813 (2008)](http://science.sciencemag.org/content/319/5864/810). Because each localization requires a call to an optimization method, it is much slower.
Having created the processor, let's now load a test dataset and localize it in z.
```
pathToData = Path('../../bstore_test_files/processor_test_files/MT0.N1.LD-AS-Exp_Localizations.csv')
with open(str(pathToData), 'r') as f:
locs = pd.read_csv(f)
locs.head()
```
You will notice that we already specified the correct column names when creating the `ComputeZPosition` processor.
Now, we simply pass these localizations to the processor and the z-position is computed automatically.
```
locs_z = cz(locs)
locs_z.head()
```
## Correcting wobble
To also correct for wobble, we can specify a few extra parameters to the `ComputeZPosition` processor, including the wobble curves calculated by the `CalibrateAstigmatism` processor.
```
ca.wobbleCurves
cz = proc.ComputeZPosition(ca.calibrationCurves,
zCol='z [nm]',
coordCols=['x [nm]', 'y [nm]'],
sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'],
wobbleFunc=ca.wobbleCurves)
locs_z_wobble = cz(locs)
locs_z_wobble.head()
```
Now you see that a small offset has been applied to the **x [nm]** and **y [nm]** columns to correct these localizations for wobble. The value of the offset has been saved in the **dx** and **dy** columns.
# Modifying the trajectory-fitting algorithm
*You may skip this section if you do not want to program your own astigmatism computer.*
By default, B-Store uses a curve fitting algorithm based on a cubic smoothing spline with weights determined by a Gaussian filter. The algorithm is implemented in a class called `DefaultAstigmatismComputer` which uses the `ComputeTrajectories` interface. You can write your own astigmatism computer by inheriting this interface.
```
import inspect
print(inspect.getsource(proc.ComputeTrajectories))
```
The `ComputeTrajectories` interface provides a property and four methods:
1. `regionLocs` contains a DataFrame with all of the localizations. It must have at least one index with the label 'region_id' that identifies which region the localizations came from.
2. `clearRegionLocs()` removes the localization information that is held by the computer.
3. `_plotCurves()` is the code used to plot the trajectories.
4. `_movingAverage()` is the sliding window Gaussian filter used to weight the datapoints for the cubic smoothing spline.
5. `reset()` resets the computer to its initial state.
In addition, there is one abstract method called `computeTrajectory`. Any class that implements this interface must define a function with this name.
As an example, the actual implementation of this interface by the `DefaultAstigmatismComputer` is printed below:
```
print(inspect.getsource(proc.DefaultAstigmatismComputer.computeTrajectory))
```
The method returns the averaged splines for the PSF widths in each direction. This is a Pandas DataFrame with columns named `z`, `xS` and `yS`.
```
# Print the first five values of the DataFrame returned by the drift computer
ca.astigmatismComputer.avgSpline.head()
```
To set the astigmatism computer used by the `CalibrateAstigmatism` processor, you can either set its `astigmatismComputer` property to the new computer instance or specify the `astigmatismComputer` argument in its constructor:
```python
newCA = proc.CalibrateAstigmatism(astigmatismComputer=myCustomComputer)
```
# Summary
+ 3D astigmatic imaging calibrations are implemented in two parts: a `CalibrateAstigmatism` processor and an interface known as `ComputeTrajectories`.
+ The default astigmatism computer in B-Store is called `DefaultAstigmatismComputer`. It implements the `ComputeTrajectories` interface.
+ Beads are manually identified by setting `interactiveSearch` to True and applying the calibration processor to a DataFrame containing your localizations.
+ Select beads by dragging a square around them and hitting the space bar.
+ You can investigate the individual fiducial trajectories with the `plotBeads()` method belonging to the astigmatism computer.
+ You can change which beads are used by setting `interactiveSearch` to False, and then sending a list of region indexes to `useTrajectories`.
+ Setting `startz` and `stopz` can narrow the z-fitting range of the beads.
+ If you wish, you can write your own astigmatism calibration algorithm by implementing the `ComputeTrajectories` interface and specifying a `computeTrajectory()` method.
+ After calibrating, use the `calibrationCurves` property and the `ComputeZPosition` processor to determine the z-positions of the experimental localizations.
+ Your custom computer may be specified in the `astigmatismComputer` property of the `CalibrateAstigmatism` processor.
| github_jupyter |
In this notebook, we'll learn how to use GANs to do semi-supervised learning.
In supervised learning, we have a training set of inputs $x$ and class labels $y$. We train a model that takes $x$ as input and gives $y$ as output.
In semi-supervised learning, our goal is still to train a model that takes $x$ as input and generates $y$ as output. However, not all of our training examples have a label $y$. We need to develop an algorithm that is able to get better at classification by studying both labeled $(x, y)$ pairs and unlabeled $x$ examples.
To do this for the SVHN dataset, we'll turn the GAN discriminator into an 11 class discriminator. It will recognize the 10 different classes of real SVHN digits, as well as an 11th class of fake images that come from the generator. The discriminator will get to train on real labeled images, real unlabeled images, and fake images. By drawing on three sources of data instead of just one, it will generalize to the test set much better than a traditional classifier trained on only one source of data.
```
%matplotlib inline
import pickle as pkl
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
!mkdir data
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
data_dir = 'data/'
if not isdir(data_dir):
raise Exception("Data directory doesn't exist!")
class DLProgress(tqdm):
    """tqdm progress bar adapted to urlretrieve's reporthook interface."""
    # Number of blocks already reported in the previous hook invocation.
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve calls this as hook(block_num, block_size, total_size).
        self.total = total_size
        # Advance by the bytes downloaded since the last call.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile(data_dir + "train_32x32.mat"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar:
urlretrieve(
'http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
data_dir + 'train_32x32.mat',
pbar.hook)
if not isfile(data_dir + "test_32x32.mat"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar:
urlretrieve(
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
data_dir + 'test_32x32.mat',
pbar.hook)
trainset = loadmat(data_dir + 'train_32x32.mat')
testset = loadmat(data_dir + 'test_32x32.mat')
idx = np.random.randint(0, trainset['X'].shape[3], size=36)
fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),)
for ii, ax in zip(idx, axes.flatten()):
ax.imshow(trainset['X'][:,:,:,ii], aspect='equal')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
plt.subplots_adjust(wspace=0, hspace=0)
def scale(x, feature_range=(-1, 1)):
    """Rescale pixel values into `feature_range` (default (-1, 1), to match tanh output).

    :param x: array of raw pixel values; 255 is assumed to be the raw maximum
        (true for uint8 images -- TODO confirm for other inputs)
    :param feature_range: (low, high) target interval
    :return: rescaled array of the same shape
    """
    # scale to (0, 1)
    x = ((x - x.min()) / (255 - x.min()))
    # scale to feature_range; named low/high to avoid shadowing builtins min/max
    low, high = feature_range
    x = x * (high - low) + low
    return x
class Dataset:
    """Hold the SVHN train/test splits and serve mini-batches.

    Only the first 1000 training labels are marked usable (via label_mask)
    to simulate a semi-supervised setting.
    """
    def __init__(self, train, test, val_frac=0.5, shuffle=True, scale_func=None):
        # Split the provided test set into test/validation halves.
        split_idx = int(len(test['y'])*(1 - val_frac))
        self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:]
        self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:]
        self.train_x, self.train_y = train['X'], train['y']
        # The SVHN dataset comes with lots of labels, but for the purpose of this exercise,
        # we will pretend that there are only 1000.
        # We use this mask to say which labels we will allow ourselves to use.
        self.label_mask = np.zeros_like(self.train_y)
        self.label_mask[0:1000] = 1
        # Move the example axis first: (32, 32, 3, N) -> (N, 32, 32, 3).
        self.train_x = np.rollaxis(self.train_x, 3)
        self.valid_x = np.rollaxis(self.valid_x, 3)
        self.test_x = np.rollaxis(self.test_x, 3)
        if scale_func is None:
            self.scaler = scale
        else:
            self.scaler = scale_func
        self.train_x = self.scaler(self.train_x)
        self.valid_x = self.scaler(self.valid_x)
        self.test_x = self.scaler(self.test_x)
        self.shuffle = shuffle
    def batches(self, batch_size, which_set="train"):
        """Yield (x, y, label_mask) batches for "train", or (x, y) otherwise.

        Bug fix: this method previously read and mutated a module-level
        `dataset` global instead of `self`, so it only worked for the one
        global instance. It now operates on `self` throughout.
        """
        x_name = which_set + "_x"
        y_name = which_set + "_y"
        num_examples = len(getattr(self, y_name))
        if self.shuffle:
            idx = np.arange(num_examples)
            np.random.shuffle(idx)
            setattr(self, x_name, getattr(self, x_name)[idx])
            setattr(self, y_name, getattr(self, y_name)[idx])
            if which_set == "train":
                self.label_mask = self.label_mask[idx]
        dataset_x = getattr(self, x_name)
        dataset_y = getattr(self, y_name)
        for ii in range(0, num_examples, batch_size):
            x = dataset_x[ii:ii+batch_size]
            y = dataset_y[ii:ii+batch_size]
            if which_set == "train":
                # When we use the data for training, we need to include
                # the label mask, so we can pretend we don't have access
                # to some of the labels, as an exercise of our semi-supervised
                # learning ability
                yield x, y, self.label_mask[ii:ii+batch_size]
            else:
                yield x, y
def model_inputs(real_dim, z_dim):
    """Create the graph's input placeholders.

    :param real_dim: shape of one real image, e.g. (32, 32, 3)
    :param z_dim: length of the noise vector z
    :return: (real images, z noise, integer labels, label mask) placeholders
    """
    real_images = tf.placeholder(tf.float32, (None, *real_dim), name='input_real')
    noise = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    labels = tf.placeholder(tf.int32, (None), name='y')
    mask = tf.placeholder(tf.int32, (None), name='label_mask')
    return real_images, noise, labels, mask
def generator(z, output_dim, reuse=False, alpha=0.2, training=True, size_mult=128):
    """Build the DCGAN-style generator: noise z -> 32x32x`output_dim` image in (-1, 1).

    :param z: noise tensor of shape (batch, z_dim)
    :param output_dim: number of channels in the generated image
    :param reuse: whether to reuse variables in the 'generator' scope
    :param alpha: leaky-ReLU slope for negative inputs
    :param training: batch-norm training-mode flag
    :param size_mult: width multiplier for the transposed-conv layers
    :return: tanh-activated image tensor
    """
    with tf.variable_scope('generator', reuse=reuse):
        # First fully connected layer
        x1 = tf.layers.dense(z, 4 * 4 * size_mult * 4)
        # Reshape it to start the convolutional stack
        x1 = tf.reshape(x1, (-1, 4, 4, size_mult * 4))
        x1 = tf.layers.batch_normalization(x1, training=training)
        x1 = tf.maximum(alpha * x1, x1)  # leaky ReLU
        # 4x4 -> 8x8 (stride-2 transposed conv)
        x2 = tf.layers.conv2d_transpose(x1, size_mult * 2, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=training)
        x2 = tf.maximum(alpha * x2, x2)
        # 8x8 -> 16x16
        x3 = tf.layers.conv2d_transpose(x2, size_mult, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=training)
        x3 = tf.maximum(alpha * x3, x3)
        # Output layer: 16x16 -> 32x32; tanh maps into the (-1, 1) data range
        logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')
        out = tf.tanh(logits)
        return out
def discriminator(x, reuse=False, alpha=0.2, drop_rate=0., num_classes=10, size_mult=64):
    """11-class discriminator (10 real SVHN classes + 1 fake class).

    Returns (out, class_logits, gan_logits, features). The classifier head
    is intentionally left as an exercise (NotImplementedError placeholders).
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        x = tf.layers.dropout(x, rate=drop_rate/2.5)
        # Input layer is 32x32x3
        x1 = tf.layers.conv2d(x, size_mult, 3, strides=2, padding='same')
        relu1 = tf.maximum(alpha * x1, x1)
        relu1 = tf.layers.dropout(relu1, rate=drop_rate)
        x2 = tf.layers.conv2d(relu1, size_mult, 3, strides=2, padding='same')
        bn2 = tf.layers.batch_normalization(x2, training=True)
        # Bug fix: activate the batch-normalized tensor; bn2 was computed but unused.
        relu2 = tf.maximum(alpha * bn2, bn2)
        x3 = tf.layers.conv2d(relu2, size_mult, 3, strides=2, padding='same')
        bn3 = tf.layers.batch_normalization(x3, training=True)
        relu3 = tf.maximum(alpha * bn3, bn3)
        relu3 = tf.layers.dropout(relu3, rate=drop_rate)
        x4 = tf.layers.conv2d(relu3, 2 * size_mult, 3, strides=1, padding='same')
        bn4 = tf.layers.batch_normalization(x4, training=True)
        relu4 = tf.maximum(alpha * bn4, bn4)
        x5 = tf.layers.conv2d(relu4, 2 * size_mult, 3, strides=1, padding='same')
        bn5 = tf.layers.batch_normalization(x5, training=True)
        relu5 = tf.maximum(alpha * bn5, bn5)
        x6 = tf.layers.conv2d(relu5, 2 * size_mult, 3, strides=2, padding='same')
        bn6 = tf.layers.batch_normalization(x6, training=True)
        relu6 = tf.maximum(alpha * bn6, bn6)
        relu6 = tf.layers.dropout(relu6, rate=drop_rate)
        # Bug fix: feed relu6 (post-dropout) into this layer; relu6 was computed but unused.
        x7 = tf.layers.conv2d(relu6, 2 * size_mult, 3, strides=1, padding='valid')
        # Don't use bn on this layer, because bn would set the mean of each feature
        # to the bn mu parameter.
        # This layer is used for the feature matching loss, which only works if
        # the means can be different when the discriminator is run on the data than
        # when the discriminator is run on the generator samples.
        relu7 = tf.maximum(alpha * x7, x7)
        # Flatten it by global average pooling.
        # Bug fix: `features = raise NotImplementedError()` was a syntax error that
        # prevented this cell from even parsing; kept as a plain raise instead.
        # TODO (exercise): e.g. features = tf.reduce_mean(relu7, axis=[1, 2])
        raise NotImplementedError()
        # Set class_logits to be the inputs to a softmax distribution over the different classes
        raise NotImplementedError()
        # Set gan_logits such that P(input is real | input) = sigmoid(gan_logits).
        # Keep in mind that class_logits gives you the probability distribution over all the real
        # classes and the fake class. You need to work out how to transform this multiclass softmax
        # distribution into a binary real-vs-fake decision that can be described with a sigmoid.
        # Numerical stability is very important.
        # You'll probably need to use this numerical stability trick:
        # log sum_i exp a_i = m + log sum_i exp(a_i - m).
        # This is numerically stable when m = max_i a_i.
        # (It helps to think about what goes wrong when...
        # 1. One value of a_i is very large
        # 2. All the values of a_i are very negative
        # This trick and this value of m fix both those cases, but the naive implementation and
        # other values of m encounter various problems)
        raise NotImplementedError()
        return out, class_logits, gan_logits, features
def model_loss(input_real, input_z, output_dim, y, num_classes, label_mask, alpha=0.2, drop_rate=0.):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param output_dim: The number of channels in the output image
    :param y: Integer class labels
    :param num_classes: The number of classes
    :param label_mask: 0/1 mask marking which training labels may be used
    :param alpha: The slope of the left half of leaky ReLU activation
    :param drop_rate: The probability of dropping a hidden unit
    :return: A tuple of (discriminator loss, generator loss)
    """
    # These numbers multiply the size of each layer of the generator and the discriminator,
    # respectively. You can reduce them to run your code faster for debugging purposes.
    g_size_mult = 32
    d_size_mult = 64
    # Here we run the generator and the discriminator
    g_model = generator(input_z, output_dim, alpha=alpha, size_mult=g_size_mult)
    d_on_data = discriminator(input_real, alpha=alpha, drop_rate=drop_rate, size_mult=d_size_mult)
    d_model_real, class_logits_on_data, gan_logits_on_data, data_features = d_on_data
    # Second discriminator call reuses the same variables (reuse=True).
    d_on_samples = discriminator(g_model, reuse=True, alpha=alpha, drop_rate=drop_rate, size_mult=d_size_mult)
    d_model_fake, class_logits_on_samples, gan_logits_on_samples, sample_features = d_on_samples
    # Here we compute `d_loss`, the loss for the discriminator.
    # This should combine two different losses:
    # 1. The loss for the GAN problem, where we minimize the cross-entropy for the binary
    #    real-vs-fake classification problem.
    # 2. The loss for the SVHN digit classification problem, where we minimize the cross-entropy
    #    for the multi-class softmax. For this one we use the labels. Don't forget to ignore
    #    use `label_mask` to ignore the examples that we are pretending are unlabeled for the
    #    semi-supervised learning problem.
    # TODO (exercise): define d_loss here.
    raise NotImplementedError()
    # Here we set `g_loss` to the "feature matching" loss invented by Tim Salimans at OpenAI.
    # This loss consists of minimizing the absolute difference between the expected features
    # on the data and the expected features on the generated samples.
    # This loss works better for semi-supervised learning than the tradition GAN losses.
    # TODO (exercise): define g_loss here.
    raise NotImplementedError()
    # Classification accuracy bookkeeping (overall and labeled-only).
    pred_class = tf.cast(tf.argmax(class_logits_on_data, 1), tf.int32)
    eq = tf.equal(tf.squeeze(y), pred_class)
    correct = tf.reduce_sum(tf.to_float(eq))
    masked_correct = tf.reduce_sum(label_mask * tf.to_float(eq))
    return d_loss, g_loss, correct, masked_correct, g_model
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations
    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
    # Get weights and biases to update. Get them separately for the discriminator and the generator
    # TODO (exercise): collect the two variable lists by scope name.
    raise NotImplementedError()
    # Minimize both players' costs simultaneously
    # TODO (exercise): build d_train_opt and g_train_opt (Adam with beta1).
    raise NotImplementedError()
    # Op that decays the learning rate by 10% each time it is run.
    shrink_lr = tf.assign(learning_rate, learning_rate * 0.9)
    return d_train_opt, g_train_opt, shrink_lr
class GAN:
    """
    A GAN model.
    :param real_size: The shape of the real data.
    :param z_size: The number of entries in the z code vector.
    :param learning_rate: The learning rate to use for Adam.
    :param num_classes: The number of classes to recognize.
    :param alpha: The slope of the left half of the leaky ReLU activation
    :param beta1: The beta1 parameter for Adam.
    """
    def __init__(self, real_size, z_size, learning_rate, num_classes=10, alpha=0.2, beta1=0.5):
        tf.reset_default_graph()
        # Stored as a variable so it can be decayed in-graph by shrink_lr.
        self.learning_rate = tf.Variable(learning_rate, trainable=False)
        inputs = model_inputs(real_size, z_size)
        self.input_real, self.input_z, self.y, self.label_mask = inputs
        self.drop_rate = tf.placeholder_with_default(.5, (), "drop_rate")
        loss_results = model_loss(self.input_real, self.input_z,
                                  real_size[2], self.y, num_classes,
                                  label_mask=self.label_mask,
                                  # Bug fix: was hard-coded to 0.2, ignoring the
                                  # constructor's alpha argument.
                                  alpha=alpha,
                                  drop_rate=self.drop_rate)
        self.d_loss, self.g_loss, self.correct, self.masked_correct, self.samples = loss_results
        self.d_opt, self.g_opt, self.shrink_lr = model_opt(self.d_loss, self.g_loss, self.learning_rate, beta1)
def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
    """Display a nrows x ncols grid of generated images from samples[epoch].

    :param epoch: index into `samples` (e.g. -1 for the most recent epoch)
    :param samples: list of sample batches, one per epoch
    :return: (fig, axes) from matplotlib
    """
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
                             sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # Rescale from the generator's (-1, 1)-ish range to displayable uint8.
        img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
        # NOTE(review): 'box-forced' was removed in newer matplotlib; may need
        # to become 'box' on current versions -- confirm against the env.
        ax.set_adjustable('box-forced')
        im = ax.imshow(img)
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes
def train(net, dataset, epochs, batch_size, figsize=(5,5)):
    """Train the semi-supervised GAN and report per-epoch accuracies.

    :param net: a GAN instance (provides placeholders, losses, and train ops)
    :param dataset: a Dataset instance yielding (x, y, label_mask) batches
    :param epochs: number of passes over the training data
    :param batch_size: mini-batch size
    :return: (train_accuracies, test_accuracies, samples)

    Side effects: saves a checkpoint to ./checkpoints/generator.ckpt, pickles
    the generated samples to samples.pkl, and shows sample figures.
    NOTE(review): relies on the module-level `z_size` for the noise dimension.
    """
    saver = tf.train.Saver()
    # Fixed noise so per-epoch samples are comparable across epochs.
    sample_z = np.random.normal(0, 1, size=(50, z_size))
    samples, train_accuracies, test_accuracies = [], [], []
    steps = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            print("Epoch",e)
            t1e = time.time()
            num_examples = 0
            num_correct = 0
            for x, y, label_mask in dataset.batches(batch_size):
                assert 'int' in str(y.dtype)
                steps += 1
                # Train accuracy is measured over labeled examples only.
                num_examples += label_mask.sum()
                # Sample random noise for G
                batch_z = np.random.normal(0, 1, size=(batch_size, z_size))
                # Run optimizers
                t1 = time.time()
                _, _, correct = sess.run([net.d_opt, net.g_opt, net.masked_correct],
                                         feed_dict={net.input_real: x, net.input_z: batch_z,
                                                    net.y : y, net.label_mask : label_mask})
                t2 = time.time()
                num_correct += correct
            # Decay the learning rate once per epoch.
            sess.run([net.shrink_lr])
            train_accuracy = num_correct / float(num_examples)
            print("\t\tClassifier train accuracy: ", train_accuracy)
            num_examples = 0
            num_correct = 0
            # Evaluate on the test split with dropout disabled.
            for x, y in dataset.batches(batch_size, which_set="test"):
                assert 'int' in str(y.dtype)
                num_examples += x.shape[0]
                correct, = sess.run([net.correct], feed_dict={net.input_real: x,
                                                              net.y : y,
                                                              net.drop_rate: 0.})
                num_correct += correct
            test_accuracy = num_correct / float(num_examples)
            print("\t\tClassifier test accuracy", test_accuracy)
            # NOTE: t1/t2 are from the last training batch of the epoch.
            print("\t\tStep time: ", t2 - t1)
            t2e = time.time()
            print("\t\tEpoch time: ", t2e - t1e)
            gen_samples = sess.run(
                net.samples,
                feed_dict={net.input_z: sample_z})
            samples.append(gen_samples)
            _ = view_samples(-1, samples, 5, 10, figsize=figsize)
            plt.show()
            # Save history of accuracies to view after training
            train_accuracies.append(train_accuracy)
            test_accuracies.append(test_accuracy)
        saver.save(sess, './checkpoints/generator.ckpt')
    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)
    return train_accuracies, test_accuracies, samples
!mkdir checkpoints
real_size = (32,32,3)
z_size = 100
learning_rate = 0.0003
net = GAN(real_size, z_size, learning_rate)
dataset = Dataset(trainset, testset)
batch_size = 128
epochs = 25
train_accuracies, test_accuracies, samples = train(net,
dataset,
epochs,
batch_size,
figsize=(10,5))
fig, ax = plt.subplots()
plt.plot(train_accuracies, label='Train', alpha=0.5)
plt.plot(test_accuracies, label='Test', alpha=0.5)
plt.title("Accuracy")
plt.legend()
```
When you run the fully implemented semi-supervised GAN, you should usually find that the test accuracy peaks at 69-71%. It should definitely stay above 68% fairly consistently throughout the last several epochs of training.
This is a little bit better than a [NIPS 2014 paper](https://arxiv.org/pdf/1406.5298.pdf) that got 64% accuracy on 1000-label SVHN with variational methods. However, we still have lost something by not using all the labels. If you re-run with all the labels included, you should obtain over 80% accuracy using this architecture (and other architectures that take longer to run can do much better).
```
_ = view_samples(-1, samples, 5, 10, figsize=(10,5))
!mkdir images
for ii in range(len(samples)):
fig, ax = view_samples(ii, samples, 5, 10, figsize=(10,5))
fig.savefig('images/samples_{:03d}.png'.format(ii))
plt.close()
```
Congratulations! You now know how to train a semi-supervised GAN. This exercise is stripped down to make it run faster and to make it simpler to implement. In the original work by Tim Salimans at OpenAI, a GAN using [more tricks and more runtime](https://arxiv.org/pdf/1606.03498.pdf) reaches over 94% accuracy using only 1,000 labeled examples.
| github_jupyter |
```
project_id = 'elife-data-pipeline'
source_dataset = 'de_dev'
output_dataset = 'de_dev'
output_table_prefix = 'data_science_'
mv_prefix = 'mv_'
max_workers = 10
max_editors = 100
email = 'd.ecer@elifesciences.org'
import logging
from datetime import datetime
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from typing import List
import pandas as pd
from tqdm.auto import tqdm
import data_science_pipeline.configure_warnings # pylint: disable=unused-import
import data_science_pipeline.configure_notebook_logging # pylint: disable=unused-import
from data_science_pipeline.utils.europepmc import (
EuropePMCApi,
europepmc_requests_retry_session
)
from data_science_pipeline.utils.bq import (
to_gbq,
is_bq_not_found_exception
)
from data_science_pipeline.utils.jupyter import (
read_big_query as _read_big_query,
)
LOGGER = logging.getLogger(__name__)

logging.basicConfig(level='INFO')
logging.getLogger('data_science_pipeline').setLevel(logging.INFO)

# Fully qualified (dataset.table) names derived from the notebook parameters.
editor_parsed_pubmed_links_table_name = '{output_dataset}.{prefix}{suffix}'.format(
    output_dataset=output_dataset,
    prefix=output_table_prefix,
    suffix='editor_pubmed_links'
)
editor_parsed_pubmed_ids_table_name = '{output_dataset}.{prefix}{suffix}'.format(
    output_dataset=output_dataset,
    prefix=output_table_prefix,
    suffix='editor_pubmed_ids'
)
editor_parsed_pubmed_ids_mv_name = '{output_dataset}.{mv_prefix}{prefix}{suffix}'.format(
    output_dataset=output_dataset,
    prefix=output_table_prefix,
    mv_prefix=mv_prefix,
    suffix='editor_pubmed_ids'
)

# Bind the project id once so later cells can call read_big_query(sql).
read_big_query = partial(_read_big_query, project_id=project_id)
default_query_props = dict(project=project_id, dataset=source_dataset)

# Latest row per person_id from the raw table (ROW_NUMBER de-duplication).
_sql_from_table = (
    '''
SELECT
person_id,
pubmed_url,
imported_timestamp
FROM (
SELECT
person_id,
pubmed_url,
provenance.imported_timestamp as imported_timestamp,
ROW_NUMBER() OVER (PARTITION BY person_id ORDER BY provenance.imported_timestamp DESC) as rn
FROM `{table}`
) WHERE rn = 1
''').format(table=editor_parsed_pubmed_ids_table_name)

# Same columns from the already de-duplicated materialised view.
# NOTE(review): the trailing comma before FROM relies on BigQuery's
# trailing-comma tolerance -- confirm the target dialect accepts it.
_sql_from_mv = (
    '''
SELECT
person_id,
pubmed_url,
provenance.imported_timestamp as imported_timestamp,
FROM `{table}`
''').format(table=editor_parsed_pubmed_ids_mv_name)
# Load the latest pubmed-url row per editor: prefer the materialised view,
# fall back to the raw (de-duplicated) table, and finally to an empty frame
# when neither exists yet.
try:
    existing_editor_ids_and_pubmed_url_df = read_big_query(_sql_from_mv)
except Exception as e: # pylint: disable=broad-except
    if not is_bq_not_found_exception(e):
        raise
    try:
        print('table not found: %s' % editor_parsed_pubmed_ids_mv_name)
        existing_editor_ids_and_pubmed_url_df = read_big_query(_sql_from_table)
    except Exception as err: # pylint: disable=broad-except
        if not is_bq_not_found_exception(err):
            raise
        print('table not found: %s' % editor_parsed_pubmed_ids_table_name)
        existing_editor_ids_and_pubmed_url_df = pd.DataFrame(
            columns=['person_id', 'pubmed_url', 'imported_timestamp'])
print("existing_editor_ids_and_pubmed_url_df length: ", len(existing_editor_ids_and_pubmed_url_df))
existing_editor_ids_and_pubmed_url_df.head(3)

existing_editor_ids_set = set(existing_editor_ids_and_pubmed_url_df['person_id'])
print("existing_editor_ids_set length :", len(existing_editor_ids_set))

# Editors with a parsed search term (candidates for a PubMed-id refresh).
editor_parsed_pubmed_links_df = read_big_query(
    'SELECT * FROM `{table_name}`\nWHERE parsed_search_term IS NOT NULL'.format(
        table_name=editor_parsed_pubmed_links_table_name
    )
)
print("editor_parsed_pubmed_links_df length: ", len(editor_parsed_pubmed_links_df))
editor_parsed_pubmed_links_df.head(3)

# Left-join the previously imported url/timestamp onto the candidates.
merged_editor_parsed_pubmed_links_df = editor_parsed_pubmed_links_df.merge(
    existing_editor_ids_and_pubmed_url_df,
    how='left',
    on='person_id',
    suffixes=('', '_existing')
)
print("merged_editor_parsed_pubmed_links_df length: ", len(merged_editor_parsed_pubmed_links_df))
merged_editor_parsed_pubmed_links_df.head(3)

# Editors whose pubmed url changed since the last import.
editors_with_changed_pubmed_url_df = merged_editor_parsed_pubmed_links_df[
    (merged_editor_parsed_pubmed_links_df['pubmed_url_existing'].notnull())
    &
    (
        merged_editor_parsed_pubmed_links_df['pubmed_url']
        !=
        merged_editor_parsed_pubmed_links_df['pubmed_url_existing']
    )
].drop(columns=['pubmed_url_existing', 'imported_timestamp'])
print("editors_with_changed_pubmed_url_df length: ", len(editors_with_changed_pubmed_url_df))
editors_with_changed_pubmed_url_df.head(3)

# Editors whose last import is older than 15 days.
editors_with_not_currently_updated_info_df = merged_editor_parsed_pubmed_links_df[
    (
        pd.to_datetime(pd.Timestamp.utcnow())
        -
        pd.to_datetime(merged_editor_parsed_pubmed_links_df['imported_timestamp'])
    ).dt.days > 15
].drop(columns=['pubmed_url_existing', 'imported_timestamp'])
print("editors_with_not_currently_updated_info length: ", len(editors_with_not_currently_updated_info_df))
editors_with_not_currently_updated_info_df.head(3)

# Editors never imported before.
new_added_editors_df = editor_parsed_pubmed_links_df[
    ~editor_parsed_pubmed_links_df['person_id'].isin(existing_editor_ids_set)
]
print("new_added_editors_df length: ", len(new_added_editors_df))
new_added_editors_df.head(3)

# Union of all editors that need (re-)processing.
selected_person_ids = set.union(
    set(new_added_editors_df['person_id']),
    set(editors_with_changed_pubmed_url_df['person_id']),
    set(editors_with_not_currently_updated_info_df['person_id'])
)
if None in selected_person_ids:
    selected_person_ids.remove(None)
print("selected_person_ids length: ", len(selected_person_ids))
print("selected_person_ids: ", selected_person_ids)

remaining_editor_parsed_pubmed_links_df = editor_parsed_pubmed_links_df[
    editor_parsed_pubmed_links_df['person_id'].isin(selected_person_ids)
]
print("remaining_editor_parsed_pubmed_links_df length: ", len(remaining_editor_parsed_pubmed_links_df))
remaining_editor_parsed_pubmed_links_df.head(3)

# Cap the batch at max_editors per run (0/None disables the cap).
processing_editor_parsed_pubmed_links_df = remaining_editor_parsed_pubmed_links_df
if max_editors:
    processing_editor_parsed_pubmed_links_df = processing_editor_parsed_pubmed_links_df[:max_editors]
print("processing_editor_parsed_pubmed_links_df length: ", len(processing_editor_parsed_pubmed_links_df))
def get_editor_pubmed_paper_ids(europepmc_api: EuropePMCApi, row) -> List[str]:
    """Return the PubMed ids for the author names in *row*'s parsed search term.

    Returns None when the row has no parsed search term, or when the
    Europe PMC request fails (failures are logged, not raised, so the
    thread-pool map above keeps going).
    """
    parsed_search_term = row.parsed_search_term
    if not parsed_search_term:
        return None
    # BUG FIX: the original computed author_names twice (once outside and
    # once inside the try block); compute it once.
    author_names = parsed_search_term.get('include', {}).get('author')
    try:
        return europepmc_api.get_author_pmids(author_names)
    except Exception:  # pylint: disable=broad-except
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour
        # without masking interpreter shutdown.
        LOGGER.error(
            'failed to retrieve pubmed ids for author names: %s',
            author_names, exc_info=True)
        return None
# NOTE(review): the [:max_editors] slice here is redundant -- the frame was
# already truncated to max_editors in the cell above.
editor_pubmed_links_result_df = processing_editor_parsed_pubmed_links_df[:max_editors].copy()

# Fetch PubMed ids for every selected editor concurrently (max_workers bound).
with europepmc_requests_retry_session() as session:
    europepmc_api = EuropePMCApi(
        session,
        params={'email': email}
    )
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        editor_pubmed_links_result_df['pubmed_ids'] = list(tqdm(
            executor.map(
                lambda row: get_editor_pubmed_paper_ids(europepmc_api, row),
                editor_pubmed_links_result_df.itertuples()
            ),
            total=len(editor_pubmed_links_result_df),
            leave=False
        ))

# Drop rows where the lookup failed (the helper returned None).
non_empty_editor_pubmed_links_result_df = editor_pubmed_links_result_df[
    ~pd.isnull(editor_pubmed_links_result_df['pubmed_ids'])
].copy()
print("non_empty_editor_pubmed_links_result_df length: ", len(non_empty_editor_pubmed_links_result_df))
non_empty_editor_pubmed_links_result_df.head(3)

# Attach provenance (source + import timestamp) to every row.
non_empty_editor_pubmed_links_result_df['provenance'] = [{
    'source': 'europepmc',
    'imported_timestamp': datetime.utcnow().isoformat()
}] * len(non_empty_editor_pubmed_links_result_df)

# Append the results to the output table (skipped when nothing was fetched).
if len(non_empty_editor_pubmed_links_result_df) == 0:
    print('no data to upload')
else:
    print('writing to:', editor_parsed_pubmed_ids_table_name)
    to_gbq(
        non_empty_editor_pubmed_links_result_df,
        project_id=project_id,
        destination_table=editor_parsed_pubmed_ids_table_name,
        if_exists='append'
    )
    print('done')
```
| github_jupyter |
```
%matplotlib inline
```
# Training a Classifier
This is it. You have seen how to define neural networks, compute loss and make
updates to the weights of the network.
Now you might be thinking,
## What about data?
Generally, when you have to deal with image, text, audio or video data,
you can use standard python packages that load data into a numpy array.
Then you can convert this array into a ``torch.*Tensor``.
- For images, packages such as Pillow, OpenCV are useful
- For audio, packages such as scipy and librosa
- For text, either raw Python or Cython based loading, or NLTK and
SpaCy are useful
Specifically for vision, we have created a package called
``torchvision``, that has data loaders for common datasets such as
Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz.,
``torchvision.datasets`` and ``torch.utils.data.DataLoader``.
This provides a huge convenience and avoids writing boilerplate code.
For this tutorial, we will use the CIFAR10 dataset.
It has the classes: 'airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck'. The images in CIFAR-10 are of
size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
<p align=center>
<img src=https://pytorch.org/tutorials/_images/cifar10.png>
</p>
## Training an image classifier
We will do the following steps in order:
1. Load and normalizing the CIFAR10 training and test datasets using
``torchvision``
2. Define a Convolutional Neural Network
3. Define a loss function
4. Train the network on the training data
5. Test the network on the test data
### 1. Loading and normalizing CIFAR10
Using ``torchvision``, it's extremely easy to load CIFAR10.
```
import torch
import torchvision
import torchvision.transforms as transforms
```
The output of torchvision datasets are PILImage images of range [0, 1].
We transform them to Tensors of normalized range [-1, 1].
```
# ToTensor scales PIL images to [0, 1]; Normalize shifts each channel to [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
                                          shuffle=True, num_workers=4)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=16,
                                         shuffle=False, num_workers=4)

# Human-readable class names, indexed by label id 0-9.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
```
Let us show some of the training images, for fun.
```
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
    """Display a normalized CHW image tensor with matplotlib."""
    unnormalized = img / 2 + 0.5  # undo Normalize((0.5,...), (0.5,...))
    as_numpy = unnormalized.numpy()
    # matplotlib expects HWC ordering; tensors come in as CHW.
    plt.imshow(np.transpose(as_numpy, (1, 2, 0)))
    plt.show()
# get some random training images
dataiter = iter(trainloader)
# Use the builtin next(): the `.next()` iterator method was removed from
# DataLoader iterators in newer PyTorch releases; next(...) works everywhere.
images, labels = next(dataiter)

# show images
imshow(torchvision.utils.make_grid(images))
# print labels -- one per image in the batch (the original hard-coded 4
# labels although batch_size is 16 above, mismatching the grid)
print(' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
```
### 2. Define a Convolutional Neural Network
Copy the neural network from the Neural Networks section before and modify it to
take 3-channel images (instead of 1-channel images as it was defined).
```
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style CNN mapping 3x32x32 CIFAR-10 images to 10 class scores."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv+pool stages: 3x32x32 -> 6x14x14 -> 16x5x5.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head over the flattened 16*5*5 feature map.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return (N, 10) unnormalised class scores for a (N, 3, 32, 32) batch."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
# Instantiate the network and move it to the GPU when one is available.
net = Net()

# Check if CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(f'Train on: {device}')
net.to(device)
```
## 3. Define a Loss function and optimizer
Let's use a Classification Cross-Entropy loss and SGD with momentum.
```
import torch.optim as optim

# Cross-entropy loss over the 10 classes; SGD with momentum 0.9.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
```
### 4. Train the network
This is when things start to get interesting.
We simply have to loop over our data iterator, and feed the inputs to the
network and optimize.
```
# Standard supervised training loop: 20 epochs over the CIFAR-10 train set.
for epoch in range(20):  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
```
### 5. Test the network on the test data
We have trained the network for 2 passes over the training dataset.
But we need to check if the network has learnt anything at all.
We will check this by predicting the class label that the neural network
outputs, and checking it against the ground-truth. If the prediction is
correct, we add the sample to the list of correct predictions.
Okay, first step. Let us display an image from the test set to get familiar.
```
dataiter = iter(testloader)
# Use the builtin next(): the `.next()` iterator method was removed from
# DataLoader iterators in newer PyTorch releases.
images, labels = next(dataiter)

# print images
imshow(torchvision.utils.make_grid(images))
# One ground-truth label per image in the batch (was hard-coded to 4,
# although batch_size is 16).
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
```
Okay, now let us see what the neural network thinks these examples above are:
```
# BUG FIX: the original read `inputs`, a stale variable left over from the
# training loop, so the freshly loaded test batch was never the one evaluated.
images, labels = images.to(device), labels.to(device)
outputs = net(images)
```
The outputs are energies for the 10 classes.
The higher the energy for a class, the more the network
thinks that the image is of the particular class.
So, let's get the index of the highest energy:
```
_, predicted = torch.max(outputs, 1)
# One prediction per image in the batch (was hard-coded to 4, although the
# grid above shows the full batch of 16).
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
                              for j in range(len(predicted))))
```
The results seem pretty good.
Let us look at how the network performs on the whole dataset.
```
# Overall test-set accuracy.
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # BUG FIX: move the current batch -- the original moved the stale
        # `inputs` tensor left over from the training loop, so every batch
        # was scored against the same images.
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
```
That looks waaay better than chance, which is 10% accuracy (randomly picking
a class out of 10 classes).
Seems like the network learnt something.
Hmmm, what are the classes that performed well, and the classes that did
not perform well:
```
# Per-class test-set accuracy.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # BUG FIX: move the current batch (the original moved the stale
        # training-loop `inputs` tensor instead of `images`).
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        # BUG FIX: count every sample in the batch; the original hard-coded
        # range(4) although batch_size is 16, silently dropping 3/4 of the
        # test data from the statistics.
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
```
Okay, so what next?
How do we run these neural networks on the GPU?
Training on GPU
----------------
Just like how you transfer a Tensor onto the GPU, you transfer the neural
net onto the GPU.
Let's first define our device as the first visible cuda device if we have
CUDA available:
```
# Select the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)
```
The rest of this section assumes that ``device`` is a CUDA device.
Then these methods will recursively go over all modules and convert their
parameters and buffers to CUDA tensors:
.. code:: python
net.to(device)
Remember that you will have to send the inputs and targets at every step
to the GPU too:
.. code:: python
inputs, labels = inputs.to(device), labels.to(device)
Why don't I notice a MASSIVE speedup compared to CPU? Because your network
is realllly small.
**Exercise:** Try increasing the width of your network (argument 2 of
the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` โ
they need to be the same number), see what kind of speedup you get.
**Goals achieved**:
- Understanding PyTorch's Tensor library and neural networks at a high level.
- Train a small neural network to classify images
Training on multiple GPUs
-------------------------
If you want to see even more MASSIVE speedup using all of your GPUs,
please check out :doc:`data_parallel_tutorial`.
Where do I go next?
-------------------
- :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`
- `Train a state-of-the-art ResNet network on imagenet`_
- `Train a face generator using Generative Adversarial Networks`_
- `Train a word-level language model using Recurrent LSTM networks`_
- `More examples`_
- `More tutorials`_
- `Discuss PyTorch on the Forums`_
- `Chat with other users on Slack`_
| github_jupyter |
# Known issues
## A float quantity is Iterable
https://docs.python.org/3/library/collections.abc.html#collections.abc.Iterable
This tests if the object has "__iter__"
```
import collections
from physipy import m
# A scalar Quantity defines __iter__, so it is (mis)reported as Iterable.
isinstance(m, collections.abc.Iterable)
```
## Array repr with 0 value
Pick the best favunit: take the smallest one when 0 is in the array together with positive and negative values.
```
from physipy import m, Quantity, Dimension
import numpy as np
# Repr/favunit selection for an array containing 0 alongside mixed signs.
Quantity(np.array([0, -1.2, 1.2]), Dimension("L"))
```
# Inplace change using asqarray
```
from physipy.quantity.utils import asqarray
# Demonstrates that asqarray mutates the wrapped value's type in place.
print(type(m.value))
arrq_9 = np.array([m.__copy__()], dtype=object)
out = asqarray(arrq_9)
# this changes the type of m value
print(type(m.value))
```
# Numpy trapz implementation not called when only x or dx is a quantity
https://github.com/numpy/numpy/issues/18902
Only work when the array to integrate is a quantity
```
from physipy import m
import numpy as np
# np.trapz only dispatches to physipy when the integrand itself is a Quantity.
# this works
print(np.trapz(np.arange(5)*m))
# also this
print(np.trapz(np.arange(5), x=np.arange(5)*m))
print(np.trapz(np.arange(5), dx=5000*m, x=np.arange(5)*m)) #dx is silent
# but not this
#np.trapz(np.arange(5), dx=5000*m)
print("----uncomment above line to trigger exception")
```
# Array function interface not triggered on scalars
Calling a numpy function with only scalars will not trigger the array function interface, since it is used only when an argument is an array.
```
from physipy import m
# The __array_function__ protocol only triggers when an argument is an
# array, so scalar Quantities fall back to float casting.
# this raises a DimensionError because of the casting into float
#np.random.normal(3*m, 1*m)
# while this works
np.random.normal(np.array(3*m), np.array(1*m))
```
# FIXED (Matplotlib histogram) by adding "to_numpy" method
It turns out that matplotlib first checks if the object has a "to_numpy()" method; this was later improved again by removing to_numpy, removing __iter__, and delegating it to getattr.
Some preprocessing turn a quantity-array into a "set of elements", and plots one histogram for each value.
```
import numpy as np
from physipy import m
import matplotlib.pyplot as plt
# Before the to_numpy fix, hist() iterated the Quantity and drew one
# histogram per element; plotting .value works either way.
arr = np.random.normal(1, 0.1, size=100)*m
plt.hist(arr.value)
plt.hist(arr)
```
# Matplotlib histogram, again : missing units support
Source code for hist : https://matplotlib.org/stable/_modules/matplotlib/axes/_axes.html#Axes.hist
One of the first things done is :
```python
x = cbook._reshape_2D(x, 'x')
```
With
```python
Signature: cbook._reshape_2D(X, name)
Source:
def _reshape_2D(X, name):
"""
Use Fortran ordering to convert ndarrays and lists of iterables to lists of
1D arrays.
Lists of iterables are converted by applying `np.asanyarray` to each of
their elements. 1D ndarrays are returned in a singleton list containing
them. 2D ndarrays are converted to the list of their *columns*.
*name* is used to generate the error message for invalid inputs.
"""
# unpack if we have a values or to_numpy method.
try:
X = X.to_numpy()
except AttributeError:
try:
if isinstance(X.values, np.ndarray):
X = X.values
except AttributeError:
pass
# Iterate over columns for ndarrays.
if isinstance(X, np.ndarray):
X = X.T
if len(X) == 0:
return [[]]
elif X.ndim == 1 and np.ndim(X[0]) == 0:
# 1D array of scalars: directly return it.
return [X]
elif X.ndim in [1, 2]:
# 2D array, or 1D array of iterables: flatten them first.
return [np.reshape(x, -1) for x in X]
else:
raise ValueError(f'{name} must have 2 or fewer dimensions')
# Iterate over list of iterables.
if len(X) == 0:
return [[]]
result = []
is_1d = True
for xi in X:
# check if this is iterable, except for strings which we
# treat as singletons.
if (isinstance(xi, collections.abc.Iterable) and
not isinstance(xi, str)):
is_1d = False
xi = np.asanyarray(xi)
nd = np.ndim(xi)
if nd > 1:
raise ValueError(f'{name} must have 2 or fewer dimensions')
result.append(xi.reshape(-1))
if is_1d:
# 1D array of scalars: directly return it.
return [np.reshape(result, -1)]
else:
# 2D array, or 1D array of iterables: use flattened version.
return result
```
```
import matplotlib.pyplot as plt
from physipy import units, m, K, setup_matplotlib
from matplotlib import cbook
import numpy as np
# Even with the matplotlib unit interface enabled, hist() goes through
# cbook._reshape_2D (shown above) rather than the units machinery.
arr = np.random.normal(1, 0.1, size=100)*m
setup_matplotlib()
plt.hist(arr)
```
I created a [unit package for physical computation](https://github.com/mocquin/physipy) and its [matplotlib' unit interface](https://github.com/mocquin/physipy/blob/master/physipy/quantity/plot.py) that works well for plotting with `Axes` methods like `ax.plot`, as you can see in the [plotting notebooke demo](https://github.com/mocquin/physipy/blob/master/docs/Plotting.ipynb).
The issue I am facing is to have the unit interface work with the histogram plotting, like in `ax.hist(arr)`.
For now, I have 2 solutions that are not satisfying:
- first solution is the current state of my project : I added to my `Quantity` object a `to_numpy()` method that cast the instance into a plain numpy array, which makes the histogram plotting work, but looses the automatic unit plotting, since it is not a Quantity anymore but a plain numpy array. For some reasons, this method is never used when plotting with `ax.plot`, but is one of the first things tried when using `ax.hist`.
- other solution is what I had until recently, which was even worse : without the `to_numpy()` method, matplotlib tries to loop inside the object, and since my object can be iterated over (if it is a 1D array for eg), then it plots one 1-element-histogram for each value. You can see what it looked like [here](https://render.githubusercontent.com/view/ipynb?color_mode=light&commit=f0871009f57da092eee1d640d9508070d1662c1d&enc_url=68747470733a2f2f7261772e67697468756275736572636f6e74656e742e636f6d2f6d6f637175696e2f706879736970792f663038373130303966353764613039326565653164363430643935303830373064313636326331642f646f63732f4b6e6f776e2532304973737565732e6970796e62&nwo=mocquin%2Fphysipy&path=docs%2FKnown+Issues.ipynb&repository_id=175999533&repository_type=Repository#Matplotlib-histogram) (see the Matplotlib histogram section).
After digging into the source code, I found that some preprocessing is done the object passed to `hist`, using [cbook._reshape2d](https://github.com/matplotlib/matplotlib/blob/6f92db0fc6aad8dfcdb197202b969a01e4391fde/lib/matplotlib/axes/_axes.py#L6654). Then, onto [`cbook._reshape2D` source](https://github.com/matplotlib/matplotlib/blob/6f92db0fc6aad8dfcdb197202b969a01e4391fde/lib/matplotlib/cbook/__init__.py#L1304), I think most of the time objects are subclass of `np.ndarray`, and so are caught [in this loop](https://github.com/matplotlib/matplotlib/blob/6f92db0fc6aad8dfcdb197202b969a01e4391fde/lib/matplotlib/cbook/__init__.py#L1327). But my class is not a subclass of `np.ndarray`, so it ends up [in this loop](https://github.com/matplotlib/matplotlib/blob/6f92db0fc6aad8dfcdb197202b969a01e4391fde/lib/matplotlib/cbook/__init__.py#L1347). Then `np.asanyarray(xi)` is called and cast each quantity element (like `1m` into just `1`) into an int/float, again loosing the unit. Now the workaround for this kind of problem is currently addressed by numpy's [NEP-35](https://numpy.org/neps/nep-0035-array-creation-dispatch-with-array-function.html), which allows to override array-creation functions (like `np.asanyarray`), but it requires to pass an extra argument : `np.asanyarray(x)` would be `np.asanyarray(x, like=x)`. Unfortunately, this doesn't solve completely the problem, because the `xi` object are reshaped using `xi.reshape` and not `np.reshape(xi)`.
```
import collections
import numpy as np
from physipy import m

# make a 1D array of meters
X = np.arange(10)*m

# introspect https://github.com/matplotlib/matplotlib/blob/6f92db0fc6aad8dfcdb197202b969a01e4391fde/lib/matplotlib/cbook/__init__.py#L1347
# Re-runs matplotlib's _reshape_2D element loop, but with the NEP-35
# `like=` argument so np.asanyarray preserves the Quantity type.
result = []
is_1d = True
for xi in X:
    # check if this is iterable, except for strings which we
    # treat as singletons.
    if (isinstance(xi, collections.abc.Iterable) and
            not isinstance(xi, str)):
        is_1d = False
    #xi = np.asanyarray(xi)
    print(xi)
    xi = np.asanyarray(xi, like=xi)
    nd = np.ndim(xi)
    print(xi.reshape(-1))
    if nd > 1:
        # NOTE(review): `name` is undefined in this cell (it was copied from
        # matplotlib's _reshape_2D); this branch would raise NameError.
        raise ValueError(f'{name} must have 2 or fewer dimensions')
    result.append(xi.reshape(-1))
print(is_1d)
if is_1d:
    # 1D array of scalars: directly return it.
    #return
    print([np.reshape(result, -1)])
else:
    # 2D array, or 1D array of iterables: use flattened version.
    #return
    print(result)
```
# Numpy random normal
Not really a bug, more of a Feature request on numpy's side...
`__array_struct__` is tried on the value, and unit is dropped.
See https://github.com/numpy/numpy/issues/19382
```
import numpy as np
from physipy import m
np.random.normal(np.array([1, 2, 3]),
np.array([2, 3, 4]), size=(2, 3))
np.random.normal(np.array([1, 2, 3])*m,
np.array([2, 3, 4])*m, size=(2, 3))
import numpy as np
np.random.seed(1234)
HANDLED_FUNCTIONS = {}
class NumericalLabeled():
    """Minimal array-like wrapper pairing a numerical value with a text label."""

    def __init__(self, value, label=""):
        self.value = value  # wrapped numerical payload
        self.label = label  # free-text label carried through operations

    def __repr__(self):
        return "NumericalLabelled<" + str(self.value) + "," + self.label + ">"

    def __array_function__(self, func, types, args, kwargs):
        # NEP 18 hook: dispatch to the registered override, if any.
        handler = HANDLED_FUNCTIONS.get(func)
        if handler is None:
            return NotImplemented
        return handler(*args, **kwargs)
def make_numericallabelled(x, label=""):
    """
    Coerce *x* into a NumericalLabelled object, leaving existing instances as-is.
    """
    return x if isinstance(x, NumericalLabeled) else NumericalLabeled(x, label=label)
# Numpy functions
# Override functions - used with __array_function__
def implements(np_function):
    """Decorator: register the wrapped function as the override for *np_function*."""
    def register(override):
        HANDLED_FUNCTIONS[np_function] = override
        return override
    return register
@implements(np.random.normal)
def np_random_normal(loc=0.0, scale=1.0, **kwargs):
    """Labelled override of np.random.normal.

    Wraps *loc* and *scale* as NumericalLabeled, requires their labels to
    match, and returns a NumericalLabeled draw carrying the combined label.
    """
    # cast both loc and scale into Numericallabelled
    loc = make_numericallabelled(loc)
    scale = make_numericallabelled(scale)
    # check their label is "compatible"
    if not loc.label == scale.label:
        raise ValueError
    # BUG FIX: the original called np.random.rand, which accepts neither
    # `loc` nor `scale` keywords; np.random.normal is the intended sampler.
    return NumericalLabeled(np.random.normal(loc=loc.value,
                                             scale=scale.value, **kwargs),
                            loc.label + scale.label)
@implements(np.mean)
def np_mean(a, *args, **kwargs):
    """Mean override: average the wrapped value and propagate the label."""
    averaged = np.mean(a.value, *args, **kwargs)
    return NumericalLabeled(averaged, a.label)
def main():
    """Demo: compare numpy calls on a plain array vs a NumericalLabeled wrapper.

    np.mean dispatches through __array_function__; np.random.normal is not
    overridable that way, so the wrapped call is expected to fail and print
    the exception.
    """
    # reference result for standard array
    arr = np.arange(10)
    print(np.mean(arr))
    print(np.random.normal(arr))
    # array-like object
    num_labeled = NumericalLabeled(arr, "toto")
    print(np.mean(num_labeled))
    try:
        print(np.random.normal(num_labeled))
    except Exception as e:
        print(e)

main()
import sys
print("Python :", sys.version)
print("Numpy :", np.__version__)
arr = np.arange(10)
# Reference result
print(np.mean(arr))
print(np.random.normal(arr))
# NOTE(review): MyArrayLike is not defined anywhere in this notebook (the
# snippet was copied from the linked numpy issue); running this cell as-is
# raises NameError.
custom_obj = MyArrayLike(arr)
print(np.mean(custom_obj)) # np.mean will trigger __array_function__ interface
print(np.random.normal(custom_obj)) # np.random.normal will "only" try to cast the object to float
```
# Power of dimension are not casted to int when possible
```
from physipy import m, K
# The 2*0.5 exponent stays a float (1.0) instead of being cast back to int.
a = (m**2)**0.5
a.dimension
```
# Degree rendering
Degree as favunit is rendered as "<function deg at 0x...>"
```
from physipy import rad, units
deg = units["deg"]
a = 5*deg
# Bug demo: the favunit renders as "<function deg at 0x...>".
a.favunit = deg
a
```
| github_jupyter |
```
from las import LASReader
import numpy as np
import pandas as pd
from scipy import signal
import matplotlib.pyplot as plt
file = r'./data/7120_1_3.las'
def loadLog(file):
    """Read a LAS well-log file, substituting NaN for null values.

    Returns the LASReader object for *file*.
    """
    return LASReader(file, null_subs=np.nan)
def npSonic(file):
    """
    Load the LAS log and return the full LASReader object.

    NOTE(review): despite the name and the original docstring ("1D array of
    sonic/dt data"), this returns the whole log object -- callers access
    ``.data['AC']``, ``.curves`` etc. on the result.
    """
    data = loadLog(file)
    return data
def MedFilt(arr, filter_size):
    """Median-filter *arr* with a kernel of length *filter_size*.

    Thin wrapper around scipy.signal.medfilt; *filter_size* must be odd and
    edges are zero-padded by scipy.
    """
    return signal.medfilt(volume=arr, kernel_size=filter_size)
def las2Dataframe(file):
    """Load a LAS file and return its curve data as a pandas DataFrame."""
    reader = LASReader(file, null_subs=np.nan)
    return pd.DataFrame(reader.data)
def integrate(arr, start_time=None):
    """
    Integrate a sonic log into a two-way-time vs depth relationship.

    Parameters
    ----------
    arr : array-like
        Slowness samples (us) at a 0.1524 m (half-foot) depth step;
        NaNs are treated as zero.
    start_time : float, optional
        Two-way time (s) at the top of the log.  Defaults to the
        notebook-level global ``log_start_time`` for backward
        compatibility with the original implicit-global behaviour.

    Returns
    -------
    numpy.ndarray
        Cumulative two-way time per sample, offset by *start_time*.
    """
    if start_time is None:
        # Preserve the original behaviour of reading the notebook global.
        start_time = log_start_time
    scaled = 0.1524 * np.nan_to_num(arr) / 1e6
    tcum = 2 * np.cumsum(scaled)
    return tcum + start_time
def vavgSonic():
    """Calculate average velocity from the sonic log (not yet implemented)."""
    pass

def vrmsSonic():
    """Calculate RMS velocity from the sonic log (not yet implemented)."""
    pass

def vintSonic(tops):
    """Calculate interval velocity for each formation in *tops* (not yet implemented)."""
    pass

def v0kSonic():
    """Calculate V0/k for each formation interval (not yet implemented)."""
    pass
## Dealing with LAS file
data = npSonic(file)
print (data.curves.names)
# Pull individual curves; convert slowness to us/m and density to SI.
TVDSS = data.data['TVDSS']
GR = data.data['GR']
NEU = data.data['NEU']
RDEP = data.data['RDEP']
PHIE = data.data['PHIE_CPI']
PEF = data.data['PEF']
DT = data.data['AC']*3.28084 # convert usec/ft to usec/m
DTS = data.data['ACS']*3.28084 # convert usec/ft to usec/m
RHOB = data.data['DEN']*1000 # convert to SI units
DT
# Datum bookkeeping: KB elevation, water depth and the replacement interval
# used to derive the two-way time at the top of the log.
KB_elev = 30 #data.well.KB.data # Kelly Bushing elevation
water_depth = 331.5 #data.well.GL.data # Depth to Sea Floor below sea level
top_of_log = data.start
print ("KB elevation [m]: ", KB_elev) # Kelly Bushing (ft)
print ("Seafloor elevation [m]: ", water_depth) # Depth to sea floor below sea level
repl_int = data.start + KB_elev - water_depth
water_vel = 1480 # velocity of sea water [m/s]
EGL_time = 2.0 * np.abs(KB_elev) / water_vel
print ("Ground Level time above SRD :", EGL_time)
# NOTE(review): water_depth [m] is summed with EGL_time [s] here --
# dimensionally inconsistent; confirm the intended datum correction.
water_twt = 2.0*abs(water_depth + EGL_time) / water_vel
print ("water_time: ", water_twt)
print ("Top of sonic log [m]: ", data.start)
print ("replacement interval [m]: ", repl_int)
repl_vel = 2100 # m/s
repl_time = 2.0 * repl_int / repl_vel
print ("two-way-replacement time: ", repl_time)
log_start_time = water_twt + repl_time
print ('log_start_time:', log_start_time)
#data = dfSonic(file)
# BUG FIX: the median-filter helper defined above is named MedFilt;
# `npMedFilt` does not exist and raised NameError.
sonicMedFilt = MedFilt(data.data['AC'], 31)
sonicMedFilt
data.data['AC']
df = las2Dataframe(file)
df.head()
# Keep only the depth/sonic-related curves.
df.drop(['DEN', 'GR', 'NEU', 'PEF', 'ACS', 'RDEP', 'Lithology_detailed',
         'AC_CPI', 'DEN_CPI', 'GR_CPI', 'NEU_CPI', 'PHIE_CPI', 'PHIT_CPI',
         'SW_CPI', 'VShale_CPI', 'RDEP_CPI'], axis=1, inplace=True)
# Flip TVDSS to positive-down depth.
df.TVDSS = df.TVDSS*-1
df.head()
# Interval velocity [m/s] from the sonic slowness curve.
df['VEL'] = 304800 / df['AC']
df.head()
df['OWT'] = df['TVDSS'] / df['VEL']
df.head()
df = df.dropna()
# Seed the interval-time column with a constant before the recursive fill.
df['Tint'] = 0.19
df.head()
# BUG FIX: the original `if df['Tint'].shift(-1) == np.nan:` raised
# "truth value of a Series is ambiguous" (and `== np.nan` is always False
# anyway, so the intended NaN check never worked).  Compute the shifted
# interval time vectorised and fall back to the 0.19 s seed where the
# shift produced NaN.
# NOTE(review): `(df['VEL'] + df['VEL'])/2` equals df['VEL']; presumably
# `df['VEL'].shift(-1)` was intended for one term (the cell below
# recomputes Tint with the shifted form) -- confirm before relying on it.
df['Tint'] = (
    df['Tint'].shift(-1)
    + (df['TVDSS'] - df['TVDSS'].shift(-1)) * ((df['VEL'] + df['VEL'])/(2*304785.126485))
).fillna(0.19)
df.head()
# NOTE(review): this recomputes Tint with a different formula (shifted VEL
# term and a 0.000003281 factor) than the cell above -- confirm which
# definition is intended.
df['Tint'] = df['Tint'].shift(-1)+(((df['TVDSS']-df['TVDSS'].shift(-1)*(df['VEL'] + df['VEL'].shift(-1))/2)*0.000003281))
df.tail()
df.head()
# Inspect the raw log structure.
log = loadLog(file)
log.curves.items
log.data[['DEPT', 'AC']].shape
log.data.shape
log.data['TVDSS']
sonic = npSonic(file)
# BUG FIX: `npMedFilt` does not exist (the helper is MedFilt), and
# npSonic returns the whole LASReader object, so the filter must be
# applied to the sonic curve rather than the log object itself.
sonic_med41 = MedFilt(sonic.data['AC'], 41)
# Quick-look plot of interval velocity (304800/AC) vs TVDSS.
fig, ax = plt.subplots(figsize=(5,15))
ax.set_ylim (0,3000)
ax.invert_yaxis()
ax.yaxis.grid(True)
ax.get_xaxis().set_visible(False)
ax=ax.twiny()
ax.grid(True)
ax.set_xlim(0,7000)
ax.spines['top'].set_position(('outward',0))
ax.plot(304800/log.data['AC'], log.data['TVDSS']*-1, label='Vint[m/s]', color='blue')
ax.set_xlabel('Vint[m/s]', color='blue')
ax.tick_params(axis='x', colors='blue')
#plt.plot(log.data['AC'], log.data['DEPT'])
#plt.plot(concat[1], log.data['DEPT'])
sonic_med41.shape
sonic.shape
# NOTE(review): `concat` is used here before any visible definition, and
# `Vavg` is defined two lines below its use -- these cells appear to be out
# of execution order; confirm before running top-to-bottom.
concat = np.vstack((concat, Vavg))
concat.shape
concat[2]
Vavg = (log.data['TVDSS']*-1)/(304.8/log.data['AC'])
Vavg
def velocity_logs(top_depth, bottom_depth):
    """Plot four velocity tracks over the depth range [top_depth, bottom_depth].

    Tracks: raw sonic (AC), filtered sonic (``concat[1]``), instantaneous
    velocity derived from the filtered sonic, and an average-velocity curve
    (``concat[3]``).

    NOTE(review): depends on the notebook-level globals ``file`` and
    ``concat``; ``concat`` must be built by the cells above before calling.
    """
    log = loadLog(file)
    fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(15,15), sharey=True)
    fig.subplots_adjust(top=0.9,wspace=0.1)

    # General settings for all axes: shared depth range, depth increasing down.
    for axes in ax:
        axes.set_ylim (top_depth,bottom_depth)
        axes.invert_yaxis()
        axes.yaxis.grid(True)
        axes.get_xaxis().set_visible(False)

    # 1st track: Sonic
    ax01=ax[0].twiny()
    ax01.grid(True)
    ax01.set_xlim(0,200)
    ax01.spines['top'].set_position(('outward',0))
    ax01.set_xlabel('Vint[m/s]')
    ax01.plot(log.data['AC'], log.data['TVDSS']*-1, label='Vint[m/s]', color='blue')
    ax01.set_xlabel('Vint[m/s]', color='blue')
    ax01.tick_params(axis='x', colors='blue')

    # 2nd track: Sonic (median filtered)
    ax02=ax[1].twiny()
    ax02.grid(True)
    ax02.set_xlim(0,200)
    ax02.spines['top'].set_position(('outward',0))
    ax02.set_xlabel('Vint[m/s]')
    ax02.plot(concat[1], log.data['TVDSS']*-1, label='Vint[m/s]', color='red')
    ax02.set_xlabel('Vint[m/s]', color='red')
    ax02.tick_params(axis='x', colors='red')

    # 3rd track: Velocity from sonic
    ax03=ax[2].twiny()
    ax03.grid(True)
    ax03.set_xlim(0,7000)
    ax03.spines['top'].set_position(('outward',0))
    ax03.set_xlabel('Vinst[m/s]')
    ax03.plot(304800/concat[1], log.data['TVDSS']*-1, label='Vinst[m/s]', color='orange')
    ax03.set_xlabel('Vinst[m/s]', color='orange')
    ax03.tick_params(axis='x', colors='orange')

    # 4th track: Average Velocity from sonic
    # BUG FIX: this track was drawn on ax[2], overlaying the 3rd track and
    # leaving the 4th subplot empty; it belongs on ax[3].
    ax04=ax[3].twiny()
    ax04.grid(True)
    ax04.set_xlim(0,7000)
    ax04.spines['top'].set_position(('outward',0))
    ax04.set_xlabel('Vinst[m/s]')
    ax04.plot(concat[3], log.data['TVDSS']*-1, label='Vinst[m/s]', color='orange')
    ax04.set_xlabel('Vinst[m/s]', color='orange')
    ax04.tick_params(axis='x', colors='orange')
velocity_logs(0,3000)
```
| github_jupyter |
# Spark streaming basics project
_____
### Note on Streaming
Streaming is something that is rapidly advancing and changing fast, there are multiple new libraries every year, new and different services always popping up, and what is in this notebook may or may not apply to you. Maybe you're looking for something specific on Kafka, or maybe you are looking for streaming from Twitter, in which case Spark might be overkill for what you really want. Realistically speaking each situation is going to require a customized solution and this course is never going to be able to supply a one size fits all solution. Because of this, I wanted to point out some great resources for Python and Spark Streaming:
* [The Official Documentation is great. This should be your first go to.](http://spark.apache.org/docs/latest/streaming-programming-guide.html#spark-streaming-programming-guide)
* [Fantastic Guide to Spark Streaming with Kafka](https://www.rittmanmead.com/blog/2017/01/getting-started-with-spark-streaming-with-python-and-kafka/)
* [Another Spark Streaming Example with Geo Plotting](http://nbviewer.jupyter.org/github/ibm-cds-labs/spark.samples/blob/master/notebook/DashDB%20Twitter%20Car%202015%20Python%20Notebook.ipynb)
____
Spark has pretty well known Streaming Capabilities, if streaming is something you've found yourself needing at work then you are probably familiar with some of these concepts already, in which case you may find it more useful to jump straight to the official documentation here:
http://spark.apache.org/docs/latest/streaming-programming-guide.html#spark-streaming-programming-guide
It is really a great guide, but keep in mind some of the features are restricted to Scala at this time (Spark 2.1), hopefully they will be expanded to the Python API in the future!
For those of you new to Spark Streaming, let's get started with a classic example, streaming Twitter! Twitter is a great source for streaming because it's something most people already have an intuitive understanding of, you can visit the site yourself, and a lot of streaming technology has come out of Twitter as a company. You don't get access to the entire "firehose" of Twitter without paying for it, but that would be too much for us to handle anyway, so we'll be more than fine with the freely available API access.
_____
Let's discuss SparkStreaming!
Spark Streaming is an extension of the core Spark API that enables scalable, high-throughput, fault-tolerant stream processing of live data streams. Data can be ingested from many sources like Kafka, Flume, Kinesis, or TCP sockets, and can be processed using complex algorithms expressed with high-level functions like map, reduce, join and window. Finally, processed data can be pushed out to filesystems, databases, and live dashboards. In fact, you can apply Spark's machine learning and graph processing algorithms on data streams.
<img src='http://spark.apache.org/docs/latest/img/streaming-arch.png'/>
Keep in mind that a few of these Streaming Capabilities are limited when it comes to Python, you'll need to reference the documentation for the most up to date information. Also the streaming contexts tend to follow more along with the older RDD syntax, so a few things might seem different than what we are used to seeing, keep that in mind, you'll definitely want to have a good understanding of lambda expressions before continuing with this!
There are SparkSQL modules for streaming:
http://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=streaming#module-pyspark.sql.streaming
But they are all still listed as experimental, so instead of showing you something that might break in the future, we'll stick to the RDD methods (which is what the documentation also currently shows for streaming).
Internally, it works as follows. Spark Streaming receives live input data streams and divides the data into batches, which are then processed by the Spark engine to generate the final stream of results in batches.
<img src='http://spark.apache.org/docs/latest/img/streaming-flow.png'/>
## Simple local example
```
# Structured Streaming word-count: read lines from a local TCP socket.
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode,split

spark = SparkSession.builder.appName('spark_stream').getOrCreate()

# Create DataFrame representing the stream of input lines from connection to localhost:9999
lines = spark \
    .readStream \
    .format("socket") \
    .option("host", "localhost") \
    .option("port", 9999) \
    .load()

# Split the lines into words
# explode() flattens the array returned by split() into one row per word.
words = lines.select(
    explode(
        split(lines.value, " ")
    ).alias('word')
)

# Generate running word count
# Aggregation over an unbounded stream; it only runs once a query starts.
wordCounts = words.groupBy('word').count()
```
Now we open up a Unix terminal and type:
$ nc -lk 9999
$ hello world any text you want
With this running run the line below, then type Ctrl+C to terminate it.
```
# Start running the query that prints the running counts to the console
# outputMode('complete') re-emits the full counts table on every batch;
# awaitTermination() blocks until the query is stopped (interrupt the kernel).
query = wordCounts.writeStream.outputMode('complete').format('console').start()
query.awaitTermination()

# Sample console output, kept for reference (string literal, never executed):
''' will print like this based on words we enter
-------------------------------------------
Batch: 0
-------------------------------------------
+----+-----+
|word|count|
+----+-----+
+----+-----+
-------------------------------------------
Batch: 1
-------------------------------------------
+------+-----+
| word|count|
+------+-----+
|hadoop| 1|
+------+-----+
-------------------------------------------
Batch: 2
-------------------------------------------
+------+-----+
| word|count|
+------+-----+
|apache| 1|
|hadoop| 1|
+------+-----+
-------------------------------------------
Batch: 3
-------------------------------------------
+------+-----+
| word|count|
+------+-----+
|apache| 1|
| spark| 1|
|hadoop| 2|
+------+-----+
-------------------------------------------
Batch: 4
-------------------------------------------
+------+-----+
| word|count|
+------+-----+
|apache| 2|
| spark| 1|
|hadoop| 2|
+------+-----+
'''
```
| github_jupyter |
# Title: Alert Investigation (Windows Process Alerts)
**Notebook Version:** 1.0<br>
**Python Version:** Python 3.6 (including Python 3.6 - AzureML)<br>
**Required Packages**: kqlmagic, msticpy, pandas, numpy, matplotlib, networkx, ipywidgets, ipython, scikit_learn<br>
**Platforms Supported**:<br>
- Azure Notebooks Free Compute
- Azure Notebooks DSVM
- OS Independent
**Data Sources Required**:<br>
- Log Analytics - SecurityAlert, SecurityEvent (EventIDs 4688 and 4624/25)
- (Optional) - VirusTotal (with API key)
## Description:
This notebook is intended for triage and investigation of security alerts. It is specifically targeted at alerts triggered by suspicious process activity on Windows hosts. Some of the sections will work on other types of alerts but this is not guaranteed.
<h2><font color="red">Warning: Example Notebook - Not for production use!</font></h2>
<font style="border-left: 6px solid red;background-color: lightgrey;">
<nbsp> This notebook is meant to be illustrative of specific scenarios and is not actively maintained. <br></font>
<font style="border-left: 6px solid red;background-color: lightgrey;">
It is unlikely to be runnable directly in your environment. Instead, please use the notebooks
in the root of this repo.
</font>
<a id='toc'></a>
## Table of Contents
- [Setup and Authenticate](#setup)
- [Get Alerts List](#getalertslist)
- [Choose an Alert to investigate](#enteralertid)
- [Extract Properties and entities from alert](#extractalertproperties)
- [Entity Graph](#entitygraph)
- [Related Alerts](#related_alerts)
- [Session Process Tree](#processtree)
- [Process Timeline](#processtimeline)
- [Other Process on Host](#process_clustering)
- [Check for IOCs in Commandline](#cmdlineiocs)
- [VirusTotal lookup](#virustotallookup)
- [Alert command line - Occurrence on other hosts in subscription](#cmdlineonotherhosts)
- [Host Logons](#host_logons)
- [Alert Account](#logonaccount)
- [Failed Logons](#failed_logons)
- [Appendices](#appendices)
- [Saving data to Excel](#appendices)
<a id='setup'></a>[Contents](#toc)
# Setup
1. Make sure that you have installed packages specified in the setup (uncomment the lines to execute)
2. There are some manual steps up to selecting the alert ID. After this most of the notebook can be executed sequentially
3. Major sections should be executable independently (e.g. Alert Command line and Host Logons can be run skipping Session Process Tree)
## Install Packages
The first time this cell runs for a new Azure Notebooks project or local Python environment it will take several minutes to download and install the packages. In subsequent runs it should run quickly and confirm that package dependencies are already installed. Unless you want to upgrade the packages you can feel free to skip execution of the next cell.
If you see any import failures (```ImportError```) in the notebook, please re-run this cell and answer 'y', then re-run the cell where the failure occurred.
Note you may see some warnings about package incompatibility with certain packages. This does not affect the functionality of this notebook but you may need to upgrade the packages producing the warnings to a more recent version.
```
# Environment check: this notebook requires Python 3.6+.
import sys
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
MIN_REQ_PYTHON = (3,6)
if sys.version_info < MIN_REQ_PYTHON:
    print('Check the Kernel->Change Kernel menu and ensure that Python 3.6')
    print('or later is selected as the active kernel.')
    sys.exit("Python %s.%s or later is required.\n" % MIN_REQ_PYTHON)

# Package Installs - try to avoid if they are already installed
try:
    import msticpy.sectools as sectools
    import Kqlmagic
    print('If you answer "n" this cell will exit with an error in order to avoid the pip install calls,')
    print('This error can safely be ignored.')
    resp = input('msticpy and Kqlmagic packages are already loaded. Do you want to re-install? (y/n)')
    if resp.strip().lower() != 'y':
        # Deliberate exit: short-circuits the pip installs below.
        sys.exit('pip install aborted - you may skip this error and continue.')
    else:
        print('After installation has completed, restart the current kernel and run '
              'the notebook again skipping this cell.')
except ImportError:
    # Packages not present yet -- fall through to the installs.
    pass

print('\nPlease wait. Installing required packages. This may take a few minutes...')
# IPython shell escapes -- only valid inside a notebook cell.
!pip install git+https://github.com/microsoft/msticpy --upgrade --user
!pip install Kqlmagic --no-cache-dir --upgrade --user
print('\nTo ensure that the latest versions of the installed libraries '
      'are used, please restart the current kernel and run '
      'the notebook again skipping this cell.')
```
### Import Python Packages
### Get WorkspaceId
To find your Workspace Id go to [Log Analytics](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.OperationalInsights%2Fworkspaces). Look at the workspace properties to find the ID.
```
# Imports
import sys
MIN_REQ_PYTHON = (3,6)
if sys.version_info < MIN_REQ_PYTHON:
    print('Check the Kernel->Change Kernel menu and ensure that Python 3.6')
    print('or later is selected as the active kernel.')
    sys.exit("Python %s.%s or later is required.\n" % MIN_REQ_PYTHON)

import numpy as np
from IPython import get_ipython
from IPython.display import display, HTML, Markdown
import ipywidgets as widgets
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
sns.set()
import pandas as pd
# Widen pandas display limits so alert tables are readable in the notebook.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_colwidth', 100)
import msticpy.sectools as sectools
import msticpy.nbtools as mas
import msticpy.nbtools.kql as qry
import msticpy.nbtools.nbdisplay as nbdisp

# Some of our dependencies (networkx) still use deprecated Matplotlib
# APIs - we can't do anything about it so suppress them from view
# NOTE(review): relies on `warnings` imported by the install cell above.
from matplotlib import MatplotlibDeprecationWarning
warnings.simplefilter("ignore", category=MatplotlibDeprecationWarning)

import os
from msticpy.nbtools.wsconfig import WorkspaceConfig
ws_config_file = 'config.json'

# Try to read workspace/tenant ids from a local config.json; any failure
# falls through to the interactive prompts below.
WORKSPACE_ID = None
TENANT_ID = None

try:
    ws_config = WorkspaceConfig(ws_config_file)
    display(Markdown(f'Read Workspace configuration from local config.json for workspace **{ws_config["workspace_name"]}**'))
    for cf_item in ['tenant_id', 'subscription_id', 'resource_group', 'workspace_id', 'workspace_name']:
        display(Markdown(f'**{cf_item.upper()}**: {ws_config[cf_item]}'))
    # 'cookiecutter' marks an untouched template placeholder.
    # NOTE(review): with `or`, one real value accepts both ids even if the
    # other is still a placeholder -- possibly intended to be `and`; confirm.
    if ('cookiecutter' not in ws_config['workspace_id'] or
            'cookiecutter' not in ws_config['tenant_id']):
        WORKSPACE_ID = ws_config['workspace_id']
        TENANT_ID = ws_config['tenant_id']
except Exception:
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to Exception.  Config-read failures still
    # fall through to the manual prompts below.
    pass

if not WORKSPACE_ID or not TENANT_ID:
    display(Markdown('**Workspace configuration not found.**\n\n'
                     'Please go to your Log Analytics workspace, copy the workspace ID'
                     ' and/or tenant Id and paste here.<br> '
                     'Or read the workspace_id from the config.json in your Azure Notebooks project.'))
    ws_config = None
    ws_id = mas.GetEnvironmentKey(env_var='WORKSPACE_ID',
                                  prompt='Please enter your Log Analytics Workspace Id:', auto_display=True)
    ten_id = mas.GetEnvironmentKey(env_var='TENANT_ID',
                                   prompt='Please enter your Log Analytics Tenant Id:', auto_display=True)
```
### Authenticate to Log Analytics
If you are using user/device authentication, run the following cell.
- Click the 'Copy code to clipboard and authenticate' button.
- This will pop up an Azure Active Directory authentication dialog (in a new tab or browser window). The device code will have been copied to the clipboard.
- Select the text box and paste (Ctrl-V/Cmd-V) the copied value.
- You should then be redirected to a user authentication page where you should authenticate with a user account that has permission to query your Log Analytics workspace.
Use the following syntax if you are authenticating using an Azure Active Directory AppId and Secret:
```
%kql loganalytics://tenant(aad_tenant).workspace(WORKSPACE_ID).clientid(client_id).clientsecret(client_secret)
```
instead of
```
%kql loganalytics://code().workspace(WORKSPACE_ID)
```
Note: you may occasionally see a JavaScript error displayed at the end of the authentication - you can safely ignore this.<br>
On successful authentication you should see a ```popup schema``` button.
```
# Fall back to the widget-supplied ids when config.json provided none.
if not WORKSPACE_ID or not TENANT_ID:
    try:
        WORKSPACE_ID = ws_id.value
        TENANT_ID = ten_id.value
    except NameError:
        # The prompt widgets were never created -- nothing to authenticate with.
        raise ValueError('No workspace or Tenant Id.')

mas.kql.load_kql_magic()
# Device-code authentication to Log Analytics (IPython cell magic).
%kql loganalytics://code().tenant(TENANT_ID).workspace(WORKSPACE_ID)
```
<a id='getalertslist'></a>[Contents](#toc)
# Get Alerts List
Specify a time range to search for alerts. Once this is set, run the following cell to retrieve any alerts in that time window.
You can change the time range and re-run the queries until you find the alerts that you want.
```
# Pick the alert search window (re-run with different bounds as needed).
alert_q_times = mas.QueryTime(units='day', max_before=20, before=5, max_after=1)
alert_q_times.display()

# Retrieve alert counts (grouped by type) and the full alert list.
alert_counts = qry.list_alerts_counts(provs=[alert_q_times])
alert_list = qry.list_alerts(provs=[alert_q_times])
print(len(alert_counts), ' distinct alert types')
print(len(alert_list), ' distinct alerts')

display(HTML('<h2>Alert Timeline</h2>'))
nbdisp.display_timeline(data=alert_list, source_columns = ['AlertName', 'CompromisedEntity'], title='Alerts', height=200)
display(HTML('<h2>Top alerts</h2>'))
alert_counts.head(20) # remove '.head(20)'' to see the full list grouped by AlertName
```
<a id='enteralertid'></a>[Contents](#toc)
# Choose Alert to Investigate
Either pick an alert from a list of retrieved alerts or paste the SystemAlertId into the text box in the following section.
### Select alert from list
As you select an alert, the main properties will be shown below the list.
Use the filter box to narrow down your search to any substring in the AlertName.
```
# Alert browser widget: selecting an item renders it via display_alert.
alert_select = mas.SelectAlert(alerts=alert_list, action=nbdisp.display_alert)
alert_select.display()
```
### Or paste in an alert ID and fetch it
**Skip this if you selected from the above list**
```
# Allow alert to be selected
# Allow subscription to be selected
# Alternative to the list above: fetch one alert by pasted SystemAlertId.
get_alert = mas.GetSingleAlert(action=nbdisp.display_alert)
get_alert.display()
```
<a id='extractalertproperties'></a>[Contents](#toc)
## Extract properties and entities from Alert
This section extracts the alert information and entities into a SecurityAlert object allowing us to query the properties more reliably.
In particular, we use the alert to automatically provide parameters for queries and UI elements.
Subsequent queries will use properties like the host name and derived properties such as the OS family (Linux or Windows) to adapt the query. Query time selectors like the one above will also default to an origin time that matches the alert selected.
The alert view below shows all of the main properties of the alert plus the extended property dictionary (if any) and JSON representations of the Entity.
```
# Extract entities and properties into a SecurityAlert class
# The explicitly fetched alert (by id) takes precedence over the list pick.
if alert_select.selected_alert is None and get_alert.selected_alert is None:
    sys.exit("Please select an alert before executing remaining cells.")
if get_alert.selected_alert is not None:
    security_alert = mas.SecurityAlert(get_alert.selected_alert)
elif alert_select.selected_alert is not None:
    security_alert = mas.SecurityAlert(alert_select.selected_alert)
mas.disp.display_alert(security_alert, show_entities=True)
```
<a id='entitygraph'></a>[Contents](#toc)
## Entity Graph
Depending on the type of alert there may be one or more entities attached as properties. Entities are things like Host, Account, IpAddress, Process, etc. - essentially the 'nouns' of security investigation. Events and alerts are the things that link them in actions so can be thought of as the verbs. Entities are often related to other entities - for example a process will usually have a related file entity (the process image) and an Account entity (the context in which the process was running). Endpoint alerts typically always have a host entity (which could be a physical or virtual machine).
### Plot using Networkx/Matplotlib
```
# Draw the graph using Networkx/Matplotlib
%matplotlib inline
# Build the entity-relationship graph for the selected alert and render it.
alertentity_graph = mas.create_alert_graph(security_alert)
nbdisp.draw_alert_entity_graph(alertentity_graph, width=15)
```
<a id='related_alerts'></a>[Contents](#toc)
# Related Alerts
For a subset of entities in the alert we can search for any alerts that have that entity in common. Currently this query looks for alerts that share the same Host, Account or Process and lists them below.
**Notes:**
- Some alert types do not include all of these entity types.
- The original alert will be included in the "Related Alerts" set if it occurs within the query time boundary set below.
The query time boundaries default to a longer period than when searching for the alert. You can extend the time boundary searched before or after the alert time. If the widget doesn't support the time boundary that you want you can change the max_before and max_after parameters in the call to QueryTime below to extend the possible time boundaries.
```
# set the origin time to the time of our alert
query_times = mas.QueryTime(units='day', origin_time=security_alert.TimeGenerated,
                            max_before=28, max_after=1, before=5)
query_times.display()

related_alerts = qry.list_related_alerts(provs=[query_times, security_alert])

if related_alerts is not None and not related_alerts.empty:
    # Per-entity breakdowns: {AlertType: count} for alerts sharing the same
    # host, account or process as the selected alert.
    host_alert_items = related_alerts\
        .query('host_match == @True')[['AlertType', 'StartTimeUtc']]\
        .groupby('AlertType').StartTimeUtc.agg('count').to_dict()
    acct_alert_items = related_alerts\
        .query('acct_match == @True')[['AlertType', 'StartTimeUtc']]\
        .groupby('AlertType').StartTimeUtc.agg('count').to_dict()
    proc_alert_items = related_alerts\
        .query('proc_match == @True')[['AlertType', 'StartTimeUtc']]\
        .groupby('AlertType').StartTimeUtc.agg('count').to_dict()

    def print_related_alerts(alertDict, entityType, entityName):
        # Print a one-line summary plus per-alert-type counts for one entity.
        if len(alertDict) > 0:
            print('Found {} different alert types related to this {} (\'{}\')'
                  .format(len(alertDict), entityType, entityName))
            for (k,v) in alertDict.items():
                print(' {}, Count of alerts: {}'.format(k, v))
        else:
            print('No alerts for {} entity \'{}\''.format(entityType, entityName))

    print_related_alerts(host_alert_items, 'host', security_alert.hostname)
    print_related_alerts(acct_alert_items, 'account',
                         security_alert.primary_account.qualified_name
                         if security_alert.primary_account
                         else None)
    print_related_alerts(proc_alert_items, 'process',
                         security_alert.primary_process.ProcessFilePath
                         if security_alert.primary_process
                         else None)
    nbdisp.display_timeline(data=related_alerts, source_columns = ['AlertName'], title='Alerts', height=100)
else:
    display(Markdown('No related alerts found.'))
```
### Show these related alerts on a graph
This should indicate which entities the other alerts are related to.
This can be unreadable with a lot of alerts. Use the matplotlib interactive zoom control to zoom in to part of the graph.
```
# Draw a graph of this (add to entity graph)
%matplotlib notebook
%matplotlib inline
if related_alerts is not None and not related_alerts.empty:
    # Overlay the related alerts on the alert/entity graph built earlier.
    rel_alert_graph = mas.add_related_alerts(related_alerts=related_alerts,
                                             alertgraph=alertentity_graph)
    nbdisp.draw_alert_entity_graph(rel_alert_graph, width=15)
else:
    display(Markdown('No related alerts found.'))
```
### Browse List of Related Alerts
Select an Alert to view details.
If you want to investigate that alert - copy its *SystemAlertId* property and open a new instance of this notebook to investigate this alert.
```
def disp_full_alert(alert):
    # Selection callback: expose the chosen alert as a global and render it.
    global related_alert
    related_alert = mas.SecurityAlert(alert)
    nbdisp.display_alert(related_alert, show_entities=True)

if related_alerts is not None and not related_alerts.empty:
    # SelectAlert expects a 'CompromisedEntity' column; derive it from Computer.
    related_alerts['CompromisedEntity'] = related_alerts['Computer']
    print('Selected alert is available as \'related_alert\' variable.')
    rel_alert_select = mas.SelectAlert(alerts=related_alerts, action=disp_full_alert)
    rel_alert_select.display()
else:
    display(Markdown('No related alerts found.'))
```
<a id='processtree'></a>[Contents](#toc)
# Get Process Tree
If the alert has a process entity this section tries to retrieve the entire process tree to which that process belongs.
Notes:
- The alert must have a process entity
- Only processes started within the query time boundary will be included
- Ancestor and descendant processes are retrieved to two levels (i.e. the parent and grandparent of the alert process plus any child and grandchild processes).
- Sibling processes are the processes that share the same parent as the alert process
- This can be a long-running query, especially if a wide time window is used! Caveat Emptor!
The source (alert) process is shown in red.
What's shown for each process:
- Each process line is indented according to its position in the tree hierarchy
- Top line fields:
- \[relationship to source process:lev# - where # is the hops away from the source process\]
- Process creation date-time (UTC)
- Process Image path
- PID - Process Id
- SubjSess - the session Id of the process spawning the new process
- TargSess - the new session Id if the process is launched in another context/session. If 0/0x0 then the process is launched in the same session as its parent
- Second line fields:
- Process command line
- Account - name of the account context in which the process is running
```
# set the origin time to the time of our alert
query_times = mas.QueryTime(units='minute', origin_time=security_alert.origin_time)
query_times.display()

from msticpy.nbtools.query_defns import DataFamily
# Guard: the process-tree queries below only exist for Windows data.
if security_alert.data_family != DataFamily.WindowsSecurity:
    raise ValueError('The remainder of this notebook currently only supports Windows. '
                     'Linux support is in development but not yet implemented.')
def extract_missing_pid(security_alert):
    """Recover the process id from the alert's extended properties.

    Checks 'Process Id' first, then 'Suspicious Process Id', and returns
    the first truthy value found; returns None when neither is usable.
    """
    ext_props = security_alert.ExtendedProperties
    for prop_name in ('Process Id', 'Suspicious Process Id'):
        candidate = ext_props.get(prop_name, None)
        if candidate:
            return candidate
    return None
def extract_missing_sess_id(security_alert):
    """Recover the logon session id for the alert.

    Prefers the 'Account Session Id' extended property; otherwise falls
    back to the SessionId of the first host-logon-session entity.
    Returns None when no session id can be found.
    """
    session_id = security_alert.ExtendedProperties.get('Account Session Id', None)
    if session_id:
        return session_id
    # Fall back to the first logon-session entity (either type spelling).
    for entity in security_alert.entities:
        if entity['Type'] in ('host-logon-session', 'hostlogonsession'):
            return entity['SessionId']
    return None
# Patch missing PID/session id onto the alert entities, then fetch the tree.
if (security_alert.primary_process):
    # Do some patching up if the process entity doesn't have a PID
    pid = security_alert.primary_process.ProcessId
    if not pid:
        pid = extract_missing_pid(security_alert)
        if pid:
            security_alert.primary_process.ProcessId = pid
        else:
            raise ValueError('Could not find the process Id for the alert process.')

    # Do the same if we can't find the account logon ID
    if not security_alert.get_logon_id():
        sess_id = extract_missing_sess_id(security_alert)
        if sess_id and security_alert.primary_account:
            security_alert.primary_account.LogonId = sess_id
        else:
            raise ValueError('Could not find the session Id for the alert process.')

    # run the query
    process_tree = qry.get_process_tree(provs=[query_times, security_alert])

    if len(process_tree) > 0:
        # Print out the text view of the process tree
        nbdisp.display_process_tree(process_tree)
    else:
        display(Markdown('No processes were returned so cannot obtain a process tree.'
                         '\n\nSkip to [Other Processes](#process_clustering) later in the'
                         ' notebook to retrieve all processes'))
else:
    display(Markdown('This alert has no process entity so cannot obtain a process tree.'
                     '\n\nSkip to [Other Processes](#process_clustering) later in the'
                     ' notebook to retrieve all processes'))
    process_tree = None
```
<a id='processtimeline'></a>[Contents](#toc)
## Process TimeLine
This shows each process in the process tree on a timeline view.
Labelling of individual process is very performance intensive and often results in nothing being displayed at all! Besides, for large numbers of processes it would likely result in an unreadable mess.
Your main tools for negotiating the timeline are the Hover tool (toggled on and off by the speech bubble icon) and the wheel-zoom and pan tools (the former is an icon with an elipse and a magnifying glass, the latter is the crossed-arrows icon). The wheel zoom is particularly useful.
As you hover over each process it will display the image name, PID and commandline.
Also shown on the graphic is the timestamp line of the source/alert process.
```
# Show timeline of events
# Hover shows image name, PID and command line; zoom/pan via the toolbar.
if process_tree is not None and not process_tree.empty:
    nbdisp.display_timeline(data=process_tree, alert=security_alert,
                            title='Alert Process Session', height=250)
```
<a id='process_clustering'></a>[Contents](#toc)
# Other Processes on Host - Clustering
Sometimes you don't have a source process to work with. Other times it's just useful to see what else is going on on the host. This section retrieves all processes on the host within the time bounds
set in the query times widget.
You can display the raw output of this by looking at the *processes_on_host* dataframe. Just copy this into a new cell and hit Ctrl-Enter.
Usually though, the results return a lot of very repetitive and uninteresting system processes so we attempt to cluster these to make the view easier to negotiate.
To do this we process the raw event list output to extract a few features that render strings (such as commandline)into numerical values. The default below uses the following features:
- commandLineTokensFull - this is a count of common delimiters in the commandline
(given by this regex r'[\s\-\\/\.,"\'|&:;%$()]'). The aim of this is to capture the commandline structure while ignoring variations on what is essentially the same pattern (e.g. temporary path GUIDs, target IP or host names, etc.)
- pathScore - this sums the ordinal (character) value of each character in the path (so /bin/bash and /bin/bosh would have similar scores).
- isSystemSession - 1 if this is a root/system session, 0 if anything else.
Then we run a clustering algorithm (DBScan in this case) on the process list. The result groups similar (noisy) processes together and leaves unique process patterns as single-member clusters.
### Clustered Processes (i.e. processes that have a cluster size > 1)
```
from msticpy.sectools.eventcluster import dbcluster_events, add_process_features

# Pull every process event on the host in the query window, then cluster so
# repetitive system noise collapses into single representative rows.
processes_on_host = qry.list_processes(provs=[query_times, security_alert])
if processes_on_host is not None and not processes_on_host.empty:
    feature_procs = add_process_features(input_frame=processes_on_host,
                                         path_separator=security_alert.path_separator)

    # you might need to play around with the max_cluster_distance parameter.
    # decreasing this gives more clusters.
    (clus_events, dbcluster, x_data) = dbcluster_events(data=feature_procs,
                                                        cluster_columns=['commandlineTokensFull',
                                                                         'pathScore',
                                                                         'isSystemSession'],
                                                        max_cluster_distance=0.0001)
    print('Number of input events:', len(feature_procs))
    print('Number of clustered events:', len(clus_events))
    # Bar chart of process names whose cluster has more than one member.
    clus_events[['ClusterSize', 'processName']][clus_events['ClusterSize'] > 1].plot.bar(x='processName',
                                                                                        title='Process names with Cluster > 1',
                                                                                        figsize=(12,3));
else:
    display(Markdown('Unable to obtain any processes for this host. This feature'
                     ' is currently only supported for Windows hosts.'
                     '\n\nIf this is a Windows host skip to [Host Logons](#host_logons)'
                     ' later in the notebook to examine logon events.'))
```
### Variability in Command Lines and Process Names
The top chart shows the variability of command line content for a given process name. The wider the box, the more instances were found with different command line structure.
Note, the 'structure' in this case is measured by the number of tokens or delimiters in the command line and does not look at content differences. This is done so that commonly varying instances of the same command line are grouped together.<br>
For example `updatepatch host1.mydom.com` and `updatepatch host2.mydom.com` will be grouped together.
The second chart shows the variability in executable path. This does compare content so `c:\windows\system32\net.exe` and `e:\windows\system32\net.exe` are treated as distinct. You would normally not expect to see any variability in this chart unless you have multiple copies of the same name executable or an executable is trying masquerade as another well-known binary.
```
# Looking at the variability of commandlines and process image paths
import seaborn as sns
sns.set(style="darkgrid")

if processes_on_host is not None and not processes_on_host.empty:
    # Box plot: spread of command-line token counts per process name.
    proc_plot = sns.catplot(y="processName", x="commandlineTokensFull",
                            data=feature_procs.sort_values('processName'),
                            kind='box', height=10)
    proc_plot.fig.suptitle('Variability of Commandline Tokens', x=1, y=1)

    # Box plot: spread of path scores per process name, split by session type.
    proc_plot = sns.catplot(y="processName", x="pathLogScore",
                            data=feature_procs.sort_values('processName'),
                            kind='box', height=10, hue='isSystemSession')
    proc_plot.fig.suptitle('Variability of Path', x=1, y=1);
```
The top graph shows that, for a given process, some have a wide variability in their command line content while the majority have little or none. Looking at a couple of examples - like cmd.exe, powershell.exe, reg.exe, net.exe - we can recognize several common command line tools.
The second graph shows processes by full process path content. We wouldn't normally expect to see variation here - as is the case with most. There is also quite a lot of variance in the score making it a useful proxy feature for unique path name (this means that proc1.exe and proc2.exe that have the same commandline score won't get collapsed into the same cluster).
Any process with a spread of values here means that we are seeing the same process name (but not necessarily the same file) is being run from different locations.
```
# Optionally display the clustered process data and define a helper for
# drilling into the cluster members of a single process name.
if not clus_events.empty:
    resp = input('View the clustered data? y/n')
    if resp == 'y':
        display(clus_events.sort_values('TimeGenerated')[['TimeGenerated', 'LastEventTime',
                                                          'NewProcessName', 'CommandLine',
                                                          'ClusterSize', 'commandlineTokensFull',
                                                          'pathScore', 'isSystemSession']])

    # Look at clusters for individual process names
    def view_cluster(exe_name):
        """Show all cluster members whose processName matches *exe_name*."""
        display(clus_events[['ClusterSize', 'processName', 'CommandLine', 'ClusterId']][clus_events['processName'] == exe_name])

    # BUG FIX: the rendered markdown ran 'processes' and 'by' together
    # (missing leading space) and contained a stray closing '</div>' tag.
    display(Markdown('You can view the cluster members for individual processes'
                     ' by inserting a new cell and entering:<br>'
                     '`>>> view_cluster(process_name)`<br>'
                     'where process_name is the unqualified process binary. E.g<br>'
                     '`>>> view_cluster(\'reg.exe\')`'))
```
### Time showing clustered vs. original data
```
# Show timeline of events - clustered events
if not clus_events.empty:
    # Overlay the de-duplicated (clustered) events on the full process set.
    nbdisp.display_timeline(data=clus_events,
                            overlay_data=processes_on_host,
                            alert=security_alert,
                            title='Distinct Host Processes (top) and All Proceses (bottom)')
```
<a id='cmdlineiocs'></a>[Contents](#toc)
# Base64 Decode and Check for IOCs
This section looks for Indicators of Compromise (IoC) within the data sets passed to it.
The first section looks at the commandline for the alert process (if any). It also looks for base64 encoded strings within the data - this is a common way of hiding attacker intent. It attempts to decode any strings that look like base64. Additionally, if the base64 decode operation returns any items that look like a base64 encoded string or file, a gzipped binary sequence, a zipped or tar archive, it will attempt to extract the contents before searching for potentially interesting items.
```
# Check the alert's own process command line for base64-encoded content
# and extract any IoCs found in the decoded result.
process = security_alert.primary_process
ioc_extractor = sectools.IoCExtract()

if process:
    # if nothing is decoded this just returns the input string unchanged
    base64_dec_str, _ = sectools.b64.unpack_items(input_string=process["CommandLine"])
    # '<decoded' markers are inserted by unpack_items where decoding occurred.
    if base64_dec_str and '<decoded' in base64_dec_str:
        print('Base64 encoded items found.')
        print(base64_dec_str)

        # any IoCs in the string?
        iocs_found = ioc_extractor.extract(base64_dec_str)

        if iocs_found:
            print('\nPotential IoCs found in alert process:')
            display(iocs_found)
else:
    print('Nothing to process')
```
### If we have a process tree, look for IoCs in the whole data set
You can replace the data=process_tree parameter to ioc_extractor.extract() to pass other data frames.
use the columns parameter to specify which column or columns that you want to search.
```
# Search the whole process data set (tree if available, otherwise the
# clustered events) for IoC patterns in the command lines.
ioc_extractor = sectools.IoCExtract()

try:
    # process_tree is only defined if the earlier process section ran.
    if not process_tree.empty:
        source_processes = process_tree
    else:
        source_processes = clus_events
except NameError:
    source_processes = None

if source_processes is not None:
    ioc_df = ioc_extractor.extract(data=source_processes,
                                   columns=['CommandLine'],
                                   os_family=security_alert.os_family,
                                   ioc_types=['ipv4', 'ipv6', 'dns', 'url',
                                              'md5_hash', 'sha1_hash', 'sha256_hash'])
    if len(ioc_df):
        display(HTML("<h3>IoC patterns found in process tree.</h3>"))
        display(ioc_df)
else:
    ioc_df = None
```
### If any Base64 encoded strings, decode and search for IoCs in the results.
For simple strings the Base64 decoded output is straightforward. However for nested encodings this can get a little complex and difficult to represent in a tabular format.
**Columns**
- reference - The index of the row item in dotted notation in depth.seq pairs (e.g. 1.2.2.3 would be the 3rd item at depth 3 that is a child of the 2nd item found at depth 1). This may not always be an accurate notation - it is mainly used to allow you to associate an individual row with the reference value contained in the full_decoded_string column of the topmost item).
- original_string - the original string before decoding.
- file_name - filename, if any (only if this is an item in zip or tar file).
- file_type - a guess at the file type (this is currently elementary and only includes a few file types).
- input_bytes - the decoded bytes as a Python bytes string.
- decoded_string - the decoded string if it can be decoded as a UTF-8 or UTF-16 string. Note: binary sequences may often successfully decode as UTF-16 strings but, in these cases, the decodings are meaningless.
- encoding_type - encoding type (UTF-8 or UTF-16) if a decoding was possible, otherwise 'binary'.
- file_hashes - collection of file hashes for any decoded item.
- md5 - md5 hash as a separate column.
- sha1 - sha1 hash as a separate column.
- sha256 - sha256 hash as a separate column.
- printable_bytes - printable version of input_bytes as a string of \xNN values
- src_index - the index of the row in the input dataframe from which the data came.
- full_decoded_string - the full decoded string with any decoded replacements. This is only really useful for top-level items, since nested items will only show the 'full' string representing the child fragment.
```
# Base64-decode command lines across the data set and hunt for IoCs in the
# decoded output; merge any hits into the running ioc_df frame.
import pandas as pd  # needed for pd.concat below

if source_processes is not None:
    dec_df = sectools.b64.unpack_items(data=source_processes, column='CommandLine')

if source_processes is not None and not dec_df.empty:
    display(HTML("<h3>Decoded base 64 command lines</h3>"))
    display(HTML("Warning - some binary patterns may be decodable as unicode strings"))
    display(dec_df[['full_decoded_string', 'original_string', 'decoded_string', 'input_bytes', 'file_hashes']])

    ioc_dec_df = ioc_extractor.extract(data=dec_df, columns=['full_decoded_string'])
    if len(ioc_dec_df):
        display(HTML("<h3>IoC patterns found in base 64 decoded data</h3>"))
        display(ioc_dec_df)
        if ioc_df is not None:
            # BUG FIX: DataFrame.append was deprecated and removed in
            # pandas 2.0 - use pd.concat instead.
            ioc_df = pd.concat([ioc_df, ioc_dec_df], ignore_index=True)
        else:
            ioc_df = ioc_dec_df
else:
    print("No base64 encodings found.")
    ioc_df = None
```
<a id='virustotallookup'></a>[Contents](#toc)
## Virus Total Lookup
This section uses the popular Virus Total service to check any recovered IoCs against VT's database.
To use this you need an API key from virus total, which you can obtain here: https://www.virustotal.com/.
Note that VT throttles requests for free API keys to 4/minute. If you are unable to process the entire data set, try splitting it and submitting smaller chunks.
**Things to note:**
- Virus Total lookups include file hashes, domains, IP addresses and URLs.
- The returned data is slightly different depending on the input type
- The VTLookup class tries to screen input data to prevent pointless lookups. E.g.:
- Only public IP Addresses will be submitted (no loopback, private address space, etc.)
- URLs with only local (unqualified) host parts will not be submitted.
- Domain names that are unqualified will not be submitted.
- Hash-like strings (e.g 'AAAAAAAAAAAAAAAAAA') that do not appear to have enough entropy to be a hash will not be submitted.
**Output Columns**
- Observable - The IoC observable submitted
- IoCType - the IoC type
- Status - the status of the submission request
- ResponseCode - the VT response code
- RawResponse - the entire raw json response
- Resource - VT Resource
- SourceIndex - The index of the Observable in the source DataFrame. You can use this to rejoin to your original data.
- VerboseMsg - VT Verbose Message
- ScanId - VT Scan ID if any
- Permalink - VT Permanent URL describing the resource
- Positives - If this is not zero, it indicates the number of malicious reports that VT holds for this observable.
- MD5 - The MD5 hash, if any
- SHA1 - The SHA1 hash, if any
- SHA256 - The SHA256 hash, if any
- ResolvedDomains - In the case of IP Addresses, this contains a list of all domains that resolve to this IP address
- ResolvedIPs - In the case Domains, this contains a list of all IP addresses resolved from the domain.
- DetectedUrls - Any malicious URLs associated with the observable.
```
# Read (or prompt for) the Virus Total API key and submit the collected
# IoCs for reputation lookup.
vt_key = mas.GetEnvironmentKey(env_var='VT_API_KEY',
                               help_str='To obtain an API key sign up here https://www.virustotal.com/',
                               prompt='Virus Total API key:')
vt_key.display()

if vt_key.value and ioc_df is not None and not ioc_df.empty:
    vt_lookup = sectools.VTLookup(vt_key.value, verbosity=2)
    print(f'{len(ioc_df)} items in input frame')
    # Count how many observables of each supported type will be submitted.
    supported_counts = {}
    for ioc_type in vt_lookup.supported_ioc_types:
        supported_counts[ioc_type] = len(ioc_df[ioc_df['IoCType'] == ioc_type])
    print('Items in each category to be submitted to VirusTotal')
    print('(Note: items have pre-filtering to remove obvious erroneous '
          'data and false positives, such as private IPaddresses)')
    print(supported_counts)
    print('-' * 80)
    vt_results = vt_lookup.lookup_iocs(data=ioc_df, type_col='IoCType', src_col='Observable')
    # Positives > 0 means VT holds at least one malicious report (see docs above).
    pos_vt_results = vt_results.query('Positives > 0')
    if len(pos_vt_results) > 0:
        display(HTML(f'<h3>{len(pos_vt_results)} Positive Results Found</h3>'))
        display(pos_vt_results[['Observable', 'IoCType','Permalink',
                                'ResolvedDomains', 'ResolvedIPs',
                                'DetectedUrls', 'RawResponse']])
    display(HTML('<h3>Other results</h3>'))
    display(vt_results.query('Status == "Success"'))
```
To view the raw response for a specific row.
```
# Parse and display the raw VT JSON response for one row of the results above.
import json
row_idx = 0  # The row number from one of the above dataframes
raw_response = json.loads(pos_vt_results['RawResponse'].loc[row_idx])
raw_response
```
<a id='cmdlineonotherhosts'></a>[Contents](#toc)
# Alert command line - Occurrence on other hosts in workspace
To get a sense of whether the alert process is something that is occurring on other hosts, run this section.
This might tell you that the alerted process is actually a commonly-run process and the alert is a false positive. Alternatively, it may tell you that a real infection or attack is happening on other hosts in your environment.
```
# Search the workspace for other hosts running the same command line as the
# alert process.
# set the origin time to the time of our alert
query_times = mas.QueryTime(units='day', before=5, max_before=20,
                            after=1, max_after=10,
                            origin_time=security_alert.origin_time)
query_times.display()

# API ILLUSTRATION - Find the query to use
qry.list_queries()

# API ILLUSTRATION - What does the query look like?
qry.query_help('list_hosts_matching_commandline')

# This query needs a commandline parameter which isn't supplied
# by default from the alert
# - so extract and escape this from the process
if not security_alert.primary_process:
    raise ValueError('This alert has no process entity. This section is not applicable.')

proc_match_in_ws = None
commandline = security_alert.primary_process.CommandLine
commandline = mas.utility.escape_windows_path(commandline)
if commandline.strip():
    proc_match_in_ws = qry.list_hosts_matching_commandline(provs=[query_times, security_alert],
                                                           commandline=commandline)
else:
    print('process has empty commandline')

# Check the results
# BUG FIX: corrected message typos ('proceses' -> 'processes' and
# 'found in on other hosts' -> 'found on other hosts').
if proc_match_in_ws is None or proc_match_in_ws.empty:
    print('No processes with matching commandline found on other hosts in workspace')
    print('between', query_times.start, 'and', query_times.end)
else:
    hosts = proc_match_in_ws['Computer'].drop_duplicates().shape[0]
    processes = proc_match_in_ws.shape[0]
    print('{numprocesses} processes with matching commandline found on {numhosts} hosts in workspace'\
          .format(numprocesses=processes, numhosts=hosts))
    print('between', query_times.start, 'and', query_times.end)
    print('To examine these execute the dataframe \'{}\' in a new cell'.format('proc_match_in_ws'))
    print(proc_match_in_ws[['TimeCreatedUtc','Computer', 'NewProcessName', 'CommandLine']].head())
```
<a id='host_logons'></a>[Contents](#toc)
# Host Logons
This section retrieves the logon events on the host in the alert.
You may want to use the query times to search over a broader range than the default.
```
# set the origin time to the time of our alert
# (defaults: look back 1 day, look forward 0 - widen via the widget if needed)
query_times = mas.QueryTime(units='day', origin_time=security_alert.origin_time,
                            before=1, after=0, max_before=20, max_after=1)
query_times.display()
```
<a id='logonaccount'></a>[Contents](#toc)
## Alert Logon Account
The logon associated with the process in the alert.
```
# Retrieve and display the single logon event tied to the alert process.
logon_id = security_alert.get_logon_id()

if logon_id:
    # Per the message below, these ids denote the system logon session, for
    # which a single logon event cannot be retrieved.
    if logon_id in ['0x3e7', '0X3E7', '-1', -1]:
        print('Cannot retrieve single logon event for system logon id '
              '- please continue with All Host Logons below.')
    else:
        logon_event = qry.get_host_logon(provs=[query_times, security_alert])
        nbdisp.display_logon_data(logon_event, security_alert)
else:
    print('No account entity in the source alert or the primary account had no logonId value set.')
```
### All Host Logons
Since the number of logon events may be large and, in the case of system logons, very repetitive, we use clustering to try to identify logons with unique characteristics.
In this case we use the numeric score of the account name and the logon type (i.e. interactive, service, etc.). The results of the clustered logons are shown below along with a more detailed, readable printout of the logon event information. The data here will vary depending on whether this is a Windows or Linux host.
```
# Cluster host logon events on account identity and logon type so that
# repetitive logons collapse into distinct patterns.
from msticpy.sectools.eventcluster import dbcluster_events, add_process_features, _string_score

host_logons = qry.list_host_logons(provs=[query_times, security_alert])

if host_logons is not None and not host_logons.empty:
    logon_features = host_logons.copy()
    # Numeric encodings required by the clustering algorithm.
    logon_features['AccountNum'] = host_logons.apply(lambda x: _string_score(x.Account), axis=1)
    logon_features['LogonHour'] = host_logons.apply(lambda x: x.TimeGenerated.hour, axis=1)

    # you might need to play around with the max_cluster_distance parameter.
    # decreasing this gives more clusters.
    (clus_logons, _, _) = dbcluster_events(data=logon_features, time_column='TimeGenerated',
                                           cluster_columns=['AccountNum',
                                                            'LogonType'],
                                           max_cluster_distance=0.0001)
    print('Number of input events:', len(host_logons))
    print('Number of clustered events:', len(clus_logons))
    print('\nDistinct host logon patterns:')
    display(clus_logons.sort_values('TimeGenerated'))

    # Display logon details
    # BUG FIX: this call previously ran unconditionally after the if/else,
    # raising NameError (clus_logons undefined) when no logons were found.
    nbdisp.display_logon_data(clus_logons, security_alert)
else:
    print('No logon events found for host.')
```
### Comparing All Logons with Clustered results relative to Alert time line
```
# Show timeline of events - all logons + clustered logons
if host_logons is not None and not host_logons.empty:
    # Clustered logons are overlaid so unique patterns stand out from the bulk.
    nbdisp.display_timeline(data=host_logons, overlay_data=clus_logons,
                            alert=security_alert,
                            source_columns=['Account', 'LogonType'],
                            title='All Host Logons')
```
### View Process Session and Logon Events in Timelines
This shows the timeline of the clustered logon events with the process tree obtained earlier. This allows you to get a sense of which logon was responsible for the process tree session and whether any additional logons (e.g. creating a process as another user) might be associated with the alert timeline.
*Note you should use the pan and zoom tools to align the timelines since the data may be over different time ranges.*
```
# Show timeline of events - all events
if host_logons is not None and not host_logons.empty:
    nbdisp.display_timeline(data=clus_logons, source_columns=['Account', 'LogonType'],
                            alert=security_alert,
                            title='Clustered Host Logons', height=200)
    try:
        # process_tree is only defined if the earlier process section ran.
        nbdisp.display_timeline(data=process_tree, alert=security_alert, title='Alert Process Session', height=200)
    except NameError:
        print('process_tree not available for this alert.')

# Counts of Logon types by Account
if host_logons is not None and not host_logons.empty:
    display(host_logons[['Account', 'LogonType', 'TimeGenerated']]
            .groupby(['Account','LogonType']).count()
            .rename(columns={'TimeGenerated': 'LogonCount'}))
```
<a id='failed logons'></a>[Contents](#toc)
## Failed Logons
```
# Query failed logons on this host for the current query time window.
failedLogons = qry.list_host_logon_failures(provs=[query_times, security_alert])
if failedLogons.shape[0] == 0:
    # BUG FIX: the original message was not an f-string (the placeholders
    # printed literally), interpolated `start` twice instead of start/end,
    # and wrapped print() - which returns None - in display().
    # NOTE(review): assumes security_alert exposes an `end` attribute
    # symmetric with `start` - confirm against the alert class.
    print(f'No logon failures recorded for this host between {security_alert.start} and {security_alert.end}')
failedLogons
```
<a id='appendices'></a>[Contents](#toc)
# Appendices
## Available DataFrames
```
# List the pandas DataFrames currently defined in the notebook namespace.
print('List of current DataFrames in Notebook')
print('-' * 50)
# Snapshot the names first - locals() changes while we iterate/print.
current_vars = list(locals().keys())
for var_name in current_vars:
    # Skip IPython's internal underscore-prefixed variables.
    if isinstance(locals()[var_name], pd.DataFrame) and not var_name.startswith('_'):
        print(var_name)
```
## Saving Data to CSV
To save the contents of a pandas DataFrame to an CSV
use the following syntax
```
# Write the host logons DataFrame to CSV (row index included by default).
host_logons.to_csv('host_logons.csv')
```
## Saving Data to Excel
To save the contents of a pandas DataFrame to an Excel spreadsheet
use the following syntax
```
# Write the DataFrame to an Excel worksheet. Using the writer as a context
# manager saves and closes the file automatically.
# BUG FIX: ExcelWriter.save() was deprecated and removed in pandas 2.0.
with pd.ExcelWriter('myWorksheet.xlsx') as writer:
    my_data_frame.to_excel(writer, 'Sheet1')
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import laser.fresnel_propag as prop
from laser.misc import gauss2D
import laser.zernike as zern
```
# Example 1: Propagation through an optical setup with a hole
## Laser parameters
```
# Laser and optical-system parameters (all lengths in metres).
lam = 8e-7  # Wavelength (in m)
k = 2*np.pi/lam  # Wave vector
fwhm = 0.07  # Input full-width at half-maximum

# Focusing elements (lengths in m)
f1 = 2.034  # Off-axis parabola
f2 = 0.825  # Spherical mirror
f3 = 30e-3  # Collimating lens
f4 = 0.1  # Imaging lens

# Propagation distances
z1 = f1 + 1.6  # past the parabola focus by 1.6 m
z2 = 1.33
z3 = 1.148337292 + f3
z4 = 0.2
z5 = f4
```
## Simulation parameters
```
# Grid dimensions for each propagation plane (in m) and the transverse axes.
L1 = 0.2  # Input plane dimension (in m)
L2 = L1*(z1-f1)/f1  # keep beam size to box size ratio constant
L3 = L1*(z1-f1+z2)/f1
L4 = L3*f3/(z3-f3)
L5 = L4
L6 = 10e-4  # Output plane dimension (in m)
N = 501  # Number of transverse points (odd to have a pixel at transverse axis = 0)

# Construct axes
x1 = prop.axis_vect(N)/N*L1  # Input plane transverse axis
x2 = prop.axis_vect(N)/N*L2
x3 = prop.axis_vect(N)/N*L3
x4 = prop.axis_vect(N)/N*L4
x5 = prop.axis_vect(N)/N*L5
x6 = prop.axis_vect(N)/N*L6  # Output plane transverse axis

# Build 2D transverse axes
X1, Y1 = np.meshgrid(x1,x1)
X2, Y2 = np.meshgrid(x2,x2)
X3, Y3 = np.meshgrid(x3,x3)
X4, Y4 = np.meshgrid(x4,x4)

# The fresnel_propag module needs a 2D input field,
# to just have a 1D simulation, do "np.meshgrid(x1,0)",
# and the result of "prop.prop_two_steps" below can be flattened
```
## Propagation
```
%%time
# 2 sec with 500 x 500 input array
# Propagate the beam through the whole optical chain, optionally punching
# a hole in the beam at the second plane.

"""
Change this value to "True" to add a hole in an optic
"""
with_hole = True

# Define input electric field
u1 = gauss2D(X1, Y1, fwhm, fwhm, order=6)  # 6th order super-Gaussian beam
# Add perfect lens (focal length f1) to focus the beam
u1 = u1*np.exp( -1j * k / 2 / f1 * ( X1**2 + Y1**2 ) )

# Propagate the input field
u2 = prop.prop_two_steps(u1, L1, -L2, lam, z1)  # "-L2" sign because the beam flips through the focus

# Add a hole in the beam (10 mm radius disc set to zero field)
if with_hole:
    u2[np.sqrt(X2**2 + Y2**2)<=0.01] = 0

# Propagate again
u3 = prop.prop_two_steps(u2, L2, L3, lam, z2)

# Lens and propagation again
u3 = u3*np.exp( -1j * k / 2 / f2 * ( X3**2 + Y3**2 ) )
u4 = prop.prop_two_steps(u3, L3, -L4, lam, z3)

# And again
u4 = u4*np.exp( -1j * k / 2 / f3 * ( X4**2 + Y4**2 ) )
u5 = prop.prop_two_steps(u4, L4, L5, lam, z4)

# And again
u5 = u5*np.exp( -1j * k / 2 / f4 * ( X4**2 + Y4**2 ) )  # X4 = X5
u6 = prop.prop_two_steps(u5, L5, L6, lam, z5)
```
## Display results (units in mm)
```
# Plot the intensity |u|^2 at each plane of the setup (axes in mm).
fig, ax = plt.subplots(3,2,figsize = (10,15))

ax[0,0].pcolormesh(x1*1e3,x1*1e3,np.abs(u1)**2)
ax[0,0].set_aspect('equal')
ax[0,0].set_title('Input beam')
ax[0,0].set_xlim(-60,60)
ax[0,0].set_ylim(-60,60)

ax[0,1].pcolormesh(x2*1e3,x2*1e3,np.abs(u2)**2)
ax[0,1].set_aspect('equal')
if with_hole:
    strtitle = 'with hole'
else:
    strtitle = 'without hole'
ax[0,1].set_title('After focus, on optic ' + strtitle)
ax[0,1].set_xlim(-60,60)
ax[0,1].set_ylim(-60,60)

ax[1,0].pcolormesh(x3*1e3,x3*1e3,np.abs(u3)**2)
ax[1,0].set_aspect('equal')
ax[1,0].set_title('Refocusing mirror')
ax[1,0].set_xlim(-60,60)
ax[1,0].set_ylim(-60,60)

ax[1,1].pcolormesh(x4*1e3,x4*1e3,np.abs(u4)**2)
ax[1,1].set_aspect('equal')
ax[1,1].set_title('Collimating lens')
ax[1,1].set_xlim(-2,2)
ax[1,1].set_ylim(-2,2)

ax[2,0].pcolormesh(x5*1e3,x5*1e3,np.abs(u5)**2)
ax[2,0].set_aspect('equal')
ax[2,0].set_title('Imaging lens')
ax[2,0].set_xlim(-2,2)
ax[2,0].set_ylim(-2,2)

"""
Change this value to display in log scale
"""
log_scale = True

# Normalise output intensity to its peak before (optionally) taking the log.
i6 = np.abs(u6)**2 / np.max(np.abs(u6)**2)
logtitle = ''
if log_scale:
    # vmin=-4 clips the colour scale at 1e-4 of the peak.
    ax[2,1].pcolormesh(x6*1e3,x6*1e3,np.log10(i6), vmin=-4)
    logtitle = ' (log)'
else:
    ax[2,1].pcolormesh(x6*1e3,x6*1e3,i6)
ax[2,1].set_aspect('equal')
ax[2,1].set_title('Image of focus disturbed by the hole' + logtitle)
ax[2,1].set_xlim(-0.2,0.2)
ax[2,1].set_ylim(-0.2,0.2);
```
# Example 2: Simulation of z-scan with non-flat wavefront
## Laser parameters
```
# Laser parameters for the z-scan example (SI units).
lam = 8e-7  # Wavelength (in m)
k = 2*np.pi/lam  # Wave vector
fwhm = 0.07  # Input full-width at half-maximum
f = 2  # Focal length (in m)
```
## Simulation parameters
```
# Simulation grid: input/output transverse planes plus a scan of
# longitudinal positions around the nominal focus.
L1 = 0.3  # Input plane dimension (in m)
L2 = 5e-4  # Output plane dimension (in m)
Nz = 100  # Number of longitudinal positions
N = 200  # Number of transverse points
x1 = prop.axis_vect(N)/N*L1  # Input plane transverse axis
x2 = prop.axis_vect(N)/N*L2  # Output plane transverse axis
z_scan = f + np.linspace(-1e-2, 1e-2, num=Nz)  # Longitudinal axis (+/- 10 mm around focus)
X1, Y1 = np.meshgrid(x1,x1)  # Build 2D transverse axes
```
## Propagation
```
%%time
# 100 x 200 x 200 -> about 2 sec on a standard Windows 7 desktop computer
# Build an aberrated input beam and propagate it to every z position.

# Define input electric field
u1 = gauss2D(X1, Y1, fwhm, fwhm, order=6)  # 6th order super-Gaussian beam
# Add perfect lens (focal length f) to focus the beam
u1 = u1*np.exp( -1j * k / 2 / f * ( X1**2 + Y1**2 ) )

# Add a bit of wavefront aberration
Rho, Theta = zern.cart2pol(X1/(1.1*fwhm/2), Y1/(1.1*fwhm/2))  # Scale the FWHM by 1.1 to include the wings of the profile
astig0 = zern.zernike_map(Rho, Theta, (2,2))  # 1 lambda RMS amplitude
astig45 = zern.zernike_map(Rho, Theta, (2,-2))
coma0 = zern.zernike_map(Rho, Theta, (3,1))
coma90 = zern.zernike_map(Rho, Theta, (3,-1))
trefoil0 = zern.zernike_map(Rho, Theta, (3,3))
trefoil30 = zern.zernike_map(Rho, Theta, (3,-3))
wavefront = 0.1*coma0 - 0.1*astig45  # combined aberration, in units of wavelength
u1 = u1*np.exp(1j * 2*np.pi * wavefront)  # Add to spatial phase

# Initialise output electric field
u2 = np.complex64(np.zeros([Nz,N,N]))

# Propagate the input field for every z
for idx, z in enumerate(z_scan):
    u2[idx,:] = prop.prop_two_steps(u1, L1, L2, lam, z)
i2 = np.abs(u2)**2
```
## Display z-scan
```
# Extract slices through the 3D intensity stack and plot the z-scan.
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# plain integer floor division gives the same centre indices.
i2_hor = np.squeeze(i2[:, N//2 - 1, :])     # centre-row slice vs z
i2_ver = np.squeeze(i2[:, :, N//2 - 1])     # centre-column slice vs z
i2_focus = np.squeeze(i2[Nz//2 - 1, :, :])  # transverse plane at mid-scan

# Z-scan
fig, ax = plt.subplots(2,1,figsize=(10,6), sharex=True)
ax[0].pcolormesh((z_scan-f)*1e3, x2*1e6, i2_hor.T)
ax[0].set_ylabel('Horizontal [um]')
ax[0].set_ylim(-100,100)
ax[1].pcolormesh((z_scan-f)*1e3, x2*1e6, i2_ver.T)
ax[1].set_ylabel('Vertical [um]')
ax[1].set_xlabel('Longitudinal position [mm]')
ax[1].set_ylim(-100,100)
fig.tight_layout()

# Focus
# Mask the wavefront outside the unit disc (Zernike maps defined on Rho<=1).
wave_plot = np.ma.masked_array(wavefront, mask = Rho>1)
fig2, ax2 = plt.subplots(1,2,figsize=(12,6))
ax2[0].pcolormesh(x1*1e3,x1*1e3,wave_plot)
ax2[0].set_aspect('equal')
ax2[0].set_xlabel('Horizontal [mm]')
ax2[0].set_ylabel('Vertical [mm]')
ax2[0].set_xlim(-50,50)
ax2[0].set_ylim(-50,50)
ax2[0].set_title('Input wavefront')
ax2[1].pcolormesh(x2*1e6,x2*1e6,i2_focus)
ax2[1].set_aspect('equal')
ax2[1].set_xlabel('Horizontal [um]')
ax2[1].set_ylabel('Vertical [um]')
ax2[1].set_xlim(-100,100)
ax2[1].set_ylim(-100,100)
ax2[1].set_title('Focal spot');
```
| github_jupyter |
# T81-558: Applications of Deep Neural Networks
**Module 4: Training a Neural Network**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module Video Material
Main video lecture:
* [Part 4.1: Early Stopping and Feature Vector Encoding](https://www.youtube.com/watch?v=ATuyK_HWZgc&index=12&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN)
* [Part 4.2: Evaluating Classification and Regression Networks](https://www.youtube.com/watch?v=hXkZqGi5mB4&index=13&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN)
* [Part 4.3: Cross-Validation for Neural Networks](https://www.youtube.com/watch?v=SIyMm5DFwQ8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN)
* [Part 4.4: Manual Error Calculation](https://www.youtube.com/watch?v=iMyGyZYE9Lc)
# Helpful Functions
You will see these at the top of every module. These are simply a set of reusable functions that we will make use of; each is explained in greater detail as the course progresses. Class 4 contains a complete overview of these functions.
```
import base64
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from sklearn import preprocessing
def encode_text_dummy(df, name):
    """One-hot encode column *name* in-place.

    Adds one '<name>-<value>' 0/1 column per distinct category (e.g.
    [1,0,0]/[0,1,0]/[0,0,1] for red/green/blue) and drops the original
    column.
    """
    indicator_frame = pd.get_dummies(df[name])
    for value in indicator_frame.columns:
        df[f"{name}-{value}"] = indicator_frame[value]
    df.drop(name, axis=1, inplace=True)
def encode_text_single_dummy(df, name, target_values):
    """Add one 0/1 indicator column '<name>-<value>' per entry of
    *target_values*, set to 1 where df[name] equals that value (compared
    as strings). The original column is left in place.
    """
    as_text = df[name].astype(str)
    for tv in target_values:
        df[f"{name}-{tv}"] = [1 if cell == str(tv) else 0 for cell in as_text]
def encode_text_index(df, name):
    """Label-encode column *name* in-place to integer codes (i.e. [0],[1],[2]
    for red,green,blue) and return the array of class labels, where index i
    is the label for code i.
    """
    label_encoder = preprocessing.LabelEncoder()
    df[name] = label_encoder.fit_transform(df[name])
    return label_encoder.classes_
def encode_numeric_zscore(df, name, mean=None, sd=None):
    """Replace column *name* in-place with its z-score ((x - mean) / sd).

    *mean* and *sd* default to the column's own sample statistics.
    """
    mu = df[name].mean() if mean is None else mean
    sigma = df[name].std() if sd is None else sd
    df[name] = (df[name] - mu) / sigma
def missing_median(df, name):
    """Fill missing values in column *name* with the column median (in-place)."""
    df[name] = df[name].fillna(df[name].median())
def missing_default(df, name, default_value):
    """Fill missing values in column *name* with *default_value* (in-place)."""
    df.fillna({name: default_value}, inplace=True)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    """Split *df* into (x, y) float32 arrays for training.

    Integer target columns are treated as class labels and one-hot
    encoded; any other dtype is treated as a regression target and
    returned as a single-column array.
    """
    # Collect every column except the target as a predictor.
    result = []
    for x in df.columns:
        if x != target:
            result.append(x)
    # find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    # NOTE(review): df[target] is normally a Series whose .dtypes is a single
    # dtype (not iterable); the guard below appears to handle the case where
    # duplicate column names make df[target] a DataFrame - confirm.
    target_type = target_type[0] if hasattr(
        target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df[result].values.astype(np.float32), dummies.values.astype(np.float32)
    # Regression
    return df[result].values.astype(np.float32), df[[target]].values.astype(np.float32)
def hms_string(sec_elapsed):
    """Format an elapsed time in seconds as 'H:MM:SS.ss'."""
    hours, remainder = divmod(sec_elapsed, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    return f"{int(hours)}:{int(minutes):>02}:{seconds:>05.2f}"
# Regression chart.
def chart_regression(pred, y, sort=True):
    """Plot predicted vs expected values on one chart.

    *sort* orders points by the expected value so the trend is easier
    to see. *y* is flattened, so 2D column vectors are accepted.
    """
    t = pd.DataFrame({'pred': pred, 'y': y.flatten()})
    if sort:
        t.sort_values(by=['y'], inplace=True)
    plt.plot(t['y'].tolist(), label='expected')
    plt.plot(t['pred'].tolist(), label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()
def remove_outliers(df, name, sd):
    """Drop rows (in-place) where column *name* lies at least *sd* standard
    deviations from the column mean.
    """
    deviation = (df[name] - df[name].mean()).abs()
    outlier_rows = df.index[deviation >= sd * df[name].std()]
    df.drop(outlier_rows, axis=0, inplace=True)
# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
                         data_low=None, data_high=None):
    """Linearly rescale column *name* (in-place) from [data_low, data_high]
    to [normalized_low, normalized_high].

    Bounds default to the column's own min/max.

    BUG FIX: data_low and data_high are now defaulted independently -
    previously, passing data_low without data_high left data_high as None
    and raised a TypeError in the arithmetic below.
    """
    if data_low is None:
        data_low = min(df[name])
    if data_high is None:
        data_high = max(df[name])
    df[name] = ((df[name] - data_low) / (data_high - data_low)) \
        * (normalized_high - normalized_low) + normalized_low
# This function submits an assignment. You can submit an assignment as much as you like, only the final
# submission counts. The parameters are as follows:
# data - Pandas dataframe output.
# key - Your student key that was emailed to you.
# no - The assignment class number.
# source_file - The full path to your Python or IPYNB file. This must have "_class1" as part of its name.
# . The number must match your assignment number. For example "_class2" for class assignment #2.
def submit(data,key,no,source_file=None):
    """POST the assignment (CSV of *data* plus the base64-encoded source
    file) to the course submission API, authenticating with the student
    *key*. Prints the API's success or failure response text.
    """
    # In a notebook __file__ is undefined, so the caller must name the file.
    if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.')
    if source_file is None: source_file = __file__
    suffix = '_class{}'.format(no)
    if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix))
    with open(source_file, "rb") as image_file:
        encoded_python = base64.b64encode(image_file.read()).decode('ascii')
    ext = os.path.splitext(source_file)[-1].lower()
    if ext not in ['.ipynb','.py']: raise Exception("Source file is {} must be .py or .ipynb".format(ext))
    # Both payloads travel as base64 text inside the JSON body.
    r = requests.post("https://api.heatonresearch.com/assignment-submit",
                      headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"),
                      'assignment': no, 'ext':ext, 'py':encoded_python})
    if r.status_code == 200:
        print("Success: {}".format(r.text))
    else: print("Failure: {}".format(r.text))
```
# Building the Feature Vector
Neural networks require their input to be a fixed number of columns. This is very similar to spreadsheet data. This input must be completely numeric.
It is important to represent the data in a way that the neural network can train from it. In class 6, we will see even more ways to preprocess data. For now, we will look at several of the most basic ways to transform data for a neural network.
Before we look at specific ways to preprocess data, it is important to consider four basic types of data, as defined by [Stanley Smith Stevens](https://en.wikipedia.org/wiki/Stanley_Smith_Stevens). These are commonly referred to as the [levels of measure](https://en.wikipedia.org/wiki/Level_of_measurement):
* Character Data (strings)
* **Nominal** - Individual discrete items, no order. For example: color, zip code, shape.
* **Ordinal** - Individual discrete items that can be ordered. For example: grade level, job title, Starbucks(tm) coffee size (tall, vente, grande)
* Numeric Data
* **Interval** - Numeric values, no defined start. For example, temperature. You would never say "yesterday was twice as hot as today".
* **Ratio** - Numeric values, clearly defined start. For example, speed. You would say that "The first car is going twice as fast as the second."
The following code contains several useful functions to encode the feature vector for various types of data. Encoding data:
* **encode_text_dummy** - Encode text fields, such as the iris species as a single field for each class. Three classes would become "0,0,1" "0,1,0" and "1,0,0". Encode non-target predictors this way. Good for nominal.
* **encode_text_index** - Encode text fields, such as the iris species as a single numeric field as "0" "1" and "2". Encode the target field for a classification this way. Good for nominal.
* **encode_numeric_zscore** - Encode numeric values as a z-score. Neural networks deal well with "centered" fields, zscore is usually a good starting point for interval/ratio.
*Ordinal values can be encoded as dummy or index. Later we will see a more advanced means of encoding*
Dealing with missing data:
* **missing_median** - Fill all missing values with the median value.
Creating the final feature vector:
* **to_xy** - Once all fields are numeric, this function can provide the x and y matrixes that are used to fit the neural network.
Other utility functions:
* **hms_string** - Print out an elapsed time string.
* **chart_regression** - Display a chart to show how well a regression performs.
# Dealing with Outliers
```
# Load the MPG dataset, impute missing horsepower values, then drop rows
# whose 'mpg' value lies more than 2 standard deviations from the mean.
# NOTE(review): missing_median() and remove_outliers() are helper
# functions defined earlier in this notebook.
import tensorflow.contrib.learn as skflow  # NOTE(review): unused here; tf.contrib no longer exists in TF 2.x
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore

path = "./data/"

filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])

# create feature vector
missing_median(df, 'horsepower')
df.drop('name', axis=1, inplace=True)  # keyword 'axis=' (positional axis was removed in pandas 2.0)
#encode_numeric_binary(df,'mpg',20)
#df['origin'] = df['origin'].astype(str)
#encode_text_tfidf(df, 'origin')

# Drop outliers in mpg (the original comment said horsepower, but the
# call below filters on the 'mpg' column)
print("Length before MPG outliers dropped: {}".format(len(df)))
remove_outliers(df,'mpg',2)
print("Length after MPG outliers dropped: {}".format(len(df)))

print(df)
```
## Google API Keys
Sometimes you will use external API's to obtain data. The following examples show how to use the Google API keys to encode addresses for use with neural networks. To use these, you will need your own Google API key. The key I have below is not a real key, you need to put your own in there. Google will ask for a credit card, but unless you use a very large number of lookups, there will be no actual cost. YOU ARE NOT required to get an Google API key for this class, this only shows you how. If you would like to get a Google API key, visit this site and obtain one for **geocode**.
[Google API Keys](https://developers.google.com/maps/documentation/embed/get-api-key)
```
# Placeholder Google API key -- replace with your own geocoding key.
GOOGLE_KEY = 'AIzaSyshdufhsdiuhfuhdfuhduhiuxbUrg'
```
# Other Examples: Dealing with Addresses
Addresses can be difficult to encode into a neural network. There are many different approaches, and you must consider how you can transform the address into something more meaningful. Map coordinates can be a good approach. [Latitude and longitude](https://en.wikipedia.org/wiki/Geographic_coordinate_system) can be a useful encoding. Thanks to the power of the Internet, it is relatively easy to transform an address into its latitude and longitude values. The following code determines the coordinates of [Washington University](https://wustl.edu/):
```
# Geocode a street address with the Google Maps Geocoding API.
import requests

address = "1 Brookings Dr, St. Louis, MO 63130"

# Pass key/address through 'params' so requests URL-encodes the address
# (spaces, commas, '&') instead of splicing raw text into the URL.
response = requests.get(
    'https://maps.googleapis.com/maps/api/geocode/json',
    params={'key': GOOGLE_KEY, 'address': address})
resp_json_payload = response.json()

if 'error_message' in resp_json_payload:
    # Typically an invalid/missing API key or quota problem.
    print(resp_json_payload['error_message'])
else:
    # Print the first result's lat/lng pair.
    print(resp_json_payload['results'][0]['geometry']['location'])
```
If latitude and longitude are simply fed into the neural network as two features, they might not be overly helpful. These two values would allow your neural network to cluster locations on a map. Sometimes cluster locations on a map can be useful. Consider the percentage of the population that smokes in the USA by state:

The above map shows that certain behaviors, like smoking, can be clustered by global region.
However, often you will want to transform the coordinates into distances. It is reasonably easy to estimate the distance between any two points on Earth by using the [great circle distance](https://en.wikipedia.org/wiki/Great-circle_distance) between any two points on a sphere:
The spherical law of cosines gives this distance directly; the code below implements the closely related haversine form of the formula, which computes the same great-circle distance but is more numerically stable for small distances:
$\Delta\sigma=\arccos\bigl(\sin\phi_1\cdot\sin\phi_2+\cos\phi_1\cdot\cos\phi_2\cdot\cos(\Delta\lambda)\bigr)$
$d = r \, \Delta\sigma$
```
from math import sin, cos, sqrt, atan2, radians
# Great-circle distance between two (lat, lng) points, in kilometers.
def distance_lat_lng(lat1,lng1,lat2,lng2):
    """Return the haversine distance in km between two lat/lng pairs (degrees)."""
    earth_radius_km = 6373.0  # approximate radius of Earth in km

    # Convert every coordinate from degrees to radians up front.
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lng1, lat2, lng2))

    half_dlat = (phi2 - phi1) / 2.0
    half_dlng = (lam2 - lam1) / 2.0

    # Haversine formula: 'h' is the squared half-chord length, and the
    # atan2 form recovers the central angle in a numerically stable way.
    h = sin(half_dlat) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlng) ** 2
    central_angle = 2.0 * atan2(sqrt(h), sqrt(1.0 - h))

    return earth_radius_km * central_angle
# Find the (lat, lng) coordinates for a street address via the Google
# Maps Geocoding API.
def lookup_lat_lng(address):
    """Geocode *address* and return a (latitude, longitude) tuple in degrees.

    Requires the module-level GOOGLE_KEY and the requests package.
    Returns (0, 0) and prints a message when the API finds no match.
    """
    # 'params' makes requests URL-encode the address (spaces, commas)
    # instead of splicing raw text into the URL.
    response = requests.get(
        'https://maps.googleapis.com/maps/api/geocode/json',
        params={'key': GOOGLE_KEY, 'address': address})
    payload = response.json()
    if len(payload['results']) == 0:
        print("Can't find: {}".format(address))
        return 0,0
    # Renamed from 'map' to avoid shadowing the builtin.
    location = payload['results'][0]['geometry']['location']
    return location['lat'], location['lng']
# Distance between two locations
import requests

# Geocode both campuses, then report the great-circle distance.
address1 = "1 Brookings Dr, St. Louis, MO 63130"
address2 = "3301 College Ave, Fort Lauderdale, FL 33314"

lat1, lng1 = lookup_lat_lng(address1)
lat2, lng2 = lookup_lat_lng(address2)

print("Distance, St. Louis, MO to Ft. Lauderdale, FL: {} km".format(
    distance_lat_lng(lat1,lng1,lat2,lng2)))
```
Distances can be useful to encode addresses as. You must consider what distance might be useful for your dataset. Consider:
* Distance to major metropolitan area
* Distance to competitor
* Distance to distribution center
* Distance to retail outlet
The following code calculates the distance between 10 universities and Washington University:
```
# Encoding other universities by their distance to Washington University
# Each entry: [geocodable street address, display name].
schools = [
    ["Princeton University, Princeton, NJ 08544", 'Princeton'],
    ["Massachusetts Hall, Cambridge, MA 02138", 'Harvard'],
    ["5801 S Ellis Ave, Chicago, IL 60637", 'University of Chicago'],
    ["Yale, New Haven, CT 06520", 'Yale'],
    ["116th St & Broadway, New York, NY 10027", 'Columbia University'],
    ["450 Serra Mall, Stanford, CA 94305", 'Stanford'],
    ["77 Massachusetts Ave, Cambridge, MA 02139", 'MIT'],
    ["Duke University, Durham, NC 27708", 'Duke University'],
    ["University of Pennsylvania, Philadelphia, PA 19104", 'University of Pennsylvania'],
    ["Johns Hopkins University, Baltimore, MD 21218", 'Johns Hopkins']
]

# Geocode WashU once, then compute the distance to each school.
lat1, lng1 = lookup_lat_lng("1 Brookings Dr, St. Louis, MO 63130")

for address, name in schools:
    lat2,lng2 = lookup_lat_lng(address)
    dist = distance_lat_lng(lat1,lng1,lat2,lng2)
    print("School '{}', distance to wustl is: {}".format(name,dist))
```
# Training with a Validation Set and Early Stopping
**Overfitting** occurs when a neural network is trained to the point that it begins to memorize rather than generalize.

It is important to segment the original dataset into several datasets:
* **Training Set**
* **Validation Set**
* **Holdout Set**
There are several different ways that these sets can be constructed. The following programs demonstrate some of these.
The first method is a training and validation set. The training data are used to train the neural network until the validation set no longer improves. This attempts to stop at a near optimal training point. This method will only give accurate "out of sample" predictions for the validation set, this is usually 20% or so of the data. The predictions for the training data will be overly optimistic, as these were the data that the neural network was trained on.

```
import pandas as pd
import io
import requests
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping

# Load the iris dataset; 'NA'/'?' become NaN.
path = "./data/"

filename = os.path.join(path,"iris.csv")
df = pd.read_csv(filename,na_values=['NA','?'])

# Encode the target species and build x/y matrices.
# NOTE(review): encode_text_index/to_xy are helper functions defined
# earlier in this notebook.
species = encode_text_index(df,"species")
x,y = to_xy(df,"species")

# Split into train/test (75%/25%); fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=42)

# Small feed-forward classifier: 10 -> 5 -> softmax over species.
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(5,activation='relu'))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Stop once validation loss fails to improve by 1e-3 for 5 epochs.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train, y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=2,epochs=1000)
```
Now that the neural network is trained, we can make predictions about the test set. The following code predicts the type of iris for test set and displays the first five irises.
```
from sklearn import metrics
import tensorflow as tf

# Per-class probability predictions for the held-out test set.
pred = model.predict(x_test)
print(pred[0:5]) # print first five predictions
```
These numbers are in scientific notation. Each line provides the probability that the iris is one of the 3 types of iris in the data set. For the first line, the second type of iris has a 91% probability of being the species of iris.
# Early Stopping and the Best Weights
In the previous section we used early stopping so that the training would halt once the validation set no longer saw score improvements for a number of epoch. This number of epochs that early stopping will tolerate no improvement is called *patience*. If the patience value is large, the neural network's error may continue to worsen while early stopping is patiently waiting. At some point earlier in the training the optimal set of weights was obtained for the neural network. However, at the end of training we will have the weights for the neural network that finally exhausted the patience of early stopping. The weights of this neural network might not be bad, but it would be better to have the most optimal weights during the entire training operation.
The code presented below does this. An additional monitor is used and saves a copy of the neural network to **best_weights.hdf5** each time the validation score of the neural network improves. Once training is done, we just reload this file and we have the optimal training weights that were found.
This technique is slight overkill for many of the examples for this class. It can also introduce an occasional issue (as described in the next section). Because of this, most of the examples in this course will not use this code to obtain the absolute best weights. However, for the larger, more complex datasets in this course, we will save the absolute best weights as demonstrated here.
```
import pandas as pd
import io
import requests
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint

# Load the iris dataset and build the feature matrix / one-hot target.
# NOTE(review): encode_text_index/to_xy are notebook helper functions.
path = "./data/"

filename = os.path.join(path,"iris.csv")
df = pd.read_csv(filename,na_values=['NA','?'])
species = encode_text_index(df,"species")
x,y = to_xy(df,"species")

# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=42)

# Same 10 -> 5 -> softmax network as the previous cell.
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(5,activation='relu'))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Early stopping plus a checkpoint that snapshots the weights each time
# validation loss improves, so the best epoch can be restored after training.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5", verbose=0, save_best_only=True) # save best model

model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor,checkpointer],verbose=2,epochs=1000)
model.load_weights('best_weights.hdf5') # load weights from best model
```
# Potential Keras Issue on Small Networks and Early Stopping
You might occasionally see this error:
```
OSError: Unable to create file (Unable to open file: name = 'best_weights.hdf5', errno = 22, error message = 'invalid argument', flags = 13, o_flags = 302)
```
Usually you can just rerun the code and it goes away. This is an unfortunate result of saving a file each time the validation score improves (as described in the previous section). If the errors improve too rapidly, the code might try to save the file twice and get an error from these two saves overlapping. For larger neural networks this will not be a problem because each training step will take longer, allowing plenty of time for the previous save to complete. Because of this potential issue, this code is not used with every neural network in this course.
# Calculate Classification Accuracy
Accuracy is the number of rows where the neural network correctly predicted the target class. Accuracy is only used for classification, not regression.
$ accuracy = \frac{c}{N} $
Where $c$ is the number correct and $N$ is the size of the evaluated set (training or validation). Higher accuracy numbers are desired.
As we just saw, by default, Keras will return the percent probability for each class. We can change these prediction probabilities into the actual iris predicted with **argmax**.
```
# Collapse per-class probabilities to the index of the most likely class.
pred = np.argmax(pred,axis=1) # raw probabilities to chosen class (highest probability)
print(pred)
```
Now that we have the actual iris flower predicted, we can calculate the percent accuracy (how many were correctly classified).
```
# One-hot targets back to class indices, then the fraction classified correctly.
y_compare = np.argmax(y_test,axis=1)
score = metrics.accuracy_score(y_compare, pred)
print("Accuracy score: {}".format(score))
```
# Calculate Classification Log Loss
Accuracy is like a final exam with no partial credit. However, neural networks can predict a probability of each of the target classes. Neural networks will give high probabilities to predictions that are more likely. Log loss is an error metric that penalizes confidence in wrong answers. Lower log loss values are desired.
The following code shows the per-class probability output of **model.predict**:
```
from IPython.display import display

# Don't display numpy in scientific notation
np.set_printoptions(precision=4)
np.set_printoptions(suppress=True)

# Generate class-probability predictions for the test set.
pred = model.predict(x_test)
print("Numpy array of predictions")
print(pred[0]*100)  # scaled to percentages for readability

print("As percent probability")
display(pred[0:5])

# Log loss penalizes confident wrong predictions; lower is better.
score = metrics.log_loss(y_test, pred)
print("Log loss score: {}".format(score))
```
[Log loss](https://www.kaggle.com/wiki/LogarithmicLoss) is calculated as follows:
$ \text{log loss} = -\frac{1}{N}\sum_{i=1}^N {( {y}_i\log(\hat{y}_i) + (1 - {y}_i)\log(1 - \hat{y}_i))} $
The log function is useful for penalizing wrong answers. The following code demonstrates the utility of the log function:
```
%matplotlib inline
from matplotlib.pyplot import figure, show
from numpy import arange, sin, pi

# Alternative x ranges kept for reference:
#t = arange(1e-5, 5.0, 0.00001)
#t = arange(1.0, 5.0, 0.00001) # computer scientists
t = arange(0.0, 1.0, 0.00001) # data scientists

# Plot log(x) on [0, 1): it plunges toward -infinity as x -> 0, which is
# why log loss punishes confident wrong predictions so heavily.
fig = figure(1,figsize=(12, 10))

ax1 = fig.add_subplot(211)
ax1.plot(t, np.log(t))
ax1.grid(True)
ax1.set_ylim((-8, 1.5))
ax1.set_xlim((-0.1, 2))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('log(x)')

show()
```
# Evaluating Regression Results
Regression results are evaluated differently than classification. Consider the following code that trains a neural network for the [MPG dataset](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/datasets_mpg.ipynb).
```
# Train a small regression network on the auto MPG dataset.
# NOTE(review): missing_median/to_xy and Sequential/Dense/EarlyStopping
# come from earlier cells in this notebook.
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore

path = "./data/"

filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])

# Keep car names for later display, drop the text column, and impute
# the missing horsepower values before building the feature matrix.
cars = df['name']
df.drop('name', axis=1, inplace=True)  # keyword 'axis=' (positional axis removed in pandas 2.0)
missing_median(df, 'horsepower')
x,y = to_xy(df,"mpg")

# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=45)

# 10-10-10-10 hidden stack; only the first layer has an explicit
# activation -- the remaining Dense layers are linear, as in the lecture.
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=2,epochs=1000)
```
### Mean Square Error
The mean square error is the average of the squared differences between the prediction ($\hat{y}$) and the expected ($y$). MSE values are not of a particular unit. If an MSE value has decreased for a model, that is good. However, beyond this, there is not much more you can determine. Low MSE values are desired.
$ \text{MSE} = \frac{1}{n} \sum_{i=1}^n \left(\hat{y}_i - y_i\right)^2 $
```
# Predict
pred = model.predict(x_test)

# Measure MSE error. (MSE is symmetric, so passing (pred, y_test) gives
# the same result as sklearn's documented (y_true, y_pred) order.)
score = metrics.mean_squared_error(pred,y_test)
print("Final score (MSE): {}".format(score))
```
### Root Mean Square Error
The root mean square (RMSE) is essentially the square root of the MSE. Because of this, the RMSE error is in the same units as the training data outcome. Low RMSE values are desired.
$ \text{RMSE} = \sqrt{\frac{1}{n} \sum_{i=1}^n \left(\hat{y}_i - y_i\right)^2} $
```
# Measure RMSE error. RMSE is common for regression because it is in the
# same units as the target variable.
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Final score (RMSE): {}".format(score))
```
# Training with Cross-Validation
Cross-Validation uses a number of folds, and multiple models, to generate out of sample predictions on the entire dataset. It is important to note that there will be one model (neural network) for each fold. Each model contributes part of the final out-of-sample prediction.

For new data, which is data not present in the training set, predictions from the fold models can be handled in several ways.
* Choose the model that had the highest validation score as the final model.
* Present new data to the 5 models and average the result (this is an [ensemble](https://en.wikipedia.org/wiki/Ensemble_learning)).
* Retrain a new model (using the same settings as the cross-validation) on the entire dataset. Train for as many epochs, and with the same hidden layer structure.
## Regression with Cross-Validation
The following code trains the MPG dataset using a 5-fold cross-validation. The expected performance of a neural network, of the type trained here, would be the score for the generated out-of-sample predictions.
```
# 5-fold cross-validated regression on the MPG dataset; collects the
# out-of-sample (oos) predictions across all folds and writes them to CSV.
# NOTE(review): missing_median/to_xy and EarlyStopping come from earlier cells.
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Activation

path = "./data/"

filename_read = os.path.join(path,"auto-mpg.csv")
filename_write = os.path.join(path,"auto-mpg-out-of-sample.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])

# Shuffle deterministically so folds are not biased by the file's row order.
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)

# Preprocess: keep names aside, drop the text column, impute horsepower.
cars = df['name']
df.drop('name', axis=1, inplace=True)  # keyword 'axis=' (positional axis removed in pandas 2.0)
missing_median(df, 'horsepower')

# Encode to a 2D matrix for training
x,y = to_xy(df,'mpg')

# Cross-Validate: a freshly initialized model is trained for each fold.
kf = KFold(5)

oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x):
    fold+=1
    print("Fold #{}".format(fold))

    x_train = x[train]
    y_train = y[train]
    x_test = x[test]
    y_test = y[test]

    # 20 -> 10 -> 1 regression network trained with early stopping.
    model = Sequential()
    model.add(Dense(20, input_dim=x.shape[1], activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
    model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)

    pred = model.predict(x_test)

    oos_y.append(y_test)
    oos_pred.append(pred)

    # Measure this fold's RMSE
    score = np.sqrt(metrics.mean_squared_error(pred,y_test))
    print("Fold score (RMSE): {}".format(score))

# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print("Final, out of sample score (RMSE): {}".format(score))

# Write the cross-validated prediction alongside the (shuffled) features.
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
oosDF.to_csv(filename_write,index=False)
```
## Classification with Cross-Validation
The following code trains and fits the iris dataset with Cross-Validation. It also writes out the out of sample (predictions on the test set) results.
```
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Activation

path = "./data/"

filename_read = os.path.join(path,"iris.csv")
filename_write = os.path.join(path,"iris-out-of-sample.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])

# Shuffle rows deterministically so folds are not ordered by class.
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)

# Encode to a 2D matrix for training
# NOTE(review): encode_text_index/to_xy are notebook helper functions;
# EarlyStopping is imported by an earlier cell.
species = encode_text_index(df,"species")
x,y = to_xy(df,"species")

# Cross-validate: one model per fold; each fold's test slice supplies the
# out-of-sample (oos) predictions for those rows.
kf = KFold(5)

oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x):
    fold+=1
    print("Fold #{}".format(fold))

    x_train = x[train]
    y_train = y[train]
    x_test = x[test]
    y_test = y[test]

    # 50 -> 25 -> softmax classifier trained with early stopping.
    model = Sequential()
    model.add(Dense(50, input_dim=x.shape[1], activation='relu')) # Hidden 1
    model.add(Dense(25, activation='relu')) # Hidden 2
    model.add(Dense(y.shape[1],activation='softmax')) # Output
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=25, verbose=1, mode='auto')
    model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)

    pred = model.predict(x_test)

    oos_y.append(y_test)
    pred = np.argmax(pred,axis=1) # raw probabilities to chosen class (highest probability)
    oos_pred.append(pred)

    # Measure this fold's accuracy
    y_compare = np.argmax(y_test,axis=1) # For accuracy calculation
    score = metrics.accuracy_score(y_compare, pred)
    print("Fold score (accuracy): {}".format(score))

# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
oos_y_compare = np.argmax(oos_y,axis=1) # For accuracy calculation

score = metrics.accuracy_score(oos_y_compare, oos_pred)
print("Final score (accuracy): {}".format(score))

# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
oosDF.to_csv(filename_write,index=False)
```
# Training with both a Cross-Validation and a Holdout Set
If you have a considerable amount of data, it is always valuable to set aside a holdout set before you cross-validate. This hold out set will be the final evaluation before you make use of your model for its real-world use.

The following program makes use of a holdout set, and then still cross-validates.
```
# Regression cross-validation with a held-out 10% evaluation set.
# NOTE(review): missing_median/encode_text_dummy/to_xy and Sequential/Dense
# come from earlier cells in this notebook.
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from keras.callbacks import EarlyStopping

path = "./data/"

filename_read = os.path.join(path,"auto-mpg.csv")
filename_write = os.path.join(path,"auto-mpg-holdout.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])

# create feature vector: impute horsepower, drop the name text column,
# one-hot encode the origin country.
missing_median(df, 'horsepower')
df.drop('name', axis=1, inplace=True)  # keyword 'axis=' (positional axis removed in pandas 2.0)
encode_text_dummy(df, 'origin')

# Shuffle deterministically so the folds and holdout are unbiased by row order.
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)

# Encode to a 2D matrix for training
x,y = to_xy(df,'mpg')

# Keep a 10% holdout that no fold ever trains or validates on.
x_main, x_holdout, y_main, y_holdout = train_test_split(
    x, y, test_size=0.10)

# Cross-validate over the remaining 90%.
kf = KFold(5)

oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x_main):
    fold+=1
    print("Fold #{}".format(fold))

    x_train = x_main[train]
    y_train = y_main[train]
    x_test = x_main[test]
    y_test = y_main[test]

    model = Sequential()
    model.add(Dense(20, input_dim=x.shape[1], activation='relu'))
    model.add(Dense(5, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
    model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)

    pred = model.predict(x_test)

    oos_y.append(y_test)
    oos_pred.append(pred)

    # Measure this fold's RMSE (the original comment said "accuracy",
    # but this is a regression, so RMSE is what is computed).
    score = np.sqrt(metrics.mean_squared_error(pred,y_test))
    print("Fold score (RMSE): {}".format(score))

# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print()
print("Cross-validated score (RMSE): {}".format(score))

# Score the untouched holdout with the last fold's neural network.
holdout_pred = model.predict(x_holdout)

score = np.sqrt(metrics.mean_squared_error(holdout_pred,y_holdout))
print("Holdout score (RMSE): {}".format(score))
```
# Scikit-Learn Versions: model_selection vs cross_validation
Scikit-Learn changed a bit in how cross-validation is handled. Both versions still work, but you should use the **sklearn.model_selection** import, rather than **sklearn.cross_validation**. The following shows both the new and old forms of cross-validation. All examples from this class will use the newer form.
The following two sections show both forms:
```
# Note: 2018-12-14: sklearn.cross_validation is now totally gone from sklearn, if the code
# below still works on your system, you REALLY need to update your software. :-)

# Older scikit-learn syntax for splits/cross-validation
# Still valid, but going away. Do not use.
# (Note the red box warning below)
#from sklearn.cross_validation import train_test_split
#from sklearn.cross_validation import KFold

#path = "./data/"
#filename_read = os.path.join(path,"auto-mpg.csv")
#df = pd.read_csv(filename_read,na_values=['NA','?'])
#kf = KFold(len(df), n_folds=5)

#fold = 0
#for train, test in kf:
#    fold+=1
#    print("Fold #{}: train={}, test={}".format(fold,len(train),len(test)))

# Newer scikit-learn syntax for splits/cross-validation
# Use this method (as shown above)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold

path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])

# In the new API the fold count is a constructor argument, and split()
# yields (train_index, test_index) arrays for each of the 5 folds.
kf = KFold(5)

fold = 0
for train, test in kf.split(df):
    fold+=1
    print("Fold #{}: train={}, test={}".format(fold,len(train),len(test)))
```
# How Kaggle Competitions are Scored
[Kaggle](https://www.kaggle.com/) is a platform for competitive data science. Competitions are posted onto Kaggle by companies seeking the best model for their data. Competing in a Kaggle competition is quite a bit of work, I've [competed in one Kaggle competition](https://www.kaggle.com/jeffheaton).
Kaggle awards "tiers", such as:
* Kaggle Grandmaster
* Kaggle Master
* Kaggle Expert
Your [tier](https://www.kaggle.com/progression) is based on your performance in past competitions.
To compete in Kaggle you simply provide predictions for a dataset that they post. You do not need to submit any code. Your prediction output will place you onto the [leaderboard of a competition](https://www.kaggle.com/c/otto-group-product-classification-challenge/leaderboard/public).

An original dataset is sent to Kaggle by the company. From this dataset, Kaggle posts public data that includes "train" and "test" sets. For the "train" data, the outcomes (y) are provided. For the test data, no outcomes are provided. Your submission file contains your predictions for the "test" data. When you submit your results, Kaggle will calculate a score on part of your prediction data. They do not publish which part of the submission data are used for the public and private leaderboard scores (this is a secret to prevent overfitting). While the competition is still running, Kaggle publishes the public leaderboard ranks. Once the competition ends, the private leaderboard is revealed to designate the true winners. Due to overfitting, there is sometimes an upset in positions when the final private leaderboard is revealed.
# Managing Hyperparameters
There are many different settings that you can use for a neural network. These can affect performance. The following code changes some of these, beyond their default values:
* **activation:** relu, sigmoid, tanh
* Layers/Neuron Counts
* **optimizer:** adam, sgd, rmsprop, and [others](https://keras.io/optimizers/)
```
%matplotlib inline
from matplotlib.pyplot import figure, show
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
import tensorflow as tf
path = "./data/"
preprocess = False
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# create feature vector
missing_median(df, 'horsepower')
encode_text_dummy(df, 'origin')
df.drop('name',1,inplace=True)
if preprocess:
encode_numeric_zscore(df, 'horsepower')
encode_numeric_zscore(df, 'weight')
encode_numeric_zscore(df, 'cylinders')
encode_numeric_zscore(df, 'displacement')
encode_numeric_zscore(df, 'acceleration')
# Encode to a 2D matrix for training
x,y = to_xy(df,'mpg')
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.20, random_state=42)
model = Sequential()
model.add(Dense(100, input_dim=x.shape[1], activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(25, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)
# Predict and measure RMSE
pred = model.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Score (RMSE): {}".format(score))
```
# Error Calculation from Scratch
We will now look at how to calculate RMSE and logloss by hand. You will need to be able to perform this for the midterm.
## Regression
```
from sklearn import metrics
import numpy as np

# Toy predictions vs. ground truth for demonstrating MSE/RMSE by hand.
predicted = [1.1,1.9,3.4,4.2,4.3]
expected = [1,2,3,4,5]

# Library computation (MSE is symmetric in its two arguments).
score_mse = metrics.mean_squared_error(predicted,expected)
score_rmse = np.sqrt(score_mse)
print("Score (MSE): {}".format(score_mse))
print("Score (RMSE): {}".format(score_rmse))

# Same computation by hand: the mean of the squared differences.
score_mse = ((predicted[0]-expected[0])**2 + (predicted[1]-expected[1])**2
+ (predicted[2]-expected[2])**2 + (predicted[3]-expected[3])**2
+ (predicted[4]-expected[4])**2)/len(predicted)
score_rmse = np.sqrt(score_mse)
print("Score (MSE): {}".format(score_mse))
print("Score (RMSE): {}".format(score_rmse))
```
## Classification
We will now look at how to calculate a logloss by hand. For this we look at a binary prediction. The expected is always 0 or 1. The predicted is some number between 0-1 that indicates the probability true (1). Therefore, a prediction of 1.0 is completely correct if the expected is 1 and completely wrong if the expected is 0.
```
from sklearn import metrics

# Binary ground truth and predicted probabilities of class 1.
expected = [1,1,0,0,0]
predicted = [0.9,0.99,0.1,0.05,0.06]
print(metrics.log_loss(expected,predicted))
```
Now we attempt to calculate the same logloss manually.
```
import numpy as np

# Manual binary log loss: |expected - predicted| is the probability mass
# given to the WRONG class, so 1 - |e - p| is the probability given to the
# correct class; average the negative logs of those five probabilities.
score_logloss = (np.log(1.0-np.abs(expected[0]-predicted[0]))+\
np.log(1.0-np.abs(expected[1]-predicted[1]))+\
np.log(1.0-np.abs(expected[2]-predicted[2]))+\
np.log(1.0-np.abs(expected[3]-predicted[3]))+\
np.log(1.0-np.abs(expected[4]-predicted[4])))\
*(-1/len(predicted))
print(f'Score Logloss {score_logloss}')
```
# Module 4 Assignment
You can find the first assignment here: [assignment 4](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)
| github_jupyter |
# Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
## <font color='darkblue'>Updates</font>
This notebook has been updated over the past few months. The prior version was named "v5", and the current version is now named "6a".
#### If you were working on a previous version:
* You can find your prior work by looking in the file directory for the older files (named by version name).
* To view the file directory, click on the "Coursera" icon in the top left corner of this notebook.
* Please copy your work from the older versions to the new version, in order to submit your work for grading.
#### List of Updates
* Forward propagation formula, indexing now starts at 1 instead of 0.
* Optimization function comment now says "print cost every 100 training iterations" instead of "examples".
* Fixed grammar in the comments.
* Y_prediction_test variable name is used consistently.
* Plot's axis label now says "iterations (hundred)" instead of "iterations".
* When testing the model, the test image is normalized by dividing by 255.
## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
```
## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
```
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
```
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
```
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
train_set_x_orig.shape[1]
#test_set_x_orig.shape
```
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
```
### START CODE HERE ### (โ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
```
**Expected Output for m_train, m_test and num_px**:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```
```
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
# Flatten each (num_px, num_px, 3) image into one column of the result.
# reshape(m, -1).T keeps all pixels of example i together in column i.
# BUG FIX: reshaping directly to (num_px*num_px*3, -1) fills the matrix in
# row-major order, interleaving pixels from different images and scrambling
# every training example (the sanity check below would not print [17 31 56 22 33]).
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
```
**Expected Output**:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
```
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
```
<font color='blue'>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data
## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Compute the logistic sigmoid of z, element-wise.

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z), same shape as z
    """
    ### START CODE HERE ### (≈ 1 line of code)
    # sigmoid(z) = 1 / (1 + e^(-z)); np.exp broadcasts over scalars and arrays.
    denominator = 1 + np.exp(-z)
    s = 1 / denominator
    ### END CODE HERE ###
    return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
```
**Expected Output**:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
```
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
    """
    Create a zero weight vector of shape (dim, 1) and a zero scalar bias.

    Argument:
    dim -- size of the w vector we want (number of input features)

    Returns:
    w -- initialized numpy vector of shape (dim, 1)
    b -- initialized scalar bias (0)
    """
    ### START CODE HERE ### (≈ 1 line of code)
    b = 0
    w = np.zeros(shape=(dim, 1))  # np.zeros defaults to float64
    ### END CODE HERE ###
    # Guard against accidental shape/type mistakes before training starts.
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
```
**Expected Output**:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
```
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
    """
    Compute the logistic-regression cost and its gradients for one pass.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    grads -- dict with "dw" (same shape as w) and "db" (scalar)
    cost -- negative log-likelihood cost for logistic regression
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    A = sigmoid(np.dot(w.T, X) + b)  # activations, shape (1, m)
    # Cross-entropy loss per example, then averaged and negated.
    log_losses = Y * np.log(A) + (1 - Y) * np.log(1 - A)
    cost = -np.sum(log_losses) / m

    # BACKWARD PROPAGATION (TO FIND GRAD)
    dZ = A - Y
    dw = np.dot(X, dZ.T) / m
    db = np.sum(dZ) / m

    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)  # collapse to a 0-d scalar
    assert(cost.shape == ())

    grads = {"dw": dw, "db": db}
    return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99845601]
[ 2.39507239]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.00145557813678 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 5.801545319394553 </td>
</tr>
</table>
### 4.4 - Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
```
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    Optimize w and b with plain batch gradient descent.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of the costs recorded every 100 iterations, used to plot the learning curve.
    """
    costs = []

    for step in range(num_iterations):
        # Forward/backward pass for the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]

        # Gradient-descent update: theta <- theta - alpha * d(theta)
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record (and optionally print) the cost every 100 training iterations.
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(step, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.19033591]
[ 0.12259159]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.92535983008 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.67752042]
[ 1.41625495]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.219194504541 </td>
</tr>
</table>
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
```
# GRADED FUNCTION: predict
def predict(w, b, X):
    '''
    Predict 0/1 labels using learned logistic regression parameters (w, b).

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)  # guarantee column-vector shape

    # Probability of a cat being present in each picture, shape (1, m).
    A = sigmoid(np.dot(w.T, X) + b)

    # Threshold each probability at 0.5 to obtain a hard 0/1 prediction.
    for col in range(A.shape[1]):
        Y_prediction[0, col] = 1 if A[0, col] > 0.5 else 0

    assert(Y_prediction.shape == (1, m))
    return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
```
**Expected Output**:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1. 0.]]
</td>
</tr>
</table>
<font color='blue'>
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction_test for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Build and train the logistic regression model from the previously
    implemented helper functions, in the right order.

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # One zero-initialized weight per input feature, plus a zero bias.
    w, b = initialize_with_zeros(X_train.shape[0])

    # Fit the parameters with gradient descent.
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w = parameters["w"]
    b = parameters["b"]

    # Label both splits with the learned parameters.
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Accuracy (%) = 100 - mean absolute difference between predictions and labels, scaled.
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations,
    }
    return d
```
Run the following cell to train your model.
```
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **Cost after iteration 0 ** </td>
<td> 0.693147 </td>
</tr>
<tr>
<td> <center> $\vdots$ </center> </td>
<td> <center> $\vdots$ </center> </td>
</tr>
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
```
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
```
Let's also plot the cost function and the gradients.
```
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
#### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
```
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
```
**Interpretation**:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
# NOTE(review): scipy.ndimage.imread was deprecated and removed in SciPy >= 1.2;
# on newer environments use matplotlib.pyplot.imread or the imageio package instead.
image = np.array(ndimage.imread(fname, flatten=False))
# Scale pixel values to [0, 1], matching the training preprocessing (divide by 255).
image = image/255.
# Resize to (num_px, num_px) and flatten into a single (num_px*num_px*3, 1) column.
# NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; PIL's Image.resize
# is the modern replacement.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
<font color='blue'>
**What to remember from this assignment:**
1. Preprocessing the dataset is important.
2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
- Play with the learning rate and the number of iterations
- Try different initialization methods and compare the results
- Test other preprocessings (center the data, or divide each row by its standard deviation)
Bibliography:
- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
| github_jupyter |
```
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
!pip install wget
!pip install nemo_toolkit[tts]
!mkdir configs
!wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/master/examples/tts/configs/tacotron2.yaml
!wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/master/examples/tts/configs/waveglow.yaml
import argparse
import math
import os
import copy
import shutil
import librosa
import matplotlib.pyplot as plt
from functools import partial
from scipy.io.wavfile import write
import numpy as np
import IPython.display as ipd
from ruamel.yaml import YAML
import torch
import nemo
import nemo.collections.asr as nemo_asr
import nemo.collections.tts as nemo_tts
import nemo.utils.argparse as nm_argparse
logging = nemo.logging
# Download config files
config_path = '../configs/tacotron2.yaml'
waveglow_config_path = '../configs/waveglow.yaml'
yaml = YAML(typ="safe")
with open(config_path) as file:
tacotron2_config = yaml.load(file)
labels = tacotron2_config["labels"]
with open(waveglow_config_path) as file:
waveglow_config = yaml.load(file)
```
# Download pre-trained checkpoints
Note: The checkpoint for WaveGlow is very large (>1GB), so please ensure you have sufficient storage space.
```
base_checkpoint_path = './checkpoints/'
WAVEGLOW = os.path.join(base_checkpoint_path, 'WaveGlowNM.pt')
TACOTRON_ENCODER = os.path.join(base_checkpoint_path, 'Tacotron2Encoder.pt')
TACOTRON_DECODER = os.path.join(base_checkpoint_path, 'Tacotron2Decoder.pt')
TACOTRON_POSTNET = os.path.join(base_checkpoint_path, 'Tacotron2Postnet.pt')
TEXT_EMBEDDING = os.path.join(base_checkpoint_path, 'TextEmbedding.pt')
if not os.path.exists(base_checkpoint_path):
os.makedirs(base_checkpoint_path)
if not os.path.exists(WAVEGLOW):
!wget wget https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ljspeech/versions/2/files/WaveGlowNM.pt -P {base_checkpoint_path};
if not os.path.exists(TACOTRON_ENCODER):
!wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/Tacotron2Encoder.pt -P {base_checkpoint_path};
if not os.path.exists(TACOTRON_DECODER):
!wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/Tacotron2Decoder.pt -P {base_checkpoint_path};
if not os.path.exists(TACOTRON_POSTNET):
!wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/Tacotron2Postnet.pt -P {base_checkpoint_path};
if not os.path.exists(TEXT_EMBEDDING):
!wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/TextEmbedding.pt -P {base_checkpoint_path};
# Prepare the Neural Factory
neural_factory = nemo.core.NeuralModuleFactory(
optimization_level="O0", backend=nemo.core.Backend.PyTorch
)
```
## Text Line Data Layer
Construct a simple datalayer to load a single line of text (accepted from the user) and pass it to the model
```
from nemo.backends.pytorch import DataLayerNM
from nemo.core.neural_types import *
from nemo.utils.misc import pad_to
from nemo.collections.asr.parts.dataset import TranscriptDataset
class SentenceDataLayer(DataLayerNM):
    """A simple Neural Module for loading textual transcript data.
    The path, labels, and eos_id arguments are dataset parameters.
    Args:
        pad_id (int): Label position of padding symbol
        batch_size (int): Size of batches to generate in data loader
        drop_last (bool): Whether we drop last (possibly) incomplete batch.
            Defaults to False.
        num_workers (int): Number of processes to work on data loading (0 for
            just main process).
            Defaults to 0.
    """

    @property
    def output_ports(self):
        """Returns definitions of module output ports.
        texts:
            0: AxisType(BatchTag)
            1: AxisType(TimeTag)
        texts_length:
            0: AxisType(BatchTag)
        """
        return {
            'texts': NeuralType(('B', 'T'), LabelsType()),
            'texts_length': NeuralType(tuple('B'), LengthsType()),
        }

    def __init__(
        self,
        path,
        labels,
        batch_size,
        bos_id=None,
        eos_id=None,
        pad_id=None,
        drop_last=False,
        num_workers=0,
        shuffle=True,
    ):
        super().__init__()
        # Set up dataset: keep the constructor arguments so update_dataset()
        # can rebuild the dataset after the transcript file changes.
        self.dataset_params = {
            'path': path,
            'labels': labels,
            'bos_id': bos_id,
            'eos_id': eos_id,
        }
        self._dataset = TranscriptDataset(**self.dataset_params)
        # Set up data loader
        sampler = None
        pad_id = 0 if pad_id is None else pad_id
        # NOTE(review): sampler and pad_id are computed but no DataLoader is
        # constructed here, and data_iterator returns None -- batching appears
        # to be handled elsewhere by the framework; confirm against NeMo usage.

    def update_dataset(self):
        # Re-read the transcript file at `path` (used after the user enters a
        # new sentence and the file is rewritten).
        self._dataset = TranscriptDataset(**self.dataset_params)
        logging.info('Dataset updated.')

    def __len__(self):
        # Number of transcript lines in the underlying dataset.
        return len(self._dataset)

    @property
    def dataset(self):
        # Expose the wrapped TranscriptDataset to the framework.
        return self._dataset

    @property
    def data_iterator(self):
        # No custom iterator is provided (see NOTE in __init__).
        return None
```
# Create the Tacotron 2 + WaveGlow Neural Modules
```
def create_NMs(tacotron2_config, waveglow_config, labels, decoder_infer=False, waveglow_sigma=0.6):
    """Build the Tacotron 2 and WaveGlow neural modules and restore their weights.

    Args:
        tacotron2_config (dict): Parsed Tacotron 2 config; each module's
            ``init_params`` sub-dict is forwarded to its constructor.
        waveglow_config (dict): Parsed WaveGlow config (``WaveGlowNM`` section).
        labels (list): Character vocabulary; 3 extra symbols are reserved on
            top of it (matches the bos/eos/pad ids used by the data layer).
        decoder_infer (bool): NOTE(review): unused in this body — the inference
            decoder (``Tacotron2DecoderInfer``) is always built; confirm.
        waveglow_sigma (float): Sigma used by WaveGlow at inference time.

    Returns:
        tuple: ``((data_preprocessor, text_embedding, t2_enc, t2_dec,
        t2_postnet, t2_loss, makegatetarget), waveglow)``.
    """
    data_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor(
        **tacotron2_config["AudioToMelSpectrogramPreprocessor"]["init_params"]
    )
    text_embedding_params = copy.deepcopy(tacotron2_config["TextEmbedding"]["init_params"])
    # +3 symbols on top of the base vocabulary (bos/eos/pad, cf. the data layer)
    text_embedding_params['n_symbols'] = len(labels) + 3
    # Load checkpoint for text embedding
    text_embedding = nemo_tts.TextEmbedding(**text_embedding_params)
    text_embedding.restore_from(TEXT_EMBEDDING)
    # Load checkpoint for encoder
    t2_enc = nemo_tts.Tacotron2Encoder(**tacotron2_config["Tacotron2Encoder"]["init_params"])
    t2_enc.restore_from(TACOTRON_ENCODER)
    # Load checkpoint for decoder
    decoder_params = copy.deepcopy(tacotron2_config["Tacotron2Decoder"]["init_params"])
    t2_dec = nemo_tts.Tacotron2DecoderInfer(**decoder_params)
    t2_dec.restore_from(TACOTRON_DECODER)
    # Load checkpoint for postnet
    t2_postnet = nemo_tts.Tacotron2Postnet(**tacotron2_config["Tacotron2Postnet"]["init_params"])
    t2_postnet.restore_from(TACOTRON_POSTNET)
    t2_loss = nemo_tts.Tacotron2Loss(**tacotron2_config["Tacotron2Loss"]["init_params"])
    makegatetarget = nemo_tts.MakeGate()
    total_weights = text_embedding.num_weights + t2_enc.num_weights + t2_dec.num_weights + t2_postnet.num_weights
    logging.info('================================')
    logging.info(f"Total number of parameters (Tacotron 2): {total_weights}")
    logging.info('================================')
    # Load WaveGlow model
    waveglow_args = copy.deepcopy(waveglow_config["WaveGlowNM"]["init_params"])
    waveglow_args['sigma'] = waveglow_sigma
    waveglow = nemo_tts.WaveGlowInferNM(**waveglow_args)
    waveglow.restore_from(WAVEGLOW)
    total_weights = waveglow.num_weights
    logging.info('================================')
    logging.info(f"Total number of parameters (WaveGlow): {total_weights}")
    logging.info('================================')
    return (
        data_preprocessor,
        text_embedding,
        t2_enc,
        t2_dec,
        t2_postnet,
        t2_loss,
        makegatetarget,
    ), waveglow
neural_modules, waveglow = create_NMs(tacotron2_config, waveglow_config, labels, decoder_infer=True, waveglow_sigma=0.6);
```
# Utility functions
```
def update_text(text):
    """Write *text* (plus a trailing newline) to the cached input file.

    Creates the ``cache/`` directory on first use and returns the path of the
    file that was written, so it can be handed to the data layer.
    """
    cache_dir = 'cache/'
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    target = os.path.join('cache', 'input.txt')
    with open(target, 'w', encoding='utf8') as handle:
        handle.write(f'{text}\n')
        handle.flush()
    logging.info("Updated input file with value : %s", text)
    return target
def cleanup_cachedir():
    """Delete the ``cache/`` directory and everything in it, if present."""
    cache_dir = 'cache/'
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
    logging.info("Cleaned up cache directory !")
def plot_and_save_spec(spectrogram, i, save_dir=None):
    """Render a spectrogram as an image and save it to ``spec_<i>.png``.

    When *save_dir* is given, the file is written inside that directory;
    otherwise it lands in the current working directory.
    """
    figure, axis = plt.subplots(figsize=(12, 3))
    image = axis.imshow(spectrogram, aspect="auto", origin="lower", interpolation='none')
    plt.colorbar(image, ax=axis)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()
    out_name = f"spec_{i}.png"
    if save_dir:
        out_name = os.path.join(save_dir, out_name)
    plt.savefig(out_name)
    plt.close()
```
# Initializing the inference DAG
To initialize the graph, we accept some text from the user. Later, we will accept the actual text that we want to convert to speech !
```
text = input('Please enter some initial text here :')
filepath = update_text(text)
```
## Create inference DAG
```
# Tacotron 2 DAG
(_, text_embedding, t2_enc, t2_dec, t2_postnet, _, _) = neural_modules
data_layer = SentenceDataLayer(
path=filepath,
labels=labels,
batch_size=1,
num_workers=0,
bos_id=len(labels),
eos_id=len(labels) + 1,
pad_id=len(labels) + 2,
shuffle=False,
)
transcript, transcript_len = data_layer()
transcript_embedded = text_embedding(char_phone=transcript)
transcript_encoded = t2_enc(char_phone_embeddings=transcript_embedded, embedding_length=transcript_len,)
mel_decoder, gate, alignments, mel_len = t2_dec(
char_phone_encoded=transcript_encoded, encoded_length=transcript_len,
)
mel_postnet = t2_postnet(mel_input=mel_decoder)
# WaveGlow DAG
audio_pred = waveglow(mel_spectrogram=mel_postnet)
# Setup inference tensors
infer_tensors = [mel_postnet, gate, alignments, mel_len]
```
## Run inference DAG
```
def run_tacotron2():
    """Run the Tacotron 2 half of the inference DAG.

    Uses the module-level ``neural_factory`` and ``infer_tensors``
    (mel_postnet, gate, alignments, mel_len) defined when the DAG was built.

    Returns:
        tuple: ``(evaluated_tensors, filterbank, mel_len_val)`` — the infer
        results, the mel filterbank later used for plotting spectrograms, and
        the evaluated mel lengths.
    """
    logging.info("Running Tacotron 2")
    # Run tacotron 2
    evaluated_tensors = neural_factory.infer(
        tensors=infer_tensors, offload_to_cpu=False
    )
    logging.info("Done Running Tacotron 2")
    # mel_len is the last entry of infer_tensors
    mel_len_val = evaluated_tensors[-1]
    # Mel filterbank built from the Tacotron 2 audio settings
    filterbank = librosa.filters.mel(
        sr=tacotron2_config["sample_rate"],
        n_fft=tacotron2_config["n_fft"],
        n_mels=tacotron2_config["n_mels"],
        fmax=tacotron2_config["fmax"],
    )
    return evaluated_tensors, filterbank, mel_len_val
def run_waveglow(save_dir, waveglow_denoiser_strength=0.0):
    """Run the full Tacotron 2 + WaveGlow DAG and save audio + spectrograms.

    Args:
        save_dir (str): Directory to write ``sample_<k>.wav`` and
            ``spec_<k>.png`` into (falsy -> current working directory).
        waveglow_denoiser_strength (float): If > 0, apply WaveGlow's denoiser
            with this strength to the generated audio. Defaults to 0.0.
    """
    # Run Tacotron 2 and WaveGlow
    evaluated_tensors, filterbank, mel_len_val = run_tacotron2()
    logging.info("Running Waveglow")
    evaluated_tensors = neural_factory.infer(
        tensors=[audio_pred],
    )
    logging.info("Done Running Waveglow")
    if waveglow_denoiser_strength > 0:
        logging.info("Setup WaveGlow denoiser")
        waveglow.setup_denoiser()
    logging.info("Saving results to disk")
    for i, batch in enumerate(evaluated_tensors[0]):
        audio = batch.cpu().numpy()
        for j, sample in enumerate(audio):
            # Trim padding: mel length (frames) times hop ("n_stride") gives
            # the valid number of audio samples.
            sample_len = mel_len_val[i][j] * tacotron2_config["n_stride"]
            sample = sample[:sample_len]
            # NOTE(review): `i * 32 + j` numbers samples assuming batches of
            # 32 — confirm; the data layer above is created with batch_size=1.
            save_file = f"sample_{i * 32 + j}.wav"
            if save_dir:
                save_file = os.path.join(save_dir, save_file)
            if waveglow_denoiser_strength > 0:
                sample, spec = waveglow.denoise(sample, strength=waveglow_denoiser_strength)
            else:
                spec, _ = librosa.core.magphase(librosa.core.stft(sample, n_fft=waveglow_config["n_fft"]))
            write(save_file, waveglow_config["sample_rate"], sample)
            # Project the linear magnitude spectrogram to log-mel for plotting.
            spec = np.dot(filterbank, spec)
            spec = np.log(np.clip(spec, a_min=1e-5, a_max=None))
            plot_and_save_spec(spec, i * 32 + j, save_dir)
```
# Run Tacotron 2 + WaveGlow on input text
```
text = input('Please enter some initial text here :')
filepath = update_text(text)
data_layer.update_dataset()
```
## Prepare directories to save results
```
savedir = 'results/'
saved_audio = os.path.join(savedir, 'sample_0.wav')
saved_spectrogram = os.path.join(savedir, 'spec_0.png')
if not os.path.exists(savedir):
os.makedirs(savedir)
```
## Generate the audio
Let's run the Tacotron 2 model and send the results to WaveGlow to generate the audio!
```
run_waveglow(savedir, waveglow_denoiser_strength=0.0)
```
## Lets hear the generated audio !
```
ipd.Audio(saved_audio, rate=16000)
ipd.Image(saved_spectrogram)
```
# Cleanup cachedir
```
cleanup_cachedir()
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
%env CUDA_VISIBLE_DEVICES=2
import numpy as np
import pandas as pd
import os
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import umap
from firelight.visualizers.colorization import get_distinct_colors
from matplotlib.colors import ListedColormap
import pickle
from umap.my_utils import acc_kNN, corr_pdist_subsample, \
reproducing_loss_keops, expected_loss_keops, filter_graph, KL_divergence, \
low_dim_sim_keops_dist, compute_low_dim_psim_keops_embd
import torch
import scipy.special
data_path_c_elegans = "../data/packer_c-elegans"
fig_path = "../figures"
seed = 0
repeats = 7
special_cell_type = "Seam_cell"
# load the data
pca100 = pd.read_csv(os.path.join(data_path_c_elegans,
"c-elegans_qc_final.txt"),
sep='\t',
header=None)
pca100.shape
# read meta data, obtain colors and ordering of cells that puts special_cell_type to the front and unlabelled cells to
# the back
meta = pd.read_csv(os.path.join(data_path_c_elegans,
"c-elegans_qc_final_metadata.txt"),
sep=',',
header=0)
cell_types = meta["cell.type"].to_numpy().astype(str)
labels = np.zeros(len(cell_types)).astype(int)
name_to_label = {}
for i, phase in enumerate(np.unique(cell_types)):
name_to_label[phase] = i
labels[cell_types==phase] = i
special_cell_label = name_to_label[special_cell_type]
unnamed_label = name_to_label["nan"]
np.random.seed(seed)
colors = get_distinct_colors(len(name_to_label))
np.random.shuffle(colors)
colors[special_cell_label] = [0,0,0]
colors[unnamed_label] = [0.5, 0.5, 0.5]
cmap = ListedColormap(colors)
special_order1 = np.argsort(labels == special_cell_label, kind="stable") # put idx of special label to the back
special_order2 = np.argsort(labels[special_order1] != unnamed_label, kind="stable") # put idx of unnamed label to the front
special_order = special_order1[special_order2]
special_order_no_nan = special_order[(labels==unnamed_label).sum():]
```
## PCA
```
# load / compute and save 2D PCA
# Load the cached 2D PCA if it exists, otherwise compute and cache it.
try:
    # np.save appends the '.npy' extension automatically, so the file on disk
    # is 'PCA2D.npy' — loading 'PCA2D.np' (as before) always missed the cache
    # and silently recomputed the PCA on every run.
    pca2 = np.load(os.path.join(data_path_c_elegans, "PCA2D.npy"))
except FileNotFoundError:
    pca_projector = PCA(n_components=2)
    pca2 = pca_projector.fit_transform(np.array(pca100))
    np.save(os.path.join(data_path_c_elegans, "PCA2D.npy"), pca2)
```
## UMAP
We use the hyperparameter settings of Narayan et al.'s "Assessing single-cell transcriptomic variability through density-preserving data
visualization" paper (https://doi.org/10.1038/s41587-020-00801-7) but with high-dimensional cosine metric
### Log losses after full epoch
```
# can take long, approx repeats * 30 min
umappers_c_elegans_after = []
for repeat in range(repeats):
try:
with open(os.path.join(data_path_c_elegans, f"umapperns_after_seed_{repeat}.pkl"), "rb") as file:
umapper = pickle.load((file))
except FileNotFoundError:
umapper= umap.UMAP(metric="cosine",
n_neighbors=30,
n_epochs=750,
log_losses="after",
log_samples=False,
random_state=repeat,
verbose=True)
_ = umapper.fit_transform(pca100)
with open(os.path.join(data_path_c_elegans, f"umapperns_after_seed_{repeat}.pkl"), "wb") as file:
pickle.dump(umapper, file, pickle.HIGHEST_PROTOCOL)
umappers_c_elegans_after.append(umapper)
print(f"done with run {repeat}")
# Build "inverted" high-dimensional similarity graphs for the ablation runs
# below: strong edges become weak and vice versa.
inv_graphs = []
assert "umappers_c_elegans_after" in locals()
for umapper in umappers_c_elegans_after:
    inv_graph = umapper.graph_.copy()
    # Drop edges with similarity below max/750 — presumably mirroring UMAP's
    # internal filtering of edges too weak to be sampled once in 750 epochs.
    inv_graph.data[inv_graph.data < inv_graph.data.max() / float(750)] = 0
    inv_graph.eliminate_zeros()
    # Invert the surviving similarities (min/sim): the weakest kept edge maps
    # to 1, the strongest to the smallest positive value.
    inv_graph.data = inv_graph.data.min() / inv_graph.data
    inv_graphs.append(inv_graph)
# Fit (or load cached) UMAP runs on the inverted similarity graphs.
umappers_c_elegans_inv_after = []
for i, repeat in enumerate(range(repeats)):
    try:
        with open(os.path.join(data_path_c_elegans, f"umapperns_inv_seed_{repeat}.pkl"), "rb") as file:
            umapper = pickle.load(file)
    except FileNotFoundError:
        umapper = umap.UMAP(metric="cosine",
                            n_neighbors=30,
                            n_epochs=750,
                            graph=inv_graphs[i],
                            log_samples=False,
                            # fixed typo: was `log_loses`, which did not match
                            # the `log_losses` kwarg used in the runs above,
                            # so no losses were logged for the inverted runs
                            log_losses="after",
                            random_state=repeat,
                            verbose=True)
        _ = umapper.fit_transform(pca100)
        with open(os.path.join(data_path_c_elegans, f"umapperns_inv_seed_{repeat}.pkl"), "wb") as file:
            pickle.dump(umapper, file, pickle.HIGHEST_PROTOCOL)
    umappers_c_elegans_inv_after.append(umapper)
    print(f"done with run {repeat}")
#### Quality measures of the embedding
# correlation measures
sample_size = 10000
pear_rs = []
spear_rs = []
pear_rs_inv = []
spear_rs_inv = []
pear_rs_pca = []
spear_rs_pca = []
for i in range(repeats):
pear_r, spear_r = corr_pdist_subsample(pca100.to_numpy(),
umappers_c_elegans_after[i].embedding_,
sample_size,
seed=i,
metric="cosine")
pear_rs.append(pear_r)
spear_rs.append(spear_r)
pear_r_inv, spear_r_inv = corr_pdist_subsample(pca100.to_numpy(),
umappers_c_elegans_inv_after[i].embedding_,
sample_size,
seed=i,
metric="cosine")
pear_rs_inv.append(pear_r_inv)
spear_rs_inv.append(spear_r_inv)
pear_r_pca, spear_r_pca = corr_pdist_subsample(pca100.to_numpy(),
pca2,
sample_size,
seed=i,
metric="cosine")
pear_rs_pca.append(pear_r_pca)
spear_rs_pca.append(spear_r_pca)
print(f"Done with run {i}")
pear_rs = np.stack(pear_rs)
spear_rs = np.stack(spear_rs)
pear_rs_inv = np.stack(pear_rs_inv)
spear_rs_inv = np.stack(spear_rs_inv)
pear_rs_pca = np.stack(pear_rs_pca)
spear_rs_pca = np.stack(spear_rs_pca)
print(f"Pearson UMAP mean: {pear_rs.mean()}")
print(f"Pearson UMAP std: {pear_rs.std()}")
print("\n")
print(f"Spearman UMAP mean: {spear_rs.mean()}")
print(f"Spearman UMAP std: {spear_rs.std()}")
print("\n\n")
print(f"Pearson UMAP inv mean: {pear_rs_inv.mean()}")
print(f"Pearson UMAP inv std: {pear_rs_inv.std()}")
print("\n")
print(f"Spearman UMAP inv mean: {spear_rs_inv.mean()}")
print(f"Spearman UMAP inv std: {spear_rs_inv.std()}")
print("\n\n")
print(f"Pearson PCA mean: {pear_rs_pca.mean()}")
print(f"Pearson PCA std: {pear_rs_pca.std()}")
print("\n")
print(f"Spearman PCA mean: {spear_rs_pca.mean()}")
print(f"Spearman PCA std: {spear_rs_pca.std()}")
# kNN based measure
acc10 = []
acc10_inv = []
acc10_pca = []
acc30 = []
acc30_inv = []
acc30_pca = []
for i in range(repeats):
# k=10
acc10.append(acc_kNN(pca100.to_numpy(),
umappers_c_elegans_after[i].embedding_,
k=10,
metric="cosine"))
acc10_inv.append(acc_kNN(pca100.to_numpy(),
umappers_c_elegans_inv_after[i].embedding_,
k=10,
metric="cosine"))
acc10_pca.append(acc_kNN(pca100.to_numpy(),
pca2, k=10,
metric="cosine"))
# k=30
acc30.append(acc_kNN(pca100.to_numpy(),
umappers_c_elegans_after[i].embedding_,
k=30,
metric="cosine"))
acc30_inv.append(acc_kNN(pca100.to_numpy(),
umappers_c_elegans_inv_after[i].embedding_,
k=30,
metric="cosine"))
acc30_pca.append(acc_kNN(pca100.to_numpy(),
pca2, k=30,
metric="cosine"))
acc10 = np.stack(acc10)
acc10_inv = np.stack(acc10_inv)
acc10_pca = np.stack(acc10_pca)
acc30 = np.stack(acc30)
acc30_inv = np.stack(acc30_inv)
acc30_pca = np.stack(acc30_pca)
print(f"10-NN accuracy UMAP mean: {acc10.mean()}")
print(f"10-NN accuracy UMAP std: {acc10.std()}\n")
print(f"10-NN accuracy UMAP inv mean: {acc10_inv.mean()}")
print(f"10-NN accuracy UMAP inv std: {acc10_inv.std()}\n")
print(f"10-NN accuracy PCA mean: {acc10_pca.mean()}")
print(f"10-NN accuracy PCA std: {acc10_pca.std()}\n")
print(f"30-NN accuracy UMAP mean: {acc30.mean()}")
print(f"30-NN accuracy UMAP std: {acc30.std()}\n")
print(f"30-NN accuracy UMAP inv mean: {acc30_inv.mean()}")
print(f"30-NN accuracy UMAP inv std: {acc30_inv.std()}\n")
print(f"30-NN accuracy PCA mean: {acc30_pca.mean()}")
print(f"30-NN accuracy PCA std: {acc30_pca.std()}\n")
# various loss values:
min_dist = 0.1
spread = 1.0
a, b= umap.umap_.find_ab_params(spread=spread, min_dist=min_dist)
def get_losses(embd, graph, a, b, negative_sample_rate=5):
    """Compute UMAP-style loss values for an embedding w.r.t. a high-dim graph.

    Args:
        embd (np.ndarray): 2D embedding coordinates.
        graph: Sparse high-dimensional similarity graph (converted to COO).
        a, b (float): Low-dimensional similarity curve parameters
            (from ``umap.umap_.find_ab_params``).
        negative_sample_rate (int): Negative samples per positive edge used by
            the expected-loss estimate. Defaults to 5.

    Returns:
        tuple: (total reproducing loss, total expected loss,
        KL divergence normalised over positive pairs, full KL divergence),
        where each total is the sum of an attractive and a repulsive term.
    """
    # Attractive + repulsive terms of the "reproducing" loss
    loss_a_reprod, \
    loss_r_reprod = reproducing_loss_keops(high_sim=graph.tocoo(),
                                           embedding=embd,
                                           a=a,
                                           b=b)
    loss_total_reprod = loss_a_reprod + loss_r_reprod
    # Attractive + repulsive terms of the expected loss under negative sampling
    loss_a_exp, \
    loss_r_exp = expected_loss_keops(high_sim=graph.tocoo(),
                                     embedding=embd,
                                     a=a,
                                     b=b,
                                     negative_sample_rate=negative_sample_rate,
                                     push_tail=True)
    loss_total_exp = loss_a_exp + loss_r_exp
    # KL divergence normalised over positive-edge similarities only
    KL_div_norm_pos = KL_divergence(high_sim=graph.tocoo(),
                                    a=a,
                                    b=b,
                                    embedding=embd
                                    )
    # KL divergence normalised over all pairs
    KL_div = KL_divergence(high_sim=graph.tocoo(),
                           a=a,
                           b=b,
                           embedding=embd,
                           norm_over_pos=False
                           )
    return loss_total_reprod, loss_total_exp, KL_div_norm_pos, KL_div
loss_reprod, loss_exp, KL_div_pos, KL_div, \
loss_reprod_inv, loss_exp_inv, KL_div_pos_inv, KL_div_inv, \
loss_reprod_pca, loss_exp_pca, KL_div_pos_pca, KL_div_pca = [], [], [], [], [], [], [], [], [], [], [], []
for i in range(repeats):
graph = filter_graph(umappers_c_elegans_after[i].graph_,
umappers_c_elegans_after[i].n_epochs)
l_reprod,\
l_exp,\
kl_div_pos,\
kl_div = get_losses(umappers_c_elegans_after[i].embedding_,
graph,
a,
b)
loss_reprod.append(l_reprod)
loss_exp.append(l_exp)
KL_div_pos.append(kl_div_pos)
KL_div.append(kl_div)
l_reprod_inv, \
l_exp_inv, \
kl_div_pos_inv,\
kl_div_inv = get_losses(umappers_c_elegans_inv_after[i].embedding_,
graph,
a,
b)
loss_reprod_inv.append(l_reprod_inv)
loss_exp_inv.append(l_exp_inv)
KL_div_pos_inv.append(kl_div_pos_inv)
KL_div_inv.append(kl_div_inv)
l_reprod_pca,\
l_exp_pca, \
kl_div_pos_pca, \
kl_div_pca = get_losses(pca2.astype(np.single),
graph,
a,
b)
loss_reprod_pca.append(l_reprod_pca)
loss_exp_pca.append(l_exp_pca)
KL_div_pos_pca.append(kl_div_pos_pca)
KL_div_pca.append(kl_div_pca)
loss_reprod = np.stack(loss_reprod)
loss_exp = np.stack(loss_exp)
KL_div_pos = np.stack(KL_div_pos)
KL_div = np.stack(KL_div)
loss_reprod_inv = np.stack(loss_reprod_inv)
loss_exp_inv = np.stack(loss_exp_inv)
KL_div_pos_inv = np.stack(KL_div_pos_inv)
KL_div_inv = np.stack(KL_div_inv)
loss_reprod_pca = np.stack(loss_reprod_pca)
loss_exp_pca = np.stack(loss_exp_pca)
KL_div_pos_pca = np.stack(KL_div_pos_pca)
KL_div_pca = np.stack(KL_div_pca)
print("UMAP")
print(f"Original UMAP loss mean: {loss_reprod.mean(): .2E}")
print(f"Original UMAP loss std: {loss_reprod.std(): .2E}\n")
print(f"True UMAP loss mean: {loss_exp.mean(): .2E}")
print(f"True UMAP loss std: {loss_exp.std(): .2E}\n")
print(f"KL divergence pos mean: {KL_div_pos.mean()}")
print(f"KL divergence pos std: {KL_div_pos.std()}\n")
print(f"KL divergence mean: {KL_div.mean()}")
print(f"KL divergence std: {KL_div.std()}\n")
print("\n")
print("UMAP inv ")
print(f"Original UMAP loss mean: {loss_reprod_inv.mean(): .2E}")
print(f"Original UMAP loss std: {loss_reprod_inv.std(): .2E}\n")
print(f"True UMAP loss mean: {loss_exp_inv.mean(): .2E}")
print(f"True UMAP loss std: {loss_exp_inv.std(): .2E}\n")
print(f"KL divergence pos mean: {KL_div_pos_inv.mean()}")
print(f"KL divergence pos std: {KL_div_pos_inv.std()}\n")
print(f"KL divergence mean: {KL_div_inv.mean()}")
print(f"KL divergence std: {KL_div_inv.std()}\n")
print("\n")
print("PCA")
print(f"Original UMAP loss mean: {loss_reprod_pca.mean(): .2E}")
print(f"Original UMAP loss std: {loss_reprod_pca.std(): .2E}\n")
print(f"True UMAP loss mean: {loss_exp_pca.mean(): .2E}")
print(f"True UMAP loss std: {loss_exp_pca.std(): .2E}\n")
print(f"KL divergence pos mean: {KL_div_pos_pca.mean()}")
print(f"KL divergence pos std: {KL_div_pos_pca.std()}\n")
print(f"KL divergence mean: {KL_div_pca.mean()}")
print(f"KL divergence std: {KL_div_pca.std()}\n")
def get_low_sim_pos_edges(high_sim, embd, a, b):
    """Low-dimensional similarities of the positive (high-similarity) edges.

    For every edge (row -> col) of the COO graph *high_sim*, the squared
    embedding distance is mapped through UMAP's low-dimensional similarity
    curve parameterised by *a* and *b*.
    """
    edge_starts = high_sim.row
    edge_ends = high_sim.col
    sq_dists = ((embd[edge_starts] - embd[edge_ends]) ** 2).sum(-1)
    return low_dim_sim_keops_dist(sq_dists, a, b, squared=True)
# sanity check KL pos and normal KL differ only be normalization of embedding sims
for i in range(repeats):
graph = filter_graph(umappers_c_elegans_after[i].graph_,
umappers_c_elegans_after[i].n_epochs)
# normalization by embedding sims that have positive input sim
Z_pos = get_low_sim_pos_edges(graph.tocoo(),
umappers_c_elegans_after[i].embedding_,
a,
b).sum()
# normalization by all pairs of embedding sims
Z = compute_low_dim_psim_keops_embd(umappers_c_elegans_after[i].embedding_,
a,
b).sum(1).cpu().numpy().sum()
assert np.abs((KL_div_pos[i] - np.log(Z_pos)
- (KL_div[i] - np.log(Z)))) < 0.0006
```
| github_jupyter |
# CROP Arima model
This notebook checks outputs of the Arima model
```
#!pip3 install psycopg2
#!pip3 install plotly
import os
from datetime import datetime, timedelta
import psycopg2
import pandas as pd
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
```
Parameters
```
# NOTE(security): database credentials were hard-coded in this notebook.
# They can now be overridden via environment variables; the original literals
# are kept only as fallbacks so existing behaviour is unchanged. The exposed
# password should be rotated and removed from version control.
crop_host = os.environ.get("CROP_SQL_HOST", "cropapptestsqlserver.postgres.database.azure.com")
crop_port = os.environ.get("CROP_SQL_PORT", "5432")
crop_dbname = os.environ.get("CROP_SQL_DBNAME", "app_db")
crop_user = os.environ.get("CROP_SQL_USER", "cropdbadmin@cropapptestsqlserver")
crop_password = os.environ.get("CROP_SQL_PASS", "QhXZ7qZddDr224Mc2P4k")
# Open the connection and a cursor for the queries below.
conn = psycopg2.connect(
    host=crop_host, port=crop_port, dbname=crop_dbname,
    user=crop_user, password=crop_password)
cur = conn.cursor()
```
### Arima Data Access
All Arima data for 3 sensors
```
#dt_to = datetime.now()
dt_to = datetime(2021, 4, 27)
dt_from = dt_to + timedelta(days=-15)
#sql_command = """SELECT * FROM model_run"""
sql_command = """SELECT model.id, model.model_name, model_run.sensor_id, model_run.time_forecast, model_measure.measure_name, model_product.run_id, model_value.prediction_index,model_value.prediction_value
FROM model, model_run, model_measure, model_product, model_value WHERE model.id =1 AND model_run.model_id = model.id
AND model_product.run_id = model_run.id AND model_product.measure_id = model_measure.id AND model_value.product_id = model_product.id
AND model_run.time_forecast >= '%s' AND model_run.time_forecast < '%s'""" % (dt_from, dt_to)
arima_raw = pd.read_sql(sql_command, conn)
arima = arima_raw.drop_duplicates()
arima
```
## Calculate and add column with prediction time
```
prediction_hours= []
for i in range(len(arima)):
prediction_hours.append(arima['time_forecast'][i] + timedelta(hours= int(arima['prediction_index'][i])))
arima['prediction_time'] = prediction_hours
arima
#test = arima[arima['run_id'] == 86]
#unique_run_ids = arima['run_id'].unique()
unique_sensors = arima['sensor_id'].unique()
unique_time_forecast = arima['time_forecast'].unique()
unique_measures = arima['measure_name'].unique()
unique_time_forecast
date_time = pd.to_datetime(unique_time_forecast[0])
date_time
#type(date_time)
#type(datetime(2021, 6, 16))
```
### Get Temperature data from Zensie Sensors
All Zensie sensors
```
dt_to_z = date_time + timedelta(days=+3) #datetime(2021, 6, 16)
dt_from_z = dt_to_z + timedelta(days=-5)
sql_command_zensie = """SELECT sensors.name, zensie_trh_data.* FROM sensor_types, sensors, zensie_trh_data WHERE sensors.id = zensie_trh_data.sensor_id AND zensie_trh_data.timestamp >= '%s' AND zensie_trh_data.timestamp < '%s'""" % (dt_from_z, dt_to_z)
zensie_raw = pd.read_sql(sql_command_zensie, conn)
zensie_df = zensie_raw.drop_duplicates()
zensie_df
```
## Visualisation and parameters
```
# Plot, per sensor, the Arima predictions for one forecast time against the
# observed Zensie temperatures.
time_forecast_id = 0
time_forecast = unique_time_forecast[time_forecast_id]
# One line colour per measure.
color = ["red", 'white', 'white']
for j in range(len(unique_sensors)):
    # set up plotly figure
    fig = go.Figure()
    arima_df = arima[(arima['sensor_id'] == unique_sensors[j]) & (arima['time_forecast'] == time_forecast)]
    # add one trace per Arima measure
    for i in range(len(unique_measures)):
        trace_sensor = arima_df[arima_df['measure_name'] == unique_measures[i]]
        fig.add_trace(go.Scatter(
            x=trace_sensor['prediction_time'],
            y=trace_sensor['prediction_value'],
            name=unique_measures[i],
            marker=dict(
                color=color[i],
            ),
            showlegend=True
        ))
    # add the observed Zensie temperatures for the same sensor
    zensie_df_s = zensie_df[zensie_df['sensor_id'] == unique_sensors[j]]
    fig.add_trace(go.Scatter(
        x=zensie_df_s['timestamp'],
        y=zensie_df_s['temperature'],
        name='zensie',
        marker=dict(
            color="green"
        ),
        showlegend=False
    ))
    fig.update_layout(
        title="Arima and Zensie data: Sensor id %s" % unique_sensors[j],
        xaxis_title="Date",
        yaxis_title="Temperature")
    fig.show()
# Removed the stray `customAnnotations(...)` call that followed this loop:
# the function is not defined anywhere in this notebook and it referenced a
# non-existent 'regression_sales' column, so the cell always ended with a
# NameError.
```
## Calculate root mean square error (RMSE)
```
import math  # `math.sqrt` was used below without being imported
import sklearn
from sklearn.metrics import mean_squared_error

unique_measures
arima['prediction_time']
zensie_f = zensie_df[['timestamp', 'temperature']]
# Bucket Zensie readings by hour so they can be inspected against the hourly
# Arima predictions.
f = zensie_f.groupby(by=[zensie_f.timestamp.map(lambda x: "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour)), "temperature"]).sum()
f
# NOTE(review): the original bisect-based alignment was broken — it sorted
# (column_name, Series) pairs from DataFrame.iteritems() and passed a whole
# Series as the bisect key. Actual and predicted values are instead aligned
# on timestamps with merge_asof (nearest match within one hour).
predicted_df = arima_df[arima_df['measure_name'] == unique_measures[0]]  # was masked with the full `arima` frame
merged = pd.merge_asof(
    zensie_f.sort_values('timestamp'),
    predicted_df.sort_values('prediction_time')[['prediction_time', 'prediction_value']],
    left_on='timestamp',
    right_on='prediction_time',
    direction='nearest',
    tolerance=pd.Timedelta(hours=1),
).dropna(subset=['prediction_value'])
actual = merged['temperature']
predicted = merged['prediction_value']
mse = sklearn.metrics.mean_squared_error(actual, predicted)
rmse = math.sqrt(mse)
```
| github_jupyter |
# The Egg data object
This tutorial will go over the basics of the `Egg` data object, the essential quail data structure that contains all the data you need to run analyses and plot the results. An egg is made up of two primary pieces of data:
1. `pres` data - words/stimuli that were presented to a subject
2. `rec` data - words/stimuli that were recalled by the subject.
You cannot create an `egg` without both of these components. Additionally, there are a few optional fields:
1. `features` data - features that describe each of the stimuli (for example, the category of the word, word length, etc.). This field is required for fingerprint analysis (see the fingerprint tutorial).
2. `dist_funcs` dictionary - this field allows you to control the distance functions for each of the stimulus features. For more on this, see the fingerprint tutorial.
3. `meta` dictionary - this is an optional field that allows you to store custom meta data about the dataset, such as the date collected, experiment version etc.
There are also a few other fields and functions to make organizing and modifying `eggs` easier (discussed at the bottom). Now, lets dive in and create an `egg` from scratch.
## Load in the library
```
import quail
```
## The `pres` data structure
The first piece of an `egg` is the `pres` data, or in other words the words/stimuli that were presented to the subject. For a single subject's data, the form of the input will be a list of lists, where each list is comprised of the words presented to the subject during a particular study block. Let's create a fake dataset of one subject who saw two encoding lists:
```
presented_words = [['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
```
## The `rec` data structure
The second fundamental component of an egg is the `rec` data, or the words/stimuli that were recalled by the subject. Now, let's create the recall lists:
```
recalled_words = [['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]
```
We now have the two components necessary to build an `egg`, so let's do that and then take a look at the result.
```
egg = quail.Egg(pres=presented_words, rec=recalled_words)
```
That's it! We've created our first `egg`. Let's take a closer look at how the `egg` is setup. We can use the `info` method to get a quick snapshot of the `egg`:
```
egg.info()
```
Now, let's take a closer look at how the `egg` is structured. First, we will check out the `pres` field:
```
egg.pres
```
As you can see above, the `pres` field was turned into a multi-index Pandas DataFrame organized by subject and by list. This is how the `pres` data is stored within an egg, which will make more sense when we consider larger datasets with more subjects. Next, let's take a look at the `rec` data:
```
egg.rec
```
The `rec` data is also stored as a DataFrame. Notice that if the number of recalled words is shorter than the number of presented words (as it typically will be), those columns are filled with a `None` value. Now, let's create an `egg` with two subject's data and take a look at the result.
## Multisubject `eggs`
```
# presented words
sub1_presented=[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
sub2_presented=[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
# recalled words
sub1_recalled=[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]
sub2_recalled=[['cat', 'goat', 'bat', 'hat'],['horse', 'zebra', 'zoo', 'animal']]
# combine subject data
presented_words = [sub1_presented, sub2_presented]
recalled_words = [sub1_recalled, sub2_recalled]
# create Egg
multisubject_egg = quail.Egg(pres=presented_words,rec=recalled_words)
```
As you can see above, in order to create an `egg` with more than one subject's data, all you do is create a list of subjects. Let's see how the `pres` data is organized in the egg with more than one subject:
```
multisubject_egg.pres
```
Looks identical to the single subject data, but now we have two unique subject identifiers in the `DataFrame`. The `rec` data is set up in the same way:
```
multisubject_egg.rec
```
As you add more subjects, they are simply appended to the bottom of the df with a unique subject identifier.
## The `features` data structure
The `features` data structure is an optional field that can be added to an egg. It contains features of the presented words that are required for the fingerprint analysis. The `features` data is set up like the `pres` and `rec` data, but instead of a single word, there is a dictionary of features. For example take the word "cat". A `features` dictionary for this word might look something like this:
```
cat_features = {
'category' : 'animal',
'word_length' : 3,
'starting_letter' : 'c',
}
```
You can include any stimulus feature you want in this dictionary, such as the position of the word on the screen, the color, or perhaps the font of the word. To create the `features` data structure in an `egg`, use the format of the `pres` structure, but replace the words with dictionaries:
```
# presented words
presented_words=[['cat', 'bat', 'hat', 'goat'],['zoo', 'donkey', 'zebra', 'horse']]
# presentation features
presented_words_features = [
[
{
'category' : 'animal',
'word_length' : 3,
'starting_letter' : 'c'
},
{
'category' : 'object',
'word_length' : 3,
'starting_letter' : 'b'
},
{
'category' : 'object',
'word_length' : 3,
'starting_letter' : 'h'
},
{
'category' : 'animal',
'word_length' : 4,
'starting_letter' : 'g'
},
],
[
{
'category' : 'place',
'word_length' : 3,
'starting_letter' : 'z'
},
{
'category' : 'animal',
'word_length' : 6,
'starting_letter' : 'd'
},
{
'category' : 'animal',
'word_length' : 5,
'starting_letter' : 'z'
},
{
'category' : 'animal',
'word_length' : 5,
'starting_letter' : 'h'
},
],
]
# recalled words
recalled_words=[['bat', 'cat', 'goat', 'hat'],['donkey', 'horse', 'zoo']]
```
Then, simply pass the features to the `Egg` class using the `features` key word argument:
```
# create egg object
egg = quail.Egg(pres=presented_words, rec=recalled_words, features=presented_words_features)
egg.features
```
## Defining custom distance functions for the stimulus feature dimensions
As described in the fingerprint tutorial, the `features` data structure is used to estimate how subjects cluster their recall responses with respect to the features of the encoded stimuli. Briefly, these estimates are derived by computing the similarity of neighboring recall words along each feature dimension. For example, if you recall "dog", and then the next word you recall is "cat", your clustering by category score would increase because the two recalled words are in the same category. Similarly, if after you recall "cat" you recall the word "can", your clustering by starting letter score would increase, since both words share the first letter "c". This logic can be extended to any number of feature dimensions.
Similarity between the words can be computed in a number of ways. By default, the distance function for all textual features (like category, starting letter) is binary. In other words, if the words are in the same category (cat, dog), their similarity would be 1, whereas if they are in different categories (cat, can) their similarity would be 0. For numerical features (such as word length), by default similarity between words is computed using Euclidean distance. However, the point of this digression is that you can define your own distance functions by passing a `dist_funcs` dictionary to the `Egg` class. This could be for all feature dimensions, or only a subset. Let's see an example:
```
dist_funcs = {
'word_length' : lambda x,y: (x-y)**2
}
egg = quail.Egg(pres=presented_words, rec=recalled_words, features=presented_words_features, dist_funcs=dist_funcs)
```
In the example code above, similarity between words for the word_length feature dimension will now be computed using this custom distance function, while all other feature dimensions will be set to the default.
## Adding meta data to an `egg`
Lastly, we can add meta data to the `egg`. We added this field to help researchers keep their eggs organized by adding custom meta data to the `egg` object. The data is added to the `egg` by passing the `meta` key word argument when creating the `egg`:
```
meta = {
'Researcher' : 'Andy Heusser',
'Study' : 'Egg Tutorial'
}
egg = quail.Egg(pres=presented_words, rec=recalled_words, meta=meta)
egg.info()
```
## Adding `listgroup` and `subjgroup` to an `egg`
While the `listgroup` and `subjgroup` arguments can be used within the `analyze` function, they can also be attached directly to the `egg`, allowing you to save condition labels for easy organization and easy data sharing.
To do this, simply pass one or both of the arguments when creating the `egg`:
```
# presented words
sub1_presented=[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
sub2_presented=[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
# recalled words
sub1_recalled=[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]
sub2_recalled=[['cat', 'goat', 'bat', 'hat'],['horse', 'zebra', 'zoo', 'animal']]
# combine subject data
presented_words = [sub1_presented, sub2_presented]
recalled_words = [sub1_recalled, sub2_recalled]
# create Egg
multisubject_egg = quail.Egg(pres=presented_words,rec=recalled_words, subjgroup=['condition1', 'condition2'],
listgroup=['early','late'])
```
## Saving an `egg`
Once you have created your egg, you can save it for use later, or to share with colleagues. To do this, simply call the `save` method with a filepath:
```
multisubject_egg.save('myegg.p')
```
This will save your `egg` using the package `pickle`. The result? A pickled egg! To load this egg later, simply call the `quail.load` function with the path of the egg:
```
egg = quail.load('myegg.p')
```
## Stacking `eggs`
We now have two separate eggs, each with a single subject's data. Let's combine them by passing a `list` of `eggs` to the `stack_eggs` function:
```
# subject 1 data
sub1_presented=[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
sub1_recalled=[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]
# create subject 2 egg
subject1_egg = quail.Egg(pres=sub1_presented, rec=sub1_recalled)
# subject 2 data
sub2_presented=[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]
sub2_recalled=[['cat', 'goat', 'bat', 'hat'],['horse', 'zebra', 'zoo', 'animal']]
# create subject 2 egg
subject2_egg = quail.Egg(pres=sub2_presented, rec=sub2_recalled)
stacked_eggs = quail.stack_eggs([subject1_egg, subject2_egg])
stacked_eggs.pres
```
## Cracking `eggs`
You can use the `crack_egg` function to slice out a subset of subjects or lists:
```
cracked_egg = quail.crack_egg(stacked_eggs, subjects=[1], lists=[0])
cracked_egg.pres
```
Alternatively, you can use the `crack` method, which does the same thing:
```
stacked_eggs.crack(subjects=[0,1], lists=[1]).pres
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import os
import glob
# fix the RNG seed so the drawn samples are reproducible
np.random.seed(42)

# translate PLAsTiCC target codes into human-readable class names
types_names = {90:'Ia', 67: '91bg', 52:'Iax', 42:'II', 62:'Ibc',
               95: 'SLSN', 15:'TDE', 64:'KN', 88:'AGN', 92:'RRL', 65:'M-dwarf',
               16:'EB',53:'Mira', 6:'MicroL', 991:'MicroLB', 992:'ILOT',
               993:'CART', 994:'PISN',995:'MLString'}
# PLAsTiCC target code -> SNANA type index
# (nested dicts enumerate the sub-types of II, Ibc and MicroL)
SNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14},
               67:41, 52:43, 64:51, 95:60, 994:61, 992:62,
               993:63, 15:64, 88:70, 92:80, 65:81, 16:83,
               53:84, 991:90, 6:{1:91, 2:93}}
# SNANA type index -> class name (inverse of the mapping above, flattened)
SNANA_names = {11: 'Ia', 3:'Ibc', 13: 'Ibc', 2:'II', 12:'II', 14:'II',
               41: '91bg', 43:'Iax', 51:'KN', 60:'SLSN', 61:'PISN', 62:'ILOT',
               63:'CART', 64:'TDE', 70:'AGN', 80:'RRL', 81:'M-dwarf', 83:'EB',
               84:'Mira', 90:'MicroLB', 91:'MicroL', 93:'MicroL'}
```
```
# read zenodo metadata
fname = '/media/RESSPECT/data/PLAsTiCC/PLAsTiCC_zenodo/plasticc_test_metadata.csv'
test_metadata = pd.read_csv(fname)
# separate fields
ddf_flag = test_metadata['ddf_bool'].values == 1
ids_ddf = test_metadata['object_id'].values[ddf_flag]
ids_wfd = test_metadata['object_id'].values[~ddf_flag]
```
# Create perfect samples
```
data_dir = '/media2/RESSPECT2/clean_output/'
```
## For DDF
```
# read all Ias in DDF
salt2_Ia_DDF = pd.read_csv(data_dir + '/DDF/SALT2_fit/Ia/fitres/master_fitres_1.fitres',
comment='#', delim_whitespace=True)
salt2_Ia_DDF['zHD'] = salt2_Ia_DDF['SIM_ZCMB'] # requirement of so SALT2mu can work
# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6
for i in range(v):
perfect_Ia_DDF = salt2_Ia_DDF.sample(n=nobjs, replace=False)
perfect_Ia_DDF.to_csv(data_dir + 'DDF/v' + str(i) + '/samples/perfect' + \
str(nobjs) + '.csv', sep=' ', index=False)
```
## For WFD
```
# read all Ias in WFD
fnames_Ia = glob.glob(data_dir + '/WFD/SALT2_fit/Ia/master_fitres_*.fitres')
salt2_WFD = []
for name in fnames_Ia:
fitres_temp = pd.read_csv(name, delim_whitespace=True,
comment='#')
fitres_temp['zHD'] = fitres_temp['SIM_ZCMB']
salt2_WFD.append(fitres_temp)
salt2_Ia_WFD = pd.concat(salt2_WFD, ignore_index=True)
# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6
for i in range(v):
perfect_Ia_WFD = salt2_Ia_WFD.sample(n=nobjs, replace=False)
#perfect_Ia_WFD.to_csv(data_dir + 'WFD/v' + str(i) + '/samples/perfect' + \
# str(nobjs) + '.csv', sep=' ', index=False)
```
# Create Random samples
## For DDF
```
# list of classes surviving SALT2 fit
surv_class_DDF = ['91bg', 'AGN', 'CART', 'Ia', 'Iax', 'Ibc', 'II', 'TDE']
# read all SALT2 fit results for DDF
all_DDF = []
for obj_type in surv_class_DDF:
flist = glob.glob(data_dir + '/DDF/SALT2_fit/' + obj_type + '/fitres/master_fitres_*.fitres')
for name in flist:
data_temp = pd.read_csv(name, comment='#', delim_whitespace=True)
data_temp['zHD'] = data_temp['SIM_ZCMB']
all_DDF.append(data_temp)
all_surv_DDF = pd.concat(all_DDF, ignore_index=True)
all_surv_DDF.fillna(-99, inplace=True)
# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6
for i in range(v):
random_DDF = all_surv_DDF.sample(n=nobjs, replace=False)
random_DDF.to_csv(data_dir + 'DDF/v' + str(i) + '/samples/random' + \
str(nobjs) + '.csv', sep=' ', index=False)
snana_numbers_ddf, freq_ddf = np.unique(all_surv_DDF['SIM_TYPE_INDEX'].values,
return_counts=True)
for i in range(len(freq_ddf)):
if snana_numbers_ddf[i] > 0:
print(SNANA_names[snana_numbers_ddf[i]], '\t', freq_ddf[i],
'\t', 100*freq_ddf[i]/all_surv_DDF.shape[0])
# type II
100*(525+262+63)/all_surv_DDF.shape[0]
# type Ibc
100*(158+93)/all_surv_DDF.shape[0]
all_surv_DDF.shape
```
## For WFD
```
# list of classes surviving SALT2 fit
surv_class_WFD = ['91bg', 'AGN', 'CART', 'Ia', 'Iax', 'Ibc', 'II', 'TDE', 'ILOT', 'PISN', 'SLSN']
# read all SALT2 fit results for WFD
all_WFD = []
for obj_type in surv_class_WFD:
flist = glob.glob('/media/RESSPECT/data/PLAsTiCC/for_metrics/final_data/WFD/SALT2_fit/' + obj_type + '/master_fitres_*.fitres')
for name in flist:
data_temp = pd.read_csv(name, comment='#', delim_whitespace=True)
data_temp['zHD'] = data_temp['SIM_ZCMB']
data_temp.fillna(-99, inplace=True)
all_WFD.append(data_temp)
all_surv_WFD = pd.concat(all_WFD, ignore_index=True)
all_surv_WFD.fillna(-99, inplace=True)
# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6
for i in range(6, v):
random_WFD = all_surv_WFD.sample(n=nobjs, replace=False)
#random_WFD.to_csv(data_dir + 'WFD/v' + str(i) + '/samples/perfect' + \
# str(nobjs) + '.csv', sep=' ', index=False)
salt2_Ia_WFD.shape[0]/all_surv_WFD.shape[0]
snana_numbers_wfd, freq_wfd = np.unique(all_surv_WFD['SIM_TYPE_INDEX'].values, return_counts=True)
for i in range(len(freq_wfd)):
print(SNANA_names[snana_numbers_wfd[i]], '\t', freq_wfd[i], '\t', 100*freq_wfd[i]/all_surv_WFD.shape[0])
# type II
100*(31414+ 26065+4854)/all_surv_WFD.shape[0]
# type Ibc
100*(3558+2508)/all_surv_WFD.shape[0]
all_surv_WFD.shape
```
# Create Fiducial samples
## For DDF
```
# read avocado photometric classification probabilities for DDF
fname_DDF = data_dir + 'DDF/avocado/avocado_DDF.csv'
avocado_DDF = pd.read_csv(fname_DDF, names=['object_id','6','15','16','42','52','53','62','64','65','67','88',
                                            '90','92','95'], skiprows=1)

# determine final classification: the class with the highest probability per object
class_final_DDF = []
for i in range(avocado_DDF.shape[0]):
    indx = int(np.argmax(avocado_DDF.iloc[i].values[1:]))    # argmax == argsort()[-1] in one pass
    code = int(avocado_DDF.keys()[indx + 1])                 # +1 skips the object_id column
    class_final_DDF.append(types_names[code])
class_final_DDF = np.array(class_final_DDF)

# get photometrically classified Ia
flag_class_Ia_DDF = class_final_DDF == 'Ia'
avocado_DDF_Ia = avocado_DDF[flag_class_Ia_DDF]

# get SALT2 fit for objs photometrically classified as Ia
# (a set gives O(1) membership tests; the original scanned the whole id array per object)
ia_ids_DDF = set(avocado_DDF_Ia['object_id'].values)
avocado_DDF_Ia_fitres_flag = np.array([item in ia_ids_DDF
                                       for item in all_surv_DDF['CID'].values])
all_avocado_DDF_Ia = all_surv_DDF[avocado_DDF_Ia_fitres_flag]

# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6

for i in range(v):
    fiducial_DDF = all_avocado_DDF_Ia.sample(n=nobjs, replace=False)
    fiducial_DDF.to_csv(data_dir + 'DDF/v' + str(i) + '/samples/fiducial' + \
                        str(nobjs) + '.csv', sep=' ', index=False)
```
## For WFD
```
# read avocado photometric classification probabilities for WFD
fname_WFD = data_dir + 'WFD/avocado/avocado_WFD.csv'
avocado_WFD = pd.read_csv(fname_WFD, names=['object_id','6','15','16','42','52','53','62','64','65','67','88',
                                            '90','92','95'], skiprows=1)

# determine final classification: the class with the highest probability per object
class_final_WFD = []
for i in range(avocado_WFD.shape[0]):
    indx = int(np.argmax(avocado_WFD.iloc[i].values[1:]))    # argmax == argsort()[-1] in one pass
    code = int(avocado_WFD.keys()[indx + 1])                 # +1 skips the object_id column
    class_final_WFD.append(types_names[code])
class_final_WFD = np.array(class_final_WFD)

# get photometrically classified Ia
flag_class_Ia_WFD = class_final_WFD == 'Ia'
avocado_WFD_Ia = avocado_WFD[flag_class_Ia_WFD]

# get SALT2 fit for objs photometrically classified as Ia
# (a set gives O(1) membership tests; the original scanned the whole id array per object)
ia_ids_WFD = set(avocado_WFD_Ia['object_id'].values)
avocado_WFD_Ia_fitres_flag = np.array([item in ia_ids_WFD
                                       for item in all_surv_WFD['CID'].values])
all_avocado_WFD_Ia = all_surv_WFD[avocado_WFD_Ia_fitres_flag]

# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6

for i in range(v):
    fiducial_WFD = all_avocado_WFD_Ia.sample(n=nobjs, replace=False)
    # saving is disabled here; uncomment to write the sample to disk
    #fiducial_WFD.to_csv(data_dir + 'WFD/v' + str(i) + '/samples/perfect' + \
    #                    str(nobjs) + '.csv', sep=' ', index=False)
```
# Create single contaminant samples
## For DDF
```
# levels of contamination (fraction of non-Ia objects in each sample)
cont_DDF = {'II': [0.28, 0.25, 0.1, 0.05, 0.02, 0.01],
            'Ibc': [0.05, 0.02, 0.01],
            'Iax': [0.14, 0.1, 0.05, 0.02, 0.01],
            'CART': [0.009],
            '91bg': [0.002],
            'AGN': [0.001]}
# class label -> name used in the output file names
complete_names = {'II': 'SNII', 'Ibc': 'SNIbc', 'Iax': 'SNIax', 'CART':'CART',
                  '91bg':'SNIa-91bg', 'AGN':'AGN'}

# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6

for i in range(v):
    for obj_class in cont_DDF:
        # read all contaminants of this class surviving the SALT2 fit
        flist = glob.glob(data_dir + '/DDF/SALT2_fit/' + obj_class + '/fitres/master_fitres_*.fitres')
        sample_temp = []
        for name in flist:
            sample_cont0 = pd.read_csv(name, comment='#', delim_whitespace=True)
            sample_cont0['zHD'] = sample_cont0['SIM_ZCMB']    # requirement so SALT2mu can work
            sample_temp.append(sample_cont0)
        sample_cont = pd.concat(sample_temp, ignore_index=True)
        # BUG FIX: fillna returns a new frame unless inplace=True; the original
        # call discarded its result, leaving NaNs in sample_cont
        sample_cont.fillna(-99, inplace=True)
        for perc in cont_DDF[obj_class]:
            # build a sample with (1 - perc) Ia and perc contaminants
            Ia_temp = salt2_Ia_DDF.sample(n=int((1 - perc) * nobjs), replace=False)
            cont_temp = sample_cont.sample(n=int(perc * nobjs), replace=False)
            sample_final = pd.concat([Ia_temp, cont_temp], ignore_index=True)
            sample_final.fillna(-99, inplace=True)
            # sub-percent contamination levels keep one decimal place in the file name
            if obj_class not in ['CART', '91bg', 'AGN']:
                sample_final.to_csv(data_dir + 'DDF/v' + str(i) + '/samples/' + str(int(100 - 100 * perc)) + \
                                    'SNIa' + str(int(100 * perc)) + complete_names[obj_class] + '.csv',
                                    sep=' ', index=False)
            else:
                sample_final.to_csv(data_dir + 'DDF/v' + str(i) + '/samples/' + str(round(100 - 100 * perc, 1)) + \
                                    'SNIa' + str(round(100 * perc, 1)) + complete_names[obj_class] + '.csv',
                                    sep=' ', index=False)
```
## For WFD
```
# levels of contamination (fraction of non-Ia objects in each sample)
cont_WFD = {'II': [0.28, 0.25, 0.1, 0.05, 0.02, 0.01],
            'Ibc': [0.1, 0.05, 0.02, 0.01],
            'Iax': [0.25, 0.1, 0.05, 0.02, 0.01],
            '91bg': [0.05, 0.02, 0.01],
            'AGN': [0.05, 0.02, 0.01],
            'TDE': [0.004],
            'CART': [0.003]}
# class label -> name used in the output file names
complete_names ={'II': 'SNII', 'Ibc': 'SNIbc', 'Iax': 'SNIax', 'CART':'CART',
                 '91bg':'SNIa-91bg', 'AGN':'AGN', 'TDE':'TDE'}

# choose sample size
nobjs = 3000
# choose number of versions of the same sample to generate
v = 6

for i in range(v):
    for obj_class in list(cont_WFD.keys()):
        # read all contaminants surviving SALT2 fit
        flist = glob.glob(data_dir + '/WFD/SALT2_fit/' + obj_class + '/fitres/master_fitres_*.fitres')
        sample_cont = []
        for name in flist:
            temp_cont = pd.read_csv(name, comment='#', delim_whitespace=True)
            temp_cont['zHD'] = temp_cont['SIM_ZCMB']    # requirement so SALT2mu can work
            sample_cont.append(temp_cont)
        sample_cont2 = pd.concat(sample_cont, ignore_index=True)
        for perc in cont_WFD[obj_class]:
            # build a sample with (1 - perc) Ia and perc contaminants;
            # NaNs are filled after the concat below
            Ia_temp2 = salt2_Ia_WFD.sample(n=int((1-perc)*nobjs), replace=False)
            cont_temp2 = sample_cont2.sample(n=int(perc*nobjs), replace = False)
            sample_final = pd.concat([Ia_temp2, cont_temp2], ignore_index=True)
            sample_final.fillna(-99, inplace=True)
            # saving is disabled here; uncomment to write the sample to disk
            #sample_final.to_csv(data_dir + 'WFD/v' + str(i) + '/samples/' + str(int(100 - 100 * perc)) + \
            #                    'SNIa' + str(int(100 * perc)) + complete_names[obj_class] + '.csv',
            #                    sep=' ', index=False)
```
| github_jupyter |
# Graphillionใซ่งฆใใฆใฟใใ
ใใใใGraphillionใฎ่งฃ่ชฌใซๅ
ฅใใพใ๏ผใพใใฏใฏใใใซๆฐใไธใใๅงใใๅ้กใ็ดนไปใ๏ผใใใGraphillionใไฝฟใฃใฆใฉใฎใใใซ่งฃใใใๅ
ทไฝ็ใชใณใผใใไบคใใฆ่งฃ่ชฌใใพใ๏ผGraphillionใฎๆฉ่ฝใฎ่ฉณ็ดฐใใใณๅ
้จใงใฉใฎใใใชๅฆ็ใ่ตฐใฃใฆใใใฎใใซใคใใฆใฏๆฌก็ซ ไปฅ้ใง่งฃ่ชฌใใพใ๏ผ
## ๆฐใไธใใๅงใใๅ้ก
ใพใใฏไปฅไธใฎๅ็ปใๅพก่ฆงใใ ใใ๏ผ
```
from IPython.display import YouTubeVideo
YouTubeVideo("Q4gTV4r0zRs")
```
ใใฎๅ็ปใงๅใไธใใฆใใๅ้กใ**ๆฐใไธใใๅงใใๅ้ก**ใจใใถใใจใซใใพใ๏ผๅ็ปใงใฏใใงในใใผใใฎใใใชๅฝขใใใใฐใฉใใฎ่งใฎ้ ็น้ใฎ็ต่ทฏใฎ็ทๆฐใ่ชฟในไธใใฆใใพใใ๏ผใใฎใใใช็ต่ทฏใฎ็ทๆฐใฏใฐใฉใใๅคงใใใชใใซใคใใฆๆๆฐ็ใซๅขๅ ใใใใ๏ผๅ
จใฆใฎ็ต่ทฏใไธใคใใคๆฐใไธใใใจๅคฉๆๅญฆ็ใชๆ้ใใใใฆใใพใใพใ๏ผใใฎๅ้กใGraphillionใ็จใใฆ่งฃใๆนๆณใ่ชฌๆใใพใ๏ผ
## Graphillion ใฏใใใฎไธๆญฉ
ๅ
ฌ้ใใใฆใใGraphillionใฉใคใใฉใชใซใฏๆฐใไธใใๅงใใๅ้กใ่งฃใๆนๆณๅฎๆผใใใใใฎใขใธใฅใผใซใๅซใพใใฆใใพใ๏ผไปๅใฏใใฎใขใธใฅใผใซใ็จใใฆๆฐใไธใใๅงใใๅ้กใ่งฃใใฆใใใพใ๏ผ
Colabใๅฉ็จใใฆใใๆนใฏไปฅไธใฎใปใซใๅฎ่กใใฆGraphillionใจใใฅใผใใชใขใซ็จใฎใขใธใฅใผใซ[tutorial_util.py](https://github.com/nsnmsak/graphillion_tutorial/blob/master/ja/tutorial_util.py)ใใคใณในใใผใซใใพใใใ๏ผ
ๅฅใฎใใผใใใใฏใ้ใใใณใซColab็ฐๅขใซgraphillionใใคใณในใใผใซใใๅฟ
่ฆใใใใพใ๏ผ
```
!pip install graphillion
!git clone https://github.com/nsnmsak/graphillion_tutorial
!cp graphillion_tutorial/ja/tutorial_util.py .
```
ๆฌกใซgraphillionใขใธใฅใผใซใใ`GraphSet`ใจ`tutorial`ใใคใณใใผใใใพใ๏ผ
```
from graphillion import GraphSet, tutorial
from tutorial_util import draw_subgraph, draw_universe
```
`GraphSet`ใฏGraphillionใฎไธญๅฟ็ใชๅฝนๅฒใๆใใใฏใฉในใงใใ๏ผใฐใฉใใฎ้ๅใๆฑใๅ็จฎใกใฝใใใๅฎ่ฃ
ใใใฆใใพใ๏ผ`tutorial`ใขใธใฅใผใซใซใฏGraphillionใฎWebใใผใธใซๆฒ่ผใใใฆใใ[ใใฅใผใใชใขใซ](https://github.com/takemaru/graphillion/wiki#%E3%83%81%E3%83%A5%E3%83%BC%E3%83%88%E3%83%AA%E3%82%A2%E3%83%AB) ใฎใใใฎ่ฃๅฉ็ใช้ขๆฐใๅฎ่ฃ
ใใใฆใใพใ๏ผ`draw_subgraph`ใฏใใฎใใฅใผใใชใขใซใฎใใใซ็จๆใใ๏ผใฐใฉใๆ็ปใฎใใใฎ่ฃๅฉ้ขๆฐใงใ๏ผ
ใงใฏ๏ผGraphillionใงๆฐใไธใใๅงใใๅ้กใ่งฃใใฆใใใพใใใ๏ผใพใๅ้กใฎใฐใฉใใๆบๅใใพใ๏ผ
```
grid = tutorial.grid(7, 7)
grid
```
`tutorial.grid(n, m)`ใฏใๅงใใๅ้กใงๆฑใฃใฆใใ๏ผๆ ผๅญ็ถใฎใฐใฉใใ็ๆใใใใใฎใกใฝใใใงใ๏ผใชใ๏ผไปฅไธใงใฏใใฎใฐใฉใใใฐใชใใใฐใฉใใจใใณใพใ๏ผ็ๆใใใใฐใชใใใฐใฉใใฏๆดๆฐใฎใใขใฎใชในใใจใใฆ่กจ็พใใใฆใใพใ๏ผๅๆดๆฐใฏ้ ็นใฎ็ชๅทใ่กจใ๏ผๅใใขใฏ้ ็น้ใฎ่พบใ่กจใใฆใใพใ๏ผ
ๆฌกใซใใฎใฐใชใใใฐใฉใไธใฎ็ต่ทฏใฎ้ๅใ่กจใGraphSetใใคใใฃใฆใใใพใ๏ผใพใ๏ผ`GraphSet`ใฏใฉในใซ็ต่ทฏใ่ใใๅฏพ่ฑกใจใชใใฐใชใใใฐใฉใ`grid`ใ็ป้ฒใใพใ๏ผ
```
GraphSet.set_universe(grid)
```
Graphillionใๆฑใใฐใฉใใฎ้ๅใฏ๏ผใใใฐใฉใ๏ผไปฅไธ๏ผ*universe*ใจใใถ๏ผใฎ้จๅใฐใฉใใฎ้ๅใจใใฆๅฎ็พฉใใใพใ๏ผ`GraphSet.set_universe()`ใฏ๏ผuniverseใ่จญๅฎใใใกใฝใใใงใ๏ผ
`draw_universe()`ใกใฝใใใ็จใใใใจใงuniverseใๆ็ปใใใใจใใงใใพใ๏ผ
```
draw_universe()
```
ใงใฏใใใใๆฐใไธใใๅงใใๅ้กใ่งฃใใฆใฟใพใใใ๏ผGraphillionใงใฏไปฅไธใฎใกใฝใใใง็ญใใๆฑใใใใจใใงใใพใ๏ผ
```
paths = GraphSet.paths(1, 64)
```
`GraphSet.paths(s, t)`ใฏ๏ผ้ ็น`s` ใใ`t`ใพใงใฎ๏ผใซใผใใๅซใพใชใๅ
จใฆใฎ็ต่ทฏใฎ้ๅใ่กจใGraphSetใชใใธใงใฏใใๆฑใใใกใฝใใใงใ๏ผๅคงใใใฐใฉใใๆฑใๅ ดๅใฏๅฎ่กใซๆ้ใใใใใใจใใใใพใใ๏ผใใฎใใฅใผใใชใขใซใงๆฑใ็จๅบฆใฎ่ฆๆจกใฎใฐใฉใใชใไธ็ฌใง่จ็ฎ็ตๆใๅพใใใใฏใใงใ๏ผ`paths`ใฏๅ
จใฆใฎ็ต่ทฏใฎ้ๅใ่กจใGraphSetใชใใธใงใฏใใงใ๏ผ
ๆฌกใซ`paths`ใซๅซใพใใ็ต่ทฏใฎๆฐใๆฑใใฆใฟใพใใใ๏ผ
```
len(paths)
```
Graphillionใ็จใใใใจใง๏ผๆฐใไธใใๅงใใๅ็ปใงใฏในใผใใผใณใณใใฅใผใฟใง่งฃใใฆใใๅ้กใไธ็ฌใง่งฃใใใจใใงใใพใใ๏ผ
Graphillionใฏใๅงใใๅ้กใ่งฃใไปฅไธใฎ่ฑๅฏใชๆฉ่ฝใๅใใฆใใพใ๏ผไปฅไธใงใฏ๏ผ`paths`ใซๅซใพใใ็ต่ทฏใฎไธญใใ่พบใฎๆฐใๆๅฐใฎใใฎ๏ผๆๅคงใฎใใฎ๏ผ
้ทใ๏ผ่พบใฎๆฐ๏ผใๆ็ญใฎใใฎ๏ผๆ้ทใฎใใฎใใใใใไธใคๅใๅบใใฆใใพใ๏ผ
```
# min_iter()/max_iter() yield subgraphs in ascending/descending total edge
# weight; with default unit weights the first element is a path with the
# fewest edges (shortest) and with the most edges (longest), respectively
shortest_path = next(paths.min_iter())
longest_path = next(paths.max_iter())
draw_subgraph(shortest_path)
draw_subgraph(longest_path)
```
`draw_subgraph(subgraph)` ใกใฝใใใ็จใใใจ`universe`ใจ`subgraph`ใจใ้ใญใฆๆ็ปใใใใจใใงใใพใ๏ผ
## ใใฎ็ซ ใฎใพใจใ
ใใฎ็ซ ใงใฏๆฐใไธใใๅงใใๅ้กใจ๏ผใใใGraphillionใไฝฟใฃใฆ่งฃใๆนๆณใ็ดนไปใใพใใ๏ผGraphillionใ็จใใใจ่จๅคงใชๆฐใฎ็ต่ทฏใใใฃใจใใ้ใซๆฐใไธใใใใจใใงใใพใ๏ผGraphillionใใชใ้ซ้ใซๅ้กใ่งฃใใใจใใงใใใใซใคใใฆ[ๆฌก็ซ ](05_graphillion_and_zdd.ipynb)ใง่งฃ่ชฌใใพใ๏ผ
| github_jupyter |
```
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.font_manager
from matplotlib.patches import Rectangle, PathPatch
from matplotlib.textpath import TextPath
import matplotlib.transforms as mtrans
%matplotlib inline
MPL_BLUE = '#11557c'    # matplotlib brand blue
# custom font for the wordmark; the path is machine-specific — TODO make configurable
ziti = mpl.font_manager.FontProperties(fname = '/Users/liuzaoqi/Desktop/ๅฏ่งๅๅพ้ด/font/calibri bold .ttf')


def create_icon_axes(fig, ax_position, lw_bars, lw_grid, lw_border, rgrid):
    """Draw the polar bar-chart icon of the matplotlib logo.

    Parameters:
        fig         -- figure to draw into
        ax_position -- axes rectangle (x0, y0, width, height) in figure coords
        lw_bars     -- line width of the bar edges
        lw_grid     -- line width of the radial grid
        lw_border   -- line width of the axes border
        rgrid       -- positions of the radial grid lines

    Returns the created polar Axes.
    """
    with plt.rc_context({'axes.edgecolor': MPL_BLUE,
                         'axes.linewidth': lw_border}):
        ax = fig.add_axes(ax_position, projection='polar')
        ax.set_axisbelow(True)

        # seven bars with fixed heights/widths reproduce the familiar icon
        N = 7
        arc = 2. * np.pi
        theta = np.arange(0.0, arc, arc / N)
        radii = np.array([2, 6, 8, 7, 4, 5, 8])
        width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3])
        bars = ax.bar(theta, radii, width=width, bottom=0.0, align='edge',
                      edgecolor='0.3', lw=lw_bars)
        for r, bar in zip(radii, bars):
            # colour each bar by its radius via the jet colormap, 60% alpha
            color = *cm.jet(r / 10.)[:3], 0.6
            bar.set_facecolor(color)

        ax.tick_params(labelbottom=False, labeltop=False,
                       labelleft=False, labelright=False)
        ax.grid(lw=lw_grid, color='0.9')
        ax.set_rmax(9)
        ax.set_yticks(rgrid)

        # white backdrop slightly larger than rmax so the border stands out
        ax.add_patch(Rectangle((0, 0), arc, 9.58,
                               facecolor='white', zorder=0,
                               clip_on=False, in_layout=False))
        return ax
def create_text_axes(fig, height_px):
    """Draw the slanted 'matplotlib' wordmark onto a full-figure axes."""
    text_ax = fig.add_axes((0, 0, 1, 1))
    text_ax.set_aspect("equal")
    text_ax.set_axis_off()

    # skew the glyph outlines slightly to match the logo's italic look
    skew_degrees = 4.25
    shear = mtrans.Affine2D().skew_deg(skew_degrees, 0)

    word_outline = TextPath((0, 0), "matplotlib", size=height_px * 0.8,
                            prop=ziti)
    glyphs = PathPatch(word_outline, transform=shear + text_ax.transData,
                       color=MPL_BLUE, lw=0)
    text_ax.add_patch(glyphs)
    text_ax.autoscale()
def make_logo(height_px, lw_bars, lw_grid, lw_border, rgrid, with_text=False):
    """Build the matplotlib logo figure and return (figure, icon_axes)."""
    dpi = 100
    fig_height = height_px / dpi
    # the wordmark version is five times wider than it is tall
    fig_width = 5 * fig_height if with_text else fig_height

    fig = plt.figure(figsize=(fig_width, fig_height), dpi=dpi)
    fig.patch.set_alpha(0)    # transparent background

    if with_text:
        create_text_axes(fig, height_px)
        icon_rect = (0.535, 0.12, .17, 0.75)   # icon sits to the right of the text
    else:
        icon_rect = (0.03, 0.03, .94, .94)     # icon fills the figure

    ax = create_icon_axes(fig, icon_rect, lw_bars, lw_grid, lw_border, rgrid)
    return fig, ax
# render the icon-only logo at 210 px
make_logo(height_px=210, lw_bars=0.7, lw_grid=0.5, lw_border=1,
          rgrid=[1, 3, 5, 7])
plt.show()

# render the full logo with the wordmark at 110 px
make_logo(height_px=110, lw_bars=0.7, lw_grid=0.5, lw_border=1,
          rgrid=[1, 3, 5, 7], with_text=True)
plt.show()
```
| github_jupyter |
# Example for computing a price serie's spectrogram
```
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
pd.set_option('display.max_rows', 5)
import matplotlib as mpl
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = [12, 4]
from utils import *
```
# Data
```
# download free-sample EDI price series (helpers presumably come from utils — confirm)
prices_dict = get_quandl_edi(list(QUANDL_FREE_SAMPLES_EDI.keys()))
print(len(prices_dict))
print(list(prices_dict.keys()))

# pick a random ticker to inspect
# NOTE(review): choice(len - 1) samples 0..len-2, so the last ticker can never
# be chosen — confirm whether the off-by-one is intentional
j = np.random.choice(len(prices_dict) - 1)
ticker_j = list(prices_dict.keys())[j]
print('j:', j, ' - ', ticker_j)

# prices_dict[ticker_j][['Open', 'High', 'Low', 'Close']].plot()
plot_prices(prices_dict[ticker_j], name=ticker_j)

price = prices_dict[ticker_j]
price
price.groupby(price.index.year).count()    # observations per calendar year
days_per_year = 260    # approximate number of trading days per year
```
# Fabricated Signal
```
# fabricate a price-like series: yearly + monthly cycles, log trend, Gaussian noise
sigma = 0.1
x = np.arange(10 * days_per_year)          # 10 years of daily samples
years = x / days_per_year
yearly_cycle = np.sin(2 * np.pi * x / (1 * days_per_year))
monthly_cycle = np.sin(2 * np.pi * x * 12 / days_per_year)
noise = sigma * np.random.randn(len(x))
y = yearly_cycle + monthly_cycle + np.log(x + 5) + noise

# plot the two cycles and the combined signal
plt.subplots(3, 1, figsize=(14, 8))
plt.subplot(3, 1, 1)
plt.plot(years, yearly_cycle, label='yearly cycle: every year', alpha=0.5)
plt.grid(alpha=0.5); plt.legend()
plt.subplot(3, 1, 2)
plt.plot(years, monthly_cycle, label='monthly cycle: every month', alpha=0.5)
plt.grid(alpha=0.5); plt.legend()
plt.subplot(3, 1, 3)
plt.plot(years, y, label='signal', alpha=0.5)
plt.grid(alpha=0.5); plt.legend()
plt.tight_layout()
Fs = 10 * days_per_year     # the sampling frequency
NFFT = 5 * days_per_year    # the length of the windowing segments

# signal on top, spectrogram (shared x axis) below
plt.subplots(figsize=(14, 6))
ax1 = plt.subplot(211)
plt.plot(years / 10, y)
plt.subplot(212, sharex=ax1)
Pxx, freqs, bins, im = plt.specgram(y, NFFT=NFFT, Fs=Fs)
plt.show()

print('Monthly cycle freq.:', 12)
print('Yearly cycle freq.:', 1)

# average power per frequency across all time bins
mean_power = Series(Pxx.mean(axis=1), freqs)
# np.log(mean_power).plot()
plt.loglog(mean_power)

# ten strongest frequencies; sort once instead of three times
top10 = mean_power.sort_values(ascending=False).head(10)
list(zip(top10.index, Fs / top10.index, top10))
```
# Scipy Spectrogram
```
# NOTE(review): `signal` is not imported in this notebook's visible imports —
# presumably scipy.signal arrives via `from utils import *`; confirm
f, t, Sxx = signal.spectrogram(x=price.Close.values, fs=261, nperseg=40,
                               window=('tukey', 0.25), detrend='constant')
# map the spectrogram time bins back onto the price DatetimeIndex
t_date = price.index[np.floor((len(price) - 1) * t / np.max(t)).astype('int')]
plt.pcolormesh(t_date, f, Sxx)
plt.ylabel('Frequency [times/year]')
plt.xlabel('Time [years]')
plt.show()
print('t.shape:', t.shape)
print('f.shape:', f.shape)
print('Sxx.shape:', Sxx.shape)
len(price) / 261    # series length in (approximate) years
len(price)
```
# Pyplot Spectrogram
```
Fs = 10 * days_per_year    # the sampling frequency
NFFT = days_per_year       # the length of the windowing segments

# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
plt.subplots(figsize=(14, 6))
ax1 = plt.subplot(211)
plt.plot(np.arange(len(price)) / Fs, price['Close'].values)
plt.subplot(212, sharex=ax1)
Pxx, freqs, bins, im = plt.specgram(price['Close'], NFFT=NFFT, Fs=Fs)
plt.show()

# average power per frequency across all time bins
mean_power = Series(Pxx.mean(axis=1), freqs)
plt.loglog(mean_power)

# n strongest frequencies; sort once instead of four times
n = 20
top_n = mean_power.sort_values(ascending=False).head(n)
list(zip(top_n.index, Fs / top_n.index, top_n))
plt.plot(Fs / top_n.index, 'o')
```
# RS System
```
from trading_strategies import *
test_RS_Trading_Strategy()
rs = RS_Trading_Strategy(price, equity=1e6,
days_fast=24, days_slow=260, name=ticker_j)
rs.excecute()
rs.plot_state()
rs.plot_equity()
rs.plot_lake_ratio()
```
| github_jupyter |
# inference only demo
We're done! We have a working pair of models which produce meaningful shared embeddings for text and images, which we can use to run image searches without relying on detailed metadata. The only thing to do now is ensure that the search process is fast enough to be practical, and lay out all of the pieces we need to run this outside of a notebook environment.
```
import torch
import pickle
import nmslib
import urllib
import numpy as np
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
nltk.download("punkt")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```
# load data
First we'll load a bunch of the lookup data we need to make this thing work. Nothing new here.
```
# vocab index -> 300-d word vector, and word -> vocab index
index_to_wordvec = np.load("/mnt/efs/models/index_to_wordvec.npy")
word_to_index = pickle.load(open("/mnt/efs/models/word_to_index.pkl", "rb"))

# strip directory and extension: '/a/b/B0001.jpg' -> 'B0001'
path_to_id = lambda x: x.split("/")[-1].split(".")[0]
image_ids = np.array(list(map(path_to_id, np.load("/mnt/efs/models/image_ids.npy"))))
```
# load devise'd embeddings for all images
We pre-computed the learned visual-semantic embeddings for all of our images at the end of the last notebook, so we can just reload them here. Remember, they're sentence-space representations of the images, so all that needs to happen at query-time is the embedding of the query sentence into the same space, and a KNN lookup of the most similar images.
```
embeddings = np.load("/mnt/efs/models/embeddings.npy").reshape(-1, 4096)
```
# utils
Again, we'll create a couple of utility functions to shrink the sentence embedding process down to a single function call.
```
def sentence_to_indexes(sentence):
    """Tokenise *sentence* and return vocab indexes for the known words.

    Tokens missing from word_to_index are silently dropped.
    """
    known = []
    for token in word_tokenize(sentence):
        if token in word_to_index:
            known.append(word_to_index[token])
    return known
def embed(sentence):
    """Project a single sentence into the shared visual-semantic space.

    Wraps the token indexes in <s> ... </s> markers, stacks the matching
    word vectors, and runs them through the sentence encoder.
    """
    indexes = (
        [word_to_index["<s>"]] + sentence_to_indexes(sentence) + [word_to_index["</s>"]]
    )
    wvs = np.stack([index_to_wordvec[i] for i in indexes])
    # BUG FIX: use the module-level `device` instead of hard-coded .cuda(),
    # so inference also works on CPU-only machines (device falls back to
    # 'cpu' when CUDA is unavailable)
    embedding = model(torch.Tensor([wvs]).to(device)).cpu().data.numpy()
    return embedding.squeeze()
def embed_paragraph(paragraph):
    """Embed a paragraph by max-pooling its per-sentence embeddings.

    Returns None when the paragraph contains no sentences.
    """
    sentences = sent_tokenize(paragraph)
    if not sentences:
        return None
    sentence_embeddings = np.array([embed(s) for s in sentences])
    return sentence_embeddings.max(axis=0)
```
# sentence embedding model
Now that we're only inferring an embedding for each sentence, we can ignore the `NLINet()` part of the network from notebook 8. We no longer need to classify sentence pairs or backpropagate any weights, so the remaining network is incredibly small and can be run without much trouble on a CPU. We saved the weights for this half of the network at the end of the last notebook, which we can inject into the matching network architecture here.
```
hidden_size = 2048    # per-direction LSTM hidden units; must match the saved checkpoint


class SentenceEncoder(torch.nn.Module):
    """Bidirectional-LSTM sentence encoder.

    Maps a batch of word-vector sequences to fixed-size embeddings by
    max-pooling the LSTM outputs over dim 1 (output size 2 * hidden_size).

    BUG FIX: the notebook referenced `nn` without `import torch.nn as nn`;
    using `torch.nn` explicitly avoids the NameError. The sizes are now
    parameters (defaults match the original globals) so the class is
    reusable; `SentenceEncoder()` behaves exactly as before.
    """

    def __init__(self, input_size=300, hidden_size=hidden_size):
        super(SentenceEncoder, self).__init__()
        self.enc_lstm = torch.nn.LSTM(
            input_size=input_size, hidden_size=hidden_size,
            num_layers=1, bidirectional=True
        )

    def forward(self, wv_batch):
        """Encode word-vector sequences; returns max-pooled LSTM features."""
        embedded, _ = self.enc_lstm(wv_batch)
        max_pooled = torch.max(embedded, 1)[0]
        return max_pooled
model = SentenceEncoder().to(device)
model_path = "/mnt/efs/models/sentence-encoder-2018-10-08.pt"
model.load_state_dict(torch.load(model_path))
```
# create nmslib search index
In the previous notebooks we've run searches by brute-forcing our way across the dataset, measuring the distance from our query embedding to every other individual point in sentence-space. This is exact, but _super_ inefficient, especially in a high-volume, high-dimensional case like ours. Here, and in our demo app, we'll use an _approximate_-nearest neighbours algorithm which transforms our data in sentence-embedding space into a hierarchical graph/tree structure, allowing us to traverse the whole thing with very few calculations. The approximate-ness of this ANN algorithm is small, and in the end we lose very little information by transforming it into this structure.
Similar libraries like [annoy](https://github.com/spotify/annoy) leverage roughly the same technique to find nearest neighbours in high dimensional space, but [nmslib has been shown to be the most efficient](https://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/) and we have no reason not to use it here.
Pre-computing the index takes a while, but it vastly reduces the search time when we run a query. The index can also be saved in binary form and reloaded elsewhere, so we don't have to re-run that expensive computation every time we restart our demo. The python bindings for nmslib are very straightforward - we can create our fully functional index in just three lines of code.
```
# build an HNSW approximate-nearest-neighbour index (cosine similarity)
# over the precomputed image embeddings
index = nmslib.init(method="hnsw", space="cosinesimil")
index.addDataPointBatch(embeddings)
index.createIndex({"post": 2}, print_progress=True)
```
# search
Let's run a search, returning the closest MIRO IDs and attaching them to a `/works` query URL
```
def search(query):
    """Return MIRO ids of the 10 images nearest to *query* in embedding space."""
    query_embedding = embed(query)
    nearest, _ = index.knnQuery(query_embedding, k=10)
    return image_ids[nearest]
# example query: join the returned MIRO ids into a works-search URL
results = search("mri brain scan")
base_url = "https://wellcomecollection.org/works?query="
url_query = urllib.parse.quote_plus(" ".join(results))
print(base_url + url_query)
```
That's it - super fast, super effective image search with no metadata necessary!
We've turned this notebook into a demo app hosted on AWS, which you can play with [here](http://labs.wellcomecollection.org/devise/index.html).
| github_jupyter |
<span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at '<a href="#papermill-error-cell">In [8]</a>'.</span>
```
YEAR = "2020"
BASE_DIR = "."
# Parameters
id = None
YEAR = "2302"
BASE_DIR = "/Users/cfe/Dev/jupyter-api/src"
DATA_DIR = "/Users/cfe/Dev/jupyter-api/src/data"
# Coerce the papermill-injected YEAR parameter to an int, keeping 1999
# as the fallback when YEAR is neither an int nor a numeric string.
arg_year = 1999
if isinstance(YEAR, int):
    arg_year = YEAR
elif isinstance(YEAR, str):
    try:
        arg_year = int(YEAR)
    except ValueError:
        # non-numeric string: keep the fallback
        # (bare `except:` narrowed to the only exception int() raises here)
        pass
arg_year
import sys
import pathlib
BASE_DIR = pathlib.Path(BASE_DIR)
# !pip install requests requests-html pandas
from dataclasses import dataclass
import pathlib
import pandas as pd
import requests
from requests_html import HTML
@dataclass
class ScrapeBoxOffice:
    """Scrape a Box Office Mojo worldwide grosses table into a DataFrame.

    With ``year=None`` the all-time world page is scraped, otherwise the
    page for that year. ``save_raw`` stores the fetched HTML under
    ``<output_dir>/html/``; ``save`` writes the parsed table to
    ``<output_dir>/<name>.csv``.
    """
    base_endpoint:str = "https://www.boxofficemojo.com/year/world/"
    year:int = None
    save_raw:bool = False
    save:bool = False
    output_dir: str = "."
    table_selector: str = '.imdb-scroll-table'

    def __post_init__(self):
        # BUG FIX: these were mutable *class* attributes, shared by every
        # instance until reassigned; make them per-instance state instead.
        self.table_data = []
        self.table_header_names = []
        self.df = pd.DataFrame()

    @property
    def name(self):
        """Label used for output files: the year, or 'world' for all-time."""
        return self.year if isinstance(self.year, int) else 'world'

    def get_endpoint(self):
        """Return the URL to scrape (base endpoint plus year, if given)."""
        endpoint = self.base_endpoint
        if isinstance(self.year, int):
            endpoint = f"{endpoint}{self.year}/"
        return endpoint

    def get_output_dir(self):
        """Output directory as a pathlib.Path."""
        return pathlib.Path(self.output_dir)

    def extract_html_str(self, endpoint=None):
        """Fetch the page; return (html_text_or_None, http_status)."""
        url = endpoint if endpoint is not None else self.get_endpoint()
        r = requests.get(url, stream=True)
        html_text = None
        status = r.status_code
        if r.status_code == 200:
            html_text = r.text
            if self.save_raw:
                raw_output_dir = self.get_output_dir() / 'html'
                raw_output_dir.mkdir(exist_ok=True, parents=True)
                output_fname = raw_output_dir / f"{self.name}.html"
                with open(f"{output_fname}", 'w') as f:
                    f.write(html_text)
        return html_text, status

    def parse_html(self, html_str=''):
        """Parse the first table matching `table_selector`.

        Returns (row_data, header_names), or None when no table is found.
        """
        r_html = HTML(html=html_str)
        r_table = r_html.find(self.table_selector)
        if len(r_table) == 0:
            return None
        parsed_table = r_table[0]
        rows = parsed_table.find("tr")
        header_row = rows[0]
        header_names = [x.text for x in header_row.find('th')]
        table_data = []
        for row in rows[1:]:
            # (removed unused row_dict_data / header_name bookkeeping)
            table_data.append([col.text for col in row.find("td")])
        self.table_data = table_data
        self.table_header_names = header_names
        return self.table_data, self.table_header_names

    def to_df(self, data=None, columns=None):
        """Build a DataFrame from parsed rows (mutable defaults removed)."""
        return pd.DataFrame(data if data is not None else [],
                            columns=columns if columns is not None else [])

    def run(self, save=False):
        """Scrape, parse, optionally save; return the resulting DataFrame.

        Raises Exception on a non-2xx response or when no table is found.
        """
        save = save or self.save    # explicit True wins over the instance default
        endpoint = self.get_endpoint()
        sys.stdout.write(f"Endpoint:\t{endpoint}\n")
        html_str, status = self.extract_html_str(endpoint=endpoint)
        sys.stdout.write(f"Response Status:\t{status}\n")
        if status not in range(200, 299):
            raise Exception(f"Extraction failed, endpoint status {status} at {endpoint}")
        parsed = self.parse_html(html_str if html_str is not None else '')
        if parsed is None:
            # BUG FIX: previously this crashed with a TypeError while
            # unpacking None; fail with an explicit message instead
            raise Exception(f"No table matching {self.table_selector} found at {endpoint}")
        data, headers = parsed
        sys.stdout.write(f"Rows:\t{len(data)}\nColumns:\t{len(headers)}\n")
        df = self.to_df(data=data, columns=headers)
        self.df = df
        if save:
            filepath = self.get_output_dir() / f'{self.name}.csv'
            sys.stdout.write(f"Saved to {filepath}")
            df.to_csv(filepath, index=False)
        sys.stdout.write(f"\n\n")
        return self.df
```
<span id="papermill-error-cell" style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">Execution using papermill encountered an exception here and stopped:</span>
```
scraper = ScrapeBoxOffice(year=arg_year, save=True, save_raw=True, output_dir=str(BASE_DIR / 'data'))
df = scraper.run()
df.head()
```
| github_jupyter |
# 20 Newsgroups text classification with pre-trained word embeddings
In this notebook, we'll use pre-trained [GloVe word embeddings](http://nlp.stanford.edu/projects/glove/) for text classification using PyTorch. Tokenization and word-to-id mapping is done using [gensim](https://radimrehurek.com/gensim/index.html). This notebook is largely based on the blog post [Using pre-trained word embeddings in a Keras model](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html) by François Chollet.
**Note that using a GPU with this notebook is highly recommended.**
First, the needed imports.
```
%matplotlib inline
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
from distutils.version import LooseVersion as LV
from gensim.utils import simple_preprocess
from gensim.corpora import Dictionary
from gensim import __version__ as gensim_version
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Pick the compute device (CUDA when available) and report versions.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
devicename = '[' + torch.cuda.get_device_name(0) + ']' if use_cuda else ""

print('Using PyTorch version:', torch.__version__,
      'gensim version:', gensim_version,
      'Device:', device, devicename)

assert LV(torch.__version__) >= LV("1.0.0")
```
TensorBoard is a tool for visualizing progress during training. Although TensorBoard was created for TensorFlow, it can also be used with PyTorch. It is easiest to use it with the tensorboardX module.
```
# Set up TensorBoard logging via tensorboardX (optional: `log` stays None
# and logging is skipped when tensorboardX is not installed)
try:
    import tensorboardX
    import os, datetime
    # timestamped run directory, e.g. logs/20ng-2020-01-01_12-00-00
    logdir = os.path.join(os.getcwd(), "logs",
                          "20ng-"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    print('TensorBoard log directory:', logdir)
    os.makedirs(logdir)
    log = tensorboardX.SummaryWriter(logdir)
except ImportError as e:
    log = None
```
## GloVe word embeddings
Let's begin by loading a datafile containing pre-trained word embeddings from [Pouta Object Storage](https://research.csc.fi/pouta-object-storage). The datafile contains 100-dimensional embeddings for 400,000 English words.
```
!wget -nc https://object.pouta.csc.fi/swift/v1/AUTH_dac/mldata/glove6b100dtxt.zip
!unzip -n glove6b100dtxt.zip
GLOVE_DIR = "."
print('Indexing word vectors.')
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
print('Examples of embeddings:')
for w in ['some', 'random', 'words']:
print(w, embeddings_index[w])
```
## 20 Newsgroups data set
Next we'll load the [20 Newsgroups](http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html) data set.
The dataset contains 20000 messages collected from 20 different Usenet newsgroups (1000 messages from each group):
|[]()|[]()|[]()|[]()|
| --- | --- |--- | --- |
| alt.atheism | soc.religion.christian | comp.windows.x | sci.crypt |
| talk.politics.guns | comp.sys.ibm.pc.hardware | rec.autos | sci.electronics |
| talk.politics.mideast | comp.graphics | rec.motorcycles | sci.space |
| talk.politics.misc | comp.os.ms-windows.misc | rec.sport.baseball | sci.med |
| talk.religion.misc | comp.sys.mac.hardware | rec.sport.hockey | misc.forsale |
```
!wget -nc https://object.pouta.csc.fi/swift/v1/AUTH_dac/mldata/news20.tar.gz
!tar -x --skip-old-files -f news20.tar.gz
TEXT_DATA_DIR = "./20_newsgroup"
print('Processing text dataset')
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
args = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}
with open(fpath, **args) as f:
t = f.read()
i = t.find('\n\n') # skip header
if 0 < i:
t = t[i:]
texts.append(t)
labels.append(label_id)
print('Found %s texts.' % len(texts))
```
First message and its label:
```
print(texts[0])
print('label:', labels[0], labels_index)
```
Tokenize the texts using gensim.
```
tokens = list()
for text in texts:
tokens.append(simple_preprocess(text))
```
Vectorize the text samples into a 2D integer tensor.
```
MAX_NUM_WORDS = 10000 # 2 words reserved: 0=pad, 1=oov
MAX_SEQUENCE_LENGTH = 1000
dictionary = Dictionary(tokens)
dictionary.filter_extremes(no_below=0, no_above=1.0,
keep_n=MAX_NUM_WORDS-2)
word_index = dictionary.token2id
print('Found %s unique tokens.' % len(word_index))
data = [dictionary.doc2idx(t) for t in tokens]
```
Truncate and pad sequences.
```
data = [i[:MAX_SEQUENCE_LENGTH] for i in data]
data = np.array([np.pad(i, (0, MAX_SEQUENCE_LENGTH-len(i)),
mode='constant', constant_values=-2)
for i in data], dtype=int)
data = data + 2
print('Shape of data tensor:', data.shape)
print('Length of label vector:', len(labels))
```
Split the data into a training set and a validation set:
```
VALIDATION_SET, TEST_SET = 1000, 4000
x_train, x_test, y_train, y_test = train_test_split(data, labels,
test_size=TEST_SET,
shuffle=True, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
test_size=VALIDATION_SET,
shuffle=False)
print('Shape of training data tensor:', x_train.shape)
print('Length of training label vector:', len(y_train))
print('Shape of validation data tensor:', x_val.shape)
print('Length of validation label vector:', len(y_val))
print('Shape of test data tensor:', x_test.shape)
print('Length of test label vector:', len(y_test))
```
Create PyTorch *DataLoader*s for all data sets:
```
BATCH_SIZE = 128
print('Train: ', end="")
train_dataset = TensorDataset(torch.LongTensor(x_train),
torch.LongTensor(y_train))
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
shuffle=True, num_workers=4)
print(len(train_dataset), 'messages')
print('Validation: ', end="")
validation_dataset = TensorDataset(torch.LongTensor(x_val),
torch.LongTensor(y_val))
validation_loader = DataLoader(validation_dataset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=4)
print(len(validation_dataset), 'messages')
print('Test: ', end="")
test_dataset = TensorDataset(torch.LongTensor(x_test),
torch.LongTensor(y_test))
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=4)
print(len(test_dataset), 'messages')
```
Prepare the embedding matrix:
```
print('Preparing embedding matrix.')

EMBEDDING_DIM = 100  # must match the GloVe file loaded above (glove.6B.100d.txt)

# Row i+2 of the matrix holds the GloVe vector for the word with gensim
# id i; rows 0 and 1 are the reserved pad/oov ids and stay all-zero, as
# do rows for vocabulary words absent from the pre-trained embeddings.
embedding_matrix = np.zeros((MAX_NUM_WORDS, EMBEDDING_DIM))
n_not_found = 0
for word, i in word_index.items():
    if i >= MAX_NUM_WORDS-2:
        # filter_extremes kept MAX_NUM_WORDS-2 tokens; skip anything beyond.
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i+2] = embedding_vector
    else:
        n_not_found += 1

embedding_matrix = torch.FloatTensor(embedding_matrix)
print('Shape of embedding matrix:', embedding_matrix.shape)
print('Words not found in pre-trained embeddings:', n_not_found)
```
## 1-D CNN
### Initialization
```
class Net(nn.Module):
    """1-D CNN text classifier on top of frozen pre-trained GloVe embeddings.

    Input: LongTensor of word ids, shape (batch, MAX_SEQUENCE_LENGTH=1000).
    Output: log-probabilities over the 20 newsgroup classes, shape (batch, 20).

    NOTE(review): forward() ends with log_softmax, whose matching loss is
    nn.NLLLoss; the notebook pairs it with nn.CrossEntropyLoss, which applies
    log_softmax a second time -- confirm intent.
    """
    def __init__(self):
        super(Net, self).__init__()
        # Embedding table initialised from the GloVe matrix and frozen
        # (freeze=True: not updated during training).
        self.embed = nn.Embedding.from_pretrained(embedding_matrix, freeze=True)
        self.conv1 = nn.Conv1d(100, 128, 5)  # 100 = embedding dimension
        self.pool1 = nn.MaxPool1d(5)
        self.conv2 = nn.Conv1d(128, 128, 5)
        self.pool2 = nn.MaxPool1d(5)
        self.conv3 = nn.Conv1d(128, 128, 5)
        # With sequence length 1000 the time axis shrinks
        # 1000 -> 996 -> 199 -> 195 -> 39 -> 35, so a 35-wide max-pool
        # collapses it to a single position.
        self.pool3 = nn.MaxPool1d(35)
        self.fc1 = nn.Linear(128, 128)
        self.fc2 = nn.Linear(128, 20)  # 20 newsgroup classes

    def forward(self, x):
        x = self.embed(x)        # (batch, seq, emb)
        x = x.transpose(1,2)     # Conv1d expects (batch, channels, seq)
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = F.relu(self.conv3(x))
        x = self.pool3(x)
        x = x.view(-1, 128)      # drop the collapsed time axis
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)
model = Net().to(device)
optimizer = optim.RMSprop(model.parameters(), lr=0.005)
# The network's forward() already ends with log_softmax, so the matching
# criterion is NLLLoss; CrossEntropyLoss would apply log_softmax a second
# time to the already-log-probabilities.
criterion = nn.NLLLoss()
print(model)
```
### Learning
```
def train(epoch, log_interval=200):
    """Train the module-level `model` for one epoch over `train_loader`.

    Prints a progress line with the current batch loss every
    `log_interval` batches. Relies on the globals model, optimizer,
    criterion, train_loader and device defined earlier in the notebook.
    """
    model.train()  # switch layers into training-mode behaviour

    for batch_idx, (inputs, labels) in enumerate(train_loader):
        # Ship the batch to the selected device (GPU when available).
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()                    # clear accumulated gradients
        outputs = model(inputs)                  # forward pass
        batch_loss = criterion(outputs, labels)  # loss for this batch
        batch_loss.backward()                    # backpropagate
        optimizer.step()                         # apply the parameter update

        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(inputs), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), batch_loss.data.item()))
def evaluate(loader, loss_vector=None, accuracy_vector=None):
    """Evaluate the module-level `model` on every batch of `loader`.

    Computes the average per-batch loss and the overall accuracy,
    optionally appending them to `loss_vector` / `accuracy_vector`,
    prints a one-line summary, and returns the concatenated predictions
    as a NumPy array (one predicted class id per sample, in loader order).
    """
    model.eval()
    loss, correct = 0, 0
    pred_vector = torch.LongTensor()
    pred_vector = pred_vector.to(device)
    for data, target in loader:
        data = data.to(device)
        target = target.to(device)
        output = model(data)
        loss += criterion(output, target).data.item()
        pred = output.data.max(1)[1]  # index of the max log-probability
        pred_vector = torch.cat((pred_vector, pred))
        correct += pred.eq(target.data).cpu().sum()
    # BUG FIX: average over the loader actually being evaluated. The old
    # code divided by len(validation_loader) unconditionally, so the
    # reported loss was wrong whenever evaluate() ran on test_loader.
    loss /= len(loader)
    if loss_vector is not None:
        loss_vector.append(loss)
    accuracy = 100. * correct.to(torch.float32) / len(loader.dataset)
    if accuracy_vector is not None:
        accuracy_vector.append(accuracy)
    print('Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        loss, correct, len(loader.dataset), accuracy))
    return np.array(pred_vector.cpu())
%%time
epochs = 20
lossv, accv = [], []
for epoch in range(1, epochs + 1):
train(epoch)
with torch.no_grad():
print('\nValidation set:')
evaluate(validation_loader, lossv, accv)
plt.figure(figsize=(5,3))
plt.plot(np.arange(1,epochs+1), lossv)
plt.title('validation loss')
plt.figure(figsize=(5,3))
plt.plot(np.arange(1,epochs+1), accv)
plt.title('validation accuracy');
```
### Inference
We evaluate the model using the test set. If accuracy on the test set is notably worse than with the training set, the model has likely overfitted to the training samples.
```
%%time
with torch.no_grad():
predictions = evaluate(test_loader)
```
We can also look at classification accuracies separately for each newsgroup, and compute a confusion matrix to see which newsgroups get mixed the most:
```
cm=confusion_matrix(y_test, predictions, labels=list(range(20)))
print('Classification accuracy for each newsgroup:'); print()
labels = [l[0] for l in sorted(labels_index.items(), key=lambda x: x[1])]
for i,j in enumerate(cm.diagonal()/cm.sum(axis=1)): print("%s: %.4f" % (labels[i].ljust(26), j))
print()
print('Confusion matrix (rows: true newsgroup; columns: predicted newsgroup):'); print()
np.set_printoptions(linewidth=9999)
print(cm); print()
plt.figure(figsize=(10,10))
plt.imshow(cm, cmap="gray", interpolation="none")
plt.grid(False)
plt.title('Confusion matrix (rows: true newsgroup; columns: predicted newsgroup)')
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=90)
plt.yticks(tick_marks, labels);
```
## LSTM
### Initialization
```
class Net(nn.Module):
    """Two-layer LSTM classifier on top of frozen pre-trained GloVe embeddings.

    Input: LongTensor of word ids, shape (batch, seq).
    Output: log-probabilities over the 20 newsgroup classes, shape (batch, 20).
    """
    def __init__(self):
        super(Net, self).__init__()
        self.embed = nn.Embedding.from_pretrained(embedding_matrix, freeze=True)
        # batch_first=True: input/output tensors are (batch, seq, feature).
        self.lstm = nn.LSTM(100, 128, num_layers=2, batch_first=True)
        self.fc1 = nn.Linear(128, 128)
        self.fc2 = nn.Linear(128, 20)

    def forward(self, x):
        x = self.embed(x)
        # h_n holds the final hidden state of each layer:
        # shape (num_layers=2, batch, hidden=128).
        _, (h_n, _) = self.lstm(x)
        x = h_n[1,:,:]  # final hidden state of the last (second) layer
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)
model = Net().to(device)
optimizer = optim.RMSprop(model.parameters(), lr=0.005)
# forward() ends with log_softmax, so NLLLoss is the correct pairing;
# CrossEntropyLoss would re-apply log_softmax to the log-probabilities.
criterion = nn.NLLLoss()
print(model)
```
### Learning
```
%%time
epochs = 20
lossv, accv = [], []
for epoch in range(1, epochs + 1):
train(epoch)
with torch.no_grad():
print('\nValidation set:')
evaluate(validation_loader, lossv, accv)
plt.figure(figsize=(5,3))
plt.plot(np.arange(1,epochs+1), lossv)
plt.title('validation loss')
plt.figure(figsize=(5,3))
plt.plot(np.arange(1,epochs+1), accv)
plt.title('validation accuracy');
```
### Inference
```
%%time
with torch.no_grad():
predictions = evaluate(test_loader)
cm=confusion_matrix(y_test, predictions, labels=list(range(20)))
print('Classification accuracy for each newsgroup:'); print()
labels = [l[0] for l in sorted(labels_index.items(), key=lambda x: x[1])]
for i,j in enumerate(cm.diagonal()/cm.sum(axis=1)): print("%s: %.4f" % (labels[i].ljust(26), j))
print()
print('Confusion matrix (rows: true newsgroup; columns: predicted newsgroup):'); print()
np.set_printoptions(linewidth=9999)
print(cm); print()
plt.figure(figsize=(10,10))
plt.imshow(cm, cmap="gray", interpolation="none")
plt.grid(False)
plt.title('Confusion matrix (rows: true newsgroup; columns: predicted newsgroup)')
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=90)
plt.yticks(tick_marks, labels);
```
| github_jupyter |
# Striplog expert functions
This notebooks looks at the main `striplog` object. For the basic objects it depends on, see [Basic objects](./Basic_objects.ipynb).
First, import anything we might need.
```
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import striplog
striplog.__version__
from striplog import Legend, Lexicon, Interval, Component
legend = Legend.builtin('NSDOE')
lexicon = Lexicon.default()
```
----
## Making a `striplog` from a PNG
```
from striplog import Striplog
imgfile = "M-MG-70_14.3_135.9.png"
strip = Striplog.from_img(imgfile, 14.3, 135.9, legend=legend)
strip
strip.plot(legend, ladder=True, aspect=3)
```
----
## Making, finding and annealing gaps
This striplog doesn't have any gaps...
```
strip.find_gaps() or "No gaps!"
```
But we can make some by deleting indices:
```
del strip[[2, 7, 12]]
strip.find_gaps()
```
We can also get a list of the indices of intervals that are followed by gaps (i.e. are directly above gaps in 'depth' order, or directly below gaps in 'elevation' order).
```
strip.find_gaps(index=True)
strip.thinnest()
strip.thickest(n=5).plot(legend=legend, aspect=2)
strip
strip = strip.prune(limit=1)
strip.plot(legend=legend, aspect=5)
strip = strip.anneal()
strip.plot(legend=legend, aspect=5)
strip.find_gaps() or "No gaps!"
```
## Label the intervals
```
strip.plot(label='lithology', legend=legend, aspect=5)
fig, axs = plt.subplots(ncols=2, sharey=True)
axs[0] = strip.plot(ax=axs[0], legend=legend)
axs[1] = strip.plot_tops(axs[1], field='lithology', )
axs[1].axis('off')
plt.show()
strip[0]
round(3.1415936, 2)
import random
for iv in strip:
iv.data['por'] = round(random.random() / 4, 3)
strip[0]
strip.plot(style='field', field='por', aspect=3)
strip[0].middle
strip[0].primary['lithology']
```
### Other types of anneal
```
s = strip[[1, 3]]
s.plot(legend=legend, aspect=1)
# Default behaviour: meet in middle.
fig, axs = plt.subplots(ncols=2, figsize=(4,2))
s.plot(legend=legend, aspect=1, ax=axs[0])
s.anneal(mode='middle').plot(legend=legend, ax=axs[1])
plt.show()
fig, axs = plt.subplots(ncols=2, figsize=(4,2))
s.plot(legend=legend, aspect=1, ax=axs[0])
s.anneal(mode='up').plot(legend=legend, ax=axs[1])
plt.show()
fig, axs = plt.subplots(ncols=2, figsize=(4,2))
s.plot(legend=legend, aspect=1, ax=axs[0])
s.anneal(mode='down').plot(legend=legend, ax=axs[1])
plt.show()
```
----
## Overlapping intervals
If there are overlapping intervals in a striplog, you can find them with `find_overlaps()`.
```
lappy = Striplog([Interval(**{'top': 50, 'base': 60, 'components':[Component({'lithology': 'dolomite'}),]}),
Interval(**{'top': 55, 'base': 75, 'components':[Component({'lithology': 'limestone'}),]}),
Interval(**{'top': 75, 'base': 80, 'components':[Component({'lithology': 'volcanic'}),]}),
Interval(**{'top': 78, 'base': 90, 'components':[Component({'lithology': 'anhydrite'}),]})
])
lappy.find_overlaps(index=True)
overlaps = lappy.find_overlaps()
fig, (ax0, ax1) = plt.subplots(1, 2, sharey=True, figsize=(3,5))
# Use alpha to highlight overlaps.
ax0 = lappy.plot(legend, ax=ax0, alpha=0.75, lw=1)
ax1 = overlaps.plot(ax=ax1)
ax0.set_title('lappy')
ax1.set_title('overlaps')
ax0.set_ylim(100, 40)
plt.show()
```
The `merge_overlaps()` method operates in place and returns nothing.
```
lappy.merge_overlaps()
```
Now there are no overlaps!
```
lappy.find_overlaps()
lappy.plot(legend, aspect=3, alpha=0.75, ec='k', lw=0.5)
```
The merged intervals have mixed components:
```
lappy[1].components
```
Note that the description is rather garbled.
```
lappy[1].description
```
## Merge
New way to merge. Uses precedence to decide what to take in each merge zone.
**Note, does not combine the components in a merge zone. You have to choose one or the other.**
```
lappy = Striplog([Interval(**{'top': 50, 'base': 60, 'components':[Component({'lithology': 'dolomite'}),]}),
Interval(**{'top': 55, 'base': 75, 'components':[Component({'lithology': 'limestone'}),]}),
Interval(**{'top': 75, 'base': 80, 'components':[Component({'lithology': 'volcanic'}),]}),
Interval(**{'top': 78, 'base': 90, 'components':[Component({'lithology': 'anhydrite'}),]})
])
lappy.plot(legend, aspect=2, alpha=0.75)
```
You have to provide an `Interval` attribute (like 'top', 'base', or 'thickness') **or** a component attribute to merge on (if you use a component attribute, `merge` gets this attribute from the primary component to decide what to keep in each zone). The attribute must support ordering — `striplog` keeps the thing with the larger value (or the thing which comes last in the ordering), e.g. the thickest, deepest, etc. If you want the reverse behaviour (keep the thinnest, shallowest, etc.), then pass `reverse=True`.
```
lappy.plot(legend, aspect=2, alpha=0.75)
lappy.merge('top').plot(legend, aspect=2, lw=1)
lappy.merge('top')[0].base.z
lappy.merge('top', reverse=True)[0].base.z
fig, axs = plt.subplots(ncols=2)
lappy.plot(legend, aspect=2, alpha=0.75, ax=axs[0])
lappy.merge('top').plot(legend, aspect=2, alpha=0.75, ax=axs[1])
plt.show()
fig, axs = plt.subplots(ncols=2)
lappy.plot(legend, aspect=2, alpha=0.75, ax=axs[0])
lappy.merge('top', reverse=True).plot(legend, aspect=2, alpha=0.75, ax=axs[1])
plt.show()
```
It is not currently possible to blend or combine units in the merge zones; you have to choose one.
----
## Querying the striplog
This results in a new Striplog, containing only the intervals requested.
```
strip.find('sandstone')
strip.find('sandstone').unique
strip.find('sandstone').cum
strip.find('sandstone').plot(aspect=3)
```
Let's ask for the rock we just found by seaching.
```
rock = strip.find('sandstone')[1].components[0]
rock
```
We can also search for a rock...
```
strip.find(rock).plot(legend, aspect=3)
rock in strip
```
And we can ask what is at a particular depth.
```
strip.read_at(90).primary
```
----
## Combining striplogs with binary operations
We'd like to operate on the pairs of striplogs, finding overlaps and their intersections.
```
chrono = Striplog([Interval(**{'top': 0, 'base': 60, 'components':[Component({'age': 'Holocene'})]}),
Interval(**{'top': 60, 'base': 75, 'components':[Component({'age': 'Palaeogene'})]}),
Interval(**{'top': 75, 'base': 100, 'components':[Component({'age': 'Cretaceous'})]}),
])
time = Legend.default_timescale()
time[2]
chrono.plot(time, aspect=3)
sands = strip.find('sandstone')
cretaceous = chrono.find('Palaeogene')
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, sharey=True)
ax0 = sands.plot(legend, ax=ax0)
ax1 = chrono.plot(time, ax=ax1)
ax2 = sands.intersect(cretaceous).plot(legend, ax=ax2)
ax0.set_title('Sands')
ax1.set_title('Chrono')
ax2.set_title('Tert. sands')
ax0.set_ylim(100, 40)
plt.show()
for i in sands:
a = chrono.read_at(i.top.z)
i.data = {'age': a.primary['age']}
sands[1]
```
----
## Map, reduce, filter
Striplogs are just lists of `Intervals`, so you can use Python's functional programming patterns on them quite easily. For example, you can map functions and lambdas onto striplogs:
```
tops_in_feet = map(lambda i: i.top.z/0.3048, strip)
list(tops_in_feet)[:5] # First 5 only
```
Don't forget the humble list comprehension though...
```
[i.thickness for i in strip][:5]
```
Add all the thicknesses of intervals with a top depth > 100 m:
```
from functools import reduce
def sumr(a, b):
    """Binary addition, written as a named function for functools.reduce."""
    total = a + b
    return total
reduce(sumr, [i.thickness for i in strip if i.top.z > 100])
```
To go even further, let's add a porosity array to each interval's primary component:
```
import random
for iv in strip:
iv.data['porosity'] = np.random.random(3)/4
strip[4]
```
We can also write a function that returns `True` for some condition, and then `filter` intervals on that condition:
```
def porous(component):
    """Predicate: True when the item's mean porosity exceeds 0.15."""
    mean_porosity = component.data['porosity'].mean()
    return mean_porosity > 0.15
Striplog(list(filter(porous, strip)))
```
It's a bit clunky now that `filter` returns an iterator. But it's also clunky because you can't pass arguments to the function you're giving `filter` — so you can't set the porosity to compare against when you call it, you have to edit the function itself.
To pass another argument to the filter function, you'll have to use a closure:
```
def min_porosity(x):
    """Return a predicate testing whether an item's mean porosity exceeds *x*.

    The closure lets the threshold be chosen at call time, e.g.
    ``filter(min_porosity(0.15), strip)``.
    """
    def exceeds(component):
        return component.data['porosity'].mean() > x
    return exceeds
Striplog(list(filter(min_porosity(0.15), strip)))
```
<hr />
## Logs from striplogs
The default behaviour is to assign the integer values in order of abundance of each primary component, starting with 1, and leaving 0 for 'unassigned'.
```
liths = strip.to_log()
plt.plot(liths)
plt.show()
```
Pass a legend to get the ordering from the legend ('1' is given to the first component in the legend, '2' to the next, and so on).
```
liths = strip.to_log(legend=legend)
plt.plot(liths)
plt.show()
```
Recall that we added porosity to the components in this striplog:
```
strip[4]
# I have broken this.
# por = strip.to_log(field='porosity', field_function=np.mean)
# plt.plot(por)
# plt.show()
```
We can also export any value corresponding to components from the legend, for example a 'width' log:
```
w, z, table = strip.to_log(legend=legend, legend_field='width', return_meta=True, step=0.1)
w
```
...and we can make a composite plot in `matplotlib`:
```
width = 3
fig = plt.figure(figsize=(1,10))
ax = fig.add_axes([0, 0, 1, 1])
ax = strip.plot_axis(ax, legend, default_width=width+1)
plt.plot(w, z, color='white')
plt.fill_betweenx(z, w, width+1, edgecolor='white', facecolor='white', zorder=2)
ax.set_xlim([0, width+1])
ax.set_ylim([strip.stop.z, strip.start.z])
plt.show()
```
----
## Filtering an array with a striplog
I'd like to 'extract' the data from a log, only where there are intervals. Since we already have ways to filter the striplog to zones of interest (e.g. with `find()` or `filter`) it would be easy to, say, get the GR curve where the striplog indicates sandstone.
```
import lasio
l = lasio.read("P-129_out.LAS")
z, gr = l['DEPT'], l['GR']
strip.find('sandstone').plot(aspect=2) # There are actually 4 intervals here; 2 are touching
sand = strip.find('sandstone').to_flag(basis=z)
gr[sand].mean()
```
Which is just a convenience; it does the same as:
```
sand = strip.find('sandstone').to_log(basis=z).astype(bool)
gr[sand].mean()
```
Do we need to make this easier?
<hr />
## Reversing a striplog
You can transform a striplog from 'depth' order to 'elevation' order.
```
strip.plot(aspect=3)
strip[-1]
s3 = strip.invert(copy=True)
s3.plot(aspect=3)
s3[0]
```
----
## Outcrop
Tell the CSV loader the columns:
1. base
1. top
1. description
```
l = """base,top,description
101,120,Till
100,101,Gypsum
50,100,Limestone Formation
28,50,Shale Formation
13,28,Granite Wash
0,13,Basement"""
log = Striplog.from_csv(text=l, lexicon=Lexicon.default())
log.read_at(30)
log.order
log.plot(aspect=3)
```
----
## Handling tops
I recommend treating tops as intervals, not as point data.
```
tops_csv = """top,formation
100, Escanilla Fm.
200, Sobrarbe Fm.
350, San Vicente Fm.
500, Cretaceous
"""
tops = Striplog.from_csv(text=tops_csv)
print(tops)
tops.read_at(254.0)
```
----
## Handling point data
Some things really are point data. Sort of like a log, but irregular, more discrete. Here are some lab measurements...
```
data_csv = """depth, bodacity
1200, 6.4
1205, 7.3
1210, 8.2
1250, 9.2
1275, 4.3
1300, 2.2
"""
```
You must specify `points=True` otherwise Striplog will 'fill in' and create the bases for you, based on the next top.
```
points = Striplog.from_csv(text=data_csv, points=True)
print(points)
```
One day, when we have a use case, we can do something nice with this, like treat it as numerical data, and make a plot for it. We need an elegant way to get that number into a 'rock', like {'x': 6.4}, etc.
```
points.order
```
<hr />
## Striplogs from logs
We can read a log from an LAS file with `lasio`:
```
import lasio
```
Read a gamma-ray log.
```
l = lasio.read("P-129_out.LAS")
z, gr = l['DEPT'], l['GR']
z[-2000]
```
Next we make a list of components to pass into the new striplog. The order must match the values you pass in the `to_log()` function:
```
comps = [Component({'lithology': 'sandstone'}),
Component({'lithology': 'greywacke'}),
Component({'lithology': 'shale'}), ]
```
Make a striplog from the GR curve, using the cutoffs given as `cutoff = [10, 50]`. These cutoffs define 3 lithologies, which is what we're passing in as `comps`. There must be enough components for the intervals you're defining.
If you don't provide `components`, you can provide `legend` instead; the components will be drawn from that. If you pass 'too many' components, they will be used in order and the 'extra' ones ignored.
You have to pass in the depth/elevation basis as well, because no assumptions are made about the log's extent.
```
s = Striplog.from_log(gr, cutoff=[10, 50], components=comps, basis=z)
s
```
Now we can, say, remove the thin beds:
```
s.prune(limit=5)
s.anneal()
s
```
And then read the log back into the intervals, 'reducing' with a function if we want:
```
s.extract(gr[2000:-2000], basis=z[2000:-2000], name='GR', function=np.mean)
s[20]
```
Now close the loop by exporting these values as a new log and comparing to the original. Since we reduced with `np.mean`, we will get a blocked log...
```
g, gz, _ = s.to_log(field="GR", start=500, stop=1500, return_meta=True)
g2, gz2, _ = s.to_log(field="GR", return_meta=True)
plt.figure(figsize=(16,3))
plt.plot(z, gr, color='lightblue')
plt.plot(gz, g, lw=3, color='red')
plt.plot(gz2, g2, lw=1, color='black')
plt.show()
```
## Another log to striplog
```
a = np.array([1,1,1,1,1,3,2,2,2,2,3,2,2,2,2,2,2,2,1,1,1,1,0,0,0,0,0,2,3,3,3,3,3,3,10,2,2,2,2,2,2,10,10,10,10,2,2,2,2,2])
z = np.linspace(100,200,50)
s = Striplog.from_log(a, legend=legend[:5], basis=z)
s[1]
fig, (ax0, ax1) = plt.subplots(1, 2, sharey=True, figsize=(3,20))
# Use alpha to highlight overlaps.
ax0 = s.plot(ax=ax0)
ax1.plot(a, z, 'o-')
ax0.set_title('Striplog')
ax1.set_title('Log')
ax1.set_ylim(200, 100)
plt.show()
```
<hr />
## Histogram
```
_ = strip.hist(legend=legend, rotation=-45, ha='left')
```
<hr />
## Crop a striplog
```
strip.crop((20, 100), copy=True)
strip.crop((20, 100)) # in place
strip[0]
```
**Limitation** — right now you cannot 'crop' to an extent larger than the current striplog. Maybe we should allow that, with padding...
```
strip.crop((0, 200))
# This should result in an error:
```
<hr />
## Graphical output formats
You should be able to natively save any format. If `matplotlib` complains, try replacing your usual import with
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
Or in Jupyter Notebook...
%matplotlib Agg
Then carry on as normal.
You need the figure object to save the striplog plot, so set `return_fig` to `True`:
```
fig = strip.plot(return_fig=True)
fig.savefig('test.png')
fig.savefig('test.pdf')
fig.savefig('test.svg')
```
To find out which backend you are using:
```
import matplotlib
matplotlib.get_backend()
```
<hr />
<p style="color:gray">©2015 Agile Geoscience. Licensed CC-BY. <a href="https://github.com/agile-geoscience/striplog">striplog.py</a></p>
| github_jupyter |
# Human Protien Atlas Data Processing
Here we are pulling data in and processing it.
* [Here](http://www.sciencemag.org/content/347/6220/1260419.full) is the paper by Uhlen et al. on the dataset
* The data was obtained from [proteinatlas.org](http://www.proteinatlas.org/)
```
%matplotlib inline
import pandas as pd
path = '/cellar/users/agross/Data/Protein_Atlas/'
```
### Cancer File
```
cancer = pd.read_csv(path + 'cancer.csv')
cancer.head()
cancer['Expression type'].value_counts()
cancer.Level.value_counts()
cancer.Tumor.value_counts()
```
Just double checking that the counts add up to the 'Total patients' column.
```
gb = cancer.groupby(['Gene','Tumor'])
assert all(gb['Count patients'].sum() == gb['Total patients'].first())
stacked = cancer.set_index(['Gene','Tumor','Level'])['Count patients']
stacked = stacked.unstack('Level')
stacked.head()
cancer = stacked
```
### Normal Tissue File
```
normals = pd.read_csv(path + 'normal_tissue.csv')
normals.head()
normals['Expression type'].value_counts()
del normals['Expression type']
normals['Reliability'].value_counts()
normals['Level'].value_counts()
normals.Tissue.unique()
normals['Cell type'].unique()
normals[['Tissue','Cell type']].drop_duplicates().shape
```
### Subcellular location
```
loc = pd.read_csv(path + 'subcellular_location.csv')
loc.head()
loc['Expression type'].value_counts()
loc.Reliability.value_counts()
loc['Main location'].value_counts().head(10)
```
### RNA
```
rna = pd.read_csv(path + 'rna.csv')
rna.head()
rna.Unit.value_counts()
del rna['Unit']
rna.groupby('Abundance').Value.agg({'mean':'mean','count':'count'}).sort('mean')
rna_df = rna.set_index(['Gene','Sample']).Value.unstack()
rna_df.shape
```
### Metadata
```
meta = pd.read_table(path + 'proteinatlas.tab')
meta.head(3).T
meta.Gene.value_counts().value_counts()
mapping = meta.set_index('Ensembl')['Gene']
meta['Subcellular location'].value_counts().head()
```
### Mapping tumor to normals
```
tn_map = {'breast cancer': [('breast','glandular cells')],
'carcinoid': [('pancreas','islets of Langerhans')],
'cervical cancer': [('cervix, uterine','squamous epithelial cells'),
('cervix, uterine','glandular cells')],
'colorectal cancer': [('colon','glandular cells'),
('rectum','glandular cells')],
'endometrial cancer': [('endometrium 1', 'glandular cells'),
('endometrium 2', 'glandular cells')],
'glioma':[('cerebral cortex', 'glial cells')],
'head and neck cancer': [('oral mucosa', 'squamous epithelial cells'),
('tonsil', 'squamous epithelial cells'),
('salivary gland', 'glandular cells')],
'liver cancer': [('liver', 'bile duct cells'),
('liver', 'hepatocytes')],
'lung cancer': [('bronchus', 'respiratory epithelial cells'),
('lung', 'pneumocytes')],
'lymphoma': [('lymph node', 'germinal center cells'),
('lymph node', 'non-germinal center cells')],
'melanoma': [('skin 1', 'melanocytes')],
'pancreatic cancer': [('pancreas', 'exocrine glandular cells')],
'prostate cancer': [('prostate', 'glandular cells')],
'renal cancer': [('kidney', 'cells in tubules')],
'skin cancer': [('skin 1', 'keratinocytes')],
'stomach cancer': [('stomach 1', 'glandular cells'),
('stomach 2', 'glandular cells')],
'testis cancer': [('testis', 'cells in seminiferous ducts')],
'urothelial cancer': [('urinary bladder', 'urothelial cells')]}
cancer.index.get_level_values('Tumor').unique()
normals[['Tissue','Cell type']].drop_duplicates().as_matrix()
```
| github_jupyter |
```
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
```
## Exercise 1
- load the dataset: `../data/international-airline-passengers.csv`
- inspect it using the `.info()` and `.head()` commands
- use the function `pd.to_datetime()` to change the column type of 'Month' to a datetime type
- set the index of df to be a datetime index using the column 'Month' and the `df.set_index()` method
- choose the appropriate plot and display the data
- choose appropriate scale
- label the axes
```
# - load the dataset: ../data/international-airline-passengers.csv
df = pd.read_csv('../data/international-airline-passengers.csv')
# - inspect it using the .info() and .head() commands
df.info()
df.head()
# - use the function to_datetime() to change the column type of 'Month' to a datatime type
# - set the index of df to be a datetime index using the column 'Month' and tthe set_index() method
df['Month'] = pd.to_datetime(df['Month'])
df = df.set_index('Month')
df.head()
# - choose the appropriate plot and display the data
# - choose appropriate scale
# - label the axes
df.plot()
```
## Exercise 2
- load the dataset: `../data/weight-height.csv`
- inspect it
- plot it using a scatter plot with Weight as a function of Height
- plot the male and female populations with 2 different colors on a new scatter plot
- remember to label the axes
```
# - load the dataset: ../data/weight-height.csv
# - inspect it
df = pd.read_csv('../data/weight-height.csv')
df.head()
df.info()
df.describe()
df['Gender'].value_counts()
# - plot it using a scatter plot with Weight as a function of Height
_ = df.plot(kind='scatter', x='Height', y='Weight')
# - plot the male and female populations with 2 different colors on a new scatter plot
# - remember to label the axes
# this can be done in several ways, showing 2 here:
males = df[df['Gender'] == 'Male']
females = df.query('Gender == "Female"')
fig, ax = plt.subplots()
males.plot(kind='scatter', x='Height', y='Weight',
ax=ax, color='blue', alpha=0.3,
title='Male & Female Populations')
females.plot(kind='scatter', x='Height', y='Weight',
ax=ax, color='red', alpha=0.3)
df['Gendercolor'] = df['Gender'].map({'Male': 'blue', 'Female': 'red'})
df.head()
df.plot(kind='scatter',
x='Height',
y='Weight',
c=df['Gendercolor'],
alpha=0.3,
title='Male & Female Populations')
fig, ax = plt.subplots()
ax.plot(males['Height'], males['Weight'], 'ob',
females['Height'], females['Weight'], 'or', alpha=0.3)
plt.xlabel('Height')
plt.ylabel('Weight')
plt.title('Male & Female Populations')
```
## Exercise 3
- plot the histogram of the heights for males and for females on the same plot
- use alpha to control transparency in the plot command
- plot a vertical line at the mean of each population using `plt.axvline()`
```
# Overlaid height histograms for the male and female populations.
males['Height'].plot(kind='hist',
                     bins=50,
                     range=(50, 80),
                     alpha=0.3,
                     color='blue')
females['Height'].plot(kind='hist',
                       bins=50,
                       range=(50, 80),
                       alpha=0.3,
                       color='red')
plt.title('Height distribution')
plt.legend(["Males", "Females"])
plt.xlabel("Height (in)")  # typo fix: was "Heigth"
# Vertical reference lines at each population's mean height.
plt.axvline(males['Height'].mean(), color='blue', linewidth=2)
plt.axvline(females['Height'].mean(), color='red', linewidth=2)
# Cumulative, normalized histograms. The `normed` keyword was removed in
# matplotlib 3.1; `density=True` is the supported equivalent.
males['Height'].plot(kind='hist',
                     bins=200,
                     range=(50, 80),
                     alpha=0.3,
                     color='blue',
                     cumulative=True,
                     density=True)
females['Height'].plot(kind='hist',
                       bins=200,
                       range=(50, 80),
                       alpha=0.3,
                       color='red',
                       cumulative=True,
                       density=True)
plt.title('Height distribution')
plt.legend(["Males", "Females"])
plt.xlabel("Height (in)")
# Horizontal guides at cumulative fractions 0.2 / 0.5 / 0.8.
plt.axhline(0.8)
plt.axhline(0.5)
plt.axhline(0.2)
```
## Exercise 4
- plot the weights of the males and females using a box plot
- which one is easier to read?
- (remember to put in titles, axes and legends)
```
dfpvt = df.pivot(columns = 'Gender', values = 'Weight')
dfpvt.head()
dfpvt.info()
dfpvt.plot(kind='box')
plt.title('Weight Box Plot')
plt.ylabel("Weight (lbs)")
```
## Exercise 5
- load the dataset: `../data/titanic-train.csv`
- learn about scattermatrix here: http://pandas.pydata.org/pandas-docs/stable/visualization.html
- display the data using a scattermatrix
```
df = pd.read_csv('../data/titanic-train.csv')
df.head()
from pandas.plotting import scatter_matrix
_ = scatter_matrix(df.drop('PassengerId', axis=1), figsize=(10, 10))
```
| github_jupyter |
# Merge
Combine data files into a CSV that's ready for analysis
```
import pandas as pd
```
Import data files
```
deaths_df = pd.read_csv(
"../input/processed/death-records.csv",
parse_dates=["date_of_death", "date_of_birth"],
dtype={
"last_name": str,
"first_name": str,
"middle_name": str,
"sex": str,
"place_of_birth_state_or_foreign_country": str,
"place_of_death_county": str,
"fathers_last_name": str,
"file_name": str,
"file_year": int,
"year_of_death": int,
"death_facility_name_location": str,
"place_of_death_address_street_number": str,
"place_of_death_address_street": str,
"place_of_death_city": str,
"father's_last_name": str,
"place_of_death_facility": str,
"place_of_death_facility_name_location": str,
"place_of_death_address_street_name": str,
"export_date": str,
"father_last_name": str
}
)
pops_df = pd.read_csv("../input/raw/dof/county-populations.csv")
recent_temps_df = pd.read_csv(
"../input/processed/recent-temps-by-county.csv",
parse_dates=["date"]
)
historic_temps_df = pd.read_csv(
"../input/processed/historic-temps-by-county.csv",
)
```
Aggregate
```
deaths_by_day = deaths_df.groupby([
"date_of_death",
"place_of_death_county"
]).size().rename("deaths").reset_index().rename(
columns={"date_of_death":"date", "place_of_death_county":"county"}
)
```
Backfill
```
backfilled_deaths_by_day = (
deaths_by_day.set_index(["county", "date"])
.unstack(["county"])
.stack("county", dropna=False)
.reset_index()
.sort_values(["county", "date"])
).fillna(0)
```
Annotate
```
backfilled_deaths_by_day['day_name'] = backfilled_deaths_by_day['date'].dt.day_name()
backfilled_deaths_by_day['is_weekend'] = backfilled_deaths_by_day.day_name.isin(
['Saturday', 'Sunday']
)
backfilled_deaths_by_day['day'] = backfilled_deaths_by_day['date'].dt.day
backfilled_deaths_by_day['month'] = backfilled_deaths_by_day['date'].dt.month
backfilled_deaths_by_day['year'] = backfilled_deaths_by_day['date'].dt.year
```
Merge pops to deaths
```
pops_by_year = pops_df.set_index("county").stack().reset_index().rename(columns={
"level_1": "year",
0: "population"
})
pops_by_year.year = pops_by_year.year.astype(int)
pops_by_year.county = pops_by_year.county.str.upper()
pops_merge = backfilled_deaths_by_day.merge(pops_by_year, on=["county", "year"], how="inner")
len(pops_merge)
assert len(pops_merge) == len(backfilled_deaths_by_day)
```
Merge temps
```
recent_temps_df.county = recent_temps_df.county.str.upper()
recent_temps_merge = pops_merge.merge(
recent_temps_df,
on=["county", "date"],
how="inner"
)
#assert len(recent_temps_merge) == len(pops_merge) == len(recent_temps_df)
recent_temps_merge.head()
historic_temps_df.county = historic_temps_df.county.str.upper()
historic_temps_merge = recent_temps_merge.merge(
historic_temps_df,
on=["county", "month", "day"],
how="inner"
).rename(columns={'tmax_95':'heat_event_threshold_f'})
assert (historic_temps_merge.month.unique() == [5, 6, 7, 8, 9, 10]).all()
historic_temps_merge['heat_event_threshold_f'] = historic_temps_merge['heat_event_threshold_f'].round()
```
Calculate
```
historic_temps_merge['is_heat_event'] = historic_temps_merge.tmax_f >= historic_temps_merge.heat_event_threshold_f
```
Export
```
historic_temps_merge.to_csv("../output/totals-by-day.csv", index=False)
```
| github_jupyter |
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
## Model Training Script for Synapse-AI-Retail-Recommender
Model Author (Data Scientist): Xiaoyong Zhu
This script is an adapted script of the full Model Training script that can be found in `4. ML Model Building`. This is a slimmed down version that only has the required operations for producing a model that the Model Deployment Process and the RecommendationRefresh notebook can consume.
```
import sys
print(sys.version)
# import libraries
import matplotlib.pyplot as plt
from datetime import datetime
from dateutil import parser
from pyspark.sql.functions import unix_timestamp
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark.ml import Pipeline
from pyspark.ml import PipelineModel
from pyspark.ml.feature import RFormula
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorIndexer
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
import azureml.core
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.run import Run
from azureml.core.experiment import Experiment
from azureml.core.model import Model
import os
import shutil
from shutil import rmtree
import json
import pprint
```
## Connect To Azure Machine Learning Workspace Using Service Principal
```
subscription_id = ''
workspace_name = ''
tenant_id = ''
service_principal_id = ''
service_principal_password = ''
# Service Principal Authentication
sp = ServicePrincipalAuthentication(tenant_id = tenant_id, # tenantID
service_principal_id = service_principal_id, # clientId
service_principal_password = service_principal_password) # clientSecret
# Connect to your Azure Machine Learning Workspace using the Service Principal
ws = Workspace.get(name = workspace_name,
auth = sp,
subscription_id = subscription_id)
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep = '\n')
```
## Data Ingestion
Read Spark table as a Spark dataframe
```
df = spark.read.table("retailaidb.cleaned_dataset")
spark.sparkContext.setCheckpointDir('checkpoint/')
# Filter only for Electronics items
df = df.withColumn('category_code_new', df['category_code'].substr(0, 11))
df = df.filter("category_code_new = 'electronics'")
top_category = df.groupBy('category_code_new').count().sort('count', ascending=False).limit(5) # only keep top 5 categories
top_category = top_category.withColumnRenamed("category_code_new","category_code_tmp")
item_to_save = df.groupBy('product_id', "category_code").count().sort('count', ascending=False)
item_to_save = item_to_save.join(top_category, top_category.category_code_tmp == item_to_save.category_code).limit(20)
raw_df = df
product_count = df.groupBy('product_id').count()
product_count = product_count.filter("count >= 30000").orderBy('count', ascending=False) # only counts when the product has 30000 views
raw_df = raw_df.withColumnRenamed("product_id","product_id_tmp")
raw_df = raw_df.join(product_count, raw_df.product_id_tmp == product_count.product_id)
user_count = df.groupBy('user_id').count()
user_count = user_count.filter("count >= 200").orderBy('count', ascending=False) # only counts when the user has 200 clicks
raw_df = raw_df.withColumnRenamed("user_id","user_id_tmp")
raw_df = raw_df.join(user_count, raw_df.user_id_tmp == user_count.user_id)
df = raw_df
df = df.where(df.event_type == "view")
df = df.drop("event_time","category_code","user_session","price","brand","category_id")
df = df.groupBy([df.product_id, df.user_id]).count()
# save table for further use
df.write.saveAsTable("retailaidb.cleaned_dataset_electronics", mode="overwrite")
df = df.withColumn("user_id", df["user_id"].cast(IntegerType()))
df = df.withColumn("product_id", df["product_id"].cast(IntegerType()))
#split the data into training and test datatset
train,test=df.randomSplit([0.75,0.25])
os.path.join(os.getcwd())
# define variables for experiment, model name, file path, seed value
experiment_name = 'retail_ai_experiment'
model_name = 'retailai_recommendation_model.pkl'
model_path = os.path.join(os.path.join(os.getcwd()), model_name)
random_seed_val = 12345
# start a training run by defining an experiment
experiment = Experiment(workspace = ws, name = experiment_name)
run = experiment.start_logging()
# create an ALS recommender
maxIter = 40
regParam = 0.20
rank = 25
rec = ALS(maxIter = maxIter,regParam = regParam, rank = rank, implicitPrefs = True, userCol = 'user_id', itemCol = 'product_id', \
ratingCol = 'count', nonnegative = True, coldStartStrategy = 'drop')
# fit the model on train set
rec_model = rec.fit(train)
# making predictions on test set
predicted_ratings = rec_model.transform(test)
# create Regressor evaluator object for measuring accuracy
evaluator = RegressionEvaluator(metricName = 'rmse', predictionCol = 'prediction', labelCol = 'count')
# apply the RE on predictions dataframe to calculate RMSE
rmse = evaluator.evaluate(predicted_ratings)
# log hyperparameters and evaluation metrics to Azure ML
run.log('maxIter', maxIter)
run.log('regParam', regParam)
run.log('rank', rank)
run.log('RMSE', rmse)
run.log_list('columns', train.columns)
# save model
rec_model.write().overwrite().save("retailai_recommendation_model")
# Declare run completed
run.complete()
run_id = run.id
print ("run id:", run.id)
predicted_ratings.printSchema()
# view current run in Azure ML
run
# query metrics tracked
pprint.pprint(run.get_metrics(recursive = True))
predicted_ratings_witherr = predicted_ratings.withColumn('err',abs(predicted_ratings["prediction"] - predicted_ratings["count"]))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/hatimnaitlho/ml-sklearn/blob/master/ExtraTreeClassifier_for_breast_cancer_diagnosis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Extra Tree Classifier for Breast Cancer Diagnosis
In this notebook we will build, assess and tune ExtraTreeClassifier with 100% recall, and up to 95% precision. The classifier will be trained and assessed based on the `Wisconsin Breast Cancer diagnosis dataset`.
This dataset is computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image.
### Why recall?
Imagine that we built a model that gives an accuracy_score of 98%. The score may seem good, but what if accuracy is not pertinent in our case.
So, let's dive into the meaning of accuracy to better understand this performance metric.
Accuracy answers the following question:
- how often is the classifier correct?
Thus, we can say that our model is correct 98% of the time. So, for 1000 patients, the model is able to predict correctly 980 of the cases, which can be considered as a very good performance.
Now, let's imagine that the model fails in classifying 10 malignant tumors. The accuracy is still 98%, but 10 sick patients were sent home without treatment, which would be a catastrophe!
This is why, accuracy is not considered as the good performance metric in our context (prediction if a tumor is malignant or benign).
We need instead, `Precision` and `Recall` metrics which are more relevant.
- The precision is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative.
- The recall is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples.
`Recall is more important than precision in a breast cancer predictor`
```
# Common imports
import numpy as np
import pandas as pd
import matplotlib as pl
import matplotlib.pyplot as plt
import seaborn as sns
# to make this notebook's output reproducible across runs
np.random.seed(42)
```
### Loading the cancer dataset
```
from sklearn import datasets
cancer_dataset = datasets.load_breast_cancer()
```
# Exploring the cancer dataset
```
cancer_dataset.keys()
print(cancer_dataset.DESCR)
```
# Exploratory Data Analysis & Features Selection
Let's build the cancer breast dataframe based on `'data'` and `'feature_names'`, which is more convenient.
```
df = pd.DataFrame(data=cancer_dataset.data, columns=list(cancer_dataset.feature_names))
df['target']= cancer_dataset.target
df.head()
print('Data Set Characteristics:')
print('The Number of Instances is: {}'.format(df.shape[0]))
print('The Number of Attributes is: {}'.format(df.shape[1]-1))
# Fill empty and NaNs values with NaN
df = df.fillna(np.nan)
# Check for Null values
df.isnull().sum()
```
### Features selection
Let's first plot the Pearson correlation heatmap and see the correlation of independent variables with the output variable `target`.
```
# Correlation matrix
cor = df.corr()
plt.figure(figsize = (20,14))
g = sns.heatmap(df.corr(), annot=True, cmap = 'BuPu')
```
#### Relevant features
We will choose a treshold to filter features based on their correlation with the target.
```
#Correlation with the output variable
treshold= 0.3
cor_target = abs(cor['target'])
#Selecting moderate to highly correlated features
relevant_features = cor_target[cor_target>=treshold]
print('The number of highly relevant features is: {}'.format(relevant_features.shape[0]))
relevant_features
features_to_drop = cor_target[cor_target<treshold]
features_to_drop
```
Now let's drop the features considered as having low correlation with the target. `mean fractal dimension`, `texture error`, `smoothness error`, `compactness error`, `concavity error`, `symmetry error`, `fractal dimension error`
```
dfc= df.copy()
dfc.drop(columns=['mean fractal dimension', 'texture error', 'smoothness error', 'compactness error',
'concavity error', 'symmetry error', 'fractal dimension error'], inplace= True)
X= dfc.drop(columns=['target'])
y= dfc['target']
```
#### Spliting and scaling data
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
Let's normalize the model using `StandardScaler`
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
```
# Ensemble Methods
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
# Fit each ensemble classifier and collect recall / precision / accuracy
# into parallel lists, then assemble a comparison DataFrame.
classifiers = [RandomForestClassifier(), AdaBoostClassifier(), BaggingClassifier(), ExtraTreesClassifier()]
l1, l2, l3, l4 = ([] for _ in range(4))
metrics_dict = {}
for clf in classifiers:
    clf.fit(X_train, y_train)
    # Predict once per classifier instead of re-running predict for every metric.
    y_pred = clf.predict(X_test)
    l1.append(clf.__class__.__name__)
    l2.append(recall_score(y_test, y_pred))
    l3.append(precision_score(y_test, y_pred))
    l4.append(accuracy_score(y_test, y_pred))
metrics_dict['model'] = l1
metrics_dict['recall_score'] = l2
metrics_dict['precision_score'] = l3
metrics_dict['accuracy_score'] = l4
df = pd.DataFrame(metrics_dict)
df
```
`ExtraTreesClassifier` performs well compared to the other classifiers. However, as we will see, the recall of `ExtraTreesClassifier` doesn't improve and stays at the same value, even when we tune its hyperparameters further.
Probably, we may have some features that have a significant impact and prevent the model from performing well.
In this case, we will have to reiterate to the step 'features selection' and focus on features that matters the most for the model.
# ExtraTreesClassifier
### Tuning ExtraTreeClassifier Parameters
```
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import recall_score, make_scorer
scorer= make_scorer(recall_score)
n_estimators = [15, 20, 30, 40, 50]
max_depth = [25, 30, 35, 40, 45]
min_samples_split = [2, 5, 10, 15]
min_samples_leaf = [2, 5, 10]
parameter_grid = {'n_estimators': n_estimators, 'max_depth': max_depth,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
clf_grid = GridSearchCV(estimator=ExtraTreesClassifier(),
param_grid=parameter_grid,
scoring= scorer,
cv=5,
refit=True,
error_score=0,
n_jobs=-1)
clf_grid.fit(X_train, y_train)
optimised_extratree = clf_grid.best_estimator_
print(clf_grid.best_params_)
y_predET= optimised_extratree.predict(X_test)
# Model Recall: what percentage of malignant tumors are labelled as such?
print("Recall:", recall_score(y_test, y_predET))
# Model Precision: what percentage of malignant tumors are labeled as such?
print("Precision:", precision_score(y_test, y_predET))
# Model Accuracy: how often is the classifier correct?
print("Accuracy:", accuracy_score(y_test, y_predET))
```
Tuning hyperparameters using the GridSearchCV method improved the recall (recall_score=1), but the precision actually decreased. We can keep the model as such, but in real-world projects, we have to iterate until reaching the "optimal" model.
`Performing more advanced filtering features using the โfeature_importances_โ attribute of the Extra tree will be quite helpful and ultimately will boost the performance of the final model.`
## Filtering features using the `feature_importances_` attribute
```
optimised_extratree.feature_importances_
keys= list(X.columns)
values= list(optimised_extratree.feature_importances_)
new_dict = dict(zip(keys, values))
# Keep only features whose importance exceeds the 1% threshold.
lf = [feature for feature, importance in new_dict.items() if importance > 0.01]
print(lf)
X.head()
Xr=X.copy()
Xr= Xr[lf]
Xr.head()
```
## Spliting and normalizing the data
```
Xr_train, Xr_test, yr_train, yr_test= train_test_split(Xr, y, test_size=0.2,random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
Xr_train = scaler.fit_transform(Xr_train)
Xr_test = scaler.transform(Xr_test)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import recall_score, make_scorer
scorer= make_scorer(recall_score)
n_estimators = [15, 20, 30, 40, 50]
max_depth = [25, 30, 35, 40, 45]
min_samples_split = [2, 5, 10, 15]
min_samples_leaf = [2, 5, 10]
parameter_grid = {'n_estimators': n_estimators, 'max_depth': max_depth,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
clf_grid = GridSearchCV(estimator=ExtraTreesClassifier(),
param_grid=parameter_grid,
scoring= scorer,
cv=5,
refit=True,
error_score=0,
n_jobs=-1)
clf_grid.fit(Xr_train, yr_train)
optimised_extratreeclf = clf_grid.best_estimator_
print(clf_grid.best_params_)
prediction=optimised_extratreeclf.predict(Xr_test)
# Model Recall: what percentage of malignant tumors are labelled as such?
print("Recall:", recall_score(yr_test, prediction))
# Model Precision: what percentage of malignant tumors are labeled as such?
print("Precision:", precision_score(yr_test, prediction))
print("Accuracy:", accuracy_score(yr_test, prediction))
```
Well, now we have improved our model with a recall equal to 1.0 by using advanced filtering techniques.
```
from sklearn.metrics import confusion_matrix
col= ['predicted "Malignant"', 'predicted "Benign"']
rws= ['Malignant', 'Benign']
mc= confusion_matrix(yr_test, prediction, labels=[1,0])
df_mc_ET=pd.DataFrame(data= mc, index=rws, columns=col)
df_mc_ET
```
| github_jupyter |
```
%pylab inline
import phreeqpython
import pandas as pd
pp = phreeqpython.PhreeqPython(database='phreeqc.dat')
```
## Oxygen
```
pressure_range = np.linspace(0.01, 100, 100)
o2 = []
for p in pressure_range:
sol = pp.add_solution({'temp':27})
gas = pp.add_gas({'O2(g)':p}, pressure=p, fixed_pressure=True)
sol.interact(gas)
o2.append(sol.total('O2', 'mol'))
sol.forget();gas.forget()
plt.figure(figsize=[8,5])
plt.plot(pressure_range, o2)
pd.read_csv('gas_data/O2_27.dat', sep='\t', index_col=0).plot(style='x', ax=plt.gca())
plt.title('Oxygen')
plt.xlabel('Pressure / atm')
plt.ylabel('O2 / (mol/kgw)')
plt.show()
```
## Nitrogen
```
pressure_range = np.linspace(0.01, 1000, 100)
n2 = []
for p in pressure_range:
sol = pp.add_solution({'temp':25})
gas = pp.add_gas({'N2(g)':p}, pressure=p, fixed_pressure=True)
sol.interact(gas)
n2.append(sol.total_element('N', 'mol')/2)
sol.forget();gas.forget()
plt.figure(figsize=[8,5])
plt.plot(pressure_range, n2)
pd.read_csv('gas_data/n2_25C.dat', sep='\t', index_col=0).plot(style='x', ax=plt.gca())
plt.title('Nitrogen')
plt.xlabel('Pressure / atm')
plt.ylabel('N2 / (mol/kgw)')
plt.show()
```
## Methane
```
plt.figure(figsize=[8,5])
colors = ['C0', 'C1', 'C2']
for temp in [25,50,100]:
pressure_range = np.linspace(0.01, 1000, 100)
ch4 = []
for p in pressure_range:
sol = pp.add_solution({'temp':temp})
gas = pp.add_gas({'CH4(g)':p}, pressure=p, fixed_pressure=True)
sol.interact(gas)
ch4.append(sol.total('CH4', 'mol'))
sol.forget();gas.forget()
plt.plot(pressure_range, ch4)
data = pd.read_csv('gas_data/ch4_{}c.dat'.format(temp), sep='\t', index_col=0)
plt.plot(data, 'x', color=colors.pop(0), label='{}C'.format(temp))
plt.title('Methane')
plt.xlabel('Pressure / atm')
plt.ylabel('CH4 / (mol/kgw)')
plt.legend()
plt.show()
```
## CO2 in 4M NaCl solution
```
# Use the Pitzer database for this calculation
pitzer = phreeqpython.PhreeqPython(database='pitzer.dat')
plt.figure(figsize=[8,5])
colors = ['C0', 'C1', 'C2']
index = 0
for temp in [80,120,160]:
pressure_range = np.linspace(0.01, 100, 100)
ch4 = []
for p in pressure_range:
sol = pitzer.add_solution({'temp':temp})
sol.add('NaCl', 4 , 'mol')
gas = pitzer.add_gas({'CO2(g)':p}, pressure=p, fixed_pressure=True)
sol.interact(gas)
ch4.append(sol.total_element('C', units='mol'))
sol.forget();gas.forget()
plt.plot(pressure_range*1.013, ch4)
data = pd.read_csv('gas_data/co2_4m_NaCl.dat', sep='\t', index_col=0)
plt.plot(data.iloc[:,index], 'x', color=colors.pop(0))
index+=1
plt.title('CO2')
plt.xlabel('Pressure / bar')
plt.ylabel('CO2 Solubility in 4 M NaCl / (mol/kgw)')
plt.legend()
plt.show()
```
| github_jupyter |
# Differences that modeling cause to the baseline model in i2b2 data
for reference, command that was run within scripts/ was ```CUDA_VISIBLE_DEVICES=<device_no> python main.py --<cross_validate/use_test> --dataset=i2b2 --preprocessing_type=<entity_blinding/punct_digit/punct_stop_digit> --border_size=-1 --num_epoches=150 --lr_values 0.001 0.0001 0.00001 --lr_boundaries 60 120```
This was gotten after preliminary hyperparameter tuning; and other options exist such as --use_elmo
```
# command for the old data - just classification
# for reference, command that was run within scripts/ was ```CUDA_VISIBLE_DEVICES=<device_no> python main.py --<cross_validate/use_test> --dataset=i2b2 --preprocessing_type=<entity_blinding/punct_digit/punct_stop_digit> --num_epoches=100 --lr_values 0.001 0.0001 --lr_boundaries 70```
# This was gotten after preliminary hyperparameter tuning
from scipy.stats import ttest_rel
def paired_ttest(score1, score2):
    """Print paired t-tests comparing two models' per-fold scores.

    Each argument is a list of per-fold tuples
    (8-way macro F1, 2-way macro F1, Problem-Treatment, Problem-Test,
    Problem-Problem). The scores are transposed so each metric's values
    across folds are compared with a paired (related-samples) t-test.
    """
    metrics_a = list(zip(*score1))
    metrics_b = list(zip(*score2))
    results = []
    for fold_scores_a, fold_scores_b in zip(metrics_a, metrics_b):
        results.append(ttest_rel(fold_scores_a, fold_scores_b))
    print('8 way evaluation: \t', results[0])
    print('2 way evaluation: \t', results[1])
    print('Problem-Treatment: \t', results[2])
    print('Problem-Test: \t\t', results[3])
    print('Problem-Problem: \t\t', results[4])
```
## First compare the cross validated score differences
```
# the commented out values are those for the old dataset
# baseline_test = (84.37, 68.76, 90.68, 90.6)
# # model ID 6198ab41-3183-40f3-9254-d86a2b26e4ed on gray - deleted (let's keep results in harrison)
# below is for the new model but with the border size of 50
# baseline_test = (62.83, 86.55, 50.1, 78.48, 47.64)
# model ID 7789e891-fb56-433f-9e4c-006d81a89802 on harrison
# Held-out test-set macro-F1 scores for the baseline CNN model.
# Tuple order: (8-way, 2-way, Problem-Treatment, Problem-Test, Problem-Problem).
baseline_test = (59.75, 83.17, 52.42, 70.91, 54.75)
#for baseline model with ID b960aa6a-1ff1-4c76-897a-4b1d289f86eb
# (8way, 2way, Prob-Treat, Prob-Test, Prob-Prob)
# results on the cross validation reporting
# One 5-tuple of macro-F1 scores per cross-validation fold (5 folds).
baseline = [(68.75, 86.54, 62.35, 75.95, 68.24), (71.29, 87.1, 65.38, 78.26, 70.25),
           (70.53, 87.05, 64.92, 77.36, 70.16), (69.66, 85.72, 64.75, 77.12, 66.44),
           (70.26, 85.85, 64.99, 77.46, 68.4)]
# model ID cd087669-3124-4899-ae93-107abfaa13a6
# 70.10 +- 0.85 86.45 +- 0.58 64.48 +- 1.08 77.23 +- 0.75
# # # Still need to run this baseline
# # #baseline = currently running on harrison Feb 15, 2019
# # # temp baseline for now
# # # baseline = [(90.35, 84.26, 92.58, 92.86), (88.71, 77.25, 92.89, 93.27), (89.57, 81.2, 92.55, 93.16),
# # # (86.16, 75.21, 89.89, 91.82), (87.79, 78.66, 92.47, 89.47)]
# # baseline = [(89.65, 83.48, 91.88, 92.04), (88.47, 79.31, 91.69, 92.31), (90.52, 83.62, 92.59, 94.02),
# # (88.07, 78.79, 92.35, 90.35), (88.73, 81.67, 92.11, 90.52)]
# # # model ID de365f82-b85d-415a-acb5-c43d7e7f4040 on gray
# baseline = [(73.82, 88.97, 68.6, 83.79, 61.61), (73.7, 88.71, 63.07, 84.99, 65.5),
# (72.99, 88.88, 66.67, 81.54, 64.39), (72.01, 89.88, 57.96, 85.19, 64.79),
# (72.04, 88.15, 64.34, 83.54, 61.41)]
# # model ID 3244b20d-e82f-44f1-a459-46f66e132481 in models_to_keep data medg misc
# Cross-validation scores (one 5-tuple per fold) for the baseline model
# augmented with ELMo embeddings.
elmo_model = [(72.08, 87.9, 65.25, 79.05, 73.17), (72.86, 87.93, 67.69, 78.3, 73.31),
             (73.2, 88.03, 68.09, 79.65, 72.24), (71.19, 87.14, 63.98, 79.92, 69.93),
             (73.34, 88.06, 66.54, 82.07, 71.43)]
# model ID d4bce62a-233c-4d6a-9ef4-2d088dea0a3b
# 72.53 +- 0.80 87.81 +- 0.34 66.31 +- 1.53 79.80 +- 1.27
# # below is with the PubMed model weights
# elmo_model = [(73.54, 89.67, 67.19, 83.25, 62.8), (76.66, 90.11, 70.09, 85.57, 68.1),
# (74.17, 90.16, 68.6, 83.55, 63.93), (74.85, 90.72, 66.67, 85.56, 64.68),
# (73.88, 88.41, 68.18, 84.65, 61.4)]
# # model ID 4c162539-5a8e-4c4b-bd91-e4bbf1e26dee
# # elmo_model = [(74.05, 89.41, 63.45, 85.94, 65.42), (72.51, 89.99, 63.57, 84.46, 61.61),
# # (74.97, 89.71, 69.42, 83.12, 66.96), (70.67, 87.77, 64.17, 81.65, 58.56),
# # (74.7, 90.83, 66.13, 84.97, 66.04)]
# # model ID a4ba512c-c0d2-4911-8eb5-1a236b4f2457
# # below is with the problematic folds
# # elmo_model = [(72.1, 89.16, 65.29, 82.14, 61.32), (51.91, 85.78, 42.93, 71.18, 0.0),
# # (49.7, 83.13, 44.59, 65.68, 0.0), (44.61, 84.64, 22.86, 64.25, 0.0),
# # (45.57, 84.01, 36.59, 60.35, 0.0)]
# # model ID 5a13415b-3f9c-4554-ad55-b150e64456ea -- need to delete
# # 52.78 +- 10.02 85.34 +- 2.09 42.45 +- 13.74 68.72 +- 7.56
# # Above indicates a problem with the way that the data has been split - because the std is too high
# # seed for splitting should be changed in this case.
# Cross-validation scores for the model with piecewise (entity-split) pooling.
piecewise_model = [(73.43, 89.22, 69.11, 80.08, 70.43), (74.36, 89.89, 71.91, 76.03, 75.86),
                  (75.37, 89.98, 73.56, 80.6, 70.27), (73.11, 89.05, 69.94, 79.0, 69.01),
                  (72.67, 88.3, 70.87, 79.74, 64.67)]
# model ID fb56fba5-e514-4d7c-aaa6-b39556755d4f
# 73.79 +- 0.97 89.29 +- 0.61 71.08 +- 1.55 79.09 +- 1.62
# piecewise_model = [(73.47, 89.54, 70.23, 80.0, 64.76), (76.0, 90.5, 67.47, 85.93, 67.86),
#                   (75.66, 89.97, 73.02, 83.38, 65.18),
#                   (74.41, 90.78, 66.4, 85.19, 64.81), (73.34, 89.11, 68.42, 83.92, 60.44)]
# # model ID 50f2975f-fb21-4805-b380-b305a1e04ca2
# #74.58 +- 1.09 89.98 +- 0.61 69.11 +- 2.33 83.68 +- 2.05
# Cross-validation scores using BERT's [CLS] vector appended to the sentence
# representation.
bert_CLS = [(65.83, 84.93, 58.67, 73.8, 66.22), (69.0, 86.03, 61.71, 76.11, 71.23),
           (68.06, 85.43, 60.45, 76.96, 68.37), (66.97, 85.28, 59.53, 76.6, 65.54),
           (66.98, 85.46, 60.19, 75.16, 66.24)]
# model ID 47bd09bf-af9e-4859-8942-b106d4731b04
# 67.37 +- 1.08 85.43 +- 0.36 60.11 +- 1.01 75.73 +- 1.14
# Cross-validation scores using token-level BERT embeddings added to the input.
bert_tokens = [(71.23, 87.51, 63.08, 79.57, 72.47), (72.91, 88.47, 65.78, 80.0, 74.23),
              (73.24, 87.83, 67.68, 79.74, 73.14), (69.78, 86.21, 64.0, 77.54, 67.6),
              (73.16, 87.81, 67.32, 80.78, 71.28)]
# model ID 061331e0-087c-46b0-b53e-7aab8ac87801
# 72.06 +- 1.36 87.57 +- 0.75 65.57 +- 1.80 79.53 +- 1.08
# Paired t-tests of each variant against the baseline on the per-fold CV
# scores, plus a direct ELMo-vs-BERT-token comparison.
paired_ttest(baseline, piecewise_model)
paired_ttest(baseline, elmo_model)
paired_ttest(baseline, bert_CLS)
paired_ttest(baseline, bert_tokens)
paired_ttest(elmo_model, bert_tokens)
# elmo_model_general_big = [(75.32, 90.5, 72.43, 84.05, 62.2), (75.41, 90.31, 65.25, 85.71, 67.89),
# (74.58, 90.03, 67.5, 83.12, 66.99), (72.68, 90.52, 61.22, 84.8, 64.0),
# (73.52, 88.66, 69.02, 83.84, 60.44)]
# # model ID 750a3dd2-6719-43f5-ad01-12c234b4fda5
# # 74.30 +- 1.06 90.00 +- 0.69 67.08 +- 3.75 84.30 +- 0.88
```
### Additional Experiments for i2b2
Entity blinding (+ Elmo) (+Bert)
```
# this is on the evaluation fold
# Cross-validation scores for entity blinding combined with ELMo embeddings.
entity_blinding_elmo = [(76.12, 88.88, 72.73, 77.35, 79.73), (78.88, 90.03, 74.54, 83.51, 78.77),
                       (78.26, 89.54, 74.79, 82.86, 76.77), (76.25, 88.7, 74.55, 77.49, 77.18),
                       (78.99, 89.67, 75.68, 82.86, 78.35)]
# model ID a484fac5-02c9-4005-8210-7c0b824b1d34
# 77.70 +- 1.26 89.36 +- 0.50 74.46 +- 0.96 80.81 +- 2.78
# entity_blinding_elmo = [(76.16, 90.24, 75.95, 82.05, 65.74), (77.29, 89.86, 73.21, 85.71, 66.67),
#                        (79.58, 90.93, 76.19, 86.22, 71.17), (80.19, 91.49, 77.92, 85.57, 73.21),
#                        (77.21, 89.43, 75.32, 84.03, 66.67)]
# #model ID 4f446314-3da7-43fd-bc98-d1c0507098bd
# # 78.09 +- 1.53 90.39 +- 0.74 75.72 +- 1.52 84.72 +- 1.52
# # this is with PubMed elmo
# Cross-validation scores for entity blinding combined with BERT token embeddings.
entity_blinding_bert_tokens = [(76.05, 88.24, 71.98, 79.32, 77.55), (77.24, 89.11, 73.64, 82.23, 75.42),
                              (76.61, 88.66, 73.22, 80.34, 76.19), (75.34, 88.31, 72.03, 78.45, 76.03),
                              (78.38, 88.84, 75.68, 83.65, 74.51)]
# model ID 32e95086-c338-4660-9d36-03c707601021
# 76.72 +- 1.04 88.63 +- 0.33 73.31 +- 1.35 80.80 +- 1.90
# execution time 32 hours
```
Entity blind + Piecewise pool (+Elmo) (+Bert tokens)
```
# Cross-validation scores: entity blinding + piecewise pooling + ELMo.
entity_blinding_piecewise_pool_elmo = [(79.05, 90.68, 74.0, 83.54, 80.41), (79.01, 90.62, 73.94, 83.33, 80.68),
                                      (79.11, 90.13, 75.92, 83.58, 77.29), (79.46, 89.63, 76.95, 83.9, 76.61),
                                      (80.41, 90.8, 77.58, 84.75, 78.26)]
# model ID 6e655ec8-3ec9-4c14-adc6-982974aa2cbb
# 79.41 +- 0.53 90.37 +- 0.44 75.68 +- 1.49 83.82 +- 0.50
# Cross-validation scores: entity blinding + piecewise pooling + BERT tokens.
entity_blinding_piecewise_pool_bert_tokens = [(78.37, 90.54, 73.05, 83.19, 79.86), (80.31, 90.86, 76.49, 83.68, 81.36),
                                             (79.47, 89.93, 77.89, 82.16, 77.74), (78.31, 89.54, 75.45, 82.91, 75.79),
                                             (81.11, 90.85, 79.43, 86.13, 75.84)]
# model ID 7e084293-d2a7-4033-8fe4-164beee8ffdf
# 79.51 +- 1.09 90.34 +- 0.53 76.46 +- 2.17 83.61 +- 1.35
```
Entity blind + piecewise pool
```
# this is on the cross val report mode
# Cross-validation scores: entity blinding + piecewise pooling (no contextual
# embeddings).
entity_blinding_piecewise_pool = [(76.34, 89.41, 71.94, 79.83, 78.15), (79.1, 90.52, 75.7, 82.25, 79.73),
                                 (78.64, 89.59, 75.45, 83.9, 75.68), (77.37, 89.29, 74.51, 81.09, 76.29),
                                 (79.17, 89.87, 78.75, 82.2, 75.08)]
# model ID b9128322-cbcf-4d5c-944b-e4fc26db38c4
# 78.12 +- 1.10 89.74 +- 0.44 75.27 +- 2.19 81.85 +- 1.35
# entity_blinding_piecewise_pool = [(76.23, 90.24, 76.73, 81.41, 66.67), (78.66, 90.37, 77.12, 85.57, 68.12),
#                                  (80.56, 91.18, 79.49, 85.43, 72.89), (78.87, 90.65, 79.31, 85.35, 66.96),
#                                  (77.38, 89.68, 74.4, 85.29, 66.37)]
# #model ID 03b9fe97-5692-47de-95b4-11afe90114ad
# # 78.34 +- 1.46 90.42 +- 0.49 77.41 +- 1.87 84.61 +- 1.60
```
Piecewise pool (+Elmo) (+Bert)
```
# Cross-validation scores: piecewise pooling + ELMo (no entity blinding).
piecewise_pool_elmo = [(75.06, 89.8, 69.65, 81.86, 73.5), (74.22, 90.23, 69.77, 77.57, 76.66),
                      (75.79, 90.32, 72.34, 81.29, 73.1), (73.85, 89.88, 69.57, 79.83, 71.89),
                      (74.9, 89.28, 71.35, 82.22, 69.63)]
# model ID 1e21fcb0-2fd5-4edf-b317-68634c759c19
# 74.76 +- 0.68 89.90 +- 0.37 70.54 +- 1.12 80.55 +- 1.70
# piecewise_pool_elmo = [(75.14, 90.37, 71.21, 82.53, 66.02), (77.23, 91.26, 70.68, 85.14, 70.32),
#                       (77.14, 90.86, 72.73, 83.8, 70.14), (77.27, 91.93, 71.26, 87.23, 66.67),
#                       (72.77, 88.54, 67.15, 84.71, 58.18)]
# # model ID 0b105264-9ef7-4266-a7e5-f53d1d7d1099
# # 75.91 +- 1.76 90.59 +- 1.15 70.61 +- 1.86 84.68 +- 1.56
# Cross-validation scores: piecewise pooling + BERT tokens (no entity blinding).
piecewise_pool_bert_tokens = [(74.28, 89.71, 67.7, 84.26, 69.86), (74.04, 90.08, 69.11, 78.13, 76.22),
                             (76.06, 90.52, 72.87, 81.55, 72.98), (73.64, 88.61, 71.54, 79.15, 68.33),
                             (75.32, 89.13, 72.12, 82.02, 70.38)]
# model ID 19af6aae-16ae-4440-af06-47b120c29d2b
# 74.67 +- 0.89 89.61 +- 0.68 70.67 +- 1.95 81.02 +- 2.17
```
### Paired ttests
```
# Paired t-tests for the additional experiments: each augmented configuration
# is compared against the model it extends, to isolate the contribution of
# entity blinding and piecewise pooling.
paired_ttest(elmo_model, entity_blinding_elmo)
paired_ttest(bert_tokens, entity_blinding_bert_tokens)
paired_ttest(entity_blinding_elmo, entity_blinding_bert_tokens)
paired_ttest(elmo_model, entity_blinding_piecewise_pool_elmo)
paired_ttest(bert_tokens, entity_blinding_piecewise_pool_bert_tokens)
paired_ttest(entity_blinding_piecewise_pool_elmo, entity_blinding_piecewise_pool_bert_tokens)
paired_ttest(piecewise_model, entity_blinding_piecewise_pool)
paired_ttest(elmo_model, piecewise_pool_elmo)
paired_ttest(bert_tokens, piecewise_pool_bert_tokens)
paired_ttest(piecewise_pool_elmo, piecewise_pool_bert_tokens)
```
piecewise pool model is better for i2b2
elmo model does not seem statistically significantly different from the baseline model, but note that the above was run with a pickle splitting seed of 2 rather than 5, which is the default.
Test score results for the above are (all model IDs the shared NFS folder): (border size -1)
```(59.75, 83.17, 52.42, 70.91, 54.75)``` for baseline model with ID b960aa6a-1ff1-4c76-897a-4b1d289f86eb
```(60.85, 83.69, 52.34, 72.72, 57.08)``` for piecewise pool model with model ID c1a272c2-0268-4641-bb7d-be7e32d3b836
```(63.18, 84.54, 54.73, 74.89, 59.55)``` for elmo model with model ID 2ef144cd-0d7d-4b01-942f-7b65380f9490
***
BERT (from clinical data - Emily's training)
`(56.79, 81.91, 48.56, 69.52, 52.16)` for the baseline model with bert CLS simple bert appending (to the fixed size sentence rep) with model ID 1458f1db-0290-4d8e-97e7-d5c298cfb683
Another run (just to verify): `(56.36, 82.05, 47.46, 69.66, 52.22)` with model ID d67c42a6-9410-481f-ab37-17021261e32e
`(63.11, 84.91, 54.53, 75.62, 57.49)` for baseline model with bert token level addition with model ID b5576118-9d6e-4b0a-948b-782705826a55
```
# Test score results for the above are (all model IDs the shared NFS folder): (with border size 50)
# ```(62.83, 86.55, 50.1, 78.48, 47.64)``` for baseline model with ID 7789e891-fb56-433f-9e4c-006d81a89802
# ```(66.73, 88.08, 54.74, 81.24, 51.28)``` for elmo model with model ID 63f1e537-da50-495c-be8f-fabd209a058c
# ```(64.67, 87.07, 53.88, 79.52, 47.58)``` for piecewise pool model with model ID 15344c2c-1f2a-4420-9000-83c2be452129
```
### Additional experiments
`(70.46, 86.17, 61.92, 78.32, 71.67)` for the elmo model and entity blinding with ID 1df015ba-d906-42c0-b22a-1db930cfc9d6
`(70.62, 86.14, 60.95, 78.67, 73.94)` for the piecewise pool model and entity blinding with elmo and ID is d0b840dc-fcab-4144-9714-37e82f2b95ec
`(69.73, 85.44, 60.03, 77.19, 73.9)` for the entity blinding and piecewise pool model with ID b9bc6c62-5ca8-4aa5-98e8-61eb3536209c
`(63.19, 84.92, 54.13, 74.81, 61.66)` for the piecewise pool model and elmo with ID b6a9db36-b334-41b0-a103-ee01cde0f34c
`(70.56, 85.66, 61.68, 78.39, 72.34)` for the bert tokens model and entity blinding with ID fe40eb2f-52b5-45dd-94a2-16f84973effd
`(71.01, 86.26, 61.71, 79.1, 73.77)` for the bert tokens model with entity blinding and piecewise pooling with model ID ceffcfde-a039-4e5e-bae9-8176f3e99868
`(63.23, 85.45, 54.76, 75.03, 59.44)` for the bert tokens model with piecewise pooling with model ID 49c14cda-f3f3-4eb5-a77f-4860363cfbae
```
# with border size 50
# `(73.03, 88.79, 64.25, 84.19, 59.2)` for the elmo model and entity blinding with ID 63d9fda1-2931-4dec-b7e9-cfd56cae58e8
# `(73.38, 89.0, 64.75, 84.78, 58.5)` for the piecewise pool model and entity blinding with elmo and ID is eb55046d-7bdd-4fc7-9f0c-c40c9808e8a6
# `(72.75, 88.17, 65.95, 83.13, 58.59)` for the entity blinding and piecewise pool model with ID 7c46e59a-e335-44c5-90c3-ce4782ab2f66
# `(67.01, 88.05, 55.66, 81.75, 50.25)` for the piecewise pool model and elmo with ID 1e76f364-8509-4106-8280-6b862b920e70
# border size 50
# Elmo model with the embeddings of the large model returns a result of `(65.05, 87.62, 51.74, 80.6, 48.43)` with model ID 77cea5cb-ab0c-482d-b9f9-762b0eb1ee28
# ```(64.8, 87.02, 55.43, 78.23, 47.29)``` for elmo model with model ID fd25ca11-27fc-4b89-816e-22867aa586a6 for the old elmo model
```
| github_jupyter |
# ORF307 Precept 5
# Converting LPs
Convert the following LP into 2 forms
\begin{array}{ll} \mbox{min} & \|Ax - b\|_1 \\
\mbox{subject to} & \|x\|_{\infty} \leq k \\
\end{array}
form (1)
\begin{array}{ll} \mbox{min} & c^T x \\
\mbox{subject to} & Ax \leq b \\
& Cx = d \\
\end{array}
form (2)
\begin{array}{ll} \mbox{min} & c^T x \\
\mbox{subject to} & Ax = b \\
& x \geq 0 \\
\end{array}
# Steel company operations
A steel company must decide how to allocate next week's time on a rolling mill, which is a machine that takes unfinished slabs of steel as input and can
produce either of two semi-finished products: bands and coils. The mill's two products come off the rolling line at different rates: Bands $200 tons/h$ and Coils $140 tons/h$.
They also produce different profits: Bands $\$ 25/ton$ and Coils $ \$30/ton $.
Based on currently booked orders, the following upper bounds are placed on the amount of each product to produce: Bands $6000$ tons and Coils $4000$ tons.
Given that there are $40 h$ of production time available this week, the problem is to decide how many tons of bands and how many tons of coils should be produced to yield the greatest profit. Formulate this problem as a linear
programming problem. Can you solve this problem by inspection?
# The moment problem
Suppose that $Z$ is a random variable
taking values in the set $0, 1, \dots, K$, with probabilities $p_0, p_1, \dots, p_K$, respectively.
We are given the values of the first two moments $E[Z] = \sum_{k=1}^K k p_k$ and $E[Z^2] = \sum_{k=1}^K k^2 p_k$ of $Z$, and we would like to obtain upper and lower bounds on the value of the fourth moment $E[Z^4] = \sum_{k=1}^K k^4 p_k$ of $Z$. Show how linear programming
can be used to approach this problem.
# Chebyshev center
Consider a set P described by linear
inequality constraints, that is, $P = \{x \in \mathbb{R}^n | a_i^T x \leq b_i \quad i = 1, \dots ,m\}$. A ball
with center $y$ and radius $r$ is defined as the set of all points within (Euclidean)
distance $r$ from $y$. We are interested in finding a ball with the largest possible
radius, which is entirely contained within the set $P$. (The center of such a ball is
called the Chebyshev center of $P$.) Provide a linear programming formulation of
this problem.
```
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt
# Half-space description of the polyhedron P = {x : A x <= b}.
A = np.array([[-1, 0],
              [0, -1],
              [4, 4],
              [3, -1]])
# Euclidean norm of each row a_i, needed for the Chebyshev-center
# constraint a_i^T x_c + ||a_i||_2 * r <= b_i.
norms = np.linalg.norm(A, axis=1)
b = np.array([0, 0, 20, 5])
# Plot the four boundary lines of P (two axis lines plus two oblique ones).
xx = np.arange(-1, 5, .1)
y1 = 0*xx
y2 = np.arange(-5, 8, .1)
xx2 = 0*y2
y3 = (b[2] - A[2,0]*xx)/A[2,1]
y4 = (b[3] - A[3,0]*xx)/A[3,1]
plt.plot(xx, y1)
plt.plot(xx, y3)
plt.plot(xx, y4)
plt.plot(xx2, y2)
# setup the LP
# Variables: ball radius r and center x_c; maximize r subject to the ball
# lying inside every half-space of P.
r = cp.Variable(1)
xc = cp.Variable(2)
constraints = []
for i in range(4):
    constraints.append(A[i,:] @ xc + norms[i] * r <= b[i])
prob = cp.Problem(cp.Maximize(r), constraints)
sol = prob.solve()
print('sol', sol)
print('xc.value', xc.value)
print('r.value', r.value)
# Draw the largest inscribed ball on top of the polyhedron plot.
center = xc.value
circle = plt.Circle((center[0], center[1]), r.value[0], fill=False)
fig = plt.gcf()
ax = fig.gca()
plt.scatter(center[0], center[1])
ax.add_patch(circle)
# Equal axis scaling so the circle is not distorted.
ax.axis('equal')
ax.set(xlim=(-1, 5), ylim=(-1, 5))
```
| github_jupyter |
**Tools - pandas**
*The `pandas` library provides high-performance, easy-to-use data structures and data analysis tools. The main data structure is the `DataFrame`, which you can think of as an in-memory 2D table (like a spreadsheet, with column names and row labels). Many features available in Excel are available programmatically, such as creating pivot tables, computing columns based on other columns, plotting graphs, etc. You can also group rows by column value, or join tables much like in SQL. Pandas is also great at handling time series.*
Prerequisites:
* NumPy โ if you are not familiar with NumPy, we recommend that you go through the [NumPy tutorial](tools_numpy.ipynb) now.
# Setup
First, let's make sure this notebook works well in both python 2 and 3:
```
from __future__ import division, print_function, unicode_literals
```
Now let's import `pandas`. People usually import it as `pd`:
```
import pandas as pd
```
# `Series` objects
The `pandas` library contains these useful data structures:
* `Series` objects, that we will discuss now. A `Series` object is 1D array, similar to a column in a spreadsheet (with a column name and row labels).
* `DataFrame` objects. This is a 2D table, similar to a spreadsheet (with column names and row labels).
* `Panel` objects. You can see a `Panel` as a dictionary of `DataFrame`s. These are less used, so we will not discuss them here.
## Creating a `Series`
Let's start by creating our first `Series` object!
```
s = pd.Series([2,-1,3,5])
s
```
## Similar to a 1D `ndarray`
`Series` objects behave much like one-dimensional NumPy `ndarray`s, and you can often pass them as parameters to NumPy functions:
```
import numpy as np
np.exp(s)
```
Arithmetic operations on `Series` are also possible, and they apply *elementwise*, just like for `ndarray`s:
```
s + [1000,2000,3000,4000]
```
Similar to NumPy, if you add a single number to a `Series`, that number is added to all items in the `Series`. This is called *broadcasting*:
```
s + 1000
```
The same is true for all binary operations such as `*` or `/`, and even conditional operations:
```
s < 0
```
## Index labels
Each item in a `Series` object has a unique identifier called the *index label*. By default, it is simply the rank of the item in the `Series` (starting at `0`) but you can also set the index labels manually:
```
s2 = pd.Series([68, 83, 112, 68], index=["alice", "bob", "charles", "darwin"])
s2
```
You can then use the `Series` just like a `dict`:
```
s2["bob"]
```
You can still access the items by integer location, like in a regular array:
```
s2[1]
```
To make it clear when you are accessing by label or by integer location, it is recommended to always use the `loc` attribute when accessing by label, and the `iloc` attribute when accessing by integer location:
```
s2.loc["bob"]
s2.iloc[1]
```
Slicing a `Series` also slices the index labels:
```
s2.iloc[1:3]
```
This can lead to unexpected results when using the default numeric labels, so be careful:
```
surprise = pd.Series([1000, 1001, 1002, 1003])
surprise
surprise_slice = surprise[2:]
surprise_slice
```
Oh look! The first element has index label `2`. The element with index label `0` is absent from the slice:
```
try:
surprise_slice[0]
except KeyError as e:
print("Key error:", e)
```
But remember that you can access elements by integer location using the `iloc` attribute. This illustrates another reason why it's always better to use `loc` and `iloc` to access `Series` objects:
```
surprise_slice.iloc[0]
```
## Init from `dict`
You can create a `Series` object from a `dict`. The keys will be used as index labels:
```
weights = {"alice": 68, "bob": 83, "colin": 86, "darwin": 68}
s3 = pd.Series(weights)
s3
```
You can control which elements you want to include in the `Series` and in what order by explicitly specifying the desired `index`:
```
s4 = pd.Series(weights, index = ["colin", "alice"])
s4
```
## Automatic alignment
When an operation involves multiple `Series` objects, `pandas` automatically aligns items by matching index labels.
```
print(s2.keys())
print(s3.keys())
s2 + s3
```
The resulting `Series` contains the union of index labels from `s2` and `s3`. Since `"colin"` is missing from `s2` and `"charles"` is missing from `s3`, these items have a `NaN` result value (i.e. Not-a-Number, which here means *missing*).
Automatic alignment is very handy when working with data that may come from various sources with varying structure and missing items. But if you forget to set the right index labels, you can have surprising results:
```
s5 = pd.Series([1000,1000,1000,1000])
print("s2 =", s2.values)
print("s5 =", s5.values)
s2 + s5
```
Pandas could not align the `Series`, since their labels do not match at all, hence the full `NaN` result.
## Init with a scalar
You can also initialize a `Series` object using a scalar and a list of index labels: all items will be set to the scalar.
```
meaning = pd.Series(42, ["life", "universe", "everything"])
meaning
```
## `Series` name
A `Series` can have a `name`:
```
s6 = pd.Series([83, 68], index=["bob", "alice"], name="weights")
s6
```
## Plotting a `Series`
Pandas makes it easy to plot `Series` data using matplotlib (for more details on matplotlib, check out the [matplotlib tutorial](tools_matplotlib.ipynb)). Just import matplotlib and call the `plot()` method:
```
%matplotlib inline
import matplotlib.pyplot as plt
temperatures = [4.4,5.1,6.1,6.2,6.1,6.1,5.7,5.2,4.7,4.1,3.9,3.5]
s7 = pd.Series(temperatures, name="Temperature")
s7.plot()
plt.show()
```
There are *many* options for plotting your data. It is not necessary to list them all here: if you need a particular type of plot (histograms, pie charts, etc.), just look for it in the excellent [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) section of pandas' documentation, and look at the example code.
# Handling time
Many datasets have timestamps, and pandas is awesome at manipulating such data:
* it can represent periods (such as 2016Q3) and frequencies (such as "monthly"),
* it can convert periods to actual timestamps, and *vice versa*,
* it can resample data and aggregate values any way you like,
* it can handle timezones.
## Time range
Let's start by creating a time series using `pd.date_range()`. This returns a `DatetimeIndex` containing one datetime per hour for 12 hours starting on October 29th 2016 at 5:30pm.
```
dates = pd.date_range('2016/10/29 5:30pm', periods=12, freq='H')
dates
```
This `DatetimeIndex` may be used as an index in a `Series`:
```
temp_series = pd.Series(temperatures, dates)
temp_series
```
Let's plot this series:
```
temp_series.plot(kind="bar")
plt.grid(True)
plt.show()
```
## Resampling
Pandas lets us resample a time series very simply. Just call the `resample()` method and specify a new frequency:
```
temp_series_freq_2H = temp_series.resample("2H")
temp_series_freq_2H
```
The resampling operation is actually a deferred operation, which is why we did not get a `Series` object, but a `DatetimeIndexResampler` object instead. To actually perform the resampling operation, we can simply call the `mean()` method: Pandas will compute the mean of every pair of consecutive hours:
```
temp_series_freq_2H = temp_series_freq_2H.mean()
```
Let's plot the result:
```
temp_series_freq_2H.plot(kind="bar")
plt.show()
```
Note how the values have automatically been aggregated into 2-hour periods. If we look at the 6-8pm period, for example, we had a value of `5.1` at 6:30pm, and `6.1` at 7:30pm. After resampling, we just have one value of `5.6`, which is the mean of `5.1` and `6.1`. Rather than computing the mean, we could have used any other aggregation function, for example we can decide to keep the minimum value of each period:
```
temp_series_freq_2H = temp_series.resample("2H").min()
temp_series_freq_2H
```
Or, equivalently, we could use the `apply()` method instead:
```
temp_series_freq_2H = temp_series.resample("2H").apply(np.min)
temp_series_freq_2H
```
## Upsampling and interpolation
This was an example of downsampling. We can also upsample (ie. increase the frequency), but this creates holes in our data:
```
temp_series_freq_15min = temp_series.resample("15Min").mean()
temp_series_freq_15min.head(n=10) # `head` displays the top n values
```
One solution is to fill the gaps by interpolating. We just call the `interpolate()` method. The default is to use linear interpolation, but we can also select another method, such as cubic interpolation:
```
temp_series_freq_15min = temp_series.resample("15Min").interpolate(method="cubic")
temp_series_freq_15min.head(n=10)
temp_series.plot(label="Period: 1 hour")
temp_series_freq_15min.plot(label="Period: 15 minutes")
plt.legend()
plt.show()
```
## Timezones
By default datetimes are *naive*: they are not aware of timezones, so 2016-10-30 02:30 might mean October 30th 2016 at 2:30am in Paris or in New York. We can make datetimes timezone *aware* by calling the `tz_localize()` method:
```
temp_series_ny = temp_series.tz_localize("America/New_York")
temp_series_ny
```
Note that `-04:00` is now appended to all the datetimes. This means that these datetimes refer to [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) - 4 hours.
We can convert these datetimes to Paris time like this:
```
temp_series_paris = temp_series_ny.tz_convert("Europe/Paris")
temp_series_paris
```
You may have noticed that the UTC offset changes from `+02:00` to `+01:00`: this is because France switches to winter time at 3am that particular night (time goes back to 2am). Notice that 2:30am occurs twice! Let's go back to a naive representation (if you log some data hourly using local time, without storing the timezone, you might get something like this):
```
temp_series_paris_naive = temp_series_paris.tz_localize(None)
temp_series_paris_naive
```
Now `02:30` is really ambiguous. If we try to localize these naive datetimes to the Paris timezone, we get an error:
```
try:
temp_series_paris_naive.tz_localize("Europe/Paris")
except Exception as e:
print(type(e))
print(e)
```
Fortunately using the `ambiguous` argument we can tell pandas to infer the right DST (Daylight Saving Time) based on the order of the ambiguous timestamps:
```
temp_series_paris_naive.tz_localize("Europe/Paris", ambiguous="infer")
```
## Periods
The `pd.period_range()` function returns a `PeriodIndex` instead of a `DatetimeIndex`. For example, let's get all quarters in 2016 and 2017:
```
quarters = pd.period_range('2016Q1', periods=8, freq='Q')
quarters
```
Adding a number `N` to a `PeriodIndex` shifts the periods by `N` times the `PeriodIndex`'s frequency:
```
quarters + 3
```
The `asfreq()` method lets us change the frequency of the `PeriodIndex`. All periods are lengthened or shortened accordingly. For example, let's convert all the quarterly periods to monthly periods (zooming in):
```
quarters.asfreq("M")
```
By default, the `asfreq` zooms on the end of each period. We can tell it to zoom on the start of each period instead:
```
quarters.asfreq("M", how="start")
```
And we can zoom out:
```
quarters.asfreq("A")
```
Of course we can create a `Series` with a `PeriodIndex`:
```
quarterly_revenue = pd.Series([300, 320, 290, 390, 320, 360, 310, 410], index = quarters)
quarterly_revenue
quarterly_revenue.plot(kind="line")
plt.show()
```
We can convert periods to timestamps by calling `to_timestamp`. By default this will give us the first day of each period, but by setting `how` and `freq`, we can get the last hour of each period:
```
last_hours = quarterly_revenue.to_timestamp(how="end", freq="H")
last_hours
```
And back to periods by calling `to_period`:
```
last_hours.to_period()
```
Pandas also provides many other time-related functions that we recommend you check out in the [documentation](http://pandas.pydata.org/pandas-docs/stable/timeseries.html). To whet your appetite, here is one way to get the last business day of each month in 2016, at 9am:
```
# All twelve months of 2016 as monthly Period objects.
months_2016 = pd.period_range("2016", periods=12, freq="M")
# asfreq("D") zooms to the last day of each month; +1 steps to the first
# day of the following month.
one_day_after_last_days = months_2016.asfreq("D") + 1
# Subtracting one business day lands on the last business day of each month.
last_bdays = one_day_after_last_days.to_timestamp() - pd.tseries.offsets.BDay()
# Convert to hourly periods and add 9 -> 9am on that business day.
last_bdays.to_period("H") + 9
```
# `DataFrame` objects
A DataFrame object represents a spreadsheet, with cell values, column names and row index labels. You can define expressions to compute columns based on other columns, create pivot-tables, group rows, draw graphs, etc. You can see `DataFrame`s as dictionaries of `Series`.
## Creating a `DataFrame`
You can create a DataFrame by passing a dictionary of `Series` objects:
```
# Build a DataFrame from a dict of Series; pandas aligns the Series on
# their index labels, filling gaps with NaN (e.g. charles has no hobby).
people_dict = {
    "weight": pd.Series([68, 83, 112], index=["alice", "bob", "charles"]),
    "birthyear": pd.Series([1984, 1985, 1992], index=["bob", "alice", "charles"], name="year"),
    "children": pd.Series([0, 3], index=["charles", "bob"]),
    "hobby": pd.Series(["Biking", "Dancing"], index=["alice", "bob"]),
}
people = pd.DataFrame(people_dict)
people
```
A few things to note:
* the `Series` were automatically aligned based on their index,
* missing values are represented as `NaN`,
* `Series` names are ignored (the name `"year"` was dropped),
* `DataFrame`s are displayed nicely in Jupyter notebooks, woohoo!
You can access columns pretty much as you would expect. They are returned as `Series` objects:
```
people["birthyear"]
```
You can also get multiple columns at once:
```
people[["birthyear", "hobby"]]
```
If you pass a list of columns and/or index row labels to the `DataFrame` constructor, it will guarantee that these columns and/or rows will exist, in that order, and no other column/row will exist. For example:
```
# Passing explicit columns/index forces exactly those labels to exist:
# unknown ones ("height", "eugene") appear filled with NaN, and labels not
# listed ("children", "hobby", "charles") are dropped.
d2 = pd.DataFrame(
        people_dict,
        columns=["birthyear", "weight", "height"],
        index=["bob", "alice", "eugene"]
     )
d2
```
Another convenient way to create a `DataFrame` is to pass all the values to the constructor as an `ndarray`, or a list of lists, and specify the column names and row index labels separately:
```
# Build the same table from a plain list of rows, supplying column names
# and row labels separately; np.nan marks the missing cells.
values = [
            [1985, np.nan, "Biking",   68],
            [1984, 3,      "Dancing",  83],
            [1992, 0,      np.nan,    112]
         ]
d3 = pd.DataFrame(
        values,
        columns=["birthyear", "children", "hobby", "weight"],
        index=["alice", "bob", "charles"]
     )
d3
```
To specify missing values, you can either use `np.nan` or NumPy's masked arrays:
```
# Same DataFrame as above, but missing entries are expressed with a NumPy
# masked array instead of np.nan.
# NOTE: the deprecated alias `np.object` was removed in NumPy 1.24; the
# builtin `object` is the correct dtype and behaves identically.
masked_array = np.ma.asarray(values, dtype=object)
# Mask row 0 / col 1 (alice's children) and row 2 / col 2 (charles's hobby).
masked_array[(0, 2), (1, 2)] = np.ma.masked
d3 = pd.DataFrame(
        masked_array,
        columns=["birthyear", "children", "hobby", "weight"],
        index=["alice", "bob", "charles"]
     )
d3
```
Instead of an `ndarray`, you can also pass a `DataFrame` object:
```
# A DataFrame can also seed another DataFrame; columns/index again select
# and order the labels to keep.
d4 = pd.DataFrame(
         d3,
         columns=["hobby", "children"],
         index=["alice", "bob"]
     )
d4
```
It is also possible to create a `DataFrame` with a dictionary (or list) of dictionaries (or list):
```
# Construction from a dict of dicts: outer keys become columns, inner keys
# become row labels; absent inner keys yield NaN cells.
people = pd.DataFrame({
    "birthyear": {"alice":1985, "bob": 1984, "charles": 1992},
    "hobby": {"alice":"Biking", "bob": "Dancing"},
    "weight": {"alice":68, "bob": 83, "charles": 112},
    "children": {"bob": 3, "charles": 0}
})
people
```
## Multi-indexing
If all columns are tuples of the same size, then they are understood as a multi-index. The same goes for row index labels. For example:
```
# Tuple keys of equal length are interpreted as a MultiIndex: here the
# columns get two levels (public/private, attribute) and the rows get two
# levels (city, name).
d5 = pd.DataFrame(
  {
    ("public", "birthyear"):
        {("Paris","alice"):1985, ("Paris","bob"): 1984, ("London","charles"): 1992},
    ("public", "hobby"):
        {("Paris","alice"):"Biking", ("Paris","bob"): "Dancing"},
    ("private", "weight"):
        {("Paris","alice"):68, ("Paris","bob"): 83, ("London","charles"): 112},
    ("private", "children"):
        {("Paris", "alice"):np.nan, ("Paris","bob"): 3, ("London","charles"): 0}
  }
)
d5
```
You can now get a `DataFrame` containing all the `"public"` columns very simply:
```
d5["public"]
d5["public", "hobby"] # Same result as d5["public"]["hobby"]
```
## Dropping a level
Let's look at `d5` again:
```
d5
```
There are two levels of columns, and two levels of indices. We can drop a column level by calling `droplevel()` (the same goes for indices):
```
d5.columns = d5.columns.droplevel(level = 0)
d5
```
## Transposing
You can swap columns and indices using the `T` attribute:
```
d6 = d5.T
d6
```
## Stacking and unstacking levels
Calling the `stack()` method will push the lowest column level after the lowest index:
```
d7 = d6.stack()
d7
```
Note that many `NaN` values appeared. This makes sense because many new combinations did not exist before (eg. there was no `bob` in `London`).
Calling `unstack()` will do the reverse, once again creating many `NaN` values.
```
d8 = d7.unstack()
d8
```
If we call `unstack` again, we end up with a `Series` object:
```
d9 = d8.unstack()
d9
```
The `stack()` and `unstack()` methods let you select the `level` to stack/unstack. You can even stack/unstack multiple levels at once:
```
d10 = d9.unstack(level = (0,1))
d10
```
## Most methods return modified copies
As you may have noticed, the `stack()` and `unstack()` methods do not modify the object they apply to. Instead, they work on a copy and return that copy. This is true of most methods in pandas.
## Accessing rows
Let's go back to the `people` `DataFrame`:
```
people
```
The `loc` attribute lets you access rows instead of columns. The result is a `Series` object in which the `DataFrame`'s column names are mapped to row index labels:
```
people.loc["charles"]
```
You can also access rows by integer location using the `iloc` attribute:
```
people.iloc[2]
```
You can also get a slice of rows, and this returns a `DataFrame` object:
```
people.iloc[1:3]
```
Finally, you can pass a boolean array to get the matching rows:
```
people[np.array([True, False, True])]
```
This is most useful when combined with boolean expressions:
```
people[people["birthyear"] < 1990]
```
## Adding and removing columns
You can generally treat `DataFrame` objects like dictionaries of `Series`, so the following work fine:
```
people
people["age"] = 2018 - people["birthyear"] # adds a new column "age"
people["over 30"] = people["age"] > 30 # adds another column "over 30"
birthyears = people.pop("birthyear")
del people["children"]
people
birthyears
```
When you add a new column, it must have the same number of rows. Missing rows are filled with NaN, and extra rows are ignored:
```
people["pets"] = pd.Series({"bob": 0, "charles": 5, "eugene":1}) # alice is missing, eugene is ignored
people
```
When adding a new column, it is added at the end (on the right) by default. You can also insert a column anywhere else using the `insert()` method:
```
people.insert(1, "height", [172, 181, 185])
people
```
## Assigning new columns
You can also create new columns by calling the `assign()` method. Note that this returns a new `DataFrame` object, the original is not modified:
```
people.assign(
body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
has_pets = people["pets"] > 0
)
```
Note that you cannot access columns created within the same assignment:
```
try:
people.assign(
body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
overweight = people["body_mass_index"] > 25
)
except KeyError as e:
print("Key error:", e)
```
The solution is to split this assignment in two consecutive assignments:
```
d6 = people.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
d6.assign(overweight = d6["body_mass_index"] > 25)
```
Having to create a temporary variable `d6` is not very convenient. You may want to just chain the assignment calls, but it does not work because the `people` object is not actually modified by the first assignment:
```
try:
(people
.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
.assign(overweight = people["body_mass_index"] > 25)
)
except KeyError as e:
print("Key error:", e)
```
But fear not, there is a simple solution. You can pass a function to the `assign()` method (typically a `lambda` function), and this function will be called with the `DataFrame` as a parameter:
```
(people
.assign(body_mass_index = lambda df: df["weight"] / (df["height"] / 100) ** 2)
.assign(overweight = lambda df: df["body_mass_index"] > 25)
)
```
Problem solved!
## Evaluating an expression
A great feature supported by pandas is expression evaluation. This relies on the `numexpr` library which must be installed.
```
people.eval("weight / (height/100) ** 2 > 25")
```
Assignment expressions are also supported. Let's set `inplace=True` to directly modify the `DataFrame` rather than getting a modified copy:
```
people.eval("body_mass_index = weight / (height/100) ** 2", inplace=True)
people
```
You can use a local or global variable in an expression by prefixing it with `'@'`:
```
overweight_threshold = 30
people.eval("overweight = body_mass_index > @overweight_threshold", inplace=True)
people
```
## Querying a `DataFrame`
The `query()` method lets you filter a `DataFrame` based on a query expression:
```
people.query("age > 30 and pets == 0")
```
## Sorting a `DataFrame`
You can sort a `DataFrame` by calling its `sort_index` method. By default it sorts the rows by their index label, in ascending order, but let's reverse the order:
```
people.sort_index(ascending=False)
```
Note that `sort_index` returned a sorted *copy* of the `DataFrame`. To modify `people` directly, we can set the `inplace` argument to `True`. Also, we can sort the columns instead of the rows by setting `axis=1`:
```
people.sort_index(axis=1, inplace=True)
people
```
To sort the `DataFrame` by the values instead of the labels, we can use `sort_values` and specify the column to sort by:
```
people.sort_values(by="age", inplace=True)
people
```
## Plotting a `DataFrame`
Just like for `Series`, pandas makes it easy to draw nice graphs based on a `DataFrame`.
For example, it is trivial to create a line plot from a `DataFrame`'s data by calling its `plot` method:
```
people.plot(kind = "line", x = "body_mass_index", y = ["height", "weight"])
plt.show()
```
You can pass extra arguments supported by matplotlib's functions. For example, we can create scatterplot and pass it a list of sizes using the `s` argument of matplotlib's `scatter()` function:
```
people.plot(kind = "scatter", x = "height", y = "weight", s=[40, 120, 200])
plt.show()
```
Again, there are way too many options to list here: the best option is to scroll through the [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) page in pandas' documentation, find the plot you are interested in and look at the example code.
## Operations on `DataFrame`s
Although `DataFrame`s do not try to mimick NumPy arrays, there are a few similarities. Let's create a `DataFrame` to demonstrate this:
```
grades_array = np.array([[8,8,9],[10,9,9],[4, 8, 2], [9, 10, 10]])
grades = pd.DataFrame(grades_array, columns=["sep", "oct", "nov"], index=["alice","bob","charles","darwin"])
grades
```
You can apply NumPy mathematical functions on a `DataFrame`: the function is applied to all values:
```
np.sqrt(grades)
```
Similarly, adding a single value to a `DataFrame` will add that value to all elements in the `DataFrame`. This is called *broadcasting*:
```
grades + 1
```
Of course, the same is true for all other binary operations, including arithmetic (`*`,`/`,`**`...) and conditional (`>`, `==`...) operations:
```
grades >= 5
```
Aggregation operations, such as computing the `max`, the `sum` or the `mean` of a `DataFrame`, apply to each column, and you get back a `Series` object:
```
grades.mean()
```
The `all` method is also an aggregation operation: it checks whether all values are `True` or not. Let's see during which months all students got a grade greater than `5`:
```
(grades > 5).all()
```
Most of these functions take an optional `axis` parameter which lets you specify along which axis of the `DataFrame` you want the operation executed. The default is `axis=0`, meaning that the operation is executed vertically (on each column). You can set `axis=1` to execute the operation horizontally (on each row). For example, let's find out which students had all grades greater than `5`:
```
(grades > 5).all(axis = 1)
```
The `any` method returns `True` if any value is True. Let's see who got at least one grade 10:
```
(grades == 10).any(axis = 1)
```
If you add a `Series` object to a `DataFrame` (or execute any other binary operation), pandas attempts to broadcast the operation to all *rows* in the `DataFrame`. This only works if the `Series` has the same size as the `DataFrame`'s rows. For example, let's subtract the `mean` of the `DataFrame` (a `Series` object) from the `DataFrame`:
```
grades - grades.mean() # equivalent to: grades - [7.75, 8.75, 7.50]
```
We subtracted `7.75` from all September grades, `8.75` from October grades and `7.50` from November grades. It is equivalent to subtracting this `DataFrame`:
```
pd.DataFrame([[7.75, 8.75, 7.50]]*4, index=grades.index, columns=grades.columns)
```
If you want to subtract the global mean from every grade, here is one way to do it:
```
grades - grades.values.mean() # substracts the global mean (8.00) from all grades
```
## Automatic alignment
Similar to `Series`, when operating on multiple `DataFrame`s, pandas automatically aligns them by row index label, but also by column names. Let's create a `DataFrame` with bonus points for each person from October to December:
```
bonus_array = np.array([[0,np.nan,2],[np.nan,1,0],[0, 1, 0], [3, 3, 0]])
bonus_points = pd.DataFrame(bonus_array, columns=["oct", "nov", "dec"], index=["bob","colin", "darwin", "charles"])
bonus_points
grades + bonus_points
```
Looks like the addition worked in some cases but way too many elements are now empty. That's because when aligning the `DataFrame`s, some columns and rows were only present on one side, and thus they were considered missing on the other side (`NaN`). Then adding `NaN` to a number results in `NaN`, hence the result.
## Handling missing data
Dealing with missing data is a frequent task when working with real life data. Pandas offers a few tools to handle missing data.
Let's try to fix the problem above. For example, we can decide that missing data should result in a zero, instead of `NaN`. We can replace all `NaN` values with any value using the `fillna()` method:
```
(grades + bonus_points).fillna(0)
```
It's a bit unfair that we're setting grades to zero in September, though. Perhaps we should decide that missing grades are missing grades, but missing bonus points should be replaced by zeros:
```
fixed_bonus_points = bonus_points.fillna(0)
fixed_bonus_points.insert(0, "sep", 0)
fixed_bonus_points.loc["alice"] = 0
grades + fixed_bonus_points
```
That's much better: although we made up some data, we have not been too unfair.
Another way to handle missing data is to interpolate. Let's look at the `bonus_points` `DataFrame` again:
```
bonus_points
```
Now let's call the `interpolate` method. By default, it interpolates vertically (`axis=0`), so let's tell it to interpolate horizontally (`axis=1`).
```
bonus_points.interpolate(axis=1)
```
Bob had 0 bonus points in October, and 2 in December. When we interpolate for November, we get the mean: 1 bonus point. Colin had 1 bonus point in November, but we do not know how many bonus points he had in September, so we cannot interpolate, this is why there is still a missing value in October after interpolation. To fix this, we can set the September bonus points to 0 before interpolation.
```
better_bonus_points = bonus_points.copy()
better_bonus_points.insert(0, "sep", 0)
better_bonus_points.loc["alice"] = 0
better_bonus_points = better_bonus_points.interpolate(axis=1)
better_bonus_points
```
Great, now we have reasonable bonus points everywhere. Let's find out the final grades:
```
grades + better_bonus_points
```
It is slightly annoying that the September column ends up on the right. This is because the `DataFrame`s we are adding do not have the exact same columns (the `grades` `DataFrame` is missing the `"dec"` column), so to make things predictable, pandas orders the final columns alphabetically. To fix this, we can simply add the missing column before adding:
```
grades["dec"] = np.nan
final_grades = grades + better_bonus_points
final_grades
```
There's not much we can do about December and Colin: it's bad enough that we are making up bonus points, but we can't reasonably make up grades (well I guess some teachers probably do). So let's call the `dropna()` method to get rid of rows that are full of `NaN`s:
```
final_grades_clean = final_grades.dropna(how="all")
final_grades_clean
```
Now let's remove columns that are full of `NaN`s by setting the `axis` argument to `1`:
```
final_grades_clean = final_grades_clean.dropna(axis=1, how="all")
final_grades_clean
```
## Aggregating with `groupby`
Similar to the SQL language, pandas allows grouping your data into groups to run calculations over each group.
First, let's add some extra data about each person so we can group them, and let's go back to the `final_grades` `DataFrame` so we can see how `NaN` values are handled:
```
final_grades["hobby"] = ["Biking", "Dancing", np.nan, "Dancing", "Biking"]
final_grades
```
Now let's group data in this `DataFrame` by hobby:
```
grouped_grades = final_grades.groupby("hobby")
grouped_grades
```
We are ready to compute the average grade per hobby:
```
grouped_grades.mean()
```
That was easy! Note that the `NaN` values have simply been skipped when computing the means.
## Pivot tables
Pandas supports spreadsheet-like [pivot tables](https://en.wikipedia.org/wiki/Pivot_table) that allow quick data summarization. To illustrate this, let's create a simple `DataFrame`:
```
bonus_points
more_grades = final_grades_clean.stack().reset_index()
more_grades.columns = ["name", "month", "grade"]
more_grades["bonus"] = [np.nan, np.nan, np.nan, 0, np.nan, 2, 3, 3, 0, 0, 1, 0]
more_grades
```
Now we can call the `pd.pivot_table()` function for this `DataFrame`, asking to group by the `name` column. By default, `pivot_table()` computes the mean of each numeric column:
```
pd.pivot_table(more_grades, index="name")
```
We can change the aggregation function by setting the `aggfunc` argument, and we can also specify the list of columns whose values will be aggregated:
```
pd.pivot_table(more_grades, index="name", values=["grade","bonus"], aggfunc=np.max)
```
We can also specify the `columns` to aggregate over horizontally, and request the grand totals for each row and column by setting `margins=True`:
```
pd.pivot_table(more_grades, index="name", values="grade", columns="month", margins=True)
```
Finally, we can specify multiple index or column names, and pandas will create multi-level indices:
```
pd.pivot_table(more_grades, index=("name", "month"), margins=True)
```
## Overview functions
When dealing with large `DataFrames`, it is useful to get a quick overview of its content. Pandas offers a few functions for this. First, let's create a large `DataFrame` with a mix of numeric values, missing values and text values. Notice how Jupyter displays only the corners of the `DataFrame`:
```
much_data = np.fromfunction(lambda x,y: (x+y*y)%17*11, (10000, 26))
large_df = pd.DataFrame(much_data, columns=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
large_df[large_df % 16 == 0] = np.nan
large_df.insert(3,"some_text", "Blabla")
large_df
```
The `head()` method returns the top 5 rows:
```
large_df.head()
```
Of course there's also a `tail()` function to view the bottom 5 rows. You can pass the number of rows you want:
```
large_df.tail(n=2)
```
The `info()` method prints out a summary of each column's contents:
```
large_df.info()
```
Finally, the `describe()` method gives a nice overview of the main aggregated values over each column:
* `count`: number of non-null (not NaN) values
* `mean`: mean of non-null values
* `std`: [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation) of non-null values
* `min`: minimum of non-null values
* `25%`, `50%`, `75%`: 25th, 50th and 75th [percentile](https://en.wikipedia.org/wiki/Percentile) of non-null values
* `max`: maximum of non-null values
```
large_df.describe()
```
# Saving & loading
Pandas can save `DataFrame`s to various backends, including file formats such as CSV, Excel, JSON, HTML and HDF5, or to a SQL database. Let's create a `DataFrame` to demonstrate this:
```
my_df = pd.DataFrame(
[["Biking", 68.5, 1985, np.nan], ["Dancing", 83.1, 1984, 3]],
columns=["hobby","weight","birthyear","children"],
index=["alice", "bob"]
)
my_df
```
## Saving
Let's save it to CSV, HTML and JSON:
```
my_df.to_csv("my_df.csv")
my_df.to_html("my_df.html")
my_df.to_json("my_df.json")
```
Done! Let's take a peek at what was saved:
```
for filename in ("my_df.csv", "my_df.html", "my_df.json"):
print("#", filename)
with open(filename, "rt") as f:
print(f.read())
print()
```
Note that the index is saved as the first column (with no name) in a CSV file, as `<th>` tags in HTML and as keys in JSON.
Saving to other formats works very similarly, but some formats require extra libraries to be installed. For example, saving to Excel requires the openpyxl library:
```
try:
my_df.to_excel("my_df.xlsx", sheet_name='People')
except ImportError as e:
print(e)
```
## Loading
Now let's load our CSV file back into a `DataFrame`:
```
my_df_loaded = pd.read_csv("my_df.csv", index_col=0)
my_df_loaded
```
As you might guess, there are similar `read_json`, `read_html`, `read_excel` functions as well. We can also read data straight from the Internet. For example, let's load all U.S. cities from [simplemaps.com](http://simplemaps.com/):
```
us_cities = None
try:
csv_url = "http://simplemaps.com/files/cities.csv"
us_cities = pd.read_csv(csv_url, index_col=0)
us_cities = us_cities.head()
except IOError as e:
print(e)
us_cities
```
There are more options available, in particular regarding datetime format. Check out the [documentation](http://pandas.pydata.org/pandas-docs/stable/io.html) for more details.
# Combining `DataFrame`s
## SQL-like joins
One powerful feature of pandas is its ability to perform SQL-like joins on `DataFrame`s. Various types of joins are supported: inner joins, left/right outer joins and full joins. To illustrate this, let's start by creating a couple of simple `DataFrame`s:
```
city_loc = pd.DataFrame(
[
["CA", "San Francisco", 37.781334, -122.416728],
["NY", "New York", 40.705649, -74.008344],
["FL", "Miami", 25.791100, -80.320733],
["OH", "Cleveland", 41.473508, -81.739791],
["UT", "Salt Lake City", 40.755851, -111.896657]
], columns=["state", "city", "lat", "lng"])
city_loc
city_pop = pd.DataFrame(
[
[808976, "San Francisco", "California"],
[8363710, "New York", "New-York"],
[413201, "Miami", "Florida"],
[2242193, "Houston", "Texas"]
], index=[3,4,5,6], columns=["population", "city", "state"])
city_pop
```
Now let's join these `DataFrame`s using the `merge()` function:
```
pd.merge(left=city_loc, right=city_pop, on="city")
```
Note that both `DataFrame`s have a column named `state`, so in the result they got renamed to `state_x` and `state_y`.
Also, note that Cleveland, Salt Lake City and Houston were dropped because they don't exist in *both* `DataFrame`s. This is the equivalent of a SQL `INNER JOIN`. If you want a `FULL OUTER JOIN`, where no city gets dropped and `NaN` values are added, you must specify `how="outer"`:
```
all_cities = pd.merge(left=city_loc, right=city_pop, on="city", how="outer")
all_cities
```
Of course `LEFT OUTER JOIN` is also available by setting `how="left"`: only the cities present in the left `DataFrame` end up in the result. Similarly, with `how="right"` only cities in the right `DataFrame` appear in the result. For example:
```
pd.merge(left=city_loc, right=city_pop, on="city", how="right")
```
If the key to join on is actually in one (or both) `DataFrame`'s index, you must use `left_index=True` and/or `right_index=True`. If the key column names differ, you must use `left_on` and `right_on`. For example:
```
city_pop2 = city_pop.copy()
city_pop2.columns = ["population", "name", "state"]
pd.merge(left=city_loc, right=city_pop2, left_on="city", right_on="name")
```
## Concatenation
Rather than joining `DataFrame`s, we may just want to concatenate them. That's what `concat()` is for:
```
result_concat = pd.concat([city_loc, city_pop])
result_concat
```
Note that this operation aligned the data horizontally (by columns) but not vertically (by rows). In this example, we end up with multiple rows having the same index (eg. 3). Pandas handles this rather gracefully:
```
result_concat.loc[3]
```
Or you can tell pandas to just ignore the index:
```
pd.concat([city_loc, city_pop], ignore_index=True)
```
Notice that when a column does not exist in a `DataFrame`, it acts as if it was filled with `NaN` values. If we set `join="inner"`, then only columns that exist in *both* `DataFrame`s are returned:
```
pd.concat([city_loc, city_pop], join="inner")
```
You can concatenate `DataFrame`s horizontally instead of vertically by setting `axis=1`:
```
pd.concat([city_loc, city_pop], axis=1)
```
In this case it really does not make much sense because the indices do not align well (eg. Cleveland and San Francisco end up on the same row, because they shared the index label `3`). So let's reindex the `DataFrame`s by city name before concatenating:
```
pd.concat([city_loc.set_index("city"), city_pop.set_index("city")], axis=1)
```
This looks a lot like a `FULL OUTER JOIN`, except that the `state` columns were not renamed to `state_x` and `state_y`, and the `city` column is now the index.
The `append()` method is a useful shorthand for concatenating `DataFrame`s vertically:
```
city_loc.append(city_pop)
```
As always in pandas, the `append()` method does *not* actually modify `city_loc`: it works on a copy and returns the modified copy.
# Categories
It is quite frequent to have values that represent categories, for example `1` for female and `2` for male, or `"A"` for Good, `"B"` for Average, `"C"` for Bad. These categorical values can be hard to read and cumbersome to handle, but fortunately pandas makes it easy. To illustrate this, let's take the `city_pop` `DataFrame` we created earlier, and add a column that represents a category:
```
city_eco = city_pop.copy()
city_eco["eco_code"] = [17, 17, 34, 20]
city_eco
```
Right now the `eco_code` column is full of apparently meaningless codes. Let's fix that. First, we will create a new categorical column based on the `eco_code`s:
```
city_eco["economy"] = city_eco["eco_code"].astype('category')
city_eco["economy"].cat.categories
```
Now we can give each category a meaningful name:
```
city_eco["economy"].cat.categories = ["Finance", "Energy", "Tourism"]
city_eco
```
Note that categorical values are sorted according to their categorical order, *not* their alphabetical order:
```
city_eco.sort_values(by="economy", ascending=False)
```
# What next?
As you probably noticed by now, pandas is quite a large library with *many* features. Although we went through the most important features, there is still a lot to discover. Probably the best way to learn more is to get your hands dirty with some real-life data. It is also a good idea to go through pandas' excellent [documentation](http://pandas.pydata.org/pandas-docs/stable/index.html), in particular the [Cookbook](http://pandas.pydata.org/pandas-docs/stable/cookbook.html).
| github_jupyter |
# Bayesian Temporal Matrix Factorization
**Published**: October 8, 2019
**Author**: Xinyu Chen [[**GitHub homepage**](https://github.com/xinychen)]
**Download**: This Jupyter notebook is at our GitHub repository. If you want to evaluate the code, please download the notebook from the repository of [**tensor-learning**](https://github.com/xinychen/tensor-learning/blob/master/content/BTMF.ipynb).
## Abstract
Large-scale and multidimensional spatiotemporal data sets are becoming ubiquitous in many real-world applications such as monitoring traffic and air quality. Making predictions on these time series has become a critical challenge due to not only the large-scale and high-dimensional nature but also the considerable amount of missing data. In this work, we propose a Bayesian Temporal Matrix Factorization (BTMF) model for modeling multidimensional time series - and in particular spatiotemporal data - in the presence of missing data. By integrating low-rank matrix factorization and vector autoregressive (VAR) process into a single probabilistic graphical model, our model can effectively perform predictions without imputing those missing values. We develop efficient Gibbs sampling algorithms for model inference and test the proposed BTMF on several real-world spatiotemporal data sets for both missing data imputation and short-term rolling prediction tasks. This post is mainly about BTMF models and their **`Python`** implementation with an application of spatiotemporal data imputation.
## 1 Motivation
## 2 Problem Description
We assume a spatiotemporal setting for multidimensional time series data throughout this work. In general, modern spatiotemporal data sets collected from sensor networks can be organized as matrix time series. For example, we can denote by matrix $Y\in\mathbb{R}^{N\times T}$ a multivariate time series collected from $N$ locations/sensors on $T$ time stamps, with each row $$\boldsymbol{y}_{i}=\left(y_{i,1},y_{i,2},...,y_{i,t-1},y_{i,t},y_{i,t+1},...,y_{i,T}\right)$$
corresponding to the time series collected at location $i$.
As mentioned, making accurate predictions on incomplete time series is very challenging, while missing data problem is almost inevitable in real-world applications. Figure 1 illustrates the prediction problem for incomplete time series data. Here we use $(i,t)\in\Omega$ to index the observed entries in matrix $Y$.
<img src="../images/graphical_matrix_time_series.png" alt="drawing" width="500"/>
> **Figure 1**: Illustration of multivariate time series and the prediction problem in the presence of missing values (green: observed data; white: missing data; red: prediction).
## 3 Model Description
Given a partially observed spatiotemporal matrix $Y\in\mathbb{R}^{N \times T}$, one can factorize it into a spatial factor matrix $W\in\mathbb{R}^{R \times N}$ and a temporal factor matrix $X\in\mathbb{R}^{R \times T}$ following general matrix factorization model:
\begin{equation}
Y\approx W^{\top}X,
\label{btmf_equation1}
\end{equation}
and element-wise, we have
\begin{equation}
y_{it}\approx \boldsymbol{w}_{i}^\top\boldsymbol{x}_{t}, \quad \forall (i,t),
\label{btmf_equation2}
\end{equation}
where vectors $\boldsymbol{w}_{i}$ and $\boldsymbol{x}_{t}$ refer to the $i$-th column of $W$ and the $t$-th column of $X$, respectively.
The standard matrix factorization model is a good approach to deal with the missing data problem; however, it cannot capture the dependencies among different columns in $X$, which are critical in modeling time series data. To better characterize the temporal dependencies and impose temporal smoothness, a novel AR regularizer is introduced on $X$ in TRMF (i.e., Temporal Regularizer Matrix Factorization proposed by [Yu et al., 2016](https://www.cs.utexas.edu/~rofuyu/papers/tr-mf-nips.pdf)):
\begin{equation} \label{equ:VAR}
\begin{aligned}
\boldsymbol{x}_{t+1}&=\sum\nolimits_{k=1}^{d}A_{k}\boldsymbol{x}_{t+1-h_k}+\boldsymbol{\epsilon}_t, \\
&=A^\top \boldsymbol{v}_{t+1}+\boldsymbol{\epsilon}_{t}, \\
\end{aligned}
\end{equation}
where $\mathcal{L}=\left\{h_1,\ldots,h_k,\ldots,h_d\right\}$ is a lag set ($d$ is the order of this AR model), each $A_k$ ($k\in\left\{1,...,d\right\}$) is a $R\times R$ coefficient matrix, and $\boldsymbol{\epsilon}_t$ is a zero mean Gaussian noise vector. For brevity, matrix $A\in \mathbb{R}^{(R d) \times R}$ and vector $\boldsymbol{v}_{t+1}\in \mathbb{R}^{(R d) \times 1}$ are defined as
\begin{equation*}
A=\left[A_{1}, \ldots, A_{d}\right]^{\top} ,\quad \boldsymbol{v}_{t+1}=\left[\begin{array}{c}{\boldsymbol{x}_{t+1-h_1}} \\ {\vdots} \\ {\boldsymbol{x}_{t+1-h_d}}\end{array}\right] .
\end{equation*}
<img src="../images/rolling_prediction.png" alt="drawing" width="400"/>
> **Figure 2**: A graphical illustration of the rolling prediction scheme using BTMF (with VAR process) (green: observed data; white: missing data; red: prediction).
In [Yu et al., 2016](https://www.cs.utexas.edu/~rofuyu/papers/tr-mf-nips.pdf), to avoid overfitting and reduce the number of parameters, the coefficient matrix in TRMF is further assumed to be a diagonal $A_k=\text{diag}(\boldsymbol{\theta}_{k})$. Therefore, they have
\begin{equation} \label{equ:AR}
\boldsymbol{x}_{t+1}=\boldsymbol{\theta}_{1}\circledast\boldsymbol{x}_{t+1-h_1}+\cdots+\boldsymbol{\theta}_{d}\circledast\boldsymbol{x}_{t+1-h_d}+\boldsymbol{\epsilon}_t,
\end{equation}
where the symbol $\circledast$ denotes the element-wise Hadamard product. However, unlike Equation (4), a vector autoregressive (VAR) model in Equation (3) is actually more powerful for capturing multivariate time series patterns.
<img src="../images/rolling_prediction_strategy.png" alt="drawing" width="400"/>
> **Figure 3**: A graphical illustration of the rolling prediction scheme using BTMF (with AR process) (green: observed data; white: missing data; red: prediction).
In the following, we first introduce a Bayesian temporal matrix factorization model with an autoregressive model given in Equation (4), and then discuss another model with a vector autoregressive (VAR) model shown in Equation (3).
## 4 Bayesian Sequential Matrix Factorization (BSMF)
## 5 Bayesian Temporal Matrix Factorization with Vector Autoregressive Model
### 5.1 Model Specification
Following the general Bayesian probabilistic matrix factorization models (e.g., BPMF proposed by [Salakhutdinov & Mnih, 2008](https://www.cs.toronto.edu/~amnih/papers/bpmf.pdf)), we assume that each observed entry in $Y$ follows a Gaussian distribution with precision $\tau$:
\begin{equation}
y_{i,t}\sim\mathcal{N}\left(\boldsymbol{w}_i^\top\boldsymbol{x}_t,\tau^{-1}\right),\quad \left(i,t\right)\in\Omega.
\label{btmf_equation3}
\end{equation}
On the spatial dimension, we use a simple Gaussian factor matrix without imposing any dependencies explicitly:
\begin{equation}
\boldsymbol{w}_i\sim\mathcal{N}\left(\boldsymbol{\mu}_{w},\Lambda_w^{-1}\right),
\end{equation}
and we place a conjugate Gaussian-Wishart prior on the mean vector and the precision matrix:
\begin{equation}
\boldsymbol{\mu}_w | \Lambda_w \sim\mathcal{N}\left(\boldsymbol{\mu}_0,(\beta_0\Lambda_w)^{-1}\right),\Lambda_w\sim\mathcal{W}\left(W_0,\nu_0\right),
\end{equation}
where $\boldsymbol{\mu}_0\in \mathbb{R}^{R}$ is a mean vector, $\mathcal{W}\left(W_0,\nu_0\right)$ is a Wishart distribution with a $R\times R$ scale matrix $W_0$ and $\nu_0$ degrees of freedom.
In modeling the temporal factor matrix $X$, we re-write the VAR process as:
\begin{equation}
\begin{aligned}
\boldsymbol{x}_{t}&\sim\begin{cases}
\mathcal{N}\left(\boldsymbol{0},I_R\right),&\text{if $t\in\left\{1,2,...,h_d\right\}$}, \\
\mathcal{N}\left(A^\top \boldsymbol{v}_{t},\Sigma\right),&\text{otherwise},\\
\end{cases}\\
\end{aligned}
\label{btmf_equation5}
\end{equation}
Since the mean vector is defined by VAR, we need to place the conjugate matrix normal inverse Wishart (MNIW) prior on the coefficient matrix $A$ and the covariance matrix $\Sigma$ as follows,
\begin{equation}
\begin{aligned}
A\sim\mathcal{MN}_{(Rd)\times R}\left(M_0,\Psi_0,\Sigma\right),\quad
\Sigma \sim\mathcal{IW}\left(S_0,\nu_0\right), \\
\end{aligned}
\end{equation}
where the probability density function for the $Rd$-by-$R$ random matrix $A$ has the form:
\begin{equation}
\begin{aligned}
&p\left(A\mid M_0,\Psi_0,\Sigma\right) \\
=&\left(2\pi\right)^{-R^2d/2}\left|\Psi_0\right|^{-R/2}\left|\Sigma\right|^{-Rd/2} \\
&\times \exp\left(-\frac{1}{2}\text{tr}\left[\Sigma^{-1}\left(A-M_0\right)^{\top}\Psi_{0}^{-1}\left(A-M_0\right)\right]\right), \\
\end{aligned}
\label{mnpdf}
\end{equation}
where $\Psi_0\in\mathbb{R}^{(Rd)\times (Rd)}$ and $\Sigma\in\mathbb{R}^{R\times R}$ are played as covariance matrices.
For the only remaining parameter $\tau$, we place a Gamma prior $\tau\sim\text{Gamma}\left(\alpha,\beta\right)$ where $\alpha$ and $\beta$ are the shape and rate parameters, respectively.
The above specifies the full generative process of BTMF, and we could also see the Bayesian graphical model shown in Figure 4. Several parameters are introduced to define the prior distributions for hyperparameters, including $\boldsymbol{\mu}_{0}$, $W_0$, $\nu_0$, $\beta_0$, $\alpha$, $\beta$, $M_0$, $\Psi_0$, and $S_0$. These parameters need to provided in advance when training the model. However, it should be noted that the specification of these parameters has little impact on the final results, as the training data will play a much more important role in defining the posteriors of the hyperparameters.
<img src="../images/btmf_net.png" alt="drawing" width="450"/>
> **Figure 4**: An overview graphical model of BTMF (time lag set: $\left\{1,2,...,d\right\}$). The shaded nodes ($y_{i,t}$) are the observed data in $\Omega$.
### 5.2 Model Inference
Given the complex structure of BTMF, it is intractable to write down the posterior distribution. Here we rely on the MCMC technique for Bayesian learning. In detail, we introduce a Gibbs sampling algorithm by deriving the full conditional distributions for all parameters and hyperparameters. Thanks to the use of conjugate priors in Figure 4, we can actually write down all the conditional distributions analytically. Below we summarize the Gibbs sampling procedure.
#### 1) Sampling Factor Matrix $W$ and Its Hyperparameters
> For programming convenience, we use $W\in\mathbb{R}^{N\times R}$ to replace $W\in\mathbb{R}^{R\times N}$.
```
import numpy as np
from numpy.linalg import inv as inv
from numpy.random import multivariate_normal as mvnrnd
from scipy.stats import wishart
def cov_mat(mat):
    """Return the (unnormalized) scatter matrix of the rows of ``mat``.

    Rows are centered by the column means first, so the result is
    sum_t (m_t - m_bar)(m_t - m_bar)^T, an R-by-R matrix for T-by-R input.
    """
    centered = mat - mat.mean(axis = 0)
    return centered.T @ centered
def sample_factor_w(sparse_mat, binary_mat, W, X, tau):
    """Sampling N-by-R factor matrix W and its hyperparameters (mu_w, Lambda_w).

    One Gibbs step: first draw the Gaussian-Wishart hyperparameters
    (var_mu_hyper, var_Lambda_hyper) from their conditional given the current
    W, then redraw each row w_i from its Gaussian conditional given the
    observed entries of row i in sparse_mat (zeros are treated as missing).
    W is updated in place and also returned.

    NOTE(review): binary_mat is accepted but never used in this function.
    """
    dim1, rank = W.shape
    beta0 = 1  # hard-coded prior precision scaling (beta_0 in the model)
    W_bar = np.mean(W, axis = 0)
    # Posterior mean of mu_w (prior mean mu_0 = 0 is hard-coded)
    var_mu_hyper = (dim1 * W_bar) / (dim1 + beta0)
    # Inverse scale matrix of the Wishart posterior (prior scale W_0 = I hard-coded)
    var_W_hyper = inv(np.eye(rank) + cov_mat(W) + dim1 * beta0 / (dim1 + beta0) * np.outer(W_bar, W_bar))
    var_Lambda_hyper = wishart(df = dim1 + rank, scale = var_W_hyper, seed = None).rvs()
    var_mu_hyper = mvnrnd(var_mu_hyper, inv((dim1 + beta0) * var_Lambda_hyper))
    for i in range(dim1):
        pos0 = np.where(sparse_mat[i, :] != 0)  # observed time indices of row i
        Xt = X[pos0[0], :]
        # Gaussian conditional: precision = tau * Xt^T Xt + Lambda_w,
        # mean = precision^{-1} (tau * Xt^T y_i + Lambda_w mu_w)
        var_mu = tau * np.matmul(Xt.T, sparse_mat[i, pos0[0]]) + np.matmul(var_Lambda_hyper, var_mu_hyper)
        inv_var_Lambda = inv(tau * np.matmul(Xt.T, Xt) + var_Lambda_hyper)
        W[i, :] = mvnrnd(np.matmul(inv_var_Lambda, var_mu), inv_var_Lambda)
    return W
```
#### 2) Sampling VAR Coefficients $A$ and Its Hyperparameters
**Foundations of VAR**
Vector autoregression (VAR) is a multivariate extension of autoregression (AR). Formally, VAR for $R$-dimensional vectors $\boldsymbol{x}_{t}$ can be written as follows,
\begin{equation}
\begin{aligned}
\boldsymbol{x}_{t}&=A_{1} \boldsymbol{x}_{t-h_1}+\cdots+A_{d} \boldsymbol{x}_{t-h_d}+\boldsymbol{\epsilon}_{t}, \\
&= A^\top \boldsymbol{v}_{t}+\boldsymbol{\epsilon}_{t},~t=h_d+1, \ldots, T, \\
\end{aligned}
\end{equation}
where
\begin{equation}
A=\left[A_{1}, \ldots, A_{d}\right]^{\top} \in \mathbb{R}^{(R d) \times R},\quad \boldsymbol{v}_{t}=\left[\begin{array}{c}{\boldsymbol{x}_{t-h_1}} \\ {\vdots} \\ {\boldsymbol{x}_{t-h_d}}\end{array}\right] \in \mathbb{R}^{(R d) \times 1}.
\end{equation}
In the following, if we define
\begin{equation}
Z=\left[\begin{array}{c}{\boldsymbol{x}_{h_d+1}^{\top}} \\ {\vdots} \\ {\boldsymbol{x}_{T}^{\top}}\end{array}\right] \in \mathbb{R}^{(T-h_d) \times R},\quad Q=\left[\begin{array}{c}{\boldsymbol{v}_{h_d+1}^{\top}} \\ {\vdots} \\ {\boldsymbol{v}_{T}^{\top}}\end{array}\right] \in \mathbb{R}^{(T-h_d) \times(R d)},
\end{equation}
then, we could write the above mentioned VAR as
\begin{equation}
\underbrace{Z}_{(T-h_d)\times R}\approx \underbrace{Q}_{(T-h_d)\times (Rd)}\times \underbrace{A}_{(Rd)\times R}.
\end{equation}
> To include temporal factors $\boldsymbol{x}_{t},t=1,...,h_d$, we also define $$Z_0=\left[\begin{array}{c}{\boldsymbol{x}_{1}^{\top}} \\ {\vdots} \\ {\boldsymbol{x}_{h_d}^{\top}}\end{array}\right] \in \mathbb{R}^{h_d \times R}.$$
**Build a Bayesian VAR on temporal factors $\boldsymbol{x}_{t}$**
\begin{equation}
\begin{aligned}
\boldsymbol{x}_{t}&\sim\begin{cases}\mathcal{N}\left(A^\top \boldsymbol{v}_{t},\Sigma\right),~\text{if $t\in\left\{h_d+1,...,T\right\}$},\\{\mathcal{N}\left(\boldsymbol{0},I_R\right),~\text{otherwise}}.\end{cases}\\
A&\sim\mathcal{MN}_{(Rd)\times R}\left(M_0,\Psi_0,\Sigma\right), \\
\Sigma &\sim\mathcal{IW}\left(S_0,\nu_0\right), \\
\end{aligned}
\end{equation}
where
\begin{equation}
\begin{aligned}
&\mathcal{M N}_{(R d) \times R}\left(A | M_{0}, \Psi_{0}, \Sigma\right)\\
\propto|&\Sigma|^{-R d / 2} \exp \left(-\frac{1}{2} \operatorname{tr}\left[\Sigma^{-1}\left(A-M_{0}\right)^{\top} \Psi_{0}^{-1}\left(A-M_{0}\right)\right]\right), \\
\end{aligned}
\end{equation}
and
\begin{equation}
\mathcal{I} \mathcal{W}\left(\Sigma | S_{0}, \nu_{0}\right) \propto|\Sigma|^{-\left(\nu_{0}+R+1\right) / 2} \exp \left(-\frac{1}{2} \operatorname{tr}\left(\Sigma^{-1}S_{0}\right)\right).
\end{equation}
**Likelihood from temporal factors $\boldsymbol{x}_{t}$**
\begin{equation}
\begin{aligned}
&\mathcal{L}\left(X\mid A,\Sigma\right) \\
\propto &\prod_{t=1}^{h_d}p\left(\boldsymbol{x}_{t}\mid \Sigma\right)\times \prod_{t=h_d+1}^{T}p\left(\boldsymbol{x}_{t}\mid A,\Sigma\right) \\
\propto &\left|\Sigma\right|^{-T/2}\exp\left\{-\frac{1}{2}\sum_{t=h_d+1}^{T}\left(\boldsymbol{x}_{t}-A^\top \boldsymbol{v}_{t}\right)^\top\Sigma^{-1}\left(\boldsymbol{x}_{t}-A^\top \boldsymbol{v}_{t}\right)\right\} \\
\propto &\left|\Sigma\right|^{-T/2}\exp\left\{-\frac{1}{2}\text{tr}\left[\Sigma^{-1}\left(Z_0^\top Z_0+\left(Z-QA\right)^\top \left(Z-QA\right)\right)\right]\right\}
\end{aligned}
\end{equation}
**Posterior distribution**
Consider
\begin{equation}
\begin{aligned}
&\left(A-M_{0}\right)^{\top} \Psi_{0}^{-1}\left(A-M_{0}\right)+S_0+Z_0^\top Z_0+\left(Z-QA\right)^\top \left(Z-QA\right) \\
=&A^\top\left(\Psi_0^{-1}+Q^\top Q\right)A-A^\top\left(\Psi_0^{-1}M_0+Q^\top Z\right) \\
&-\left(\Psi_0^{-1}M_0+Q^\top Z\right)^\top A \\
&+\left(\Psi_0^{-1}M_0+Q^\top Z\right)^\top\left(\Psi_0^{-1}+Q^\top Q\right)\left(\Psi_0^{-1}M_0+Q^\top Z\right) \\
&-\left(\Psi_0^{-1}M_0+Q^\top Z\right)^\top\left(\Psi_0^{-1}+Q^\top Q\right)\left(\Psi_0^{-1}M_0+Q^\top Z\right) \\
&+M_0^\top\Psi_0^{-1}M_0+S_0+Z_0^\top Z_0+Z^\top Z \\
=&\left(A-M^{*}\right)^\top\left(\Psi^{*}\right)^{-1}\left(A-M^{*}\right)+S^{*}, \\
\end{aligned}
\end{equation}
which is in the form of $\mathcal{MN}\left(\cdot\right)$ and $\mathcal{IW}\left(\cdot\right)$.
The $Rd$-by-$R$ matrix $A$ has a matrix normal distribution, and $R$-by-$R$ covariance matrix $\Sigma$ has an inverse Wishart distribution, that is,
\begin{equation}
A \sim \mathcal{M N}_{(R d) \times R}\left(M^{*}, \Psi^{*}, \Sigma\right), \quad \Sigma \sim \mathcal{I} \mathcal{W}\left(S^{*}, \nu^{*}\right),
\end{equation}
with
\begin{equation}
\begin{cases}
{\Psi^{*}=\left(\Psi_{0}^{-1}+Q^{\top} Q\right)^{-1}}, \\ {M^{*}=\Psi^{*}\left(\Psi_{0}^{-1} M_{0}+Q^{\top} Z\right)}, \\ {S^{*}=S_{0}+Z^\top Z+M_0^\top\Psi_0^{-1}M_0-\left(M^{*}\right)^\top\left(\Psi^{*}\right)^{-1}M^{*}}, \\
{\nu^{*}=\nu_{0}+T-h_d}.
\end{cases}
\end{equation}
```
from scipy.stats import invwishart
def mnrnd(M, U, V):
    """
    Generate matrix normal distributed random matrix.

    M is a m-by-n mean matrix, U is a m-by-m row covariance matrix, and
    V is a n-by-n column covariance matrix. Uses the standard construction
    M + P Z Q^T with U = P P^T, V = Q Q^T and Z filled with i.i.d. N(0, 1).
    """
    dim1, dim2 = M.shape
    # Bug fix: entries must be standard normal (randn). The original used
    # np.random.rand, i.e. Uniform[0, 1), which does not yield a matrix
    # normal sample (wrong mean and wrong covariance).
    X0 = np.random.randn(dim1, dim2)
    P = np.linalg.cholesky(U)
    Q = np.linalg.cholesky(V)
    return M + np.matmul(np.matmul(P, X0), Q.T)
def sample_var_coefficient(X, time_lags):
    """Sample the stacked VAR coefficient matrix A ((R*d)-by-R) and noise
    covariance Sigma from their matrix-normal / inverse-Wishart posterior,
    given the current temporal factors X (T-by-R)."""
    num_time, rank = X.shape
    d = time_lags.shape[0]
    max_lag = np.max(time_lags)
    # Targets Z (rows x_t for t > h_d) and lagged design matrix Q
    Z_mat = X[max_lag : num_time, :]
    Q_mat = np.concatenate(
        [X[max_lag - lag : num_time - lag, :] for lag in time_lags], axis = 1)
    # Posterior parameters (priors: Psi_0 = I, M_0 = 0, S_0 = I hard-coded)
    var_Psi = inv(np.eye(rank * d) + np.matmul(Q_mat.T, Q_mat))
    var_M = np.matmul(var_Psi, np.matmul(Q_mat.T, Z_mat))
    var_S = (np.eye(rank) + np.matmul(Z_mat.T, Z_mat)
             - np.matmul(np.matmul(var_M.T, inv(var_Psi)), var_M))
    Sigma = invwishart(df = rank + num_time - max_lag, scale = var_S, seed = None).rvs()
    return mnrnd(var_M, var_Psi, Sigma), Sigma
```
#### 3) Sampling Factor Matrix $X$
**Posterior distribution**
\begin{equation}
\begin{aligned}
y_{it}&\sim\mathcal{N}\left(\boldsymbol{w}_{i}^\top\boldsymbol{x}_{t},\tau^{-1}\right),~\left(i,t\right)\in\Omega, \\
\boldsymbol{x}_{t}&\sim\begin{cases}\mathcal{N}\left(\sum_{k=1}^{d}A_{k} \boldsymbol{x}_{t-h_k},\Sigma\right),~\text{if $t\in\left\{h_d+1,...,T\right\}$},\\{\mathcal{N}\left(\boldsymbol{0},I\right),~\text{otherwise}}.\end{cases}\\
\end{aligned}
\end{equation}
If $t\in\left\{1,...,h_d\right\}$, parameters of the posterior distribution $\mathcal{N}\left(\boldsymbol{x}_{t}\mid \boldsymbol{\mu}_{t}^{*},\Sigma_{t}^{*}\right)$ are
\footnotesize{
\begin{equation}
\begin{aligned}
\Sigma_{t}^{*}&=\left(\sum_{k=1, h_{d}<t+h_{k} \leq T}^{d} {A}_{k}^{\top} \Sigma^{-1} A_{k}+\tau\sum_{i:(i,t)\in\Omega}\boldsymbol{w}_{i}\boldsymbol{w}_{i}^\top+I\right)^{-1}, \\
\boldsymbol{\mu}_{t}^{*}&=\Sigma_{t}^{*}\left(\sum_{k=1, h_{d}<t+h_{k} \leq T}^{d} A_{k}^{\top} \Sigma^{-1} \boldsymbol{\psi}_{t+h_{k}}+\tau\sum_{i:(i,t)\in\Omega}\boldsymbol{w}_{i}y_{it}\right). \\
\end{aligned}
\end{equation}
If $t\in\left\{h_d+1,...,T\right\}$, then parameters of the posterior distribution $\mathcal{N}\left(\boldsymbol{x}_{t}\mid \boldsymbol{\mu}_{t}^{*},\Sigma_{t}^{*}\right)$ are
\begin{equation}
\begin{aligned}
\Sigma_{t}^{*}&=\left(\sum_{k=1, h_{d}<t+h_{k} \leq T}^{d} {A}_{k}^{\top} \Sigma^{-1} A_{k}+\tau\sum_{i:(i,t)\in\Omega}\boldsymbol{w}_{i}\boldsymbol{w}_{i}^\top+\Sigma^{-1}\right)^{-1}, \\
\boldsymbol{\mu}_{t}^{*}&=\Sigma_{t}^{*}\left(\sum_{k=1, h_{d}<t+h_{k} \leq T}^{d} A_{k}^{\top} \Sigma^{-1} \boldsymbol{\psi}_{t+h_{k}}+\tau\sum_{i:(i,t)\in\Omega}\boldsymbol{w}_{i}y_{it}+\Sigma^{-1}\sum_{k=1}^{d}A_{k}\boldsymbol{x}_{t-h_k}\right), \\
\end{aligned}
\end{equation}
where
$$\boldsymbol{\psi}_{t+h_k}=\boldsymbol{x}_{t+h_k}-\sum_{l=1,l\neq k}^{d}A_{l}\boldsymbol{x}_{t+h_k-h_l}.$$
```
def sample_factor_x(sparse_mat, binary_mat, time_lags, W, X, tau, A, Lambda_x):
    """Sampling T-by-R temporal factor matrix X (VAR-regularized Gibbs step).

    NOTE(review): this definition is immediately overridden by the identically
    named function defined right below it, so this version is dead code.
    NOTE(review): in the branch t >= max(time_lags) and t < dim2 - max(time_lags),
    only Mt/Nt are recomputed — inv_var_Lambda is not reassigned, so the value
    from a previous iteration is reused; the second definition recomputes it
    for every t. Confirm before reviving this version.
    NOTE(review): binary_mat is accepted but never used in this function.
    """
    dim2, rank = X.shape
    d = time_lags.shape[0]
    # Precompute per-lag blocks: mat1[:, :, k] = A_k Lambda_x,
    # mat2 = sum_k A_k Lambda_x A_k^T (used when all future targets are in range)
    mat0 = np.matmul(Lambda_x, A.T)
    mat1 = np.zeros((rank, rank, d))
    mat2 = np.zeros((rank, rank))
    for k in range(d):
        Ak = A[k * rank : (k + 1) * rank, :]
        mat1[:, :, k] = np.matmul(Ak, Lambda_x)
        mat2 += np.matmul(mat1[:, :, k], Ak.T)
    for t in range(dim2):
        pos0 = np.where(sparse_mat[:, t] != 0)  # observed series at time t
        Wt = W[pos0[0], :]
        Nt = np.zeros(rank)
        if t >= np.max(time_lags):
            # VAR mean contribution from the lagged factors of time t
            Qt = np.matmul(mat0, X[t - time_lags, :].reshape([rank * d]))
        if t < dim2 - np.max(time_lags):
            # All lags k have in-range future targets t + h_k
            Mt = mat2.copy()
            for k in range(d):
                A0 = A.copy()
                A0[k * rank : (k + 1) * rank, :] = 0
                # psi_{t+h_k}: future factor minus contributions of the other lags
                var5 = (X[t + time_lags[k], :]
                    - np.matmul(A0.T, X[t + time_lags[k]
                    - time_lags, :].reshape([rank * d])))
                Nt += np.matmul(mat1[:, :, k], var5)
        elif t >= dim2 - np.max(time_lags) and t < dim2 - np.min(time_lags):
            # Near the right boundary: keep only lags whose target stays in range
            index = list(np.where(t + time_lags < dim2))[0]
            Mt = np.zeros((rank, rank))
            for k in index:
                Ak = A[k * rank : (k + 1) * rank, :]
                Mt += np.matmul(np.matmul(Ak, Lambda_x), Ak.T)
                A0 = A.copy()
                A0[k * rank : (k + 1) * rank, :] = 0
                var5 = (X[t + time_lags[k], :]
                    - np.matmul(A0.T, X[t + time_lags[k]
                    - time_lags, :].reshape([rank * d])))
                Nt += np.matmul(np.matmul(Ak, Lambda_x), var5)
            inv_var_Lambda = inv(tau * np.matmul(Wt.T, Wt) + Mt + Lambda_x)
        elif t < np.max(time_lags):
            # No full lag history yet: N(0, I) prior replaces the VAR term
            Qt = np.zeros(rank)
            index = list(np.where(t + time_lags >= np.max(time_lags)))[0]
            Mt = np.zeros((rank, rank))
            for k in index:
                Ak = A[k * rank : (k + 1) * rank, :]
                Mt += np.matmul(np.matmul(Ak, Lambda_x), Ak.T)
                A0 = A.copy()
                A0[k * rank : (k + 1) * rank, :] = 0
                var5 = (X[t + time_lags[k], :]
                    - np.matmul(A0.T, X[t + time_lags[k]
                    - time_lags, :].reshape([rank * d])))
                Nt += np.matmul(np.matmul(Ak, Lambda_x), var5)
            inv_var_Lambda = inv(tau * np.matmul(Wt.T, Wt) + Mt + np.eye(rank))
        var_mu = tau * np.matmul(Wt.T, sparse_mat[pos0[0], t]) + Nt + Qt
        X[t, :] = mvnrnd(np.matmul(inv_var_Lambda, var_mu), inv_var_Lambda)
    return X
def sample_factor_x(sparse_mat, binary_mat, time_lags, W, X, tau, A, Lambda_x):
    """Sampling T-by-R temporal factor matrix X (supersedes the definition above).

    For each time t, draws x_t from its Gaussian conditional combining
    (i) the data likelihood over observed entries of column t,
    (ii) the VAR mean from lagged factors (Qt) when t has a full lag history, and
    (iii) contributions of future factors that depend on x_t (Mt, Nt).
    X is updated in place and also returned.

    NOTE(review): binary_mat is accepted but never used in this function.
    """
    dim2, rank = X.shape
    d = time_lags.shape[0]
    for t in range(dim2):
        pos0 = np.where(sparse_mat[:, t] != 0)  # observed series at time t
        Wt = W[pos0[0], :]
        Mt = np.zeros((rank, rank))
        Nt = np.zeros(rank)
        if t >= np.max(time_lags):
            # VAR mean term: Lambda_x A^T [x_{t-h_1}; ...; x_{t-h_d}]
            Qt = np.matmul(Lambda_x, np.matmul(A.T, X[t - time_lags, :].reshape([rank * d])))
        # Select which lags k have a future target t + h_k that must be accounted for
        if t >= np.max(time_lags) and t < dim2 - np.max(time_lags):
            index = list(range(0, d))  # all lags in range
        elif t >= dim2 - np.max(time_lags) and t < dim2 - np.min(time_lags):
            index = list(np.where(t + time_lags < dim2))[0]  # right boundary
        elif t < np.max(time_lags):
            Qt = np.zeros(rank)  # no full lag history: N(0, I) prior applies
            index = list(np.where(t + time_lags >= np.max(time_lags)))[0]
        if t < dim2 - np.min(time_lags):
            for k in index:
                Ak = A[k * rank : (k + 1) * rank, :]
                Mt += np.matmul(np.matmul(Ak, Lambda_x), Ak.T)
                A0 = A.copy()
                A0[k * rank : (k + 1) * rank, :] = 0
                # psi_{t+h_k}: future factor minus contributions of the other lags
                var5 = (X[t + time_lags[k], :]
                        - np.matmul(A0.T, X[t + time_lags[k] - time_lags, :].reshape([rank * d])))
                Nt += np.matmul(np.matmul(Ak, Lambda_x), var5)
        var_mu = tau * np.matmul(Wt.T, sparse_mat[pos0[0], t]) + Nt + Qt
        if t < np.max(time_lags):
            inv_var_Lambda = inv(tau * np.matmul(Wt.T, Wt) + Mt + np.eye(rank))
        else:
            inv_var_Lambda = inv(tau * np.matmul(Wt.T, Wt) + Mt + Lambda_x)
        X[t, :] = mvnrnd(np.matmul(inv_var_Lambda, var_mu), inv_var_Lambda)
    return X
```
#### 4) Sampling Precision $\tau$
```
def sample_precision_tau(sparse_mat, mat_hat, position):
    """Sample the noise precision tau from its Gamma posterior, using the
    squared residuals over the observed entries given by ``position``.
    Prior is Gamma(1e-6, 1e-6), i.e. nearly non-informative."""
    observed = sparse_mat[position]
    residual = observed - mat_hat[position]
    shape_post = 1e-6 + 0.5 * observed.shape[0]
    rate_post = 1e-6 + 0.5 * np.sum(residual ** 2)
    # np.random.gamma is parameterized by scale, hence 1 / rate
    return np.random.gamma(shape_post, 1 / rate_post)
```
#### 5) BTMF Implementation
- **Gibbs sampling**
- Burn-in process
- Sampling process
- **Imputation**
- **Prediction**
```
def BTMF(dense_mat, sparse_mat, init, rank, time_lags, burn_iter, gibbs_iter):
    """Bayesian Temporal Matrix Factorization, BTMF.

    Runs burn_iter + gibbs_iter Gibbs iterations (sampling W, the VAR
    coefficients A/Sigma, X, and tau in turn), averages the reconstructions
    from the last gibbs_iter samples, and prints imputation MAPE/RMSE on the
    entries present in dense_mat but masked out in sparse_mat.

    NOTE(review): the ``rank`` argument and local ``d`` are unused here;
    rank is taken implicitly from the shapes in ``init``.
    """
    W = init["W"]
    X = init["X"]
    dim1, dim2 = sparse_mat.shape
    d = time_lags.shape[0]
    pos = np.where((dense_mat != 0) & (sparse_mat == 0))  # held-out entries for evaluation
    position = np.where(sparse_mat != 0)  # observed (training) entries
    binary_mat = np.zeros((dim1, dim2))
    binary_mat[position] = 1
    tau = 1  # initial noise precision
    mat_hat_plus = np.zeros((dim1, dim2))  # running sum of post-burn-in reconstructions
    for it in range(burn_iter + gibbs_iter):
        W = sample_factor_w(sparse_mat, binary_mat, W, X, tau)
        A, Sigma = sample_var_coefficient(X, time_lags)
        X = sample_factor_x(sparse_mat, binary_mat, time_lags, W, X, tau, A, inv(Sigma))
        mat_hat = np.matmul(W, X.T)
        tau = sample_precision_tau(sparse_mat, mat_hat, position)
        rmse = np.sqrt(np.sum((dense_mat[pos] - mat_hat[pos]) ** 2) / dense_mat[pos].shape[0])
        # NOTE(review): (it + 1) % 1 == 0 is always true, so progress is printed
        # on every burn-in iteration; raise the modulus to print less often.
        if (it + 1) % 1 == 0 and it < burn_iter:
            print('Iteration: {}'.format(it + 1))
            print('RMSE: {:.6}'.format(rmse))
            print()
        if it + 1 > burn_iter:
            mat_hat_plus += mat_hat
    mat_hat = mat_hat_plus / gibbs_iter  # posterior-mean reconstruction
    final_mape = np.sum(np.abs(dense_mat[pos] - mat_hat[pos]) / dense_mat[pos]) / dense_mat[pos].shape[0]
    final_rmse = np.sqrt(np.sum((dense_mat[pos] - mat_hat[pos]) ** 2) / dense_mat[pos].shape[0])
    print('Imputation MAPE: {:.6}'.format(final_mape))
    print('Imputation RMSE: {:.6}'.format(final_rmse))
    print()
    return mat_hat
```
## 6 Spatiotemporal Missing Data Imputation
```
import scipy.io

# Load the Guangzhou speed data (tensor) and the pre-generated random masks.
tensor = scipy.io.loadmat('../datasets/Guangzhou-data-set/tensor.mat')
tensor = tensor['tensor']
random_matrix = scipy.io.loadmat('../datasets/Guangzhou-data-set/random_matrix.mat')
random_matrix = random_matrix['random_matrix']
random_tensor = scipy.io.loadmat('../datasets/Guangzhou-data-set/random_tensor.mat')
random_tensor = random_tensor['random_tensor']
# Unfold the (location, day, interval) tensor into a location-by-time matrix.
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.4
# =============================================================================
### Random missing (RM) scenario
### Set the RM scenario by:
# Entry kept iff its uniform draw exceeds missing_rate (rounding trick).
binary_mat = (np.round(random_tensor + 0.5 - missing_rate)
              .reshape([random_tensor.shape[0], random_tensor.shape[1] * random_tensor.shape[2]]))
# =============================================================================
# binary_tensor = np.zeros(tensor.shape)
# for i1 in range(tensor.shape[0]):
#     for i2 in range(tensor.shape[1]):
#         binary_tensor[i1, i2, :] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
# binary_mat = binary_tensor.reshape([binary_tensor.shape[0], binary_tensor.shape[1] * binary_tensor.shape[2]])
sparse_mat = np.multiply(dense_mat, binary_mat)
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 144])  # 144 intervals = one day (seasonal lag)
init = {"W": 0.1 * np.random.rand(dim1, rank), "X": 0.1 * np.random.rand(dim2, rank)}
burn_iter = 1000
gibbs_iter = 100
BTMF(dense_mat, sparse_mat, init, rank, time_lags, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))
import pandas as pd

# Load the Seattle speed matrix and its pre-generated random-missing mask.
dense_mat = pd.read_csv('../datasets/Seattle-data-set/mat.csv', index_col = 0)
RM_mat = pd.read_csv('../datasets/Seattle-data-set/RM_mat.csv', index_col = 0)
dense_mat = dense_mat.values
RM_mat = RM_mat.values
missing_rate = 0.4
# =============================================================================
### Random missing (RM) scenario
### Set the RM scenario by:
# Entry kept iff its uniform draw exceeds missing_rate (rounding trick).
binary_mat = np.round(RM_mat + 0.5 - missing_rate)
# =============================================================================
sparse_mat = np.multiply(dense_mat, binary_mat)
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 50
time_lags = np.array([1, 2, 288])  # 288 intervals = one day (seasonal lag)
init = {"W": 0.1 * np.random.rand(dim1, rank), "X": 0.1 * np.random.rand(dim2, rank)}
maxiter1 = 1100  # burn-in iterations
maxiter2 = 100   # retained Gibbs samples
BTMF(dense_mat, sparse_mat, init, rank, time_lags, maxiter1, maxiter2)
end = time.time()
print('Running time: %d seconds'%(end - start))
```
## 7 Multivariate Time Series Prediction
```
def BTMF_burn(dense_mat, sparse_mat, init, time_lags, burn_iter):
    """Run the BTMF burn-in phase only and return the final state.

    Same Gibbs loop as BTMF but without sample averaging; used to warm up
    (W, X, tau, A) on historical data before rolling forecasting.
    Returns the last sampled W, X, tau, and VAR coefficients A.
    """
    W = init["W"]
    X = init["X"]
    dim1, dim2 = sparse_mat.shape
    d = time_lags.shape[0]
    pos = np.where((dense_mat != 0) & (sparse_mat == 0))  # held-out entries for RMSE monitoring
    position = np.where(sparse_mat != 0)  # observed (training) entries
    binary_mat = np.zeros((dim1, dim2))
    binary_mat[position] = 1
    tau = 1  # initial noise precision
    for it in range(burn_iter):
        W = sample_factor_w(sparse_mat, binary_mat, W, X, tau)
        A, Sigma = sample_var_coefficient(X, time_lags)
        X = sample_factor_x(sparse_mat, binary_mat, time_lags, W, X, tau, A, inv(Sigma))
        mat_hat = np.matmul(W, X.T)
        tau = sample_precision_tau(sparse_mat, mat_hat, position)
        rmse = np.sqrt(np.sum((dense_mat[pos] - mat_hat[pos]) ** 2) / dense_mat[pos].shape[0])
        # NOTE(review): (it + 1) % 1 == 0 is always true — prints every iteration.
        if (it + 1) % 1 == 0 and it < burn_iter:
            print('Iteration: {}'.format(it + 1))
            print('RMSE: {:.6}'.format(rmse))
            print()
    return W, X, tau, A
def BTMF_4cast(mat, binary_mat, num_step, time_lags, init, gibbs_iter):
    """Forecast (`4cast`) time series with Bayesian Temporal Matrix Factorization (BTMF).

    Resumes Gibbs sampling from the state in ``init`` on the data window
    ``mat``, and after each sample rolls the VAR forward ``num_step`` steps
    to produce factor forecasts. Returns the per-sample forecasts
    (dim1 x num_step x gibbs_iter) and the final W, X_new, tau, A state.
    """
    W = init["W"]
    X = init["X"]
    tau = init["tau"]
    A = init["A"]
    rank = W.shape[1]
    d = time_lags.shape[0]
    mat_hat = np.zeros((W.shape[0], num_step, gibbs_iter))
    for it in range(gibbs_iter):
        W = sample_factor_w(mat, binary_mat, W, X, tau)
        A, Sigma = sample_var_coefficient(X, time_lags)
        # Bug fix: condition the factor update on this window's data `mat`.
        # The original referenced the module-level global `sparse_mat`; it
        # happened to be equivalent because `mat` is a column-prefix of it,
        # but the function silently broke when called with any other data.
        X = sample_factor_x(mat, binary_mat, time_lags, W, X, tau, A, inv(Sigma))
        # Roll the VAR forward to generate num_step future factors.
        X_new = X.copy()
        for t in range(num_step):
            var = X_new[X.shape[0] + t - 1 - time_lags, :].reshape([rank * d])
            X_new = np.append(X_new, np.matmul(A.T, var).reshape([1, rank]), axis = 0)
        # NOTE(review): the slice below spans the last observed factor plus the
        # first num_step - 1 forecasts; X_new[-num_step:] would take exactly the
        # num_step forecasted factors — confirm the intended alignment.
        mat_hat[:, :, it] = np.matmul(W, X_new[-1 - num_step : -1, :].T) # dim1 * num_step
    return mat_hat, W, X_new, tau, A
def forecastor(dense_mat, sparse_mat, init, time_lags,
               num_roll, start_time, num_step, burn_iter, gibbs_iter):
    """Rolling multi-step forecasting with BTMF.

    Burns in on data before ``start_time``, then performs ``num_roll``
    rolling windows: each window extends the observed data by ``num_step``
    columns, resumes Gibbs sampling, and forecasts the next ``num_step``
    steps. Prints final MAPE/RMSE against the nonzero entries of
    ``dense_mat`` after ``start_time`` and returns the per-sample forecasts.
    """
    W, X, tau, A = BTMF_burn(dense_mat[:, : start_time], sparse_mat[:, : start_time],
                             init, time_lags, burn_iter)
    result = np.zeros((W.shape[0], num_roll * num_step, gibbs_iter))
    for t in range(num_roll):
        mat = sparse_mat[:, : start_time + t * num_step]  # observed data up to this roll
        print(mat.shape[1])
        position = np.where(mat != 0)
        binary_mat = mat.copy()
        binary_mat[position] = 1
        # Warm-start the sampler from the previous window's state.
        init = {"W": W, "X": X, "tau": tau, "A": A}
        mat_hat, W, X, tau, A = BTMF_4cast(mat, binary_mat,
                                           num_step, time_lags, init, gibbs_iter)
        result[:, t * num_step : (t + 1) * num_step, :] = mat_hat
    mat_hat0 = np.mean(result, axis = 2)  # average forecast over Gibbs samples
    small_dense_mat = dense_mat[:, start_time : dense_mat.shape[1]]
    pos = np.where(small_dense_mat != 0)  # evaluate on nonzero ground-truth entries
    final_mape = np.sum(np.abs(small_dense_mat[pos] -
                               mat_hat0[pos]) / small_dense_mat[pos]) / small_dense_mat[pos].shape[0]
    final_rmse = np.sqrt(np.sum((small_dense_mat[pos] -
                                 mat_hat0[pos]) ** 2) / small_dense_mat[pos].shape[0])
    print('Final MAPE: {:.6}'.format(final_mape))
    print('Final RMSE: {:.6}'.format(final_rmse))
    print()
    return result
import scipy.io

# Load the Guangzhou data and build the 40%-random-missing scenario
# (same preprocessing as in the imputation section above).
tensor = scipy.io.loadmat('../datasets/Guangzhou-data-set/tensor.mat')
tensor = tensor['tensor']
random_matrix = scipy.io.loadmat('../datasets/Guangzhou-data-set/random_matrix.mat')
random_matrix = random_matrix['random_matrix']
random_tensor = scipy.io.loadmat('../datasets/Guangzhou-data-set/random_tensor.mat')
random_tensor = random_tensor['random_tensor']
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.4
# =============================================================================
### Random missing (RM) scenario
### Set the RM scenario by:
binary_mat = (np.round(random_tensor + 0.5 - missing_rate)
              .reshape([random_tensor.shape[0], random_tensor.shape[1] * random_tensor.shape[2]]))
# =============================================================================
# binary_tensor = np.zeros(tensor.shape)
# for i1 in range(tensor.shape[0]):
#     for i2 in range(tensor.shape[1]):
#         binary_tensor[i1, i2, :] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
# binary_mat = binary_tensor.reshape([binary_tensor.shape[0], binary_tensor.shape[1] * binary_tensor.shape[2]])
sparse_mat = np.multiply(dense_mat, binary_mat)

# --- Run 1: tiny iteration counts (smoke test) -------------------------------
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 144])  # 144 intervals = one day
num_step = 6                        # forecast horizon per roll
num_roll = int(144 * 5 / num_step)  # forecast the last 5 days
start_time = dim2 - num_roll * num_step
init = {"W": 0.1 * np.random.rand(dim1, rank),
        "X": 0.1 * np.random.rand(start_time, rank)}
burn_iter = 10
gibbs_iter = 2
result = forecastor(dense_mat, sparse_mat, init, time_lags,
                    num_roll, start_time, num_step, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))

# --- Run 2: moderate iteration counts ----------------------------------------
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 144])
num_step = 6
num_roll = int(144 * 5 / num_step)
start_time = dim2 - num_roll * num_step
init = {"W": 0.1 * np.random.rand(dim1, rank),
        "X": 0.1 * np.random.rand(start_time, rank)}
burn_iter = 100
gibbs_iter = 20
result = forecastor(dense_mat, sparse_mat, init, time_lags,
                    num_roll, start_time, num_step, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))

# --- Run 3: full iteration counts (slow) --------------------------------------
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 144])
num_step = 6
num_roll = int(144 * 5 / num_step)
start_time = dim2 - num_roll * num_step
init = {"W": 0.1 * np.random.rand(dim1, rank),
        "X": 0.1 * np.random.rand(start_time, rank)}
burn_iter = 1000
gibbs_iter = 100
result = forecastor(dense_mat, sparse_mat, init, time_lags,
                    num_roll, start_time, num_step, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))
# Summarize the Gibbs forecast samples: mean plus an uncertainty band.
# NOTE(review): despite the names, mat_hat10/mat_hat90 hold the 5th/95th
# percentiles here (a 90% band), not the 10th/90th.
mat_hat10 = np.percentile(result, 5, axis = 2)
mat_hat90 = np.percentile(result, 95, axis = 2)
mat_hat = np.mean(result, axis = 2)
X = dense_mat.copy()
pred_steps = int(num_roll * num_step)  # total forecasted steps
tv = 144  # intervals per day
import matplotlib.pyplot as plt
plt.style.use('ggplot')
figsize = 2
# Plot the first three series: ground truth (black), mean forecast (red),
# and the percentile band edges (blue/green), skipping the first 54 days.
for i in range(3):
    fig = plt.figure(figsize = (4 * figsize, 1 * figsize))
    ax = fig.add_axes([0.13, 0.28, 0.85, 0.68])
    plt.plot(X[i, 54 * tv :], color = "black", linewidth = 0.5)
    plt.plot(list(range(X.shape[1] - pred_steps - 54 * tv, X.shape[1] - 54 * tv)),
             mat_hat[i, :], color = "#e3120b", linewidth = 2.0)
    plt.plot(list(range(X.shape[1] - pred_steps - 54 * tv, X.shape[1] - 54 * tv)),
             mat_hat10[i, :], color = "blue", linewidth = 0.5)
    plt.plot(list(range(X.shape[1] - pred_steps - 54 * tv, X.shape[1] - 54 * tv)),
             mat_hat90[i, :], color = "green", linewidth = 0.5)
import scipy.io

# Load the Hangzhou metro data and build the 40%-random-missing scenario.
tensor = scipy.io.loadmat('../datasets/Hangzhou-data-set/tensor.mat')
tensor = tensor['tensor']
random_matrix = scipy.io.loadmat('../datasets/Hangzhou-data-set/random_matrix.mat')
random_matrix = random_matrix['random_matrix']
random_tensor = scipy.io.loadmat('../datasets/Hangzhou-data-set/random_tensor.mat')
random_tensor = random_tensor['random_tensor']
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.4
# =============================================================================
### Random missing (RM) scenario
### Set the RM scenario by:
binary_mat = np.round(random_tensor + 0.5 - missing_rate).reshape([random_tensor.shape[0],
                                                                   random_tensor.shape[1]
                                                                   * random_tensor.shape[2]])
# =============================================================================
sparse_mat = np.multiply(dense_mat, binary_mat)
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 108])  # 108 intervals = one day for this dataset
num_step = 5                        # forecast horizon per roll
num_roll = int(108 * 5 / num_step)  # forecast the last 5 days
start_time = dim2 - num_roll * num_step
init = {"W": 0.1 * np.random.rand(dim1, rank),
        "X": 0.1 * np.random.rand(start_time, rank)}
burn_iter = 100
gibbs_iter = 10
result = forecastor(dense_mat, sparse_mat, init, time_lags,
                    num_roll, start_time, num_step, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))
# Summarize the Gibbs forecast samples: mean plus the 10th/90th percentiles.
mat_hat10 = np.percentile(result, 10, axis = 2)
mat_hat90 = np.percentile(result, 90, axis = 2)
mat_hat = np.mean(result, axis = 2)
X = dense_mat.copy()
pred_steps = int(num_roll * num_step)  # total forecasted steps
tv = 108  # intervals per day
import matplotlib.pyplot as plt
plt.style.use('ggplot')
figsize = 2
# Plot the first three series: ground truth (black), mean forecast (red),
# and the percentile band edges (blue/green), skipping the first 18 days.
for i in range(3):
    fig = plt.figure(figsize = (8 * figsize, 2 * figsize))
    ax = fig.add_axes([0.13, 0.28, 0.85, 0.68])
    plt.plot(X[i, 18 * tv :], color = "black", linewidth = 0.5)
    plt.plot(list(range(X.shape[1] - pred_steps - 18 * tv, X.shape[1] - 18 * tv)),
             mat_hat[i, :], color = "#e3120b", linewidth = 2.0)
    plt.plot(list(range(X.shape[1] - pred_steps - 18 * tv, X.shape[1] - 18 * tv)),
             mat_hat10[i, :], color = "blue", linewidth = 0.5)
    plt.plot(list(range(X.shape[1] - pred_steps - 18 * tv, X.shape[1] - 18 * tv)),
             mat_hat90[i, :], color = "green", linewidth = 0.5)
import scipy.io

# Reload the Hangzhou data — this time with missing_rate = 0.0,
# i.e. forecasting from complete observations.
tensor = scipy.io.loadmat('../datasets/Hangzhou-data-set/tensor.mat')
tensor = tensor['tensor']
random_matrix = scipy.io.loadmat('../datasets/Hangzhou-data-set/random_matrix.mat')
random_matrix = random_matrix['random_matrix']
random_tensor = scipy.io.loadmat('../datasets/Hangzhou-data-set/random_tensor.mat')
random_tensor = random_tensor['random_tensor']
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.0
# =============================================================================
### Random missing (RM) scenario
### Set the RM scenario by:
binary_mat = np.round(random_tensor + 0.5 - missing_rate).reshape([random_tensor.shape[0],
                                                                   random_tensor.shape[1]
                                                                   * random_tensor.shape[2]])
# =============================================================================
sparse_mat = np.multiply(dense_mat, binary_mat)

# --- Run 1: short chains ------------------------------------------------------
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 108])  # 108 intervals = one day
num_step = 5
num_roll = int(108 * 5 / num_step)  # forecast the last 5 days
start_time = dim2 - num_roll * num_step
init = {"W": 0.1 * np.random.rand(dim1, rank),
        "X": 0.1 * np.random.rand(start_time, rank)}
burn_iter = 100
gibbs_iter = 10
result = forecastor(dense_mat, sparse_mat, init, time_lags,
                    num_roll, start_time, num_step, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))

# --- Run 2: longer chains -----------------------------------------------------
import time
start = time.time()
dim1, dim2 = sparse_mat.shape
rank = 10
time_lags = np.array([1, 2, 108])
num_step = 5
num_roll = int(108 * 5 / num_step)
start_time = dim2 - num_roll * num_step
init = {"W": 0.1 * np.random.rand(dim1, rank),
        "X": 0.1 * np.random.rand(start_time, rank)}
burn_iter = 500
gibbs_iter = 50
result = forecastor(dense_mat, sparse_mat, init, time_lags,
                    num_roll, start_time, num_step, burn_iter, gibbs_iter)
end = time.time()
print('Running time: %d seconds'%(end - start))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ziatdinovmax/gpax/blob/v0.0.3/examples/GP_sGP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install -q git+https://github.com/ziatdinovmax/gpax@v0.0.3
```
Imports:
```
import gpax
import numpy as np
import matplotlib.pyplot as plt
# Enable double (64-bit) precision in JAX for the GP computations.
gpax.utils.enable_x64()
```
## Standard Gaussian Process (GP)
Generate some noisy observations (aka 'training data'):
```
np.random.seed(0)

NUM_INIT_POINTS = 25 # number of observation points
NOISE_LEVEL = 0.1 # noise level

# Generate noisy data from a known function
f = lambda x: np.sin(10*x)
X = np.random.uniform(-1., 1., NUM_INIT_POINTS)
y = f(X) + np.random.normal(0., NOISE_LEVEL, NUM_INIT_POINTS)

# Plot generated data
plt.figure(dpi=100)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.scatter(X, y, marker='x', c='k', zorder=1, label='Noisy observations')
plt.ylim(-1.8, 2.2);

# Get random number generator keys (see JAX documentation for why it is necessary)
rng_key, rng_key_predict = gpax.utils.get_keys()
# initialize model
gp_model = gpax.ExactGP(1, kernel='RBF')
# run MCMC to obtain posterior samples
gp_model.fit(rng_key, X, y, num_chains=1)
```
Now let's use our 'trained' model to obtain predictive posterior distribution on new/'test' data:
```
# prepare test inputs
X_test = np.linspace(-1, 1, 100)
# Get GP prediction (posterior mean and 200 sampled predictions)
y_pred, y_sampled = gp_model.predict(rng_key_predict, X_test, n=200)
```
Plot results:
```
_, ax = plt.subplots(dpi=100)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.scatter(X, y, marker='x', c='k', zorder=1, label="Noisy observations", alpha=0.7)
# Overlay each sampled prediction as a faint red curve
for y1 in y_sampled:
    ax.plot(X_test, y1.mean(0), lw=.1, zorder=0, c='r', alpha=.1)
# Plot one sample with a visible line only to create the legend entry,
# then hide it again with l.set_alpha(0) below
l, = ax.plot(X_test, y_sampled[0].mean(0), lw=1, c='r', alpha=1, label="Sampled predictions")
ax.plot(X_test, y_pred, lw=1.5, zorder=1, c='b', label='Sampled means (CoM)')
ax.legend(loc='upper left')
l.set_alpha(0)
ax.set_ylim(-1.8, 2.2);
```
It is also common in literature to draw the GP uncertainty (here defined as standard deviation across all sampled predictions) as a $2\sigma$ shaded region around the GP posterior mean:
```
_, ax = plt.subplots(dpi=100)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.scatter(X, y, marker='x', c='k', zorder=2, label="Noisy observations", alpha=0.7)
ax.plot(X_test, y_pred, lw=1.5, zorder=2, c='b', label='Sampled means (CoM)')
# Shade +/- one standard deviation of the sampled predictions around the mean
ax.fill_between(X_test, y_pred - y_sampled.std(axis=(0,1)), y_pred + y_sampled.std(axis=(0,1)),
                color='r', alpha=0.3, label="Model uncertainty")
ax.legend(loc='upper left')
ax.set_ylim(-1.8, 2.2);
```
## Incorporating prior knowledge into GP
We consider noisy observations of a discontinuous function...
```
def piecewise1(x: np.ndarray, params) -> np.ndarray:
    """Evaluate x**beta1 below the transition point params["t"], and
    x**beta2 at or above it."""
    threshold = params["t"]
    conditions = [x < threshold, x >= threshold]
    branches = [lambda v: v ** params["beta1"], lambda v: v ** params["beta2"]]
    return np.piecewise(x, conditions, branches)
NUM_INIT_POINTS = 15 # number of observation points
NOISE_LEVEL = 0.1
# Ground-truth parameters of the piecewise function
PARAMS = {"t": 1.7, "beta1": 4.5, "beta2": 2.5}

# Generate noisy observations of the discontinuous function
np.random.seed(1)
X = np.random.uniform(0, 3, NUM_INIT_POINTS)
y = piecewise1(X, PARAMS) + np.random.normal(0., NOISE_LEVEL, NUM_INIT_POINTS)
X_test = np.linspace(0, 3, 200)

_, ax = plt.subplots(dpi=100)
ax.scatter(X, y, alpha=0.5, c='k', marker='x', label="Noisy observations")
ax.legend()
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_xlim(0, 3)
```
... and try to reconstruct this underlying function with a standard GP
```
# Get random number generator keys (see JAX documentation for why it is necessary)
rng_key, rng_key_predict = gpax.utils.get_keys()
# Initialize model
gp_model = gpax.ExactGP(1, kernel='Matern')
# Run MCMC to obtain posterior samples
gp_model.fit(rng_key, X, y, num_chains=1)
# Get GP prediction (posterior mean and 200 sampled predictions)
y_pred, y_sampled = gp_model.predict(rng_key_predict, X_test, n=200)

# Plot results: observations, sampled predictions, mean, and the true function
_, ax = plt.subplots(dpi=100)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.scatter(X, y, marker='x', c='k', zorder=1, label="Noisy observations", alpha=0.7)
for y1 in y_sampled:
    ax.plot(X_test, y1.mean(0), lw=.1, zorder=0, c='r', alpha=.1)
# Legend helper line, hidden again by l.set_alpha(0) below
l, = ax.plot(X_test, y_sampled[0].mean(0), lw=1, c='r', alpha=1, label="Sampled predictions")
ax.plot(X_test, y_pred, lw=1.5, zorder=1, c='b', label='Sampled means (CoM)')
ax.plot(X_test, piecewise1(X_test, PARAMS), c='k', linestyle='--', label='True function', alpha=0.5)
ax.legend(loc='upper left')
l.set_alpha(0)
```
The standard GP did not perform very well. Now let's try a GP augmented by a probabilistic model of the system's *expected* behavior. We'll need to use JAX's version of numpy for defining operations on arrays and NumPyro for placing priors over model parameters:
```
from typing import Dict
import numpyro
import jax.numpy as jnp
```
Define possible models as deterministic functions. Notice that the first model has a correct 'guess' about the underlying function. The second model is only partially correct (it assumes the existence of a transition point, but describes the behavior before and after that point as linear).
```
def piecewise1(x: jnp.ndarray, params: Dict[str, float]) -> jnp.ndarray:
    """Piecewise power law: x**beta1 below the transition t, x**beta2 at or above it."""
    t = params["t"]
    below = lambda v: v ** params["beta1"]
    above = lambda v: v ** params["beta2"]
    return jnp.piecewise(x, [x < t, x >= t], [below, above])
def piecewise2(x: jnp.ndarray, params: Dict[str, float]) -> jnp.ndarray:
    """Piecewise linear: slope b below the transition t, slope c at or above it."""
    before = params["b"] * x
    after = params["c"] * x
    return jnp.where(x < params["t"], before, after)
```
Put priors over parameters of each model (to make them probabilistic):
```
def piecewise1_priors():
    """Sample priors for the power-law model and return them keyed by parameter name."""
    dist = numpyro.distributions
    # Dict-literal evaluation preserves the sampling order: t, beta1, beta2.
    return {
        "t": numpyro.sample("t", dist.Uniform(0.5, 2.5)),
        "beta1": numpyro.sample("beta1", dist.LogNormal(0, 1)),
        "beta2": numpyro.sample("beta2", dist.LogNormal(0, 1)),
    }
def piecewise2_priors():
    """Sample priors for the piecewise-linear model and return them keyed by parameter name."""
    dist = numpyro.distributions
    # Dict-literal evaluation preserves the sampling order: t, b, c.
    return {
        "t": numpyro.sample("t", dist.Uniform(0.5, 2.5)),
        "b": numpyro.sample("b", dist.LogNormal(0, 1)),
        "c": numpyro.sample("c", dist.LogNormal(0, 1)),
    }
```
Run a 'structured' GP (*s*GP) for each model. Note that to make our GP 'structured', we pass the ```mean_fn``` (deterministic function) and ```mean_fn_prior``` (priors over the function parameters) arguments to it at the initialization stage.
```
mean_fn = [piecewise1, piecewise2]
mean_fn_priors = [piecewise1_priors, piecewise2_priors]
for m, mp in zip(mean_fn, mean_fn_priors):
# Initialize model
gp_model = gpax.ExactGP(1, kernel='Matern', mean_fn=m, mean_fn_prior=mp)
# Run MCMC to obtain posterior samples
gp_model.fit(rng_key, X, y)
# Get GP prediction
y_pred, y_sampled = gp_model.predict(rng_key_predict, X_test, n=200)
# Plot results
_, ax = plt.subplots(dpi=100)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.scatter(X, y, marker='x', c='k', zorder=1, label="Noisy observations", alpha=0.7)
for y1 in y_sampled:
ax.plot(X_test, y1.mean(0), lw=.1, zorder=0, c='r', alpha=.1)
l, = ax.plot(X_test, y_sampled[0].mean(0), lw=1, c='r', alpha=1, label="Sampled predictions")
ax.plot(X_test, y_pred, lw=1.5, zorder=1, c='b', label='Sampled means (CoM)')
ax.plot(X_test, piecewise1(X_test, PARAMS), c='k', linestyle='--', label='True function', alpha=0.5)
ax.legend(loc='upper left')
l.set_alpha(0)
plt.show()
```
We can see that the first *s*GP model performed exceptionally well, except for the transition region where we do not have enough observations. However, this region is also characterized by very large uncertainty (variation in the sampled predictions), suggesting that one may want to perform extra measurements in that area. For the second *s*GP model, the reconstruction quality is much lower, even though it is still somewhat better than for vanilla GP. This is not surprising because the second model is much less accurate (it assumes linear behavior before and after the transition point, which is obviously not the case here).
We can further explore regions with high uncertainty using the active learning approach. First, we are going to use the model that produced the lowest total uncertainty on the original set of observations.
```
# Copy the initial observations so that we can re-use them later
Xo, yo = X.copy(), y.copy()
rng_key, rng_key_predict = gpax.utils.get_keys(1)
for i in range(6):
print("\nExploration step {}".format(i+1))
# Obtain/update GP posterior
gp_model = gpax.ExactGP(1, kernel='Matern', mean_fn=piecewise1, mean_fn_prior=piecewise1_priors)
gp_model.fit(rng_key, X, y, print_summary=1)
# Compute acquisition function and get coordinate of the next point
obj = gpax.acquisition.UE(rng_key_predict, gp_model, X_test)
next_point_idx = obj.argmax()
# Append the 'suggested' point
X = np.append(X, X_test[next_point_idx])
measured = piecewise1(X_test[next_point_idx], PARAMS) + np.random.normal(0., NOISE_LEVEL) # we assume that new observations are also noisy
y = np.append(y, measured)
```
Make the prediction again, using the newly discovered points:
```
rng_key, rng_key_predict = gpax.utils.get_keys(1)
# Update GP posterior
gp_model = gpax.ExactGP(1, kernel='Matern', mean_fn=piecewise1, mean_fn_prior=piecewise1_priors)
gp_model.fit(rng_key, X, y)
# Get GP prediction
y_pred, y_sampled = gp_model.predict(rng_key_predict, X_test, n=200)
# Plot results
truefunc = piecewise1(X_test, PARAMS)
seed_points = 15
plt.figure(dpi=100)
plt.scatter(X[seed_points:], y[seed_points:], c=jnp.arange(1, len(X[seed_points:])+1),
cmap='viridis', label="Sampled points", zorder=1)
cbar = plt.colorbar(label="Exploration step")
cbar_ticks = np.arange(2, len(X[seed_points:]) + 1, 2)
cbar.set_ticks(cbar_ticks)
plt.scatter(X[:seed_points], y[:seed_points], marker='x', s=64,
c='k', label="Seed points", zorder=1)
plt.plot(X_test, truefunc, c='k', label='True function', zorder=0)
plt.plot(X_test, y_pred, '--', c='red', label='Model reconstruction', zorder=0)
plt.fill_between(X_test, y_pred - y_sampled.std((0,1)), y_pred + y_sampled.std((0,1)),
color='r', alpha=0.2, label="Model uncertainty", zorder=0)
plt.xlabel("$x$", fontsize=12)
plt.ylabel("$y$", fontsize=12)
plt.legend(fontsize=9)
plt.show()
```
Now we are going do active learning with the 'wrong' (or partially correct, depending on whether you are pessimist or optimist) model. We will start from the same set of observations as before.
```
X, y = Xo, yo # start from the same set of observations
rng_key, rng_key_predict = gpax.utils.get_keys(1)
for i in range(9):
print("\nExploration step {}".format(i+1))
# Obtain/update GP posterior
gp_model = gpax.ExactGP(1, kernel='Matern', mean_fn=piecewise2, mean_fn_prior=piecewise2_priors)
gp_model.fit(rng_key, X, y, print_summary=1)
# Compute acquisition function and get coordinate of the next point
obj = gpax.acquisition.UE(rng_key_predict, gp_model, X_test)
next_point_idx = obj.argmax()
# Append the 'suggested' point
X = np.append(X, X_test[next_point_idx])
measured = piecewise1(X_test[next_point_idx], PARAMS) + np.random.normal(0., NOISE_LEVEL) # we assume that new observations are also noisy
y = np.append(y, measured)
```
Make the prediction again, using the newly discovered points:
```
rng_key, rng_key_predict = gpax.utils.get_keys(1)
# Update GP posterior
gp_model = gpax.ExactGP(1, kernel='Matern', mean_fn=piecewise2, mean_fn_prior=piecewise2_priors)
gp_model.fit(rng_key, X, y)
# Get GP prediction
y_pred, y_sampled = gp_model.predict(rng_key_predict, X_test, n=200)
# Plot results
plt.figure(dpi=100)
plt.scatter(X[seed_points:], y[seed_points:], c=jnp.arange(1, len(X[seed_points:])+1),
cmap='viridis', label="Sampled points", zorder=1)
cbar = plt.colorbar(label="Exploration step")
cbar_ticks = np.arange(2, len(X[seed_points:]) + 1, 2)
cbar.set_ticks(cbar_ticks)
plt.scatter(X[:seed_points], y[:seed_points], marker='x', s=64,
c='k', label="Seed points", zorder=1)
plt.plot(X_test, truefunc, c='k', label='True function', zorder=0)
plt.plot(X_test, y_pred, '--', c='red', label='Model reconstruction', zorder=0)
plt.fill_between(X_test, y_pred - y_sampled.std((0,1)), y_pred + y_sampled.std((0,1)),
color='r', alpha=0.2, label="Model uncertainty", zorder=0)
plt.xlabel("$x$", fontsize=12)
plt.ylabel("$y$", fontsize=12)
plt.legend(fontsize=9)
plt.show()
```
Even though it required more steps (and each step took longer to converge), it is still able to reconstruct the underlying discontinuous function. Note that the vanilla GP won't be able to do so (you can check this by removing the mean_fn and mean_fn_prior arguments from the GP initialization and rerunning the process).
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_csv('E:/kaggle/Benz/data/train.csv/train.csv')
```
## First, let's take a glance at our dataset
```
train.head()
train.describe()
train.info()
train[train.isnull().values == True]
```
#### This is a regression problem, so let's take a look at the distribution of 'y' in train set
```
plt.figure(figsize=(18, 8))
train['y'].plot(kind = 'kde')
plt.figure(figsize=(18,8))
plt.xticks(range(0, 180, 20))
plt.xlabel('y_value')
sns.distplot(train['y'],kde = True)
test = pd.read_csv('E:/kaggle/Benz/data/test.csv/test.csv')
test.head()
from sklearn.model_selection import KFold
new_train = train.iloc[:,10:]
new_train
```
From observation, we find no feature has NaN, and most of the features are filled in 0 and 1. However, the premier problem is how we can reduce the dimension, which means we need to find the features with top importances, and then put them into prediction model.
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
```
Here I introduce one method to select the top features by training a machine learning model (here, a Random Forest Regressor) with randomly chosen features in each epoch.
```
n_elements = train.shape[0]
feats = np.array(train.columns.values[10:])
n_feats = len(feats)
weights = np.zeros(n_feats)
weights += 1 / n_feats
epochs = 3000
n_features = 10
wgr = 0.2 # weighted growth rate
wthreshold = 0.2 # weight threshold
y = train['y'].values
topfeats = np.zeros(5)
```
## Here is a brief summary of the feature selection
- Set total epoch number to 3000
- Use KFold to do cross validation in each epoch
- Randomly pick up 10 features from the entire feature sets with bootstrap for each epoch
- Use these 10 features to train, then record the r2_score of each epoch
- Record the feature importance of each feature in every epoch
- Select the top 5 features for each epoch and store the feature importance in an array of shape (epoch, 5)
- After finishing first 30 epochs, judge if the r2_score is higher than the average of previous 30 epoch, if true, record this epoch, otherwise omit it
```
def select_features(n_elements, feats, weights, epochs, n_features, topfeats):
    """Rank features by repeatedly training a RandomForestRegressor on random feature subsets.

    Each epoch draws `n_features` features at random (without replacement),
    trains a 3-fold cross-validated random forest on them, and records the
    out-of-fold r2 score plus the indices of the epoch's 5 most important
    features. Relies on the notebook-level `train` DataFrame and the sklearn
    imports (`KFold`, `RandomForestRegressor`, `r2_score`) being in scope.

    Parameters
    ----------
    n_elements : int
        Number of rows in `train` (sizes the out-of-fold prediction vector).
    feats : np.ndarray
        Candidate feature names.
    weights : np.ndarray
        Initial per-feature weights. NOTE(review): overwritten inside the loop
        and never read afterwards — effectively unused.
    epochs : int
        Number of random-subset training rounds.
    n_features : int
        Number of features sampled per epoch.
    topfeats : np.ndarray
        NOTE(review): reassigned locally and never returned — effectively unused.

    Returns
    -------
    valid_feature_recorder : list[tuple[float, int]]
        (r2 score, epoch) pairs for epochs whose score beats the mean of the
        previous 30 epochs.
    top5_feature_epoch_container : np.ndarray, shape (epochs, 5)
        Indices of the top-5 features for every epoch.
    scores : np.ndarray, shape (epochs,)
        Out-of-fold r2 score of every epoch.
    """
    kf = KFold(n_splits = 3, shuffle = True)
    n_feats = len(feats)
    final_weights = np.zeros(n_feats)  # NOTE(review): filled but never returned
    scores = np.zeros(epochs)
    top5_feature_epoch_container = np.zeros((epochs, 5))
    valid_feature_recorder = []
    for epoch in range(epochs):
        feature_weights = np.zeros(n_feats)
        epoch_predict = np.zeros(n_elements)  # out-of-fold predictions for this epoch
        feature_score = np.zeros(n_features)  # NOTE(review): never used
        # draw this epoch's random feature subset (without replacement)
        sample_feature_id = np.random.choice(n_feats, size = n_features, replace = False)
        sample_feature = feats[sample_feature_id]
        for train_index, test_index in kf.split(train[sample_feature], train['y']):
            feature_weights_temp = np.zeros(n_feats)
            X_train = train.loc[train_index, sample_feature]
            X_test = train.loc[test_index, sample_feature]
            y_train = train.loc[train_index, 'y']
            y_test = train.loc[test_index, 'y']  # NOTE(review): never used
            RFR = RandomForestRegressor(n_estimators = 100, max_depth = 12, max_features = 'auto', min_samples_leaf = 4, n_jobs = 5, random_state = 42)
            RFR.fit(X_train, y_train)
            # accumulate out-of-fold predictions and per-feature importances
            epoch_predict[test_index] = RFR.predict(X_test)
            feature_weights_temp[sample_feature_id] = RFR.feature_importances_
            feature_weights += feature_weights_temp
        weights = feature_weights  # NOTE(review): shadows the parameter; never read again
        # rank this epoch's accumulated importances and keep the top 5
        feature_weights_sorted = np.sort(feature_weights)
        feature_index_sorted = np.argsort(feature_weights)
        top5_features_weights = feature_weights_sorted[-5:]
        top5_features_index = feature_index_sorted[-5:]
        topfeats = feats[top5_features_index]  # NOTE(review): shadows the parameter; never returned
        final_weights[top5_features_index] = top5_features_weights
        # out-of-fold r2 across all three folds of this epoch
        metric = r2_score(train['y'], epoch_predict)
        scores[epoch] = metric
        top5_feature_epoch_container[epoch, :] = top5_features_index
        if epoch > 29:
            if epoch % 30 == 0:
                # periodic progress report every 30 epochs
                print('-----------------------------------')
                print('epoch:' + str(epoch))
            # record the epoch if it beats the trailing-30-epoch average score
            avg_score = np.mean(scores[epoch - 30 : epoch])
            if metric > avg_score:
                valid_feature_recorder.append((metric, epoch))
                #weights[top5_features_index] += 1/n_feats
                #print('current score is:' + str(metric))
                print('epoch is:' + str(epoch))
                print('top5_features_index are :{}'.format(top5_features_index))
                print()
    return valid_feature_recorder, top5_feature_epoch_container, scores
import time
valid_feature_recorder, top5_feature_epoch_container, scores = select_features(n_elements, feats, weights, epochs, n_features, topfeats)
len(valid_feature_recorder)
```
Store the file with .npy document so that we will not do feature selection when we restart the kernel
```
valid_feature_recorder_file_name = 'valid_feature_recorder.npy'
np.save(valid_feature_recorder_file_name, valid_feature_recorder)
np.save('top5_feature_epoch_container.npy', top5_feature_epoch_container)
np.save('scores.npy', scores)
```
## A brief introduction to selecting top features from previous selection
- Sort the epochs based on r2_score
- Choose the top 100 epochs
- Count the occurrence frequency of each feature appearing in the top 100 epochs
- Choose top 7 features as the ultimate features for training
```
valid_record_list = []
for score, index in valid_feature_recorder:
valid_record_list.append(index)
sorted_scores_index = scores.argsort()
top100_scores_index = sorted_scores_index[-100:]
top100_features = top5_feature_epoch_container[top100_scores_index,:]
top100_scores = scores[top100_scores_index]
top100_features_flatten = top100_features.reshape(-1)
from collections import Counter
count = Counter(top100_features_flatten)
most_important_features = count.most_common()
most_important_features[:7]
feature_for_training = []
for feature, freq in most_important_features[:7]:
feature_for_training.append(feature)
np.save('feature_for_training.npy', feature_for_training)
train['new_X0'] = pd.factorize(train['X0'])[0]
train['new_X0']
```
| github_jupyter |
# Training and hosting SageMaker Models using the Apache MXNet Module API
The **SageMaker Python SDK** makes it easy to train and deploy MXNet models. In this example, we train a simple neural network using the Apache MXNet [Module API](https://mxnet.apache.org/api/python/module/module.html) and the MNIST dataset. The MNIST dataset is widely used for handwritten digit classification, and consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). The task at hand is to train a model using the 60,000 training images and subsequently test its classification accuracy on the 10,000 test images.
### Setup
First we need to define a few variables that will be needed later in the example.
```
from sagemaker import get_execution_role
from sagemaker.session import Session
# S3 bucket for saving code and model artifacts.
# Feel free to specify a different bucket here if you wish.
bucket = Session().default_bucket()
# Location to save your custom code in tar.gz format.
custom_code_upload_location = 's3://{}/customcode/mxnet'.format(bucket)
# Location where results of model training are saved.
model_artifacts_location = 's3://{}/artifacts'.format(bucket)
# IAM execution role that gives SageMaker access to resources in your AWS account.
# We can use the SageMaker Python SDK to get the role from our notebook environment.
role = get_execution_role()
```
### The training script
The ``mnist.py`` script provides all the code we need for training and hosting a SageMaker model. The script we will use is adapted from the Apache MXNet [MNIST tutorial](https://mxnet.incubator.apache.org/tutorials/python/mnist.html).
```
!cat mnist.py
```
### SageMaker's MXNet estimator class
The SageMaker ```MXNet``` estimator allows us to run single machine or distributed training in SageMaker, using CPU or GPU-based instances.
When we create the estimator, we pass in the filename of our training script, the name of our IAM execution role, and the S3 locations we defined in the setup section. We also provide a few other parameters. ``train_instance_count`` and ``train_instance_type`` determine the number and type of SageMaker instances that will be used for the training job. The ``hyperparameters`` parameter is a ``dict`` of values that will be passed to your training script -- you can see how to access these values in the ``mnist.py`` script above.
For this example, we will choose one ``ml.m4.xlarge`` instance.
```
from sagemaker.mxnet import MXNet
mnist_estimator = MXNet(entry_point='mnist.py',
role=role,
output_path=model_artifacts_location,
code_location=custom_code_upload_location,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
framework_version='1.4.1',
py_version='py3',
distributions={'parameter_server': {'enabled': True}},
hyperparameters={'learning-rate': 0.1})
```
### Running the Training Job
After we've constructed our MXNet object, we can fit it using data stored in S3. Below we run SageMaker training on two input channels: **train** and **test**.
During training, SageMaker makes this data stored in S3 available in the local filesystem where the mnist script is running. The ```mnist.py``` script simply loads the train and test data from disk.
```
%%time
import boto3
region = boto3.Session().region_name
train_data_location = 's3://sagemaker-sample-data-{}/mxnet/mnist/train'.format(region)
test_data_location = 's3://sagemaker-sample-data-{}/mxnet/mnist/test'.format(region)
mnist_estimator.fit({'train': train_data_location, 'test': test_data_location})
```
### Optimize your model with Neo API
Neo API allows us to optimize our model for a specific hardware type. When calling the compile_model() function, we specify the target instance family (C5) as well as the S3 bucket in which the compiled model will be stored.
#### Important. If the following command result in a permission error, scroll up and locate the value of execution role returned by get_execution_role(). The role must have access to the S3 bucket specified in output_path.
```
neo_optimize = False
compiled_model = mnist_estimator
if mnist_estimator.create_model().check_neo_region(boto3.Session().region_name) is False:
print('Neo is not currently supported in', boto3.Session().region_name)
else:
output_path = '/'.join(mnist_estimator.output_path.split('/')[:-1])
neo_optimize = True
compiled_model = mnist_estimator.compile_model(target_instance_family='ml_m4',
input_shape={'data':[1, 784]},
role=role,
output_path=output_path)
```
### Creating an inference Endpoint
After training, we use the ``MXNet estimator`` object to build and deploy an ``MXNetPredictor``. This creates a Sagemaker **Endpoint** -- a hosted prediction service that we can use to perform inference.
The arguments to the ``deploy`` function allow us to set the number and type of instances that will be used for the Endpoint. These do not need to be the same as the values we used for the training job. For example, you can train a model on a set of GPU-based instances, and then deploy the Endpoint to a fleet of CPU-based instances. Here we will deploy the model to a single ``ml.m4.xlarge`` instance.
```
%%time
predictor = compiled_model.deploy(initial_instance_count=1,
instance_type='ml.m4.xlarge')
import io
import numpy as np
def numpy_bytes_serializer(data):
    """Serialize an array to raw .npy bytes for the binary inference request body."""
    with io.BytesIO() as buffer:
        np.save(buffer, data)
        # getvalue() returns the full buffer contents regardless of position
        return buffer.getvalue()
if neo_optimize is True:
predictor.content_type = 'application/vnd+python.numpy+binary'
predictor.serializer = numpy_bytes_serializer
```
The request handling behavior of the Endpoint is determined by the ``mnist.py`` script. In this case, the script doesn't include any request handling functions, so the Endpoint will use the default handlers provided by SageMaker. These default handlers allow us to perform inference on input data encoded as a multi-dimensional JSON array.
### Making an inference request
Now that our Endpoint is deployed and we have a ``predictor`` object, we can use it to classify handwritten digits.
To see inference in action, draw a digit in the image box below. The pixel data from your drawing will be loaded into a ``data`` variable in this notebook.
*Note: after drawing the image, you'll need to move to the next notebook cell.*
```
from IPython.display import HTML
HTML(open("input.html").read())
```
Now we can use the ``predictor`` object to classify the handwritten digit:
```
response = predictor.predict(data)
print('Raw prediction result:')
if neo_optimize is False:
response = response[0]
print(response)
labeled_predictions = list(zip(range(10), response))
print('Labeled predictions: ')
print(labeled_predictions)
labeled_predictions.sort(key=lambda label_and_prob: 1.0 - label_and_prob[1])
print('Most likely answer: {}'.format(labeled_predictions[0]))
```
# (Optional) Delete the Endpoint
After you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it.
```
print("Endpoint name: " + predictor.endpoint)
import sagemaker
predictor.delete_endpoint()
```
| github_jupyter |
# Deep Q-learning
```
import gym
import tensorflow
from matplotlib import pyplot
import dqn
```
## Atari 2600 Breakout
```
env = gym.make('Breakout-v0')
env.action_space, env.observation_space
# env.observation_space.low, env.observation_space.high
env.env.get_action_meanings()
S = env.reset()
for t in range(250):
env.render()
# take a random action
A = env.action_space.sample()
S_next, R, done, info = env.step(A)
if R:
print(R)
if done:
break
env.close()
info, S.shape
fig, (ax1, ax2) = pyplot.subplots(ncols=2)
ax1.imshow(S)
ax2.imshow(S_next)
```
## Preprocessing
```
preprocess = dqn.Preprocessor()
sess = tensorflow.InteractiveSession()
theta = preprocess(S, sess)
theta_next = preprocess(S_next, sess)
fig, (ax1, ax2) = pyplot.subplots(ncols=2)
ax1.imshow(theta, cmap='gray')
ax2.imshow(theta_next, cmap='gray')
```
## Experience Replay
```
D = dqn.ReplayMemory(env, preprocess, sess)
len(D)
D.store(theta, A, R)
D[-1] == (theta, A, R), len(D)
S = D.get_recent_state()
S.shape
ys = D.sample_minibatch()
S, A, R, S_next = ys[0]
len(ys)
f, axes = pyplot.subplots(nrows=2, ncols=4)
for i, ax in enumerate(axes.T):
ax[0].imshow(S[..., i], cmap='gray')
ax[1].imshow(S_next[..., i], cmap='gray')
```
## Deep Q-network
```
deep_q_network = dqn.DQN(env)
```
## Algorithm
```
def deep_q_learning(env, time_steps):
    """Deep Q-learning with experience replay and a periodically-synced target network.

    Runs episodes indefinitely, each capped at `time_steps` environment steps.
    Relies on the notebook-level `dqn` module and on `sess`, the TensorFlow
    session created earlier in the notebook.

    Fixes relative to the original sketch:
    - `time_steps_counter` was defined but the loop iterated the misspelled
      `time_step_counter` (NameError);
    - `time_step_counter % C` applied `%` to a range object (TypeError); a
      global step counter is used instead, so the target network really is
      refreshed every C steps across episodes;
    - `itertools` was referenced but never imported;
    - `Ss`/`As` were undefined at the gradient step; they are now unpacked
      from the sampled minibatch of (S, A, R, S_next) transitions.
    """
    import itertools  # local import: only needed for the endless episode counter

    C = 10000  # target network update frequency (steps between weight syncs)
    preprocess = dqn.Preprocessor()
    # initialize replay memory
    D = dqn.ReplayMemory()
    # initialize action-value function with random weights
    Q = dqn.DQN(env)
    # initialize target action-value function as a frozen copy of Q
    target_Q = dqn.DQN(env, Q.clone_weights())
    total_steps = 0  # global step counter across all episodes
    for episode in itertools.count():
        # initialize sequence
        X = env.reset()
        theta = preprocess(X)
        for t in range(1, time_steps + 1):
            total_steps += 1
            # choose epsilon-greedy action from the recent stacked state
            A = Q.epsilon_greedy_action(D.get_recent_state())
            # execute action in emulator and observe reward and image
            X_next, R, done, _ = env.step(A)
            theta_next = preprocess(X_next)
            # store transition in replay memory
            D.store(theta, A, R)
            # sample minibatch of (S, A, R, S_next) transitions
            transitions = D.sample_minibatch()
            # set targets y using the frozen target network
            ys = target_Q.get_q_value(transitions)
            Ss = [tr[0] for tr in transitions]
            As = [tr[1] for tr in transitions]
            # perform gradient descent step on Q
            # NOTE(review): `sess` is the notebook-global TF session — confirm
            Q.gradient_descent_step(ys, Ss, As, sess)
            # every C steps reset target_Q = Q
            if total_steps % C == 0:
                target_Q.weights = Q.clone_weights()
            if done:
                # mark the terminal frame in replay memory
                D.store(theta_next, -1, 0)
                break
            X, theta = X_next, theta_next
deep_q_learning(env, 100)
```
| github_jupyter |
<!--NOTEBOOK_HEADER-->
*This notebook contains material from [CBE40455-2020](https://jckantor.github.io/CBE40455-2020);
content is available [on Github](https://github.com/jckantor/CBE40455-2020.git).*
<!--NAVIGATION-->
< [3.3 Agent Based Models](https://jckantor.github.io/CBE40455-2020/03.03-Agent-Based-Models.html) | [Contents](toc.html) | [3.5 Assignment](https://jckantor.github.io/CBE40455-2020/03.05-Assignment.html) ><p><a href="https://colab.research.google.com/github/jckantor/CBE40455-2020/blob/master/docs/03.04-Modeling-Events.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/CBE40455-2020/03.04-Modeling-Events.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
# 3.4 Modeling Events
## 3.4.1 Learning Objectives
* Key performance indicators
* Visualization with Gannt charts
* Adding 'state' to models
* Combining events with `AnyOf` and `AllOf`
* Deterministic versus stochastic models
## 3.4.2 Example: A room full of Roombas
Let's imagine a large facility that is being cleaned by a collection of Roomba-type robotic cleaning units. Each unit is characterized by time required to charge, and an amount of time it can clean before needing to be recharged. The facility must be cleaned during a 16 hour overnight shift. On average, 3 units must be operating continuously to meet the cleaning requirements, i.e., 3 x 16 = 48 hours machine cleaning each night. We would like to determine how many charging stations will be required.
| Unit | Charge Time (hrs) | Clean Time (hrs) |
| :--: | :--: | :--: |
| A | 1.0 | 2.5 |
| B | 0.5 | 1.5 |
| C | 0.8 | 2.0 |
| D | 1.4 | 3.5 |
| E | 0.5 | 1.2 |
## 3.4.3 Shared resource
### 3.4.3.1 Installation, Import, and Setup section
```
# necessary installations
!pip install simpy
# import section
import simpy
import pandas as pd
import numpy as np
```
### 3.4.3.2 Data section
```
# data section
roomba_data = [
["A", 1.0, 2.5],
["B", 0.5, 1.5],
["C", 0.8, 2.0],
["D", 1.4, 3.5],
["E", 0.5, 1.2],
]
roomba_df = pd.DataFrame(roomba_data, columns=["id", "charge_time", "clean_time"])
display(roomba_df)
```
### 3.4.3.3 Modeling and simulation section
The output of the modeling and simulation is a Pandas DataFrame giving a complete history of the events occurring during a simulation.
```
def roomba_model(id, charge_time, clean_time):
    """SimPy process for one robotic cleaner: charge, then clean, repeating forever.

    Relies on the module-level simulation objects: `env` (SimPy environment),
    `chargers` (shared charger Resource), and `data_log` (event record list).

    Parameters
    ----------
    id : unit identifier recorded in the log.
    charge_time : hours needed for a full charge.
    clean_time : hours of cleaning delivered by one full charge.
    """
    while True:
        # acquire a charger slot (queues FIFO if all chargers are busy)
        with chargers.request() as request:
            yield request
            tic = env.now
            yield env.timeout(charge_time)
            toc = env.now
            data_log.append([id, "charging", tic, toc])
        # cleaning needs no shared resource; runs until the charge is spent
        tic = env.now
        yield env.timeout(clean_time)
        toc = env.now
        data_log.append([id, "cleaning", tic, toc])
data_log = []
env = simpy.Environment()
chargers = simpy.Resource(env, capacity=1)
for r in roomba_df.index:
env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
env.run(until=16)
df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
df.head()
```
### 3.4.3.4 Key performance indictors
Key Performance Indicator (KPI) is a commonly used term denoting a quantitative measure important to the performance of an organization, unit, or system.
```
def kpi(df):
    """Total time spent in each event type.

    Adds a derived "time" (duration) column to `df` in place, then aggregates
    those durations by event. Returns a one-column DataFrame indexed by event.
    """
    df["time"] = df["end"] - df["begin"]
    by_event = pd.pivot_table(df, index=["event"], values="time",
                              aggfunc={"time": np.sum})
    return by_event
kpi(df)
```
### 3.4.3.5 Visualization with Gantt charts
Designed circa 1910-1915 by Henry Gantt, an American mechanical engineer who worked with Frederick W. Taylor, one of the first management consultants and a leader in the Efficiency Movement of the late 19th century.
Gantt charts provide a convenient method to display complex schedules and the time-dependent allocation of resources.
```
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def gantt(df, lw=10):
    """Render the event log as a Gantt chart: one row per unit id, one color per event type.

    Parameters
    ----------
    df : DataFrame with columns "id", "event", "begin", "end"
        (as produced by the simulation's data log).
    lw : line width in points used to draw each event bar.
    """
    # create sorted lists of the unique ids and events appearing in the data log
    ids = sorted(list(set(df["id"])))
    events = sorted(list(set(df["event"])))
    # create list of unique colors for each event (matplotlib cycle colors C0, C1, ...)
    colors = [f"C{i}" for i in range(len(events))]
    # create plot window
    fig, ax = plt.subplots(1, 1, figsize=(12, 3))
    # for each event and id, find entries in the data log and plot the begin and end points
    for i, event in enumerate(events):
        for j, id in enumerate(ids):
            for k in df[(df["id"]==id) & (df["event"]==event)].index:
                ax.plot([df["begin"][k], df["end"][k]], [j,j],
                        colors[i], solid_capstyle="butt", lw=lw)
    # create legend: one proxy Line2D per event type
    lines = [Line2D([0], [0], lw=lw, color=colors[i]) for i in range(len(events))]
    ax.legend(lines, events, bbox_to_anchor=(0.0, 1.1), loc="lower left")
    # annotate the axes: one labeled row per unit id
    ax.set_yticks(range(len(ids)))
    ax.set_yticklabels(ids)
    ax.grid(True)
    ax.set_xlabel("Time")
    # NOTE(review): "Gannt" is a typo for "Gantt" in the displayed title; left unchanged here
    ax.set_title("Gannt Chart")
    # hide the plot frame for a cleaner look
    for sp in ['top', 'bottom', 'right', 'left']:
        ax.spines[sp].set_visible(False)
gantt(df)
```
## 3.4.4 Assignment Review
### 3.4.4.1 Exercise 1.
Answer the question posed above: How many charging stations are needed to provide 48 hours cleaning services in the overnight shift?
#### 3.4.4.1.1 Solution
```
data_log = []
env = simpy.Environment()
chargers = simpy.Resource(env, capacity=2)
for r in roomba_df.index:
env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
env.run(until=16)
df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
display(kpi(df))
gantt(df)
```
We see that two chargers are sufficient to meet the performance requirement. But keep in mind there has been no deliberate strategy for allocating the chargers. The Roombas simply wait in line when they need access. There may be better ways to allocate a scarce resource than first-come, first-serve.
### 3.4.4.2 Exercise 2.
Modify the model to assume the changers are fully charged at the start of the cleaning shift. Does that reduce the number of chargers required?
#### 3.4.4.2.1 Solution
```
def roomba_model(id, charge_time, clean_time):
    """SimPy process: clean first (units start the shift fully charged), then recharge, forever.

    Relies on the module-level `env`, `chargers`, and `data_log` objects.
    """
    while True:
        # Cleaning leg: no shared resource needed.
        started = env.now
        yield env.timeout(clean_time)
        data_log.append([id, "cleaning", started, env.now])
        # Recharge leg: wait for a free charger slot, then charge fully.
        with chargers.request() as slot:
            yield slot
            started = env.now
            yield env.timeout(charge_time)
            data_log.append([id, "charging", started, env.now])
data_log = []
env = simpy.Environment()
chargers = simpy.Resource(env, capacity=2)
for r in roomba_df.index:
env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
env.run(until=16)
df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
display(kpi(df))
gantt(df)
```
## 3.4.5 Introducing State Variables
The solution to this exercise can be generalized by introducing a state variable `soc` denoting the 'state of charge'. The state of charge is the fraction of charge remaining in the device battery. The state of charge is reduced during cleaning operations, and restored during charging operation, in proportion to the time spent cleaning and charging, respectively.
Given an initial full charge, let parameter $\tau_{clean}$ denote the maximum time the device can clean before completely exhausting the battery. Let $\text{SOC}_{k}$ be the state of charge after the $k_{th}$ cleaning cycle. Then
$$\text{SOC}_{k+1} = \max(0, \text{SOC}_k - \frac{t_{k, clean}}{\tau_{clean}})$$
where $t_{k,clean}$ is the period of the $k_{th}$ cleaning cycle.
An additional parameter is introduced to represent the minimum battery reserve that would be allowed for normal operations.
Similarly, the state of charge following a charging cycle is given by
$$\text{SOC}_{k+1} = \min(1, \text{SOC}_k + \frac{t_{k, charge}}{\tau_{charge}})$$
It is interesting to explore the behavior of this system as a function of the initial state of charge and reserve requirements. Take time to explore the role of these two parameters. See if you find any surprising behaviors.
```
def roomba_model(id, charge_time, clean_time, reserve=0.1):
    """SimPy process with a battery state of charge (SOC).

    The unit cleans until its SOC falls to the reserve level, then recharges
    to full. Uses the module-level `env`, `chargers`, and `data_log` objects.

    Parameters
    ----------
    id : unit identifier recorded in the log.
    charge_time : hours to charge from empty (SOC 0) to full (SOC 1).
    clean_time : hours of cleaning that drain a full battery to empty.
    reserve : minimum SOC fraction kept in reserve during normal operation.
    """
    soc = 1.0  # state of charge (fraction of full battery; starts fully charged)
    while True:
        if soc > reserve:
            # clean only for as long as the charge above the reserve allows
            tic = env.now
            yield env.timeout((soc - reserve) * clean_time)
            toc = env.now
            # SOC drains in proportion to elapsed cleaning time
            soc = max(0.0, soc - (toc - tic)/clean_time)
            data_log.append([id, "cleaning", tic, toc])
        # recharge to full; charging time is proportional to the missing charge
        with chargers.request() as request:
            yield request
            tic = env.now
            yield env.timeout((1 - soc)*charge_time)
            toc = env.now
            soc = min(1.0, soc + (toc - tic)/charge_time)
            data_log.append([id, "charging", tic, toc])
data_log = []
env = simpy.Environment()
chargers = simpy.Resource(env, capacity=2)
for r in roomba_df.index:
env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
env.run(until=16)
df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
display(kpi(df))
gantt(df)
```
### 3.4.5.1 Exercise 3.
Assume each Roomba needs to dispose of waste after 20 minutes of cleaning, that it takes 5 minutes to dispose of the waste, and requires access to a waste disposal station.
Hints:
* You will need to create a log a new event called 'waste disposal'.
* Model the waste disposal station as a shared resource.
* You may need to make some decisions on how to handle the waste at the end of a cleaning cycle. Don't get too bogged down, just make some reasonable assumptions. We'll address this issue in the next class.
```
five_min = 1.0/12.0
twenty_min = 1.0/3.0
def roomba_model(id, charge_time, clean_time, reserve=0.1):
    """SimPy process: one Roomba cycling between cleaning, waste disposal,
    and charging.

    A cleaning interval ends at whichever comes first: the battery drawn
    down to the reserve, or the waste bin filling (tracked by sow). A full
    bin is emptied at a shared waste station; a depleted battery is
    recharged at a shared charging station.

    Relies on module-level globals: env, chargers, waste_stations,
    data_log, five_min, twenty_min.
    """
    soc = 1.0  # state of charge (1.0 = full battery)
    sow = 0.0  # state of waste (1.0 = full bin)
    while True:
        if soc > reserve and sow < 1.0:
            tic = env.now
            # Race two timeouts: battery reaching reserve vs. bin filling up.
            events = [env.timeout((soc - reserve) * clean_time),
                      env.timeout((1.0 - sow)*twenty_min)]
            yield simpy.AnyOf(env, events)
            toc = env.now
            soc = max(0.0, soc - (toc - tic)/clean_time)
            sow = min(1.0, sow + (toc - tic)/twenty_min)
            data_log.append([id, "cleaning", tic, toc])
        if sow >= 1.0 - 0.001:  # 0.001 tolerance for floating-point round-off
            with waste_stations.request() as request:
                yield request
                tic = env.now
                yield env.timeout(five_min)  # fixed 5-minute disposal time
                toc = env.now
                sow = 0.0
                data_log.append([id, "waste disposal", tic, toc])
        if soc <= reserve + 0.001:  # 0.001 tolerance for floating-point round-off
            with chargers.request() as request:
                yield request
                tic = env.now
                yield env.timeout((1 - soc)*charge_time)
                toc = env.now
                soc = min(1.0, soc + (toc - tic)/charge_time)
                data_log.append([id, "charging", tic, toc])
data_log = []
env = simpy.Environment()
chargers = simpy.Resource(env, capacity=3)
waste_stations = simpy.Resource(env, capacity=5)
for r in roomba_df.index:
env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
env.run(until=16)
df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
display(kpi(df))
gantt(df)
```
## 3.4.6 Stochastic Model
```
import random
five_min = 5.0/60.0
ten_min = 10.0/60.0
twenty_min = 20.0/60.0
thirty_min = 30.0/60.0
def roomba_model(id, charge_time, clean_time, reserve=0.1):
    """Stochastic SimPy Roomba process: cleaning, waste disposal, charging.

    Same structure as the deterministic model, except the time to fill the
    waste bin during a cleaning interval is drawn from a uniform
    distribution between ten and thirty minutes.

    Relies on module-level globals: env, chargers, waste_stations,
    data_log, five_min, ten_min, twenty_min, thirty_min.
    """
    soc = 1.0  # state of charge (1.0 = full battery)
    sow = 0.0  # state of waste (1.0 = full bin)
    while True:
        if soc > reserve and sow < 1.0:
            tic = env.now
            # Race: battery reaching reserve vs. bin filling, with a
            # randomly drawn (uniform) bin-fill time for this interval.
            yield simpy.AnyOf(env, [env.timeout((soc - reserve) * clean_time),
                                    env.timeout((1.0 - sow)*random.uniform(ten_min, thirty_min))])
            toc = env.now
            soc = max(0.0, soc - (toc - tic)/clean_time)
            # NOTE(review): sow still accrues at the nominal twenty_min rate
            # even though the timeout above used a random fill time -- confirm
            # this mismatch is intentional.
            sow = min(1.0, sow + (toc - tic)/twenty_min)
            data_log.append([id, "cleaning", tic, toc])
        if sow >= 1.0 - 0.001:  # 0.001 tolerance for floating-point round-off
            with waste_stations.request() as request:
                yield request
                tic = env.now
                yield env.timeout(five_min)  # fixed 5-minute disposal time
                toc = env.now
                sow = 0.0
                data_log.append([id, "waste disposal", tic, toc])
        if soc <= reserve+ 0.001:  # 0.001 tolerance for floating-point round-off
            with chargers.request() as request:
                yield request
                tic = env.now
                yield env.timeout((1 - soc)*charge_time)
                toc = env.now
                soc = min(1.0, soc + (toc - tic)/charge_time)
                data_log.append([id, "charging", tic, toc])
data_log = []
env = simpy.Environment()
chargers = simpy.Resource(env, capacity=3)
waste_stations = simpy.Resource(env, capacity=2)
for r in roomba_df.index:
env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
env.run(until=16)
df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
display(kpi(df))
gantt(df)
```
## 3.4.7 Monte Carlo Simulation
```
import random
five_min = 5.0/60.0
ten_min = 10.0/60.0
twenty_min = 20.0/60.0
thirty_min = 30.0/60.0
def roomba_model(id, charge_time, clean_time, reserve=0.1):
    """Stochastic SimPy Roomba process used inside the Monte Carlo loop.

    Identical to the previous stochastic model except the bin-fill time is
    drawn from random.triangular(ten_min, thirty_min); with no explicit
    mode argument the mode defaults to the midpoint of the interval.

    Relies on module-level globals: env, chargers, waste_stations,
    data_log, five_min, ten_min, twenty_min, thirty_min.
    """
    soc = 1.0  # state of charge (1.0 = full battery)
    sow = 0.0  # state of waste (1.0 = full bin)
    while True:
        if soc > reserve and sow < 1.0:
            tic = env.now
            # Race: battery reaching reserve vs. bin filling, with a
            # triangular-distributed bin-fill time for this interval.
            yield simpy.AnyOf(env, [env.timeout((soc - reserve) * clean_time),
                                    env.timeout((1.0 - sow)*random.triangular(ten_min, thirty_min))])
            toc = env.now
            soc = max(0.0, soc - (toc - tic)/clean_time)
            # NOTE(review): sow accrues at the nominal twenty_min rate while
            # the timeout uses a random fill time -- confirm intentional.
            sow = min(1.0, sow + (toc - tic)/twenty_min)
            data_log.append([id, "cleaning", tic, toc])
        if sow >= 1.0 - 0.001:  # 0.001 tolerance for floating-point round-off
            with waste_stations.request() as request:
                yield request
                tic = env.now
                yield env.timeout(five_min)  # fixed 5-minute disposal time
                toc = env.now
                sow = 0.0
                data_log.append([id, "waste disposal", tic, toc])
        if soc <= reserve+ 0.001:  # 0.001 tolerance for floating-point round-off
            with chargers.request() as request:
                yield request
                tic = env.now
                yield env.timeout((1 - soc)*charge_time)
                toc = env.now
                soc = min(1.0, soc + (toc - tic)/charge_time)
                data_log.append([id, "charging", tic, toc])
# --- Monte Carlo simulation: 1000 independent replications ---
y = []  # total cleaning time observed in each replication
for k in range(1000):
    # Fresh log, environment, and resources for every replication.
    data_log = []
    env = simpy.Environment()
    chargers = simpy.Resource(env, capacity=2)
    waste_stations = simpy.Resource(env, capacity=2)
    for r in roomba_df.index:
        env.process(roomba_model(roomba_df["id"][r], roomba_df["charge_time"][r], roomba_df["clean_time"][r]))
    env.run(until=16)  # one 16-hour overnight shift
    df = pd.DataFrame(data_log, columns=["id", "event", "begin", "end"])
    y.append(kpi(df)["time"]["cleaning"])
y = np.array(y)
# Histogram of the cleaning-time distribution; the vertical line marks 48.
plt.hist(y, bins=20)
plt.xlim(45.0, 52.0)
plt.plot([48,48], plt.ylim(), lw=3)
print("mean =", round(y.mean(), 2))
print("std =", round(y.std(), 2))
```
## 3.4.8 Assignment (to be submitted Thursday, Sept. 3rd).
The facility has expanded: an average of 100 hours of machine cleaning time is now required during the 16-hour overnight shift. The company would like to settle on a single cleaning model rather than servicing five different models.
a. Modify the above model to determine the model (A, B, C, D, or E) and number of devices required to meet the service requirement.
b. Modify the above model to include a second process that writes the number of charging stations in use at every minute to a second data log. Prepare a plot and histogram of charging station usage.
<!--NAVIGATION-->
< [3.3 Agent Based Models](https://jckantor.github.io/CBE40455-2020/03.03-Agent-Based-Models.html) | [Contents](toc.html) | [3.5 Assignment](https://jckantor.github.io/CBE40455-2020/03.05-Assignment.html) ><p><a href="https://colab.research.google.com/github/jckantor/CBE40455-2020/blob/master/docs/03.04-Modeling-Events.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/CBE40455-2020/03.04-Modeling-Events.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
| github_jupyter |
```
import sqlite3
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import requests
import pandas as pd
import csv
import time
import re
df = pd.read_csv('C:/Develops/newspapers/๊ฐ์ /lexicon/expressive-type.csv')
df.head(100)
pd.read_csv('C:/Develops/newspapers/๊ฐ์ /lexicon/nested-order.csv')
df = pd.read_excel('.//NewsResult_20200901-20210430 (8).xlsx', names=['identical', 'date', 'press', 'name', 'title', 'c1', 'c2', 'c3', 'a1', 'a2', 'a3', 'person', 'place', 'institute', 'keyword', 'topkeyword', 'body', 'url', 'tf'])
# ์ ์ฒ๋ฆฌ
df = df[df.tf != '์์ธ']
df = df[df.tf != '์ค๋ณต']
df = df[df.tf != '์ค๋ณต, ์์ธ']
# df = df[~df.title.str.contains('๊ฒฝํฅํฌํ ')]
# df = df[~df.title.str.contains('์ธํฐ๋ํฐ๋ธ')]
# df = df[~df.place.str.contains('korea', na=False)]
# df = df[~df.place.str.contains('la', na=False)]
# df = df[~df.place.str.contains('LA', na=False)]
df = df.reset_index()
df = df.drop(columns=['index'], axis=1)
len(df)
df = df.iloc[0:7952] # 4/30 ~ 4/1 1๋ฌ๊ฐ ์ด 7952๊ฐ(8์๊ฐ 50๋ถ) *50๊ฐ(3๋ถ20์ด)
len(df)
df
text_0_7953 = pd.read_excel('C:/Develops/newspapers/press_8/text_0_7953.xls', header=None)
text_0_7953 = text_0_7953.values.tolist()
len(text_0_7953)
texts = []
for i in range(0,len(text_0_7953)):
body = ''.join(text_0_7953[i])
texts.append(body)
i = 0
df.title[i]
texts[i]
df['total_body'] = texts
db_news = sqlite3.connect('./news.db')
c = db_news.cursor()
c.execute("CREATE TABLE newspapers (id INTEGER PRIMARY KEY AUTOINCREMENT, identical TEXT, date TEXT, press TEXT, name TEXT, title TEXT, c1 TEXT, c2 TEXT, c3 TEXT, a1 TEXT, a2 TEXT, a3 TEXT, person TEXT, place TEXT, institute TEXT, keyword TEXT, topkeyword TEXT, body TEXT, url TEXT, tf TEXT, total_body TEXT)")
# identical', 'date', 'press', 'name', 'title', 'c1', 'c2', 'c3', 'a1', 'a2', 'a3', 'person', 'place', 'institute', 'keyword', 'topkeyword', 'body', 'url', 'tf
# Insert each news article into the sqlite "newspapers" table.
# Positional indices follow the column order declared in the
# CREATE TABLE statement / the names= list passed to read_excel.
for row in df.iterrows():
    # row is a (index, Series) pair; row[1][i] is the i-th column value.
    identical = row[1][0]
    date = row[1][1]
    press = row[1][2]
    name = row[1][3]
    title = row[1][4]
    c1 = row[1][5]
    c2 = row[1][6]
    c3 = row[1][7]
    a1 = row[1][8]
    a2 = row[1][9]
    a3 = row[1][10]
    person = row[1][11]
    place = row[1][12]
    institute = row[1][13]
    keyword = row[1][14]
    topkeyword = row[1][15]
    body = row[1][16]
    url = row[1][17]
    tf = row[1][18]
    total_body = row[1][19]
    # Parameterized insert (safe against SQL injection from article text).
    c.execute("INSERT INTO newspapers (identical, date, press, name, title, c1, c2, c3, a1, a2, a3, person, place, institute, keyword, topkeyword, body, url, tf, total_body) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) ",(identical, date, press, name, title, c1, c2, c3, a1, a2, a3, person, place, institute, keyword, topkeyword, body, url, tf, total_body))
# Commit once after all rows are inserted.
db_news.commit()
์นผ๋ผ ๊ธ์ ๊ธ์ ์ ๋ถ์ ๋ถ์ ์
------------
๊ธฐ์ฌ1 ์๋
2 ์๋
2 ๊ฐ์ 5 ์๋
2 ใ
ใ
ใ
ใ
์๋
2
๊ธฐ์ฌ2 ๊ฐ์ 5 ์๋
2
๊ธฐ์ฌ3 ใ
ใ
ใ
ใ
์๋
2
์นผ๋ผ ๋จ์ด ํ์
------------
์๋
2
๊ฐ์ 5
์นผ๋ผ ๋จ์ด ํ์
------------
์๋
2
๊ฐ์ 5
for i in df.iterrows():
key = row[i][0] # ์๋
value = row[i][1] # 2
์ธํธ = dict(key : value) # ์๋
: 2
df['์๋ก์ด์นผ๋ผ'] = values
์๋ก์ด์นผ๋ผ
1
0
1
0
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 Franรงois Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Text classification with preprocessed text: Movie reviews
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of *binary*โor two-classโclassification, an important and widely applicable kind of machine learning problem.
We'll use the [IMDB dataset](https://www.tensorflow.org/datasets/catalog/imdb_reviews) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews.
This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/).
## Setup
```
import tensorflow as tf
from tensorflow import keras
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import numpy as np
print(tf.__version__)
```
<a id="download"></a>
## Download the IMDB dataset
The IMDB movie reviews dataset comes packaged in `tfds`. It has already been preprocessed so that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.
The following code downloads the IMDB dataset to your machine (or uses a cached copy if you've already downloaded it):
To encode your own text see the [Loading text tutorial](../load_data/text.ipynb)
```
(train_data, test_data), info = tfds.load(
# Use the version pre-encoded with an ~8k vocabulary.
'imdb_reviews/subwords8k',
# Return the train/test datasets as a tuple.
split = (tfds.Split.TRAIN, tfds.Split.TEST),
# Return (example, label) pairs from the dataset (instead of a dictionary).
as_supervised=True,
# Also return the `info` structure.
with_info=True)
```
<a id="encoder"></a>
## Try the encoder
The dataset `info` includes the text encoder (a `tfds.features.text.SubwordTextEncoder`).
```
encoder = info.features['text'].encoder
print ('Vocabulary size: {}'.format(encoder.vocab_size))
```
This text encoder will reversibly encode any string:
```
sample_string = 'Hello TensorFlow.'
encoded_string = encoder.encode(sample_string)
print ('Encoded string is {}'.format(encoded_string))
original_string = encoder.decode(encoded_string)
print ('The original string: "{}"'.format(original_string))
assert original_string == sample_string
```
The encoder encodes the string by breaking it into subwords or characters if the word is not in its dictionary. So the more a string resembles the dataset, the shorter the encoded representation will be.
```
for ts in encoded_string:
print ('{} ----> {}'.format(ts, encoder.decode([ts])))
```
## Explore the data
Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review.
The text of reviews have been converted to integers, where each integer represents a specific word-piece in the dictionary.
Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.
Here's what the first review looks like:
```
for train_example, train_label in train_data.take(1):
print('Encoded text:', train_example[:10].numpy())
print('Label:', train_label.numpy())
```
The `info` structure contains the encoder/decoder. The encoder can be used to recover the original text:
```
encoder.decode(train_example)
```
## Prepare the data for training
You will want to create batches of training data for your model. The reviews are all different lengths, so use `padded_batch` to zero pad the sequences while batching:
```
BUFFER_SIZE = 1000
train_batches = (
train_data
.shuffle(BUFFER_SIZE)
.padded_batch(32))
test_batches = (
test_data
.padded_batch(32))
```
Each batch will have a shape of `(batch_size, sequence_length)` because the padding is dynamic each batch will have a different length:
```
for example_batch, label_batch in train_batches.take(2):
print("Batch shape:", example_batch.shape)
print("label shape:", label_batch.shape)
```
## Build the model
The neural network is created by stacking layersโthis requires two main architectural decisions:
* How many layers to use in the model?
* How many *hidden units* to use for each layer?
In this example, the input data consists of an array of word-indices. The labels to predict are either 0 or 1. Let's build a "Continuous bag of words" style model for this problem:
Caution: This model doesn't use masking, so the zero-padding is used as part of the input, so the padding length may affect the output. To fix this, see the [masking and padding guide](../../guide/keras/masking_and_padding.ipynb).
```
# Continuous-bag-of-words classifier:
#   Embedding -> average over the sequence -> single logit output.
model = keras.Sequential([
    keras.layers.Embedding(encoder.vocab_size, 16),  # 16-dim word embeddings
    keras.layers.GlobalAveragePooling1D(),           # fixed-length vector per review
    keras.layers.Dense(1)])                          # linear activation: outputs a logit
model.summary()
```
The layers are stacked sequentially to build the classifier:
1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`. *To learn more about embeddings, see the [word embedding tutorial](../text/word_embeddings.ipynb).*
2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible.
3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.
4. The last layer is densely connected with a single output node. This uses the default *linear* activation function that outputs *logits* for numerical stability. Another option is to use the *sigmoid* activation function that returns a float value between 0 and 1, representing a probability, or confidence level.
### Hidden units
The above model has two intermediate or "hidden" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.
If a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patternsโpatterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later.
### Loss function and optimizer
A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs logits (a single-unit layer with a linear activation), we'll use the `binary_crossentropy` loss function.
This isn't the only choice for a loss function, you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilitiesโit measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions.
Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.
Now, configure the model to use an optimizer and a loss function:
```
model.compile(optimizer='adam',
loss=tf.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
```
## Train the model
Train the model by passing the `Dataset` object to the model's fit function. Set the number of epochs.
```
history = model.fit(train_batches,
epochs=10,
validation_data=test_batches,
validation_steps=30)
```
## Evaluate the model
And let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.
```
loss, accuracy = model.evaluate(test_batches)
print("Loss: ", loss)
print("Accuracy: ", accuracy)
```
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%.
## Create a graph of accuracy and loss over time
`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:
```
history_dict = history.history
history_dict.keys()
```
There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:
```
import matplotlib.pyplot as plt

# Pull the per-epoch metrics recorded by model.fit().
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']

epochs = range(1, len(acc) + 1)

# --- Loss curves ---
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# --- Accuracy curves ---
plt.clf()   # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
```
In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy.
Notice the training loss *decreases* with each epoch and the training accuracy *increases* with each epoch. This is expected when using a gradient descent optimizationโit should minimize the desired quantity on every iteration.
This isn't the case for the validation loss and accuracyโthey seem to peak after about twenty epochs. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to test data.
For this particular case, we could prevent overfitting by simply stopping the training after twenty or so epochs. Later, you'll see how to do this automatically with a callback.
| github_jupyter |
# RUL estimation UNIBO Powertools Dataset
```
import numpy as np
import pandas as pd
import scipy.io
import math
import os
import ntpath
import sys
import logging
import time
import sys
import random
from importlib import reload
import plotly.graph_objects as go
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import LSTM, Masking
IS_COLAB = False
IS_TRAINING = True
RESULT_NAME = ""
if IS_COLAB:
from google.colab import drive
drive.mount('/content/drive')
data_path = "/content/drive/My Drive/battery-state-estimation/battery-state-estimation/"
else:
data_path = "../../"
sys.path.append(data_path)
from data_processing.unibo_powertools_data import UniboPowertoolsData, CycleCols
from data_processing.model_data_handler import ModelDataHandler
from data_processing.prepare_rul_data import RulHandler
```
### Config logging
```
reload(logging)
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.DEBUG, datefmt='%Y/%m/%d %H:%M:%S')
```
# Load Data
```
dataset = UniboPowertoolsData(
test_types=[],
chunk_size=1000000,
lines=[37, 40],
charge_line=37,
discharge_line=40,
base_path=data_path
)
train_names = [
'000-DM-3.0-4019-S',#minimum capacity 1.48
'001-DM-3.0-4019-S',#minimum capacity 1.81
'002-DM-3.0-4019-S',#minimum capacity 2.06
'009-DM-3.0-4019-H',#minimum capacity 1.41
'010-DM-3.0-4019-H',#minimum capacity 1.44
'014-DM-3.0-4019-P',#minimum capacity 1.7
'015-DM-3.0-4019-P',#minimum capacity 1.76
'016-DM-3.0-4019-P',#minimum capacity 1.56
'017-DM-3.0-4019-P',#minimum capacity 1.29
#'047-DM-3.0-4019-P',#new 1.98
#'049-DM-3.0-4019-P',#new 2.19
'007-EE-2.85-0820-S',#2.5
'008-EE-2.85-0820-S',#2.49
'042-EE-2.85-0820-S',#2.51
'043-EE-2.85-0820-H',#2.31
'040-DM-4.00-2320-S',#minimum capacity 3.75, cycles 188
'018-DP-2.00-1320-S',#minimum capacity 1.82
#'019-DP-2.00-1320-S',#minimum capacity 1.61
'036-DP-2.00-1720-S',#minimum capacity 1.91
'037-DP-2.00-1720-S',#minimum capacity 1.84
'038-DP-2.00-2420-S',#minimum capacity 1.854 (to 0)
'050-DP-2.00-4020-S',#new 1.81
'051-DP-2.00-4020-S',#new 1.866
]
test_names = [
'003-DM-3.0-4019-S',#minimum capacity 1.84
'011-DM-3.0-4019-H',#minimum capacity 1.36
'013-DM-3.0-4019-P',#minimum capacity 1.6
'006-EE-2.85-0820-S',# 2.621
'044-EE-2.85-0820-H',# 2.43
'039-DP-2.00-2420-S',#minimum capacity 1.93
'041-DM-4.00-2320-S',#minimum capacity 3.76, cycles 190
]
dataset.prepare_data(train_names, test_names)
dataset_handler = ModelDataHandler(dataset, [
CycleCols.VOLTAGE,
CycleCols.CURRENT,
CycleCols.TEMPERATURE
])
rul_handler = RulHandler()
```
# Data preparation
```
CAPACITY_THRESHOLDS = {
3.0 : 2.7,#th 90% - min 2.1, 70%
2.85 : 2.7,#th 94.7% - min 2.622, 92%
2.0 : 1.93,#th 96.5% - min 1.93, 96.5%
4.0 : 3.77,#th 94.2% - min 3.77 94.2%
4.9 : 4.7,#th 95.9% - min 4.3, 87.7%
5.0 : 4.5#th 90% - min 3.63, 72.6%
}
N_CYCLE = 500
WARMUP_TRAIN = 15
WARMUP_TEST = 30
(train_x, train_y_soh, test_x, test_y_soh,
train_battery_range, test_battery_range,
time_train, time_test, current_train, current_test) = dataset_handler.get_discharge_whole_cycle_future(train_names, test_names)
train_x = train_x[:,:284,:]
test_x = test_x[:,:284,:]
print("cut train shape {}".format(train_x.shape))
print("cut test shape {}".format(test_x.shape))
train_y = rul_handler.prepare_y_future(train_names, train_battery_range, train_y_soh, current_train, time_train, CAPACITY_THRESHOLDS)
del globals()["current_train"]
del globals()["time_train"]
test_y = rul_handler.prepare_y_future(test_names, test_battery_range, test_y_soh, current_test, time_test, CAPACITY_THRESHOLDS)
del globals()["current_test"]
del globals()["time_test"]
x_norm = rul_handler.Normalization()
train_x, test_x = x_norm.normalize(train_x, test_x)
```
## compressing x using autoencoder
```
AUTOENCODER_WEIGHTS = '2021-07-19-17-29-18_autoencoder_unibo_powertools'
# Model definition
opt = tf.keras.optimizers.Adam(learning_rate=0.0002)
LATENT_DIM = 10
class Autoencoder(Model):
    """1-D convolutional autoencoder that compresses a discharge cycle
    to a low-dimensional latent vector.

    The encoder input shape is taken from the module-level train_x
    (cycle_length, n_features); the decoder mirrors the encoder with
    Conv1DTranspose layers back to a 3-channel sequence.
    """
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim  # size of the compressed representation
        # Encoder: two strided convolutions (each halves the length),
        # then flatten and project to the latent vector.
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(train_x.shape[1], train_x.shape[2])),
            #layers.MaxPooling1D(5, padding='same'),
            layers.Conv1D(filters=16, kernel_size=5, strides=2, activation='relu', padding='same'),
            layers.Conv1D(filters=8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Flatten(),
            layers.Dense(self.latent_dim, activation='relu')
        ])
        # Decoder: expand the latent vector to 71*8 = 568 units, then two
        # strided transposed convolutions to restore the sequence length,
        # and a final Conv1D to produce 3 output channels.
        self.decoder = tf.keras.Sequential([
            layers.Input(shape=(self.latent_dim)),
            layers.Dense(568, activation='relu'),
            layers.Reshape((71, 8)),
            layers.Conv1DTranspose(filters=8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv1DTranspose(filters=16, kernel_size=5, strides=2, activation='relu', padding='same'),
            layers.Conv1D(3, kernel_size=3, activation='relu', padding='same'),
            #layers.UpSampling1D(5),
        ])
    def call(self, x):
        """Encode then decode x, returning the reconstruction."""
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
autoencoder = Autoencoder(LATENT_DIM)
autoencoder.compile(optimizer=opt, loss='mse', metrics=['mse', 'mae', 'mape', tf.keras.metrics.RootMeanSquaredError(name='rmse')])
autoencoder.encoder.summary()
autoencoder.decoder.summary()
autoencoder.load_weights(data_path + 'results/trained_model/%s/model' % AUTOENCODER_WEIGHTS)
# compression
train_x = autoencoder.encoder(train_x).numpy()
test_x = autoencoder.encoder(test_x).numpy()
print("compressed train x shape {}".format(train_x.shape))
print("compressed test x shape {}".format(test_x.shape))
test_x = test_x[:,~np.all(train_x == 0, axis=0)]#we need same column number of training
train_x = train_x[:,~np.all(train_x == 0, axis=0)]
print("compressed train x shape without zero column {}".format(train_x.shape))
print("compressed test x shape without zero column {}".format(test_x.shape))
x_norm = rul_handler.Normalization()
train_x, test_x = x_norm.normalize(train_x, test_x)
train_x = rul_handler.battery_life_to_time_series(train_x, N_CYCLE, train_battery_range)
test_x = rul_handler.battery_life_to_time_series(test_x, N_CYCLE, test_battery_range)
train_x, train_y, train_battery_range, train_y_soh = rul_handler.delete_initial(train_x, train_y, train_battery_range, train_y_soh, WARMUP_TRAIN)
test_x, test_y, test_battery_range, test_y_soh = rul_handler.delete_initial(test_x, test_y, test_battery_range, test_y_soh, WARMUP_TEST)
# first one is SOH, we keep only RUL
train_y = train_y[:,1]
test_y = test_y[:,1]
```
## Y normalization
```
y_norm = rul_handler.Normalization()
train_y, test_y = y_norm.normalize(train_y, test_y)
```
# Model training
```
if IS_TRAINING:
EXPERIMENT = "lstm_autoencoder_rul_unibo_powertools"
experiment_name = time.strftime("%Y-%m-%d-%H-%M-%S") + '_' + EXPERIMENT
print(experiment_name)
# Model definition
# Use the `learning_rate` keyword: the `lr` alias is deprecated in Keras
# and the autoencoder above already uses `learning_rate=`.
opt = tf.keras.optimizers.Adam(learning_rate=0.000003)

# Stacked-LSTM regressor on the time-series of compressed cycle features:
# Masking skips zero-padded timesteps; two LSTM layers feed two dense
# layers and a single linear output (normalized RUL).
model = Sequential()
model.add(Masking(input_shape=(train_x.shape[1], train_x.shape[2])))
model.add(LSTM(128, activation='selu',
                return_sequences=True,
                kernel_regularizer=regularizers.l2(0.0002)))
model.add(LSTM(64, activation='selu', return_sequences=False,
                kernel_regularizer=regularizers.l2(0.0002)))
model.add(Dense(64, activation='selu', kernel_regularizer=regularizers.l2(0.0002)))
model.add(Dense(32, activation='selu', kernel_regularizer=regularizers.l2(0.0002)))
model.add(Dense(1, activation='linear'))
model.summary()
# Huber loss is robust to outlier RUL targets; track several error metrics.
model.compile(optimizer=opt, loss='huber', metrics=['mse', 'mae', 'mape', tf.keras.metrics.RootMeanSquaredError(name='rmse')])
if IS_TRAINING:
history = model.fit(train_x, train_y,
epochs=500,
batch_size=32,
verbose=1,
validation_split=0
)
if IS_TRAINING:
model.save(data_path + 'results/trained_model/%s.h5' % experiment_name)
hist_df = pd.DataFrame(history.history)
hist_csv_file = data_path + 'results/trained_model/%s_history.csv' % experiment_name
with open(hist_csv_file, mode='w') as f:
hist_df.to_csv(f)
history = history.history
if not IS_TRAINING:
history = pd.read_csv(data_path + 'results/trained_model/%s_history.csv' % RESULT_NAME)
model = keras.models.load_model(data_path + 'results/trained_model/%s.h5' % RESULT_NAME)
model.summary()
if not IS_TRAINING:
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(history)
```
### Testing
```
# Aggregate metrics over the whole test set.
results = model.evaluate(test_x, test_y, return_dict = True)
print(results)

# Worst-case error: evaluate each test sample individually and keep the
# largest per-sample RMSE.
max_rmse = 0
for index in range(test_x.shape[0]):
    result = model.evaluate(np.array([test_x[index, :, :]]), np.array([test_y[index]]), return_dict = True, verbose=0)
    max_rmse = max(max_rmse, result['rmse'])
print("Max rmse: {}".format(max_rmse))
```
# Results Visualization
```
fig = go.Figure()
fig.add_trace(go.Scatter(y=history['loss'],
mode='lines', name='train'))
fig.update_layout(title='Loss trend',
xaxis_title='epoch',
yaxis_title='loss',
width=1400,
height=600)
fig.show()
train_predictions = model.predict(train_x)
train_y = y_norm.denormalize(train_y)
train_predictions = y_norm.denormalize(train_predictions)
a = 0
for b in train_battery_range:
fig = go.Figure()
fig.add_trace(go.Scatter(x=train_y_soh[a:b], y=train_predictions[a:b,0],
mode='lines', name='predicted'))
fig.add_trace(go.Scatter(x=train_y_soh[a:b], y=train_y[a:b],
mode='lines', name='actual'))
fig.update_layout(title='Results on training',
xaxis_title='SoH Capacity',
yaxis_title='Remaining Ah until EOL',
xaxis={'autorange':'reversed'},
width=1400,
height=600)
fig.show()
a = b
a = 0
for b in train_battery_range:
fig = go.Figure()
fig.add_trace(go.Scatter(y=train_predictions[a:b,0],
mode='lines', name='predicted'))
fig.add_trace(go.Scatter(y=train_y[a:b],
mode='lines', name='actual'))
fig.update_layout(title='Results on training',
xaxis_title='Cycle',
yaxis_title='Remaining Ah until EOL',
width=1400,
height=600)
fig.show()
a = b
test_predictions = model.predict(test_x)
test_y = y_norm.denormalize(test_y)
test_predictions = y_norm.denormalize(test_predictions)
a = 0
for b in test_battery_range:
fig = go.Figure()
fig.add_trace(go.Scatter(x=test_y_soh[a:b], y=test_predictions[a:b,0],
mode='lines', name='predicted'))
fig.add_trace(go.Scatter(x = test_y_soh[a:b], y=test_y[a:b],
mode='lines', name='actual'))
fig.update_layout(title='Results on testing',
xaxis_title='SoH Capacity',
yaxis_title='Remaining Ah until EOL',
xaxis={'autorange':'reversed'},
width=1400,
height=600)
fig.show()
a = b
a = 0
for b in test_battery_range:
fig = go.Figure()
fig.add_trace(go.Scatter(y=test_predictions[a:b, 0],
mode='lines', name='predicted'))
fig.add_trace(go.Scatter(y=test_y[a:b],
mode='lines', name='actual'))
fig.update_layout(title='Results on testing',
xaxis_title='Cycle',
yaxis_title='Remaining Ah until EOL',
width=1400,
height=600)
fig.show()
a = b
```
| github_jupyter |
# Analyze Product Sentiment
```
import turicreate
import os
```
# Read product review data
```
# Load the Amazon baby-product reviews into an SFrame.
d = os.getcwd() #Gets the current working directory (NOTE(review): `d` is never used afterwards)
os.chdir("..")  # move up one level so the relative ./data path resolves
# NOTE(review): pointing at an internal frame_idx file looks fragile -- SFrame
# is normally given the .sframe directory itself; confirm this path is intended.
products = turicreate.SFrame('./data/amazon_baby.sframe/m_bfaa91c17752f745.frame_idx')
```
# Explore data
```
# Peek at the data, then rank products by number of reviews.
products
products.groupby('name',operations={'count':turicreate.aggregate.COUNT()}).sort('count',ascending=False)
```
# Examine the reviews for the most-reviewed product
```
# Focus on the single most-reviewed product.
giraffe_reviews = products[products['name']=='Vulli Sophie the Giraffe Teether']
giraffe_reviews
len(giraffe_reviews)
giraffe_reviews['rating'].show()
```
# Building a sentiment classifier
## Build word count vectors
```
# Bag-of-words features: one {word: count} dict per review.
products['word_count'] = turicreate.text_analytics.count_words(products['review'])
products
```
# Define what is positive and negative sentiment
```
products['rating'].show()
# Ignore all 3* reviews: they are ambiguous for a binary sentiment target.
products = products[products['rating']!= 3]
# Positive sentiment = 4-star or 5-star reviews (boolean target column).
products['sentiment'] = products['rating'] >= 4
products
products['sentiment'].show()
```
# Train our sentiment classifier
```
# 80/20 train/test split with a fixed seed for reproducibility.
train_data,test_data = products.random_split(.8,seed=0)
sentiment_model = turicreate.logistic_classifier.create(train_data,target='sentiment', features=['word_count'], validation_set=test_data)
```
# Apply the sentiment classifier to better understand the Giraffe reviews
```
# Score every review with the model's positive-class probability.
products['predicted_sentiment'] = sentiment_model.predict(products, output_type = 'probability')
products
# Re-slice the Giraffe reviews so they carry the new prediction column.
giraffe_reviews = products[products['name']== 'Vulli Sophie the Giraffe Teether']
giraffe_reviews
```
# Sort the Giraffe reviews according to predicted sentiment
```
# Rank Giraffe reviews from most to least positive predicted sentiment.
giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)
giraffe_reviews
giraffe_reviews.tail()
```
## Show the most positive reviews
```
# The two highest-predicted-sentiment reviews.
giraffe_reviews[0]['review']
giraffe_reviews[1]['review']
```
# Most negative reviews
```
# The two lowest-predicted-sentiment reviews.
giraffe_reviews[-1]['review']
giraffe_reviews[-2]['review']
# Small hand-picked vocabulary of strongly positive/negative words.
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
# count how many times did the customers mentioned the selected words
def getWordCount(data, word):
    """Return the count stored under `word` in the word-count dict, 0 if absent."""
    return int(data.get(word, 0))
# Create one column per selected word holding that word's count per review.
for word in selected_words:
    products[word] = products['word_count'].apply(lambda x:getWordCount(x,word))
# Total occurrences of each selected word across all reviews.
dicts = {}
for word in selected_words:
    if word not in dicts:
        dicts[word] = products[word].sum()
dicts
# Most- and least-used of the selected words.
print('Max:', max(dicts, key=dicts.get))
print('Min:', min(dicts, key=dicts.get))
# Same 80/20 split (same seed) as the full model, so results are comparable.
train_data,test_data = products.random_split(.8, seed=0)
features=selected_words
selected_words_model = turicreate.logistic_classifier.create(train_data,target='sentiment', features=features, validation_set=test_data)
selected_words_model
products['predicted_selected_words'] = selected_words_model.predict(products, output_type = 'probability')
products.print_rows(num_rows=30)
# Most positive Diaper Champ review according to the *full* sentiment model.
products[products['name']== 'Baby Trend Diaper Champ'].sort('predicted_sentiment', ascending=False)[0]
# Compare held-out accuracy of the word-subset model vs the full model.
selected_words_model.evaluate(test_data)
sentiment_model.evaluate(test_data)
# Majority-class baseline: fraction of positive (rating > 3) test reviews.
test_data[test_data['rating'] >3].num_rows()/test_data.num_rows()
```
| github_jupyter |
# Disease Outbreak Response Decision-making Under Uncertainty: A retrospective analysis of measles in Sao Paulo
```
%matplotlib inline
import pandas as pd
import numpy as np
import numpy.ma as ma
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sb
sb.set()  # apply seaborn's default plot styling
import pdb
# Fix the RNG seed so the stochastic analysis is reproducible.
np.random.seed(20090425)
data_dir = "data/"
```
Import outbreak data
```
# Case notifications; parse the date columns and normalize district spelling.
measles_data = pd.read_csv(data_dir+"measles.csv", index_col=0, encoding='latin-1')
measles_data.NOTIFICATION = pd.to_datetime(measles_data.NOTIFICATION)
measles_data.BIRTH = pd.to_datetime(measles_data.BIRTH)
measles_data.ONSET = pd.to_datetime(measles_data.ONSET)
# Harmonize the district name with the population table and drop the raw
# AGE column (YEAR_AGE is used below instead).
measles_data = (measles_data.replace({'DISTRICT': {'BRASILANDIA':'BRAZILANDIA'}})
                .drop('AGE', axis=1))
```
Sao Paulo population by district
```
# District-level population table for Sao Paulo.
sp_pop = pd.read_csv(data_dir+'sp_pop.csv', index_col=0)
# Match the district spelling used in the case data.
_names = sp_pop.index.values
_names[_names=='BRASILANDIA'] = 'BRAZILANDIA'
sp_pop.set_index(_names, inplace = True)
sp_pop.head(3)
```
Plot of cumulative cases by district
```
# Cumulative onset counts per district.
measles_onset_dist = measles_data.groupby(['DISTRICT','ONSET']).size().unstack(level=0).fillna(0)
measles_onset_dist.cumsum().plot(legend=False, grid=False)
# Case totals known by the two candidate decision dates.
measles_data[measles_data.ONSET<'1997-06-15'].shape
measles_data[measles_data.ONSET<'1997-07-15'].shape
```
Age distribution of cases, by confirmation status
```
# Stacked case counts by single year of age and conclusion status.
by_conclusion = measles_data.groupby(["YEAR_AGE", "CONCLUSION"])
counts_by_cause = by_conclusion.size().unstack().fillna(0)
ax = counts_by_cause.plot(kind='bar', stacked=True, xlim=(0,50), figsize=(15,5))
```
### Vaccination Data
```
# Annual routine vaccination coverage records for Brazil.
vaccination_data = pd.read_csv('data/BrazilVaxRecords.csv', index_col=0)
vaccination_data.head()
```
Calculate residual susceptibility from routine vaccination
```
# Coverage series by birth year: zero for cohorts born 1921-1979, then the
# first 17 recorded annual coverage values.
vax_97 = np.r_[[0]*(1979-1921+1), vaccination_data.VAX[:17]]
n = len(vax_97)
# Force-of-infection helper matrix built from residual susceptibility,
# assuming 90% vaccine efficacy (the 0.9 factor).
FOI_mat = np.resize((1 - vax_97*0.9), (n,n)).T
# Residual susceptibility by age (reversed so index 0 = youngest cohort).
vacc_susc = (1 - vax_97*0.9)[::-1]
# Youngest cohort set to 0.5 -- presumably maternal antibody protection;
# TODO(review): confirm this assumption.
vacc_susc[0] = 0.5
```
Susceptiblity accounting for SIAs
```
# Supplementary immunization activity (SIA) effect: cohorts born 1984-1991
# have their susceptibility scaled by 0.2 (i.e. 80% assumed covered).
sia_susc = np.ones(len(vax_97))
birth_year = np.arange(1922, 1998)[::-1]
by_mask = (birth_year > 1983) & (birth_year < 1992)
sia_susc[by_mask] *= 0.2
```
## Compilation of cases into 2-week intervals by age class
Age classes are defined in 5-year intervals. We will combine 40+ ages into a single class.
```
# 5-year age classes; the last class pools everyone aged 40+.
age_classes = [0,5,10,15,20,25,30,35,40,100]
measles_data.dropna(subset=['YEAR_AGE'], inplace=True)
measles_data['YEAR_AGE'] = measles_data.YEAR_AGE.astype(int)
measles_data['AGE_GROUP'] = pd.cut(measles_data.YEAR_AGE, age_classes, right=False)
```
Lab-checked observations are extracted for use in estimating lab confirmation probability.
```
# Boolean masks over the case-conclusion categories.
CONFIRMED = measles_data.CONCLUSION == 'CONFIRMED'
CLINICAL = measles_data.CONCLUSION == 'CLINICAL'
DISCARDED = measles_data.CONCLUSION == 'DISCARDED'
```
Extract lab-confirmed and clinical-confirmed subsets, with no missing county information.
```
# Lab-evaluated cases (confirmed or discarded) with known county.
lab_subset = measles_data[(CONFIRMED | DISCARDED) & measles_data.COUNTY.notnull()].copy()
age = lab_subset.YEAR_AGE.values
ages = lab_subset.YEAR_AGE.unique()
counties = lab_subset.COUNTY.unique()
# Per-case boolean outcome: True when the lab confirmed measles.
confirmed = (lab_subset.CONCLUSION=='CONFIRMED').values
# Clinically-confirmed cases (no lab result) with known county.
clinic_subset = measles_data[CLINICAL & measles_data.COUNTY.notnull()].copy()
```
Histogram of lab subset, by outcome.
```
# Stacked histogram of the lab subset by age and outcome.
_lab_subset = lab_subset.replace({"CONCLUSION": {"CLINICAL": "UNCONFIRMED"}})
by_conclusion = _lab_subset.groupby(["YEAR_AGE", "CONCLUSION"])
counts_by_cause = by_conclusion.size().unstack().fillna(0)
ax = counts_by_cause.plot(kind='bar', stacked=True, xlim=(0,50), figsize=(15,5), grid=False)
lab_subset.shape
```
Define age groups
```
# Assign each lab-subset case to a 5-year age class.
age_group = pd.cut(age, age_classes, right=False)
# Integer class index per case (position of its interval in the categories).
age_index = np.array([age_group.categories.tolist().index(i) for i in age_group])
age_groups = age_group.categories
age_groups
# Parse the "[lo, hi)" interval labels back into integer slices over single
# years of age, for downsampling annual series to the model's age groups.
age_slice_endpoints = [g[1:-1].split(',') for g in age_groups]
age_slices = [slice(int(i[0]), int(i[1])) for i in age_slice_endpoints]
```
Get index from full cross-tabulation to use as index for each district
```
# Full ONSET x AGE_GROUP date index; reused so every tabulation aligns on it.
dates_index = measles_data.groupby(['ONSET', 'AGE_GROUP']).size().unstack().index
```
## Cleanup of Sao Paulo population data
Match age groupings, exclude invalid districts.
```
# Build the age-structured population denominator N_age for the modeled
# districts, collapsing the 40+ population into the final age class.
unique_districts = measles_data.DISTRICT.dropna().unique()
excludes = ['BOM RETIRO']
# .loc replaces the long-deprecated .ix indexer (removed in pandas >= 1.0);
# for label-based lookup the two are equivalent.
N = sp_pop.drop(excludes).loc[unique_districts].sum().drop('Total')
N_age = N.iloc[:8]
N_age.index = age_groups[:-1]
# Pool everything beyond the first 8 classes into the 40+ class.
N_age[age_groups[-1]] = N.iloc[8:].sum()
N_age.plot(kind='bar')
```
Compile bi-weekly confirmed and unconfirmed data by Sao Paulo district
```
# All confirmed cases, by district
confirmed_data = lab_subset[lab_subset.CONCLUSION=='CONFIRMED']
# Total confirmed cases per age group over the whole outbreak.
confirmed_counts = (confirmed_data.groupby(['ONSET', 'AGE_GROUP'])
                    .size()
                    .unstack()
                    .reindex(dates_index)
                    .fillna(0)
                    .sum())
# .reindex replaces the deprecated .reindex_axis (removed in pandas >= 1.0);
# on a Series with default axis the two are equivalent.
all_confirmed_cases = (confirmed_counts.reindex(measles_data['AGE_GROUP'].unique())
                       .fillna(0).values.astype(int))
# Confirmed cases aggregated into 2-week periods per age group.
confirmed_counts_2w = (confirmed_data
                       .groupby(['ONSET', 'AGE_GROUP'])
                       .size()
                       .unstack()
                       .reindex(dates_index)
                       .fillna(0)
                       .resample('2W')
                       .sum())
confirmed_counts_2w
# All clinical cases, by district
clinical_counts = (clinic_subset.groupby(['ONSET', 'AGE_GROUP'])
                   .size()
                   .unstack()
                   .reindex(dates_index)
                   .fillna(0)
                   .sum())
all_clinical_cases = (clinical_counts.reindex(measles_data['AGE_GROUP'].unique())
                      .fillna(0).values.astype(int))
# Clinically-confirmed cases aggregated into 2-week periods per age group.
clinical_counts_2w = (clinic_subset
                      .groupby(['ONSET', 'AGE_GROUP'])
                      .size()
                      .unstack()
                      .reindex(dates_index)
                      .fillna(0)
                      .resample('2W')
                      .sum())
confirmed_counts_2w.head()
clinical_counts_2w.head()
```
Check shape of data frame
- 28 bi-weekly intervals, 9 age groups
```
# 28 bi-weekly periods x 9 age groups expected after resampling.
assert clinical_counts_2w.shape == (28, len(age_groups))
```
## Stochastic Disease Transmission Model
We will extend a simple SIR disease model, to account for confirmation status, which will be fit using MCMC.
This model fits the series of 2-week infection totals for each age group $a$ as a set of Poisson random variables:
\\[Pr(I_{a}(t) | \lambda_a(t)) = \text{Poisson}(\lambda_a(t)) \\]
Where the age-specific outbreak intensity at time $t$ is modeled as:
\\[\lambda_a(t) = S_a(t-1) \frac{I(t-1)\mathbf{B}}{N_a} \\]
where $S_a(t-1)$ is the number of susceptibles in age group $a$ in the previous time period, $I(t-1)$ an age-specific vector of the number of infected individuals in the previous time period, $\mathbf{B}$ a matrix of transmission coefficients (both within- and between-ages), and $N_a$ an estimate of the population of age-$a$ people in Sao Paulo.
The matrix $B$ was constructed from a scalar transmission parameter $\beta$, which was given a vague half-Cauchy prior (scale=25). This was used to represent within-age-group transmission, and hence placed on the diagonal of a square transmission matrix of size $A$. Off-diagonal elements, representing transmission between age groups were scaled by a decay parameter $\delta$ which was used to scale the transmission to adjacent groups according to:
\\[\beta \delta^{|a-b|}\\]
where a and b are indices of two age group. The resulting transmission matrix is parameterized as follows:
$$\begin{aligned}
\mathbf{B} = \left[{
\begin{array}{cccccc}
{\beta} & {\beta \delta} & {\beta \delta^2}& \ldots & {\beta \delta^{A-2}} & {\beta \delta^{A-1}} \\
{\beta \delta} & {\beta} & \beta \delta & \ldots & {\beta \delta^{A-3}} & {\beta \delta^{A-2}} \\
{\beta \delta^2} & \beta \delta & {\beta} & \ldots & {\beta \delta^{A-4}} & {\beta \delta^{A-3}} \\
\vdots & \vdots & \vdots & & \vdots & \vdots\\
{\beta \delta^{A-2}} & {\beta \delta^{A-3}} & {\beta \delta^{A-4}} & \ldots & {\beta} & \beta \delta \\
{\beta \delta^{A-1}} & {\beta \delta^{A-2}} & \beta \delta^{A-3} & \ldots & \beta \delta & {\beta}
\end{array}
}\right]
\end{aligned}$$
The basic reproductive number $R_0$ was calculated as the largest real-valued eigenvalue of the matrix $\mathbf{B}$. To impose a mild constraint on $R_0$, we applied a Gaussian prior distribution whose 1st and 99th quantiles are 8 and 24, respectively, a reasonable range for a measles outbreak:
```
from pymc import MCMC, Matplot, AdaptiveMetropolis, MAP, Slicer
from pymc import (Uniform, DiscreteUniform, Beta, Binomial, Normal,
CompletedDirichlet, Pareto,
Poisson, NegativeBinomial, negative_binomial_like, poisson_like,
Lognormal, Exponential, binomial_like,
TruncatedNormal, Binomial, Gamma, HalfCauchy, normal_like,
MvNormalCov, Bernoulli, Uninformative,
Multinomial, rmultinomial, rbinomial,
Dirichlet, multinomial_like, uniform_like)
from pymc import (Lambda, observed, invlogit, deterministic, potential, stochastic, logit)
def measles_model(obs_date, confirmation=True, migrant=False, constrain_R=True):
    '''
    Build the PyMC node set for the measles outbreak model, truncating the
    observed data at `obs_date`.

    Parameters
    ----------
    obs_date : str
        Cut-off date ('YYYY-MM-DD'); only 2-week periods up to this date
        are treated as observed.
    confirmation : bool
        If True, model the lab-confirmation process and impute true cases
        from the clinically-confirmed counts.
    migrant : bool
        If True, augment the susceptible pool with a migrant influx
        (data-augmentation sub-model).
    constrain_R : bool
        If True, add a weakly-informative potential keeping R0 in the
        typical measles range.

    Returns
    -------
    dict
        locals() -- every node defined here, consumed directly by pymc.MCMC.
    '''
    # Truncate data at the observation period.
    obs_index = clinical_counts_2w.index <= obs_date
    confirmed_obs_t = confirmed_counts_2w[obs_index].values.astype(int)
    clinical_obs_t = clinical_counts_2w[obs_index].values.astype(int)
    n_periods, n_age_groups = confirmed_obs_t.shape
    # Index for observation date, used to index out values of interest
    # from the model.
    t_obs = obs_index.sum() - 1
    # NOTE(review): this selects lab records notified *after* obs_date --
    # confirm that is the intended subset for the confirmation sub-model.
    lab_index = (lab_subset.NOTIFICATION > obs_date).values
    confirmed_t = confirmed[lab_index]
    age_index_t = age_index[lab_index]
    '''
    Confirmation sub-model
    '''
    if confirmation:
        # Specify priors on age-specific means
        age_classes = np.unique(age_index)
        ฮผ = Normal("ฮผ", mu=0, tau=0.0001, value=[0]*len(age_classes))
        ฯ = HalfCauchy('ฯ', 0, 25, value=1)
        var = ฯ**2
        # NOTE(review): this rebinds the same (mojibake-garbled) name used
        # for the scale parameter above; `var` was computed first, so the
        # graph still works, but the original Greek identifiers appear to
        # have been corrupted -- verify against the source notebook.
        ฯ = Uniform('ฯ', -1, 1, value=0)
        # Build variance-covariance matrix with first-order correlation
        # among age classes
        @deterministic
        def ฮฃ(var=var, cor=ฯ):
            I = np.eye(len(age_classes))*var
            E = np.diag(np.ones(len(age_classes)-1), k=-1)*var*cor
            return I + E + E.T
        # Age-specific probabilities of confirmation as multivariate normal
        # random variables
        ฮฒ_age = MvNormalCov("ฮฒ_age", mu=ฮผ, C=ฮฃ, value=[1]*len(age_classes))
        p_age = Lambda('p_age', lambda b=ฮฒ_age: invlogit(b))
        @deterministic(trace=False)
        def p_confirm(b=ฮฒ_age):
            return invlogit(b[age_index_t])
        # Confirmation likelihood
        lab_confirmed = Bernoulli('lab_confirmed', p=p_confirm, value=confirmed_t,
                                  observed=True)
    if confirmation:
        # Impute the number of true cases among the clinically-confirmed.
        @stochastic(dtype=int)
        def clinical_cases(value=(clinical_obs_t*0.5).astype(int),
                           n=clinical_obs_t, p=p_age):
            # Binomial confirmation process
            return np.sum([binomial_like(xi, ni, p) for xi,ni in zip(value,n)])
        # Total cases = lab-confirmed plus imputed true clinical cases.
        I = Lambda('I', lambda clinical=clinical_cases:
                   clinical + confirmed_obs_t.astype(int))
        assert I.value.shape == (t_obs +1, n_age_groups)
        age_dist_init = np.sum(I.value, 0)/ float(I.value.sum())
    else:
        # No confirmation correction: count every reported case.
        I = confirmed_obs_t + clinical_obs_t
        assert I.shape == (t_obs +1, n_age_groups)
        age_dist_init = np.sum(I, 0) / float(I.sum())
    # Calculate age distribution from observed distribution of infecteds to date
    _age_dist = Dirichlet('_age_dist', np.ones(n_age_groups),
                          value=age_dist_init[:-1]/age_dist_init.sum())
    age_dist = CompletedDirichlet('age_dist', _age_dist)
    @potential
    def age_dist_like(p=age_dist, I=I):
        return multinomial_like(I.sum(0), I.sum(), p)
    '''
    Disease transmission model
    '''
    # Transmission parameter
    ฮฒ = HalfCauchy('ฮฒ', 0, 25, value=5) #[1]*n_age_groups)
    # Between-age-group transmission decays geometrically with class distance.
    decay = Beta('decay', 1, 5, value=0.8)
    @deterministic
    def B(b=ฮฒ, d=decay):
        # Square transmission matrix: within-group rate on the diagonal,
        # off-diagonals scaled by decay**|distance|.
        b = np.ones(n_age_groups)*b
        B = b*np.eye(n_age_groups)
        for i in range(1, n_age_groups):
            B += np.diag(np.ones(n_age_groups-i)*b[i:]*d**i, k=-i)
            B += np.diag(np.ones(n_age_groups-i)*b[:-i]*d**i, k=i)
        return B
    # Downsample annual series to observed age groups
    downsample = lambda x: np.array([x[s].mean() for s in age_slices])
    @deterministic
    def R0(B=B):
        # R0 = dominant real eigenvalue of the transmission matrix.
        evs = np.linalg.eigvals(B)
        return max(evs[np.isreal(evs)])
    if constrain_R:
        @potential
        def constrain_R0(R0=R0):
            # Weakly-informative prior to constrain R0 to be within the
            # typical measles range
            return normal_like(R0, 16, 3.4**-2)
    # Mean age at infection implied by R0 (assumes 75-year life expectancy).
    A = Lambda('A', lambda R0=R0: 75./(R0 - 1))
    lt_sum = downsample(np.tril(FOI_mat).sum(0)[::-1])
    # Residual susceptibility from natural infection history.
    natural_susc = Lambda('natural_susc', lambda A=A: np.exp((-1/A) * lt_sum))
    @deterministic
    def p_ฮผ(natural_susc=natural_susc):
        # Combined susceptibility: SIA x routine vaccination x natural.
        return downsample(sia_susc) * downsample(vacc_susc) * natural_susc
    if True:
        # Following Stan manual chapter 16.2
        ฮป_p = Pareto('ฮป_p', 1.5, 0.1, value=0.5)
        a = Lambda('a', lambda mu=p_ฮผ, lam=ฮป_p: mu*lam, trace=False)
        b = Lambda('b', lambda mu=p_ฮผ, lam=ฮป_p: (1-mu)*lam, trace=False)
        p_susceptible = Beta('p_susceptible', a, b, value=p_ฮผ.value)
    else:
        # Alternative (disabled) logit-normal parameterization.
        p_ฯ = HalfCauchy('p_ฯ', 0, 5, value=1)
        m = Lambda('m', lambda p=p_ฮผ: logit(p))
        ฮธ_p = Normal('theta_p', m, p_ฯ)
        p_susceptible = Lambda('p_susceptible', lambda ฮธ_p=ฮธ_p: invlogit(ฮธ_p))
    # Estimated total initial susceptibles
    S_0 = Binomial('S_0', n=N_age.values.astype(int), p=p_susceptible)
    '''
    Model of migrant influx of susceptibles
    '''
    if migrant:
        # Data augmentation for migrant susceptibles
        imaginary_migrants = 1000000
        N_migrant = DiscreteUniform('N_migrant', 0, imaginary_migrants, value=100000)
        ฮผ_age = Uniform('ฮผ_age', 15, 35, value=25)
        ฯ_age = Uniform('ฯ_age', 1, 10, value=5)
        M_age = Normal('M_age', ฮผ_age, ฯ_age**-2,
                       size=imaginary_migrants, trace=False)
        @deterministic
        def M_0(M=M_age, N=N_migrant):
            # Take first N augmented susceptibles
            M_real = M[:N]
            # Drop into age groups
            M_group = pd.cut(M_real,
                             [0, 5, 10, 15, 20, 25, 30, 35, 40, 100],
                             right=False)
            return M_group.value_counts().values
        # Probability an infection came from the migrant pool.
        p_migrant = Lambda('p_migrant', lambda M_0=M_0, S_0=S_0: M_0/(M_0 + S_0))
        I_migrant = [Binomial('I_migrant_%i' % i, I[i], p_migrant)
                     for i in range(t_obs + 1)]
        I_local = Lambda('I_local',
                         lambda I=I, I_m=I_migrant:
                         np.array([Ii - Imi for Ii,Imi in zip(I,I_m)]))
        S = Lambda('S', lambda I=I, S_0=S_0, M_0=M_0: S_0 + M_0 - I.cumsum(0))
        S_local = Lambda('S_local', lambda I=I_local, S_0=S_0: S_0 - I.cumsum(0))
    else:
        # Remaining susceptibles at each 2-week period
        S = Lambda('S', lambda I=I, S_0=S_0: S_0 - I.cumsum(axis=0))
    # Check shape
    assert S.value.shape == (t_obs+1., n_age_groups)
    # Susceptibles at time t, by age
    S_age = Lambda('S_age', lambda S=S: S[-1].astype(int))
    # Force of infection
    @deterministic
    def ฮป(B=B, I=I, S=S):
        return S * (I.dot(B) / N_age.values)
    # Check shape
    assert ฮป.value.shape == (t_obs+1, n_age_groups)
    # FOI in observation period
    ฮป_t = Lambda('ฮป_t', lambda lam=ฮป: lam[-1])
    # Effective reproductive number
    R_t = Lambda('R_t', lambda S=S, R0=R0: S.sum(1) * R0 / N_age.sum())
    if migrant:
        R_t_local = Lambda('R_t_local', lambda S=S_local, R0=R0: S.sum(1) * R0 / N_age.sum())
    # Poisson likelihood for observed cases
    @potential
    def new_cases(I=I, lam=ฮป):
        return poisson_like(I[1:], lam[:-1])
    '''
    Vaccination targets
    '''
    @deterministic
    def vacc_5(S=S_age):
        # Vaccination of 5 and under
        p = [0.95] + [0]*(n_age_groups - 1)
        return rbinomial(S, p)
    # Proportion of susceptibles vaccinated
    pct_5 = Lambda('pct_5',
                   lambda V=vacc_5, S=S_age: V.sum()/S.sum())
    @deterministic
    def vacc_15(S=S_age):
        # Vaccination of 15 and under
        p = [0.95]*3 + [0]*(n_age_groups - 3)
        return rbinomial(S, p)
    # Proportion of susceptibles vaccinated
    pct_15 = Lambda('pct_15',
                    lambda V=vacc_15, S=S_age: V.sum()/S.sum())
    @deterministic
    def vacc_30(S=S_age):
        # Vaccination of 30 and under
        p = [0.95]*6 + [0]*(n_age_groups - 6)
        return rbinomial(S, p)
    # Proportion of 30 and under susceptibles vaccinated
    pct_30 = Lambda('pct_30',
                    lambda V=vacc_30, S=S_age: V.sum()/S.sum())
    @deterministic
    def vacc_adult(S=S_age):
        # Vaccination of adults under 30 (and young kids)
        p = [0.95, 0, 0, 0, 0.95, 0.95] + [0]*(n_age_groups - 6)
        return rbinomial(S, p)
    # Proportion of adults under 30 (and young kids)
    pct_adult = Lambda('pct_adult',
                       lambda V=vacc_adult, S=S_age: V.sum()/S.sum())
    # Expose every node to the caller (pymc.MCMC accepts this dict).
    return locals()
```
## Model execution
Run models for June 15 and July 15 observation points, both with and without clinical confirmation.
```
# MCMC settings: 50k iterations, first 40k discarded as burn-in.
n_iterations = 50000
n_burn = 40000
migrant = True
# Baseline July model without the migrant component.
# NOTE(review): this model is instantiated but never sampled or summarized below.
model_july_nomigrant = MCMC(measles_model('1997-07-15', migrant=False))
```
Use `backgroundjobs` to run the models each in their own thread:
```
# Thread manager so the four chains can sample concurrently.
from IPython.lib import backgroundjobs as bg
jobs = bg.BackgroundJobManager()
```
Instantiate models
```
# Four model variants: June/July cut-offs x with/without confirmation sub-model.
model_june = MCMC(measles_model('1997-06-15', migrant=migrant))
model_july = MCMC(measles_model('1997-07-15', migrant=migrant))
model_june_noconf = MCMC(measles_model('1997-06-15', confirmation=False, migrant=migrant))
model_july_noconf = MCMC(measles_model('1997-07-15', confirmation=False, migrant=migrant))
```
Run models
```
# Sample each model in its own background thread.
for model in model_june, model_july, model_june_noconf, model_july_noconf:
    jobs.new(model.sample, n_iterations, n_burn, kw=dict(progress_bar=False))
jobs.status()
```
## Summary of model output
Estimate of R0 for june (with confirmation submodel)
```
# summary_plot needs a vector-valued node; fall back to a trace plot for scalars.
if model_june.R0.value.shape:
    Matplot.summary_plot(model_june.R0, custom_labels=age_groups)
else:
    Matplot.plot(model_june.R0)
```
Estimate of R0 for june (no confirmation submodel)
```
# Plot the June no-confirmation R0 estimate. Bug fix: the condition checked
# model_june's node while plotting model_june_noconf's (copy-paste slip);
# test the same model that is plotted.
if model_june_noconf.R0.value.shape:
    Matplot.summary_plot(model_june_noconf.R0, custom_labels=age_groups)
else:
    Matplot.plot(model_june_noconf.R0)
```
Estimate of R0 for july (with confirmation submodel)
```
# Plot the July R0 estimate. Fix: the other cells test R0.value.shape; the
# original checked the transmission-parameter node's .shape directly, which
# is both inconsistent and not a trace-value shape -- use R0.value.shape.
if model_july.R0.value.shape:
    Matplot.summary_plot(model_july.R0, custom_labels=age_groups)
else:
    Matplot.plot(model_july.R0)
```
Estimate of R0 for july (no confirmation submodel)
```
# Plot the July no-confirmation R0 estimate; same fix as the July cell:
# test R0.value.shape, consistent with the June cells.
if model_july_noconf.R0.value.shape:
    Matplot.summary_plot(model_july_noconf.R0, custom_labels=age_groups)
else:
    Matplot.plot(model_july_noconf.R0)
```
Lab confirmation rates, June model
```
# Posterior age-specific lab-confirmation probabilities (June model).
p_age = pd.DataFrame(model_june.p_age.trace(), columns=age_groups)
f, axes = plt.subplots(figsize=(14,6))
sb.boxplot(data=p_age, linewidth=0.3, fliersize=0, ax=axes,
           color=sb.color_palette("coolwarm", 5)[0],
           order=age_group.categories)
axes.set_ylabel('Confirmation rate')
axes.set_xlabel('Age group')
```
Proportion of **local** population susceptible, June model.
```
# Posterior susceptible proportion by age (June, with confirmation sub-model).
Matplot.summary_plot(model_june.p_susceptible, custom_labels=age_groups)
```
Proportion of **local** population susceptible, June model with no confirmation correction
```
# Posterior susceptible proportion by age (June, no confirmation correction).
Matplot.summary_plot(model_june_noconf.p_susceptible, custom_labels=age_groups)
```
Epidemic intensity estimates at June or July observation time, by age group.
```
# Epidemic intensity at the observation date, by age group (June vs July).
Matplot.summary_plot(model_june.ฮป_t, custom_labels=age_groups)
Matplot.summary_plot(model_july.ฮป_t, custom_labels=age_groups)
```
Time series of epidemic intensities for lab- versus clinical-confirmation models, for each age group.
```
# Posterior-median epidemic-intensity time series, lab vs clinical confirmation.
lam_june = model_june.ฮป.stats()
fig, axes = plt.subplots(2, 1, sharey=True)
axes[0].plot(lam_june['quantiles'][50], 'b-', alpha=0.4)
axes[0].set_ylabel('Epidemic intensity')
axes[0].set_xlabel('time (2-week periods)')
axes[0].set_title('Lab confirmation')
lam_june_noconf = model_june_noconf.ฮป.stats()
axes[1].plot(lam_june_noconf['quantiles'][50], 'b-', alpha=0.4)
axes[1].set_ylabel('Epidemic intensity')
axes[1].set_xlabel('time (2-week periods)')
axes[1].set_title('Clinical confirmation')
plt.tight_layout()
# R(t) at the observation date: total vs local (non-migrant) pool, June model.
# NOTE(review): boxplot's return_type argument is deprecated in newer pandas.
Rt_values = pd.DataFrame(np.c_[model_june.R_t.trace()[:, -1],
                               model_june.R_t_local.trace()[:, -1]],
                         columns=['June, total', 'June, local'])
ax = Rt_values.boxplot(return_type='axes');
ax.set_ylabel('R(t)')
# R(t) across all four model variants.
Rt_values = pd.DataFrame(np.c_[model_june.R_t.trace()[:, -1],
                               model_june_noconf.R_t.trace()[:, -1],
                               model_july.R_t.trace()[:, -1],
                               model_july_noconf.R_t.trace()[:, -1]],
                         columns=['June, confirmation', 'June, no confirmation',
                                  'July, confirmation', 'July, no confirmation'])
ax = Rt_values.boxplot(return_type='axes', figsize=(14,6));
ax.set_ylabel('R(t)')
# Long-format posterior susceptible counts by age, tagged by confirmation type.
S_age_june = pd.DataFrame(model_june.S_age.trace().squeeze(), columns=age_groups).unstack().reset_index()
S_age_june.columns = 'Age', 'Iteration', 'S'
S_age_june['Confirmation'] = 'Lab'
S_age_june_noconf = pd.DataFrame(model_june_noconf.S_age.trace().squeeze(), columns=age_groups).unstack().reset_index()
S_age_june_noconf.columns = 'Age', 'Iteration', 'S'
S_age_june_noconf['Confirmation'] = 'Clinical'
S_age_june = pd.concat([S_age_june, S_age_june_noconf], ignore_index=True)
# Same for the July models.
S_age_july = pd.DataFrame(model_july.S_age.trace().squeeze(), columns=age_groups).unstack().reset_index()
S_age_july.columns = 'Age', 'Iteration', 'S'
S_age_july['Confirmation'] = 'Lab'
S_age_july_noconf = pd.DataFrame(model_july_noconf.S_age.trace().squeeze(), columns=age_groups).unstack().reset_index()
S_age_july_noconf.columns = 'Age', 'Iteration', 'S'
S_age_july_noconf['Confirmation'] = 'Clinical'
S_age_july = pd.concat([S_age_july, S_age_july_noconf], ignore_index=True)
```
Numbers of suscepibles in each age group, under lab vs clinical confirmation
```
# Susceptibles by age group, lab vs clinical confirmation (June).
g = sb.factorplot("Age", "S", "Confirmation", S_age_june, kind="box",
                  palette="hls", size=6, aspect=2, linewidth=0.3, fliersize=0,
                  order=age_group.categories)
g.despine(offset=10, trim=True)
g.set_axis_labels("Age Group", "Susceptibles");
```
### Vaccination coverage by strategy
```
model_june.summary(['pct_5', 'pct_15', 'pct_30', 'pct_adult'])
# Collect vaccination-coverage traces from all four models into one frame.
# NOTE(review): [:9999] truncates each trace to a common length -- confirm
# the intended cap (n_iterations - n_burn leaves 10000 samples).
june_coverage = pd.DataFrame({name: model_june.trace(name)[:9999] for name in ['pct_5', 'pct_15', 'pct_30', 'pct_adult']})
june_coverage['Month'] = 'June'
june_coverage['Confirmation'] = 'Lab'
june_noconf_coverage = pd.DataFrame({name: model_june_noconf.trace(name)[:9999] for name in ['pct_5', 'pct_15', 'pct_30', 'pct_adult']})
june_noconf_coverage['Month'] = 'June'
june_noconf_coverage['Confirmation'] = 'Clinical'
july_coverage = pd.DataFrame({name: model_july.trace(name)[:9999] for name in ['pct_5', 'pct_15', 'pct_30', 'pct_adult']})
july_coverage['Month'] = 'July'
july_coverage['Confirmation'] = 'Lab'
july_noconf_coverage = pd.DataFrame({name: model_july_noconf.trace(name)[:9999] for name in ['pct_5', 'pct_15', 'pct_30', 'pct_adult']})
july_noconf_coverage['Month'] = 'July'
july_noconf_coverage['Confirmation'] = 'Clinical'
coverage = pd.concat([june_coverage, june_noconf_coverage, july_coverage, july_noconf_coverage],
                     ignore_index=True)
# Coverage by strategy, faceted by month and confirmation type.
sb.factorplot(row="Month", col="Confirmation", data=coverage, kind='box',
              row_order=['June', 'July'],
              order=['pct_5', 'pct_15', 'pct_30', 'pct_adult'],
              palette="YlGnBu_d", linewidth=0.7, fliersize=0, aspect=1.25).despine(left=True)
# Standalone June boxplot with readable strategy labels.
axes = sb.boxplot(data=june_coverage, order=['pct_5', 'pct_15', 'pct_30', 'pct_adult'],
                  color=sb.color_palette("coolwarm", 5)[0])
axes.set_xticklabels(['Under 5', 'Under 15', 'Under 30', 'Under 5 + 20-30'])
axes.set_ylabel('% susceptibles vaccinated')
sb.despine(offset=10, trim=True)
model_june_noconf.summary(['pct_5', 'pct_15', 'pct_30', 'pct_adult'])
model_july.summary(['pct_5', 'pct_15', 'pct_30', 'pct_adult'])
model_july_noconf.summary(['pct_5', 'pct_15', 'pct_30', 'pct_adult'])
```
Initial migrant susceptibles (June model, with confirmation)
```
# Posterior for the initial number of migrant susceptibles (June model).
model_june.summary(['N_migrant'])
```
By age group:
```
# Migrant susceptibles by age group (June model).
Matplot.summary_plot(model_june.M_0, custom_labels=age_group.categories)
# Local vs total R(t) at the observation date, for each model variant.
june_r = pd.DataFrame({'local': model_june.trace('R_t_local')[:, -1],
                       'total': model_june.trace('R_t')[:, -1]})
june_r['Month'] = 'June'
june_r['Confirmation'] = 'Lab'
june_noconf_r = pd.DataFrame({'local': model_june_noconf.trace('R_t_local')[:, -1],
                              'total': model_june_noconf.trace('R_t')[:, -1]})
june_noconf_r['Month'] = 'June'
june_noconf_r['Confirmation'] = 'Clinical'
july_r = pd.DataFrame({'local': model_july.trace('R_t_local')[:, -1],
                       'total': model_july.trace('R_t')[:, -1]})
july_r['Month'] = 'July'
july_r['Confirmation'] = 'Lab'
july_noconf_r = pd.DataFrame({'local': model_july_noconf.trace('R_t_local')[:, -1],
                              'total': model_july_noconf.trace('R_t')[:, -1]})
july_noconf_r['Month'] = 'July'
july_noconf_r['Confirmation'] = 'Clinical'
r_estimates = pd.concat([june_r, june_noconf_r, july_r, july_noconf_r],
                        ignore_index=True)
g = sb.factorplot(row="Month", col="Confirmation", data=r_estimates, kind='box',
                  row_order=['June', 'July'],
                  order=['local', 'total'], margin_titles=True,
                  palette="YlGnBu_d", linewidth=0.7, fliersize=0, aspect=1.25).despine(left=True)
g.set_ylabels(r"$R_t$")
# Mark the epidemic threshold R(t) = 1 on every facet.
for ax in g.axes.ravel():
    ax.hlines(1, -1, 2, linestyles='dashed')
```
| github_jupyter |
# Keras tutorial - the Happy House
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
```
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras import regularizers, optimizers
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from keras.models import Sequential
%matplotlib inline
```
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
## 1 - The Happy House
For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
<img src="images/happy-house.jpg" style="width:350px;height:270px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
<img src="images/house-members.png" style="width:550px;height:250px;">
Run the following code to normalize the dataset and learn about its shapes.
```
# Load the Happy House dataset (load_dataset comes from kt_utils).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors to [0, 1]
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape label arrays to one column per example
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Details of the "Happy" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures
It is now time to solve the "Happy" Challenge.
## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
return model
```
Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
**Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
```
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    A VGG-style stack of three Conv->ReLU->BatchNorm double-blocks with
    max-pooling, increasing dropout, and L2 weight decay, ending in a
    single-unit binary classifier.

    Arguments:
    input_shape -- side length of the square, 3-channel input images;
                   the model input shape is (input_shape, input_shape, 3)

    Returns:
    model -- a Keras Sequential model instance
    """
    ### START CODE HERE ###
    # Feel free to use the suggested outline in the text above to get started,
    # and run through the whole exercise once. Then come back and also try out
    # other network architectures.
    weight_decay = 1e-4
    baseMapNum = 32

    model = Sequential()
    # Block 1: 32 feature maps, then halve spatial dims and apply dropout.
    model.add(Conv2D(baseMapNum, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay),
                     input_shape=(input_shape, input_shape, 3)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(baseMapNum, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    # Block 2: 64 feature maps.
    model.add(Conv2D(2 * baseMapNum, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(2 * baseMapNum, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    # Block 3: 128 feature maps.
    model.add(Conv2D(4 * baseMapNum, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(4 * baseMapNum, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))

    # Classifier head.
    model.add(Flatten())
    # BUG FIX: softmax over a single output unit always yields 1.0, so the
    # model could never learn with binary_crossentropy; a one-unit binary
    # classifier needs a sigmoid activation (matching HappyModel2 below).
    model.add(Dense(1, activation='sigmoid'))

    model.summary()
    ### END CODE HERE ###
    return model
# GRADED FUNCTION: HappyModel2
def HappyModel2(input_shape):
    """
    Implementation of the HappyModel (Keras functional API version).

    ZEROPAD -> CONV -> BN -> RELU -> MAXPOOL -> FLATTEN -> single sigmoid unit.

    Arguments:
    input_shape -- side length of the square, 3-channel input images;
                   the model input shape is (input_shape, input_shape, 3)

    Returns:
    model -- a Model() instance in Keras
    """
    ### START CODE HERE ###
    X_input = Input(shape=(input_shape, input_shape, 3))
    # Zero-Padding: pads the border of X_input with zeroes
    X = ZeroPadding2D((3, 3))(X_input)
    # CONV -> BN -> RELU Block applied to X
    X = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    # MAXPOOL
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    # FLATTEN X (convert it to a vector) + FULLYCONNECTED sigmoid output
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)
    # Create model. This creates your Keras model instance; you'll use this
    # instance to train/test the model.
    model = Model(inputs=X_input, outputs=X, name='HappyModel')
    ### END CODE HERE ###
    # BUG FIX: the original had two `return model` statements (one before the
    # END CODE marker and an unreachable duplicate after it); keep one.
    return model
```
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
**Exercise**: Implement step 1, i.e. create the model.
```
### START CODE HERE ### (1 line)
happyModel = HappyModel(64)
### END CODE HERE ###
### START CODE HERE ### (1 line)
happyModel2 = HappyModel2(64)
### END CODE HERE ###
```
**Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
```
### START CODE HERE ### (1 line)
happyModel.compile(loss='binary_crossentropy',
optimizer="Adam",
metrics=['accuracy'])
### END CODE HERE ###
### START CODE HERE ### (1 line)
happyModel2.compile(loss='binary_crossentropy',
optimizer="Adam",
metrics=['accuracy'])
### END CODE HERE ###
```
**Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
```
### START CODE HERE ### (1 line)
happyModel.fit(X_train, Y_train,
batch_size=16,
epochs=40,
validation_data=(X_test, Y_test),
shuffle=True)
### END CODE HERE ###
### START CODE HERE ### (1 line)
happyModel2.fit(X_train, Y_train,
batch_size=16,
epochs=40,
validation_data=(X_test, Y_test),
shuffle=True)
### END CODE HERE ###
```
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
**Exercise**: Implement step 4, i.e. test/evaluate the model.
```
### START CODE HERE ### (1 line)
preds = happyModel2.evaluate(X_test,Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
If you have not yet achieved a very good accuracy (let's say more than 80%), here're some things you can play around with to try to achieve it:
- Try using blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find Adam works well.
- If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
- Run on more epochs, until you see the train accuracy plateauing.
Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
## 3 - Conclusion
Congratulations, you have solved the Happy House challenge!
Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
<font color='blue'>
**What we would like you to remember from this assignment:**
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
- Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
```
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
```
## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.
```
happyModel.summary()
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Algorithms/landsat_surface_reflectance.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/landsat_surface_reflectance.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Algorithms/landsat_surface_reflectance.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/landsat_surface_reflectance.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
```
import subprocess

# Install geehydro on first use; trying the import is the cheapest existence
# check (EAFP). pip pulls in its dependencies (earthengine-api, folium).
try:
    import geehydro
except ImportError:
    print('geehydro package not installed. Installing ...')
    # NOTE(review): this invokes whatever "python" is first on PATH, which may
    # not be the running interpreter — sys.executable would be safer.
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
```
# Initialize the Earth Engine API; fall back to the interactive auth flow if
# the first Initialize() fails (authentication is only needed once).
try:
    ee.Initialize()
except Exception as e:
    # Typically a missing-credentials failure; authenticate, then retry.
    # NOTE(review): `e` is bound but never used.
    ee.Authenticate()
    ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Folium map centered on the continental US; setOptions('HYBRID') is one of
# the methods geehydro adds to folium maps (see the install notes above).
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# You can access pre-computed surface reflectance images directly from the SR collections. For example, to load a Landsat 7 surface reflectance image, use:
srImage = ee.Image('LANDSAT/LE07/C01/T1_SR/LE07_044034_19990707')
# The surface reflectance datasets for Collection 1 Landsat 4 through 7 are:
surfaceReflectanceL4 = ee.ImageCollection('LANDSAT/LT04/C01/T1_SR')
surfaceReflectanceL5 = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
surfaceReflectanceL7 = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
# Center the map on the image (zoom 9) and draw a B4/B3/B2 composite
# (presumably a NIR/red/green false-color view for Landsat 7 — confirm band
# semantics against the dataset catalog).
Map.centerObject(srImage, 9)
Map.addLayer(srImage, {'bands': ['B4', 'B3', 'B2']}, 'Landsat Surface Reflectance')
# Map.addLayer(srImage)
# print(srImage.getInfo())
```
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
# Example: CanvasXpress violin Chart No. 1
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/violin-1.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="violin1",
data={
"y": {
"smps": [
"Var1",
"Var2",
"Var3",
"Var4",
"Var5",
"Var6",
"Var7",
"Var8",
"Var9",
"Var10",
"Var11",
"Var12",
"Var13",
"Var14",
"Var15",
"Var16",
"Var17",
"Var18",
"Var19",
"Var20",
"Var21",
"Var22",
"Var23",
"Var24",
"Var25",
"Var26",
"Var27",
"Var28",
"Var29",
"Var30",
"Var31",
"Var32",
"Var33",
"Var34",
"Var35",
"Var36",
"Var37",
"Var38",
"Var39",
"Var40",
"Var41",
"Var42",
"Var43",
"Var44",
"Var45",
"Var46",
"Var47",
"Var48",
"Var49",
"Var50",
"Var51",
"Var52",
"Var53",
"Var54",
"Var55",
"Var56",
"Var57",
"Var58",
"Var59",
"Var60"
],
"data": [
[
4.2,
11.5,
7.3,
5.8,
6.4,
10,
11.2,
11.2,
5.2,
7,
16.5,
16.5,
15.2,
17.3,
22.5,
17.3,
13.6,
14.5,
18.8,
15.5,
23.6,
18.5,
33.9,
25.5,
26.4,
32.5,
26.7,
21.5,
23.3,
29.5,
15.2,
21.5,
17.6,
9.7,
14.5,
10,
8.2,
9.4,
16.5,
9.7,
19.7,
23.3,
23.6,
26.4,
20,
25.2,
25.8,
21.2,
14.5,
27.3,
25.5,
26.4,
22.4,
24.5,
24.8,
30.9,
26.4,
27.3,
29.4,
23
]
],
"vars": [
"len"
]
},
"x": {
"supp": [
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ"
],
"order": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
],
"dose": [
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2
]
}
},
config={
"axisAlgorithm": "rPretty",
"axisTickScaleFontFactor": 1.8,
"axisTitleFontStyle": "bold",
"axisTitleScaleFontFactor": 1.8,
"background": "white",
"backgroundType": "window",
"backgroundWindow": "#E5E5E5",
"graphOrientation": "vertical",
"graphType": "Boxplot",
"groupingFactors": [
"dose"
],
"guides": "solid",
"guidesColor": "white",
"showBoxplotIfViolin": False,
"showLegend": False,
"showViolinBoxplot": True,
"smpLabelRotate": 90,
"smpLabelScaleFontFactor": 1.8,
"smpTitle": "dose",
"smpTitleFontStyle": "bold",
"smpTitleScaleFontFactor": 1.8,
"theme": "CanvasXpress",
"title": "The Effect of Vitamin C on Tooth Growth in Guinea Pigs",
"violinScale": "area",
"xAxis2Show": False,
"xAxisMinorTicks": False,
"xAxisTickColor": "white",
"xAxisTitle": "len"
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="violin_1.html")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/srimanthtenneti/Deep-Learning-NanoDegree/blob/main/Capsule_Networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Capsule Networks
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import torchvision
from torchvision import transforms , datasets
# Configuring the seed
seed = 2
np.random.seed(seed)
torch.manual_seed(seed)
%matplotlib inline
transform = transforms.ToTensor()

# Fetching the dataset and the testset
trainset = datasets.MNIST('./root', train=True, download=True, transform=transform)
testset = datasets.MNIST('./root', train=False, download=True, transform=transform)

# Defining the batch_size -> 32
batch_size = 32

# Defining the trainloader and testloader
train_loader = torch.utils.data.DataLoader(trainset, batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(testset, batch_size)

# Visualizing the data
dataiter = iter(train_loader)
# FIX: use the builtin next(); DataLoader iterators no longer expose a
# .next() method in current PyTorch releases.
images, labels = next(dataiter)
images, labels = images.numpy(), labels.numpy()

fig = plt.figure(figsize=(25, 4))
for i in range(batch_size):
    # FIX: add_subplot requires integer grid dimensions — batch_size / 2 is a
    # float and raises on recent matplotlib; use floor division.
    ax = fig.add_subplot(2, batch_size // 2, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[i]), cmap='gray')
    ax.set_title(labels[i])
```
### Encoder Architecture
The encoder for the capsule networks has 4 primary components. They are :
1. Primary Convolutional layer
2. Secondary Capsule layer
3. Final Capsule layer
4. Dynamic routing via agreement
```
class ConvLayer(nn.Module):
    """First feature extractor: one wide 9x9 convolution followed by ReLU.

    With the default arguments and a (batch, 1, 28, 28) MNIST input, the
    valid (no-padding) convolution yields a (batch, 256, 20, 20) volume.
    """

    def __init__(self, in_channels=1, out_channels=256):
        super(ConvLayer, self).__init__()
        # Single 9x9 convolution, stride 1, no padding.
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=9, stride=1, padding=0)

    def forward(self, x):
        """Return the ReLU-activated convolutional features of x."""
        return F.relu(self.conv(x))
class PrimaryCaps(nn.Module):
    """Primary capsule layer built from parallel identical convolutions.

    Each of the num_capsules convolutions (9x9, stride 2, valid padding)
    produces a feature map that is flattened to a 32*6*6 = 1152 column;
    concatenating the columns gives one num_capsules-dimensional vector per
    spatial unit, which is then normalized with the squash non-linearity.
    """

    def __init__(self, num_capsules=8, in_channels=256, out_channels=32):
        super(PrimaryCaps, self).__init__()
        # One conv layer per capsule dimension, all with the same parameters.
        self.capsules = nn.ModuleList([
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=9, stride=2, padding=0)
            for _ in range(num_capsules)])

    def forward(self, x):
        """Return squashed capsule vectors, shape (batch, 1152, num_capsules)."""
        count = x.size(0)
        # Flatten each capsule's conv output to a (batch, 1152, 1) column.
        columns = [unit(x).view(count, 32 * 6 * 6, 1) for unit in self.capsules]
        # Concatenate columns along the last axis to form the capsule vectors.
        stacked = torch.cat(columns, dim=-1)
        return self.squash(stacked)

    def squash(self, input_tensor):
        """Scale vectors so their length falls in [0, 1), keeping direction."""
        sq_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)
        shrink = sq_norm / (1 + sq_norm)  # normalization coefficient
        return shrink * input_tensor / torch.sqrt(sq_norm)
import torch
import torch.nn.functional as F
def softmax(input_tensor, dim=1):
    """Softmax of input_tensor along an arbitrary dimension.

    The original transpose -> flatten -> softmax -> reshape -> un-transpose
    dance computes exactly what F.softmax supports natively via its `dim`
    argument, so delegate to it directly.

    Arguments:
    input_tensor -- tensor to normalize
    dim -- dimension along which probabilities should sum to 1 (default 1)

    Returns:
    tensor of the same shape with softmax applied along `dim`
    """
    return F.softmax(input_tensor, dim=dim)
!nvidia-smi
TRAIN_ON_GPU = torch.cuda.is_available()
if TRAIN_ON_GPU:
print("GPU Accelerator available !!!")
else:
print("No Acclerator available !!!")
def dynamic_routing(b_ij, u_hat, squash, routing_iterations=3):
    """Routing-by-agreement between two capsule layers.

    Arguments:
    b_ij -- initial coupling logits (zeros), broadcastable against u_hat
    u_hat -- predicted output vectors from the layer below
    squash -- vector non-linearity applied to each candidate output
    routing_iterations -- number of agreement-update rounds (default 3)

    Returns:
    v_j -- squashed capsule outputs from the final iteration
    """
    v_j = None
    for iteration in range(routing_iterations):
        # Coupling coefficients: softmax of the logits over dim 2.
        # (Equivalent to the module-level softmax(b_ij, dim=2) helper, which
        # is itself just a roundabout F.softmax — call F.softmax directly.)
        c_ij = F.softmax(b_ij, dim=2)

        # Total capsule input: s_j = sum(c_ij * u_hat) over the input capsules.
        s_j = (c_ij * u_hat).sum(dim=2, keepdim=True)

        # Squash to get a length-normalized vector output v_j.
        v_j = squash(s_j)

        # On all but the last iteration, raise the logits of input capsules
        # whose predictions agree with the current output (dot product).
        if iteration < routing_iterations - 1:
            a_ij = (u_hat * v_j).sum(dim=-1, keepdim=True)
            b_ij = b_ij + a_ij

    return v_j  # latest v_j
class DigitCaps(nn.Module):
    """Digit capsule layer: num_capsules output capsules, one per class.

    Each 16-dimensional output vector's length encodes the probability that
    the corresponding digit is present. Inputs from the 1152 primary-capsule
    vectors are combined via a learned weight tensor W and routed with the
    module-level `dynamic_routing` helper; also reads the global
    `TRAIN_ON_GPU` flag.
    """
    def __init__(self, num_capsules=10, previous_layer_nodes=32*6*6,
                 in_channels=8, out_channels=16):
        super(DigitCaps, self).__init__()
        # setting class variables
        self.num_capsules = num_capsules
        self.previous_layer_nodes = previous_layer_nodes # vector input (dim=1152)
        self.in_channels = in_channels # previous layer's number of capsules
        # starting out with a randomly initialized weight matrix, W
        # these will be the weights connecting the PrimaryCaps and DigitCaps layers
        # W shape: (num_capsules, 1152, in_channels, out_channels)
        self.W = nn.Parameter(torch.randn(num_capsules, previous_layer_nodes,
                                          in_channels, out_channels))
    def forward(self, u):
        """Route primary-capsule vectors u to digit-capsule outputs v_j."""
        # adding leading/broadcast dims and stacking all u vectors:
        # u (batch, 1152, 8) -> (1, batch, 1152, 1, 8)
        u = u[None, :, :, None, :]
        # W (10, 1152, 8, 16) -> (10, 1, 1152, 8, 16) so it broadcasts over batch
        W = self.W[:, None, :, :, :]
        # calculating u_hat = W*u (per-class predicted output vectors)
        u_hat = torch.matmul(u, W)
        # getting the correct size of b_ij (routing logits), all zeros initially
        # NOTE(review): b_ij takes u_hat's FULL shape (trailing dim 16) rather
        # than a trailing singleton; broadcasting makes this run, but it means
        # one logit per output component instead of one per capsule pair —
        # confirm this matches the intended routing formulation.
        b_ij = torch.zeros(*u_hat.size())
        # moving b_ij to GPU, if available
        if TRAIN_ON_GPU:
            b_ij = b_ij.cuda()
        # update coupling coefficients and calculate v_j
        v_j = dynamic_routing(b_ij, u_hat, self.squash, routing_iterations=3)
        return v_j # return final vector outputs
    def squash(self, input_tensor):
        """Same squash as PrimaryCaps: shrink vector length into [0, 1)."""
        squared_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)
        scale = squared_norm / (1 + squared_norm) # normalization coeff
        output_tensor = scale * input_tensor / torch.sqrt(squared_norm)
        return output_tensor
```
### Decoder Architecture
1. This consists of three fully connected layers.
```
class Decoder(nn.Module):
    """Reconstruction decoder: three fully-connected layers.

    Takes the digit-capsule output vectors, masks out all capsules except the
    most probable class's, and reconstructs a 28*28-pixel image from the
    surviving vector. Reads the global TRAIN_ON_GPU flag.
    """
    def __init__(self, input_vector_length=16, input_capsules=10, hidden_dim=512):
        super(Decoder, self).__init__()
        # calculate input_dim (16 * 10 = 160 with the defaults)
        input_dim = input_vector_length * input_capsules
        # define linear layers + activations
        self.linear_layers = nn.Sequential(
            nn.Linear(input_dim, hidden_dim), # first hidden layer
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim*2), # second, twice as deep
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim*2, 28*28), # can be reshaped into 28*28 image
            nn.Sigmoid() # sigmoid activation to get output pixel values in a range from 0-1
        )
    def forward(self, x):
        """Return (reconstructions, y): flattened images and one-hot class scores."""
        # class scores: Euclidean length of each capsule vector, softmaxed
        classes = (x ** 2).sum(dim=-1) ** 0.5
        classes = F.softmax(classes, dim=-1)
        # find the capsule with the maximum vector length
        # here, vector length indicates the probability of a class' existence
        _, max_length_indices = classes.max(dim=1)
        # create a sparse class matrix
        sparse_matrix = torch.eye(10) # 10 is the number of classes
        if TRAIN_ON_GPU:
            sparse_matrix = sparse_matrix.cuda()
        # get the class scores from the "correct" capsule (one-hot rows)
        y = sparse_matrix.index_select(dim=0, index=max_length_indices.data)
        # create reconstructed pixels: zero every capsule except the winner
        x = x * y[:, :, None]
        # flatten image into a vector shape (batch_size, vector_dim)
        flattened_x = x.contiguous().view(x.size(0), -1)
        # create reconstructed image vectors
        reconstructions = self.linear_layers(flattened_x)
        # return reconstructions and the class scores, y
        return reconstructions, y
class CapsuleNetwork(nn.Module):
    """End-to-end capsule net: ConvLayer -> PrimaryCaps -> DigitCaps -> Decoder."""
    def __init__(self):
        super(CapsuleNetwork, self).__init__()
        # encoder stages
        self.conv_layer = ConvLayer()
        self.primary_capsules = PrimaryCaps()
        self.digit_capsules = DigitCaps()
        # reconstruction decoder
        self.decoder = Decoder()
    def forward(self, images):
        """Return (caps_output, reconstructions, y) for a batch of images."""
        # encoder: conv features -> primary capsules -> digit capsules
        primary_caps_output = self.primary_capsules(self.conv_layer(images))
        # drop the routing singleton dims and put batch first.
        # NOTE(review): .squeeze() removes ALL size-1 dims, so a batch of one
        # image would also lose its batch dimension — confirm batch_size > 1.
        caps_output = self.digit_capsules(primary_caps_output).squeeze().transpose(0,1)
        # decoder: masked reconstructions plus one-hot class scores
        reconstructions, y = self.decoder(caps_output)
        return caps_output, reconstructions, y
capsule_net = CapsuleNetwork()
print(capsule_net)
# move model to GPU, if available
if TRAIN_ON_GPU:
capsule_net = capsule_net.cuda()
```
### Defining the Custom Losses
In a capsule network the loss function is a weighted combination of two losses:
1. Margin Loss
2. Reconstruction Loss
**Margin Loss**

* Reconstruction loss is just the mean squared error (MSE) between the original and reconstructed images.
```
class CapsuleLoss(nn.Module):
    """Capsule-network loss: margin loss plus down-weighted reconstruction loss.

    The margin term penalizes a present class's capsule length below 0.9 and
    an absent class's length above 0.1 (absent side weighted by 0.5); the
    reconstruction term is a summed MSE scaled by 0.0005. The combined sum is
    averaged over the batch.
    """

    def __init__(self):
        super(CapsuleLoss, self).__init__()
        # Summed (not averaged) MSE over all reconstructed pixel values.
        self.reconstruction_loss = nn.MSELoss(reduction='sum')

    def forward(self, x, labels, images, reconstructions):
        """Return the scalar loss for one batch.

        x -- digit-capsule vectors; labels -- one-hot targets;
        images -- original inputs; reconstructions -- decoder outputs.
        """
        batch_size = x.size(0)

        ## margin loss ##
        # capsule vector magnitudes v_c
        v_c = torch.sqrt((x ** 2).sum(dim=2, keepdim=True))
        # hinge terms for the "present" and "absent" sides of the margin
        present_term = F.relu(0.9 - v_c).view(batch_size, -1)
        absent_term = F.relu(v_c - 0.1).view(batch_size, -1)
        # combine with lambda = 0.5 on the absent side and sum over everything
        margin_loss = (labels * present_term
                       + 0.5 * (1. - labels) * absent_term).sum()

        ## reconstruction loss ##
        flat_images = images.view(reconstructions.size()[0], -1)
        recon_loss = self.reconstruction_loss(reconstructions, flat_images)

        # weighted sum, averaged over the batch
        return (margin_loss + 0.0005 * recon_loss) / flat_images.size(0)
import torch.optim as optim
criterion = CapsuleLoss()
optimizer = optim.Adam(capsule_net.parameters())
def train(capsule_net, criterion, optimizer,
          n_epochs, print_every=300):
    """Train capsule_net for n_epochs and return the recorded average losses.

    Arguments:
    capsule_net -- CapsuleNetwork instance to train
    criterion -- CapsuleLoss module (margin + reconstruction)
    optimizer -- optimizer over capsule_net's parameters
    n_epochs -- number of full passes over the training data
    print_every -- batches between loss reports (default 300)

    Returns:
    losses -- list of average training losses, one per report interval

    NOTE(review): reads the module-level globals `train_loader` and
    `TRAIN_ON_GPU` rather than taking the loader as a parameter.
    """
    # track training loss over time
    losses = []
    # one epoch = one pass over all training data
    for epoch in range(1, n_epochs+1):
        # initialize training loss
        train_loss = 0.0
        capsule_net.train() # set to train mode
        # get batches of training image data and targets
        for batch_i, (images, target) in enumerate(train_loader):
            # one-hot encode the integer class labels
            target = torch.eye(10).index_select(dim=0, index=target)
            if TRAIN_ON_GPU:
                images, target = images.cuda(), target.cuda()
            # zero out gradients
            optimizer.zero_grad()
            # get model outputs
            caps_output, reconstructions, y = capsule_net(images)
            # calculate loss
            loss = criterion(caps_output, target, images, reconstructions)
            # perform backpropagation and optimization
            loss.backward()
            optimizer.step()
            train_loss += loss.item() # accumulated training loss
            # print and record training stats every print_every batches
            if batch_i != 0 and batch_i % print_every == 0:
                avg_train_loss = train_loss/print_every
                losses.append(avg_train_loss)
                print('Epoch: {} \tTraining Loss: {:.8f}'.format(epoch, avg_train_loss))
                train_loss = 0 # reset accumulated training loss
    return losses
n_epochs = 3
losses = train(capsule_net , criterion , optimizer , n_epochs = n_epochs)
plt.plot(losses)
plt.title("Training Loss")
plt.show()
def test(capsule_net, test_loader):
    """Evaluate capsule_net on test_loader; print per-class and overall accuracy.

    Arguments:
    capsule_net -- trained CapsuleNetwork instance
    test_loader -- DataLoader yielding (images, integer labels)

    Returns:
    (caps_output, images, reconstructions) from the LAST batch, useful for
    visualizing reconstructions afterwards.

    NOTE(review): reads the module-level globals `criterion` and
    `TRAIN_ON_GPU` rather than taking them as parameters.
    """
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))

    test_loss = 0  # loss tracking

    capsule_net.eval()  # eval mode

    for batch_i, (images, target) in enumerate(test_loader):
        # one-hot encode the integer class labels
        target = torch.eye(10).index_select(dim=0, index=target)

        batch_size = images.size(0)

        if TRAIN_ON_GPU:
            images, target = images.cuda(), target.cuda()

        # forward pass: compute predicted outputs by passing inputs to the model
        caps_output, reconstructions, y = capsule_net(images)
        # calculate the loss
        loss = criterion(caps_output, target, images, reconstructions)
        # update average test loss
        test_loss += loss.item()
        # convert output probabilities to predicted class
        _, pred = torch.max(y.data.cpu(), 1)
        _, target_shape = torch.max(target.data.cpu(), 1)

        # compare predictions to true label
        # NOTE(review): np.squeeze would yield a 0-d array for a final batch
        # of size 1 — confirm test set size is not congruent to 1 mod batch.
        correct = np.squeeze(pred.eq(target_shape.data.view_as(pred)))

        # calculate test accuracy for each object class
        for i in range(batch_size):
            label = target_shape.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # avg test loss
    avg_test_loss = test_loss / len(test_loader)
    print('Test Loss: {:.8f}\n'.format(avg_test_loss))

    for i in range(10):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                str(i), 100 * class_correct[i] / class_total[i],
                np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            # BUG FIX: the original referenced an undefined name `classes`
            # here, raising NameError whenever a digit had no test examples;
            # use the digit index like the branch above.
            print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))

    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))

    # return last batch of capsule vectors, images, reconstructions
    return caps_output, images, reconstructions
caps_output, images, reconstructions = test(capsule_net, test_loader)
transform = transforms.Compose(
    [transforms.RandomAffine(degrees=30, translate=(0.1, 0.1)),
     transforms.ToTensor()]
)

# test dataset with random affine perturbations applied
transformed_test_data = datasets.MNIST(root='data', train=False,
                                       download=True, transform=transform)

# prepare data loader
transformed_test_loader = torch.utils.data.DataLoader(transformed_test_data,
                                                      batch_size=batch_size,
                                                      num_workers=0)

dataiter = iter(transformed_test_loader)
# FIX: use the builtin next(); DataLoader iterators no longer expose a
# .next() method in current PyTorch releases.
images, labels = next(dataiter)
images = images.numpy()

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
    # FIX: add_subplot requires integer grid dimensions; use floor division.
    ax = fig.add_subplot(2, batch_size // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    # .item() gets the value contained in a Tensor
    ax.set_title(str(labels[idx].item()))

# evaluate the trained network on the shifted/rotated test set
_, images, reconstructions = test(capsule_net, transformed_test_loader)
```
| github_jupyter |
```
import pickle
import matplotlib.pyplot as plt
from scipy.stats.mstats import gmean
import seaborn as sns
from statistics import stdev
from math import log
import numpy as np
from scipy import stats
from statistics import mean
%matplotlib inline
price_100_stan = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\Initial\\NetScape_Standard\\stan_total_price.p","rb"))
price_100_net5 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy1\\total_price_5_policy_better.p", "rb"))
price_100_net_ging5 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy2\\total_price_ging5_policy_better.p", "rb"))
price_100_net10 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy1\\total_price_10_policy_better.p", "rb"))
price_100_net_ging10 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy2\\total_price_ging10_policy_better.p", "rb"))
from collections import defaultdict
def make_distro(price_100):
    """Flatten per-run, per-step trade prices into log-price std deviations.

    For every step of every run, computes the standard deviation of the log
    of that step's trade prices (the SDLM statistic plotted below). Steps
    with fewer than two trades are skipped, since a sample standard
    deviation is undefined for a single observation.

    Arguments:
    price_100 -- mapping of run name -> {step -> list of trade prices}

    Returns:
    list of per-step log-price standard deviations, in iteration order
    """
    all_stds = []
    for run, output in price_100.items():
        for step, prices in output.items():
            log_pr = [log(p) for p in prices]
            # stdev requires at least two data points (guard clause replaces
            # the original's pass/else; the original also filled a defaultdict
            # `total_log` that was never read — dead code, removed)
            if len(log_pr) >= 2:
                all_stds.append(stdev(log_pr))
    return all_stds
price_stan = make_distro(price_100_stan)
price_net5 = make_distro(price_100_net5)
price_net_g5 = make_distro(price_100_net_ging5)
price_net10 = make_distro(price_100_net10)
price_net_g10 = make_distro(price_100_net_ging10)
fig, ax = plt.subplots(figsize = (4,9))
ax.hist(price_stan, 500, label = "Standard", color ='green')
ax.hist(price_net10, 500, label = "One Level", color='blue')
ax.hist(price_net_g10, 500, label = "Multi-Level", color = 'red')
plt.title("SugarScape Common Resources (10 Trades):\nSDLM Price Distribution of 100 Runs, 1000 Steps",\
fontsize = 15, fontweight = "bold")
plt.xlabel("SDLM Price", fontsize = 15, fontweight = "bold")
plt.ylabel("Frequency", fontsize = 15, fontweight = "bold")
#plt.xlim(.75,2)
plt.ylim(0,1500)
plt.legend()
## Calculate price
ind_stan = price_100_stan["Run42"]
ind_net = price_100_net10["Run42"]
ind_net_g = price_100_net_ging10["Run42"]
def ind_price(ind_e):
    """Split one run's {step: prices} mapping into parallel plotting lists.

    Arguments:
    ind_e -- mapping of step -> list of trade prices at that step

    Returns:
    (steps, mean_prices) -- steps in iteration order, and the geometric
    mean of each step's prices
    """
    steps = []
    mean_prices = []
    for step, prices in ind_e.items():
        steps.append(step)
        mean_prices.append(gmean(prices))
    return (steps, mean_prices)
# Geometric-mean trade price per step for the selected run of each scenario.
x_s, y_s = ind_price(ind_stan)
x_b, y_b = ind_price(ind_net_g)
x_n, y_n = ind_price(ind_net)
# Spot-check: absolute log-price of every trade at step 736 of the one-level run.
for p in ind_net[736]:
    print (abs(log(p)))
fig, ax = plt.subplots(figsize = (4,9))
ax.scatter(x_n, y_n, label = "One Level", color='blue')
ax.scatter(x_b,y_b, label = "Multi-Level", color = 'red')
ax.scatter(x_s,y_s, label = "Standard", color ='green')
plt.title("SugarScape with Common Resource (10 Trades):\nMean Trade Price of 1 Run, 1000 Steps",\
fontsize = 15, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Price", fontsize = 15, fontweight = "bold")
plt.legend()
plt.ylim(0,3)
def ind_volume(ind_e):
    """Return (steps, per-step trade counts, total trade count) for one run.

    ind_e maps step -> list of trade prices; the number of prices at a
    step is that step's trade volume.
    """
    steps = list(ind_e)
    counts = [len(prices) for prices in ind_e.values()]
    return (steps, counts, sum(counts))
# Trade volume (number of trades per step) for the selected run of each scenario.
x_vol_stan, y_vol_stan, stan_vol = ind_volume(ind_stan)
x_vol_net_g, y_vol_net_g, net_g_vol = ind_volume(ind_net_g)
x_vol_net, y_vol_net, net_vol = ind_volume(ind_net)
len(y_vol_net)
# Distribution of per-step trade volumes.
fig2, ax2 = plt.subplots(figsize = (7,7))
ax2.hist(y_vol_net, 100, label = "One Level", color='blue')
ax2.hist(y_vol_net_g, 100, label = "Multiple Levels", color = 'red')
ax2.hist(y_vol_stan, 100, label = "Standard", color ='green')
plt.title("SugarScape with Common Resource (10 Trades):\nTrade Volume of 1 Run",\
fontsize = 15, fontweight = "bold")
plt.xlabel("Trade Volume", fontsize = 15, fontweight = "bold")
plt.ylabel("Frequency", fontsize = 15, fontweight = "bold")
plt.legend()
# Trade volume over time for the same runs.
fig2, ax2 = plt.subplots(figsize = (7,7))
ax2.plot(x_vol_net, y_vol_net, label = "One Level", color='blue')
ax2.plot(x_vol_net_g, y_vol_net_g, label = "Multi-Level", color = 'red')
ax2.plot(x_vol_stan, y_vol_stan, label = "Standard", color ='green')
plt.title("SugarScape with Common Resource (10 Trades):\nTrade Volume of 1 Run, 1000 Steps",\
fontsize = 15, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Volume", fontsize = 15, fontweight = "bold")
#ax2.text(600,300, "Total Trade Volume: \n "+str(total), fontsize = 15, fontweight = 'bold')
#plt.ylim(0,400)
plt.legend()
def sdlm(ind_e):
    """Compute per-step SDLM (std. dev. of log trade price) for a single run.

    ind_e maps step -> list of trade prices.  Steps with fewer than two
    trades are skipped (stdev needs >= 2 samples).  Returns four parallel
    lists: (steps-with-dev, per-step stdevs, step repeated per trade,
    individual log prices).  `log` and `stdev` are expected in scope from
    earlier notebook cells (math / statistics).
    """
    x_dev, y_dev = [], []
    x_all, y_all = [], []
    # Single pass: log-transform and filter in one loop instead of first
    # materializing a {step: log-prices} dict as the original did.
    for step, prices in ind_e.items():
        log_p = [log(p) for p in prices]
        if len(log_p) < 2:
            continue
        for lp in log_p:
            x_all.append(step)
            y_all.append(lp)
        x_dev.append(step)
        y_dev.append(stdev(log_p))
    return (x_dev, y_dev, x_all, y_all)
# Per-step SDLM for the selected runs.
x_dev_stan, y_dev_stan, x_all_stan, y_all_stan = sdlm(ind_stan)
x_dev_net_g, y_dev_net_g, x_all_net_g, y_all_net_g = sdlm(ind_net_g)
x_dev_net, y_dev_net, x_all_net, y_all_net = sdlm(ind_net)
#from numpy.polynomial.polynomial import polyfit
fig3, ax3 = plt.subplots(figsize=(4,9))
ax3.scatter(x_dev_net, y_dev_net, label = "One Level", color='blue')
ax3.scatter(x_dev_net_g, y_dev_net_g, label = "Multi-level", color = 'red')
ax3.scatter(x_dev_stan, y_dev_stan, label = "Standard", color ='green')
plt.title("SugarScape with Common Resource(10 Trades):\nSDLM Price of 1 Run, 1000 Steps",\
fontsize = 15, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Logarithmic Price", fontsize = 15, fontweight = "bold")
plt.legend()
# Survivor counts (lists of per-run survivors) and wall-clock run times for
# the standard model and both policies.
stan_multi_s = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\Initial\\NetScape_Standard\\stan_multi_sur.p", "rb"))
stan_multi_t = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\Initial\\NetScape_Standard\\stan_multi_time.p", "rb"))
net_multi_s = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy1\\net_multi_sur_10_policy_better.p", "rb"))
net_multi_t =pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy1\\net_multi_time_10_policy_better.p", "rb"))
net_multi_s_g = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy2\\net_multi_sur_ging10_policy_better.p", "rb"))
net_multi_t_g =pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy2\\net_multi_time_ging10_policy_better.p", "rb"))
net_mean = mean(net_multi_s)
stan_mean = mean(stan_multi_s)
net_time = round(mean(net_multi_t),2)
stan_time = round(mean(stan_multi_t),2)
# Two-sample t-test: one-level vs multi-level survivor counts.
t2, p2 = stats.ttest_ind(net_multi_s,net_multi_s_g)
# NOTE(review): stats.ttest_ind already returns a two-sided p-value;
# doubling it here looks unintended -- confirm before reporting net_p.
net_p = p2 * 2
net_p
fig5, ax5 = plt.subplots(figsize=(4,9))
plt.hist(stan_multi_s, label = "Standard", color = 'green')
plt.hist(net_multi_s_g, label = "Multi-Levels", color = 'red')
plt.hist(net_multi_s, label = "One Level", color = 'blue')
#plt.text(20, 24.8, "Network-Explicit\np-value: " + str(round(net_p,2)))
plt.legend()
plt.title("SugarScape with Common Resource (10 Trades):\nSurvivors of 100 Runs, 1000 Steps", fontweight = "bold", fontsize = 15)
plt.xlabel("Survivors", fontweight = "bold", fontsize = 15)
plt.ylabel('Frequency', fontweight = "bold", fontsize = 15)
# Wall-clock time distributions for the same three scenarios.
fig6, ax6 = plt.subplots(figsize=(4,9))
plt.hist(stan_multi_t, label = "Standard", color ='green')
plt.hist(net_multi_t_g, label = "Multi-Level", color = 'red')
plt.hist(net_multi_t, label = "One Level", color = 'blue')
#plt.text(78, 25, "Network p-value: "+str(net_t_p) +"\nExplicit p-value: "+str(brute_t_p))
plt.legend()
plt.title("SugarScape with Common Resource (10 Trades):\nTime of 100 Runs, 1000 Steps", fontweight = "bold", fontsize = 15)
#plt.text(108, 20, "\nStandard Mean:\n"+str(stan_time) + "\nNetwork Mean:\n"+str(net_time) +"\nExplicit Mean:\n"+str(brute_time))
plt.xlabel("Seconds", fontweight = "bold", fontsize = 15)
plt.ylabel("Frequency", fontweight = "bold", fontsize = 15)
```
## Type Analysis
```
# Final group structure per run ({run -> {group -> members}}) for each
# policy / trade-count combination.
net_type_df = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy1\\final_groups_5_policy_better.p", "rb"))
net_type_df_g = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy2\\final_groups_ging5_policy_better.p", "rb"))
net_type_df10 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy1\\final_groups_10_policy_better.p", "rb"))
net_type_df_g10 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_policy2\\final_groups_ging10_policy_better.p", "rb"))
common_groups = []
num_agents_per_group = []
for run, groups in net_type_df_g10.items():
    # NOTE(review): len(run) is the length of the run-name string (e.g.
    # "Run42"), not the number of groups in the run -- len(groups) was
    # probably intended.  Confirm before trusting the "Groups" histogram.
    common_groups.append(len(run))
    sub_group = []
    for group in groups.values():
        sub_group.append(len(group))
    # Geometric mean of group sizes within this run, rounded for plotting.
    num_agents_per_group.append(round(gmean(sub_group),2))
fig6, ax6 = plt.subplots(figsize=(4,9))
plt.hist(num_agents_per_group, label = "Agent Per Group", color = 'blue')
plt.hist(common_groups, label = "Multi_Level Groups", color ='green')
#plt.hist(net_multi_t_g, label = "Common Resource", color = 'red')
#plt.text(78, 25, "Network p-value: "+str(net_t_p) +"\nExplicit p-value: "+str(brute_t_p))
plt.legend()
plt.title("SugarScape with Module Policy (10 Trades):\nTime of 100 Runs, 1000 Steps", fontweight = "bold", fontsize = 15)
#plt.text(108, 20, "\nStandard Mean:\n"+str(stan_time) + "\nNetwork Mean:\n"+str(net_time) +"\nExplicit Mean:\n"+str(brute_time))
plt.xlabel("Seconds", fontweight = "bold", fontsize = 15)
plt.ylabel("Frequency", fontweight = "bold", fontsize = 15)
```
| github_jupyter |
```
# Notebook shell magics (IPython-only syntax): fetch Karpathy's minGPT repo
# and install the snakeviz profiler viewer.
!git clone https://github.com/karpathy/minGPT.git
!pip install snakeviz
from fastai.text.all import *
from minGPT.mingpt.model import GPT, GPTConfig, GPT1Config
# Read the whole lyrics corpus as a single string; undecodable bytes are
# silently dropped via errors='ignore'.
with open('/kaggle/input/lyrics-v2/lyrics.txt', encoding="utf8", errors='ignore') as f:
    raw_text=f.read()
len(raw_text)
class CharTransform(DisplayedTransform):
    """fastai transform for character-level language modelling.

    Each integer item index maps to a random fixed-length chunk of the
    corpus, encoded as vocabulary ids.  Relies on fastai's Categorize /
    L / TitledStr and torch tensors being in scope (star import above).
    """

    def __init__(self, data, block_size):
        self.cat = Categorize()
        # +1 so one chunk supplies both the model input and the
        # one-character-shifted target.
        self.block_size = block_size + 1
        self.data = data

    def setups(self, items=None):
        self.data_len = len(self.data)
        # Build the character vocabulary from the raw text.
        self.cat.setup(L(*self.data))
        self.itos = self.cat.vocab
        self.stoi = self.itos.items.val2idx()
        print(f'data has {self.data_len} characters, {len(self.itos)} unique.')
        self.n_sequences = math.ceil(self.data_len / (self.block_size))
        # One random start offset per sequence, sampled once at setup time,
        # so the "dataset" is a fixed random sample of corpus chunks.
        self.idxs = L(np.random.randint(0, self.data_len - (self.block_size), self.n_sequences).tolist())

    def encodes(self, o):
        # o is an integer index into the precomputed random offsets.
        chunk = self.data[self.idxs[o]:self.idxs[o]+self.block_size]
        return tensor([self.stoi[s] for s in chunk])

    def decodes(self, o): return TitledStr(''.join([self.itos[s.item()] for s in o]))
# Context length of 128 characters; the transform internally adds 1 so each
# sample yields an (input, shifted target) pair.
block_size=128
n_sequences = math.ceil(len(raw_text) / (block_size+1)); n_sequences
# Throwaway instance just to preview the vocab-size printout from setups().
t = CharTransform(data=raw_text, block_size=block_size)
t.setups()
dset = Datasets(L(range(n_sequences)), tfms=[CharTransform(raw_text, block_size)], dl_type=LMDataLoader)
dset[0],show_at(dset.train, 0)
bs = 256
# Every sample has the same fixed length, so the per-item lengths the
# LM dataloader wants can be precomputed trivially.
lens = [block_size+1 for i in range(len(dset.train))]
dls = dset.dataloaders(bs=bs, seq_len=block_size, lens=lens)
dls.show_batch(max_n=2)
class DropLoss(Callback):
    # minGPT's forward returns a tuple; keep only the first element
    # (presumably the logits) so fastai's own loss function receives raw
    # predictions rather than the tuple -- confirm against minGPT's model.
    def after_pred(self): self.learn.pred = self.pred[0]
# 6-layer, 8-head, 512-dim GPT sized to the character vocabulary.
mconf = GPTConfig(vocab_size=len(dls.itos), block_size=block_size, n_layer=6, n_head=8, n_embd=512)
model = GPT(mconf)
# fastai Learner with minGPT's published Adam settings; DropLoss unpacks the
# model's tuple output.  Mixed precision via to_fp16().
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(),
    opt_func=partial(Adam, sqr_mom=0.95, wd=0.1), cbs=[DropLoss]).to_fp16()
learn.lr_find()
learn.fit_one_cycle(30, 6e-4, div_final=10)
learn.recorder.plot_loss()
# Sample a continuation from the freshly trained model.
from minGPT.mingpt.utils import sample
context = "This is nice "
x = torch.tensor([dls.char_transform.stoi[s] for s in context], dtype=torch.long)[None,...].to(dls.device)
y = sample(model, x, 2000, temperature=0.9, sample=True, top_k=20)[0]
completion = ''.join([dls.char_transform.itos[int(i)] for i in y])
print(completion)
# Reload a previously exported learner and sample from it.
learn1=load_learner('/kaggle/input/models/24.pkl')
learn1.model
from minGPT.mingpt.utils import sample
context = "To explore is to "
x = torch.tensor([dls.char_transform.stoi[s] for s in context], dtype=torch.long)[None,...].to(dls.device)
y = sample(learn1.model, x, 500, temperature=0.9, sample=True, top_k=5)[0]
completion = ''.join([dls.char_transform.itos[int(i)] for i in y])
print(completion)
# NOTE(review): the two cells below sample from `model` (the in-memory
# training run), not `learn1.model` -- confirm which was intended.
from minGPT.mingpt.utils import sample
context = "To infinity and beyond "
x = torch.tensor([dls.char_transform.stoi[s] for s in context], dtype=torch.long)[None,...].to(dls.device)
y = sample(model, x, 999, temperature=0.9, sample=True, top_k=5)[0]
completion = ''.join([dls.char_transform.itos[int(i)] for i in y])
print(completion)
from minGPT.mingpt.utils import sample
context = "I wish our college had a super computer "
x = torch.tensor([dls.char_transform.stoi[s] for s in context], dtype=torch.long)[None,...].to(dls.device)
y = sample(model, x, 2000, temperature=0.9, sample=True, top_k=40)[0]
completion = ''.join([dls.char_transform.itos[int(i)] for i in y])
print(completion)
# Persist the trained learner for later reuse via load_learner.
learn.export("24.pkl")
| github_jupyter |
## Tutorial for building a feature vector distribution plot
In this tutorial we will build an interactive widget using bqplot and ipywidgets. bqplot is a powerful interactive plotting library for jupyter. Its main power comes from how well integrated it is into the ipywidgets library. There are a few things you should understand before diving into this tutorial.
### ipywidgets:
* Widgets: Widgets are Python objects that link directly to their HTML counterparts, allowing easy interaction between JS, CSS, HTML and Python.
* Boxes: Boxes allow you to group widgets together, either vertically or horizontally.
### bqplot:
* Figures: Figures are a canvas that you will mark on. It's best to think of the figure as another widget (which it is)
* Marks: marks are things that you draw onto the figure, these are composed of a variety of chart types such as bars, lines, histograms etc. You can put a bunch of marks on a single figure.
If you are used to matplotlib, the paradigm of how axis and scales are used in bqplot can be somewhat counterintuitive at first, so take some time to read the documentation and play around until you understand them. Once you do, they are very powerful when you want to link multiple plots together.
* Axis: Axis describe what the lines around a figure will look like. Only figures have axis, marks don't.
* Scales: The scale describes how ranges should be displayed, i.e. linear or logarithmic. Scales are used by both axes and marks. Their max and min can auto-adjust to the data, or be set explicitly. Be careful: you can add an axis to a figure that has a different scale than the one used by a mark you are adding to the same figure.
* Tooltips: These allow you to add information on hover. They only accept three fields: 'name', 'x' and 'y'. So in this tutorial we put all of the information we want to show into the name column as a string.
```
from ipywidgets import HBox, VBox, Dropdown
from bqplot.marks import Scatter, Bars
from bqplot.scales import LinearScale, OrdinalScale
from bqplot.figure import Figure
from bqplot import Tooltip
from bqplot.axes import Axis
import numpy as np
# simple function to return the bins for the plot
def get_h_bins(df, bins, f_lim):
    """Return `bins` evenly spaced histogram bin edges for the frame's values.

    When f_lim is provided its 'min'/'max' bound the range; otherwise the
    range is padded to one integer beyond the data's observed min and max.
    """
    if f_lim:
        lo, hi = f_lim["min"], f_lim["max"]
    else:
        stats = df.describe()
        hi = int(stats.loc["max"].max() + 1)
        lo = int(stats.loc["min"].min() - 1)
    return np.arange(lo, hi, (hi - lo) / float(bins))
def feature_vector_distribution(
    features, label_column, bins=25, group_columns=None, f_lim=None, colors=None
):
    """Build an interactive bqplot widget: a scatter of two selectable
    features with marginal histograms per label and a count legend.

    features (dataframe): a data frame of feature vectors along with a label column and other metadata
    label_column (str): the name of the column in the features dataframe that refers to the label information
    bins (int): the number of bins in the histograms
    group_columns (list): if you want other metadata in the tooltip, these columns will be added
    f_lim (dict): this sets the limits for max and min of the plots to a constant
        {'max':10, 'min':10}. otherwise defaults to the values of the current features
        which can be misleading.
    colors (list): list of colors to use. Internally has a list of 10. If the labels
        are longer you will need to pass your own
    """
    # Pixel sizes: the square scatter figure and the 1/3-width marginals.
    dist = "640px"
    third_dist = "213px"
    # Shared x/y scales so the scatter and both histograms stay aligned.
    if f_lim:
        sc_x = LinearScale(min=f_lim["min"], max=f_lim["max"])
        sc_y = LinearScale(min=f_lim["min"], max=f_lim["max"])
    else:
        sc_x = LinearScale()
        sc_y = LinearScale()
    # Count axis for the histograms; separate scales for the legend bars.
    scale_y = LinearScale(min=0)
    x_ord_legend = OrdinalScale()
    y_lin_legend = LinearScale()
    # count_column is only used to count rows per label for the legend; any
    # non-label column works, hence the arbitrary columns[1] fallback.
    if group_columns is None:
        count_column = features.columns[1]
        group_columns = []
    else:
        count_column = group_columns[0]
    if colors is None:
        colors = [
            "#E6B0AA",
            "#C39BD3",
            "#73C6B6",
            "#F7DC6F",
            "#F0B27A",
            "#D0D3D4",
            "#85929E",
            "#6E2C00",
            "#1A5276",
            "#17202A",
        ]
    box_color = "black"
    # Feature pickers; options exclude the label and metadata columns.
    feature_x = Dropdown(description="Feature 1")
    feature_y = Dropdown(description="Feature 2")
    feature_x.options = [
        x for x in features.columns if x not in [label_column] + group_columns
    ]
    feature_y.options = [
        x for x in features.columns if x not in [label_column] + group_columns
    ]
    # Initial selection: first feature on x, second on y.
    feature1 = feature_x.options[0]
    feature2 = feature_y.options[1]
    feature_y.value = feature2
    # bqplot tooltips only support name/x/y, so all metadata is packed into
    # the 'name' field as a comma-separated string.
    tt = Tooltip(
        fields=["name"], labels=[", ".join(["index", label_column] + group_columns)]
    )
    scatters = []
    hists_y = []
    hists_x = []
    h_bins_x = get_h_bins(features[[feature1]], bins, f_lim)
    h_bins_y = get_h_bins(features[[feature2]], bins, f_lim)
    # One scatter mark and one histogram per axis for every label group.
    for index, group in enumerate(features.groupby([label_column])):
        # put the label column and any group column data in the tooltip
        names = []
        for row in range(group[1].shape[0]):
            names.append(
                "{},".format(row)
                + ",".join(
                    [
                        str(x)
                        for x in group[1][[label_column] + group_columns]
                        .iloc[row]
                        .values
                    ]
                )
            )
        # create a scatter plot for each group
        scatters.append(
            Scatter(
                x=group[1][feature1].values,
                y=group[1][feature2].values,
                names=names,
                display_names=False,
                opacities=[0.5],
                default_size=30,
                scales={"x": sc_x, "y": sc_y},
                colors=[colors[index]],
                tooltip=tt,
            )
        )
        # create a histograms using a bar chart for each group
        # histogram plot for bqplot does not have enough options (no setting range, no setting orientation)
        h_y, h_x = np.histogram(group[1][feature1].values, bins=h_bins_x)
        hists_x.append(
            Bars(
                x=h_x,
                y=h_y,
                opacities=[0.3] * bins,
                scales={"x": sc_x, "y": scale_y},
                colors=[colors[index]],
                orientation="vertical",
            )
        )
        h_y, h_x = np.histogram(group[1][feature2].values, bins=h_bins_y)
        hists_y.append(
            Bars(
                x=h_x,
                y=h_y,
                opacities=[0.3] * bins,
                scales={"x": sc_x, "y": scale_y},
                colors=[colors[index]],
                orientation="horizontal",
            )
        )
    # legend will show the names of the labels as well as a total count of each
    legend_bar = Bars(
        x=features.groupby(label_column).count()[count_column].index,
        y=features.groupby(label_column).count()[count_column].values,
        colors=colors,
        # NOTE(review): hard-coded 6 -- presumably should match the number
        # of label groups; confirm with >6 labels.
        opacities=[0.3] * 6,
        scales={"x": x_ord_legend, "y": y_lin_legend},
        orientation="horizontal",
    )
    ax_x_legend = Axis(
        scale=x_ord_legend,
        tick_style={"font-size": 24},
        label="",
        orientation="vertical",
        tick_values=features.groupby(label_column).count()[count_column].index,
    )
    ax_y_legend = Axis(
        scale=y_lin_legend,
        orientation="horizontal",
        label="Total",
        color=box_color,
        num_ticks=4,
    )
    # these are blank axes that are used to fill in the border for the top and right of the figures
    ax_top = Axis(scale=sc_x, color=box_color, side="top", tick_style={"font-size": 0})
    ax_right = Axis(
        scale=sc_x, color=box_color, side="right", tick_style={"font-size": 0}
    )
    ax_left = Axis(
        scale=sc_x, color=box_color, side="left", tick_style={"font-size": 0}
    )
    ax_bottom = Axis(
        scale=sc_x, color=box_color, side="bottom", tick_style={"font-size": 0}
    )
    # NOTE(review): the four assignments below immediately overwrite the four
    # tick_style variants above -- the earlier definitions are dead code.
    ax_top = Axis(scale=sc_x, color=box_color, side="top", num_ticks=0)
    ax_right = Axis(scale=sc_x, color=box_color, side="right", num_ticks=0)
    ax_left = Axis(scale=sc_x, color=box_color, side="left", num_ticks=0)
    ax_bottom = Axis(scale=sc_x, color=box_color, side="bottom", num_ticks=0)
    # scatter plot axis
    ax_x = Axis(label=feature1, scale=sc_x, color=box_color)
    ax_y = Axis(label=feature2, scale=sc_y, orientation="vertical", color=box_color)
    # count column of histogram
    ax_count_vert = Axis(
        label="", scale=scale_y, orientation="vertical", color=box_color, num_ticks=5
    )
    ax_count_horiz = Axis(
        label="", scale=scale_y, orientation="horizontal", color=box_color, num_ticks=5
    )
    # histogram bin axis
    ax_hist_x = Axis(label="", scale=sc_x, orientation="vertical", color=box_color)
    ax_hist_y = Axis(label="", scale=sc_x, orientation="horizontal", color=box_color)
    # create figures for each plot
    f_scatter = Figure(
        axes=[ax_x, ax_y, ax_top, ax_right],
        background_style={"fill": "white"},  # css is inserted directly
        marks=scatters,
        min_aspect_ratio=1,
        max_aspect_ratio=1,
        fig_margin={"top": 0, "bottom": 60, "left": 60, "right": 0},
    )
    f_hists_y = Figure(
        axes=[ax_left, ax_count_horiz, ax_top, ax_right],
        background_style={"fill": "white"},
        marks=hists_y,
        min_aspect_ratio=0.33,
        max_aspect_ratio=0.33,
        fig_margin={"top": 0, "bottom": 60, "left": 10, "right": 0},
    )
    f_hists_x = Figure(
        axes=[ax_count_vert, ax_bottom, ax_top, ax_right],
        background_style={"fill": "white"},
        marks=hists_x,
        min_aspect_ratio=3,
        max_aspect_ratio=3,
        fig_margin={"top": 20, "bottom": 10, "left": 60, "right": 0},
    )
    f_legend = Figure(
        marks=[legend_bar],
        axes=[ax_x_legend, ax_y_legend],
        title="",
        legend_location="bottom-right",
        background_style={"fill": "white"},
        min_aspect_ratio=1,
        max_aspect_ratio=1,
        fig_margin={"top": 10, "bottom": 30, "left": 20, "right": 20},
    )
    # we already set the ratios, but it is necessary to set the size explicitly anyway
    # this is kind of cool, inserts this into the style in html
    f_legend.layout.height = third_dist
    f_legend.layout.width = third_dist
    f_hists_x.layout.height = third_dist
    f_hists_x.layout.width = dist
    f_hists_y.layout.height = dist
    f_hists_y.layout.width = third_dist
    f_scatter.layout.height = dist
    f_scatter.layout.width = dist
    # we create some functions that allow changes when the widgets notice an event
    def change_x_feature(b):
        # Recompute bins and push new x-data into every group's marks.
        h_bins_x = get_h_bins(features[[feature_x.value]], bins, f_lim)
        for index, group in enumerate(features.groupby([label_column])):
            scatters[index].x = group[1][feature_x.value]
            h_y, h_x = np.histogram(group[1][feature_x.value].values, bins=h_bins_x)
            hists_x[index].y = h_y
        ax_x.label = feature_x.value
    def change_y_feature(b):
        h_bins_y = get_h_bins(features[[feature_y.value]], bins, f_lim)
        for index, group in enumerate(features.groupby([label_column])):
            scatters[index].y = group[1][feature_y.value]
            h_y, h_x = np.histogram(group[1][feature_y.value].values, bins=h_bins_y)
            hists_y[index].y = h_y
        ax_y.label = feature_y.value
    # when the user selects a different feature, switch the data plotted
    feature_x.observe(change_x_feature, "value")
    feature_y.observe(change_y_feature, "value")
    # return the stacked figures to be plotted
    return VBox(
        [
            HBox([feature_x, feature_y]),
            HBox([f_hists_x, f_legend]),
            HBox([f_scatter, f_hists_y]),
        ]
    )
```
### Iris Data Set
```
# Install scikit-learn
# NOTE(review): the PyPI name "sklearn" is a deprecated alias; prefer
# `pip install scikit-learn`.
!pip install sklearn
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
# NOTE(review): variable is named `digits` but holds the iris dataset
# (copied from the digits example below).
digits = load_iris()
data = scale(digits.data)
n_features = 4
# n_pca=3
# pca = PCA(n_components=n_pca).fit(data)
df = pd.DataFrame(data, columns=["feature_{}".format(x) for x in range(n_features)])
df["leaf"] = digits.target
# Random metadata column just to demonstrate the tooltip's group_columns.
df["extra_info"] = [np.random.randint(100) for x in range(digits.target.shape[0])]
feature_vector_distribution(
    df, "leaf", group_columns=["extra_info"], bins=25, f_lim={"min": -3, "max": 3}
)
```
### Digits data set, with PCA applied to reduce to 10 features
```
import numpy as np
import pandas as pd
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
# Reduce the 64-dimensional digits features to 10 PCA components so the
# widget's dropdowns stay manageable.
digits = load_digits()
data = scale(digits.data)
n_pca = 10
pca = PCA(n_components=n_pca).fit(data)
df = pd.DataFrame(
    pca.transform(data), columns=["pca_{}".format(x) for x in range(n_pca)]
)
df["digit"] = digits.target
# Random metadata column just to demonstrate the tooltip's group_columns.
df["test"] = [np.random.randint(100) for x in range(digits.target.shape[0])]
feature_vector_distribution(
    df, "digit", group_columns=["test"], bins=20, f_lim={"min": -7, "max": 7}
)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.