text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
```
## Load data
- Dataset Description: https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
```
# Load the tab-separated diabetes dataset (see dataset description link above).
diabetes_df = pd.read_csv('../../../Datasets/diabetes.txt', delimiter='\t')
diabetes_df.columns
# Quick look at the AGE vs BP relationship.
diabetes_df.plot(kind='scatter', x='AGE', y='BP')
plt.show()
diabetes_df.shape
# Predict BP from all remaining features (AGE, SEX, BMI, S1-S6), holding out 10% for testing.
x_train, x_test, y_train, y_test = train_test_split(diabetes_df[['AGE', 'SEX', 'BMI', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6']], diabetes_df[['BP']], test_size=0.1, random_state=2018)
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
```
## Simple Linear Regression
- link: https://en.wikipedia.org/wiki/Simple_linear_regression
```
# initialize linear regression object (fit_intercept=True adds a bias term)
lr = LinearRegression(fit_intercept=True)
# fit data to the model
lr.fit(x_train, y_train)
# make predictions on both splits so train and test error can be compared
predicted_train = lr.predict(x_train)
predicted_test = lr.predict(x_test)
print('Coefficients: \n', lr.coef_)
print('Train stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_train, y_pred=predicted_train)))
print('\tvariance score: {}'.format(r2_score(y_true=y_train, y_pred=predicted_train)))
print('Test stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_test, y_pred=predicted_test)))
print('\tvariance score: {}'.format(r2_score(y_true=y_test, y_pred=predicted_test)))
# plot outputs
# plt.figure(figsize=(10,5))
# plt.scatter(x_test, y_test, color='blue')
# plt.plot(x_test, predicted_test, color='red', linewidth=1)
# plt.show()
# Plot the residuals after fitting a linear model; for a good fit they
# should scatter symmetrically around the zero line.
plt.scatter(predicted_test, predicted_test-y_test)
# Zero-residual reference line spanning the actual prediction range
# (was hard-coded to 80-110, which breaks if predictions fall outside it).
plt.hlines(y=0, xmin=predicted_test.min(), xmax=predicted_test.max())
plt.show()
```
## Ridge Regression
```
# Ridge regression: ordinary least squares with an L2 penalty (alpha=0.5)
# shrinking the coefficients.
ridge_lr = Ridge(fit_intercept=True, alpha=0.5)
# Train on the same split used for the other models.
ridge_lr.fit(x_train, y_train)
# Predict on both splits.
ridge_train_pred = ridge_lr.predict(x_train)
ridge_test_pred = ridge_lr.predict(x_test)
print('Coefficients: \n', ridge_lr.coef_)
# Evaluation metrics for each split.
train_mse = mean_squared_error(y_true=y_train, y_pred=ridge_train_pred)
train_r2 = r2_score(y_true=y_train, y_pred=ridge_train_pred)
test_mse = mean_squared_error(y_true=y_test, y_pred=ridge_test_pred)
test_r2 = r2_score(y_true=y_test, y_pred=ridge_test_pred)
print('Train stats: ')
print("\tmean squared error: {}".format(train_mse))
print('\tvariance score: {}'.format(train_r2))
print('Test stats: ')
print("\tmean squared error: {}".format(test_mse))
print('\tvariance score: {}'.format(test_r2))
# plot outputs
# plt.figure(figsize=(10,5))
# plt.scatter(x_test, y_test, color='blue')
# plt.plot(x_test, predicted_test, color='red', linewidth=1)
# plt.show()
```
## Lasso Regression
```
# initialize Lasso regression object (L1-regularized linear model; the L1
# penalty drives some coefficients exactly to zero, i.e. feature selection)
lasso_lr = Lasso(fit_intercept=True, alpha=0.5)
# fit data to the model
lasso_lr.fit(x_train, y_train)
# make prediction
predicted_train = lasso_lr.predict(x_train)
predicted_test = lasso_lr.predict(x_test)
print('Coefficients: \n', lasso_lr.coef_)
print('Train stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_train, y_pred=predicted_train)))
print('\tvariance score: {}'.format(r2_score(y_true=y_train, y_pred=predicted_train)))
print('Test stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_test, y_pred=predicted_test)))
print('\tvariance score: {}'.format(r2_score(y_true=y_test, y_pred=predicted_test)))
# plot outputs
# NOTE: kept commented out (as in the cells above) because x_test has 9
# feature columns, so plt.scatter(x_test, y_test) raises a size-mismatch
# ValueError; a scatter/line plot only makes sense for a single feature.
# plt.figure(figsize=(10,5))
# plt.scatter(x_test, y_test, color='blue')
# plt.plot(x_test, predicted_test, color='red', linewidth=1)
# plt.show()
```
## kNN Regression
```
# k-nearest-neighbours regression: predict BP as the mean BP of the 40
# closest training points in feature space.
knn_r = KNeighborsRegressor(n_neighbors=40)
# fit data to the model
knn_r.fit(x_train, y_train)
# make prediction
predicted_train = knn_r.predict(x_train)
predicted_test = knn_r.predict(x_test)
print('Train stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_train, y_pred=predicted_train)))
print('\tvariance score: {}'.format(r2_score(y_true=y_train, y_pred=predicted_train)))
print('Test stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_test, y_pred=predicted_test)))
print('\tvariance score: {}'.format(r2_score(y_true=y_test, y_pred=predicted_test)))
```
## Trees
```
# Single decision tree with default parameters; with no depth limit it can
# fit the training set (nearly) perfectly, so expect a large train/test gap.
dt_r = DecisionTreeRegressor()
# fit data to the model
dt_r.fit(x_train, y_train)
# make prediction
predicted_train = dt_r.predict(x_train)
predicted_test = dt_r.predict(x_test)
print('Train stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_train, y_pred=predicted_train)))
print('\tvariance score: {}'.format(r2_score(y_true=y_train, y_pred=predicted_train)))
print('Test stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_test, y_pred=predicted_test)))
print('\tvariance score: {}'.format(r2_score(y_true=y_test, y_pred=predicted_test)))
# Random forest: averages 100 randomized trees to reduce the variance of a
# single decision tree.
rf_r = RandomForestRegressor(n_estimators=100)
# The regressor expects a 1-D target; ravel() flattens the single-column
# y_train values (clearer than the equivalent reshape(len(y_train),)).
rf_r.fit(x_train, y_train.values.ravel())
# make prediction
predicted_train = rf_r.predict(x_train)
predicted_test = rf_r.predict(x_test)
print('Train stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_train, y_pred=predicted_train)))
print('\tvariance score: {}'.format(r2_score(y_true=y_train, y_pred=predicted_train)))
print('Test stats: ')
print("\tmean squared error: {}".format(mean_squared_error(y_true=y_test, y_pred=predicted_test)))
print('\tvariance score: {}'.format(r2_score(y_true=y_test, y_pred=predicted_test)))
```
| github_jupyter |
# Testing of multimodal speech-vision models
**Author:** Ryan Eloff<br>
**Contact:** ryan.peter.eloff@gmail.com<br>
**Date:** October 2018
Experiments notebook 2.
## Overview
*Multimodal one-shot learning* is the problem of learning novel concepts from only *one or a few* examples of features in multiple modalities, with the only supervisory signal being that these features co-occur.
Here we specifically consider multimodal one-shot learning on a dataset of isolated spoken digits paired with images (although any paired sensory information may be used).
We approach this problem by extending unimodal one-shot models to the multimodal case. Assuming that we have such models that can measure similarity within a modality (see [experiments notebook 1](https://github.com/rpeloff/multimodal-one-shot-learning/blob/master/experiments/nb1_unimodal_train_test.ipynb)), we can perform one-shot cross-modal matching by unimodal comparisons through the multimodal support set.
This notebook demonstrates how to extend unimodal models to multimodal one-shot learning, and reproduces the one-shot cross-modal matching (of speech-image digits) results presented in [our paper](https://arxiv.org/abs/1811.03875):
R. Eloff, H. A. Engelbrecht, H. Kamper, "Multimodal One-Shot Learning of Speech and Images," 2018.
## Navigation
1. [Generate random model seeds](#seeds)<br>
2. [Multimodal one-shot models](#multimodal)<br>
2.1. [Test parameters](#test_params)<br>
2.2. [One-shot cross-modal matching tests](#multimodal_test)<br>
2.3. [Summaries](#multimodal_summ)<br>
### Imports:
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import json
import numpy as np
sys.path.append('..')
try: # check that DTW has been compiled
from src.dtw.speech_dtw import _dtw
except ImportError:
print("Building DTW Cython code ...")
!make clean -C ../src/dtw
!make -C ../src/dtw
from src.dtw.speech_dtw import _dtw # should no longer raise ImportError after building Cython DTW code
```
### Utility functions:
```
def test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot):
    """Run one k-shot, 11-way cross-modal speech-image matching test.

    Pairs a trained unimodal speech model and vision model (loaded from the
    given directories) and shells out to ``test_multimodal.py`` via the
    IPython ``!`` escape, so this only runs inside a notebook. Results are
    written to ``out_dir``.
    """
    print("--------------------------------------------------------------------------------")
    print("Testing multimodal model:\n\t--speech-model-dir={}\n\t--vision-model-dir={}"
          .format(speech_model_dir, vision_model_dir))
    print("--------------------------------------------------------------------------------")
    # IPython shell escape; {name} interpolates the Python variables above.
    !python ../src/multimodal/test_multimodal.py \
        --speech-data-dir=../kaldi_features/tidigits \
        --speech-model-dir={speech_model_dir} \
        --vision-model-dir={vision_model_dir} \
        --output-dir={out_dir} \
        --random-seed={random_seed} \
        --zeros-different \
        --n-queries=10 \
        --n-test-episodes=400 \
        --k-shot={k_shot} \
        --l-way=11
def test_speaker_invariance(speech_model_dir, vision_model_dir, out_dir, random_seed):
    """Run the one-shot speaker-invariance test with distractor speakers.

    Same CLI driver as ``test_multimodal_k_shot`` but fixed to 1-shot,
    one query per episode, 4000 episodes, and ``--originator-type='difficult'``
    so the support set contains query-speaker distractors. Uses the IPython
    ``!`` escape, so this only runs inside a notebook.
    """
    print("--------------------------------------------------------------------------------")
    print("Testing multimodal model:\n\t--speech-model-dir={}\n\t--vision-model-dir={}"
          .format(speech_model_dir, vision_model_dir))
    print("--------------------------------------------------------------------------------")
    # IPython shell escape; {name} interpolates the Python variables above.
    !python ../src/multimodal/test_multimodal.py \
        --speech-data-dir=../kaldi_features/tidigits \
        --speech-model-dir={speech_model_dir} \
        --vision-model-dir={vision_model_dir} \
        --output-dir={out_dir} \
        --random-seed={random_seed} \
        --zeros-different \
        --originator-type='difficult' \
        --n-queries=1 \
        --n-test-episodes=4000 \
        --k-shot=1 \
        --l-way=11
def summarise_tests(result_dir, result_file='test_result.txt', speaker_invariance=False):
    """Summarise one-shot test results found under ``result_dir``.

    Walks ``result_dir``, reads ``result_file`` from each (sorted)
    subdirectory, parses the accuracy from its first line, and prints the
    average accuracy with a 95% confidence interval. When
    ``speaker_invariance`` is True, the second line of each result file is
    also parsed for the 'easy' and 'distractor' speaker accuracies.
    """
    overall_results = []
    easy_overall_results = []
    dist_overall_results = []
    for root, subdirs, files in os.walk(result_dir):
        subdirs.sort()  # deterministic summary order across filesystems
        for dirname in subdirs:
            res_file = os.path.join(root, dirname, result_file)
            if not os.path.isfile(res_file):
                continue
            print("--------------------------------------------------------------------------------")
            print("Model summary: directory={}".format(os.path.join(root, dirname)))
            print("--------------------------------------------------------------------------------")
            with open(res_file, 'r') as fp:
                results = fp.read()
            print('\tResults: {}'.format(results))
            # First line has the overall result: "... accuracy: <float>"
            overall_results.append(float(results.split('\n')[0].split('accuracy: ')[1]))
            if speaker_invariance:
                # Second line: "<easy> accuracy: x\t<distractor> accuracy: y"
                invariance_results = results.split('\n')[1].strip().split('\t')
                easy_overall_results.append(float(invariance_results[0].split('accuracy: ')[1]))
                dist_overall_results.append(float(invariance_results[1].split('accuracy: ')[1]))
    if not overall_results:
        # Guard: np.std/np.mean over empty lists emit warnings and print NaN.
        print("No '{}' files found under {}".format(result_file, result_dir))
        return
    conf_interval_95 = 1.96 * np.std(overall_results) / np.sqrt(len(overall_results))
    print("================================================================================")
    print("OVERALL: AVERAGE ACCURACY: {:.4f} % +- {:.4f} (total tests: {})"
          .format(np.mean(overall_results)*100, conf_interval_95*100, len(overall_results)))
    if speaker_invariance:
        # Only computed on demand: with speaker_invariance=False these lists
        # are empty and the original code produced NaN/RuntimeWarning here.
        easy_conf_interval_95 = 1.96 * np.std(easy_overall_results) / np.sqrt(len(easy_overall_results))
        dist_conf_interval_95 = 1.96 * np.std(dist_overall_results) / np.sqrt(len(dist_overall_results))
        print("\tAVERAGE EASY SPEAKER ACCURACY: {:.4f} % +- {:.4f} (total tests: {})"
              .format(np.mean(easy_overall_results)*100, easy_conf_interval_95*100, len(easy_overall_results)))
        print("\tAVERAGE DISTRACTOR SPEAKER ACCURACY: {:.4f} % +- {:.4f} (total tests: {})"
              .format(np.mean(dist_overall_results)*100, dist_conf_interval_95*100, len(dist_overall_results)))
    print("--------------------------------------------------------------------------------")
```
## 1. Generate random model seeds
<a id='seeds'></a>
We average results over 10 models trained with different seeds so that we can report average accuracies with 95% confidence intervals.
These seeds are generated as follows:
```
# Seed NumPy's global RNG so the list of model seeds is reproducible.
np.random.seed(42)
# One seed per trained model; results are averaged over these 10 runs.
random_seeds = np.random.randint(1000, size=10)
print("Random seeds:", random_seeds)
```
## 2. Multimodal one-shot models
<a id='multimodal'></a>
The multimodal one-shot models that we present here are a combination of unimodal one-shot speech and vision models which are previously trained on background data that does not overlap with the multimodal one-shot task.
These models require no further training, and we can directly perform one-shot cross-modal matching by unimodal comparisons through the multimodal support set.
We specifically investigate Siamese neural networks trained for one-shot speech or image classification,
and compare to directly matching images (pixels) and extracted speech features (dynamic time warping), as well as to transfer learning with neural network classifiers.
## 2.1. Test parameters
<a id='test_params'></a>
The following parameters were used to produce the multimodal one-shot learning results in the paper (only used for selecting correct models for testing):
```
# These values must match the directory names used when the unimodal models
# were trained: they are only used to locate the correct saved models below.
# DTW + pixels
dtw_feats_type = 'mfcc'
# FFNN classifier
ffnn_batch_size = 200 # same for both modalities
# CNN classifier
cnn_batch_size = 200 # same for both modalities
# Siamese CNN (offline)
speech_offline_n_train_episodes = 200
vision_offline_n_train_episodes = 600
# Siamese CNN (online)
speech_online_n_train_episodes = 50
vision_online_n_train_episodes = 150
```
## 2.2. One-shot cross-modal matching tests
<a id='multimodal_test'></a>
We test the trained multimodal speech-vision models on three tasks, where speech-image pairs are randomly selected from the [TIDigits speech corpus](https://catalog.ldc.upenn.edu/LDC93S10) and [MNIST handwritten digit dataset](http://yann.lecun.com/exdb/mnist/):
1. One-shot 11-way cross-modal speech-image digit matching
2. Five-shot 11-way cross-modal speech-image digit matching
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
### Dynamic Time Warping (DTW) for Speech + Pixel Matching for Images
1. One-shot 11-way cross-modal speech-image digit matching
```
# Where the 1-shot DTW+pixels test results are written.
output_dir = "./results/multimodal/dtw_pixels/1_shot/{}".format(dtw_feats_type)
# Run the 1-shot test once per model seed; results are averaged later.
for random_seed in random_seeds:
    speech_model_dir = "./models/speech/dtw/{}/random_seed={}".format(dtw_feats_type,
                                                                      random_seed)
    vision_model_dir = "./models/vision/pixels/random_seed={}".format(random_seed)
    out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
    test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=1)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/dtw_pixels/5_shot/{}".format(dtw_feats_type)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/dtw/{}/random_seed={}".format(dtw_feats_type,
random_seed)
vision_model_dir = "./models/vision/pixels/random_seed={}".format(random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=5)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
output_dir = "./results/multimodal/dtw_pixels/speaker_invariance/{}".format(dtw_feats_type)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/dtw/{}/random_seed={}".format(dtw_feats_type,
random_seed)
vision_model_dir = "./models/vision/pixels/random_seed={}".format(random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_speaker_invariance(speech_model_dir, vision_model_dir, out_dir, random_seed)
```
### Feedforward Neural Network (FFNN) Softmax Classifiers for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/ffnn_softmax/1_shot/batch_size={}".format(ffnn_batch_size)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/ffnn_softmax/batch_size={}/random_seed={}".format(ffnn_batch_size,
random_seed)
vision_model_dir = "./models/vision/ffnn_softmax/batch_size={}/random_seed={}".format(ffnn_batch_size,
random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=1)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/ffnn_softmax/5_shot/batch_size={}".format(ffnn_batch_size)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/ffnn_softmax/batch_size={}/random_seed={}".format(ffnn_batch_size,
random_seed)
vision_model_dir = "./models/vision/ffnn_softmax/batch_size={}/random_seed={}".format(ffnn_batch_size,
random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=5)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
output_dir = "./results/multimodal/ffnn_softmax/speaker_invariance/batch_size={}".format(ffnn_batch_size)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/ffnn_softmax/batch_size={}/random_seed={}".format(ffnn_batch_size,
random_seed)
vision_model_dir = "./models/vision/ffnn_softmax/batch_size={}/random_seed={}".format(ffnn_batch_size,
random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_speaker_invariance(speech_model_dir, vision_model_dir, out_dir, random_seed)
```
### Convolutional Neural Network (CNN) Softmax Classifiers for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/cnn_softmax/1_shot/batch_size={}".format(cnn_batch_size)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/cnn_softmax/batch_size={}/random_seed={}".format(cnn_batch_size,
random_seed)
vision_model_dir = "./models/vision/cnn_softmax/batch_size={}/random_seed={}".format(cnn_batch_size,
random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=1)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/cnn_softmax/5_shot/batch_size={}".format(cnn_batch_size)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/cnn_softmax/batch_size={}/random_seed={}".format(cnn_batch_size,
random_seed)
vision_model_dir = "./models/vision/cnn_softmax/batch_size={}/random_seed={}".format(cnn_batch_size,
random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=5)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
output_dir = "./results/multimodal/cnn_softmax/speaker_invariance/batch_size={}".format(cnn_batch_size)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/cnn_softmax/batch_size={}/random_seed={}".format(cnn_batch_size,
random_seed)
vision_model_dir = "./models/vision/cnn_softmax/batch_size={}/random_seed={}".format(cnn_batch_size,
random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_speaker_invariance(speech_model_dir, vision_model_dir, out_dir, random_seed)
```
### Siamese CNN (offline) Comparators for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/siamese_offline/1_shot/n_train_speech={}_vision={}".format(
speech_offline_n_train_episodes, vision_offline_n_train_episodes)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/siamese_offline/n_train={}/random_seed={}".format(
speech_offline_n_train_episodes, random_seed)
vision_model_dir = "./models/vision/siamese_offline/n_train={}/random_seed={}".format(
vision_offline_n_train_episodes, random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=1)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/siamese_offline/5_shot/n_train_speech={}_vision={}".format(
speech_offline_n_train_episodes, vision_offline_n_train_episodes)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/siamese_offline/n_train={}/random_seed={}".format(
speech_offline_n_train_episodes, random_seed)
vision_model_dir = "./models/vision/siamese_offline/n_train={}/random_seed={}".format(
vision_offline_n_train_episodes, random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=5)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
output_dir = "./results/multimodal/siamese_offline/speaker_invariance/n_train_speech={}_vision={}".format(
speech_offline_n_train_episodes, vision_offline_n_train_episodes)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/siamese_offline/n_train={}/random_seed={}".format(
speech_offline_n_train_episodes, random_seed)
vision_model_dir = "./models/vision/siamese_offline/n_train={}/random_seed={}".format(
vision_offline_n_train_episodes, random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_speaker_invariance(speech_model_dir, vision_model_dir, out_dir, random_seed)
```
### Siamese CNN (online) Comparators for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/siamese_online/1_shot/n_train_speech={}_vision={}".format(
speech_online_n_train_episodes, vision_online_n_train_episodes)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/siamese_online/n_train={}/random_seed={}".format(
speech_online_n_train_episodes, random_seed)
vision_model_dir = "./models/vision/siamese_online/n_train={}/random_seed={}".format(
vision_online_n_train_episodes, random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=1)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
output_dir = "./results/multimodal/siamese_online/5_shot/n_train_speech={}_vision={}".format(
speech_online_n_train_episodes, vision_online_n_train_episodes)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/siamese_online/n_train={}/random_seed={}".format(
speech_online_n_train_episodes, random_seed)
vision_model_dir = "./models/vision/siamese_online/n_train={}/random_seed={}".format(
vision_online_n_train_episodes, random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_multimodal_k_shot(speech_model_dir, vision_model_dir, out_dir, random_seed, k_shot=5)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
output_dir = "./results/multimodal/siamese_online/speaker_invariance/n_train_speech={}_vision={}".format(
speech_online_n_train_episodes, vision_online_n_train_episodes)
for random_seed in random_seeds:
speech_model_dir = "./models/speech/siamese_online/n_train={}/random_seed={}".format(
speech_online_n_train_episodes, random_seed)
vision_model_dir = "./models/vision/siamese_online/n_train={}/random_seed={}".format(
vision_online_n_train_episodes, random_seed)
out_dir = os.path.join(output_dir, 'random_seed={}'.format(random_seed))
test_speaker_invariance(speech_model_dir, vision_model_dir, out_dir, random_seed)
```
## 2.3. Summaries
<a id='multimodal_summ'></a>
This section presents summaries on the one-shot testing of the multimodal models.
### Dynamic Time Warping (DTW) for Speech + Pixel Matching for Images
1. One-shot 11-way cross-modal speech-image digit matching
```
# Summarise the 1-shot DTW+pixels results (averaged over the 10 seeds).
result_dir = "./results/multimodal/dtw_pixels/1_shot/{}".format(dtw_feats_type)
summarise_tests(result_dir)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/dtw_pixels/5_shot/{}".format(dtw_feats_type)
summarise_tests(result_dir)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
result_dir = "./results/multimodal/dtw_pixels/speaker_invariance/{}".format(dtw_feats_type)
summarise_tests(result_dir, speaker_invariance=True)
```
### Feedforward Neural Network (FFNN) Softmax Classifiers for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/ffnn_softmax/1_shot/batch_size={}".format(ffnn_batch_size)
summarise_tests(result_dir)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/ffnn_softmax/5_shot/batch_size={}".format(ffnn_batch_size)
summarise_tests(result_dir)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
result_dir = "./results/multimodal/ffnn_softmax/speaker_invariance/batch_size={}".format(ffnn_batch_size)
summarise_tests(result_dir, speaker_invariance=True)
```
### Convolutional Neural Network (CNN) Softmax Classifiers for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/cnn_softmax/1_shot/batch_size={}".format(cnn_batch_size)
summarise_tests(result_dir)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/cnn_softmax/5_shot/batch_size={}".format(cnn_batch_size)
summarise_tests(result_dir)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
result_dir = "./results/multimodal/cnn_softmax/speaker_invariance/batch_size={}".format(cnn_batch_size)
summarise_tests(result_dir, speaker_invariance=True)
```
### Siamese CNN (offline) Comparators for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/siamese_offline/1_shot/n_train_speech={}_vision={}".format(
speech_offline_n_train_episodes, vision_offline_n_train_episodes)
summarise_tests(result_dir)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/siamese_offline/5_shot/n_train_speech={}_vision={}".format(
speech_offline_n_train_episodes, vision_offline_n_train_episodes)
summarise_tests(result_dir)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
result_dir = "./results/multimodal/siamese_offline/speaker_invariance/n_train_speech={}_vision={}".format(
speech_offline_n_train_episodes, vision_offline_n_train_episodes)
summarise_tests(result_dir, speaker_invariance=True)
```
### Siamese CNN (online) Comparators for Speech and Images
1. One-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/siamese_online/1_shot/n_train_speech={}_vision={}".format(
speech_online_n_train_episodes, vision_online_n_train_episodes)
summarise_tests(result_dir)
```
2. Five-shot 11-way cross-modal speech-image digit matching
```
result_dir = "./results/multimodal/siamese_online/5_shot/n_train_speech={}_vision={}".format(
speech_online_n_train_episodes, vision_online_n_train_episodes)
summarise_tests(result_dir)
```
3. Speaker invariance for one-shot 11-way cross-modal speech-image digit matching in the presence of query speaker distractors
```
result_dir = "./results/multimodal/siamese_online/speaker_invariance/n_train_speech={}_vision={}".format(
speech_online_n_train_episodes, vision_online_n_train_episodes)
summarise_tests(result_dir, speaker_invariance=True)
```
| github_jupyter |
# Host-guest usage with stk
by: Andrew Tarzia
# Imports
```
import sys
sys.path.append('/usr/local/lib/python3.7/site-packages/')
from rdkit import Chem
from rdkit.Chem import AllChem as rdkit
from collections import defaultdict
from rdkit.Chem import rdFMCS
from rdkit.Chem import Draw
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem import rdDistGeom
IPythonConsole.ipython_3d = True
import py3Dmol
from IPython.display import Image
import matplotlib.pyplot as plt
import subprocess
import time
import stk
import stko
import spindry as spd
%matplotlib inline
```
# Some useful functions
```
def show_stk_mol(stk_mol):
    """Render an stk molecule in an interactive py3Dmol stick-view widget."""
    # Convert to an RDKit mol block, which py3Dmol can parse directly.
    mol_block = rdkit.MolToMolBlock(stk_mol.to_rdkit_mol())
    viewer = py3Dmol.view(
        data=mol_block,
        style={'stick': {'colorscheme': 'cyanCarbon'}},
        width=400,
        height=400,
    )
    viewer.setBackgroundColor('0xeeeeee')
    viewer.zoomTo()
    viewer.show()
```
Working:
```
# Produce a Pd+2 atom with 4 functional groups.
palladium_atom = stk.BuildingBlock(
    smiles='[Pd+2]',
    functional_groups=(
        # One SingleAtom functional group per coordination site.
        stk.SingleAtom(stk.Pd(0, charge=2))
        for i in range(4)
    ),
    position_matrix=[[0., 0., 0.]],
)
# Build a building block with two functional groups using
# the SmartsFunctionalGroupFactory.
bb1 = stk.BuildingBlock(
    smiles=(
        'C1=NC=CC(C2=CC=CC(C3=CC=NC=C3)=C2)=C1'
    ),
    functional_groups=[
        stk.SmartsFunctionalGroupFactory(
            # bonders=(1, ) selects the N (index 1 in the SMARTS) as bonder.
            smarts='[#6]~[#7X2]~[#6]',
            bonders=(1, ),
            deleters=(),
        ),
    ],
)
# Assemble an M6L12 cube cage from the metal atom and ditopic ligand.
cage1 = stk.ConstructedMolecule(
    stk.cage.M6L12Cube(
        building_blocks=(palladium_atom, bb1),
        # Ensure that bonds between the GenericFunctionalGroups
        # of the ligand and the SingleAtom functional groups
        # of the metal are dative.
        reaction_factory=stk.DativeReactionFactory(
            stk.GenericReactionFactory(
                bond_orders={
                    frozenset({
                        stk.GenericFunctionalGroup,
                        stk.SingleAtom
                    }): 9
                }
            )
        ),
        optimizer=stk.MCHammer(num_steps=2000),
    )
)
show_stk_mol(cage1)
# Guest molecule to dock inside the cage.
stk_guest = stk.BuildingBlock('C1C(O[H])C2C(C=CC(=C2)CP([H])[H])CC1')
show_stk_mol(stk_guest)
# Collect (atom id, element symbol) pairs for converting to SpinDry molecules.
cage_atoms = [
    (atom.get_id(), atom.__class__.__name__)
    for atom in cage1.get_atoms()
]
stk_mol_atoms = [
    (atom.get_id(), atom.__class__.__name__)
    for atom in stk_guest.get_atoms()
]
# Build SpinDry host/guest from the stk atoms and position matrices.
host = spd.Molecule.init(
    atoms=(
        spd.Atom(id=i[0], element_string=i[1])
        for i in cage_atoms
    ),
    position_matrix=cage1.get_position_matrix(),
)
guest = spd.Molecule.init(
    atoms=(
        spd.Atom(id=i[0], element_string=i[1])
        for i in stk_mol_atoms
    ),
    position_matrix=stk_guest.get_position_matrix(),
)
print(host, guest)
# SpinDry conformer generator for rigid-body host-guest docking.
cg = spd.Spinner(
    step_size=0.5,
    rotation_step_size=5,
    num_conformers=100,
    max_attempts=2000,
)
energies = {}
for conformer in cg.get_conformers(host, guest):
    print(conformer)
    print(conformer.get_cid(), conformer.get_potential())
    # Copy the conformer geometry back onto the stk molecules.
    cage1 = cage1.with_position_matrix(
        conformer.get_host().get_position_matrix()
    )
    stk_guest = stk_guest.with_position_matrix(
        conformer.get_guest().get_position_matrix()
    )
    # Rebuild the host-guest complex at this geometry and write it to disk
    # (assumes the 'hg_example_output' directory already exists).
    complex_mol = stk.ConstructedMolecule(
        topology_graph=stk.host_guest.Complex(stk.BuildingBlock.init_from_molecule(cage1), stk_guest)
    )
    complex_mol.write(
        f'hg_example_output/hg_conf_{conformer.get_cid()}.mol'
    )
    # UFF single-point energy of the full complex via stko.
    energies[conformer.get_cid()] = stko.UFFEnergy(ignore_inter_interactions=False).get_energy(complex_mol)
energies  # notebook display: conformer id -> UFF energy
```
| github_jupyter |
# Name
Data preparation using Apache Hive on YARN with Cloud Dataproc
# Label
Cloud Dataproc, GCP, Cloud Storage, YARN, Hive, Apache
# Summary
A Kubeflow Pipeline component to prepare data by submitting an Apache Hive job on YARN to Cloud Dataproc.
# Details
## Intended use
Use the component to run an Apache Hive job as one preprocessing step in a Kubeflow Pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectId | | |
| region | The Cloud Dataproc region to handle the request. | No | GCPRegion | | |
| cluster_name | The name of the cluster to run the job. | No | String | | |
| queries | The queries to execute the Hive job. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | | None |
| query_file_uri | The HCFS URI of the script that contains the Hive queries. | Yes | GCSPath | | None |
| script_variables | Mapping of the query’s variable names to their values (equivalent to the Hive command: SET name="value";). | Yes | Dict | | None |
| hive_job | The payload of a [HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) | Yes | Dict | | None |
| job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | | None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
## Detailed description
This component creates a Hive job from [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```
%%capture --no-stderr
!pip3 install kfp --upgrade
```
2. Load the component using KFP SDK
```
import kfp.components as comp
dataproc_submit_hive_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.1.2-rc.1/components/gcp/dataproc/submit_hive_job/component.yaml')
help(dataproc_submit_hive_job_op)
```
### Sample
Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Setup a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a Hive query
Put your Hive queries in the queries list, or upload your Hive queries into a file saved in a Cloud Storage bucket and then enter the Cloud Storage bucket’s path in `query_file_uri`. In this sample, we will use a hard-coded query in the queries list to select data from a public CSV file from Cloud Storage.
For more details, see the [Hive language manual.](https://cwiki.apache.org/confluence/display/Hive/LanguageManual)
#### Set sample parameters
```
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
DROP TABLE IF EXISTS natality_csv;
CREATE EXTERNAL TABLE natality_csv (
source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,
state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,
plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,
mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,
gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,
mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,
alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,
born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,
ever_born BIGINT, father_race BIGINT, father_age BIGINT,
record_weight BIGINT
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION 'gs://public-datasets/natality/csv';
SELECT * FROM natality_csv LIMIT 10;'''
EXPERIMENT_NAME = 'Dataproc - Submit Hive Job'
```
#### Example pipeline that uses the component
```
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Hive job pipeline',
description='Dataproc submit Hive job pipeline'
)
def dataproc_submit_hive_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
hive_job='',
job='',
wait_interval='30'
):
dataproc_submit_hive_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
hive_job=hive_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```
pipeline_func = dataproc_submit_hive_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_hive_job.py)
* [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/submit_hive_job/sample.ipynb)
* [Dataproc HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| github_jupyter |
```
import numpy as np
from astropy.io import fits
from astropy.io.fits import HDUList
from astropy.wcs import WCS
from numpy import arcsinh
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
from reproject import reproject_interp
from aplpy.rgb import make_rgb_cube
%ls -l
def load_file(filename):
    """Open a FITS file and return (data, header, WCS, primary HDU)."""
    hdu = fits.open(filename)[0]
    hdr = hdu.header
    return hdu.data, hdr, WCS(hdr), hdu
def world2pix(ra, dec, wcs):
    """Convert one (ra, dec) sky coordinate to (x, y) pixel coordinates via `wcs`."""
    pixel = wcs.wcs_world2pix(np.array([[ra, dec]]), 0, ra_dec_order=True)
    x, y = pixel[0][0], pixel[0][1]
    return x, y
# Indices into the `gains` sequence passed to scale_rgb.
R_IDX = 0
G_IDX = 1
B_IDX = 2

def scale_rgb(r, g, b, sigma=6, min=0, max=10000, gains=(0.9, 1.1, 1.8), gamma=0.1):
    """Stretch three channel images into displayable 0-255 integer RGB planes.

    Applies an asinh stretch driven by the per-pixel mean intensity, then
    per-channel gains and a colour boost relative to the green channel.

    Parameters:
        r, g, b: 2-D numpy float arrays of equal shape (raw channel data).
        sigma: softening parameter of the asinh stretch.
        min, max: intensity range mapped onto 0-255 (the names shadow the
            builtins; kept unchanged for backward compatibility with callers).
        gains: per-channel multipliers, indexed by R_IDX/G_IDX/B_IDX.
        gamma: strength of the red/blue boost away from green.

    Returns:
        Tuple (r, g, b) of integer arrays clipped to 0..255.
    """
    r = r.copy()
    g = g.copy()
    b = b.copy()
    slope = 255 / arcsinh((max - min) / sigma)
    # Pixels whose mean intensity falls below `min` are blanked in all channels.
    mean = (r + g + b) / 3
    mean[mean < min] = 0
    r[mean == 0] = 0
    g[mean == 0] = 0
    b[mean == 0] = 0
    # Guard against 0/0 -> NaN on blanked pixels: they are already zero, so any
    # finite scale factor keeps them black.  (The original divided by `mean`
    # directly, producing NaNs and invalid-value warnings on those pixels.)
    safe_mean = np.where(mean == 0, 1.0, mean)
    scale = slope * arcsinh((mean - min) / sigma) / safe_mean
    r = (r * scale).astype(int)
    g = (g * scale).astype(int)
    b = (b * scale).astype(int)
    r = (r * gains[R_IDX]).astype(int)
    g = (g * gains[G_IDX]).astype(int)
    b = (b * gains[B_IDX]).astype(int)
    # Colour boost: push red and blue away from green by `gamma` of the gap.
    r += (gamma * (r - g)).astype(int)
    b += (gamma * (b - g)).astype(int)
    # Clamp into the displayable byte range.
    r[r < 0] = 0
    r[r > 255] = 255
    g[g < 0] = 0
    g[g > 255] = 255
    b[b < 0] = 0
    b[b > 255] = 255
    return r, g, b
def cutout(data, wcs, ra, dec, x_size=100, y_size=100):
    """Return a rectangular postage stamp of `data` centred on (ra, dec)."""
    col_centre, row_centre = world2pix(ra, dec, wcs)
    col_start = int(round(col_centre) - x_size / 2)
    row_start = int(round(row_centre) - y_size / 2)
    # NumPy images index as [row, column].
    return data[row_start:row_start + y_size, col_start:col_start + x_size]
# Load each SDSS band (u, g, r, i, z) of field 002141-3-0076:
# pixel data, FITS header, WCS solution, and the primary HDU itself.
u_data, u_header, u_wcs, u_hdu = load_file('frame-u-002141-3-0076.fits')
g_data, g_header, g_wcs, g_hdu = load_file('frame-g-002141-3-0076.fits')
r_data, r_header, r_wcs, r_hdu = load_file('frame-r-002141-3-0076.fits')
i_data, i_header, i_wcs, i_hdu = load_file('frame-i-002141-3-0076.fits')
z_data, z_header, z_wcs, z_hdu = load_file('frame-z-002141-3-0076.fits')
# Bare expressions below are notebook cells: their value is displayed, not stored.
min((g_data.min(), r_data.min(), i_data.min()))
max((g_data.max(), r_data.max(), i_data.max()))
hdulist = HDUList([g_hdu, r_hdu, i_hdu])
i_hdu.header
# Resample the g-band image onto the r-band pixel grid.
data_array, footprint = reproject_interp(g_hdu, r_header)
g_data
plt.imshow(g_data)
data_array
plt.imshow(data_array)
# Target position (degrees) and 50x50-pixel cutouts; the i/r/g bands are used
# as the R/G/B display channels.
ra, dec = (153.937453927558,-0.371692014858263)
red_cutout = cutout(i_data, i_wcs, ra, dec, 50, 50)
green_cutout = cutout(r_data, r_wcs, ra, dec, 50, 50)
blue_cutout = cutout(g_data, g_wcs, ra, dec, 50, 50)
r, g, b = scale_rgb(red_cutout, green_cutout, blue_cutout)
# Rec. 709 luma in linear light: 0.2126*R^2.2 + 0.7152*G^2.2 + 0.0722*B^2.2,
# then back through the 2.2 gamma.  Fixed: the blue term previously read
# "0.0722 + np.power(b, 2.2)" (addition instead of multiplication), which
# contradicted the formula quoted in the trailing comment.
grey = np.power((0.2126 * np.power(r, 2.2) + 0.7152 * np.power(g, 2.2) + 0.0722 * np.power(b, 2.2)), 1/2.2) # (0.2126×Red2.2 + 0.7152×Green2.2 + 0.0722×Blue2.2
grey *= 255/np.max(grey)
plt.figure(figsize=(20,20))
plt.imshow(grey.T, cmap='Reds')
plt.colorbar()
plt.figure(figsize=(20,20))
# NOTE(review): `scaled_data` is not defined anywhere in this notebook as
# shown -- presumably a leftover from a deleted cell; confirm before running.
plt.imshow(scaled_data[2], cmap='gray')
plt.colorbar()
# Sanity check: the same sky position should land on similar pixel
# coordinates in every band's WCS.
ra = 153.925821207403
dec = -0.348890142024622
print(world2pix(ra, dec, g_wcs))
print(world2pix(ra, dec, i_wcs))
print(world2pix(ra, dec, r_wcs))
print(world2pix(ra, dec, u_wcs))
print(world2pix(ra, dec, z_wcs))
# NOTE(review): `location` is not defined in this notebook as shown --
# presumably left over from an earlier cell that called wcs_world2pix directly.
ra_centre = location[0][0]
dec_centre = location[0][1]
size = 40
print(ra_centre, dec_centre)
# Manual 40x40 cutout around the centre.  Despite the names, these are
# pixel coordinates, not ra/dec values.
ra_top = int(ra_centre - size/2)
dec_top = int(dec_centre - size/2)
ra_bottom = ra_top + size
dec_bottom = dec_top + size
# NOTE(review): `fits_data` is also undefined here -- likely the raw
# fits.open(...) handle from a deleted cell.
fits_data[0].data.shape
image_data = fits_data[0].data[dec_top:dec_bottom, ra_top:ra_bottom]
image_data.size
plt.figure(figsize=(10,10))
plt.imshow(image_data, cmap='gray')
plt.colorbar()
```
| github_jupyter |
# Reading the data
Keeping only the votes per party. The previous columns are the settlement names, etc <br>
Dropping booths with zero votes
```
import numpy as np
import pandas as pd
# Load the 2015 booth-level election results (Latin-1 encoded export).
df = pd.read_csv('../input/results_by_booth_2015 - english - v3.csv', encoding='iso-8859-1')
# Keep only numeric columns, then drop the first six (settlement metadata),
# leaving one column of raw vote counts per party.
votes=df.select_dtypes(include=[np.number])
votes=votes.drop(votes.columns[range(0,6)],axis=1)
party_titles=df.select_dtypes(include=[np.number]).columns.tolist()
party_titles=party_titles[6:]
# Drop booths with zero recorded votes.
votes=(votes[(votes.sum(axis=1)>0)])
N=votes.shape[0]  # number of booths
M=votes.shape[1]  # number of parties
```
# Partitioning the booths into different clusters using K-Means, and plotting them on a 2-D plot using PCA
Inspired by [this analysis of the 2013 elections (Hebrew)](https://sadnadearaa.blogspot.co.il/2013/01/blog-post_24.html) <br>
Note that we analyze the partition of votes per booth, to account for booths with different sizes <br>
The top 5 parties in each cluster are shown. <br>
K=4 was chosen because it is possible to see all clusters clearly in a 2-D plot.
```
import sklearn
from sklearn.decomposition import PCA as sklearnPCA
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# Project each booth's vote-share vector onto the first two principal components.
pca = sklearnPCA(n_components=2)
relative_votes = votes.div(votes.sum(axis=1), axis=0)  # fractions, not raw counts
X = pca.fit_transform(relative_votes)
# Cluster the vote shares; K=4 keeps all clusters clearly visible in 2-D.
K = 4
# Fixed: the original fitted the model twice in a row (KMeans(...).fit(...)
# followed by another .fit(...)), discarding the first fit; one is enough.
kmeanModel = KMeans(n_clusters=K, max_iter=100).fit(relative_votes)
for k in range(0,K):
    # Scatter the booths of cluster k, then mark and label its centroid.
    plt.plot(X[kmeanModel.labels_==k,0],X[kmeanModel.labels_==k,1],'.',markersize=3)
    current_cluster_x=np.mean(X[kmeanModel.labels_==k,0])
    current_cluster_y=np.mean(X[kmeanModel.labels_==k,1])
    plt.plot(current_cluster_x,current_cluster_y,'ok',markersize=10)
    plt.text(current_cluster_x,current_cluster_y,r' Cluster '+str(k+1), fontsize=16)
plt.title('PCA plot with clustering', fontsize=22)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
plt.show()
# Print each cluster's five strongest parties by centroid vote share.
cluster=[None]*K
for k in range(0,K):
    cluster[k]=sorted(zip(kmeanModel.cluster_centers_[k],party_titles),reverse=True)
    print ('Cluster '+str(k+1)+':', end=' ')
    for i in range(0,5):
        print(cluster[k][i][1]+" - "+str(round(100*cluster[k][i][0])/100), end=', ')
    print()
```
## Evidently, the Joint List (Arab party) dominated booths are clearly separated from booths ruled by other major parties.
## Those are separated clearly into clusters dominated by the Zionist Union, Likud, and United Torah Judaism (from top to bottom), with the first two clusters showing some significant mixing.
# Showing the correlation plot between parties.
## Each node corresponds to a party, and an edge between parties corresponds to positive correlation between the fraction of votes for those two parties.
No threshold is taken for the correlation (at this phase). <br>
Parties are partitioned into communities, maximizing connections inside a community and minimizing connections between nodes from different communities.
```
import networkx as nx
from community import best_partition # --> http://perso.crans.org/aynaud/communities/
# Correlation between parties' vote shares across booths (parties as columns).
C=np.corrcoef(relative_votes,rowvar=0)
# Unweighted adjacency: connect parties whose vote shares correlate positively.
A=1*(C>0)
G=nx.Graph(A)
G=nx.relabel_nodes(G,dict(zip(G.nodes(),relative_votes.columns.values)))
# Louvain community detection over the correlation graph.
communities=best_partition(G)
community_colors={0:0,1:0.5,2:1}
node_coloring=[community_colors[communities[node]] for node in G.nodes()]
# NOTE(review): assigning to nx.pos stores the layout as an attribute on the
# networkx module itself; a plain local variable was probably intended.
nx.pos=nx.fruchterman_reingold_layout(G, dim=2, k=None, pos=None, fixed=None, iterations=5000, weight='weight', scale=1.0, center=None)
nx.draw_networkx(G, cmap=plt.get_cmap('jet'), with_labels=True, node_color=node_coloring,font_size=16)
plt.title('Party correlation network and community partition (all parties)', fontsize=22)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
```
# Correlation between major parties
We now consider only the major parties, which received a significant share (over 2%) of the total votes, and show the correlation network between those.
```
# Keep only parties holding more than `threshold` of the nationwide vote.
threshold=0.02
party_is_major=((votes.sum(axis=0)/sum(votes.sum(axis=0)))>threshold)
major_parties=relative_votes.columns.values[party_is_major==True]
major_party_votes=relative_votes[major_parties]
# Same correlation-network construction as above, restricted to major parties.
C=np.corrcoef(major_party_votes,rowvar=0)
A=1*(C>0)
G=nx.Graph(A)
G=nx.relabel_nodes(G,dict(zip(G.nodes(),major_parties)))
communities=best_partition(G)
community_colors={0:0,1:0.2,2:0.5,3:0.7,4:0.9}
node_coloring=[community_colors[communities[node]] for node in G.nodes()]
# NOTE(review): as above, nx.pos sets an attribute on the networkx module.
nx.pos=nx.fruchterman_reingold_layout(G, dim=2, k=None, pos=None, fixed=None, iterations=5000, weight='weight', scale=1.0, center=None)
nx.draw_networkx(G, cmap=plt.get_cmap('jet'), with_labels=True, node_color=node_coloring,font_size=30)
plt.title('Party correlation network and community partition (major parties)', fontsize=22)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
```
# The network clearly shows the Likud as the central party, with a central community of parties, distinct 'left' and 'right', and the Joint List (the Arab party) disjoint from all other major parties.
| github_jupyter |
```
import pandas as pd
from sklearn.preprocessing import Binarizer, LabelEncoder, OneHotEncoder
import numpy as np
# sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Load the sales win/loss data and drop the row-identifier column.
data = pd.read_csv('sales_loss_win_data.csv')
data.head()
data = data.drop('Opportunity Number', axis =1 )
data.head()
# NOTE(review): this encoder instance is never used below; encoding is done
# by MultiColumnLabelEncoder, which builds its own LabelEncoder per column.
le = LabelEncoder()
class MultiColumnLabelEncoder:
    """Label-encode several DataFrame columns in one transform call.

    Mimics the sklearn transformer interface (fit / transform /
    fit_transform) so it can be dropped into preprocessing code.
    """

    def __init__(self,columns = None):
        self.columns = columns # array of column names to encode; None = all columns

    def fit(self,X,y=None):
        # Stateless: each transform fits fresh LabelEncoders, so there is
        # nothing to learn up front.
        return self # not relevant here

    def transform(self,X):
        '''
        Transforms columns of X specified in self.columns using
        LabelEncoder(). If no columns specified, transforms all
        columns in X.
        '''
        output = X.copy()
        if self.columns is not None:
            for col in self.columns:
                output[col] = LabelEncoder().fit_transform(output[col])
        else:
            # Fixed: DataFrame.iteritems() was deprecated and removed in
            # pandas 2.0; .items() is the drop-in replacement.
            for colname, col in output.items():
                output[colname] = LabelEncoder().fit_transform(col)
        return output

    def fit_transform(self,X,y=None):
        return self.fit(X,y).transform(X)
# Label-encode every categorical column in a single pass.
final_data = MultiColumnLabelEncoder(columns = ['Supplies Subgroup', 'Supplies Group', 'Region', 'Route To Market',
                                                'Opportunity Result', 'Competitor Type']).fit_transform(data)
final_data.head()
```
## decision tree algorithm
Decision Trees (DTs) are a non-parametric supervised learning method used for classification and regression. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
Source: https://scikit-learn.org/stable/modules/tree.html (check out the advantages & disadvantages of using DTs)
```
# Target is the win/loss label; features are every other encoded column.
yVar = final_data['Opportunity Result']
xVar = final_data.loc[:, final_data.columns != 'Opportunity Result']
# 80/20 split (no random_state, so the split differs between runs).
X_train, X_test, y_train, y_test = train_test_split(xVar, yVar, test_size=0.2)
print (X_train.shape, y_train.shape)
print (X_test.shape, y_test.shape)
```
## sklearn's default criteria for measuring the quality of the tree splits is gini index
```
# Two shallow trees differing only in the split-quality criterion
# (gini impurity vs. information gain / entropy).
clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100,
                                  max_depth=3, min_samples_leaf=5)
clf_gini.fit(X_train, y_train)
clf_entropy = DecisionTreeClassifier(criterion = "entropy", random_state = 100,
                                     max_depth=3, min_samples_leaf=5)
clf_entropy.fit(X_train, y_train)
# Compare held-out accuracy of the two criteria.
y_pred_gini = clf_gini.predict(X_test)
y_pred_en = clf_entropy.predict(X_test)
print ("Gini accuracy is ", accuracy_score(y_test,y_pred_gini)*100)
print ("Entropy accuracy is ", accuracy_score(y_test,y_pred_en)*100)
```
| github_jupyter |
介绍如何在pytorch环境下,使用JSMA算法攻击基于ImageNet数据集预训练的alexnet模型。
Jupyter notebook中使用Anaconda中的环境需要单独配置,默认情况下使用的是系统默认的Python环境,以使用advbox环境为例。
首先在默认系统环境下执行以下命令,安装ipykernel。
conda install ipykernel
conda install -n advbox ipykernel
在advbox环境下激活,这样启动后就可以在界面上看到advbox了。
python -m ipykernel install --user --name advbox --display-name advbox
```
# Enable debug logging
import logging
logging.basicConfig(level=logging.INFO,format="%(filename)s[line:%(lineno)d] %(levelname)s %(message)s")
logger=logging.getLogger(__name__)
import torch
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.utils.data.dataloader as Data
import torch.nn as nn
from torchvision import models
from adversarialbox.adversary import Adversary
from adversarialbox.attacks.saliency import JSMA
from adversarialbox.models.pytorch import PytorchModel
import numpy as np
import cv2
from tools import show_images_diff
# Path of the image to attack
image_path="tutorials/cropped_panda.jpg"
# Define what device we are using
logging.info("CUDA Available: {}".format(torch.cuda.is_available()))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# cv2 reads images as BGR by default; reverse the channel axis to get RGB
orig = cv2.imread(image_path)[..., ::-1]
# Resize to 224x224
orig = cv2.resize(orig, (224, 224))
adv=None
img = orig.copy().astype(np.float32)
# Normalize the image data with the ImageNet mean/std
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img /= 255.0
img = (img - mean) / std
# PyTorch expects CHW image layout
# [224,224,3]->[3,224,224]
img = img.transpose(2, 0, 1)
img = Variable(torch.from_numpy(img).to(device).float().unsqueeze(0)).cpu().numpy()
# Initialize the network
# Alexnet
model = models.alexnet(pretrained=True).to(device).eval()
#print(model)
# Freeze the parameters: no gradients are stored, so they cannot be modified
for param in model.parameters():
    param.requires_grad = False
# advbox demo
m = PytorchModel(
    model, None,(-3, 3),
    channel_axis=1)
# Instantiate JSMA: max_iter is the iteration cap, theta the perturbation
# step size, max_perturbations_per_pixel caps how often one pixel is edited
attack = JSMA(m)
attack_config = {
    "max_iter": 2000,
    "theta": 0.3,
    "max_perturbations_per_pixel": 7,
    "fast":True,
    "two_pix":False
}
inputs=img
labels = None
print(inputs.shape)
adversary = Adversary(inputs, labels)
# Targeted attack
tlabel = 538
adversary.set_target(is_targeted_attack=True, target_label=tlabel)
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
    print(
        'attack success, adversarial_label=%d'
        % (adversary.adversarial_label))
    adv=adversary.adversarial_example[0]
else:
    print('attack failed')
print("jsma attack done")
# Convert back to an HWC uint8 image (undo normalization)
adv = adv.transpose(1, 2, 0)
adv = (adv * std) + mean
adv = adv * 256.0
adv = np.clip(adv, 0, 255).astype(np.uint8)
# Show the original image, the adversarial example, and their difference
# (grey marks pixels that did not change)
show_images_diff(orig,adversary.original_label,adv,adversary.adversarial_label)
```
相对FGSM、DeepFool,JSMA修改的像素个数少,即l0非常小
| github_jupyter |
# Amazon Lex Create CoffeeBot
***
Copyright [2017]-[2017] Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
***
### Prerequisites:
#### Identity and Access Management
The user or role that executes the commands must have permissions in AWS Identity and Access Management (IAM) to perform those actions. AWS provides a set of managed policies that help you get started quickly. For our example, you should apply the following managed policy to your user or role:
AmazonLexFullAccess
Be aware that we recommend you follow AWS IAM best practices for production implementations, which is out of scope for this workshop.
#### Coffee Bot
Use the code examples provided in this notebook to create the fictional Coffee Bot as documented in [this Github repository](https://github.com/awslabs/amz-ai-building-better-bots/blob/master/README.md).
```
import boto3
import IPython
import base64
import time
from pprint import pprint
aws_region = 'eu-west-1'
# Amazon Lex model-building client used by every cell below.
bot = boto3.client('lex-models', region_name=aws_region)
# Response keys to skip when printing.  Fixed: the original read
# ("ResponseMetadata") -- parentheses without a trailing comma make a plain
# string, not a 1-tuple, so `key in OUTPUT_BLACKLIST` would have done
# substring matching instead of element matching.
OUTPUT_BLACKLIST = ("ResponseMetadata",)
# Custom slot types: allowed values plus the Lex value-selection strategy.
slot_types = {
    'cafeBeverageType':{'values': {'mocha','latte machiato','cappucino','hot chocolate','frappucino'},
                        'resolution': 'TOP_RESOLUTION'},
    'cafeBeverageSize': {'values': {'small','medium','large'},
                         'resolution': 'TOP_RESOLUTION'},
    'cafeBeverageStrength':{'values': {'single','double','triple','quadruple'},
                            'resolution': 'TOP_RESOLUTION'},
    'cafeCreamerType': {'values': {'skim','half and half','almond','whole'},
                        'resolution': 'TOP_RESOLUTION'}}
intent_name = 'cafeOrderBeverage'
bot_name = 'CoffeeBot'
```
### create custom slot types
```
# Create each custom slot type in Lex, skipping ones that already exist.
for type_name in slot_types:
    all_types = bot.get_slot_types(
        nameContains=type_name
    )
    # only proceed if slot type doesn't exist yet
    if len(all_types['slotTypes'])== 0:
        # Lex expects enumerations as a list of {'value': ...} dicts.
        enumerations = []
        values = slot_types[type_name]['values']
        for val in values:
            enumerations.append({'value': val})
        response = bot.put_slot_type(
            name=type_name,
            enumerationValues=enumerations,
            valueSelectionStrategy=slot_types[type_name]['resolution']
        )
        if response['ResponseMetadata']['HTTPStatusCode'] ==200:
            print ("%s slot type created, checksum: %s" %(type_name,response['checksum']))
        else:
            pprint (response)
    else:
        print ("%s already exists, skipping slot type" %type_name)
```
### create order beverage intent
```
# Create (or update) the order-beverage intent: four slots, the utterances
# that trigger them, a follow-up prompt, and a ReturnIntent fulfillment
# (the client application handles the actual order).
response = bot.put_intent(
    name=intent_name,
    description='Handles coffee order requests',
    # Slots: BeverageType and BeverageSize are required; strength and
    # creamer are optional refinements.
    slots=[
        {
            'name': 'BeverageType',
            'slotConstraint': 'Required',
            'slotType': 'cafeBeverageType',
            'slotTypeVersion': '$LATEST',
            'valueElicitationPrompt': {
                'messages': [
                    {
                        'contentType': 'PlainText',
                        'content': 'What type of coffee would you like'
                    },
                    {
                        'contentType': 'PlainText',
                        'content': 'What type of coffee do you fancy?'
                    },
                ],
                'maxAttempts': 3
            },
            'priority': 1,
            'sampleUtterances': [
                'I would like to have a {BeverageType} please.','A {BeverageType} please.'
            ]
        },
        {
            'name': 'BeverageSize',
            'slotConstraint': 'Required',
            'slotType': 'cafeBeverageSize',
            'slotTypeVersion': '$LATEST',
            'valueElicitationPrompt': {
                'messages': [
                    {
                        'contentType': 'PlainText',
                        'content': 'What size would you like, small, medium or large?'
                    }
                ],
                'maxAttempts': 3
            },
            'priority': 2,
            'sampleUtterances': [
                'I have a {BeverageSize} one please.','{BeverageSize} please.'
            ]
        },
        {
            'name': 'BeverageStrength',
            'slotConstraint': 'Optional',
            'slotType': 'cafeBeverageStrength',
            'slotTypeVersion': '$LATEST',
            'valueElicitationPrompt': {
                'messages': [
                    {
                        'contentType': 'PlainText',
                        'content': 'How many shots do you want?'
                    }
                ],
                'maxAttempts': 3
            },
            'priority': 3,
            'sampleUtterances': [
                '{BeverageStrength} shot please.'
            ]
        },
        {
            'name': 'Creamer',
            'slotConstraint': 'Optional',
            'slotType': 'cafeCreamerType',
            'slotTypeVersion': '$LATEST',
            'valueElicitationPrompt': {
                'messages': [
                    {
                        'contentType': 'PlainText',
                        'content': 'What milk do you like?'
                    }
                ],
                'maxAttempts': 3
            },
            'priority': 4
        },
    ],
    # Phrases that trigger the intent; {SlotName} placeholders pre-fill slots.
    sampleUtterances=[
        'I would like to order a coffee',
        'I would like to order a {BeverageType}',
        'Can I order a {BeverageType} please',
        'Can I get a {BeverageSize} {Creamer} {BeverageStrength} {BeverageType}',
        'Can I get a {BeverageType}',
        'I would like a {BeverageSize} {BeverageType}'
    ],
    # After fulfillment, ask whether anything else is needed; the rejection
    # statement is spoken when the user declines.
    followUpPrompt={
        'prompt': {
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Sure. Is there anything else I can do for you?'
                }
            ],
            'maxAttempts': 2
        },
        'rejectionStatement': {
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Your {BeverageSize} {BeverageType} will be ready for pick-up soon.'
                },
                {
                    'contentType': 'PlainText',
                    'content': 'We will have your {BeverageSize} {BeverageType} ready for pick-up soon.'
                },
            ]
        }
    },
    # Return the filled slots to the client instead of invoking a Lambda.
    fulfillmentActivity={
        'type': 'ReturnIntent'
    }
)
if response['ResponseMetadata']['HTTPStatusCode'] ==200:
    print ('Intent created, checksum: %s' %response['checksum'])
else:
    pprint(response)
```
### create coffee bot
```
# Create the bot itself, wire in the intent, and kick off a build
# (processBehavior='BUILD' starts building immediately).
response = bot.put_bot(
    name=bot_name,
    description='Coffee order bot for Amazon Lex demo',
    intents=[
        {
            'intentName': intent_name,
            'intentVersion': '$LATEST'
        },
    ],
    # Spoken when Lex cannot map the user's input to an intent.
    clarificationPrompt={
        'messages': [
            {
                'contentType': 'PlainText',
                'content': "Sorry, can you please repeat that?"
            },
            {
                'contentType': 'PlainText',
                'content': "Sorry, but I didn't understand that. Could you try again, please?"
            },
        ],
        'maxAttempts': 2
    },
    # Spoken after the clarification attempts are exhausted.
    abortStatement={
        'messages': [
            {
                'contentType': 'PlainText',
                'content': 'Sorry, I could not understand. Goodbye.'
            },
        ]
    },
    idleSessionTTLInSeconds=300,
    voiceId='Salli',
    processBehavior='BUILD',
    locale='en-US',
    childDirected=False
)
if response['ResponseMetadata']['HTTPStatusCode'] ==200:
    print ('Bot created, checksum: %s' %response['checksum'])
    print ('Console URL: https://eu-west-1.console.aws.amazon.com/lex/home?region=eu-west-1#bot-editor:bot=%s' %bot_name)
else:
    pprint (response)
```
### open coffee bot in the AWS console
Open [Amazon Lex console](https://eu-west-1.console.aws.amazon.com/lex/home?region=eu-west-1#bots:) and wait for the build to complete before testing.
### backup code for debugging and clean-up
```
# get checksums for resource updated
response = bot.get_intent(
    name=intent_name,
    version='$LATEST')
# Fixed typo in the printed labels: "cheksum" -> "checksum".
pprint ('Intent checksum: %s' %response['checksum'])
response = bot.get_bot(
    name=bot_name,
    versionOrAlias='$LATEST')
pprint ('Bot checksum: %s' %response['checksum'])
# delete resources
def delete_bot():
    """Tear down everything this notebook created: bot, intent, slot types.

    Deletion order matters: the bot is removed before the intent it uses,
    and the intent before the slot types it references.  The sleeps
    presumably give Lex time to release each resource before the next
    delete call -- TODO confirm against the Lex API's eventual consistency.
    """
    response = bot.delete_bot(
        name=bot_name)
    if response['ResponseMetadata']['HTTPStatusCode'] ==204:
        print ("%s deleted" %bot_name)
    else:
        pprint (response)
    time.sleep(3)
    response = bot.delete_intent(
        name=intent_name)
    if response['ResponseMetadata']['HTTPStatusCode'] ==204:
        print ("%s intent deleted" %intent_name)
    else:
        pprint (response)
    for type_name in slot_types:
        time.sleep(3)
        response = bot.delete_slot_type(
            name=type_name
        )
        if response['ResponseMetadata']['HTTPStatusCode'] ==204:
            print ("%s slot type deleted" %type_name)
        else:
            pprint (response)
# Deliberately commented out: uncomment to actually delete the resources.
#delete_bot()
```
| github_jupyter |
# Group Data Analysis PCA 4th Trial - node velocity
* Version: '0.0.4'
* Date: 2021-05-03
* Author: Jea Kwon
* Description: Previously, the PCA analysis used avatar coordinates with the spine aligned on a plane;
this time the spine is aligned on an axis.
```
from avatarpy import Avatar
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cufflinks as cf
from scipy.stats import zscore
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
cf.go_offline(connected=True)
root = r"C:\Users\Jay\Desktop\avatar_young_adult\data\best1_20210503"
avatars = dict(
wt=dict(
young=[],
adult=[],
),
ko=dict(
young=[],
adult=[],
)
)
for path, subdirs, files in os.walk(root):
for name in files:
if name.lower().endswith('.csv'):
csv_path = os.path.join(path, name)
age = os.path.basename(os.path.dirname(path))
genotype = os.path.basename(os.path.dirname(os.path.dirname(path)))
avatars[genotype][age].append(Avatar(csv_path=csv_path, ID=name))
```
## Create walking event data
### Definition of walking
- Moved more than 5 cm in 1 second(20=Frame)
- More details take a look Group_Data_Analysis_PCA_1st_Trial
## Event Search function
```
def get_event_indices(boo, event_length):
    """Returns list of event indices.
    ex) [(start 1, end 1), (start 2, end 2), (start 3, end 3), ..., (start N, end N)]
    """
    # Positions where the boolean signal flips; each flip starts a new segment.
    change_points = np.flatnonzero(boo[1:] != boo[:-1]) + 1
    segments = np.split(np.arange(len(boo)), change_points)
    # Keep the segments that are True: they alternate, so the parity of the
    # first segment decides which half to take.
    true_segments = segments[::2] if boo[0] else segments[1::2]
    # Each event ends at the first True index and spans event_length frames.
    return [(seg[0] - event_length + 1, seg[0] + 1) for seg in true_segments]
```
## Validation of event search
- Take a look Group_Data_Analysis_PCA_2nd_Trial
## Collecting Event velocity data
```
# Peek at one recording: per-frame node velocities of the first WT young animal.
ava = avatars['wt']['young'][0]
ava.velocity
```
- Take a look Group_Data_Analysis_PCA_2nd_Trial
```
def _collect_walking_events(avatar_list, window=20, min_distance=5):
    """Gather flattened per-event velocity traces for every avatar given.

    An event is `window` consecutive frames whose summed anus-marker travel
    exceeds `min_distance` (cm, per the walking definition above).  Events
    clipped at the start of a recording (fewer than `window` frames) are
    skipped.  Replaces two copy-pasted loops; the adult loop also carried an
    unused `event_data = []` leftover.
    """
    events = []
    for avatar in avatar_list:
        boo = (avatar.distance['anus'].rolling(window).sum() > min_distance).values  # boolean array
        for start, end in get_event_indices(boo, window):
            x = avatar.velocity.loc[avatar.index[start:end]]
            if x.shape[0] != window:
                continue
            events.append(x.values.flatten())
    return np.stack(events)

wt_young_event_data = _collect_walking_events(avatars['wt']['young'])
wt_adult_event_data = _collect_walking_events(avatars['wt']['adult'])
```
total 1857 events acquired from 5 wt young mice with 5 session.
total 2248 events acquired from 5 wt adult mice with 5 session.
```
# Stack young and adult events into one matrix and z-score each feature.
X = np.concatenate([wt_young_event_data, wt_adult_event_data])
X_ = StandardScaler().fit_transform(X)
# Two-component PCA for visualisation.
pca = PCA(n_components=2)
pc = pca.fit_transform(X_)
# Group label per event: 0 = young, 1 = adult.
y = np.concatenate([np.zeros(wt_young_event_data.shape[0]), np.ones(wt_adult_event_data.shape[0])])
pc_y = np.c_[pc,y]
# NOTE(review): the column is named 'genotype' but the label actually
# separates age groups (both groups are wild-type) -- consider renaming.
df = pd.DataFrame(pc_y,columns=['PC1','PC2','genotype'])
sns.scatterplot(data=df,x='PC1',y='PC2',hue='genotype', alpha=0.2)
# plt.xlim(-10, 10)
# plt.ylim(-10, 10)
| github_jupyter |
```
number_list = [1.0,
2.0,
3.0,
2.0,
1.0,
2.0,
3.0,
4.0,
5.0,
1.0,
4.0,
3.0,
4.0,
2.0,
3.0,
2.0,
1.0,
5.0,
1.0,
2.0,
3.0,
2.0,
3.0,
4.0,
5.0,
4.0,
3.0,
5.0,
1.0,
4.0,
3.0,
2.0,
1.0,
2.0,
4.0,
2.0,
1.0,
2.0,
1.0,
2.0,
3.0,
4.0,
3.0,
2.0,
5.0,
3.0,
5.0,
2.0,
3.0,
4.0,
5.0,
4.0,
3.0,
1.0,
2.0,
1.0,
3.0,
4.0,
5.0,
4.0,
3.0,
2.0,
3.0,
1.0,
4.0,
5.0,
4.0,
3.0,
1.0,
4.0,
3.0,
2.0,
1.0,
5.0,
1.0,
2.0,
1.0,
5.0,
1.0,
2.0,
1.0,
4.0,
1.0,
3.0,
2.0,
5.0,
1.0,
2.0,
3.0,
2.0,
3.0,
1.0,
4.0,
3.0,
2.0,
5.0,
2.0,
4.0,
2.0,
1.0,
2.0,
1.0,
4.0,
5.0,
2.0,
4.0,
2.0,
3.0,
4.0,
2.0,
4.0,
3.0,
2.0,
4.0,
5.0,
1.0,
2.0,
3.0,
1.0,
3.0,
1.0,
3.0,
4.0,
1.0,
2.0,
1.0,
5.0,
1.0,
2.0,
3.0,
4.0,
1.0,
4.0,
2.0,
3.0,
1.0,
5.0,
1.0,
5.0,
1.0,
4.0,
1.0,
2.0,
3.0,
1.0,
5.0,
1.0,
3.0,
5.0,
4.0,
1.0,
4.0,
1.0,
2.0,
3.0,
5.0,
3.0,
5.0,
4.0,
1.0,
2.0,
3.0,
1.0,
2.0,
1.0,
3.0,
2.0,
3.0,
4.0,
2.0,
4.0,
1.0,
5.0,
4.0,
1.0,
5.0,
2.0,
3.0,
4.0,
3.0]
# Convert the floats to their integer string form ('1.0' -> '1'); later cells
# join these strings into comma-separated n-grams.
number_list = [str(int(value)) for value in number_list]
type(number_list[0])
number_list
def _joined_windows(values, size):
    """Comma-join every run of `size` consecutive values (sliding window)."""
    return [",".join(values[i:i + size]) for i in range(len(values) - size + 1)]

# One helper replaces four copy-pasted while-loops that differed only in
# the window size; outputs and prints are identical to the originals.
combination_list_2 = _joined_windows(number_list, 2)
print(combination_list_2)
combination_list_3 = _joined_windows(number_list, 3)
print(combination_list_3)
combination_list_4 = _joined_windows(number_list, 4)
print(combination_list_4)
combination_list_5 = _joined_windows(number_list, 5)
print(combination_list_5)
# Preserve the loop counter's final value from the original while-loop version,
# in case later cells read it.
x = len(number_list) - 4
from collections import Counter  # one-pass counts instead of O(n^2) list.count calls

def _unique_counts(items):
    """Return 'value: count' strings in first-appearance order, deduplicated.

    Matches the original append-then-dict.fromkeys behaviour exactly, but
    counts each value once up front rather than rescanning the list per item.
    """
    counts = Counter(items)
    return list(dict.fromkeys(f"{item}: {counts[item]}" for item in items))

my_dict_2 = _unique_counts(combination_list_2)
my_dict_3 = _unique_counts(combination_list_3)
my_dict_4 = _unique_counts(combination_list_4)
my_dict_5 = _unique_counts(combination_list_5)
print('\n')
print(my_dict_2)
print(my_dict_3)
print(my_dict_4)
print(my_dict_5)
```
| github_jupyter |
# Assignment 2 - Semi-gradient TD with a Neural Network
Welcome to Course 3 Programming Assignment 2. In the previous assignment, you implemented semi-gradient TD with State Aggregation for solving a **policy evaluation task**. In this assignment, you will implement **semi-gradient TD with a simple Neural Network** and use it for the same policy evaluation problem.
You will implement an agent to evaluate a fixed policy on the 500-State Randomwalk. As you may remember from the previous assignment, the 500-state Randomwalk includes 500 states. Each episode begins with the agent at the center and terminates when the agent goes far left beyond state 1 or far right beyond state 500. At each time step, the agent selects to move either left or right with equal probability. The environment determines how much the agent moves in the selected direction.
**In this assignment, you will:**
- Implement stochastic gradient descent method for state-value prediction.
- Implement semi-gradient TD with a neural network as the function approximator and Adam algorithm.
- Compare performance of semi-gradient TD with a neural network and semi-gradient TD with tile-coding.
## Packages
We import the following libraries that are required for this assignment:
- [numpy](www.numpy.org) : Fundamental package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) : Library for plotting graphs in Python.
- [RL-Glue](http://www.jmlr.org/papers/v10/tanner09a.html) : Library for reinforcement learning experiments.
- [tqdm](https://tqdm.github.io/) : A package to display progress bar when running experiments.
- BaseOptimizer : An abstract class that specifies the optimizer API for Agent.
- plot_script : Custom script to plot results.
- RandomWalkEnvironment : The Randomwalk environment script from Course 3 Assignment 1.
```
# Do not modify this cell!
# Import necessary libraries
# DO NOT IMPORT OTHER LIBRARIES - This will break the autograder.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import os, shutil
from tqdm import tqdm
from rl_glue import RLGlue
from environment import BaseEnvironment
from agent import BaseAgent
from optimizer import BaseOptimizer
import plot_script
from randomwalk_environment import RandomWalkEnvironment
```
## Section 1: Create semi-gradient TD with a Neural Network
In this section, you will implement an Agent that learns with semi-gradient TD with a neural network. You will use a neural network with one hidden layer. The input of the neural network is the one-hot encoding of the state number. We use the one-hot encoding of the state number instead of the state number itself because we do not want to build the prior knowledge that integer number inputs close to each other have similar values. The hidden layer contains 100 rectifier linear units (ReLUs) which pass their input if it is bigger than zero and return 0 otherwise. ReLU gates are commonly used in neural networks due to their nice properties such as the sparsity of the activation and having non-vanishing gradients. The output of the neural network is the estimated state value. It is a linear function of the hidden units as is commonly the case when estimating the value of a continuous target using neural networks.
The neural network looks like this:

For a given input, $s$, value of $s$ is computed by:
$$
\begin{align}
\psi &= sW^{[0]} + b^{[0]} \\
x &= \textit{max}(0, \psi) \\
v &= xW^{[1]} + b^{[1]}
\end{align}
$$
where $W^{[0]}$, $b^{[0]}$, $W^{[1]}$, $b^{[1]}$ are the parameters of the network and will be learned when training the agent.
## 1-1: Implement helper methods
Before implementing the agent, you first implement some helper functions which you will later use in agent's main methods.
### Implement `get_value()`
First, you will implement get_value() method which feeds an input $s$ into the neural network and returns the output of the network $v$ according to the equations above. To implement get_value(), take into account the following notes:
- `get_value()` gets the one-hot encoded state number denoted by s as an input.
- `get_value()` receives the weights of the neural network as input, denoted by weights and structured as an array of dictionaries. Each dictionary corresponds to weights from one layer of the neural network to the next. Each dictionary includes $W$ and $b$. The shape of the elements in weights are as follows:
- weights[0]["W"]: num_states $\times$ num_hidden_units
- weights[0]["b"]: 1 $\times$ num_hidden_units
- weights[1]["W"]: num_hidden_units $\times$ 1
- weights[1]["b"]: 1 $\times$ 1
- The input of the neural network is a sparse vector. To make computation faster, we take advantage of input sparsity. To do so, we provided a helper method `my_matmul()`. **Make sure that you use `my_matmul()` for all matrix multiplications except for element-wise multiplications in this notebook.**
- The max operator used for computing $x$ is element-wise.
```
def my_matmul(x1, x2):
    """
    Given matrices x1 and x2, return the multiplication of them
    """
    # Fast paths that exploit the sparse (one-hot style) inputs used in this
    # notebook; fall back to a dense matmul for everything else.
    nonzero = x1.nonzero()
    # Case 1: x1 is a single one-hot row vector -> the product is simply the
    # matching row of x2.
    if x1.shape[0] == 1 and len(nonzero[1]) == 1:
        return x2[nonzero[1], :]
    # Case 2: x1 is a column vector with a single nonzero entry -> only one
    # row of the result is nonzero.
    if x1.shape[1] == 1 and len(nonzero[0]) == 1:
        out = np.zeros((x1.shape[0], x2.shape[1]))
        out[nonzero[0], :] = x2 * x1[nonzero[0], 0]
        return out
    # General dense case.
    return np.matmul(x1, x2)
#GRADED FUNCTION: [get_value]
def get_value(s, weights):
    """
    Compute value of input s given the weights of a neural network
    """
    ### Compute the output of the neural network, v, for input s (3 lines)
    ### START CODE HERE ###
    # Forward pass: hidden pre-activation, ReLU, then linear output layer.
    hidden_pre = my_matmul(s, weights[0]['W']) + weights[0]['b']
    hidden = np.maximum(hidden_pre, 0)
    v = my_matmul(hidden, weights[1]['W']) + weights[1]['b']
    ### END CODE HERE ###
    return v
```
Run the following code to test your implementation of the `get_value()` function:
```
## Test Code for get_value() ##
# Suppose num_states = 5, num_hidden_layer = 1, and num_hidden_units = 10
num_hidden_layer = 1
# One-hot encoding of state 4 (of 5).
s = np.array([[0, 0, 0, 1, 0]])
# Load the fixed reference weights saved for this test.
weights_data = np.load("asserts/get_value_weights.npz")
weights = [dict() for i in range(num_hidden_layer+1)]
weights[0]["W"] = weights_data["W0"]
weights[0]["b"] = weights_data["b0"]
weights[1]["W"] = weights_data["W1"]
weights[1]["b"] = weights_data["b1"]
estimated_value = get_value(s, weights)
print ("Estimated value: {}".format(estimated_value))
# Compare against the known value for these reference weights.
assert(np.allclose(estimated_value, np.array([[-0.21915705]])))
print ("Passed the assert!")
```
**Expected output**:
Estimated value: [[-0.21915705]]
### Implement `get_gradient()`
You will also implement `get_gradient()` method which computes the gradient of the value function for a given input, using backpropagation. You will later use this function to update the value function.
As you know, we compute the value of a state $s$ according to:
$$
\begin{align}
\psi &= sW^{[0]} + b^{[0]} \\
x &= \textit{max}(0, \psi) \\
v &= xW^{[1]} + b^{[1]}
\end{align}
$$
To update the weights of the neural network ($W^{[0]}$, $b^{[0]}$, $W^{[1]}$, $b^{[1]}$), we compute the gradient of $v$ with respect to the weights according to:
$$
\begin{align}
\frac{\partial v}{\partial W^{[0]}} &= s^T(W^{[1]T} \odot I_{x>0}) \\
\frac{\partial v}{\partial b^{[0]}} &= W^{[1]T} \odot I_{x>0} \\
\frac{\partial v}{\partial W^{[1]}} &= x^T \\
\frac{\partial v}{\partial b^{[1]}} &= 1
\end{align}
$$
where $\odot$ denotes element-wise matrix multiplication and $I_{x>0}$ is the gradient of the ReLU activation function which is an indicator whose $i$th element is 1 if $x[i]>0$ and 0 otherwise.
```
#GRADED FUNCTION: [get_gradient]
def get_gradient(s, weights):
    """
    Given inputs s and weights, return the gradient of v with respect to the weights
    """
    ### Compute the gradient of the value function with respect to W0, b0, W1, b1 for input s (6~8 lines)
    # Each grads[i][param] has the same shape as weights[i][param].
    grads = [dict() for _ in range(len(weights))]
    ### START CODE HERE ###
    # Forward pass up to the hidden layer: psi = s W0 + b0.
    psi = my_matmul(s, weights[0]['W']) + weights[0]['b']
    # Gradient of the ReLU: indicator I_{psi > 0}.
    relu_mask = (psi > 0) * 1
    # Back-propagate through the output layer: W1^T element-wise masked.
    dv_dx = weights[1]['W'].T * relu_mask
    grads[0]["W"] = my_matmul(s.T, dv_dx)
    grads[0]["b"] = dv_dx
    # Hidden activation x = ReLU(psi) = mask * psi; dv/dW1 = x^T.
    grads[1]["W"] = (relu_mask * psi).T
    grads[1]["b"] = np.array([[1.0]])
    ### END CODE HERE ###
    return grads
```
Run the following code to test your implementation of the `get_gradient()` function:
```
## Test Code for get_gradient() ##
# Suppose num_states = 5, num_hidden_layer = 1, and num_hidden_units = 2
num_hidden_layer = 1
# One-hot encoding of state 4 (of 5).
s = np.array([[0, 0, 0, 1, 0]])
# Load the fixed reference weights saved for this test.
weights_data = np.load("asserts/get_gradient_weights.npz")
weights = [dict() for i in range(num_hidden_layer+1)]
weights[0]["W"] = weights_data["W0"]
weights[0]["b"] = weights_data["b0"]
weights[1]["W"] = weights_data["W1"]
weights[1]["b"] = weights_data["b1"]
grads = get_gradient(s, weights)
# Compare every gradient array against the saved reference answer.
grads_answer = np.load("asserts/get_gradient_grads.npz")
print("grads[0][\"W\"]\n", grads[0]["W"], "\n")
print("grads[0][\"b\"]\n", grads[0]["b"], "\n")
print("grads[1][\"W\"]\n", grads[1]["W"], "\n")
print("grads[1][\"b\"]\n", grads[1]["b"], "\n")
assert(np.allclose(grads[0]["W"], grads_answer["W0"]))
assert(np.allclose(grads[0]["b"], grads_answer["b0"]))
assert(np.allclose(grads[1]["W"], grads_answer["W1"]))
assert(np.allclose(grads[1]["b"], grads_answer["b1"]))
print("Passed the asserts!")
```
**Expected output**:
grads[0]["W"]
[[0. 0. ]
[0. 0. ]
[0. 0. ]
[0.76103773 0.12167502]
[0. 0. ]]
grads[0]["b"]
[[0.76103773 0.12167502]]
grads[1]["W"]
[[0.69198983]
[0.82403662]]
grads[1]["b"]
[[1.]]
### Implement stochastic gradient descent method for state-value prediction
In this section, you will implement the stochastic gradient descent (SGD) method for state-value prediction. Here is the basic SGD update for state-value prediction with TD:
$$\mathbf{w_{t+1}} = \mathbf{w_{t}} + \alpha \delta_t \nabla \hat{v}(S_t,\mathbf{w_{t}})$$
At each time step, we update the weights in the direction $g_t = \delta_t \nabla \hat{v}(S_t,\mathbf{w_t})$ using a fixed step-size $\alpha$. $\delta_t = R_{t+1} + \gamma \hat{v}(S_{t+1},\mathbf{w_{t}}) - \hat{v}(S_t,\mathbf{w_t})$ is the TD-error. $\nabla \hat{v}(S_t,\mathbf{w_{t}})$ is the gradient of the value function with respect to the weights.
The following cell includes the SGD class. You will complete the `update_weight()` method of SGD assuming that the weights and update g are provided.
**As you know, in this assignment, we structured the weights as an array of dictionaries. Note that the updates $g_t$, in the case of TD, is $\delta_t \nabla \hat{v}(S_t,\mathbf{w_t})$. As a result, $g_t$ has the same structure as $\nabla \hat{v}(S_t,\mathbf{w_t})$ which is also an array of dictionaries.**
```
#GRADED FUNCTION: [SGD]
class SGD(BaseOptimizer):
    def __init__(self):
        pass

    def optimizer_init(self, optimizer_info):
        """Setup for the optimizer.

        Set parameters needed to setup the stochastic gradient descent method.

        Assume optimizer_info dict contains:
        {
            step_size: float
        }
        """
        self.step_size = optimizer_info.get("step_size")

    def update_weights(self, weights, g):
        """
        Given weights and update g, return updated weights

        Applies the plain SGD rule w <- w + step_size * g to every parameter
        array, layer by layer; `weights` is modified in place and returned.
        """
        for i, layer_weights in enumerate(weights):
            for param in layer_weights:
                ### update weights (1 line)
                ### START CODE HERE ###
                layer_weights[param] += self.step_size * g[i][param]
                ### END CODE HERE ###
        return weights
```
Run the following code to test your implementation of the `update_weights()` function:
```
# Do not modify this cell!
## Test Code for update_weights() ##
# Suppose num_states = 5, num_hidden_layer = 1, and num_hidden_units = 2
num_hidden_layer = 1
# Load the fixed starting weights for this test.
weights_data = np.load("asserts/update_weights_weights.npz")
weights = [dict() for i in range(num_hidden_layer+1)]
weights[0]["W"] = weights_data["W0"]
weights[0]["b"] = weights_data["b0"]
weights[1]["W"] = weights_data["W1"]
weights[1]["b"] = weights_data["b1"]
# Load the fixed update direction g for this test.
g_data = np.load("asserts/update_weights_g.npz")
g = [dict() for i in range(num_hidden_layer+1)]
g[0]["W"] = g_data["W0"]
g[0]["b"] = g_data["b0"]
g[1]["W"] = g_data["W1"]
g[1]["b"] = g_data["b1"]
test_sgd = SGD()
optimizer_info = {"step_size": 0.3}
test_sgd.optimizer_init(optimizer_info)
updated_weights = test_sgd.update_weights(weights, g)
# updated weights asserts
updated_weights_answer = np.load("asserts/update_weights_updated_weights.npz")
print("updated_weights[0][\"W\"]\n", updated_weights[0]["W"], "\n")
print("updated_weights[0][\"b\"]\n", updated_weights[0]["b"], "\n")
print("updated_weights[1][\"W\"]\n", updated_weights[1]["W"], "\n")
print("updated_weights[1][\"b\"]\n", updated_weights[1]["b"], "\n")
assert(np.allclose(updated_weights[0]["W"], updated_weights_answer["W0"]))
assert(np.allclose(updated_weights[0]["b"], updated_weights_answer["b0"]))
assert(np.allclose(updated_weights[1]["W"], updated_weights_answer["W1"]))
assert(np.allclose(updated_weights[1]["b"], updated_weights_answer["b1"]))
print("Passed the asserts!")
```
**Expected output**:
updated_weights[0]["W"]
[[ 1.17899492 0.53656321]
[ 0.58008221 1.47666572]
[ 1.01909411 -1.10248056]
[ 0.72490408 0.06828853]
[-0.20609725 0.69034095]]
updated_weights[0]["b"]
[[-0.18484533 0.92844539]]
updated_weights[1]["W"]
[[0.70488257]
[0.58150878]]
updated_weights[1]["b"]
[[0.88467086]]
### Adam Algorithm
In this assignment, instead of using SGD for updating the weights, we use a more advanced algorithm called Adam. The Adam algorithm improves the SGD update with two concepts: adaptive vector step-sizes and momentum. It keeps estimates of the mean and second moment of the updates, denoted by $\mathbf{m}$ and $\mathbf{v}$ respectively:
$$\mathbf{m_t} = \beta_m \mathbf{m_{t-1}} + (1 - \beta_m)g_t \\
\mathbf{v_t} = \beta_v \mathbf{v_{t-1}} + (1 - \beta_v)g^2_t
$$
Given that $\mathbf{m}$ and $\mathbf{v}$ are initialized to zero, they are biased toward zero. To get unbiased estimates of the mean and second moment, Adam defines $\mathbf{\hat{m}}$ and $\mathbf{\hat{v}}$ as:
$$ \mathbf{\hat{m_t}} = \frac{\mathbf{m_t}}{1 - \beta_m^t} \\
\mathbf{\hat{v_t}} = \frac{\mathbf{v_t}}{1 - \beta_v^t}
$$
The weights are then updated as follows:
$$ \mathbf{w_t} = \mathbf{w_{t-1}} + \frac{\alpha}{\sqrt{\mathbf{\hat{v_t}}}+\epsilon} \mathbf{\hat{m_t}}
$$
When implementing the agent you will use the Adam algorithm instead of SGD because it is more efficient. We have already provided you the implementation of the Adam algorithm in the cell below. You will use it when implementing your agent.
```
class Adam(BaseOptimizer):
    def __init__(self):
        pass

    def optimizer_init(self, optimizer_info):
        """Setup for the optimizer.

        Set parameters needed to setup the Adam algorithm.

        Assume optimizer_info dict contains:
        {
            num_states: integer,
            num_hidden_layer: integer,
            num_hidden_units: integer,
            step_size: float,
            beta_m: float,
            beta_v: float,
            epsilon: float
        }
        """
        self.num_states = optimizer_info.get("num_states")
        self.num_hidden_layer = optimizer_info.get("num_hidden_layer")
        self.num_hidden_units = optimizer_info.get("num_hidden_units")
        # Adam hyper-parameters.
        self.step_size = optimizer_info.get("step_size")
        self.beta_m = optimizer_info.get("beta_m")
        self.beta_v = optimizer_info.get("beta_v")
        self.epsilon = optimizer_info.get("epsilon")
        # Layer sizes: input (one-hot state), hidden layer, scalar output.
        self.layer_size = np.array([self.num_states, self.num_hidden_units, 1])
        # First (m) and second (v) moment estimates, one dict of arrays per
        # layer, all initialized to zero.
        self.m = []
        self.v = []
        for i in range(self.num_hidden_layer + 1):
            w_shape = (self.layer_size[i], self.layer_size[i + 1])
            b_shape = (1, self.layer_size[i + 1])
            self.m.append({"W": np.zeros(w_shape), "b": np.zeros(b_shape)})
            self.v.append({"W": np.zeros(w_shape), "b": np.zeros(b_shape)})
        # Running products beta^t used for bias correction; start at beta^1.
        self.beta_m_product = self.beta_m
        self.beta_v_product = self.beta_v

    def update_weights(self, weights, g):
        """
        Given weights and update g, return updated weights
        """
        for i, layer_weights in enumerate(weights):
            for param in layer_weights:
                grad = g[i][param]
                # Update biased first and second moment estimates.
                self.m[i][param] = self.beta_m * self.m[i][param] + (1 - self.beta_m) * grad
                self.v[i][param] = self.beta_v * self.v[i][param] + (1 - self.beta_v) * (grad * grad)
                # Bias-corrected estimates.
                m_hat = self.m[i][param] / (1 - self.beta_m_product)
                v_hat = self.v[i][param] / (1 - self.beta_v_product)
                # Adaptive per-parameter step.
                layer_weights[param] += self.step_size * m_hat / (np.sqrt(v_hat) + self.epsilon)
        # Advance beta^t products once per call for the next bias correction.
        self.beta_m_product *= self.beta_m
        self.beta_v_product *= self.beta_v
        return weights
```
## 1-2: Implement Agent Methods
In this section, you will implement `agent_init()`, `agent_start()`, `agent_step()`, and `agent_end()`.
In `agent_init()`, you will:
- specify the neural network structure by filling self.layer_size with the size of the input layer, hidden layer, and output layer.
- initialize the network's parameters. We show the parameters as an array of dictionaries, self.weights, where each dictionary corresponds to weights from one layer to the next. Each dictionary includes $W$ and $b$. To initialize the parameters, you will use a normal distribution with mean 0 and standard deviation $\sqrt{\frac{2}{\text{# input of each node}}}$. This initialization heuristic is commonly used when using ReLU gates and helps keep the output of a neuron from getting too big or too small. To initialize the network's parameters, use **self.rand_generator.normal()** which draws random samples from a normal distribution. The parameters of self.rand_generator.normal are mean of the distribution, standard deviation of the distribution, and output shape in the form of tuple of integers.
In `agent_start()`, you will:
- specify self.last_state and self.last_action.
In `agent_step()` and `agent_end()`, you will:
- compute the TD error using $v(S_t)$ and $v(S_{t+1})$. To compute the value function for $S_t$ and $S_{t+1}$, you will get their one-hot encoding using `one_hot()` method that we provided below. You feed the one-hot encoded state number to the neural networks using `get_value()` method that you implemented above. Note that `one_hot()` method returns the one-hot encoding of a state as a numpy array of shape (1, num_states).
- retrieve the gradients using `get_gradient()` function that you implemented.
- use Adam_algorithm that we provided to update the neural network's parameters, self.weights.
- use `agent_policy()` method to select actions with. (only in `agent_step()`)
```
def one_hot(state, num_states):
    """
    Given num_state and a state, return the one-hot encoding of the state

    Returns a numpy array of shape (1, num_states).
    """
    # States are numbered 1..num_states, so state maps to index state - 1.
    encoding = np.zeros((1, num_states))
    encoding[0, int(state - 1)] = 1
    return encoding
#GRADED FUNCTION: [Agent]
class TDAgent(BaseAgent):
    # Semi-gradient TD(0) agent: one-hidden-layer ReLU network as the value
    # function approximator, trained with the Adam optimizer above, following
    # a fixed equiprobable-random left/right policy.
    def __init__(self):
        self.name = "td_agent"
        pass

    def agent_init(self, agent_info={}):
        """Setup for the agent called when the experiment first starts.

        Set parameters needed to setup the semi-gradient TD with a Neural Network.

        Assume agent_info dict contains:
        {
            num_states: integer,
            num_hidden_layer: integer,
            num_hidden_units: integer,
            step_size: float,
            discount_factor: float,
            beta_m: float,
            beta_v: float,
            epsilon: float,
            seed: int
        }
        """
        # Set random seed for weights initialization for each run
        self.rand_generator = np.random.RandomState(agent_info.get("seed"))
        # Set random seed for policy for each run
        self.policy_rand_generator = np.random.RandomState(agent_info.get("seed"))
        # Set attributes according to agent_info
        self.num_states = agent_info.get("num_states")
        self.num_hidden_layer = agent_info.get("num_hidden_layer")
        self.num_hidden_units = agent_info.get("num_hidden_units")
        self.discount_factor = agent_info.get("discount_factor")
        ### Define the neural network's structure (1 line)
        # Specify self.layer_size which shows the number of nodes in each layer
        # self.layer_size = np.array([None, None, None])
        # Hint: Checkout the NN diagram at the beginning of the notebook
        ### START CODE HERE ###
        self.layer_size = np.array([self.num_states, self.num_hidden_units, 1])
        ### END CODE HERE ###
        # Initialize the neural network's parameter (2 lines)
        self.weights = [dict() for i in range(self.num_hidden_layer+1)]
        for i in range(self.num_hidden_layer+1):
            ### Initialize self.weights[i]["W"] and self.weights[i]["b"] using self.rand_generator.normal()
            # Note that The parameters of self.rand_generator.normal are mean of the distribution,
            # standard deviation of the distribution, and output shape in the form of tuple of integers.
            # To specify output shape, use self.layer_size.
            ### START CODE HERE ###
            # He-style init: std = sqrt(2 / fan_in). The draw order (W then b,
            # layer by layer) must not change or seeded tests will fail.
            self.weights[i]['W'] = self.rand_generator.normal(0, np.sqrt(2/self.layer_size[i]), (self.layer_size[i], self.layer_size[i+1]))
            self.weights[i]['b'] = self.rand_generator.normal(0, np.sqrt(2/self.layer_size[i]), (1, self.layer_size[i+1]))
            ### END CODE HERE ###
        # Specify the optimizer
        self.optimizer = Adam()
        optimizer_info = {"num_states": agent_info["num_states"],
                          "num_hidden_layer": agent_info["num_hidden_layer"],
                          "num_hidden_units": agent_info["num_hidden_units"],
                          "step_size": agent_info["step_size"],
                          "beta_m": agent_info["beta_m"],
                          "beta_v": agent_info["beta_v"],
                          "epsilon": agent_info["epsilon"]}
        self.optimizer.optimizer_init(optimizer_info)
        self.last_state = None
        self.last_action = None

    def agent_policy(self, state):
        ### Set chosen_action as 0 or 1 with equal probability.
        # The fixed policy ignores `state`: left (0) or right (1), 50/50.
        chosen_action = self.policy_rand_generator.choice([0,1])
        return chosen_action

    def agent_start(self, state):
        """The first method called when the experiment starts, called after
        the environment starts.

        Args:
            state (Numpy array): the state from the
                environment's evn_start function.
        Returns:
            The first action the agent takes.
        """
        ### select action given state (using self.agent_policy()), and save current state and action (2 lines)
        # self.last_state = ?
        # self.last_action = ?
        ### START CODE HERE ###
        self.last_state = state
        self.last_action = self.agent_policy(state)
        ### END CODE HERE ###
        return self.last_action

    def agent_step(self, reward, state):
        """A step taken by the agent.

        Args:
            reward (float): the reward received for taking the last action taken
            state (Numpy array): the state from the
                environment's step based, where the agent ended up after the
                last step
        Returns:
            The action the agent is taking.
        """
        ### Compute TD error (5 lines)
        # delta = None
        ### START CODE HERE ###
        last_state = one_hot(self.last_state, self.num_states)
        v_last = get_value(last_state, self.weights)
        curr_state = one_hot(state, self.num_states)
        v = get_value(curr_state, self.weights)
        # NOTE: delta here holds gamma*v(S_{t+1}) - v(S_t) only; the reward is
        # added below when forming g, so (reward + delta) is the full TD error.
        delta = self.discount_factor*v - v_last
        ### END CODE HERE ###
        ### Retrieve gradients (1 line)
        # grads = None
        ### START CODE HERE ###
        # Semi-gradient: gradient is taken at the PREVIOUS state only.
        grads = get_gradient(last_state, self.weights)
        ### END CODE HERE ###
        ### Compute g (1 line)
        g = [dict() for i in range(self.num_hidden_layer+1)]
        for i in range(self.num_hidden_layer+1):
            for param in self.weights[i].keys():
                # g[i][param] = None
                ### START CODE HERE ###
                g[i][param] = (reward+delta)*grads[i][param]
                ### END CODE HERE ###
        ### update the weights using self.optimizer (1 line)
        # self.weights = None
        ### START CODE HERE ###
        self.weights = self.optimizer.update_weights(self.weights, g)
        ### END CODE HERE ###
        ### update self.last_state and self.last_action (2 lines)
        ### START CODE HERE ###
        self.last_state = state
        self.last_action = self.agent_policy(state)
        ### END CODE HERE ###
        return self.last_action

    def agent_end(self, reward):
        """Run when the agent terminates.

        Args:
            reward (float): the reward the agent received for entering the
                terminal state.
        """
        ### compute TD error (3 lines)
        # delta = None
        ### START CODE HERE ###
        last_state = one_hot(self.last_state, self.num_states)
        v_last = get_value(last_state, self.weights)
        # Terminal state has value 0, so the TD target is just the reward;
        # (reward + delta) below yields reward - v(S_t).
        delta = -1 * v_last
        ### END CODE HERE ###
        ### Retrieve gradients (1 line)
        # grads = None
        ### START CODE HERE ###
        grads = get_gradient(last_state, self.weights)
        ### END CODE HERE ###
        ### Compute g (1 line)
        g = [dict() for i in range(self.num_hidden_layer+1)]
        for i in range(self.num_hidden_layer+1):
            for param in self.weights[i].keys():
                # g[i][param] = None
                ### START CODE HERE ###
                g[i][param] = (reward+delta)*grads[i][param]
                ### END CODE HERE ###
        ### update the weights using self.optimizer (1 line)
        # self.weights = None
        ### START CODE HERE ###
        self.weights = self.optimizer.update_weights(self.weights, g)
        ### END CODE HERE ###

    def agent_message(self, message):
        # Only 'get state value' is supported; it evaluates the network at
        # every state 1..num_states and returns the value array.
        if message == 'get state value':
            state_value = np.zeros(self.num_states)
            for state in range(1, self.num_states + 1):
                s = one_hot(state, self.num_states)
                state_value[state - 1] = get_value(s, self.weights)
            return state_value
```
Run the following code to test your implementation of the `agent_init()` function:
```
## Test Code for agent_init() ##
# Small configuration (5 states, 2 hidden units) with a fixed seed so the
# initialized weights match the saved reference.
agent_info = {"num_states": 5,
              "num_hidden_layer": 1,
              "num_hidden_units": 2,
              "step_size": 0.25,
              "discount_factor": 0.9,
              "beta_m": 0.9,
              "beta_v": 0.99,
              "epsilon": 0.0001,
              "seed": 0
              }
test_agent = TDAgent()
test_agent.agent_init(agent_info)
print("layer_size: {}".format(test_agent.layer_size))
assert(np.allclose(test_agent.layer_size, np.array([agent_info["num_states"],
                                                    agent_info["num_hidden_units"],
                                                    1])))
# Check weight shapes first, then exact values against the saved answer.
print("weights[0][\"W\"] shape: {}".format(test_agent.weights[0]["W"].shape))
print("weights[0][\"b\"] shape: {}".format(test_agent.weights[0]["b"].shape))
print("weights[1][\"W\"] shape: {}".format(test_agent.weights[1]["W"].shape))
print("weights[1][\"b\"] shape: {}".format(test_agent.weights[1]["b"].shape), "\n")
assert(test_agent.weights[0]["W"].shape == (agent_info["num_states"], agent_info["num_hidden_units"]))
assert(test_agent.weights[0]["b"].shape == (1, agent_info["num_hidden_units"]))
assert(test_agent.weights[1]["W"].shape == (agent_info["num_hidden_units"], 1))
assert(test_agent.weights[1]["b"].shape == (1, 1))
print("weights[0][\"W\"]\n", (test_agent.weights[0]["W"]), "\n")
print("weights[0][\"b\"]\n", (test_agent.weights[0]["b"]), "\n")
print("weights[1][\"W\"]\n", (test_agent.weights[1]["W"]), "\n")
print("weights[1][\"b\"]\n", (test_agent.weights[1]["b"]), "\n")
agent_weight_answer = np.load("asserts/agent_init_weights_1.npz")
assert(np.allclose(test_agent.weights[0]["W"], agent_weight_answer["W0"]))
assert(np.allclose(test_agent.weights[0]["b"], agent_weight_answer["b0"]))
assert(np.allclose(test_agent.weights[1]["W"], agent_weight_answer["W1"]))
assert(np.allclose(test_agent.weights[1]["b"], agent_weight_answer["b1"]))
print("Passed the asserts!")
```
**Expected output**:
layer_size: [5 2 1]
weights[0]["W"] shape: (5, 2)
weights[0]["b"] shape: (1, 2)
weights[1]["W"] shape: (2, 1)
weights[1]["b"] shape: (1, 1)
weights[0]["W"]
[[ 1.11568467 0.25308164]
[ 0.61900825 1.4172653 ]
[ 1.18114738 -0.6180848 ]
[ 0.60088868 -0.0957267 ]
[-0.06528133 0.25968529]]
weights[0]["b"]
[[0.09110115 0.91976332]]
weights[1]["W"]
[[0.76103773]
[0.12167502]]
weights[1]["b"]
[[0.44386323]]
Run the following code to test your implementation of the `agent_start()` function:
```
# Do not modify this cell!
## Test Code for agent_start() ##
# Full-size configuration (500 states) with a fixed seed so the policy's
# first action is deterministic.
agent_info = {"num_states": 500,
              "num_hidden_layer": 1,
              "num_hidden_units": 100,
              "step_size": 0.1,
              "discount_factor": 1.0,
              "beta_m": 0.9,
              "beta_v": 0.99,
              "epsilon": 0.0001,
              "seed": 10
              }
# Suppose state = 250
state = 250
test_agent = TDAgent()
test_agent.agent_init(agent_info)
test_agent.agent_start(state)
print("Agent state: {}".format(test_agent.last_state))
print("Agent selected action: {}".format(test_agent.last_action))
assert(test_agent.last_state == 250)
assert(test_agent.last_action == 1)
print("Passed the asserts!")
```
**Expected output**:
Agent state: 250
Agent selected action: 1
Run the following code to test your implementation of the `agent_step()` function:
```
# Do not modify this cell!
## Test Code for agent_step() ##
agent_info = {"num_states": 5,
              "num_hidden_layer": 1,
              "num_hidden_units": 2,
              "step_size": 0.1,
              "discount_factor": 1.0,
              "beta_m": 0.9,
              "beta_v": 0.99,
              "epsilon": 0.0001,
              "seed": 0
              }
test_agent = TDAgent()
test_agent.agent_init(agent_info)
# load initial weights
agent_initial_weight = np.load("asserts/agent_step_initial_weights.npz")
test_agent.weights[0]["W"] = agent_initial_weight["W0"]
test_agent.weights[0]["b"] = agent_initial_weight["b0"]
test_agent.weights[1]["W"] = agent_initial_weight["W1"]
test_agent.weights[1]["b"] = agent_initial_weight["b1"]
# load m and v for the optimizer
m_data = np.load("asserts/agent_step_initial_m.npz")
test_agent.optimizer.m[0]["W"] = m_data["W0"]
test_agent.optimizer.m[0]["b"] = m_data["b0"]
test_agent.optimizer.m[1]["W"] = m_data["W1"]
test_agent.optimizer.m[1]["b"] = m_data["b1"]
v_data = np.load("asserts/agent_step_initial_v.npz")
test_agent.optimizer.v[0]["W"] = v_data["W0"]
test_agent.optimizer.v[0]["b"] = v_data["b0"]
test_agent.optimizer.v[1]["W"] = v_data["W1"]
test_agent.optimizer.v[1]["b"] = v_data["b1"]
# Assume the agent started at State 3
start_state = 3
test_agent.agent_start(start_state)
# Assume the reward was 10.0 and the next state observed was State 1
reward = 10.0
next_state = 1
test_agent.agent_step(reward, next_state)
# updated weights asserts
print("updated_weights[0][\"W\"]\n", test_agent.weights[0]["W"], "\n")
print("updated_weights[0][\"b\"]\n", test_agent.weights[0]["b"], "\n")
print("updated_weights[1][\"W\"]\n", test_agent.weights[1]["W"], "\n")
print("updated_weights[1][\"b\"]\n", test_agent.weights[1]["b"], "\n")
agent_updated_weight_answer = np.load("asserts/agent_step_updated_weights.npz")
assert(np.allclose(test_agent.weights[0]["W"], agent_updated_weight_answer["W0"]))
assert(np.allclose(test_agent.weights[0]["b"], agent_updated_weight_answer["b0"]))
assert(np.allclose(test_agent.weights[1]["W"], agent_updated_weight_answer["W1"]))
assert(np.allclose(test_agent.weights[1]["b"], agent_updated_weight_answer["b1"]))
# last_state and last_action assert
print("Agent last state:", test_agent.last_state)
print("Agent last action:", test_agent.last_action, "\n")
assert(test_agent.last_state == 1)
assert(test_agent.last_action == 1)
print ("Passed the asserts!")
```
**Expected output**:
updated_weights[0]["W"]
[[ 1.10893459 0.30763738]
[ 0.63690565 1.14778865]
[ 1.23397791 -0.48152743]
[ 0.72792093 -0.15829832]
[ 0.15021996 0.39822163]]
updated_weights[0]["b"]
[[0.29798822 0.96254535]]
updated_weights[1]["W"]
[[0.76628754]
[0.11486511]]
updated_weights[1]["b"]
[[0.58530057]]
Agent last state: 1
Agent last action: 1
Run the following code to test your implementation of the `agent_end()` function:
```
# Do not modify this cell!
## Test Code for agent_end() ##
agent_info = {"num_states": 5,
              "num_hidden_layer": 1,
              "num_hidden_units": 2,
              "step_size": 0.1,
              "discount_factor": 1.0,
              "beta_m": 0.9,
              "beta_v": 0.99,
              "epsilon": 0.0001,
              "seed": 0
              }
test_agent = TDAgent()
test_agent.agent_init(agent_info)
# load initial weights
agent_initial_weight = np.load("asserts/agent_end_initial_weights.npz")
test_agent.weights[0]["W"] = agent_initial_weight["W0"]
test_agent.weights[0]["b"] = agent_initial_weight["b0"]
test_agent.weights[1]["W"] = agent_initial_weight["W1"]
test_agent.weights[1]["b"] = agent_initial_weight["b1"]
# load m and v for the optimizer
m_data = np.load("asserts/agent_step_initial_m.npz")
test_agent.optimizer.m[0]["W"] = m_data["W0"]
test_agent.optimizer.m[0]["b"] = m_data["b0"]
test_agent.optimizer.m[1]["W"] = m_data["W1"]
test_agent.optimizer.m[1]["b"] = m_data["b1"]
v_data = np.load("asserts/agent_step_initial_v.npz")
test_agent.optimizer.v[0]["W"] = v_data["W0"]
test_agent.optimizer.v[0]["b"] = v_data["b0"]
test_agent.optimizer.v[1]["W"] = v_data["W1"]
test_agent.optimizer.v[1]["b"] = v_data["b1"]
# Assume the agent started at State 4
start_state = 4
test_agent.agent_start(start_state)
# Assume the reward was 10.0 and reached the terminal state
reward = 10.0
test_agent.agent_end(reward)
# updated weights asserts
print("updated_weights[0][\"W\"]\n", test_agent.weights[0]["W"], "\n")
print("updated_weights[0][\"b\"]\n", test_agent.weights[0]["b"], "\n")
print("updated_weights[1][\"W\"]\n", test_agent.weights[1]["W"], "\n")
print("updated_weights[1][\"b\"]\n", test_agent.weights[1]["b"], "\n")
agent_updated_weight_answer = np.load("asserts/agent_end_updated_weights.npz")
assert(np.allclose(test_agent.weights[0]["W"], agent_updated_weight_answer["W0"]))
assert(np.allclose(test_agent.weights[0]["b"], agent_updated_weight_answer["b0"]))
assert(np.allclose(test_agent.weights[1]["W"], agent_updated_weight_answer["W1"]))
assert(np.allclose(test_agent.weights[1]["b"], agent_updated_weight_answer["b1"]))
print ("Passed the asserts!")
```
**Expected output:**
updated_weights[0]["W"]
[[ 1.10893459 0.30763738]
[ 0.63690565 1.14778865]
[ 1.17531054 -0.51043162]
[ 0.75062903 -0.13736817]
[ 0.15021996 0.39822163]]
updated_weights[0]["b"]
[[0.30846523 0.95937346]]
updated_weights[1]["W"]
[[0.68861703]
[0.15986364]]
updated_weights[1]["b"]
[[0.586074]]
## Section 2 - Run Experiment
Now that you implemented the agent, we can run the experiment. Similar to Course 3 Programming Assignment 1, we will plot the learned state value function and the learning curve of the TD agent. To plot the learning curve, we use Root Mean Squared Value Error (RMSVE).
## 2-1: Run Experiment for Semi-gradient TD with a Neural Network
We have already provided you the experiment/plot code, so you can go ahead and run the two cells below.
Note that running the cell below will take **approximately 12 minutes**.
```
# Do not modify this cell!
true_state_val = np.load('data/true_V.npy')
state_distribution = np.load('data/state_distribution.npy')
def calc_RMSVE(learned_state_val):
    """Return the Root Mean Squared Value Error of a learned value function.

    Squared errors against the precomputed true state values are weighted
    by the state-visitation distribution before averaging and taking the
    square root. Relies on the module-level arrays ``true_state_val`` and
    ``state_distribution`` loaded above.
    """
    assert(len(true_state_val) == len(learned_state_val) == len(state_distribution))
    squared_errors = np.square(true_state_val - learned_state_val)
    weighted_mean = np.sum(state_distribution * squared_errors)
    return np.sqrt(weighted_mean)
# Define function to run experiment
def run_experiment(environment, agent, environment_parameters, agent_parameters, experiment_parameters):
    """Train the semi-gradient TD agent on the random-walk environment.

    Runs ``num_runs`` independent runs (each seeded by its run index).
    Each run trains for ``num_episodes`` episodes, recording RMSVE once
    before training and then every ``episode_eval_frequency`` episodes.
    Per-run RMSVE curves and final learned state values are written under
    ``results/`` for later plotting and submission.
    """
    rl_glue = RLGlue(environment, agent)
    # save rmsve at the end of each episode
    # shape: (runs, number of evaluation points); the extra +1 column
    # holds the RMSVE computed before any training
    agent_rmsve = np.zeros((experiment_parameters["num_runs"],
                            int(experiment_parameters["num_episodes"]/experiment_parameters["episode_eval_frequency"]) + 1))
    # save learned state value at the end of each run
    agent_state_val = np.zeros((experiment_parameters["num_runs"],
                                environment_parameters["num_states"]))
    env_info = {"num_states": environment_parameters["num_states"],
                "start_state": environment_parameters["start_state"],
                "left_terminal_state": environment_parameters["left_terminal_state"],
                "right_terminal_state": environment_parameters["right_terminal_state"]}
    agent_info = {"num_states": environment_parameters["num_states"],
                  "num_hidden_layer": agent_parameters["num_hidden_layer"],
                  "num_hidden_units": agent_parameters["num_hidden_units"],
                  "step_size": agent_parameters["step_size"],
                  "discount_factor": environment_parameters["discount_factor"],
                  "beta_m": agent_parameters["beta_m"],
                  "beta_v": agent_parameters["beta_v"],
                  "epsilon": agent_parameters["epsilon"]
                  }
    print('Setting - Neural Network with 100 hidden units')
    os.system('sleep 1')  # brief pause so the print renders before tqdm's bar
    # one agent setting
    for run in tqdm(range(1, experiment_parameters["num_runs"]+1)):
        # re-seed both environment and agent per run for reproducibility
        env_info["seed"] = run
        agent_info["seed"] = run
        rl_glue.rl_init(agent_info, env_info)
        # Compute initial RMSVE before training
        current_V = rl_glue.rl_agent_message("get state value")
        agent_rmsve[run-1, 0] = calc_RMSVE(current_V)
        for episode in range(1, experiment_parameters["num_episodes"]+1):
            # run episode
            rl_glue.rl_episode(0) # no step limit
            if episode % experiment_parameters["episode_eval_frequency"] == 0:
                current_V = rl_glue.rl_agent_message("get state value")
                agent_rmsve[run-1, int(episode/experiment_parameters["episode_eval_frequency"])] = calc_RMSVE(current_V)
            elif episode == experiment_parameters["num_episodes"]: # if last episode
                # refresh current_V so the final state values get stored
                # (when num_episodes is a multiple of the eval frequency,
                # the branch above has already refreshed it)
                current_V = rl_glue.rl_agent_message("get state value")
        # store the state values learned by the end of this run
        agent_state_val[run-1, :] = current_V
    # e.g. "td_agent" with any '.' characters stripped for a safe file name
    save_name = "{}".format(rl_glue.agent.name).replace('.','')
    if not os.path.exists('results'):
        os.makedirs('results')
    # save avg. state value
    np.save("results/V_{}".format(save_name), agent_state_val)
    # save avg. rmsve
    np.savez("results/RMSVE_{}".format(save_name), rmsve = agent_rmsve,
             eval_freq = experiment_parameters["episode_eval_frequency"],
             num_episodes = experiment_parameters["num_episodes"])
# Run Experiment

# Experiment parameters
experiment_parameters = {
    "num_runs" : 20,
    "num_episodes" : 1000,
    "episode_eval_frequency" : 10 # evaluate every 10 episode
}

# Environment parameters
# NOTE(review): presumably walk states are 1..500 with terminals at 0 and
# 501 on either end - confirm against RandomWalkEnvironment.
environment_parameters = {
    "num_states" : 500,
    "start_state" : 250,
    "left_terminal_state" : 0,
    "right_terminal_state" : 501,
    "discount_factor" : 1.0
}

# Agent parameters
# step_size, beta_m, beta_v, epsilon configure the Adam optimizer
agent_parameters = {
    "num_hidden_layer": 1,
    "num_hidden_units": 100,
    "step_size": 0.001,
    "beta_m": 0.9,
    "beta_v": 0.999,
    "epsilon": 0.0001,
}

current_env = RandomWalkEnvironment
current_agent = TDAgent

# run experiment
run_experiment(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters)

# plot result
plot_script.plot_result(["td_agent"])
# bundle the saved results for the data-file grader submission
shutil.make_archive('results', 'zip', 'results')
```
You plotted the learning curve for 1000 episodes. As you can see the RMSVE is still decreasing. Here we provide the pre-computed result for 5000 episodes and 20 runs so that you can see the performance of semi-gradient TD with a neural network after being trained for a long time.

Does semi-gradient TD with a neural network find a good approximation within 5000 episodes?
As you may remember from the previous assignment, semi-gradient TD with 10-state aggregation converged within 100 episodes. Why is TD with a neural network slower?
Would it be faster if we decrease the number of hidden units? Or what about if we increase the number of hidden units?
## 2-2: Compare Performance of Semi-gradient TD with a Neural Network and Semi-gradient TD with Tile-coding
In this section, we compare the performance of semi-gradient TD with a Neural Network and semi-gradient TD with tile-coding. Tile-coding is a kind of coarse coding that uses multiple overlapping partitions of the state space to produce features. For tile-coding, we used 50 tilings each with 6 tiles. We set the step-size for semi-gradient TD with tile-coding to $\frac{0.1}{\text{# tilings}}$. See the figure below for the comparison between semi-gradient TD with tile-coding and semi-gradient TD with a neural network and Adam algorithm. This result is for 5000 episodes and 20 runs:

How are the results?
Semi-gradient TD with tile-coding is much faster than semi-gradient TD with a neural network. Why?
Which method has a lower RMSVE at the end of 5000 episodes?
### Wrapping up!
You have successfully implemented Course 3 Programming Assignment 2.
You have implemented **semi-gradient TD with a Neural Network and Adam algorithm** in 500-state Random Walk.
You also compared semi-gradient TD with a neural network and semi-gradient TD with tile-coding.
From the experiments and lectures, you should be more familiar with some of the strengths and weaknesses of using neural networks as the function approximator for an RL agent. On one hand, neural networks are powerful function approximators capable of representing a wide class of functions. They are also capable of producing features without exclusively relying on hand-crafted mechanisms. On the other hand, compared to a linear function approximator with tile-coding, neural networks can be less sample efficient. When implementing your own Reinforcement Learning agents, you may consider these strengths and weaknesses to choose the proper function approximator for your problems.
---
**Note**: Apart from using the 'Submit' button in the notebook, you have to submit an additional `zip` file containing the 'npy' files that were generated from running the experiment cells. To do so:
1. Generate the zip file by running the experiment cells in the notebook. On the top of the notebook, navigate to 'File->Open' to open the directory view of this assignment. Select "results.zip" and click on "Download". Alternatively, you can download just the results folder and run "zip -jr results.zip results/" (The flag 'j' is required by the grader!).
2. Go to the "My submission" tab and click on "+ Create submission".
3. Click on "C3M2 Data-file Grader" and upload your `results.zip`.
**These account for 40% of the marks, so don't forget to do so!**
| github_jupyter |
# Lab Eight - Space Weather With Objects
## *Bootstrap Analysis and Object Oriented Data Analysis*
You will learn how to:
- Perform bootstrap analysis (or resampling).
- Set up a bootstrap analysis problem.
- Analyze bootstrap outputs.
- Plot bootstrap analysis results.
- Use styles in Python plots (optional).
- See alternative plotting packages (optional).
- Basics of object oriented plotting (optional).
By the end of this lab you should be able to: perform bootstrap analysis on linear problems and analyze the output. We are learning more blatant object oriented programming in this lecture as well which is optional. For your final projects you are welcome to use object oriented as well as styles plotting, but it will not be required.
Additional materials for reading:
- Lecture Notes - Lectures 7 & 8 & 9
- Igual & Seguí Chapter 4 for discussion in 4.3.1.3 of resampling.
- For programming tips and object oriented programming see the online [Python Textbook](http://python-textbok.readthedocs.io/en/1.0/Introduction.html) and [Python Documentation](https://docs.python.org/3/tutorial/classes.html).
- For a handy look at styles in matplotlib see [here](https://matplotlib.org/gallery/style_sheets/style_sheets_reference.html) and [here](https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html).
- Seaborn at the documentation - https://seaborn.pydata.org/
## Part A: Space Weather Review from Last Time
In Lab 7 we learned about space weather which we had defined as follows:
"Space-weather events are naturally occurring phenomena that have the potential to disrupt electric power systems; satellite, aircraft, and spacecraft operations; telecommunications; position, navigation, and timing services; and other technologies and infrastructures..." Source - [National Space Weather Action Plan](https://obamawhitehouse.archives.gov/sites/default/files/microsites/ostp/final_nationalspaceweatheractionplan_20151028.pdf)
"Space weather refers to the environmental conditions in Earth's magnetosphere (e.g. magnetic environment), ionosphere and thermosphere (e.g. upper atmosphere) due to the Sun and the solar wind that can influence the functioning and reliability of spaceborne and ground-based systems and services or endanger property or human health." Source - [European Space Agency](http://swe.ssa.esa.int/what-is-space-weather)
In Lab 7 we looked at how the Sun, through the solar wind and magnetic field, influenced near-space conditions at Earth — specifically, how f10.7 and sunspots could be used as a proxy to evaluate solar activity and its impact on Dst.
We also used the Pearson correlation coefficient to analyze linear regression and implemented our very own classification problems with ROC analysis.
## What questions do you have about space weather or your midterm projects?
<img src="./Images/SpaceWeatherNOAA.jpg" alt="SpaceWeatherInfographic" width="800px"/>
Source: [NOAA](http://www.noaa.gov/explainers/space-weather-storms-from-sun)
----
## Part B: Lecture of Bootstrap Method
In this lab we will continue using space weather data, but implement the bootstrap method we learned about in Lecture 9 on Monday.
The bootstrap method is where you repeatedly resample a dataset with replacement, ie you can have repeat values, to gain an estimation of error.
## Take 3 minutes to chat with a neighbor. What might be some reasons we might want to resample a dataset and do a bootstrap analysis to gain an estimation of error? Recall this is beyond just the error on the y-intercept, the root mean square error, or the slope values.
### See below for the example within the class lecture notes.
<img src="./Images/ExampleBootstrap.png" alt="ExampleBootstrapImage" height="400px" width="350px"/>
----
# *Part 1 - Brief Intro to Object Oriented Programming With Solar Cycle Data*
Let's take a look again at the solar cycle information we had from before. This time we are going to see a new way to plot.
We've been using object oriented programming so far in this class, but in a very "see it and then copy it" way.
This is a great way to start with object oriented and in effect you are ready to learn it more explicitly.
We are showing you this because it can be a very efficient and effective way to code (and useful when Googling for coding help in the future after you are done with this course).
So far we've seen object oriented programming at work with Pandas dataframes and other types of data structures (numpy ndarrays, dictionaries etc).
We have used the following in our work:
### Classes
- A way to bundle data and functions. Creating a type of class creates an "object".
### Objects
- In Python everything is an object and every object is an "instance" of a class.
### Methods
- These are functions that "belong" to a class.
### Attributes
- These are data that "belong" to a class.
You likely saw these words as you found errors as you wrote up your homework :). You commonly run into these when you try to do something that can not be done to an "object" of a certain type. Let's see this in action.
```
#import required modules
import numpy as np #for accuracy
import datetime as dt #for datetime objects
import matplotlib.pyplot as plt #for pretty plotting
import matplotlib as mpl #NEW IN THIS LAB - for styles
import pandas as pd #for data structures
from scipy import stats #for linear regression
import matplotlib.dates as mdt #for manipulation of dates in matplotlib
from matplotlib.ticker import MultipleLocator #for pretty plotting
import importlib #for reimporting libaries / package
import linerror as lr #same as Lab6 - for calculating linear fits
import externalfunctions as extFunc #same as Lab7 - for importing
from scipy.stats import skew, kurtosis, norm #same as in Lab5 - for normal dist. stats
import omni #NEW IN THIS LAB - example
#object oriented instance of omni class
import bootstrap #NEW IN THIS LAB - collection
#of functions to calculate bootstrap
#for plotting in notebook
%matplotlib inline
```
## Part A. Introduction to Object Oriented
```
#download daily, hourly, and monthly resolution data
#(same loader applied to three time cadences of the OMNI dataset)
hourlyOmni = extFunc.loadOmniData('./Data/omni2_Hourly1980_2018.lst')
dailyOmni = extFunc.loadOmniData('./Data/omni2_Daily1980_2018.lst')
monthlyOmni = extFunc.loadOmniData('./Data/omni2_27Day1980_2018.lst')

#this is the data we were using last lecture
#This data is from the OMNI site - https://omniweb.gsfc.nasa.gov/html/ow_data.html#1
#"primarily a 1963-to-current compilation of hourly-averaged, near-Earth solar
#wind magnetic field and plasma parameter data from several spacecraft in geocentric or
#L1 (Lagrange point) orbits. The data have been extensively cross compared, and,
#for some spacecraft and parameters, cross-normalized. Time-shifts of higher
#resolution data to expected magnetosphere-arrival times are done for data from
#spacecraft in L1 orbits (ISEE 3, Wind, ACE), prior to taking hourly averages."
```
### Let's find out what class hourly OMNI belong to...
We can use the type() function to find this -
```
print(type(hourlyOmni))
```
### Activity for you! Check the class of hourlyOmni['Dst'], and then of the first value within the column.
Try this on your own and then compare with a partner.
Hint: you will want to use the .loc method for the first value in the column, the first value can be accessed using the index.
```
###------write your commands here
```
### Now let's access the hourlyOmni dataframe's attributes.
You can see all the different options for attributes in the help documentation here https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
```
#lets access our objects attributes - or data - through .shape, this accesses
#information about the object hourlyOmni
print(hourlyOmni.shape)
```
### Now let's access the hourlyOmni dataframe's methods.
You can see all the different options for methods in the SAME documentation as above.
```
#In the Ice Sheet lab, we used .describe() to summarize
#a dataframe. This was us using a method (note the parentheses -
#we call it, unlike the .shape attribute above).
print(hourlyOmni.describe())
```
### Notice the syntax difference between .shape and .describe()? What do you think would happen if we tried .shape()? Try it below!
```
#UNCOMMENT THIS LINE
#print(hourlyOmni.shape())
```
### Why did we get an error for .shape() rather than .shape?
-
-
-
-
### So far we've used all these concepts before. We are just using different terminology now to be more accurate.
## Part B. Introduction to Object Oriented Concepts
In general there are three themes or ideas we use in our object-oriented programming.
### Encapsulation.
- This is the hallmark of object oriented. Grouping data, and functions, together as objects or "encapsulating" them. This means that programming can now be objects that you can access, do things with, not just a list. We've taken advantage of this in our use of Pandas dataframes.
### Inheritance.
- A trickle down hierarchy of data types. Allows a data type to acquire (inherit) the properties of another.
- A real world example - beets are a vegetable, vegetables are food, vegetables are edible, therefore, a beet is edible.
### Polymorphism.
- You can use the same interface, for different types/classes. This allows for functions to use different types at different times.
- A real world example - the steering wheel.
- If you have different types of steering wheels in cars, power steering, manual steering, etc you would expect to use them all to drive a car the same way.
## Part C. Putting it Together - Object Oriented Creation Example
Let's give this a shot - and see how this works if we want to make our OWN classes.
```
class Bird():
    """A toy class used to demonstrate defining our own types.

    Note the Python convention: class names start with a capital letter.
    """

    def fly(self):
        """Report that the bird is flying.

        A method is just a function bound to the class - the first
        parameter, self, is the instance it acts on.
        """
        print('The bird is out to fly.')


#make an instance of the Bird class, then invoke its fly() method
testBird = Bird()
testBird.fly()
```
### Now let's try something slightly more complicated with class definitions.
```
class Bird():
    """Demonstration class showing initialization with attributes.

    The species attribute defaults to 'NotInitialized' when no value
    is supplied at construction time.
    """

    def __init__(self, species = 'NotInitialized'):
        #store the constructor argument as an instance attribute
        self.species = species

    def fly(self):
        """Announce which species of bird is out flying."""
        announcement = 'The {} is out to fly.'.format(self.species)
        print(announcement)


#construct with the default species and inspect the stored attribute
testBird = Bird()
print(testBird.species)

#construct again, this time naming the species explicitly
testBird = Bird(species = 'Eagle')
print(testBird.species)

#and call the method on the Eagle instance
testBird.fly()
```
### This is new - we've not yet defined our own classes. You can start to see where this might be useful: let's say you always need objects to be able to fly — then this is a quick way to achieve that.
Let's look at something we have seen before.
## Part D. How to Plot with Object Oriented.
So far we've used plotting in a very object oriented way - IE this is pretty much the same. There are some short cuts that you can implement but the below code is **exactly the same as in Lab 7**, but now you can start to see where we've been using object-oriented programming this whole time.
```
#Create an instance of a matplotlib figure class -
fig = plt.figure(figsize=(8, 5))
#set the title
fig.suptitle('Long Duration Solar Behavior', fontsize=20)
#Create an instance of a matplotlib gridspec class
gs = plt.GridSpec(1, 1, hspace=0.0, wspace=0.2, right = 0.85)
#add subplots - adds an instance of a subplot type to
#the figure object
ax1 = fig.add_subplot(gs[0,0])
#this ties together ax1 and ax2 -
ax2 = ax1.twinx()
#set up titles for axis
ax1.set_ylabel('Number of Sunspots', fontsize = 20, color = 'grey')
ax2.set_ylabel('f10.7 Standard Flux Units (sfu)', fontsize = 20,
color = '#983b59', rotation = 270, labelpad = 30)
ax1.plot(dailyOmni.index.to_pydatetime(), dailyOmni['SunspotNumber'],
color = 'grey', lw = 1.0)
ax2.plot(dailyOmni.index.to_pydatetime(), dailyOmni['f10.7_index'],
color = '#983b59', lw = 1.0, alpha = 0.8)
ax1.set_xlabel('Years', fontsize = 20)
#SET UP FORMAT
ax1.xaxis.set_major_locator(mdt.YearLocator(5))
ax1.xaxis.set_major_formatter(mdt.DateFormatter('%Y'))
ax1.grid(color='gray', linestyle='dashed')
#how to set up a share axis with alternative colors
ax1.tick_params('y', colors = 'grey', labelsize = 16)
ax2.tick_params('y', colors = '#983b59', labelsize = 16)
#set label size of the x axis
ax1.tick_params('x', labelsize = 16)
#nudge it up a bit
ax2.set_ylim([25, 400])
```
### Can you imagine where you can save repetition if you wanted to define *your own* objects for plotting?
## *Part 2 - Introduction to Bootstrap Analysis*
Before we saw that the number of sunspots and f10.7 were highly linear when plotted against each other and analyzed.
### What about looking at the f10.7 and the Dst?
```
#since this doesn't work on nan values first we remove all potential nans
#(keep only rows where both f10.7 and Dst are finite)
nanMask = ((~np.isnan(monthlyOmni['f10.7_index'])) &
           (~np.isnan(monthlyOmni['Dst'])))
xVals = monthlyOmni.loc[nanMask, 'f10.7_index']
yVals = monthlyOmni.loc[nanMask, 'Dst']

#calculate the linear fit on the original (un-resampled) data;
#the *Orig names are reused later as the reference for the bootstrap
slopeOrig, interceptOrig, rvalOrig, pvalOrig, stderrOrig = stats.linregress(xVals, yVals)
#calculate the yvalues given the linear fit
yModelOrig = interceptOrig + slopeOrig * xVals
#find y errors
RMSE = lr.calcRMSE(yModelOrig, yVals)
#error on coefficients: slope and y-intercept
errSlope, errInter = lr.calcCoeffsErr(xVals, RMSE)

#print out fit report -
print("Fit Report: \n \tUncert. on Y: +/- {:.2f}".format(RMSE) +
      "\n \tIntercept: {:.2f} +/- {:.2f}".format(interceptOrig, errInter)
      + "\n\tSlope: {:.2f} +/- {:.2f}".format(slopeOrig, errSlope)
      + "\n\tPearson linear correlation: {:.2f}, r-squared: {:.2f}".format(rvalOrig, rvalOrig**2))

#make plot
fig = plt.figure(figsize=(8.5, 5))
fig.suptitle('Dependence of D$_{st}$ on f10.7 by Month', fontsize=20)
gs = plt.GridSpec(1, 1, hspace=0.0, wspace=0.2, right = 0.9)
#add subplots
ax1 = fig.add_subplot(gs[0,0])
#set up titles for axis
ax1.set_xlabel('f10.7 Standard Flux Units (sfu)', fontsize = 20)
ax1.set_ylabel('D$_{st}$ (nT)', fontsize = 20)
#plot values: data as purple scatter, fitted line as dashed teal
ax1.scatter(xVals, yVals, color = '#660066', alpha = 0.6, s = 20)
ax1.plot(xVals, yModelOrig, color = '#006666', alpha = 1.0, linestyle = '--', lw = 3)
#set up grid
ax1.grid(color='gray', linestyle='dashed')
#enlarge the tick labels
ax1.tick_params(labelsize = 16)
```
### But what is the uncertainty estimate on the correlation coefficient (Pearson linear correlation)? Here we need to set up our bootstrap analysis.
### Bootstrap Method:
### Step 1 - Resample our pandas dataframe.
```
#.sample is a method of dataframes -
#frac = 1 means resample 100% of the rows (same size as the original);
#replace = True allows repeated rows - the core of the bootstrap
reSamped = monthlyOmni[['f10.7_index', 'Dst']].sample(frac = 1, replace = True)
print(reSamped.head())

#And redo the fit for our newly resampled data frame
nanMask = ((~np.isnan(reSamped['f10.7_index'])) &
           (~np.isnan(reSamped['Dst'])))
xVals = reSamped.loc[nanMask, 'f10.7_index']
yVals = reSamped.loc[nanMask, 'Dst']
slope, intercept, rval, pval, stderr = stats.linregress(xVals, yVals)

#calculate the yvalues given the linear fit
yModel = intercept + slope * xVals
#find y errors
RMSE = lr.calcRMSE(yModel, yVals)
#error on coefficients: slope and y-intercept
errSlope, errInter = lr.calcCoeffsErr(xVals, RMSE)

#print out fit report -
#BUGFIX: the intercept line previously printed interceptOrig (the fit to the
#original data) instead of the intercept of THIS resampled fit, which made
#the "compare to the fit you ran before" exercise below meaningless
print("Fit Report: \n \tUncert. on Y: +/- {:.2f}".format(RMSE) +
      "\n \tIntercept: {:.2f} +/- {:.2f}".format(intercept, errInter)
      + "\n\tSlope: {:.2f} +/- {:.2f}".format(slope, errSlope)
      + "\n\tPearson linear correlation: {:.2f}, r-squared: {:.2f}".format(rval, rval**2))
```
### Take a second and compare this to the fit you ran before...does this look a bit different or similar? Compare to your neighbor, is it the same as theirs?
-
-
-
-
-
-
-
-
### Step 2: Implementing Repetition
Let's put this into a loop and resample for many values -
```
#let's resample and calculate the fits quite a number of times - this
#will take a while to run - be patient -
#As suggested in class 500 would be appropriate -
numIterations = 500

#create dictionary of bootstrap values, one array per fit statistic
bootstrapVals = {}
bootstrapVals['rVals'] = np.zeros((numIterations))
bootstrapVals['Intercepts'] = np.zeros((numIterations))
bootstrapVals['Slopes'] = np.zeros((numIterations))

for i in range(numIterations):
    #resample the dataframe (with replacement, same size as the original)
    reSamped = monthlyOmni[['Dst',
                            'f10.7_index']].sample(frac = 1, replace = True)
    #create nan mask
    nanMask = ((~np.isnan(reSamped['f10.7_index'])) &
               (~np.isnan(reSamped['Dst'])))
    #new values of x, and y without nans
    xVals = reSamped.loc[nanMask, 'f10.7_index']
    yVals = reSamped.loc[nanMask, 'Dst']
    #perform fit
    slope, intercept, rval, pval, stderr = stats.linregress(xVals, yVals)
    #and set up dictionary with the values
    bootstrapVals['rVals'][i] = rval
    bootstrapVals['Intercepts'][i] = intercept
    bootstrapVals['Slopes'][i] = slope

#print out final values (ddof = 1 gives the sample standard deviation)
print("The mean rVal is {:.2f} with a standard deviation of {:.5f}.".format(
    np.mean(bootstrapVals['rVals']),
    np.std(bootstrapVals['rVals'], ddof = 1)))
```
### Step 3: Evaluation of the Fit
Now let's take a look at these values.
```
#plot the cloud of bootstrap fit lines over the original data
fig = plt.figure(figsize=(8.5, 5))
fig.suptitle('D$_{st}$ on f10.7 by Month, ' +
             'Resampling Amount: {} \n'.format(numIterations),
             fontsize=20)
gs = plt.GridSpec(1, 1, hspace=0.0, wspace=0.2, right = 0.9)
#add subplots
ax1 = fig.add_subplot(gs[0,0])
#set up titles for axis
ax1.set_xlabel('f10.7 Standard Flux Units (sfu)', fontsize = 20)
ax1.set_ylabel('D$_{st}$ (nT)', fontsize = 20)
#plot values
ax1.scatter(monthlyOmni['f10.7_index'], monthlyOmni['Dst'],
            color = '#660066', alpha = 0.6, s = 20)
#make line equally spaced
minVal = np.nanmin(monthlyOmni['f10.7_index'])
maxVal = np.nanmax(monthlyOmni['f10.7_index'])
spacing = (maxVal - minVal) / 20.0
#create new xarray for pretty plotting
xVals = np.arange(minVal, maxVal+spacing, int(spacing))
#draw every bootstrap fit as a faint grey line - together they
#visualize the uncertainty in the fitted relationship
for slope, intercept in zip(bootstrapVals['Slopes'], bootstrapVals['Intercepts']):
    #calculate y values
    yModel = slope*xVals + intercept
    #and plot in gray
    ax1.plot(xVals, yModel, color = 'grey', alpha = 0.2, lw = 1.0)
#plot final value in turquoise -
#(the fit to the original, un-resampled data, drawn on top of the cloud)
yModelOrig = slopeOrig*xVals + interceptOrig
ax1.plot(xVals, yModelOrig,
         color = '#006666', alpha = 1.0, linestyle = '--', lw = 3.0)
#set up grid
ax1.grid(color='gray', linestyle='dashed')
#set up label sizes
ax1.tick_params(labelsize = 16)
plt.savefig('./Figures/BootstrapLinearFits.png')
```
### Let's take a look at a distribution of these r values.
```
#what does the histogram of this look like?
#set up the figure
fig = plt.figure(figsize=(11, 7))
fig.suptitle('Histogram of Pearson Corr. Coeff. for D$_{st}$ vs. f10.7 \n' +
'Resampling Amount: {}'.format(numIterations),
fontsize=20)
gs = plt.GridSpec(1, 1, hspace=0.2, wspace=0.0, right = 0.8)
#add subplots
ax1 = fig.add_subplot(gs[0,0])
#calculate using the general rule of thumb -
#minimum as the sqrt(sampleSize)
sampleSize = len(bootstrapVals['rVals'])
#could also use np.floor - both find the integer
# - ceil rounds up and I want MORE bins than
#minimum
numBins = np.ceil(np.sqrt(sampleSize))
print("The number of bins for the histogram is: {}".format(numBins))
#values is first entry, followed by number of bins, normed is the
#normalization factor, ie normalize to 1.
#edgecolor and linewidth set up the bin edges
ax1.hist(bootstrapVals['rVals'], int(numBins), density = 1, facecolor = '#822f59',
edgecolor="k")
#set up grid
plt.grid(color='gray', linestyle='dashed')
#labels
plt.xlabel('Pearson Correlation Coeff.', fontsize = 20)
plt.ylabel('Normalized Occurrence', fontsize = 20)
#large ticks
plt.xticks(fontsize=16) #make the xaxis labels larger
plt.yticks(fontsize=16) #make the yaxis labels larger
#print out stats on the skew etc
print("Skew: {:.1f}, Kurtosis: {:.1f}, Standard Dev: {:.1f}, Mean: {:.1f}".format(
skew(bootstrapVals['rVals']),
kurtosis(bootstrapVals['rVals'], fisher = False),
np.std(bootstrapVals['rVals'], ddof = 1),
np.mean(bootstrapVals['rVals'])))
```
### What does this chart inform you about the bootstrap analysis?
-
-
-
-
-
### Finally we calculate the z-value for this comparison -
```
#how does the z value look?
#NOTE(review): this computes |r_orig - mean(r_boot)| / std(r_boot), i.e. a
#z-score of the original fit against the bootstrap distribution, but the
#variable name and printout say "t-test" - confirm the intended terminology.
tVal = (abs((rvalOrig - np.mean(bootstrapVals['rVals']))) /
        (np.std(bootstrapVals['rVals'], ddof = 1)))
print("The t-test results in a comparison" +
      " of the original fit to the bootstrap fit of {:.5f}.".format(tVal))
```
### Is this a small or a large t-value? What does that mean about our fit?
-
-
-
-
-
-
### What if we wanted to implement this onto *any* dataset without copy paste? Let's learn some time saving tools here.
## *Part 3 - Time Saving for Plotting*
If you just want to run forward with some quick exploratory plots, I recommend Seaborn. Please note it's difficult to obtain finalized plots in seaborn with what we've learned so far, but I support using these quick-look tools.
```
import seaborn as sns #this is a new package here
#I like to think of this as a stats + visualization package

#makes a quick plot AND linear fit - regplot scatters the data,
#fits a regression line, and shades a bootstrap confidence band;
#runs nicely with pandas dataframes
sns.regplot(x = 'f10.7_index', y = 'Dst', data = monthlyOmni, n_boot = 1000);
#note the confidence interval drawn around the dataset
#dependent on the bootstrap via n_boot
```
### You can also get the same "seaborn style" with some nifty visualization tricks...such as...
```
mpl.style.use('fivethirtyeight')
fig = plt.figure(figsize=(8.5, 5))
gs = plt.GridSpec(1, 1, hspace=0.0, wspace=0.2, right = 0.9)
#add subplots
ax1 = fig.add_subplot(gs[0,0])
#set up titles for axis
ax1.set_xlabel('f10.7 Standard Flux Units (sfu)', fontsize = 20)
ax1.set_ylabel('D$_{st}$ (nT)', fontsize = 20)
#plot values
xVals = monthlyOmni['f10.7_index']
yVals = monthlyOmni['Dst']
ax1.scatter(xVals, yVals)
ax1.tick_params(labelsize = 16)
print("The following styles are availible:")
print(plt.style.available)
```
### The great thing about these is that you can use some of the defaults to set fontsizes etc, each time you plot!
You can find more about this here - https://matplotlib.org/tutorials/introductory/customizing.html
You can also use objects as we discussed above to make the entire script cleaner for bootstrap analysis - see Extra information after the summary.
# SUMMARY
From this lab you have learned a new way to code - explicitly object oriented programming :). As well as how to start a bootstrap method.
Homework:
- Midterm reports! Keep at it - remember many things are not always linearly correlated and we are not expecting you to find a perfect fit in your projects.
## *EXTRA: Now With More Explicit Object Oriented*
Within this section you can see section 2 repeated but by now taking advantage of a
new class called omni which we define in the omni.py file -
```
#we reload the omni file just in case of changes
importlib.reload(omni)
#reset the style back to basic
mpl.rcParams.update(mpl.rcParamsDefault)
#create a new instance of the object -
exampleObj = omni.Omni(hourlyOmni['FlowPressure'], hourlyOmni['Dst'],
xLabel = 'Flow Pressure (nPa)',
yLabel = 'D$_{st}$ (nT)')
#check out the type of this object
print(type(exampleObj))
#now plot out the linear fit
exampleObj.plotLinReg()
#and print out fit report as a method of the object
exampleObj.printFitReport()
#let's resample and calculate the fits quite a number of times - to clean
#up the process from before we wrote a bootstrap function -
importlib.reload(bootstrap)
#given the original data and fit, run a boostrap analysis and output
#a dictionary of values of those fits
bVals = bootstrap.bootstrap(exampleObj, numIterations = 100)
#go ahead and print out what bVals is as a type
#let's take a look at the histogram of the rValues
bootstrap.plotRValsHist(exampleObj, bVals)
#and let's plot the final fits given these bootstrap
bootstrap.plotFits(exampleObj, bVals)
#and the fit report -
bootstrap.printFitReport(exampleObj, bVals)
```
| github_jupyter |
# Using a neural network to emulate the atmospheric convection scheme in a global climate model
### (John Dwyer & Paul O'Gorman)
## Overview:
Global climate models (GCMs) solve computational fluid PDEs to represent the dynamics and thermodynamics of the atmosphere and ocean. They are important for predicting climate change given some amount of greenhouse gas emissions.
## Problem:
State-of-the-art climate models have a horizontal grid resolution of 100km x 100km, far too coarse to represent many physical processes directly. Some approximations, like that for atmospheric convection and cloud formation, are not very accurate, and leads to large uncertainty in future predictions.
```
from IPython.display import Image
Image('assets/Stephens_and_Bony_2013.png')
```
## Solution:
Use machine learning algorithms! Train a neural network to emulate atmospheric convection from observations or very high-resolution (non-global) climate models. Then put the trained algorithm back into the climate model.
## My approach:
As a first step, I check to make sure that this approach can work. I train a neural network to learn the atmospheric convection scheme in the global model. Then put the trained scheme back into the model and see if it can replicate the original (physics-based) scheme.
```
# First load packages
import numpy as np
from netCDF4 import Dataset
import matplotlib as mpl
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import os
import time
from sklearn import preprocessing, metrics
from importlib import reload
import scipy.stats
import sknn.mlp
import pickle
%matplotlib inline
pylab.rcParams['figure.figsize'] = (10, 6)
inline_rc = dict(mpl.rcParams)
import src.nnload as nnload
import src.nntrain as nntrain
import src.nnplot as nnplot
```
### Load data for training and cross-validation
```
def loaddata(filename, minlev, all_lats=True, indlat=None,
             N_trn_examples=None, rainonly=False, noshallow=False,
             verbose=True):
    """v2 of the script to load data. See prep_convection_output.py for how
    the input filename is generated.

    Args:
        filename: The file to be loaded. Use convection_50day.pkl or
                  convection_50day_validation.pkl
        minlev: The topmost model level for which to load data. Set to 0. to
                load all data
        all_lats: Logical value for whether to load data from all latitudes
        indlat: If all_lats is false, give the index value [0-31] for the
                latitude at which to load data.
        N_trn_examples: If given, limit the returned data to at most this
                many training examples.
        rainonly: If true, only return training examples of when it is raining
        noshallow: If true, only return training examples of when the shallow
                   convection scheme does NOT happen. (So, only return examples
                   with deep convection, or no convection at all)
        verbose: If true, prints some basic stats about training set

    Returns:
        x : 2-d numpy array of input features (m_training examples x
            N_input features). If minlev is 0., there will be 60 input
            features, the top 30 for temperature and the bottom 30 for
            humidity.
        y : 2-d numpy array of output targets (m_training examples x
            N_output targets). If minlev is 0., there will be 60 output
            features, the top 30 for temp. tendencies and the bottom 30
            for q tend.
        cv : 1-d array (m_training examples x 1) that gives 1 if convection
            occurs and 0 if it does not.
        Pout : 1-d array (m_training examples x 1) of how much precipitation
            occurs in kg/m^2/s (multiply by 3600*24 to convert
            precipitation to mm/day)
        lat2 : 1-d array of latitude for one hemisphere (since hemispheres
            are combined)
        lev : The vertical model levels (1 is the surface and 0 is the top
            of the atmosphere).
        dlev : The difference between model levels, useful for calculating
            some derived quantities.
        timestep: How large each model timestep is in seconds.
    """
    import warnings  # local import: 'warnings' is not imported at module level
    v = dict()
    # Close the pickle file deterministically (was left open before)
    with open(filename, 'rb') as f:
        [v['Tin'], v['qin'], v['Tout'], v['qout'], Pout, lat] = \
            pickle.load(f, encoding='latin1')
    # Use this to calculate the real sigma levels
    lev, dlev, indlev = nnload.get_levs(minlev)
    # Combine NH & SH data since they are statistically equivalent
    varis = ['Tin', 'qin', 'Tout', 'qout']
    for var in varis:
        [v[var], lat2] = nnload.avg_hem(v[var], lat, axis=1)
        # Change shape of data to be N_samp x N_lev
        if all_lats:
            v[var] = nnload.reshape_all_lats(v[var], indlev)
        else:
            if indlat is not None:
                v[var] = nnload.reshape_one_lat(v[var], indlev, indlat)
            else:
                raise TypeError('Need to set an index value for indlat')
    # Randomize the order of these events
    m = v['Tin'].shape[0]
    randind = np.random.permutation(m)
    for var in varis:
        v[var] = v[var][randind, :]
    timestep = 10*60  # 10 minute timestep in seconds
    # Heating rates were converted to K/day and g/kg/day in
    # prep_convection_output.py
    # Concatenate input and output variables together
    x = nnload.pack(v['Tin'], v['qin'], axis=1)
    y = nnload.pack(v['Tout'], v['qout'], axis=1)
    Pout2 = nnplot.calc_precip(y, dlev)
    # Print some statistics about rain and limit to when it's raining if True
    x, y, Pout2 = nnload.limitrain(x, y, Pout2, rainonly, noshallow=noshallow,
                                   verbose=verbose)
    # Limit to only certain events if requested
    if N_trn_examples is not None:
        if N_trn_examples > y.shape[0]:
            # BUG FIX: the two message fragments were joined without a space
            warnings.warn('Requested more samples than available. Using the '
                          'maximum number available')
            N_trn_examples = y.shape[0]
        ind = np.arange(N_trn_examples)
        x = x[ind, :]
        y = y[ind, :]
        # BUG FIX: was Pout2[:] (a full copy), which left precipitation
        # unsubset and misaligned with the truncated x and y
        Pout2 = Pout2[ind]
    # Store when convection occurs
    cv, _ = nnload.whenconvection(y, verbose=verbose)
    return (x, y, cv, Pout2, lat2, lev, dlev, timestep)
x_orig, y_orig, cv, Pout, lat, lev, dlev, timestep = loaddata(
'./data/convection_50day.pkl', minlev=0.25,
all_lats=True, indlat=None, rainonly=False)
```
### Preprocess Data
```
def unpack(data, vari, axis=1):
    """Split a packed T/q matrix and return the half selected by *vari*.

    The packed layout stores temperature in the first half of *axis* and
    humidity in the second half; 'T' selects the former, 'q' the latter.
    """
    half = data.shape[axis] // 2
    selector = {'T': np.arange(0, half), 'q': np.arange(half, 2 * half)}
    return np.take(data, selector[vari], axis=axis)
def pack(d1, d2, axis=1):
    """Stack the T and q profiles along *axis* into one NN input matrix."""
    return np.concatenate([d1, d2], axis=axis)
# Initialize & fit scaler
def init_pp(ppi, raw_data):
    """Initialize and fit the preprocessing scaler(s) described by *ppi*.

    Args:
        ppi: dict with keys 'name' (which scaler type to build) and
             'method' (how scaling is applied across the T/q halves).
        raw_data: 2-d array (N_samples x N_features) used to fit the scaler.

    Returns:
        A two-element list of scalers [T-scaler, q-scaler], a single scaler
        (for the 'qTindividually' method), or a list of two plain scale
        factors (for the 'SimpleY' scaler).

    Raises:
        ValueError: if ppi['name'] or ppi['method'] is not recognized.
    """
    if ppi['name'] == 'MinMax':
        pp = [preprocessing.MinMaxScaler(feature_range=(-1.0, 1.0)),  # for temperature
              preprocessing.MinMaxScaler(feature_range=(-1.0, 1.0))]  # and humidity
    elif ppi['name'] == 'MaxAbs':
        pp = [preprocessing.MaxAbsScaler(),  # for temperature
              preprocessing.MaxAbsScaler()]  # and humidity
    elif ppi['name'] == 'StandardScaler':
        pp = [preprocessing.StandardScaler(),  # for temperature
              preprocessing.StandardScaler()]  # and humidity
    elif ppi['name'] == 'RobustScaler':
        pp = [preprocessing.RobustScaler(),  # for temperature
              preprocessing.RobustScaler()]  # and humidity
    elif ppi['name'] == 'SimpleY':
        pp = [15., 10.]  # fixed divisors for temperature and humidity
    else:
        # BUG FIX: the ValueError was previously constructed but never
        # raised, so an unknown scaler name fell through to a NameError
        # on 'pp' below.
        raise ValueError('Incorrect scaler name')
    # Initialize scalers with data
    if ppi['method'] == 'individually':
        pp[0].fit(unpack(raw_data, 'T'))
        pp[1].fit(unpack(raw_data, 'q'))
    elif ppi['method'] == 'alltogether':
        pp[0].fit(np.reshape(unpack(raw_data, 'T'), (-1, 1)))
        pp[1].fit(np.reshape(unpack(raw_data, 'q'), (-1, 1)))
    elif ppi['method'] == 'qTindividually':
        if ppi['name'] != 'SimpleY':
            # Use a single scaler fitted on the full packed matrix
            pp = pp[0]
            pp.fit(raw_data)
    else:
        raise ValueError('Incorrect scaler method')
    return pp
# Transform data using initialized scaler
def transform_data(ppi, pp, raw_data):
    """Apply the fitted preprocessing scaler(s) *pp* to *raw_data* and
    return the scaled data as one packed (T, q) array."""
    method = ppi['method']
    if method == 'individually':
        scaled_T = pp[0].transform(unpack(raw_data, 'T'))
        scaled_q = pp[1].transform(unpack(raw_data, 'q'))
    elif method == 'alltogether':
        # Scale each variable as one flattened column ...
        scaled_T = pp[0].transform(np.reshape(unpack(raw_data, 'T'), (-1, 1)))
        scaled_q = pp[1].transform(np.reshape(unpack(raw_data, 'q'), (-1, 1)))
        # ... then restore the (N_samples x N_features) layout
        original_shape = unpack(raw_data, 'T').shape
        scaled_T = np.reshape(scaled_T, original_shape)
        scaled_q = np.reshape(scaled_q, original_shape)
    elif method == 'qTindividually':
        if ppi['name'] == 'SimpleY':
            # SimpleY uses fixed divisors instead of fitted scaler objects
            scaled_T = unpack(raw_data, 'T') / pp[0]
            scaled_q = unpack(raw_data, 'q') / pp[1]
        else:
            transformed = pp.transform(raw_data)
            scaled_T = unpack(transformed, 'T')
            scaled_q = unpack(transformed, 'q')
    else:
        print('Given method is ' + method)
        raise ValueError('Incorrect scaler method')
    # Return a single packed array as output
    return pack(scaled_T, scaled_q)
# Apply inverse transformation to unscale data
def inverse_transform_data(ppi, pp, trans_data):
    """Undo transform_data, returning data in its original physical units."""
    method = ppi['method']
    if method == 'individually':
        T_phys = pp[0].inverse_transform(unpack(trans_data, 'T'))
        q_phys = pp[1].inverse_transform(unpack(trans_data, 'q'))
    elif method == 'alltogether':
        # Invert on the flattened column layout used when fitting ...
        T_phys = pp[0].inverse_transform(np.reshape(unpack(trans_data, 'T'), (-1, 1)))
        q_phys = pp[1].inverse_transform(np.reshape(unpack(trans_data, 'q'), (-1, 1)))
        # ... then restore the (N_samples x N_features) layout
        original_shape = unpack(trans_data, 'T').shape
        T_phys = np.reshape(T_phys, original_shape)
        q_phys = np.reshape(q_phys, original_shape)
    elif method == 'qTindividually':
        if ppi['name'] == 'SimpleY':
            # SimpleY scaling divided by fixed constants; multiply to invert
            T_phys = unpack(trans_data, 'T') * pp[0]
            q_phys = unpack(trans_data, 'q') * pp[1]
        else:
            restored = pp.inverse_transform(trans_data)
            T_phys = unpack(restored, 'T')
            q_phys = unpack(restored, 'q')
    else:
        raise ValueError('Incorrect scaler method')
    # Return a single packed array as output
    return pack(T_phys, q_phys)
# Define preprocessing method to use
x_ppi={'name':'StandardScaler','method':'qTindividually'}
y_ppi={'name':'SimpleY' ,'method':'qTindividually'}
# Apply preprocessing to input data
x_pp = init_pp(x_ppi, x_orig)
x = transform_data(x_ppi, x_pp, x_orig)
# Apply preprocessing to output data
y_pp = init_pp(y_ppi, y_orig)
y = transform_data(y_ppi, y_pp, y_orig)
# Make preprocessor string for saving
pp_str = 'X-' + x_ppi['name'] + '-' + x_ppi['method'][:6] + '_'
pp_str = pp_str + 'Y-' + y_ppi['name'] + '-' + y_ppi['method'][:6] + '_'
```
#### Show the input data and define helper script for plotting data
```
def _plot_distribution(z, lat, lev, fig, ax, titlestr,
                       xl=None, xu=None, bins=None):
    """Plots a stack of histograms of log10(counts) at all model levels.

    Args:
        z: 2-d array of samples x levels to histogram.
        lat, lev: coordinate arrays (only lev.size is used here).
        fig, ax: matplotlib figure and axis to draw on.
        titlestr: title for the axis.
        xl, xu: optional x-axis limits to impose.
        bins: optional precomputed bin edges (shared across panels).

    Returns:
        (xl, xr, bins): the final x-limits and the bin edges used, so a
        companion panel can be drawn on identical axes.
    """
    # Initialize the bins and the frequency
    num_bins = 100
    if bins is None:
        bins = np.linspace(np.amin(z), np.amax(z), num_bins+1)
    n = np.zeros((num_bins, lev.size))
    # Calculate distribution at each level
    for i in range(lev.size):
        n[:, i], _ = np.histogram(z[:, i], bins=bins)
    # Take a logarithm; empty bins give log10(0) = -inf, which we replace
    # with the smallest finite value so contourf gets finite data.
    with np.errstate(divide='ignore'):  # silence the expected log10(0) warning
        n = np.log10(n)
    n_small = np.amin(n[np.isfinite(n)])
    n[np.isinf(n)] = n_small
    # Plot histogram
    ca = ax.contourf(bins[:-1], lev, n.T)
    ax.set_ylim(1, 0)  # sigma coordinate: surface (1) at the bottom
    if xl is not None:
        ax.set_xlim(xl, xu)
    plt.colorbar(ca, ax=ax)
    ax.set_ylabel(r'$\sigma$')
    ax.set_title(titlestr)
    # FIX: read the limits with get_xlim(); the original called set_xlim()
    # with no arguments and relied on its return value as a side effect.
    xl, xr = ax.get_xlim()
    return xl, xr, bins
# Show how preprocessing scales the input data
fig, ax = plt.subplots(2, 2)
_, _, _ = _plot_distribution(unpack(x_orig, 'T'), lat, lev, fig, ax[0,0],
'Temperature (unscaled) [K]')
_, _, _ = _plot_distribution(unpack(x, 'T'), lat, lev, fig, ax[0,1],
'Temperature (scaled) []')
_, _, _ = _plot_distribution(unpack(x_orig, 'q'), lat, lev, fig, ax[1,0],
'Humidity (unscaled) [g/kg]')
_, _, _ = _plot_distribution(unpack(x, 'q'), lat, lev, fig, ax[1,1],
'Humidity (scaled) []')
fig.suptitle('Distributions of Raw and Preprocessed Inputs', fontsize=20)
```
#### Preprocessing output values
```
fig, ax = plt.subplots(2, 2)
_, _, _ = _plot_distribution(unpack(y_orig, 'T'), lat, lev, fig, ax[0,0],
'Temperature (unscaled) [K]')
_, _, _ = _plot_distribution(unpack(y, 'T'), lat, lev, fig, ax[0,1],
'Temperature (scaled) []')
_, _, _ = _plot_distribution(unpack(y_orig, 'q'), lat, lev, fig, ax[1,0],
'Humidity (unscaled) [g/kg]')
_, _, _ = _plot_distribution(unpack(y, 'q'), lat, lev, fig, ax[1,1],
'Humidity (scaled) []')
fig.suptitle('Distributions of Raw and Preprocessed Outputs', fontsize=20)
```
## Build Neural Network
#### Functions to build NN
```
def store_stats(i, avg_train_error, best_train_error, avg_valid_error,
                best_valid_error, **_):
    """Epoch-end callback: accumulate per-epoch error statistics in the
    module-level ``errors_stored`` list, resetting it when a new training
    run begins (epoch index i == 1)."""
    global errors_stored
    if i == 1:
        # First epoch of a fresh training run: start a new history.
        errors_stored = []
    errors_stored.append(
        (avg_train_error, best_train_error, avg_valid_error, best_valid_error))
def build_nn(method, actv_fnc, hid_neur, learning_rule, pp_str,
             batch_size=100, n_iter=None, n_stable=None,
             learning_rate=0.01, learning_momentum=0.9,
             regularize='L2', weight_decay=0.0, valid_size=0.5,
             f_stable=.001):
    """Build a multi-layer perceptron regressor via the scikit-neuralnetwork
    interface, plus a descriptive name string encoding its configuration."""
    # Hidden layers: one per (activation, width) pair, then a linear output.
    layers = []
    for fnc, width in zip(actv_fnc, hid_neur):
        layers.append(sknn.mlp.Layer(fnc, units=width))
    layers.append(sknn.mlp.Layer("Linear"))
    mlp = sknn.mlp.Regressor(layers, n_iter=n_iter, batch_size=batch_size,
                             learning_rule=learning_rule, learning_rate=learning_rate,
                             learning_momentum=learning_momentum, regularize=regularize,
                             weight_decay=weight_decay, n_stable=n_stable, valid_size=valid_size,
                             f_stable=f_stable, callback={'on_epoch_finish': store_stats})
    # Build a human-readable identifier for this configuration.
    # Each layer is tagged "<units><first letter of activation>".
    layer_tags = ['{}{}'.format(width, fnc[0]) for width, fnc in zip(hid_neur, actv_fnc)]
    layerstr = '_'.join(layer_tags)
    # Encode the momentum for the momentum rule, the learning rate otherwise.
    if learning_rule == 'momentum':
        rate_tag = str(learning_momentum)
    else:
        rate_tag = str(learning_rate)
    mlp_str = '{}{}_{}_{}{}'.format(pp_str, method[0], layerstr,
                                    learning_rule[0:3], rate_tag)
    # Note any regularization in the name too.
    if weight_decay > 0.0:
        mlp_str += 'reg' + str(weight_decay)
    # And the iteration budget.
    mlp_str += '_Niter' + str(n_iter)
    return mlp, mlp_str
```
#### Build the NN
```
actv_fnc = ['Rectifier', 'Rectifier']
hid_neur = [50, 25]
learning_rule='momentum'
n_iter = 100
r_mlp, r_str = build_nn('regress', actv_fnc, hid_neur, learning_rule,
pp_str, n_iter=n_iter, learning_momentum=0.9,
regularize='L2', weight_decay=1e-4)
```
## Train the Neural Network
```
def train_nn(mlp, mlp_str, x, y, w=None):
    """Fit one multi-layer perceptron on (x, y), print its training score
    and wall-clock time, and return the fitted model together with the
    per-epoch error history recorded by the store_stats callback."""
    t0 = time.time()
    # Train the model (w is an optional sample-weight array).
    mlp.fit(x, y, w)
    train_score = mlp.score(x, y)
    elapsed = time.time() - t0
    print("Training Score: {:.4f} for Model {:s} ({:.1f} seconds)".format(
        train_score, mlp_str, elapsed))
    # errors_stored is populated by the store_stats epoch callback:
    # N_iter rows of (avg_train, best_train, avg_valid, best_valid).
    errors = np.asarray(errors_stored)
    # Return the fitted model and its error history
    return mlp, errors
# Train neural network
r_mlp, r_errors = train_nn(r_mlp, r_str, x, y)
# Save neural network
pickle.dump([r_mlp, r_str, r_errors, x_ppi, y_ppi, x_pp, y_pp, lat, lev, dlev],
open('data/regressors/' + r_str + '.pkl', 'wb'))
```
# Evaluate the NN
Load a more fully trained neural network as well as new (different) data for testing purposes
Also load validation data
```
# Define a stored neural net to load
r_str = 'X-StandardScaler-qTindi_Y-SimpleY-qTindi_r_60R_60R_mom0.9reg1e-05'
# Load the NN
r_mlp_eval, _, errors, x_ppi, y_ppi, x_pp, y_pp, lat, lev, dlev = \
pickle.load(open('./data/regressors/' + r_str + '.pkl', 'rb'))
# Open the ***VALIDATION*** data set
datasource='./data/convection_50day_validation.pkl'
x_unscl, ytrue_unscl, _,_,_,_,_,_ = nnload.loaddata(datasource, minlev=min(lev))
# Scale data using input scalers
x_scl = nnload.transform_data(x_ppi, x_pp, x_unscl)
ytrue_scl = nnload.transform_data(y_ppi, y_pp, ytrue_unscl)
# Apply neural network to get predicted output
ypred_scl = r_mlp_eval.predict(x_scl)
ypred_unscl = nnload.inverse_transform_data(y_ppi, y_pp, ypred_scl)
```
## Plot error history as a function of iteration number
```
# plot_model_error_over_time(errors, r_str, figpath)
x = np.arange(errors.shape[0])
ytix = [.1e-3, .4e-3, .5e-3, 1e-3, 2e-3, 4e-3, 5e-3,
10e-3, 20e-3, 40e-3, 50e-3, 500e-3, 4]
# Plot error rate vs. iteration number
fig=plt.figure()
# Plot training errors
plt.semilogy(x, np.squeeze(errors[:,0]), alpha=0.5,color='blue',label='Training')
plt.semilogy(x, np.squeeze(errors[:,1]), alpha=0.5,color='blue')
plt.yticks(ytix,ytix)
plt.ylim((np.nanmin(errors), np.nanmax(errors)))
# Use a logarithmic y-axis
plt.semilogy(x, np.squeeze(errors[:,2]), alpha=0.5,label='Testing',color='green')
plt.semilogy(x, np.squeeze(errors[:,3]), alpha=0.5,color='green')
plt.legend()
plt.title('Error for ' + r_str)
plt.xlabel('Iteration Number')
```
## Plot Mean Statistics
```
def do_mean_or_std(method, vari, true, pred, lev, ind):
    """Draw one subplot (panel *ind* of a 2x2 grid) comparing the level-wise
    mean or standard deviation of true vs predicted tendencies for *vari*."""
    units = {'T': 'K/day', 'q': 'g/kg/day'}
    reducers = {'mean': np.mean, 'std': np.std}
    plt.subplot(2, 2, ind)

    def profile(data):
        # Collapse over samples, leaving one value per model level.
        return reducers[method](unpack(data, vari), axis=0).T

    plt.plot(profile(true), lev, label='true')
    plt.plot(profile(pred), lev, label='pred')
    # Flip the y-axis so the surface (large sigma) sits at the bottom.
    plt.ylim(np.amax(lev), np.amin(lev))
    plt.ylabel('$\sigma$')
    plt.xlabel(units[vari])
    plt.title(vari + " " + method)
    plt.legend()
def plot_means_stds(y3_true, y3_pred, lev):
    """Draw a 2x2 panel of mean and standard-deviation profiles for the
    temperature and humidity tendencies (true vs predicted)."""
    fig = plt.figure()
    panel = 1
    for stat in ('mean', 'std'):
        for vari in ('T', 'q'):
            do_mean_or_std(stat, vari, y3_true, y3_pred, lev, panel)
            panel += 1
def plot_pearsonr(y_true, y_pred, vari, lev, label=None):
    """Plot the per-level Pearson correlation between true and predicted
    tendencies for variable *vari* ('T' or 'q')."""
    n_feats = y_true.shape[1]
    r = np.empty(n_feats)
    prob = np.empty(n_feats)
    # Correlate each output feature (level) across all samples.
    for col in range(n_feats):
        r[col], prob[col] = scipy.stats.pearsonr(y_true[:, col], y_pred[:, col])
    plt.plot(unpack(r, vari, axis=0), lev, label=label)
    # Surface (sigma = max) at the bottom of the plot.
    plt.ylim([np.amax(lev), np.amin(lev)])
    plt.ylabel('$\sigma$')
    plt.title('Correlation Coefficient')
# Plot means and standard deviations
plot_means_stds(ytrue_unscl, ypred_unscl, lev)
# Plot correlation coefficient versus height
fig = plt.figure()
plot_pearsonr(ytrue_unscl, ypred_unscl, 'T', lev, label=r'$\Delta$Temperature')
plot_pearsonr(ytrue_unscl, ypred_unscl, 'q', lev, label=r'$\Delta$Humidity')
plt.legend(loc="upper left")
```
## Check the distribution of output values
```
# Plot histogram showing how well true and predicted values match
def check_output_distribution(yt_unscl, yt_scl, yp_unscl, yp_scl, lat, lev):
    """Plot a 2x2 grid of level-stacked histograms comparing the
    distributions of true vs predicted (unscaled) tendencies.

    Note: yt_scl and yp_scl are accepted for interface compatibility but the
    scaled distributions are not currently plotted.
    """
    # For unscaled variables
    fig, ax = plt.subplots(2, 2)
    # BUG FIX: plot the function arguments; the original used the notebook
    # globals ytrue_unscl/ypred_unscl, silently ignoring the caller's data.
    # The predicted panel is drawn first so its limits/bins can be reused
    # for the matching "true" panel.
    x1, x2, bins = _plot_distribution(unpack(yp_unscl, 'T'), lat, lev, fig, ax[0, 1],
                                      r'Pred. $\Delta$Temp [K/day]')
    _, _, _ = _plot_distribution(unpack(yt_unscl, 'T'), lat, lev, fig, ax[0, 0],
                                 r'True $\Delta$Temp [K/day]', x1, x2, bins)
    x1, x2, bins = _plot_distribution(unpack(yp_unscl, 'q'), lat, lev, fig, ax[1, 1],
                                      r'Pred. $\Delta$Humidity [g/kg/day]')
    _, _, _ = _plot_distribution(unpack(yt_unscl, 'q'), lat, lev, fig, ax[1, 0],
                                 r'True $\Delta$Humidity [g/kg/day]', x1, x2, bins)
check_output_distribution(ytrue_unscl, ytrue_scl, ypred_unscl, ypred_scl,
lat, lev)
```
## Make a scatter plot of precipitation
```
def calc_precip(y, dlev):
    """Diagnose surface precipitation (mm/day) from the humidity-tendency
    half of a packed output array."""
    q_tend = unpack(y, 'q') / 1000.  # g/kg/day -> kg/kg/day
    return vertical_integral(q_tend, dlev)  # mm/day
def vertical_integral(data, dlev):
    """Mass-weighted vertical integral of *data* (samples x levels) over
    the sigma-level thicknesses *dlev*; returns one value per sample."""
    g = 9.8  # gravitational acceleration [m/s^2]
    weighted = np.sum(data * dlev[:, None].T, axis=1)
    return (-1. / g) * weighted * 1e5
# Plot a scatter plot of true vs predicted precip
P_true = calc_precip(ytrue_unscl, dlev)
P_pred = calc_precip(ypred_unscl, dlev)
# Plot data
plt.scatter(P_true, P_pred, s=5, alpha=0.25)
# Calculate mins and maxs and set axis bounds appropriately
xmin = np.min(P_true)
xmax = np.max(P_true)
ymin = np.min(P_pred)
ymax = np.max(P_pred)
xymin = np.min([xmin,ymin])
xymax = np.max([xmax,ymax])
# Plot 1-1 line (perfect prediction)
plt.plot([xymin,xymax], [xymin, xymax], color='k', ls='--')
plt.xlim(xymin, xymax)
plt.ylim(xymin, xymax)
plt.xlabel('True')
plt.ylabel('Predicted')
# Report how often the network predicts (unphysical) negative precipitation
Plessthan0 = sum(P_pred < 0.0)
Plessthan0pct = 100.*Plessthan0/len(P_pred)
plt.text(0.01,0.95,"Pred. P<0 {:.1f}% of time".format(Plessthan0pct),
         transform=plt.gca().transAxes)
```
## Some examples of the NN in action
```
def plot_sample_profile(y_true, y_pred, lev):
    """Plots the vertical profiles of the predicted and true output
    tendencies (temperature and humidity) for one sample."""
    f, (ax1, ax3) = plt.subplots(1, 2)
    # NOTE: the original also extracted T/q from the notebook-global input
    # x here but never used them; that dead code has been removed.
    # Plot temperature tendencies
    ax1.plot(unpack(y_true, 'T', axis=0), lev, color='red',
             ls='-', label=r'$\Delta$T true')
    ax1.plot(unpack(y_pred, 'T', axis=0), lev, color='red',
             ls='--', label=r'$\Delta$T pred')
    ax1.set_xlabel(r'$\Delta$Temperature [K/day]')
    # Plot humidity tendencies
    ax3.plot(unpack(y_true, 'q', axis=0), lev, color='blue',
             ls='-', label=r'$\Delta$q true')
    ax3.plot(unpack(y_pred, 'q', axis=0), lev, color='blue',
             ls='--', label=r'$\Delta$q pred')
    ax3.set_xlabel(r'$\Delta$Humidity [g/kg/day]')
    # Set axis properties
    for ax in [ax1, ax3]:
        ax.set_ylim(1, 0.25)  # sigma decreases upward
        ax.legend()
        ax.grid(True)
    # BUG FIX: the title was set on the global 'fig' from an earlier cell;
    # use this function's own figure handle instead.
    f.suptitle('Sample True and Predicted Outputs', fontsize=20)
samp = np.random.randint(0, x.shape[0])
print('Random sample index value: ' + str(samp))
plot_sample_profile(ytrue_unscl[samp,:], ypred_unscl[samp,:], lev)
```
| github_jupyter |
```
import importlib
import os
import json
import sys
import numpy as np
import torch
import torchvision
from PIL import Image
from models.gan_visualizer import GANVisualizer
from models.utils.utils import loadmodule, getLastCheckPoint, getVal, \
getNameAndPackage, parse_state_name
name = "hina"
module = "PGAN"
scale = None
iter = None
dir = "output_networks"
visualisation = importlib.import_module("visualization.np_visualizer")
# Locate the most recent checkpoint for this model/scale/iteration.
checkPointDir = os.path.join(dir, name)
checkpointData = getLastCheckPoint(checkPointDir,
                                   name,
                                   scale=scale,
                                   iter=iter)
if checkpointData is None:
    # BUG FIX: error message read "Not checkpoint found"
    raise FileNotFoundError(
        "No checkpoint found for model " + name + " at directory " + dir)
modelConfig, pathModel, _ = checkpointData
# If no scale was requested, recover it from the checkpoint file name.
if scale is None:
    _, scale, _ = parse_state_name(pathModel)
pathLoss = os.path.join(checkPointDir, name + "_losses.pkl")
pathOut = os.path.splitext(pathModel)[0] + "_fullavg.jpg"
# Resolve the model class for the requested module (e.g. PGAN).
packageStr, modelTypeStr = getNameAndPackage(module)
modelType = loadmodule(packageStr, modelTypeStr)
exportMask = module in ["PPGAN"]
visualizer = GANVisualizer(
    pathModel, modelConfig, modelType, visualisation)
```
## Generate
```
batch_size = 49
z, _ = visualizer.model.buildNoiseData(batch_size)
z.shape
from copy import deepcopy
t = 0.3
fz = deepcopy(z[25].unsqueeze(0))*t + z[2].unsqueeze(0)*(1-t)
# fz[0,290] = 2.7
img = visualizer.model.test(z, getAvG=True)
fav = visualizer.model.test(fz, getAvG=True)
grid = torchvision.utils.make_grid(img, nrow=7, normalize=True).numpy().transpose(1,2,0)
fav = torchvision.utils.make_grid(fav, nrow=1, normalize=True).numpy().transpose(1,2,0)
Image.fromarray((grid*255).astype(np.uint8))
import matplotlib.pyplot as plt
# Image.fromarray((fav*255).astype(np.uint8)).save("fav6.jpg")
plt.imshow(fav)
np.save("fav6.npy", fz.cpu().numpy())
```
## Interpolate
```
fz1 = torch.tensor(np.load("fav1.npy")).cuda()
fz2 = torch.tensor(np.load("fav6.npy")).cuda()
pathOut = os.path.splitext(pathModel)[0] + "_interpolations"
os.makedirs(pathOut, exist_ok=True)
interpolationVectors = torch.cat([fz1, fz2])
nImgs = interpolationVectors.size(0)
for img in range(nImgs):
indexNext = (img + 1) % nImgs
path = os.path.join(pathOut, str(img) + "_" + str(indexNext))
if not os.path.isdir(path):
os.mkdir(path)
path = os.path.join(path, "")
visualizer.saveInterpolation(
100, interpolationVectors[img],
interpolationVectors[indexNext], path)
```
## Morphing Video
```
import cv2
img_dir = "./output_networks/hina/hina_s4_i96000_interpolations/0_1/"
# XVID-encoded AVI at 15 fps; frame size must match the images (64x64).
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('mofing.avi', fourcc, 15.0, (64,64))
# Append each interpolation frame to the video in filename order.
for fname in sorted(os.listdir(img_dir)):
    fpath = os.path.join(img_dir, fname)
    frame = cv2.imread(fpath)
    # write the frame (NOTE(review): cv2.imread returns None for non-image
    # files; assumes the directory contains only frame images — verify)
    out.write(frame)
# Release everything if job is finished
out.release()
print(1)
```
| github_jupyter |
# Using WMLA Elastic Distributed Training via API - a sample notebook
<div class="alert alert-block alert-info">
### Contents
- [The big picture](#The-big-picture)
- [Changes to your code](#Changes-to-your-code)
- [Making dataset available](#Making-dataset-available)
- [Set up API end point and log on](#Set-up-API-end-point-and-log-on)
- [Submit job via API](#Submit-job-via-API)
- [Monitor running job](#Monitor-running-job)
- [Retrieve output and saved models](#Retrieve-output-and-saved-models)
- [Output - Retrieve training output](#Output:--Retrieve-Training-Metric)
- [Save Models](#Save-Model)
- [Debugging any issues](#Debugging-any-issues)
- [Further information and useful links](#Further-information-and-useful-links)
- [Appendix](#Appendix)
</div>
<div class="alert alert-block alert-danger">
Final things to do:
- add link to Learning Journey blog in sections: "The big picture" and "Changes to your code"
</div>
## The big picture
[Back to top](#Contents)
This notebook details the process of taking your existing model training code and making the changes required to run the code using [IBM Watson Machine Learning](https://developer.ibm.com/linuxonpower/deep-learning-powerai/powerai-enterprise/) (WMLA) using Elastic Distributed Training.
<span style='color:deeppink'>**TODO:** Link to Learning Journey Blog</span>
The image below shows the various elements required to use EDT. In this notebook we will step through each of these elements in more detail. Through this process you will offload your code to a WMLA cluster, monitor the running job, retrieve the output and debug any issues seen. A [static version](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/5_running_job.png) is also available.

## Changes to your code
[Back to top](#Contents)
In this section we will take existing sample code and make the relevant changes required for use with EDT. An overview of these changes can be seen in the diagram below. A [static version](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/2_code_adaptations.png) is also available.

The key changes to your code in order to use EDT are the following:
- Importing libraries and setting up environment variables
- Data loading function for EDT
- Extract parameters for training
- Replace training and testing loops with EDT equivalents
For the purpose of this tutorial we have adapted the following Mnist model for use with EDT: https://github.com/pytorch/examples/blob/master/mnist/main.py
Please see the blog associated with this notebook with more detailed explanation of the above changes.
<span style='color:deeppink'>**TODO:** Add link to Learning Journey blog in line above</span>
You can find the original code `pytorch_mnist.py` and the updated code `pytorch_mnist_EDT.py` in the zip file `pytorch_edt.tar.gz` contained in the [tutorial repository](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/elastic-distributed-training-module/pytorch-mnist-edt-model.zip).
You can run the following command to observe relative changes:
`diff -U4 pytorch_mnist.py pytorch_mnist_EDT.py`
Your modified code should be made available in a directory which also contains the EDT helper scripts: `edtcallback.py`, `emetrics.py` and `elog.py`. Sample versions can be found in the tarball in the tutorial repository; additionally they can be downloaded from http://ibm.biz/WMLA-samples.
## Making dataset available
[Back to top](#Contents)
Next we will make our dataset available to the WMLA cluster as seen in the diagram below.

1. Ask your system admin the path of $DLI_DATA_FS directory
2. ssh to WMLA-server and get access to $DLI_DATA_FS
3. Download dataset
```
[WMLA-server dlidata]# wget https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/elastic-distributed-training-module/pytorch-mnist-dataset.zip
--2020-05-12 22:27:07-- https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/elastic-distributed-training-module/pytorch-mnist-dataset.zip
Resolving github.com (github.com)... 140.82.114.3
Connecting to github.com (github.com)|140.82.114.3|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://raw.githubusercontent.com/IBM/wmla-assets/master/WMLA-learning-journey/elastic-distributed-training-module/pytorch-mnist-dataset.zip [following]
--2020-05-12 22:27:07-- https://raw.githubusercontent.com/IBM/wmla-assets/master/WMLA-learning-journey/elastic-distributed-training-module/pytorch-mnist-dataset.zip
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 199.232.36.133
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|199.232.36.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 23006288 (22M) [application/zip]
Saving to: ‘pytorch-mnist-dataset.zip’
100%[=============================================================================================================================================>] 23,006,288 67.4MB/s in 0.3s
2020-05-12 22:27:08 (67.4 MB/s) - ‘pytorch-mnist-dataset.zip’ saved [23006288/23006288]
[WMLA-server dlidata]# unzip pytorch-mnist-dataset.zip
```
4. Unzip the zip file and modify file owner/group, that is equivalent to Instance Group Execution User (in this case it is egoadmin)
```
[WMLA-server dlidata]# chown -R egoadmin:egoadmin pytorch-mnist/
[WMLA-server hymenopteradata]# pwd
/dlidata/pytorch-mnist
[WMLA-server hymenopteradata]# ls -lt
total 0
drwxr-x--- 4 egoadmin egoadmin 34 Jan 7 23:54 MNIST
```
5. Take note of the path name of this dataset, note that your path will likely be different that seen here.
```
/dlidata/pytorch-mnist
```
## Set up API end point and log on
[Back to top](#Contents)
In this section we set up the API endpoint which will be used in this notebook.
1. Source the environment
$EGO_TOP is the installation directory. The default value is /opt/ibm/spectrumcomputing
```
. $EGO_TOP/profile.platform
```
2. Login
```
egosh user logon -u <wmla_user>
Logged on successfully
```
3. Retrieve Conductor Rest API Port
```
egosh client view |grep -A 3 ASCD_REST_BASE_URL_1
CLIENT NAME: ASCD_REST_BASE_URL_1
DESCRIPTION: http://<WMLA-server>:8280/platform/rest/
```
4. Retrieve DLI (Deep Learning Impact) Rest API Port
```
egosh client view |grep -A 3 DLPD_REST_BASE_URL_1
CLIENT NAME: DLPD_REST_BASE_URL_1
DESCRIPTION: http://<WMLA-server>:9280/platform/rest/
```
5. Note that the port numbers in your URL will depend on whether SSL has been enabled or not
## Submit job via API
[Back to top](#Contents)
Now we need to structure our API job submission. There are various elements to this process as seen in the diagram below. Note that **this** jupyter notebook is the one referred to below. A [static version](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/4_api_setup.png) is also available.

The following sections use the Watson ML Accelerator API to complete the various tasks required.
We've given examples of a number of tasks, but you should refer to the documentation linked below to see more details
of what is possible and sample output you might expect.
- https://www.ibm.com/support/knowledgecenter/SSFHA8_1.2.1/cm/deeplearning.html
- https://www.ibm.com/support/knowledgecenter/SSZU2E_2.4.0/reference_s/api_references.html
```
import requests, json
import pandas as pd
import datetime
# from IPython.display import display
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
# plt.rcParams['figure.figsize'] = [24, 8.0]
import seaborn as sns
pd.set_option('display.max_columns', 999)
pd.set_option('max_colwidth', 300)
import tarfile
import tempfile
import os
# Construct the WMLA REST endpoint URLs from host and ports
master_host = 'dse-ac922h.cpolab.ibm.com'
dli_rest_port = '9243' #Deep Learning Impact Rest API Port
sc_rest_port = '8643' #Conductor Rest API Port
sc_rest_url = 'https://'+master_host+':'+sc_rest_port+'/platform/rest/conductor/v1'
dl_rest_url = 'https://'+master_host+':'+dli_rest_port+'/platform/rest/deeplearning/v1'
# User login details
# SECURITY NOTE: credentials are hard-coded in plain text; prefer reading
# them from environment variables or a secrets store before sharing this
# notebook.
wmla_user = 'dse_user'
wmla_pwd = 'cpd4ever'
myauth = (wmla_user, wmla_pwd)
# Instance Group to be used
sig_name = 'SIG-DSE-WSL-EDT'
#sig_name = 'SIG-EDT-Kelvin'
# REST call variables
headers = {'Accept': 'application/json'}
print (sc_rest_url)
print (dl_rest_url)
# Model Path (local directory containing the EDT-adapted model code)
#model_path = '<path_to_your_local_model_directory>'
model_path = '/Users/kelvinlui/Github/wmla-assets/dli-learning-path/datasets/pytorch-mnist-edt-model'
```
### Package model files for training
Package the updated model files into a tar file ending with `.modelDir.tar`
```
def make_tarfile(output_filename, source_dir):
    """Create a gzip-compressed tar archive of *source_dir*.

    The directory is stored under its own base name, so the archive
    unpacks as a single top-level folder.
    """
    base = os.path.basename(source_dir)
    with tarfile.open(output_filename, mode="w:gz") as archive:
        archive.add(source_dir, arcname=base)
# Package the local model directory into a `.modelDir.tar` archive in a
# temporary location, then open it for upload with the submission request.
MODEL_DIR_SUFFIX = ".modelDir.tar"
# Fix: tempfile.mktemp() is deprecated and race-prone (the name can be
# claimed by another process before use); mkstemp() atomically creates the
# file and returns an open descriptor.
fd, tempFile = tempfile.mkstemp(suffix=MODEL_DIR_SUFFIX)
os.close(fd)  # only the path is needed; make_tarfile reopens it for writing
make_tarfile(tempFile, model_path)
print(" tempFile: " + tempFile)
# Left open deliberately: requests streams this handle on the POST below.
files = {'file': open(tempFile, 'rb')}
```
### Log on
Obtain login session tokens to be used for session authentication within the RESTful API. Tokens are valid for 8 hours.
```
# Authenticate against the Conductor REST API. Subsequent calls reuse the
# same `myauth` basic-auth tuple; per the docs above, tokens last 8 hours.
# verify=False skips TLS certificate validation (self-signed lab cluster).
r = requests.get(sc_rest_url+'/auth/logon', verify=False, auth=myauth, headers=headers)
if r.ok:
    print ('\nLogon succeeded')
else:
    print('\nLogon failed with code={}, {}'. format(r.status_code, r.content))
```
### Check DL Frameworks details
Check what framework plugins are available and see example execution commands. In this demonstration we will use **edtPyTorch**
```
# List the DL framework plugins installed on the cluster (e.g. edtPyTorch),
# including example execution commands for each.
r = requests.get(dl_rest_url+'/execs/frameworks', auth=myauth, headers=headers, verify=False).json()
# Using the raw json, easier to see the examples given
print(json.dumps(r, indent=4))
```
### Arguments for API call
Equivalent of flags used if running command directly on WMLA CLI, including:
```
# Build the CLI-equivalent argument string for the training submission.
framework_name = 'edtPyTorch' # DL Framework to use, from list given above
dataset_location = 'pytorch-mnist' # relative path of your data set under $DLI_DATA_FS
local_dir_containing_your_code = 'pytorch-mnist-edt-model'
number_of_GPU = '4' # number of GPUs for elastic distribution
name_of_your_code_file = 'pytorch_mnist_EDT.py' # Main model file as opened locally above
# The trailing backslashes are line continuations *inside* the string
# literal, so the flags join into one space-separated argument string.
args = '--exec-start {} \
--cs-datastore-meta type=fs,data_path={} \
--model-dir {} \
--edt-options maxWorkers={} \
--model-main {} \
--epochs 10 \
'.format(framework_name, dataset_location, local_dir_containing_your_code, number_of_GPU, name_of_your_code_file)
print ("args: " + args)
```
### Submit Job
```
# Submit the packaged model (`files`) as a training job to the chosen
# Spectrum Conductor instance group via the deep-learning REST API.
r = requests.post(dl_rest_url+'/execs?sigName='+sig_name+'&args='+args, files=files,
                  auth=myauth, headers=headers, verify=False)
if r.ok:
    # Parse the response once; these IDs are needed later for monitoring
    # and for log/result retrieval.
    payload = r.json()
    exec_id = payload['id']
    sig_id = payload['sigId']
    driver_id = payload['submissionId']
    # Bug fix: the original printed a literal '\Driver' ('\D' is not a
    # recognized escape) — a newline before 'Driver ID' was intended.
    print ('\nModel submitted successfully \nDriver ID: {}'.format(driver_id))
    print ('Exec ID: {}'.format(exec_id))
    print ('SIG ID: {}'.format(sig_id))
else:
    print('\nModel submission failed with code={}, {}'. format(r.status_code, r.content))
```
## Monitor running job
[Back to top](#Contents)
Once the job is submitted successfully we can monitor the running job.
```
# Check status of all RUNNING jobs in SIG (rerun cell to refresh)
monitor = []
monitor_output = []
# Query the instance group for applications currently in RUNNING state.
r = requests.get(sc_rest_url+'/instances/'+sig_id+'/applications?state=RUNNING',
                 auth=myauth, headers=headers, verify=False).json()
if (len(r) == 0):
    print ('No jobs running')
else:
    # Filter out the relevant information
    # One tuple per running job: driver id/state, runtime, and GPU usage.
    monitor.append([(
        job['driver']['id'],
        job['driver']['state'],
        job['apprunduration'],
        job['gpuslots'],
        job['gpumemused']['total'],
        job['gpudevutil']['total'],
    ) for job in r])
    # Flatten the list-of-lists into one DataFrame row per job.
    monitor_output = pd.DataFrame([item for monitor in monitor for item in monitor])
    monitor_output.columns = [
        'Driver ID',
        'State',
        'Run duration (mins)',
        'GPU slots',
        'Total GPU memory used',
        'Total GPU utilsation (%) ',
    ]
    # Keep the last job's executor list; the log-retrieval cells below use it.
    for job in r:
        executors = job['executors']
# Display the summary table (notebook cell output).
monitor_output
```
## Retrieve output and saved models
[Back to top](#Contents)
After the job completes then we can retrieve the output, logs and saved models.
### Output: Retrieve Training Metric
```
# Fetch the raw training log for this execution; the API returns one string.
print ("exec_id : " + str(exec_id))
r = requests.get(dl_rest_url+'/execs/'+exec_id+'/log', auth=myauth, headers=headers, verify=False).json()
from io import StringIO
cols = ['timestamp','global_steps','iteration','loss','accuracy']
# Turning ':' into ',' makes each log line comma-separated; fields 4, 6, 8,
# 10 and 12 then hold the metric values named in `cols`.
final_data = pd.read_csv(StringIO(r.replace(':',',')),
                         usecols=[4,6,8,10,12],
                         names=cols)
# Millisecond epoch timestamps -> pandas datetimes, kept in a new column.
final_data['timestamp2'] = final_data.timestamp.apply(pd.to_datetime, unit='ms')
final_data.head(10)
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
# plt.rcParams['figure.figsize'] = [24, 8.0]
import seaborn as sns
# 2x2 dashboard: steps-over-time, loss, accuracy, and a text panel of IDs.
fig, axes = plt.subplots(2, 2, figsize = (15,10))
sns.lineplot(final_data.timestamp2,final_data.global_steps, color="g", ax=axes[0,0])
axes[0,0].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
axes[0,0].set_xlabel("Time")
sns.lineplot(final_data.global_steps,final_data.loss, color="r", ax=axes[0,1])
sns.lineplot(final_data.global_steps,final_data.accuracy, color="b", ax=axes[1,0])
# Bottom-right panel shows run metadata as text only — hide its axes.
axes[1,1].axes.get_xaxis().set_visible(False)
axes[1,1].axes.get_yaxis().set_visible(False)
axes[1,1].text(0.05, 0.8, 'ID: '+exec_id, size=14)
axes[1,1].text(0.05, 0.65, 'SIG: '+sig_name, size=14)
#axes[1,1].text(0.05, 0.55, 'Status: '+status, size=14)
plt.show()
```
### Save Model
```
# Get model from training job - downloads zip file (with progress bar) of saved model to directory local to this notebook
# (note that you need to save model in your code using the environment variable for location)
import requests, zipfile, io
from tqdm.notebook import tqdm
#r = requests.get(dl_rest_url+'/execs/'+exec_id+'/log', auth=myauth, headers=headers, verify=False).json()
# Stream the training result archive rather than loading it all into memory.
r = requests.get(dl_rest_url+'/execs/'+exec_id+'/result', auth=myauth, headers=headers,stream=True, verify=False)
print ("r: ", str(r))
# The server advertises the payload size in the Content-Disposition header
# as '...size=<bytes>'; used only to scale the progress bar.
total_size = int(r.headers.get('Content-Disposition').split('size=')[1])
block_size = 1024 #1 Kibibyte
t=tqdm(total=total_size, unit='iB', unit_scale=True)
# Write the zip to disk chunk by chunk, updating the progress bar per chunk.
with open('model.zip', 'wb') as f:
    for data in r.iter_content(block_size):
        t.update(len(data))
        f.write(data)
t.close()
```
## Debugging any issues
[Back to top](#Contents)
In the case where you have issues during the process detailed above, there are a number of detailed logs that you can view to understand what is happening on the WMLA cluster.
WMLA leverages Spark architecture for distributing Deep Learning/Machine Learning jobs. In Spark, when an item of processing has to be done, there is a “driver” process that is in charge of taking the user’s code and converting it into a set of multiple tasks. There are also “executor” processes, each operating on a separate node in the cluster, that are in charge of running the tasks, as delegated by the driver.
You can monitor Deep Learning/Machine Learning application activity, performance and resource usage in Driver Log & Executor Log.
- Driver Log captures issues related to dependencies and environment variable, for example, missing dataset or invalid execution parameter flags.
- Executor Log records Deep Learning/Machine Learning training process.
### Retrieve Training Driver Stdout Log
```
# Get Spectrum Conductor logs for training run - shows various information including environment variables
# Download the driver's stdout log as raw bytes (octet-stream) and print it.
r = requests.get(sc_rest_url+'/instances/'+sig_id+'/applications/'+driver_id+'/logs/stdout/download',
                 auth=myauth, headers={'Accept': 'application/octet-stream'}, verify=False)
print(r.text)
```
Note that the output from the training can be found in the `$DLI_WORK_DIR` referenced in this log, in the directory *under* `batchworkdir`. The structure of the files contained in this directory (which you will access via the API) are the following.
```
$ tree -h
.
├── [ 6] checkpoint
├── [ 247] log
│ ├── [ 6] 0-97eb84d4-6e4b-4bb7-95e0-fc7bfda461dc.<wmla_server>
│ ├── [ 6] 1-a111dd6d-c406-48f2-89ce-1ef526d5b34b.<wmla_server>
│ └── [ 50] driver-20200227104231-0007-3655c5b5-5d81-43ac-a8c6-c243635f60df.<wmla_server>
│ ├── [ 19K] evaluation-metrics.txt
│ └── [7.1K] stdout
├── [ 19] model
│ └── [ 214] train
│ ├── [4.7K] model_epoch_10_optimizer_state.pth
│ ├── [ 43M] model_epoch_10.pth
│ ├── [4.7K] model_epoch_5_optimizer_state.pth
│ ├── [ 43M] model_epoch_5.pth
│ ├── [4.7K] model_epoch_final_optimizer_state.pth
│ └── [ 43M] model_epoch_final.pth
├── [ 25] _submitted_code
│ └── [ 133] pytorch_edt
│ ├── [1.6K] edtcallback.py
│ ├── [2.0K] elog.py
│ ├── [4.1K] emetrics.py
│ ├── [ 67] __pycache__
│ │ ├── [2.0K] edtcallback.cpython-36.pyc
│ │ └── [2.4K] elog.cpython-36.pyc
│ ├── [3.1K] pytorch_mnist_EDT.py
│ └── [4.4K] pytorch_mnist.py
└── [2.5K] val_dict_list.json
```
### Retrieve Training Driver Stderr Log
```
# shows various information including environment variables
# Same endpoint as the stdout cell above, but for the driver's stderr stream.
r = requests.get(sc_rest_url+'/instances/'+sig_id+'/applications/'+driver_id+'/logs/stderr/download',
                 auth=myauth, headers={'Accept': 'application/octet-stream'}, verify=False)
print(r.text)
```
### Retrieve Training Executor Log
#### Retrieve Executor ID
- The deep learning training log per GPU is written in executor log
- Execute following code to retrieve list of executor ID
```
# `executors` was captured in the monitoring cell; each entry's 'id' is the
# executor identifier used to fetch per-GPU logs below.
for key in executors:
    print ('executors: ' + key['id'])
```
### Retrieve Executor Stdout log
- set the parameter executor_id
```
# Pick one executor id from the list printed above, then download its stdout.
executor_id = '1-a5c6d934-df3d-4b3e-8a9d-87ee8d168910'
r = requests.get(sc_rest_url+'/instances/'+sig_id+'/applications/'+driver_id +'/'+executor_id+'/logs/stdout/download',
                 auth=myauth, headers={'Accept': 'application/octet-stream'}, verify=False)
print(r.text)
```
### Retrieve Executor Stderr log
- set the parameter executor_id
```
# Same as the stdout cell above, but for the executor's stderr stream.
executor_id = '1-a5c6d934-df3d-4b3e-8a9d-87ee8d168910'
r = requests.get(sc_rest_url+'/instances/'+sig_id+'/applications/'+driver_id +'/'+executor_id+'/logs/stderr/download',
                 auth=myauth, headers={'Accept': 'application/octet-stream'}, verify=False)
print(r.text)
```
## Further information and useful links
[Back to top](#Contents)
**WMLA Introductory videos:**
- WMLA overview video (6 mins): http://ibm.biz/WMLA-video
- WMLA getting started (3 mins): http://ibm.biz/WMLA-start
- Overview of adapting your code for Elastic Distributed Training via API: [video](https://youtu.be/RnZtYNX6meM) | [PDF](docs/wmla_api_pieces.pdf) (screenshot below)
**Further WMLA information & documentation**
- [Power Developer Portal (WMLCE frameworks information)](https://developer.ibm.com/linuxonpower/deep-learning-powerai/releases/)
- WMLA tutorials: http://ibm.biz/WMLA-blog
- Knowledge Centre links:
- [IBM Watson Machine Learning Accelerator](https://www.ibm.com/support/knowledgecenter/SSFHA8)
- [IBM Spectrum Conductor](https://www.ibm.com/support/knowledgecenter/en/SSZU2E/product_welcome_conductorspark.html)
- [IBM Spectrum Conductor Deep Learning Impact](https://www.ibm.com/support/knowledgecenter/SSWQ2D)
**Further Power Systems information**
- [AI on Power Systems Redbook (PDF)](https://www.redbooks.ibm.com/redbooks/pdfs/sg248435.pdf) (deep dive technical information)
## Appendix
[Back to top](#Contents)
#### This notebook requires Watson ML Accelerator 1.2.1 + Interim Fix 527174 & 536919 to run.
For details please refer to following link:
https://www.ibm.com/support/knowledgecenter/en/SSFHA8_1.2.1/wmla_fix_pack.html
#### This is version 1.0 and its content is copyright of IBM. All rights reserved.
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/Image/Hillshade.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/Hillshade.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/Hillshade.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess

try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

# Checks whether this notebook is running on Google Colab
try:
    import google.colab
    import geemap.eefolium as emap
except ImportError:
    # Fix: was a bare `except:`, which also swallows KeyboardInterrupt and
    # SystemExit; only a failed import means "not on Colab".
    import geemap as emap

# Authenticates and initializes Earth Engine
import ee

try:
    ee.Initialize()
except Exception:
    # First use (or expired credentials): run interactive auth, then retry.
    ee.Authenticate()
    ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
# Interactive map centered on the continental US; `emap` is either the
# ipyleaflet or the folium backend chosen in the setup cell above.
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
import math

# Hillshade example. This is a demonstration of computing
# a hillshade from terrain data and displaying multiple
# layers based on multiple view geometries. Hillshade
# creation is also provided by ee.Terrain.hillshade().


def radians(img):
    """Convert an angle image from degrees to radians."""
    return img.toFloat().multiply(math.pi).divide(180)


def hillshade(az, ze, slope, aspect):
    """Compute a hillshade image for the given sun azimuth/zenith (degrees)
    and per-pixel slope/aspect images (radians)."""
    # Convert angles to radians.
    azimuth = radians(ee.Image(az))
    zenith = radians(ee.Image(ze))
    # Note that methods on images are needed to do the computation.
    # i.e. JavaScript operators (e.g. +, -, /, *) do not work on images.
    # The following implements:
    # Hillshade = cos(Azimuth - Aspect) * sin(Slope) * sin(Zenith) + \
    #     cos(Zenith) * cos(Slope)
    return azimuth.subtract(aspect).cos() \
        .multiply(slope.sin()) \
        .multiply(zenith.sin()) \
        .add(
            zenith.cos().multiply(slope.cos()))


# Compute terrain measures from the SRTM DEM.
terrain = ee.Algorithms.Terrain(ee.Image('CGIAR/SRTM90_V4'))
slope = radians(terrain.select('slope'))
aspect = radians(terrain.select('aspect'))

# For loops are needed for control-flow operations on client-side
# operations. Here Map.addLayer() is a client operation that needs
# to be performed in a for loop. In general, avoid for loops
# for any server-side operation.
Map.setCenter(-121.767, 46.852, 11)
for i in range(0, 360, 60):
    # Bug fix: `i + ' deg'` raised TypeError (int + str); format the label
    # explicitly instead.
    Map.addLayer(hillshade(i, 60, slope, aspect), {}, '{} deg'.format(i))
```
## Display Earth Engine data layers
```
# Add the layer toggle widget (folium backend only) and display the map.
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
| github_jupyter |
<a href="https://colab.research.google.com/github/GiselaCS/Mujeres_Digitales/blob/main/Clase6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
```
**CONTINUACION DE LOS EJEMPLOS DEL USO DE LISTA DE COMPRENSION**
---
*COMPRENSION DE CONJUNTOS*
La comprension es muy similar a la que veniamos manejando con Lista. La unica diferencia es que se usan los {}
Recordemos que los conjuntos tambien denonimados "sets", no permiten duplicados, es decir, cada elemento es unico. si se llega añadir un elemento repetido, no arroja error,
```
sa = "ema ama a mama"
# Collect every character of `sa` equal to "m" into a set; sets deduplicate,
# so the result holds at most one element.
sa2 = set(ch for ch in sa if ch == "m")
print(sa2)
```
Ahora, veamos a continuacion el caso de diccionarios de comprensión.
la unica diferencia que tiene respecto a las listas o conjuntos de compresion es la especificacon de llaves, o claves(key).
```
# Keys of the dictionary to build.
lista=['nombre', "apellido", "ciudad"]
# Values, paired positionally with the keys above.
lista2=["ana", "Rodiguez", "popayan"]
# zip() iterates both lists in parallel; the comprehension maps key i -> value j.
# Fix: the original named the result `dict`, shadowing the builtin `dict`
# type for the rest of the session; renamed to avoid that.
datos = {i:j for i, j in zip(lista,lista2)}
print(datos)
```
Pudimos observar que añadiendo :, asignamos un valor a la llave declarada, a su vez, usamos zip() el cual tiene, como funcion iterar dos listas en paralelo.
es decir como zip podemos convertir dos listas en un diccionario y el for se manejaria iterando los valores en simultaneo.
**OTRAS ESTRUCTURAS DE CONTROL**
---
1. Switch
2. Break
3. Continue
4. Complemento de iteracion (Enumerate)
**1. Switch**
esta es una herramienta, que permite ejecutar diferentes secciones de codigo dependiendo de las condiciones impuestas. Su funcion es muy similar a usar If anidados o varios por separados al mismo tiempo.
sin embargo en python no hay un Switch directo.
**2. break **
Nos permite alterar el comportamiento de los ciclos de while y for, directamente su funcion es la de finalizar los ciclos.
```
# `break` exits the enclosing loop immediately: letters before the first
# 'e' are printed, then the loop stops.
x="Andrea"
for i in x:
    if i == "e":
        print("se obtuvo la letra e")
        break
    print(i)

# `break` is also the only exit from this otherwise infinite while-loop:
# it counts x down from 14 and stops when x reaches 0.
x = 15
while True:
    x -=1
    print(x)
    if x==0:
        break
print("Se finaliza el ciclo")
```
```
# Using break with nested loops.
for i in range(1,8):
    for j in range(1,8):
        break
    print(i,j)
# The break only terminates the inner loop; the outer loop keeps running,
# so j is always 1 when printed.
```
** 2. Continue**
---
El continue se salta todo el código restante en la iteración actual y vuelve al principio en caso que queden elementos por iterar.
Diferencias entre break y continue: La principal diferencia es que el continue NO rompe el ciclo si no que pasa a la iteración pendiente.
```
# `continue` skips the rest of the current iteration: every letter of
# "Andrea" is printed except 'r'.
x= "Andrea"
for i in x:
    if i=="r":
        continue
    print(i)

# Counts down from 14 to 0, skipping the print when x == 3.
x= 15
while x>0:
    x -= 1
    if x==3:
        continue
    print(x)
```
** Iterar con enumerate**
---
python nos permite iterar colecciones de objetos y elementos de la misma forma.
sin embargo hay una opcion que es que en algunas situaciones que queramos acceder a un elemento ordenado podemos hacerlo, nombrando el indice de cada elemento llamado.
```
# Manual index tracking: print each element with its position.
w=["sa", "se", "si", "so", "su"]
indice =0
for i in w:
    print(indice, i)
    indice +=1

# enumerate() yields (index, element) pairs directly.
w=["sa", "se", "si", "so", "su"]
for indice, i in enumerate(w):
    # Bug fix: the original printed the literal 1 instead of the element i,
    # defeating the demonstration.
    print(indice, i)

# enumerate() can also be materialized as a list of (index, element) tuples.
x= list(enumerate(w))
print(x)
```
**Uso zip()**
---
si pasamos dos listas a zip, el resultado sera una tupla donde cada elemento tendra todos y cada uno de los elementos de las entradas.
```
a=[1,2,3]
b=["a", "b", "c"]
x=("q", "w", "e")
z=(10,20,30)
# zip() pairs elements positionally; materialize with list() to inspect.
c=zip(a,b)
print(list(c))
# zip() accepts any number of iterables; unpack one tuple per iteration.
for i, j, g, m in zip(a,b,x,z):
    print("numero",i, "letras", j)
    print(i,j,g,m)
# zip() stops at the shortest input, so this loop runs only twice.
a=[1,2,3]
b=["a", "b"]
for sa,se in zip(a,b):
    print(sa,se)
# zip(*...) inverts the pairing: a list of pairs becomes two tuples,
# one per original position (unzipping).
c=[(1,"a"), (2,"b"), (3,"c")]
a,b=zip(*c)
print(a)
print(b)
```
**Funciones**
---
la funcion nos permite ordenar de manera adecuada la generacion de objetos y bloques de codigo por ciclos y demas estructura.
la sintaxis es la siguiente:
def nombre_funcion(argumento):
codigo
nombre_funcion(argumento2)
```
def f(a):
    """Return the input doubled."""
    doubled = a + a
    return doubled


y = f(3)
print(y)
```
Las funciones no sólo realizan una operación con sus entradas (argumentos de inicio), sino tambien parten de los siguientes principios:
1. Reusabilidad si tenemos un fragmento de código usado en muchos sitios, y se guardan en una función nos evitará tener códigos repetidos. Ademas de la conveniencia que tiene por ser facilmente editable.
2. Modularidad Permite legibilidad de forma mas sencilla, es decir, los largos bloques de código se pueden agrupar en modulos o funciones y la lectura va a ser mucho mas sencilla.
```
#Ejemplo 1: Función SIN argumentos de entrada ni parametros de salida:
def s():
print("hi gisela") # hasta aqui DECLARCIÓN DE LA FUNCIÓN sale chulito verde al ejecutarla
s() #Al cerrar la definición de función me va aparecer la ejecución del código
#Ejemplo 2: incluir argmentos de entrada
def s(apellido):
print("Hola gisela", apellido) #apelllido no tiene valor alguno
s("Criollo suarez") #cuando se cierrar la funcion le doy un valor de salida
#Ejemplo 3: Usando dos argumentos en la entrada y salida y operación aritmetica
def suma(a,b):
return a+b #para no hacer declaracion y creacion de objetos de variables se usa asi el return
suma(3,5)
#Ejemplo 4: Reusabilidad del código
suma(a=5, b=25) # se esta llamando la funcion suma
#Ejemplo 5: Argumentos fijos desde el inicio
def suma(a,b,c,d=1):
return a+b+c+d
suma(2,3,4)
suma(5,6,7,8)
def suma(a=2,b=3, c=4,d=5): #declarados desde el inicio
return a+b+c+d
suma()
suma(1) #lo cambia en a
suma(1,5) # cambia los valores de a y b
suma(a=1, c=5) #modifica valores a y c
#Ejemplo 6: Argumentos de longitud variable
def suma(number): #usa suma ejercicio anterior
total=0
for i in number:
total +=i
return total
suma([1,3,5,4])
```
*Nota*: Se realizó la suma de todos los números de entrada. para ello la declaración de los argumentos de salida sería como una lista.
La forma de sintaxis anterior es valida, sin embargo tenemos un solo argumento que es una lista de números.
Si declarramos un argumento con asterisco * esto hará que el argumento sea guardado como tupla de manera automática.
```
def suma(*number):
    """Sum an arbitrary count of positional arguments.

    The star packs the arguments into a tuple automatically.
    """
    print(type(number))  # always <class 'tuple'>
    return sum(number)


suma(2, 3, 5, 4)
```
| github_jupyter |
```
# Purpose: Perform Feature Selection using Mutual Information for several K-values, save the output
# Inputs: Imputed Dataset w/added Homelessness Indicators
# Outputs: Several Files named after the K-cutoff used for MI on each outcome of interest
# Machine: Laptop, Runtime 45mins x #of K-Values (1 K-Value takes ~45 minutes)
import numpy as np
import scipy.stats as sp
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import glob
import warnings
from sklearn.preprocessing import Imputer
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import mutual_info_regression
warnings.filterwarnings('ignore')
%matplotlib inline
# Predictors: mean-imputed background data with homelessness indicators,
# indexed by the challenge's observation id.
dfx = pd.read_csv('../output/data_mean_imputed_Homeless_added.csv',index_col='challengeID')
# Targets: the six training outcomes, same index.
dfy = pd.read_csv('../data/train.csv',index_col='challengeID')
outcomes = list(dfy.columns) #get the names of the outcomes
np.shape(dfx)
outcomes
# Outer join keeps every observation even if it lacks outcomes...
full = dfx.join(dfy, how='outer') #connect the background data to outcomes
# ...then restrict the training frame to rows with at least one outcome.
training = full.dropna(subset=outcomes, how='all') ##drop observations that have None of the outcomes
full_features = []  # NOTE(review): never populated or read below — kept for compatibility


def _select_top_k(frame, outcome, score_func, k):
    """Return the column names of the top-k features for one outcome.

    Drops rows missing this outcome, removes every outcome column from the
    predictors, fits SelectKBest with the given mutual-information scorer,
    then maps each selected (unnamed) column back to its original name by
    matching column values.
    """
    x = frame.dropna(subset=[outcome], how='all')  # dropna returns a copy
    y = x[outcome]
    for col_name in outcomes:
        del x[col_name]
    selected = SelectKBest(score_func, k=k).fit_transform(x, y)
    names = []
    for col in selected.T:
        names.append(x.columns[(x.values == np.asarray(col)[:, None]).all(0)].tolist()[0])
    return names


for k in [5,15,50,100,200,300,500,700,1000,1500,2000,3000,4000]:
    # Continuous outcomes are scored with mutual_info_regression.
    gpa_featuers = _select_top_k(training, 'gpa', mutual_info_regression, k)
    grit_featuers = _select_top_k(training, 'grit', mutual_info_regression, k)
    materialHardship_featuers = _select_top_k(training, 'materialHardship', mutual_info_regression, k)
    # Binary outcomes are scored with mutual_info_classif.
    eviction_featuers = _select_top_k(training, 'eviction', mutual_info_classif, k)
    layoff_featuers = _select_top_k(training, 'layoff', mutual_info_classif, k)
    jobTraining_featuers = _select_top_k(training, 'jobTraining', mutual_info_classif, k)
    # Union of the features selected for any of the six outcomes.
    final_features = list(set(jobTraining_featuers + layoff_featuers + eviction_featuers +
                              materialHardship_featuers + grit_featuers + gpa_featuers))
    selected_df = full[final_features]
    # Save one CSV per k cutoff.
    selected_df.to_csv('../output/MI/data_univariate_feature_selection_'+str(k)+'.csv')
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import preprocessing
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
# Online-shoppers-intention dataset, fetched directly from the workshop repo.
df= pd.read_csv("https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter05/Datasets/online_shoppers_intention.csv")
df.head()
df.info()
# Per-column count of missing values (sanity check before clustering).
df.isnull().sum()
```
**Activity 5.01**
```
# Administrative duration vs Bounce Rate
# Columns 1 and 6 of df: Administrative_Duration and BounceRates.
x = df.iloc[:, [1, 6]].values
x.shape
# Elbow method: fit k-means for k = 1..10 and record within-cluster
# sum of squares (inertia) for each k.
wcss = []
for i in range(1, 11):
    km = KMeans(n_clusters = i,
                init = 'k-means++',
                max_iter = 300,
                n_init = 10,
                random_state = 0,
                algorithm = 'elkan',
                tol = 0.001)
    km.fit(x)
    labels = km.labels_
    wcss.append(km.inertia_)
# Plot inertia vs k; the "elbow" suggests the cluster count to use.
plt.rcParams['figure.figsize'] = (15, 7)
plt.plot(range(1, 11), wcss)
plt.grid()
plt.tight_layout()
plt.title('The Elbow Method', fontsize = 20)
plt.xlabel('No. of Clusters')
plt.ylabel('wcss')
plt.show()
# Final model with k=2 (from the elbow), then scatter the two clusters
# and their centroids.
km = KMeans(n_clusters = 2, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_means = km.fit_predict(x)
plt.scatter(x[y_means == 0, 0], x[y_means == 0, 1], s = 100, c = 'pink', label = 'Un-interested Customers')
plt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s = 100, c = 'cyan', label = 'Target Customers')
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:, 1], s = 50, c = 'blue' , label = 'centeroid')
plt.title('Administrative Duration vs Bounce Rate', fontsize = 20)
plt.grid()
plt.xlabel('Administrative Duration')
plt.ylabel('Bounce Rates')
plt.legend()
plt.show()
# Administrative duration vs Exit Rate
# Same procedure as the previous cell, but with column 7 (ExitRates).
x = df.iloc[:, [1, 7]].values
wcss = []
for i in range(1, 11):
    km = KMeans(n_clusters = i,
                init = 'k-means++',
                max_iter = 300,
                n_init = 10,
                random_state = 0,
                algorithm = 'elkan',
                tol = 0.001)
    km.fit(x)
    labels = km.labels_
    wcss.append(km.inertia_)
# Elbow plot for this feature pair.
plt.rcParams['figure.figsize'] = (15, 7)
plt.plot(range(1, 11), wcss)
plt.grid()
plt.tight_layout()
plt.title('The Elbow Method', fontsize = 20)
plt.xlabel('No. of Clusters')
plt.ylabel('wcss')
plt.show()
# Final 2-cluster fit and scatter plot with centroids.
km = KMeans(n_clusters = 2, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_means = km.fit_predict(x)
plt.scatter(x[y_means == 0, 0], x[y_means == 0, 1], s = 100, c = 'pink', label = 'Un-interested Customers')
plt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s = 100, c = 'yellow', label = 'Target Customers')
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:, 1], s = 50, c = 'blue' , label = 'centeroid')
plt.title('Administrative Duration vs Exit Rates', fontsize = 20)
plt.grid()
plt.xlabel('Administrative Duration')
plt.ylabel('Exit Rates')
plt.legend()
plt.show()
```
| github_jupyter |
# Risk and return recap
## Introduction
### Portfolio returns during the crisis
```
# NOTE(review): `portfolio` (a prices DataFrame) and `weights` (asset-weight
# vector) come from exercise setup not shown in this file — confirm before
# running standalone.
# Select portfolio asset prices for the middle of the crisis, 2008-2009
asset_prices = portfolio.loc['2008-01-01':'2009-12-31']
# Plot portfolio's asset prices during this time
asset_prices.plot().set_ylabel("Closing Prices, USD")
plt.show()
# Compute the portfolio's daily returns
asset_returns = asset_prices.pct_change()
# Weighted sum across assets gives the single portfolio return series.
portfolio_returns = asset_returns.dot(weights)
# Plot portfolio returns
portfolio_returns.plot().set_ylabel("Daily Return, %")
plt.show()
```
### Asset covariance and portfolio volatility
```
# Generate the covariance matrix from portfolio asset's returns
covariance = asset_returns.cov()
# Annualize the covariance using 252 trading days per year
covariance = covariance * 252
# Display the covariance matrix
print(covariance)
# Compute and display portfolio volatility for 2008 - 2009
# Quadratic form w' * Sigma * w gives portfolio variance; sqrt = volatility.
portfolio_variance = np.transpose(weights) @ covariance @ weights
portfolio_volatility = np.sqrt(portfolio_variance)
print(portfolio_volatility)
# Calculate the 30-day rolling window of portfolio returns
returns_windowed = portfolio_returns.rolling(30)
# Compute the annualized volatility series
volatility_series = returns_windowed.std()*np.sqrt(252)
# Plot the portfolio volatility
volatility_series.plot().set_ylabel("Annualized Volatility, 30-day Window")
plt.show()
```
## Risk factors and the financial crisis
### Frequency resampling primer
```
# NOTE(review): `returns` is provided by exercise setup outside this file.
# Convert daily returns to quarterly average returns
returns_q = returns.resample('Q').mean()
# Examine the beginning of the quarterly series
print(returns_q.head())
# Now convert daily returns to weekly minimum returns
returns_w = returns.resample('W').min()
# Examine the beginning of the weekly series
print(returns_w.head())
```
### Visualizing risk factor correlation
```
# NOTE(review): `mort_del` (mortgage delinquency rate) and the `plot_average`
# / `plot_min` axes come from exercise setup outside this file.
# Transform the daily portfolio_returns into quarterly average returns
portfolio_q_average = portfolio_returns.resample('Q').mean().dropna()
# Create a scatterplot between delinquency and quarterly average returns
plot_average.scatter(mort_del, portfolio_q_average)
# Transform daily portfolio_returns returns into quarterly minimum returns
portfolio_q_min = portfolio_returns.resample('Q').min().dropna()
# Create a scatterplot between delinquency and quarterly minimum returns
plot_min.scatter(mort_del, portfolio_q_min)
plt.show()
```
### Least-squares factor model
```
# Three OLS factor models regress portfolio statistics on mortgage
# delinquency. add_constant inserts the intercept column; repeated calls
# are harmless because statsmodels skips adding a duplicate constant.
# Add a constant to the regression
mort_del = sm.add_constant(mort_del)
# Create the regression factor model and fit it to the data
results = sm.OLS(port_q_mean, mort_del).fit()
# Print a summary of the results
print(results.summary())
# Add a constant to the regression
mort_del = sm.add_constant(mort_del)
# Create the regression factor model and fit it to the data
results = sm.OLS(port_q_min, mort_del).fit()
# Print a summary of the results
print(results.summary())
# Add a constant to the regression
mort_del = sm.add_constant(mort_del)
# Create the regression factor model and fit it to the data
results = sm.OLS(vol_q_mean, mort_del).fit()
# Print a summary of the results
print(results.summary())
```
## Modern Portfolio Theory
### Practice with PyPortfolioOpt: returns
```
# Load the investment portfolio price data into the price variable.
prices = pd.read_csv("portfolio.csv")
# Convert the 'Date' column to a datetime index
prices['Date'] = pd.to_datetime(prices['Date'], format='%d/%m/%Y')
prices.set_index(['Date'], inplace = True)
# Import the mean_historical_return method
from pypfopt.expected_returns import mean_historical_return
# Compute the annualized average historical return
# frequency=252 annualizes assuming daily (trading-day) observations.
mean_returns = mean_historical_return(prices, frequency = 252)
# Plot the annualized average historical return
plt.plot(mean_returns, linestyle = 'None', marker = 'o')
plt.show()
```
### Practice with PyPortfolioOpt: covariance
```
# Import the CovarianceShrinkage object
from pypfopt.risk_models import CovarianceShrinkage
# Create the CovarianceShrinkage instance variable
cs = CovarianceShrinkage(prices)
# Compute the annualized sample covariance matrix of returns
sample_cov = prices.pct_change().cov() * 252
# Compute the efficient (Ledoit-Wolf shrinkage) covariance matrix of returns
e_cov = cs.ledoit_wolf()
# Display both the sample covariance_matrix and the efficient e_cov estimate
print("Sample Covariance Matrix\n", sample_cov, "\n")
print("Efficient Covariance Matrix\n", e_cov, "\n")
```
### Breaking down the financial crisis
```
# Dictionary of time periods ('epochs') bracketing the 2007-2008 crisis
epochs = { 'before' : {'start': '1-1-2005', 'end': '31-12-2006'},
           'during' : {'start': '1-1-2007', 'end': '31-12-2008'},
           'after' : {'start': '1-1-2009', 'end': '31-12-2010'}
         }
# For each epoch, slice the price history and compute its Ledoit-Wolf
# shrinkage ("efficient") covariance estimate
e_cov = {period: CovarianceShrinkage(prices.loc[bounds['start']:bounds['end']]).ledoit_wolf()
         for period, bounds in epochs.items()}
# Display the efficient covariance matrices for all epochs
print("Efficient Covariance Matrices\n", e_cov)
```
### The efficient frontier and the financial crisis
```
# Initialize the Critical Line Algorithm object (pypfopt CLA)
efficient_portfolio_during = CLA(returns_during, ecov_during)
# Find the minimum volatility portfolio weights and display them
print(efficient_portfolio_during.min_volatility())
# Compute the efficient frontier (expected returns, volatilities, weights)
(ret, vol, weights) = efficient_portfolio_during.efficient_frontier()
# Add the frontier to the plot showing the 'before' and 'after' frontiers
plt.scatter(vol, ret, s = 4, c = 'g', marker = '.', label = 'During')
plt.legend()
plt.show()
```
# Goal-oriented risk management
## Measuring Risk
### VaR for the Normal distribution
Value at Risk (VaR) will be applied to a normal distribution.
In this exercise we assume the loss is normally distributed.
```
# Create the VaR measure at the 95% confidence level using norm.ppf() - percent point function
VaR_95 = norm.ppf(0.95)
# Create the VaR measure at the 99% confidence level as the empirical 0.99
# quantile of simulated standard-normal draws (numpy.quantile)
draws = norm.rvs(size = 100000)
VaR_99 = np.quantile(draws, 0.99)
# Compare the 95% and 99% VaR
print("95% VaR: ", VaR_95, "; 99% VaR: ", VaR_99)
# Plot the normal distribution histogram and 95% VaR measure
plt.hist(draws, bins = 100)
plt.axvline(x = VaR_95, c='r', label = "VaR at 95% Confidence Level")
plt.legend(); plt.show()
```
### Comparing CVaR and VaR
The conditional value at risk (CVaR), or expected shortfall (ES), asks what the average loss will be, conditional upon losses exceeding some threshold at a certain confidence level. It uses VaR as a point of departure, but contains more information because it takes into consideration the tail of the loss distribution.
```
# Compute the mean and standard deviation of the portfolio losses
pm = portfolio_losses.mean()
ps = portfolio_losses.std()
# Compute the 95% VaR using the .ppf() - percent point function
VaR_95 = norm.ppf(0.95, loc = pm, scale = ps)
# Compute the expected tail loss in the worst 5% of cases (losses beyond VaR)
tail_loss = norm.expect(lambda x: x, loc = pm, scale = ps, lb = VaR_95)
# CVaR is the partial expectation of the tail normalized by the tail
# probability. BUG FIX: the original multiplied by VaR_95 (leaving tail_loss
# unused); the CVaR formula requires tail_loss here.
CVaR_95 = (1 / (1 - 0.95)) * tail_loss
# Plot the normal distribution histogram and add lines for the VaR and CVaR
plt.hist(norm.rvs(size = 100000, loc = pm, scale = ps), bins = 100)
plt.axvline(x = VaR_95, c='r', label = "VaR, 95% confidence level")
plt.axvline(x = CVaR_95, c='g', label = "CVaR, worst 5% of outcomes")
plt.legend(); plt.show()
```
Although VaR and CVaR are similar (and only one letter apart!), it's generally the case that CVaR is the preferred risk measure for risk management. One reason is that it is affected by the tail of the loss distribution, while VaR is a static value.
## Risk exposure and loss
Risk exposure = probability of loss x loss measure
### VaR and risk exposure
```
# Import the Student's t-distribution
from scipy.stats import t
# Create rolling 30-day window parameter list: (df=29, rolling mean, rolling std)
mu = losses.rolling(30).mean()
sigma = losses.rolling(30).std()
# NOTE(review): mu[i] indexes by label; assumes `losses` has a 0..n-1 integer
# index so positions align with enumerate(sigma) -- confirm against the data.
rolling_parameters = [(29, mu[i], s) for i,s in enumerate(sigma)]
# Compute the 99% VaR array using the rolling window parameters
VaR_99 = np.array( [ t.ppf(0.99, *params)
                     for params in rolling_parameters ] )
# Plot the minimum risk exposure over the 2005-2010 time period
# (1% loss probability x VaR_99 loss fraction x $100,000 portfolio value)
plt.plot(losses.index, 0.01 * VaR_99 * 100000)
plt.show()
```
### CVaR and risk exposure
```
# Fit the Student's t distribution to crisis losses (returns df, loc, scale)
p = t.fit(crisis_losses)
# Compute the VaR_99 for the fitted distribution
VaR_99 = t.ppf(0.99, *p)
# Use the fitted parameters and VaR_99 to compute CVaR_99
# (partial expectation of the tail beyond VaR, normalized by the 1% tail mass)
tail_loss = t.expect(lambda y: y, args = (p[0],), loc = p[1], scale = p[2], lb = VaR_99 )
CVaR_99 = (1 / (1 - 0.99)) * tail_loss
print(CVaR_99)
```
## Risk management using VaR & CVaR
### VaR from a fitted distribution
```
# NOTE(review): `fitted` appears to be a kernel density estimate (has
# .evaluate and .resample) provided by an earlier cell -- confirm.
# Visualize the fitted distribution with a plot
x = np.linspace(-0.25,0.25,1000)
plt.plot(x,fitted.evaluate(x))
plt.show()
# Create a random sample of 100,000 observations from the fitted distribution
sample = fitted.resample(100000)
# Compute and display the 95% VaR from the random sample
VaR_95 = np.quantile(sample, 0.95)
print(VaR_95)
```
### Minimizing CVaR
```
# NOTE(review): negative_cvar / custom_objective belong to an older
# PyPortfolioOpt API; newer releases use EfficientCVaR instead -- confirm version.
# Import the EfficientFrontier class
from pypfopt.efficient_frontier import EfficientFrontier
# Import the negative_cvar objective function
from pypfopt.objective_functions import negative_cvar
# Create the efficient frontier instance (no expected returns needed for CVaR)
ef = EfficientFrontier(None, e_cov)
# Find the cVar-minimizing portfolio weights at the default 95% confidence level
optimal_weights = ef.custom_objective(negative_cvar, returns)
# Display the optimal weights
print(optimal_weights)
```
### CVaR risk management and the crisis
```
# Initialize the efficient portfolio dictionary
ef_dict = {}
# For each epoch, assign an efficient frontier instance to ef
for x in ['before', 'during', 'after']:
    ef_dict[x] = EfficientFrontier(None, e_cov_dict[x])
# Initialize the dictionary of optimal weights
optimal_weights_dict = {}
# Find and display the CVaR-minimizing portfolio weights at the default 95% confidence level
for x in ['before', 'during', 'after']:
    optimal_weights_dict[x] = ef_dict[x].custom_objective(negative_cvar, returns_dict[x])
# Compare the CVaR-minimizing weights to the minimum volatility weights for the 'before' epoch
print("CVaR:\n", pd.DataFrame.from_dict(optimal_weights_dict['before']), "\n")
print("Min Vol:\n", pd.DataFrame.from_dict(min_vol_dict['before']), "\n")
```
## Portfolio hedging: offsetting risk
### Black-Scholes options pricing
https://assets.datacamp.com/production/repositories/5157/datasets/f275319f10cf6bd59d01a98eb8d960178eedc945/black_scholes.py
Arguments:
S -- the current spot price of the underlying stock
X -- the option strike price
T -- the time until maturity (in fractions of a year)
r -- the risk-free interest rate
sigma -- the returns volatility of the underlying stock
option_type -- the option type, either 'call' or 'put'
```
# Compute the volatility as the annualized standard deviation of IBM returns
sigma = np.sqrt(252) * IBM_returns.std()
# Compute the Black-Scholes option price for this volatility
value_s = black_scholes(S = 90, X = 80, T = 0.5, r = 0.02,
                        sigma = sigma, option_type = "call")
# Compute the Black-Scholes option price for twice the volatility
value_2s = black_scholes(S = 90, X = 80, T = 0.5, r = 0.02,
                         sigma = 2*sigma, option_type = "call")
# Display and compare both values (higher volatility => higher option value)
print("Option value for sigma: ", value_s, "\n",
      "Option value for 2 * sigma: ", value_2s)
```
### Options pricing and the underlying asset
```
# NOTE(review): `option_axis` is a matplotlib axis supplied by the exercise
# environment; `sigma` comes from the previous cell.
# Select the first 100 observations of IBM data
IBM_spot = IBM[:100]
# Initialize the European put option values array
option_values = np.zeros(IBM_spot.size)
# Iterate through IBM's spot price and compute the option values
for i,S in enumerate(IBM_spot.values):
    option_values[i] = black_scholes(S = S, X = 140, T = 0.5, r = 0.02,
                                     sigma = sigma, option_type = "put")
# Display the option values array
option_axis.plot(option_values, color = "red", label = "Put Option")
option_axis.legend(loc = "upper left")
plt.show()
```
### Using options for hedging
```
# Compute the annualized standard deviation of `IBM` returns
sigma = np.sqrt(252) * IBM_returns.std()
# Compute the Black-Scholes value at IBM spot price 70
value = black_scholes(S = 70, X = 80, T = 0.5, r = 0.02,
                      sigma = sigma, option_type = "put")
# Find the delta of the option at IBM spot price 70
delta = bs_delta(S = 70, X = 80, T = 0.5, r = 0.02,
                 sigma = sigma, option_type = "put")
# Find the option value change when the price of IBM falls to 69.5
value_change = black_scholes(S = 69.5, X = 80, T = 0.5, r = 0.02,
                             sigma = sigma, option_type = "put") - value
# Delta-hedge check: the stock move plus (1/delta) option moves should offset,
# so the sum is close to zero
print( (69.5 - 70) + (1/delta) * value_change )
```
# Estimating and identifying risk
## Parametric Estimation
### Parameter estimation: Normal
Test with the scipy.stats.anderson Anderson-Darling test. If the test result is statistically different from zero, this indicates the data is not Normally distributed.
```
# Import the Normal distribution and the Anderson-Darling test from scipy.stats
from scipy.stats import norm, anderson
# Fit portfolio losses to the Normal distribution (returns loc, scale)
params = norm.fit(losses)
# Compute the 95% VaR from the fitted distribution, using parameter estimates
VaR_95 = norm.ppf(0.95, *params)
print("VaR_95, Normal distribution: ", VaR_95)
# Test the data for Normality
print("Anderson-Darling test result: ", anderson(losses))
```
### Parameter estimation: Skewed Normal
```
# Import the skew-normal distribution and skewness test from scipy.stats
from scipy.stats import skewnorm, skewtest
# Test the data for skewness
print("Skewtest result: ", skewtest(losses))
# Fit the portfolio loss data to the skew-normal distribution
params = skewnorm.fit(losses)
# Compute the 95% VaR from the fitted distribution, using parameter estimates
VaR_95 = skewnorm.ppf(0.95, *params)
print("VaR_95 from skew-normal: ", VaR_95)
```
## Historical and Monte Carlo Simulation
## Historical Simulation
Historical simulation of VaR assumes that the distribution of historical losses is the same as the distribution of future losses. We'll test if this is true.
```
# NOTE(review): `asset_returns` is a list of two sub-period return arrays and
# `weights` the portfolio weights, both defined in earlier cells.
# Create portfolio returns for the two sub-periods using the list of asset returns
portfolio_returns = np.array([ x.dot(weights) for x in asset_returns])
# Derive portfolio losses from portfolio returns
losses = - portfolio_returns
# Find the historical simulated VaR estimates (0.95 quantile of each loss series)
VaR_95 = [np.quantile(x, 0.95) for x in losses]
# Display the VaR estimates
print("VaR_95, 2005-2006: ", VaR_95[0], '; VaR_95, 2007-2009: ', VaR_95[1])
```
### Monte Carlo Simulation
```
# Initialize daily cumulative loss for the 4 assets, across N runs
daily_loss = np.zeros((4,N))
# Create the Monte Carlo simulations for N runs
for n in range(N):
    # Compute simulated path of length total_steps for correlated returns
    # NOTE(review): multiplying by e_cov follows the course exercise as given;
    # the usual construction uses a Cholesky factor of the covariance -- confirm
    # what e_cov holds here.
    correlated_randomness = e_cov @ norm.rvs(size = (4,total_steps))
    # Adjust simulated path by total_steps and mean of portfolio losses
    steps = 1/total_steps
    minute_losses = mu * steps + correlated_randomness * np.sqrt(steps)
    daily_loss[:, n] = minute_losses.sum(axis=1)
# Generate the 95% VaR estimate from the portfolio-weighted simulated losses
losses = weights @ daily_loss
print("Monte Carlo VaR_95 estimate: ", np.quantile(losses, 0.95))
```
## Structural breaks
Occurs when the distribution parameters change due to an event over time.
### Crisis structural break: I
```
# Visual check for a structural break: minimum returns and volatility series
# Create a plot of quarterly minimum portfolio returns
plt.plot(port_q_min, label="Quarterly minimum return")
# Create a plot of quarterly mean volatility
plt.plot(vol_q_mean, label="Quarterly mean volatility")
# Create legend and plot
plt.legend()
plt.show()
```
### Crisis structural break: II
```
# Import the statsmodels API to be able to run regressions
import statsmodels.api as sm
# Add a constant to the regression
mort_del = sm.add_constant(mort_del)
# Regress quarterly minimum portfolio returns against mortgage delinquencies
result = sm.OLS(port_q_min, mort_del).fit()
# Retrieve the sum-of-squared residuals (restricted model, for the Chow test)
ssr_total = result.ssr
print("Sum-of-squared residuals, 2005-2010: ", ssr_total)
```
### Crisis structural break: III
```
# Add intercept constants to each sub-period 'before' and 'after'
before_with_intercept = sm.add_constant(before['mort_del'])
after_with_intercept = sm.add_constant(after['mort_del'])
# Fit OLS regressions to each sub-period
r_b = sm.OLS(before['returns'], before_with_intercept).fit()
r_a = sm.OLS(after['returns'], after_with_intercept).fit()
# Get sum-of-squared residuals for both regressions
ssr_before = r_b.ssr
ssr_after = r_a.ssr
# Compute and display the Chow test statistic:
# 2 restrictions in the numerator; 24 observations minus 2x2 parameters
# in the denominator degrees of freedom
numerator = ((ssr_total - (ssr_before + ssr_after)) / 2)
denominator = ((ssr_before + ssr_after) / (24 - 4))
print("Chow test statistic: ", numerator / denominator)
```
## Volatility and extreme values
### Volatility and structural breaks
```
# Find the time series of returns with and without Citibank
ret_with_citi = prices_with_citi.pct_change().dot(weights_with_citi)
ret_without_citi = prices_without_citi.pct_change().dot(weights_without_citi)
# Find the average 30-day rolling window volatility as the standard deviation
vol_with_citi = ret_with_citi.rolling(30).std().dropna().rename("With Citi")
vol_without_citi = ret_without_citi.rolling(30).std().dropna().rename("Without Citi")
# Combine two volatilities into one Pandas DataFrame
vol = pd.concat([vol_with_citi, vol_without_citi], axis=1)
# Plot volatilities over time
# NOTE(review): the y-axis label says "Losses" but the series plotted are
# rolling volatilities -- label kept as in the original exercise.
vol.plot().set_ylabel("Losses")
plt.show()
```
### Extreme values and backtesting
```
# Compute the 95% VaR on 2009-2010 losses
VaR_95 = np.quantile(estimate_data, 0.95)
# Find backtest_data exceeding the 95% VaR
extreme_values = backtest_data[backtest_data > VaR_95]
# Compare the fraction of extreme values for 2007-2008 to the Var_95 estimate
# (a well-calibrated 95% VaR should be exceeded ~5% of the time)
print("VaR_95: ", VaR_95, "; Backtest: ", len(extreme_values) / len(backtest_data) )
# Plot the extreme values and look for clustering
plt.stem(extreme_values.index, extreme_values)
plt.ylabel("Extreme values > VaR_95"); plt.xlabel("Date")
plt.show()
```
# Advanced risk management
## Extreme value theory
### Block maxima
```
# NOTE(review): `axis_1` and `axis_2` are matplotlib axes supplied by the
# exercise environment.
# Resample the data into weekly blocks and take the block maximum
weekly_maxima = losses.resample("W").max()
# Plot the resulting weekly maxima
axis_1.plot(weekly_maxima, label = "Weekly Maxima")
axis_1.legend()
plt.figure("weekly")
plt.show()
# Resample the data into monthly blocks and take the block maximum
monthly_maxima = losses.resample("M").max()
# Plot the resulting monthly maxima
axis_2.plot(monthly_maxima, label = "Monthly Maxima")
axis_2.legend()
plt.figure("monthly")
plt.show()
```
### Extreme events during the crisis
You can use the Generalized Extreme Value (GEV) distribution to examine extreme values in the losses of General Electric (GE) during the financial crisis in 2008 and 2009.
```
# Plot the log daily losses of GE over the period 2007-2009
losses.plot()
# Find all daily losses greater than 10%
extreme_losses = losses[losses > 0.1]
# Scatter plot the extreme losses
extreme_losses.plot(style='o')
plt.show()
# Fit the Generalized Extreme Value distribution to weekly maximum of losses
fitted = genextreme.fit(weekly_max)
# Plot extreme distribution PDF over the weekly max losses histogram
x = np.linspace(min(weekly_max), max(weekly_max), 100)
plt.plot(x, genextreme.pdf(x, *fitted))
plt.hist(weekly_max, 50, density = True, alpha = 0.3)
plt.show()
```
### GEV risk estimation
```
# Compute the weekly block maxima for GE's stock
weekly_maxima = losses.resample("W").max()
# Fit the GEV distribution to the maxima (returns shape, loc, scale)
p = genextreme.fit(weekly_maxima)
# Compute the 99% VaR (needed for the CVaR computation)
VaR_99 = genextreme.ppf(0.99, *p)
# Compute the 99% CVaR estimate: tail expectation beyond VaR_99,
# normalized by the 1% tail probability
CVaR_99 = (1 / (1 - 0.99)) * genextreme.expect(lambda x: x,
                                               args=(p[0],), loc = p[1], scale = p[2], lb = VaR_99)
# Display the covering loss amount for a $1,000,000 position
print("Reserve amount: ", 1000000 * CVaR_99)
```
## Kernel density estimation
Risk factors can be assumed (Normal, T, etc.), fitted (parametrically or via Monte Carlo), or ignored (historical simulation).
Non-parametric estimation smooths (filters) the data to better fit a distribution.
A histogram can be cut into pieces and a distribution may be fitted to each slice (kernel). The Gaussian kernel is the most common.
### KDE of a loss distribution
```
# NOTE(review): `axis` is a matplotlib axis supplied by the exercise environment.
# Generate a fitted T distribution over losses
params = t.fit(losses)
# Generate a Gaussian kernel density estimate over losses
kde = gaussian_kde(losses)
# Add the PDFs of both estimates to a histogram, and display
loss_range = np.linspace(np.min(losses), np.max(losses), 1000)
axis.plot(loss_range, t.pdf(loss_range, *params), label = 'T distribution')
axis.plot(loss_range, kde.pdf(loss_range), label = 'Gaussian KDE')
plt.legend(); plt.show()
```
### CVaR and loss cover selection
```
# Find the VaR as the 0.99 quantile of random samples from the distributions
VaR_99_T = np.quantile(t.rvs(size=1000, *p), 0.99)
VaR_99_KDE = np.quantile(kde.resample(size=1000), 0.99)
# Find the expected tail losses, with lower bounds given by the VaR measures
integral_T = t.expect(lambda x: x, args = (p[0],), loc = p[1], scale = p[2], lb = VaR_99_T)
# NOTE(review): scipy's gaussian_kde exposes no `expect` method in current
# releases; this relies on the exercise environment providing it -- confirm.
integral_KDE = kde.expect(lambda x: x, lb = VaR_99_KDE)
# Create the 99% CVaR estimates (tail expectation / 1% tail probability)
CVaR_99_T = (1 / (1 - 0.99)) * integral_T
CVaR_99_KDE = (1 / (1 - 0.99)) * integral_KDE
# Display the results
print("99% CVaR for T: ", CVaR_99_T, "; 99% CVaR for KDE: ", CVaR_99_KDE)
```
## Neural network risk management
### Single layer neural networks
To become comfortable using neural networks it will be helpful to start with a simple approximation of a function.
You'll train a neural network to approximate a mapping between an input, x, and an output, y. They are related by the square root function.
```
# NOTE(review): `x` (training inputs) and the Keras `Sequential`/`Dense`
# classes are provided by earlier cells / the environment.
# Create the training values from the square root function
y = np.sqrt(x)
# Create the neural network with one hidden layer of 16 neurons, one input value, and one output value
model = Sequential()
model.add(Dense(16, input_dim=1, activation='relu'))
model.add(Dense(1))
# Train the network
model.compile(loss='mean_squared_error', optimizer='rmsprop')
model.fit(x, y, epochs=100)
## Plot the resulting approximation and the training values
plt.plot(x, y, x, model.predict(x))
plt.show()
```
### Asset price prediction
This prediction uses the price of other assets.
```
# Predict Morgan Stanley's price from the other three assets' prices.
# Set the input and output data
training_input = prices.drop('Morgan Stanley', axis=1)
training_output = prices['Morgan Stanley']
# Create and train the neural network with two hidden layers
model = Sequential()
model.add(Dense(16, input_dim=3, activation='sigmoid'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_logarithmic_error', optimizer='rmsprop')
model.fit(training_input, training_output, epochs=100)
# Scatter plot of the resulting model prediction against the targets
axis.scatter(training_output, model.predict(training_input)); plt.show()
```
### Real-time risk management
We use the pre-trained model because training from scratch would take too long.
```
# Create neural network model
# NOTE(review): this freshly built (untrained) `model` is never used below;
# predictions come from `pre_trained_model` supplied by the environment.
model = Sequential()
model.add(Dense(128, input_dim = 4, activation = 'relu'))
model.add(Dense(64, activation = 'relu'))
model.add(Dense(4, activation = 'relu'))
# Use the pre-trained model to predict portfolio weights given new asset returns
asset_returns = np.array([0.001060, 0.003832, 0.000726, -0.002787])
asset_returns.shape = (1,4)
print("Predicted minimum volatility portfolio: ", pre_trained_model.predict(asset_returns))
```
| github_jupyter |
Import
```
import tensorflow as tf
# from PIL import Image
import numpy as np
from scipy.misc import imread, imresize
from imagenet_classes import class_names
import os
```
file_path_info
```
# File paths and dataset size for the description-generation pipeline
filename_queue_description = tf.train.string_input_producer(['./data/description/v2/coded_data.csv'])
filepath_ckpt = "./ckpt/model_weight_v2_2.ckpt" #weight saver check point file path
filepath_pred = "./output/predicted_v2.csv" #predicted value file path
# number of records in the coded_data.csv training file
num_record = 50
```
Hyper Params - IMG
```
# Image-feature hyperparameters
# side length used for the (commented-out) bilinear resize path
bilinear_size = 28
# flattened size of a bilinear-resized RGB image
resized_size = bilinear_size*bilinear_size*3
# number of distinct image classes mapped from ImageNet predictions
img_label_size = 5
```
Hyper Params - LSTM
```
# LSTM hyperparameters
label_vec_size = 32   # one-hot vocabulary size for labels
input_vec_size = 32   # one-hot vocabulary size for inputs
batch_size = num_record
state_size_1 = 20     # hidden units in the first LSTM layer
state_size_2 = 100    # hidden units in the second LSTM layer
hidden = 18           # sequence length (words per description)
learning_rate = 0.001
```
vgg16_class
```
class vgg16:
    """TF1-style VGG16 graph builder.

    Builds the 13 convolutional layers, 5 max-pool layers and 3
    fully-connected layers of VGG16 on top of the `imgs` placeholder and
    exposes ImageNet class probabilities as `self.probs`.  Weight and bias
    variables are appended to `self.parameters` in creation order so that
    `load_weights` can restore them from the published `vgg16_weights.npz`
    file, whose sorted key order matches that creation order.
    """

    def __init__(self, imgs, weights=None, sess=None):
        # imgs: tf.placeholder of shape [None, H, W, 3] holding RGB images
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        # Softmax over the 1000 ImageNet logits
        self.probs = tf.nn.softmax(self.fc3l)
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)

    def _conv(self, name, bottom, in_channels, out_channels):
        """Create one 3x3 stride-1 'SAME' conv + bias + ReLU layer.

        Registers the kernel and bias in self.parameters (order matters
        for load_weights) and returns the activated output tensor.
        """
        with tf.name_scope(name) as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, in_channels, out_channels],
                                                     dtype=tf.float32, stddev=1e-1),
                                 name='weights')
            conv = tf.nn.conv2d(bottom, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[out_channels], dtype=tf.float32),
                                 trainable=True, name='biases')
            activation = tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope)
            self.parameters += [kernel, biases]
        return activation

    @staticmethod
    def _max_pool(name, bottom):
        """2x2 max pooling with stride 2 ('SAME' padding)."""
        return tf.nn.max_pool(bottom,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    def convlayers(self):
        """Build the convolutional trunk (conv1_1 ... pool5)."""
        self.parameters = []
        # zero-mean input: subtract the ImageNet per-channel mean
        with tf.name_scope('preprocess') as scope:
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            images = self.imgs - mean
        # Block 1: 3 -> 64 channels
        self.conv1_1 = self._conv('conv1_1', images, 3, 64)
        self.conv1_2 = self._conv('conv1_2', self.conv1_1, 64, 64)
        self.pool1 = self._max_pool('pool1', self.conv1_2)
        # Block 2: 64 -> 128 channels
        self.conv2_1 = self._conv('conv2_1', self.pool1, 64, 128)
        self.conv2_2 = self._conv('conv2_2', self.conv2_1, 128, 128)
        self.pool2 = self._max_pool('pool2', self.conv2_2)
        # Block 3: 128 -> 256 channels
        self.conv3_1 = self._conv('conv3_1', self.pool2, 128, 256)
        self.conv3_2 = self._conv('conv3_2', self.conv3_1, 256, 256)
        self.conv3_3 = self._conv('conv3_3', self.conv3_2, 256, 256)
        self.pool3 = self._max_pool('pool3', self.conv3_3)
        # Block 4: 256 -> 512 channels
        self.conv4_1 = self._conv('conv4_1', self.pool3, 256, 512)
        self.conv4_2 = self._conv('conv4_2', self.conv4_1, 512, 512)
        self.conv4_3 = self._conv('conv4_3', self.conv4_2, 512, 512)
        self.pool4 = self._max_pool('pool4', self.conv4_3)
        # Block 5: 512 -> 512 channels
        self.conv5_1 = self._conv('conv5_1', self.pool4, 512, 512)
        self.conv5_2 = self._conv('conv5_2', self.conv5_1, 512, 512)
        self.conv5_3 = self._conv('conv5_3', self.conv5_2, 512, 512)
        # BUG FIX: the original named this op 'pool4' (copy-paste error);
        # it is the fifth pooling layer.
        self.pool5 = self._max_pool('pool5', self.conv5_3)

    def fc_layers(self):
        """Build the three fully-connected layers (fc1, fc2, fc3 logits)."""
        # fc1: flatten pool5 and project to 4096 units
        with tf.name_scope('fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                               trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]
        # fc2: 4096 -> 4096
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                               trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]
        # fc3: raw logits over the 1000 ImageNet classes (no ReLU)
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                               trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]

    def load_weights(self, weight_file, sess):
        """Assign pretrained weights from an .npz file.

        Keys are sorted so they line up with self.parameters creation order.
        """
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            print(i, k, np.shape(weights[k]))
            sess.run(self.parameters[i].assign(weights[k]))
```
Train
load_img_vgg16
```
# Extract VGG16 fc1 features and top-1 ImageNet labels for the training images.
with tf.Session() as sess_vgg:
    imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg)
    # NOTE(review): `imgs` is rebound below from placeholder to an image list;
    # vgg keeps its own reference to the placeholder, so feeding still works.
    img_files = ['./data/img/cropped/' + i for i in os.listdir('./data/img/cropped')]
    imgs = [imread(file, mode='RGB') for file in img_files]
    #bilinear feature: use the fc1 activations as the per-image feature vector
    imgs_bi = [sess_vgg.run(vgg.fc1, feed_dict={vgg.imgs: [img]})[0] for img in imgs]
    # imgs_bi = [imresize(arr=img, interp='bilinear', size=bilinear_size) for img in imgs]
    imgs_bi = np.reshape(a=imgs_bi, newshape=[50,-1])
    #label: take the top-1 ImageNet class id for each image
    prob = [sess_vgg.run(vgg.probs, feed_dict={vgg.imgs: [img]})[0] for img in imgs]
    preds = [(np.argsort(p)[::-1])[0:1] for p in prob]
    preds = [p[0] for p in preds]
    # Remap the five expected ImageNet class ids to local labels 0-4
    for i in range(len(preds)):
        if(preds[i]==430):
            preds[i]=0
        elif(preds[i]==429):
            preds[i]=1
        elif(preds[i]==805):
            preds[i]=2
        elif(preds[i]==768):
            preds[i]=3
        elif(preds[i]==574):
            preds[i]=4
    img_label_onehot = tf.one_hot(indices=preds, depth = 5)
    print(preds)
    # print(sess_vgg.run(img_label_onehot))
    #clear large references to free memory before the LSTM phase
    imgs = None
    vgg = None
    img_files = None
    prob = None
    sess_vgg.close()
print(np.shape(imgs_bi))
```
Text_Reader_Setting
```
# Read the coded description CSV: 19 integer word codes per record.
reader = tf.TextLineReader()
key,value = reader.read(filename_queue_description)
record_defaults =[[-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2]]
w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15, w16, w17, w18, w19 = tf.decode_csv(value, record_defaults)
# Targets are words 2..19 (next-word prediction); inputs are words 1..18
feature_label = tf.stack([w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15, w16, w17, w18, w19])
feature_word = tf.stack([w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15, w16, w17, w18])
with tf.Session() as sess_data:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # img_queue = []
    # Accumulate one-hot encoded input/label tensors for all records
    for i in range(num_record):
        # image = sess.run(images)
        raw_label, raw_input = sess_data.run([feature_label, feature_word])
        onehot_input = tf.one_hot(indices=raw_input, depth=32)
        onehot_label = tf.one_hot(indices=raw_label, depth=32)
        if i == 0:
            full_input = onehot_input
            full_label = onehot_label
        else:
            full_input = tf.concat([full_input, onehot_input], 0)
            full_label = tf.concat([full_label, onehot_label], 0)
        raw_label = None
        raw_input = None
        onehot_input = None
        onehot_label = None
    coord.request_stop()
    coord.join(threads)
    sess_data.close()
with tf.name_scope('batch') as scope:
    # Reshape to [batch, timestep, vocab] for the LSTM layers
    # full_label = tf.reshape(full_label, [batch_size, hidden, label_vec_size])
    full_input = tf.reshape(full_input, [batch_size, hidden, input_vec_size])
    full_label = tf.reshape(full_label, [batch_size, hidden, input_vec_size])
    # input_batch, label_batch = tf.train.batch([full_input, full_label], batch_size=50)
```
Model
LSTM First Layer
```
# First LSTM layer over the word sequence.
with tf.name_scope('lstm_layer_1') as scope:
    with tf.variable_scope('lstm_layer_1'):
        rnn_cell_1 = tf.contrib.rnn.BasicLSTMCell(state_size_1, reuse=None)
        output_1, _ = tf.contrib.rnn.static_rnn(rnn_cell_1, tf.unstack(full_input, axis=1), dtype=tf.float32)
        # Concatenate each timestep's LSTM output with the image fc1 feature
        # and the one-hot image label to form the second layer's input
        input_2 = [tf.concat([out, imgs_bi, img_label_onehot], axis=1) for out in output_1]
        # output_w_1 = tf.Variable(tf.truncated_normal([hidden, state_size_1, input_vec_size]))
        # output_b_1 = tf.Variable(tf.zeros([input_vec_size]))
        # pred_temp = tf.matmul(output_1, output_w_1) + output_b_1
```
LSTM Second Layer
```
# Second LSTM layer plus a per-timestep output projection to the vocabulary.
with tf.name_scope('lstm_layer_2') as scope:
    with tf.variable_scope('lstm_layer_2'):
        rnn_cell_2 = tf.contrib.rnn.BasicLSTMCell(state_size_2, reuse=None)
        output_2, _ = tf.contrib.rnn.static_rnn(rnn_cell_2, tf.unstack(input_2, axis=0), dtype=tf.float32)
        output_w_2 = tf.Variable(tf.truncated_normal([hidden, state_size_2, input_vec_size]))
        output_b_2 = tf.Variable(tf.zeros([input_vec_size]))
        pred = tf.nn.softmax(tf.matmul(output_2, output_w_2) + output_b_2)
```
Loss
```
# Sum softmax cross-entropy over all `hidden` timesteps and minimize with Adam.
# NOTE(review): `pred` is already softmaxed while softmax_cross_entropy expects
# logits -- confirm this double softmax is intended.
with tf.name_scope('loss') as scope:
    loss = tf.constant(0, tf.float32)
    for i in range(hidden):
        loss += tf.losses.softmax_cross_entropy(tf.unstack(full_label, axis=1)[i], tf.unstack(pred, axis=0)[i])
    train = tf.train.AdamOptimizer(learning_rate).minimize(loss)
```
Run Train
```
# Train for 10001 steps, checkpointing every 1000 steps.
sess_train = tf.Session()
sess_train.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# NOTE(review): this saves the untrained weights once before training -- confirm intended.
save_path = saver.save(sess_train, filepath_ckpt)
for i in range(10001):
    sess_train.run(train)
    if i % 1000 == 0:
        print("loss : ", sess_train.run(loss))
        # print("pred : ", sess.run(pred))
        save_path = saver.save(sess_train, filepath_ckpt)
        print("= Weigths are saved in " + filepath_ckpt)
sess_train.close()
```
Test
```
# Test phase: extract VGG16 features/labels for the test images, then build
# the initial LSTM input (one-hot start token at position [i][0][0]).
with tf.Session() as sess_vgg_test:
    imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg_test)
    test_img_files = ['./data/img/test/cropped/test001.png', './data/img/cropped/005.png', './data/img/cropped/014.png', './data/img/cropped/021.png', './data/img/cropped/036.png', './data/img/cropped/050.png']
    num_imgs = len(test_img_files)
    test_imgs = [imread(file, mode='RGB') for file in test_img_files]
    #bilinear feature: fc1 activations as per-image features (as in training)
    test_imgs_bi = [sess_vgg_test.run(vgg.fc1, feed_dict={vgg.imgs: [img]})[0] for img in test_imgs]
    # test_imgs_bi = [imresize(arr=img, interp='bilinear', size=bilinear_size) for img in test_imgs]
    test_imgs_bi = np.reshape(a=test_imgs_bi, newshape=[num_imgs,-1])
    #label: top-1 ImageNet class, remapped to local labels 0-4
    prob = [sess_vgg_test.run(vgg.probs, feed_dict={vgg.imgs: [img]})[0] for img in test_imgs]
    test_preds = [(np.argsort(p)[::-1])[0:1] for p in prob]
    test_preds = [p[0] for p in test_preds]
    for i in range(len(test_preds)):
        if(test_preds[i]==430):
            test_preds[i]=0
        elif(test_preds[i]==429):
            test_preds[i]=1
        elif(test_preds[i]==805):
            test_preds[i]=2
        elif(test_preds[i]==768):
            test_preds[i]=3
        elif(test_preds[i]==574):
            test_preds[i]=4
    test_img_label_onehot = tf.one_hot(indices=test_preds, depth = 5)
    print(sess_vgg_test.run(test_img_label_onehot))
    #clear large references
    test_imgs = None
    vgg = None
    test_img_files = None
    prob = None
    sess_vgg_test.close()
start_input = tf.zeros([num_imgs,hidden,input_vec_size])
with tf.Session() as sess_init_generator:
    input_init = sess_init_generator.run(start_input)
# Mark the start token (word id 0) at the first timestep of every sequence
for i in range(num_imgs):
    input_init[i][0][0] = 1
```
Test-First_Layer-LSTM
```
# Rebuild the two-layer LSTM graph for inference, using the same scope
# names as training so the checkpoint variables can be restored.
with tf.name_scope('lstm_layer_1') as scope:
with tf.variable_scope('lstm_layer_1'):
rnn_cell_1 = tf.contrib.rnn.BasicLSTMCell(state_size_1, reuse=None)
# NOTE(review): input_init is a NumPy array; tf.unstack bakes it into the
# graph as constants, so later in-place writes to input_init are not seen
# by this graph -- verify the generation loop below works as intended.
output_test_1, _ = tf.contrib.rnn.static_rnn(rnn_cell_1, tf.unstack(input_init, axis=1), dtype=tf.float32)
input_2 = [tf.concat([out, test_imgs_bi, test_img_label_onehot], axis=1) for out in output_test_1]
# output_t_1 = tf.contrib.rnn.static_rnn(rnn_cell, tf.unstack(full_input, axis=1), dtype=tf.float32)
# pred = tf.nn.softmax(tf.matmul(output1, output_w[0]) + output_b[0])
with tf.name_scope('lstm_layer_2') as scope:
with tf.variable_scope('lstm_layer_2'):
rnn_cell_2 = tf.contrib.rnn.BasicLSTMCell(state_size_2, reuse=None)
output_2, _ = tf.contrib.rnn.static_rnn(rnn_cell_2, tf.unstack(input_2, axis=0), dtype=tf.float32)
output_w_2 = tf.Variable(tf.truncated_normal([hidden, state_size_2, input_vec_size]))
output_b_2 = tf.Variable(tf.zeros([input_vec_size]))
pred = tf.nn.softmax(tf.matmul(output_2, output_w_2) + output_b_2)
# Restore trained weights; allow_empty tolerates an empty variable list.
sess_model = tf.Session()
saver = tf.train.Saver(allow_empty=True)
saver.restore(sess_model, filepath_ckpt)
```
```
# Autoregressive generation: evaluate the whole sequence, take step i's
# prediction, and feed it back as step i+1's input.
for i in range(hidden):
result = sess_model.run(pred)
result_temp = result[i]
if i == hidden -1:
pass
else:
# NOTE(review): input_init was already converted to graph constants at
# build time, so this write may not affect `pred` -- confirm against the
# graph-construction cell above.
input_init[:,i+1] = result_temp
print(np.shape(result))
```
Result Check
```
# Load the class-index -> word lookup table used to decode predictions.
import csv
classes = []
# NOTE(review): a `with open(...)` block would guarantee the file is closed
# even if csv parsing raises.
f = open('./data/description/v2/class.csv', 'r')
csvReader = csv.reader(f)
for row in csvReader:
classes.append(row)
# print(row)
f.close()
# Most likely class per entry; the axis=2 argmax implies `result` is rank-3
# (presumably step x image x vocab -- confirm with the printed shape above).
decoded_result = np.argmax(a=result, axis=2)
print(np.shape(decoded_result))
# Decode each image's predicted word sequence into a readable caption.
# Fix: the accumulator was named `str`, shadowing the builtin.
for i in range(num_imgs):
    caption = " "
    for r in decoded_result:
        caption += classes[r[i]][0] + " "
    print(caption)
```
Code Storage
| github_jupyter |
# DB Analysis
## With a few hundred papers in the DB, some experiments can be done on the data.
### 1. Connecting to the DB
```
import mariadb
import json
with open('../credentials.json', 'r') as crd_json_fd:
json_text = crd_json_fd.read()
json_obj = json.loads(json_text)
credentials = json_obj["Credentials"]
username = credentials["username"]
password = credentials["password"]
table_name = "publications"
db_name = "ubbcluj"
mariadb_connection = mariadb.connect(user=username, password=password, database=db_name)
mariadb_cursor = mariadb_connection.cursor()
```
### 2. Counting publications.
```
# Count all rows in the publications table.
queryString = "SELECT COUNT(*) FROM "
queryString += table_name
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
# Broad catch: just report the DB error and let the notebook continue.
print(ex)
# COUNT(*) returns a single row whose first column is the count.
for item in mariadb_cursor:
count = item[0]
print("Number of publications: {0}".format(count))
```
### 3. Distinct Affiliations
```
# Group publications by affiliation and list them by descending frequency.
queryString = "SELECT Affiliations, COUNT(*) AS c FROM publications GROUP BY Affiliations ORDER BY c DESC"
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
affiliations = []
for item in mariadb_cursor:
Affiliation = item[0]
affiliations.append(item[0])
c = item[1]
print("{0} : {1} occurences".format(Affiliation, c))
# Print affiliations containing the word "conference" (case-insensitive).
for i in affiliations:
if "conference" in [k.lower() for k in i.split()]:
print(i)
# Same for "journal".
for i in affiliations:
if "journal" in [k.lower() for k in i.split()]:
print(i)
# Flatten all affiliation strings into a single token list.
tokens = []
for i in affiliations:
words = i.split()
for word in words:
tokens.append(word)
tokens
# Remove English stop words from the affiliation tokens and plot the 20
# most frequent remaining tokens.
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
# Build the stop-word set once. The original recreated the stop-word list
# for EVERY token (O(tokens * stopwords)) and never used `sr`.
sr = set(stopwords.words('english'))
# NOTE(review): the stop-word list is lowercase, so capitalized tokens like
# "The" pass through -- confirm whether tokens should be lowercased first.
clean_tokens = [token for token in tokens if token not in sr]
freq = nltk.FreqDist(clean_tokens)
freq.plot(20, cumulative=False)
# Histogram of professors publication number
# Publication count per professor, most prolific first; the subquery
# resolves the professor's display name from humanoid_entities.
queryString = "SELECT (Select FullName from humanoid_entities where id = ProfessorId), ProfessorId, COUNT(Title) FROM publications GROUP BY ProfessorId ORDER BY COUNT(Title) desc"
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
# name_dict: id -> name; tup_list: (name, id, count); max_id tracks the
# largest professor id seen, used to build a dense histogram below.
name_dict = {}
tup_list = []
max_id = 0
for item in mariadb_cursor:
ProfName = item[0]
ProfId = item[1]
max_id = max(max_id, ProfId)
Count = item[2]
tup_list.append((ProfName, ProfId, Count))
name_dict[ProfId] = ProfName
import pandas as pd
print(tup_list)
# Build a dense (professor_id, publication_count) list covering every id,
# filling missing ids with count 0 and name "NONE".
# Fixes: range(max_id) skipped the largest id (off-by-one -- the most
# prolific-by-id professor was dropped), and the inner linear scan over
# tup_list made this O(max_id * len(tup_list)).
counts = {prof_id: cnt for _name, prof_id, cnt in tup_list}
final_list = []
for i in range(max_id + 1):
    if i not in counts:
        name_dict[i] = "NONE"
    final_list.append((i, counts.get(i, 0)))
fa = pd.DataFrame([k[1] for k in final_list])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.bar([name_dict[k[0]] for k in final_list], [k[1] for k in final_list])
loc = plticker.MultipleLocator(base=1.0)
ax.xaxis.set_major_locator(loc)
plt.xticks(rotation=90)
plt.show()
```
### 4. TF-IDF and K-Means
#### Fetching publication titles
```
queryString = "SELECT Title FROM publications"
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
titles = []
for item in mariadb_cursor:
Title = item[0]
titles.append(item[0])
from sklearn.feature_extraction.text import CountVectorizer
corpus = titles[:]
corpus
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names())
print(X.shape)
print(X.toarray())
for k in X.toarray():
for j in k:
if j > 1:
print(j)
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(corpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')
def textblob_tokenizer(str_input):
    """Lowercase *str_input*, tokenize it with TextBlob, and stem each word."""
    blob_words = TextBlob(str_input.lower()).words
    return [word.stem() for word in blob_words]
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(corpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
vec = TfidfVectorizer(tokenizer=textblob_tokenizer,
stop_words='english',
use_idf=True)
matrix = vec.fit_transform(corpus)
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
for word in df.columns:
for row in df[word]:
if row != 0.0:
print(word, row)
from sklearn.cluster import KMeans
number_of_clusters = 10
km = KMeans(n_clusters=number_of_clusters)
km.fit(matrix)
km.fit
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vec.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
results = pd.DataFrame({
'corpus': corpus,
'category': km.labels_
})
results.sort_values('category')
for k in results.sort_values('category').values:
print(k[1], " --- ", k[0])
### GENSIM
from gensim.models import word2vec
from gensim.test.utils import common_texts, get_tmpfile
tokenized_sentences = [[j.lower() for j in st.split() if j not in stopwords.words('english')] for st in corpus]
model = word2vec.Word2Vec(tokenized_sentences, min_count=1)
model.save("word2vec.model")
#model = word2vec.load("word2vec.model")
model
model.wv["study"]
words = list(model.wv.vocab)
print(words)
# Word-embedding matrix: one row per vocabulary word.
X = model[model.wv.vocab]
# Fix: the original wrote pd.DataFrame(df), re-wrapping the stale TF-IDF
# frame and leaving the embeddings X unused; wrap X instead.
df = pd.DataFrame(X)
df.shape
df.head()
import numpy as np
import io
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word in model.wv.vocab:
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in model[word]]) + "\n")
out_v.close()
out_m.close()
```
| github_jupyter |
# Completing the ML workflow
Over the past few tutorials we've seen many aspects of a supervised ML workflow. From loading data to preprocessing, selecting and training a model, optimizing hyperparameters and finally evaluating the model. It's time to put all these together into a complete workflow for supervised ML problems.
<img src="https://github.com/rasbt/pattern_classification/raw/master/Images/supervised_learning_flowchart.png" width="65%">
The main steps are:
1. **Load** the data into python
2. **Split** the data into train/test sets
3. **Preprocess** the data
1. Perform all **necessary** preprocessing steps. These include:
- Handling **missing** data (i.e. discard or impute)
- Feature **encoding** (i.e. convert alphanumeric features into numeric)
- Feature **scaling** (i.e. transform features so that they occupy similar value ranges)
2. **Optionally** we might want to perform:
- Feature **selection** (i.e. discard some of the features)
- Feature **extraction** (i.e. transform the data into a usually smaller feature space)
- **Resampling** (i.e. under/over-sampling)
4. **Select** a ML algorithm
5. Optimize the algorithm's **hyperparameters** through **cross-validation**.
6. **Evaluate** its performance on the test set. If it is inadequate, or if we want to improve on the results: **start over from step 2 and refine the process**!
7. Finally, if we've achieved an adequate performance on the test set: train the model one last time, with the optimal hyperparameters, **on the whole dataset**.
Scikit-learn has two very helpful classes that make our life easier when refining hyperparameters: **pipeline** and **grid search**.
## pipeline
*scikit-learn* [pipelines](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) provide a convenient way for incorporating multiple steps in a ML workflow.
The concept of the `pipeline` is to encapsulate more than one steps into a single class. The first steps of the pipeline involve **preprocessing** steps. Through these the data is transformed accordingly. The last step of the pipeline is a model that can make predictions. Unfortunately **all preprocessing steps must be *scikit-learn* compatible objects**.
All intermediate steps in a pipeline are transforms and must implement both a `.fit()` and a `.transform()` method (like the scaler we saw before). The last step should be an estimator (i.e. have `.fit()` and `.predict()` methods). We need to pass these steps, sequentially, as a *list* of *tuples*, each containing the name and object of the transform/estimator.
```python
from sklearn.pipeline import Pipeline
pipe = Pipeline([('transform1', transform1), ('transform2', transform2), ..., ('estimator', estimator)])
```
Let's try to implement a pipeline containing a StandardScaler and a k-NN model.
```
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn import datasets
# Load the breast-cancer dataset (the variable name `iris` is a leftover
# from an earlier example -- this is NOT the iris dataset).
iris = datasets.load_breast_cancer()
seed = 13 # random seed for reproducibility
# Shuffle and split the data
train, test, train_labels, test_labels = train_test_split(iris['data'], iris['target'], test_size=0.4, random_state=seed)
# Define a scaler (default parameters)
scaler = StandardScaler()
# Define a kNN model (not default parameters)
knn = KNeighborsClassifier(n_neighbors=11)
# Create a pipeline with the scaler and the kNN
pipe = Pipeline([('standardizer', scaler), ('classifier', knn)])
# Train on the training set
pipe.fit(train, train_labels)
# Evaluate on the test set
preds = pipe.predict(test)
print(accuracy_score(test_labels, preds))
```
What the pipeline did is that when we called `pipe.fit()`, internally it called `.fit_transform()` **for each of its transforms** and `.fit()` **for its estimator**. Assuming an estimator with $M$ preprocessing steps, when we called `pipe.fit()` it ran the equivalent of fitting and transforming the data through each of the preprocessing steps and fitting the last step (i.e. the estimator)
```python
# Assuming that our pipeline is:
pipe = Pipeline([('transform1', transform1), ('transform2', transform2), ..., ('estimator', estimator)])
# If we ran:
pipe.fit(train, train_labels)
# It would be the equivalent of:
tmp = transform1.fit_transform(train)
tmp = transform2.fit_transform(tmp)
# ...
tmp = transformM.fit_transform(tmp)
estimator.fit(tmp)
```
Running `pipe.predict()`, on the other hand, simply applied `.transform()` to each of the preprocessing steps and `.predict()` to the final step.
```python
# If we ran:
preds = pipe.predict(test)
# It would be the equivalent of:
tmp = transform1.transform(test)
tmp = transform2.transform(tmp)
# ...
tmp = transformM.transform(tmp)
preds = estimator.predict(tmp)
```
An easier way to create Pipelines is through scikit-learn `make_pipeline` function. This is a shorthand for the Pipeline constructor, that does not require naming the estimators. Instead, their names will be set to the lowercase of their types automatically.
```python
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(scaler, knn)
```
**Note**: If we want to put a sampler from imblearn into our pipeline we **must** use ` imblearn.pipeline.Pipeline` which extends sklearn's pipeline.
```
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline # import imblearn's pipeline because one of the steps is SMOTE
pipe = Pipeline([('selector', VarianceThreshold()),
('scaler', StandardScaler()),
('sampler', SMOTE()),
('pca', PCA()),
('knn', KNeighborsClassifier())])
pipe.fit(train, train_labels)
preds = pipe.predict(test)
print(accuracy_score(test_labels, preds))
```
## Grid search
Before, we attempted to optimize a model by selecting its hyperparameters through a for loop. There is a much easier way provided through scikit-learn's [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV). This function takes two main arguments: an estimator (or pipeline) and a *grid* of parameters we want the grid search to consider. The grid could be one of two things:
- A dictionary with the hyperparameter names as its keys and a list of values as the corresponding dictionary value:
```python
grid = {'name1': [val1, val2, val3], 'name2': [val4, val5], ...}
```
This will force the grid search to search for **all** possible combinations of parameter values:
```python
(val1, val4, ...), (val1, val5, ...), (val2, val4, ...), (val2, val5, ...), ... etc.
```
- A list of such dictionaries:
```python
grid = [{'name1': [val1, val2, val3], 'name2': [val4, val5], ...},
{'name1': [val1, val2, val3], 'name3': [val6, val7], ...}]
```
This will create a grid that contains combinations from both dictionaries.
After creating such a grid:
```python
from sklearn.model_selection import GridSearchCV
grid = {...}
clf = GridSearchCV(estimator, grid)
clf.fit(X_train, y_train) # will search all possible combinations defined by the grid
preds = clf.predict(X_test) # will generate predictions based on the best configuration
# In order to access the best model:
clf.best_estimator_
```
```
from sklearn.model_selection import GridSearchCV
# Scale the data to be comparable to previous.
scaled_train = scaler.fit_transform(train)
scaled_test = scaler.transform(test)
# Define a search grid.
grid = {'n_neighbors': list(range(1, 15, 2)),
'p': [1, 2, 3, 4]}
# Create the GridSearch class. This will serve as our classifier from now on.
clf = GridSearchCV(knn, grid, cv=5) # 5-fold cross validation
# Train the model as many times as designated by the grid.
clf.fit(scaled_train, train_labels)
# Evaluate on the test set and print best hyperparameters
preds = clf.predict(scaled_test)
print(accuracy_score(test_labels, preds))
print(clf.best_estimator_)
```
Grid searches can be performed on pipelines too! The only thing that changes is that now we need to specify which step each parameter belongs to. This is done by adding both the name of the step and the name of the parameter separated by two underscores (i.e. `__`).
```python
pipe = Pipeline([('step1', ...), ...])
grid = {'step1__param1`': [val1, ...], ...} # this dictates param1 from step1 to take the values [val1, ...]
clf = GridSearchCV(pipe, grid)
clf.fit(X_train, y_train) # will search all possible combinations defined by the grid
preds = clf.predict(X_test) # will generate predictions based on the best configuration
```
```
# Revert to the previous pipeline
pipe = Pipeline([('standardizer', scaler), ('classifier', knn)])
# Define a grid that checks for hyperparameters for both steps
grid = {'standardizer__with_mean': [True, False], # Check parameters True/False for 'with_mean' argument of scaler
'standardizer__with_std': [True, False], # Check parameters True/False for 'with_std' argument of scaler
'classifier__n_neighbors': list(range(1, 15, 2)), # Check for values of 'n_neighbors' of knn
'classifier__p': [1, 2, 3, 4]} # Check for values of 'p' of knn
# Create and train the grid search
clf = GridSearchCV(pipe, grid, cv=5)
clf.fit(train, train_labels)
# Evaluate on the test set and print best hypterparameter values
print('Best accuracy: {:.2f}%'.format(accuracy_score(test_labels, clf.predict(test))*100))
print(clf.best_estimator_) # print the best configuration
```
Let's try to optimize the more complex pipeline.
```
pipe = Pipeline(steps=[('selector', VarianceThreshold()),
('scaler', StandardScaler()),
('sampler', SMOTE()),
('pca', PCA()),
('knn', KNeighborsClassifier())])
grid = {'selector__threshold': [0.0, 0.005],
'pca__n_components': list(range(5, 16, 5)),
'knn__n_neighbors': list(range(1, 15, 2)),
'knn__p': [1, 2, 3, 4]}
clf = GridSearchCV(pipe, grid, cv=5)
clf.fit(train, train_labels)
print('Best accuracy: {:.2f}%'.format(accuracy_score(test_labels, clf.predict(test)) * 100))
print(clf.best_estimator_)
```
With the inclusion of the feature selection/extraction steps, we actually managed to **hurt** our performance here.
### Tips for using grid search:
1. Always **calculate** the number of times a model is fit. In the example above we check for $2 \cdot 3 \cdot 7 \cdot 4 = 168$ different hyperparameter combinations. Because we are using a 5-fold cross validation, each combination is used for 5 separate model fits. So the above grid search accounts for 840 different fits! It is very easy when using a grid search for this number to go up to the thousands which would take a **long time to complete**. If we were using a feature selection or imputing through a model, we would need to take that into account too!
2. Grid search has a parameter called `verbose` which offers several **levels of verbosity**. I'd recommend setting a `verbose=1` so that *scikit-learn* informs you on the number of times a model needs to be trained and how much time it took. You can, however, set a larger value which will inform you on the progress of each fit in detail. Caution: this will flood your screen!
3. Instead of checking all different parameter combinations which would be computationally impossible to achieve, we could use a more **progressive** grid search! Imagine we want to optimize a hyperparameter `x` that ranges from $1$ to $1000$:
- First perform a grid search on `[1, 5, 10, 50, 100, 500, 1000]` (or even more sparse if it takes too long). We get the best performance for $x = 500$.
- Now perform a grid search on `[200, 350, 500, 650, 800]`. The best performance is produced with $x=800$.
- Choose an even more close grid `[725, 730, 735, 740, 745, 750]`.
- Repeat until you achieve the desired precision.
4. `GridSearchCV` has a parameter called `n_jobs`, which determines the number of jobs to run in parallel. This can speed up the search considerably, but might cripple your PC.
### Drawbacks:
One major drawback of using pipelines is that they support only scikit-learn compatible objects. Many preprocessing steps, however, need to be implemented in a library like *pandas*. To refine these steps we'll need to do so manually! Either that or you can write your own class in an sklearn-like manner and incorporate them into a pipeline.
| github_jupyter |
# Xente Fraud Detection Challenge
<h2 style='bold'>Background:</h2>
<p style='font-size:20'>
Fraud detection is an important application of machine learning in the financial services sector. This solution will help Xente provide improved and safer service to its customers. The objective of this competition is to create a machine learning model to detect fraudulent transactions.
</p>
## Exploratory Data Analysis
```
'''Importing necessary libraries'''
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, matthews_corrcoef
import scipy as sp
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.metrics import confusion_matrix, f1_score
from google.colab import drive
drive.mount('/content/drive')
#importing the data
path = "drive/My Drive/X_fraud_detection/data"
train = pd.read_csv(path + "/training.csv")
test = pd.read_csv(path + "/test.csv")
sub = pd.read_csv(path + "/sample_submission.csv")
#train set info
train.info()
#viewing a sample of the training data
train.head()
#test set info
test.info()
#previewing the test set
test.head()
#checking the shape of the train and test datasets
tr = train.shape
te = test.shape
print("train_set_shape is: {} and test_set_shape is: {}".format(tr,te))
train.drop_duplicates(keep="first", inplace=True) #removing duplicate data if any
test.drop_duplicates(keep="first", inplace=True)
del tr
del te
#Count of fraudulent and non-fraudulent cases
print("Fraudulent: {}".format((train["FraudResult"]==1).sum()))
print("Non-Fraudulent: {}".format((train["FraudResult"]==0).sum()))
#Visualising Fraudulent and non-fraudulent cases
plt.bar("Fraudulent", train["FraudResult"].value_counts()[1], color="red")
plt.bar("Non_Fraudulent", train["FraudResult"].value_counts()[0], width=0.5, color="green")
plt.ylabel("Count", fontsize=14)
plt.title("Fraudulent VS Non-Fraudulent")
# Label-encode the first ten feature columns of each frame.
columns = train.columns.tolist()[1:11]
test_columns = test.columns.tolist()[1:11]
le = LabelEncoder()
# NOTE(review): the encoder is re-fit per column AND separately on train
# and test, so the same category can map to different integers in the two
# frames -- verify this is intended before relying on these encodings.
for each in columns:
train[each] = le.fit_transform(train[each])
for column in test_columns:
test[column] = le.fit_transform(test[column])
train.head()
len_train = len(train)
new_df = pd.concat([train, test], sort=False)
#getting categorical dummies
categorical_columns = ["ProviderId", "ProductCategory", "ProductId", "ChannelId"]
new_df = pd.get_dummies(new_df, columns=categorical_columns)
new_df.head()
```
## Feature engineering(1)
```
#new_df
train = new_df[:len_train]
test = new_df[len_train:].reset_index(drop=True)
test.drop("FraudResult", axis=1, inplace=True)
#Extracting time and day from the TransactionStartTime column to create new features
train['hour'] = pd.to_datetime(train.TransactionStartTime).dt.hour
train['minute'] = pd.to_datetime(train.TransactionStartTime).dt.minute
train['day'] = pd.to_datetime(train.TransactionStartTime).dt.dayofweek
test['hour'] = pd.to_datetime(test.TransactionStartTime).dt.hour
test['minute'] = pd.to_datetime(test.TransactionStartTime).dt.minute
test['day'] = pd.to_datetime(test.TransactionStartTime).dt.dayofweek
# dropping the transaction starttime column
train = train.drop(["TransactionStartTime"], axis=1)
test = test.drop(["TransactionStartTime"], axis=1)
correlations = train.corr()
fig = plt.figure(figsize = (9, 6))
sns.heatmap(correlations, vmax = .8, square = True)
plt.show()
# Visualizing correlations of the various features to fraud_result
(correlations
.FraudResult
.drop("FraudResult") # can't compare the variable under study to itself
.sort_values(ascending=False)
.plot
.barh(figsize=(9,7)))
plt.title("correlation bar_hist")
```
## Feature Engineering
```
train["period"] = np.nan
test["period"] = np.nan
train.loc[train.hour < 7, "period"] = 'em'
train.loc[(train.hour >= 7) & (train.hour < 11), "period"] = 'am'
train.loc[(train.hour >= 11) & (train.hour < 15), "period"] = 'mid'
train.loc[(train.hour >= 15) & (train.hour < 19), "period"] = 'eve'
train.loc[(train.hour >= 19) & (train.hour <= 24), "period"] = 'pm'
test.loc[test.hour < 7, "period"] = 'em'
test.loc[(test.hour >= 7) & (test.hour < 11), "period"] = 'am'
test.loc[(test.hour >= 11) & (test.hour < 15), "period"] = 'mid'
test.loc[(test.hour >= 15) & (test.hour < 19), "period"] = 'eve'
test.loc[(test.hour >= 19) & (test.hour <= 24), "period"] = 'pm'
# Encoding the period feature columns
le = LabelEncoder()
train["period"] = le.fit_transform(train["period"])
test["period"] = le.fit_transform(test["period"])
# Dropping columns with negative correlation to the fraudresult
columns_to_drop = ["SubscriptionId"]
train = train.drop(columns_to_drop, axis=1)
test = test.drop(columns_to_drop, axis=1)
new_correlations = train.corr()
# Visualizing correlations of new features to fraud_result
(new_correlations
.FraudResult
.drop("FraudResult") # can't compare the variable under study to itself
.sort_values(ascending=False)
.plot
.barh(figsize=(9,7)))
plt.title("correlation bar_hist")
# dropping non-predictor feature columns and the target(train-set only)
target = train["FraudResult"]
train = train.drop(["TransactionId", "FraudResult"], axis=1)
test_id = test["TransactionId"]
test = test.drop(["TransactionId"], axis=1)
# "Normalize" the Amount and Value columns by taking absolute values.
# (Removed the no-op `columns = columns` self-assignment.)
train["Value"] = train["Value"].abs()
train["Amount"] = train["Amount"].abs()
test["Value"] = test["Value"].abs()
test["Amount"] = test["Amount"].abs()
# Hold out 25% of the training data for validation.
X = train
y = target
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25,
                                                  random_state=1)
#boxcox transform of value col.
X_train.loc[:,'Value'], maxlog, (min_ci, max_ci) = sp.stats.boxcox(X_train['Value'],
alpha=0.01)
MCC_scorer = make_scorer(matthews_corrcoef)
# RandomForest
pipeline_rf = Pipeline([
('model', RandomForestClassifier(max_depth=9, n_jobs=-1, random_state=1))
])
param_grid_rf = {'model__n_estimators': [50, 75, 100, 150, 200, 250, 300]
}
grid_rf = GridSearchCV(estimator=pipeline_rf, param_grid=param_grid_rf,
scoring=MCC_scorer, n_jobs=-1, pre_dispatch='2*n_jobs',
cv=8, verbose=1, return_train_score=False)
grid_rf.fit(X_train, y_train)
# Xgboost
model = xgb.XGBClassifier()
max_depth = range(1, 11, 2)
random_state=[1]
subsample = [0.9]
base_score = [0.5]
n_estimators = [50, 100, 150, 200, 250]
param_grid = dict(max_depth=max_depth, n_estimators=n_estimators,
random_state=random_state, subsample=subsample,
base_score=base_score)
kfold = StratifiedKFold(n_splits=8, shuffle=True, random_state=1)
grid_xgb = GridSearchCV(model, param_grid, scoring=MCC_scorer, n_jobs=-1,
pre_dispatch='2*n_jobs', cv=kfold, verbose=1)
grid_xgb.fit(X_train, y_train)
print("RandomForest: \n")
print(grid_rf.best_score_)
print(grid_rf.best_params_)
print("\n")
print("Xgboost: \n")
print(grid_xgb.best_score_)
print(grid_xgb.best_params_)
y_pred = grid_rf.predict(X_val)
print(f'f1_score: {f1_score(y_val, y_pred)}\n')
print(confusion_matrix(y_val, y_pred))
print("\n")
val_pred = grid_xgb.predict(X_val)
print(f'f1_score: {f1_score(y_val, val_pred)}\n')
confusion_matrix(y_val, val_pred)
print("Predicting...")
predictions = grid_xgb.predict(test)
submission = pd.DataFrame({'TransactionId': test_id, 'FraudResult': predictions})
(submission['FraudResult']==1).value_counts()
#making a submission file
submission.to_csv('sub.csv',index=False) #currently waiting to test this submission
print("done...")
#LBscore 0.76363 currently with xgboost. (only dropping "SubscriptionId"/no new features)
```
| github_jupyter |
## Imports
```
import pandas as pd
```
## Data
```
# Load the pre-built feature pickles (external + git features).
df_test = pd.read_pickle("../data/interim/Test_extern_git_feat.pickle")
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
modes = df_train.click_mode.unique()
modes.sort()
modes
# Peek at a few sids whose click_mode is 0.
df_train[df_train.click_mode == 0].sid[:10]
# Shift labels down by one (presumably so classes start at 0).
# NOTE(review): the cell above shows rows with click_mode == 0 exist, so
# after this shift they become -1 -- a multiclass objective would reject
# that; confirm class 0 was meant to be dropped or remapped first.
df_train.click_mode = df_train.click_mode -1
modes = df_train.click_mode.unique()
modes.sort()
modes
df_train.head()
# Drop meta/leak columns from the training frame ONCE; the original ran the
# identical .drop() twice, and the second call raises KeyError because the
# columns were already removed. (The intermediate test_X DataFrame was also
# immediately overwritten by its .values version below, so it is gone.)
df_train = df_train.drop(['click_time', 'req_time', 'plan_time', 'req_date', 'weather',
                          'Response', 'Response_2'], axis=1)
train_X = df_train.drop("click_mode", axis=1).values
train_y = df_train['click_mode'].values
test_X = df_test.drop(['req_time', 'plan_time', 'req_date', 'weather'], axis=1).values
print(train_X.shape)
print(train_y.shape)
print(test_X.shape)
```
## Functions
```
def f1_weighted(labels, preds):
    """Weighted F1 metric for 12-class flattened predictions.

    `preds` arrives flattened with shape (12 * n_samples,); reshape to
    (12, n_samples) and take the per-sample argmax over the class axis.
    Returns (name, value, is_higher_better) in LightGBM feval format.
    """
    from sklearn.metrics import f1_score
    import numpy as np
    # Fix: removed a leftover debug print of preds.shape.
    preds = np.argmax(preds.reshape(12, -1), axis=0)
    score = f1_score(y_true=labels, y_pred=preds, average='weighted')
    return 'f1_weighted', score, True
def test_train_split_df(df_r, test_percentage=0.25):
import numpy as np
df = df_r.copy()
msk = np.random.rand(len(df)) < (1-test_percentage)
train = df[msk]
val = df[~msk]
train_X = train.drop("click_mode", axis=1).values
train_y = train['click_mode'].values
val_X = val.drop("click_mode", axis=1).values
val_y = val['click_mode'].values
return train_X, train_y, val_X, val_y
def train_lgb(train_x, train_y, test_x):
    """Train 12-class LightGBM models with 5-fold stratified CV.

    One model is trained per fold with early stopping on the held-out
    fold (weighted F1 via `eval_f`). Prints the mean CV F1 and returns the
    argmax of the fold-averaged class probabilities on `test_x`.
    Fixes vs. original: the fold counter is now incremented, the stray
    no-op `lgb_model.predict` expression is gone, and unused local imports
    (time, pandas) were removed.
    """
    import numpy as np
    import lightgbm as lgb
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import f1_score

    print('Train LGB')
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019)
    lgb_paras = {
        'objective': 'multiclass',
        'metrics': 'multiclass',
        'learning_rate': 0.05,
        'num_leaves': 60,
        'lambda_l1': 0.01,
        'lambda_l2': 10,
        'num_class': 12,
        'seed': 2019,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 4
    }
    count = 1
    scores = []
    result_proba = []
    for tr_idx, val_idx in kfold.split(train_x, train_y):
        print('Split Nr: ', count)
        count += 1  # fix: counter was never incremented
        # Take one split
        tr_x, tr_y, val_x, val_y = train_x[tr_idx], train_y[tr_idx], train_x[val_idx], train_y[val_idx]
        train_set = lgb.Dataset(tr_x, tr_y)
        val_set = lgb.Dataset(val_x, val_y)
        # Train on this split with early stopping on the held-out fold
        lgb_model = lgb.train(lgb_paras, train_set,
                              valid_sets=[val_set], early_stopping_rounds=50,
                              num_boost_round=40000, verbose_eval=50, feval=eval_f)
        # Predict the held-out fold at the best iteration
        val_pred = np.argmax(lgb_model.predict(
            val_x, num_iteration=lgb_model.best_iteration), axis=1)
        # Weighted F1 on the held-out fold
        val_score = f1_score(val_y, val_pred, average='weighted')
        # Predict the test set at the best iteration of this fold
        result_proba.append(lgb_model.predict(
            test_x, num_iteration=lgb_model.best_iteration))
        scores.append(val_score)
    print('cv f1-score: ', np.mean(scores))
    # Average class probabilities across folds, then take the best class.
    pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
    return pred_test
def train_lgb_11(train_x, train_y, test_x):
    """Train 11-class LightGBM models with 5-fold stratified CV.

    Identical to `train_lgb` except for num_class=11 and the matching
    `eval_f_11` metric. Prints the mean CV weighted F1 and returns the
    argmax of the fold-averaged class probabilities on `test_x`.
    Fixes vs. original: the fold counter is now incremented, the stray
    no-op `lgb_model.predict` expression is gone, and unused local imports
    (time, pandas) were removed.
    """
    import numpy as np
    import lightgbm as lgb
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import f1_score

    print('Train LGB')
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019)
    lgb_paras = {
        'objective': 'multiclass',
        'metrics': 'multiclass',
        'learning_rate': 0.05,
        'num_leaves': 60,
        'lambda_l1': 0.01,
        'lambda_l2': 10,
        'num_class': 11,
        'seed': 2019,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 4
    }
    count = 1
    scores = []
    result_proba = []
    for tr_idx, val_idx in kfold.split(train_x, train_y):
        print('Split Nr: ', count)
        count += 1  # fix: counter was never incremented
        # Take one split
        tr_x, tr_y, val_x, val_y = train_x[tr_idx], train_y[tr_idx], train_x[val_idx], train_y[val_idx]
        train_set = lgb.Dataset(tr_x, tr_y)
        val_set = lgb.Dataset(val_x, val_y)
        # Train on this split with early stopping on the held-out fold
        lgb_model = lgb.train(lgb_paras, train_set,
                              valid_sets=[val_set], early_stopping_rounds=50,
                              num_boost_round=40000, verbose_eval=50, feval=eval_f_11)
        # Predict the held-out fold at the best iteration
        val_pred = np.argmax(lgb_model.predict(
            val_x, num_iteration=lgb_model.best_iteration), axis=1)
        # Weighted F1 on the held-out fold
        val_score = f1_score(val_y, val_pred, average='weighted')
        # Predict the test set at the best iteration of this fold
        result_proba.append(lgb_model.predict(
            test_x, num_iteration=lgb_model.best_iteration))
        scores.append(val_score)
    print('cv f1-score: ', np.mean(scores))
    # Average class probabilities across folds, then take the best class.
    pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
    return pred_test
def eval_f(y_pred, train_data):
    """Custom LightGBM feval: weighted F1 over 12 classes.

    LightGBM passes the raw predictions as one flat array; the reshape below
    assumes it is class-major, i.e. (12, n_samples) before transposing.
    Returns the (name, value, is_higher_better) triple LightGBM expects.
    """
    labels = train_data.label
    probs = y_pred.reshape((12, -1)).T
    predicted = np.argmax(probs, axis=1)
    score = f1_score(labels, predicted, average='weighted')
    return 'weighted-f1-score', score, True
def eval_f_11(y_pred, train_data):
    """Custom LightGBM feval: weighted F1 for the 11-class (no mode-0) setup.

    Same contract as ``eval_f`` but the flat prediction array is assumed to
    hold 11 classes.  Returns (name, value, is_higher_better).
    """
    labels = train_data.label
    probs = y_pred.reshape((11, -1)).T
    predicted = np.argmax(probs, axis=1)
    score = f1_score(labels, predicted, average='weighted')
    return 'weighted-f1-score', score, True
def submit_result(submit, result, model_name):
    """Write a timestamped submission CSV into the working directory.

    Parameters
    ----------
    submit : pandas.DataFrame holding the session ids for the submission.
    result : per-session predicted modes; stored in a 'recommend_mode' column
        (note: mutates `submit` in place).
    model_name : str prefix used in the output file name.
    """
    # Local import fixes a latent NameError: gmtime/strftime were only
    # imported inside other functions' scopes, not at module level.
    from time import gmtime, strftime
    now_time = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
    submit['recommend_mode'] = result
    submit.to_csv(
        './{}_result_{}.csv'.format(model_name, now_time), index=False)
def plot_count_mode(df, title):
    """Bar chart of the number of sessions per click_mode.

    Parameters
    ----------
    df : pandas.DataFrame with 'click_mode' and 'sid' columns.
    title : str, chart title.

    Returns
    -------
    matplotlib Figure.
    """
    import matplotlib.pyplot as plt
    # FIX(review): the original body contained the IPython magic
    # '%matplotlib inline', which is a SyntaxError inside a function
    # definition; run the magic once at notebook level instead.
    fig, ax = plt.subplots(figsize=(20,10))
    x = df.click_mode.unique()
    x.sort()
    y = df.groupby('click_mode').count()['sid']
    ax.bar(x, y);
    ax.set_xticklabels(x, fontsize=16);
    ax.set_xticks(x);
    ax.set_yticklabels(range(0,90000,10000), fontsize=16);
    rects = ax.patches
    # Annotate each bar with its count.
    labels = y
    for rect, label in zip(rects, labels):
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2, height + 5, '{:.2f}'.format(label),
                ha='center', va='bottom', fontsize=16)
    ax.set_title(title, fontsize=16);
    return fig
df_preds = train_lgb(train_X, train_y, test_X)
df_preds
submit = pd.read_pickle("../data/interim/submit.pickle")
submit_result(submit, df_preds, "lgb")
!ls
```
## With downsampled 2
```
df_downsampled = pd.read_pickle('../data/interim/df_train_downsampled_2.pickle')
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
df_downsampled.head()
df_downsampled = df_downsampled.sid
df_train = df_train.loc[df_train['sid'].isin(df_merged.values)]
df_train = df_train.drop(['click_time', 'req_time', 'plan_time', 'req_date', 'weather',
'Response', 'Response_2'], axis=1)
train_X = df_train.drop("click_mode", axis=1).values
train_y = df_train['click_mode'].values
test_X = df_test.drop(['req_time', 'plan_time', 'req_date', 'weather'], axis=1).values
df_preds = train_lgb(train_X, train_y, test_X)
submit_result(submit, df_preds, 'lgb_downsampled')
modes = df_train.click_mode.unique()
modes.sort()
modes
df_preds = train_lgb(train_X, train_y, test_X)
```
## Train only on sessions with a clicked mode (drop click_mode == 0)
```
df_orig = pd.read_csv('../data/raw/data_set_phase1/train_clicks.csv')
df_orig = pd.read_csv('../data/raw/data_set_phase1/train_clicks.csv')
modes = df_orig.click_mode.unique()
modes.sort()
modes
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
df_train = df_train[df_train.click_mode != 0]
df_train.shape
df_train.to_pickle("../data/interim/Train_extern_git_feat_without_0.pickle")
df_downsampled = pd.read_pickle('../data/interim/df_train_downsampled_2.pickle')
df_downsampled = df_downsampled.sid
df_train = df_train.loc[df_train['sid'].isin(df_merged.values)]
df_train.shape
df_train.click_mode = df_train.click_mode - 1
df_train.click_mode.unique()
df_train = df_train.drop(['click_time', 'req_time', 'plan_time', 'req_date', 'weather',
'Response', 'Response_2'], axis=1)
train_X = df_train.drop("click_mode", axis=1).values
train_y = df_train['click_mode'].values
test_X = df_test.drop(['req_time', 'plan_time', 'req_date', 'weather'], axis=1).values
df_preds = train_lgb_11(train_X, train_y, test_X)
```
## Train with df upsampled all
```
df_up_all = pd.read_pickle('../data/interim/df_upsampled_all.pickle')
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
df_up_all = df_up_all.sid
df_train = df_train.loc[df_train['sid'].isin(df_merged.values)]
```
## Downsample 2 again
```
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
df_downsampled = pd.read_pickle('../data/interim/df_train_downsampled_2.pickle')
df_downsampled.head()
plot_count_mode(df_downsampled)
df_train = df_train.loc[df_train['sid'].isin(df_downsampled.sid.values)]
df_train.shape
plot_count_mode(df_train)
df_train.click_mode = df_train.click_mode - 1
df_train.click_mode.unique()
features_to_exclude = [
'o1', 'o2', 'd1', 'd2', 'o_lat', 'o_long', 'd_lat', 'd_long',
'pid', 'Response', 'sid', 'click_time', 'req_time', 'weather', 'plan_time',
'req_date', 'Response_2'
]
df_train = df_train.drop(features_to_exclude, axis=1)
df_train = df_train.drop(df_train.loc[:,'p0':'p65'].head(0).columns, axis=1)
train_X = df_train.drop("click_mode", axis=1).values
train_y = df_train['click_mode'].values
test_X = df_test.drop(['req_time', 'plan_time', 'req_date', 'weather'], axis=1).values
df_train.shape
df_preds = train_lgb_11(train_X, train_y, test_X)
submit_result(submit, df_preds, 'lgb_down_0_trunc')
!ls
df_train.head()
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
df_train[df_train.click_mode != 2].count()['sid']
def down_half(df, mode):
    """Halve the rows of class ``mode`` by random sampling.

    Counts the non-null 'sid' entries of the given click_mode, keeps a random
    half of those rows, and returns them concatenated with all other rows.
    """
    mode_rows = df[df.click_mode == mode]
    other_rows = df[df.click_mode != mode]
    quantity = mode_rows['sid'].count()
    print('Quantity: ', quantity)
    half = quantity // 2
    print('Half: ', half)
    kept_mode_rows = mode_rows.sample(half)
    return pd.concat([other_rows, kept_mode_rows])
plot_count_mode(df_train)
df_end = down_half(df_train, 2)
plot_count_mode(df_end)
```
## Train on downsampled folds, validate on untouched folds
```
def train_lgb_downsampling(df_train_x, df_train_y, test_x):
    """Intended: 5-fold CV LightGBM where each fold's training part is
    downsampled (mode 2 halved) before fitting.

    NOTE(review): this draft is broken and appears superseded by the SID-split
    loops further down in the file:
      * ``fd_train_y`` is a typo for ``df_train_y`` (NameError on first fold).
      * ``pd.DataFrame([tr_x, tr_y])`` builds a 2-row frame, not one row per
        sample, and ``df_end`` is never used -- the model actually trains on
        the NON-downsampled fold.
      * ``feval=eval_f_11`` reshapes for 11 classes while ``num_class`` is 12.
      * ``count`` is printed but never incremented.
    """
    import numpy as np
    import lightgbm as lgb
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import f1_score
    from time import gmtime, strftime
    import pandas as pd
    print('Train LGB')
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019)
    lgb_paras = {
        'objective': 'multiclass',
        'metrics': 'multiclass',  # NOTE(review): LightGBM expects 'metric'
        'learning_rate': 0.05,
        'num_leaves': 60,
        'lambda_l1': 0.01,
        'lambda_l2': 10,
        'num_class': 12,
        'seed': 2019,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 4
    }
    count = 1
    scores = []
    result_proba = []
    for tr_idx, val_idx in kfold.split(df_train_x, df_train_y):
        print('Split Nr: ', count)  # NOTE(review): count never incremented
        # Take one split
        tr_x, tr_y, val_x, val_y = df_train_x[tr_idx], fd_train_y[tr_idx], df_train_x[val_idx], df_train_y[val_idx]  # NOTE(review): `fd_train_y` typo -> NameError
        # Downsample tr_x
        df = pd.DataFrame([tr_x, tr_y])  # NOTE(review): wrong construction (2 rows, not samples)
        df_end = down_half(df, 2)  # NOTE(review): result unused below
        train_set = lgb.Dataset(tr_x, tr_y)
        val_set = lgb.Dataset(val_x, val_y)
        # Train on this split
        lgb_model = lgb.train(lgb_paras, train_set,
                              valid_sets=[val_set], early_stopping_rounds=50,
                              num_boost_round=40000, verbose_eval=50, feval=eval_f_11)  # NOTE(review): 11-class feval vs num_class=12
        # Predict on best iteration of this split with validation set
        val_pred = np.argmax(lgb_model.predict(
            val_x, num_iteration=lgb_model.best_iteration), axis=1)
        # F1 val score
        val_score = f1_score(val_y, val_pred, average='weighted')
        # Predict with test set on best iteration of this split
        result_proba.append(lgb_model.predict(
            test_x, num_iteration=lgb_model.best_iteration))
        scores.append(val_score)
    print('cv f1-score: ', np.mean(scores))
    pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
    lgb_model.predict  # NOTE(review): no-op attribute access
    return pred_test
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
from time import gmtime, strftime
import pandas as pd
print('Train LGB')
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019)
lgb_paras = {
'objective': 'multiclass',
'metrics': 'multiclass',
'learning_rate': 0.05,
'num_leaves': 60,
'lambda_l1': 0.01,
'lambda_l2': 10,
'num_class': 12,
'seed': 2019,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 4
}
count = 1
scores = []
result_proba = []
for tr_idx, val_idx in kfold.split(train_X, train_y):
print('Split Nr: ', count)
print(tr_idx)
df_end = down_half(df_train[tr_idx], 2)
print(tr_idx)
# Take one split
tr_x, tr_y, val_x, val_y = df_train_x[tr_idx], fd_train_y[tr_idx], df_train_x[val_idx], df_train_y[val_idx]
# Downsample tr_x
train_set = lgb.Dataset(tr_x, tr_y)
val_set = lgb.Dataset(val_x, val_y)
# Train on this split
lgb_model = lgb.train(lgb_paras, train_set,
valid_sets=[val_set], early_stopping_rounds=50,
num_boost_round=40000, verbose_eval=50, feval=eval_f_11)
# Predict on best iteration of this split with validation set
val_pred = np.argmax(lgb_model.predict(
val_x, num_iteration=lgb_model.best_iteration), axis=1)
# F1 val score
val_score = f1_score(val_y, val_pred, average='weighted')
# Predict with test set on best iteration of this split
result_proba.append(lgb_model.predict(
test_x, num_iteration=lgb_model.best_iteration))
scores.append(val_score)
print('cv f1-score: ', np.mean(scores))
df_preds = train_lgb_downsampling(train_X, train_y, test_X)
features = [
'o1',
'o2',
'd1',
'd2',
'mode_feas_0',
'mode_feas_1',
'mode_feas_2',
'mode_feas_3',
'mode_feas_4',
'mode_feas_5',
'mode_feas_6',
'mode_feas_7',
'mode_feas_8',
'mode_feas_9',
'mode_feas_10',
'mode_feas_11',
'max_dist',
'min_dist',
'mean_dist',
'std_dist',
'max_price',
'min_price',
'mean_price',
'std_price',
'max_eta',
'min_eta',
'mean_eta',
'std_eta',
'max_dist_mode',
'min_dist_mode',
'max_price_mode',
'min_price_mode',
'max_eta_mode',
'min_eta_mode',
'first_mode',
'weekday',
'hour',
'distance_query',
'dist_nearest_sub',
'dyq',
'qdy',
'dy',
'q',
'xydy',
'xq',
'max_temp',
'min_temp',
'wind',
'req_weekend',
'is_holiday',
'p0',
'p1',
'p2',
'p3',
'p4',
'p5',
'p6',
'p7',
'p8',
'p9',
'p10',
'p11',
'p12',
'p13',
'p14',
'p15',
'p16',
'p17',
'p18',
'p19',
'p20',
'p21',
'p22',
'p23',
'p24',
'p25',
'p26',
'p27',
'p28',
'p29',
'p30',
'p31',
'p32',
'p33',
'p34',
'p35',
'p36',
'p37',
'p38',
'p39',
'p40',
'p41',
'p42',
'p43',
'p44',
'p45',
'p46',
'p47',
'p48',
'p49',
'p50',
'p51',
'p52',
'p53',
'p54',
'p55',
'p56',
'p57',
'p58',
'p59',
'p60',
'p61',
'p62',
'p63',
'p64',
'p65'
]
import pickle
with open('../data/interim/features_pid_all.pickle', 'wb') as fp:
pickle.dump(features, fp)
with open ('../data/interim/features_pid_all.pickle', 'rb') as fp:
itemlist = pickle.load(fp)
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
features_to_exclude = [
'o1', 'o2', 'd1', 'd2', 'o_lat', 'o_long', 'd_lat', 'd_long',
'pid', 'Response', 'sid', 'click_time', 'req_time', 'weather', 'plan_time',
'req_date', 'Response_2'
]
# df_train = df_train.drop(features_to_exclude, axis=1)
# df_train = df_train.drop(df_train.loc[:,'p0':'p65'].head(0).columns, axis=1)
df_train = df_train[features + ['click_mode']]
df_test = pd.read_pickle("../data/interim/Test_extern_git_feat.pickle")
df_test = df_test[features]
train_X = df_train.drop("click_mode", axis=1)
train_y = df_train['click_mode']
test_x = df_test[features].values
test_X.shape
len(features)
train_X.shape
train_y.shape
test_X.shape
split_1 = pd.read_pickle('../data/interim/splits/SIDs_1.txt')
split_2 = pd.read_pickle('../data/interim/splits/SIDs_2.txt')
split_3 = pd.read_pickle('../data/interim/splits/SIDs_3.txt')
split_4 = pd.read_pickle('../data/interim/splits/SIDs_4.txt')
split_5 = pd.read_pickle('../data/interim/splits/SIDs_5.txt')
SIDs = [split_1, split_2, split_3, split_4, split_5]
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
modes = df_train.click_mode.unique()
modes.sort()
modes
import lightgbm as lgb
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
print("df train shape", df_train.shape)
data = df_train.copy()
lgb_paras = {
'objective': 'multiclass',
'metrics': 'multiclass',
'learning_rate': 0.05,
'num_leaves': 60,
'lambda_l1': 0.01,
'lambda_l2': 10,
'num_class': 12,
'seed': 2019,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 4
}
count = 1
scores = []
result_proba = []
# Loop over the different Test/Train Splits
for i in range(len(SIDs)):
# Print process
print(str(i) + " / " + str(len(SIDs) - 1))
# Extract the Test_Set based on the current SID:
val_x = data.loc[data["sid"].isin(SIDs[i]), features].values
val_y = data.loc[data["sid"].isin(SIDs[i]), "click_mode"].values
# Extract the SIDs we use for training, and select correponding train points!
train_sids = []
for j in range(len(SIDs)):
if j != i:
train_sids = train_sids + SIDs[j]
df_train_split = data.loc[data["sid"].isin(train_sids), :]
df_downsampled = down_half(df_train_split, 2)
tr_x = df_downsampled[features].values
tr_y = df_downsampled['click_mode'].values
train_set = lgb.Dataset(tr_x, tr_y)
val_set = lgb.Dataset(val_x, val_y)
# Train on this split
lgb_model = lgb.train(lgb_paras, train_set,
valid_sets=[val_set], early_stopping_rounds=50,
num_boost_round=40000, verbose_eval=50, feval=eval_f)
# Predict on best iteration of this split with validation set
val_pred = np.argmax(lgb_model.predict(
val_x, num_iteration=lgb_model.best_iteration), axis=1)
# F1 val score
val_score = f1_score(val_y, val_pred, average='weighted')
# Predict with test set on best iteration of this split
result_proba.append(lgb_model.predict(
test_x, num_iteration=lgb_model.best_iteration))
scores.append(val_score)
print('cv f1-score: ', np.mean(scores))
pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
print('cv f1-score: ', np.mean(scores))
pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
submit_result(submit, pred_test, 'lgb_down')
lgb = pd.read_csv("lgb_down_result_2019-05-28-09-03-45.csv")
lgb.head()
rf = pd.read_csv("../data/interim/RF_100_all_features.csv", header=None)
rf.columns=["sid", "recommend_mode"]
rf.head()
rf[rf.recommend_mode == 0].count()
sid_zero = rf.loc[rf.recommend_mode == 0, 'sid']
rf.loc[rf.recommend_mode == 0, 'recommend_mode'] = lgb.loc[lgb.sid.isin(sid_zero), 'recommend_mode']
rf.groupby("recommend_mode").count()
# With adjusted hyperparameter
import lightgbm as lgb
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
print("df train shape", df_train.shape)
data = df_train.copy()
lgb_paras = {
'objective': 'multiclass',
'metrics': 'multiclass',
'learning_rate': 0.05,
'num_leaves': 25,
'lambda_l1': 0.01,
'lambda_l2': 10,
'num_class': 12,
'seed': 2019,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 4,
'colsample_bytree': 0.9492410177317588,
'min_child_samples': 243,
'min_child_weight': 1,
'reg_alpha': 10,
'reg_lambda': 0.1,
'subsample': 0.4592512443442188
}
count = 1
scores = []
result_proba = []
# Loop over the different Test/Train Splits
for i in range(len(SIDs)):
# Print process
print(str(i) + " / " + str(len(SIDs) - 1))
# Extract the Test_Set based on the current SID:
val_x = data.loc[data["sid"].isin(SIDs[i]), features].values
val_y = data.loc[data["sid"].isin(SIDs[i]), "click_mode"].values
# Extract the SIDs we use for training, and select correponding train points!
train_sids = []
for j in range(len(SIDs)):
if j != i:
train_sids = train_sids + SIDs[j]
df_train_split = data.loc[data["sid"].isin(train_sids), :]
df_downsampled = down_half(df_train_split, 2)
tr_x = df_downsampled[features].values
tr_y = df_downsampled['click_mode'].values
train_set = lgb.Dataset(tr_x, tr_y)
val_set = lgb.Dataset(val_x, val_y)
# Train on this split
lgb_model = lgb.train(lgb_paras, train_set,
valid_sets=[val_set], early_stopping_rounds=50,
num_boost_round=40000, verbose_eval=50, feval=eval_f)
# Predict on best iteration of this split with validation set
val_pred = np.argmax(lgb_model.predict(
val_x, num_iteration=lgb_model.best_iteration), axis=1)
# F1 val score
val_score = f1_score(val_y, val_pred, average='weighted')
# Predict with test set on best iteration of this split
result_proba.append(lgb_model.predict(
test_x, num_iteration=lgb_model.best_iteration))
scores.append(val_score)
print('cv f1-score: ', np.mean(scores))
pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
```
## With truncated pids
```
features = [
'o1',
'o2',
'd1',
'd2',
'mode_feas_0',
'mode_feas_1',
'mode_feas_2',
'mode_feas_3',
'mode_feas_4',
'mode_feas_5',
'mode_feas_6',
'mode_feas_7',
'mode_feas_8',
'mode_feas_9',
'mode_feas_10',
'mode_feas_11',
'max_dist',
'min_dist',
'mean_dist',
'std_dist',
'max_price',
'min_price',
'mean_price',
'std_price',
'max_eta',
'min_eta',
'mean_eta',
'std_eta',
'max_dist_mode',
'min_dist_mode',
'max_price_mode',
'min_price_mode',
'max_eta_mode',
'min_eta_mode',
'first_mode',
'weekday',
'hour',
'distance_query',
'dist_nearest_sub',
'dyq',
'qdy',
'dy',
'q',
'xydy',
'xq',
'max_temp',
'min_temp',
'wind',
'req_weekend',
'is_holiday',
'svd_fea_0',
'svd_fea_1',
'svd_fea_2',
'svd_fea_3',
'svd_fea_4',
'svd_fea_5',
'svd_fea_6',
'svd_fea_7',
'svd_fea_8',
'svd_fea_9',
'svd_fea_10',
'svd_fea_11',
'svd_fea_12',
'svd_fea_13',
'svd_fea_14',
'svd_fea_15',
'svd_fea_16',
'svd_fea_17',
'svd_fea_18',
'svd_fea_19'
]
# With adjusted hyperparameter
import lightgbm as lgb
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
print("df train shape", df_train.shape)
data = df_train.copy()
lgb_paras = {
'objective': 'multiclass',
'metrics': 'multiclass',
'learning_rate': 0.05,
'num_leaves': 60,
'lambda_l1': 0.01,
'lambda_l2': 10,
'num_class': 12,
'seed': 2019,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 4
}
count = 1
scores = []
result_proba = []
# Loop over the different Test/Train Splits
for i in range(len(SIDs)):
# Print process
print(str(i) + " / " + str(len(SIDs) - 1))
# Extract the Test_Set based on the current SID:
val_x = data.loc[data["sid"].isin(SIDs[i]), features].values
val_y = data.loc[data["sid"].isin(SIDs[i]), "click_mode"].values
# Extract the SIDs we use for training, and select correponding train points!
train_sids = []
for j in range(len(SIDs)):
if j != i:
train_sids = train_sids + SIDs[j]
df_train_split = data.loc[data["sid"].isin(train_sids), :]
df_downsampled = down_half(df_train_split, 2)
tr_x = df_downsampled[features].values
tr_y = df_downsampled['click_mode'].values
print("Train shape:", tr_x.shape)
train_set = lgb.Dataset(tr_x, tr_y)
val_set = lgb.Dataset(val_x, val_y)
# Train on this split
lgb_model = lgb.train(lgb_paras, train_set,
valid_sets=[val_set], early_stopping_rounds=50,
num_boost_round=40000, verbose_eval=50, feval=eval_f)
# Predict on best iteration of this split with validation set
val_pred = np.argmax(lgb_model.predict(
val_x, num_iteration=lgb_model.best_iteration), axis=1)
# F1 val score
val_score = f1_score(val_y, val_pred, average='weighted')
# Predict with test set on best iteration of this split
result_proba.append(lgb_model.predict(
test_x, num_iteration=lgb_model.best_iteration))
scores.append(val_score)
print('cv f1-score: ', np.mean(scores))
pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
```
## Training with pid=-1 / with 0 / without padding
```
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat_pid_-1.pickle")
df_train.shape
df_train.head()
df_train[df_train.pid == 196549.0]
import src.models.lgbm_multiclass_baseline.lgbm_mc_bl
df_test = pd.read_pickle("../data/interim/Test_extern_git_feat.pickle")
df_test.head()
df_test.to_pickle("../data/interim/Test_extern_git_feat_pid_-1.pickle")
profiles = pd.read_csv("../data/raw/data_set_phase1/profiles.csv")
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_na.columns = profiles.columns
profile_na.iloc[0] = -1
profile_na
profile_data = pd.read_csv('../data/profiles.csv')
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_na.columns = profile_data.columns
profile_data = profile_data.append(profile_na)
```
## Downsample mode 2; upsample modes 8 and 11 to mode 3's count
```
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat_pid_-1.pickle")
fig = plot_count_mode(df_train, 'Original distribution of transport modes')
fig.savefig("../data/interim/orig_data_sampling.png", bbox_inches='tight')
df_sampled = down_half(df_train, 2)
plot_count_mode(df_sampled)
df_train.groupby('click_mode').count().loc[3, 'sid']
print(df_train.groupby('click_mode').count()['sid'])
def up_othermode(df, mode, target_mode):
    """Upsample class ``mode`` (with replacement) to match the size of
    ``target_mode``.

    The new size is the non-null 'sid' count of ``target_mode``; rows of
    ``mode`` are resampled with replacement to that size and concatenated
    in front of all other rows.
    """
    target_count = df.groupby('click_mode').count().loc[target_mode, 'sid']
    resampled = df.loc[df.click_mode == mode].sample(target_count, replace=True)
    rest = df.loc[df.click_mode != mode]
    return pd.concat([resampled, rest], axis=0)
df_sampled = up_othermode(df_sampled, 8, 3)
plot_count_mode(df_sampled)
def downsample(df, mode, amount):
    """Reduce class ``mode`` to exactly ``amount`` randomly chosen rows.

    FIX(review): sample WITHOUT replacement -- the original used
    ``replace=True``, which when shrinking a class can pick the same row
    multiple times and silently drop distinct rows.  Without replacement
    every kept row is unique (requires ``amount`` <= class size, which is
    the point of downsampling).
    """
    df_just_mode = df.loc[df.click_mode == mode]
    df_mode_target = df_just_mode.sample(amount, replace=False)
    df = df.loc[df.click_mode != mode]
    df = pd.concat([df_mode_target, df], axis=0)
    return df
def upsample(df, mode, amount):
    """Resample class ``mode`` with replacement to exactly ``amount`` rows.

    Rows of the chosen click_mode are drawn with replacement (so the class
    can grow beyond its original size) and concatenated in front of all
    remaining rows.
    """
    boosted = df.loc[df.click_mode == mode].sample(amount, replace=True)
    rest = df.loc[df.click_mode != mode]
    return pd.concat([boosted, rest], axis=0)
df_sampled = df_train.copy()
df_sampled = downsample(df_sampled, 2, 70000)
df_sampled = upsample(df_sampled, 4, 20000)
df_sampled = upsample(df_sampled, 5, 20000)
df_sampled = upsample(df_sampled, 8, 3000)
df_sampled = upsample(df_sampled, 10, 20000)
fig = plot_count_mode(df_sampled, 'Modified down- / upsample')
fig.savefig("../data/interim/sampled.png", bbox_inches='tight')
df_train_old = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
list(df_train_old.columns)
features
import sys
sys.path.append("../src/")
from models.utils import load_model
model = load_model('../models/split_2_27')
model.predict(test_X.values)
```
## With new dataset from ANE
```
df_train_all_first = pd.read_pickle("../data/interim/processed_all/train_all_first.pickle")
df_test_all_first = pd.read_pickle("../data/interim/processed_all/test_all_first.pickle")
df_train_all_first.shape
df_train_all_first.head()
list(df_train_all_first.columns)
def one_hot_encode(df_all, column_to_encode, target_column="click_mode"):
    """One-hot encode ``column_to_encode`` and keep ``target_column`` last.

    Dummy columns are named '<column>,<value>'.  Pass ``target_column=None``
    for frames without a label column (e.g. the test set).

    Returns a new DataFrame; the input is not modified.
    """
    one_hot = pd.get_dummies(df_all[column_to_encode])
    one_hot.columns = [column_to_encode + "," + str(col) for col in one_hot.columns]
    # FIX(review): drop by the `columns` keyword only -- the original also
    # passed axis=0, which pandas ignores when `columns` is given and which
    # misleadingly reads as a row drop.
    df_all = df_all.drop(columns=column_to_encode)
    df_all = df_all.join(one_hot)
    if target_column is None:
        return df_all
    # Re-append the target so it ends up as the last column.
    y = df_all[target_column]
    df_all = df_all.drop(columns=target_column)
    df_all = df_all.join(y)
    return df_all
df_test_all_first = one_hot_encode(df_test_all_first, 'weather', None)
df_train_all_first.head()
list(df_train_all_first.columns)
features = [
'distance_query',
'p0',
'p1',
'p2',
'p3',
'p4',
'p5',
'p6',
'p7',
'p8',
'p9',
'p10',
'p11',
'p12',
'p13',
'p14',
'p15',
'p16',
'p17',
'p18',
'p19',
'p20',
'p21',
'p22',
'p23',
'p24',
'p25',
'p26',
'p27',
'p28',
'p29',
'p30',
'p31',
'p32',
'p33',
'p34',
'p35',
'p36',
'p37',
'p38',
'p39',
'p40',
'p41',
'p42',
'p43',
'p44',
'p45',
'p46',
'p47',
'p48',
'p49',
'p50',
'p51',
'p52',
'p53',
'p54',
'p55',
'p56',
'p57',
'p58',
'p59',
'p60',
'p61',
'p62',
'p63',
'p64',
'p65',
'dist_0',
'price_0',
'eta_0',
'dist_1',
'price_1',
'eta_1',
'dist_2',
'price_2',
'eta_2',
'dist_3',
'price_3',
'eta_3',
'dist_4',
'price_4',
'eta_4',
'dist_5',
'price_5',
'eta_5',
'dist_6',
'price_6',
'eta_6',
'dist_7',
'price_7',
'eta_7',
'dist_8',
'price_8',
'eta_8',
'dist_9',
'price_9',
'eta_9',
'dist_10',
'price_10',
'eta_10',
'dist_11',
'price_11',
'eta_11',
'dist_nearest_sub',
'req_hour',
'req_weekend',
'req_night',
'req_day',
'req_evening',
'is_holiday',
'max_temp',
'min_temp',
'wind',
'weather,dy',
'weather,dyq',
'weather,q',
'weather,qdy'
]
#df_test_all_first.to_pickle('../data/interim/processed_all/test_all_first_ohe.pickle')
df_train_all_first.to_pickle('../data/interim/processed_all/train_all_first_ohe.pickle')
import pickle
with open('../data/interim/features_processed_all.pickle', 'wb') as fp:
pickle.dump(features, fp)
list(df_test_all_first.head(9)[features].columns)
df_train_all_first.loc[df_train_all_first.click_mode.isna(), 'click_mode'] = 0
```
## Save predictions with probabilities
```
SIDs[0]
# With adjusted hyperparameter
import lightgbm as lgb
df_train = pd.read_pickle("../data/interim/Train_extern_git_feat.pickle")
print("df train shape", df_train.shape)
data = df_train.copy()
lgb_paras = {
'objective': 'multiclass',
'metrics': 'multiclass',
'learning_rate': 0.05,
'num_leaves': 60,
'lambda_l1': 0.01,
'lambda_l2': 10,
'num_class': 12,
'seed': 2019,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 4
}
count = 1
scores = []
result_proba = []
# Loop over the different Test/Train Splits
for i in range(len(SIDs[0])):
# Print process
print(str(i) + " / " + str(len(SIDs) - 1))
# Extract the Test_Set based on the current SID:
val_x = data.loc[data["sid"].isin(SIDs[i]), features].values
val_y = data.loc[data["sid"].isin(SIDs[i]), "click_mode"].values
# Extract the SIDs we use for training, and select correponding train points!
train_sids = []
for j in range(len(SIDs)):
if j != i:
train_sids = train_sids + SIDs[j]
df_train_split = data.loc[data["sid"].isin(train_sids), :]
df_downsampled = down_half(df_train_split, 2)
tr_x = df_downsampled[features].values
tr_y = df_downsampled['click_mode'].values
print("Train shape:", tr_x.shape)
train_set = lgb.Dataset(tr_x, tr_y)
val_set = lgb.Dataset(val_x, val_y)
# Train on this split
lgb_model = lgb.train(lgb_paras, train_set,
valid_sets=[val_set], early_stopping_rounds=50,
num_boost_round=40000, verbose_eval=50, feval=eval_f)
# Predict on best iteration of this split with validation set
val_pred = np.argmax(lgb_model.predict(
val_x, num_iteration=lgb_model.best_iteration), axis=1)
# F1 val score
val_score = f1_score(val_y, val_pred, average='weighted')
# Predict with test set on best iteration of this split
result_proba.append(lgb_model.predict(
test_x, num_iteration=lgb_model.best_iteration))
scores.append(val_score)
save_preds(None, val_pred, '../models/test')
print('cv f1-score: ', np.mean(scores))
pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
df_train = pd.read_pickle("~/repo/kdd-cup-2019/data/interim/Train_extern_git_feat_pid_-1.pickle")
df_test = pd.read_pickle("~/repo/kdd-cup-2019/data/interim/Train_extern_git_feat_pid_-1.pickle")
import pickle
with open ("../data/interim/features_pid_all.pickle", 'rb') as fp:
features = pickle.load(fp)
test_x = df_test[features].values
sid, val_pred = lgbm_train(df_train, test_x, 'test_save_preds_', features, 0.05, 60)
save_preds(sid, val_pred, 'test.csv')
df = pd.DataFrame(val_pred, index=sid, columns=['p0','p1','p2','p3','p4','p5','p6','p7','p8','p9','p10','p11'])
df.to_csv('test.csv')
pd.read_csv("test.csv").head()
val_pred.shape
len(sid)
def save_preds(sids, preds, path):
    """Persist per-class prediction probabilities to CSV.

    One row per session id (used as the index), twelve probability columns
    named p0..p11.
    """
    prob_cols = ['p{}'.format(i) for i in range(12)]
    frame = pd.DataFrame(preds, index=sids, columns=prob_cols)
    frame.to_csv(path)
def lgbm_train(df_train, test_x, model_name, features, lr, num_leaves, downsample_mode=99, downsample_amount=99,
        upsample_mode_1=99, upsample_1_amount=99, upsample_mode_2=99, upsample_2_amount=99, upsample_mode_3=99, upsample_3_amount=99,
        upsample_mode_4=99, upsample_4_amount=99, upsample_mode_5=99, upsample_5_amount=99):
    """CV-train LightGBM over the precomputed SID splits with optional
    per-fold down-/upsampling.

    The sentinel value 99 means "disabled" for every *_mode/*_amount pair.
    Relies on module-level globals: SIDs, lgb, np, f1_score, eval_f,
    downsample, upsample, save_preds, save_model, os.

    NOTE(review): ``return SIDs[i], val_probs`` inside the loop exits after
    the FIRST fold, so everything below it (saving, scoring, the fold average
    and the final ``return pred_test``) is dead code.  The caller above uses
    the (sids, probabilities) pair, so the early return looks deliberate --
    confirm before "fixing".
    """
    print("Start to train light gbm model")
    print("df train shape", df_train.shape)
    data = df_train.copy()
    lgb_paras = {
        'objective': 'multiclass',
        'metrics': 'multiclass',  # NOTE(review): LightGBM expects 'metric'
        'learning_rate': lr,
        'num_leaves': num_leaves,
        'lambda_l1': 0.01,
        'lambda_l2': 10,
        'num_class': 12,
        'seed': 2019,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 4
    }
    count = 1
    scores = []
    result_proba = []
    # Loop over the different Test/Train Splits
    for i in range(len(SIDs)):
        # Print progress
        print(str(i) + " / " + str(len(SIDs) - 1))
        # Extract the validation set based on the current SID split
        val_x = data.loc[data["sid"].isin(SIDs[i]), features].values
        val_y = data.loc[data["sid"].isin(SIDs[i]), "click_mode"].values
        # Extract the SIDs we use for training, and select corresponding train points
        train_sids = []
        for j in range(len(SIDs)):
            if j != i:
                train_sids = train_sids + SIDs[j]
        df_train_split = data.loc[data["sid"].isin(train_sids), :]
        # Optional resampling ladder: each stage applies only when its mode
        # is not the 99 sentinel.
        if downsample_mode != 99:
            print("Downsample mode {} on {}".format(downsample_mode, downsample_amount))
            df_train_split = downsample(df_train_split, downsample_mode, downsample_amount)
        else:
            print("Downsample mode is 99")
        if upsample_mode_1 != 99:
            print("Upsample mode 1 is {} on {}".format(upsample_mode_1, upsample_1_amount))
            df_train_split = upsample(df_train_split, upsample_mode_1, upsample_1_amount)
        else:
            print("Upsample mode 1 is 99")
        if upsample_mode_2 != 99:
            print("upsample_mode_2 is {} on {}".format(upsample_mode_2, upsample_2_amount))
            df_train_split = upsample(df_train_split, upsample_mode_2, upsample_2_amount)
        else:
            print("Upsample mode 2 is 99")
        if upsample_mode_3 != 99:
            print("upsample_mode_3 is {} on {}".format(upsample_mode_3, upsample_3_amount))
            df_train_split = upsample(df_train_split, upsample_mode_3, upsample_3_amount)
        else:
            print("Upsample mode 3 is 99")
        if upsample_mode_4 != 99:
            print("upsample_mode_4 is {} on {}".format(upsample_mode_4, upsample_4_amount))
            df_train_split = upsample(df_train_split, upsample_mode_4, upsample_4_amount)
        else:
            print("Upsample mode 4 is 99")
        if upsample_mode_5 != 99:
            print("upsample_mode_5 is {} on {}".format(upsample_mode_5, upsample_5_amount))
            df_train_split = upsample(df_train_split, upsample_mode_5, upsample_5_amount)
        else:
            print("Upsample mode is 99")
        print('Current Splits')
        print(df_train_split.groupby('click_mode').count()['sid'])
        tr_x = df_train_split[features].values
        tr_y = df_train_split['click_mode'].values
        train_set = lgb.Dataset(tr_x, tr_y)
        val_set = lgb.Dataset(val_x, val_y)
        # Train on this split
        lgb_model = lgb.train(lgb_paras, train_set,
                              valid_sets=[val_set], early_stopping_rounds=50,
                              num_boost_round=40000, verbose_eval=50, feval=eval_f)
        # Predict on best iteration of this split with validation set
        val_probs = lgb_model.predict(val_x, num_iteration=lgb_model.best_iteration)
        val_pred = np.argmax(val_probs, axis=1)
        return SIDs[i], val_probs  # NOTE(review): exits on first fold; all code below is unreachable
        save_preds(SIDs[i], val_probs, '/home/sandro/repo/kdd-cup-2019/models/test/split_' + str(count))
        #save_preds(None, val_pred, '/home/sandro/repo/kdd-cup-2019/models/test/split_' + str(count))
        count += 1
        # F1 val score
        val_score = f1_score(val_y, val_pred, average='weighted')
        count += 1  # NOTE(review): duplicate increment (dead code anyway)
        # Predict with test set on best iteration of this split
        result_proba.append(lgb_model.predict(
            test_x, num_iteration=lgb_model.best_iteration))
        scores.append(val_score)
        save_model(lgb_model, os.path.join(os.getcwd(), 'models', 'split_' + str(count) + '_' + model_name))
    # Train on first split again
    # save_preds(SIDs[i], val_pred, '/home/sandro/repo/kdd-cup-2019/models/test/split_1_final')
    print('cv f1-score: ', np.mean(scores))
    pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)
    save_model(lgb_model, os.path.join(os.getcwd(), 'models', 'final_' + model_name))
    return pred_test
```
| github_jupyter |
# San Diego Burrito Analytics
Scott Cole
23 April 2016
This notebook contains analyses on the burrito ratings in San Diego, including:
* How each metric correlates with one another.
* Linear model of how each dimension contributes to the overall rating
# Default imports
```
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style("white")
```
# Load data
```
# Load the crowd-sourced burrito ratings; N = total number of reviews.
filename="burrito_current.csv"
df = pd.read_csv(filename)
N = df.shape[0]
```
# Cali burritos vs. other burritos
```
# Identify california burritos
def caliburritoidx(x):
import re
idx = []
for b in range(len(x)):
re4str = re.compile('.*cali.*', re.IGNORECASE)
if re4str.match(x[b]) is not None:
idx.append(b)
return idx
caliidx = caliburritoidx(df.Burrito)
# Indices of the non-California burritos: start with every row index, then
# remove the Cali ones.
Ncaliidx = np.arange(len(df))
Ncaliidx = np.delete(Ncaliidx, caliidx)
met_Cali = ['Hunger','Volume','Cost','Tortilla','Temp','Meat','Fillings','Meat:filling',
            'Uniformity','Salsa','Synergy','Wrap','overall']
# Two-sample t-test per metric: Cali vs. non-Cali burritos (NaNs dropped
# independently within each group).
for k in met_Cali:
    Mcali = df[k][caliidx].dropna()
    MNcali = df[k][Ncaliidx].dropna()
    print(k)  # print() calls (the original used Python 2 print statements)
    print(sp.stats.ttest_ind(Mcali, MNcali))
```
# Independence of each dimension
```
# Split reviews into Scott's and everyone else's.
df_Scott = df[df.Reviewer=='Scott']
idx_Scott = df_Scott.index.values  # FIX: was `df2`, which is never defined
idx_NScott = np.arange(len(df))
idx_NScott = np.delete(idx_NScott, idx_Scott)
burritos_Scott = df.loc[df_Scott.index.values]['Burrito']  # FIX: df2 -> df_Scott
dfScorr = df_Scott.corr()
metricscorr = ['Yelp','Google','Hunger','Cost','Volume','Tortilla','Temp','Meat','Fillings','Meat:filling',
               'Uniformity','Salsa','Synergy','Wrap','overall']
M = len(metricscorr)
# Pairwise Pearson correlations (and p-values) between all rating metrics.
Mcorrmat = np.zeros((M,M))
Mpmat = np.zeros((M,M))
for m1 in range(M):
    for m2 in range(M):
        if m1 != m2:
            Mcorrmat[m1,m2] = dfScorr[metricscorr[m1]][metricscorr[m2]]  # FIX: was undefined `dfcorr`
            # NOTE(review): `pearsonp` is not defined anywhere in this file;
            # presumably a helper converting r and N to a p-value — confirm.
            Mpmat[m1,m2] = pearsonp(Mcorrmat[m1,m2],N)
clim1 = (-1,1)
plt.figure(figsize=(10,10))
# FIX: `cm` was never imported; pass the colormap by name instead.
cax = plt.pcolor(range(M+1), range(M+1), Mcorrmat, cmap='bwr')
cbar = plt.colorbar(cax, ticks=(-1,-.5,0,.5,1))
cbar.ax.set_ylabel('Pearson correlation (r)', size=30)
plt.clim(clim1)
cbar.ax.set_yticklabels((-1,-.5,0,.5,1),size=20)
#plt.axis([2, M+1, floall[0],floall[-1]+10])
ax = plt.gca()
ax.set_yticks(np.arange(M)+.5)
ax.set_yticklabels(metricscorr,size=25)
ax.set_xticks(np.arange(M)+.5)
ax.set_xticklabels(metricscorr,size=9)
plt.tight_layout()
# Try to argue that me sampling a bunch of burritos is equivalent to a bunch of people sampling burritos
# you would not be able to tell if a rated burrito was by me or someone else.
# Tests:
# 1. Means of each metric are the same
# 2. Metric correlations are the same (between each quality and overall)
# 3. Do I like Cali burritos more than other people?
# 1. Metric means are the same: I give my meat and meat:filling lower ratings
met_Scott = ['Hunger','Volume','Cost','Tortilla','Temp','Meat','Fillings','Meat:filling',
             'Uniformity','Salsa','Synergy','Wrap','overall']
# Per-metric two-sample t-test: Scott's ratings vs. everyone else's.
for k in met_Scott:
    Msc = df[k][idx_Scott].dropna()
    MNsc = df[k][idx_NScott].dropna()
    print(k)  # print() calls (the original used Python 2 print statements)
    print(sp.stats.ttest_ind(Msc,MNsc))
```
| github_jupyter |
```
import pandas as pd
# Import the packages we may need later.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Make matplotlib render Chinese characters and minus signs correctly.
from pylab import mpl
mpl.rcParams['font.sans-serif']=['SimHei']
mpl.rcParams['axes.unicode_minus']=False
# Read the product NAV sheet from the spreadsheet.
df = pd.read_excel('PPT图表更新-20200703(1).xlsx',sheet_name = '产品净值')
df
# First product: date + product price + benchmark index (first 71 weekly rows).
tmp_df = df.iloc[0:71,0:3]
tmp_df
names=tmp_df.columns
names
# Compute net asset value (NAV) normalized to the first observation.
tmp_df[names[1]+'净值']=tmp_df[names[1]]/tmp_df[names[1]][0]
tmp_df[names[2]+'净值']=tmp_df[names[2]]/tmp_df[names[2]][0]
# Weekly percentage change; the leading NaN is replaced with 0.
rate = tmp_df[names[1]]/tmp_df[names[1]].shift(1)-1
rate[np.isnan(rate)]=0.0
tmp_df[names[1]+'涨跌幅']=rate
rate = tmp_df[names[2]]/tmp_df[names[2]].shift(1)-1
rate[np.isnan(rate)]=0.0
tmp_df[names[2]+'涨跌幅']=rate
# Visualize the NAV series over the backtest period.
tmp_df.index = tmp_df['日期']
tmp_df[list(names[1:3])].plot(figsize=(16,7))
# Chart title.
plt.title('股价净值走势',fontsize=15)
# Performance statistics (same computation as plot_and_get_dict below,
# done inline here for the first product); 52 = weeks per year.
data_dict= {}
data_dict['产品名']=names[1]
data_dict['参考指数']=names[2]
data_dict['累计收益']=tmp_df[names[1]+'净值'][-1]-1
data_dict['年化收益']=(data_dict['累计收益']+1)**(52/len(tmp_df))-1
data_dict['标准差']=(tmp_df[names[1]+'涨跌幅']).std()*np.sqrt(52)
data_dict['夏普比率']=data_dict['年化收益']/data_dict['标准差']
data_dict['下侧波动率']=(tmp_df[tmp_df[names[1]+'涨跌幅']<0][names[1]+'涨跌幅']).std()
data_dict['索提诺比率']=data_dict['年化收益']/data_dict['下侧波动率']
data_dict['正收益周'] = len(tmp_df[tmp_df[names[1]+'涨跌幅']>0])
data_dict['负收益周'] = len(tmp_df[tmp_df[names[1]+'涨跌幅']<0])
data_dict['零收益周'] = len(tmp_df[tmp_df[names[1]+'涨跌幅']==0.0])
data_dict['胜率']=data_dict['正收益周']/data_dict['负收益周']
data_dict['盈亏比']=-tmp_df[tmp_df[names[1]+'涨跌幅']>0][names[1]+'涨跌幅'].sum()/data_dict['正收益周']*data_dict['负收益周']/tmp_df[tmp_df[names[1]+'涨跌幅']<0][names[1]+'涨跌幅'].sum()
# NOTE(review): the drawdown/run-up below use the full `df`, not the
# 71-row `tmp_df` slice used by every other statistic — confirm intent.
data_dict['最大回撤']=-((df[names[1]].cummax()-df[names[1]])/df[names[1]].cummax()).max()
data_dict['收益回撤比']=-data_dict['年化收益']/data_dict['最大回撤']
data_dict['最大上涨']=((df[names[1]]-df[names[1]].cummin())/df[names[1]].cummin()).max()
data_dict['Calmar']=data_dict['年化收益']/data_dict['最大回撤']
data_dict['交易周']=len(tmp_df)
data_dict['周平均收益']=data_dict['累计收益']/data_dict['交易周']
data_dict
def plot_and_get_dict(tmp_df):
    """Plot product vs. benchmark NAV and compute performance statistics.

    *tmp_df* must have the date in column 0, the product price series in
    column 1 and the benchmark index in column 2.  Adds NAV and weekly
    pct-change columns to tmp_df in place, draws the NAV chart, and returns
    a dict of (Chinese-labelled) performance metrics for the product.
    Data is assumed to be weekly (52 periods/year used for annualization).
    """
    names = tmp_df.columns
    # NAV normalized to the first observation.
    tmp_df[names[1]+'净值']=tmp_df[names[1]]/tmp_df[names[1]][0]
    tmp_df[names[2]+'净值']=tmp_df[names[2]]/tmp_df[names[2]][0]
    # Weekly percentage change; the leading NaN is replaced with 0.
    rate = tmp_df[names[1]]/tmp_df[names[1]].shift(1)-1
    rate[np.isnan(rate)]=0.0
    tmp_df[names[1]+'涨跌幅']=rate
    rate = tmp_df[names[2]]/tmp_df[names[2]].shift(1)-1
    rate[np.isnan(rate)]=0.0
    tmp_df[names[2]+'涨跌幅']=rate
    # Visualize the two NAV curves over the backtest window.
    tmp_df.index = tmp_df.iloc[:,0]
    tmp_df[list(names[1:3])].plot(figsize=(16,7))
    plt.title(names[1],fontsize=15)  # chart title
    data_dict= {}
    data_dict['产品名']=names[1]
    data_dict['参考指数']=names[2]
    data_dict['累计收益']=tmp_df[names[1]+'净值'][-1]-1
    data_dict['年化收益']=(data_dict['累计收益']+1)**(52/len(tmp_df))-1
    data_dict['标准差']=(tmp_df[names[1]+'涨跌幅']).std()*np.sqrt(52)
    data_dict['夏普比率']=data_dict['年化收益']/data_dict['标准差']
    data_dict['下侧波动率']=(tmp_df[tmp_df[names[1]+'涨跌幅']<0][names[1]+'涨跌幅']).std()
    data_dict['索提诺比率']=data_dict['年化收益']/data_dict['下侧波动率']
    data_dict['正收益周'] = len(tmp_df[tmp_df[names[1]+'涨跌幅']>0])
    data_dict['负收益周'] = len(tmp_df[tmp_df[names[1]+'涨跌幅']<0])
    data_dict['零收益周'] = len(tmp_df[tmp_df[names[1]+'涨跌幅']==0.0])
    data_dict['胜率']=data_dict['正收益周']/data_dict['负收益周']
    data_dict['盈亏比']=-tmp_df[tmp_df[names[1]+'涨跌幅']>0][names[1]+'涨跌幅'].sum()/data_dict['正收益周']*data_dict['负收益周']/tmp_df[tmp_df[names[1]+'涨跌幅']<0][names[1]+'涨跌幅'].sum()
    # BUG FIX: max drawdown / max run-up previously used the *global* `df`
    # (the whole spreadsheet) instead of the slice passed in; use tmp_df so
    # these stats refer to the same product/window as every other metric.
    data_dict['最大回撤']=-((tmp_df[names[1]].cummax()-tmp_df[names[1]])/tmp_df[names[1]].cummax()).max()
    data_dict['收益回撤比']=-data_dict['年化收益']/data_dict['最大回撤']
    data_dict['最大上涨']=((tmp_df[names[1]]-tmp_df[names[1]].cummin())/tmp_df[names[1]].cummin()).max()
    data_dict['Calmar']=data_dict['年化收益']/data_dict['最大回撤']
    data_dict['交易周']=len(tmp_df)
    data_dict['周平均收益']=data_dict['累计收益']/data_dict['交易周']
    return data_dict
plot_and_get_dict(tmp_df)
# Apply the same analysis to every product: each slice is (rows, 3 columns)
# of (date, product, benchmark), then collect all stats into one table.
data_for_df=[]
for tmp_df in [df.iloc[0:71,0:3],df.iloc[0:157,3:6],df.iloc[0:160,6:9],df.iloc[0:164,9:12],df.iloc[0:72,12:15],df.iloc[0:31,15:18]]:
    data_for_df.append(plot_and_get_dict(tmp_df))
pd.DataFrame(data_for_df)
```
| github_jupyter |
# Accessing BLS API
**Part 3**
In this notebook we will see how to access the BLS API to retrieve multiple series.
## step 1 - load packages and keys
```
import requests
import json
%run APIkeys.py
```
## step 2 - setting up
The communication with the API to download multiple series is done through a _POST_ request. This is how the BLS sets its API.
We need to define two dictionaries as follows.
```
base_url = 'https://api.bls.gov/publicAPI/v2/timeseries/data/' #this will not change
headers = {'Content-type': 'application/json'} #this will not change either
# For the key seriesid enter a list of series names you wish to download
# For the key startyear enter the start year inside ""
# For the key endyear enter the end year inside ""
parameters = {
    "seriesid":["CUUR0000SA0","CUUR0000SA0E"],
    "startyear":"2011",
    "endyear":"2021",
    "catalog":True,
    "calculations":False,
    "annualaverage":False,
    "aspects":False,
    # NOTE(review): `os` is never imported in this notebook — presumably
    # APIkeys.py (run above) imports it and sets BLS_API_key; confirm.
    "registrationkey":os.environ['BLS_API_key']
}
data = json.dumps(parameters) #this converts the Python dictionary into a JSON format
# Note: we don't need to do json.dumps for the variable headers because this dictionary
# is simple and already satisfies the JSON format
```
# step 3 - POST request
```
# Send the POST request and parse the JSON body of the response.
p = requests.post(base_url, data=data, headers=headers)
json_data = json.loads(p.text)
json_data
```
## Step 4 - exploring the data
We need to dig in and find the numbers we want.
```
# Drill down into the response structure step by step (each bare expression
# below is a notebook cell output).
json_data['Results']
type(json_data['Results'])
json_data['Results'].keys()
json_data['Results']['series']
type(json_data['Results']['series'])
len(json_data['Results']['series'])
json_data['Results']['series'][0]
type(json_data['Results']['series'][0])
json_data['Results']['series'][0].keys()
json_data['Results']['series'][0]['catalog']
json_data['Results']['series'][0]['data']
type(json_data['Results']['series'][0]['data'])
len(json_data['Results']['series'][0]['data'])
json_data['Results']['series'][0]['data'][131]
# Observation values arrive as strings; cast to float for arithmetic.
float(json_data['Results']['series'][0]['data'][131]['value'])
```
How about the other series? How can we access it?
```
float(json_data['Results']['series'][1]['data'][131]['value'])
```
## Step 5 - creating a function
We want to make a function that will accept a list of variables (i.e. series names) and will return the dictionary output.
```
def multiSeries(varList, myKey, startyear="2011", endyear="2021"):
    """POST a multi-series request to the BLS v2 timeseries API.

    Parameters
    ----------
    varList : list of str
        BLS series IDs to download (e.g. ["CUUR0000SA0"]).
    myKey : str
        BLS API registration key.
    startyear, endyear : str, optional
        Year range of the request.  Defaults preserve the original
        hard-coded 2011-2021 behavior, so existing calls are unaffected.

    Returns
    -------
    dict
        The parsed JSON response.
    """
    base_url = 'https://api.bls.gov/publicAPI/v2/timeseries/data/'  # this will not change
    headers = {'Content-type': 'application/json'}  # this will not change either
    parameters = {
        "seriesid": varList,
        "startyear": startyear,
        "endyear": endyear,
        "catalog": True,
        "calculations": False,
        "annualaverage": False,
        "aspects": False,
        "registrationkey": myKey
    }
    data = json.dumps(parameters)  # serialize the Python dict to JSON
    p = requests.post(base_url, data=data, headers=headers)
    json_data = json.loads(p.text)
    return json_data
```
Let's test it:
```
res = multiSeries(["CUUR0000SAM1","CUUR0400SEMC"],os.environ["BLS_API_key"])
res['Results']['series']
len(res['Results']['series'])
```
We still need to write a function that will parse the data and tease out the values.
But first, which variables exist?
<p style="font-size:24px;color:red;">
Not all combinations of area and item code exist.
</p>
> Price indexes are available for the U.S., the four Census regions, nine Census divisions, two size of city classes, eight cross-classifications of regions and size-classes, and for 23 local areas. Indexes are available for major groups of consumer expenditures (food and beverages, housing, apparel, transportation, medical care, recreation, education and communications, and other goods and services), for items within each group, and for special categories, such as services.
[Census divisions and regions](https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf) and [FIPS codes explanation](https://www.census.gov/library/reference/code-lists/ansi.html)
> Monthly indexes are available for the U.S., the four Census regions, and some local areas. More detailed item indexes are available for the U.S. than for regions and local areas.
What if we ask for a variable that doesn't exist?
```
# the first variable in the list doesn't exist
res = multiSeries(["CUUR0000S","CUUR0400SEMC"],os.environ["BLS_API_key"])
res
len(res['Results']['series'])
```
still length 2... but what is in the first position?
```
res['Results']['series'][0]
```
If a variable name doesn't exist, the 'data' key has a value equal to an empty list.
To be continued...
| github_jupyter |
```
!curl https://norvig.com/ngrams/count_1w.txt -o count_1w.txt
!curl https://raw.githubusercontent.com/first20hours/google-10000-english/master/google-10000-english-no-swears.txt -o top-10000-no-swears.txt
with open('top-10000-no-swears.txt', 'r') as f:
    # splitlines() already returns a list; no intermediate list() needed.
    safe_words = set(f.read().splitlines())
len(safe_words)
# Map word -> corpus frequency, keeping only "safe", 3-6 letter words with
# no doubled letters (adjacent duplicates can never be traced on the figure).
word_to_count = {}
with open('count_1w.txt', 'r') as f:
    for x in f.readlines():
        word, count = x.split()
        if word not in safe_words:
            continue
        if len(word) < 3 or len(word) > 6:
            continue
        if any(a == b for a, b in zip(word, word[1:])):
            continue
        word_to_count[word] = int(count)
# Puzzle-generation parameters (used by the generator and serializer below):
LINE_SIZE = 7  # max number of words in a seed's solution line
DICTIONARY_SIZE = 720  # max playable words allowed for a generated figure
WORD_SIZE = 6  # max letters per word (hashes are padded to this length)
FIG_SIZE = 4  # sides per figure
SIDE_SIZE = 3  # letters per side
!curl https://raw.githubusercontent.com/dwyl/english-words/master/words_dictionary.json -o full_dictionary.json
!curl https://raw.githubusercontent.com/fogleman/TWL06/master/twl.py -o twl.py
import json
import twl
# with open('full_dictionary.json') as json_file:
# full_dictionary = json.load(json_file)
def is_generateable(word, figure):
    """Return True iff *word* can be traced on *figure*.

    *figure* is a list of letter "sides".  A word is traceable when it has
    3-6 letters and every pair of adjacent letters lies on two different
    sides (consecutive letters on the same side are illegal).
    """
    if not 3 <= len(word) <= 6:
        return False
    for a, b in zip(word, word[1:]):
        # The pair (a, b) is playable iff some side contains `a` and a
        # *different* side contains `b`.
        playable = any(
            a in side_a and b in side_b
            for side_a in figure
            for side_b in figure
            if side_a != side_b
        )
        if not playable:
            return False
    return True
def get_full_dict(figure):
    """Return every TWL06 dictionary word that can be traced on *figure*."""
    return [word for word in twl.iterator() if is_generateable(word, figure)]
import random
import numpy as np
import string
from copy import deepcopy
import time
# Randomized search for a "nice" puzzle figure: repeatedly grow a 4-side
# letter figure by chaining high-frequency words, then report it only if the
# number of playable words is small enough.  Runs until interrupted.
rolls = 0
st = time.time()
while True:
    rolls += 1
    if rolls % 100 == 0:
        print("Rolls: {}, Elapsed time: {}".format(rolls, time.time()-st))
    # Seed the figure with one random letter on one random side.
    ch = random.choice(string.ascii_lowercase)
    # NOTE(review): `[]*3` is just `[]`, so this builds 4 empty sides — the
    # `*3` is a no-op (sides are capped at 3 letters inside can_use).
    res = [ []*3 for i in range(4)]
    prev_side = random.choice(list(range(4)))
    res[prev_side].append(ch)
    charset = set([ch])
    past_words = set()
    def can_use(chosen_word):
        # Tentatively place chosen_word on the figure; commit the placement
        # (via the module-level res/prev_side/charset) only if every letter
        # can be placed legally, otherwise leave the figure untouched.
        global res, prev_side, charset
        modded_res = deepcopy(res)
        modded_prev_side = prev_side
        modded_charset = deepcopy(charset)
        is_first_letter = True
        for char in chosen_word:
            if char not in modded_charset:
                # New letter: put it on a random side that is neither the
                # previous side nor already full (3 letters).
                found_side = None
                side_ordering = np.random.permutation(list(range(4)))
                for side in side_ordering:
                    if side == modded_prev_side:
                        continue
                    if len(modded_res[side]) == 3:
                        continue
                    found_side = side
                    break
                if found_side is None:
                    return False
                modded_res[found_side].append(char)
                modded_prev_side = found_side
                modded_charset.add(char)
            else:
                # Existing letter: consecutive letters may not sit on the
                # same side, except at the very start of the word.
                for i in range(4):
                    if char in modded_res[i]:
                        # print(char, i)
                        if i == modded_prev_side and not is_first_letter:
                            return False
                        modded_prev_side = i
            is_first_letter = False
        # Every letter placed legally: commit the tentative state.
        res = modded_res
        prev_side = modded_prev_side
        charset = modded_charset
        past_words.add(chosen_word)
        return True
    chosen_word_soln = []
    # Chain words (each must start with the previous word's last letter)
    # until the figure holds 12 letters or the solution reaches 5 words.
    while len(charset) < 12 and len(past_words) < 5:
        filtered_words = list(filter(lambda x: x.startswith(ch) and x not in past_words, list(word_to_count.keys())))
        filtered_weights = [word_to_count[word] for word in filtered_words]
        sum_w = sum(filtered_weights)
        filtered_weights = [x/sum_w for x in filtered_weights]
        chosen_word = None
        # NOTE(review): `iter` shadows the builtin; it is just a retry
        # counter capping the frequency-weighted draws at 100.
        iter = 0
        while (chosen_word is None or not can_use(chosen_word)) and iter < 100:
            chosen_word = np.random.choice(filtered_words, p=filtered_weights)
            iter += 1
        # print(chosen_word, word_to_count[chosen_word])
        chosen_word_soln.append(chosen_word)
        ch = chosen_word[-1]
    def is_figure_nice(figure):
        # Accept only figures with exactly FIG_SIZE full sides of SIDE_SIZE.
        if len(figure) != FIG_SIZE:
            return False
        for side in figure:
            if len(side) != SIDE_SIZE:
                return False
        return True
    figure = list([list(np.random.permutation(x)) for x in res])
    if not is_figure_nice(figure):
        continue
    # Reject figures that admit too many playable words.
    dict_size = len(get_full_dict(figure))
    if dict_size > DICTIONARY_SIZE:
        continue
    print("Natural soln: {}".format(chosen_word_soln))
    print("Figure: {}".format(figure))
    print("Dict size: {}".format(dict_size))
# bad game seeds
bad_seeds = [
{
'figure': [['e', 'o', 'a'], ['n', 'l', 't'], ['r', 's', 'g'], ['v', 'i', 'd']],
'solution': ['starting', 'gold', 'design', 'national', 'live'],
},
{
'figure': [['c', 'e', 'y'], ['p', 'l', 'o'], ['i', 'u', 't'], ['v', 'm', 'r']],
'solution': ['improve', 'electric', 'city', 'your'],
},
{
'figure': [['i', 'n', 'e'], ['m', 'r', 's'], ['t', 'u', 'f'], ['o', 'd', 'l']],
'solution': ['for', 'results', 'stories', 'some', 'end'],
},
{
'figure': [['a', 's', 'e'], ['c', 'i', 'p'], ['t', 'l', 'n'], ['b', 'd', 'm']],
'solution': ['dance', 'email', 'labs', 'split'],
},
{
'figure': [['l', 'c', 'f'], ['i', 'o', 'h'], ['b', 'p', 'e'], ['u', 'r', 'a']],
'solution': ['blue', 'each', 'help', 'profile'],
}
]
# good game seeds
good_seeds = [
{
'figure': [['l', 'u', 'k'], ['r', 'm', 'i'], ['s', 'c', 'n'], ['o', 'g', 'f']],
'line': ['from', 'music', 'click', 'king'],
},
{
'figure': [['i', 'y', 'n'], ['k', 'v', 'u'], ['t', 'e', 'r'], ['o', 'c', 'h']],
'line': ['the', 'entry', 'your', 'river', 'rock'],
},
]
for seed in good_seeds:
seed['dict'] = get_full_dict(seed['figure'])
CHUNK_SIZE = 8  # words packed into one compressed integer
COMPRESSED_DICTIONARY_SIZE = 90  # fixed chunk count the payload is padded to
def get_poly_hash(wordlist):
    """Treat *wordlist* as big-endian base-32 digits and return the value."""
    digits = list(wordlist)
    acc = 0
    for position, digit in enumerate(digits):
        acc += digit * 32 ** (len(digits) - 1 - position)
    return acc
def string_to_hash(word, req_len):
res = []
for ch in word:
res.append(ord(ch) - ord('a'))
while len(res) < req_len:
res.append(27)
return get_poly_hash(res)
def compress_hashes(words, chunk_len, req_len):
    """Pack *words* into integers, *chunk_len* word-hashes per integer.

    Each word is hashed with ``string_to_hash(word, req_len)``; empty slots
    in the final (short) chunk are filled with the all-28 sentinel hash so
    every returned integer encodes exactly chunk_len word slots.
    """
    chunked_words = [words[i:i+chunk_len] for i in range(0, len(words), chunk_len)]
    # (debug print of the full word list removed)
    res = []
    for chunk in chunked_words:
        hash = 0
        for i in range(chunk_len):
            if i < len(chunk):
                hash = hash*(32**req_len) + string_to_hash(chunk[i], req_len)
            else:
                # Pad the short final chunk with the sentinel word hash.
                hash = hash*(32**req_len) + get_poly_hash([28]*req_len)
        res.append(hash)
    return res
def get_computer_readable_form(seed):
    """Serialize a puzzle *seed* (figure + solution line + dictionary) into
    the fixed-size, stringified-integer payload the game client expects."""
    # Verify conditions: the seed must fit the fixed payload layout.
    assert len(seed['figure']) == FIG_SIZE
    for side in seed['figure']:
        assert len(side) == SIDE_SIZE
    assert len(seed['dict']) <= DICTIONARY_SIZE
    for word in seed['dict']:
        assert len(word) <= WORD_SIZE
    assert len(seed['line']) <= LINE_SIZE
    def get_padding(chunk_len, req_len):
        # A chunk made entirely of the all-28 sentinel word hash.
        x = get_poly_hash([28]*req_len)
        return sum(x*((32**req_len)**i) for i in range(chunk_len))
    # Dictionary: pack words CHUNK_SIZE at a time, pad to the fixed size.
    comp_dictionary = compress_hashes(seed['dict'], CHUNK_SIZE, WORD_SIZE)
    while len(comp_dictionary) < COMPRESSED_DICTIONARY_SIZE:
        comp_dictionary.append(get_padding(CHUNK_SIZE, WORD_SIZE))
    # Solution line: one hash per word, padded to LINE_SIZE slots.
    comp_line = [string_to_hash(word, WORD_SIZE) for word in seed['line']]
    while len(comp_line) < LINE_SIZE:
        comp_line.append(get_padding(1, WORD_SIZE))
    # Figure: flatten the 4x3 sides into 12 letters and hash them as one
    # "word".  (string_to_hash iterates the list of single-char strings.)
    unrolled_fig = [item for sublist in seed['figure'] for item in sublist]
    comp_figure = string_to_hash(unrolled_fig, 12)
    # Stringify everything for the JSON payload.
    comp_line = [str(x) for x in comp_line]
    comp_figure = str(comp_figure)
    comp_dictionary = [str(x) for x in comp_dictionary]
    return {
        'line': comp_line,
        'figure': comp_figure,
        'compressed_dictionary': comp_dictionary,
        'address': "FILL_HERE",
        'private_address': "FILL_HERE"
    }
import json
import random
from copy import deepcopy
# Serialize the first good seed to seed.json for the game client.
tmp_seed = deepcopy(good_seeds[0])
print(tmp_seed)
print(len(tmp_seed['dict']))
comp_form = get_computer_readable_form(tmp_seed)
print(len(comp_form['compressed_dictionary']))
print(comp_form)
with open("seed.json", "w") as outfile:
    json.dump(comp_form, outfile)
good_seeds[0]['figure']
good_seeds[0]['line']
```
| github_jupyter |
## Topic Models
Dynamic topic models can be used to visualize the topics of a collection of documents.
<br>
<img src="https://raw.githubusercontent.com/MaartenGr/BERTopic/master/images/logo.png" width="40%">
Inspired by this notebook: https://colab.research.google.com/drive/1FieRA9fLdkQEGDIMYl0I3MCjSUKVF8C-?usp=sharing
# Enabling the GPU
First, you'll need to enable GPUs for the notebook:
- Navigate to Edit→Notebook Settings
- select GPU from the Hardware Accelerator drop-down
[Reference](https://colab.research.google.com/notebooks/gpu.ipynb)
# Installing BERTopic
We start by installing BERTopic from PyPi:
```
%%capture
!pip install bertopic
```
## Restart the Notebook
After installing BERTopic, some packages that were already loaded were updated and in order to correctly use them, we should now restart the notebook.
From the Menu:
Runtime → Restart Runtime
# **Data**
```
%cd ../..
import glob
import pandas as pd
import os
from pprint import pprint
import matplotlib.pyplot as plt
from src.utils.parse_data import parse_ast, parse_concept, parse_relation
# Locations of the training/validation splits and their sub-folders.
train_data_path = "data/train"
val_data_path = "data/val"
ast_folder_name = "ast"
concept_folder_name = "concept"
rel_folder_name = "rel"
txt_folder_name = "txt"
text_files = glob.glob(train_data_path + os.sep + txt_folder_name + os.sep + "*.txt")
filename = ""
from tqdm import tqdm
# Collect rows in a plain list and build the DataFrame once at the end:
# DataFrame.append inside a loop is O(n^2) and was removed in pandas 2.0.
rows = []
for file in tqdm(text_files):
    with open(file, 'r') as f:
        text = f.read()
    filename = file.split("/")[-1].split(".")[0]
    # The concept annotations live next to the text files, keyed by filename.
    concept = parse_concept(train_data_path + os.sep + concept_folder_name + os.sep + filename + ".con")
    rows.append({"text": text, "filename": filename, "concept": concept})
df = pd.DataFrame(rows, columns=["text", "filename", "concept"])
df.head()
# Flatten the per-document concept dicts into one frame, one row per concept
# mention, tagged with the source filename.  pd.concat replaces the
# deprecated (removed in pandas 2.0) DataFrame.append-in-a-loop pattern.
parts = []
for i, file in df.iterrows():
    concept_dict = file["concept"]
    tmp = pd.DataFrame(concept_dict)
    tmp["filename"] = file["filename"]
    parts.append(tmp)
concept_df = pd.concat(parts, ignore_index=True)
# Keep the original column order: filename first, then the concept fields.
concept_df = concept_df[["filename"] + [c for c in concept_df.columns if c != "filename"]]
concept_df.head()
texts = concept_df.concept_text.values.tolist()
texts
```
# **Dynamic Topic Modeling**
## Basic Topic Model
To perform Dynamic Topic Modeling with BERTopic we will first need to create a basic topic model using all texts. The temporal aspect will be ignored as we are, for now, only interested in the topics that reside in those texts.
```
from bertopic import BERTopic
# min_topic_size=35 keeps only reasonably large clusters as topics.
topic_model = BERTopic(min_topic_size=35, verbose=True)
# Fit on the concept-mention strings; the second return value is discarded.
topics, _ = topic_model.fit_transform(texts)
```
We can then extract most frequent topics:
```
freq = topic_model.get_topic_info(); freq.head(10)
```
-1 refers to all outliers and should typically be ignored. Next, let's take a look at a frequent topic that were generated:
```
topic_nr = freq.iloc[0]["Topic"] # We select a frequent topic
topic_model.get_topic(topic_nr) # You can select a topic number as shown above
```
We can visualize the basic topics that were created with the Intertopic Distance Map.
```
fig = topic_model.visualize_topics(); fig
```
| github_jupyter |
```
# Uncomment to run the notebook in Colab
# ! pip install -q "wax-ml[complete]@git+https://github.com/eserie/wax-ml.git"
# ! pip install -q --upgrade jax jaxlib==0.1.70+cuda111 -f https://storage.googleapis.com/jax-releases/jax_releases.html
# check available devices
import jax
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
jax.devices()
```
# 〰 Compute exponential moving averages with xarray and pandas accessors 〰
[](https://colab.research.google.com/github/eserie/wax-ml/blob/main/docs/notebooks/01_demo_EWMA.ipynb)
WAX-ML implements pandas and xarray accessors to ease the usage of machine-learning algorithms with
high-level data APIs :
- pandas's `DataFrame` and `Series`
- xarray's `Dataset` and `DataArray`.
These accessors allow to easily execute any function using Haiku modules
on these data containers.
For instance, WAX-ML propose an implementation of the exponential moving average realized
with this mechanism.
Let's show how it works.
## Load accessors
First you need to load accessors:
```
from wax.accessors import register_wax_accessors
register_wax_accessors()
```
## EWMA on dataframes
Let's look at a simple example: The exponential moving average (EWMA).
Let's apply the EWMA algorithm to the [NCEP/NCAR 's Air temperature data](http://www.esrl.noaa.gov/psd/data/gridded/data.ncep.reanalysis.html).
### 🌡 Load temperature dataset 🌡
```
import xarray as xr
dataset = xr.tutorial.open_dataset("air_temperature")
```
Let's see what this dataset looks like:
```
dataset
```
To compute a EWMA on some variables of a dataset, we usually need to convert data
in pandas
[series](https://pandas.pydata.org/docs/reference/api/pandas.Series.html) or
[dataframe](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html).
So, let's convert the dataset into a dataframe to illustrate `accessors` on a dataframe:
```
dataframe = dataset.air.to_series().unstack(["lon", "lat"])
```
### EWMA with pandas
```
air_temp_ewma = dataframe.ewm(alpha=1.0 / 10.0).mean()
_ = air_temp_ewma.iloc[:, 0].plot()
```
### EWMA with WAX-ML
```
air_temp_ewma = dataframe.wax.ewm(alpha=1.0 / 10.0).mean()
_ = air_temp_ewma.iloc[:, 0].plot()
```
On small data, WAX-ML's EWMA is slower than Pandas' because of the expensive data conversion steps.
WAX-ML's accessors are interesting to use on large data loads
(See our [three-steps_workflow](https://wax-ml.readthedocs.io/en/latest/notebooks/04_The_three_steps_workflow.html))
## Apply a custom function to a Dataset
Now let's illustrate how WAX-ML accessors work on [xarray datasets](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html).
```
from wax.modules import EWMA
def my_custom_function(dataset):
    """Compute two exponential moving averages of the air-temperature field.

    Returns a dict with a fast (alpha=1/10) and a slow (alpha=1/100) EWMA
    of ``dataset["air"]``.
    """
    air = dataset["air"]
    fast = EWMA(1.0 / 10.0)(air)
    slow = EWMA(1.0 / 100.0)(air)
    return {"air_10": fast, "air_100": slow}
dataset = xr.tutorial.open_dataset("air_temperature")
output, state = dataset.wax.stream().apply(
my_custom_function, format_dims=dataset.air.dims
)
_ = output.isel(lat=0, lon=0).drop(["lat", "lon"]).to_pandas().plot(figsize=(12, 8))
```
| github_jupyter |
# Support Vector Classification with StandardScaler
This Code template is for the Classification task using Support Vector Classifier(SVC) based on the Support Vector Machine algorithm and feature rescaling technique StandardScaler in a pipeline.
### Required Packages
```
!pip install imblearn
import warnings
import seaborn as se
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import LabelEncoder,StandardScaler
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training .
```
#x_values
features=[]
```
Target variable for prediction.
```
#y_value
target=""
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X=df[features]
Y=df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
    """Impute missing values in a pandas Series, in place.

    Numeric (float64/int64) series are filled with their mean; any other
    Series is filled with its mode.  Non-Series inputs pass through
    untouched.  Returns the (possibly modified) input.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        df.fillna(df.mean(), inplace=True)
    else:
        df.fillna(df.mode()[0], inplace=True)
    return df
def EncodeX(df):
    """One-hot encode every categorical column via pandas dummies."""
    return pd.get_dummies(df)
def EncodeY(df):
    """Label-encode the target when it has more than two classes.

    Binary targets are returned unchanged; multi-class targets are mapped
    to 0..K-1 and the sorted original labels are printed for reference.
    """
    if len(df.unique()) <= 2:
        # Already binary — nothing to encode.
        return df
    original_labels = np.sort(pd.unique(df), axis=-1, kind='mergesort')
    encoded_labels = list(range(len(original_labels)))
    df = LabelEncoder().fit_transform(df)
    print("Encoded Target: {} to {}".format(original_labels, encoded_labels))
    return df
```
Calling preprocessing functions on the feature and target set.
```
# Impute missing values column-by-column, then one-hot encode the features
# and (if multi-class) label-encode the target.
x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
#### Distribution Of Target Variable
```
plt.figure(figsize = (10,6))
se.countplot(Y)
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
#### Handling Target Imbalance
The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.
One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library.
```
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
```
### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for a given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies the inputted new cases based on the hyperplane. In 2-Dimensional space, this hyperplane is a line separating a plane into two segments where each class or group occupied on either side.
Here we have used SVC, the svc implementation is based on libsvm.
* #### Model Tuning Parameters
> - C -> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
> - kernel -> Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’ will be used. If a callable is given it is used to pre-compute the kernel matrix from data matrices; that matrix should be an array of shape (n_samples, n_samples).
> - gamma -> Gamma is a hyperparameter that we have to set before the training model. Gamma decides how much curvature we want in a decision boundary.
> - degree -> Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels.Using degree 1 is similar to using a linear kernel. Also, increasing degree parameter leads to higher training times.
```
model=make_pipeline(StandardScaler(),SVC(random_state=123))
model.fit(x_train,y_train)
```
#### Model Accuracy
score() method return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
#### Confusion matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
```
#### Classification Report
A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.
* where:
- Precision:- Accuracy of positive predictions.
- Recall:- Fraction of positives that were correctly identified.
- f1-score:- percent of positive predictions were correct
- support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test,model.predict(x_test)))
```
#### Creator: Thilakraj Devadiga , Github: [Profile](https://github.com/Thilakraj1998)
| github_jupyter |
# Stacking & Successive Halving Random + Search Example
```
%load_ext watermark
%watermark -p scikit-learn,mlxtend
```
## Dataset
```
from sklearn import model_selection
from sklearn.model_selection import train_test_split
from sklearn import datasets
# Built-in binary classification dataset.
data = datasets.load_breast_cancer()
X, y = data.data, data.target
# Stratified 70/30 train/test split, then a further 80/20 train/validation
# split of the training fold (fixed seeds for reproducibility).
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
X_train_sub, X_valid, y_train_sub, y_valid = \
    train_test_split(X_train, y_train, test_size=0.2, random_state=1, stratify=y_train)
# NOTE(review): this prints y_train (the full training fold), not
# y_train_sub, so the 'Train' size includes the validation rows — confirm.
print('Train/Valid/Test sizes:', y_train.shape[0], y_valid.shape[0], y_test.shape[0])
```
## Baseline
```
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from mlxtend.classifier import StackingCVClassifier
from sklearn.linear_model import LogisticRegression
forest = RandomForestClassifier(n_estimators=100,
random_state=123)
boost = XGBClassifier(random_state=123, verbosity=0, use_label_encoder=False)
metaclassifier = LogisticRegression(random_state=123)
sclf = StackingCVClassifier(classifiers=[forest, boost],
meta_classifier=metaclassifier,
random_state=123)
```
Random forest:
```
forest.fit(X_train_sub, y_train_sub)
print(f"Training Accuracy: {forest.score(X_train_sub, y_train_sub):0.2f}")
print(f"Validation Accuracy: {forest.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {forest.score(X_test, y_test):0.2f}")
```
Gradient boosting:
```
boost.fit(X_train_sub, y_train_sub)
print(f"Training Accuracy: {boost.score(X_train_sub, y_train_sub):0.2f}")
print(f"Validation Accuracy: {boost.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {boost.score(X_test, y_test):0.2f}")
```
Stacking:
```
sclf.fit(X_train_sub, y_train_sub)
print(f"Training Accuracy: {sclf.score(X_train_sub, y_train_sub):0.2f}")
print(f"Validation Accuracy: {sclf.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {sclf.score(X_test, y_test):0.2f}")
```
## Successive Halving + Random Search
- More info:
- https://scikit-learn.org/stable/modules/grid_search.html#successive-halving-user-guide
- https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.HalvingRandomSearchCV.html#sklearn.model_selection.HalvingRandomSearchCV
```
import numpy as np
import scipy.stats
# enable_halving_search_cv must be imported before HalvingRandomSearchCV:
# the halving searches are still experimental in scikit-learn.
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingRandomSearchCV
from sklearn.pipeline import make_pipeline

# Wrap the stacking classifier so its hyperparameters are addressable with
# the 'stackingcvclassifier__<param>' double-underscore convention.
pipe = make_pipeline(sclf)

params = {
    'stackingcvclassifier__use_probas': [True],
    'stackingcvclassifier__drop_proba_col': [None, 'last'],
    # log-uniform prior: regularization strength is a scale-free quantity
    'stackingcvclassifier__xgbclassifier__reg_alpha': scipy.stats.loguniform(1e-5, 1),
    'stackingcvclassifier__xgbclassifier__max_depth': [2, 4, 6, 8],
    'stackingcvclassifier__randomforestclassifier__n_estimators': [10, 100]
}

search = HalvingRandomSearchCV(
    estimator=pipe,
    param_distributions=params,
    n_candidates='exhaust',  # sample as many candidates as the budget allows
    resource='n_samples',    # successive halving grows the training-set size
    factor=3,                # keep the best ~1/3 of candidates each round
    random_state=123,
    n_jobs=1)

search.fit(X_train, y_train)

search.best_score_

search.best_params_

print(f"Training Accuracy: {search.best_estimator_.score(X_train, y_train):0.2f}")
print(f"Test Accuracy: {search.best_estimator_.score(X_test, y_test):0.2f}")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/geotrush/Neural-Network-Zoo/blob/main/PyTorch/Image%2BVideo%2BAudio-Synthesis/Style-Transfer/Neural%20Style%20Transfer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neural Style Transfer
-----------------------
### Paper: [A Neural Algorithm of Artistic Style (Gatys et al., 2015)](https://arxiv.org/pdf/1508.06576.pdf)
### GitHub: [PyTorch Implementation of Neural Style Transfer (Gatys et al., 2015)](https://github.com/leongatys/PytorchNeuralStyleTransfer)
- Import libraries
```
# Notebook display
from tqdm import tqdm
from PIL import Image
from google.colab import files
import matplotlib.pyplot as plt
from IPython.display import clear_output
# PyTorch
import torch
from torch.optim import LBFGS
import torch.nn.functional as F
from torchvision import transforms
from torch.hub import load_state_dict_from_url
from torch.nn import Module, Conv2d, AvgPool2d
```
- Define constants and image processing functions
```
# Image size
SIZE = 1280
# ImageNet statistics
MEAN = (0.485, 0.456, 0.406)
STD = (0.229, 0.224, 0.225)
# Determine device
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Pre-processing
def prep(image, size=SIZE, normalize=True, mean=MEAN, std=STD, device=DEVICE):
    """Resize + center-crop a PIL image; optionally normalize into a 4-D tensor."""
    cropper = transforms.Compose([transforms.Resize(size, Image.LANCZOS),
                                  transforms.CenterCrop(size)])
    resized = cropper(image.convert('RGB'))
    if not normalize:
        # Un-normalized path: keep the PIL image for display.
        return resized
    to_tensor = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(mean, std)])
    # Add a batch dimension and move to the target device.
    return to_tensor(resized).unsqueeze(0).to(device)
# Post-processing
def post(tensor, mean=MEAN, std=STD):
    """Undo ImageNet normalization and convert a tensor back to a PIL image."""
    image = tensor.detach().cpu().squeeze(0)
    mean_t = torch.tensor(mean).view(3, 1, 1)
    std_t = torch.tensor(std).view(3, 1, 1)
    denormalized = image * std_t + mean_t
    return transforms.ToPILImage()(denormalized.clamp_(0, 1))
# Draw content, style and output images
def draw(input, content_image, style_image):
    """Show content, style and current output side by side; return the output PIL image."""
    output = post(input)
    clear_output(wait=True)
    plt.figure(figsize=(18, 6))
    # First two panels: the (un-normalized) content and style inputs.
    for position, (img, title) in enumerate(
            [(content_image, 'Content Image'), (style_image, 'Style Image')],
            start=1):
        axis = plt.subplot(1, 3, position)
        axis.imshow(prep(img, normalize=False))
        axis.axis('off')
        axis.set_title(title)
    # Third panel: the current synthesized image.
    axis = plt.subplot(1, 3, 3)
    axis.imshow(output)
    axis.axis('off')
    axis.set_title('Output Image')
    plt.show()
    return output
```
- Upload content and style images
```
# Manually upload the images
files.upload()
# Open and prepare images
content_image, style_image = Image.open('painter.jpg'), Image.open('abstract.jpg')
content, style = prep(content_image), prep(style_image)
input = content.clone()
# Display images
_ = draw(input, content_image, style_image)
```
- Define content and style losses
```
class ContentLoss(Module):
    """Squared-error content loss, averaged over all feature-map elements."""

    def forward(self, features, targets):
        difference = features - targets
        return difference.pow(2).mean()
class StyleLoss(Module):
    """MSE between the Gram matrix of the features and a fixed target Gram matrix."""

    def forward(self, features, GM_t):
        gram = self.gram_matrix(features)
        return (gram - GM_t).pow(2).mean()

    @staticmethod
    def gram_matrix(inputs):
        # NOTE(review): the batch dimension is discarded by the (C, H*W)
        # reshape, so this assumes batch size 1 — confirm at call sites.
        _, channels, height, width = inputs.size()
        flattened = inputs.view(channels, height * width)
        gram = flattened.mm(flattened.t())
        return gram.div_(height * width)
```
- Define VGG-19 with average pooling for better image synthesis
```
class VGG19(Module):
    """VGG-19 feature extractor with average pooling (better for image synthesis).

    Submodules are registered in the canonical torchvision order
    (conv1_1, conv1_2, pool1, conv2_1, ...) so a pre-trained checkpoint's
    parameters can be copied over positionally.
    """

    # (block number, number of convs, in-channels of first conv, out-channels)
    _BLOCKS = ((1, 2, 3, 64),
               (2, 2, 64, 128),
               (3, 4, 128, 256),
               (4, 4, 256, 512),
               (5, 4, 512, 512))

    def __init__(self):
        super().__init__()
        # Build conv{b}_{i} and pool{b} attributes in registration order.
        for block, n_convs, c_in, c_out in self._BLOCKS:
            for idx in range(1, n_convs + 1):
                in_channels = c_in if idx == 1 else c_out
                setattr(self, f'conv{block}_{idx}',
                        Conv2d(in_channels, c_out, kernel_size=3, padding=1))
            setattr(self, f'pool{block}', AvgPool2d(kernel_size=2, stride=2))
        # Freeze parameters — the network is used only as a fixed feature extractor.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x, out_layers):
        """Run the full network, returning the activations named in *out_layers*."""
        out = dict()
        current = x
        for block, n_convs, _, _ in self._BLOCKS:
            for idx in range(1, n_convs + 1):
                name = f'conv{block}_{idx}'
                current = F.relu(getattr(self, name)(current))
                out[name] = current
            pool_name = f'pool{block}'
            current = getattr(self, pool_name)(current)
            out[pool_name] = current
        return [out[layer] for layer in out_layers]
```
- Load pre-trained VGG-19 weights & biases
```
# Copy torchvision's pre-trained VGG-19 weights into the custom module.
vgg19 = VGG19().to(DEVICE)
state = vgg19.state_dict()
pretrained_vgg19 = 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'
new_state_values = list(load_state_dict_from_url(pretrained_vgg19).values())
# NOTE(review): this pairs parameters purely by position, so it assumes our
# state_dict enumerates conv weights/biases in exactly the same order as the
# downloaded checkpoint — verify if either architecture definition changes.
for i, key in enumerate(state):
    state[key] = new_state_values[i]
vgg19.load_state_dict(state)
```
- Pre-compute targets and initialize losses
```
# Layers of Content and Style reconstruction
content_layers = ['conv4_2']
style_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
loss_layers = content_layers + style_layers
# Targets: fixed content activations and style Gram matrices,
# detached so no gradients flow back into them during optimization.
content_targets = [targets.detach() for targets in vgg19(content, content_layers)]
style_targets = [StyleLoss.gram_matrix(targets).detach() for targets in vgg19(style, style_layers)]
targets = content_targets + style_targets
# Losses: one loss module per monitored layer, in the same order as `targets`.
losses = [ContentLoss()] * len(content_layers) + [StyleLoss()] * len(style_layers)
# Loss weights: each style term is weighted 1000x relative to the content term.
content_weights = [1] * len(content_layers)
style_weights = [1e3] * len(style_layers)
loss_weights = content_weights + style_weights
```
- Transfer style
```
def transfer_style(model, input, targets, losses, loss_weights, iterations=20):
    """Optimize *input* in place with L-BFGS to match the content/style targets.

    NOTE(review): also relies on the notebook globals ``loss_layers``,
    ``content_image`` and ``style_image`` (not passed as parameters) —
    confirm they are defined before calling.
    """
    optimizer = LBFGS([input.requires_grad_()])
    for iteration in tqdm(range(1, iterations + 1)):
        def closure():
            # L-BFGS may re-evaluate the objective several times per step,
            # hence the closure that recomputes the full weighted loss.
            optimizer.zero_grad()
            out = model(input, loss_layers)
            total_loss = sum([loss_weights[i] * losses[i](features, targets[i]) for i, features in enumerate(out)])
            total_loss.backward()
            return total_loss
        optimizer.step(closure)
    # Display the final panels and return the synthesized PIL image.
    output = draw(input, content_image, style_image)
    return output
# Run
output = transfer_style(vgg19, input, targets, losses, loss_weights)
```
- Save and download output
```
output.save('output.png')
files.download('output.png')
```
| github_jupyter |
# Notebook contents:
This notebook contains a lecture. The code for generating the plots is found at the end of the notebook. Links below.
- [presentation](#Session-1b:)
- [code for plots](#Code-for-plots)
# Session 12:
## Supervised learning, part 2
*Andreas Bjerre-Nielsen*
## Vaaaamos
```
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
```
# Recap from earlier sessions
Stuff about machine learning
- Supervised learning (having a target variable)
- Classification problems: Perceptron, Adaline, Logistic regression
- Regression problems: Linear regression
- We learned about optimization: gradient descent
- How can we say whether a model generalizes:
- We split data randomly into training and testing data.
## Optimal models
*What is the main dilemma when making supervised models?*
```
f_bias_var['regression'][2]
```
## Curbing overfitting
*What was a remedy to overfitting in linear models? How do we measure overfitting?*
Regularization
We add a penalty term our optimization procedure:
$$ \text{arg min}_\beta \, \underset{\text{MSE=SSE/n}}{\underbrace{E[(y_0 - \hat{f}(x_0))^2]}} + \underset{\text{penalty}}{\underbrace{\lambda \cdot R(\beta)}}$$
- Too many irrelevant features - solved by L1 regularization ~ lasso
- Exploding coefficients - solved by L2 regularization ~ ridge
## Agenda
1. [model bias and variance](#Model-bias-and-variance)
1. [model building](#Model-building)
1. model selection
- [basic validation](#Model-selection)
- [cross validation](#Cross-validation)
- [tools for selection](#Tools-for-model-selection)
# Model bias and variance
## Bias and variance (1)
*How do we describe the modelling error?*
From [Wikipedia](https://en.wikipedia.org/wiki/Bias%E2%80%93variance_tradeoff) 2019:
- model **bias**: _an error from erroneous assumptions in the learning algorithm_
- high bias can cause an algorithm to miss the relevant relations between features and target outputs (**underfitting**)
- model **variance**: _an error from sensitivity to small fluctuations in the training set_
- high variance can cause an algorithm to model the random noise in the training data, rather than the intended outputs (**overfitting**).
## Bias and variance (2)
*So what is overfitting?*
Overfitting is: low bias / high variance
- in training, our model captures all the true patterns but we also find some irrelevant ones
- reacts too much to training sample errors
- some errors are just noise, and thus we find too many spurious relations
- examples of causes:
- too much polynomial expansion of variables (`PolynomialFeatures`)
- non-linear/logistic without properly tuned hyperparameters:
- Decision Trees, Support Vector Machines or Neural Networks
## Bias and variance (3)
*So what is underfitting?*
Underfitting is: high bias / low variance
- oversimplification of models, cannot approximate all patterns found
- examples of causes:
- linear and logistic regression (without polynomial expansion)
## Bias and variance (4)
*Not so fast.. OLS is unbiased, right?*
Yes, OLS is unbiased. But...?
- But .. only by assumption..
- Requires we know the true form of the model.
- However, in practice we never do..
*What happens if we introduce regularization?*
- Then model is no longer unbiased.
- (if we assume the model is true)
# Model building
## Model pipelines (1)
*Is there a smart way to build ML models?*
Yes, we build a pipeline (input (tidy) -> target)
- Preprocessing data
- Standard: adding polynomials, imputation, rescaling
- Unsupervised learning (not this course)
- Supervised learning
## Model pipelines (2)
*How does the pipeline look? Is there data leakage?*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_01.png' alt="Drawing" style="width: 700px;"/></center>
## Model pipelines (3)
*What are the advantages of using a pipeline?*
- Ensures good practice - we only fit on training data.
- No leakage of data from train to test!
- Much less code!
## Applying a model pipeline (1)
*What would this look like in Python?*
```
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
print(pipe_preproc.steps[0])
print(pipe_preproc.steps[1])
```
## Applying a model pipeline (2)
*Does this remind you of something?*
# YES!
### Method chaining from Pandas
## Applying a model pipeline (3)
*Let's some load Boston house price data*
```
from sklearn.datasets import load_boston
boston = load_boston()
# print(boston['DESCR'])
# print('\n'.join(load_boston()['DESCR'].split('\n')[12:26]))
X = boston.data # features
y = boston.target # target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
```
## Applying a model pipeline (4)
*And how do I apply the pipe on the data?*
```
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler()) # apply preproc - fit on train
X_train_prep = pipe_preproc.fit_transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
```
## Applying a model pipeline (5)
*What would it like look if we did use the pipe..?*
The more steps we have, the more code we save.
```
poly_trans = PolynomialFeatures()
scaler = StandardScaler()
# we call both transformations twice on both test and train
X_train_poly = poly_trans.fit_transform(X_train)
X_test_poly = poly_trans.transform(X_test)
X_train_prep_alt = scaler.fit_transform(X_train_poly)
X_test_prep_alt = scaler.transform(X_test_poly)
```
# Model selection
## Measuring the problem
*Does machine learning work out of the box?*
- In some cases ML works quite well out of the box.
- Often ML requires making careful choices.
- Note that automated machine learning packages and services exist.
- E.g. AutoML - this a hot research topic
*Which choices are to be made?*
- We need to pick model building hyperparameters.
- In this course we only focus on two hyperparameters: $\lambda$ for L1 and L2 regularization
- i.e. $\lambda$ for Lasso, Ridge and Elastic Net
## Model validation (1)
*How do we measure our model's performance for different hyperparameters?*
- Remember we cannot use the test set.
*Could we somehow mimick what we do with test data?*
- Yes, we can split the remaining non-test data into training and validation data:
- we train model for various hyperparameters on training data;
- pick the hyperparameters which performs best on validation data.
## Model validation (2)
*The non-test data is split into training and validation*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_02.png' alt="Drawing" style="width: 500px;"/></center>
## Model validation (3)
*What would this look like in Python?*
```
# splitting into development (2/3) and test data (1/3)
X_dev, X_test, y_dev, y_test = train_test_split(X, y, test_size=1/3, random_state=1)
# splitting development into train (1/3) and validation (1/3)
X_train, X_val, y_train, y_val = train_test_split(X_dev, y_dev, test_size=1/2, random_state=1)
```
## Model validation (4)
Let's train a linear regression model
```
from sklearn.linear_model import Lasso, LinearRegression
pipe_lr = make_pipeline(PolynomialFeatures(include_bias=True),
StandardScaler(),
LinearRegression())
pipe_lr.fit(X_dev, y_dev)
```
## Model validation (5)
Let's find the Lasso model which performs best in the validation set
```
from sklearn.metrics import mean_squared_error as mse

perform = []
# 33 candidate penalties, log-spaced from 1e-4 to 1e4.
lambdas = np.logspace(-4, 4, 33)

for lambda_ in lambdas:
    # Re-fit the full preprocessing + Lasso pipeline for each penalty.
    pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=True),
                               StandardScaler(),
                               Lasso(alpha=lambda_, random_state=1))
    pipe_lasso.fit(X_train, y_train)
    y_pred = pipe_lasso.predict(X_val)
    # Score on the held-out validation split (MSE is symmetric in its args).
    perform.append(mse(y_pred, y_val))

hyperparam_perform = pd.Series(perform,index=lambdas)
# The lambda with the smallest validation MSE wins.
optimal = hyperparam_perform.nsmallest(1)
print('Optimal lambda:', optimal.index[0])
print('Validation MSE: %.3f' % optimal.values[0])
```
## Model validation (6)
Let's compare the performance of the Lasso vs. Linear Regression
```
# insert optimal lambda into new model
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimal.index[0]))
# fit new model on all of the development (non-test) data
pipe_lasso.fit(X_dev, y_dev)
# compare model performance on test data
print('Lasso', round(mse(pipe_lasso.predict(X_test),y_test), 1))
print('LinReg', round(mse(pipe_lr.predict(X_test),y_test), 1))
```
## Smarter validation
*Is this approach the smartest way for deciding on choice of hyperparameters?*
# NO
Our model choice depends a lot on which sample we pick. Could we use more of the data?
# Cross validation
## The holdout method
*How do we get more out of the data?*
We reuse the data in the development set repeatedly
- We test on all the data
- Rotate which parts of data is used for test and train.
## Leave-one-out CV
*How do we get the most out of the data?*
The most robust approach
- Each single observation in the training data we use the remaining data to train.
- Makes number of models equal to the number of observations
- Very computing intensive - does not scale!
LOOCV
## K fold method (1)
*How do balance computing time vs. overfitting?*
We split the sample into $K$ even sized test bins.
- For each test bin $k$ we use the remaining data for training.
Advantages:
- We use all our data for testing.
- Training is done with 100-(100/K) pct. of the data, i.e. 90 pct. for K=10.
## K fold method (2)
In K-fold cross validation we average the errors.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_03.png' alt="Drawing" style="width: 900px;"/></center>
## K fold method (3)
*How would we use K-fold cross validation to select our model?*
We compute MSE for every lambda and every fold (nested for loop)
```
from sklearn.model_selection import KFold

# Materialize the 10 folds once so every lambda sees identical splits.
kfolds = KFold(n_splits=10)
folds = list(kfolds.split(X_dev, y_dev))

# outer loop: lambdas
mseCV = []
for lambda_ in lambdas:
    # inner loop: folds
    mseCV_ = []
    for train_idx, val_idx in folds:
        # train model and compute MSE on test fold
        pipe_lassoCV = make_pipeline(PolynomialFeatures(degree=2, include_bias=True),
                                     StandardScaler(),
                                     Lasso(alpha=lambda_, random_state=1))
        # NOTE(review): this rebinds the notebook-level X_train/y_train names.
        X_train, y_train = X_dev[train_idx], y_dev[train_idx]
        X_val, y_val = X_dev[val_idx], y_dev[val_idx]
        pipe_lassoCV.fit(X_train, y_train)
        mseCV_.append(mse(pipe_lassoCV.predict(X_val), y_val))
    # store result
    mseCV.append(mseCV_)

# convert to DataFrame (rows: lambdas, columns: folds)
lambdaCV = pd.DataFrame(mseCV, index=lambdas)
```
# K fold method (4)
Training the model with optimal hyperparameters and compare MSE
```
# choose optimal hyperparameters
optimal_lambda = lambdaCV.mean(axis=1).nsmallest(1)
# retrain/re-estimate model using optimal hyperparameters
pipe_lassoCV = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimal_lambda.index[0], random_state=1))
pipe_lassoCV.fit(X_dev,y_dev)
# compare performance
models = {'Lasso': pipe_lasso, 'Lasso CV': pipe_lassoCV, 'LinReg': pipe_lr}
for name, model in models.items():
score = mse(model.predict(X_test),y_test)
print(name, round(score, 1))
```
## K fold method (5)
*What else could we use cross-validation for?*
- Getting more evaluations of our model performance.
- We can cross validate at two levels:
- Outer: we make multiple splits of test and train/dev.
- Inner: within each train/dev. dataset we make cross validation to choose hyperparameters
# Tools for model selection
## Learning curves (1)
*What does a model that balances over- and underfitting look like?*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_04.png' alt="Drawing" style="width: 800px;"/></center>
## Learning curves (2)
*Is it easy to make learning curves in Python?*
```
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = \
learning_curve(estimator=pipe_lasso,
X=X_dev,
y=y_dev,
train_sizes=np.arange(0.2, 1.05, .05),
scoring='neg_mean_squared_error',
cv=3)
mse_ = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Test':-test_scores.mean(axis=1)})\
.set_index(pd.Index(train_sizes,name='sample size'))
print(mse_.head(5))
```
## Learning curves (3)
```
f_learn, ax = plt.subplots(figsize=(10,4))
ax.plot(train_sizes,-test_scores.mean(1), alpha=0.25, linewidth=2, label ='Test', color='blue')
ax.plot(train_sizes,-train_scores.mean(1),alpha=0.25, linewidth=2, label='Train', color='orange')
ax.set_title('Mean performance')
ax.set_ylabel('Mean squared error')
ax.set_yscale('log')
ax.legend()
```
## Learning curves (4)
```
f_learn, ax = plt.subplots(figsize=(10,4))
ax.fill_between(train_sizes,
-test_scores.min(1),
-test_scores.max(1),
alpha=0.25, label ='Test', color='blue')
ax.fill_between(train_sizes,
-train_scores.min(1),
-train_scores.max(1),
alpha=0.25, label='Train', color='orange')
ax.set_title('Range of performance (min, max)')
ax.set_ylabel('Mean squared error')
ax.set_yscale('log')
ax.legend()
```
## Validation curves (1)
*Can we plot the optimal hyperparameters?*
```
from sklearn.model_selection import validation_curve
train_scores, test_scores = \
validation_curve(estimator=pipe_lasso,
X=X_dev,
y=y_dev,
param_name='lasso__alpha',
param_range=lambdas,
scoring='neg_mean_squared_error',
cv=3)
mse_score = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Validation':-test_scores.mean(axis=1),
'lambda':lambdas})\
.set_index('lambda')
print(mse_score.Validation.nsmallest(1))
```
## Validation curves (2)
```
mse_score.plot(logx=True, logy=True, figsize=(10,6))
```
## Grid search (1)
*How do we search for two or more optimal parameters? (e.g. elastic net)*
- Goal: find the optimal parameter combination: $$\lambda_1^*,\lambda_2^*=\arg\min_{\lambda_1,\lambda_2}MSE^{CV}(X_{train},y_{train})$$
- Option 1: We can loop over the joint grid of parameters.
- One level for each parameter.
- Caveats: a lot of code / SLOW
- Option 2: sklearn has `GridSearchCV` has a tool which tests all parameter combinations.
## Grid search (2)
*How does this look in Python?*
```
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import ElasticNet
pipe_el = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
ElasticNet())
gs = GridSearchCV(estimator=pipe_el,
param_grid={'elasticnet__alpha':np.logspace(-4,4,10)*2,
'elasticnet__l1_ratio':np.linspace(0,1,10)},
scoring='neg_mean_squared_error',
n_jobs=4,
iid=False,
cv=10)
models['ElasicNetCV'] = gs.fit(X_dev, y_dev)
```
- Notation: double underscore between estimator and hyperparameter, e.g. 'est__hyperparam'
- Scoring: negative MSE as we're maximizing the score ~ minimize MSE.
## Grid search (3)
*What does the grid search yield?*
```
for name, model in models.items():
score = mse(model.predict(X_test),y_test)
print(name, round(score, 2))
print()
print('CV params:', gs.best_params_)
```
## Grid search (4)
*What if we have 10,000 parameter combinations?*
- Option 1: you buy a cluster on Amazon, learn how to parallelize across computers.
- Option 2: you drop some of the parameter values
- Option 3: `RandomizedSearchCV` searches a subset of the combinations.
## Miscellanous
*How do we get the coefficient from the models?*
```
lasso_model = pipe_lassoCV.steps[2][1] # extract model from pipe
lasso_model.coef_[0:13] # extract coeffiecients from model
```
# The end
[Return to agenda](#Agenda)
# Code for plots
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import seaborn as sns
plt.style.use('ggplot')
%matplotlib inline
SMALL_SIZE = 16
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['figure.figsize'] = 10, 4 # set default size of plots
```
### Plots of ML types
```
%run ../ML_plots.ipynb
```
| github_jupyter |
```
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import datetime as dt
import numpy as np
from census import Census # This is new...
import requests, io # internet and input tools
import zipfile as zf # zip file tools
import os
#import weightedcalcs as wc
#import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.palettes import brewer, Spectral6
from bokeh.io import show, output_file, curdoc
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool, Panel, Tabs, GeoJSONDataSource, LinearColorMapper
from bokeh.models import ColorBar
from bokeh.layouts import column, gridplot, row
from bokeh.transform import factor_cmap
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
```
OK, here is the strategy: grab the monthly data from here:
https://www.census.gov/foreign-trade/Press-Release/ft900_index.html
it looks like everything is systematically organized so that this can be accomplished
```
# Download monthly US goods trade (exhibit 11/12) from the Census press releases
# and assemble one long monthly DataFrame of exports and imports.
current_month = 2
years = range(2003,2020)

df = pd.DataFrame([])

##########################################################################################
# 2003-2019: one "final revisions" exhibit-11 workbook per year.
for xxx in years:
    year = str(xxx)
    url = "https://www.census.gov/foreign-trade/Press-Release/"
    url = url + year + "pr/final_revisions/exh11.xls"
    # for some reason 2002 and back have final_revisions capitalized,
    # its ok, this is all I need for this
    foo = pd.read_excel(url, skiprows=36, nrows=12, header=None, usecols=[0, 5, 8])
    foo.columns = ["time", "exports", "imports"]
    foo["time"] = foo["time"] + ", " + year
    foo.time = pd.to_datetime(foo.time)
    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported equivalent.
    df = pd.concat([df, foo])

##########################################################################################
# Then let's add in 2020 which is organized differently (current press release).
url = "https://www.census.gov/foreign-trade/Press-Release/current_press_release/exh12.xlsx"
foo = pd.read_excel(url, skiprows=24, nrows=12, header=None, usecols=[0, 5, 8])
foo.columns = ["time", "exports", "imports"]
# Strip the trailing "(...)" annotation from the month labels.
foo.time = foo.time.str.split("(").str[0]
foo["time"] = foo["time"] + ", " + "2020"
foo.time = pd.to_datetime(foo.time)
df = pd.concat([df, foo])

##########################################################################################
# 2021: only the first `current_month` months are available so far.
url = "https://www.census.gov/foreign-trade/Press-Release/current_press_release/exh12.xlsx"
foo = pd.read_excel(url, skiprows=38, nrows=current_month, header=None, usecols=[0, 5, 8])
foo.columns = ["time", "exports", "imports"]
foo.time = foo.time.str.split("(").str[0]
foo["time"] = foo["time"] + ", " + "2021"
foo.time = pd.to_datetime(foo.time)
df = pd.concat([df, foo])

# Cache the combined series to parquet (note: Windows-style path separators).
out_file = ".\\data"+ "\\aggregate-tradedata.parquet"
pq.write_table(pa.Table.from_pandas(df), out_file)

df.set_index("time", inplace = True)
df
def growth_trade(foo):
    """Add year-over-year growth (%) columns for imports and exports.

    Assumes *foo* has 'imports' and 'exports' columns on a monthly index,
    so that ``shift(12)`` is the same month one year earlier; the first 12
    rows of each growth column are therefore NaN. Mutates and returns *foo*.
    """
    # Bug fix: the original used the notebook global ``df`` in the numerator,
    # which silently breaks when this is called on any other frame.
    foo["import_growth"] = 100 * ((foo.imports / foo.imports.shift(12)) - 1)
    foo["export_growth"] = 100 * ((foo.exports / foo.exports.shift(12)) - 1)
    return foo
df = growth_trade(df)
def make_covid_GR_df(df,trade_type):
    """Return event-time growth series around the Covid and 2008 shocks.

    *df* must carry 'import_growth'/'export_growth' columns on a monthly
    DatetimeIndex; *trade_type* is "import" or "export".
    Returns (merged event-time frame, covid dates, financial-crisis dates).
    """
    covid_df = df.copy()
    covid_df["flag"] = np.nan
    # Mark March 2020 as the shock month, then flag every later month
    # (ffill, no limit) and the 12 months before it (bfill, limit=12).
    covid_df.loc[dt.datetime(2020,3,1),"flag"] = "covid-shock"
    covid_df.flag.ffill(inplace = True)
    covid_df.flag.bfill(inplace = True, limit = 12)
    covid_df.reset_index(inplace = True)
    covid_df = covid_df[covid_df["flag"] == "covid-shock"]
    # Event-time index: -12 up to (current_month + 9). Relies on the notebook
    # global `current_month` matching the number of post-shock rows kept.
    covid_df.index = list(range(-12, (current_month + 12) -2))
    covid_dates = covid_df[["time"]]
    foo = trade_type
    covid_df = covid_df[[foo + "_growth"]]
    # NOTE(review): these labels are assigned again further down —
    # this first pair of assignments is redundant.
    if foo == "import":
        covid_df.columns = ["Imports, Covid Shock"]
    if foo == "export":
        covid_df.columns = ["Exports, Covid Shock"]
    #############################################################
    # Same construction around the September 2008 financial-crisis shock,
    # but with a symmetric +/- 12 month window (ffill also limited to 12).
    GR_df = df.copy()
    GR_df["flag"] = np.nan
    GR_df.loc[dt.datetime(2008,9,1),"flag"] = "GR-shock"
    GR_df.flag.ffill(inplace = True, limit = 12)
    GR_df.flag.bfill(inplace = True, limit = 12)
    GR_df.reset_index(inplace = True)
    GR_df = GR_df[GR_df["flag"] == "GR-shock"]
    GR_df.index = list(range(-12, 13))
    GR_dates = GR_df[["time"]]
    GR_df = GR_df[[foo + "_growth"]]
    if foo == "import":
        covid_df.columns = ["Imports, Covid Shock"]
        GR_df.columns = ["Imports, Financial Crisis"]
    if foo == "export":
        covid_df.columns = ["Exports, Covid Shock"]
        GR_df.columns = ["Exports, Financial Crisis"]
    # Align the two shocks on the shared event-time index.
    outdf = GR_df.merge(covid_df, left_index = True, right_index = True, how = "left")
    # NOTE(review): `dates` is computed but never returned — dead code?
    dates = GR_dates.merge(covid_dates, left_index = True, right_index = True, how = "left")
    return outdf, covid_dates, GR_dates
crl = ["darkblue","slategray","crimson"]
background = "#ffffff"
def make_trade_time(df, trade_type):
    """Build a Bokeh figure comparing trade growth around the two shocks.

    Parameters
    ----------
    df : pandas.DataFrame
        Monthly trade data with ``<trade_type>_growth`` columns.
    trade_type : str
        Either "import" or "export"; selects which series and title to show.

    Returns
    -------
    bokeh.plotting.figure
        A responsive figure with a hover tool reporting date and growth.
    """
    height = 533
    width = 600
    # Aligned shock series plus the calendar dates for each index position.
    foobar, covid_dates, GR_dates = make_covid_GR_df(df,trade_type)
    if trade_type == "import":
        title = "The Growth of US Imports (GOODS) (%YoY) Around Shock"
    if trade_type == "export":
        title = "The Growth of US Exports (GOODS) (%YoY) Around Shock"
    p = figure(plot_height=height, plot_width = width, toolbar_location = 'below',
               tools = "box_zoom, reset", title = title )
    numlines=len(foobar.columns)
    # NOTE(review): 'xs'/'ys'/'label' have numlines entries (2), but 'color'
    # has 3 and 'legend' has 2; ColumnDataSource columns of unequal length
    # warn or error in recent Bokeh — confirm the intended lengths.
    multi_line_source = ColumnDataSource({
        'xs': [foobar.index.values]*numlines,
        'ys': [foobar[name].values for name in foobar.columns],
        'label': [name for name in foobar.columns],
        'color': ["crimson", "darkblue", "slategrey"],
        "legend": ["Financial Crisis", "Covid-19 Pandemic"]})
    p.multi_line(xs= "xs",
                 ys= "ys",
                 line_width=4, line_alpha=0.75, line_color = "color",
                 hover_line_alpha=0.75, hover_line_width = 5,
                 hover_line_color= "color",
                 legend_field= "legend",
                 source = multi_line_source)
    ######################################################################################
    # Invisible circles provide per-point hover targets for the GR series
    # (column 0 of foobar).
    singlesourceGR = ColumnDataSource({
        'xs': foobar.index.values,
        'ys': foobar.iloc[:,0].values,
        "dates": np.array(GR_dates.time),
        "label": [foobar.columns[0]]*len(foobar.index.values),
        })
    cGR = p.circle(x="xs", y="ys", size=10,
                   source = singlesourceGR, color = "crimson",alpha=0)
    # Same trick for the Covid series (column 1).
    singlesourceCovid = ColumnDataSource({
        'xs': foobar.index.values,
        'ys': foobar.iloc[:,1].values,
        "dates": np.array(covid_dates.time),
        "label": [foobar.columns[1]]*len(foobar.index.values),
        })
    ccovid = p.circle(x="xs", y="ys", size=10,
                      source = singlesourceCovid, color = "darkblue",alpha=0)
    # Two-part HTML tooltip: series label on top, then "Mon YYYY value%".
    TIMETOOLTIPS = """
<div style="background-color:#F5F5F5; opacity: 0.95; border: 5px 5px 5px 5px;">
<div style = "text-align:left;">
<span style="font-size: 13px; font-weight: bold"> @label
</span>
</div>
<div style = "text-align:left;">"""
    TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> @dates{%b %Y} $data_y{0.0}%</span>
</div>
</div>"""
    #p.hover.renderers = [ctest]
    # Hover only on the invisible circle glyphs so each point reports its
    # own calendar date rather than the whole multi-line.
    p.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [cGR,ccovid]))
    p.title.text_font_size = '13pt'
    p.background_fill_color = background
    p.background_fill_alpha = 0.75
    p.border_fill_color = background
    # Vertical grey bar marking month 0 (the shock month).
    p.vbar(x = 0,
           color='grey', top = 30, bottom = -50, width = 0.05, alpha = 0.50)
    p.yaxis.axis_label_text_font_style = 'bold'
    p.yaxis.axis_label_text_font_size = "13px"
    p.yaxis.minor_tick_line_color = None
    p.x_range.start = -12
    p.y_range.start = -40
    p.y_range.end = 30
    p.xaxis.axis_label = 'Months Around Shock'
    p.xaxis.axis_label_text_font_style = 'bold'
    p.xaxis.axis_label_text_font_size = "13px"
    p.legend.orientation = "vertical"
    p.legend.background_fill_color = background
    p.legend.background_fill_alpha = 0.10
    p.legend.label_text_font_size = "1em"
    p.outline_line_color = None
    # Scale with the page but never exceed the design size.
    p.sizing_mode= "scale_both"
    p.max_height = height
    p.max_width = width
    p.toolbar.active_drag = None
    p.toolbar.autohide = True
    p.min_border_left = 0
    p.min_border_bottom = 0
    return p
# Assemble the two shock figures into a tabbed layout with a caption and
# write the standalone HTML page.
pimports = make_trade_time(df, "import")
pexports = make_trade_time(df, "export")
tab3 = Panel(child= pimports, title="Goods Imports % YoY")
tab4 = Panel(child= pexports, title="Goods Exports % YoY")
output_file('.\\docs\\' + "us_agg_trade.html")
# BUG FIX: corrected the user-facing typo "calander" -> "calendar".
div0 = Div(text = """Month 0 is September 2008 for the financial crisis series, Month 0 is March 2020 for the Covid-19 Pandemic series.
All values are percent changes year over year. Hover tool reports the change and the calendar date.""", max_width=555, background = background )
div0.sizing_mode= "scale_both"
outfig = column(Tabs(tabs=[tab3, tab4], tabs_location = "above"), div0, sizing_mode="scale_both")
show(outfig)
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# MNIST Handwritten Digit Classification using ONNX and AzureML
This example shows how to train a model on the MNIST data using PyTorch, save it as an ONNX model, and deploy it as a web service using Azure Machine Learning services and the ONNX Runtime.
## What is ONNX
ONNX is an open format for representing machine learning and deep learning models. ONNX enables open and interoperable AI by enabling data scientists and developers to use the tools of their choice without worrying about lock-in and flexibility to deploy to a variety of platforms. ONNX is developed and supported by a community of partners including Microsoft, Facebook, and Amazon. For more information, explore the [ONNX website](http://onnx.ai).
## MNIST Details
The Modified National Institute of Standards and Technology (MNIST) dataset consists of 70,000 grayscale images. Each image is a handwritten digit of 28x28 pixels, representing numbers from 0 to 9. For more information about the MNIST dataset, please visit [Yann LeCun's website](http://yann.lecun.com/exdb/mnist/). More information about the MNIST model and how it was created can be found on the [ONNX Model Zoo GitHub](https://github.com/onnx/models/tree/master/mnist).
## Prerequisites
* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning
* Go through the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook to:
* install the AML SDK
* create a workspace and its configuration file (`config.json`)
```
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
```
## Initialize workspace
Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
```
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Resource group: ' + ws.resource_group, sep = '\n')
```
## Train model
### Create a remote compute target
You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) to execute your training script on. In this tutorial, you create AmlCompute as your training compute resource. This code creates new compute for you if it does not already exist in your workspace.
**Creation of the compute takes approximately 5 minutes.** If the compute is already in your workspace this code will skip the creation process.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "gpucluster"
try:
compute_target = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing compute target.')
except ComputeTargetException:
print('Creating a new compute target...')
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
max_nodes=4)
# create the cluster
compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# Use the 'status' property to get a detailed status for the current cluster.
print(compute_target.status.serialize())
```
The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`.
### Create a project directory
Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.
```
import os
project_folder = './pytorch-mnist'
os.makedirs(project_folder, exist_ok=True)
```
Copy the training script `mnist.py` into your project directory. Make sure the training script has the following code to create an ONNX file:
```python
dummy_input = torch.randn(1, 1, 28, 28, device=device)
model_path = os.path.join(output_dir, 'mnist.onnx')
torch.onnx.export(model, dummy_input, model_path)
```
```
import shutil
shutil.copy('mnist.py', project_folder)
```
### Create an experiment
Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this transfer learning PyTorch tutorial.
```
from azureml.core import Experiment
experiment_name = 'pytorch1-mnist'
experiment = Experiment(ws, name=experiment_name)
```
### Create a PyTorch estimator
The AML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch). The following code will define a single-node PyTorch job.
```
from azureml.train.dnn import PyTorch
estimator = PyTorch(source_directory=project_folder,
script_params={'--output-dir': './outputs'},
compute_target=compute_target,
entry_script='mnist.py',
use_gpu=True)
# upgrade to PyTorch 1.0 Preview, which has better support for ONNX
estimator.conda_dependencies.remove_conda_package('pytorch=0.4.0')
estimator.conda_dependencies.add_conda_package('pytorch-nightly')
estimator.conda_dependencies.add_channel('pytorch')
```
The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`. Please note the following:
- We specified the output directory as `./outputs`. The `outputs` directory is specially treated by AML in that all the content in this directory gets uploaded to your workspace as part of your run history. The files written to this directory are therefore accessible even once your remote run is over. In this tutorial, we will save our trained model to this output directory.
To leverage the Azure VM's GPU for training, we set `use_gpu=True`.
### Submit job
Run your experiment by submitting your estimator object. Note that this call is asynchronous.
```
run = experiment.submit(estimator)
print(run.get_details())
```
### Monitor your run
You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes.
```
from azureml.widgets import RunDetails
RunDetails(run).show()
```
Alternatively, you can block until the script has completed training before running more code.
```
run.wait_for_completion(show_output=True)
```
### Download the model (optional)
Once the run completes, you can choose to download the ONNX model.
```
# list all the files from the run
run.get_file_names()
import os
model_path = os.path.join('outputs', 'mnist.onnx')
run.download_file(model_path, output_file_path=model_path)
```
### Register the model
You can also register the model from your run to your workspace. The `model_path` parameter takes in the relative path on the remote VM to the model file in your `outputs` directory. You can then deploy this registered model as a web service through the AML SDK.
```
model = run.register_model(model_name='mnist', model_path=model_path)
print(model.name, model.id, model.version, sep = '\t')
```
#### Displaying your registered models (optional)
You can optionally list out all the models that you have registered in this workspace.
```
models = ws.models
for name, m in models.items():
print("Name:", name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
```
## Deploying as a web service
### Write scoring file
We are now going to deploy our ONNX model on Azure ML using the ONNX Runtime. We begin by writing a score.py file that will be invoked by the web service call. The `init()` function is called once when the container is started so we load the model using the ONNX Runtime into a global session object.
```
%%writefile score.py
import json
import time
import sys
import os
from azureml.core.model import Model
import numpy as np # we're going to use numpy to process input and output data
import onnxruntime # to inference ONNX models, we use the ONNX Runtime
def init():
    """Called once at container start: resolve the registered 'mnist' model's
    path and load it into a global ONNX Runtime inference session."""
    global session
    model = Model.get_model_path(model_name = 'mnist')
    session = onnxruntime.InferenceSession(model)
def preprocess(input_data_json):
    """Decode the request JSON and return its 'data' field as a float32 array."""
    payload = json.loads(input_data_json)
    tensor = np.array(payload['data'])
    return tensor.astype('float32')
def postprocess(result):
    """Collapse raw model scores to the single most likely digit label."""
    scores = np.array(result).squeeze()
    # Highest-confidence class index, as a plain Python int for JSON output.
    return int(scores.argmax(axis=0))
def run(input_data_json):
    """Web-service entry point: score one JSON request with the global session.

    Returns ``{"result": <predicted digit>, "time": <seconds>}`` on success,
    or ``{"error": <message>}`` if anything in the pipeline raises.
    """
    try:
        start = time.time() # start timer
        input_data = preprocess(input_data_json)
        input_name = session.get_inputs()[0].name # get the id of the first input of the model
        result = session.run([], {input_name: input_data})
        end = time.time() # stop timer
        return {"result": postprocess(result),
                "time": end - start}
    except Exception as e:
        # Surface the failure to the caller instead of crashing the service.
        result = str(e)
        return {"error": result}
```
### Create container image
First we create a YAML file that specifies which dependencies we would like to see in our container.
```
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(pip_packages=["numpy","onnxruntime","azureml-core"])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
```
Then we have Azure ML create the container. This step will likely take a few minutes.
```
from azureml.core.image import ContainerImage
image_config = ContainerImage.image_configuration(execution_script = "score.py",
runtime = "python",
conda_file = "myenv.yml",
description = "MNIST ONNX Demo",
tags = {"demo": "onnx"}
)
image = ContainerImage.create(name = "onnxmnistdemo",
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
```
In case you need to debug your code, the next line of code accesses the log file.
```
print(image.image_build_log_uri)
```
We're all set! Let's get our model chugging.
### Deploy the container image
```
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'demo': 'onnx'},
description = 'web service for MNIST ONNX model')
```
The following cell will likely take a few minutes to run as well.
```
from azureml.core.webservice import Webservice
from random import randint
aci_service_name = 'onnx-demo-mnist'+str(randint(0,100))
print("Service", aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
```
In case the deployment fails, you can check the logs. Make sure to delete your aci_service before trying again.
```
# If deployment failed, dump the container logs and tear the service down so
# the name can be reused on retry. (Indentation reconstructed: per the prose
# above, delete() applies only to an unhealthy deployment — the following
# cells still use the service.)
if aci_service.state != 'Healthy':
    # run this command for debugging.
    print(aci_service.get_logs())
    aci_service.delete()
```
## Success!
If you've made it this far, you've deployed a working web service that does handwritten digit classification using an ONNX model. You can get the URL for the webservice with the code below.
```
print(aci_service.scoring_uri)
```
When you are eventually done using the web service, remember to delete it.
```
#aci_service.delete()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Dwelling-class codes are categorical, not numeric — plot them as strings.
df["MSSubClass"] = df["MSSubClass"].astype(str)
# Sale price distribution per dwelling type, with human-readable labels.
g1 = sns.catplot(x=df["MSSubClass"], y =df["SalePrice"], data = df, kind = "box")
g1.set_xticklabels(["2-STORY 1946 & NEWER","1-STORY 1946 & NEWER ALL STYLES","2-STORY 1945 & OLDER","1-1/2 STORY FINISHED ALL AGES","2 FAMILY CONVERSION - ALL STYLES AND AGES","1-1/2 STORY - UNFINISHED ALL AGES","DUPLEX - ALL STYLES AND AGES","1-STORY PUD (Planned Unit Development) - 1946 & NEWER","1-STORY 1945 & OLDER","SPLIT FOYER","SPLIT OR MULTI-LEVEL","2-STORY PUD - 1946 & NEWER","2-1/2 STORY ALL AGES","PUD - MULTILEVEL - INCL SPLIT LEV/FOYER","1-STORY W/FINISHED ATTIC ALL AGES"])
g1.fig.suptitle("Type of the dwelling", x = 0.6, y = 1.04)
g1.set(xlabel="Type", ylabel="Price")
plt.xticks(rotation=90)
plt.show()
# Recode dwelling class 20 as 10.
# BUG FIX: MSSubClass was cast to str in the cell above, so the original
# row-by-row comparison against the int 20 never matched and the loop was a
# silent no-op. Compare/assign strings, via a vectorized replace.
df["MSSubClass"] = df["MSSubClass"].replace("20", "10")
df["MSSubClass"]
g2 = sns.catplot(x=df["MSZoning"], y =df["SalePrice"], data = df, kind = "box")
g2.fig.suptitle("General Zoning Classification", x = 0.6, y = 1.04)
g2.set_xticklabels(['Residential Low Density','Residential Medium Density','Commercial','Floating Village Residential',"Residential High Density"])
g2.set(xlabel="Zone Class", ylabel="Price")
plt.xticks(rotation = 90)
plt.show()
df["LotFrontage"].fillna(0, inplace = True)
g3 = sns.relplot(x=df["LotFrontage"], y =df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g3.fig.suptitle("Linear Feet of Street Connected to Property", x = 0.6, y = 1.04)
g3.set(xlabel="Feet", ylabel="Price")
plt.show()
g4 = sns.relplot(x=df["LotArea"], y =df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g4.fig.suptitle("Lot size", x = 0.6, y = 1.04)
g4.set(xlabel="Size (in square feet)", ylabel="Price")
plt.show()
g5 = sns.catplot(x=df["Street"], y =df["SalePrice"], data = df, kind = "box")
g5.fig.suptitle("Type of Road Access to Property", x = 0.6, y = 1.04)
g5.set_xticklabels(["Paved","Gravel"])
g5.set(xlabel="Type", ylabel="Price")
plt.show()
df["Alley"].fillna("No Access", inplace = True)
g6 = sns.catplot(x=df["Alley"], y =df["SalePrice"], data = df, kind = "box")
g6.fig.suptitle("Type of Alley Access to Property", x = 0.6, y = 1.04)
g6.set_xticklabels(["No Access","Gravel","Paved"])
g6.set(xlabel="Type", ylabel="Price")
plt.show()
g7 = sns.catplot(x=df["LotShape"], y =df["SalePrice"], data = df, kind = "box")
g7.fig.suptitle("General Shape of Property", x = 0.6, y = 1.04)
g7.set_xticklabels(["Regular","Slightly Irregular","Moderately Irregular","Irregular"])
g7.set(xlabel="Shape", ylabel="Price")
plt.xticks(rotation=90)
plt.show()
g8 = sns.catplot(x=df["LandContour"], y =df["SalePrice"], data = df, kind = "box")
g8.fig.suptitle("Flatness of The Property", x = 0.6, y = 1.04)
g8.set(xlabel="Flatness", ylabel="Price")
g8.set_xticklabels(["Near Flat/Level","Banked - Quick and significant rise from street grade to building","Depression","Hillside - Significant slope from side to side"])
plt.xticks(rotation=90)
plt.show()
g9 = sns.catplot(x=df["Utilities"], y =df["SalePrice"], data = df, kind = "box")
g9.fig.suptitle("Type of Utilities Available", x = 0.6, y = 1.04)
g9.set_xticklabels(["All public Utilities","Electricity and Gas Only"])
g9.set(xlabel="Utilities", ylabel="Price")
plt.show()
g10 = sns.catplot(x=df["LotConfig"], y =df["SalePrice"], data = df, kind = "box")
g10.fig.suptitle("Lot Configuration", x = 0.6, y = 1.04)
g10.set(xlabel="Configuration", ylabel="Price")
g10.set_xticklabels(["Inside","Frontage on 2 sides of property","Corner lot","Cul-de-sac","Frontage on 3 sides of property"])
plt.xticks(rotation=90)
plt.show()
g11 = sns.catplot(x=df["LandSlope"], y =df["SalePrice"], data = df, kind = "box")
g11.fig.suptitle("Slope of Property", x = 0.6, y = 1.04)
g11.set(xlabel="Slope", ylabel="Price")
g11.set_xticklabels(["Gentle slope","Moderate Slope","Severe Slope"])
plt.show()
g12 = sns.catplot(x=df["Neighborhood"], y =df["SalePrice"], data = df, kind = "box")
g12.fig.suptitle("Physical Neighbourhood", x = 0.6, y = 1.04)
g12.set(xlabel="Neighbourhood", ylabel="Price")
g12.set_xticklabels(["College Creek","Veenker","Crawford","Northridge","Mitchell","Somerset","Northwest Ames","Old Town","Brookside","Sawyer","Northridge Heights","North Ames","Sawyer West","Iowa DOT and Rail Road","Meadow Village","Edwards","Timberland","Gilbert","Stone Brook","Clear Creek","Northpark Villa","Bloomington Heights","Briardale","South & West of Iowa State University","Bluestem"])
plt.xticks(rotation=90)
plt.show()
g13 = sns.catplot(x=df["Condition1"], y =df["SalePrice"], data = df, kind = "box")
g13.fig.suptitle("Proximity To Various Conditions", x = 0.6, y = 1.04)
g13.set(xlabel="Conditions", ylabel="Price")
g13.set_xticklabels(["Normal","Adjacent to feeder street","Near positive off-site feature","Adjacent to arterial street","Adjacent to East-West Railroad","Within 200' of North-South Railroad","Adjacent to North-South Railroad","Adjacent to postive off-site feature","Within 200' of East-West Railroad"])
plt.xticks(rotation=90)
plt.show()
g14 = sns.catplot(x=df["Condition2"], y =df["SalePrice"], data = df, kind = "box")
g14.fig.suptitle("Proximity To Various Conditions (if more than one is present)", x = 0.6, y = 1.04)
g14.set(xlabel="Conditions", ylabel="Price")
g14.set_xticklabels(["Normal","Adjacent to arterial street","Within 200' of North-South Railroad","Adjacent to feeder street","Near positive off-site feature","Adjacent to postive off-site feature","Adjacent to North-South Railroad","Adjacent to East-West Railroad"])
plt.xticks(rotation=90)
plt.show()
g15 = sns.catplot(x=df["BldgType"], y =df["SalePrice"], data = df, kind = "box")
g15.fig.suptitle("Type of Dwelling", x = 0.6, y = 1.04)
g15.set(xlabel="Type", ylabel="Price")
g15.set_xticklabels(["Single-family Detached","Two-family Conversion; originally built as one-family dwelling","Duplex","Townhouse End Unit","Townhouse Inside Unit"])
plt.xticks(rotation=90)
plt.show()
g16 = sns.catplot(x=df["HouseStyle"], y =df["SalePrice"], data = df, kind = "box")
g16.fig.suptitle("Style of Dwelling", x = 0.6, y = 1.04)
g16.set(xlabel="Style", ylabel="Price")
g16.set_xticklabels(["Two story","One story","One and one-half story: 2nd level finished","One and one-half story: 2nd level unfinished","Split Foyer","Split Level","Two and one-half story: 2nd level unfinished","Two and one-half story: 2nd level finished"])
plt.xticks(rotation=90)
plt.show()
g17 = sns.catplot(x=df["OverallQual"], y =df["SalePrice"], data = df, kind = "box")
g17.fig.suptitle("Rates of The Overall Material of The House", x = 0.6, y = 1.04)
g17.set(xlabel="Rate", ylabel="Price")
plt.show()
g18 = sns.catplot(x=df["OverallCond"], y =df["SalePrice"], data = df, kind = "box")
g18.fig.suptitle("Rates of The Overall Condition of The House", x = 0.6, y = 1.04)
g18.set(xlabel="Rate", ylabel="Price")
plt.show()
g19 = sns.relplot(x = df["YearBuilt"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g19.fig.suptitle("Original Construction Date", x = 0.6, y = 1.04)
g19.set(xlabel="Year", ylabel="Price")
plt.show()
g20 = sns.relplot(x = df["YearRemodAdd"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g20.fig.suptitle("Remodel Date", x = 0.6, y = 1.04)
g20.set(xlabel="Year", ylabel="Price")
plt.show()
g21 = sns.catplot(x=df["RoofStyle"], y =df["SalePrice"], data = df, kind = "box")
g21.fig.suptitle("Type of Roof", x = 0.6, y = 1.04)
g21.set(xlabel="Type", ylabel="Price")
plt.show()
g22 = sns.catplot(x=df["RoofMatl"], y =df["SalePrice"], data = df, kind = "box")
g22.fig.suptitle("Roof Material", x = 0.6, y = 1.04)
g22.set(xlabel="Material", ylabel="Price")
g22.set_xticklabels(["Composite Shingle","Wood Shingles","Metal","Wood Shakes","Membrane","Gravel & Tar","Roll","Clay or Tile"])
plt.xticks(rotation=90)
plt.show()
g23 = sns.catplot(x=df["Exterior1st"], y =df["SalePrice"], data = df, kind = "box")
g23.fig.suptitle("Exterior Covering On House", x = 0.6, y = 1.04)
g23.set(xlabel="Material", ylabel="Price")
g23.set_xticklabels(["Vinyl Siding","Metal Siding","Wood Siding","Hard Board","Brick Face","Wood Shingles","Cement Board","Plywood","Asbestos Shingles","Stucco","Brick Common","Asphalt Shingles","Stone","Imitation Stucco","Cinder Block"])
plt.xticks(rotation=90)
plt.show()
g24 = sns.catplot(x=df["Exterior2nd"], y =df["SalePrice"], data = df, kind = "box")
g24.fig.suptitle("Exterior Covering On House (if more than one material)", x = 0.6, y = 1.04)
g24.set(xlabel="Material", ylabel="Price")
g24.set_xticklabels(["Vinyl Siding","Metal Siding","Wood Siding","Hard Board","Plywood","Wood Siding","Cement Board","Brick Face","Stucco","Asbestos Shingles","Brick Common","Imitation Stucco","Asphalt Shingles","Stone","Other","Cinder Block"])
plt.xticks(rotation=90)
plt.show()
g25 = sns.catplot(x=df["MasVnrType"], y =df["SalePrice"], data = df, kind = "box")
g25.fig.suptitle("Masonry Veneer Type", x = 0.6, y = 1.04)
g25.set(xlabel="Type", ylabel="Price")
g25.set_xticklabels(["Brick Face","None","Stone","Brick Common"])
plt.xticks(rotation=90)
plt.show()
g26 = sns.relplot(x = df["MasVnrArea"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g26.fig.suptitle("Masonry Veneer Area", x = 0.6, y = 1.04)
g26.set(xlabel="Area (in square feet)", ylabel="Price")
plt.show()
g27 = sns.catplot(x=df["ExterQual"], y =df["SalePrice"], data = df, kind = "box")
g27.fig.suptitle("The Quality of The Material On The Exterior", x = 0.6, y = 1.04)
g27.set(xlabel="Quality", ylabel="Price")
g27.set_xticklabels(["Good","Average","Excellent","Fair"])
plt.show()
g28 = sns.catplot(x=df["ExterCond"], y =df["SalePrice"], data = df, kind = "box")
g28.fig.suptitle("The Condition of The Material On The Exterior", x = 0.6, y = 1.04)
g28.set(xlabel="Condition", ylabel="Price")
g28.set_xticklabels(["Average","Good","Fair","Poor","Excellent"])
plt.show()
g29 = sns.catplot(x=df["Foundation"], y =df["SalePrice"], data = df, kind = "box")
g29.fig.suptitle("Type of Foundation", x = 0.6, y = 1.04)
g29.set(xlabel="Type", ylabel="Price")
g29.set_xticklabels(["Poured Contrete","Cinder Block","Brick & Tile","Wood","Slab","Stone"])
plt.xticks(rotation=90)
plt.show()
df["BsmtQual"].fillna("No Basement", inplace = True)
g30 = sns.catplot(x=df["BsmtQual"], y =df["SalePrice"], data = df, kind = "box")
g30.fig.suptitle("The Height of The Basement", x = 0.6, y = 1.04)
g30.set(xlabel="Height Satisfactory", ylabel="Price")
g30.set_xticklabels(["Good","Average","Excellent","No Basement","Fair"])
plt.xticks(rotation=90)
plt.show()
df["BsmtCond"].fillna("No Basement", inplace = True)
g31 = sns.catplot(x=df["BsmtCond"], y =df["SalePrice"], data = df, kind = "box")
g31.fig.suptitle("The Condition of The Basement", x = 0.6, y = 1.04)
g31.set(xlabel="Condition", ylabel="Price")
g31.set_xticklabels(["Average","Good","No Basement","Fair","Poor"])
plt.xticks(rotation=90)
plt.show()
df["BsmtExposure"].fillna("No Basement", inplace = True)
g32 = sns.catplot(x=df["BsmtExposure"], y =df["SalePrice"], data = df, kind = "box")
g32.fig.suptitle("Walkout or Garden Level Walls", x = 0.6, y = 1.04)
g32.set(xlabel="State", ylabel="Price")
g32.set_xticklabels(["No Exposure","Good Exposure","Minimum Exposure","Average Exposure","No Basement"])
plt.xticks(rotation=90)
plt.show()
df["BsmtFinType1"].fillna("No Basement", inplace = True)
g33 = sns.catplot(x=df["BsmtFinType1"], y =df["SalePrice"], data = df, kind = "box")
g33.fig.suptitle("Rating of Basement Finished Area (First)", x = 0.6, y = 1.04)
g33.set(xlabel="Rating", ylabel="Price")
g33.set_xticklabels(["Good Living Quarters","Average Living Quarters","Unfinished","Average Rec Room","Below Average Living Quarters","No Basement","Low Quality"])
plt.xticks(rotation=90)
plt.show()
g34 = sns.relplot(x = df["BsmtFinSF1"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g34.fig.suptitle("First Basement Finished Area", x = 0.6, y = 1.04)
g34.set(xlabel="Area (in square feet)", ylabel="Price")
plt.show()
df["BsmtFinType2"].fillna("No Basement", inplace = True)
g35 = sns.catplot(x=df["BsmtFinType2"], y =df["SalePrice"], data = df, kind = "box")
g35.fig.suptitle("Rating of Basement Finished Area (If Multiple Types)", x = 0.6, y = 1.04)
g35.set(xlabel="Rating", ylabel="Price")
g35.set_xticklabels(["Unfinished","Below Average Living Quarters","No Basement","Average Living Quarters","Average Rec Room","Low Quality","Good Living Quarters"])
plt.xticks(rotation=90)
plt.show()
g36 = sns.relplot(x = df["BsmtFinSF2"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g36.fig.suptitle("Multiple Basement Finished Area", x = 0.6, y = 1.04)
g36.set(xlabel="Area (in square feet)", ylabel="Price")
plt.show()
g37 = sns.relplot(x = df["BsmtUnfSF"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g37.fig.suptitle("Unfinished Square Feet of Basement Area", x = 0.6, y = 1.04)
g37.set(xlabel="Area", ylabel="Price")
plt.show()
g38 = sns.relplot(x = df["TotalBsmtSF"], y = df["SalePrice"], data = df, kind = "scatter", alpha = 0.2)
g38.fig.suptitle("Total Square Feet of Basement Area", x = 0.6, y = 1.04)
g38.set(xlabel="Area", ylabel="Price")
plt.show()
```
| github_jupyter |
## This notebook will create an interactive widget to illustrate network interconnections
```
import numpy as np
from pandas import *
import networkx as nx
import matplotlib.pyplot as plt
% matplotlib inline
```
### Import real gene network (from macular degeneration experiment)
Also import the measured differential expression levels
NOTE: change paths/filenames in this cell to apply network visualizer to other datasets. Network format from genemania (e.g. columns are 'Entity 1', 'Entity 2', 'Weight', 'Network_group', 'Networks')
NOTE: if no fold change analysis desired, set diff_exp_analysis=False
```
# set file name and directory: network format from GeneMania
filename = 'ayyagari_mass_spec_MD_network/09232015_ayyagari_mass_spec_genemania_network.txt'
md_network = read_csv(filename, sep='\t', header=6)
md_network.columns = ['Entity 1','Entity 2', 'Weight','Network_group','Networks']
print(md_network.head())
# also import differential expression
diff_exp_analysis = True
if diff_exp_analysis:
md_diff_exp = read_csv('ayyagari_mass_spec_MD_network/09232015_ayyagari_mass_spec_fold_changes.csv',sep=',')
md_diff_exp.index = md_diff_exp['GeneSymbol']
# drop empty columns
md_diff_exp = md_diff_exp.dropna(axis='columns',how='all')
md_diff_exp.columns = ['GeneSymbol','FoldChange'] # rename columns to be more universal
print('\n \n Differential expression (fold change): \n')
print(md_diff_exp.FoldChange[1:20])
```
### Section: Implement heat diffusion (HotNet2)
Note- these functions moved to plot_network module
### Create sample network
Create a random network using networkx, for use in testing widget
```
# --- Random small-world network for exercising the widget without data.
G_rand = nx.connected_watts_strogatz_graph(200,4,.1)
#G_rand = nx.random_graphs.powerlaw_cluster_graph(100,10,.2)
edges = G_rand.edges()
numedges = len(G_rand.edges())
# Attach a random weight to every edge.
# NOTE(review): assigning edges[e] assumes networkx 1.x, where edges()
# returns a plain list; under networkx >= 2.0 it is a read-only EdgeView and
# this raises — confirm the pinned networkx version.
for e in range(numedges):
    edge_temp = edges[e]+(np.random.rand(),)
    edges[e] = edge_temp
nodes = G_rand.nodes()
G_rand.add_weighted_edges_from(edges)
# rename nodes to strings for compatibility with widget
string_nodes = [str(i) for i in nodes]
node_map = dict(zip(nodes,string_nodes))
G_rand = nx.relabel_nodes(G_rand,node_map,copy=False)
# --- Real gene network (unweighted) from the GeneMania edge table.
# use md_network to build adjacency matrix
G_MD = nx.Graph()
G_MD.add_nodes_from(list(md_network['Entity 1']))
print(G_MD.nodes())
print('number of nodes = '+ str(len(G_MD.nodes())))
# add some edges
edge_list = zip(list(md_network['Entity 1']),list(md_network['Entity 2']))
G_MD.add_edges_from(edge_list)
print('number of edges = ' + str(len(G_MD.edges())))
# create version with weighted edges
G_MD_w = nx.Graph()
G_MD_w.add_nodes_from(G_MD.nodes())
edge_list_w = zip(list(md_network['Entity 1']),list(md_network['Entity 2']),list(md_network['Weight']))
G_MD_w.add_weighted_edges_from(edge_list_w)
import imp
import plot_network
imp.reload(plot_network)
from IPython.html.widgets import interact
from IPython.html import widgets
import matplotlib.colorbar as cb
import seaborn as sns
import community
# import network plotting module
from plot_network import *
Gtest = nx.Graph()
# select whether you want to analyze real data or simulated network
net_type = 'data'
if net_type == 'data':
Gtest.add_nodes_from(G_MD_w.nodes())
Gtest.add_edges_from(G_MD_w.edges(data=True))
# prep border colors
nodes = Gtest.nodes()
gene_list = md_diff_exp['GeneSymbol'].unique()
if diff_exp_analysis:
diff_exp = Series(md_diff_exp.FoldChange)
genes_intersect = np.intersect1d(gene_list,nodes)
border_cols = Series(index=nodes)
for i in genes_intersect:
if diff_exp[i]=='Unmeasured':
border_cols[i] = np.nan
else:
border_cols[i] = diff_exp[i]
else: # if no differential expression data
border_cols = [None]
elif net_type == 'random':
Gtest.add_nodes_from(G_rand.nodes())
Gtest.add_edges_from(G_rand.edges(data=True))
nodes = Gtest.nodes()
# make border colors array of ones
border_cols = np.ones(len(nodes))
border_cols = Series(border_cols,index=nodes)
# make other interaction type
node_0_list = [u for (u,v) in Gtest.edges()]
node_1_list = [v for (u,v) in Gtest.edges()]
NGlist = ['Other' for i in range(len(Gtest.edges()))]
md_network = DataFrame({'Entity 1':node_0_list,'Entity 2': node_1_list,'Weight':np.ones(len(Gtest.edges())),
'Network_group':NGlist})
numnodes = len(Gtest)
# make these three global to feed into widget
global Gtest
global boder_cols
global md_network
def plot_network_shell(focal_node_name,edge_thresh=.5,network_algo='spl',map_degree=True, plot_border_col=False, draw_shortest_paths=True,
                      coexpression=True, colocalization=True, other=False,physical_interactions=False,
                      predicted_interactions=False,shared_protein_domain=False):
    """Thin wrapper around plot_network() for use with `interact`.

    Forwards every widget-controlled option, together with the module-level
    Gtest, border_cols and md_network, and returns the resulting figure.
    """
    # this is the main plotting function, called from plot_network module
    fig = plot_network(Gtest, border_cols, md_network,
                  focal_node_name, edge_thresh, network_algo, map_degree, plot_border_col, draw_shortest_paths,
                  coexpression, colocalization, other, physical_interactions, predicted_interactions, shared_protein_domain)
    return fig
# threshold slider parameters
if net_type=='data':
    min_thresh = np.min(md_network['Weight'])
    # NOTE(review): the max is divided by 50 (presumably to keep the slider
    # range usable given a heavy-tailed weight distribution) -- confirm intent
    max_thresh = np.max(md_network['Weight']/50)
else:
    min_thresh = 0
    max_thresh = 1
thresh_step = (max_thresh-min_thresh)/1000.0
# Build the interactive widget. FloatSliderWidget is the legacy
# IPython.html.widgets API (pre-ipywidgets); modern ipywidgets uses FloatSlider.
interact(plot_network_shell, focal_node_name=list(np.sort(nodes)),
        edge_thresh=widgets.FloatSliderWidget(min=min_thresh,max=max_thresh,step=thresh_step,value=min_thresh,description='edge threshold'),
        network_algo = ['community','clustering_coefficient','hotnet2','pagerank','spl']);
```
### To do: implement effective distance
(From Brockmann, Helbing: The Hidden Geometry of Complex, Network-Driven Contagion Phenomena)
Calculate the effective distance as:
- Pij is the fraction of connections going from J to I
- Pij = Aij/sum_j(Aij)
- dij = 1/(1-log(Pij)
- D = all shortest paths in dij
### Also to do:
- Implement alternative module/clustering mechanisms
- Implement force-directed edge bundling
- add animation component (e.g. heat spreading from a source)
- how do network measures change with node removal? Can we identify 'bottleneck' regions? What is the best node/ edge to remove?
- calculate network backbone (http://www.pnas.org/content/106/16/6483.abstract)
```
# Load a pre-rendered hierarchical edge-bundling figure (display commented out).
HEBim = plt.imread('hierarchical_edge-bundling3.png')
#plt.figure(figsize=(12,12))
#plt.imshow(HEBim);
```
| github_jupyter |
# 酒駕判決書文字資料清理與結構化
這次專題基於「民眾對於酒駕者的判刑度不理解,社會輿論缺乏針對酒駕修法的背景脈絡」出發,搜集了大量的酒駕判決書,但判決書以文字寫成,刑度、罰金或其他背景分析,都需要經過資料清理與轉化,才能進一步統計。
這次使用的判決書總數量達49萬份,若以人力編碼曠日廢時,且恐面臨編碼員信度考驗。這次我們以正規表達式(regular expression)鎖定文字特徵萃取,以程式掃描文字、輔以人工抽查,以確保分析數據能維持一定的正確性。
本專題以python進行資料整理與分析,這篇notebook會按步驟說明「不能安全駕駛」文字清理使用的regex模式說明,並附上酒駕致人於死、酒駕致人受傷的程式碼。
## 引入文字清理使用的模組
- 引入regular expression的re模組、數據使用的pandas模組
- 試著讀入我們open data
```
import re
import pandas as pd
# Load the three verdict datasets (tab-separated):
# df1: ordinary drunk-driving verdicts (2013)
# df2: drunk driving causing death
# df3: drunk driving causing injury
df1 = pd.read_csv('/Volumes/Untitled/opendata/一般酒駕/drunkDrive_2013.csv',sep='\t')
df2 = pd.read_csv('/Volumes/Untitled/opendata/fatal_export.csv',sep='\t')
df3 = pd.read_csv('/Volumes/Untitled/opendata/hurt_export.csv',sep='\t')
df3.head(5)
```
## 不能安全駕駛切刑期、切罰金
- 判決書全文是非結構化資料,雖然述說同一件事,但是會因不同法官、不同時期,有各種不同的寫法,甚至有錯漏字,因此捕捉特徵的寫法有些複雜,以下會逐步說明。
- 判刑、判罰金都會出現在「判決書主文」中,我們讀入主文的資料進行萃取。至於酒精濃度、車種、教育程度則是從「判決書全文」萃取。
- 萃取時會取出比較大段的部分,再刪去不需要的部分。例如萃取「不能安全駕駛罪」判刑,會先從主文找到「而駕駛動力交通工具,處有期徒刑貳月」,而不直接找「處有期徒刑貳月」的片段,避免取到非不能安全駕駛罪的資料;接著再從中把「而駕駛動力交通工具」、「處有期徒刑」等字刪除。
- 寫regex pattern時,我習慣在 [regular expression101](https://regex101.com/) 網站操作,好處是模式結果可視化,若100筆案例、500筆案例大都可以抓到結果,再回到python批次大量操作。
|  |
|:--:|
| *當大部分的案例都有色塊時,代表模式可以抓取我們指定的特徵* |
### 不能安全駕駛判決的特徵
要能歸納出模式,必須要花費時間找出判決書寫法的「樣態」,再針對這些樣態進行模式描寫(regular expression pattern)。
例如,酒後駕車在刑法中歸類於不能安全駕駛動力交通工具,查看判決書會發現,有的判決書會提出酒精濃度超過標準、有的只寫不能安全駕駛。經過大量比對後,我們以a1、a2作為法官描述「不能安全駕駛罪」的特徵。
a1是法官提出酒精濃度超過標準,其中包含了幾個寫作用詞的變形,如酒精濃度有吐氣或血液兩種檢測、酒駕標準因年代不同而有零點二五、零點五五等不同標準、標點符號的使用習慣不同、行文的輔助詞(而、之)不同,針對這些詞出現與否進行正規表達式的描寫。
a2則是可以承續酒精濃度的句子,也可以針對比較簡寫的狀況進行抓取。其中融合三種可能描述:
- 甲○○吐氣所含酒精濃度達每公升零點二五毫克以上*__而駕駛動力交通工具__*
- 甲○○服用酒類*__不能安全駕駛動力交通工具而駕駛__*
- 甲○○犯*__不能安全駕駛動力交通工具罪__*
利用a1+a2即可標記出大部分的「不能安全駕駛罪」用詞。
不能安全駕駛罪這20年來有罰金、拘役、有期徒刑等處刑方式,因此以b1鎖定自由刑的用詞、b2鎖定罰金的用詞。以自由刑的b1為例,可以萃取這些語句:
- 甲○○吐氣所含酒精濃度達每公升零點二五毫克以上而駕駛動力交通工具*__,處有期徒刑__*貳月
- 甲○○服用酒類,不能安全駕駛動力交通工具而駕駛,*__處有期刑__*貳月
- 甲○○服用酒類,不能安全駕駛動力交通工具而駕駛*__,處拘役__*伍拾玖日
- 甲○○服用酒類,不能安全駕駛動力交通工具而駕駛*__,累犯,處有期徒刑__*陸月
接著我們再分流,以c1鎖定處刑數量、c2鎖定罰金數量。這部分主要是在處理數字組合與單位(年月日、元)。以罰金的c2為例,可以萃取這些語句:
- 主文甲○○服用酒類,不能安全駕駛動力交通工具而駕駛,處罰金新臺幣*__玖萬元__*
- 主文甲○○服用酒類,不能安全駕駛動力交通工具而駕駛,科罰金新臺幣*__柒萬伍仟元__*
綜上所述,我們以a1+a2+b1+c1來萃取不能安全駕駛遭處有期徒刑的語句、以a1+a2+b2+c2來萃取不能安全駕駛遭科罰金的語句。
```
# Pattern fragments locking onto the wording of the "unable to drive safely"
# (不能安全駕駛) offence.
# a1: breath/blood alcohol concentration over the legal threshold, covering
# wording variants, thresholds from different eras, and punctuation habits.
a1 = "(交通工具(而)*[,,,]*有*(吐氣|血液中)(所含)*酒精濃度達(每公升|百分之)*零點(貳伍|二五|伍伍|五五)(毫克)*以上(之情形)*|"
# a2: "unable to safely drive a motor vehicle" phrasings; can follow a1 or
# stand alone for the abbreviated verdict style.
a2 = "(不能安全駕駛)*(而駕駛)*動力交通工具罪*(而駕駛)*者*)"
# b1/b2: optional recidivist marker followed by the sentencing verb --
# b1: imprisonment or detention; b2: fine, optionally prefixed with "NT$".
b1 = "[,,,](累犯)*(共*罪)*[,,,]*[各均有]*處*[,,]*(有期徒*刑徒*|拘役)"
b2 = "[,,,](累犯)*(共*罪)*[,,,]*[各均有]*[處科]*[,,]*罰金(新[台臺]幣)*"
# c1/c2: the quantity of the sentence -- c1: duration in years/months/days;
# c2: amount in Chinese/Arabic numerals, optionally ending in 元 (dollars).
c1 = "[一二三四五六七八九十壹貳參肆伍陸柒捌玖拾廿卅1234567890]*(年|個*月|日)"
c2 = "[一二三四五六七八九十壹貳參肆伍陸柒捌玖拾廿卅百佰千仟萬1234567890]*元*"
# Combined pattern: custodial sentence for the unsafe-driving offence.
findimprison = a1+a2+b1+c1
# Combined pattern: fine for the unsafe-driving offence.
findpenalty = a1+a2+b2+c2
```
### 不能安全駕駛無罪部分與取出數字方式
透過findimprison與findpenalty兩種模式可以找出大部分的處刑,但會發現判決書中也有無罪的狀況,因此再增加一種判斷無罪的模式。
當我們有能力從文字中定位單一罪行的片段時,再從中取出我們需要的數字,可以提升精準度。這個過程如下:
1. 【判決主文】乙○○服用酒類,不能安全駕駛動力交通工具而駕駛,處有期徒刑參月;又因過失致人於死,處有期徒刑壹年;又駕駛動力交通工具肇事,致人死傷而逃逸,處有期徒刑捌月;應執行有期徒刑壹年陸月。
2. 【鎖定不能安全駕駛】不能安全駕駛動力交通工具而駕駛,處有期徒刑參月
3. 【取出數字】參月
當判斷出處有期徒刑、罰金、無罪,分別切出結果,再把非刑期與罰金的文字都刪除(如罰金、有期徒刑、累犯、標點符號等),就可以得到量刑。以這種方式逐步建立可操作的判決書結構化資料。
```
# Pattern for "not guilty" (無罪) verdicts of the unsafe-driving offence.
d = "[,,,、,]*部*[分份]*[,,,、,]*均*無罪"
findun = a2+d
# Blunt clean-up pattern: deletes everything that is not the sentence length
# or the fine amount (offence wording, currency words, punctuation, trailing
# clauses such as probation or combined sentences).
rep = "情形|罰金|科|處|新[台臺]幣|銀|元|不能安全|駕駛|動力|交通工具|而|吐氣|所含|酒精濃度|達|每公升.*毫克|以上|而|者|,|,|、.*|;.*|。.*|累犯|共.罪|有期徒*刑徒*|罪|拘役|過失|如易科.*|被訴.*|應執行.*|緩刑.*|各|均|部[份分].*|共|有|之"
```
### 實際運作方法
接下來以迴圈方式進行一連串的判斷:
- 是否判有期徒刑,取出量刑
- 是否判罰金,取出量刑
- 是否無罪
```
# For each verdict text, classify as imprisonment / fine / not guilty and
# extract the raw sentence string ('' when not applicable), keeping both
# result lists aligned with df1's rows.
result_imprison_text = []
result_penalty_text = []
for i in df1['主文']:
    if bool(re.search(findimprison, i)):
        # custodial sentence: strip boilerplate, keep only the duration text
        r = re.sub(rep,"",re.search(findimprison, i).group())
        result_imprison_text.append(r)
        result_penalty_text.append('')
    elif bool(re.search(findpenalty,i)):  # FIX: was undefined name `findf`
        # fine: strip boilerplate, keep only the amount text
        r = re.sub(rep,"",re.search(findpenalty,i).group())
        result_imprison_text.append("")
        result_penalty_text.append(r)
    else:
        if bool(re.search(findun,i)):  # FIX: was undefined name `findn`
            result_imprison_text.append("無罪")
            result_penalty_text.append("")
        else:
            result_imprison_text.append("")
            result_penalty_text.append("")
```
### 國字數字轉成數字function
抓出刑期與罰金的資訊之後,需將國字換為數字。另外寫成了兩個函式:todays(x) 將刑期國字換為數字、tomoney(x) 將罰金國字換為數字。
步驟大致是:
- 刑期需確定年月日單位,並依一年365日、一月30日進行換算
- 罰金需確定位數
- 將國字取代為數字
```
# Value of every supported numeral character: formal (大寫) and common forms,
# ASCII digits, and the multipliers 拾/廿/卅/佰/仟/萬/百/千. Built once at module
# level instead of rebuilding two parallel lists and linear-scanning them on
# every call, as the original did.
_NUMERAL_VALUES = dict(zip(
    ['壹','貳','參','肆','伍','陸','柒','捌','玖','拾','廿','卅',
     '一','二','三','四','五','六','七','八','九','十',
     0,'1','2','3','4','5','6','7','8','9','0',
     '佰','仟','萬','百','千'],
    [1,2,3,4,5,6,7,8,9,10,20,30,
     1,2,3,4,5,6,7,8,9,10,
     0,1,2,3,4,5,6,7,8,9,0,
     100,1000,10000,100,1000]))

def repl(g):
    """Return the integer value of one Chinese numeral / digit character.

    Raises for unsupported input; callers wrap conversions in try/except.
    """
    return _NUMERAL_VALUES[g]

def nds(y):
    """Convert a short Chinese numeral string (as sliced out by todays /
    tomoney) to an int, e.g. 貳拾 -> 20, 拾伍 -> 15, 壹拾伍 -> 15.

    Returns 0 when the string contains no recognized numeral (e.g. '').
    Strings longer than 3 characters fall through to repl(), which raises;
    the calling loops catch that and flag the row for manual review.
    """
    if bool(re.search('[壹貳參肆伍陸柒捌玖拾廿卅佰百仟千萬一二三四五六七八九十1234567890]', y)):
        if len(y) == 1:
            return repl(y)
        elif len(y) == 2:
            if y[1]=='拾':
                # e.g. 貳拾 = 2 * 10
                return repl(y[0])*repl(y[1])
            elif y[0] =='拾' or y[0] =='廿' or y[0]=='卅':
                # e.g. 拾伍 = 10 + 5
                return repl(y[0])+repl(y[1])
            else:
                # two bare digits, e.g. '25' -> 2*10 + 5
                return repl(y[0])*10 + repl(y[1])
        elif len(y) == 3:
            # e.g. 壹拾伍 = 1*10 + 5
            return repl(y[0])*repl(y[1])+repl(y[2])
        else:
            return repl(y)
    else:
        return 0
def todays(x):
    """Convert a Chinese sentence-length string such as '壹年陸月' to days.

    Uses 1 year = 365 days and 1 month = 30 days. Returns '*' + x as a
    manual-review flag when no 年/月/日 unit is present.
    """
    if '年' in x or '月' in x or '日' in x:
        # Positions of the unit characters; 0 doubles as "absent" (a numeral
        # always precedes its unit in the extracted strings, so a real unit
        # is never at index 0 -- except the bare-日 case handled last).
        try:
            y = x.index("年")
        except:
            y = 0
        try:
            m = x.index("月")
        except:
            m = 0
        try:
            d = x.index("日")
        except:
            d = 0
        # Slice the numeral run preceding each present unit.
        if y>0:
            year = x[0:y]
            if m>0:
                month = x[0+y+1:m]
                if d>0:
                    day = x[0+m+1:d]
                else:
                    day = ""
            else:
                month = ""
                if d>0:
                    day = x[0+y+1:d]
                else:
                    day = ""
        else:
            year = ""
            if m>0:
                month = x[0:m]
                if d>0:
                    day = x[0+m+1:d]
                else:
                    day =""
            else:
                month = ""
                # no 年/月: everything before 日 is the day count
                day = x[0:d]
        res = nds(year)*365+nds(month)*30+nds(day)
        return res
    else:
        # no recognizable time unit: flag for manual inspection
        return '*'+x
def tomoney(x):
    """Convert a Chinese fine-amount string such as '柒萬伍仟' to an int.

    Normalizes 千/百 to 仟/佰, then slices the numeral run before each of the
    unit markers 萬/仟/佰. Returns '*' + x as a manual-review flag when none
    of those units is present. Trailing sub-hundred digits are ignored
    (pre-existing limitation of the unit-position approach).
    """
    x = x.replace('千','仟').replace('百','佰')
    if '萬' in x or '仟' in x or '佰' in x:
        # Unit positions; 0 doubles as "absent" (a numeral always precedes
        # its unit, so a real unit is never at index 0).
        try:
            w = x.index("萬")
        except:
            w = 0
        try:
            t = x.index("仟")
        except:
            t = 0
        try:
            h = x.index("佰")
        except:
            h = 0
        if w>0:
            wan = x[0:w]
            if t>0:
                tho = x[0+w+1:t]
                if h>0:
                    hun = x[0+t+1:h]
                else:
                    hun = ""
            else:
                tho = ""
                if h>0:
                    # FIX: was x[0+w+1:t] with t == 0, which always produced
                    # "" and dropped the hundreds digit (e.g. 參萬伍佰 -> 30000
                    # instead of 30500)
                    hun = x[0+w+1:h]
                else:
                    hun = ""
        else:
            wan = ""
            if t>0:
                tho = x[0:t]
                if h>0:
                    hun = x[0+t+1:h]
                else:
                    hun =""
            else:
                tho = ""
                hun = x[0:h]
        res = nds(wan)*10000+nds(tho)*1000+nds(hun)*100
        return res
    else:
        return '*'+x
# Convert the extracted Chinese sentence/fine strings to numbers; on any
# conversion failure keep the raw text prefixed with '*' for manual review.
result_imprison_num = []
for j in result_imprison_text:
    try:
        result_imprison_num.append(todays(j))
    except:
        result_imprison_num.append('*'+j)
result_penalty_num = []
for j in result_penalty_text:
    try:
        result_penalty_num.append(tomoney(j))
    except:
        result_penalty_num.append('*'+j)
```
### 製作成一表格檢查正確與否,是否需要人工修正
```
# Assemble a review table: original verdict text alongside the extracted
# sentence/fine strings and their numeric conversions, for spot checking.
dfnew = pd.DataFrame()
dfnew['主文'] = df1['主文']
dfnew['不-刑國字'] = result_imprison_text
dfnew['不-刑'] = result_imprison_num
dfnew['不-罰國字'] = result_penalty_text
dfnew['不-罰'] = result_penalty_num
dfnew
```
### 過失致死的模式與切刑期
```
# Negligent-homicide (過失致死) offence: patterns and sentence extraction.
findimprison = "致人於死[者罪]*[,,](累犯)*(共*罪)*[,,]*[各均]*處*[,,]*(有期徒*刑*徒*|拘役)[一二三四五六七八九十壹貳參肆伍陸柒捌玖拾廿卅1234567890]*(年|個*月|日)*"
findun = "(過失致人於死|過失傷害)[、,]*部*[份分]*[、,]*(均)*無罪"
rep = "致人於死|者|,|,|、.*|;.*|。.*|累犯|處|有期徒*刑徒*|罪|拘役|過失|如易科.*|被訴.*|應執行.*|緩刑.*|各|均|部份.*|共"
result_imprison_text = []
for i in df2['主文']:
    try:
        # custodial sentence found: strip boilerplate, keep the duration
        r = re.sub(rep, "", re.search(findimprison, i).group())
        result_imprison_text.append(r)
    except:
        if bool(re.search(findun, i)):  # FIX: was undefined name `findn`
            result_imprison_text.append("無罪")
        else:
            result_imprison_text.append("")
# Convert extracted durations to days and build a review table for the
# negligent-homicide verdicts.
result_imprison_num = [todays(j) for j in result_imprison_text]
dfnew = pd.DataFrame()
dfnew['主文'] = df2['主文']
dfnew['死-刑國字'] = result_imprison_text
dfnew['死-刑'] = result_imprison_num
dfnew
```
### 過失傷害的模式與切刑期
```
# Negligent-injury (過失傷害) offence: patterns and sentence extraction.
findimprison = "(過失)*傷害而*(人(之身體)*|罪)*者*[,,]*(致人*[重受]傷罪*)*[,,](累犯)*(共貳罪)*[,,]*處*[,,]*(拘役|有期徒*刑徒*)([一二三四五六七八九十壹貳參肆伍陸柒捌玖拾廿卅1234567890]*(年|個*月|日))*"
findun = "(過失)*傷害人*(致人*[重受]傷)*罪*嫌*[、,]*(及.*罪)*(部分|部份)*[、,]*(均)*(無罪|公訴不受理|不受理|不理)"
rep = "(過失)*傷害(人(之身體)*|罪)|者|,|,|、.*|;.*|。.*|累犯|處|有期刑|有期徒刑|罪|拘役|過失|如易科.*|被訴.*|應執行.*|緩刑.*|各|均|部份.*|共|致|人|重|受|傷|而|害"
result_imprison_text = []
for i in df3['主文']:
    try:
        # custodial sentence found: strip boilerplate, keep the duration
        r = re.sub(rep, "", re.search(findimprison, i).group())
        result_imprison_text.append(r)
    except:
        if bool(re.search(findun, i)):
            if "無罪" in re.search(findun, i).group():
                result_imprison_text.append('無罪')
            elif "不受理" in i:
                result_imprison_text.append("不受理")
            else:
                result_imprison_text.append("*不符")
        else:
            # FIX: previously nothing was appended in this case, which
            # desynchronized result_imprison_text from df3's rows.
            result_imprison_text.append("")
result_imprison_num = [todays(j) for j in result_imprison_text]
dfnew = pd.DataFrame()
dfnew['主文'] = df3['主文']
dfnew['傷-刑國字'] = result_imprison_text
dfnew['傷-刑'] = result_imprison_num
dfnew
```
### 補充說明與資料
1. 我們非資訊專業出身,程式能力是透過坊間課程、自學、不斷嘗試錯誤摸索而來,程式撰寫的風格、效率、與精確度等層面都有許多有待改進之處,歡迎方家指教。
2. 這次使用的regular expression由於是處理中文資料,仍在很直觀的層次,大致說明使用到的語法:
- \* (比對零到多個前一個字),用意多是因行文風格不同在省略部分字詞,如:「...致人於死,累犯,處有期徒刑...」、「...致人於死,處有期徒刑...」這兩種句子,以「致人於死,(累犯)*,處有期徒刑」一種模式就可以比對。
- [] (比對括號中的其中一個字),用意是在處理行文中相同意思,但不同用字的情況,如:部分、部份;標點符號的全半形等情況。
- | (或 or),用意是在組合各種可能情況,如:五年、5個月、參拾日,三種單位以「年|個*月|日」一種模式比對。
| github_jupyter |
```
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from keras_preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, EarlyStopping
import keras.initializers as KI
import keras.layers as KL
import keras.losses as KLoss
import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers import Convolution2D, GlobalAveragePooling2D, Dense, Activation, Dropout, Flatten, AveragePooling2D
from keras.models import Model
from keras.utils import conv_utils
from keras import applications
from keras import optimizers
from keras.utils import multi_gpu_model
from keras.utils.generic_utils import get_custom_objects
from keras.optimizers import Adam
import math
from keras.callbacks import LearningRateScheduler
# Make runs reproducible by fixing every pseudo-random generator the
# pipeline touches (hashing, python, numpy, tensorflow).
# NOTE(review): PYTHONHASHSEED is read at interpreter startup, so setting it
# here only affects subprocesses, not the current process -- confirm.
# Seed value (can actually be different for each attribution step)
seed_value= 0
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value
os.environ['PYTHONHASHSEED']=str(seed_value)
# 2. Set `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
# 3. Set `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)
# 4. Set `tensorflow` pseudo-random generator at a fixed value
tf.random.set_seed(seed_value) # tensorflow 2.x
# tf.set_random_seed(seed_value) # tensorflow 1.x
```
# Utils
```
# Swish Activation Function
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return x * K.sigmoid(x)

# Register "swish" so it can be referenced by name in layer configs.
get_custom_objects().update({"swish": Activation(swish)})
# Learning Step Decay by 10e-1 after every 4 epochs
def step_decay(epoch):
    """Step-decay learning-rate schedule.

    Starts at 1e-3 and divides the rate by 10 after every 4 completed epochs.
    """
    completed_drops = math.floor(epoch / 4.0)
    return 0.001 * math.pow(0.1, completed_drops)
# Calculates Precision Accuracy
def precision(y_true, y_pred):
"""Precision metric.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
# Calculates Recall Accuracy
def recall(y_true, y_pred):
"""Recall metric.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
# Calculates F1 score
def f1(y_true, y_pred):
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
```
# InceptionV3 with Inaturalist dataset
## Unfreezed
## Inception V3
```
# Input size and batch sizes for the original (uncropped) bird images.
img_rows, img_cols = (334, 334)
train_batchsize = 16
val_batchsize = 16
# Training-time augmentation; validation images are only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    brightness_range=[0.2, 1.2],
    horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        '../input/bird-dasat/bird_dataset/train_images',
        target_size=(img_rows, img_cols),
        batch_size=train_batchsize,
        class_mode='categorical',
        interpolation='bilinear')
validation_generator = validation_datagen.flow_from_directory(
        '../input/bird-dasat/bird_dataset/val_images',
        target_size=(img_rows, img_cols),
        batch_size=val_batchsize,
        class_mode='categorical',
        shuffle=False,
        interpolation='bilinear')
# dimensions of our images.
img_width, img_height = 334, 334
base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
# Replace the ImageNet weights with iNaturalist-pretrained ones.
base_model.load_weights('../input/inaturalist/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
# Fine-tune only the last 17 layers; freeze everything below.
for layer in base_model.layers[:len(base_model.layers)-17]:
    layer.trainable = False
for layer in base_model.layers[len(base_model.layers)-17:]:
    layer.trainable = True
# Add final layers: pool -> flatten -> 512-unit swish dense -> 20-way softmax
x = base_model.output
x = AveragePooling2D((8, 8), strides=(8, 8), name="avg_pool")(x)
x = Flatten(name="flatten")(x)
x = Dense(
    512,
    activation="swish",
    name="dense_1",
    kernel_initializer="he_uniform")(x)
x = Dropout(0.25)(x)
predictions = Dense(
    20,
    activation="softmax",
    name="predictions",
    kernel_initializer="he_uniform")(x)
model_2 = Model(inputs=base_model.input, outputs=predictions)
optimizer = Adam(0.0001)
model_2.compile(loss="categorical_crossentropy",
                optimizer=optimizer,
                metrics=[precision, recall, f1, 'acc'])
# Step-decay LR schedule and best-val_acc checkpointing.
lrate = LearningRateScheduler(step_decay)
checkpoint = ModelCheckpoint("./inception_natural_best.h5",
                             monitor="val_acc",
                             mode="max",
                             save_best_only = True,
                             verbose=1)
nb_train_samples = 1082
nb_validation_samples= 103
epochs=10
batch_size=16
# Train, then evaluate on both splits. `fit_generator` is deprecated (and
# removed in recent TF2); `model.fit` accepts generators directly, matching
# how the cropped-image model below is trained.
history = model_2.fit(train_generator,
      steps_per_epoch=nb_train_samples // batch_size,
      epochs=epochs,
      callbacks=[lrate, checkpoint],
      validation_data=validation_generator,
      validation_steps=nb_validation_samples // batch_size)
model_2.evaluate(train_generator, verbose=1)
model_2.evaluate(validation_generator, verbose=1)
```
Inception V3 pretrained with INaturalist dataset give better result than the one on ImageNet
## Inception V3 cropped image
```
# Same pipeline as above but on the pre-cropped images at 224x224
# (smaller input, hence the 4x4 average pool instead of 8x8).
img_rows, img_cols = (224, 224)
train_batchsize = 16
val_batchsize = 16
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    brightness_range=[0.2, 1.2],
    horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        '../input/bird-dasat/bird_dataset/train_images_cropped',
        target_size=(img_rows, img_cols),
        batch_size=train_batchsize,
        class_mode='categorical',
        interpolation='bilinear')
validation_generator = validation_datagen.flow_from_directory(
        '../input/bird-dasat/bird_dataset/val_images_cropped',
        target_size=(img_rows, img_cols),
        batch_size=val_batchsize,
        class_mode='categorical',
        shuffle=False,
        interpolation='bilinear')
# dimensions of our images.
img_width, img_height = 224, 224
base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
# Replace the ImageNet weights with iNaturalist-pretrained ones.
base_model.load_weights('../input/inaturalist/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
# Fine-tune only the last 17 layers; freeze everything below.
for layer in base_model.layers[:len(base_model.layers)-17]:
    layer.trainable = False
for layer in base_model.layers[len(base_model.layers)-17:]:
    layer.trainable = True
# Add final layers
x = base_model.output
x = AveragePooling2D((4, 4), strides=(4, 4), name="avg_pool")(x)
x = Flatten(name="flatten")(x)
x = Dense(
    512,
    activation="swish",
    name="dense_1",
    kernel_initializer="he_uniform")(x)
x = Dropout(0.25)(x)
predictions = Dense(
    20,
    activation="softmax",
    name="predictions",
    kernel_initializer="he_uniform")(x)
model_3 = Model(inputs=base_model.input, outputs=predictions)
optimizer = Adam(0.0001)
model_3.compile(loss="categorical_crossentropy",
                optimizer=optimizer,
                metrics=[precision, recall, f1, 'acc'])
lrate = LearningRateScheduler(step_decay)
checkpoint = ModelCheckpoint("./inception_natural_cropping_best_test_0.h5",
                             monitor="val_acc",
                             mode="max",
                             save_best_only = True,
                             verbose=1)
nb_train_samples = 941
nb_validation_samples= 92
epochs=10
batch_size=16
history = model_3.fit(train_generator,
      steps_per_epoch=nb_train_samples // batch_size,
      epochs=epochs,
      callbacks=[lrate, checkpoint],
      validation_data=validation_generator,
      validation_steps=nb_validation_samples // batch_size,
      verbose=1)
model_3.evaluate(validation_generator)
model_3.evaluate(train_generator)
```
# Predictions
```
## ORIGINAL IMAGES
# Rebuild the original-image architecture and load the best checkpoint
# saved during training.
# dimensions of our images.
img_width, img_height = 334, 334
base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
base_model.load_weights('../input/inaturalist/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers[:len(base_model.layers)-17]:
    layer.trainable = False
for layer in base_model.layers[len(base_model.layers)-17:]:
    layer.trainable = True
# Add final layers
x = base_model.output
x = AveragePooling2D((8, 8), strides=(8, 8), name="avg_pool")(x)
x = Flatten(name="flatten")(x)
x = Dense(
    512,
    activation="swish",
    name="dense_1",
    kernel_initializer="he_uniform")(x)
x = Dropout(0.25)(x)
predictions = Dense(
    20,
    activation="softmax",
    name="predictions",
    kernel_initializer="he_uniform")(x)
model_orig_predict = Model(inputs=base_model.input, outputs=predictions)
optimizer = Adam(0.0001)
model_orig_predict.compile(loss="categorical_crossentropy",
                optimizer=optimizer,
                metrics=[precision, recall, f1, 'acc'])
model_orig_predict.load_weights('./inception_natural_best.h5')
## CROPPED IMAGES
# Rebuild the cropped-image architecture.
# NOTE(review): Dropout here is 0.3 while the trained model used 0.25, and
# the checkpoint filename below ('./inception_crop_best.h5') differs from the
# one saved during training ('./inception_natural_cropping_best_test_0.h5')
# -- confirm both are intentional.
# dimensions of our images.
img_width, img_height = 224, 224
base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
base_model.load_weights('../input/inaturalist/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers[:len(base_model.layers)-17]:
    layer.trainable = False
for layer in base_model.layers[len(base_model.layers)-17:]:
    layer.trainable = True
# Add final layers
x = base_model.output
x = AveragePooling2D((4, 4), strides=(4, 4), name="avg_pool")(x)
x = Flatten(name="flatten")(x)
x = Dense(
    512,
    activation="swish",
    name="dense_1",
    kernel_initializer="he_uniform")(x)
x = Dropout(0.3)(x)
predictions = Dense(
    20,
    activation="softmax",
    name="predictions",
    kernel_initializer="he_uniform")(x)
model_crop_predict = Model(inputs=base_model.input, outputs=predictions)
optimizer = Adam(0.0001)
model_crop_predict.compile(loss="categorical_crossentropy",
                optimizer=optimizer,
                #metrics=[precision, recall, f1, 'acc'])
                metrics=[f1, 'acc'])
model_crop_predict.load_weights('./inception_crop_best.h5')
# Predictions on original data (batch_size=1, no shuffle so filenames align)
img_rows, img_cols = (334,334)
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
        '../input/bird-dasat/bird_dataset/test_images',
        target_size=(img_rows, img_cols),
        batch_size=1,
        class_mode=None,
        shuffle=False,
        interpolation='bilinear')
predictions_inception_original = model_orig_predict.predict(test_generator)
# Predictions on cropped image
img_rows, img_cols = (224,224)
test_datagen_crop = ImageDataGenerator(rescale=1./255)
test_generator_crop = test_datagen_crop.flow_from_directory(
        '../input/bird-dasat/bird_dataset/test_images_cropped',
        target_size=(img_rows, img_cols),
        batch_size=1,
        class_mode=None,
        shuffle=False,
        interpolation='bilinear')
predictions_inception_crop = model_crop_predict.predict(test_generator_crop)
# Get filenames list (strip the class-directory prefix and the extension;
# assumes paths of the form 'dir/name.ext')
filenames_orig = [x.split('/')[1][:-4] for x in test_generator.filenames]
filenames_crop = [x.split('/')[1][:-4] for x in test_generator_crop.filenames]
```
## Predictions with Max Probability between the two models
```
# Ensemble by maximum confidence: for each test image take the class of
# whichever model (original vs cropped) is more confident. Images without a
# cropped counterpart fall back to the original-image model alone.
category = []
all_probas = []
for idx, fname in enumerate(filenames_orig):
    candidates = [(np.max(predictions_inception_original[idx]),
                   np.argmax(predictions_inception_original[idx]))]
    try:
        crop_idx = filenames_crop.index(fname)
        candidates.append((np.max(predictions_inception_crop[crop_idx]),
                           np.argmax(predictions_inception_crop[crop_idx])))
    except ValueError:
        pass  # no cropped version of this image
    best_prob, best_cat = max(candidates, key=lambda entry: entry[0])
    all_probas.append(candidates)
    category.append(best_cat)
# Show the cases where the two models disagree on the class.
for entry in all_probas:
    if len(entry) > 1 and entry[0][1] != entry[1][1]:
        print(entry)
df = pd.DataFrame({'Id': filenames_orig, 'Category': category})
df.head()
df.to_csv('./submissions_2.csv', index=False)
```
0.87741
## Predictions with only inceptionv3orig on original image & inceptionv3crop on cropped images with condition
if the difference between proba on original image and proba on cropped image is less than 0.15, I will take the prediction on original image (as the inceptionv3orig performs better than the inceptionv3crop)
```
# Thresholded ensemble: prefer the original-image model (the stronger of the
# two) unless the cropped model is more confident by at least `thresh`; this
# is implemented by zeroing the cropped probability when the gap is small.
category = []
all_probas = []
thresh = 0.15
for idx, fname in enumerate(filenames_orig):
    orig_entry = [np.max(predictions_inception_original[idx]),
                  np.argmax(predictions_inception_original[idx])]
    candidates = [orig_entry]
    try:
        crop_idx = filenames_crop.index(fname)
        crop_entry = [np.max(predictions_inception_crop[crop_idx]),
                      np.argmax(predictions_inception_crop[crop_idx])]
        if abs(orig_entry[0] - crop_entry[0]) < thresh:
            crop_entry[0] = 0.
        candidates.append(crop_entry)
    except ValueError:
        pass  # no cropped version of this image
    best_prob, best_cat = max(candidates, key=lambda entry: entry[0])
    all_probas.append(candidates)
    category.append(best_cat)
df_ = pd.DataFrame({'Id': filenames_orig, 'Category': category})
df_.to_csv('./submissions_3.csv', index=False)
```
0.88387
| github_jupyter |
# <b>Introduction<b>
In this project, I classify Yelp round-10 review datasets. The reviews contain a lot of metadata that can be mined and used to infer meaning, business attributes, and sentiment. For simplicity, I classify the review comments into two class: either as positive or negative. Reviews that have star higher than three are regarded as positive while the reviews with star less than or equal to 3 are negative. Therefore, the problem is a supervised learning. To build and train the model, I first tokenize the text and convert them to sequences. Each review comment is limited to 50 words. As a result, short texts less than 50 words are padded with zeros, and long ones are truncated. After processing the review comments, I trained three model in three different ways:
<li> Model-1: In this model, a neural network with LSTM and a single embedding layer were used.
<li> Model-2: In Model-1, an extra 1D convolutional layer has been added on top of LSTM layer to reduce the training time.
<li> Model-3: In this model, I use the same network architecture as Model-2, but use the pre-trained glove 100 dimension word embeddings as initial input.
Since there are about 1.6 million input comments, it takes a while to train the models. To reduce the training time step, I limit the training epoch to three. After three epochs, it is evident that Model-2 is better regarding both training time and validation accuracy.
## <b>Project Outline <b>
In this project I will cover the following:
<li> Download data from yelp and process them
<li> Build neural network with LSTM
<li> Build neural network with LSTM and CNN
<li> Use pre-trained GloVe word embeddings
<li> Word Embeddings from Word2Vec
## <b>Import libraries<b>
```
# Keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation
from keras.layers.embeddings import Embedding
## Plot
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
import matplotlib as plt
# NLTK
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
# Other
import re
import string
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
```
## <b> Data Processing<b>
```
# Load the pipe-separated reviews ('stars'|'text'), skipping malformed rows.
# NOTE(review): error_bad_lines is deprecated in newer pandas
# (use on_bad_lines='skip') -- confirm the pandas version in use.
df = pd.read_csv('train.csv', sep = '|', names = ['stars', 'text'], error_bad_lines=False)
df= df.dropna()
# keep only rows with a purely numeric star rating and non-empty text
df = df[df.stars.apply(lambda x: x.isnumeric())]
df = df[df.stars.apply(lambda x: x !="")]
df = df[df.text.apply(lambda x: x !="")]
df.describe()
df.head()
```
### Convert five classes into two classes (positive = 1 and negative = 0)
Since the main purpose is to identify positive or negative comments, I convert five class star category into two classes:
<li> (1) Positive: comments with stars > 3 and
<li> (2) Negative: comments with stars <= 3
```
# Binarize the target: stars > 3 -> positive (1), otherwise negative (0).
labels = df['stars'].map(lambda x : 1 if int(x) > 3 else 0)
```
### Tokenize text data
Because of the computational expenses, I use the top 20000 unique words. First, tokenize the comments then convert those into sequences. I keep 50 words to limit the number of words in each comment.
```
def clean_text(text):
    """Normalize one review for tokenization.

    Lowercases, removes English stopwords and tokens shorter than 3 chars,
    expands contractions, strips noise characters via regexes, and applies
    Snowball stemming. Returns a single space-joined string.
    """
    # NOTE: the original first did text.translate(string.punctuation), which
    # in Python 3 does NOT strip punctuation (a plain str is not a
    # translation table): printable characters were left untouched while
    # control characters (ord < 32) were silently replaced by punctuation.
    # Punctuation is handled by the regex substitutions below, so the broken
    # call is removed.
    ## Convert words to lower case and split them
    text = text.lower().split()
    ## Remove stop words
    stops = set(stopwords.words("english"))
    text = [w for w in text if not w in stops and len(w) >= 3]
    text = " ".join(text)
    # Clean the text: drop unexpected symbols, expand contractions,
    # and space out the punctuation that is kept
    text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
    text = re.sub(r"what's", "what is ", text)
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have ", text)
    text = re.sub(r"n't", " not ", text)
    text = re.sub(r"i'm", "i am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    text = re.sub(r",", " ", text)
    text = re.sub(r"\.", " ", text)
    text = re.sub(r"!", " ! ", text)
    text = re.sub(r"\/", " ", text)
    text = re.sub(r"\^", " ^ ", text)
    text = re.sub(r"\+", " + ", text)
    text = re.sub(r"\-", " - ", text)
    text = re.sub(r"\=", " = ", text)
    text = re.sub(r"'", " ", text)
    text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
    text = re.sub(r":", " : ", text)
    text = re.sub(r" e g ", " eg ", text)
    text = re.sub(r" b g ", " bg ", text)
    text = re.sub(r" u s ", " american ", text)
    text = re.sub(r"\0s", "0", text)
    text = re.sub(r" 9 11 ", "911", text)
    text = re.sub(r"e - mail", "email", text)
    text = re.sub(r"j k", "jk", text)
    text = re.sub(r"\s{2,}", " ", text)
    # Stem every remaining token
    text = text.split()
    stemmer = SnowballStemmer('english')
    stemmed_words = [stemmer.stem(word) for word in text]
    text = " ".join(stemmed_words)
    return text
# Clean every review, then convert to fixed-length integer sequences:
# keep the 20k most frequent words; pad/truncate each review to 50 tokens.
df['text'] = df['text'].map(lambda x: clean_text(x))
df.head(10)
vocabulary_size = 20000
tokenizer = Tokenizer(num_words= vocabulary_size)
tokenizer.fit_on_texts(df['text'])
sequences = tokenizer.texts_to_sequences(df['text'])
data = pad_sequences(sequences, maxlen=50)
print(data.shape)
```
### <b>Build neural network with LSTM<b>
### Network Architechture
The network starts with an embedding layer. The layer lets the system expand each token to a more massive vector, allowing the network to represent a word in a meaningful way. The layer takes 20000 as the first argument, which is the size of our vocabulary, and 100 as the second input parameter, which is the dimension of the embeddings. The third parameter is the input_length of 50, which is the length of each comment sequence.
```
# Baseline model: Embedding(20k vocab, 100 dims, 50-token input)
# -> LSTM(100) -> sigmoid for binary sentiment.
model_lstm = Sequential()
model_lstm.add(Embedding(20000, 100, input_length=50))
model_lstm.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model_lstm.add(Dense(1, activation='sigmoid'))
model_lstm.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
```
### Train the network
There are about 1.6 million comments, and it takes a while to train the model in a MacBook Pro. To save time I have used only three epochs. GPU machines can be used to accelerate the training with more time steps. I split the whole datasets as 60% for training and 40% for validation.
```
# 60/40 train/validation split; only 3 epochs to keep training time bounded.
model_lstm.fit(data, np.array(labels), validation_split=0.4, epochs=3)
```
## <b>Build neural network with LSTM and CNN <b>
The LSTM model worked well. However, it takes forever to train three epochs. One way to speed up the training time is to improve the network adding “Convolutional” layer. Convolutional Neural Networks (CNN) come from image processing. They pass a “filter” over the data and calculate a higher-level representation. They have been shown to work surprisingly well for text, even though they have none of the sequence processing ability of LSTMs.
```
def create_conv_model():
    """Build and compile the CNN+LSTM binary classifier.

    A Conv1D/MaxPooling1D stage condenses each embedded 50-token sequence
    before the LSTM layer, which trains faster than the LSTM-only model.
    """
    network = Sequential()
    network.add(Embedding(vocabulary_size, 100, input_length=50))
    network.add(Dropout(0.2))
    network.add(Conv1D(64, 5, activation='relu'))
    network.add(MaxPooling1D(pool_size=4))
    network.add(LSTM(100))
    network.add(Dense(1, activation='sigmoid'))
    network.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return network
model_conv = create_conv_model()
# Same 60/40 train/validation split and 3 epochs as the plain-LSTM run.
model_conv.fit(data, np.array(labels), validation_split=0.4, epochs = 3)
```
### Save processed Data
```
# Persist the padded sequences side by side with the labels so the
# preprocessing does not have to be re-run for later experiments.
df_save = pd.DataFrame(data)
df_label = pd.DataFrame(np.array(labels))
result = pd.concat([df_save, df_label], axis = 1)
result.to_csv('train_dense_word_vectors.csv', index=False)
```
## <b>Use pre-trained Glove word embeddings</b>
In this subsection, I want to use word embeddings from pre-trained Glove. It was trained on a dataset of one billion tokens (words) with a vocabulary of 400 thousand words. The glove has embedding vector sizes, including 50, 100, 200 and 300 dimensions. I chose the 100-dimensional version. I also want to see the model behavior in case the learned word weights do not get updated. I, therefore, set the trainable attribute for the model to be False.
### Get embeddings from Glove
```
# Load the pre-trained 100-d GloVe vectors into a word -> vector dict.
embeddings_index = dict()
# FIX: use a context manager so the file handle is closed even on error,
# and decode explicitly (GloVe distribution files are UTF-8).
with open('glove.6B/glove.6B.100d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Loaded %s word vectors.' % len(embeddings_index))
# Create a weight matrix covering the tokenizer's top `vocabulary_size` words;
# row i holds the GloVe vector for the word with tokenizer index i.
embedding_matrix = np.zeros((vocabulary_size, 100))
for word, index in tokenizer.word_index.items():
    if index > vocabulary_size - 1:
        # FIX: skip rather than break — dict iteration order is not
        # guaranteed to follow index order, so later entries may still
        # fall inside the vocabulary range.
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[index] = embedding_vector
```
### Develop model
I use the same model architecture with a convolutional layer on top of the LSTM layer.
```
# Same CNN+LSTM architecture as create_conv_model, but the embedding layer
# is seeded with the GloVe matrix and frozen (trainable=False) so the
# pre-trained word vectors are not updated during training.
model_glove = Sequential()
model_glove.add(Embedding(vocabulary_size, 100, input_length=50, weights=[embedding_matrix], trainable=False))
model_glove.add(Dropout(0.2))
model_glove.add(Conv1D(64, 5, activation='relu'))
model_glove.add(MaxPooling1D(pool_size=4))
model_glove.add(LSTM(100))
model_glove.add(Dense(1, activation='sigmoid'))
model_glove.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model_glove.fit(data, np.array(labels), validation_split=0.4, epochs = 3)
```
## <b>Word embedding visualization</b>
In this subsection, I want to visualize word embedding weights obtained from trained models. Word embeddings with 100 dimensions are first reduced to 2 dimensions using t-SNE. Tensorflow has an excellent tool to visualize the embeddings in a great way, but here I just want to visualize the word relationship.
### Get embedding weights from the trained models
```
# Pull the learned embedding-layer weights (layer 0) out of each model;
# each is a (vocabulary_size, 100) matrix.
lstm_embds = model_lstm.layers[0].get_weights()[0]
conv_embds = model_conv.layers[0].get_weights()[0]
glove_emds = model_glove.layers[0].get_weights()[0]  # ('emds' spelling kept: the name is reused later)
```
### Get word list
```
# Words in tokenizer order, aligned with the rows of the embedding matrices.
# list(dict) yields the keys in insertion order — identical to looping over
# .items() and appending each key, but in one idiomatic step.
word_list = list(tokenizer.word_index)
```
### Scatter plot of first two components of TSNE
```
def plot_words(data, start, stop, step):
    """Scatter-plot the first two t-SNE components for a slice of words.

    Hovering a marker shows the matching word from the global `word_list`,
    which must be row-aligned with `data`.
    """
    sl = slice(start, stop, step)
    trace = go.Scatter(x=data[sl, 0],
                       y=data[sl, 1],
                       mode='markers',
                       text=word_list[sl])
    layout = dict(title='t-SNE 1 vs t-SNE 2',
                  yaxis=dict(title='t-SNE 2'),
                  xaxis=dict(title='t-SNE 1'),
                  hovermode='closest')
    py.iplot(dict(data=[trace], layout=layout))
```
#### 1. LSTM
```
# Project the LSTM model's 100-d embeddings to 2-D with t-SNE and plot
# the first 2,000 words.
number_of_words = 2000
lstm_tsne_embds = TSNE(n_components=2).fit_transform(lstm_embds)
plot_words(lstm_tsne_embds, 0, number_of_words, 1)
```
#### 2. CNN + LSTM
```
# Same t-SNE projection for the CNN+LSTM model's embeddings.
conv_tsne_embds = TSNE(n_components=2).fit_transform(conv_embds)
plot_words(conv_tsne_embds, 0, number_of_words, 1)
```
#### 3. Glove
```
# Same t-SNE projection for the frozen GloVe embeddings.
glove_tsne_embds = TSNE(n_components=2).fit_transform(glove_emds)
plot_words(glove_tsne_embds, 0, number_of_words, 1)
```
## <b>Word Embeddings from Word2Vec</b>
In this subsection, I use word2vec to create word embeddings from the review comments. Word2vec is one algorithm for learning a word embedding from a text corpus.
```
from gensim.models import Word2Vec
import nltk
nltk.download('punkt')
```
### Tokenize the review comments.
```
# Tokenize each cleaned comment into a word list for word2vec training.
df['tokenized'] = df.apply(lambda row : nltk.word_tokenize(row['text']), axis=1)
df.head()
```
### Train word2vec model
```
# Train word2vec (100-d) on the tokenized comments, then collect every
# in-vocabulary vector into matrix X (one row per word).
# NOTE(review): model_w2v[...] and .wv.vocab are the pre-gensim-4 API;
# newer gensim requires model_w2v.wv[...] / .wv.key_to_index — confirm
# the installed gensim version before re-running.
model_w2v = Word2Vec(df['tokenized'], size=100)
X = model_w2v[model_w2v.wv.vocab]
```
### Plot Word Vectors Using Truncated SVD
```
# Despite the section title, this reduces the vectors with truncated SVD
# (no mean-centering) rather than PCA.
from sklearn.decomposition import TruncatedSVD
tsvd = TruncatedSVD(n_components=5, n_iter=10)
result = tsvd.fit_transform(X)
result.shape
# Word labels aligned with the rows of X / result.
tsvd_word_list = []
words = list(model_w2v.wv.vocab)
for i, word in enumerate(words):
    tsvd_word_list.append(word)
# Scatter of the first two SVD components for the first `number_of_words` words.
trace = go.Scatter(
    x = result[0:number_of_words, 0],
    y = result[0:number_of_words, 1],
    mode = 'markers',
    text= tsvd_word_list[0:number_of_words]
)
layout = dict(title= 'SVD 1 vs SVD 2',
              yaxis = dict(title='SVD 2'),
              xaxis = dict(title='SVD 1'),
              hovermode= 'closest')
fig = dict(data = [trace], layout= layout)
py.iplot(fig)
```
| github_jupyter |
# <p style="text-align: center;"> Charter school identities and outcomes in the accountability era:<br/> Preliminary results
<p style="text-align: center;">April 19th, 2017<br/>By Jaren Haber, PhD Candidate<break/>Dept. of Sociology, UC Berkeley
<p style="text-align: center;">
<p style="text-align: center;">(this out-dated graphic courtesy of U.S. News & World Report, 2009)
## Research questions
**How are charter schools different from each other in terms of ideology? How do these differences shape their survival and their outcomes, and what does this reveal about current educational policy?**
## The corpus
- Website self-descriptions of all **6,753 charter schools** open in 2014-15 (identified using the NCES Public School Universe Survey)
- Charter school websites are a publicly visible proclamation of identity attempting to impress parents, regulators, etc.
- This study is the first to use this contemporary, comprehensive data source on U.S. charter school identities
- Me & research team working on using BeautifulSoup and requests.get to webscrape the full sample
### Motivation
- Too much focus on test scores in education, too little on organizational aspects
- Are charter schools innovative? How?
- How does educational policy shape ed. philosophy? Organization? Outcomes?
- No one has studied charters' public image as expressed in their OWN words
### Methods
- NLP: Word frequencies, distinctive words, etc.
- Supervised: Custom dictionaries
- Unsupervised: Topic models, word embeddings
- Later: statistical regression to test, e.g., how progressivist schools in liberal communities have higher performance than they do in other places
## Preliminary analysis: website self-descriptions of non-random sample of 196 schools
- Early-stage sample: NOT representative!
- About half randomly selected, half tracked down (many through Internet Archive) because of missing URLs
- Closed schools over-represented
## Preliminary conclusions:
### Word counts:
- Website self-descriptions for schools in mid-sized cities and suburbs tend to be longest, followed by other urban and suburban schools, then schools in towns, and shortest tends to be rural schools
- Charter schools in cities and suburbs have the highest textual redundancy (lowest ratio of types to tokens)
### Word embeddings:
- The two educational philosophies I'm interested in--**progressivism** and **essentialism**--can be distinguished using semantic vectors
- Useful way for creating and checking my dictionaries
### Topic modeling:
- Urban charter schools' websites emphasize **GOALS** (topic 0)
- Suburban charter schools' websites emphasize **CURRICULUM** (topic 1) in addition to goals
## Next steps:
- Working with custom dictionaries, POS tagging
- Webscraping and parsing HTML to get full sample
- Match website text with data on test scores and community characteristics (e.g., race, class, political leanings) --> test hypotheses with statistical regression<br/><br/>
- **More long-term**: Collect longitudinal mission statement data from the Internet Archive --> look at survival and geographic dispersion of identity categories over time (especially pre-NCLB if possible)
```
# The keyword categories to help parse website text:
# Keyword lists used to locate identity-relevant sections of charter school
# websites; each list names one content category to match against page text.
mission = ['mission',' vision ', 'vision:', 'mission:', 'our purpose', 'our ideals', 'ideals:', 'our cause', 'cause:', 'goals', 'objective']
curriculum = ['curriculum', 'curricular', 'program', 'method', 'pedagogy', 'pedagogical', 'approach', 'model', 'system', 'structure']
philosophy = ['philosophy', 'philosophical', 'beliefs', 'believe', 'principles', 'creed', 'credo', 'value', 'moral']
history = ['history', 'our story', 'the story', 'school story', 'background', 'founding', 'founded', 'established', 'establishment', 'our school began', 'we began', 'doors opened', 'school opened']
general = ['about us', 'our school', 'who we are', 'overview', 'general information', 'our identity', 'profile', 'highlights']
```
## Initializing Python
```
#!/usr/bin/env python
# -*- coding: UTF-8
# IMPORTING KEY PACKAGES
import csv # for reading in CSVs and turning them into dictionaries
import re # for regular expressions
import os # for navigating file trees
import nltk # for natural language processing tools
import pandas # for working with dataframes
import numpy as np # for working with numbers
# FOR CLEANING, TOKENIZING, AND STEMMING THE TEXT
from nltk import word_tokenize, sent_tokenize # widely used text tokenizer
from nltk.stem.porter import PorterStemmer # an approximate method of stemming words (it just cuts off the ends)
from nltk.corpus import stopwords # for one method of eliminating stop words, to clean the text
stopenglish = list(stopwords.words("english")) # assign the string of english stopwords to a variable and turn it into a list
import string # for one method of eliminating punctuation
punctuations = list(string.punctuation) # assign the string of common punctuation symbols to a variable and turn it into a list
# FOR ANALYZING WITH THE TEXT
from sklearn.feature_extraction.text import CountVectorizer # to work with document-term matrices, especially
countvec = CountVectorizer(tokenizer=nltk.word_tokenize)
from sklearn.feature_extraction.text import TfidfVectorizer # for creating TF-IDFs
tfidfvec = TfidfVectorizer()
from sklearn.decomposition import LatentDirichletAllocation # for topic modeling
import gensim # for word embedding models
from scipy.spatial.distance import cosine # for cosine similarity
from sklearn.metrics import pairwise # for pairwise similarity
from sklearn.manifold import MDS, TSNE # for multi-dimensional scaling
# FOR VISUALIZATIONS
import matplotlib
import matplotlib.pyplot as plt
# Visualization parameters
% pylab inline
% matplotlib inline
matplotlib.style.use('ggplot')
```
## Reading in preliminary data
```
# Read the preliminary sample twice: once as a list of dicts (one per
# school) for sentence-level work, and once as a pandas dataframe.
sample = [] # make empty list
with open('../data_URAP_etc/mission_data_prelim.csv', 'r', encoding = 'Latin-1')\
        as csvfile: # open file
    reader = csv.DictReader(csvfile) # create a reader
    for row in reader: # loop through rows
        sample.append(row) # append each row to the list
sample[0]
# Take a look at the most important contents and the variables list
# in our sample (a list of dictionaries)--let's look at just the first entry
print(sample[1]["SCHNAM"], "\n", sample[1]["URL"], "\n", sample[1]["WEBTEXT"], "\n")
print(sample[1].keys()) # look at all the variables!
# Read the data in as a pandas dataframe
df = pandas.read_csv("../data_URAP_etc/mission_data_prelim.csv", encoding = 'Latin-1')
df = df.dropna(subset=["WEBTEXT"]) # drop any schools with no webtext that might have snuck in (none currently)
# Add additional variables for analysis:
# PCTETH = percentage of enrolled students belonging to a racial minority
# this includes American Indian, Asian, Hispanic, Black, Hawaiian, or Pacific Islander
df["PCTETH"] = (df["AM"] + df["ASIAN"] + df["HISP"] + df["BLACK"] + df["PACIFIC"]) / df["MEMBER"]
df["STR"] = df["MEMBER"] / df["FTE"] # Student/teacher ratio
df["PCTFRPL"] = df["TOTFRL"] / df["MEMBER"] # Percent of students receiving FRPL
# Another interesting variable:
# TYPE = type of school, where 1 = regular, 2 = special ed, 3 = vocational, 4 = other/alternative, 5 = reportable program
## Print the webtext from the first school in the dataframe
print(df.iloc[0]["WEBTEXT"])
```
## Descriptive statistics
### How urban proximity is coded: Lower number = more urban (closer to large city)
More specifically, it uses two digits with distinct meanings:
- the first digit:
- 1 = city
- 2 = suburb
- 3 = town
- 4 = rural
- the second digit:
- 1 = large or fringe
- 2 = mid-size or distant
- 3 = small/remote
```
# Descriptive statistics for the sample.
print(df.describe()) # get descriptive statistics for all numerical columns
print()
print(df['ULOCAL'].value_counts()) # frequency counts for urban-proximity codes (categorical)
print()
print(df['LEVEL'].value_counts()) # treat grade range served as categorical
# Codes for level/ grade range served: 3 = High school, 2 = Middle school, 1 = Elementary, 4 = Other)
print()
print(df['LSTATE'].mode()) # find the most common state represented in these data
print(df['ULOCAL'].mode()) # find the most common urbanicity category in these data
# print(df['FTE']).mean() # What's the average number of full-time employees by school?
# print(df['STR']).mean() # And the average student-teacher ratio?
# Number of schools from each state, as a bar chart:
grouped_state = df.groupby('LSTATE')
grouped_state['WEBTEXT'].count().sort_values(ascending=True).plot(kind = 'bar', title='Schools mostly in CA, TX, AZ, FL--similar to national trend')
plt.show()
# Number of schools in each urban category, as a bar chart:
grouped_urban = df.groupby('ULOCAL')
grouped_urban['WEBTEXT'].count().sort_values(ascending=True).plot(kind = 'bar', title='Most schools are in large cities or large suburbs')
plt.show()
```
#### What these numbers say about the charter schools in the sample:
- Most are located in large cities, followed by large suburbs, then medium and small city, and then rural.
- The means for percent minorities and students receiving free- or reduced-price lunch are both about 60%.
- Most are in CA, TX, AZ, and FL
- Most of the schools in the sample are primary schools
#### This means that the sample reflects national averages. In that sense, this sample isn't so bad.
## Cleaning, tokenizing, and stemming the text
```
# Clean the webtext: lower-case, tokenize, strip punctuation, drop
# stopwords, and stem with the Porter stemmer.
df['webtext_lc'] = df['WEBTEXT'].str.lower() # make the webtext lower case
df['webtokens'] = df['webtext_lc'].apply(nltk.word_tokenize) # tokenize the lower-case webtext by word
# Reuse the module-level `punctuations` list instead of rebuilding it per row.
df['webtokens_nopunct'] = df['webtokens'].apply(lambda x: [word for word in x if word not in punctuations]) # remove punctuation
print(df.iloc[0]["webtokens"]) # the tokenized text without punctuation
# PERF FIX: construct a single PorterStemmer and reuse it; the original
# instantiated PorterStemmer() once per word inside the comprehensions.
stemmer = PorterStemmer()
df['webtokens_clean'] = df['webtokens_nopunct'].apply(lambda x: [word for word in x if word not in stopenglish]) # remove stopwords
df['webtokens_stemmed'] = df['webtokens_clean'].apply(lambda x: [stemmer.stem(word) for word in x])
# Some analyses require a string version of the webtext without punctuation or numbers.
# Join the cleaned, stemmed tokens, then strip punctuation characters and digits:
df['webtext_stemmed'] = df['webtokens_stemmed'].apply(lambda x: ' '.join(x))
df['webtext_stemmed'] = df['webtext_stemmed'].apply(lambda x: ''.join(char for char in x if char not in punctuations))
df['webtext_stemmed'] = df['webtext_stemmed'].apply(lambda x: ''.join(char for char in x if not char.isdigit()))
df['webtext_stemmed'][0]
# Tokenized sentences (stopwords kept) both stored per school and collected
# into a flat list for word2vec.
words_by_sentence = [] # list of tokenized sentences across all schools
for school in sample:
    school["sent_toksclean"] = []
    school["sent_tokens"] = [word_tokenize(sentence) for sentence in sent_tokenize(school["WEBTEXT"])]
    for sent in school["sent_tokens"]:
        # Stem, lower-case, and remove punctuation for each word in the sentence.
        cleaned = [stemmer.stem(word.lower()) for word in sent if (word not in punctuations)]
        school["sent_toksclean"].append(cleaned)
        words_by_sentence.append(list(cleaned)) # copy: keep the two stores independent, as before
words_by_sentence[:2]
```
### Counting document lengths
```
# Document-length measures. The punctuation-stripped but stopword-inclusive
# count is the primary one, because stopwords are also part of these schools'
# public image/self-presentation to potential parents, regulators, etc.
df['webstem_count'] = df['webtokens_stemmed'].apply(len) # stems only (no stopwords or punctuation)
df['webpunct_count'] = df['webtokens_nopunct'].apply(len) # stopwords kept, punctuation removed
df['webclean_count'] = df['webtokens_clean'].apply(len) # no stopwords or punctuation
# For which urban status are website self-descriptions the longest?
print(grouped_urban['webpunct_count'].mean().sort_values(ascending=False))
# Mean self-description word count grouped by urban proximity.
# BUG FIX: the error bars previously came from grouped_state (grouped by
# state), which does not match the urban grouping being plotted; use the
# standard deviation from grouped_urban instead.
grouped_urban['webpunct_count'].mean().sort_values(ascending=True).plot(kind = 'bar', title='Schools in mid-sized cities and suburbs have longer self-descriptions than in fringe areas', yerr = grouped_urban["webpunct_count"].std())
plt.show()
# Look at 'FTE' (proxy for # administrators) clustered by urban proximity
# (same yerr grouping fix as above).
grouped_urban['FTE'].mean().sort_values(ascending=True).plot(kind = 'bar', title='Title', yerr = grouped_urban["FTE"].std())
plt.show()
# Type-token ratio (TTR) per school: unique words / total words
# (including repetitions).
df['numtypes'] = df['webtokens_nopunct'].apply(lambda x: len(set(x))) # number of unique words per site
df['TTR'] = df['numtypes'] / df['webpunct_count'] # calculate TTR
# Mean TTR for schools grouped by urban category:
grouped_urban = df.groupby('ULOCAL')
grouped_urban['TTR'].mean().sort_values(ascending=True).plot(kind = 'bar', title='Charters in cities and suburbs have higher textual redundancy than in fringe areas', yerr = grouped_urban["TTR"].std())
plt.show()
```
## (Excessively) Frequent words
```
# Aggregate the cleaned tokens from every school into one flat list.
# IDIOM FIX: the original used DataFrame.apply with a side-effecting lambda
# (list.append inside a comprehension) purely for its side effects; a plain
# nested comprehension builds the same list, in the same order, directly.
webtext_all = [word for tokens in df['webtokens_clean'] for word in tokens]
webtext_all[:20]
# Count the number of times each token occurs.
word_frequency = nltk.FreqDist(webtext_all)
# print out the 50 most frequent words using the function most_common
print(word_frequency.most_common(50))
```
### These are prolific, ritual, empty words and will be excluded from topic models!
## Distinctive words (mostly place names)
```
# Document-term matrix (raw counts) over the stemmed webtext.
sklearn_dtm = countvec.fit_transform(df['webtext_stemmed'])
print(sklearn_dtm)
# What are some of the words in the DTM?
print(countvec.get_feature_names()[:10])
# Now the DTM with cells weighted by TF-IDF score.
dtm_tfidf_df = pandas.DataFrame(tfidfvec.fit_transform(df.webtext_stemmed).toarray(), columns=tfidfvec.get_feature_names(), index = df.index)
dtm_tfidf_df[:20] # let's take a look!
# What are the 20 words with the highest TF-IDF scores?
print(dtm_tfidf_df.max().sort_values(ascending=False)[:20])
```
### Like the frequent words above, these highly "unique" words are empty of meaning and will be excluded from topic models!
## Word Embeddings with word2vec
### Word2Vec features
<ul>
<li>Size: Number of dimensions for word embedding model</li>
<li>Window: Number of context words to observe in each direction</li>
<li>min_count: Minimum frequency for words included in model</li>
<li>sg (Skip-Gram): '0' indicates CBOW model; '1' indicates Skip-Gram</li>
<li>Alpha: Learning rate (initial); prevents model from over-correcting, enables finer tuning</li>
<li>Iterations: Number of passes through dataset</li>
<li>Batch Size: Number of words to sample from data during each pass</li>
<li>Worker: Set the 'worker' option to ensure reproducibility</li>
</ul>
```
# Train a skip-gram (sg=1) word2vec model: 100 dimensions, window 5,
# min_count=2; workers=1 so results are reproducible.
model = gensim.models.Word2Vec(words_by_sentence, size=100, window=5, \
    min_count=2, sg=1, alpha=0.025, iter=5, batch_words=10000, workers=1)
# dictionary of words in model (may not work for old gensim)
# print(len(model.vocab))
# model.vocab
# Cosine similarity between selected stemmed-word pairs.
print(model.similarity('college-prep','align')) # these two are close to essentialism
print(model.similarity('emot', 'curios')) # these two are close to progressivism
# Rough seed dictionaries (stemmed terms) for the two contrasting
# educational philosophies.
essentialism = ['excel', 'perform', 'prep', 'rigor', 'standard', 'align', 'comprehens', 'content', \
    'data-driven', 'market', 'research', 'research-bas', 'program', 'standards-bas']
progressivism = ['inquir', 'curios', 'project', 'teamwork', 'social', 'emot', 'reflect', 'creat',\
    'ethic', 'independ', 'discov', 'deep', 'problem-solv', 'natur']
# Two vectors that anchor the philosophy binary: 'align' (essentialism)
# and 'emot' (progressivism).
print(model.most_similar('align')) # words core to essentialism
print()
# FIX: this print was accidentally duplicated on consecutive lines; emit once.
print(model.most_similar('emot')) # words core to progressivism
# Work with the binary between progressivism vs. essentialism:
# expand each 14-term seed dictionary with its 50 nearest neighbours.
prog_words = model.most_similar(progressivism, topn=50)
prog_words = [word for word, similarity in prog_words]
for word in progressivism:
    prog_words.append(word)
print(prog_words[:20])
ess_words = model.most_similar(essentialism, topn=50) # 50 most similar words for the essentialist dictionary
ess_words = [word for word, similarity in ess_words]
for word in essentialism:
    ess_words.append(word)
print(ess_words[:20])
# construct a combined dictionary
phil_words = ess_words + prog_words
# Similarity of every word to each anchor term, used as plot coordinates.
x = [model.similarity('emot', word) for word in phil_words]
y = [model.similarity('align', word) for word in phil_words]
# Visual of the progressivism/essentialism binary:
# top-left half is essentialism, bottom-right half is progressivism.
_, ax = plt.subplots(figsize=(20,20))
ax.scatter(x, y, alpha=1, color='b')
for i in range(len(phil_words)):
    ax.annotate(phil_words[i], (x[i], y[i]))
ax.set_xlim(.635, 1.005)
ax.set_ylim(.635, 1.005)
plt.plot([0, 1], [0, 1], linestyle='--');
```
### Binary of essentialist (top-left) and progressivist (bottom-right) word vectors
## Topic Modeling with scikit-learn
> For documentation on this topic modeling (TM) package, which uses Latent Dirichlet Allocation (LDA), see [here](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.LatentDirichletAllocation.html).
> And for documentation on the vectorizer package, CountVectorizer from scikit-learn, see [here](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html).
```
#### Adopted From:
# Author: Olivier Grisel <olivier.grisel@ensta.org>
#         Lars Buitinck
#         Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
# Topic-model hyperparameters: corpus size hint, number of topics, and
# how many top words to print per topic.
n_samples = 2000
n_topics = 3
n_top_words = 50
# Helper that pretty-prints each topic's highest-weight words.
def print_top_words(model, feature_names, n_top_words):
    """Print, for each topic in a fitted model, its n_top_words top words."""
    for idx, weights in enumerate(model.components_):
        top_indices = weights.argsort()[:-n_top_words - 1:-1]
        print("\nTopic #%d:" % idx)
        print(" ".join(feature_names[i] for i in top_indices))
    print()
# Vectorize the raw webtext with term counts for LDA; max_df/min_df drop
# the over-frequent and very rare terms identified above.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=70, min_df=4,
                                max_features=None,
                                stop_words=stopenglish, lowercase=1
                                )
tf = tf_vectorizer.fit_transform(df.WEBTEXT)
print("Fitting LDA models with tf features, "
      "n_samples=%d and n_topics=%d..."
      % (n_samples, n_topics))
# Online (mini-batch) LDA with a fixed random_state for reproducibility.
# NOTE(review): `n_topics` was renamed `n_components` in later scikit-learn
# releases — confirm against the installed version before re-running.
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=20,
                                learning_method='online',
                                learning_offset=80.,
                                total_samples=n_samples,
                                random_state=0)
# fit the model
lda.fit(tf)
# print the top words per topic, using the function defined above.
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
```
These topics seem to mean:
- topic 0 relates to **GOALS**,
- topic 1 relates to **CURRICULUM**, and
- topic 2 relates to **PHILOSOPHY** or learning process (but this topic less clear/ more mottled)
```
# Per-school topic distributions, merged back onto the main dataframe.
topic_dist = lda.transform(tf) # document-topic distribution
topic_dist_df = pandas.DataFrame(topic_dist) # turn into a df
df_w_topics = topic_dist_df.join(df) # merge with charter MS dataframe
df_w_topics[:20] # check out the merged df with topics!
topic_columns = range(0,n_topics) # numerical range of topic columns for use in analyses, using n_topics from above
# Which schools are weighted highest for topic 0? How do they trend with regard to urban proximity and student class?
print(df_w_topics[['LSTATE', 'ULOCAL', 'PCTETH', 'PCTFRPL', 0, 1, 2]].sort_values(by=[0], ascending=False))
# Weight each topic by its prevalence in the corpus: multiply each topic
# proportion by the school's word count, stored in new "<n>_wc" columns.
col_list = []
for num in topic_columns:
    col = "%d_wc" % num
    col_list.append(col)
    df_w_topics[col] = df_w_topics[num] * df_w_topics['webpunct_count']
df_w_topics[:20]
# Prevalence of each topic over words, per urban category and per state.
grouped_urban = df_w_topics.groupby('ULOCAL')
for e in col_list:
    print(e)
    print(grouped_urban[e].sum()/grouped_urban['webpunct_count'].sum())
grouped_state = df_w_topics.groupby('LSTATE')
for e in col_list:
    print(e)
    print(grouped_state[e].sum()/grouped_state['webpunct_count'].sum())
# Distribution of urban proximity over the three topics:
fig1 = plt.figure()
chrt = 0
for num in topic_columns:
    chrt += 1
    ax = fig1.add_subplot(2,3, chrt)
    grouped_urban[num].mean().plot(kind = 'bar', yerr = grouped_urban[num].std(), ylim=0, ax=ax, title=num)
fig1.tight_layout()
plt.show()
# Distribution of each topic over words, for each urban category:
fig2 = plt.figure()
chrt = 0
for e in col_list:
    chrt += 1
    ax2 = fig2.add_subplot(2,3, chrt)
    (grouped_urban[e].sum()/grouped_urban['webpunct_count'].sum()).plot(kind = 'bar', ylim=0, ax=ax2, title=e)
fig2.tight_layout()
plt.show()
```
| github_jupyter |
```
import cv2
import time
```
# Connect to A Webcam
```
# Capture from the default camera, mirror each frame to a video file, and
# show a live preview until ESC is pressed.
video_frames = cv2.VideoCapture(0) # 0: default camera
# get frame size from camera
frame_width = int(video_frames.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(video_frames.get(cv2.CAP_PROP_FRAME_HEIGHT))
# initialize writer
# fourcc: video codec. DIVX for windows, XVID for linux and macOS
# NOTE(review): DIVX is conventionally paired with an .avi container —
# confirm the .mp4 output actually plays on the target platform.
# fps: 30
writer = cv2.VideoWriter('./my_video.mp4', cv2.VideoWriter_fourcc(*'DIVX'), 30, (frame_width, frame_height))
# loop: grab frame and display image
while True:
    # read frames
    ret, frame = video_frames.read()
    # FIX: stop cleanly when the camera fails to deliver a frame;
    # previously a None frame would crash writer.write()/imshow().
    if not ret:
        break
    # save video
    writer.write(frame)
    # show frame
    cv2.imshow('frame', frame)
    # if frame is displayed for more than 1 ms and ESC key is pressed, close display
    if cv2.waitKey(1) & 0xFF == 27:
        break
video_frames.release()
writer.release()
cv2.destroyAllWindows()
```
# Load A Video File
```
# Play back the recorded file at ~30 fps until it ends or ESC is pressed.
video_frames = cv2.VideoCapture('./my_video.mp4')
# check if video is loaded
if video_frames.isOpened() == False:
    print('Error: File not found or wrong codec used.')
# show video frames with delay
while video_frames.isOpened():
    # read frames
    ret, frame = video_frames.read()
    # if the video is running
    if ret == True:
        # add delay so playback approximates the 30 fps recording rate
        time.sleep(1/30)
        # show frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(10) & 0xFF == 27:
            break
    # if the video is finished, break loop
    else:
        break
video_frames.release()
cv2.destroyAllWindows()
```
# Draw Shapes on Video
```
# Overlay a fixed rectangle on the live camera preview.
video_frames = cv2.VideoCapture(0)
# get frame size from camera
frame_width = int(video_frames.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(video_frames.get(cv2.CAP_PROP_FRAME_HEIGHT))
# define top left corner
x = frame_width // 4
y = frame_height // 4
# define rectangle size
rect_width = 200
rect_height = 200
# loop: grab frame and display image
while True:
    # read frames
    ret, frame = video_frames.read()
    # FIX: bail out when the camera returns no frame (ret is False) instead
    # of passing None to cv2.rectangle()/imshow().
    if not ret:
        break
    # rectangle
    cv2.rectangle(img=frame, pt1=(x,y), pt2=(x+rect_width, y+rect_height), color=(255, 0, 0), thickness=4)
    # show frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break
video_frames.release()
cv2.destroyAllWindows()
```
# Interact with Video
```
def check_clicks(event, x, y, flags, param):
    """Mouse callback: collect two left-click corners for a rectangle.

    First left click sets pt1, second sets pt2; a third click clears both
    and starts over. State lives in module-level globals shared with the
    display loop.
    """
    global pt1, pt2, topLeft_clicked, botRight_clicked
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    # A rectangle is already complete: clear it before registering a new corner.
    if topLeft_clicked and botRight_clicked:
        pt1, pt2 = (0, 0), (0, 0)
        topLeft_clicked = botRight_clicked = False
    if not topLeft_clicked:
        # First corner of a new rectangle.
        pt1 = (x, y)
        topLeft_clicked = True
    elif not botRight_clicked:
        # Second corner completes the rectangle.
        pt2 = (x, y)
        botRight_clicked = True
# Globals shared with the check_clicks mouse callback.
pt1 = (0, 0)
pt2 = (0, 0)
topLeft_clicked = False
botRight_clicked = False
video_frames = cv2.VideoCapture(0) # 0: default camera
# Register the mouse callback on the preview window.
cv2.namedWindow('frame')
cv2.setMouseCallback('frame', check_clicks)
# loop: grab frame and display image
while True:
    # read frames
    ret, frame = video_frames.read()
    # draw a circle marker at point 1
    if topLeft_clicked:
        cv2.circle(img=frame, center=pt1, radius=5, color=(0, 255, 0), thickness=-1)
    # draw a rectangle once both corners have been chosen
    if topLeft_clicked and botRight_clicked:
        cv2.rectangle(img=frame, pt1=pt1, pt2=pt2, color=(0, 255, 0), thickness=3)
    # show frame
    cv2.imshow('frame', frame)
    # if frame is displayed for more than 1 ms and ESC key is pressed, close display
    if cv2.waitKey(1) & 0xFF == 27:
        break
video_frames.release()
cv2.destroyAllWindows()
```
# Next Steps:
- Start-to-finish face detection project.
| github_jupyter |
# Introduction
No need to say that the Covid19 crisis is a global challenge that is going to change how we see the world. There is a lot of interest in understanding the internals of virus propagation and several disciplines can be really helpful in this task. There is a lot of data going around and we have really accessible tools to work with this data.
For any data scientist this is a nice opportunity to explore and understand time series, graph theory and other fascinating disciplines. If you are just a newbie or a consolidated practitioner, I have decided to share a series of Jupyter Notebooks with some examples of tools and methods that you can find helpful. I will make my best to make all the code available.
[Kaggle](http://www.kaggle.com) has opened a challenge to forecast the propagation of the virus. You can check the challenge with more details at the Kaggle site [here](https://www.kaggle.com/c/covid19-global-forecasting-week-2). I invite you to check the notebooks uploaded by the community. I have not considered to participate in the challenge, but this could be a good opportunity if you plan to start with these kind of challenges.
In this part, I will use Kaggle data to show how we can visualize the virus evolution in different manners. You can download the data (after registration) [here](https://www.kaggle.com/c/covid19-global-forecasting-week-2/data). After downloading the zip file with the dataset we have three CSV files:
* train.csv
* test.csv
* submission.csv
For this exercise we will only use the train.csv file.
**Assumptions**
* You have an already running Jupyter environment
* You are familiar with Pandas
* You have heard about Matplotlib
* The covid19 files are available in the path covid19-global-forecasting-week-2
# Loading a CSV with Pandas
There are several solutions to read CSV files in Python. However, with no disussion Pandas is the most suitable option for many scenarios. We import the pandas library and read the csv file with all the training data.
```
# Load the Kaggle COVID-19 training data into a DataFrame and display it.
import pandas as pd
data = pd.read_csv("covid19-global-forecasting-week-2/train.csv")
data
```
We have a six columns dataframe indicating the country, state, date, number of confirmed cases and number of fatalities. We are going to focus on one country. Let's say Spain.
```
# Keep only the rows for Spain and display them.
spain = data[data['Country_Region']=='Spain']
spain
```
We have data for 64 days with no information at a province/state level. Now we would like to have a visual representation of the time series.
# Matplotlib
The first solution to be considered is Pyplot from the [Matplotlib](https://matplotlib.org/) library.
```
# Simple line plot of confirmed cases; with no explicit x values,
# pyplot uses the DataFrame index on the x axis.
from matplotlib import pyplot
pyplot.plot(spain.ConfirmedCases)
pyplot.title('Confirmed cases in Spain')
pyplot.show()
```
The figure above is the representation of the number of confirmed cases in Spain until March 26th. We have not set the X axis, so pyplot is considering the id column defined by Pandas. To define a more reasonable X ticks we simply pass a list with the same number of items of the Y axis starting from zero.
```
# Re-plot with an explicit 0..N-1 x axis instead of the DataFrame index.
pyplot.plot(range(0,spain.ConfirmedCases.size),spain.ConfirmedCases)
pyplot.title('Confirmed cases in Spain')
pyplot.show()
```
Now we have a clearer view of the X axis. However, we would like to have a comparison of the number of fatalities vs the number of confirmed cases.
```
# Overlay confirmed cases and fatalities on the same axes, with a legend.
pyplot.plot(range(0,spain.ConfirmedCases.size),spain.ConfirmedCases,label='ConfirmedCases')
pyplot.plot(range(0,spain.Fatalities.size),spain.Fatalities,label='Fatalities')
pyplot.legend()
pyplot.title('Confirmed cases vs fatalities in Spain')
pyplot.show()
```
The increment shows an exponential behaviour. A logarithmic scale would help a better view.
```
# Same comparison, but on a logarithmic y scale to make the
# exponential-looking growth easier to read.
pyplot.plot(range(0,spain.ConfirmedCases.size),spain.ConfirmedCases,label='ConfirmedCases')
pyplot.plot(range(0,spain.Fatalities.size),spain.Fatalities,label='Fatalities')
pyplot.yscale('log')
pyplot.title('Confirmed cases vs fatalities in Spain log scale')
pyplot.legend()
pyplot.show()
```
What about displaying the date in the X axis? To do that we need pyplot to format the x axis. This requires datetime structures to set the datetime of every observation. We already have them in the Date column. The main difference is setting the formatter for the x axis using *mdates* from *matplotlib*.
```
import matplotlib.dates as mdates
# convert date strings to datenums
dates = mdates.datestr2num(spain.Date)
# Format the x axis as ISO dates, one tick every 5 days.
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
pyplot.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
pyplot.plot(dates,spain.ConfirmedCases,label='confirmed')
pyplot.plot(dates,spain.Fatalities,label='fatalities')
pyplot.title('Confirmed cases vs fatalities in Spain with datetime in x axis')
pyplot.legend()
# Rotate the date labels so they do not overlap.
pyplot.gcf().autofmt_xdate()
pyplot.show()
```
# Seaborn
For those familiar with [ggplot](https://ggplot2.tidyverse.org/), [Seaborn](https://seaborn.pydata.org) will look familiar. Seaborn is built on top of Matplotlib and offers a high-level interface for drawing statistical graphics. It is particularly suitable to be used in conjunction with Pandas.
We can replicate some of the plots above:
```
# Reproduce the confirmed-cases plot with seaborn's relplot (line kind).
import seaborn as sns
g = sns.relplot(x=range(spain.Date.size),y='ConfirmedCases', data=spain,kind='line',)
g.set_axis_labels(x_var='') # I remove the xlabel for consistency with the previous plot
pyplot.title('Confirmed cases in Spain')
pyplot.show()
```
To set the x axis with datetimes we do the same we did with matplotlib. However, now we are going to directly transform the Date column from the Pandas Dataframe so we can directly call seaborn to use it.
```
# Transform the Date column to matplotlib datenum
spain.Date = spain.Date.apply(lambda x : mdates.datestr2num(x))
# NOTE(review): assigning into a column of the filtered `spain` frame is the
# operation the text alludes to; presumably it raises pandas'
# SettingWithCopyWarning — verify.
```
After this, the Date column type is a datenum that can be used to correctly format the x axis.
(By the way, this operation triggers a warning message. I let you to investigate why this is happening ;) )
```
# Plot against the (now numeric) Date column and format the axis as dates.
sns.relplot(x='Date',y='ConfirmedCases', data=spain,kind='line',)
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
pyplot.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
pyplot.gcf().autofmt_xdate()
pyplot.title('Confirmed cases in Spain with datetime in x axis')
pyplot.show()
```
So far we replicated the same plots we already created using pyplot. Why is this seaborn interesting then? I find seaborn particularly relevant to create plots where we can easily compare different series. What if we try to compare the evolution of cases in different countries? We are going to select a sample of countries and compare their evolutions.
To do that we have to run two operations.
* First. We filter the countries included in a list.
* Second. For some countries the values per day reflect observations per province. We are only interested in the observations per country and day. We aggregate the confirmed cases and fatalities columns for every country in the same day.
```
# sample of countries to study
chosen = ['Spain', 'Iran', 'Singapore', 'France', 'United Kingdom']
# 1) Filter rows which country is in the list 2) group by country and date and finally sum the result
# (collapses per-province rows into one row per country per day)
sample = data[data.Country_Region.isin(chosen)].groupby(['Date','Country_Region'], as_index=False,).sum()
sample
# As a sanity check we are going to check that the previous operation was correct.
# Lets check how many confirmed cases France had on 2020-03-24
france = data[(data.Country_Region=='France') & (data.Date=='2020-03-24')]
print('These are the values for France on 2020-03-24 before running the aggregation')
display(france)
print('Total number of confirmed cases: ', france.ConfirmedCases.sum())
print('And this is the aggregation we obtained')
# The aggregated row should equal the manual sum above.
sample[(sample.Country_Region=='France') & (sample.Date=='2020-03-24')]
```
We have manually checked that the values we obtained after aggregation are correct. Now we are going to plot a comparison of these values per country.
```
# remember to transform the date timestamp
sample.Date = sample.Date.apply(lambda x : mdates.datestr2num(x))
# Confirmed cases — one facet per country (2 per row).
sns.relplot(x='Date',y='ConfirmedCases', col='Country_Region', hue='Country_Region', col_wrap=2, data=sample,kind='line',)
# NOTE(review): gca() only formats the current (last) axes of the facet grid —
# other subplots may keep default ticks; verify the rendered output.
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
pyplot.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
pyplot.gcf().autofmt_xdate()
# Fatalities
sns.relplot(x='Date',y='Fatalities', col='Country_Region', hue='Country_Region', col_wrap=2, data=sample,kind='line',)
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
pyplot.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
pyplot.gcf().autofmt_xdate()
```
Additionally, we can compare all the timelines in the same plot.
```
# All countries in a single plot, distinguished by colour (hue), for both series.
sns.relplot(x='Date',y='ConfirmedCases', hue='Country_Region', data=sample,kind='line',)
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
pyplot.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
pyplot.gcf().autofmt_xdate()
sns.relplot(x='Date',y='Fatalities', hue='Country_Region', data=sample,kind='line',)
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
pyplot.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
pyplot.gcf().autofmt_xdate()
```
# Conclusions
In this notebook we have shown how we can use Python Matplotlib and Seaborn with Pandas to plot the time series corresponding to the Covid19 virus.
| github_jupyter |
```
# created on Dec 24, 2020
# modified on April 14, 2021
# @author: Bo Zhao
# @email: zhaobo@uw.edu
# @website: https://hgis.uw.edu
# @organization: Department of Geography, University of Washington, Seattle
# @description: Search existing tweets
!python -m pip install tweepy
import tweepy, json, time, csv
# Create a csv file to store the structured data after processing.
csvfile = open("assets/searched_tweets.csv", "w", newline='', encoding="utf-8") # mode a, r, w
# All the fields of each data entry that I want to collect.
fieldnames = ['username', 'userid', 'profile_location', 'created_at', 'text', 'retweet_count', 'source', 'coordinates']
# Create a writer to write the structured data to the csv file.
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# Write the header to the csv file
writer.writeheader()
# Apply for your own Twitter API keys at https://developer.twitter.com/en/apply-for-access
# NOTE(review): the placeholder strings below must be replaced with real
# credentials before this cell can run.
consumer_key = "your_consumer_key"
consumer_secret = "your_consumer_secret"
access_token = "your_access_token"
access_token_secret = "your_access_token_secret"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
# Define the search term and the date_since date as variables
search_words = "#BLM"
location = "47.6138893,-122.3107869,100mi"
# read the Twitter API document to look for other ways to customize your queries.
# refer to https://developer.twitter.com/en/docs/twitter-api/v1/rules-and-filtering/search-operators
# for example: you can ignore all the retweets by #wildfires -filter:retweets
# Geolocalization: the search operator "near" isn't available in the API, but there is a more precise way to restrict
# your query by a given location using the geocode parameter specified with the template "latitude,longitude,radius",
# for example, "47.6138893,-122.3107869,10mi" (capitol hill at Seattle). When conducting geo searches, the search API will first attempt to find Tweets
# which have lat/long within the queried geocode, and in case of not having success, it will attempt to find Tweets created
# by users whose profile location can be reverse geocoded into a lat/long within the queried geocode, meaning that is possible
# to receive Tweets which do not include lat/long information.
date_since = "2020-10-16"
# Collect tweets
# NOTE(review): `api.search` and the `since` kwarg match tweepy < 4.0; newer
# releases renamed this to `api.search_tweets` — verify the installed version.
# tweets = tweepy.Cursor(api.search, q=search_words, lang="en", since=date_since).items(100)
tweets = tweepy.Cursor(api.search, q=search_words, geocode=location, lang="en", since=date_since).items(100)
# Iterate and print tweets
for tweet in tweets:
    row = {
        'username': tweet.author.name,
        'userid': tweet.author.id,
        # NOTE(review): this records the *author account* creation time, not
        # the tweet's — tweet.created_at may be intended; verify.
        'created_at': str(tweet.author.created_at),
        'profile_location': tweet.author.location,
        'text': tweet.text,
        'retweet_count': tweet.retweet_count,
        'source': tweet.source,
        'coordinates': tweet.coordinates
    }
    writer.writerow(row)
    print(row)
csvfile.close()
# notify the completion of the program in the console.
print("finished")
```
| github_jupyter |
## SLU08 - Metrics for Regression: Exercise Notebook
In this notebook, you will implement:
- Mean Absolute Error (MAE)
- Mean Squared Error (MSE)
- Root Mean Squared Error (RMSE)
- Coefficient of Determination (R²)
- Adjusted R²
- Scikitlearn metrics
- Using metrics for k-fold cross validation
Start by loading the data we will use to fit a linear regression and fitting the LinearRegression estimator from scikitlearn:
```
# Base imports
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — this notebook requires an older scikit-learn; verify the pinned version.
data = load_boston()
# Features as a DataFrame, target as a Series.
x = pd.DataFrame(data['data'], columns=data['feature_names'])
y = pd.Series(data['target'])
x.head()
np.random.seed(42)
x_housing = x.values
y_housing = y.values
# Fit an OLS model on the full dataset and keep its in-sample predictions;
# the metric exercises below are evaluated against these.
lr = LinearRegression()
lr.fit(x_housing, y_housing)
y_hat_housing = lr.predict(x_housing)
# Coefficient vector with the intercept prepended.
betas_housing = pd.Series([lr.intercept_] + list(lr.coef_))
```
## 1 Metrics
We will start by covering the metrics we learned in the unit, in particular a set of related metrics:
- Mean Absolute Error
$$MAE = \frac{1}{N} \sum_{n=1}^N \left| y_n - \hat{y}_n \right|$$
- Mean Squared Error
$$MSE = \frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2$$
- Root Mean Squared Error
$$RMSE = \sqrt{MSE}$$
### 1.1 Mean Absolute Error
Finally, implement the Mean Absolute Error in the function below.
```
def mean_absolute_error(y, y_pred):
    """
    Compute the Mean Absolute Error (MAE).

    MAE = (1/N) * sum_n |y_n - y_hat_n|

    Args:
        y_pred : numpy.array with shape (num_samples,) - predictions
        y : numpy.array with shape (num_samples,) - labels

    Returns:
        mae : float with Mean Absolute Error
    """
    # 1) Compute the error.
    error = y - y_pred
    # 2) Compute the absolute value of the errors for each sample
    abs_error = np.abs(error)
    # 3) Compute the mean of the absolute value of the errors
    mae = np.mean(abs_error)
    return mae
```
Check the outputs of your function match the results below:
```
# Sanity check: in-sample MAE on the Boston data must match the reference
# value to 3 decimal places.
mae = mean_absolute_error(y_housing, y_hat_housing)
print('Mean Absolute Error Boston dataset: {}'.format(mae))
np.testing.assert_almost_equal(mae, 3.2709, 3)
```
### 1.2 Mean Squared Error
Implement the mean squared error in the next function:
```
def mean_squared_error(y, y_pred):
    """
    Compute the Mean Squared Error (MSE).

    MSE = (1/N) * sum_n (y_n - y_hat_n)^2

    Args:
        y_pred : numpy.array with shape (num_samples,) - predictions
        y : numpy.array with shape (num_samples,) - labels

    Returns:
        mse : float with Mean Squared Error Value
    """
    # 1) Compute the error.
    error = y - y_pred
    # 2) Compute the squared value of the errors for each sample
    squared_error = error ** 2
    # 3) Compute the mean squared value of the errors
    mse = np.mean(squared_error)
    return mse
```
Check the outputs of your function match the results below:
```
# Sanity check: in-sample MSE on the Boston data against the reference value.
mse = mean_squared_error(y_housing, y_hat_housing)
print('Mean Squared Error Boston dataset: {}'.format(mse))
np.testing.assert_almost_equal(mse, 21.8948, 3)
```
### 1.3 Root Mean Squared Error
Implement the root mean squared error in the function below:
```
def root_mean_squared_error(y, y_pred):
    """
    Compute the Root Mean Squared Error (RMSE).

    RMSE = sqrt(MSE) = sqrt((1/N) * sum_n (y_n - y_hat_n)^2)

    Args:
        y_pred : numpy.array with shape (num_samples,) - predictions
        y : numpy.array with shape (num_samples,) - labels

    Returns:
        rmse : float with the Root Mean Squared Error Value
    """
    # 1) Compute the mean squared error (inlined so this function is
    # self-contained and does not depend on the exercise above).
    mse = np.mean((y - y_pred) ** 2)
    # 2) Compute the root square.
    rmse = np.sqrt(mse)
    return rmse
```
Check the outputs of your function match the results below:
```
# Sanity check: in-sample RMSE on the Boston data against the reference value.
rmse = root_mean_squared_error(y_housing, y_hat_housing)
print('Root Mean Squared Error Boston dataset: {}'.format(rmse))
np.testing.assert_almost_equal(rmse, 4.6792, 3)
```
Next we will focus on the Coefficient of Determination - $R^2$ - and its adjusted form. See the equations below:
- $R^2$ score
$$R² = 1 - \frac{MSE(y, \hat{y})}{MSE(y, \bar{y})}
= 1 - \frac{\frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2}{\frac{1}{N} \sum_{n=1}^N (y_n - \bar{y})^2}
= 1 - \frac{\sum_{n=1}^N (y_n - \hat{y}_n)^2}{\sum_{n=1}^N (y_n - \bar{y})^2}$$
where $$\bar{y} = \frac{1}{N} \sum_{n=1}^N y_n$$
- Adjusted $R^2$ score
$$\bar{R}^2 = 1 - \frac{N - 1}{N - K - 1} (1 - R^2)$$
where $N$ is the number of observations in the dataset used for training the model (i.e. number of rows of the pandas dataframe) and $K$ is the number of features used by your model (i.e. number of columns of the pandas dataframe)
### 1.4 R² score
Start by implementing the $R^2$ score in the function below:
```
def r_squared(y, y_pred):
    """
    Compute the coefficient of determination R².

    R² = 1 - MSE(y, y_hat) / MSE(y, y_mean)

    Args:
        y_pred : numpy.array with shape (num_samples,) - predictions
        y : numpy.array with shape (num_samples,) - labels

    Returns:
        r2 : float with R squared value
    """
    # 1) Compute labels mean.
    y_mean = np.mean(y)
    # 2) Compute the mean squared error between the target and the predictions.
    mse_pred = np.mean((y - y_pred) ** 2)
    # 3) Compute the mean squared error between the target and its mean.
    mse_mean = np.mean((y - y_mean) ** 2)
    # 4) Finally, compute R² (the 1/N factors cancel, so using means is
    # equivalent to using sums of squares).
    r2 = 1 - mse_pred / mse_mean
    return r2
```
Check the outputs of your function match the results below:
```
# Sanity check: in-sample R² on the Boston data against the reference value.
r2 = r_squared(y_housing, y_hat_housing)
print('R² Boston dataset: {}'.format(r2))
np.testing.assert_almost_equal(r2, 0.7406, 3)
```
### 1.5 Adjusted R² score
Then implement the adjusted $R^2$ score in the function below:
```
def adjusted_r_squared(y, y_pred, K):
    """
    Compute the adjusted R² score.

    adj R² = 1 - (N - 1) / (N - K - 1) * (1 - R²)

    where N is the number of observations and K the number of features.

    Args:
        y : numpy.array with shape (num_samples,) - labels
        y_pred : numpy.array with shape (num_samples,) - predictions
        K : integer - Number of features used in the model that computed y_hat.

    Returns:
        r2_adj : float with adjusted R squared value
    """
    # 1) Compute R² (1 - SS_res / SS_tot), inlined so this function is
    # self-contained and does not depend on the exercise above.
    r2 = 1 - np.sum((y - y_pred) ** 2) / np.sum((y - np.mean(y)) ** 2)
    # 2) Get number of samples
    N = y.shape[0]
    # 3) Adjust R² for the number of features used by the model.
    r2_adj = 1 - (N - 1) / (N - K - 1) * (1 - r2)
    return r2_adj
```
Check the outputs of your function match the results below:
```
# Sanity check: adjusted R² (K = number of feature columns) against the
# reference value.
r2 = adjusted_r_squared(y_housing, y_hat_housing, x_housing.shape[1])
print('Adjusted R² Boston dataset: {}'.format(r2))
np.testing.assert_almost_equal(r2, 0.7337, 3)
```
## 2 Scikit-Learn metrics
As you know, scikitlearn also already provides you with implementations of these metrics:
- `sklearn.metrics.mean_absolute_error`
- `sklearn.metrics.mean_squared_error`
- `sklearn.metrics.r2_score`
- `sklearn.linear_model.LinearRegression.score`
```
# Import sklearn metrics
from sklearn import metrics as sklearn_metrics
```
#### 2.1 Root Mean Squared Error
Implement the root mean squared error functions below with scikitlearn:
```
def sklearn_root_mean_squared_error(y, y_pred):
    """
    Compute the Root Mean Squared Error via scikit-learn.

    Args:
        y_pred : numpy.array with shape (num_samples,) - predictions
        y : numpy.array with shape (num_samples,) - labels

    Returns:
        rmse : float with Root Mean Squared Error
    """
    # RMSE is the square root of sklearn's mean squared error.
    return np.sqrt(sklearn_metrics.mean_squared_error(y, y_pred))
```
Make sure your function passes the tests below:
```
# Sanity check: sklearn-based RMSE against the reference value.
rmse = sklearn_root_mean_squared_error(y_housing, y_hat_housing)
print('Sklearn RMSE Boston dataset: {}'.format(rmse))
np.testing.assert_almost_equal(rmse, 4.6791, 3)
```
#### 2.2 Adjusted R² score
Implement the adjusted R² score below using scikitlearn:
```
def sklearn_adjusted_r_squared(y, y_pred, K):
    """
    Compute the adjusted R² score using scikit-learn's r2_score.

    adj R² = 1 - (N - 1) / (N - K - 1) * (1 - R²)

    Args:
        y_pred : numpy.array with shape (num_samples,) - predictions
        y : numpy.array with shape (num_samples,) - labels
        K : integer - Number of features used in the model that computed y_hat.

    Returns:
        r2_adj : float with adjusted R squared value
    """
    # Plain R² from sklearn, then the standard sample-size adjustment.
    r2 = sklearn_metrics.r2_score(y, y_pred)
    N = y.shape[0]
    r2_adj = 1 - (N - 1) / (N - K - 1) * (1 - r2)
    return r2_adj
```
Make sure your function passes the tests below:
```
# Sanity check: sklearn-based adjusted R² against the reference value.
r2 = sklearn_adjusted_r_squared(y_housing, y_hat_housing, x_housing.shape[1])
print('Sklearn Adjusted R² Boston dataset: {}'.format(r2))
np.testing.assert_almost_equal(r2, 0.7337, 3)
```
Finally, compare the sklearn-based metrics with your own for the housing dataset:
```
# Side-by-side comparison of the hand-rolled metrics against scikit-learn's
# implementations on the same labels/predictions — values should agree.
MAE = mean_absolute_error(y_housing, y_hat_housing)
MSE = mean_squared_error(y_housing, y_hat_housing)
RMSE = root_mean_squared_error(y_housing, y_hat_housing)
R2 = r_squared(y_housing, y_hat_housing)
R2_adj = adjusted_r_squared(y_housing, y_hat_housing, x_housing.shape[1])
print('Metric for housing dataset with base implementation:')
print('Mean Absolute Error housing dataset: {}'.format(MAE))
print('Mean Squared Error housing dataset: {}'.format(MSE))
print('Root Mean Squared Error housing dataset: {}'.format(RMSE))
print('R² housing dataset: {}'.format(R2))
print('Adjusted R² housing dataset: {}'.format(R2_adj))
print('\n')
SK_MAE = sklearn_metrics.mean_absolute_error(y_housing, y_hat_housing)
SK_MSE = sklearn_metrics.mean_squared_error(y_housing, y_hat_housing)
SK_RMSE = sklearn_root_mean_squared_error(y_housing, y_hat_housing)
SK_R2 = sklearn_metrics.r2_score(y_housing, y_hat_housing)
SK_R2_adj = sklearn_adjusted_r_squared(y_housing, y_hat_housing, x_housing.shape[1])
print('Metric for housing dataset with scikitlearn:')
print('Mean Absolute Error housing dataset: {}'.format(SK_MAE))
print('Mean Squared Error housing dataset: {}'.format(SK_MSE))
print('Root Mean Squared Error housing dataset: {}'.format(SK_RMSE))
print('R² housing dataset: {}'.format(SK_R2))
print('Adjusted R² housing dataset: {}'.format(SK_R2_adj))
```
## 3 Using the Metrics
Now you'll use the metrics to fit and check performance of your LinearRegression and SGDRegressor, with the `cross_val_scores` method of scikitlearn. Implement the missing steps below:
```
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn import linear_model
def estimator_cross_fold(X, y, K, clf_choice='linear', scoring='neg_mean_squared_error'):
    """
    Run K-fold cross validation for a chosen regressor and also fit it on
    the full dataset.

    Args:
        X : numpy.array with shape (num_samples, num_features) - sample data
        y : numpy.array with shape (num_samples,) - sample labels
        K : integer - Number of iterations for k-fold
        clf_choice: choice of estimator ('linear' or 'sgd')
        scoring : scoring function as per sklearn notation

    Returns:
        clf: estimator trained with full data
        scores : scores for each fold
    """
    if clf_choice == 'linear':
        clf = linear_model.LinearRegression()
    elif clf_choice == 'sgd':
        clf = linear_model.SGDRegressor(max_iter=10000, random_state=42)
    else:
        print('Invalid estimator')
        return None
    # Run k-fold cross validation. cross_val_score clones clf internally,
    # so these scores do not train this instance.
    scores = cross_val_score(clf, X, y, cv=K, scoring=scoring)
    # Fit on the full dataset so the returned estimator is usable.
    clf.fit(X, y)
    return clf, scores
```
Let's run the k-fold cross validation for the several cases and get the average error:
```
## Preparation code - no need to worry about this for now
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
# We need to shuffle our data cause `cross_val_score` doesn't shuffle it internally
x_housing_shuff, y_housing_shuff = shuffle(x_housing, y_housing, random_state=42)
# We need to scale our data for SGD to behave correctly
sc = StandardScaler()
x_housing_scaled = sc.fit_transform(x_housing_shuff)
y_housing_scaled = y_housing_shuff
# Run 5-fold CV for both estimators and both scoring functions; the asserts
# pin the expected mean scores to 2 decimal places.
clf_lr, nmse_lr = estimator_cross_fold(x_housing_scaled, y_housing_scaled, 5, clf_choice='linear', scoring='neg_mean_squared_error')
np.testing.assert_almost_equal(nmse_lr.mean(), -23.4885, 2)
clf_sgd, nmse_sgd = estimator_cross_fold(x_housing_scaled, y_housing_scaled, 5, clf_choice='sgd', scoring='neg_mean_squared_error')
np.testing.assert_almost_equal(nmse_sgd.mean(), -23.6898, 2)
clf_lr, r2_lr = estimator_cross_fold(x_housing_scaled, y_housing_scaled, 5, clf_choice='linear', scoring='r2')
np.testing.assert_almost_equal(r2_lr.mean(), 0.7152, 2)
clf_sgd, r2_sgd = estimator_cross_fold(x_housing_scaled, y_housing_scaled, 5, clf_choice='sgd', scoring='r2')
np.testing.assert_almost_equal(r2_sgd.mean(), 0.71244, 2)
print('Cross val evaluation for Boston dataset:')
print('NMSE with Linear Regression: {}'.format(nmse_lr.mean()))
print('NMSE with SGD: {}'.format(nmse_sgd.mean()))
print('R² Score with Linear Regression: {}'.format(r2_lr.mean()))
print('R² Score with SGD: {}'.format(r2_sgd.mean()))
```
For this particular case it seems that the linear regression generalises better than the SGD regressor. It's important to remind that the SGD regressor is at a slight disadvantage, because we didn't check the data distribution to understand if it has appropriate scaling. Remember that SGD will be sensitive to this, while linear regression won't. Feel free to replicate these exercises but applying min-max scaling beforehand and check the new results.
| github_jupyter |
# Results: Musk1 Scaled
<b>MIL</b> <i>stratified k-fold cross-validation</i> is performed.
Metrics: <br>
- AUC
- Accuracy
### Import Libraries
```
import sys,os
import warnings
# NOTE(review): hard-coded absolute path — this cell only runs on the
# original author's machine; adjust for your environment.
os.chdir('/Users/josemiguelarrieta/Documents/MILpy')
sys.path.append(os.path.realpath('..'))
from sklearn.utils import shuffle
import random as rand
import numpy as np
from data import load_data
warnings.filterwarnings('ignore')
from MILpy.functions.mil_cross_val import mil_cross_val
#Import Algorithms
from MILpy.Algorithms.simpleMIL import simpleMIL
from MILpy.Algorithms.MILBoost import MILBoost
from MILpy.Algorithms.maxDD import maxDD
from MILpy.Algorithms.CKNN import CKNN
from MILpy.Algorithms.EMDD import EMDD
from MILpy.Algorithms.MILES import MILES
from MILpy.Algorithms.BOW import BOW
```
### Load data
```
# Load the scaled Musk1 dataset as MIL bags, bag labels and raw instances,
# and fix the cross-validation setup: 5 folds, repeated 5 times.
bags,labels,X = load_data('musk1_scaled')
folds = 5
runs = 5
```
#### Simple MIL [max]
```
# Repeat `runs` rounds of `folds`-fold MIL cross-validation with the
# simpleMIL classifier using the 'max' bag-combination rule, shuffling the
# bags each round, and report the mean AUC and accuracy.
# NOTE(review): this and the following cells use Python 2 print-statement syntax.
SMILa = simpleMIL()
parameters_smil = {'type': 'max'}
print '\n========= SIMPLE MIL RESULT [MAX] ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    #Shuffle Data
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=SMILa, folds=folds, parameters=parameters_smil, timer = True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Simple MIL [min]
```
# Same repeated cross-validation, with simpleMIL's 'min' bag-combination rule.
parameters_smil = {'type': 'min'}
print '\n========= SIMPLE MIL RESULT [MIN] ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=SMILa, folds=folds,parameters=parameters_smil, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Simple MIL [extreme]
```
parameters_smil = {'type': 'extreme'}
print '\n========= SIMPLE MIL RESULT [MIN] ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
print '\n run #'+ str(i)
#Shuffle Data
bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=SMILa, folds=folds,parameters=parameters_smil, timer=True)
print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
AUC.append(auc)
ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Simple MIL [average]
```
# Same repeated cross-validation, with simpleMIL's 'average' bag-combination rule.
parameters_smil = {'type': 'average'}
print '\n========= SIMPLE MIL RESULT [AVERAGE] ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=SMILa, folds=folds,parameters=parameters_smil, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Bag of Words
```
# Repeated cross-validation for the Bag-of-Words MIL classifier
# (100 clusters, diagonal covariance, 20 EM iterations).
bow_classifier = BOW()
parameters_bow = {'k':100,'covar_type':'diag','n_iter':20}
print '\n========= BAG OF WORDS RESULT ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=bow_classifier, folds=folds,parameters=parameters_bow, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Citation KNN
```
# Repeated cross-validation for Citation-KNN (3 references, 5 citers).
cknn_classifier = CKNN()
parameters_cknn = {'references': 3, 'citers': 5}
print '\n========= CKNN RESULT ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=cknn_classifier, folds=folds,parameters=parameters_cknn, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Diverse Density
```
# Repeated cross-validation for the Diverse Density (maxDD) classifier
# with its default parameters.
maxDD_classifier = maxDD()
print '\n========= DIVERSE DENSITY RESULT========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=maxDD_classifier, folds=folds,parameters={}, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### EM-DD
```
# Repeated cross-validation for the EM-DD classifier with its default parameters.
emdd_classifier = EMDD()
print '\n========= EM-DD RESULT ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels.ravel(), model=emdd_classifier, folds=folds,parameters={}, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### MILBoost
```
# Repeated cross-validation for MILBoost. Unlike the other cells this one
# passes `labels` without .ravel().
milboost_classifier = MILBoost()
print '\n========= MILBOOST RESULT ========='
AUC = []
ACCURACIE=[]
for i in range(runs):
    print '\n run #'+ str(i)
    bags,labels = shuffle(bags, labels, random_state=rand.randint(0, 100))
    accuracie, results_accuracie, auc,results_auc, elapsed = mil_cross_val(bags=bags,labels=labels, model=milboost_classifier, folds=folds,parameters={}, timer=True)
    print '\n'+'AUC: ' + str(auc)+'\n'+'Accuracie: '+ str(accuracie)+'\n'+'Elapsed: '+ str(round(elapsed,2))
    AUC.append(auc)
    ACCURACIE.append(accuracie)
print '\n MEAN AUC: '+ str(np.mean(AUC)) + '\n MEAN ACCURACIE: '+ str(np.mean(ACCURACIE))
```
#### Miles
```
#Pending
```
| github_jupyter |
## Import
```
# Imports and analysis constants for the related-links A/B-test notebook.
import os
import pandas as pd
import numpy as np
import ast
import re
# z test
from statsmodels.stats.proportion import proportions_ztest
# bayesian bootstrap and vis
import matplotlib.pyplot as plt
import seaborn as sns
import bayesian_bootstrap.bootstrap as bb
from astropy.utils import NumpyRNGContext
# progress bar
from tqdm import tqdm, tqdm_notebook
from scipy import stats
from collections import Counter
# set up the style for our plots
sns.set(style='white', palette='colorblind', font_scale=1.3,
        rc={'figure.figsize':(12,9),
            "axes.facecolor": (0, 0, 0, 0)})
# instantiate progress bar goodness
tqdm.pandas(tqdm_notebook)
pd.set_option('max_colwidth',500)
# the number of bootstrap means used to generate a distribution
boot_reps = 10000
# alpha - false positive rate
alpha = 0.05
# number of tests
m = 4
# Correct alpha for multiple comparisons (Bonferroni: alpha / m)
alpha = alpha / m
# The Bonferroni correction can be used to adjust confidence intervals also.
# If one establishes m confidence intervals, and wishes to have an overall confidence level of 1-alpha,
# each individual confidence interval can be adjusted to the level of 1-(alpha/m).
# reproducible
seed = 1337
```
## File/dir locations
### Processed journey data
```
# Locate and load the processed, sampled journey data for the A/B test.
DATA_DIR = os.getenv("DATA_DIR")
filename = "testing_processed_sampled_taxon_ab_2019-01-21.csv.gz"
filepath = os.path.join(
    DATA_DIR, "sampled_journey",
    filename)
filepath
# read in processed sampled journey with just the cols we need for related links
df = pd.read_csv(filepath, sep ="\t", compression="gzip")
# convert from str to list — these columns are stored as stringified
# Python literals, parsed safely with ast.literal_eval.
df['Event_cat_act_agg']= df['Event_cat_act_agg'].progress_apply(ast.literal_eval)
df['Page_Event_List'] = df['Page_Event_List'].progress_apply(ast.literal_eval)
df['Page_List'] = df['Page_List'].progress_apply(ast.literal_eval)
df['Page_List_Length'] = df['Page_List'].progress_apply(len)
# drop dodgy rows, where page variant is not A or B.
df = df.query('ABVariant in ["A", "B"]')
```
### Nav type of page lookup - is it a finding page? if not it's a thing page
```
# Load the page-type lookup and split paths into "thing" pages
# (is_finding == 0) and "finding" pages (is_finding == 1).
filename = "document_types.csv.gz"
# created a metadata dir in the DATA_DIR to hold this data
filepath = os.path.join(
    DATA_DIR, "metadata",
    filename)
print(filepath)
df_finding_thing = pd.read_csv(filepath, sep="\t", compression="gzip")
df_finding_thing.head()
thing_page_paths = df_finding_thing[
    df_finding_thing['is_finding']==0]['pagePath'].tolist()
finding_page_paths = df_finding_thing[
    df_finding_thing['is_finding']==1]['pagePath'].tolist()
```
## Outliers
Some rows should be removed before analysis. For example rows with journey lengths of 500 or very high related link click rates. This process might have to happen once features have been created.
## journey_click_rate
There is no difference in the proportion of journeys using at least one related link (journey_click_rate) between page variant A and page variant B.
\begin{equation*}
\frac{\text{total number of journeys including at least one click on a related link}}{\text{total number of journeys}}
\end{equation*}
### Prepare features
#### Related link prep
```
def get_number_of_events_rl(event):
    """Return the event count when the (category, action) pair is a
    'relatedLinkClicked' / 'Related content' click, otherwise 0."""
    category_action, count = event[0], event[1]
    is_related_link_click = (
        category_action[0] == 'relatedLinkClicked'
        and 'Related content' in category_action[1]
    )
    return count if is_related_link_click else 0
def sum_related_click_events(event_list):
    """Total related-link click events across all (event, count) pairs in the list."""
    return sum(get_number_of_events_rl(item) for item in event_list)
def is_related(x):
    """Return True when a journey registered one or more related-link clicks."""
    clicked_at_least_once = x > 0
    return clicked_at_least_once
# get the number of related links clicks per Sequence
df['Related Links Clicks per seq'] = df['Event_cat_act_agg'].map(sum_related_click_events)
# map across the Sequence variable, which includes pages and Events
# we want to pass all the list elements to a function one-by-one and then collect the output.
df["Has_Related"] = df["Related Links Clicks per seq"].map(is_related)
df['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences']
df.head(3)
```
### Frequentist statistics
```
def _variant_counts(df, col_name, variant):
    """Return (trials, successes, proportion) of journeys for one page variant.

    Trials are total Occurrences for the variant; successes are Occurrences
    on rows where ``col_name`` equals 1 (e.g. at least one related link click).
    """
    n = df[df.ABVariant == variant].Occurrences.sum()
    x = df[(df['ABVariant'] == variant) & (df[col_name] == 1)].Occurrences.sum()
    return n, x, x / n


def z_prop(df, col_name):
    """
    Conduct z_prop test and generate confidence interval.

    Using Bernoulli trial terminology where X (or x) is number of successes
    and n is number of trials (total occurrences), we compare ABVariant A
    and B. p is x/n. We use a z proportion test between variants.

    Args:
        df: processed journey DataFrame with ABVariant, Occurrences and
            the ``col_name`` indicator columns.
        col_name: name of a 0/1 indicator column defining "success".

    Returns:
        dict with sample sizes, success counts, proportions (overall and
        per variant), the z statistic and its two-sided p-value.

    Raises:
        AssertionError: if a proportion falls outside [0, 1], the variant
            split does not cover the data, or the z-test's large-sample
            assumptions are violated.
    """
    # A & B combined
    n = df.Occurrences.sum()
    # prop of journeys with at least one related link; occurrences summed for those rows gives X
    p = df[df[col_name] == 1].Occurrences.sum() / n
    assert (p >= 0), "Prop less than zero!"
    assert (p <= 1), "Prop greater than one!"
    # per-variant trials/successes/proportions (extracted helper keeps this DRY)
    n_a, x_a, p_a = _variant_counts(df, col_name, "A")
    n_b, x_b, p_b = _variant_counts(df, col_name, "B")
    assert (n == n_a + n_b), "Error in filtering by ABVariant!"
    # validate assumptions:
    # the z-statistic is valid only when the sample size is large enough;
    # nAp, nAq, nBp and nBq should all be >= 5, where p is the baseline
    # success probability and q = 1 - p.
    assert (n_a * p) >= 5, "Assumptions for z prop test invalid!"
    assert (n_a * (1 - p)) >= 5, "Assumptions for z prop test invalid!"
    assert (n_b * p) >= 5, "Assumptions for z prop test invalid!"
    assert (n_b * (1 - p)) >= 5, "Assumptions for z prop test invalid!"
    # statsmodels two-sided z proportion test on (successes, trials) per variant
    count = np.array([x_a, x_b])
    nobs = np.array([n_a, n_b])
    z, p_value = proportions_ztest(count, nobs, value=0, alternative='two-sided')
    return {'metric_name': col_name, 'stats_method': 'z_prop_test',
            'x_ab': x_a + x_b, 'n_ab': n, 'p': p,
            'x_a': x_a, 'n_a': n_a, 'p_a': p_a,
            'x_b': x_b, 'n_b': n_b, 'p_b': p_b,
            'test_statistic': z, 'p-value': p_value}
```
#### Statistical significance
```
# help(proportions_ztest)
has_rel = z_prop(df, 'Has_Related')
has_rel
# if true reject null hypothesis
has_rel['p-value'] < alpha
```
#### Practical significance - uplift
```
# uplift
def compute_standard_error_prop_two_samples(x_a, n_a, x_b, n_b):
    """Standard error of the difference between two independent proportions.

    Each sample proportion contributes its own sampling variance,
    p * (1 - p) / n; because the two samples are independent, the variance
    of the difference is the sum of the two variances, and the standard
    error is its square root.
    """
    prop_a = x_a / n_a
    prop_b = x_b / n_b
    variance_sum = (prop_a * (1 - prop_a) / n_a) + (prop_b * (1 - prop_b) / n_b)
    return np.sqrt(variance_sum)
def zconf_interval_two_samples(x_a, n_a, x_b, n_b, alpha=0.05):
    """Return the (1 - alpha) confidence interval for the difference of two proportions.

    The standard error of the difference between two independent sample
    proportions combines the standard errors of the two distributions:
    the variance of the difference is the sum of the per-sample variances.
    If the interval includes zero we fail to reject the null hypothesis at
    the given alpha.
    """
    prop_a = x_a / n_a
    prop_b = x_b / n_b
    # sampling variance of the difference (standard-error helper inlined)
    variance = prop_a * (1 - prop_a) / n_a + prop_b * (1 - prop_b) / n_b
    std_err = np.sqrt(variance)
    z_crit = stats.norm.ppf(1 - 0.5 * alpha)
    delta = prop_b - prop_a
    return delta - z_crit * std_err, delta + z_crit * std_err
# Due to multiple testing we used the Bonferroni correction for alpha
ci_low,ci_upp = zconf_interval_two_samples(has_rel['x_a'], has_rel['n_a'],
has_rel['x_b'], has_rel['n_b'], alpha = alpha)
print(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'
.format(100*ci_low, 100*ci_upp))
```
### Bayesian statistics
```
# https://medium.com/@thibalbo/coding-bayesian-ab-tests-in-python-e89356b3f4bd
```
To be developed, a Bayesian approach can provide a simpler interpretation.
## count of clicks on navigation elements
There is no statistically significant difference in the count of clicks on navigation elements per journey between page variant A and page variant B.
\begin{equation*}
{\text{total number of navigation element click events from content pages}}
\end{equation*}
### Related link counts
```
# get the total number of related links clicks for that row (clicks per sequence multiplied by occurrences)
df['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences']
```
### Navigation events
```
def is_nav_event(event):
    """Return True if the event represents a click on a navigation element.

    Navigation events are breadcrumb clicks, home-link clicks, or related
    link clicks on the "Explore the topic" component.

    Note: the previous docstring was copied from the related-links row-total
    code and described a different function entirely; it has been corrected.
    """
    return any(
        ['breadcrumbClicked' in event, 'homeLinkClicked' in event,
         all(cond in event for cond in [
             'relatedLinkClicked', 'Explore the topic'])])
def count_nav_events(page_event_list):
    """Count nav events fired from a content ("thing") page in a Page Event List.

    NOTE(review): relies on the module-level ``thing_page_paths`` list built
    earlier from document_types.csv.gz.
    """
    return sum(
        1
        for page, event in page_event_list
        if is_nav_event(event) and page in thing_page_paths
    )
# needs finding_thing_df read in from document_types.csv.gz
df['Content_Page_Nav_Event_Count'] = df['Page_Event_List'].progress_map(count_nav_events)
def count_search_from_content(page_list):
    """Count searches ('/search?q=' page views) immediately following a content page.

    NOTE(review): relies on the module-level ``thing_page_paths`` list built
    earlier from document_types.csv.gz.
    """
    searches_from_content = 0
    # walk adjacent (previous, current) page pairs
    for prev_page, page in zip(page_list, page_list[1:]):
        if '/search?q=' in page and prev_page in thing_page_paths:
            searches_from_content += 1
    return searches_from_content
df['Content_Search_Event_Count'] = df['Page_List'].progress_map(count_search_from_content)
```
### Generate the derived metric
```
# count of nav or search clicks
df['Content_Nav_or_Search_Count'] = df['Content_Page_Nav_Event_Count'] + df['Content_Search_Event_Count']
# occurrences is accounted for by the group by bit in our bayesian boot analysis function
df['Content_Nav_Search_Event_Sum_row_total'] = df['Content_Nav_or_Search_Count'] * df['Occurrences']
# required for journeys with no nav later
df['Has_No_Nav_Or_Search'] = df['Content_Nav_Search_Event_Sum_row_total'] == 0
```
#### Vestigial metric which we dropped
```
# (nav events + search events) + 1 / related links clicked + 1
# add one to numerator and denominator to avoid undesirable characteristics
# not sure this has great utility as a proxy, seems volatile
# df['Ratio_Nav_Search_to_Rel'] = (df['Content_Nav_Search_Event_Sum_row_total'] + 1) / (df['Related Links Clicks row total'] + 1)
# sns.distplot(df['Ratio_Nav_Search_to_Rel'].values);
```
This derived variable is problematic, should consider dropping it. Use counts of the numerator instead (as this could be modelled using generalised linear model), as related link clickedness is captured by the earlier metric.
## Temporary df file in case of crash
### Save
```
# create temp file in case we crash after all that hard work
# uncomment the next cell if you want to save
filepath = os.path.join(
DATA_DIR, "rl_sampled_processed_journey",
filename)
# df.to_csv(filepath, sep="\t", compression="gzip", index=False)
```
### Bayesian bootstrap
```
def mean_bb(counter_X_keys, counter_X_vals, n_replications):
    """Simulate the posterior distribution of the mean via the Bayesian bootstrap.

    Args:
        counter_X_keys: the distinct observed values (array like).
        counter_X_vals: the weight of each observed value (e.g. its
            occurrence count), used as the Dirichlet concentration
            parameters (array like).
        n_replications: the number of bootstrap replications to perform
            (positive integer).

    Returns:
        A list of n_replications samples from the posterior of the mean.

    Note: the previous docstring documented a parameter ``X`` that does not
    exist in the signature; it has been corrected.
    """
    weights = np.random.dirichlet(counter_X_vals, n_replications)
    # each Dirichlet draw is one resampled weighting of the observed values
    return [np.dot(counter_X_keys, w) for w in weights]
def bayesian_bootstrap_analysis(df, col_name=None, boot_reps = 10000, seed = 1337):
    """Bayesian-bootstrap the mean of a column of interest for each Page Variant.

    Args:
        df: A rl_sampled_processed pandas DataFrame.
        col_name: A string naming the column of interest.
        boot_reps: number of bootstrap replications.
        seed: RNG seed, fixed inside a NumpyRNGContext for reproducibility.

    Returns:
        a_bootstrap: a vector of boot_reps resampled means from variant A.
        b_bootstrap: a vector of boot_reps resampled means from variant B.
    """
    with NumpyRNGContext(seed):
        variant_bootstraps = []
        for variant in ("A", "B"):
            # collapse to one row per distinct value of col_name,
            # summing Occurrences (weights the bootstrap by volume)
            grouped = df[df.ABVariant == variant].groupby(
                col_name).sum().reset_index()
            variant_bootstraps.append(
                mean_bb(grouped[col_name], grouped['Occurrences'], boot_reps))
    return variant_bootstraps[0], variant_bootstraps[1]
a_bootstrap, b_bootstrap = bayesian_bootstrap_analysis(df, col_name='Content_Nav_or_Search_Count', boot_reps=boot_reps, seed = seed)
def bb_hdi(a_bootstrap, b_bootstrap, alpha = 0.05):
    """Calculate 1-alpha highest density intervals (HDI) for A, B and their difference.

    Args:
        a_bootstrap: a list of resampled means from page A journeys.
        b_bootstrap: a list of resampled means from page B journeys.
        alpha: significance level applied to every interval computed here.

    Returns:
        A dict with keys:
            a_ci_low / a_ci_hi: bounds of the 1-alpha HDI for A.
            b_ci_low / b_ci_hi: bounds of the 1-alpha HDI for B.
            ypa_diff_mean: mean of the posterior of the difference (B - A).
            ypa_diff_ci_low / ypa_diff_ci_hi: bounds of the 1-alpha HDI of
                the difference.
            p_value: proportion of the difference posterior greater than
                zero, which can act a bit like a p-value.
    """
    # HDI for each variant's posterior of the mean
    a_ci_low, a_ci_hi = bb.highest_density_interval(a_bootstrap, alpha=alpha)
    b_ci_low, b_ci_hi = bb.highest_density_interval(b_bootstrap, alpha=alpha)
    # posterior for the difference between B's and A's mean of resampled means
    # ("ypa" prefix is vestigial, from the blog post this was adapted from)
    ypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)
    ypa_diff_mean = ypa_diff.mean()
    # BUG FIX: alpha was previously not forwarded here, so the difference HDI
    # was always computed at the library default (0.05) even when a
    # Bonferroni-corrected alpha was requested.
    ypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff, alpha=alpha)
    # proportion of the difference posterior above zero: a pseudo p-value
    p_value = (ypa_diff > 0).sum() / ypa_diff.shape[0]
    return {'a_ci_low': a_ci_low, 'a_ci_hi': a_ci_hi,
            'b_ci_low': b_ci_low, 'b_ci_hi': b_ci_hi,
            'ypa_diff_mean': ypa_diff_mean,
            'ypa_diff_ci_low': ypa_diff_ci_low,
            'ypa_diff_ci_hi': ypa_diff_ci_hi,
            'p_value': p_value}
# ratio is vestigial but we keep it here for convenience
# it's actually a count but considers occurrences
ratio_stats = bb_hdi(a_bootstrap, b_bootstrap, alpha=alpha)
ratio_stats
ax = sns.distplot(b_bootstrap, label='B')
ax.errorbar(x=[ratio_stats['b_ci_low'], ratio_stats['b_ci_hi']], y=[2, 2], linewidth=5, c='teal', marker='o',
label='95% HDI B')
ax = sns.distplot(a_bootstrap, label='A', ax=ax, color='salmon')
ax.errorbar(x=[ratio_stats['a_ci_low'], ratio_stats['a_ci_hi']], y=[5, 5], linewidth=5, c='salmon', marker='o',
label='95% HDI A')
ax.set(xlabel='Content_Nav_or_Search_Count', ylabel='Density')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
# calculate the posterior for the difference between A's and B's ratio
# ypa prefix is vestigial from blog post
ypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)
# get the hdi
ypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)
# the mean of the posterior
print('mean:', ypa_diff.mean())
print('low ci:', ypa_diff_ci_low, '\nhigh ci:', ypa_diff_ci_hi)
ax = sns.distplot(ypa_diff)
ax.plot([ypa_diff_ci_low, ypa_diff_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Content_Nav_or_Search_Count', ylabel='Density',
title='The difference between B\'s and A\'s mean counts times occurrences')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
# We count the number of values greater than 0 and divide by the total number
# of observations
# which returns us the proportion of values in the distribution that are
# greater than 0, could act a bit like a p-value
(ypa_diff > 0).sum() / ypa_diff.shape[0]
```
## proportion of journeys with a page sequence including content and related links only
There is no statistically significant difference in the proportion of journeys with a page sequence including content and related links only (including loops) between page variant A and page variant B
\begin{equation*}
\frac{\text{total number of journeys that only contain content pages and related links (i.e. no nav pages)}}{\text{total number of journeys}}
\end{equation*}
### Overall
```
# if (Content_Nav_Search_Event_Sum == 0) that's our success
# Has_No_Nav_Or_Search == 1 is a success
# the problem is symmetrical so doesn't matter too much
sum(df.Has_No_Nav_Or_Search * df.Occurrences) / df.Occurrences.sum()
sns.distplot(df.Content_Nav_or_Search_Count.values);
```
### Frequentist statistics
#### Statistical significance
```
nav = z_prop(df, 'Has_No_Nav_Or_Search')
nav
```
#### Practical significance - uplift
```
# function defined earlier in notebook
# Due to multiple testing we used the Bonferroni correction for alpha
ci_low,ci_upp = zconf_interval_two_samples(nav['x_a'], nav['n_a'],
nav['x_b'], nav['n_b'], alpha = alpha)
print(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'
.format(100*ci_low, 100*ci_upp))
```
## Average Journey Length (number of page views)
There is no statistically significant difference in the average page list length of journeys (including loops) between page variant A and page variant B.
### Bayesian bootstrap for non-parametric hypotheses
```
# http://savvastjortjoglou.com/nfl-bayesian-bootstrap.html
# let's use mean journey length (could probably model parametrically but we use it for demonstration here)
# some journeys have length 500 and should probably be removed as they are likely bots or other weirdness
# for reproducibility, set the seed within this context
a_bootstrap, b_bootstrap = bayesian_bootstrap_analysis(df, col_name='Page_List_Length', boot_reps=boot_reps, seed = seed)
# Calculate a 95% HDI
a_ci_low, a_ci_hi = bb.highest_density_interval(a_bootstrap)
print('low ci:', a_ci_low, '\nhigh ci:', a_ci_hi)
ax = sns.distplot(a_bootstrap, color='salmon')
ax.plot([a_ci_low, a_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Journey Length', ylabel='Density', title='Page Variant A Mean Journey Length')
sns.despine()
plt.legend();
# Calculate a 95% HDI
b_ci_low, b_ci_hi = bb.highest_density_interval(b_bootstrap)
print('low ci:', b_ci_low, '\nhigh ci:', b_ci_hi)
ax = sns.distplot(b_bootstrap)
ax.plot([b_ci_low, b_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Journey Length', ylabel='Density', title='Page Variant B Mean Journey Length')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
ax = sns.distplot(b_bootstrap, label='B')
ax = sns.distplot(a_bootstrap, label='A', ax=ax, color='salmon')
ax.set(xlabel='Journey Length', ylabel='Density')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
```
We can also measure the uncertainty in the difference between the Page Variants's Journey Length by subtracting their posteriors.
```
# calculate the posterior for the difference between A's and B's YPA
ypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)
# get the hdi
ypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)
# the mean of the posterior
ypa_diff.mean()
print('low ci:', ypa_diff_ci_low, '\nhigh ci:', ypa_diff_ci_hi)
ax = sns.distplot(ypa_diff)
ax.plot([ypa_diff_ci_low, ypa_diff_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Journey Length', ylabel='Density',
title='The difference between B\'s and A\'s mean Journey Length')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
```
We can actually calculate the probability that B's mean Journey Length was greater than A's mean Journey Length by measuring the proportion of values greater than 0 in the above distribution.
```
# We count the number of values greater than 0 and divide by the total number
# of observations
# which returns us the proportion of values in the distribution that are
# greater than 0, could act a bit like a p-value
(ypa_diff > 0).sum() / ypa_diff.shape[0]
```
| github_jupyter |
### Introduction to Deep4Rec
Brief practical introduction to Deep4Rec. We'll show how to load a dataset and train a model.
Check more examples in the [examples folder]()!
- Author: Marianne Linhares Monteiro ([github: mari-linhares](https://github.com/mari-linhares/), [twitter: hereismari](https://twitter.com/hereismari))
First let's import the required dependencies.
```
import sys
sys.path.append('../../')
import os
# Ignore some tensorflow logs
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from matplotlib import pyplot as plt
import tensorflow as tf
```
Now let's import Deep4Rec.
There are two main modules in deep4rec that you usually are going to use:
* `datasets`: Common datasets used for Recommendation Systems evaluation already preprocessed and ready to be used.
* `models`: Recommenders based on deep learning.
Let's import them.
```
from deep4rec import datasets
from deep4rec import models
```
We can choose any dataset listed in the datasets options.
```
datasets.options()
# Build chosen dataset
ds = datasets.build_dataset("ml-100k")
```
We can choose any model listed in the models options.
```
models.options()
```
Let's start with the FM model.
```
fm = models.FM(ds, num_units=16)
```
In order to train a model just call `train`.
```
BATCH_SIZE = 128
EPOCHS = 10
LR = 0.04
fm.train(
ds,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
loss_function="rmse",
optimizer=tf.train.AdagradOptimizer(learning_rate=LR),
)
```
Let's now try NeuralFM and NeuralMF models and compare them. We just need to add a few lines of code and we're ready to go!
```
neuralfm = models.NeuralFM(ds,
num_units=16,
layers=[16, 8],
dropout_prob=[0.1, 0.1, 0.1] # dropout for layer 1, layer 2 and features weights.
)
neuralfm.train(
ds,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
loss_function="rmse",
optimizer=tf.train.AdagradOptimizer(learning_rate=LR),
)
```
All Deep Learning methods have the same interface and parameters, so to use NeuralMF instead of NeuralFM we just need to use a different class name.
```
neuralmf = models.NeuralMF(ds,
num_units=16,
layers=[16, 8],
dropout_prob=[0.1, 0.1, 0.1] # dropout for layer 1, layer 2 and features weights.
)
neuralmf.train(
ds,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
loss_function="rmse",
optimizer=tf.train.AdagradOptimizer(learning_rate=LR),
)
```
Let's compare the methods. After training a model we can check its losses in each epoch using `model._losses['test']` and `model._losses['train']` (if we had a validation set we could also use `model._losses['valid']`).
```
# utility function to plot model losses
def plot_losses(model):
    """Plot per-epoch test and train RMSE curves for a trained Deep4Rec model.

    NOTE(review): reads the private ``model._losses`` dict of per-epoch loss
    records; the surrounding prose mentions ``model.test_losses`` /
    ``model.train_losses`` — confirm which is the supported API.
    """
    plt.title(type(model).__name__)
    for split in ('test', 'train'):
        rmse_per_epoch = [epoch_loss["rmse"] for epoch_loss in model._losses[split]]
        plt.plot(rmse_per_epoch, label=split)
    plt.legend()
    plt.show()
plot_losses(fm); plot_losses(neuralfm); plot_losses(neuralmf)
```
### What's next?
- Contribute to the code base! Feel free to create a PR with your contributions, if you don't know from where start check the open issues.
- Check the examples folder
| github_jupyter |
# Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis.
>Using an RNN rather than a strictly feedforward network is more accurate since we can include information about the *sequence* of words.
Here we'll use a dataset of movie reviews, accompanied by sentiment labels: positive or negative.
<img src="assets/reviews_ex.png" width=40%>
### Network Architecture
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=40%>
>**First, we'll pass in words to an embedding layer.** We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the Word2Vec lesson. You can actually train an embedding with the Skip-gram Word2Vec model and use those embeddings as input, here. However, it's good enough to just have an embedding layer and let the network learn a different embedding table on its own. *In this case, the embedding layer is for dimensionality reduction, rather than for learning semantic representations.*
>**After input words are passed to an embedding layer, the new embeddings will be passed to LSTM cells.** The LSTM cells will add *recurrent* connections to the network and give us the ability to include information about the *sequence* of words in the movie review data.
>**Finally, the LSTM outputs will go to a sigmoid output layer.** We're using a sigmoid function because positive and negative = 1 and 0, respectively, and a sigmoid will output predicted, sentiment values between 0-1.
We don't care about the sigmoid outputs except for the **very last one**; we can ignore the rest. We'll calculate the loss by comparing the output at the last time step and the training label (pos or neg).
---
### Load in and visualize the data
```
import numpy as np
# read data from text files
with open('data/reviews.txt', 'r') as f:
reviews = f.read()
with open('data/labels.txt', 'r') as f:
labels = f.read()
print(reviews[:2000])
print()
print(labels[:20])
```
## Data pre-processing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. Here are the processing steps, we'll want to take:
>* We'll want to get rid of periods and extraneous punctuation.
* Also, you might notice that the reviews are delimited with newline characters `\n`. To deal with those, I'm going to split the text into each review using `\n` as the delimiter.
* Then I can combined all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
```
from string import punctuation
print(punctuation)
# get rid of punctuation
reviews = reviews.lower() # lowercase, standardize
all_text = ''.join([c for c in reviews if c not in punctuation])
# split by new lines and spaces
reviews_split = all_text.split('\n')
all_text = ' '.join(reviews_split)
# create a list of words
words = all_text.split()
words[:30]
```
### Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
> **Exercise:** Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers **start at 1, not 0**.
> Also, convert the reviews to integers and store the reviews in a new list called `reviews_ints`.
```
# feel free to use this import
from collections import Counter
## Build a dictionary that maps words to integers
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
## use the dict to tokenize each review in reviews_split
## store the tokenized reviews in reviews_ints
reviews_ints = []
for review in reviews_split:
reviews_ints.append([vocab_to_int[word] for word in review.split()])
```
**Test your code**
As a test that you've implemented the dictionary correctly, print out the number of unique words in your vocabulary and the contents of the first, tokenized review.
```
# stats about vocabulary
print('Unique words: ', len((vocab_to_int))) # should ~ 74000+
print()
# print tokens in first review
print('Tokenized review: \n', reviews_ints[:1])
```
### Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
> **Exercise:** Convert labels from `positive` and `negative` to 1 and 0, respectively, and place those in a new list, `encoded_labels`.
```
# 1=positive, 0=negative label conversion
labels_split = labels.split('\n')
encoded_labels = np.array([1 if label == 'positive' else 0 for label in labels_split])
```
### Removing Outliers
As an additional pre-processing step, we want to make sure that our reviews are in good shape for standard processing. That is, our network will expect a standard input text size, and so, we'll want to shape our reviews into a specific length. We'll approach this task in two main steps:
1. Getting rid of extremely long or short reviews; the outliers
2. Padding/truncating the remaining data so that we have reviews of the same length.
<img src="assets/outliers_padding_ex.png" width=40%>
Before we pad our review text, we should check for reviews of extremely short or long lengths; outliers that may mess with our training.
```
# outlier review stats
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
```
Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. We'll have to remove any super short reviews and truncate super long reviews. This removes outliers and should allow our model to train more efficiently.
> **Exercise:** First, remove *any* reviews with zero length from the `reviews_ints` list and their corresponding label in `encoded_labels`.
```
print('Number of reviews before removing outliers: ', len(reviews_ints))
## remove any reviews/labels with zero length from the reviews_ints list.
# get indices of any reviews with length 0
non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]
# remove 0-length reviews and their labels
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
encoded_labels = np.array([encoded_labels[ii] for ii in non_zero_idx])
print('Number of reviews after removing outliers: ', len(reviews_ints))
```
---
## Padding sequences
To deal with both short and very long reviews, we'll pad or truncate all our reviews to a specific length. For reviews shorter than some `seq_length`, we'll pad with 0s. For reviews longer than `seq_length`, we can truncate them to the first `seq_length` words. A good `seq_length`, in this case, is 200.
> **Exercise:** Define a function that returns an array `features` that contains the padded data, of a standard size, that we'll pass to the network.
* The data should come from `review_ints`, since we want to feed integers to the network.
* Each row should be `seq_length` elements long.
* For reviews shorter than `seq_length` words, **left pad** with 0s. That is, if the review is `['best', 'movie', 'ever']`, `[117, 18, 128]` as integers, the row will look like `[0, 0, 0, ..., 0, 117, 18, 128]`.
* For reviews longer than `seq_length`, use only the first `seq_length` words as the feature vector.
As a small example, if the `seq_length=10` and an input review is:
```
[117, 18, 128]
```
The resultant, padded sequence should be:
```
[0, 0, 0, 0, 0, 0, 0, 117, 18, 128]
```
**Your final `features` array should be a 2D array, with as many rows as there are reviews, and as many columns as the specified `seq_length`.**
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
```
def pad_features(reviews_ints, seq_length):
    ''' Return features of review_ints, where each review is padded with 0's
    or truncated to the input seq_length.

    Args:
        reviews_ints: list of tokenized reviews (each a list of ints).
        seq_length: fixed output length for every row.

    Returns:
        2D int ndarray of shape (len(reviews_ints), seq_length): short
        reviews are left-padded with zeros, long reviews keep only their
        first seq_length tokens.
    '''
    features = np.zeros((len(reviews_ints), seq_length), dtype=int)
    for i, row in enumerate(reviews_ints):
        # Truncate explicitly instead of relying on numpy clamping the
        # negative index in features[i, -len(row):] when len(row) > seq_length.
        truncated = row[:seq_length]
        # Skip empty reviews (the original raised a broadcast error on them);
        # their rows stay all-zero.
        if truncated:
            features[i, -len(truncated):] = np.array(truncated)
    return features
# Test your implementation!
seq_length = 200
features = pad_features(reviews_ints, seq_length=seq_length)
## test statements - do not change - ##
assert len(features)==len(reviews_ints), "Your features should have as many rows as reviews."
assert len(features[0])==seq_length, "Each feature row should contain seq_length values."
# print first 10 values of the first 30 batches
print(features[:30,:10])
```
## Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
> **Exercise:** Create the training, validation, and test sets.
* You'll need to create sets for the features and the labels, `train_x` and `train_y`, for example.
* Define a split fraction, `split_frac` as the fraction of data to **keep** in the training set. Usually this is set to 0.8 or 0.9.
* Whatever data is left will be split in half to create the validation and *testing* data.
```
split_frac = 0.8
## split data into training, validation, and test data (features and labels, x and y)
split_idx = int(len(features)*0.8)
train_x, remaining_x = features[:split_idx], features[split_idx:]
train_y, remaining_y = encoded_labels[:split_idx], encoded_labels[split_idx:]
test_idx = int(len(remaining_x)*0.5)
val_x, test_x = remaining_x[:test_idx], remaining_x[test_idx:]
val_y, test_y = remaining_y[:test_idx], remaining_y[test_idx:]
## print out the shapes of your resultant feature data
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
```
**Check your work**
With train, validation, and test fractions equal to 0.8, 0.1, 0.1, respectively, the final, feature data shapes should look like:
```
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
```
---
## DataLoaders and Batching
After creating training, test, and validation data, we can create DataLoaders for this data by following two steps:
1. Create a known format for accessing our data, using [TensorDataset](https://pytorch.org/docs/stable/data.html#) which takes in an input set of data and a target set of data with the same first dimension, and creates a dataset.
2. Create DataLoaders and batch our training, validation, and test Tensor datasets.
```
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
train_loader = DataLoader(train_data, batch_size=batch_size)
```
This is an alternative to creating a generator function for batching our data into full batches.
```
import torch
from torch.utils.data import TensorDataset, DataLoader

# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
valid_data = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))

# dataloaders
batch_size = 50

# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)

# obtain one batch of training data
dataiter = iter(train_loader)
# use the builtin next(); the .next() method was removed from DataLoader
# iterators in modern PyTorch (Python-3 iterator protocol)
sample_x, sample_y = next(dataiter)

print('Sample input size: ', sample_x.size()) # batch_size, seq_length
print('Sample input: \n', sample_x)
print()
print('Sample label size: ', sample_y.size()) # batch_size
print('Sample label: \n', sample_y)
```
---
# Sentiment Network with PyTorch
Below is where you'll define the network.
<img src="assets/network_diagram.png" width=40%>
The layers are as follows:
1. An [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) that converts our word tokens (integers) into embeddings of a specific size.
2. An [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) defined by a hidden_state size and number of layers
3. A fully-connected output layer that maps the LSTM layer outputs to a desired output_size
4. A sigmoid activation layer which turns all outputs into a value 0-1; return **only the last sigmoid output** as the output of this network.
### The Embedding Layer
We need to add an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) because there are 74000+ words in our vocabulary. It is massively inefficient to one-hot encode that many classes. So, instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using Word2Vec, then load it here. But, it's fine to just make a new layer, using it for only dimensionality reduction, and let the network learn the weights.
### The LSTM Layer(s)
We'll create an [LSTM](https://pytorch.org/docs/stable/nn.html#lstm) to use in our recurrent network, which takes in an input_size, a hidden_dim, a number of layers, a dropout probability (for dropout between multiple layers), and a batch_first parameter.
Most of the time, your network will have better performance with more layers; typically between 2 and 3. Adding more layers allows the network to learn really complex relationships.
> **Exercise:** Complete the `__init__`, `forward`, and `init_hidden` functions for the SentimentRNN model class.
Note: `init_hidden` should initialize the hidden and cell state of an lstm layer to all zeros, and move those state to GPU, if available.
```
# First checking if GPU is available.
# Detect CUDA once; this flag is reused by init_hidden and the training loops.
train_on_gpu = torch.cuda.is_available()
print('Training on GPU.' if train_on_gpu else 'No GPU available, training on CPU.')

import torch.nn as nn

class SentimentRNN(nn.Module):
    """
    The RNN model that will be used to perform Sentiment analysis.

    Pipeline: token ids -> embedding -> stacked LSTM -> dropout ->
    linear -> sigmoid; only the final time-step's probability is returned.
    """

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
        """
        Initialize the model by setting up the layers.
        """
        super(SentimentRNN, self).__init__()

        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim

        # embedding lookup table and stacked LSTM
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True)

        # extra dropout between the LSTM and the classifier head
        self.dropout = nn.Dropout(0.3)

        # classifier head: hidden vector -> output_size -> probability
        self.fc = nn.Linear(hidden_dim, output_size)
        self.sig = nn.Sigmoid()

    def forward(self, x, hidden):
        """
        Perform a forward pass of our model on some input and hidden state.
        """
        batch_size = x.size(0)

        # look up embeddings and run the recurrent stack
        lstm_out, hidden = self.lstm(self.embedding(x), hidden)

        # flatten (batch, seq, hidden) -> (batch*seq, hidden) for the linear layer
        flat = lstm_out.contiguous().view(-1, self.hidden_dim)

        # dropout -> fully-connected -> per-time-step sigmoid probability
        probs = self.sig(self.fc(self.dropout(flat)))

        # reshape back to batch-first and keep only the last time step
        probs = probs.view(batch_size, -1)
        sig_out = probs[:, -1]

        # return last sigmoid output and hidden state
        return sig_out, hidden

    def init_hidden(self, batch_size):
        ''' Initializes hidden state '''
        # Build zeroed (hidden, cell) tensors of shape
        # n_layers x batch_size x hidden_dim, matching the model's dtype.
        weight = next(self.parameters()).data
        shape = (self.n_layers, batch_size, self.hidden_dim)

        h0 = weight.new(*shape).zero_()
        c0 = weight.new(*shape).zero_()
        if train_on_gpu:
            h0, c0 = h0.cuda(), c0.cuda()

        return (h0, c0)
```
## Instantiate the network
Here, we'll instantiate the network. First up, defining the hyperparameters.
* `vocab_size`: Size of our vocabulary or the range of values for our input, word tokens.
* `output_size`: Size of our desired output; the number of class scores we want to output (pos/neg).
* `embedding_dim`: Number of columns in the embedding lookup table; size of our embeddings.
* `hidden_dim`: Number of units in the hidden layers of our LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
* `n_layers`: Number of LSTM layers in the network. Typically between 1-3
> **Exercise:** Define the model hyperparameters.
```
# Instantiate the model w/ hyperparams
vocab_size = len(vocab_to_int)+1 # +1 for the 0 padding + our word tokens
output_size = 1      # single sigmoid unit: probability the review is positive
embedding_dim = 400  # width of each word vector in the lookup table
hidden_dim = 256     # LSTM hidden-state size
n_layers = 2         # number of stacked LSTM layers
net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
```
---
## Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. You can also add code to save a model by name.
>We'll also be using a new kind of cross entropy loss, which is designed to work with a single Sigmoid output. [BCELoss](https://pytorch.org/docs/stable/nn.html#bceloss), or **Binary Cross Entropy Loss**, applies cross entropy loss to a single value between 0 and 1.
We also have some data and training hyperparameters:
* `lr`: Learning rate for our optimizer.
* `epochs`: Number of times to iterate through the training dataset.
* `clip`: The maximum gradient value to clip at (to prevent exploding gradients).
```
# loss and optimization functions
lr = 0.001

# BCELoss expects a single sigmoid probability per sample and a float target.
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

# training params
epochs = 4 # 3-4 is approx where I noticed the validation loss stop decreasing
counter = 0
print_every = 100
clip = 5 # gradient clipping

# move model to GPU, if available
if train_on_gpu:
    net.cuda()

net.train()
# train for some number of epochs
for e in range(epochs):
    # initialize hidden state fresh at the start of each epoch
    h = net.init_hidden(batch_size)

    # batch loop
    for inputs, labels in train_loader:
        counter += 1
        if train_on_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()

        # Creating new variables for the hidden state, otherwise
        # we'd backprop through the entire training history
        h = tuple([each.data for each in h])

        # zero accumulated gradients
        net.zero_grad()

        # get the output from the model
        output, h = net(inputs, h)

        # calculate the loss and perform backprop
        loss = criterion(output.squeeze(), labels.float())
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(net.parameters(), clip)
        optimizer.step()

        # loss stats
        if counter % print_every == 0:
            # Get validation loss; no gradients are needed here, so wrap the
            # pass in torch.no_grad() to avoid building autograd graphs
            # (saves memory and compute, results are unchanged).
            val_h = net.init_hidden(batch_size)
            val_losses = []
            net.eval()
            with torch.no_grad():
                for inputs, labels in valid_loader:
                    # Creating new variables for the hidden state, otherwise
                    # we'd backprop through the entire training history
                    val_h = tuple([each.data for each in val_h])
                    if train_on_gpu:
                        inputs, labels = inputs.cuda(), labels.cuda()
                    output, val_h = net(inputs, val_h)
                    val_loss = criterion(output.squeeze(), labels.float())
                    val_losses.append(val_loss.item())

            net.train()
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Step: {}...".format(counter),
                  "Loss: {:.6f}...".format(loss.item()),
                  "Val Loss: {:.6f}".format(np.mean(val_losses)))
```
---
## Testing
There are a few ways to test your network.
* **Test data performance:** First, we'll see how our trained model performs on all of our defined test_data, above. We'll calculate the average loss and accuracy over the test data.
* **Inference on user-generated data:** Second, we'll see if we can input just one example review at a time (without a label), and see what the trained model predicts. Looking at new, user input data like this, and predicting an output label, is called **inference**.
```
# Get test data loss and accuracy

test_losses = [] # track loss
num_correct = 0

# init hidden state
h = net.init_hidden(batch_size)

net.eval()
# Evaluation only: wrap in torch.no_grad() so no autograd graphs are built
# (saves memory; the computed losses and predictions are unchanged).
with torch.no_grad():
    # iterate over test data
    for inputs, labels in test_loader:
        # detach the hidden state between batches
        h = tuple([each.data for each in h])
        if train_on_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()

        # get predicted outputs
        output, h = net(inputs, h)

        # calculate loss
        test_loss = criterion(output.squeeze(), labels.float())
        test_losses.append(test_loss.item())

        # convert output probabilities to predicted class (0 or 1)
        pred = torch.round(output.squeeze())  # rounds to the nearest integer

        # compare predictions to true label
        correct_tensor = pred.eq(labels.float().view_as(pred))
        correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
        num_correct += np.sum(correct)

# -- stats! -- ##
# avg test loss
print("Test loss: {:.3f}".format(np.mean(test_losses)))

# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
```
### Inference on a test review
You can change this test_review to any text that you want. Read it and think: is it pos or neg? Then see if your model predicts correctly!
> **Exercise:** Write a `predict` function that takes in a trained net, a plain text_review, and a sequence length, and prints out a custom statement for a positive or negative review!
* You can use any functions that you've already defined or define any helper functions you want to complete `predict`, but it should just take in a trained net, a text review, and a sequence length.
```
# negative test review
test_review_neg = 'The worst movie I have seen; acting was terrible and I want my money back. This movie had bad acting and the dialogue was slow.'

def predict(net, test_review, sequence_length=200):
    """Classify a raw text review with a trained net and print the verdict."""
    net.eval()

    # text -> integer tokens, then pad/truncate to the requested length
    tokens = tokenize_review(test_review)
    padded = pad_features(tokens, sequence_length)

    # wrap as a tensor so it can flow through the model
    feature_tensor = torch.from_numpy(padded)
    batch_size = feature_tensor.size(0)

    # fresh zeroed hidden state for this (single-review) batch
    h = net.init_hidden(batch_size)

    if train_on_gpu:
        feature_tensor = feature_tensor.cuda()

    output, h = net(feature_tensor, h)

    # round the sigmoid probability to a hard 0/1 class
    pred = torch.round(output.squeeze())
    # printing output value, before rounding
    print('Prediction value, pre-rounding: {:.6f}'.format(output.item()))

    # print custom response
    if pred.item() == 1:
        print("Positive review detected!")
    else:
        print("Negative review detected.")

# positive test review
test_review_pos = 'This movie had the best acting and the dialogue was so good. I loved it.'

# call function
# try negative and positive reviews!
seq_length = 200
predict(net, test_review_neg, seq_length)
```
### Try out test_reviews of your own!
Now that you have a trained model and a predict function, you can pass in _any_ kind of text and this model will predict whether the text has a positive or negative sentiment. Push this model to its limits and try to find what words it associates with positive or negative.
Later, you'll learn how to deploy a model like this to a production environment so that it can respond to any kind of user data put into a web app!
| github_jupyter |
Input and Variables
===================
Now I feel it is time for a really complicated program. Here it is:
```
print("Halt!")
s = input("Who Goes there? ")   # wait for the user to type a line; store it in s
print("You may pass,", s)
```
When **I** ran it here is what **my** screen showed:
Of course when you run the program your screen will look different
because of the `input` statement. When you ran the program
you probably noticed (you did run the program, right?) how you had to
type in your name and then press Enter. Then the program printed out
some more text and also your name. This is an example of input. The
program reaches a certain point and then waits for the user to input
some data that the program can use later.
Of course, getting information from the user would be useless if we didn't have anywhere to put that information and this is where variables come in. In the previous program s is a variable. Variables are like a box that can store some piece of data. Here is a program to show examples of variables:
```
# examples of number and string variables
a = 123.4               # a floating point number
b23 = 'Spam'            # a string (single quotes)
first_name = "Bill"     # a string (double quotes work too)
b = 432                 # an integer
c = a + b               # arithmetic on the two numeric variables
print("a + b is", c)
print("first_name is", first_name)
print("Sorted Parts, After Midnight or", b23)
```
And here is the output:
Variables store data. The variables in the above program are a, b23, `first_name`, b, and c. The two basic types are strings and numbers. Strings are a sequence of letters, numbers and other characters. In this example b23 and `first_name` are variables that are storing strings. Spam, Bill, a + b is, and `first_name is` are the strings in this program. The characters are surrounded by " or '. The other type of variables are numbers.
Okay, so we have these boxes called variables and also data that can go into the variable. The computer will see a line like `first_name = "Bill"` and it reads it as Put the string Bill into the box (or variable) `first_name`. Later on it sees the statement c = a + b and it reads it as Put a + b or 123.4 + 432 or 555.4 into c.
Here is another example of variable usage:
```
a = 1
print(a)
a = a + 1   # the right-hand side is evaluated first, then stored back into a
print(a)
a = a * 2
print(a)
```
And of course here is the output:
Even if it is the same variable on both sides, the computer still reads it as: first find out the data to store, and then find out where the data goes.
One more program before I end this chapter:
```
num = float(input("Type in a Number: "))   # float() converts the input string to a number
str = input("Type in a String: ")          # NOTE: this shadows the built-in `str` type (kept; the tutorial text refers to it)
print("num =", num)
print("num is a ", type(num))
print("num * 2 =", num*2)     # numeric doubling
print("str =", str)
print("str is a ", type(str))
print("str * 2 =", str*2)     # string repetition: two copies joined together
```
The output I got was:
Notice that `num` was gotten with `float(input(...))` while `str` was gotten with `input`. `input` returns a string and the function float converts it to a floating point number. There is also a function `int` that converts a string or a floating point number into an integer.
The second half of the program uses type which tells what a
variable is. Numbers are of type int or
float (which are short for _integer_ and _floating point_
respectively). Strings are of type string. Integers and floats
can be worked on by mathematical functions, strings cannot. Notice
how when Python multiplies a number by an integer the expected thing
happens. However, when a string is multiplied by an integer, the string
is repeated that many times: for example `str * 2 = "HelloHello"`.
The operations with strings do slightly different things than
operations with numbers. Here are some interactive mode examples
to show this in more detail.
```
>>> "This"+" "+"is"+" joined."
'This is joined.'
>>> "Ha, "*5
'Ha, Ha, Ha, Ha, Ha, '
>>> "Ha, "*5+"ha!"
'Ha, Ha, Ha, Ha, Ha, ha!'
>>>
```
Here is the list of some string operations:
| | | | | | | |
| --- | --- | --- | --- | --- | --- | --- |
| Operation | Symbol | Example | | | | |
| Repetition | `*` | `"i"*5 == "iiiii"` | | | | |
| Concatenation | `+` | `"Hello, "+"World!" == "Hello, World!"` | | | | |
Examples
========
Rate\_times.py
```
#This programs calculates rate and distance problems
print("Input a rate and a distance")
rate = float(input("Rate:"))
distance = float(input("Distance:"))
print("Time:", distance/rate)   # time = distance / rate
```
Sample runs:
Area.py
```
#This program calculates the perimeter and area of a rectangle
print("Calculate information about a rectangle")
length = float(input("Length:"))
width = float(input("Width:"))
print("Area", length*width)             # area = length * width
print("Perimeter", 2*length+2*width)    # perimeter = 2l + 2w
```
Sample runs:
temperature.py
```
#Converts Fahrenheit to Celsius
# prompt spelling fixed: "Farenheit" -> "Fahrenheit"
temp = float(input("Fahrenheit temperature:"))
print((temp-32.0)*5.0/9.0)   # C = (F - 32) * 5/9
```
Sample runs:
Exercises
=========
Write a program that gets 2 string variables and 2 integer variables
from the user, concatenates (joins them together with no spaces) and
displays the strings, then multiplies the two numbers on a new line.
| github_jupyter |
# Welly and LAS files
Some preliminaries...
```
import numpy as np
import matplotlib.pyplot as plt
import welly
welly.__version__
import lasio
lasio.__version__
```
## Load a well from LAS
Use the `from_las()` method to load a well by passing a filename as a `str`.
This is really just a wrapper for `lasio` but instantiates a `Header`, `Curve`s, etc.
```
from welly import Well

# Load a LAS file into a Well object (wraps lasio; builds Header/Curve objects).
w = Well.from_las('data/P-129_out.LAS')
```
## Save LAS file
We can write out to LAS with a simple command, passing the file name you want:
```
# Write the well back out to a LAS file at the given path.
w.to_las('data/out.las')
```
Let's just check we get the same thing out of that file as we put in:
```
w.plot()
# Round-trip check: reload the file we just wrote and compare plots.
z = Well.from_las('data/out.las')
z.plot()
z.data['CALI'].plot()   # plot a single curve by mnemonic
```
We don't get the striplog back (right hand side), but everything else looks good.
## Header
Maybe should be called 'meta' as it's not really a header...
```
w.header
w.header.name
w.uwi   # unique well identifier -- this file's header is messy, see the next section
```
What?? OK, we need to load this file more carefully...
## Coping with messy LAS
Some file headers are a disgrace:
# LAS format log file from PETREL
# Project units are specified as depth units
#==================================================================
~Version information
VERS. 2.0:
WRAP. YES:
#==================================================================
~WELL INFORMATION
#MNEM.UNIT DATA DESCRIPTION
#---- ------ -------------- -----------------------------
STRT .M 1.0668 :START DEPTH
STOP .M 1939.13760 :STOP DEPTH
STEP .M 0.15240 :STEP
NULL . -999.25 :NULL VALUE
COMP . Elmworth Energy Corporation :COMPANY
WELL . Kennetcook #2 :WELL
FLD . Windsor Block :FIELD
LOC . Lat = 45* 12' 34.237" N :LOCATION
PROV . Nova Scotia :PROVINCE
UWI. Long = 63* 45'24.460 W :UNIQUE WELL ID
LIC . P-129 :LICENSE NUMBER
CTRY . CA :COUNTRY (WWW code)
DATE. 10-Oct-2007 :LOG DATE {DD-MMM-YYYY}
SRVC . Schlumberger :SERVICE COMPANY
LATI .DEG :LATITUDE
LONG .DEG :LONGITUDE
GDAT . :GeoDetic Datum
SECT . 45.20 Deg N :Section
RANG . PD 176 :Range
TOWN . 63.75 Deg W :Township
```
import welly
import re

def transform_ll(text):
    """Convert a messy 'deg min sec hemisphere' string into decimal degrees."""
    def callback(match):
        # groups: degrees, minutes, seconds, compass letter
        d = match.group(1).strip()
        m = match.group(2).strip()
        s = match.group(3).strip()
        c = match.group(4).strip()
        # west / south hemispheres get a negative degree value
        if c.lower() in ('w', 's') and d[0] != '-':
            d = '-' + d
        return ' '.join([d, m, s])
    # three non-greedy numeric groups separated by arbitrary punctuation,
    # terminated by a compass letter (case-insensitive)
    pattern = re.compile(r""".+?([-0-9]+?).? ?([0-9]+?).? ?([\.0-9]+?).? +?([NESW])""", re.I)
    text = pattern.sub(callback, text)
    # dms2dd folds [deg, min, sec] into a single decimal-degrees float
    return welly.utils.dms2dd([float(i) for i in text.split()])

print(transform_ll("""Lat = 45* 12' 34.237" N"""))
print(transform_ll("""Long = 63* 45'24.460 W"""))

# Remap badly-used header mnemonics onto the fields that actually hold the data.
remap = {
    'LATI': 'LOC',  # Use LOC for the parameter LATI.
    'LONG': 'UWI',  # Use UWI for the parameter LONG.
    'SECT': None,   # Use nothing for the parameter SECT.
    'RANG': None,   # Use nothing for the parameter RANG.
    'TOWN': None,   # Use nothing for the parameter TOWN.
}

funcs = {
    'LATI': transform_ll,  # Pass LATI through this function before loading.
    'LONG': transform_ll,  # Pass LONG through it too.
    'UWI': lambda x: "No name, oh no!"
}

w = Well.from_las('data/P-129_out.LAS', remap=remap, funcs=funcs)
w.location
w.location.crs  # Should be empty.
w.uwi
```
---
© 2022 Agile Scientific, CC BY
| github_jupyter |
```
from sympy import *
from IPython.display import display
```
### Path length of LoS in galactic ellipsoid
We want to calculate the path length of an LoS that intersects an ellipsoid with two identical axes, $1 = \frac{x^2}{a^2} + \frac{y^2}{b^2} + \frac{z^2}{b^2}$, which represents an axisymmetric galaxy.
The LoS is determined by an inclination angle $\theta$ w.r.t. the individual axis and two offset parameters y and z from the center of the galaxy. While z moves perpendiclar to the axis of inclination, the y parameter moves along one of the identical axes. Hence, y defines a plane parallel to x-z, whose intersection with the ellipsoid defines an ellipse which is intersected by the LoS. We first determine the axes of this ellipse and then obtain the path-length of the intersecting LoS within the ellipse.
The y coordinate on the ellipsoid is determined by the offset parameter, hence the ellipse equation can be rewritten as $1 = \frac{x^2}{a^2 \left( 1 - \frac{y^2}{b^2} \right) } + \frac{z^2}{b^2\left( 1 - \frac{y^2}{b^2} \right)}$, determining the axes of the ellipse to be $a_{new} = a \sqrt{\left( 1 - \frac{y^2}{b^2} \right)}$ and $b_{new} = b \sqrt{\left( 1 - \frac{y^2}{b^2} \right)}$
By parametrizing the ellipse $r(\Theta)$ and the LoS $L(t|\theta)$, we determine their intersections and calculate the length of the vector between the two solutions, $|| r(\Theta_1) - r(\Theta_2) ||$.
```
# Theta parametrizes the ellipse, theta is the LoS inclination angle,
# t the LoS parameter, z the perpendicular offset, a/b the ellipse axes.
Th, th, t, z, a, b = symbols("Theta theta t z a b")
r = [ b*sin(Th), a*cos(Th) ]                          # ellipse r(Theta)
L = [ t*sin(th) + z*cos(th), t*cos(th) - z*sin(th) ]  # LoS L(t | theta), offset by z

## find solutions for Th or t, where r = L
t1=solve( r[0]-L[0], t)[0]
t2=solve( r[1]-L[1], t)[0]
display(t1,t2)

#solve(t1-t2,Th)
## ... takes aeons ...
```
works faster when reduced by hand to the following, where $a=a ~\sin(\theta)$ and $b=b ~\cos(\theta)$ (use trigonometric identity with z )
```
# Reduced intersection condition: a*cos(Theta) - b*sin(Theta) + z = 0,
# with a -> a*sin(theta) and b -> b*cos(theta) substituted back afterwards.
sol = solve( a*cos(Th) - b*sin(Th) + z, Th )
print("solutions for Theta")
for s in sol:
    display(s.subs(b,b*cos(th)).subs(a,a*sin(th)))
    print(latex(s.subs(b,b*cos(th)).subs(a,a*sin(th))))

Th1, Th2 = symbols('Theta_1 Theta_2')
rr = [[0,0],[0,0]]
#for i, s in enumerate(sol):
#    ss = s.subs(b,b*cos(th)).subs(a,a*sin(th))
# evaluate the ellipse parametrization at the two intersection angles
for i, ss in enumerate([Th1,Th2]):
    rr[i][0] = r[0].subs(Th, ss)
    rr[i][1] = r[1].subs(Th, ss)
print( "path length of LoS within ellipse" )
# Euclidean distance between the two intersection points
solution = sqrt( (rr[0][0] - rr[1][0] )**2 + (rr[0][1] - rr[1][1] )**2 )
display(simplify(solution))
print(latex(simplify(solution)))
## unluckily, cannot be simplified further..
```
for the final solution, all $a$ and $b$ have to be replaced by the expressions derived in the very beginning,
$a_{new} = a \sqrt{\left( 1 - \frac{y^2}{b^2} \right)}$ and $b_{new} = b \sqrt{\left( 1 - \frac{y^2}{b^2} \right)}$.
```
## test results !!! z has to be small enough to be within the ellipse, otherwise solution is complex
## !!! does not work if both theta=0 and z=0 exactly
# substitute the two symbolic Theta solutions, then plug in concrete numbers as a sanity check
solution_ = solution.subs(Th1,sol[0].subs(b,b*cos(th)).subs(a,a*sin(th))).subs(Th2,sol[1].subs(b,b*cos(th)).subs(a,a*sin(th)))
solution_.subs(th,0).subs(a,1).subs(b,40).subs(z,0.01)
```
Here we provide the implementation to be used in the Monte-Carlo simulation performed by Luiz F. S. Rodrigues.
```
def PathLength( inclination=0, z_off=0., y_off=0, major_axis=40, minor_axis=1 ): ## inclination in rad, rest in kpc
    """Chord length of a line of sight through an axisymmetric ellipsoid.

    The y offset selects a plane parallel to x-z; both axes of the resulting
    ellipse shrink by sqrt(1 - (y/major)^2).  The two intersection angles of
    the LoS with that ellipse follow the closed-form sympy solution derived
    above; the chord is the distance between the two intersection points.
    NOTE(review): z_off must keep the LoS inside the ellipse, otherwise the
    square root argument goes negative.
    """
    shrink = np.sqrt(1 - (y_off / major_axis) ** 2)
    a = minor_axis * shrink
    b = major_axis * shrink
    t = inclination

    # discriminant of the intersection equation, then the two solution angles
    root = np.sqrt((a * np.sin(t)) ** 2 + (b * np.cos(t)) ** 2 - z_off ** 2)
    denom = a * np.sin(t) - z_off
    theta_1 = -2 * np.arctan2(b * np.cos(t) - root, denom)
    theta_2 = -2 * np.arctan2(b * np.cos(t) + root, denom)

    # Euclidean distance between the two intersection points on the ellipse
    dx = np.cos(theta_1) - np.cos(theta_2)
    dz = np.sin(theta_1) - np.sin(theta_2)
    return np.sqrt(a ** 2 * dx ** 2 + b ** 2 * dz ** 2)

def LoSHostProbability( inclination=0, z_off=0., y_off=0, major_axis=40, minor_axis=1 ): ## inclination in rad, rest in kpc
    """Path length normalized by the full major-axis diameter (range 0..1)."""
    chord = PathLength( inclination=inclination, z_off=z_off, y_off=y_off, major_axis=major_axis, minor_axis=minor_axis )
    return chord / (2 * major_axis)
```
check results
```
import matplotlib.pyplot as plt, numpy as np

# Sweep the inclination from ~0 to pi and plot the normalized path length;
# reference lines at 1/40 (face-on limit) and 1 (edge-on limit).
thetas = np.linspace(0.0001, np.pi, 91)
sols = LoSHostProbability( inclination=thetas, z_off=0.001, y_off=0., major_axis=40, minor_axis=1 )
plt.plot(thetas, sols)
plt.hlines([1./40,1], 0., 3.14)
```
Valid. The maximum is reached for $\theta = \frac{\pi}{2}$, i.e. an edge-on galaxy, while for $\theta \to 0$ or $\pi$, i.e. a face-on galaxy, the minimum is approached quickly, as expected.
| github_jupyter |
# Candlestick Harami Cross
https://www.investopedia.com/terms/h/haramicross.asp
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import talib
import warnings
warnings.filterwarnings("ignore")

# yahoo finance is used to fetch data
import yfinance as yf
yf.pdr_override()   # route pandas-datareader downloads through yfinance

# input: ticker symbol and date range to download
symbol = 'AMD'
start = '2018-01-01'
end = '2021-10-08'

# Read data
df = yf.download(symbol,start,end)

# View Columns
df.head()
```
## Candlestick with Harami Cross
```
from matplotlib import dates as mdates
import datetime as dt

# Working copy with an up-day flag (close above open) used to color volume bars.
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
# candlestick_ohlc needs matplotlib date numbers, not pandas Timestamps
dfc['Date'] = pd.to_datetime(dfc['Date'])
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()

from mplfinance.original_flavor import candlestick_ohlc

# Candlestick chart with a volume overlay on a twin y-axis.
fig = plt.figure(figsize=(14,10))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax.grid(True, which='both')
ax.minorticks_on()
axv = ax.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())   # keep the volume bars in the lower third
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')

# TA-Lib Harami Cross detector: nonzero entries mark pattern days.
harami_cross = talib.CDLHARAMICROSS(df['Open'], df['High'], df['Low'], df['Close'])
harami_cross = harami_cross[harami_cross != 0]
df['harami_cross'] = talib.CDLHARAMICROSS(df['Open'], df['High'], df['Low'], df['Close'])
df.loc[df['harami_cross'] !=0]
df['Adj Close'].loc[df['harami_cross'] !=0]
df['Adj Close'].loc[df['harami_cross'] !=0].index
harami_cross
harami_cross.index
df

# Same chart, with markers on the days where the pattern fired.
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax.grid(True, which='both')
ax.minorticks_on()
axv = ax.twinx()
ax.plot_date(df['Adj Close'].loc[df['harami_cross'] !=0].index, df['Adj Close'].loc[df['harami_cross'] !=0],
             'Dc', # marker style 'D' (diamond), color 'c' (cyan)
             fillstyle='none', # marker is not filled (with color)
             ms=10.0)
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
## Plot Certain dates
```
# Restrict to December 2020 and rebuild the plotting frame.
df = df['2020-12-01':'2020-12-31']
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = pd.to_datetime(dfc['Date'])
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()

# Gold-themed candlestick chart with pattern markers for the December window.
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
ax.set_facecolor('gold')
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='darkgoldenrod', colordown='cornsilk', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
#ax.grid(True, which='both')
#ax.minorticks_on()
axv = ax.twinx()
ax.plot_date(df['Adj Close'].loc[df['harami_cross'] !=0].index, df['Adj Close'].loc[df['harami_cross'] !=0],
             'dg', # marker style 'd' (thin diamond), color 'g' (green)
             fillstyle='none', # marker is not filled (with color)
             ms=20.0)
colors = dfc.VolumePositive.map({True: 'darkgoldenrod', False: 'cornsilk'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
# Highlight Candlestick
```
from matplotlib.dates import date2num
from datetime import datetime

# Shade date ranges around the detected patterns instead of marking single points.
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
#ax.grid(True, which='both')
#ax.minorticks_on()
axv = ax.twinx()
# green span = bullish harami cross, red span = bearish (dates hard-coded for AMD Dec 2020)
ax.axvspan(date2num(datetime(2020,12,10)), date2num(datetime(2020,12,12)),
           label="Harami Cross Bullish",color="green", alpha=0.3)
ax.axvspan(date2num(datetime(2020,12,23)), date2num(datetime(2020,12,25)),
           label="Harami Cross Bearish",color="red", alpha=0.3)
ax.legend()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
| github_jupyter |
```
%matplotlib inline
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import uqer
from uqer import DataAPI as api
from QuantLib import *
plt.style.use('fivethirtyeight')
# SECURITY: a live API token is hard-coded in this notebook -- it should be
# revoked and loaded from an environment variable or config file instead.
uqer.Client(token='f1b9bea1d0b4e489c5ab9b69c3e2326a1bee6057af858067dbd1546453f428b2')
```
## 0. Market Data
-------------------
```
# Fetch daily closing prices for rubber future RU1801 over the option's
# life (2017-08-25 to 2017-11-30) and plot them.
ru1801 = api.MktFutdGet(ticker='RU1801', beginDate='20170825', endDate='20171130', field='tradeDate,closePrice').set_index('tradeDate')
ru1801['closePrice'] = ru1801['closePrice'].astype(float)
ru1801.plot(figsize=(14, 7))
```
## 1. Parameters
------------
```
# Contract and model parameters for a discretely monitored arithmetic-average
# Asian put on RU1801 with daily fixings from start to maturity.
start_date = Date(31, 8, 2017)
maturity_date = Date(30, 11, 2017)
freq = Period(1, Days)
volatility = 0.27
option_type = Option.Put
spot_price = ru1801.loc['2017-08-31', 'closePrice']
# Strike: mean close over the trading week preceding the start date.
strike_price = np.mean(ru1801.loc['2017-08-25': '2017-08-31', 'closePrice'])
risk_free_rate = 0.
dividend_rate = 0.
day_count = Actual365Fixed()
calendar = China(China.SSE)
payoff = PlainVanillaPayoff(option_type, strike_price)
exercise = EuropeanExercise(maturity_date)
# Business-day fixing schedule; the start date itself is dropped.
fixing_scheduler = Schedule(start_date, maturity_date, freq, calendar, Following, Following, DateGeneration.Backward, False)
fixing_dates = [d for d in fixing_scheduler][1:]
calculation_date = start_date
Settings.instance().evaluationDate = calculation_date
# Relinkable handle so the spot quote can be bumped later for FD deltas.
spot_handle = RelinkableQuoteHandle(SimpleQuote(spot_price))
flat_ts = YieldTermStructureHandle(FlatForward(calculation_date, risk_free_rate, day_count))
dividend_yield = YieldTermStructureHandle(FlatForward(calculation_date, dividend_rate, day_count))
flat_vol_ts = BlackVolTermStructureHandle(BlackConstantVol(calculation_date, calendar, volatility, day_count))
bsm_process = BlackScholesMertonProcess(spot_handle, dividend_yield, flat_ts, flat_vol_ts)
# No fixings observed yet: running accumulator 0.0, past fixings count 0.
option = DiscreteAveragingAsianOption(Average.Arithmetic, 0., 0, fixing_dates, payoff, exercise)
```
## 2. FDM Arithmetic Pricing Engine
------------
```
# Price via QuantLib's finite-difference Asian engine.
# The three integers are grid sizes — presumably time/asset/average grids;
# confirm against the QuantLib FdBlackScholesAsianEngine documentation.
engine = FdBlackScholesAsianEngine(bsm_process, 200, 100, 100)
option.setPricingEngine(engine)
fdm_price = option.NPV()
fdm_delta = option.delta()
print("BSM ASIAN FDM price is {0:.4f}".format(fdm_price))
print("BSM ASIAN FDM delta is {0:.4f}".format(fdm_delta))
```
### 3. MC Arithmetic Pricing Engine
------------------------
```
# Monte Carlo engine: low-discrepancy ('ld') sequences with a control
# variate, 50k required samples.
engine = MCDiscreteArithmeticAPEngine(bsm_process, 'ld', controlVariate=True, requiredSamples=50000)
# NOTE(review): %%time is a cell magic and must be the first line of a cell;
# placed mid-cell like this it will not work as intended.
%%time
option.setPricingEngine(engine)
mc_price = option.NPV()
# Central finite-difference delta: bump spot by +/- 0.1% and reprice.
price_delta = 0.001 * spot_price
new_quote1 = SimpleQuote(spot_price + price_delta)
new_quote2 = SimpleQuote(spot_price - price_delta)
spot_handle.linkTo(new_quote1)
price_bump_plus = option.NPV()
spot_handle.linkTo(new_quote2)
price_bump_minus = option.NPV()
mc_delta = (price_bump_plus - price_bump_minus) / 2. / price_delta
print("BSM ASIAN MC price is {0:.4f}".format(mc_price))
print("BSM ASIAN MC delta is {0:.4f}".format(mc_delta))
```
## 3. Time Series NPV / Delta Analysis
----------------------
### 3.1 FDM Method
```
%%time
# Re-price the option on each fixing date with the FDM engine, feeding in
# the realized fixings to date via running_accumulator / past_fixings.
# NOTE(review): loop-body indentation appears to have been lost in this
# notebook export; restore it before running.
monitor_dates = [start_date]
fdm_npvs = [fdm_price]
fdm_deltas = [fdm_delta]
for i, date in enumerate(fixing_dates[:-1]):
past_fixings = i + 1
# Sum of realized closes from the first fixing through the current date.
running_accumulator = np.sum(ru1801.loc[fixing_dates[0].ISO():date.ISO(), 'closePrice'])
# Model settings
calculation_date = date
Settings.instance().evaluationDate = calculation_date
spot_handle = RelinkableQuoteHandle(SimpleQuote(ru1801.loc[date.ISO(), 'closePrice']))
flat_ts = YieldTermStructureHandle(FlatForward(calculation_date, risk_free_rate, day_count))
dividend_yield = YieldTermStructureHandle(FlatForward(calculation_date, dividend_rate, day_count))
flat_vol_ts = BlackVolTermStructureHandle(BlackConstantVol(calculation_date, calendar, volatility, day_count))
bsm_process = BlackScholesMertonProcess(spot_handle, dividend_yield, flat_ts, flat_vol_ts)
option = DiscreteAveragingAsianOption(Average.Arithmetic, running_accumulator, past_fixings, fixing_dates[i+1:], payoff, exercise)
engine = FdBlackScholesAsianEngine(bsm_process, 200, 100, 100)
option.setPricingEngine(engine)
monitor_dates.append(date)
fdm_npvs.append(option.NPV())
fdm_deltas.append(option.delta())
```
### 3.2 MC Method
```
%%time
mc_npvs = [mc_price]
mc_deltas = [mc_delta]
for i, date in enumerate(fixing_dates[:-1]):
past_fixings = i + 1
running_accumulator = np.sum(ru1801.loc[fixing_dates[0].ISO():date.ISO(), 'closePrice'])
# Model settings
calculation_date = date
Settings.instance().evaluationDate = calculation_date
spot_handle = RelinkableQuoteHandle(SimpleQuote(ru1801.loc[date.ISO(), 'closePrice']))
flat_ts = YieldTermStructureHandle(FlatForward(calculation_date, risk_free_rate, day_count))
dividend_yield = YieldTermStructureHandle(FlatForward(calculation_date, dividend_rate, day_count))
flat_vol_ts = BlackVolTermStructureHandle(BlackConstantVol(calculation_date, calendar, volatility, day_count))
bsm_process = BlackScholesMertonProcess(spot_handle, dividend_yield, flat_ts, flat_vol_ts)
option = DiscreteAveragingAsianOption(Average.Arithmetic, running_accumulator, past_fixings, fixing_dates[i+1:], payoff, exercise)
engine = MCDiscreteArithmeticAPEngine(bsm_process, 'ld', controlVariate=True, requiredSamples=50000)
option.setPricingEngine(engine)
option.setPricingEngine(engine)
mc_npvs.append(option.NPV())
price_delta = 0.001 * spot_handle.value()
new_quote1 = SimpleQuote(spot_handle.value() + price_delta)
new_quote2 = SimpleQuote(spot_handle.value() - price_delta)
spot_handle.linkTo(new_quote1)
price_bump_plus = option.NPV()
spot_handle.linkTo(new_quote2)
price_bump_minus = option.NPV()
mc_delta = (price_bump_plus - price_bump_minus) / 2. / price_delta
mc_deltas.append(mc_delta)
```
### 3.3 Graph
```
# Assemble NPV and delta time series into a single frame indexed by date
# and plot NPVs on a secondary axis so the deltas remain readable.
dates_series = [d.to_date() for d in monitor_dates]
# Raw strings for the LaTeX labels: '\D' is an invalid escape sequence and
# triggers a DeprecationWarning in plain string literals.
df = pd.DataFrame({'FDM NPV': np.array(fdm_npvs),
                   'MC NPV': np.array(mc_npvs),
                   r'FDM $\Delta$': fdm_deltas,
                   r'MC $\Delta$': mc_deltas},
                  index=dates_series)
df.plot(figsize=(14, 7), secondary_y=['FDM NPV','MC NPV'],
        title=r'RU1801 Asian Option NPV & $\Delta$: 2017-09-01 ~ 2017-11-30')
```
| github_jupyter |
### How to correlate between IF Ab and transcriptome gene signals?
#### This script recreates figure Fig2C in SM-Omics: An automated platform for high-throughput spatial multi-omics; doi: https://doi.org/10.1101/2020.10.14.338418
Expected run time: 1-2 mins \
Expected results: gene-to-protein correlation plot, spatial gene and protein expression plot
Load libraries
```
import os
import re
import glob
import matplotlib
import matplotlib.pyplot as plt
import pickle
import pandas as pd
from sklearn.preprocessing import minmax_scale
from scipy.stats import linregress, spearmanr
import numpy as np
import math
import seaborn as sns
%matplotlib inline
# Publication-style defaults: thin axes/ticks, Arial, and fonttype 42 so
# text in PDF/PS output stays editable.
matplotlib.rcParams.update({'axes.linewidth': 0.25,
'xtick.major.size': 2,
'xtick.major.width': 0.25,
'ytick.major.size': 2,
'ytick.major.width': 0.25,
'pdf.fonttype': 42,
'font.sans-serif': 'Arial',
'ps.fonttype':42})
# raw data files needed to run are available at SCP: https://singlecell.broadinstitute.org/single_cell/study/SCP979/
# please download: lambdas_pmeans_genes_NeuN.tsv,lambdas_pmeans_ifs_NeuN.tsv, coordinate_registration_NeuN.p
# also download annotation files: 10015CN78*annotations.txt
```
# mRNA
```
# Load the posterior-mean lambdas for the gene signal and min-max scale the
# selected NeuN gene (Rbfox3) per sample.
# Read file
path = '../../smomics_data'
filename = os.path.join(path, 'lambdas_pmeans_genes_NeuN.tsv')
lambda_posterior_means = pd.read_csv(filename, index_col=0, sep='\t', header=[0,1])
# Change names on genes, ie remove '_' suffix from the index labels
nwe = []
nm = lambda_posterior_means.index
for item in nm:
    nwe.append(str(item).split("_")[0])
lambda_posterior_means.index = nwe
# Exponentiate the shifted values (lambdas appear to be on a log scale —
# confirm against the model that produced the tsv).
lambda_posterior_means = lambda_posterior_means.astype(float)
lambda_posterior_means = np.exp(lambda_posterior_means-1)
# Select NeuN gene ie. Rbfox3
# FIX: -1 for max_colwidth was deprecated in pandas 1.0 and later removed;
# None is the supported way to say "no limit".
pd.set_option('display.max_colwidth', None)
gene = 'Rbfox3'
# reset index
lambda_gene = lambda_posterior_means.loc[gene, :].to_frame().reset_index()
# Get name of file, for plotting
lambda_gene['name'] = lambda_gene['file'].str.split('/').str[-1].str.split('_stdata').str[0]
# Scales within each sample: '.all' is scaled across all spots, '.scaled'
# within each sample group.
gene_scale = lambda_gene.copy()
gene_scale[['Rbfox3.all']] = gene_scale[['Rbfox3']].transform(lambda x: minmax_scale(x))
gene_scale['Rbfox3.scaled'] = gene_scale.groupby('name')['Rbfox3.all'].transform(lambda x: minmax_scale(x))
```
# Ab
```
# Load the posterior-mean lambdas for the immunofluorescence (antibody)
# signal and min-max scale the NeuN channel per sample.
# Read file
filename = os.path.join(path, 'lambdas_pmeans_ifs_NeuN.tsv')
lambda_posterior_means = pd.read_csv(filename, index_col=0, sep='\t', header=[0,1])
# Change names on markers, ie remove '_' suffix from the index labels
nwe = []
nm = lambda_posterior_means.index
for item in nm:
    nwe.append(str(item).split("_")[0])
lambda_posterior_means.index = nwe
# Exponentiate the shifted values (lambdas appear to be on a log scale —
# confirm against the model that produced the tsv).
lambda_posterior_means = lambda_posterior_means.astype(float)
lambda_posterior_means = np.exp(lambda_posterior_means-1)
# Select Neun Gene name
# FIX: -1 for max_colwidth was deprecated in pandas 1.0 and later removed;
# None is the supported way to say "no limit".
pd.set_option('display.max_colwidth', None)
gene = 'Neun'
# reset index
lambda_ab = lambda_posterior_means.loc[gene, :].to_frame().reset_index()
# Get name of file, for plotting
lambda_ab['name'] = lambda_ab['file'].str.split('/').str[-1].str.split('_stdata').str[0]
# Scales within each sample: '.all' across all spots, '.scaled' per sample.
abfile_scale = lambda_ab.copy()
abfile_scale[['Neun.all']] = abfile_scale[['Neun']].transform(lambda x: minmax_scale(x))
abfile_scale['NeuN.scaled'] = abfile_scale.groupby('name')['Neun.all'].transform(lambda x: minmax_scale(x))
```
# Annotation
```
# Load the per-spot region annotation files for every sample, normalizing
# decimal spot coordinates to rounded int 'x_y' keys, then concatenate.
# NOTE(review): loop-body indentation appears to have been lost in this
# notebook export; restore it before running.
annot_path = path
annot_list = []
for stdata in list(set(abfile_scale['name'].tolist())):
well = stdata.split('_stdata')[0]
filename = os.path.join(annot_path, well + '_annotations.txt')
annot_file = pd.read_csv(filename, sep = '\t')
# Remove 'Unknown'
#file = file[file['value'] != 'Unknown']
# Change to int if spot coord are in decimals
if len(annot_file['x_y'][0].split('.')) == 3: # Spot coord in decimals
# Detect which delimiter separates the spot coord
# NOTE(review): `delimi` is detected but never used below — the rebuilt
# key always joins with '_'.
if len(annot_file['x_y'][0].split('_')) == 2:
delimi = '_'
elif len(annot_file['x_y'][0].split('x')) == 2:
delimi = 'x'
else:
print('Unknown spot coordinate delimiter.')
# Round up
annot_file['x'] = annot_file['x'].round(0)
annot_file['y'] = annot_file['y'].round(0)
# Make columns to int type
annot_file['x'] = annot_file['x'].astype(int)
annot_file['y'] = annot_file['y'].astype(int)
# Make columns to string type
annot_file['x'] = annot_file['x'].astype(str)
annot_file['y'] = annot_file['y'].astype(str)
# Make a new columnwith the rounded values
annot_file['spot_coord'] = annot_file[['x', 'y']].apply(lambda z: '_'.join(z), axis=1)
annot_file.drop(['x_y'], axis=1, inplace=True)
annot_file.rename(columns = {'spot_coord':'x_y'}, inplace=True)
# Keep certain columns:
annot_file = annot_file.loc[:, ['image', 'x_y', 'value']]
annot_list.append(annot_file)
# Concat
annotdf = pd.concat(annot_list)
```
# Merge gene + ab + annotation
```
# Merge gene lambdas with antibody lambdas on sample name + spot coordinate,
# attach per-spot region annotations, then correlate region means.
lambdadfTMP = pd.merge(gene_scale, abfile_scale, how='left', on=['name','coordinate'])
lambdadf = pd.merge(lambdadfTMP, annotdf, how='left', left_on=['name','coordinate'], right_on=['image', 'x_y'])
# Take mean per region per sample
# FIX: select the two columns with a list (double brackets); the bare
# `['Neun', 'Rbfox3']` tuple form was deprecated and removed in pandas 2.0.
trans = lambdadf.groupby(['name', 'value'])[['Neun', 'Rbfox3']].mean()
trans.reset_index(inplace=True)
# Scale min max per sample
trans[['NeuN.norm', 'Rbfox3.norm']] = trans.groupby(['name'])[['Neun', 'Rbfox3']].transform(lambda x: minmax_scale(x))
trans = trans.dropna(axis=0, how='any')
trans.reset_index(drop=True)
# selects 3 samples (drops the excluded array 10015CN78_C2)
trans = trans[~trans.name.isin(['10015CN78_C2'])]
# Plot
# initialize a figure
fig, ax = plt.subplots(figsize=(8, 8))
x = 'Rbfox3.norm'
y = 'NeuN.norm'
hue = 'value'
style = 'name'
sns.scatterplot(x=x, y=y, data=trans, ax=ax, hue=hue, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(trans[x], trans[y])
coef = np.polyfit(trans[x],trans[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(trans[x], poly1d_fn(trans[x]), c="k")
# +/- 1 and 2 standard-error bands around the fitted line
ax.plot(trans[x], poly1d_fn(trans[x])+1*std_err, "--", c="cyan",)
ax.plot(trans[x], poly1d_fn(trans[x])-1*std_err, "--",c="cyan", )
ax.plot(trans[x], poly1d_fn(trans[x])+2*std_err, "--",c="grey", )
ax.plot(trans[x], poly1d_fn(trans[x])-2*std_err, "--",c="grey", )
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
# Spearman's rank correlation (annotated on the figure)
spear, pspear = spearmanr(trans[x], trans[y])
print("Spearman's r: " + str(round(spear, 2)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 2)), transform=ax.transAxes, fontsize=15)
ax.text(0.05, 0.8, "p-value: " + str(round(pspear, 8)), transform=ax.transAxes, fontsize=15)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig("../../figures/Neun_IFvsGENE_Neun.pdf", transparent=True, bbox_inches = 'tight',
pad_inches = 0, dpi=1200, rasterized=True)
# Take mean per region per sample (unnormalized this time)
trans = lambdadf.groupby(['name', 'value'])[['Neun', 'Rbfox3']].mean()
trans.reset_index(inplace=True)
# Scale min max per sample
#trans[['NeuN.norm', 'Rbfox3.norm']] = trans.groupby(['name'])[['Neun', 'Rbfox3']].transform(lambda x: minmax_scale(x))
trans = trans.dropna(axis=0, how='any')
trans.reset_index(drop=True)
# selects 3 samples
trans = trans[~trans.name.isin(['10015CN78_C2'])]
# Coefficient of variation (%) per region across samples
100*trans.groupby('value').std()/trans.groupby('value').mean()
```
# Plotting overlaps
```
# Load registration file mapping array spot coordinates to registered
# (common-space) coordinates, then plot the antibody signal spatially.
reg_file = os.path.join(path, 'coordinate_registration_NeuN.p')
# Trusted local file only — pickle must never be used on untrusted input.
registered_coordinates_dict = pickle.load(open(reg_file,'rb'), encoding='bytes')
abfile_scale
#Plotting overlapping ab
# Path can be found in registration coordinate file
coord_path = os.path.dirname(list(registered_coordinates_dict)[0])
# NOTE(review): variables_of_interest is never used below.
variables_of_interest = ['M1']
# use the same color scale across the arrays
#vmin,vmax = np.percentile(abfile_scale['Neun.all'].values,0.00),np.percentile(abfile_scale['Neun'].values,10)
vmin = 0.00
vmax = 0.25
# initialize a figure
fig = matplotlib.pyplot.figure(figsize=(30, 10))
# initialize axes
# NOTE(review): loop-body indentation appears lost in this export.
ax = []
for row_idx in range(0,1):
for col_idx in range(0,1):
axes = plt.subplot2grid((1, 1), (row_idx, col_idx))
ax.append(axes)
# adjust the axes
for ax_idx in range(0,len(ax)):
ax[ax_idx].set_aspect('equal')
ax[ax_idx].set_xticks([])
ax[ax_idx].set_yticks([])
ax[ax_idx].set_xlim([-18,18])
ax[ax_idx].set_ylim([-18,18])
cbmap = [None for _ in range(0,len(ax))]
# loop over the count files
# NOTE(review): inside this loop ax_idx is the stale value left over from the
# loop above — this only works because there is exactly one axis.
for sample, group in abfile_scale.groupby('name'):
count_file = os.path.join(coord_path, sample + '_stdata_adjusted.tsv')
# for each ST spot on the array, let us get its registered coordinate if available
coordinates_found = np.zeros(len(group['coordinate'].tolist())) # indicator of being registered
coordinates = []
for coord_idx,coordinate in enumerate(group['coordinate'].tolist()):
if coordinate in registered_coordinates_dict[count_file]:
coordinates_found[coord_idx] = 1
coordinates.append(list(map(float,registered_coordinates_dict[count_file][coordinate].split('_'))))
coordinates = np.array(coordinates)
x_coordinates = coordinates[:,0]
y_coordinates = coordinates[:,1]
# rotate coordinates by alpha degrees (2D rotation matrix)
alpha = 160
x_coor_rot = math.cos(math.radians(alpha))* x_coordinates-math.sin(math.radians(alpha))*y_coordinates
y_coor_rot = math.sin(math.radians(alpha))* x_coordinates+math.cos(math.radians(alpha))*y_coordinates
# visualize the registered ST spots
cbmap[ax_idx] = ax[ax_idx].scatter(x_coor_rot,y_coor_rot,
s=150, c=group['Neun.all'][coordinates_found > 0],
cmap='magma',vmin=vmin,vmax=vmax,marker='o')
# add colorbars
for ax_idx in range(0,len(ax)):
if cbmap[ax_idx] is not None:
cbar = matplotlib.pyplot.colorbar(cbmap[ax_idx],ax=ax[ax_idx],shrink=0.8)
plt.savefig("../../figures/Neun_IF_overlap.pdf", transparent=True, bbox_inches = 'tight',
pad_inches = 0, dpi=1200, rasterized=True)
#Plotting overlapping ab
# Same spatial plot as above, but colored by the Rbfox3 gene signal
# instead of the antibody signal.
# Path can be found in registration coordinate file
coord_path = os.path.dirname(list(registered_coordinates_dict)[0])
# NOTE(review): variables_of_interest is never used below.
variables_of_interest = ['M1']
# use the same color scale across the arrays
# (the percentile-derived vmin/vmax are immediately overwritten below)
vmin,vmax = np.percentile(gene_scale['Rbfox3'].values,0.1),np.percentile(gene_scale['Rbfox3'].values,100)
vmin = 0.00
vmax = 0.25
# initialize a figure
fig = matplotlib.pyplot.figure(figsize=(30, 10))
# initialize axes
# NOTE(review): loop-body indentation appears lost in this export.
ax = []
for row_idx in range(0,1):
for col_idx in range(0,1):
axes = plt.subplot2grid((1, 1), (row_idx, col_idx))
ax.append(axes)
# adjust the axes
for ax_idx in range(0,len(ax)):
ax[ax_idx].set_aspect('equal')
ax[ax_idx].set_xticks([])
ax[ax_idx].set_yticks([])
ax[ax_idx].set_xlim([-18,18])
ax[ax_idx].set_ylim([-18,18])
cbmap = [None for _ in range(0,len(ax))]
# loop over the count files
# NOTE(review): ax_idx here is the stale value from the loop above — this
# only works because there is exactly one axis.
for sample, group in gene_scale.groupby('name'):
count_file = os.path.join(coord_path, sample + '_stdata_adjusted.tsv')
# for each ST spot on the array, let us get its registered coordinate if available
coordinates_found = np.zeros(len(group['coordinate'].tolist())) # indicator of being registered
coordinates = []
for coord_idx,coordinate in enumerate(group['coordinate'].tolist()):
if coordinate in registered_coordinates_dict[count_file]:
coordinates_found[coord_idx] = 1
coordinates.append(list(map(float,registered_coordinates_dict[count_file][coordinate].split('_'))))
coordinates = np.array(coordinates)
x_coordinates = coordinates[:,0]
y_coordinates = coordinates[:,1]
# rotate coordinates by alpha degrees (2D rotation matrix)
alpha = 160
x_coor_rot = math.cos(math.radians(alpha))* x_coordinates-math.sin(math.radians(alpha))*y_coordinates
y_coor_rot = math.sin(math.radians(alpha))* x_coordinates+math.cos(math.radians(alpha))*y_coordinates
# visualize the registered ST spots
cbmap[ax_idx] = ax[ax_idx].scatter(x_coor_rot,y_coor_rot,
s=150, c=group['Rbfox3.all'][coordinates_found > 0],
cmap='magma',vmin=vmin,vmax=vmax,marker='o')
# add colorbars
for ax_idx in range(0,len(ax)):
if cbmap[ax_idx] is not None:
cbar = matplotlib.pyplot.colorbar(cbmap[ax_idx],ax=ax[ax_idx],shrink=0.8)
plt.savefig("../../figures/Neun_gene_overlap.pdf", transparent=True, bbox_inches = 'tight',
pad_inches = 0, dpi=1200, rasterized=True)
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Predict house prices: regression
This file has moved.
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/basic_regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
| github_jupyter |
## Application: Wage and Education
#### Variables:
+ work - participation in 1975 ?
+ hoursw - wife's hours of work in 1975
+ child6 - number of children less than 6 years old in household
+ child618 - number of children between ages 6 and 18 in household
+ agew - wife's age
+ educw - wife's educational attainment, in years
+ hearnw - wife's average hourly earnings, in 1975 dollars
+ wagew - wife's wage reported at the time of the 1976 interview (not the 1975 estimated wage)
+ hoursh - husband's hours worked in 1975
+ ageh - husband's age
+ educh - husband's educational attainment, in years
+ wageh - husband's wage, in 1975 dollars
+ income - family income, in 1975 dollars
+ educwm - wife's mother's educational attainment, in years
+ educwf - wife's father's educational attainment, in years
+ unemprate - unemployment rate in county of residence, in percentage points
+ city - lives in large city (SMSA) ?
+ experience - actual years of wife's previous labor market experience
#### Source:
Mroz - Labor Supply Data
https://vincentarelbundock.github.io/Rdatasets/datasets.html
Mroz, T. (1987) “The sensitivity of an empirical model of married women's hours of work to economic and statistical assumptions”, Econometrica, 55, 765-799.
```
# Notebook setup: numerics, statsmodels datasets, PyMC3/ArviZ for Bayesian
# inference, and bokeh for interactive plotting.
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.api import datasets
import pymc3 as pm
import arviz as az
from IPython.display import display
from numpy.linalg import lstsq
from bokeh.io import show, output_notebook
from bokeh.plotting import figure
# Render bokeh figures inline in the notebook.
output_notebook()
```
#### Data retrieval
`datasets.get_rdataset` retrieves data from `Rdatasets` (https://vincentarelbundock.github.io/Rdatasets/).
```Python
dataset.get_rdataset(dataset_name, package_name)
```
```
# Fetch the Mroz labor-supply dataset from Rdatasets via statsmodels.
mroz = datasets.get_rdataset('Mroz', 'Ecdat')
data = mroz.data
wage = data['wagew'].values
educ = data['educw'].values
# Keep only observations with a positive reported wage; center schooling
# at 12 years (high-school graduation).
y = wage[wage > 0]
x = educ[wage > 0] - 12
# OLS fit of y on [1, x]: ols_b = [intercept, slope].
ols_b = lstsq(np.vstack((np.ones(x.size), x)).T, y, rcond=None)[0]
display(mroz.data)
x_range = np.array([1.1*np.min(x), 1.1*np.max(x)])
y_range = np.array([ols_b[0] + ols_b[1]*x_range[0], ols_b[0] + ols_b[1]*x_range[1]])
# NOTE(review): plot_width/plot_height were renamed width/height in Bokeh 3;
# this call requires Bokeh 2.x.
p = figure(plot_width=400, plot_height=300, toolbar_location=None, title='Relationship Between Wage And Years Of Schooling')
p.line(x_range, y_range, line_color='navy', line_width=2, legend_label='Regression line (OLS)')
p.cross(x, y, color='firebrick', size=8, legend_label='Observations')
p.xaxis.axis_label = 'Years of schooling after high school'
p.yaxis.axis_label = 'Hourly wage'
p.legend.location = 'top_left'
p.legend.click_policy = 'hide'
p.legend.border_line_color = p.xgrid.grid_line_color = p.ygrid.grid_line_color = p.outline_line_color = None
show(p)
# Bayesian simple linear regression: Normal priors on intercept (a) and
# slope (b), Inverse-Gamma prior on the noise variance sigma2.
# NOTE(review): with-block indentation appears lost in this export.
mu_a = 0.0
mu_b = 0.0
omega_a = 10.0
omega_b = 10.0
nu0 = 0.02
lam0 = 0.02
regresssion_normal_invgamma = pm.Model()
with regresssion_normal_invgamma:
sigma2 = pm.InverseGamma('sigma2', alpha=0.5*nu0, beta=0.5*lam0)
a = pm.Normal('a', mu=mu_a, sigma=omega_a)
b = pm.Normal('b', mu=mu_b, sigma=omega_b)
y_hat = a + b * x
likelihood = pm.Normal('y', mu=y_hat, sigma=pm.math.sqrt(sigma2), observed=y)
# Sample 4 chains of 5000 draws after 1000 tuning steps.
n_draws = 5000
n_chains = 4
n_tune = 1000
with regresssion_normal_invgamma:
trace = pm.sample(draws=n_draws, chains=n_chains, tune=n_tune, random_seed=123, return_inferencedata=False)
results = az.summary(trace)
results.index = ['constant', 'education', '$\\sigma^2$']
display(results)
az.plot_trace(trace)
az.plot_posterior(trace, kind='kde', point_estimate='mean')
plt.show()
```
| github_jupyter |
# Reading Hydro echo files into Pandas DataFrame
hydro (and qual) compile the input files and output an echo file that contains all the input data that goes into the model run. This is a very useful file, as it is a direct representation of the input as seen by the model. This is especially important because the input system — with its layering, overrides, and include features — can be quite complex, and it can sometimes be hard to see what actually makes it into the model run.
To create an echo file of the inputs, refer to the hydro and qual documentation. Below is an example:
```
hydro -e hydro_main.inp
qual -e qual_main.inp
```
The notebook here shows how to use the pydsm parser to read this echo file into a dictionary of pandas DataFrame objects
```
import pandas as pd
import io
import re
# main import
import pydsm
from pydsm.input import read_input,write_input
```
## Read input into a dictionary of pandas DataFrames
DSM2 input consists of tables that have a name, e.g. CHANNEL. They have named columns and then rows of values for those columns
```
CHANNEL
CHAN_NO LENGTH MANNING DISPERSION UPNODE DOWNNODE
0 1 19500 0.035 360.0 1 2
1 2 14000 0.028 360.0 2 3
...
END
```
The *read_input* method reads the input file, parsing the tables found into data frames and returning a dictionary the keys of which are the names of the tables
Let's see how this looks...
```
# Parse the hydro echo file into a dict of DataFrames keyed by table name.
fname='../../tests/hydro_echo_historical_v82.inp'
tables=read_input(fname)
```
## Print list of all tables
```
# Show every table name parsed from the echo file.
print(list(tables.keys()))
```
## Display the DataFrame for a table, e.g. CHANNEL
```
# Render the CHANNEL table as a DataFrame.
display(tables['CHANNEL'])
```
It is a pandas DataFrame, so you can query its dtypes and use any other DataFrame method.
```
# Column dtypes inferred by the parser.
print(tables['CHANNEL'].dtypes)
```
## Programmatic inspection of input
This is an important feature as pandas has a multitude of features to allow for filtering, describing and joining with other DataFrames and this can be used to analyze the input file
For example, display all channels with length > 20000 feet
```
# Standard pandas filtering works directly on the parsed tables.
c=tables['CHANNEL']
print('Channels with length > 20000 ft:')
display(c[c.LENGTH>20000])
```
## Combining input tables
Channels have cross sections but those are defined in the 'XSECT_LAYER' table.
```
# Cross-section layer table (one row per channel/distance/elevation layer).
x=tables['XSECT_LAYER']
display(x)
```
These can be combined (merged) with the channel table on the 'CHAN_NO' common column to get a larger table with channel and x section information
```
# Join channels with their cross-section layers on CHAN_NO, then show
# channel 441 grouped by distance along the channel.
# NOTE(review): loop-body indentation appears lost in this export.
fc=pd.merge(c,x,on='CHAN_NO')
for name, group in fc[fc.CHAN_NO==441].groupby('DIST'):
print('DIST: ',name)
display(group)
```
# Visualizing input data
Furthermore this information can be displayed with the usual pandas and other visualization libraries that are available
```
# Plot the first cross-section of channel 441: area, width, and wetted
# perimeter as functions of elevation.
group=fc[fc.CHAN_NO==441].groupby('DIST')
dist,group=next(iter(group))
group.plot(y='ELEV',x='AREA',kind='line',label='AREA',title='AREA with ELEV')
group.plot(y='ELEV',x='WIDTH',kind='line',label='WIDTH',title='WIDTH with ELEV')
_=group.plot(y='ELEV',x='WET_PERIM',kind='line',label='WET_PERIM',title='WET_PERIM with ELEV')
```
# Writing input to file
Once the tables are manipulated using pandas DataFrame functions, these can be written to a file to be used as input to run DSM2 models
The code below shows how to write the tables back out as a DSM2 input file.
```
# Round-trip: write the (possibly modified) tables back out in DSM2 input format.
write_input('../../tests/hydro_echo_historical_v82_copy.inp',tables)
```
| github_jupyter |
```
import numpy as np
from baseline import baseline
from sample import sample
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.decomposition import NMF
```
Load our large image and sample a smaller chunk of that image:
```
# Load the full hyperspectral image and sample a 32x32 spatial chunk.
# Axis 2 indexes wavenumber (see the wavenumber-offset indexing below).
image = np.load('image2.npy')
im = sample(image, 32)
im.shape # can we scroll through all the wavenumbers? that would be so cool
```
Initialize NMF model and flatten image to run NMF:
```
# Two-component sparse NMF on the flattened (pixels x wavenumbers) matrix.
# NOTE(review): loop-body indentation appears lost in this export, and
# im.shape[0]**2 assumes a square spatial sample — confirm.
nmf = NMF(n_components = 2, solver = 'cd', tol = 1e-5, max_iter=int(1e6), l1_ratio = 0.2, alpha = 1e-3, random_state = 0, verbose = False)
im2d = np.zeros((im.shape[0]**2, im.shape[2]))
for i in range(im.shape[2]):
im2d[:,i] = im[:,:,i].flatten()
# Shift to non-negative values before factorizing; W holds per-pixel
# coefficients, H the component spectra.
W_mat = nmf.fit_transform(X = im2d + abs(np.min(im2d)))
H_mat = nmf.components_
# Baseline-correct each recovered spectrum in place.
for i in range(H_mat.shape[0]):
base_i = baseline(H_mat[i,:])
H_mat[i,:] = H_mat[i,:] - base_i
```
Let's look at our resulting coefficient images:
```
# Reshape the NMF coefficients back onto the 32x32 spatial grid and show
# both component maps side by side.
coefficients = W_mat.reshape((32,32,2))
fig, ax = plt.subplots(ncols = 2, figsize = (13,6))
ax[0].imshow(coefficients[:,:,0], cmap = 'gray')
ax[0].set_title('DPPDTT Coefficients')
ax[1].imshow(coefficients[:,:,1], cmap = 'gray')
ax[1].set_title('PS Coefficients')
```
Normalize the images to the sum of each other:
```
# Per-pixel DPPDTT fraction: DPPDTT coefficient relative to the
# DPPDTT + PS total at that pixel.
dppdtt_co = coefficients[:,:,0]
ps_co = coefficients[:,:,1]
# Vectorized elementwise ratio replaces the original O(n^2) Python
# double loop; result is identical (including inf/nan where the sum is 0).
threshold_mtx = dppdtt_co / (dppdtt_co + ps_co)
# threshold matrix is relative to dppdtt
plt.imshow(threshold_mtx, cmap = 'gray')
plt.colorbar()
plt.title('Threshold Matrix relative to DPPDTT')
np.save('threshold_mtx', threshold_mtx)
```
Create mask with 50% threshold:
```
# Binary mask: 1 where the pixel is at least 50% DPPDTT, else 0.
# Vectorized comparison replaces the original per-pixel double loop;
# np.where with float outputs matches the original zeros_like dtype,
# and NaN pixels compare False -> 0, as before.
mask = np.where(threshold_mtx >= 0.5, 1.0, 0.0)
plt.imshow(mask, cmap = 'gray')
plt.title('50% DPPDTT Mask')
```
Display mask, original image, and mask over reconstructed image!
```
# Compare the raw image, the mask, and the mask overlaid on the image.
fig, ax = plt.subplots(ncols = 3, figsize = (16,5))
ax[0].imshow(im[:,:,1550-750]) # indexed from 0 but wavenumbers range from 750 to 1878
ax[0].set_title('Original Image at 1550 $cm^{-1}$')
ax[1].imshow(mask, cmap = 'gray')
ax[1].set_title('Mask of 50% or Greater DPPDTT Region')
ax[2].imshow(im[:,:,1550-750], cmap = 'gray')
ax[2].imshow(mask, alpha = 0.3)
ax[2].set_title('Mask Overlayed')
# Fraction of pixels flagged as >= 50% DPPDTT (assumes a square mask).
value = np.sum(mask) / (mask.shape[0]**2)
value # percent of area that's greater than 50% DPPDTT
```
To do:
- [ ] Create function that creates mask
- [ ] Import as .py files
Muammer - contour plot
Maddie - turn things .py files
Caitlin - region props
Preetham - interactive image/plot
| github_jupyter |
# Example 1.2: Correlation and auto-correlation
This example consists of two parts. The first part will look at correlation and present a procedure to generate realizations of correlated Gaussian random variables in Python. The second part will look into how to make realizations of time series with a given auto-correlation in Python.
## Part 1: Monte Carlo simulation of correlated Gaussian variables.
Consider a vector of random variables $\mathbf{X(t)}$. We consider a case with two variables $X_1$ and $X_2$ that are loads acting on a two-story portal frame. The loads are correlated Gaussian distributed random variables. This means that the loads can be defined by a vector of mean values $m_x$
$$ \bf{m}_{\bf{x}} = \left[ \begin{array}{c} m_{X_1} \\ m_{X_2} \end{array} \right] $$
and a covariance matrix $\mathbf{\sigma^2_x}$
$$ \sigma^2 _{\bf{x}} = \left[ \begin{array}{c c}
{\sigma _{X_1}^2}&{\rho _{X_1X_2}\sigma _{X_1}\sigma _{X_2}}\\
{\rho _{X_2X_1}\sigma _{X_2}\sigma _{X_1}}&{\sigma _{X_2}^2}
\end{array} \right] $$
Here $\sigma _{{X_1}}$ and $\sigma _{{X_1}}$ are the the standard deviation, while $\sigma^2 _{{X_1}}$ and $\sigma^2 _{{X_1}}$ are the the variance of $X_1$ and $X_2$. The correlation coefficient is denoted $\rho _{{X_2}{X_1}}$.
We will now assume that the mean values of $X_1$ and $X_2$ are zero and that the standard deviation of both processes is one and generate data for different correlation coefficients.
```
# import necessary modules and packages
import numpy as np
from matplotlib import pyplot as plt
# Draw Nsim samples of the correlated Gaussian pair (X1, X2) and show both
# the sample path and the scatter of X2 against X1.
mean = np.array([0, 0]) # Mean values
sigma1 = 1.0 # Standard deviation of X1
sigma2 = 1.0 # Standard deviation of X2
rho12 = 0.8 # correlation coefficient between -1 and 1
Nsim = 101 # Number of points
cov = np.array([[sigma1**2, sigma1*sigma2*rho12], [sigma1*sigma2*rho12, sigma2**2]]) # Covariance matrix
X = np.random.multivariate_normal(mean,cov,Nsim) # Monte Carlo simulation of correlated Gaussian (normal) variables
# Plot both components against the sample index.
plt.figure(figsize=(12, 5))
plt.plot(X[:,0],marker='o',label = '$X_1$')
plt.plot(X[:,1],marker='o',label = '$X_2$' )
plt.ylabel(r'$X_1$')
plt.xlabel(r'$t$')
plt.grid()
plt.legend()
# Scatter plot of X2 vs X1 to visualize the linear correlation.
plt.figure(figsize=(5, 5))
plt.scatter(X[:,0],X[:,1],label='$X_2$')
plt.xlabel('$X_1$')
plt.ylabel('$X_2$')
plt.grid()
plt.title(r'$\rho=' + str(rho12) + '$');
```
The top figure shows the series of points generated for the stochastic processes $X_1$ and $X_2$ while the bottom shows a scatter plot. Try to change the correlation coefficient in the cell above and investigate how the time series and the scatter plot depends on the correlation coefficient.
The time series shown in the top figure is very irregular because there are many significant changes when going from one point to the next. This leaves the impression that the correlation to neighbouring samples is low. We can, for instance, not give a reasonable estimate of the value at 61 seconds based on the value of the time series at 60 seconds. This is because the time series have been generated without considering auto-correlation. We will look into auto-correlation in the next part of this example.
## Part 2: Simulation of time series with a given auto-correlation function
Auto-correlation describes how a stochastic process is correlated with itself considering a time lag $\tau$
$$R_{X_1}(\tau)= E[X_1(t)X_1(t+\tau)]$$
A typical auto-correlation function is defined as follows.
$$R(\tau)=\frac{\sigma_{X_1}^2}{\omega_{c}\tau}\sin(\omega_{c}\tau)$$
Here $\omega_c$ is a cutoff frequency.
```
# Define auto-correlation function R(tau) = sigma^2 * sin(omega_c*tau) / (omega_c*tau)
dt = 0.5
t = np.arange(0.,100.,dt) # Time vector
# Center the lags about zero; on this grid t is a multiple of 0.5 while
# max(t)/2 = 49.75 is not, so tau never hits exactly 0 (no division by zero here;
# the later cell guards the tau=0 singularity explicitly with eps)
tau = t-np.max(t)/2
omega_c = 1.0; # Cut-off frequency
sigma = 1.0 # Standard deviation
R = sigma**2/(omega_c*tau)*np.sin(omega_c*tau); # Auto-correlation function
plt.figure(figsize=(12,4))
plt.plot(tau,R)
plt.ylim(-sigma,sigma)
plt.grid()
plt.ylabel(r'$R(\tau)/\sigma^2$')
plt.xlabel(r'$\tau$')
plt.title('Auto-correlation coefficient');
```
The figure above shows that the auto-correlation coefficient approaches one as the time lag approaches zero. This is because the correlation is perfect when the time lag is minimal. It is also seen that the correlation drops quickly and that it becomes negative and oscillates as the time lag increases. An auto-correlation function is always symmetric about the vertical axis, but it does not necessarily oscillate or go to zero as $\tau$ increases.
We will now use the auto-correlation function to generate realizations of $X_1$ assuming that the auto-correlation function above applies. We then need to consider all the points in the time series as correlated Gaussian variables. We define a vector of mean values and a covariance matrix defined by the auto-correlation function. We generate $N$ points of the time series by using the following vector of mean values
$${{\bf{m}}_{\bf{x}}} = \left[ {\begin{array}{c}
{{m_{{X_1}}}({t_1})}\\
\vdots \\
{{m_{{X_1}}}({t_N})}
\end{array}} \right]$$
and covariance matrix
$${\sigma^2 _{{X_1}}} = \left[ {\begin{array}{cc}
{{R_{{X_1}}}(0)}&{{R_{{X_1}}}({t_1} - {t_2})}& \cdots &{{R_{{X_1}}}({t_1} - {t_N})}\\
{{R_{{X_1}}}({t_2} - {t_1})}&{{R_{{X_1}}}(0)}& \cdots &{{R_{{X_1}}}({t_2} - {t_N})}\\
\vdots & \vdots & \ddots & \vdots \\
{{R_{{X_1}}}({t_N} - {t_1})}&{{R_{{X_1}}}({t_N} - {t_2})}& \cdots &{{R_{{X_1}}}(0)}
\end{array}} \right]$$
```
# Use auto-correlation function to generate stochastic time series
tau_mat = np.abs(np.array([t])-np.array([t]).T) # Matrix of all possible time lags |t_i - t_j|
tau_mat[tau_mat==0] = np.finfo(float).eps # Avoid the singularity when \tau = 0
mean = np.zeros((t.shape[0])) # Vector of mean values (zero-mean process)
cov = sigma**2/(omega_c*tau_mat)*np.sin(omega_c*tau_mat); # Co-variance matrix defined using the auto-correlation function
# Each row of X is one realization of the process sampled at all points of t
X = np.random.multivariate_normal(mean,cov,3) # Simulate the points
# Top axes: the auto-correlation function; bottom axes: the first realization
fig, axs = plt.subplots(2,1,figsize=(12,8))
axs[0].plot(tau, R)
axs[0].set_ylabel(r'$R(\tau)$')
axs[0].set_xlabel(r'$\tau$')
axs[0].set_ylim(-sigma,sigma)
axs[0].grid(True)
axs[0].set_title('Auto-correlation function')
axs[1].plot(t, X[0,:],marker='o')
axs[1].set_ylabel(r'$X_1$')
axs[1].set_xlabel(r'$t$')
axs[1].grid(True)
```
The bottom figure above shows one particular realization of the stochastic process $X_1$ with the given auto-correlation function. The time series above has different characteristics than the time series generated in the first part of this example. The time series above is smooth, and the points follow a pattern defined by the auto-correlation function. This is because nearby points will attain similar values since they are strongly correlated. Try to execute the cell above to get more realizations, change the cutoff frequency $\omega_c$, and study how the characteristics of the simulated time series change.
| github_jupyter |
# Plotting uncertainty
In this example we will go over plotting uncertainties in various ways:
+ y errorbars
+ x errorbars
+ x and y errorbars (no covariance)
+ x and y error-ellipse (covariance)
## Packages being used
+ `matplotlib`: all the plotting
+ `pandas`: read in the data table
+ `numpy` and `scipy`: convert cov matrix to ellipse params
## Relevant documentation
+ `matplotlib`: https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.errorbar.html
```
import pandas
import scipy.linalg as sl
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
import mpl_style
%matplotlib inline
plt.style.use(mpl_style.style1)
```
Our data contains $(x, y)$ positions with 1-$\sigma$ uncertainties and covariance values:
```
# Load (x, y) measurements with 1-sigma uncertainties (sx, sy) and the
# correlation coefficient pxy for each point; `display` renders the table inline
t = pandas.read_csv('data.csv')
display(t)
```
**Note** the full covariance matrix for each data point is:
$\left[ \begin{array}{ccc} \sigma_x^2 & \rho_{xy}\sigma_x \sigma_y \\ \rho_{xy}\sigma_x \sigma_y & \sigma_y^2 \end{array} \right]$
## y-uncertainties or x-uncertainties only
The most common type of data you will work with will only have (significant) uncertainties in one direction. In this case it is very easy to plot using `errorbar`:
```
# Figure 1: vertical (y) error bars only
plt.figure(1)
plt.errorbar(
    t.x,
    t.y,
    yerr=t.sy,
    ls='None',      # markers only, no connecting line
    mfc='k',
    mec='k',
    ms=5,
    marker='s',
    ecolor='k'
)
plt.xlabel('x')
plt.ylabel('y')
plt.ylim(0, 700);
# Figure 2: horizontal (x) error bars only
plt.figure(2)
plt.errorbar(
    t.x,
    t.y,
    xerr=t.sx,
    ls='None',
    mfc='k',
    mec='k',
    ms=5,
    marker='s',
    ecolor='k'
)
plt.xlabel('x')
plt.ylabel('y')
plt.ylim(0, 700);
```
## Uncertainties in both x and y with no cov
If your data has no cov you can still use `errorbar`:
```
# Figure 3: both x and y error bars (valid only when the uncertainties are
# uncorrelated; correlated points need the error-ellipse plot below)
plt.figure(3)
plt.errorbar(
    t.x,
    t.y,
    yerr=t.sy,
    xerr=t.sx,
    ls='None',
    mfc='k',
    mec='k',
    ms=5,
    marker='s',
    ecolor='k'
)
plt.xlabel('x')
plt.ylabel('y')
plt.ylim(0, 700);
```
## Uncertainties in both x and y with cov
If your data does have cov you should plot a 1-$\sigma$ ellipse around each point. There is no built in function to do this, so we will have to write our own. We will start by writing a function to turn a cov matrix into the parameters for an ellipse and draw it on a figure.
```
def cov_to_ellipse(cov, pos, **kwargs):
    """Build a matplotlib Ellipse tracing the 1-sigma contour of a 2x2 covariance.

    `cov` is the 2x2 covariance matrix, `pos` the (x, y) center; extra keyword
    arguments are forwarded to the Ellipse constructor (colors, linewidth, ...).
    """
    # SVD of a (symmetric, PSD) covariance matrix yields its eigen-decomposition:
    # columns of u_mat are eigenvectors, singular values are the variances.
    u_mat, variances, _vh = sl.svd(cov, full_matrices=False)
    # Orientation: angle (in degrees) of the leading eigenvector w.r.t. the x-axis.
    angle_deg = np.degrees(np.arctan2(u_mat[1, 0], u_mat[0, 0]))
    # Ellipse expects full width/height (diameters), hence 2x the std devs.
    full_width = 2.0 * np.sqrt(variances[0])
    full_height = 2.0 * np.sqrt(variances[1])
    return Ellipse(xy=pos, width=full_width, height=full_height, angle=angle_deg, **kwargs)
def plot_ellipse(t, ax=None, **kwargs):
    """Draw one 1-sigma error ellipse per row of dataframe `t` onto `ax`.

    Each row must provide x, y, sx, sy, and the correlation pxy; extra keyword
    arguments are passed through to the Ellipse patches. Defaults to the
    current axes when `ax` is not given.
    """
    target_ax = plt.gca() if ax is None else ax
    for _idx, row in t.iterrows():
        # Rebuild the 2x2 covariance matrix from the per-point columns.
        off_diag = row.pxy * row.sx * row.sy
        point_cov = np.array(
            [[row.sx**2, off_diag],
             [off_diag, row.sy**2]]
        )
        patch = cov_to_ellipse(point_cov, [row.x, row.y], **kwargs)
        target_ax.add_artist(patch)
# Figure 4: plot the data points, then overlay one covariance (error) ellipse
# per point using the helpers defined above
plt.figure(4)
plt.plot(
    t['x'],
    t['y'],
    's',
    mfc='k',
    mec='k',
    ms=5
)
plot_ellipse(
    t,
    lw=1.5,
    fc='none',   # outline-only ellipses
    ec='C0'
)
plt.xlabel('x')
plt.ylabel('y')
plt.ylim(0, 700)
plt.draw();
```
| github_jupyter |
```
import pandas as pd
import numpy as np
```
#### Parcel File
```
# Move a fixed number of Q1 (lowest income quartile) households to Q2 at the
# parcel level, simulating a UBI-style income shift.
parcel_data_path = "C:/Users/etheocharides/Box/Modeling and Surveys/Urban Modeling/Bay Area UrbanSim/PBA50/Final Blueprint runs/Final Blueprint (s24)/BAUS v2.25 - FINAL VERSION/"
parcel_data_file = "run182_parcel_data_2050.csv"
parcel_data = pd.read_csv(parcel_data_path+parcel_data_file)
print("number of q1 households is {}".format(parcel_data.hhq1.sum()))
print("number of q2 households is {}".format(parcel_data.hhq2.sum()))
num_hh_to_move = 132529
# randomly select a set of parcels that have at least one Q1 household
# (fix: removed a stray "LOCKED" token pasted onto the end of this line,
# which made the cell a SyntaxError)
# NOTE(review): this moves at most one household per selected parcel, and
# requires num_hh_to_move <= number of parcels with hhq1 > 0 — confirm intended
ids_to_move = np.random.choice(parcel_data.loc[parcel_data.hhq1 > 0].parcel_id, num_hh_to_move, replace=False)
# make Q1 HHs Q2 HHs (decrement hhq1, increment hhq2 on the selected parcels)
parcel_data.loc[parcel_data.parcel_id.isin(ids_to_move), "hhq1"] = parcel_data.hhq1 - 1
parcel_data.loc[parcel_data.parcel_id.isin(ids_to_move), "hhq2"] = parcel_data.hhq2.fillna(0) + 1
print("number of q1 households is {}".format(parcel_data.hhq1.sum()))
print("number of q2 households is {}".format(parcel_data.hhq2.sum()))
# save a copy of the data in the output folder, manually make it the master file as needed
# don't want to risk overwriting the master file (run182_parcel_data_2050_UBI.csv)
new_parcel_data_file = "run182_parcel_data_2050_UBI_output.csv"
parcel_data.to_csv(parcel_data_path+new_parcel_data_file)
```
#### TAZ File
```
# Apply the same Q1 -> Q2 household shift to the TAZ-level summary file,
# using the parcel-to-TAZ crosswalk to locate the randomly selected parcels.
taz_data_path = "C:/Users/etheocharides/Box/Modeling and Surveys/Urban Modeling/Bay Area UrbanSim/PBA50/Final Blueprint runs/Final Blueprint (s24)/BAUS v2.25 - FINAL VERSION/"
taz_data_file = "run182_taz_summaries_2050.csv"
taz_data = pd.read_csv(taz_data_path+taz_data_file)
print("number of q1 households is {}".format(taz_data.HHINCQ1.sum()))
print("number of q2 households is {}".format(taz_data.HHINCQ2.sum()))
parcel_taz_xwalk_path = "D:/w/bayarea_urbansim/data/2018_10_17_parcel_to_taz1454sub.csv"
parcel_taz_xwalk = pd.read_csv(parcel_taz_xwalk_path)
# index by integer parcel id so .map() below can look up each selected parcel
parcel_taz_xwalk.index = parcel_taz_xwalk.PARCEL_ID.round().astype(int)
# randomly selected set of parcels (ids_to_move comes from the parcel cell above)
ids_to_move_df = pd.DataFrame((ids_to_move), columns=['ids'])
# add TAZ IDs to the random set of parcels
ids_to_move_df['TAZ'] = 0
ids_to_move_df['TAZ'] = ids_to_move_df.ids.map(parcel_taz_xwalk['ZONE_ID'])
# group the random set of parcels by TAZ to get a per-TAZ count of moved HHs
ids_to_move_df['id_count'] = 1
# fix: drop(labels, 1) relied on the positional `axis` argument, which was
# deprecated and removed in pandas 2.0; use the explicit columns= form
ids_by_taz = ids_to_move_df.groupby(['TAZ']).sum().drop(columns=['ids'])
# make Q1 HHs Q2 HHs in these TAZs
for i in ids_by_taz.index:
    count = ids_by_taz.id_count[i]
    taz_data.loc[taz_data.TAZ == i, 'HHINCQ1'] = taz_data.loc[taz_data.TAZ == i].HHINCQ1 - count
    taz_data.loc[taz_data.TAZ == i, 'HHINCQ2'] = taz_data.loc[taz_data.TAZ == i].HHINCQ2 + count
print("number of q1 households is {}".format(taz_data.HHINCQ1.sum()))
print("number of q2 households is {}".format(taz_data.HHINCQ2.sum()))
# save a copy of the data in the output folder, manually make it the master file as needed
# don't want to risk overwriting the master file (run182_taz_summaries_2050_UBI.csv)
new_taz_data_file = "run182_taz_summaries_2050_UBI_output.csv"
taz_data.to_csv(taz_data_path+new_taz_data_file)
```
#### Superdistrict File
```
# Apply the same Q1 -> Q2 household shift to the superdistrict summary file,
# mapping the selected parcels' TAZs to superdistricts via the TAZ file.
sd_data_path = "C:/Users/etheocharides/Box/Modeling and Surveys/Urban Modeling/Bay Area UrbanSim/PBA50/Final Blueprint runs/Final Blueprint (s24)/BAUS v2.25 - FINAL VERSION/"
sd_data_file = "run182_superdistrict_summaries_2050.csv"
sd_data = pd.read_csv(sd_data_path+sd_data_file)
print("number of q1 households is {}".format(sd_data.hhincq1.sum()))
print("number of q2 households is {}".format(sd_data.hhincq2.sum()))
taz_sd_xwalk = taz_data[['TAZ', 'SD']].set_index('TAZ')
# add superdistrict IDs to the random set of parcels
ids_to_move_df['SD'] = 0
ids_to_move_df['SD'] = ids_to_move_df.TAZ.map(taz_sd_xwalk['SD'])
# group the random set of parcels by superdistrict
# fix: drop(labels, 1) relied on the positional `axis` argument, which was
# deprecated and removed in pandas 2.0; use the explicit columns= form
ids_by_sd = ids_to_move_df.groupby(['SD']).sum().drop(columns=['ids'])
# make Q1 HHs Q2 HHs in these superdistricts
for i in ids_by_sd.index:
    count = ids_by_sd.id_count[i]
    sd_data.loc[sd_data.superdistrict == i, 'hhincq1'] = sd_data.loc[sd_data.superdistrict == i].hhincq1 - count
    sd_data.loc[sd_data.superdistrict == i, 'hhincq2'] = sd_data.loc[sd_data.superdistrict == i].hhincq2 + count
print("number of q1 households is {}".format(sd_data.hhincq1.sum()))
print("number of q2 households is {}".format(sd_data.hhincq2.sum()))
# save a copy of the data in the output folder, manually make it the master file as needed
# don't want to risk overwriting the master file (run182_superdistrict_summaries_2050_UBI.csv)
new_sd_data_file = "run182_superdistrict_summaries_2050_UBI_output.csv"
sd_data.to_csv(sd_data_path+new_sd_data_file)
```
| github_jupyter |
```
import os
import pandas as pd
import requests
import spacy
import string
from typing import List, Tuple, Union, Callable, Dict, Iterator
from collections import defaultdict
from difflib import SequenceMatcher
from spacy.matcher import Matcher, PhraseMatcher
from spacy.tokens import Doc, Token, Span
from spacy.matcher import Matcher
# download the medium English model (shell escape; run once per environment)
!python -m spacy download en_core_web_md
# load the pipeline used for tokenization, vectors, and matching below
nlp = spacy.load("en_core_web_md")
filenames = os.listdir('D:\\Lambda\\Labs\\human-rights-first-asylum-ds-a\\texts\\text cases') # Wherever files are located
# rule-based token matcher shared by the cells below
matcher = Matcher(nlp.vocab)
# phrases we are looking for in protected grounds
# explore court documents if there are identifiers
# when seaching for these patterns/phrases/tokens
# spaCy Matcher patterns for the five statutory protected grounds
pattern = [
    [{"LOWER": "race"}],
    [{"LOWER": "religion"}],
    [{"LOWER": "nationality"}],
    [{"LOWER": "social"}, {"LOWER": "group"}],
    [{"LOWER": "political"}, {"LOWER": "opinion"}]
]
matcher.add('protected_grounds',pattern)
# read each case file and parse it with spaCy; dict keys are 1-based counters
counter = 0
dict_name = {}
for counter, file in enumerate(filenames, start=1):
    # fix: use a context manager so the file handle is closed even if nlp()
    # raises (the original opened/closed manually and leaked on error)
    with open(f"D:\\Lambda\\Labs\\human-rights-first-asylum-ds-a\\texts\\text cases\\{file}", "r", encoding='utf-8') as f:
        dict_name[counter] = nlp(f.read())
# already tokenized text (requires at least two case files on disk)
doc_1 = dict_name[1]
doc_2 = dict_name[2]
# run matcher on the first document; as_spans=True returns Span objects
matches = matcher(doc_1, as_spans=True)
matches
def similar(target_phrases, file):
    """Return the spans of `file` that match the given token patterns.

    `target_phrases` must be a list of spaCy Matcher patterns, e.g.
    [[{"LOWER": "race"}]]; `file` is an already-tokenized spaCy Doc.
    The matches are returned as Span objects.
    """
    # Build a one-off matcher over the shared vocabulary and register
    # all the requested patterns under a single rule name.
    phrase_matcher = Matcher(nlp.vocab)
    phrase_matcher.add('target_phrases', target_phrases)
    return phrase_matcher(file, as_spans=True)
def get_protected_grounds(self):
    """Return the unique protected-ground phrases found in a parsed document.

    `self` may be an object exposing the spaCy Doc at `self.doc` (the intended
    class usage) or a bare Doc — this notebook calls the function directly on
    a parsed document (see the call below).
    """
    # list of protected grounds (spaCy Matcher pattern syntax)
    # can expand this list and add different phrases to cover more ground
    pattern = [
        [{"LOWER": "race"}],
        [{"LOWER": "religion"}],
        [{"LOWER": "nationality"}],  # currently, phrase is pulled but out of context
        [{"LOWER": "social"}, {"LOWER": "group"}],
        [{"LOWER": "political"}, {"LOWER": "opinion"}],
        [{"LOWER": "political"}, {"LOWER": "offense"}],
        [{"LOWER": "protected"}, {"LOWER": "grounds"}]
    ]
    # fix: the notebook calls get_protected_grounds(doc_2) with a Doc, so
    # dereferencing self.doc unconditionally would be wrong for a wrapper-less
    # call path; accept either a wrapper object with .doc or a bare Doc
    doc = getattr(self, 'doc', self)
    similar_pg = similar(target_phrases=pattern, file=doc)
    # fix: the de-duplication loop was nested under a scan for the literal
    # token 'nationality', so no phrases were collected unless that exact word
    # appeared in the text; collect unique matched phrases unconditionally.
    # TODO(review): the 'nationality act' edge case (e.g. 'Immigration and
    # Nationality Act') is still not filtered out — needs context handling.
    pgs = []
    for phrase in similar_pg:
        text = phrase.text.lower()
        if text not in pgs:
            pgs.append(text)
    return pgs
get_protected_grounds(doc_2)
```
| github_jupyter |
# Getting Started with Exploring Segmentations
## Before you start!
- This notebook assumes that shapeworks conda environment has been activated using `conda activate shapeworks` on the terminal.
- See [Getting Started with Notebooks](getting-started-with-notebooks.ipynb) for information on setting up your environment and notebook shortcuts.
- See [Getting Started with Segmentations](getting-started-with-segmentations.ipynb) to learn how to load and visualize binary segmentations.
- Note example output was generated on Linux/Mac environment and may look different on Windows.
## In this notebook, you will learn:
1. How to define your dataset location and explore what is available in it
2. How to explore your dataset
3. How to decide the grooming pipeline needed for your dataset
```
import shapeworks as sw
```
## 1. Defining and exploring your dataset
### Defining dataset location
You can download exemplar datasets from [ShapeWorks data portal](https://girder.shapeworks-cloud.org) after you login. For new users, you can [register](https://girder.shapeworks-cloud.org#?dialog=register) an account for free. Please do not use an important password.
After you login, click `Collections` on the left panel and then `use-case-data-v2`. Select the dataset you would like to download by clicking on the checkbox on the left of the dataset name. See the video below.
After you download the dataset zip file, make sure you unzip/extract the contents in the appropriate location.
**This notebook assumes that you have downloaded `ellipsoid_1mode` and you have placed the unzipped folder `ellipsoid_1mode` in `Examples/Python/Data`.** Feel free to use your own dataset.
<p><video src="https://sci.utah.edu/~shapeworks/doc-resources/mp4s/portal_data_download.mp4" autoplay muted loop controls style="width:100%"></p>
```
#import relevant libraries
import os
import pyvista as pv
import numpy as np
# dataset name is the folder name for your dataset
datasetName = 'ellipsoid_1mode'
# path to the dataset where we can find shape data
# here we assume shape data are given as binary segmentations
# (relative to this notebook's location under Examples/Python)
shapeDir = '../../Data/' + datasetName + '/segmentations/'
print('Dataset Name: ' + datasetName)
print('Shape Directory: ' + shapeDir)
### What is available in the dataset?
First let's see how many shapes we have in the dataset.
**File formats:** For binary segmentations, all [itk-supported image formats](https://insightsoftwareconsortium.github.io/itk-js/docs/image_formats.html) can be used.
```
import glob
from pathlib import Path
shapeExtention = '.nrrd'
# let's get a list of files for available segmentations in this dataset
# * here is a wildcard character used to retrieve all filenames
# in the shape directory with the file extension
shapeFilenames = sorted(glob.glob(shapeDir + '*' + shapeExtention))
print ('Number of shapes: ' + str(len(shapeFilenames)))
print('Shape files found:')
for shapeFilename in shapeFilenames:
    # wrap in a Path purely for nicer, platform-native printing
    shapeFilename = Path(shapeFilename)
    print(shapeFilename)
## 2. Exploring your dataset
We would like to better understand the given dataset to decide the appropriate grooming (preprocessing) pipeline/step to prepare it for shape modeling.
### Loading your dataset
First step is to load the dataset.
**Note:** If your dataset is large (large volumes and/or large number of segmentations), you could select a subset for this exploration step.
```
# list of shape segmentations
shapeSegList = []
# list of shape names (shape files prefixes) to be used
# for saving outputs and visualizations
shapeNames = []
# loop over all shape files and load individual segmentations
for shapeFilename in shapeFilenames:
    print('Loading: ' + shapeFilename)
    # current shape name: strip the directory then the extension
    # fix: os.path.basename is portable, unlike splitting on '/', which
    # fails on Windows paths that use '\\' separators
    segFilename = os.path.basename(shapeFilename)
    shapeName = segFilename[:-len(shapeExtention)]
    shapeNames.append(shapeName)
    # load segmentation as a shapeworks Image
    shapeSeg = sw.Image(shapeFilename)
    # append to the shape list
    shapeSegList.append(shapeSeg)
num_samples = len(shapeSegList)
print('\n' + str(num_samples) +
      ' segmentations are loaded for the ' + datasetName + ' dataset ...')
### Visualizing your dataset
Now let's visualize all samples in a grid using `pyvista`. You may need to call `pv.close_all()` every once in a while to clean up the unclosed plotters.
We will use `sw.plot_volumes` function from the Shapeworks python module. This function will take in a list of shapeworks images as input and initiate a pyvista plotter to render multiple windows, each with a single segmentation, add segmentations to the plotter, and start rendering.
```
# define parameters that control the plotter (passed straight to sw.plot_volumes)
use_same_window = False # plot using multiple rendering windows if false
notebook = False # True will enable the plots to lie inline
show_borders = True # show borders for each rendering window
shade_volumes = True # use shading when performing volume rendering
color_map = "viridis" # color map for volume rendering, e.g., 'bone', 'coolwarm', 'cool', 'viridis', 'magma'
show_axes = True # show a vtk axes widget for each rendering window
show_bounds = True # show volume bounding box
show_all_edges = True # add an unlabeled and unticked box at the boundaries of plot.
font_size = 10 # text font size for windows
link_views = True # link all rendering windows so that they share same camera and axes boundaries
# plot all segmentations in the shape list, one window per sample
sw.plot_volumes(shapeSegList,
                volumeNames = shapeNames,
                use_same_window = use_same_window,
                notebook = notebook,
                show_borders = show_borders,
                shade_volumes = shade_volumes,
                color_map = color_map,
                show_axes = show_axes,
                show_bounds = show_bounds,
                show_all_edges = show_all_edges,
                font_size = font_size,
                link_views = link_views
                )
```
<p><video src="https://sci.utah.edu/~shapeworks/doc-resources/mp4s/nb-explore-vols.mp4" autoplay muted loop controls style="width:100%"></p>
## 3. Deciding the grooming pipeline needed for your dataset
Does this dataset need grooming? What are grooming steps needed? Let's inspect the segmentations. What do we observe?
### Voxel spacing
Voxel spacing is not isotropic, i.e., the voxel size in each of the three dimensions is not equal. This can be identified if you zoom in on the visualization below and observe the step size in the x, y, z directions. Anisotropic spacing could adversely impact particle optimization since ShapeWorks assumes equal voxel spacing. Some datasets might also have different voxel spacings for each segmentation.
*Hence, it is necessary to bring all segmentations to the same voxel spacing that is equal in all dimensions.*
Another observation is voxel spacing is relatively large. This can be observed by the pixelated volume rendering and the jagged isosurface.
*We can improve the segmentation resolution by decreasing voxel spacing.*
```
import pyvista as pv
# to better appreciate the pixelated nature of these segmentations, we need to only visualize
# the binary segmentation, notice the thick slices
shapeIdx = 10   # an arbitrary sample chosen for illustration
shapeSeg = shapeSegList[shapeIdx]
# convert the shapeworks Image to a vtk image so pyvista can render it
shapeSeg_vtk = sw.sw2vtkImage(shapeSeg, verbose = True)
sw.plot_volumes(shapeSeg_vtk)
```
### Segmentations and image boundaries
Some segmentations are very close to the image boundary, not leaving enough room for particles (correspondences) to move and spread over these surface regions. In particular, particles could overshoot outside the image boundary during optimization.
Furthermore, if a segmentation touches the image boundary, this will result in an artificially (i.e., not real) open surface.
*Hence, these segmentations needs to be padded with background voxels (zero-valued) to create more room along each dimension.*
```
# let's inspect a segmentation that touches the image boundaries
# (sample 13 was identified visually in the grid plot above)
shapeIdx = 13
shapeSeg = shapeSegList[shapeIdx]
shapeSeg_vtk = sw.sw2vtkImage(shapeSeg, verbose = False)
sw.plot_volumes(shapeSeg_vtk)
```
<p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/shape_boundary.png"></p>
This segmentation touches the image boundary and hence will result in an artificially open surface. To inspect this behavior, we need extract a surface mesh (isosurface) from each segmentation. An isosurface is a three-dimensional surface that represents points of a constant value (aka isovalue) within the given volume of space.
```
# let's see if there's a function that extracts an isosurface from an image
# use dot-tap to get a list of functions/apis available for shapeSeg
# found it - toMesh, let's see its help
help(shapeSeg.toMesh)
```
The `toMesh` function needs an isovalue, which is the constant value that represents the surface of interest. Since a shape segmentation is a binary image, the foreground is expected to have the value of 1 (white) and the background should have a zero value (black), so an appropriate isovalue to extract the foreground-background interface is a value in between, e.g., 0.5
```
import numpy as np
# let's make sure that our assumptions about the voxel values are correct
# is the given volume a binary segmentation?
# first convert to numpy array
shapeSeg_array = shapeSeg.toArray()
# make sure that it is a binary segmentation (exactly two distinct voxel values)
voxelValues = np.unique(shapeSeg_array)
print('\nVoxel values:' + str(voxelValues))
if len(voxelValues) > 2:
    print('WARNING: ' + shapeName + ' is not a binary segmentation. Voxels have more than two distinct values')
    print('PLEASE make sure to use binary segmentations')
else:
    print('Shape ' + shapeName + ' is a binary segmentation')
# now define the isovalue, in case a binary segmentation has a foreground label that is not 1
# we need to obtain a value inbetween
# get min and max values
minVal = shapeSeg_array.min()
maxVal = shapeSeg_array.max()
print('\nMinimum voxel value: ' + str(minVal))
print('Maximum voxel value: ' + str(maxVal))
# fix: the midpoint between the two labels is (min + max)/2; the previous
# (max - min)/2 only happens to coincide with it when minVal == 0 and would
# fall below the foreground/background range whenever minVal > 0
isoValue = (minVal + maxVal)/2.0
print('\nisoValue = ' + str(isoValue))
# let's extract the segmentation isosurface and visualize it
# extract isosurface
shapeMesh = shapeSeg.toMesh(isovalue = isoValue)
# sw to vtk
shapeMesh_vtk = sw.sw2vtkMesh(shapeMesh)
sw.plot_meshes([shapeMesh_vtk])
```
<p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/explore_seg.png"></p>
So, we have been able to extract a segmentation's isosurface and visualize it as a surface mesh. It is worth noting that the jagged surface is due to the anisotropic voxel spacing (the spacing in the z-dimension is double that of the x- and y-dimensions) and the large voxel size.
### Shape alignment
One can observe from the segmentation visualization that they are not roughly aligned, i.e., they do not share the same coordinate frame where each individual shape is located differently compared to other shapes.
*Aligning shapes is a critical preprocessing step to avoid the shape model to encode variabilities pertaining to global transformations such as rotation and translation.*
```
# let's inspect some segmentations where we can observe misalignment
shapeIdxs = [8,9,10]
shapeSegSubset = [shapeSegList[shapeIdx] for shapeIdx in shapeIdxs ]
shapeNamesSubset = [shapeNames[shapeIdx] for shapeIdx in shapeIdxs ]
```
To inspect how multiple segmentations are spatially aligned with respect to each other, we will visualize their surfaces in the same rendering window.
```
# extract an isosurface per selected segmentation, then render all of them
# in one window (distinct colors) to make the misalignment visible
shapeSegIsosurfaces = []
shapeSegIsosurfaces_vtk = []
for shapeSeg in shapeSegSubset:
    # extract isosurface (isoValue computed in the earlier cell)
    shapeIsosurface = shapeSeg.toMesh(isovalue = isoValue)
    shapeSegIsosurfaces.append(shapeIsosurface)
    # sw to vtk
    shapeSegIsosurfaces_vtk.append(sw.sw2vtkMesh(shapeIsosurface, verbose = False))
sw.plot_meshes(shapeSegIsosurfaces,
               use_same_window = True,
               notebook = False,
               show_borders = True,
               meshes_color = ['tan', 'blue','red'],
               mesh_style = "surface",
               show_mesh_edges = False,
               show_axes = True,
               show_bounds = True,
               show_all_edges = True,
               font_size = 10,
               link_views = True
               )
```
<p><video src="https://sci.utah.edu/~shapeworks/doc-resources/mp4s/nb-explore-iso-align-pv.mp4" autoplay muted loop controls style="width:100%"></p>
### Too much background
Image boundaries are not tight around shapes, leaving irrelevant background voxels that might increase the memory footprint when optimizing the shape model.
*We can crop segmentations to remove unnecessary background.*
```
shapeIdx = 12
shapeSeg = shapeSegList[shapeIdx]
shapeSeg_vtk = sw.sw2vtkImage(shapeSeg, verbose = False)
sw.plot_volumes(shapeSeg_vtk)
```
<p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/explore_background.png"></p>
### Binary segmentations
In general, this binary representation is not useful for finite numerical calculation of surface geometry and features that are required in shape modeling, which assumes the image is a sampling of a smooth function.
Hence, ShapeWorks makes use of the signed distance transform of the binary segmentation that does satisfy this criterion.
*For the correspondence optimization step, shapes can be represented as the zero level set of a smooth signed distance transform.*
### Tentative grooming
Hence, a tentative grooming pipeline entails the following steps:
1. Resampling segmentations to have smaller and isotropic voxel spacing
2. Rigidly aligning shapes
3. Cropping and padding segmentations
4. Converting segmentations to smooth signed distance transforms
Let the fun begin! Please visit [Getting Started with Grooming Segmentations](getting-started-with-grooming-segmentations.ipynb) to learn how to groom your dataset.
| github_jupyter |
# Data Wrangling Part 2: Data Cleaning and Feature Engineering
This is the second part of the series of tutorials on data wrangling. In the first part we started the process of data wrangling on the Titanic dataset, focusing on the EDA aspects. Recall that the point of doing EDA first was to give us a feel for the data: how big the data is; how many samples; how many features; problems like missing values, duplicate samples, and useless features; etc. To see the results of our EDA on the Titanic dataset see that previous tutorial.
In this next part we'll focus on data cleaning and feature engineering. The goal of data cleaning is to take the messy raw data we have and transform it into quality data that an ML model can train on. The goal of feature engineering is to identify which raw features from the raw data might be useful, and where feasible create potentially useful new derived features from them.
We'll continue on with the example Titanic dataset from before. Since the plan is to do data cleaning, feature engineering, and (finally) train a working model, I'll go ahead and load numpy, pandas, matplotlib, and various sklearn and imblearn functions we covered before for imbalanced classification. I'll also specify a seed since randomness will be used in this code, and finally port over a useful function I've used in prior tutorials for printing out various metrics during model training.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
roc_auc_score, f1_score, confusion_matrix, accuracy_score, precision_score, recall_score
)
from imblearn.over_sampling import RandomOverSampler
seed = 123
np.random.seed(seed)
def get_scores(y, yhat):
    """Print standard binary-classification metrics (rounded to 4 places) and the confusion matrix."""
    labeled_metrics = [
        ('accuracy: ', accuracy_score),
        ('precision: ', precision_score),
        ('recall: ', recall_score),
        ('f1: ', f1_score),
        ('auc: ', roc_auc_score),
    ]
    for label, metric in labeled_metrics:
        print(label, round(metric(y, yhat), 4))
    print('confusion matrix:\n', confusion_matrix(y, yhat))
```
## Data Cleaning and Feature Engineering: First Pass
Focusing on the Titanic dataset again, I'll again import the data from the URL below, verify that the output is what was expected, and drop the `PassengerId` column since EDA identified it as a useless feature (a duplicate index). Recall that the ultimate goal here is to build a model that can predict whether a passenger of the Titanic survived, given the other features present in this dataset.
This time when loading the dataframe I'll call it `df_raw`, and then make a copy of it into the dataframe `df` that we'll clean up. This is a good idea to do because it's easy to screw something up when data cleaning, and so if you do it's worth having the raw data around to go back to and try again.
```
url = 'https://gist.githubusercontent.com/michhar/2dfd2de0d4f8727f873422c5d959fff5/raw/fa71405126017e6a37bea592440b4bee94bf7b9e/titanic.csv'
# Load the raw data and keep an untouched copy (df_raw) to fall back on if cleaning goes wrong.
df_raw = pd.read_csv(url)
df_raw.head()
df_raw.info()
# Work on a copy; PassengerId is just a duplicate row index, so drop it.
df = df_raw.copy()
df = df.drop(columns='PassengerId')
df.head()
```
Now, let's start by cleaning up the non-string columns since they're the easiest to deal with. Going in left-to-right order, we first have the `Survived` column, which will end up being the labels for the ML problem. From the EDA, we know that there are no missing values here, and that the values are already encoded as binary integers with `0 = not survived` and `1 = survived`. That means this column is already good and we can move on.
The next column is `Pclass`. Recall that `Pclass` takes on the integer values `1, 2, 3`. It has no missing values. And it is evidently a categorical variable representing what boarding class the passenger was in. The fact that `Pclass` is categorical means that there is (as far as we can tell) no natural ordering. With categorical data, it wouldn't make sense to say `1 < 2 < 3`. It would make about as much sense as saying `red < blue < orange`. We thus may want to encode this data differently, e.g. one-hot encodings or embeddings. I'll ignore this for now and come back to it later. Other than this issue, this column appears good to go as well.
The next non-string column is `Sex`. Recall that this column (as the name implies) represents passenger sex, and is encoded as `male`, `female`. This feature is obviously categorical. There are no missing values in this column. To deal with the encodings, I'll for now map `0 = male` and `1 = female`, though we may want to use a different categorical encoding later.
```
# Encode Sex as a binary integer: 0 = male, 1 = female.
df['Sex'] = df['Sex'].replace({'male': 0, 'female': 1})
df.head()
```
Next is the `Age` column. Recall this is a numerical feature with ages ranging roughly 0-80 years in not-necessarily-integer values (some passengers gave their age in years and months evidently). Also recall that indeed this column *does* have missing values. It looks like there are 177 of 891 missing values here.
```
# Count missing ages (expected: 177 of 891 rows).
df['Age'].isna().sum()
```
Let's address the missing value question first. With missing values present in a column you generally have a few options:
1. Drop the rows with missing values in that column. We could do this, and it may make sense if there are only a handful missing. But 177 is a lot (almost 20% of the data). Dropping 20% of the data just doesn't seem like a great idea.
2. Impute the missing values with an agreed upon value. But what value? We could just pick something random like 0 or -1 or 999 or whatever, but then we skew the distribution since this feature is continuous. A better idea would be to impute it with some kind of average value in the feature distribution, e.g. the column's mean, median, or mode. This is equivalent to saying "I don't know what this value is, and without any better info my best guess is the average from that column".
3. Build a model to learn what to impute with. That is, you train a model that can take in as an example a row containing all features but that feature with missing values, and try to predict what that missing value should be, given what the values in the other features are. This can be thought of as building a prior, where you ask "what is my best guess for this missing value, given that I know what the values in the other features are for this example". You can get a smarter imputer this way. However, it can be time consuming to build such a model, and the gains usually aren't helpful enough in practice to justify the effort. If you're interested in an easier way to do this, checkout sklearn's imputation approaches [here](https://scikit-learn.org/stable/modules/impute.html).
For simplicity, and because it's the most common way to deal with missing values in practice, I'll go with approach (2) for this tutorial. It's also become useful to create a *new* binary feature to keep track of *which* values in a given column are missing. The reason is that there may in fact be some information in the fact that those values are missing, and maybe this is information the ML model might find useful in learning to make predictions.
Focusing on the `Age` column, we can fill in missing values with a fixed value using the `df.fillna` method. I'll fill in the missing values with the *mean* age of 29.699 years. Doing this, we can see that there are no missing values in that column. Also note a consequence of imputing with the mean is that the mean of that column won't change. Why? Adding more values of the mean to a dataset never changes the mean (prove it). It *will* affect the spread of the data though, making it generally go down.
Notice I also create the new feature `Age_missing`, which is a binary feature where `0 = not missing` and `1 = missing`. I'll do this for every missing value column below as well. It's also good to check that `Age_missing` does indeed have 177 `1` values as well.
```
# Impute missing ages with the column mean; record which rows were missing first.
mean_age = df['Age'].mean()
df['Age_missing'] = df['Age'].isna().astype(int)
df['Age'] = df['Age'].fillna(value=mean_age)
df.head()
# Sanity check: should equal the original missing-value count (177).
df['Age_missing'].sum()
```
It looks like `Age` is now good to go. It's a numerical datatype with all missing values imputed. We'll also want to normalize the data before doing ML, but we'll handle that later.
Moving on we have `SibSp`. Recall that this column contains 6 distinct values `0, 1, ..., 5` and is numerical. There are no missing values, so we'll move on.
Next we have `Parch`, which is another numerical column with 7 distinct values `0, ..., 6`. There are no missing values here either.
The next non-string column is `Fare`. This is a numerical feature with no missing values. We'll need to normalize it later as well, but other than that we're good.
Moving on, we have `Embarked`. From the EDA it looks like it's a categorical feature with 3 values `S, C, Q`. It also has missing values, but only 2 of them. Filling those in makes sense. Since this feature is categorical, you'd want to fill it in with the *mode* (the most frequently occuring value). In this case that mode is `S`.
Next, we'll want to encode the categories with numbers, as ML models can't handle strings. To do this I'll perform the following mapping: `0 = S`, `1 = C`, `2 = Q`.
Since there are only 2 missing values I won't bother to create a missing column here. It's just too sparse to matter. Thus, once we've done the imputing and format conversions we're done with this column.
```
# Inspect category counts (NaN included), impute the 2 missing values with the
# mode, then encode the categories as integers: 0 = S, 1 = C, 2 = Q.
df['Embarked'].value_counts(dropna=False)
mode_embarked = df['Embarked'].mode().item()
df['Embarked'] = df['Embarked'].fillna(value=mode_embarked)
df['Embarked'].isna().sum()
df['Embarked'] = df['Embarked'].replace({'S': 0, 'C': 1, 'Q': 2})
df.head()
```
That's it for all the non-string columns. We have the rest in a minimal format that we can do ML with.
## Getting a Baseline
What we'll first do then is take those columns and build our inputs from those, using those to train what we'll call a *baseline* model. A baseline is a quick, dirty model done with minimal work that can tell you how well you can do without trying too hard. The baseline is often the first model you want to try to beat. If the baseline is good enough, no need to waste time on more feature engineering or model iterations. If not, you can go from there.
To start on the baseline, I'll extract a new dataframe `df_baseline` from `df` that contains only the columns we've cleaned up. Some notes:
- I won't do any re-mapping of the categorical variables for now, so we'll be keeping them as ordinal integers.
- I won't be doing any re-sampling techniques yet either. We'll let the class imbalance show through the metrics we use.
- To avoid any need to normalize the data I'll stick with tree-based models, which in practice tend to work well on this kind of data anyway.
```
# Baseline features: every cleaned column; drop the raw string columns for now.
df_baseline = df.drop(columns=['Name', 'Ticket', 'Cabin'])
df_baseline.head()
```
I'll now create the data arrays `X` and `y`, split off training and test sets, and train a couple of "good enough" tree-based models (random forest and a GBM) on that data to predict survival.
Note that in defining the training and test data I do it slightly different here than usual. Instead of splitting the training and test data directly, I instead split on their indexes and then index them out to get the data. The reason I do this is because we'll train more models on different features below, and it's good to do an objective comparison between the different approaches. If you sample different training and test data at each iteration, you don't necessarily know if one set of models is better than another.
**Note:** That said, there's a caveat. You *must* be cognizant of the fact that it's easy to overfit your test set this way if you're not careful. Reason being you're training a bunch of models, tweaking the data, training a bunch more models, etc. When you do that it's easy to accidentally overfit your test set because you find yourself optimizing on test set metrics and pumping them up. Thus, be careful when you do this. It may be good practice to hold out a third *validation set* if you wish to do extensive data and model tuning like I'm doing here. Tune on the validation set, and only evaluate against your test set at the very very end.
```
# Features: everything but the label column; labels: Survived.
X = df_baseline.drop(columns=['Survived']).values
y = df_baseline['Survived'].values
X.shape, y.shape
# Split on row indexes (not the arrays) so later feature sets reuse the same rows
# and model comparisons stay apples-to-apples.
idx_train, idx_test = train_test_split(range(len(y)), test_size=0.2, random_state=seed)
X_train = X[idx_train]
y_train = y[idx_train]
X_test = X[idx_test]
y_test = y[idx_test]
```
Let's train the models now. The two I'll train are a random forest and a GBM.
Looking at the output from either one we can observe a few things: From the metrics, we can see the label-skewing (more non-survivors than survivors) makes accuracy appear deceptively high. Precision is good, indicating false positives (non-survivors classified as survivors) isn't too bad. The recall looks a bit worse, indicating the model had more problems with false negatives (survivors classified as non-survivors). The AUC is reasonably good as well.
Not bad for a quick and dirty first go at it. Let's look at the feature importance plot for the random forest.
```
# Baseline random forest (kept small/regularized: 30 trees, min 10 samples per leaf).
rf_model_baseline = RandomForestClassifier(n_estimators=30, min_samples_leaf=10, n_jobs=-1, random_state=seed)
rf_model_baseline.fit(X_train, y_train)
yhat = rf_model_baseline.predict(X_test)
get_scores(y_test, yhat)
# Baseline gradient-boosted trees with default depth.
gbm_model_baseline = GradientBoostingClassifier(n_estimators=100, random_state=seed)
gbm_model_baseline.fit(X_train, y_train)
yhat = gbm_model_baseline.predict(X_test)
get_scores(y_test, yhat)
```
For the random forest model, we can also look at the feature importances to see if the model makes sense. It appears the model treats `Sex` as by far the most important feature in deciding whether somebody survived, which from our previous EDA makes a lot of sense. Females were far more likely to survive than males. After `Sex`, it seems both `Fare` and `Pclass` were about equally important. This makes sense as well. We'd expect wealthier folks (who often pay more for better tickets and better amenities) to be more likely to survive than poorer folks.
```
def plot_feature_importances(model, columns):
    """Horizontal bar chart of a fitted model's feature importances.

    Features are sorted least-to-most important, so the most important
    feature ends up at the top of the chart.

    model: fitted estimator exposing `feature_importances_`.
    columns: sequence of feature names aligned with the model's input columns.
    """
    # A single ascending argsort orders both the importances and their names,
    # replacing the original redundant double sort + reversed-argsort dance.
    order = np.argsort(model.feature_importances_)
    top_importances = model.feature_importances_[order]
    top_columns = [columns[i] for i in order]
    plt.figure()
    plt.title('Feature Importances')
    plt.barh(range(len(top_columns)), top_importances,
             color='r', align='center')
    plt.yticks(range(len(top_columns)), top_columns)
    plt.ylim([-1, len(top_columns)])
    plt.show()
columns = df_baseline.drop(columns=['Survived']).columns
plot_feature_importances(rf_model_baseline, columns)
```
**Note:** In real life ML engineering, depending on business objectives and priorities, you might stop here and opt not to continue to feature engineer or improve the models. The reason is you often have a set of metrics thresholds indicating what's "good enough". Once you reach those thresholds, it often doesn't matter in a practical sense whether you can improve results. It's "good enough" and you can move onto other things. Odds are that all the feature engineering we'll do will only improve the models by a few percent if that, meaning you should really justify to yourself if the considerable extra development effort (both training and production) is worth it to you before doing so. It may make sense in research or in Kaggle where you're trying to hit a state of the art, but in most other cases other priorities are often as or more important than your model metrics. Think carefully.
## Data Cleaning and Feature Engineering: Second Pass
Let's now go back and see if we can improve results by feature engineering a little more. For one thing we have 3 columns we dropped and never attempted to use: `Name`, `Cabin`, and `Ticket`. Let's look at those in a little more detail.
Let's start with `Name` and look at a sample of the values. Printing the first 10 names we can see some patterns. First, names by and large tend to be in the order `last name, title, first name, middle name, alias`. Outside of that the strings don't appear to be that consistently formatted.
Since we're trying to predict survival, it also *might* be worth printing out some examples of `Name` strings of those who survived vs those who didn't, and see if there's a pattern we can maybe exploit.
```
# Peek at raw names overall, then compare survivors vs non-survivors.
df['Name'].head(10)
df['Name'][df['Survived'] == 1].head(10)
df['Name'][df['Survived'] == 0].head(10)
```
One pattern appears very clear in those who survived vs didn't: Those with titles like `Mr.` or `Master.` seem less likely to have survived than those with titles like `Mrs.` or `Miss.`. This is yet another reminder that survival on the Titanic strongly correlated with sex, something we already saw above in the baseline model.
But, there may be something else to keeping these titles and making features out of them. So let's do that. I'm going to take the `Name` column and extract out of it a new column `Titles`. Let's first write some code to extract out the title for each row, and then show the value counts of those, so we can see which titles are worth keeping vs lumping together into a `Rare` group.
The way I'll extract titles is by using the regular expression ("regex") string `' ([A-Za-z]+)\.'`. If you're not familiar with regexes, this particular regex string says to take the input string from `Name`, match with any substring that
- doesn't come first in the input string,
- contains one or more alphabetical characters, possibly capitalized,
- ends in a period.
This makes sense for this particular problem of extracting titles because the titles aren't written first in the string, and always contain alphabetical characters followed by a period. After matching with substrings of this form, the regex will extract the alphabetical text *before* the period, and then show that (which we'll eventually dump into a new `Titles` column).
Looking at the value counts, we can see that the set of titles `Mr, Miss, Mrs, Master` all occur over 10 times in the dataset, and the rest look to be rare. We'll map all of these rare titles into their own `Rare` title class and keep the rest separately.
```
# Raw string for the regex: with a plain string, '\.' is an invalid string
# escape (a SyntaxWarning on modern Python). The pattern captures an
# alphabetical run preceded by a space and followed by a period — the title.
df['Name'].str.extract(r' ([A-Za-z]+)\.').value_counts()
titles = ['Mr', 'Miss', 'Mrs', 'Master', 'Rare']
# Extract the title; lump anything outside the four common titles into 'Rare'.
df['Titles'] = df['Name'].str.extract(r' ([A-Za-z]+)\.')
df['Titles'] = df['Titles'].apply(lambda x: x if x in titles else 'Rare')
df['Titles'].value_counts()
```
The last move will be to make `Titles` a good feature by mapping its values to integers and treating it as a categorical variable. After doing this, we will go ahead and drop the `Name` column, as other than the title there just doesn't seem to be much there that's left to extract in my opinion.
```
# Map title strings to integer codes (ordering given by `titles`), then drop Name —
# the title is the only useful signal we found in it.
titles_dict = {title:idx for (idx,title) in enumerate(titles)}
df['Titles'] = df['Titles'].apply(lambda x: titles_dict[x])
df['Titles'].value_counts()
df = df.drop(columns='Name')
df.head()
```
The next column to attend to is `Ticket`. Let's again look at what these values tend to look like and see if there's a pattern. It looks like these are mostly strings of numbers. Some of them also have an alphanumeric prefix like `A/5` or `STON/02.`. Looking at the value counts, a small handful of ticket numbers are repeated, but the vast majority only have one count.
Looking at the numbers, it's difficult to say what they refer to. If they're like most ticket numbers today they're just arbitrary strings of numbers with no clear meaning. Maybe there's something to saving the prefixes though?
```
# Inspect ticket strings; most are unique numeric strings, some carry an alphanumeric prefix.
df['Ticket'].head(10)
df['Ticket'].value_counts()
```
Focusing on the idea that the prefixes might have meaning, let's split off that part of the string and print out only the rows that have those prefixes (which I determine by checking if the string split contains at least 2 elements and dropping the rest). It looks like 226 of 891 passengers had a ticket with a prefix. Of those, it looks like 60 passengers had a `PC` prefix, 27 a `C.A.` prefix, etc.
```
# A ticket has a prefix iff splitting on whitespace yields >= 2 tokens;
# keep the first token as the prefix, else None (rows without a prefix).
ticket_prefixes = df['Ticket'].str.split().apply(lambda x: x[0] if len(x) >= 2 else None)
ticket_prefixes.value_counts()
```
The prefix strings do look a little messy. There are a lot of prefixes that are probably the same (e.g. `A/5`, `A/5.`, `A./5.`, and `A.5.`). It may help to clean these up by stripping out the punctuation, and then comparing.
This looks somewhat better. It looks like we have 6 values that occur at least 10 times. What I think I'll do is this: Create new category values for the top 6 most occuring prefixes, and map the rest to a new `Other` value. This will give us a new column with 7 categories. As most rows won't have a prefix, most of the time the value will be `Other`. But enough rows *do* have another value that there should be enough information in this column to use. This is *not* to say it'll do any good with improving the model, but it's something to try and see.
```
# Strip punctuation so variants like 'A/5', 'A/5.', 'A.5.' collapse together.
# regex=False forces literal replacement: under older pandas the default was
# regex=True, in which case '.' would match — and delete — every character.
ticket_prefixes = ticket_prefixes.str.replace('/', '', regex=False)
ticket_prefixes = ticket_prefixes.str.replace('.', '', regex=False)
ticket_prefixes.value_counts()
prefixes = ['PC', 'CA', 'A5', 'SOTONOQ', 'STONO', 'WC', 'Other']
# Keep the 6 most common prefixes; everything else (incl. no-prefix rows) -> 'Other'.
df['Prefix'] = ticket_prefixes.apply(lambda x: x if x in prefixes else 'Other')
df['Prefix'].value_counts()
```
Let's now map these new `Prefix` values to integers and treat them as a proper category. Note by the value counts this feature is highly imbalanced, so this may be kind of a crap shoot with improving performance.
```
# Encode prefix categories as integers (ordering given by `prefixes`), then drop Ticket.
prefix_dict = {prefix:idx for (idx,prefix) in enumerate(prefixes)}
df['Prefix'] = df['Prefix'].apply(lambda x: prefix_dict[x])
df['Prefix'].value_counts()
df = df.drop(columns='Ticket')
df.head()
```
Next is `Cabin`, which like `Name` and `Ticket` appears to be a high-dimensional categorical column, but unlike those it has lots of missing values. A full 77% of the data is missing! How should you impute this one? Let's step back and see if we can extract a feature here.
Printing the first few values, it looks like the cabin value is formatted letter-number, sometimes having multiple such values per passenger. Let's split these up by letter and number and see if we can spot a pattern in each separately.
```
# Peek at non-missing cabin values; note the letter+number format and ~77% missingness.
df['Cabin'].dropna().head(10)
df['Cabin'].value_counts(dropna=False)
```
Below I look at some useful statistics that might be worth looking into for creating a feature here:
- From the printouts above it looks like some passengers have multiple cabin numbers. How frequent is this? If this is a fairly common thing, maybe we should create a feature tracking this.
- How often does each letter prefix in the cabin numbers appear? If they occur with enough frequency, it may be worth tracking them as a feature.
- How often does each number suffix in the cabin numbers appear? Again, if they occur enough, may be worth tracking.
Looking at these one by one. First, it looks like there are only 24 rows with multiple cabin numbers. That's like 2% of the data. Probably not worth keeping track of? This may be something to come back to later in model improvement iterations.
Second, looking at the value counts of the cabin prefix letters, they seem to take on 8 possible values, with most occuring with enough frequency that it may be worth tracking this.
Last, looking at the value counts of the cabin suffix numbers, there seem to be a lot of these, with none occurring at least 10 times. This suggests to me that this isn't worth keeping track of.
```
# number of rows with multiple cabins
df['Cabin'].str.split().dropna().apply(lambda x: len(x) > 1).sum()
# value counts of the letter prefixes (first char of the first cabin token)
df['Cabin'].str.split().dropna().apply(lambda x: x[0][0]).value_counts()
# value counts of the number suffixes (letter-only cabins mapped to None)
df['Cabin'].str.split().dropna().apply(lambda x: x[0][1:] if x[0][1:] != '' else None).value_counts()
```
Let's do this. We'll create a `Letter` feature that extracts and stores the cabin letter for each passenger. We'll keep `A, B, C, D, E, F` and map the rest to `Other`.
Since there are so many missing values, we need to deal with these as well. Since we can't drop them, I'll do the next simplest thing for now: I'll create a new `Letter_missing` feature to track which values are missing, and then impute the missing values in `Letter` with the mode `C`. This *does* risk grossly overweighting the importance of `C` relative to other letters, but hopefully the missing column combined with a model capable of learning complex correlations will help deal with this.
Finally, I'll convert these into proper categorical features by mapping them to integers, and then drop the `Cabin` column.
```
# The deck letter is always the first character of the Cabin string (even for
# multi-cabin rows like 'C23 C25 C27'), and .str[0] propagates NaN on its own —
# no need for the original '?' sentinel fill/replace round-trip.
df['Letter'] = df['Cabin'].str[0]
df['Letter'].value_counts()
letters = ['A', 'B', 'C', 'D', 'E', 'F', 'Other']
# Keep common deck letters, map rare ones to 'Other'; leave NaN in place for now.
df['Letter'] = df['Letter'].apply(lambda x: x if x in letters or pd.isna(x) else 'Other')
df['Letter'].value_counts()
# Track which rows had no cabin at all before imputing.
df['Letter_missing'] = df['Letter'].isna().astype(int)
df['Letter_missing'].value_counts()
# Impute missing letters with the mode, then encode categories as integers and drop Cabin.
mode_letter = df['Letter'].mode().item()
df['Letter'] = df['Letter'].replace(np.nan, mode_letter)
df['Letter'].value_counts()
letter_dict = {letter:idx for (idx,letter) in enumerate(letters)}
df['Letter'] = df['Letter'].apply(lambda x: letter_dict[x])
df['Letter'].value_counts()
df = df.drop(columns='Cabin')
df.head()
```
## Model Training
Now we're good to go. We've feature engineered all of our original input columns. We have an array of numerical data that we can do machine learning with.
Let's start by looking at what happens if we train a model with the new features, but don't do any one-hot encoding of the categorical features.
Looks like we're already doing pretty good. The F1 scores for both RF and GBM have pulled up a few percent. Can we do still better by one-hot encoding, or by balancing the data?
```
# Rebuild X/y with the engineered features (ordinal-encoded categoricals, no one-hot yet).
X = df.copy().drop(columns='Survived').values
y = df['Survived'].copy().values
X.shape, y.shape
# Reuse the index split from the baseline for an apples-to-apples comparison.
X_train = X[idx_train]
y_train = y[idx_train]
X_test = X[idx_test]
y_test = y[idx_test]
rf_model_no_onehot = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=seed)
rf_model_no_onehot.fit(X_train, y_train)
yhat = rf_model_no_onehot.predict(X_test)
get_scores(y_test, yhat)
gbm_model_no_onehot = GradientBoostingClassifier(n_estimators=100, random_state=seed)
gbm_model_no_onehot.fit(X_train, y_train)
yhat = gbm_model_no_onehot.predict(X_test)
get_scores(y_test, yhat)
```
Let's now instead treat the categorical features as we "should", by one-hot encoding them first. Recall that one-hot encoding is a way of converting one feature of k categories `A1, A2, ..., Ak` into k features of 2 categories `is_A1, is_A2, ..., is_Ak`. Essentially it creates a series of binary indicators that look for whether a particular category is present, each being its own feature.
I'll only one-hot encode the features we'd already decided were categorical, and that contain more than 2 categories. Note that if a feature only contains 2 categories you don't need to one-hot encode it. Why?
```
# One-hot encode the multi-category categorical features (binary ones need no encoding).
df_one_hot = pd.get_dummies(df, columns=['Pclass', 'Embarked', 'Titles', 'Prefix', 'Letter'])
df_one_hot.head()
X = df_one_hot.copy().drop(columns='Survived').values
y = df_one_hot['Survived'].copy().values
X.shape, y.shape
# Same index split again for comparability with earlier models.
X_train = X[idx_train]
y_train = y[idx_train]
X_test = X[idx_test]
y_test = y[idx_test]
```
Training an RF and GBM now, it looks like performance is slightly better or about the same as without the one-hot encodings. Let's see if balancing the training data will help.
```
# RF and GBM on the one-hot features, labels still imbalanced.
rf_model_unbal = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=seed)
rf_model_unbal.fit(X_train, y_train)
yhat = rf_model_unbal.predict(X_test)
get_scores(y_test, yhat)
gbm_model_unbal = GradientBoostingClassifier(n_estimators=100, random_state=seed)
gbm_model_unbal.fit(X_train, y_train)
yhat = gbm_model_unbal.predict(X_test)
get_scores(y_test, yhat)
```
Recall from a prior tutorial that we can do random upsampling of the training data easily using the imblearn `RandomOverSampler` class. This will upsample the training data until the labels are balanced by copying values with minority labels until they're balanced with the majority label.
Training models on this data, it looks like things actually got a bit worse for both. Maybe some tuning on the models will help, but other than that it doesn't seem that helpful.
```
# Randomly oversample the minority class in the training split only (never the test set).
X_train_bal,y_train_bal = RandomOverSampler(random_state=seed).fit_resample(X_train, y_train)
rf_model_bal = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=seed)
rf_model_bal.fit(X_train_bal, y_train_bal)
yhat = rf_model_bal.predict(X_test)
get_scores(y_test, yhat)
gbm_model_bal = GradientBoostingClassifier(n_estimators=100, random_state=seed)
gbm_model_bal.fit(X_train_bal, y_train_bal)
yhat = gbm_model_bal.predict(X_test)
get_scores(y_test, yhat)
```
## Final Thoughts
So what do we do now?
|(seed=123)|accuracy|precision|recall|F1|AUC|
|---|---|---|---|---|---|
|rf baseline|0.8547|**0.8545**|0.7231|0.7833|0.8265|
|gbm baseline|0.8380|0.7727|0.7846|0.7786|0.8265|
|rf no onehot|0.8492|0.7794|0.8154|0.7970|0.8419|
|gbm no onehot|**0.8659**|0.8154|0.8154|**0.8154**|**0.8551**|
|rf unbal, onehot|0.8492|0.7714|0.8308|0.8000|0.8452|
|gbm unbal, onehot|**0.8659**|0.8154|0.8154|**0.8154**|**0.8551**|
|rf bal, onehot|0.8324|0.7397|0.8308|0.7826|0.8321|
|gbm bal, onehot|0.8547|0.7746|**0.8462**|0.8088|0.8529|
- Issues changing the seed strongly affecting results (lack of data). Could solve with cross validation?
- Danger of overfitting to this particular test set. Really want a model that generalizes to unseen data well.
- Which metric to use? Often dangerous to focus on only one and optimize it, as weird edge cases can happen if you ignore others.
- We're doing about as well as we can expect with this data. Even Kaggle [discussions](https://www.kaggle.com/c/titanic/discussion) consider 77-85% good scores here. Not worth more effort?
- Think about the use case. What are you using this model for? How good does it have to be? What value does it provide? Don't just mindlessly fall into optimizing it. Real life isn't a Kaggle competition.
- Selecting the best model isn't about optimizing a metric, but finding best overall fit. Which one is "good enough", in the sense that it's accurate enough, fast enough, easy to implement and maintain, (where necessary) easy to interpret, etc.
- Possible improvements: Tune the hyperparameters of the above models more. Use cross validation for stable metric estimates. Use other models. Use more advanced resampling techniques like SMOTE/ADASYN. Take the unlabeled "test" set from Kaggle, label it with your best model, and use that as new training data on top of what you've already got. Try more advanced categorical encodings like learned embeddings. Better yet, turn all your features into categorical features by thresholding them.
| github_jupyter |
```
from __future__ import division
import pickle
import os
import sys
import copy
import random
import types
from queue import Queue
import numpy as np
import gym
from gym import spaces
from rllab.policies.categorical_gru_policy import CategoricalGRUPolicy
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.algos.trpo import TRPO
from rllab.misc.overrides import overrides
from matplotlib import pyplot as plt
%matplotlib inline
import matplotlib as mpl
mpl.rc('savefig', dpi=300)
mpl.rc('text', usetex=True)
# Directory where experiment artifacts are saved.
data_dir = os.path.join('.', 'data')
def sample_const_delay(d):
    """Return a zero-argument sampler that always yields the constant delay d."""
    return lambda: d
class StudentEnv(gym.Env):
    """Base gym environment simulating a student reviewing flashcard items.

    Each step the agent picks an item to review; the simulated student recalls
    it with probability given by a memory model (defined by subclasses), and
    the reward is the mean (log-)recall likelihood over all items.
    """
    def __init__(self, n_items=10, n_steps=100, discount=1., sample_delay=None, reward_func='likelihood'):
        # sample_delay: zero-arg callable returning the time gap between reviews;
        # defaults to a constant delay of 1.
        if sample_delay is None:
            self.sample_delay = sample_const_delay(1)
        else:
            self.sample_delay = sample_delay
        self.curr_step = None  # None until _reset() is called
        self.n_steps = n_steps
        self.n_items = n_items
        self.now = 0  # simulated clock, advanced by sampled delays
        self.curr_item = 0
        self.curr_outcome = None
        self.curr_delay = None
        self.discount = discount
        self.reward_func = reward_func  # 'likelihood' or 'log_likelihood'
        self.action_space = spaces.Discrete(n_items)
        # Observation: [item id, outcome (0/1), review timestamp, delay]
        self.observation_space = spaces.Box(np.zeros(4), np.array([n_items-1, 1, sys.maxsize, sys.maxsize]))
    def _recall_likelihoods(self):
        # Subclasses return per-item recall probabilities.
        raise NotImplementedError
    def _recall_log_likelihoods(self, eps=1e-9):
        # eps guards against log(0) for items with vanishing recall probability.
        return np.log(eps + self._recall_likelihoods())
    def _update_model(self, item, outcome, timestamp, delay):
        # Subclasses update their memory model after each review.
        raise NotImplementedError
    def _obs(self):
        # The review happened curr_delay time units before `now`.
        timestamp = self.now - self.curr_delay
        return np.array([self.curr_item, self.curr_outcome, timestamp, self.curr_delay], dtype=int)
    def _rew(self):
        # Reward is the mean (log-)recall likelihood across all items.
        if self.reward_func == 'likelihood':
            return self._recall_likelihoods().mean()
        elif self.reward_func == 'log_likelihood':
            return self._recall_log_likelihoods().mean()
        else:
            raise ValueError
    def _step(self, action):
        # Guard against stepping a finished or never-reset environment.
        if self.curr_step is None or self.curr_step >= self.n_steps:
            raise ValueError
        if action < 0 or action >= self.n_items:
            raise ValueError
        self.curr_item = action
        # Bernoulli recall outcome under the model's current recall probability.
        self.curr_outcome = 1 if np.random.random() < self._recall_likelihoods()[action] else 0
        self.curr_step += 1
        self.curr_delay = self.sample_delay()
        self.now += self.curr_delay
        self._update_model(self.curr_item, self.curr_outcome, self.now, self.curr_delay)
        obs = self._obs()
        r = self._rew()
        done = self.curr_step == self.n_steps
        info = {}
        return obs, r, done, info
    def _reset(self):
        self.curr_step = 0
        self.now = 0
        # Seed the episode by reviewing one uniformly random item.
        return self._step(np.random.choice(range(self.n_items)))[0]
# Hyperparameters of the priors used when sampling DASH model parameters:
# normal priors for difficulties, log-normal for decay exponents / delay coefficient.
item_difficulty_mean = 1
item_difficulty_std = 1
log_item_decay_exp_mean = 1
log_item_decay_exp_std = 1
log_delay_coef_mean = 0
log_delay_coef_std = 0.01
def sample_item_difficulties(n_items):
    """Draw one difficulty per item from the module-level normal prior."""
    return np.random.normal(
        item_difficulty_mean, item_difficulty_std, n_items)
def sample_student_ability():
    """Student ability is held fixed at zero in this simulation."""
    return 0
def sample_window_cw(n_windows):
    """Inverse-square weights for correct-counts, ascending toward the most
    recent window (last weight is 1)."""
    descending = np.arange(n_windows, 0, -1)
    return 1.0 / descending ** 2
def sample_window_nw(n_windows):
    """Inverse-square weights for attempt-counts, ascending toward the most
    recent window (same schedule as sample_window_cw)."""
    descending = np.arange(n_windows, 0, -1)
    return 1.0 / descending ** 2
def sample_item_decay_exps(n_items):
    """Log-normal per-item decay exponents, using the module-level log prior."""
    log_draws = np.random.normal(
        log_item_decay_exp_mean, log_item_decay_exp_std, n_items)
    return np.exp(log_draws)
def sample_student_decay_exp():
    """The student-level decay exponent is held fixed at zero."""
    return 0
def sample_delay_coef():
    """Log-normal delay coefficient; the tight prior keeps it near 1."""
    log_coef = np.random.normal(log_delay_coef_mean, log_delay_coef_std)
    return np.exp(log_coef)
class DASHEnv(StudentEnv):
    """Student model with windowed study history and power-law forgetting.

    Recall probability = sigmoid(ability - difficulty + study_history)
    damped by (1 + delay_coef * time_since_last_review) ** -f, where the
    study history is a weighted sum of log(1 + counts) over past windows.
    (Plotted later under the label 'Generalized Power-Law'.)
    """
    def __init__(
        self, n_windows=5, item_difficulties=None, student_ability=None,
        window_cw=None, window_nw=None, item_decay_exps=None, student_decay_exp=None,
        delay_coef=None, **kwargs):
        super(DASHEnv, self).__init__(**kwargs)
        # Each parameter falls back to its sampled prior when not supplied;
        # per-item vectors are validated against n_items.
        if item_difficulties is None:
            self.item_difficulties = sample_item_difficulties(self.n_items)
        else:
            if len(item_difficulties) != self.n_items:
                raise ValueError
            self.item_difficulties = item_difficulties
        if student_ability is None:
            self.student_ability = sample_student_ability()
        else:
            self.student_ability = student_ability
        if item_decay_exps is None:
            self.item_decay_exps = sample_item_decay_exps(self.n_items)
        else:
            if len(item_decay_exps) != self.n_items:
                raise ValueError
            self.item_decay_exps = item_decay_exps
        if student_decay_exp is None:
            self.student_decay_exp = sample_student_decay_exp()
        else:
            self.student_decay_exp = student_decay_exp
        if delay_coef is None:
            self.delay_coef = sample_delay_coef()
        else:
            self.delay_coef = delay_coef
        # The episode is divided into equal-length windows of review counts.
        if self.n_steps % n_windows != 0:
            raise ValueError
        self.n_windows = n_windows
        self.window_size = self.n_steps // self.n_windows
        self.n_correct = None
        self.n_attempts = None
        if window_cw is None:
            window_cw = sample_window_cw(self.n_windows)
        if window_nw is None:
            window_nw = sample_window_nw(self.n_windows)
        if len(window_cw) != n_windows or len(window_nw) != n_windows:
            raise ValueError
        # Same window weights replicated for every item: (n_items, n_windows).
        self.window_cw = np.tile(window_cw, self.n_items).reshape((self.n_items, self.n_windows))
        self.window_nw = np.tile(window_nw, self.n_items).reshape((self.n_items, self.n_windows))
        # Small random positive "last review" times so initial delays differ.
        self.init_tlasts = np.exp(np.random.normal(0, 0.01, self.n_items))
        self._init_params()
    def _init_params(self):
        # Reset per-episode state: review counts and last-review times.
        self.n_correct = np.zeros((self.n_items, self.n_windows))
        self.n_attempts = np.zeros((self.n_items, self.n_windows))
        #self.tlasts = np.ones(self.n_items) * -sys.maxsize
        self.tlasts = copy.deepcopy(self.init_tlasts)
    def _current_window(self):
        return min(self.n_windows - 1, self.curr_step // self.window_size)
    def _recall_likelihoods(self):
        curr_window = self._current_window()
        # Weighted log-count history over *completed* windows only.
        study_histories = (self.window_cw[:, :curr_window]*np.log(
            1 + self.n_correct[:, :curr_window]) + self.window_nw[:, :curr_window]*np.log(
            1 + self.n_attempts[:, :curr_window])).sum(axis=1)
        m = 1 / (1 + np.exp(-(self.student_ability - self.item_difficulties + study_histories)))
        f = np.exp(self.student_decay_exp - self.item_decay_exps)
        delays = self.now - self.tlasts
        return m / (1 + self.delay_coef * delays)**f
    def _update_model(self, item, outcome, timestamp, delay):
        curr_window = self._current_window()
        if outcome == 1:
            self.n_correct[item, curr_window] += 1
        self.n_attempts[item, curr_window] += 1
        self.tlasts[item] = timestamp
    def _reset(self):
        self._init_params()
        return super(DASHEnv, self)._reset()
def sample_item_decay_rates(n_items):
    """Log-normal per-item forgetting rates centered at 0.077."""
    log_rates = np.random.normal(np.log(0.077), 1, n_items)
    return np.exp(log_rates)
class EFCEnv(StudentEnv):
    '''exponential forgetting curve

    Recall probability for each item is exp(-rate * elapsed / strength),
    where strength counts the number of attempts so far.
    '''
    def __init__(self, item_decay_rates=None, **kwargs):
        super(EFCEnv, self).__init__(**kwargs)
        if item_decay_rates is None:
            self.item_decay_rates = sample_item_decay_rates(self.n_items)
        else:
            self.item_decay_rates = item_decay_rates
        self.tlasts = None
        self.strengths = None
        # Random positive initial "last review" times so episodes start with
        # nonzero elapsed time per item.
        self.init_tlasts = np.exp(np.random.normal(0, 1, self.n_items))
        self._init_params()
    def _init_params(self):
        #self.tlasts = np.ones(self.n_items) * -sys.maxsize
        self.tlasts = copy.deepcopy(self.init_tlasts)
        self.strengths = np.ones(self.n_items)
    def _recall_likelihoods(self):
        return np.exp(-self.item_decay_rates*(self.now - self.tlasts)/self.strengths)
    def _update_model(self, item, outcome, timestamp, delay):
        #self.strengths[item] = max(1, self.strengths[item] + 2 * outcome - 1) # fictional Leitner system
        self.strengths[item] += 1 # num attempts
        self.tlasts[item] = timestamp
    def _reset(self):
        self._init_params()
        return super(EFCEnv, self)._reset()
def sample_loglinear_coeffs(n_items):
    """Weights for HLR features: fixed [1, 1, 0] for (attempts, correct,
    incorrect), then one random bias per item."""
    item_biases = np.random.normal(0, 1, n_items)
    return np.concatenate((np.array([1, 1, 0]), item_biases))
class HLREnv(StudentEnv):
    '''exponential forgetting curve with log-linear memory strength

    Memory strength (half-life) per item is exp(coeffs . feats), where feats
    are (n_attempts, n_correct, n_incorrect) plus a one-hot item indicator.
    '''
    def __init__(self, loglinear_coeffs=None, **kwargs):
        super(HLREnv, self).__init__(**kwargs)
        if loglinear_coeffs is None:
            self.loglinear_coeffs = sample_loglinear_coeffs(self.n_items)
        else:
            self.loglinear_coeffs = loglinear_coeffs
        # 3 count features + one coefficient per item (one-hot block below).
        assert self.loglinear_coeffs.size == 3 + self.n_items
        self.tlasts = None
        self.loglinear_feats = None
        self.init_tlasts = np.exp(np.random.normal(0, 1, self.n_items))
        self._init_params()
    def _init_params(self):
        #self.tlasts = np.ones(self.n_items) * -sys.maxsize
        self.tlasts = copy.deepcopy(self.init_tlasts)
        self.loglinear_feats = np.zeros((self.n_items, 3)) # n_attempts, n_correct, n_incorrect
        # Append an identity block so each item gets its own bias coefficient.
        self.loglinear_feats = np.concatenate((self.loglinear_feats, np.eye(self.n_items)), axis=1)
    def _strengths(self):
        # Per-item strength: exp of the dot product of coeffs with features.
        return np.exp(np.einsum('j,ij->i', self.loglinear_coeffs, self.loglinear_feats))
    def _recall_likelihoods(self):
        return np.exp(-(self.now - self.tlasts)/self._strengths())
    def _update_model(self, item, outcome, timestamp, delay):
        self.loglinear_feats[item, 0] += 1
        self.loglinear_feats[item, 1 if outcome == 1 else 2] += 1
        self.tlasts[item] = timestamp
    def _reset(self):
        self._init_params()
        return super(HLREnv, self)._reset()
def normalize(x):
    """Scale a nonnegative vector so its entries sum to 1.

    Converted from a name-bound lambda to a def (PEP 8); behavior unchanged.
    Note: an all-zero input divides by zero, as before.
    """
    return x / x.sum()
class Tutor(object):
    """Abstract scheduling policy.

    A tutor folds each observed interaction (item, outcome, timestamp, delay)
    into its internal state and proposes the next item to review.
    """
    def __init__(self):
        pass
    def _next_item(self):
        # Subclass hook: choose the next item to show.
        raise NotImplementedError
    def _update(self, item, outcome, timestamp, delay):
        # Subclass hook: absorb one observed interaction.
        raise NotImplementedError
    def act(self, obs):
        """Update internal state from `obs`, then pick the next item."""
        self._update(*list(obs))
        return self._next_item()
    def learn(self, r):
        """Reward hook; stateless tutors ignore it."""
        pass
    def train(self, env, n_eps=10):
        """Evaluate this tutor on `env` over `n_eps` episodes."""
        return run_eps(self, env, n_eps=n_eps)
    def reset(self):
        raise NotImplementedError
class RandTutor(Tutor):
    """Baseline tutor: picks every item uniformly at random and ignores all
    feedback."""
    def __init__(self, n_items, init_timestamp=0):
        self.n_items = n_items
    def _next_item(self):
        # Uniform draw over all item indices.
        return np.random.choice(range(self.n_items))
    def _update(self, item, outcome, timestamp, delay):
        # Stateless: feedback is discarded.
        pass
    def reset(self):
        pass
class LeitnerTutor(Tutor):
    '''sample item from an infinite leitner queue network

    Queue 0 holds unseen items; correct answers promote an item one queue up,
    incorrect answers demote it (never below queue 1). Higher queues are
    sampled less often (rate ~ 1/sqrt(queue index)).
    '''
    def __init__(self, n_items, init_timestamp=0, arrival_prob=0.1):
        self.arrival_prob = arrival_prob  # chance of introducing a new item
        self.n_items = n_items
        self.queues = None
        self.curr_q = None
        self.curr_item = None
        self.just_reset = False
        self.reset()
    def _next_item(self):
        """Sample a queue (0 = new items, >=1 = review), then pop an item."""
        if self.curr_item is not None:
            raise ValueError
        n_queues = len(self.queues)
        # Review queue i+1 is sampled at rate ~ 1/sqrt(i+1); empty queues get 0.
        q_sampling_rates = 1 / np.sqrt(np.arange(1, n_queues, 1))
        q_sampling_rates = np.array([x if not self.queues[i+1].empty() else 0 for i, x in enumerate(q_sampling_rates)])
        arrival_prob = self.arrival_prob if not self.queues[0].empty() else 0
        q_sampling_rates = np.concatenate((np.array([arrival_prob]), normalize(q_sampling_rates) * (1 - arrival_prob)))
        p = normalize(q_sampling_rates)
        if self.queues[0].qsize() == self.n_items: # no items have been shown yet
            self.curr_q = 0
        else:
            self.curr_q = np.random.choice(range(n_queues), p=p)
        self.curr_item = self.queues[self.curr_q].get(False)
        return self.curr_item
    def _update(self, item, outcome, timestamp, delay):
        if not self.just_reset and (self.curr_item is None or item != self.curr_item):
            raise ValueError
        if self.just_reset:
            # First observation of the episode: seed queue 0 with every other item.
            for i in range(self.n_items):
                if i != item:
                    self.queues[0].put(i)
        # Promote on success, demote on failure; reviewed items never return
        # to the arrival queue (minimum queue index is 1).
        next_q = max(1, self.curr_q + 2 * int(outcome) - 1)
        if next_q == len(self.queues):
            self.queues.append(Queue())
        self.queues[next_q].put(item)
        self.curr_item = None
        self.curr_q = None
        self.just_reset = False
    def reset(self):
        self.queues = [Queue()]
        self.curr_item = None
        self.curr_q = 0
        self.just_reset = True
    def train(self, gym_env, n_eps=10):
        """Grid-search `arrival_prob` over [0, 1), then run n_eps final episodes.

        Bug fix: the tuning and final-evaluation calls previously referenced a
        global `env` instead of the `gym_env` argument, so this method trained
        on whatever environment happened to be bound at module level.
        """
        arrival_probs = np.arange(0, 1, 0.01)
        n_eps_per_aprob = n_eps // arrival_probs.size
        assert n_eps_per_aprob > 0
        best_reward = None
        best_aprob = None
        for aprob in arrival_probs:
            self.arrival_prob = aprob
            reward = np.mean(run_eps(self, gym_env, n_eps=n_eps_per_aprob))
            if best_reward is None or reward > best_reward:
                best_aprob = aprob
                best_reward = reward
        self.arrival_prob = best_aprob
        return run_eps(self, gym_env, n_eps=n_eps)
class ThresholdTutor(Tutor):
    '''review item with recall likelihood closest to some threshold

    Maintains a private deep copy of the student environment and replays every
    observed interaction into it, so predicted recall probabilities stay in
    sync with the real student.
    '''
    def __init__(self, n_items, env, init_timestamp=0):
        self.n_items = n_items
        self.threshold = None  # tuned by train()
        self.env = copy.deepcopy(env)
        self.env._reset()
    def _next_item(self):
        # Item whose predicted recall probability is nearest the threshold.
        gaps = np.abs(self.env._recall_likelihoods() - self.threshold)
        return np.argmin(gaps)
    def _update(self, item, outcome, timestamp, delay):
        # Mirror the observed interaction in the internal student model.
        self.env._update_model(item, outcome, timestamp, delay)
        self.env.curr_step += 1
        self.env.now += delay
    def reset(self):
        self.env._reset()
    def train(self, env, n_eps=10):
        """Grid-search the threshold over [0, 1), then run n_eps final episodes."""
        thresholds = np.arange(0, 1, 0.01)
        n_eps_per_thresh = n_eps // thresholds.size
        assert n_eps_per_thresh > 0
        best_reward = None
        best_thresh = None
        for thresh in thresholds:
            self.threshold = thresh
            mean_rew = np.mean(run_eps(self, env, n_eps=n_eps_per_thresh))
            if best_reward is None or mean_rew > best_reward:
                best_thresh = thresh
                best_reward = mean_rew
        self.threshold = best_thresh
        return run_eps(self, env, n_eps=n_eps)
def make_rl_student_env(env):
    """Wrap a StudentEnv copy so observations are dense feature vectors.

    Items are embedded as random Gaussian vectors (a separate embedding per
    (item, outcome) pair), the delay is log-encoded into an outcome-indexed
    2-vector, and the raw outcome is appended. The original `_obs` is kept as
    `_obs_orig` and monkey-patched methods are bound onto the copy.
    """
    env = copy.deepcopy(env)
    # Embedding dimension grows logarithmically with the number of (item,
    # outcome) combinations.
    env.n_item_feats = int(np.log(2 * env.n_items))
    env.item_feats = np.random.normal(
        np.zeros(2*env.n_items*env.n_item_feats),
        np.ones(2*env.n_items*env.n_item_feats)).reshape((2*env.n_items, env.n_item_feats))
    env.observation_space = spaces.Box(
        np.concatenate((np.ones(env.n_item_feats) * -sys.maxsize, np.zeros(3))),
        np.concatenate((np.ones(env.n_item_feats + 2) * sys.maxsize, np.ones(1)))
    )
    def encode_item(self, item, outcome):
        # Row index selects the embedding for this (item, outcome) pair.
        return self.item_feats[self.n_items*outcome+item, :]
    def encode_delay(self, delay, outcome):
        # log(1+delay) stored in the slot indexed by the outcome.
        v = np.zeros(2)
        v[outcome] = np.log(1+delay)
        return v
    def vectorize_obs(self, item, outcome, delay):
        return np.concatenate((self.encode_item(item, outcome), self.encode_delay(delay, outcome), np.array([outcome])))
    env._obs_orig = env._obs
    def _obs(self):
        # The raw timestamp is dropped; only item, outcome, and delay are kept.
        item, outcome, timestamp, delay = env._obs_orig()
        return self.vectorize_obs(item, outcome, delay)
    env.encode_item = types.MethodType(encode_item, env)
    env.encode_delay = types.MethodType(encode_delay, env)
    env.vectorize_obs = types.MethodType(vectorize_obs, env)
    env._obs = types.MethodType(_obs, env)
    return env
from rllab.envs.gym_env import *
class MyGymEnv(GymEnv):
    """rllab GymEnv variant that wraps an already-constructed env instance.

    Unlike the stock GymEnv (which looks envs up by registry id), this takes
    the env object directly, skips registry metadata, and uses the env's
    `n_steps` as the rollout horizon.
    """
    def __init__(self, env, record_video=False, video_schedule=None, log_dir=None, record_log=False,
                 force_reset=False):
        if log_dir is None:
            if logger.get_snapshot_dir() is None:
                logger.log("Warning: skipping Gym environment monitoring since snapshot_dir not configured.")
            else:
                log_dir = os.path.join(logger.get_snapshot_dir(), "gym_log")
        Serializable.quick_init(self, locals())
        self.env = env
        self.env_id = ''  # no registry id for a hand-built env
        # Recording video without logging is unsupported.
        assert not (not record_log and record_video)
        if log_dir is None or record_log is False:
            self.monitoring = False
        else:
            if not record_video:
                video_schedule = NoVideoSchedule()
            else:
                if video_schedule is None:
                    video_schedule = CappedCubicVideoSchedule()
            self.env = gym.wrappers.Monitor(self.env, log_dir, video_callable=video_schedule, force=True)
            self.monitoring = True
        self._observation_space = convert_gym_space(env.observation_space)
        logger.log("observation space: {}".format(self._observation_space))
        self._action_space = convert_gym_space(env.action_space)
        logger.log("action space: {}".format(self._action_space))
        # Horizon comes from the student env's episode length.
        self._horizon = self.env.n_steps
        self._log_dir = log_dir
        self._force_reset = force_reset
class DummyTutor(Tutor):
    """Adapter that exposes a plain callable `policy(obs) -> action` through
    the Tutor interface (used to evaluate a trained RL policy)."""
    def __init__(self, policy):
        self.policy = policy
    def act(self, obs):
        # Delegate straight to the wrapped callable.
        return self.policy(obs)
    def reset(self):
        pass
class LoggedTRPO(TRPO):
    """TRPO with a per-iteration evaluation checkpoint.

    After each policy-optimization iteration, runs one greedy episode on the
    training env and records its mean reward in `rew_chkpts`.
    """
    def __init__(self, *args, **kwargs):
        super(LoggedTRPO, self).__init__(*args, **kwargs)
        self.rew_chkpts = []  # one evaluation reward per training iteration
    @overrides
    def train(self):
        self.start_worker()
        self.init_opt()
        for itr in range(self.current_itr, self.n_itr):
            paths = self.sampler.obtain_samples(itr)
            samples_data = self.sampler.process_samples(itr, paths)
            self.optimize_policy(itr, samples_data)
            # Evaluate the current policy with a single rollout.
            my_policy = lambda obs: self.policy.get_action(obs)[0]
            r, _ = run_ep(DummyTutor(my_policy), self.env)
            self.rew_chkpts.append(r)
            print(self.rew_chkpts[-1])
        self.shutdown_worker()
class RLTutor(Tutor):
    """Tutor backed by a recurrent (GRU) policy trained with TRPO.

    Bug fixes vs. the original:
    - `reset` was defined twice; the second definition silently shadowed the
      first and dropped `self.curr_obs = None`. Merged into one.
    - `_update` called `self.vectorize_obs`, which RLTutor never defines.
      The wrapped env already emits vectorized observations (its `_obs`
      returns `vectorize_obs(...)` — see make_rl_student_env), so the
      observation is stored as-is.
    """
    def __init__(self, n_items, init_timestamp=0):
        self.raw_policy = None  # set by train()
        self.curr_obs = None
    def train(self, gym_env, n_eps=10):
        """Wrap `gym_env` for rllab, run TRPO for `n_eps` iterations, and
        return the per-iteration evaluation rewards."""
        env = MyGymEnv(gym_env)
        policy = CategoricalGRUPolicy(
            env_spec=env.spec, hidden_dim=32,
            state_include_action=False)
        self.raw_policy = LoggedTRPO(
            env=env,
            policy=policy,
            baseline=LinearFeatureBaseline(env_spec=env.spec),
            batch_size=4000,
            max_path_length=env.env.n_steps,
            n_itr=n_eps,
            discount=0.99,
            step_size=0.01,
            verbose=False
        )
        self.raw_policy.train()
        return self.raw_policy.rew_chkpts
    def reset(self):
        self.curr_obs = None
        self.raw_policy.reset()
    def _next_item(self):
        if self.curr_obs is None:
            raise ValueError
        return self.raw_policy.get_action(self.curr_obs)[0]
    def _update(self, obs):
        # Observations from the RL-wrapped env are already feature vectors.
        self.curr_obs = obs
    def act(self, obs):
        self._update(obs)
        return self._next_item()
#
# based on SM2_mnemosyne.py <Peter.Bienstman@UGent.be>
#
# Time-unit constants for the SM-2 scheduler below.
HOUR = 60 * 60 # Seconds in an hour.
DAY = 24 * HOUR # Seconds in a day.
class Card(object):
    """Mutable per-item scheduling record used by the SM-2 scheduler."""
    def __init__(self, _id):
        self._id = _id
        # SM-2 state: latest grade, easiness factor, scheduling times, and
        # repetition/lapse bookkeeping. All start at zero.
        self.grade = 0
        self.easiness = 0
        self.next_rep = 0
        self.last_rep = 0
        self.acq_reps = 0
        self.ret_reps = 0
        self.acq_reps_since_lapse = 0
        self.ret_reps_since_lapse = 0
        self.lapses = 0
class SuperMnemoTutor(Tutor):
    """Scheduler based on http://www.supermemo.com/english/ol/sm2.htm.
    Note that all intervals are in seconds, since time is stored as
    integer POSIX timestamps.
    Since the scheduling granularity is days, all cards due on the same time
    should become due at the same time. In order to keep the SQL query
    efficient, we do this by setting 'next_rep' the same for all cards that
    are due on the same day.
    In order to allow for the fact that the timezone and 'day_starts_at' can
    change after scheduling a card, we store 'next_rep' as midnight UTC, and
    bring local time and 'day_starts_at' only into play when querying the
    database.

    Bug fixes vs. the original port:
    - `next_card` returned `db.card(...)` in the hopeless-duplicate case, but
      `db` does not exist in this standalone port; it now returns the card
      via `self.card(...)`.
    - `cards_unseen` passed a set to `random.sample`, which requires a
      sequence on Python >= 3.11; the unseen pile is now materialized as a
      list first.
    """
    def __init__(self, n_items, init_timestamp=0, non_memorised_cards_in_hand=10, fail_grade=0, pass_grade=2):
        self.non_memorised_cards_in_hand = non_memorised_cards_in_hand
        # Binary outcomes are mapped to these SM-2 grades in _update().
        self.fail_grade = fail_grade
        self.pass_grade = pass_grade
        self.state = 1
        # NOTE(review): curr_step is never incremented anywhere in this class,
        # so the guard in _update() never fires — confirm intended.
        self.curr_step = 0
        self.card_of_id = [Card(i) for i in range(n_items)]
        self.curr_item = None
        self.now = init_timestamp
        self.unseen = set(range(n_items))
        self._card_ids_memorised = []
        self.reset()
        self.n_items = n_items
    def true_scheduled_interval(self, card):
        """Since 'next_rep' is always midnight UTC for retention reps, we need
        to take timezone and 'day_starts_at' into account to calculate the
        true scheduled interval when we are doing the actual repetition.
        This basically undoes the operations from 'adjusted_now'.
        Note that during the transition between different timezones, this is
        not well-defined, but the influence on the scheduler will be minor
        anyhow.
        """
        interval = card.next_rep - card.last_rep
        if card.grade < 2:
            return interval
        interval += HOUR
        return int(interval)
    def reset(self, new_only=False):
        """'_card_ids_in_queue' contains the _ids of the cards making up the
        queue.
        The corresponding fact._ids are also stored in '_fact_ids_in_queue',
        which is needed to make sure that no sister cards can be together in
        the queue at any time.
        '_fact_ids_memorised' has a different function and persists over the
        different stages invocations of 'rebuild_queue'. It can be used to
        control whether or not memorising a card will prevent a sister card
        from being pulled out of the 'unseen' pile, even after the queue has
        been rebuilt.
        '_card_id_last' is stored to avoid showing the same card twice in a
        row.
        'stage' stores the stage of the queue building, and is used to skip
        over unnecessary queries.
        """
        self._card_ids_in_queue = []
        self._card_id_last = None
        self.new_only = new_only
        if self.new_only == False:
            self.stage = 1
        else:
            self.stage = 3
    def set_initial_grade(self, cards, grade):
        """Sets the initial grades for a set of sister cards, making sure
        their next repetitions do no fall on the same day.
        Note that even if the initial grading happens when adding a card, it
        is seen as a repetition.
        """
        new_interval = self.calculate_initial_interval(grade)
        new_interval += self.calculate_interval_noise(new_interval)
        last_rep = self.now
        next_rep = last_rep + new_interval
        for card in cards:
            card.grade = grade
            card.easiness = 2.5
            card.acq_reps = 1
            card.acq_reps_since_lapse = 1
            card.last_rep = last_rep
            card.next_rep = next_rep
            # Stagger sister cards across consecutive days.
            next_rep += DAY
    def calculate_initial_interval(self, grade):
        """The first repetition is treated specially, and gives longer
        intervals, to allow for the fact that the user may have seen this
        card before.
        """
        return (0, 0, 1*DAY, 3*DAY, 4*DAY, 7*DAY) [grade]
    def calculate_interval_noise(self, interval):
        # Jitter intervals so cards graded together drift apart over time.
        if interval == 0:
            noise = 0
        elif interval <= 10 * DAY:
            noise = random.choice([0, DAY])
        elif interval <= 60 * DAY:
            noise = random.uniform(-3 * DAY, 3 * DAY)
        else:
            noise = random.uniform(-0.05 * interval, 0.05 * interval)
        return int(noise)
    def cards_due_for_ret_rep(self):
        # NOTE(review): returns *all* items ordered by scheduled interval
        # (longest first) rather than filtering by due date — confirm intended.
        return sorted(
            range(self.n_items),
            key=lambda i: self.card_of_id[i].next_rep - self.card_of_id[i].last_rep,
            reverse=True)
    def cards_to_relearn(self, grade=0):
        # TODO: only return cards incorrectly answered in stage 1
        return [i for i in range(self.n_items) if self.card_of_id[i].grade == grade and i not in self.unseen]
    def cards_new_memorising(self, grade=0):
        return [i for i in range(self.n_items) if self.card_of_id[i].grade == grade and i not in self.unseen]
    def cards_unseen(self, limit=50):
        # random.sample requires a sequence (set support removed in Python
        # 3.11), so materialize the unseen pile as a list first. Callers only
        # iterate the result, so returning a list is compatible.
        unseen = list(self.unseen)
        return random.sample(unseen, limit) if limit < len(unseen) else unseen
    def card(self, card_id):
        return self.card_of_id[card_id]
    def interval_multiplication_factor(self, *args):
        # Plugin hook; identity in this port.
        return 1
    def rebuild_queue(self, learn_ahead=False):
        self._card_ids_in_queue = []
        # Stage 1
        #
        # Do the cards that are scheduled for today (or are overdue), but
        # first do those that have the shortest interval, as being a day
        # late on an interval of 2 could be much worse than being a day late
        # on an interval of 50.
        # Fetch maximum 50 cards at the same time, as a trade-off between
        # memory usage and redoing the query.
        if self.stage == 1:
            for _card_id in self.cards_due_for_ret_rep():
                self._card_ids_in_queue.append(_card_id)
            if len(self._card_ids_in_queue):
                return
            self.stage = 2
        # Stage 2
        #
        # Now rememorise the cards that we got wrong during the last stage.
        # Concentrate on only a limited number of non memorised cards, in
        # order to avoid too long intervals between repetitions.
        limit = self.non_memorised_cards_in_hand
        non_memorised_in_queue = 0
        if self.stage == 2:
            for _card_id in self.cards_to_relearn(grade=1):
                if _card_id not in self._card_ids_in_queue:
                    if non_memorised_in_queue < limit:
                        self._card_ids_in_queue.append(_card_id)
                        non_memorised_in_queue += 1
                if non_memorised_in_queue >= limit:
                    break
            for _card_id in self.cards_to_relearn(grade=0):
                if _card_id not in self._card_ids_in_queue:
                    if non_memorised_in_queue < limit:
                        # Grade-0 cards are queued twice for extra exposure.
                        self._card_ids_in_queue.append(_card_id)
                        self._card_ids_in_queue.append(_card_id)
                        non_memorised_in_queue += 1
                if non_memorised_in_queue >= limit:
                    break
            random.shuffle(self._card_ids_in_queue)
            # Only stop when we reach the non memorised limit. Otherwise, keep
            # going to add some extra cards to get more spread.
            if non_memorised_in_queue >= limit:
                return
            # If the queue is empty, we can skip stage 2 in the future.
            if len(self._card_ids_in_queue) == 0:
                self.stage = 3
        # Stage 3
        #
        # Now do the cards which have never been committed to long-term
        # memory, but which we have seen before.
        # Use <= in the stage check, such that earlier stages can use
        # cards from this stage to increase the hand.
        if self.stage <= 3:
            for _card_id in self.cards_new_memorising(grade=1):
                if _card_id not in self._card_ids_in_queue:
                    if non_memorised_in_queue < limit:
                        self._card_ids_in_queue.append(_card_id)
                        non_memorised_in_queue += 1
                if non_memorised_in_queue >= limit:
                    break
            for _card_id in self.cards_new_memorising(grade=0):
                if _card_id not in self._card_ids_in_queue:
                    if non_memorised_in_queue < limit:
                        self._card_ids_in_queue.append(_card_id)
                        self._card_ids_in_queue.append(_card_id)
                        non_memorised_in_queue += 1
                if non_memorised_in_queue >= limit:
                    break
            random.shuffle(self._card_ids_in_queue)
            # Only stop when we reach the grade 0 limit. Otherwise, keep
            # going to add some extra cards to get more spread.
            if non_memorised_in_queue >= limit:
                return
            # If the queue is empty, we can skip stage 3 in the future.
            if len(self._card_ids_in_queue) == 0:
                self.stage = 4
        # Stage 4
        #
        # Now add some cards we have yet to see for the first time.
        # Use <= in the stage check, such that earlier stages can use
        # cards from this stage to increase the hand.
        if self.stage <= 4:
            # Preferentially keep away from sister cards for as long as
            # possible.
            for _card_id in self.cards_unseen(limit=min(limit, 50)):
                if _card_id not in self._card_ids_in_queue \
                    and _card_id not in self._card_ids_memorised:
                    self._card_ids_in_queue.append(_card_id)
                    non_memorised_in_queue += 1
                if non_memorised_in_queue >= limit:
                    if self.new_only == False:
                        self.stage = 2
                    else:
                        self.stage = 3
                    return
            # If the queue is close to empty, start pulling in sister cards.
            if len(self._card_ids_in_queue) <= 2:
                for _card_id in self.cards_unseen(limit=min(limit, 50)):
                    if _card_id not in self._card_ids_in_queue:
                        self._card_ids_in_queue.append(_card_id)
                        non_memorised_in_queue += 1
                    if non_memorised_in_queue >= limit:
                        if self.new_only == False:
                            self.stage = 2
                        else:
                            self.stage = 3
                        return
            # If the queue is still empty, go to learn ahead of schedule.
            if len(self._card_ids_in_queue) == 0:
                self.stage = 5
        # Stage 5
        #
        # If we get to here, there are no more scheduled cards or new cards
        # to learn. The user can signal that he wants to learn ahead by
        # calling rebuild_queue with 'learn_ahead' set to True.
        # Don't shuffle this queue, as it's more useful to review the
        # earliest scheduled cards first. We only put 50 cards at the same
        # time into the queue, in order to save memory.
        if self.new_only == False:
            self.stage = 2
        else:
            self.stage = 3
    def next_card(self, learn_ahead=False):
        # Populate queue if it is empty, and pop first card from the queue.
        if len(self._card_ids_in_queue) == 0:
            self.rebuild_queue(learn_ahead)
            if len(self._card_ids_in_queue) == 0:
                return None
        _card_id = self._card_ids_in_queue.pop(0)
        # Make sure we don't show the same card twice in succession.
        if self._card_id_last:
            while _card_id == self._card_id_last:
                # Make sure we have enough cards to vary, but exit in hopeless
                # situations.
                if len(self._card_ids_in_queue) == 0:
                    self.rebuild_queue(learn_ahead)
                    if len(self._card_ids_in_queue) == 0:
                        return None
                    if set(self._card_ids_in_queue) == set([_card_id]):
                        # Bug fix: this returned `db.card(...)`, but there is
                        # no `db` in this standalone port.
                        return self.card(_card_id)
                _card_id = self._card_ids_in_queue.pop(0)
        self._card_id_last = _card_id
        return self.card(_card_id)
    def _next_item(self):
        if self.curr_item is not None:
            raise ValueError
        card = self.next_card()
        if card is None:
            raise ValueError
        self.curr_item = card._id
        return self.curr_item
    def grade_answer(self, card, new_grade, dry_run=False):
        # When doing a dry run, make a copy to operate on. This leaves the
        # original in the GUI intact.
        if dry_run:
            card = copy.copy(card)
        # Determine whether we learned on time or not (only relevant for
        # grades 2 or higher).
        if self.now - DAY >= card.next_rep: # Already due yesterday.
            timing = "LATE"
        else:
            if self.now < card.next_rep: # Not due today.
                timing = "EARLY"
            else:
                timing = "ON TIME"
        # Calculate the previously scheduled interval, i.e. the interval that
        # led up to this repetition.
        scheduled_interval = self.true_scheduled_interval(card)
        # If we memorise a card, keep track of its fact, so that we can avoid
        # pulling a sister card from the 'unseen' pile.
        if not dry_run and card.grade < 2 and new_grade >= 2:
            self._card_ids_memorised.append(card._id)
        if card.grade == -1: # Unseen card.
            actual_interval = 0
        else:
            actual_interval = self.now - card.last_rep
        # NOTE(review): Card.__init__ sets grade to 0, never -1, so the
        # unseen branches below appear unreachable in this port — confirm.
        if card.grade == -1:
            # The card has not yet been given its initial grade.
            card.easiness = 2.5
            card.acq_reps = 1
            card.acq_reps_since_lapse = 1
            new_interval = self.calculate_initial_interval(new_grade)
        elif card.grade in [0, 1] and new_grade in [0, 1]:
            # In the acquisition phase and staying there.
            card.acq_reps += 1
            card.acq_reps_since_lapse += 1
            new_interval = 0
        elif card.grade in [0, 1] and new_grade in [2, 3, 4, 5]:
            # In the acquisition phase and moving to the retention phase.
            card.acq_reps += 1
            card.acq_reps_since_lapse += 1
            if new_grade == 2:
                new_interval = DAY
            elif new_grade == 3:
                new_interval = random.choice([1, 1, 2]) * DAY
            elif new_grade == 4:
                new_interval = random.choice([1, 2, 2]) * DAY
            elif new_grade == 5:
                new_interval = 2 * DAY
            # Make sure the second copy of a grade 0 card doesn't show
            # up again.
            if not dry_run and card.grade == 0:
                if card._id in self._card_ids_in_queue:
                    self._card_ids_in_queue.remove(card._id)
        elif card.grade in [2, 3, 4, 5] and new_grade in [0, 1]:
            # In the retention phase and dropping back to the
            # acquisition phase.
            card.ret_reps += 1
            card.lapses += 1
            card.acq_reps_since_lapse = 0
            card.ret_reps_since_lapse = 0
            new_interval = 0
        elif card.grade in [2, 3, 4, 5] and new_grade in [2, 3, 4, 5]:
            # In the retention phase and staying there.
            card.ret_reps += 1
            card.ret_reps_since_lapse += 1
            # Don't update the easiness when learning ahead.
            if timing in ["LATE", "ON TIME"]:
                if new_grade == 2:
                    card.easiness -= 0.16
                if new_grade == 3:
                    card.easiness -= 0.14
                if new_grade == 5:
                    card.easiness += 0.10
                if card.easiness < 1.3:
                    card.easiness = 1.3
            if card.ret_reps_since_lapse == 1:
                new_interval = 6 * DAY
            else:
                if new_grade == 2 or new_grade == 3:
                    if timing in ["ON TIME", "EARLY"]:
                        new_interval = actual_interval * card.easiness
                    else:
                        # Learning late and interval was too long, so don't
                        # increase the interval and use scheduled_interval
                        # again as opposed to the much larger
                        # actual_interval * card.easiness.
                        new_interval = scheduled_interval
                if new_grade == 4:
                    new_interval = actual_interval * card.easiness
                if new_grade == 5:
                    if timing in ["EARLY"]:
                        # Learning ahead and interval was too short. To avoid
                        # that the intervals increase explosively when learning
                        # ahead, take scheduled_interval as opposed to the
                        # much larger actual_interval * card.easiness.
                        new_interval = scheduled_interval
                    else:
                        new_interval = actual_interval * card.easiness
            # Pathological case which can occur when learning ahead a card
            # in a single card database many times on the same day, such
            # that actual_interval becomes 0.
            if new_interval < DAY:
                new_interval = DAY
        # Allow plugins to modify new_interval by multiplying it.
        new_interval *= self.interval_multiplication_factor(card, new_interval)
        new_interval = int(new_interval)
        # When doing a dry run, stop here and return the scheduled interval.
        if dry_run:
            return new_interval
        # Add some randomness to interval.
        new_interval += self.calculate_interval_noise(new_interval)
        # Update card properties. 'last_rep' is the time the card was graded,
        # not when it was shown.
        card.grade = new_grade
        card.last_rep = self.now
        if new_grade >= 2:
            card.next_rep = card.last_rep + new_interval
        else:
            card.next_rep = card.last_rep
        return new_interval
    def _update(self, item, outcome, timestamp, delay):
        if self.curr_step > 0 and (self.curr_item is None or item != self.curr_item):
            raise ValueError
        self.now = timestamp
        try:
            self.unseen.remove(item)
        except KeyError:
            pass
        # Map the binary outcome onto the configured fail/pass SM-2 grades.
        self.grade_answer(self.card(item), (self.fail_grade, self.pass_grade)[int(outcome)])
        self.curr_item = None
def run_ep(agent, env):
    """Run one episode of `agent` on `env`.

    Returns (mean reward over the episode, list of observations).
    """
    agent.reset()
    obs = env.reset()
    rewards = []
    obs_log = []
    done = False
    while not done:
        obs, r, done, _ = env.step(agent.act(obs))
        agent.learn(r)
        rewards.append(r)
        obs_log.append(obs)
    return np.mean(rewards), obs_log
def run_eps(agent, env, n_eps=100):
    """Run `n_eps` episodes and collect each episode's mean reward."""
    return [run_ep(agent, env)[0] for _ in range(n_eps)]
# Experiment configuration: episode length, item-pool size, constant
# inter-review delay, discount, and repetition/episode counts.
n_steps = 200
n_items = 30
const_delay = 5
discount = 0.99
n_reps = 10  # independent environment samples per (env, reward) pair
n_eps = 100  # episodes (or TRPO iterations) per tutor
env_kwargs = {
    'n_items': n_items, 'n_steps': n_steps, 'discount': discount,
    'sample_delay': sample_const_delay(const_delay)
}
# The grid of conditions: two reward functions x three student models,
# each evaluated under five tutor policies (Random is the baseline).
reward_funcs = ['likelihood', 'log_likelihood']
envs = [
    ('EFC', EFCEnv),
    ('HLR', HLREnv),
    ('DASH', DASHEnv)
]
tutor_builders = [
    ('Random', RandTutor),
    ('Leitner', LeitnerTutor),
    ('SuperMnemo', SuperMnemoTutor),
    ('Threshold', ThresholdTutor),
    ('RL', RLTutor)
]
# Rewards tensor: (env x reward condition, tutor, episode, repetition).
R = np.zeros((len(envs) * len(reward_funcs), len(tutor_builders), n_eps, n_reps))
for h, (base_env_name, base_env) in enumerate(envs):
    for m, reward_func in enumerate(reward_funcs):
        # Flattened condition index for this (env, reward_func) pair.
        k = h*len(reward_funcs)+m
        env_name = base_env_name + '-' + ('L' if reward_func == 'likelihood' else 'LL')
        for j in range(n_reps):
            # Fresh sampled environment per repetition; the RL tutor gets the
            # feature-vectorized wrapper of the same environment.
            env = base_env(**env_kwargs, reward_func=reward_func)
            rl_env = make_rl_student_env(env)
            for i, (tutor_name, build_tutor) in enumerate(tutor_builders):
                if tutor_name.startswith('RL'):
                    agent = build_tutor(n_items)
                    R[k, i, :, j] = agent.train(rl_env, n_eps=n_eps)
                else:
                    # ThresholdTutor needs read access to the true student model.
                    if 'Thresh' in tutor_name:
                        agent = build_tutor(n_items, env=env)
                    else:
                        agent = build_tutor(n_items)
                    R[k, i, :, j] = agent.train(env, n_eps=n_eps)
                print(env_name, j, tutor_name, np.mean(R[k, i, :, j]))
            print()
# Bundle config and results, then pickle them.
# NOTE(review): `data_dir` is not defined in this chunk — presumably set in an
# earlier cell; confirm.
reward_logs = {
    'n_steps': n_steps,
    'n_items': n_items,
    'discount': discount,
    'const_delay': const_delay,
    'n_reps': n_reps,
    'n_eps': n_eps,
    'env_names': list(zip(*envs))[0],
    'tutor_names': list(zip(*tutor_builders))[0],
    'reward_funcs': reward_funcs,
    'rewards': R
}
with open(os.path.join(data_dir, 'reward_logs.pkl'), 'wb') as f:
    pickle.dump(reward_logs, f, pickle.HIGHEST_PROTOCOL)
# Reload results from six pickles (one per condition, suffixed .0 .. .5) and
# convert raw rewards to percent improvement over the Random baseline
# (tutor index 0), per repetition.
R = np.zeros((6, len(tutor_builders), n_eps, n_reps))
for i in range(6):
    with open(os.path.join(data_dir, 'reward_logs.pkl.%d' % i), 'rb') as f:
        reward_logs = pickle.load(f)
    # NOTE(review): assumes each file holds one condition's (tutor, episode,
    # repetition) array — confirm against how the files were produced.
    R[i] = reward_logs['rewards']
    print(reward_logs['env_names'], reward_logs['reward_funcs'])
for i in range(R.shape[0]):
    for j in range(R.shape[3]):
        R[i, :, :, j] = 100 * (R[i, :, :, j] - R[i, 0, :, j]) / abs(R[i, 0, :, j])
def moving_avg(d, n=5):
    """Return the trailing n-point moving average of sequence d (valid region only)."""
    # Prefix sums with a leading zero so prefix[i] == sum(d[:i]).
    prefix = np.concatenate((np.zeros(1), np.cumsum(d).astype(float)))
    window_sums = prefix[n:] - prefix[:-n]
    return window_sums / n
# PEP 8 (E731): use def instead of assigning lambdas to names — the
# functions get real names in tracebacks and room for docstrings.
def r_means(x):
    """Mean across axis 1 (repetitions), ignoring NaNs."""
    return np.nanmean(x, axis=1)

def r_stderrs(x):
    """Standard error of the mean across axis 1 (NaN-aware).

    NOTE(review): the denominator counts non-ZERO entries, not non-NaN
    ones — kept as in the original; confirm that is the intent.
    """
    return np.nanstd(x, axis=1) / np.sqrt(np.count_nonzero(x, axis=1))

def r_mins(x):
    """Lower band: mean - 1 stderr (a min-based band was tried previously)."""
    return r_means(x) - r_stderrs(x)  # np.nanmin(x, axis=1)

def r_maxs(x):
    """Upper band: mean + 1 stderr (a max-based band was tried previously)."""
    return r_means(x) + r_stderrs(x)  # np.nanmax(x, axis=1)
# Print summary statistics for every (env, reward-func, tutor) cell.
for h, (env_name, _) in enumerate(envs):
    for m, reward_func in enumerate(reward_funcs):
        k = h*len(reward_funcs)+m
        for i, (tutor_name, _) in enumerate(tutor_builders):
            print(env_name, reward_func, tutor_name, np.nanmean(R[k, i, :, :]), np.nanstd(R[k, i, :, :]))
# Human-readable plot titles for the environment short names.
title_of_env_name = {
    'EFC': 'Exponential Forgetting Curve',
    'HLR': 'Half-Life Regression',
    'DASH': 'Generalized Power-Law'
}
# One figure per (env, reward-func) pair: the RL tutor as a learning curve
# with a stderr band, every other tutor as a horizontal mean line.
for h, (env_name, _) in enumerate(envs):
    for m, reward_func in enumerate(reward_funcs):
        k = h*len(reward_funcs)+m
        plt.xlabel('Iteration')
        plt.ylabel('Percent better than Random\n(Reward: %s)' % reward_func.replace('_', '-').replace('likelihood', 'Likelihood').replace('log', 'Log'))
        plt.title('Student Model: %s' % title_of_env_name[env_name])
        # One color/linestyle per tutor, in tutor_builders order.
        colors = ['gray', 'teal', 'teal', 'teal', 'orange']
        styles = ['dotted', 'dashed', 'dashdot', 'solid', 'solid']
        for i, (tutor_name, _) in enumerate(tutor_builders):
            # Display name only; the underlying RL tutor is TRPO-based.
            if tutor_name == 'RL': tutor_name = 'TRPO'
            if tutor_name == 'TRPO':
                x = range(R.shape[2])
                y1 = r_mins(R[k, i, :, :])
                y2 = r_maxs(R[k, i, :, :])
                # Shaded mean +/- stderr band across repetitions.
                plt.fill_between(x, y1, y2, where=y2 >= y1, facecolor=colors[i], interpolate=True, alpha=0.5, label=tutor_name)
                plt.plot(r_means(R[k, i, :, :]), color=colors[i])
            else:
                # Non-learning tutors: flat line at their overall mean.
                plt.axhline(y=np.nanmean(R[k, i, :, :]), color=colors[i], linestyle=styles[i], label=tutor_name)
        # Re-label y ticks as LaTeX-escaped percentages.
        plt.yticks(plt.yticks()[0], [str(int(x)) + r'\%' for x in plt.yticks()[0]])
        plt.legend(loc='upper left')
        plt.savefig(os.path.join(data_dir, '%s-%s.pdf' % (env_name, reward_func)), bbox_inches='tight')
        plt.show()
```
| github_jupyter |
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex SDK: Custom training tabular regression model for online prediction with explainability
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_tabular_regression_online_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_tabular_regression_online_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_tabular_regression_online_explain.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex SDK to train and deploy a custom tabular regression model for online prediction with explanation.
### Dataset
The dataset used for this tutorial is the [Boston Housing Prices dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD.
### Objective
In this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex SDK, and then do a prediction with explanations on the deployed model by sending data. You can alternatively create custom models using `gcloud` command-line tool or online using Cloud Console.
The steps performed include:
- Create a Vertex custom job for training a model.
- Train a TensorFlow model.
- Retrieve and load the model artifacts.
- View the model evaluation.
- Set explanation parameters.
- Upload the model as a Vertex `Model` resource.
- Deploy the `Model` resource to a serving `Endpoint` resource.
- Make a prediction with explanation.
- Undeploy the `Model` resource.
### Costs
This tutorial uses billable components of Google Cloud:
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
### Set up your local development environment
If you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
- The Cloud Storage SDK
- Git
- Python 3
- virtualenv
- Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).
2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).
3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3.
4. Activate that environment and run `pip3 install Jupyter` in a terminal shell to install Jupyter.
5. Run `jupyter notebook` on the command line in a terminal shell to launch Jupyter.
6. Open this notebook in the Jupyter Notebook Dashboard.
## Installation
Install the latest version of Vertex SDK for Python.
```
import os

# On a managed Google Cloud Notebook, installs must go to the user
# site-packages; elsewhere a plain install is fine.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""

! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip3 install -U google-cloud-storage $USER_FLAG
if os.environ["IS_TESTING"]:
! pip3 install --upgrade tensorflow $USER_FLAG
```
### Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
```
import os

# Skip the restart under automated testing so the harness keeps control of
# the kernel.
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython

    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
This tutorial does not require a GPU runtime.
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.
```
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}

# Fall back to the gcloud-configured project when no ID was entered above.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID:", PROJECT_ID)

! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
```
# Region used for all Vertex AI and Cloud Storage operations below.
REGION = "us-central1"  # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
```
from datetime import datetime

# Timestamp suffix appended to resource names to keep them unique across
# sessions and users sharing a project.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys

# If on Google Cloud Notebook, then don't execute this code — the
# environment is already authenticated.
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth

        google_auth.authenticate_user()

    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}

# If no bucket was entered above, derive a unique default from the project
# id and session timestamp. A "-" separates the project id from the "aip"
# suffix; the previous code fused them ("<project>aip-..."), which produced
# hard-to-read bucket names.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
# Create the staging bucket in the chosen region (errors if it already exists).
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
# Sanity-check bucket access by listing its (initially empty) contents.
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
```
import google.cloud.aiplatform as aip
```
## Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
```
# Point the Vertex SDK at our project and staging bucket for all later calls.
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
```
#### Set hardware accelerators
You can set hardware accelerators for training and prediction.
Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
Otherwise specify `(None, None)` to use a container image to run on a CPU.
Learn more [here](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators) hardware accelerator support for your region
*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
```
# Training accelerators: honor the IS_TESTING_* override (count of K80s),
# otherwise run CPU-only.
if os.getenv("IS_TESTING_TRAIN_GPU"):
    TRAIN_GPU, TRAIN_NGPU = (
        aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_TRAIN_GPU")),
    )
else:
    TRAIN_GPU, TRAIN_NGPU = (None, None)

# Deployment accelerators, resolved with the same convention.
if os.getenv("IS_TESTING_DEPLOY_GPU"):
    DEPLOY_GPU, DEPLOY_NGPU = (
        aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_DEPLOY_GPU")),
    )
else:
    DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```
#### Set pre-built containers
Set the pre-built Docker container image for training and prediction.
For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).
```
# TensorFlow version tag for the prebuilt containers, e.g. "2-1" == TF 2.1.
if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2-1"

# NOTE(review): for TF 2.x, training images use the "tf-" prefix while
# prediction images use "tf2-" — this asymmetry appears deliberate
# (it matches the prebuilt container registry naming); confirm against the
# current container list before changing.
if TF[0] == "2":
    if TRAIN_GPU:
        TRAIN_VERSION = "tf-gpu.{}".format(TF)
    else:
        TRAIN_VERSION = "tf-cpu.{}".format(TF)
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
    if TRAIN_GPU:
        TRAIN_VERSION = "tf-gpu.{}".format(TF)
    else:
        TRAIN_VERSION = "tf-cpu.{}".format(TF)
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf-cpu.{}".format(TF)

# Fully-qualified image URIs for training and serving.
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)

print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
```
#### Set machine type
Next, set the machine type to use for training and prediction.
- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: The following is not supported for training:*
- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
```
# Training VM shape: honor the test-harness override, else default to
# an n1-standard machine; always 4 vCPUs.
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") or "n1-standard"

VCPU = "4"
TRAIN_COMPUTE = "-".join([MACHINE_TYPE, VCPU])
print("Train machine type", TRAIN_COMPUTE)

# Deployment VM shape, resolved the same way.
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") or "n1-standard"

VCPU = "4"
DEPLOY_COMPUTE = "-".join([MACHINE_TYPE, VCPU])
print("Deploy machine type", DEPLOY_COMPUTE)
```
# Tutorial
Now you are ready to start creating your own custom model and training for Boston Housing.
### Examine the training package
#### Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
- \_\_init\_\_.py
- task.py
The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).
#### Package Assembly
In the following cells, you will assemble the training package.
```
# Make folder for Python training script
! rm -rf custom
! mkdir custom

# Add package information
! touch custom/README.md
# Minimal setup.cfg/setup.py/PKG-INFO contents, written via echo below.
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg

setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py

pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO

# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
```
#### Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary:
- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
- Loads Boston Housing dataset from TF.Keras builtin datasets
- Builds a simple deep neural network model using TF.Keras model API.
- Compiles the model (`compile()`).
- Sets a training distribution strategy according to the argument `args.distribute`.
- Trains the model (`fit()`) with epochs specified by `args.epochs`.
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
- Saves the maximum value for each feature `f.write(str(params))` to the specified parameters file.
```
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing

import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()

# Command-line interface; defaults mirror the values passed from the
# notebook's CMDARGS cell.
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=0.001, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=20, type=int,
                    help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
                    default=100, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
                    help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
                    default='/tmp/param.txt', type=str,
                    help='Output file for parameters')
args = parser.parse_args()

print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))

# Pick a tf.distribute strategy from --distribute.
# Single Machine, single compute device
if args.distribute == 'single':
    # NOTE(review): tf.test.is_gpu_available() is deprecated in newer TF;
    # tf.config.list_physical_devices('GPU') is the replacement — confirm
    # against the pinned container's TF version before changing.
    if tf.test.is_gpu_available():
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
    strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
    """Load Boston Housing and normalize each feature column to [0, 1].

    Returns ((x_train, y_train), (x_test, y_test)). The 13 per-feature
    training max values are also written to args.param_file so serving
    code can apply the same normalization.
    """
    # Scaling Boston Housing data features
    def scale(feature):
        # `col_max` avoids shadowing the builtin max(); plain float replaces
        # np.float, which was removed in NumPy 1.20+.
        col_max = np.max(feature)
        feature = (feature / col_max).astype(float)
        return feature, col_max

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
        path="boston_housing.npz", test_split=0.2, seed=113
    )
    params = []
    # Normalize per feature COLUMN. The previous code indexed x_train[i],
    # which rescaled the first 13 ROWS (samples) rather than the 13 features
    # the surrounding docs describe.
    for col in range(13):
        x_train[:, col], col_max = scale(x_train[:, col])
        # NOTE(review): the test split is scaled by its own max (as before);
        # scaling by the training max would be the more conventional choice.
        x_test[:, col], _ = scale(x_test[:, col])
        params.append(col_max)
    # store the normalization (max) value for each feature
    with tf.io.gfile.GFile(args.param_file, 'w') as f:
        f.write(str(params))
    return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
    """Construct and compile the regression DNN (13 inputs -> 1 output)."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)))
    model.add(tf.keras.layers.Dense(128, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='linear'))
    # MSE loss; RMSprop at the learning rate supplied on the command line.
    model.compile(
        loss='mse',
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
    return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS

with strategy.scope():
    # Creation of dataset, and model building/compiling need to be within
    # `strategy.scope()`.
    model = build_and_compile_dnn_model()

# Train the model, then save it to --model-dir (or AIP_MODEL_DIR).
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
```
#### Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
```
# Package the trainer folder as a gzipped tarball and stage it in the bucket.
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
```
### Create and run custom training job
To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.
#### Create custom training job
A custom training job is created with the `CustomTrainingJob` class, with the following parameters:
- `display_name`: The human readable name for the custom training job.
- `container_uri`: The training container image.
- `requirements`: Package requirements for the training container image (e.g., pandas).
- `script_path`: The relative path to the training script.
```
# Define the custom training job: prebuilt TF container + local script,
# with extra pip requirements installed into the container at runtime.
job = aip.CustomTrainingJob(
    display_name="boston_" + TIMESTAMP,
    script_path="custom/trainer/task.py",
    container_uri=TRAIN_IMAGE,
    requirements=["gcsfs==0.7.1", "tensorflow-datasets==4.4"],
)

print(job)
```
### Prepare your command-line arguments
Now define the command-line arguments for your custom training container:
- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container.
- `--model-dir` : For our demonstrations, we use this command-line argument to specify where to store the model artifacts.
- direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
- indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
- `"--epochs=" + EPOCHS`: The number of epochs for training.
- `"--steps=" + STEPS`: The number of steps per epoch.
```
# Cloud Storage location where the trained model artifacts are written.
MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP)

EPOCHS = 20
STEPS = 100

DIRECT = True

# Epochs and steps are always passed; --model-dir is prepended only in
# "direct" mode (otherwise the service supplies AIP_MODEL_DIR instead).
CMDARGS = [
    "--epochs=" + str(EPOCHS),
    "--steps=" + str(STEPS),
]
if DIRECT:
    CMDARGS.insert(0, "--model-dir=" + MODEL_DIR)
```
#### Run the custom training job
Next, you run the custom job to start the training job by invoking the method `run`, with the following parameters:
- `args`: The command-line arguments to pass to the training script.
- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).
- `machine_type`: The machine type for the compute instances.
- `accelerator_type`: The hardware accelerator type.
- `accelerator_count`: The number of accelerators to attach to a worker replica.
- `base_output_dir`: The Cloud Storage location to write the model artifacts to.
- `sync`: Whether to block until completion of the job.
```
# Launch single-replica training; attach accelerators only when configured.
# sync=True blocks until the job completes.
if TRAIN_GPU:
    job.run(
        args=CMDARGS,
        replica_count=1,
        machine_type=TRAIN_COMPUTE,
        accelerator_type=TRAIN_GPU.name,
        accelerator_count=TRAIN_NGPU,
        base_output_dir=MODEL_DIR,
        sync=True,
    )
else:
    job.run(
        args=CMDARGS,
        replica_count=1,
        machine_type=TRAIN_COMPUTE,
        base_output_dir=MODEL_DIR,
        sync=True,
    )

# Remember where the artifacts landed for the upload/deploy steps below.
model_path_to_deploy = MODEL_DIR
```
## Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
```
import tensorflow as tf

# Load the SavedModel straight from Cloud Storage (tf handles gs:// paths).
local_model = tf.keras.models.load_model(MODEL_DIR)
```
## Evaluate the model
Now let's find out how good the model is.
### Load evaluation data
You will load the Boston Housing test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home).
You don't need the training data, which is why it is loaded as `(_, _)`.
Before you can run the data through evaluation, you need to preprocess it:
`x_test`:
1. Normalize (rescale) the data in each column by dividing each value by the maximum value of that column. This replaces each single value with a 32-bit floating point number between 0 and 1.
```
import numpy as np
from tensorflow.keras.datasets import boston_housing

# Load only the test split; the training split is discarded via (_, _).
(_, _), (x_test, y_test) = boston_housing.load_data(
    path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
    """Rescale *feature* to the [0, 1] range by dividing by its maximum value.

    Returns the scaled values as float32, the dtype the model expects.
    NOTE(review): assumes the feature values are non-negative -- confirm
    for each column before reuse elsewhere.
    """
    # Renamed from `max` to avoid shadowing the builtin of the same name.
    max_val = np.max(feature)
    return (feature / max_val).astype(np.float32)
# Keep one unscaled example around for the comparison printed below.
x_test_notscaled = x_test[0:1].copy()
# Normalize each of the 13 feature COLUMNS to [0, 1], as described in the
# text above. (The original code indexed x_test[i], which scales the first
# 13 *rows* -- individual examples -- rather than the feature columns.)
for col in range(13):
    x_test[:, col] = scale(x_test[:, col])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
```
### Perform the model evaluation
Now evaluate how well the model in the custom job did.
```
local_model.evaluate(x_test, y_test)
```
## Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently.
```
# Reload the SavedModel and inspect the serving signature's tensor names;
# these are needed later for prediction requests and explanation metadata.
loaded = tf.saved_model.load(model_path_to_deploy)

serving_signature = loaded.signatures["serving_default"]
serving_input = next(iter(serving_signature.structured_input_signature[1].keys()))
print("Serving function input:", serving_input)
serving_output = next(iter(serving_signature.structured_outputs.keys()))
print("Serving function output:", serving_output)

input_name = local_model.input.name
print("Model input name:", input_name)
output_name = local_model.output.name
print("Model output name:", output_name)
```
### Explanation Specification
To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to an Vertex `Model` resource. These settings are referred to as the explanation metadata, which consists of:
- `parameters`: This is the specification for the explainability algorithm to use for explanations on your model. You can choose between:
- Shapley - *Note*, not recommended for image data -- can be very long running
- XRAI
- Integrated Gradients
- `metadata`: This is the specification for how the algorithm is applied on your custom model.
#### Explanation Parameters
Let's first dive deeper into the settings for the explainability algorithm.
#### Shapley
Assigns credit for the outcome to each feature, and considers different permutations of the features. This method provides a sampling approximation of exact Shapley values.
Use Cases:
- Classification and regression on tabular data.
Parameters:
- `path_count`: This is the number of paths over the features that will be processed by the algorithm. An exact approximation of the Shapley values requires M! paths, where M is the number of features. For example, for the MNIST dataset of 28*28 images, M would be 784.
For any non-trivial number of features, this is too compute expensive. You can reduce the number of paths over the features to M * `path_count`.
#### Integrated Gradients
A gradients-based method to efficiently compute feature attributions with the same axiomatic properties as the Shapley value.
Use Cases:
- Classification and regression on tabular data.
- Classification on image data.
Parameters:
- `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time.
#### XRAI
Based on the integrated gradients method, XRAI assesses overlapping regions of the image to create a saliency map, which highlights relevant regions of the image rather than pixels.
Use Cases:
- Classification on image data.
Parameters:
- `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time.
In the next code cell, set the variable `XAI` to which explainability algorithm you will use on your custom model.
```
XAI = "ig"  # one of: "shapley", "ig", "xrai"

# Map each supported explainability algorithm to its parameter block.
_XAI_PARAMETERS = {
    "shapley": {"sampled_shapley_attribution": {"path_count": 10}},
    "ig": {"integrated_gradients_attribution": {"step_count": 50}},
    "xrai": {"xrai_attribution": {"step_count": 50}},
}
try:
    PARAMETERS = _XAI_PARAMETERS[XAI]
except KeyError:
    # The original if/elif chain left PARAMETERS undefined for an
    # unrecognized value, producing a confusing NameError below;
    # fail fast with a clear message instead.
    raise ValueError(
        "Unknown XAI algorithm: {}; expected one of {}".format(
            XAI, sorted(_XAI_PARAMETERS)
        )
    )
parameters = aip.explain.ExplanationParameters(PARAMETERS)
```
#### Explanation Metadata
Let's first dive deeper into the explanation metadata, which consists of:
- `outputs`: A scalar value in the output to attribute -- what to explain. For example, in a probability output \[0.1, 0.2, 0.7\] for classification, one wants an explanation for 0.7. Consider the following formulae, where the output is `y` and that is what we want to explain.
y = f(x)
Consider the following formulae, where the outputs are `y` and `z`. Since we can only do attribution for one scalar value, we have to pick whether we want to explain the output `y` or `z`. Assume in this example the model is object detection and y and z are the bounding box and the object classification. You would want to pick which of the two outputs to explain.
y, z = f(x)
The dictionary format for `outputs` is:
{ "outputs": { "[your_display_name]":
"output_tensor_name": [layer]
}
}
<blockquote>
- [your_display_name]: A human readable name you assign to the output to explain. A common example is "probability".<br/>
- "output_tensor_name": The key/value field to identify the output layer to explain. <br/>
- [layer]: The output layer to explain. In a single task model, like a tabular regressor, it is the last (topmost) layer in the model.
</blockquote>
- `inputs`: The features for attribution -- how they contributed to the output. Consider the following formulae, where `a` and `b` are the features. We have to pick which features to explain and how they contributed. Assume that this model is deployed for A/B testing, where `a` are the data_items for the prediction and `b` identifies whether the model instance is A or B. You would want to pick `a` (or some subset of it) for the features, and not `b` since it does not contribute to the prediction.
y = f(a,b)
The minimum dictionary format for `inputs` is:
{ "inputs": { "[your_display_name]":
"input_tensor_name": [layer]
}
}
<blockquote>
- [your_display_name]: A human readable name you assign to the input to explain. A common example is "features".<br/>
- "input_tensor_name": The key/value field to identify the input layer for the feature attribution. <br/>
- [layer]: The input layer for feature attribution. In a single input tensor model, it is the first (bottom-most) layer in the model.
</blockquote>
If the inputs to the model were image data, you could instead specify the following field as a reporting/visualization aid:
<blockquote>
    - "modality": "image": Indicates the field values are image data.
</blockquote>
Since the inputs to the model are tabular, you can specify the following two additional fields as reporting/visualization aids:
<blockquote>
- "encoding": "BAG_OF_FEATURES" : Indicates that the inputs are set of tabular features.<br/>
- "index_feature_mapping": [ feature-names ] : A list of human readable names for each feature. For this example, we use the feature names specified in the dataset.<br/>
- "modality": "numeric": Indicates the field values are numeric.
</blockquote>
```
# Explanation metadata: which tensors to attribute, and a human-readable
# name for each of the 13 tabular features (in column order).
FEATURE_NAMES = "crim zn indus chas nox rm age dis rad tax ptratio b lstat".split()

INPUT_METADATA = {
    "input_tensor_name": serving_input,
    "encoding": "BAG_OF_FEATURES",
    "modality": "numeric",
    "index_feature_mapping": FEATURE_NAMES,
}
OUTPUT_METADATA = {"output_tensor_name": serving_output}

input_metadata = aip.explain.ExplanationMetadata.InputMetadata(INPUT_METADATA)
output_metadata = aip.explain.ExplanationMetadata.OutputMetadata(OUTPUT_METADATA)

# Attribute the "medv" output (median home value) to the "features" input.
metadata = aip.explain.ExplanationMetadata(
    inputs={"features": input_metadata}, outputs={"medv": output_metadata}
)
```
## Upload the model
Next, upload your model to a `Model` resource using `Model.upload()` method, with the following parameters:
- `display_name`: The human readable name for the `Model` resource.
- `artifact`: The Cloud Storage location of the trained model artifacts.
- `serving_container_image_uri`: The serving container image.
- `sync`: Whether to execute the upload asynchronously or synchronously.
- `explanation_parameters`: Parameters to configure explaining for `Model`'s predictions.
- `explanation_metadata`: Metadata describing the `Model`'s input and output for explanation.
If the `upload()` method is run asynchronously, you can subsequently block until completion with the `wait()` method.
```
# Upload the trained artifacts as a Vertex Model resource with the
# explanation configuration attached. sync=False returns immediately,
# so block with wait() until the upload finishes.
model = aip.Model.upload(
    display_name="boston_" + TIMESTAMP,
    artifact_uri=MODEL_DIR,
    serving_container_image_uri=DEPLOY_IMAGE,
    explanation_parameters=parameters,
    explanation_metadata=metadata,
    sync=False,
)
model.wait()
```
## Deploy the model
Next, deploy your model for online prediction. To deploy the model, you invoke the `deploy` method, with the following parameters:
- `deployed_model_display_name`: A human readable name for the deployed model.
- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.
If only one model, then specify as { "0": 100 }, where "0" refers to this model being uploaded and 100 means 100% of the traffic.
If there are existing models on the endpoint, for which the traffic will be split, then use model_id to specify as { "0": percent, model_id: percent, ... }, where model_id is the model id of an existing model to the deployed endpoint. The percents must add up to 100.
- `machine_type`: The type of machine to use for training.
- `accelerator_type`: The hardware accelerator type.
- `accelerator_count`: The number of accelerators to attach to a worker replica.
- `starting_replica_count`: The number of compute instances to initially provision.
- `max_replica_count`: The maximum number of compute instances to scale to. In this tutorial, only one instance is provisioned.
```
DEPLOYED_NAME = "boston-" + TIMESTAMP
# Single model on the endpoint: route 100% of traffic to it.
TRAFFIC_SPLIT = {"0": 100}
MIN_NODES = 1
MAX_NODES = 1
if DEPLOY_GPU:
    endpoint = model.deploy(
        deployed_model_display_name=DEPLOYED_NAME,
        traffic_split=TRAFFIC_SPLIT,
        machine_type=DEPLOY_COMPUTE,
        accelerator_type=DEPLOY_GPU,
        accelerator_count=DEPLOY_NGPU,
        min_replica_count=MIN_NODES,
        max_replica_count=MAX_NODES,
    )
else:
    # NOTE(review): this branch still passes accelerator_type=DEPLOY_GPU
    # (falsy here) together with accelerator_count=0; presumably treated
    # as "no accelerator" by the SDK -- confirm.
    endpoint = model.deploy(
        deployed_model_display_name=DEPLOYED_NAME,
        traffic_split=TRAFFIC_SPLIT,
        machine_type=DEPLOY_COMPUTE,
        accelerator_type=DEPLOY_GPU,
        accelerator_count=0,
        min_replica_count=MIN_NODES,
        max_replica_count=MAX_NODES,
    )
```
### Get test item
You will use an example out of the test (holdout) portion of the dataset as a test item.
```
# Grab a single example (and its label) from the holdout set.
test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape)
```
### Make the prediction with explanation
Now that your `Model` resource is deployed to an `Endpoint` resource, one can do online explanations by sending prediction requests to the `Endpoint` resource.
#### Request
The format of each instance is:
[feature_list]
Since the explain() method can take multiple items (instances), send your single test item as a list of one test item.
#### Response
The response from the explain() call is a Python dictionary with the following entries:
- `ids`: The internal assigned unique identifiers for each prediction request.
- `predictions`: The prediction per instance.
- `deployed_model_id`: The Vertex AI identifier for the deployed `Model` resource which did the predictions.
- `explanations`: The feature attributions
```
# explain() accepts a batch of instances; send a one-item batch.
instances_list = [test_item.tolist()]
prediction = endpoint.explain(instances_list)
print(prediction)
```
### Understanding the explanations response
First, you will look what your model predicted and compare it to the actual value.
```
# First scalar of the first instance's prediction.
# NOTE(review): triple indexing into the explain() response -- verify this
# matches the response object's predictions structure.
value = prediction[0][0][0]
print("Predicted Value:", value)
```
### Examine feature attributions
Next you will look at the feature attributions for this particular example. Positive attribution values mean a particular feature pushed your model prediction up by that amount, and vice versa for negative attribution values.
```
from tabulate import tabulate
# Human-readable names for the 13 Boston Housing features, in column order.
feature_names = [
    "crim",
    "zn",
    "indus",
    "chas",
    "nox",
    "rm",
    "age",
    "dis",
    "rad",
    "tax",
    "ptratio",
    "b",
    "lstat",
]
# Attributions for the single instance sent above.
attributions = prediction.explanations[0].attributions[0].feature_attributions
rows = []
for i, val in enumerate(feature_names):
    # NOTE(review): indexes attributions by feature *name*; this assumes
    # feature_attributions is a mapping keyed by the names declared in
    # index_feature_mapping -- confirm against the explain() response shape.
    rows.append([val, test_item[i], attributions[val]])
print(tabulate(rows, headers=["Feature name", "Feature value", "Attribution value"]))
```
### Check your explanations and baselines
To better make sense of the feature attributions you're getting, you should compare them with your model's baseline. In most cases, the sum of your attribution values + the baseline should be very close to your model's predicted value for each input. Also note that for regression models, the `baseline_score` returned from AI Explanations will be the same for each example sent to your model. For classification models, each class will have its own baseline.
In this section you'll send 10 test examples to your model for prediction in order to compare the feature attributions with the baseline. Then you'll run each test example's attributions through a sanity check in the `sanity_check_explanations` method.
#### Get explanations
```
# Batch up the first 10 holdout examples for a single explain() request.
instances = [x_test[idx].tolist() for idx in range(10)]
response = endpoint.explain(instances)
```
#### Sanity check
In the function below you perform a sanity check on the explanations.
```
import numpy as np
def sanity_check_explanations(
    explanation, prediction, mean_tgt_value=None, variance_tgt_value=None
):
    """Run basic sanity checks on one explanation and print the results."""
    total_checks = 1
    baseline_score = explanation.attributions[0].baseline_output_value
    print("baseline:", baseline_score)
    # Check 1: the prediction should differ noticeably from the baseline;
    # otherwise attributions are likely to be uninformative. If it fails,
    # try a different baseline (e.g. random input or the training-set mean).
    if abs(prediction - baseline_score) <= 0.05:
        print("Warning: example score and baseline score are too close.")
        print("You might not get attributions.")
        passed_checks = 0
    else:
        passed_checks = 1
        print("Sanity Check 1: Passed")
    print(passed_checks, " out of ", total_checks, " sanity checks passed.")
# Pair each explanation with its prediction and run the sanity checks.
for idx, explanation in enumerate(response.explanations):
    pred = response.predictions[idx]
    try:
        # Responses with per-class scores carry a "scores" field ...
        prediction = np.max(pred["scores"])
    except TypeError:
        # ... plain regression responses are bare values.
        prediction = np.max(pred)
    sanity_check_explanations(explanation, prediction)
```
## Undeploy the model
When you are done doing predictions, you undeploy the model from the `Endpoint` resource. This deprovisions all compute resources and ends billing for the deployed model.
```
endpoint.undeploy_all()
```
# Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- AutoML Training Job
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# How to use EstimatorStep in AML Pipeline
This notebook shows how to use the EstimatorStep with Azure Machine Learning Pipelines. Estimator is a convenient object in Azure Machine Learning that wraps run configuration information to help simplify the tasks of specifying how a script is executed.
## Prerequisite:
* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning
* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](https://aka.ms/pl-config) to:
* install the AML SDK
* create a workspace and its configuration file (`config.json`)
Let's get started. First let's import some Python libraries.
```
# Verify the Azure ML SDK is importable and report its version.
import azureml.core
# check core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
```
## Initialize workspace
Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
```
from azureml.core import Workspace
# Connect to the workspace described by the local config.json (see prerequisites).
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep = '\n')
```
## Create or Attach existing AmlCompute
You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.
If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:
1. create the configuration (this step is local and only takes a second)
2. create the cluster (this step will take about **20 seconds**)
3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "amlcomp"
try:
    # Reuse the compute target if it already exists in this workspace.
    cpu_cluster = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target')
except ComputeTargetException:
    print('Creating a new compute target...')
    # NOTE(review): STANDARD_NC6 is a GPU SKU although the variable is named
    # cpu_cluster -- confirm which VM size is intended.
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)
    # create the cluster
    cpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
# can poll for a minimum number of nodes and for a specific timeout.
# if no min node count is provided it uses the scale settings for the cluster
cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# use get_status() to get a detailed status for the current cluster.
print(cpu_cluster.get_status().serialize())
```
Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'amlcomp' of type `AmlCompute`.
## Use a simple script
We have already created a simple "hello world" script. This is the script that we will submit through the estimator pattern. It prints a hello-world message, and if the Azure ML SDK is installed, it will also log an array of values ([Fibonacci numbers](https://en.wikipedia.org/wiki/Fibonacci_number)).
## Build an Estimator object
Estimator by default will attempt to use Docker-based execution. You can also enable Docker and let estimator pick the default CPU image supplied by Azure ML for execution. You can target an AmlCompute cluster (or any other supported compute target types). You can also customize the conda environment by adding conda and/or pip packages.
> Note: The arguments to the entry script used in the Estimator object should be specified as *list* using
'estimator_entry_script_arguments' parameter when instantiating EstimatorStep. Estimator object's parameter
'script_params' accepts a dictionary. However 'estimator_entry_script_arguments' parameter expects arguments as
a list.
> Estimator object initialization involves specifying a list of data input and output.
In Pipelines, a step can take another step's output as input; keep this in mind when creating an EstimatorStep.
> The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.
```
from azureml.core import Datastore
# Default blob datastore attached to the workspace.
def_blob_store = Datastore(ws, "workspaceblobstore")
#upload input data to workspaceblobstore
def_blob_store.upload_files(files=['20news.pkl'], target_path='20newsgroups')
from azureml.core import Dataset
from azureml.data import OutputFileDatasetConfig
# create dataset to be used as the input to estimator step
input_data = Dataset.File.from_files(def_blob_store.path('20newsgroups/20news.pkl'))
# OutputFileDatasetConfig by default write output to the default workspaceblobstore
output = OutputFileDatasetConfig()
source_directory = 'estimator_train'
from azureml.train.estimator import Estimator
# The Estimator wraps the run configuration: script, compute, and conda deps.
est = Estimator(source_directory=source_directory,
                compute_target=cpu_cluster,
                entry_script='dummy_train.py',
                conda_packages=['scikit-learn'])
```
## Create an EstimatorStep
[EstimatorStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py) adds a step to run Estimator in a Pipeline.
- **name:** Name of the step
- **estimator:** Estimator object
- **estimator_entry_script_arguments:** A list of command-line arguments
- **runconfig_pipeline_params:** Override runconfig properties at runtime using key-value pairs each with name of the runconfig property and PipelineParameter for that property
- **compute_target:** Compute target to use
- **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs. If this is false, a new run will always be generated for this step during pipeline execution.
- **version:** Optional version tag to denote a change in functionality for the step
```
from azureml.pipeline.steps import EstimatorStep
# Wrap the estimator as a pipeline step; script arguments must be a *list*.
est_step = EstimatorStep(name="Estimator_Train",
                         estimator=est,
                         estimator_entry_script_arguments=["--datadir", input_data.as_mount(), "--output", output],
                         runconfig_pipeline_params=None,
                         compute_target=cpu_cluster)
```
## Build and Submit the Experiment
```
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
# Assemble the single-step pipeline and submit it as an experiment run.
pipeline = Pipeline(workspace=ws, steps=[est_step])
pipeline_run = Experiment(ws, 'Estimator_sample').submit(pipeline)
```
## View Run Details
```
from azureml.widgets import RunDetails
# Render an interactive widget tracking the pipeline run's progress.
RunDetails(pipeline_run).show()
```
| github_jupyter |
# Ready, Steady, Go AI (*Exercises*)
This tutorial is a supplement to the paper, **Ready, Steady, Go AI: A Practical Tutorial on Fundamentals of Artificial Intelligence and Its Applications in Phenomics Image Analysis** (*Patterns, 2021*) by Farid Nakhle and Antoine Harfouche
Read the accompanying paper [here](https://doi.org/10.1016/j.patter.2021.100323).
# Table of Contents
* **1. Introduction**
* **2. Exercise I: Splitting Data**
* **3. Exercise II: Cropping Leaf Images**
* **4. Exercise III: Segmenting Leaf Images**
* **5. Exercise IV: Descriptive Data Analysis**
* **6. Exercise V: Balancing the Dataset**
* **7. Exercise VI: Classification Using DenseNet-161 Pretrained DCNN algorithm**
* **8. Exercise VII: Generating Confusion Matrix**
* **9. Exercise VIII: Generating Explanations With LIME**
# 1. Introduction
Before attempting to resolve the exercises found in this notebook, visit our Github repository and try to open and run all the notebooks provided by the tutorial.
Here, the solution for each exercise can be found in a hidden code cell at its end.
Interested users should try to solve the exercises with the help of the notebooks provided by the tutorial before looking at the solution.
As a reminder, we are working with the PlantVillage dataset, originally obtained from [here](http://dx.doi.org/10.17632/tywbtsjrjv.1).
For the following exercises, we will be working with a subset of PlantVillage containing the tomato classes only. We have made the subset available [here](http://faridnakhle.com/pv/tomato-original.zip).
**It is important to note that Colab deletes all unsaved data once the instance is recycled. Therefore, remember to download your results once you run the code.**
#2. Exercise I: Data Splitting
**A.** Complete the code that downloads the PlantVillage tomato leaves dataset using the link provided in the introduction. The dataset must be saved then extracted to /content/dataset/original/.
**B.** Complete the code that randomly splits the dataset into training, validation, and testing. Use the following split ratio: training, 80%; validation, 10%; and testing, 10%. The split dataset must be saved under /content/dataset/split/
```
import requests
import os
import zipfile
# Exercise I-A: fill in the download URL and the extraction directory.
# The `?` placeholders are intentional -- this cell is the exercise template.
dataset_url = ? # WRITE YOUR CODE HERE
save_data_to = ? # WRITE YOUR CODE HERE
dataset_file_name = "dataset.zip"
if not os.path.exists(save_data_to):
    os.makedirs(save_data_to)
r = requests.get(dataset_url, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
print("Downloading dataset...")
# Stream the zip to disk in 1 KiB chunks.
with open(save_data_to + dataset_file_name, "wb") as file:
    for block in r.iter_content(chunk_size = 1024):
        if block:
            file.write(block)
## Extract downloaded zip dataset file
print("Dataset downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + dataset_file_name, 'r') as zip_dataset:
    zip_dataset.extractall(save_data_to)
## Delete the zip file as we no longer need it
os.remove(save_data_to + dataset_file_name)
print("All done!")
!pip install split-folders tqdm
# REPLACE ? WITH YOUR ANSWER. NB the ratio parameter expects numbers in the following format: for 50%, type .5; for 60%, .6; etc.
# Exercise I-B: 80/10/10 train/val/test split.
!splitfolders --output ? --seed 1337 --ratio ? ? ? -- "/content/dataset/original"
```
# Solution
```
# Start from a clean slate in case the exercise cells were run before.
!rm -R /content/dataset/original/
!rm -R /content/dataset/split/
import requests
import os
import zipfile
# Solution to Exercise I-A: download and extract the tomato subset.
dataset_url = "http://faridnakhle.com/pv/tomato-original.zip"
save_data_to = "/content/dataset/original/"
dataset_file_name = "dataset.zip"
if not os.path.exists(save_data_to):
    os.makedirs(save_data_to)
r = requests.get(dataset_url, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
print("Downloading dataset...")
# Stream the zip to disk in 1 KiB chunks.
with open(save_data_to + dataset_file_name, "wb") as file:
    for block in r.iter_content(chunk_size = 1024):
        if block:
            file.write(block)
## Extract downloaded zip dataset file
print("Dataset downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + dataset_file_name, 'r') as zip_dataset:
    zip_dataset.extractall(save_data_to)
## Delete the zip file as we no longer need it
os.remove(save_data_to + dataset_file_name)
print("All done!")
## SPLIT
# Solution to Exercise I-B: 80% train / 10% val / 10% test.
!pip install split-folders tqdm
!splitfolders --output "/content/dataset/split/" --seed 1337 --ratio .8 .1 .1 -- "/content/dataset/original"
```
# 3. Exercise II: Cropping Leaf Images
Before you start, make sure to run the "Install and import prequisites", "Download pretrained model", and "Define prerequisite functions" code cells.
Once you run all of them, you are required to pass the path of the folder containing the split data from Exercise I (training, validation, and testing) to the crop function below. Running it will trigger YOLO to crop the images using a pretrained model.
After that, you should be able to run the "Preview a Cropped Image" cell and see a sample image from the results.
```
#@title Install and import prerequisites
# Clone the YOLOv3 repo and install its Python requirements.
!git clone https://github.com/ultralytics/yolov3
%cd yolov3
%pip install -qr requirements.txt
import torch
from IPython.display import Image
#@title Download pretrained model
model_URL = "http://faridnakhle.com/pv/models/YOLOv3.zip"
save_data_to = "/content/models/"
model_file_name = "yolo.zip"
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
print("Downloading model...")
r = requests.get(model_URL, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
with open(save_data_to + model_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Model downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + model_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
print("All done!")
#@title Define prerequisite functions
import os
import cv2
import random
import numpy as np
# function for cropping each detection and saving as new image
def crop_objects(img, data, path):
    """Crop every detected object out of *img* and save each crop as its own PNG.

    Parameters
    ----------
    img : ndarray
        Source image (as loaded by OpenCV).
    data : tuple
        (boxes, scores, classes, num_objects) as produced by the detector.
    path : str
        Directory in which to write the cropped images.
    """
    boxes, scores, classes, num_objects = data
    for i in range(len(num_objects)):
        # class of this detection (kept for potential use in naming)
        class_index = int(classes[i])
        # get box coords
        xmin, ymin, xmax, ymax = boxes[i]
        # Crop with a 5px margin, clamped at 0: a negative slice start would
        # wrap around to the end of the array and produce a garbage crop.
        y0 = max(int(ymin) - 5, 0)
        x0 = max(int(xmin) - 5, 0)
        cropped_img = img[y0:int(ymax) + 5, x0:int(xmax) + 5]
        # Unique name per detection; the original reused 'cropped_img.png'
        # for every box, so each crop overwrote the previous one. The first
        # crop keeps the original name for backward compatibility.
        img_name = 'cropped_img.png' if i == 0 else 'cropped_img_{}.png'.format(i)
        img_path = os.path.join(path, img_name)
        # save image
        cv2.imwrite(img_path, cropped_img)
def crop_object(img, coords, img_path):
    """Crop the box *coords* = (xmin, ymin, xmax, ymax) from *img* and write it to *img_path*."""
    xmin, ymin, xmax, ymax = (int(c) for c in coords[:4])
    # Slice out the detection and save it in one step.
    cv2.imwrite(img_path, img[ymin:ymax, xmin:xmax])
def plot_grid(img, line_color=(0, 255, 0), thickness=1, type_=cv2.LINE_AA, pxstep=20, pystep=20):
    """Overlay a grid on *img* in place: vertical lines every *pystep* px, horizontal every *pxstep* px."""
    height, width = img.shape[0], img.shape[1]
    # vertical lines
    col = pystep
    while col < width:
        cv2.line(img, (col, 0), (col, height), color=line_color, lineType=type_, thickness=thickness)
        col += pystep
    # horizontal lines
    row = pxstep
    while row < height:
        cv2.line(img, (0, row), (width, row), color=line_color, lineType=type_, thickness=thickness)
        row += pxstep
def plot_borders(img, line_color=(0, 255, 0), thickness=1):
    """Draw a rectangular border just inside the edges of *img* (modifies it in place)."""
    bottom_right = (img.shape[1] - thickness, img.shape[0] - thickness)
    cv2.rectangle(img, (0, 0), bottom_right, line_color, thickness)
def myround(x, base=5):
    """Round *x* to the nearest multiple of *base* (ties follow round()'s banker's rounding)."""
    return round(x / base) * base
def plot_overlay(x, img, color, alpha,
                 pxstep=20, pystep=20):
    """Blend a filled rectangle (corners snapped to the grid spacing) onto *img* in place."""
    # Snap each corner coordinate to the nearest grid multiple.
    left = myround(int(x[0]), pystep)
    top = myround(int(x[1]), pxstep)
    right = myround(int(x[2]), pystep)
    bottom = myround(int(x[3]), pxstep)
    overlay = img.copy()
    cv2.rectangle(overlay, (left, top), (right, bottom), color, -1)
    # Weighted blend; passing img as the last argument writes the result back into it.
    cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0, img)
!cd /content/yolov3/
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, \
strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
import glob
def crop(dataset_dir='', model_path='/content/yolov3/runs/train/exp/best.pt'):
    """Run YOLOv3 over a ``<split>/<class>/image`` tree and save crops + visualizations.

    For every image two NMS passes are made over the same raw prediction:
    a very low-confidence pass (``pred``, conf 0.00005) used only for the
    bounding-box / grid visualizations, and a normal pass (``final_pred``,
    conf 0.15) whose boxes are actually cropped and saved.  Per image the
    function writes: the original, the detection overlay, a grid overlay,
    a probability-map overlay, a final-detection image, and one cropped
    file per detection, all under ``runs/detect/exp*/<split>/<class>/``.

    NOTE(review): the model is re-loaded for every class folder; hoisting
    the load above the loops would be faster — left as-is here.
    """
    save_txt, imgsz = False, 224
    weights = model_path
    projectP = 'runs/detect'
    projectNameP = 'exp'
    save_img = True
    view_img = True
    save_dir = Path(increment_path(Path(projectP) / projectNameP, False))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
    #loop over train, val, and test set
    trainTestVarDirs = glob.glob(dataset_dir + "*")
    for setDir in trainTestVarDirs:
        splitDir = os.path.basename(setDir)
        setClasses = glob.glob(setDir + "/*")
        for setClass in setClasses:
            # Directories
            classDir = os.path.basename(setClass)
            finalSaveDir = os.path.join(save_dir, splitDir, classDir)
            Path(finalSaveDir).mkdir(parents=True, exist_ok=True)
            source = setClass
            # Initialize
            set_logging()
            device = select_device('0')
            half = device.type != 'cpu'  # half precision only supported on CUDA
            # Load model
            model = attempt_load(weights, map_location=device)  # load FP32 model
            imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
            #introducing grid size
            gs = model.stride.max()
            #end
            if half:
                model.half()  # to FP16
            # Second-stage classifier (disabled)
            classify = False
            if classify:
                modelc = load_classifier(name='resnet101', n=2)  # initialize
                modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
            # Set Dataloader
            vid_path, vid_writer = None, None
            dataset = LoadImages(source, img_size=imgsz)
            # Get names and colors
            names = model.module.names if hasattr(model, 'module') else model.names
            colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
            # NOTE(review): the random colors above are immediately replaced
            # by a single fixed color for all classes.
            colors = [[217, 175, 78]]
            # Run inference
            t0 = time.time()
            img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
            _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once (warm-up)
            for path, img, im0s, vid_cap in dataset:
                img = torch.from_numpy(img).to(device)
                img = img.half() if half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)
                # Inference
                t1 = time_synchronized()
                pred = model(img, augment=True)[0]
                # Apply NMS: strict pass for cropping, near-zero-threshold pass for visualization
                final_pred = non_max_suppression(pred, 0.15, 0.3, classes=0, agnostic=True)
                pred = non_max_suppression(pred, 0.00005, 1, classes=0, agnostic=True)
                t2 = time_synchronized()
                # Apply Classifier
                if classify:
                    pred = apply_classifier(pred, modelc, img, im0s)
                # Process detections (visualization pass)
                for i, det in enumerate(pred):  # detections per image
                    p, s, im0 = Path(path), '', im0s.copy()
                    imoriginal = im0.copy()
                    #plot grid
                    numofsquares = int(imgsz/int(gs))
                    rowstep = int(im0.shape[0]/numofsquares)
                    colstep = int(im0.shape[1]/numofsquares)
                    plot_borders(im0, line_color=(0,0,0), thickness=2)
                    gridim_solo = im0.copy()
                    plot_grid(gridim_solo, pxstep=rowstep, pystep=colstep, line_color=(0,0,0), thickness=2)
                    #end plot grid
                    save_path = str(finalSaveDir + "/" + p.name)
                    s += '%gx%g ' % img.shape[2:]  # print string
                    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                        # Print results
                        for c in det[:, -1].unique():
                            n = (det[:, -1] == c).sum()  # detections per class
                            s += '%g %ss, ' % (n, names[int(c)])  # add to string
                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            if save_img or view_img:  # Add bbox to image
                                label = ''#'%s %.2f' % (names[int(cls)], conf)
                                plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
                    # Print time (inference + NMS)
                    print('%sDone. (%.3fs)' % (s, t2 - t1))
                    # Save results (image with detections)
                    if save_img:
                        cv2.imwrite(save_path + "_original.jpg", imoriginal)
                        cv2.imwrite(save_path, im0)
                        cv2.imwrite(save_path + "_grid.jpg", gridim_solo)
                # SAVE FINAL CROPPED IMAGES
                # Process detections (high-confidence pass)
                for i, det in enumerate(final_pred):  # detections per image
                    p, s, im0 = Path(path), '', im0s
                    im2 = im0.copy() #to use with grid/map
                    #background: wash out the whole image, detections are re-colored below
                    numofsquares = int(imgsz/int(gs))
                    rowstep = int(im0.shape[0]/numofsquares)
                    colstep = int(im0.shape[1]/numofsquares)
                    plot_overlay([0,0, im2.shape[1], im2.shape[0]], im2, color=(255, 255, 255), alpha=0.7, pxstep=rowstep, pystep=colstep)
                    #borders
                    plot_borders(im2, line_color=(0,0,0), thickness=2)
                    plot_borders(im0, line_color=(0,0,0), thickness=2)
                    save_path = str(finalSaveDir + "/" + p.name)
                    s += '%gx%g ' % img.shape[2:]  # print string
                    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                        # Print results
                        for c in det[:, -1].unique():
                            n = (det[:, -1] == c).sum()  # detections per class
                            s += '%g %ss, ' % (n, names[int(c)])  # add to string
                        #FUNCTION custom crop: one cropped file per detection
                        CROP = True
                        if CROP:
                            fidx = 0
                            for *xyxy, conf, cls in reversed(det):
                                if save_img or view_img:
                                    fidx = fidx + 1
                                    crop_object(im0, xyxy, str(finalSaveDir + "/" + (p.stem + "_cropped_" + str(fidx) + p.suffix)))
                        #END
                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            if save_img or view_img:  # Add bbox to image
                                label = ''#'%s %.2f' % (names[int(cls)], conf)
                                plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)
                                plot_overlay(xyxy, im2, color=colors[int(cls)], alpha=0.7, pxstep=rowstep, pystep=colstep)
                    else:
                        # No confident detection: keep the uncropped image for inspection.
                        cv2.imwrite(save_path + "_not_cropped.jpg", im0)
                    gridim = im2.copy()
                    plot_grid(gridim, pxstep=rowstep, pystep=colstep, line_color=(0,0,0), thickness=2)
                    # Print time (inference + NMS)
                    print('%sDone. (%.3fs)' % (s, t2 - t1))
                    # Save results (image with detections)
                    if save_img:
                        cv2.imwrite(save_path + "_map.jpg", gridim)
                        cv2.imwrite(save_path + "_final.jpg", im0)
            if save_txt or save_img:
                # NOTE(review): the save_txt branch is dead here (save_txt is False);
                # if enabled, finalSaveDir is a str and has no .glob — verify before use.
                s = f"\n{len(list(finalSaveDir.glob('labels/*.txt')))} labels saved to {finalSaveDir + '/' + 'labels'}" if save_txt else ''
                print(f"Results saved to {finalSaveDir}{s}")
    print('Done. (%.3fs)' % (time.time() - t0))
# REPLACE ? with your answer
crop(dataset_dir=?, model_path='/content/models/weights/RSGAI_YOLOv3.pt')
```
# Solution
```
crop(dataset_dir='/content/dataset/split/', model_path='/content/models/weights/RSGAI_YOLOv3.pt')
```
# Preview a Cropped Image
```
#@title Generate preview
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import glob
lastExp = max(glob.glob(os.path.join('/content/yolov3/runs/detect','*/' )), key=os.path.getmtime)
imgPath = lastExp + 'test/Tomato___Leaf_Mold/image (114).JPG'
oringinalImg = mpimg.imread(imgPath + "_original.jpg")
boundingBoxesImg = mpimg.imread(imgPath)
croppedImg = mpimg.imread(imgPath.replace(".JPG", "_cropped_1.JPG"))
gridImg = mpimg.imread(imgPath+ "_grid.jpg")
mapImg = mpimg.imread(imgPath+ "_map.jpg")
finaldetectImg = mpimg.imread(imgPath+ "_final.jpg")
print("Original Image:")
plt.axis('off')
plt.imshow(oringinalImg)
plt.show()
print("Grid:")
plt.axis('off')
plt.imshow(gridImg)
plt.show()
print("Bounding Boxes:")
plt.axis('off')
plt.imshow(boundingBoxesImg)
plt.show()
print("Probability Map:")
plt.axis('off')
plt.imshow(mapImg)
plt.show()
print("Final Detection:")
plt.axis('off')
plt.imshow(finaldetectImg)
plt.show()
print("Cropped Image:")
plt.axis('off')
plt.imshow(croppedImg)
plt.show()
```
# 4. Exercise III: Segmenting Leaf Images
Before you start, make sure to run the "Install and import prerequisites", "Download pretrained model", and "Define prerequisite functions" code cells.
Once you run all of them, you are required to pass the path of the folder containing the cropped tomato leaves to the segment function below. The cropped version of the dataset is located under /content/dataset/tomato-cropped/.
Running the segment function will trigger SegNet to segment the images using a pretrained model.
After that, you should be able to run the "Preview a Segmented Image" cell and see a sample image from the results.
```
#@title Install and import prerequisites
!git clone https://github.com/divamgupta/image-segmentation-keras
%cd image-segmentation-keras
from keras_segmentation.models.segnet import segnet
print("Keras and SegNet are loaded")
#@title Download pretrained model
##########################
### DOWNLOAD THE MODEL ###
##########################
import requests
import os
import zipfile
## FEEL FREE TO CHANGE THESE PARAMETERS
model_URL = "http://faridnakhle.com/pv/models/SegNet.zip"
save_data_to = "/content/models/"
model_file_name = "segnet.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
print("Downloading model...")
r = requests.get(model_URL, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
with open(save_data_to + model_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Model downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + model_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
print("Done!")
## CROPPED DATASET
## FEEL FREE TO CHANGE THESE PARAMETERS
dataset_url = "http://faridnakhle.com/pv/tomato-split-cropped.zip"
save_data_to = "/content/dataset/tomato-cropped/"
dataset_file_name = "tomato-cropped.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
r = requests.get(dataset_url, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
print("Downloading dataset...")
with open(save_data_to + dataset_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Dataset downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + dataset_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
## Delete the zip file as we no longer need it
os.remove(save_data_to + dataset_file_name)
print("All done!")
#@title Define prerequisite functions
import cv2
import numpy as np
from keras_segmentation.models.segnet import segnet
import glob
import os
from tqdm import tqdm
import six
def segment(inptDir = ""):
    """Segment every cropped leaf image under ``inptDir`` with a pretrained SegNet.

    Expects ``inptDir`` to be laid out as ``<split>/<class>/*.{jpg,png,jpeg,JPG}``.
    For each image the raw SegNet prediction is first written to the mirror
    path under ``/content/dataset/tomato-cropped-segmented/``, then the
    original image is masked pixel-by-pixel (pixels whose first segmentation
    channel — blue, given cv2's BGR order — exceeds 50 are blacked out) and
    the masked image overwrites the raw prediction file.
    """
    modelName = "/content/models/RSGAI_SegNet.hdf5"
    model = segnet(n_classes=50 , input_height=320, input_width=640)
    model.load_weights(modelName)
    outputDir = "/content/dataset/tomato-cropped-segmented/"
    inptDirGlob = glob.glob(inptDir + "*")
    # Walk the train/val/test splits, then each class folder inside a split.
    for setDir in inptDirGlob:
        splitDir = os.path.basename(setDir)
        setClasses = glob.glob(setDir + "/*")
        for setClass in setClasses:
            classDir = os.path.basename(setClass)
            inptFolder = os.path.join(inptDir, splitDir, classDir)
            outputFolder = os.path.join(outputDir, splitDir, classDir)
            if not os.path.exists(outputFolder):
                os.makedirs(outputFolder)
            # Gather the common image extensions (glob is case-sensitive on Linux,
            # hence the separate *.JPG pattern).
            inps = glob.glob(os.path.join(inptFolder, "*.jpg")) + glob.glob(
                os.path.join(inptFolder, "*.png")) + \
                glob.glob(os.path.join(inptFolder, "*.jpeg"))+ \
                glob.glob(os.path.join(inptFolder, "*.JPG"))
            inps = sorted(inps)
            if len(inps) > 0:
                all_prs = []
                for i, inp in enumerate(tqdm(inps)):
                    if outputFolder is None:
                        out_fname = None
                    else:
                        if isinstance(inp, six.string_types):
                            # Keep the original file name in the output tree.
                            out_fname = os.path.join(outputFolder, os.path.basename(inp))
                        else:
                            out_fname = os.path.join(outputFolder, str(i) + ".jpg")
                    # Writes the colorized segmentation to out_fname as a side effect.
                    pr = model.predict_segmentation(
                        inp=inp,
                        out_fname=out_fname
                    )
                    img = cv2.imread(inp)
                    seg = cv2.imread(out_fname)
                    # Black out pixels the model marked as background, keep the leaf.
                    for row in range(0, len(seg)):
                        for col in range(0, len(seg[0])):
                            #if np.all(seg[row, col] == [7,47,204]) == False:
                            #    img[row, col] = [0,0,0]
                            if seg[row, col][0] > 50:
                                img[row, col] = [0,0,0]
                    all_prs.append(pr)
                    # Overwrite the raw prediction with the masked original.
                    cv2.imwrite(out_fname, img)
    print("Segmented images are saved in:")
    print(outputDir)
# REPLACE ? with your answer
segment(inptDir=?)
```
# Solution
```
segment(inptDir="/content/dataset/tomato-cropped/")
```
# Preview a Segmented Image
```
#@title Generate Preview
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
imgPath = '/content/dataset/tomato-cropped/test/Tomato___Septoria_leaf_spot/image (1020)_cropped_1.JPG'
segmemtedPath = '/content/dataset/tomato-cropped-segmented/test/Tomato___Septoria_leaf_spot/image (1020)_cropped_1.JPG'
oringinalImg = mpimg.imread(imgPath)
segmentedImage = mpimg.imread(segmemtedPath)
print("Original Image:")
plt.axis('off')
plt.imshow(oringinalImg)
plt.show()
print("Segmented Image:")
plt.axis('off')
plt.imshow(segmentedImage)
plt.show()
```
# 5. Exercise IV: Descriptive Data Analysis
Before you start, make sure to run the "Install and import prerequisites" code cell. This will define a variable containing the name of all available classes, and another with the corresponding number of images in each class of the training set.
You are required to plot these data and analyze the data distribution.
Based on the above, you can decide whether or not there is a need for data balancing.
```
#@title Install and import prerequisites
import requests
import os
import zipfile
## FEEL FREE TO CHANGE THESE PARAMETERS
dataset_url = "http://faridnakhle.com/pv/tomato-split-cropped-segmented.zip"
save_data_to = "/content/dataset/tomato-segmented/"
dataset_file_name = "tomato-segmented.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
r = requests.get(dataset_url, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
print("Downloading dataset...")
with open(save_data_to + dataset_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Dataset downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + dataset_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
## Delete the zip file as we no longer need it
os.remove(save_data_to + dataset_file_name)
print("All done!")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import shutil
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
train_dir = '/content/dataset/tomato-segmented/train/'
train_classes = [path for path in os.listdir(train_dir)]
train_imgs = dict([(ID, os.listdir(os.path.join(train_dir, ID))) for ID in train_classes])
train_classes_count = []
for trainClass in train_classes:
train_classes_count.append(len(train_imgs[trainClass]))
#the list of classes (for the x axis) is now contained in a variable named train_classes
#while the number of images for each class (for the y axis) is contained in a variable named train_classes_count
#Use these variables to plot the data distribution plot by replacing ? with your answer
plt.figure(figsize=(15, 10))
g = sns.barplot(x=?, y=?)
g.set_xticklabels(labels=train_classes, rotation=30, ha='right')
```
# Solution
```
plt.figure(figsize=(15, 10))
g = sns.barplot(x=train_classes, y=train_classes_count)
g.set_xticklabels(labels=train_classes, rotation=30, ha='right')
```
# 6. Exercise V: Balancing the Dataset
Based on the results of the Descriptive Data Analysis (exercise IV), it can be seen that the classes are imbalanced. In this exercise, you are required to:
**A.** Use Augmentor to oversample any class containing less than 1500 images, except for the healthy class.
**B.** Use DCGAN to synthesize images for the healthy class. You need to pass the path of the pretrained model as a parameter to the synthesizing function. The model is saved under /content/models/RSGAI_DCGAN.pth
**C.** Use KNN to downsample the yellow leaf curl virus class.
Balanced classes should end up with 1500 images each.
**NB:** After data balancing, generate the data distribution plot again to analyze the new distribution of classes.
Make sure to run the "Install and import prerequisites", "Download pretrained models", and "Define prerequisite functions" code cells first.
```
#@title Install and import prerequisites
!pip install Augmentor
import Augmentor
import os
def makedir(path):
    """Ensure ``path`` exists in the file system, creating it (with parents) when absent."""
    if os.path.exists(path):
        return
    os.makedirs(path)
datasets_root_dir = '/content/dataset/tomato-segmented/'
dir = datasets_root_dir + 'train/'
target_dir = dir #same directory as input
makedir(target_dir)
folders = [os.path.join(dir, folder) for folder in next(os.walk(dir))[1]]
target_folders = [os.path.join(target_dir, folder) for folder in next(os.walk(dir))[1]]
#@title Download pretrained models
##########################
### DOWNLOAD THE MODEL ###
##########################
## FEEL FREE TO CHANGE THESE PARAMETERS
model_URL = "http://faridnakhle.com/pv/models/RSGAI_DCGAN.zip"
save_data_to = "/content/models/"
model_file_name = "dcgan.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
print("Downloading model...")
r = requests.get(model_URL, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
with open(save_data_to + model_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Model downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + model_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
print("All done!")
#@title Define prerequisite functions
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
## YOU CAN CHANGE THESE VARIABLES
n_epochs = 300
batch_size = 50
lr = 0.0002
b1 = 0.7 #adam: decay of first order momentum of gradient
b2 = 0.999 #adam: decay of first order momentum of gradient
n_cpu = 1
latent_dim = 100 #dimensionality of the latent space
img_size = 224
channels = 3 #R, G, and B
sample_interval = 400 #interval between image sampling
######################################################
class Generator(nn.Module):
    """DCGAN generator: maps a latent vector to a (channels, img_size, img_size) image.

    Reads the module-level globals ``latent_dim``, ``img_size`` and
    ``channels``.  A linear layer projects z onto a 128-channel
    (img_size/4)^2 feature map; two Upsample+Conv stages restore full
    resolution, and Tanh bounds the output to [-1, 1].
    """
    def __init__(self):
        super(Generator, self).__init__()
        # Starting spatial size; the two 2x upsamples below recover img_size.
        self.init_size = img_size // 4
        self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))
        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            # NOTE(review): 0.8 here is the positional `eps` argument of
            # BatchNorm2d; `momentum` may have been intended — verify.
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, channels, 3, stride=1, padding=1),
            nn.Tanh(),
        )
    def forward(self, z):
        """Generate a batch of images from the latent batch ``z``."""
        out = self.l1(z)
        # Reshape the flat projection into a 128-channel square feature map.
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img
class Discriminator(nn.Module):
    """DCGAN discriminator: scores an image as real (1) or fake (0).

    Four stride-2 conv blocks downsample the input by 16x, then a single
    sigmoid-activated linear layer produces the validity score.  Reads the
    module-level globals ``channels`` and ``img_size``.
    """
    def __init__(self):
        super(Discriminator, self).__init__()
        def discriminator_block(in_filters, out_filters, bn=True):
            # One downsampling unit: stride-2 conv, LeakyReLU, dropout,
            # and (except for the first block) batch norm.
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                # NOTE(review): 0.8 is the positional `eps` argument — verify.
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block
        self.model = nn.Sequential(
            *discriminator_block(channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )
        # The height and width of downsampled image (four stride-2 convs => /16)
        ds_size = img_size // 2 ** 4
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
    def forward(self, img):
        """Return a (batch, 1) validity score in [0, 1] for ``img``."""
        out = self.model(img)
        out = out.view(out.shape[0], -1)
        validity = self.adv_layer(out)
        return validity
def GenerateImages(modelPath, outPutFolder, IMGS2GENERATE):
    """Load a DCGAN checkpoint and synthesize ``IMGS2GENERATE`` images.

    Images are generated one at a time from N(0, 1) latent vectors and
    saved as ``DCGAN_<i>.png`` under ``outPutFolder`` (created if missing).
    Requires the module-level ``Generator`` / ``Discriminator`` classes.
    """
    if not os.path.exists(outPutFolder):
        os.makedirs(outPutFolder)
    ## YOU CAN CHANGE THESE VARIABLES
    # NOTE(review): these locals shadow the module-level settings of the same
    # names; most (n_epochs, batch_size, sample_interval, n_cpu) are unused
    # here since no training happens in this function.
    n_epochs = 1
    batch_size = 50
    lr = 0.0002
    b1 = 0.7 #adam: decay of first order momentum of gradient
    b2 = 0.999 #adam: decay of first order momentum of gradient
    n_cpu = 1
    latent_dim = 100 #dimensionality of the latent space
    img_size = 224
    channels = 3 #R, G, and B
    sample_interval = 400 #interval between image sampling
    ######################################################
    def weights_init_normal(m):
        # DCGAN-style init: N(0, 0.02) for convs, N(1, 0.02) + zero bias for batch norms.
        classname = m.__class__.__name__
        if classname.find("Conv") != -1:
            torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif classname.find("BatchNorm2d") != -1:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)
    cuda = True if torch.cuda.is_available() else False
    load_from_checkpoint = True
    # Loss function
    adversarial_loss = torch.nn.BCELoss()
    # Initialize generator and discriminator
    generator = Generator()
    discriminator = Discriminator()
    if cuda:
        generator.cuda()
        discriminator.cuda()
        adversarial_loss.cuda()
    # Initialize weights (overwritten below when the checkpoint loads)
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
    # Optimizers (needed only so their state dicts can be restored)
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    # ----------
    # Load from Checkpoint
    # ----------
    if (load_from_checkpoint):
        checkpointName = modelPath
        checkpoint = torch.load(checkpointName)
        generator.load_state_dict(checkpoint['G_state_dict'])
        discriminator.load_state_dict(checkpoint['D_state_dict'])
        optimizer_G.load_state_dict(checkpoint['G_optimizer'])
        optimizer_D.load_state_dict(checkpoint['D_optimizer'])
        print("Loaded CheckPoint: " + checkpointName)
        if cuda:
            generator.cuda()
            discriminator.cuda()
    # ----------
    # Generating images
    # ----------
    for i in range (0, IMGS2GENERATE):
        # One latent sample per image (batch of 1).
        z = Variable(Tensor(np.random.normal(0, 1, (1, latent_dim))))
        # Generate a batch of images
        gen_imgs = generator(z)
        # NOTE(review): nrow=0 is an unusual value for save_image — confirm
        # it behaves as intended with this torchvision version.
        save_image(gen_imgs.data, outPutFolder + "/DCGAN_%d.png" % (i + 1), nrow=0, normalize=True)
```
# A. Augmentor
```
requiredNbrOfImages = ? # REPLACE ? WITH YOUR ANSWER
for i in range(len(folders)):
# REPLACE ? WITH YOUR ANSWER
if folders[i].endswith(?) == False:
path, dirs, files = next(os.walk(folders[i]))
nbrOfImages = len(files)
nbrOfImagesNeeded = requiredNbrOfImages - nbrOfImages
if nbrOfImagesNeeded > 0:
tfd = target_folders[i]
print ("saving in " + tfd)
p = Augmentor.Pipeline(source_directory=folders[i], output_directory=tfd)
p.rotate(probability=1, max_left_rotation=15, max_right_rotation=15)
p.flip_left_right(probability=0.5)
p.skew(probability=1, magnitude=0.2)
p.flip_left_right(probability=0.5)
p.shear(probability=1, max_shear_left=10, max_shear_right=10)
p.flip_left_right(probability=0.5)
p.sample(nbrOfImagesNeeded)
print("Dataset Augmented!")
```
# Solution
```
requiredNbrOfImages = 1500
for i in range(len(folders)):
if folders[i].endswith("healthy") == False:
path, dirs, files = next(os.walk(folders[i]))
nbrOfImages = len(files)
nbrOfImagesNeeded = requiredNbrOfImages - nbrOfImages
if nbrOfImagesNeeded > 0:
tfd = target_folders[i]
print ("saving in " + tfd)
p = Augmentor.Pipeline(source_directory=folders[i], output_directory=tfd)
p.rotate(probability=1, max_left_rotation=15, max_right_rotation=15)
p.flip_left_right(probability=0.5)
p.skew(probability=1, magnitude=0.2)
p.flip_left_right(probability=0.5)
p.shear(probability=1, max_shear_left=10, max_shear_right=10)
p.flip_left_right(probability=0.5)
p.sample(nbrOfImagesNeeded)
print("Dataset Augmented!")
```
# B. DCGAN
```
## Replace ? With your output
GenerateImages(?, '/content/dataset/dcgan/', IMGS2GENERATE = 228)
print("Data Generated")
```
# Solution
```
GenerateImages('/content/models/RSGAI_DCGAN.pth', '/content/dataset/dcgan/', IMGS2GENERATE = 228)
print("Data Generated")
```
# Preview Some Generated Images
```
#@title Preview some synthetic images
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
imgPath = '/content/dataset/dcgan/DCGAN_'
imageOne = mpimg.imread(imgPath + "1.png")
imageTen = mpimg.imread(imgPath + "10.png")
plt.axis('off')
plt.imshow(imageOne)
plt.show()
plt.axis('off')
plt.imshow(imageTen)
plt.show()
```
# C. KNN
```
#@title Load prerequisites
from sklearn.neighbors import NearestNeighbors
from glob import glob
import numpy as np
import scipy.sparse as sp
from keras.applications import VGG19
from keras.applications.vgg19 import preprocess_input
from keras.engine import Model
from keras.preprocessing import image
import numpy as np
import os
deleteImages = True
def SaveFile(arr, filename):
    """Write each element of ``arr`` to ``filename``, one ``str()``-formatted entry per line."""
    text = "".join(str(entry) + "\n" for entry in arr)
    with open(filename, 'w') as out:
        out.write(text)
def vectorize_all(files, model, px=224, n_dims=512, batch_size=512):
    """Run ``model`` over ``files`` in batches and return per-file feature vectors.

    Each image is loaded at (px, px), converted to an array, VGG-preprocessed,
    and predicted in batches of ``batch_size``.  Rows of the returned
    ``scipy.sparse.lil_matrix`` of shape (len(files), n_dims) hold the features.

    NOTE(review): when an image fails to load, ``max_idx`` is clamped to the
    failing index, silently truncating the batch; the ``+ 1`` in ``shp`` only
    reshapes cleanly in that clamped case — verify against real data.
    """
    min_idx = 0
    max_idx = min_idx + batch_size
    total_max = len(files)
    # Clamp the first batch to the number of files.
    if (max_idx > total_max):
        max_idx = total_max
    preds = sp.lil_matrix((len(files), n_dims))
    print("Total: {}".format(len(files)))
    while min_idx < total_max - 1:
        print(min_idx)
        # One batch of raw pixel data, filled row by row below.
        X = np.zeros(((max_idx - min_idx), px, px, 3))
        # For each file in batch,
        # load as row into X
        i = 0
        for i in range(min_idx, max_idx):
            file = files[i]
            try:
                img = image.load_img(file, target_size=(px, px))
                img_array = image.img_to_array(img)
                X[i - min_idx, :, :, :] = img_array
            except Exception as e:
                # Load failure: log and shrink the batch to what was loaded.
                print(e)
                max_idx = i
        X = preprocess_input(X)
        these_preds = model.predict(X)
        shp = ((max_idx - min_idx) + 1, n_dims)
        preds[min_idx:max_idx + 1, :] = these_preds.reshape(shp)
        # Advance to the next batch window.
        min_idx = max_idx
        max_idx = np.min((max_idx + batch_size, total_max))
    return preds
def vectorizeOne(path, model):
    """Return the flattened feature vector ``model`` produces for the image at ``path``."""
    # Load at the VGG input size and add the batch dimension.
    loaded = image.load_img(path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    features = model.predict(preprocess_input(batch))
    return features.ravel()
def findSimilar(vec, knn, filenames, n_neighbors=6):
    """Return the ``n_neighbors`` files most similar to ``vec``.

    Queries the fitted ``knn`` index for one extra neighbour and drops the
    closest hit (the query image itself).  Each result is a
    ``(filename, distance)`` tuple.  Prints an error and returns ``None``
    when ``n_neighbors`` is not smaller than the number of files.
    """
    if n_neighbors >= len(filenames):
        print("Error. number of neighbours should be less than the number of images.")
        return None
    # Ask for one extra neighbour: the nearest match is the query itself.
    dist, indices = knn.kneighbors(vec.reshape(1, -1), n_neighbors=n_neighbors + 1)
    pairs = list(zip(indices.flatten(), dist.flatten()))
    # Skip the self-match, map the remaining indices back to file names.
    return [(filenames[idx], d) for idx, d in pairs[1:]]
## REPLACE ? WITH THE NAME OF THE CLASS TO BE DOWNSAMPLED
img_dir = "/content/dataset/tomato-segmented/train/?/*"
targetLimit = ? ## REPLACE ? WITH THE DESIRED NUMBER OF IMAGES
files = glob(img_dir)
nbrOfImages2Delete = len(files) - targetLimit
if (nbrOfImages2Delete > 0):
imgToSearchFor = files[0]
base_model = VGG19(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
vecs = vectorize_all(files, model, n_dims=4096)
knn = NearestNeighbors(metric='cosine', algorithm='brute')
knn.fit(vecs)
vec = vectorizeOne(imgToSearchFor, model)
similarImages = findSimilar(vec, knn, files, nbrOfImages2Delete)
print(similarImages)
SaveFile(similarImages, "deletedImages.txt")
if deleteImages:
for i in range(0, len(similarImages)):
if os.path.exists(similarImages[i][0]):
os.remove(similarImages[i][0])
print("Balancing done. A list of deleted images can be found in deletedImages.txt")
else:
print("nothing to delete")
```
# Solution
```
img_dir = "/content/dataset/tomato-segmented/train/Tomato___Tomato_Yellow_Leaf_Curl_Virus/*"
targetLimit = 1500
deleteImages = True
files = glob(img_dir)
nbrOfImages2Delete = len(files) - targetLimit
if (nbrOfImages2Delete > 0):
imgToSearchFor = files[0]
base_model = VGG19(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
vecs = vectorize_all(files, model, n_dims=4096)
knn = NearestNeighbors(metric='cosine', algorithm='brute')
knn.fit(vecs)
vec = vectorizeOne(imgToSearchFor, model)
similarImages = findSimilar(vec, knn, files, nbrOfImages2Delete)
SaveFile(similarImages, "deletedImages.txt")
if deleteImages:
for i in range(0, len(similarImages)):
if os.path.exists(similarImages[i][0]):
os.remove(similarImages[i][0])
print("Balancing done. A list of deleted images can be found in deletedImages.txt")
else:
print("nothing to delete")
```
# Display Final Data Distribution
```
#@title Generate Data Distribution
import requests
import os
import zipfile
## FEEL FREE TO CHANGE THESE PARAMETERS
dataset_url = "http://faridnakhle.com/pv/tomato-split-cropped-segmented-balanced.zip"
save_data_to = "/content/dataset/tomato-dataset-final/"
dataset_file_name = "tomato-split-cropped-segmented-balanced.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
r = requests.get(dataset_url, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
print("Downloading dataset...")
with open(save_data_to + dataset_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Dataset downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + dataset_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
## Delete the zip file as we no longer need it
os.remove(save_data_to + dataset_file_name)
print("All done!")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import shutil
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
train_dir = '/content/dataset/tomato-dataset-final/train/'
train_classes = [path for path in os.listdir(train_dir)]
train_imgs = dict([(ID, os.listdir(os.path.join(train_dir, ID))) for ID in train_classes])
train_classes_count = []
for trainClass in train_classes:
train_classes_count.append(len(train_imgs[trainClass]))
plt.figure(figsize=(15, 10))
g = sns.barplot(x=train_classes, y=train_classes_count)
g.set_xticklabels(labels=train_classes, rotation=30, ha='right')
```
# 7. Exercise VI: Classification Using DenseNet-161 Pretrained DCNN algorithm
In this exercise, you are required to load a pretrained DCNN model and test it with the testing dataset located under /dataset/tomato-dataset-final/test/ .
```
#@title Load prerequisites and define needed functions
import argparse
import os
import time
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
from collections import OrderedDict
import json
!pip install lime
from lime import lime_image
## YOU CAN CHANGE THESE VARIABLES
EPOCHS = 100
BATCH_SIZE = 20
LEARNING_RATE = 0.0001
data_dir = '/content/dataset/tomato-dataset-final/'
save_checkpoints = True
save_model_to = '/content/output/'
!mkdir /content/output/
IMG_SIZE = 220
NUM_WORKERS = 1
using_gpu = torch.cuda.is_available()
print_every = 300
ARCH = 'densenet161'
######################################################
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
    """Build train/val/test DataLoaders from an ImageFolder tree at ``root``.

    Expects ``root`` to contain ``train/``, ``val/`` and ``test/``
    subdirectories.  All three splits share the same deterministic transform
    (resize to the module-level IMG_SIZE, to-tensor, ImageNet mean/std
    normalization); only the train loader shuffles.  Returns the three
    loaders followed by the three underlying datasets.
    """
    traindir = os.path.join(root, 'train')
    valdir = os.path.join(root, 'val')
    testdir = os.path.join(root, 'test')
    # Standard ImageNet channel statistics, matching the pretrained backbone.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
            transforms.ToTensor(),
            normalize
        ])
    )
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
            transforms.ToTensor(),
            normalize
        ])
    )
    test_dataset = datasets.ImageFolder(
        testdir,
        transforms.Compose([
            transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
            transforms.ToTensor(),
            normalize
        ])
    )
    # Only training data is shuffled; evaluation loaders keep file order.
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=workers,
        pin_memory=pin_memory,
        sampler=None
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=workers,
        pin_memory=pin_memory
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=workers,
        pin_memory=pin_memory
    )
    return train_loader, val_loader, test_loader, train_dataset, val_dataset, test_dataset
# Data loading
train_loader, val_loader, test_loader, train_dataset, val_dataset, test_dataset = data_loader(data_dir, BATCH_SIZE, NUM_WORKERS, False)
print("Training Set: " + str(len(train_loader.dataset)))
print("Validation Set: " + str(len(val_loader.dataset)))
print("Testing Set: " + str(len(test_loader.dataset)))
##########################
### DOWNLOAD THE MODEL ###
##########################
## FEEL FREE TO CHANGE THESE PARAMETERS
model_URL = "http://faridnakhle.com/pv/models/RSGAI_DenseNet.zip"
save_data_to = "/content/models/"
model_file_name = "densenet.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
print("Downloading model...")
r = requests.get(model_URL, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
with open(save_data_to + model_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Model downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + model_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
print("All done!")
# Freeze parameters so we don't backprop through them
hidden_layers = [10240, 1024]
def make_model(structure, hidden_layers, lr, preTrained):
    """Build a transfer-learning classifier on a frozen torchvision backbone.

    Args:
        structure: "densenet161" selects DenseNet-161; any other value
            selects VGG-16.
        hidden_layers: sizes of the two hidden fully-connected layers.
        lr: unused here; kept for interface compatibility with callers.
        preTrained: whether to load ImageNet-pretrained backbone weights.

    Returns:
        The backbone with its classifier replaced by a fresh 3-layer head
        ending in LogSoftmax over 102 output classes.
    """
    if structure == "densenet161":
        backbone = models.densenet161(pretrained=preTrained)
        n_features = 2208
    else:
        backbone = models.vgg16(pretrained=preTrained)
        n_features = 25088
    n_classes = 102

    # Freeze the backbone so only the new head receives gradients.
    for weight in backbone.parameters():
        weight.requires_grad = False

    head = nn.Sequential(OrderedDict([
        ('dropout', nn.Dropout(0.5)),
        ('fc1', nn.Linear(n_features, hidden_layers[0])),
        ('relu1', nn.ReLU()),
        ('fc2', nn.Linear(hidden_layers[0], hidden_layers[1])),
        ('relu2', nn.ReLU()),
        ('fc3', nn.Linear(hidden_layers[1], n_classes)),
        ('output', nn.LogSoftmax(dim=1)),
    ]))
    backbone.classifier = head
    return backbone
model = make_model(ARCH, hidden_layers, LEARNING_RATE, True)
# define loss and optimizer
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=LEARNING_RATE)
def cal_accuracy(model, dataloader):
    """Evaluate `model` on `dataloader`, returning average loss and accuracy.

    Uses the module-level `criterion` (NLLLoss over log-probabilities) and
    moves both model and batches to CUDA.

    Args:
        model: network producing log-probabilities (LogSoftmax output).
        dataloader: iterable of (inputs, labels) batches.

    Returns:
        (validation_loss, accuracy): both averaged over the number of batches.
    """
    validation_loss = 0
    accuracy = 0
    for i, (inputs, labels) in enumerate(dataloader):
        # zero_grad() is unnecessary during evaluation (gradients are
        # disabled below) but harmless; kept to avoid surprising callers.
        optimizer.zero_grad()
        inputs, labels = inputs.to('cuda'), labels.to('cuda')
        model.to('cuda')
        with torch.no_grad():
            outputs = model.forward(inputs)
            # BUG FIX: accumulate the per-batch loss instead of overwriting
            # it, so dividing by len(dataloader) yields a true average
            # (previously only the LAST batch's loss was divided).
            validation_loss += criterion(outputs, labels)
            # exp(log-probs) -> probabilities; max(1)[1] is the predicted class.
            ps = torch.exp(outputs).data
            equality = (labels.data == ps.max(1)[1])
            accuracy += equality.type_as(torch.FloatTensor()).mean()
    validation_loss = validation_loss / len(dataloader)
    accuracy = accuracy / len(dataloader)
    return validation_loss, accuracy
RESUME = True
RESUME_PATH ='/content/models/RSGAI_DenseNet.pth'
def loading_checkpoint(path):
    """Rebuild a trained model from the checkpoint file at `path`.

    The checkpoint stores the hyper-parameters needed to reconstruct the
    architecture, the trained weights, and the class-to-index mapping.

    Returns:
        The reconstructed model in eval() mode.
    """
    state = torch.load(path)

    # Recreate the architecture exactly as it was trained. The backbone is
    # built non-pretrained because its weights come from the checkpoint.
    rebuilt = make_model(state['structure'],
                         state['hidden_layers'],
                         state['learning_rate'],
                         False)
    rebuilt.class_to_idx = state['class_to_idx']
    rebuilt.load_state_dict(state['state_dict'])
    rebuilt.eval()
    return rebuilt
# Restore the downloaded checkpoint if requested and present on disk.
if RESUME:
    print(RESUME_PATH)
    if os.path.isfile(RESUME_PATH):
        model = loading_checkpoint(RESUME_PATH)
        print("=> loaded checkpoint '{}'".format(RESUME_PATH))
    else:
        print("Invalid model Path")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): .cuda() is called unconditionally and will fail on a
# CPU-only machine even though `device` above falls back to CPU — confirm
# this notebook is always run with a GPU runtime.
model.cuda()
@torch.no_grad()
def get_all_preds(model, dataloader):
    """Run `model` over every batch in `dataloader`, collecting all outputs.

    Args:
        model: callable network; outputs are concatenated as-is.
        dataloader: iterable of (data, target) batches.

    Returns:
        (all_preds, all_labels): model outputs and matching targets,
        concatenated over all batches, both on the module-level `device`.
    """
    all_preds = torch.tensor([]).to(device)
    all_labels = torch.tensor([]).to(device)
    for data, target in dataloader:
        # Renamed from `input` to avoid shadowing the builtin.
        batch = data.to(device)
        target = target.to(device)
        # The @torch.no_grad() decorator already disables gradient tracking,
        # so the original inner no_grad() context was redundant.
        output = model(batch)
        all_preds = torch.cat((all_preds, output), dim=0)
        all_labels = torch.cat((all_labels, target), dim=0)
    return all_preds, all_labels
def get_num_correct(preds, labels):
    """Count rows of `preds` whose argmax equals the corresponding label."""
    predicted_classes = preds.argmax(dim=1)
    matches = predicted_classes.eq(labels)
    return matches.sum().item()
with torch.no_grad():
model.eval()
#Replace ? with one of the data loader. Choose between train_loader, val_loader, or test_loader
test_preds, test_labels = get_all_preds(model,?)
preds_correct = get_num_correct(test_preds.cuda(), test_labels.cuda())
#Replace ? with the total number of correctly predicted test samples
print('total correct:', ?)
print('accuracy:')
#Replace ? by writing the formula for the accuracy
#Hint: You can get the total number of test images using: len(test_loader.dataset)
ovallAccuracy = ?
print(ovallAccuracy)
```
# Solution
```
with torch.no_grad():
model.eval()
test_preds, test_labels = get_all_preds(model,test_loader)
preds_correct = get_num_correct(test_preds.cuda(), test_labels.cuda())
print('total correct:', preds_correct)
print('accuracy:')
print(((preds_correct / (len(test_loader.dataset))) * 100))
```
# 8. Exercise VII: Generating Confusion Matrix
In this exercise, you will plot the confusion matrix to visualize the prediction performance for each class.
```
#@title Defining required functions
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#percentage:
cm = cm.astype('float') * 100
# add percentage sign
mycm = plt.imshow(cm, interpolation='nearest', cmap=cmap)
mycm.set_clim([0,100])
cbar = plt.colorbar(mycm, shrink=0.82, ticks=list(range(0, 120, 20)))
cbar.ax.set_yticklabels(['0', '20', '40', '60', '80', '100']) # vertically oriented colorbar
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45, ha="right")
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, str(format(cm[i, j], fmt)) + "%", horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Arial"
plt.rcParams.update({'font.size': 12})
plt.ylabel('True class', fontsize=17, fontweight='bold')
plt.xlabel('Predicted class', fontsize=17, fontweight='bold')
import itertools
cmt = torch.zeros(10, 10, dtype=torch.int32) #10 is the number of classes
stacked = torch.stack(
(
test_labels
,test_preds.argmax(dim=1)
)
,dim=1
)
for p in stacked:
tl, pl = p.tolist()
tl = int(tl)
pl = int(pl)
cmt[tl, pl] = cmt[tl, pl] + 1
#Plot CM
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
## TO CALCULATE THE CONFUSION MATRIX, THE CONFUSION_MATRIX FUNCTION NEEDS
## the test labels (test_labels.cpu()) and test predictions (test_preds.argmax(dim=1).cpu()) as parameters.
## Replace ?? with the corresponding parameters.
cm = confusion_matrix(??)
print(cm)
plt.figure(figsize=(12, 12))
plot_confusion_matrix(cm, test_dataset.classes, True, 'Confusion matrix', cmap=plt.cm.Blues)
plt.savefig(save_model_to + 'confusionMatrix.eps', format='eps', bbox_inches='tight')
plt.show()
```
# Solution
```
cm = confusion_matrix(test_labels.cpu(), test_preds.argmax(dim=1).cpu())
print(cm)
plt.figure(figsize=(12, 12))
plot_confusion_matrix(cm, test_dataset.classes, True, 'Confusion matrix', cmap=plt.cm.Blues)
plt.savefig(save_model_to + 'confusionMatrix.eps', format='eps', bbox_inches='tight')
plt.show()
```
# 9. Exercise VIII: Generating Explanations With LIME
In this exercise you are required to use LIME in order to generate explanations for the classification of the image located under '/content/dataset/tomato-dataset-final/test/Tomato___Late_blight/image (1076)_cropped_1.JPG'.
```
from lime import lime_image
from skimage import io
from skimage import img_as_ubyte
from skimage.segmentation import mark_boundaries
#@title Define Prerequisite Functions
Pretrainedmodel = model
def get_PCNN_image(path):
    """Read the image at `path` and resize it to the 226x226 network input size."""
    loaded = cv2.imread(path)
    resized = cv2.resize(loaded, (226, 226))
    return resized
PerturbationImgs = []
def batch_predictPDCNN(images):
    """LIME batch-prediction hook: classify a list of images.

    Each image is run through `preprocess_transform`, stacked into one batch
    and pushed through the module-level `Pretrainedmodel`. Every image LIME
    passes in is also recorded in the global `PerturbationImgs` list so the
    perturbations can be visualized later.

    Returns:
        numpy array of per-class softmax probabilities, one row per image.
    """
    Pretrainedmodel.eval()
    tensors = tuple(preprocess_transform(img) for img in images)
    batch = torch.stack(tensors, dim=0)
    run_on = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Pretrainedmodel.to(run_on)
    batch = batch.to(run_on)
    logits = Pretrainedmodel(batch)
    probs = F.softmax(logits, dim=1)
    # Record the perturbed inputs for later visualization.
    PerturbationImgs.extend(images)
    return probs.detach().cpu().numpy()
def get_preprocess_transform():
    """Return the ToTensor + ImageNet-normalization pipeline used before inference."""
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        imagenet_norm,
    ])
    return pipeline
preprocess_transform = get_preprocess_transform()
explainerPCNN = lime_image.LimeImageExplainer()
## REPLACE ? WITH THE PATH OF IMAGE YOU NEED TO EXPLAIN
image2explain = ?
PCNNimg = get_PCNN_image(image2explain)
PCNNimg = cv2.cvtColor(PCNNimg, cv2.COLOR_BGR2RGB)
explanationPCNN = explainerPCNN.explain_instance(PCNNimg,
batch_predictPDCNN, top_labels=5, hide_color=0, num_samples=5000)
tempPCNN, maskPCNN = explanationPCNN.get_image_and_mask(explanationPCNN.top_labels[0], positive_only=True, num_features=1, hide_rest=False)
###############################
## SUPER PIXEL PERTURBATIONS ##
###############################
from matplotlib import gridspec
## VISUALIZE SOME PERTURBATIONS
# create a figure
fig = plt.figure()
# to change size of subplot's
fig.set_figheight(5)
# set total figure width to 15
fig.set_figwidth(15)
# create grid for different subplots
spec = gridspec.GridSpec(ncols=5, nrows=2, wspace=0.1, hspace=0.1)
print("PERTURBATIONS:")
i=0
for perturbationImg in PerturbationImgs:
p = fig.add_subplot(spec[i])
p.axis('off')
p.imshow(perturbationImg)
i = i + 1
if i > 9:
break
#######################
## SHOW EXPLANATION ##
#######################
print("FINAL EXPLANATION:")
tempCNNP, maskCNNP = explanationPCNN.get_image_and_mask(explanationPCNN.top_labels[0], positive_only=False, num_features=1, hide_rest=False)
fig, (ax1) = plt.subplots(1, 1, figsize=(5,5))
ax1.bbox_inches='tight'
ax1.pad_inches = 0
ax1.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
# USE A FUNCTION NAMED mark_boundaries(), PASSING THE IMAGE AND THE MASK AS PARAMETERS
# IN ORDER TO GENERATE THE EXPLANATION.
# REPLACE ?? BELOW WITH YOUR ANSWER
plt.imshow(??)
```
# Solution
```
image2explain = '/content/dataset/tomato-dataset-final/test/Tomato___Late_blight/image (1076)_cropped_1.JPG'
PCNNimg = get_PCNN_image(image2explain)
PCNNimg = cv2.cvtColor(PCNNimg, cv2.COLOR_BGR2RGB)
explanationPCNN = explainerPCNN.explain_instance(PCNNimg,
batch_predictPDCNN, top_labels=5, hide_color=0, num_samples=5000)
tempPCNN, maskPCNN = explanationPCNN.get_image_and_mask(explanationPCNN.top_labels[0], positive_only=True, num_features=1, hide_rest=False)
###############################
## SUPER PIXEL PERTURBATIONS ##
###############################
from matplotlib import gridspec
## VISUALIZE SOME PERTURBATIONS
# create a figure
fig = plt.figure()
# to change size of subplot's
fig.set_figheight(5)
# set total figure width to 15
fig.set_figwidth(15)
# create grid for different subplots
spec = gridspec.GridSpec(ncols=5, nrows=2, wspace=0.1, hspace=0.1)
print("PERTURBATIONS:")
i=0
for perturbationImg in PerturbationImgs:
p = fig.add_subplot(spec[i])
p.axis('off')
p.imshow(perturbationImg)
i = i + 1
if i > 9:
break
#######################
## SHOW EXPLANATION ##
#######################
print("FINAL EXPLANATION:")
tempCNNP, maskCNNP = explanationPCNN.get_image_and_mask(explanationPCNN.top_labels[0], positive_only=False, num_features=1, hide_rest=False)
fig, (ax1) = plt.subplots(1, 1, figsize=(5,5))
ax1.bbox_inches='tight'
ax1.pad_inches = 0
ax1.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.imshow(mark_boundaries(tempCNNP, maskCNNP))
```
| github_jupyter |
# 从图片生成数据库
### 生成`cnn_custom_simple.ipynb`文件里所需的`custom_data.npz`数据文件
```
import numpy as np
import os
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
%matplotlib inline
cwd = os.getcwd()
print ("所有python包载入完毕")
print ("当前目录为 [%s]" % (cwd) )
```
## 配置
```
# 数据路径
paths = ["data/celebs/Arnold_Schwarzenegger"
, "data/celebs/Junichiro_Koizumi"
, "data/celebs/Vladimir_Putin"
, "data/celebs/George_W_Bush"]
categories = ['Terminator', 'Koizumi', 'Putin', 'Bush']
# 配置
imgsize = [64, 64]
use_gray = 1
data_name = "custom_data"
print ("你的图片应当在此路径下:")
for i, path in enumerate(paths):
print (" [%d/%d] %s" % (i, len(paths), path))
print ("数据将会被存储在此路径下:\n [%s]"
% (cwd + '/data/' + data_name + '.npz'))
```
## RGB2GRAY图像色域转换
```
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale.

    Args:
        rgb: array of shape (H, W, C) for color input (only the first three
            channels are used), or a 2-D array if already grayscale.

    Returns:
        (H, W) luminance array using ITU-R BT.601 weights for color input;
        the input unchanged when it is not 3-dimensional.
    """
    # BUG FIX: the original used `len(rgb.shape) is 3`, an identity
    # comparison that only works because CPython caches small ints and that
    # raises a SyntaxWarning on Python 3.8+; use `==` for value equality.
    if len(rgb.shape) == 3:
        return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
    return rgb
```
## 图片载入
```
# Scan every class directory, load each valid image, optionally convert to
# grayscale, resize to `imgsize`, flatten each image into one row vector and
# stack the rows (with matching one-hot label rows) into totalimg/totallabel.
nclass = len(paths)
valid_exts = [".jpg", ".gif", ".png", ".tga", ".jpeg"]
imgcnt = 0
for i, relpath in zip(range(nclass), paths):
    path = cwd + "/" + relpath
    flist = os.listdir(path)
    for f in flist:
        # Skip anything that is not a recognized image file.
        if os.path.splitext(f)[1].lower() not in valid_exts:
            continue
        fullpath = os.path.join(path, f)
        # NOTE(review): scipy.misc.imread/imresize were removed in SciPy
        # 1.2+; this cell requires an old SciPy (or porting to imageio/PIL).
        currimg = imread(fullpath)
        # Convert to grayscale when requested.
        if use_gray:
            grayimg = rgb2gray(currimg)
        else:
            grayimg = currimg
        # Resize and scale pixel values into [0, 1].
        graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255.
        grayvec = np.reshape(graysmall, (1, -1))
        # One-hot label row for class i.
        curr_label = np.eye(nclass, nclass)[i:i+1, :]
        # BUG FIX: `imgcnt is 0` relied on CPython small-int identity and
        # raises a SyntaxWarning on Python 3.8+; use `==` for value equality.
        if imgcnt == 0:
            totalimg = grayvec
            totallabel = curr_label
        else:
            totalimg = np.concatenate((totalimg, grayvec), axis=0)
            totallabel = np.concatenate((totallabel, curr_label), axis=0)
        imgcnt = imgcnt + 1
print ("共有 %d 张图片" % (imgcnt))
```
## 将数据分为训练与测试两部分
```
def print_shape(string, x):
    """Print a labeled report of array `x`'s shape."""
    message = "SHAPE OF [%s] IS [%s]" % (string, x.shape,)
    print(message)
randidx = np.random.randint(imgcnt, size=imgcnt)
trainidx = randidx[0:int(4*imgcnt/5)]
testidx = randidx[int(4*imgcnt/5):imgcnt]
trainimg = totalimg[trainidx, :]
trainlabel = totallabel[trainidx, :]
testimg = totalimg[testidx, :]
testlabel = totallabel[testidx, :]
print_shape("totalimg", totalimg)
print_shape("totallabel", totallabel)
print_shape("trainimg", trainimg)
print_shape("trainlabel", trainlabel)
print_shape("testimg", testimg)
print_shape("testlabel", testlabel)
```
## 存为NPZ格式
```
savepath = cwd + "/data/" + data_name + ".npz"
np.savez(savepath, trainimg=trainimg, trainlabel=trainlabel
, testimg=testimg, testlabel=testlabel
, imgsize=imgsize, use_gray=use_gray, categories=categories)
print ("SAVED TO [%s]" % (savepath))
```
## 载入NPZ文件
```
# Load the dataset back from the .npz archive. The path is recomputed here
# so this cell can run in a fresh session, independent of the saving cell.
cwd = os.getcwd()
loadpath = cwd + "/data/" + data_name + ".npz"
l = np.load(loadpath)
print (l.files)
# Unpack the stored arrays.
trainimg_loaded = l['trainimg']
trainlabel_loaded = l['trainlabel']
testimg_loaded = l['testimg']
testlabel_loaded = l['testlabel']
categories_loaded = l['categories']
print ("[%d] TRAINING IMAGES" % (trainimg_loaded.shape[0]))
print ("[%d] TEST IMAGES" % (testimg_loaded.shape[0]))
# BUG FIX: report the path actually loaded from. The original printed
# `savepath`, which is defined only in the earlier saving cell and raises
# NameError when this cell runs in a fresh session.
print ("LOADED FROM [%s]" % (loadpath))
```
## 绘制载入数据
```
# Plot a few random samples from the RELOADED training set to sanity-check
# the round trip through the .npz file.
ntrain_loaded = trainimg_loaded.shape[0]
batch_size = 5
randidx = np.random.randint(ntrain_loaded, size=batch_size)
for i in randidx:
    currlabel_onehot = trainlabel_loaded[i, :]
    currlabel = np.argmax(currlabel_onehot)
    # BUG FIX: draw pixels from `trainimg_loaded` (the data reloaded from
    # disk), not the in-memory `trainimg` from the earlier cell — the
    # original referenced `trainimg`, which is undefined in a fresh session
    # and would not actually verify the saved file.
    if use_gray:
        currimg = np.reshape(trainimg_loaded[i, :], (imgsize[0], -1))
        plt.matshow(currimg, cmap=plt.get_cmap('gray'))
        plt.colorbar()
    else:
        currimg = np.reshape(trainimg_loaded[i, :], (imgsize[0], imgsize[1], 3))
        plt.imshow(currimg)
    title_string = ("[%d] CLASS-%d (%s)"
                    % (i, currlabel, categories_loaded[currlabel]))
    plt.title(title_string)
    plt.show()
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/geemap/tree/master/examples/template/template.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/examples/template/template.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/geemap/blob/master/examples/template/template.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
```
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print("Installing geemap ...")
subprocess.check_call(["python", "-m", "pip", "install", "geemap"])
import ee
import geemap
```
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
```
Map = geemap.Map(center=[40, -100], zoom=4)
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
#*
# @fileoverview Earth Engine Developer's Guide examples contained in
# the Global Surface Water tutorial, page 2.
#
# [START final_script]
###############################
# Asset List
###############################
gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')
occurrence = gsw.select('occurrence')
###############################
# Constants
###############################
# [START occurrence_visualization]
VIS_OCCURRENCE = {
'min':0,
'max':100,
'palette': ['red', 'blue']
}
# [END occurrence_visualization]
# [START watermask_visualization]
VIS_WATER_MASK = {
'palette': ['white', 'black']
}
# [END watermask_visualization]
###############################
# Calculations
###############################
# [START watermask_definition]
# Create a water mask layer, and set the image mask so that non-water areas
# are opaque.
water_mask = occurrence.gt(90).unmask(0)
# [END watermask_definition]
###############################
# Initialize Map Location
###############################
# [START interesting_locations]
# Uncomment one of the following statements to center the map.
# Map.setCenter(-90.162, 29.8597, 10); # New Orleans, USA
# Map.setCenter(-114.9774, 31.9254, 10); # Mouth of the Colorado River, Mexico
# Map.setCenter(-111.1871, 37.0963, 11); # Lake Powell, USA
# Map.setCenter(149.412, -35.0789, 11); # Lake George, Australia
# Map.setCenter(105.26, 11.2134, 9); # Mekong River Basin, SouthEast Asia
# Map.setCenter(90.6743, 22.7382, 10); # Meghna River, Bangladesh
# Map.setCenter(81.2714, 16.5079, 11); # Godavari River Basin Irrigation Project, India
# Map.setCenter(14.7035, 52.0985, 12); # River Oder, Germany & Poland
# Map.setCenter(-59.1696, -33.8111, 9); # Buenos Aires, Argentina
Map.setCenter(-74.4557, -8.4289, 11); # Ucayali River, Peru
# [END interesting_locations]
###############################
# Map Layers
###############################
Map.addLayer({
'eeObject': water_mask,
'visParams': VIS_WATER_MASK,
'name': '90% occurrence water mask',
'shown': False
})
# [START addLayer_occurrence_v2]
Map.addLayer({
'eeObject': occurrence.updateMask(occurrence.divide(100)),
'name': "Water Occurrence (1984-2015)",
'visParams': VIS_OCCURRENCE
})
# [END addLayer_occurrence_v2]
# [START final_script]
# [START initial_script]
gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')
occurrence = gsw.select('occurrence')
Map.addLayer(occurrence)
# [END initial_script]
# [START addLayer_occurrence_v1]
Map.addLayer({'eeObject': occurrence, 'name': 'Water Occurrence (1984-2015)'})
# [END addLayer_occurrence_v1]
# [START addLayer_watermask]
Map.addLayer({
'eeObject': water_mask,
'visParams': VIS_WATER_MASK,
'name': '90% occurrence water mask'
})
# [END addLayer_watermask]
```
## Display Earth Engine data layers
```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
| github_jupyter |
# Demo: Single Curvelet (Interactive)
```
import numpy as np
import matplotlib.pyplot as plt
from pyctlops import FDCT2D
from ipywidgets import interactive_output, IntSlider, VBox, HBox
```
### Setup
```
nx = 300
nz = 350
# Create operator
DCT = FDCT2D((nx, nz), nbangles_coarse=8)
# Create empty structure for curvelet
y_struct = DCT.struct(np.zeros(DCT.shape[0]))
```
### Plotting
```
def display_curvelet(scale=1, wedge=1, ix=1, iy=1):
    """Visualize a single curvelet in the space and frequency domains.

    Places a unit impulse at position (iy, ix) inside the chosen
    (scale, wedge) coefficient array, applies the adjoint curvelet
    transform, and renders a 2x2 grid: real/imaginary space-domain views
    and magnitude/weighted-phase spectra.

    Args:
        scale, wedge: 1-based scale and angular-wedge indices.
        ix, iy: 1-based coefficient position, clamped to the wedge's shape.
    """
    # Convert 1-based UI values to 0-based indices.
    s = scale - 1
    w = wedge - 1
    # Populate curvelet
    y_new = DCT.struct(np.zeros(DCT.shape[0]))
    A, B = y_new[s][w].shape
    # Clamp the requested position to the wedge's extent so out-of-range
    # values cannot index past the coefficient array.
    iy = max(1, min(iy, A))
    ix = max(1, min(ix, B))
    y_new[s][w][iy-1, ix-1] = 1.
    # Adjoint transform back to the space domain, then reshape to the grid.
    x = DCT.H * DCT.vect(y_new)
    x = x.reshape((nx, nz))
    # Centered 2-D spectrum for the frequency-domain panels.
    x_fk = np.fft.fft2(x)
    x_fk = np.fft.fftshift(x_fk)
    # Symmetric color limits at 80% of the peak space-domain value.
    vmin, vmax = 0.8 * np.array([-1,1]) * np.abs(np.max(x))
    fig, ax = plt.subplots(2,2, figsize=(8,8), sharex='row', sharey='row')
    ax[0, 0].imshow(np.real(x.T), cmap='gray', vmin=vmin, vmax=vmax)
    ax[0, 1].imshow(np.imag(x.T), cmap='gray', vmin=vmin, vmax=vmax)
    ax[1, 0].imshow(np.abs(x_fk.T), cmap='gray', vmin=0)
    ax[1, 1].imshow(np.abs(x_fk.T) * np.angle(x_fk.T, deg=True), cmap='gray', vmin=-180, vmax=180)
    ax[0, 0].set(title='Space domain (Real)')
    ax[0, 1].set(title='Space domain (Imag)')
    ax[1, 0].set(title='Frequency domain (Abs)')
    ax[1, 1].set(title='Frequency domain (Abs * Phase)')
    # Mark the image center lines on the space-domain panels.
    ax[0,0].axvline(nx/2, color='y', alpha=0.5)
    ax[0,0].axhline(nz/2, color='y', alpha=0.5)
    ax[0,1].axvline(nx/2, color='y', alpha=0.5)
    ax[0,1].axhline(nz/2, color='y', alpha=0.5)
    fig.tight_layout()
display_curvelet(scale=3, wedge=3, ix=y_struct[2][2].shape[1]//2+1, iy=y_struct[2][2].shape[0]//2+1)
```
### Interactive
```
max_scale = DCT.nbscales
max_wedge = len(y_struct[0])
max_iy, max_ix = y_struct[0][0].shape
curr_scale = 1
curr_wedge = 1
slider_scale = IntSlider(min=1, max=max_scale, value=curr_scale, step=1,description='Scales')
slider_wedge = IntSlider(min=1, max=max_wedge, value=curr_wedge, step=1, description='Wedge')
slider_ix = IntSlider(min=1, max=max_ix, value=max_ix//2 + 1, step=1, description='X Index')
slider_iy = IntSlider(min=1, max=max_iy, value=max_iy//2 + 1, step=1, description='Y Index')
def handle_scale_change(change):
    """Slider callback: the user selected a new scale.

    Updates the wedge slider's maximum for the new scale, then re-clamps
    the x/y index slider ranges to the current wedge's coefficient shape.
    """
    global curr_scale
    curr_scale = change.new
    # Each scale has its own number of angular wedges.
    slider_wedge.max = len(y_struct[curr_scale-1])
    global curr_wedge
    curr_wedge = slider_wedge.value
    # Resize the position sliders to the current wedge's (rows, cols) shape.
    A, B = y_struct[curr_scale-1][curr_wedge-1].shape
    slider_ix.max = B
    slider_iy.max = A
def handle_wedge_change(change):
    """Slider callback: the user selected a new wedge within the current scale.

    Re-clamps the x/y index slider ranges to the new wedge's shape.
    """
    global curr_wedge
    curr_wedge = change.new
    # Resize the position sliders to the new wedge's (rows, cols) shape.
    A, B = y_struct[curr_scale-1][curr_wedge-1].shape
    slider_ix.max = B
    slider_iy.max = A
slider_scale.observe(handle_scale_change, names='value')
slider_wedge.observe(handle_wedge_change, names='value')
out = interactive_output(display_curvelet,
{"scale": slider_scale, "wedge": slider_wedge, "ix": slider_ix, "iy": slider_iy})
vbox1 = VBox([slider_scale, slider_wedge])
vbox2 = VBox([slider_ix, slider_iy])
ui = HBox([vbox1, vbox2])
display(ui, out, continuous_update=False)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.07.Colab_Vegetation_Phenology_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Downloads the odc-colab Python module and runs it to setup ODC.
```
!wget -nc https://raw.githubusercontent.com/ceos-seo/odc-colab/master/odc_colab.py
from odc_colab import odc_colab_init
odc_colab_init(install_odc_gee=True)
```
Downloads an existing index and populates the new ODC environment with it.
```
from odc_colab import populate_db
populate_db()
```
# Landsat Vegetation Phenology
This notebook calculates vegetation phenology changes using Landsat 7 and Landsat 8 data. To detect changes, the algorithm uses Normalized Difference Vegetation Index (NDVI) which is a common proxy for vegetation growth and health. The outputs of this notebook can be used to assess differences in agriculture fields over time or space and also allow the assessment of growing states such as planting and harvesting.
<br>
There are two output products. The first output product is a time series boxplot of NDVI with the data binned by week, month, week of year, or month of year. The second output product is a time series lineplot of the mean NDVI for each year, with the data potentially binned by week or month. This product is useful for comparing different years to each other.
## Load Data Cube Configuration and Import Utilities
```
# Supress Warnings
import warnings
warnings.filterwarnings('ignore')
# Load Data Cube Configuration
from odc_gee import earthengine
dc = earthengine.Datacube(app='Phenology')
# Import Data Cube API
import utils.data_cube_utilities.data_access_api as dc_api
api = dc_api.DataAccessApi()
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
# Select a Product and Platform
product = "ls8_google"
platform = "LANDSAT_8"
```
## <span id="define_extents">Define the Extents of the Analysis [▴](#top)</span>
```
# MODIFY HERE
# Select the center of an analysis region (lat_long)
# Adjust the surrounding box size (box_size) around the center (in degrees)
# Remove the comment tags (#) below to change the sample location
# Maize Crops in Sudan Savanna, Ghana, Africa
lat_long = (11.11, -0.23)
box_size_deg = 0.0023
# Calculate the latitude and longitude bounds of the analysis box
latitude = (lat_long[0]-box_size_deg/2, lat_long[0]+box_size_deg/2)
longitude = (lat_long[1]-box_size_deg/2, lat_long[1]+box_size_deg/2)
# Define Time Range
# Landsat-8 time range: 07-Apr-2013 to current
# The format of the time date is YYYY-MM-DD
start_date = '2017-01-01'
end_date = '2020-12-31'
time_extents = (start_date, end_date)
# The code below renders a map that can be used to view the region.
from utils.data_cube_utilities.dc_display_map import display_map
display_map(latitude,longitude)
```
## Load bands needed for NDVI and remove clouds and water
```
landsat_dataset = dc.load(latitude = latitude,
longitude = longitude,
platform = platform,
time = time_extents,
product = product,
measurements = ['red', 'nir', 'pixel_qa'])
from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask
land_mask = landsat_qa_clean_mask(landsat_dataset, platform=platform, cover_types=['clear'])
landsat_dataset = landsat_dataset.drop('pixel_qa')
cleaned_dataset = landsat_dataset.where(land_mask)
```
## Define NDVI and add it to the dataset
```
def NDVI(dataset):
    """Normalized Difference Vegetation Index: (NIR - red) / (NIR + red)."""
    nir = dataset.nir
    red = dataset.red
    return (nir - red) / (nir + red)
cleaned_dataset['NDVI'] = NDVI(cleaned_dataset)
```
## Plot NDVI vs Time in a Box-and-Whisker Plot
```
# MODIFY HERE
# Specify the target aggregation type of the curve fit.
# Input can be either 'mean' or 'median'.
curve_fit_target = 'median'
# The maximum number of data points that appear along time in each plot.
# If more than this number of data points need to be plotted, a grid of plots will be created.
max_times_per_plot = 50
# Select the binning approach for the vegetation index. Choose one from the list below.
# None = do not bin the data
# 'week' = bin the data by week with an extended time axis
# 'month' = bin the data by month with an extended time axis
# 'weekofyear' = bin the data by week and years using a single year time axis
# 'monthofyear' = bin the data by month and years using a single year time axis
bin_by = 'month'
from utils.data_cube_utilities.plotter_utils import xarray_time_series_plot
veg_proxy = 'NDVI'
aggregated_by_str = None
if bin_by is None:
plotting_data = cleaned_dataset
elif bin_by == 'week':
plotting_data = cleaned_dataset.resample(time='1w').mean()
aggregated_by_str = 'Week'
elif bin_by == 'month':
plotting_data = cleaned_dataset.resample(time='1m').mean()
aggregated_by_str = 'Month'
elif bin_by == 'weekofyear':
plotting_data = cleaned_dataset.groupby('time.week').mean(dim=('time'))
aggregated_by_str = 'Week of Year'
elif bin_by == 'monthofyear':
plotting_data = cleaned_dataset.groupby('time.month').mean(dim=('time'))
aggregated_by_str = 'Month of Year'
params = dict(dataset=plotting_data, plot_descs={veg_proxy:{'none':[
{'box':{'boxprops':{'facecolor':'forestgreen'}}}]}})
params['plot_descs'][veg_proxy][curve_fit_target] = [{'gaussian_filter':{}}]
fig, curve_fit_plotting_data = \
xarray_time_series_plot(**params, fig_params=dict(figsize=(8,4), dpi=150),
max_times_per_plot=max_times_per_plot)
plt.title('Box-and-Whisker Plot of {1} with a Curvefit of {0} {1}'
.format(curve_fit_target.capitalize(), veg_proxy))
plt.tight_layout()
plt.show()
```
### Plot NDVI vs. Time for each year
Note that the curve fits here do not show where some times have no data, as is shown in the box-and-whisker plot. Notably, the curve interpolates over times with missing data that are not the first or last time (e.g. January or December for monthly binned data).
```
# MODIFY HERE
# Select the binning approach for the vegetation index. Set the 'bin_by' parameter.
# 'weekofyear' = bin the data by week and years using a single year time axis
# 'monthofyear' = bin the data by month and years using a single year time axis
bin_by = 'monthofyear'
years_with_data = []
plot_descs = {}
daysofyear_per_year = {}
plotting_data_years = {}
time_dim_name = None
for year in range(np.datetime64(start_date, 'Y').item().year, np.datetime64(end_date, 'Y').item().year+1):
year_data = cleaned_dataset.sel(time=slice('{}-01-01'.format(year), '{}-12-31'.format(year)))[veg_proxy]
if len(year_data['time']) == 0: # There is nothing to plot for this year.
print("Year {} has no data, so will not be plotted.".format(year))
continue
years_with_data.append(year)
spec_ind_dayofyear = year_data.groupby('time.dayofyear').mean()
daysofyear_per_year[year] = spec_ind_dayofyear[~spec_ind_dayofyear.isnull().sum(dim='dayofyear')].dayofyear
aggregated_by_str = None
if bin_by == 'weekofyear':
plotting_data_year = year_data.groupby('time.week').mean(dim=('time'))
time_dim_name = 'week'
elif bin_by == 'monthofyear':
plotting_data_year = year_data.groupby('time.month').mean(dim=('time'))
time_dim_name = 'month'
plotting_data_years[year] = plotting_data_year
num_time_pts = len(plotting_data_year[time_dim_name])
# Select the curve-fit type.
# See the documentation for `xarray_time_series_plot()` regarding the `plot_descs` parameter.
plot_descs[year] = {'mean':[{'gaussian_filter':{}}]}
time_dim_name = 'week' if bin_by == 'weekofyear' else 'month' if bin_by == 'monthofyear' else 'time'
num_times = 54 if bin_by == 'weekofyear' else 12
time_coords_arr = np.arange(1, num_times+1) # In xarray, week and month indices start at 1.
time_coords_da = xr.DataArray(time_coords_arr, coords={time_dim_name:time_coords_arr},
dims=[time_dim_name], name=time_dim_name)
coords = dict(list(plotting_data_years.values())[0].coords)
coords[time_dim_name] = time_coords_da
plotting_data = xr.Dataset(plotting_data_years, coords=coords)
params = dict(dataset=plotting_data, plot_descs=plot_descs)
xarray_time_series_plot(**params, fig_params=dict(figsize=(8,4), dpi=150))
plt.title('Line Plot of {0} for Each Year'.format(veg_proxy))
plt.show()
```
| github_jupyter |
# Load previous results
```
import pickle
with open("results.pkl", "rb") as fh:
final_results = pickle.load(fh)
```
# Loading the Corpus
```
from corpora import hulth, scopus, semeval, kp20k
from importlib import reload
corpus_name = "semeval"
if corpus_name == "scopus":
df = scopus.load()
elif corpus_name == "hulth":
df = hulth.load()
elif corpus_name == "semeval":
df = semeval.load()
elif corpus_name == "kp20k":
df = kp20k.load()
df = df.reset_index()
df = df[:100000]
else:
raise Exception("no corpus loaded")
```
# Removing documents with no Ground Truth keywords that are also in the text
```
def kwds_in_text(kwds, text):
    """Return the keywords from `kwds` that occur (case-insensitively) in `text`."""
    haystack = text.lower()
    return [kw for kw in kwds if kw.lower() in haystack]
actual_kwds = [[kw.lower() for kw in r["keywords"].split("; ")] for idx, r in df.iterrows()]
has_kwds_despite_kw_removal = [len(kwds_in_text(kwds, text)) > 0 for kwds, text in zip(actual_kwds, df["abstract"])]
df = df[has_kwds_despite_kw_removal]
```
# TFIDF keywords
```
from tfidf_KW_extraction import Tfidf_Pos_keywords
tfidf = Tfidf_Pos_keywords(df["abstract"])
```
# Fuzzy Matching
```
from fuzzywuzzy import fuzz
import numpy as np
def issimilar(a, b, t=80):
    '''
    Returns `True` if the strings a and b are sufficiently similar and
    `False` otherwise.

    Similarity is fuzzywuzzy's token_sort_ratio (word-order insensitive);
    values strictly greater than the threshold `t` (0-100) count as similar.
    '''
    # Return the comparison directly instead of branching to True/False.
    return fuzz.token_sort_ratio(a, b) > t
def fuzzy_is_in(candidate, kwds, t=80):
    """True if `candidate` fuzzily matches at least one keyword in `kwds`."""
    matches = [issimilar(candidate, kw, t) for kw in kwds]
    return np.any(matches)
```
# Keyword Extraction Tools
```
fh = open("corpora/stoplist.txt")
stopwords = fh.readlines()
fh.close()
stopwords = " ".join(stopwords).split("\n ")
from gensim.summarization import keywords as textrank_keywords
from jgtextrank import keywords_extraction as textrank_keywords_jg
from rake_nltk import Rake
from rake_nltk import Metric
from itertools import islice
def textrank_gensim(text, num_kwds=None, scores=False):
    """Extract keywords from `text` using gensim's TextRank.

    Args:
        text: document to analyse.
        num_kwds: maximum number of keywords to return (None = all).
        scores: if True, return (keyword, score) pairs; otherwise keywords only.

    Returns:
        List of keywords (or pairs); empty list when gensim raises
        ZeroDivisionError on degenerate (e.g. too-short) input.
    """
    try:
        results = textrank_keywords(text, ratio=1., scores=True)
    except ZeroDivisionError:
        # gensim raises this on very short / empty documents.
        return []
    if not scores:
        # Drop the score component; the loop variable is named `_score`
        # so it no longer shadows the `scores` parameter.
        results = [kw for kw, _score in results]
    return list(islice(results, num_kwds))
def textrank(text, num_kwds=None, scores=False):
    """
    Extract keywords from `text` with jgtextrank.

    jgtextrank occasionally raises KeyError on non-ASCII input; in that
    case the text is retried with non-ASCII characters blanked out.
    """
    def _extract(t):
        return textrank_keywords_jg(t, window=2, directed=False, stop_words=stopwords, workers=1)

    try:
        results, _ = _extract(text)
    except KeyError:
        # remove non-ascii characters and retry
        ascii_text = ''.join(c if ord(c) < 128 else ' ' for c in text)
        results, _ = _extract(ascii_text)
    if not scores:
        results = [pair[0] for pair in results]
    return list(islice(results, num_kwds))
def rake(text, num_kwds=None, scores=False):
    """
    Extract ranked keyphrases from `text` using RAKE.

    Returns at most `num_kwds` phrases; with scores=True each item is a
    (phrase, score) pair instead of a bare phrase.
    """
    extractor = Rake(stopwords=stopwords, punctuations=". , ? ! - : ; \" \' ( ) [ ]".split(), language='english')
    extractor.extract_keywords_from_text(text)
    if scores:
        # RAKE yields (score, phrase); flip to (phrase, score) to match the
        # other extractors' output convention.
        results = [(phrase, score) for score, phrase in extractor.get_ranked_phrases_with_scores()]
    else:
        results = extractor.get_ranked_phrases()
    return list(islice(results, num_kwds))
```
# Function for calculating precision, recall and F1
```
from sklearn.metrics import precision_score, recall_score, f1_score
import pandas as pd
def kwd_metrics(extracted=None, actual=None, fuzzy=True, t=80):
    """
    Score `extracted` keywords against ground-truth `actual` keywords.

    Matching is exact (lowercased) when t == 100 and fuzzy otherwise, with
    `t` the similarity threshold.  NOTE(review): the `fuzzy` flag is never
    consulted -- behaviour depends only on `t`; kept for compatibility.

    Returns a dict with keys 'precision', 'recall' and 'f1'.
    """
    extracted = [kw.lower() for kw in extracted]
    actual = [kw.lower() for kw in actual]
    assert 0 <= t <= 100, "t must be 0 <= t <= 100"
    if t == 100:
        # Exact matching: every distinct keyword is its own class.
        universe = set(actual).union(extracted)
        y_true = [kw in actual for kw in universe]
        y_pred = [kw in extracted for kw in universe]
    else:
        # Fuzzy matching: drop extracted keywords already similar to a true
        # keyword so near-duplicates don't appear twice in the universe.
        novel_extracted = [kw for kw in extracted if not fuzzy_is_in(kw, actual, t=t)]
        universe = set(actual).union(novel_extracted)
        y_true = [fuzzy_is_in(kw, actual, t=t) for kw in universe]
        y_pred = [fuzzy_is_in(kw, extracted, t=t) for kw in universe]
    return {
        'precision': precision_score(y_true, y_pred, average="binary"),
        'recall': recall_score(y_true, y_pred, average="binary"),
        'f1': f1_score(y_true, y_pred, average="binary"),
    }
```
# Benchmark
```
from tqdm import tqdm
from collections import defaultdict
from multiprocessing import Pool, cpu_count
from concurrent.futures import ProcessPoolExecutor
from functools import partial
def kw_extraction(text_iter, extractor, num_kwds=None, multi_threaded=False):
    """
    Apply `extractor` (bound to `num_kwds`) to every text in `text_iter`.

    Returns a lazy map of keyword lists.  NOTE(review): despite its name,
    `multi_threaded=True` uses a *process* pool, not threads.
    """
    bound_extractor = partial(extractor, num_kwds=num_kwds)
    if not multi_threaded:
        return map(bound_extractor, text_iter)
    with ProcessPoolExecutor(max_workers=8) as pool:
        return pool.map(bound_extractor, text_iter)
def assess_extractor(extractor, df, num_kwds=None, t=80, kw_removal=True):
    """
    Score `extractor` against the ground-truth keywords in `df`.

    Arguments:
    extractor -- callable(text, num_kwds=...) returning a keyword list
    df -- DataFrame with "abstract" and "keywords" (semicolon-separated) columns
    num_kwds -- cap on extracted keywords per document (None = no cap)
    t -- fuzzy-match threshold in [0, 100]; 100 disables fuzzy matching
    kw_removal -- if True, drop ground-truth keywords absent from the abstract

    Returns:
    DataFrame indexed like `df` with per-document precision/recall/f1.
    """
    # Iterate columns directly instead of the much slower df.iterrows().
    text_iter = df["abstract"].tolist()
    extr_kwds = kw_extraction(text_iter, extractor, num_kwds=num_kwds)
    actual_kwds = [[kw.lower() for kw in kws.split("; ")] for kws in df["keywords"]]
    if kw_removal:
        actual_kwds = [kwds_in_text(kwds, text) for kwds, text in zip(actual_kwds, df["abstract"])]
    scores = pd.DataFrame(columns=('precision', 'recall', 'f1'))
    for idx, a, e in zip(df.index, actual_kwds, extr_kwds):
        scores.loc[idx] = kwd_metrics(extracted=e, actual=a, t=t)
    return scores
```
# Train the Tfidf Vectorizer
```
from sklearn.feature_extraction.text import TfidfVectorizer
# Fit tf-idf vocabulary and IDF weights on the full corpus of abstracts.
vectorizer = TfidfVectorizer(stop_words="english", smooth_idf=True).fit(df["abstract"])
import numpy as np
def tfidf_scores(document, vectorizer):
    '''
    Return a dict mapping each in-vocabulary whitespace token of `document`
    to its raw tf-idf score under `vectorizer`; out-of-vocabulary tokens
    are silently skipped.
    '''
    weights = vectorizer.transform([document]).toarray()[0]
    vocab = vectorizer.vocabulary_
    token_scores = {}
    for token in document.split():
        if token in vocab:
            token_scores[token] = weights[vocab[token]]
    return token_scores
def weight_keywords(kwds, scores):
    '''
    Weight each keyword in `kwds` by the summed tf-idf of its tokens found
    in `scores`, damped by 10% per matched token.  Keywords with no scored
    token get 0.  Returns (keyword, weight) pairs sorted best-first.
    '''
    weighted = []
    for kwd in kwds:
        matched = [scores[tok] for tok in kwd.split() if tok in scores]
        if matched:
            # Damping mildly penalizes long phrases built from many tokens.
            weighted.append((kwd, sum(matched) * (1 - .1 * len(matched))))
        else:
            weighted.append((kwd, 0))
    return sorted(weighted, key=lambda pair: pair[1], reverse=True)
def rank_keywords(document, extr=None, vectorizer=None, num_kwds=None, scores=False):
    '''
    Run extractor `extr` on `document`, then re-rank its keywords by
    summed tf-idf weight.  Returns at most `num_kwds` items; with
    scores=True, (keyword, weight) pairs instead of bare keywords.
    '''
    # Ask the extractor for all candidates; the tf-idf ranking decides
    # which ones survive the final cut.
    candidates = extr(document, num_kwds=None, scores=False)
    ranked = weight_keywords(candidates, tfidf_scores(document, vectorizer))
    if not scores:
        ranked = [kw for kw, _ in ranked]
    return list(islice(ranked, num_kwds))
def tfidfify(extr, vectorizer):
    """
    Wrap the keyword extractor `extr` so its output is re-ranked by tf-idf.

    The wrapper keeps `extr`'s (text, num_kwds=None, scores=False) interface.
    """
    ranked_extractor = partial(rank_keywords, extr=extr, vectorizer=vectorizer)
    return ranked_extractor
# Tf-idf re-ranked variants of the two base extractors for the benchmark.
tfidfed_rake = tfidfify(rake, vectorizer)
tfidfed_textrank = tfidfify(textrank, vectorizer)
```
# Frankenrake's Textrank
```
from itertools import chain
def tfidfed_frankenrake(text, num_kwds=None, scores=False):
    """
    Combine RAKE and TextRank candidates ("frankenrake"), re-ranked by tf-idf.
    """
    def frankenrake(doc, num_kwds=None, scores=False):
        # Deduplicated union of both extractors' keyword candidates.
        return list({kw for kw in chain(rake(doc), textrank(doc))})

    ranked = tfidfify(frankenrake, vectorizer)
    return list(islice(ranked(text, scores=scores), num_kwds))
```
# Run Evaluation
### One job at a time
```
"""
from collections import defaultdict
kw_threshholds = range(1, 21, 1)
scores = {i: {"textrank": None, "rake": None, "tfidfed_textrank": None, "tfidfed_rake": None}
for i in kw_threshholds}
for i in tqdm(kw_threshholds):
for extr_name, extr in zip(("textrank", "rake", "tfidfed_textrank", "tfidfed_rake", "frankenrake", "tfidf"), \
(textrank, rake, tfidfed_textrank, tfidfed_rake, tfidfed_frankenrake, tfidf)):
scores[i][extr_name] = assess_extractor(extr, df, num_kwds=i, t=100, kw_removal=True)
"""
```
# multiple jobs at a time
```
def partial_assess_extractor(i):
    """
    Benchmark every extractor at keyword cutoff `i` (exact matching, with
    keyword removal).  Returns (i, {extractor_name: score DataFrame}).
    """
    extractors = (("textrank", textrank), ("rake", rake),
                  ("tfidfed_textrank", tfidfed_textrank), ("tfidfed_rake", tfidfed_rake),
                  ("frankenrake", tfidfed_frankenrake), ("tfidf", tfidf))
    scores = {"textrank": None, "rake": None, "tfidfed_textrank": None, "tfidfed_rake": None}
    for name, extr in tqdm(extractors):
        scores[name] = assess_extractor(extr, df, num_kwds=i, t=100, kw_removal=True)
    return (i, scores)
%%time
from concurrent.futures import ProcessPoolExecutor
# Fan the per-cutoff benchmark out over 32 worker processes; each call
# evaluates every extractor at one keyword cutoff i in 1..20.
with ProcessPoolExecutor(max_workers=32) as p:
results = p.map(partial_assess_extractor, range(1, 21, 1))
results = list(results)
kw_threshholds = range(1, 21, 1)
scores = {i: {"textrank": None, "rake": None, "tfidfed_textrank": None, "tfidfed_rake": None}
for i in kw_threshholds}
# Merge each worker's (cutoff, per-extractor scores) pair into one dict.
for r in results:
for k, v in r[1].items():
scores[r[0]][k] = v
final_results[corpus_name] = scores
```
# multiple jobs end
# Store results
```
# Persist the accumulated per-corpus results for later analysis/plotting.
import pickle
with open("results.pkl", "wb") as fh:
pickle.dump(final_results, fh)
from collections import namedtuple
def get_data(algorithm, corpus, thresholds=None):
    """
    Collect mean precision/recall/F1 curves for `algorithm` on `corpus`.

    Arguments:
    algorithm -- extractor name, a key in final_results[corpus][i]
    corpus -- corpus name, a key in the global `final_results`
    thresholds -- iterable of keyword cutoffs; defaults to the global
                  `kw_threshholds` (backward compatible generalization).

    Returns:
    Retrieval_scores namedtuple (p, r, f, a): three score lists plus the
    algorithm name.
    """
    if thresholds is None:
        thresholds = kw_threshholds  # module-level benchmark range
    Retrieval_scores = namedtuple("Retrieval_scores", "p r f a".split())
    scores = final_results[corpus]
    precision = [scores[i][algorithm]["precision"].mean() for i in thresholds]
    recall = [scores[i][algorithm]["recall"].mean() for i in thresholds]
    f1 = [scores[i][algorithm]["f1"].mean() for i in thresholds]
    return Retrieval_scores(precision, recall, f1, algorithm)
# Compare two algorithms' precision/recall/F1 curves on one corpus.
X = get_data("tfidf", "scopus")
Y = get_data("rake", "scopus")
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
plt.rcParams["font.family"] = 'serif'
colors = sns.color_palette("Set1", 6)
fig, ax = plt.subplots(figsize=(10, 8))
# Dotted markers: first algorithm; solid faded markers: second algorithm.
# pi = precision, rho = recall.
ax.plot(kw_threshholds, X.p, ':v', c=colors[0], label=f"$\pi$ {X.a}")
ax.plot(kw_threshholds, X.r, ':D', c=colors[1], label=f"$\\rho$ {X.a}")
ax.plot(kw_threshholds, X.f, ':d', c=colors[2], label=f"F1 {X.a}")
ax.plot(kw_threshholds, Y.p, '-v', c=colors[0], alpha=.4, label=f"$\pi$ {Y.a}")
ax.plot(kw_threshholds, Y.r, '-D', c=colors[1], alpha=.4, label=f"$\\rho$ {Y.a}")
ax.plot(kw_threshholds, Y.f, '-d', c=colors[2], alpha=.4, label=f"F1 {Y.a}")
ax.set_ylim(0.0, .5)
ax.set_xlabel('Number of Keyphrases', fontsize=16)
ax.set_ylabel('Score', fontsize=16)
ax.legend(fontsize=14)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#ax.set_facecolor("white")
for spine in plt.gca().spines.values():
spine.set_visible(True)
#plt.title(f"{corpus_name} without Fuzzy Matching + KW Removal", fontsize=18)
plt.xticks(kw_threshholds)
plt.show()
```
# Enlightening Tools
```
# Spot-check one random document: show its abstract, ground-truth keywords,
# the extractor's output, and exact-match precision/recall/F1.
idx = np.random.choice(df.index)
extractor = rake
num_kwds = 10
text = df.loc[idx, "abstract"]
actual = df.loc[idx, "keywords"].split("; ")
actual = kwds_in_text(actual, text)
actual = list(map(lambda s: s.lower(), actual))
extracted = extractor(df.loc[idx, "abstract"], num_kwds=num_kwds)
extracted = list(map(lambda s: s.lower(), extracted))
# Exact (non-fuzzy) scoring over the union of actual and extracted keywords.
all_kwds = set(actual).union(extracted)
y_true = [kw in actual for kw in all_kwds]
y_pred = [kw in extracted for kw in all_kwds]
precision = precision_score(y_true, y_pred, average="binary")
recall = recall_score(y_true, y_pred, average="binary")
f1 = f1_score(y_true, y_pred, average="binary")
print("ABSTRACT:\n", df.loc[idx, "abstract"], end="\n\n")
print("ACTUAL KEYWORDS:\n", ", ".join(actual), end="\n\n")
print("EXTRACTED:\n", ", ".join(extracted), end="\n\n")
print(f"PRECISION: {precision}\nRECALL: {recall}\nF1: {f1}")
def plot_ranking_stats(num_kwds, algorithm, metric):
    """
    Plot every document's `metric` score (sorted descending) for one
    algorithm/cutoff, with the mean drawn as a red horizontal line.
    Reads the global `scores` dict produced by the benchmark.
    """
    per_doc = scores[num_kwds][algorithm]
    ranked = per_doc.sort_values(by=metric)[::-1][metric]
    avg = per_doc[metric].mean()
    fig, ax = plt.subplots(figsize=(16,9))
    ax.plot(range(ranked.values.shape[0]), ranked.values)
    ax.axhline(avg, color="red")
    ax.set_xlabel("rank")
    ax.set_ylabel("score")
    ax.set_title(f"{metric}@{num_kwds} Keywords using {algorithm}")
    plt.show()
plot_ranking_stats(5, "tfidfed_rake", "f1")
plot_ranking_stats(5, "rake", "f1")
```
| github_jupyter |
# Optimization Methods
Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
<img src="images/cost.jpg" style="width:650px;height:300px;">
<caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
**Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
To get started, run the following code to import the libraries you will need.
### <font color='darkblue'> Updates to Assignment <font>
#### If you were working on a previous version
* The current notebook filename is version "Optimization_methods_v1b".
* You can find your work in the file directory as version "Optimization methods'.
* To see the file directory, click on the Coursera logo at the top left of the notebook.
#### List of Updates
* op_utils is now opt_utils_v1a. Assertion statement in `initialize_parameters` is fixed.
* opt_utils_v1a: `compute_cost` function now accumulates total cost of the batch without taking the average (average is taken for entire epoch instead).
* In `model` function, the total cost per mini-batch is accumulated, and the average of the entire epoch is taken as the average cost. So the plot of the cost function over time is now a smooth downward curve instead of an oscillating curve.
* Print statements used to check each function are reformatted, and 'expected output` is reformatted to match the format of the print statements (for easier visual comparisons).
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
```
## 1 - Gradient Descent
A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
**Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Perform one vanilla gradient-descent step on every layer's parameters.

    Arguments:
    parameters -- python dictionary with keys "W1", "b1", ..., "WL", "bL"
    grads -- python dictionary with matching gradients "dW1", "db1", ...
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    num_layers = len(parameters) // 2  # each layer contributes one W and one b

    # Standard rule: param := param - alpha * gradient, layer by layer.
    for layer in range(1, num_layers + 1):
        W_key, b_key = "W" + str(layer), "b" + str(layer)
        parameters[W_key] = parameters[W_key] - learning_rate * grads["d" + W_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]

    return parameters
# Smoke test update_parameters_with_gd against the course's fixed test case;
# printed values should match the "Expected Output" cell below.
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
```
**Expected Output**:
```
W1 =
[[ 1.63535156 -0.62320365 -0.53718766]
[-1.07799357 0.85639907 -2.29470142]]
b1 =
[[ 1.74604067]
[-0.75184921]]
W2 =
[[ 0.32171798 -0.25467393 1.46902454]
[-2.05617317 -0.31554548 -0.3756023 ]
[ 1.1404819 -1.09976462 -0.1612551 ]]
b2 =
[[-0.88020257]
[ 0.02561572]
[ 0.57539477]]
```
A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
- **(Batch) Gradient Descent**:
``` python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
# Forward propagation
a, caches = forward_propagation(X, parameters)
# Compute cost.
cost += compute_cost(a, Y)
# Backward propagation.
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
- **Stochastic Gradient Descent**:
```python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
for j in range(0, m):
# Forward propagation
a, caches = forward_propagation(X[:,j], parameters)
# Compute cost
cost += compute_cost(a, Y[:,j])
# Backward propagation
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
<img src="images/kiank_sgd.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
**Note** also that implementing SGD requires 3 for-loops in total:
1. Over the number of iterations
2. Over the $m$ training examples
3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
In practice, you'll often get faster results if you use neither the whole training set nor only one training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
<img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
<font color='blue'>
**What you should remember**:
- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
- You have to tune a learning rate hyperparameter $\alpha$.
- With a well-tuned mini-batch size, it usually outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
## 2 - Mini-Batch Gradient descent
Let's learn how to build mini-batches from the training set (X, Y).
There are two steps:
- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
<img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
<img src="images/kiank_partition.png" style="width:550px;height:300px;">
**Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
```python
first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
...
```
Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be $m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$.
```
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector, of shape (label size, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    seed -- RNG seed so the shuffle is reproducible across calls

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    np.random.seed(seed)            # To make your "random" minibatches the same as ours
    m = X.shape[1]                  # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y) with a single shared permutation so the i-th
    # column of X stays paired with the i-th column of Y.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    # Generalized from the original hard-coded (1, m) reshape: supports label
    # matrices with any number of rows while behaving identically for 1-row Y.
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))

    # Step 2: Partition (shuffled_X, shuffled_Y) into full-size batches.
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Handle the end case (last mini-batch < mini_batch_size).  The unused
    # `end` variable from the original implementation has been removed.
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size:]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size:]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches
# Smoke test random_mini_batches on the course test case: two full 64-column
# batches plus one 20-column remainder are expected.
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td > **shape of the 1st mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_X** </td>
<td > (12288, 20) </td>
</tr>
<tr>
<td > **shape of the 1st mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_Y** </td>
<td > (1, 20) </td>
</tr>
<tr>
<td > **mini batch sanity check** </td>
<td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
</tr>
</table>
<font color='blue'>
**What you should remember**:
- Shuffling and Partitioning are the two steps required to build mini-batches
- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
## 3 - Momentum
Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
<img src="images/opt_momentum.png" style="width:400px;height:250px;">
<caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
**Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
for $l =1,...,L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
**Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
```
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Initializes the velocity as a python dictionary with:
                - keys: "dW1", "db1", ..., "dWL", "dbL"
                - values: numpy arrays of zeros shaped like the parameters.

    Arguments:
    parameters -- python dictionary with keys "W1", "b1", ..., "WL", "bL"

    Returns:
    v -- python dictionary containing the (all-zero) current velocity.
    """
    num_layers = len(parameters) // 2  # number of layers in the neural networks
    v = {}

    # One zero array per gradient key, matching the parameter's shape.
    for layer in range(1, num_layers + 1):
        v["dW" + str(layer)] = np.zeros_like(parameters["W" + str(layer)])
        v["db" + str(layer)] = np.zeros_like(parameters["b" + str(layer)])

    return v
# Smoke test initialize_velocity: all velocity arrays should be zeros shaped
# like the corresponding parameters.
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
```
**Expected Output**:
```
v["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db1"] =
[[ 0.]
[ 0.]]
v["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
```
**Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
$$ \begin{cases}
v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
\end{cases}\tag{3}$$
$$\begin{cases}
v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
\end{cases}\tag{4}$$
where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    One momentum update per layer:
        v     := beta * v + (1 - beta) * grad      (exponentially weighted average)
        param := param - learning_rate * v

    Arguments:
    parameters -- python dictionary with keys "W1", "b1", ..., "WL", "bL"
    grads -- python dictionary with gradients "dW1", "db1", ...
    v -- python dictionary with the current velocities "dW1", "db1", ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
    num_layers = len(parameters) // 2  # number of layers in the neural networks

    for layer in range(1, num_layers + 1):
        dW_key, db_key = "dW" + str(layer), "db" + str(layer)
        # Blend the new gradient into the running velocity.
        v[dW_key] = beta * v[dW_key] + (1 - beta) * grads[dW_key]
        v[db_key] = beta * v[db_key] + (1 - beta) * grads[db_key]
        # Step the parameters along the smoothed direction.
        parameters["W" + str(layer)] = parameters["W" + str(layer)] - learning_rate * v[dW_key]
        parameters["b" + str(layer)] = parameters["b" + str(layer)] - learning_rate * v[db_key]

    return parameters, v
# Smoke test update_parameters_with_momentum against the course test case.
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
# NOTE(review): the trailing "v" in the string below looks like a typo, but
# the course's "Expected Output" cell includes it, so it is kept as-is.
print("v[\"db2\"] = v" + str(v["db2"]))
```
**Expected Output**:
```
W1 =
[[ 1.62544598 -0.61290114 -0.52907334]
[-1.07347112 0.86450677 -2.30085497]]
b1 =
[[ 1.74493465]
[-0.76027113]]
W2 =
[[ 0.31930698 -0.24990073 1.4627996 ]
[-2.05974396 -0.32173003 -0.38320915]
[ 1.13444069 -1.0998786 -0.1713109 ]]
b2 =
[[-0.87809283]
[ 0.04055394]
[ 0.58207317]]
v["dW1"] =
[[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]]
v["db1"] =
[[-0.01228902]
[-0.09357694]]
v["dW2"] =
[[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]]
v["db2"] = v[[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]
```
**Note** that:
- The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
- If $\beta = 0$, then this just becomes standard gradient descent without momentum.
**How do you choose $\beta$?**
- The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much.
- Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
- Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$.
<font color='blue'>
**What you should remember**:
- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
## 4 - Adam
Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
**How does Adam work?**
1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
3. It updates parameters in a direction based on combining information from "1" and "2".
The update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
\end{cases}$$
where:
- t counts the number of steps taken of Adam
- L is the number of layers
- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
- $\alpha$ is the learning rate
- $\varepsilon$ is a very small number to avoid dividing by zero
As usual, we will store all parameters in the `parameters` dictionary
**Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
for $l = 1, ..., L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
```
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
    """
    Initializes the Adam accumulators v (first moment) and s (second moment)
    as zero-filled dictionaries shaped like the parameters, with keys
    "dW1", "db1", ..., "dWL", "dbL".

    Arguments:
    parameters -- python dictionary with keys "W1", "b1", ..., "WL", "bL"

    Returns:
    v -- exponentially weighted average of the gradient (all zeros).
    s -- exponentially weighted average of the squared gradient (all zeros).
    """
    num_layers = len(parameters) // 2  # number of layers in the neural networks
    v = {}
    s = {}

    # Both moments start at zero, one array per W and per b of each layer.
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            grad_key = "d" + prefix + str(layer)
            template = parameters[prefix + str(layer)]
            v[grad_key] = np.zeros_like(template)
            s[grad_key] = np.zeros_like(template)

    return v, s
# Smoke test: build a small test case (helper presumably from the course's
# testCases module -- not defined in this notebook) and print the Adam
# accumulators, which should all be zero arrays at this point.
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
```
**Expected Output**:
```
v["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db1"] =
[[ 0.]
[ 0.]]
v["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
s["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
s["db1"] =
[[ 0.]
[ 0.]]
s["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
s["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
```
**Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
\end{cases}$$
**Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam.

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam step counter (starts at 1); used for bias correction of v and s
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """

    L = len(parameters) // 2         # number of layers in the neural networks
    v_corrected = {}                 # bias-corrected first moment estimate
    s_corrected = {}                 # bias-corrected second moment estimate

    # Perform Adam update on all parameters
    for l in range(L):
        # Moving average of the gradients (first moment).
        v["dW" + str(l + 1)] = beta1 * v["dW" + str(l + 1)] + (1 - beta1) * grads['dW' + str(l + 1)]
        v["db" + str(l + 1)] = beta1 * v["db" + str(l + 1)] + (1 - beta1) * grads['db' + str(l + 1)]

        # Bias correction: compensates for the zero-initialization of v at t=0.
        v_corrected["dW" + str(l + 1)] = v["dW" + str(l + 1)] / (1 - np.power(beta1, t))
        v_corrected["db" + str(l + 1)] = v["db" + str(l + 1)] / (1 - np.power(beta1, t))

        # Moving average of the squared gradients (second moment).
        s["dW" + str(l + 1)] = beta2 * s["dW" + str(l + 1)] + (1 - beta2) * np.power(grads['dW' + str(l + 1)], 2)
        s["db" + str(l + 1)] = beta2 * s["db" + str(l + 1)] + (1 - beta2) * np.power(grads['db' + str(l + 1)], 2)

        # Bias correction for the second moment.
        s_corrected["dW" + str(l + 1)] = s["dW" + str(l + 1)] / (1 - np.power(beta2, t))
        s_corrected["db" + str(l + 1)] = s["db" + str(l + 1)] / (1 - np.power(beta2, t))

        # Parameter update. BUG FIX: epsilon belongs OUTSIDE the square root,
        # i.e. W = W - alpha * v_corr / (sqrt(s_corr) + epsilon), matching the
        # update rule stated above and Kingma & Ba (2015). The previous code
        # computed np.sqrt(s_corr + epsilon).
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v_corrected["dW" + str(l + 1)] / (np.sqrt(s_corrected["dW" + str(l + 1)]) + epsilon)
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v_corrected["db" + str(l + 1)] / (np.sqrt(s_corrected["db" + str(l + 1)]) + epsilon)

    return parameters, v, s
# Smoke test: run one Adam update (t=2) on a fixed test case (helper presumably
# from the course's testCases module -- not defined in this notebook) and print
# the updated parameters and both moment accumulators.
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)

print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
```
**Expected Output**:
```
W1 =
[[ 1.63178673 -0.61919778 -0.53561312]
[-1.08040999 0.85796626 -2.29409733]]
b1 =
[[ 1.75225313]
[-0.75376553]]
W2 =
[[ 0.32648046 -0.25681174 1.46954931]
[-2.05269934 -0.31497584 -0.37661299]
[ 1.14121081 -1.09245036 -0.16498684]]
b2 =
[[-0.88529978]
[ 0.03477238]
[ 0.57537385]]
v["dW1"] =
[[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]]
v["db1"] =
[[-0.01228902]
[-0.09357694]]
v["dW2"] =
[[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]]
v["db2"] =
[[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]
s["dW1"] =
[[ 0.00121136 0.00131039 0.00081287]
[ 0.0002525 0.00081154 0.00046748]]
s["db1"] =
[[ 1.51020075e-05]
[ 8.75664434e-04]]
s["dW2"] =
[[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
[ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
[ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]
s["db2"] =
[[ 5.49507194e-05]
[ 2.75494327e-03]
[ 5.50629536e-04]]
```
You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
## 5 - Model with different optimization algorithms
Lets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
```
train_X, train_Y = load_dataset()
```
We have already implemented a 3-layer neural network. You will train it with:
- Mini-batch **Gradient Descent**: it will call your function:
- `update_parameters_with_gd()`
- Mini-batch **Momentum**: it will call your functions:
- `initialize_velocity()` and `update_parameters_with_momentum()`
- Mini-batch **Adam**: it will call your functions:
- `initialize_adam()` and `update_parameters_with_adam()`
```
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- "gd", "momentum", or "adam"; selects the parameter-update rule
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters
    """

    L = len(layers_dims)             # number of layers in the neural networks
    costs = []                       # to keep track of the cost
    t = 0                            # initializing the counter required for Adam update
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]                   # number of training examples

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer (only momentum/adam need per-parameter state)
    if optimizer == "gd":
        pass # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):

        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0

        for minibatch in minibatches:

            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch

            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)

            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)

            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # Update parameters with the selected optimizer
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1 # Adam counter; must be incremented per update for bias correction
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)
        # Average cost per training example for this epoch
        cost_avg = cost_total / m

        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost_avg))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
```
You will now run this 3 layer neural network with each of the 3 optimization methods.
### 5.1 - Mini-batch Gradient descent
Run the following code to see how the model does with mini-batch gradient descent.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.2 - Mini-batch gradient descent with momentum
Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.3 - Mini-batch with Adam mode
Run the following code to see how the model does with Adam.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.4 - Summary
<table>
<tr>
<td>
**optimization method**
</td>
<td>
**accuracy**
</td>
<td>
**cost shape**
</td>
</tr>
<td>
Gradient descent
</td>
<td>
79.7%
</td>
<td>
oscillations
</td>
<tr>
<td>
Momentum
</td>
<td>
79.7%
</td>
<td>
oscillations
</td>
</tr>
<tr>
<td>
Adam
</td>
<td>
94%
</td>
<td>
smoother
</td>
</tr>
</table>
Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
Adam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
Some advantages of Adam include:
- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
- Usually works well even with little tuning of hyperparameters (except $\alpha$)
**References**:
- Adam paper: https://arxiv.org/pdf/1412.6980.pdf
| github_jupyter |
# spaCy Tutorial
**(C) 2019-2021 by [Damir Cavar](http://damir.cavar.me/)**
**Version:** 1.5, January 2021
**Download:** This and various other Jupyter notebooks are available from my [GitHub repo](https://github.com/dcavar/python-tutorial-for-ipython).
This is a tutorial related to the L665 course on Machine Learning for NLP focusing on Deep Learning, Spring 2018 at Indiana University. The following tutorial assumes that you are using a newer distribution of Python 3 and spaCy 2.2 or newer.
## Introduction to spaCy
Follow the instructions on the [spaCy homepage](https://spacy.io/usage/) about installation of the module and language models. Your local spaCy module is correctly installed if the following command is successful:
```
import spacy
```
We can load the Spanish NLP pipeline (`es_core_news_sm`) in the following way:
```
nlp = spacy.load("es_core_news_sm")
```
### Tokenization
```
doc = nlp(u'Como estas? Estoy bien.')
for token in doc:
print(token.text)
```
### Part-of-Speech Tagging
We can tokenize and part of speech tag the individual tokens using the following code:
```
doc = nlp(u'Como estas? Estoy bien.')
for token in doc:
print("\t".join( (token.text, str(token.idx), token.lemma_, token.pos_, token.tag_, token.dep_,
token.shape_, str(token.is_alpha), str(token.is_stop) )))
```
The above output contains for every token in a line the token itself, the lemma, the Part-of-Speech tag, the dependency label, the orthographic shape (upper and lower case characters as X or x respectively), the boolean for the token being an alphanumeric string, and the boolean for it being a *stopword*.
### Dependency Parse
Using the same approach as above for PoS-tags, we can print the Dependency Parse relations:
```
for token in doc:
print(token.text, token.dep_, token.head.text, token.head.pos_,
[child for child in token.children])
```
As specified in the code, each line represents one token. The token is printed in the first column, followed by the dependency relation to it from the token in the third column, followed by its main category type.
### Named Entity Recognition
Similarly to PoS-tags and Dependency Parse Relations, we can print out Named Entity labels:
```
for ent in doc.ents:
print(ent.text, ent.start_char, ent.end_char, ent.label_)
```
We can extend the input with some more entities:
```
doc = nlp(u'Ali Hassan Kuban said that Apple Inc. will buy Google in May 2018.')
```
The corresponding NE-labels are:
```
for ent in doc.ents:
print(ent.text, ent.start_char, ent.end_char, ent.label_)
```
### Pattern Matching in spaCy
```
from spacy.matcher import Matcher

matcher = Matcher(nlp.vocab)
# Token-level pattern: "hello" (case-insensitive), then one punctuation token,
# then "world" (case-insensitive).
pattern = [{'LOWER': 'hello'}, {'IS_PUNCT': True}, {'LOWER': 'world'}]
matcher.add('HelloWorld', None, pattern)

doc = nlp(u'Hello, world! Hello... world!')
matches = matcher(doc)
# Each match is a (match_id, start, end) triple of token offsets into doc.
for match_id, start, end in matches:
    string_id = nlp.vocab.strings[match_id]  # Get string representation
    span = doc[start:end]  # The matched span
    print(match_id, string_id, start, end, span.text)

print("-" * 50)

# Second input for comparison: note the pattern requires exactly one
# punctuation token between the two words.
doc = nlp(u'Hello, world! Hello world!')
matches = matcher(doc)
for match_id, start, end in matches:
    string_id = nlp.vocab.strings[match_id]  # Get string representation
    span = doc[start:end]  # The matched span
    print(match_id, string_id, start, end, span.text)
```
### spaCy is Missing
From the linguistic standpoint, when looking at the analytical output of the NLP pipeline in spaCy, there are some important components missing:
- Clause boundary detection
- Constituent structure trees (scope relations over constituents and phrases)
- Anaphora resolution
- Coreference analysis
- Temporal reference resolution
- ...
#### Clause Boundary Detection
Complex sentences consist of clauses. For precise processing of semantic properties of natural language utterances we need to segment the sentences into clauses. The following sentence:
*The man said that the woman claimed that the child broke the toy.*
can be broken into the following clauses:
- Matrix clause: [ *the man said* ]
- Embedded clause: [ *that the woman claimed* ]
- Embedded clause: [ *that the child broke the toy* ]
These clauses do not form an ordered list or flat sequence, they in fact are hierarchically organized. The matrix clause verb selects as its complement an embedded finite clause with the complementizer *that*. The embedded predicate *claimed* selects the same kind of clausal complement. We express this hierarchical relation in form of embedding in tree representations:
[ *the man said* [ *that the woman claimed* [ *that the child broke the toy* ] ] ]
Or using a graphical representation in form of a tree:
<img src="Embedded_Clauses_1.png" width="60%" height="60%">
The hierarchical relation of sub-clauses is relevant when it comes to semantics. The clause *John sold his car* can be interpreted as an assertion that describes an event with *John* as the agent, and *the car* as the object of a *selling* event in the past. If the clause is embedded under a matrix clause that contains a sentential negation, the proposition is assumed to NOT be true: [ *Mary did not say that* [ *John sold his car* ] ]
It is possible with additional effort to translate the Dependency Trees into clauses and reconstruct the clause hierarchy into a relevant form or data structure. SpaCy does not offer a direct data output of such relations.
One problem still remains, and this is *clausal discontinuities*. None of the common NLP pipelines, and spaCy in particular, can deal with any kind of discontinuities in any reasonable way. Discontinuities can be observed when sytanctic structures are split over the clause or sentence, or elements ocur in a cannoically different position, as in the following example:
*Which car did John claim that Mary took?*
The embedded clause consists of the sequence [ *Mary took which car* ]. One part of the sequence appears dislocated and precedes the matrix clause in the above example. Simple Dependency Parsers cannot generate any reasonable output that makes it easy to identify and reconstruct the relations of clausal elements in these structures.
#### Constitutent Structure Trees
Dependency Parse trees are a simplification of relations of elements in the clause. They ignore structural and hierarchical relations in a sentence or clause, as shown in the examples above. Instead the Dependency Parse trees show simple functional relations in the sense of sentential functions like *subject* or *object* of a verb.
SpaCy does not output any kind of constituent structure and more detailed relational properties of phrases and more complex structural units in a sentence or clause.
Since many semantic properties are defined or determined in terms of structural relations and hierarchies, that is *scope relations*, this is more complicated to reconstruct or map from the Dependency Parse trees.
#### Anaphora Resolution
SpaCy does not offer any anaphora resolution annotation. That is, the referent of a pronoun, as in the following examples, is not annotated in the resulting linguistic data structure:
- *John saw **him**.*
- *John said that **he** saw the house.*
- *Tim sold **his** house. **He** moved to Paris.*
- *John saw **himself** in the mirror.*
Knowing the restrictions of pronominal binding (in English for example), we can partially generate the potential or most likely anaphora - antecedent relations. This - however - is not part of the spaCy output.
One problem, however, is that spaCy does not provide parse trees of the *constituent structure* and *clausal hierarchies*, which is crucial for the correct analysis of pronominal anaphoric relations.
#### Coreference Analysis
Some NLP pipelines are capable of providing coreference analyses for constituents in clauses. For example, the two clauses should be analyzed as talking about the same subject:
*The CEO of Apple, Tim Cook, decided to apply for a job at Google. Cook said that he is not satisfied with the quality of the iPhones anymore. He prefers the Pixel 2.*
The constituents [ *the CEO of Apple, Tim Cook* ] in the first sentence, [ *Cook* ] in the second sentence, and [ *he* ] in the third, should all be tagged as referencing the same entity, that is the one mentioned in the first sentence. SpaCy does not provide such a level of analysis or annotation.
#### Temporal Reference
For various analysis levels it is essential to identify the time references in a sentence or utterance, for example the time the utterance is made or the time the described event happened.
Certain tenses are expressed as periphrastic constructions, including auxiliaries and main verbs. SpaCy does not provide the relevant information to identify these constructions and tenses.
## Using the Dependency Parse Visualizer
More on Dependency Parse trees
```
import spacy
```
We can load the visualizer:
```
from spacy import displacy
```
Loading the English NLP pipeline:
```
nlp = spacy.load("en_core_web_sm")
```
Process an input sentence:
```
#doc = nlp(u'John said yesterday that Mary bought a new car for her older son.')
#doc = nlp(u"Dick ran and Jane danced yesterday.")
doc = nlp(u"Tim Cook is the CEO of Apple.")
#doc = nlp(u"Born in a small town, she took the midnight train going anywhere.")
#doc = nlp(u"John met Peter and Susan called Paul.")
```
If you want to generate a visualization running code outside of the Jupyter notebook, you could use the following code. You should not use this code, if you are running the notebook. Instead, use the function *display.render* two cells below.
Visualizing the Dependency Parse tree can be achieved by running the following server code and opening up a new tab on the URL [http://localhost:5000/](http://localhost:5000/). You can shut down the server by clicking on the stop button at the top in the notebook toolbar.
```
displacy.serve(doc, style='dep')
```
Instead of serving the graph, one can render it directly into a Jupyter Notebook:
```
displacy.render(doc, style='dep', jupyter=True, options={"distance": 120})
```
In addition to the visualization of the Dependency Trees, we can visualize named entity annotations:
```
text = """Apple decided to fire Tim Cook and hire somebody called John Doe as the new CEO.
They also discussed a merger with Google. On the long run it seems more likely that Apple
will merge with Amazon and Microsoft with Google. The companies will all relocate to
Austin in Texas before the end of the century. John Doe bought a Prosche."""
doc = nlp(text)
displacy.render(doc, style='ent', jupyter=True)
```
## Vectors
To use vectors in spaCy, you might consider installing the larger models for the particular language. The common module and language packages only come with the small models. The larger models can be installed as described on the [spaCy vectors page](https://spacy.io/usage/vectors-similarity):
python -m spacy download en_core_web_lg
The large model *en_core_web_lg* contains more than 1 million unique vectors.
Let us restart all necessary modules again, in particular spaCy:
```
import spacy
```
We can now import the English NLP pipeline to process some word list. Since the small models in spacy only include context-sensitive tensors, we should use the dowloaded large model for better word vectors. We load the large model as follows:
```
nlp = spacy.load('en_core_web_lg')
#nlp = spacy.load("en_core_web_sm")
```
We can process a list of words by the pipeline using the *nlp* object:
```
tokens = nlp(u'dog poodle beagle cat banana apple')
```
As described in the spaCy chapter *[Word Vectors and Semantic Similarity](https://spacy.io/usage/vectors-similarity)*, the resulting elements of *Doc*, *Span*, and *Token* provide a method *similarity()*, which returns the similarities between words:
```
for token1 in tokens:
for token2 in tokens:
print(token1, token2, token1.similarity(token2))
```
We can access the *vectors* of these objects using the *vector* attribute:
```
tokens = nlp(u'dog cat banana grungle')
for token in tokens:
print(token.text, token.has_vector, token.vector_norm, token.is_oov)
```
The attribute *has_vector* returns a boolean depending on whether the token has a vector in the model or not. The token *grungle* has no vector. It is also out-of-vocabulary (OOV), as the fourth column shows. Thus, it also has a norm of $0$, that is, it has a length of $0$.
Here the token vector has a length of $300$. We can print out the vector for a token:
```
n = 0
print(tokens[n].text, len(tokens[n].vector), tokens[n].vector)
```
Here just another example of similarities for some famous words:
```
tokens = nlp(u'queen king chef')
for token1 in tokens:
for token2 in tokens:
print(token1, token2, token1.similarity(token2))
```
### Similarities in Context
In spaCy parsing, tagging and NER models make use of vector representations of contexts that represent the *meaning of words*. A text *meaning representation* is represented as an array of floats, i.e. a tensor, computed during the NLP pipeline processing. With this approach words that have not been seen before can be typed or classified. SpaCy uses a 4-layer convolutional network for the computation of these tensors. In this approach these tensors model a context of four words left and right of any given word.
Let us use the example from the spaCy documentation and check the word *labrador*:
```
tokens = nlp(u'labrador')
for token in tokens:
print(token.text, token.has_vector, token.vector_norm, token.is_oov)
```
We can now test for the context:
```
doc1 = nlp(u"The labrador barked.")
doc2 = nlp(u"The labrador swam.")
doc3 = nlp(u"The people on Labrador are Canadians.")
dog = nlp(u"dog")
count = 0
for doc in [doc1, doc2, doc3]:
lab = doc
count += 1
print(str(count) + ":", lab.similarity(dog))
```
Using this strategy we can compute document or text similarities as well:
```
docs = ( nlp(u"Paris is the largest city in France."),
nlp(u"Vilnius is the capital of Lithuania."),
nlp(u"An emu is a large bird.") )
for x in range(len(docs)):
zset = set(range(len(docs)))
zset.remove(x)
for y in zset:
print(x, y, docs[x].similarity(docs[y]))
```
We can vary the word order in sentences and compare them:
```
docs = [nlp(u"dog bites man"), nlp(u"man bites dog"),
nlp(u"man dog bites"), nlp(u"cat eats mouse")]
for doc in docs:
for other_doc in docs:
print('"' + doc.text + '"', '"' + other_doc.text + '"', doc.similarity(other_doc))
```
### Custom Models
#### Optimization
```
nlp = spacy.load('en_core_web_lg')
```
## Training Models
This example code for training an NER model is based on the [training example in spaCy](https://github.com/explosion/spaCy/blob/master/examples/training/train_ner.py).
We will import some components from the *__future__* module. Read its [documentation here](https://docs.python.org/3/library/__future__.html).
```
from __future__ import unicode_literals, print_function
```
We import the [*random*](https://docs.python.org/3/library/random.html) module for pseudo-random number generation:
```
import random
```
We import the *Path* object from the [*pathlib*](https://docs.python.org/3/library/pathlib.html) module:
```
from pathlib import Path
```
We import *spaCy*:
```
import spacy
```
We also import the minibatch and compounding module from *spaCy.utils*:
```
from spacy.util import minibatch, compounding
```
The training data is formated as JSON:
```
TRAIN_DATA = [
("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]
```
We created a blank 'xx' model:
```
nlp = spacy.blank("xx") # create blank Language class
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
```
We add the named entity labels to the NER model:
```
for _, annotations in TRAIN_DATA:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
```
Assuming that the model is empty and untrained, we reset and initialize the weights randomly using:
```
nlp.begin_training()
```
We would not do this, if the model is supposed to be tuned or retrained on new data.
We get all pipe-names in the model that are not our NER related pipes to disable them during training:
```
pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
```
We can now disable the other pipes and train just the NER uing 100 iterations:
```
# Train the NER component for 100 iterations with all other pipes disabled.
# TRAIN_DATA is shuffled each iteration and fed in growing minibatches.
with nlp.disable_pipes(*other_pipes): # only train NER
    for itn in range(100):
        random.shuffle(TRAIN_DATA)
        losses = {}
        # batch up the examples using spaCy's minibatch
        # (compounding grows batch size from 4.0 toward 32.0 by factor 1.001)
        batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
        for batch in batches:
            texts, annotations = zip(*batch)
            nlp.update(
                texts,  # batch of texts
                annotations,  # batch of annotations
                drop=0.5,  # dropout - make it harder to memorise data
                losses=losses,
            )
        print("Losses", losses)
```
We can test the trained model:
```
for text, _ in TRAIN_DATA:
doc = nlp(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
```
We can define the output directory where the model will be saved as the *models* folder in the directory where the notebook is running:
```
output_dir = Path("./models/")
```
Save model to output dir:
```
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
```
To make sure everything worked out well, we can test the saved model:
```
nlp2 = spacy.load(output_dir)
for text, _ in TRAIN_DATA:
doc = nlp2(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
```
**(C) 2021 by [Damir Cavar](http://damir.cavar.me/) <<dcavar@iu.edu>>**
| github_jupyter |
# Explore harvested text files
```
import os
import pandas as pd
import fileinput
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from textblob import TextBlob
from operator import itemgetter
from pathlib import Path
import nltk
import numpy as np
import altair as alt
import nltk
# Download the NLTK resources needed for stopword filtering and tokenizing.
nltk.download('stopwords')
nltk.download('punkt')
stopwords = nltk.corpus.stopwords.words('english')
# 'tho' and 'tbe' are common OCR misreadings of 'the' in digitised newspapers.
stopwords += ['tho', 'tbe']
# Are you using Jupyter Lab?
# If so either don't run this cell or comment out the line below
# alt.renderers.enable('notebook')
# If you forget, run this cell, and then get strange warnings when you make a chart,
# uncomment the following line and run this cell to reset the chart renderer
# alt.renderers.enable('default')
# alt.data_transformers.enable('json')
#nltk.download('stopwords')
#nltk.download('punkt')
#stopwords = nltk.corpus.stopwords.words('english')
# Import a harvest zip file you've created previously
# First upload the zip file to the data directory, then run this cell
import zipfile
# Each zip is extracted into data/<zip name without extension>/.
for zipped in sorted(Path('data').glob('*.zip')):
    print(f'Unzipping {zipped}...')
    with zipfile.ZipFile(zipped, 'r') as zip_file:
        zip_file.extractall(Path(f'data/{zipped.stem}'))
def get_latest_harvest():
    """Return the directory of the most recent harvest under ``data``.

    Harvest directories are named by timestamp, so the lexicographically
    greatest name is the newest. Prints a message and returns ``None``
    when no harvest directories exist. Hidden directories are ignored.
    """
    candidates = [
        entry
        for entry in Path('data').iterdir()
        if entry.is_dir() and not entry.name.startswith('.')
    ]
    if not candidates:
        print('No harvests!')
        return None
    # max() over Paths uses the same lexicographic ordering as sorted()[-1].
    return max(candidates)
def get_docs(harvest):
    """Yield the full text of each harvested document, stripped of
    surrounding whitespace, in sorted filename order."""
    yield from (
        doc_path.read_text(encoding='utf-8').strip()
        for doc_path in get_docs_path(harvest)
    )
def get_docs_path(harvest):
    """Return the sorted list of ``*.txt`` file paths in the harvest's
    ``text`` subdirectory.

    Parameters
    ----------
    harvest : str or Path
        Path to a harvest directory (e.g. from ``get_latest_harvest``).
    """
    # sorted() already returns a list, so the original's extra
    # list-comprehension copy was redundant.
    return sorted(Path(harvest, 'text').glob('*.txt'))
def get_file_names(harvest):
    """Return the bare file names (stems, no extension) of the harvested
    documents, in the same sorted order as ``get_docs_path``."""
    return [doc_path.stem for doc_path in get_docs_path(harvest)]
harvest = get_latest_harvest()
harvest
vectorizer = CountVectorizer(stop_words=frozenset(stopwords), max_features=10000, ngram_range=(1,1))
# preprocessor = lambda x: re.sub(r'(\d[\d\.])+', 'NUM', x.lower())
X_freq = np.asarray(vectorizer.fit_transform(get_docs(harvest)).todense())
df_freq = pd.DataFrame(X_freq, columns=vectorizer.get_feature_names(), index=get_file_names(harvest))
df_freq.sum().nlargest(20)
df_freq.unstack().to_frame().reset_index().dropna(axis=0, subset=[0])
%%time
# The number of words you want to show
num_words = 10
top_words = pd.DataFrame({n: df_freq.T[col].nlargest(num_words).index.tolist() for n, col in enumerate(df_freq.T)}).T
top_words.index = get_file_names(harvest)
top_words.head()
df_freq.T
```
## Add a 'year' column to the dataframe
Each file name includes the date on which the article was published. For example, `18601224-13-5696044` was published on 24 December 1860. We can easily extract the year by just slicing the first four characters off the index.
```
df_freq['article_year'] = df_freq.index.str.slice(0, 4)
```
## Most frequent words each year
```
# Group by year and sum the word counts
year_groups = df_freq.groupby(by='article_year')
year_group_totals = year_groups.sum()
# Reshape so that we have columns for year, word, and count
words_by_year = year_group_totals.unstack().to_frame().reset_index()
words_by_year.columns = ['word', 'year', 'count']
# head(10) within each year group keeps that year's ten most frequent words.
top_words_by_year = words_by_year.sort_values('count', ascending=False).groupby(by=['year']).head(10).reset_index(drop=True)
# How often each word appears across the yearly top-ten lists.
top_words_by_year['word'].value_counts()[:25]
```
## Visualise top ten words per year
```
# Faceted bar chart: one small panel per year, each showing that year's
# top words ranked by descending count (independent scales per panel).
alt.Chart(top_words_by_year).mark_bar().encode(
    y=alt.Y('word:N', sort='-x'),
    x='count:Q',
    facet=alt.Facet('year', columns=4)
).properties(
    width=120, height=120
).resolve_scale(
    x='independent',
    y='independent'
)
```
## Visualise word frequencies over time
### Create a faceted chart
```
# One line chart per selected word showing its yearly frequency over time;
# y scales are independent so low-frequency words remain visible.
alt.Chart(words_by_year.loc[words_by_year['word'].isin(['storm', 'cyclone', 'snow'])]).mark_line().encode(
    x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),
    y='count:Q',
    color='word:N',
    facet=alt.Facet('word:N', columns=1)
).properties(width=700, height=100).resolve_scale(
    y='independent'
)
```
----
Created by [Tim Sherratt](https://timsherratt.org) ([@wragge](https://twitter.com/wragge)) for the [GLAM Workbench](https://github.com/glam-workbench/).
Support this project by [becoming a GitHub sponsor](https://github.com/sponsors/wragge?o=esb).
| github_jupyter |
```
from pandas_datareader import data, wb ##Data reader to read data from web
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
# Data
**Name (DataFrame Name)**
<input type="checkbox"> Bank of America (BAC)
<input type="checkbox"> CitiGroup (C)
<input type="checkbox"> Goldman Sachs (GS)
<input type="checkbox"> JPMorgan Chase (JPM)
<input type="checkbox"> Morgan Stanley (MS)
<input type="checkbox"> Wells Fargo (WFC)
```
#Create time interval (2006 to 2019)
start = datetime.datetime(2006,1,1)
end = datetime.datetime(2019,1,1)
# NOTE(review): pandas-datareader's 'yahoo' source has been unreliable since
# Yahoo changed its API — confirm it still works in this environment.
# Bank of America
BAC = data.DataReader("BAC", 'yahoo', start, end)
# CitiGroup
C = data.DataReader("C", 'yahoo', start, end)
# Goldman Sachs
GS = data.DataReader("GS", 'yahoo', start, end)
# JPMorgan Chase
JPM = data.DataReader("JPM", 'yahoo', start, end)
# Morgan Stanley
MS = data.DataReader("MS", 'yahoo', start, end)
# Wells Fargo
WFC = data.DataReader("WFC", 'yahoo', start, end)
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
## Concat all banks
# Produces a two-level column MultiIndex: (ticker, stock-info field).
banks = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
banks.columns.names=['Banks', 'Stock Info']
banks.head()
## Max stock close value per bank
banks.xs(key='Close',axis=1,level='Stock Info').max()
```
Stock Return
$$r_t = \frac{p_t - p_{t-1}}{p_{t-1}} = \frac{p_t}{p_{t-1}} - 1$$
Use pct_change() on close to represent return
```
# Daily percentage return per bank, computed from the closing price.
returns = pd.DataFrame()
for tick in tickers:
    # pct_change() leaves the first row NaN (no previous day to compare to).
    returns[tick+' Return'] = banks[tick]['Close'].pct_change()
returns.head()
sns.pairplot(returns[1:]) ##NaN excluded
# idxmin/idxmax return the *date* on which each extreme return occurred.
returns.min() ## Which day this happened?
returns.max() ## Which day this happened?
returns.idxmin() ## Worst
```
**BAC, GS, JPM and WFC had the same worst day;
that day was Obama's inauguration.**
```
returns.idxmax() ## Best
returns.std() ## CitiGroup has the highest std, risky stocks?
## Std on 2018
returns[(returns.index.date >= datetime.date(2018, 1, 1)) & (returns.index.date < datetime.date(2019, 1, 1))].std()
# CitiGroup's 2018 daily returns only, for a closer look at its volatility.
citi_2018 = returns[(returns.index.date>=datetime.date(2018,1,1)) & (returns.index.date<datetime.date(2019,1,1))]['C Return']
print("CitiGroup Return STD 2018: ",citi_2018.std())
# NOTE(review): distplot is deprecated in seaborn >= 0.11 (use histplot/displot).
sns.distplot(citi_2018, color='orange', bins=15)
# NOTE(review): set_style after plotting only affects *subsequent* figures.
sns.set_style('whitegrid')
```
**Line Graph to close price over time**
```
banks.head()
# Overlay each bank's closing price on a single figure; repeated .plot()
# calls draw onto the same axes.
for tick in tickers: ##Walk over columns
    banks[tick]['Close'].plot(figsize=(14,6), label=tick)
plt.legend()
```
# Moving Average
```
## Moving Average BAC 2018
BAC.head()
# Closing prices restricted to calendar year 2018.
BAC_2018 = BAC['Close'][BAC.index.year==2018]
# The first 29 values are NaN until the 30-day window fills.
BAC_ma = BAC_2018.rolling(window=30).mean() ## Month rolling mean
plt.figure(figsize=(12,6))
BAC_ma.plot(label='30 days Moving Average')
BAC_2018.plot(label='Closure')
plt.legend()
```
| github_jupyter |
## Observations and Insights
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
# (inner join: keeps only mice that appear in both files)
combine_df = pd.merge(mouse_metadata,study_results, how='inner', on='Mouse ID')
# Display the data table for preview
combine_df
# Checking the number of mice.
mice_ct = len(combine_df["Mouse ID"].unique())
mice_df = pd.DataFrame({"# Mice": [mice_ct]})
mice_df
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# A mouse is "duplicated" if the same (Mouse ID, Timepoint) pair occurs twice.
duplicated_df = study_results[study_results.duplicated(subset=['Mouse ID','Timepoint'])]
dup_id_df = pd.DataFrame({'Mouse ID':duplicated_df['Mouse ID'].unique()})
dup_id_df
# Optional: Get all the data for the duplicate mouse ID.
dup_data_df = combine_df[combine_df['Mouse ID'].isin(dup_id_df['Mouse ID'])]
dup_data_df
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# All rows of a duplicated mouse are dropped, not just the duplicated rows.
clean_combine_df = combine_df[~combine_df['Mouse ID'].isin(dup_id_df['Mouse ID'])]
# Checking the number of mice in the clean DataFrame.
clean_mice_ct = len(clean_combine_df["Mouse ID"].unique())
clean_mice_df = pd.DataFrame({"# Mice": [clean_mice_ct]})
clean_mice_df
```
## Summary Statistics
```
# Generate a summary statistics table of mean, median, variance, standard
# deviation, and SEM of the tumor volume for each drug regimen.
# Use groupby and summary statistical methods, then assemble the resulting
# series into a single summary dataframe.
grouped_df = clean_combine_df.groupby(['Drug Regimen'])
tumor_mean = grouped_df['Tumor Volume (mm3)'].mean()
tumor_median = grouped_df['Tumor Volume (mm3)'].median()
tumor_variance = grouped_df['Tumor Volume (mm3)'].var()
tumor_std = grouped_df['Tumor Volume (mm3)'].std()
tumor_sem = grouped_df['Tumor Volume (mm3)'].sem()
# All together in the summary Data Frame
summary_data = pd.DataFrame({"Mean": tumor_mean,
                             "Median": tumor_median,
                             "Variance": tumor_variance,
                             "STD": tumor_std,
                             "SEM": tumor_sem
                            })
# Format every column to two decimals for display (one loop instead of
# five copy-pasted statements).
for col in summary_data.columns:
    summary_data[col] = summary_data[col].astype(float).map("{:,.2f}".format)
summary_data
# Produce the same summary statistics in a single .agg() call.
# String aggregation names replace the numpy callables (np.mean, np.std, ...),
# which pandas has deprecated as agg arguments; pandas' 'sem' uses ddof=1,
# matching scipy.stats.sem, so the values are unchanged.
summary_data2 = grouped_df['Tumor Volume (mm3)'].agg(['mean', 'median', 'var', 'std', 'sem'])
summary_data2
```
## Bar and Pie Charts
```
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
mice_gr_ct = clean_combine_df.groupby("Drug Regimen").count()["Tumor Volume (mm3)"]
mice_gr_ct.plot.bar(title = "Number of unique mice tested on each drug regimen", facecolor="turquoise")
plt.ylabel("Number of mice")
plt.xlabel("Drug Regimen")
# Generate the same bar plot using pyplot directly (mice_gr_ct reused from above).
plt.bar(mice_gr_ct.index,mice_gr_ct.values,facecolor="turquoise")
plt.xticks(rotation=90)
plt.ylabel("Number of mice")
plt.xlabel("Drug Regimen")
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pandas
gender = clean_combine_df["Sex"].value_counts()
explode = (.1,0)
gender.plot.pie(explode=explode, autopct = "%1.1f%%", colors = ['lime', 'turquoise'])
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# NOTE(review): value_counts() sorts by frequency, so these hard-coded labels
# assume males outnumber females in this dataset — verify against the data.
labels = ["Male", "Female"]
explode = (.1,0)
# BUG FIX: the original did `plt.title = "Gender"`, which rebinds pyplot's
# title *function* to a string (breaking every later plt.title() call) and
# never sets a title. Call the function instead.
plt.title("Gender")
plt.pie(gender, labels = labels, explode=explode, autopct = "%1.1f%%", colors = ['lime', 'turquoise'])
plt.axis("equal")
plt.show()
```
## Quartiles, Outliers and Boxplots
```
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
gr_timepoint = clean_combine_df.groupby('Mouse ID').max()['Timepoint']
#gr_timepoint.head()
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# (inner join on both Mouse ID and Timepoint keeps only each mouse's final row)
gr_time_comb_df = pd.merge(gr_timepoint,clean_combine_df, how ='inner', on= ('Mouse ID','Timepoint') )
gr_time_comb_df
#combine_df = pd.merge(mouse_metadata,study_results, how='inner', on='Mouse ID')
# Put treatments into a list for for loop (and later for plot labels)
Drug_reg = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
# Create empty list to fill with tumor vol data (for plotting)
Tumor_vol =[]
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Outliers are flagged with the standard 1.5*IQR rule below.
for drug in Drug_reg:
    drug_df = gr_time_comb_df[gr_time_comb_df['Drug Regimen'] == drug]
    # quantile() returns a Series indexed by the quantile values themselves.
    quartiles = drug_df['Tumor Volume (mm3)'].quantile([.25,.5,.75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq-lowerq
    lower_bound = lowerq - (1.5*iqr)
    upper_bound = upperq + (1.5*iqr)
    # Count of final tumor volumes falling outside the 1.5*IQR fences.
    outlier = drug_df.loc[(drug_df['Tumor Volume (mm3)'] < lower_bound) | (drug_df['Tumor Volume (mm3)'] > upper_bound)].count()["Tumor Volume (mm3)"]
    print(drug)
    print(f"IQR = {iqr}")
    print(f"outlier = {outlier}")
    print("-----------------------------")
    # Accumulate this regimen's final tumor volumes for the box plot below.
    Tumor_vol_df = pd.DataFrame({'Drug Regimen': drug,
                            'Tumor Volume (mm3)':drug_df['Tumor Volume (mm3)'] })
    Tumor_vol.append(Tumor_vol_df)
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# (Tumor_vol entries are in the same order as Drug_reg, hence the xticks labels)
plot_data = [Tumor_vol[0]['Tumor Volume (mm3)'],Tumor_vol[1]['Tumor Volume (mm3)'],\
             Tumor_vol[2]['Tumor Volume (mm3)'],Tumor_vol[3]['Tumor Volume (mm3)']]
plt.boxplot(plot_data, vert = True)
plt.ylabel("Tumor Volume (mm3)")
plt.xlabel("Drug Regimen")
plt.xticks([1,2,3,4],Drug_reg)
plt.show()
```
## Line and Scatter Plots
```
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
Capo_df = clean_combine_df[clean_combine_df['Drug Regimen'] == "Capomulin"]
Group_Capo_df = Capo_df.groupby('Timepoint')
# Mean tumor volume across all Capomulin mice at each timepoint.
Avg_tum_vol = Group_Capo_df["Tumor Volume (mm3)"].mean()
# NOTE(review): "Avarage" is a typo in the displayed column label; it is a
# runtime string, so left unchanged here.
Capo_Avg_tum_vol_df = pd.DataFrame({"Avarage tumor volume (mm3)": Avg_tum_vol})
Capo_Avg_tum_vol_df.plot()
plt.show()
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
Capo_df.plot(kind="scatter",x="Weight (g)",y="Tumor Volume (mm3)")
plt.show()
```
## Correlation and Regression
```
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
correlation_coef = round(st.pearsonr(Capo_df['Weight (g)'],Capo_df['Tumor Volume (mm3)'])[0],2)
# BUG FIX: pearsonr returns the correlation coefficient r, not R^2, so the
# annotation is labelled "r" (the original mislabelled it "R^2"). The second
# round() was also redundant — correlation_coef is already rounded above.
corr_coef = "r = " + str(correlation_coef)
# Least-squares fit of tumor volume against weight.
tw_slope, tw_int, tw_r, tw_p, tw_std_err = st.linregress(Capo_df['Weight (g)'],Capo_df['Tumor Volume (mm3)'])
tw_fit = tw_slope * Capo_df['Weight (g)'] + tw_int
# Human-readable regression equation for the plot annotation.
reg_eq = "y = " + str(round(tw_slope,2)) + "x + " +str(round(tw_int,2))
Capo_df.plot(kind="scatter",x="Weight (g)",y="Tumor Volume (mm3)")
plt.plot(Capo_df['Weight (g)'],tw_fit,"lime")
plt.annotate(reg_eq,(19,26), fontsize = 20, color="lime")
plt.annotate(corr_coef,(21.2,30), fontsize = 20, color="lime")
plt.xticks(Capo_df['Weight (g)'])
plt.xlabel('Weight (g)')
plt.ylabel('Tumor Volume (mm3)')
plt.show()
```
| github_jupyter |
## <div align="center"> A Comprehensive ML Workflow for House Prices</div>
There are plenty of **courses and tutorials** that can help you learn machine learning from scratch but here in **Kaggle**, I want to predict **House prices** a popular machine learning Dataset as a comprehensive workflow with python packages.
After reading, you can use this workflow to solve other real problems and use it as a template to deal with machine learning problems.
<img src="http://s9.picofile.com/file/8338980150/House_price.png"></img>
<div style="text-align:center">last update: <b>10/09/2018</b></div>
---------------------------------------------------------------------
you can follow me on:
> ###### [ GitHub](https://github.com/mjbahmani)
> ###### [LinkedIn](https://www.linkedin.com/in/bahmani/)
> ###### [Kaggle](https://www.kaggle.com/mjbahmani/)
>###### you may also be interested in having a look at: [A Comprehensive Machine Learning Workflow with Python](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)
-------------------------------------------------------------------------------------------------------------
**I hope you find this kernel helpful and some upvotes would be very much appreciated**
-----------
## Notebook Content
* 1- [Introduction](#1)
* 2- [Machine learning workflow](#2)
* 3- [Problem Definition](#3)
* 3-1 [Problem feature](#4)
* 3-2 [Aim](#5)
* 3-3 [Variables](#6)
* 4-[ Inputs & Outputs](#7)
* 4-1 [Inputs ](#8)
* 4-2 [Outputs](#9)
* 5- [Installation](#10)
* 5-1 [ jupyter notebook](#11)
* 5-2[ kaggle kernel](#12)
* 5-3 [Colab notebook](#13)
* 5-4 [install python & packages](#14)
* 5-5 [Loading Packages](#15)
* 6- [Exploratory data analysis](#16)
* 6-1 [Data Collection](#17)
* 6-2 [Visualization](#18)
* 6-2-1 [Scatter plot](#19)
* 6-2-2 [Box](#20)
* 6-2-3 [Histogram](#21)
* 6-2-4 [Multivariate Plots](#22)
* 6-2-5 [Violinplots](#23)
* 6-2-6 [Pair plot](#24)
* 6-2-7 [Kde plot](#25)
* 6-2-8 [Joint plot](#26)
* 6-2-9 [Andrews curves](#27)
* 6-2-10 [Heatmap](#28)
* 6-2-11 [Radviz](#29)
* 6-3 [Data Preprocessing](#30)
* 6-4 [Data Cleaning](#31)
* 7- [Model Deployment](#32)
* 8- [Conclusion](#53)
* 9- [References](#54)
<a id="1"></a> <br>
## 1- Introduction
This is a **A Comprehensive ML Workflow for House Prices** data set, it is clear that everyone in this community is familiar with house prices dataset but if you need to review your information about the dataset please visit this [link](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data).
I have tried to show **fans of machine learning** on Kaggle how to approach machine learning problems, and I think it is a great opportunity for anyone who wants to learn the machine learning workflow with Python **completely**.
I have covered most of the methods that are implemented for house prices until **2018**, you can start to learn and review your knowledge about ML with a simple dataset and try to learn and memorize the workflow for your journey in Data science world.
I am open to getting your feedback for improving this **kernel**
<a id="2"></a> <br>
## 2- Machine Learning Workflow
If you have already read some [machine learning books](https://towardsdatascience.com/list-of-free-must-read-machine-learning-books-89576749d2ff). You have noticed that there are different ways to stream data into machine learning.
most of these books share the following steps:
* Define Problem
* Specify Inputs & Outputs
* Exploratory data analysis
* Data Collection
* Data Preprocessing
* Data Cleaning
* Visualization
* Model Design, Training, and Offline Evaluation
* Model Deployment, Online Evaluation, and Monitoring
* Model Maintenance, Diagnosis, and Retraining
**You can see my workflow in the below image** :
<img src="http://s9.picofile.com/file/8338227634/workflow.png" />
## 2-1 How to solve Problem?
**Data Science has so many techniques and procedures that can confuse anyone.**
**Step 1**: Translate your business problem statement into technical one
Analogous to any other software problem, data science aims at solving a business problem. Most of the times, business problem statements are vague and can be interpreted in multiple ways. This occurs mostly because we generally use qualitative words in our language which cannot be directly translated into a machine readable code.
Eg. Let’s say we need to develop a solution to reduce crime rate of a city. The term “reduce” can be interpreted as:
Decreasing crime rate of areas with high crime rate
Decreasing crime rate of the most common type of crime
It is a good practice to circle back with the client or the business team who define the problem to decide on the right interpretation.
**Step 2**: Decide on the supervised learning technique
The end goal of almost every data science problem is usually classification or regression. Deciding the supervised technique for the problem will help you get more clarity on the business statement.
Eg. Let’s look at our problem of reducing crime rate. While the problem of reducing crime rate is more of a policy decision, depending on the choice above, we would have to decide if we need to do classification or regression.
If we need to decrease crime rate of areas with high crime rate, we would need to determine the crime rate rate of an area. This is a regression problem.
If we need to decrease crime rate of most common type of crime, we would need to determine the most common type of crime in an area. This is a classification problem.
Again it is a good practice to circle back with the client or the business team who define the problem requirements to clarify on the exact requirement.
**Step 3**: Literature survey
Literature Survey is one of the most important step (and often most ignored step) to approach any problem. If you read any article about components of Data Science, you will find computer science, statistics / math and domain knowledge. As it is quite inhuman for someone to have subject expertise in all possible fields, literature survey can often help in bridging the gaps of inadequate subject expertise.
After going through existing literature related to a problem, I usually try to come up with a set of hypotheses that could form my potential set of features. Going through existing literature helps you understand existing proofs in the domain serving as a guide to take the right direction in your problem. It also helps in interpretation of the results obtained from the prediction models.
Eg. Going back to our problem of reducing crime rate, if you want to predict crime rate of an area, you would consider factors from general knowledge like demographics, neighboring areas, law enforcement rules etc. Literature survey will help you consider additional variables like climate, mode of transportation, divorce rate etc.
**Step 4**: Data cleaning
If you speak with anyone who has spent some time in data science, they will always say that most of their time is spent on cleaning the data. Real world data is always messy. Here are a few common discrepancies in most data-sets and some techniques of how to clean them:
Missing values
Missing values are values that are blank in the data-set. This can be due to various reasons like value being unknown, unrecorded, confidential etc. Since the reason for a value being missing is not clear, it is hard to guess the value.
You could try different techniques to impute missing values starting with simple methods like column mean, median etc. and complex methods like using machine leaning models to estimate missing values.
Duplicate records
The challenge with duplicate records is identifying a record being duplicate. Duplicate records often occur while merging data from multiple sources. It could also occur due to human error. To identify duplicates, you could approximate a numeric values to certain decimal places and for text values, fuzzy matching could be a good start. Identification of duplicates could help the data engineering team to improve collection of data to prevent such errors.
Incorrect values
Incorrect values are mostly due to human error. For Eg. If there is a field called age and the value is 500, it is clearly wrong. Having domain knowledge of the data will help identify such values. A good technique to identify incorrect values for numerical columns could be to manually look at values beyond 3 standard deviations from the mean to check for correctness.
**Step 5**: Feature engineering
Feature Engineering is one of the most important step in any data science problem. Good set of features might make simple models work for your data. If features are not good enough, you might need to go for complex models. Feature Engineering mostly involves:
Removing redundant features
If a feature is not contributing a lot to the output value or is a function of other features, you can remove the feature. There are various metrics like AIC and BIC to identify redundant features. There are built in packages to perform operations like forward selection, backward selection etc. to remove redundant features.
Transforming a feature
A feature might have a non linear relationship with the output column. While complex models can capture this with enough data, simple models might not be able to capture this. I usually try to visualize different functions of each column like log, inverse, quadratic, cubic etc. and choose the transformation that looks closest to a normal curve.
**Step 6**: Data modification
Once the data is cleaned, there are a few modifications that might be needed before applying machine learning models. One of the most common modification would be scaling every column to the same range in order to give same weight to all columns. Some of the other required modifications might be data specific Eg. If output column is skewed, you might need to up-sample or down-sample.
Steps 7 through 9 are iterative.
**Step 7**: Modelling
Once I have the data ready, I usually start with trying all the standard machine learning models. If it is a classification problem, a good start will be Logistic Regression, Naive Bayes, k-Nearest Neighbors, Decision Tree etc. If it is a regression problem, you could try linear regression, regression tree etc. The reason for starting with simple models is that simple models have lesser parameters to alter. If we start with a complex model like Neural Network or Support Vector Machines, there are so many parameters that you could change that trying all options exhaustively might be time consuming.
Each of the machine learning models make some underlying assumptions about the data. For Eg. Linear Regression / Logistic Regression assumes that the data comes from a linear combination of input parameters. Naive Bayes makes an assumption that the input parameters are independent of each other. Having the knowledge of these assumptions can help you judge the results of the different models. It is often helpful to visualize the actual vs predicted values to see these differences.
**Step 8**: Model comparison
One of the most standard technique to evaluate different machine learning models would be through the process of cross validation. I usually choose 10-fold cross validation but you may choose the right cross validation split based on the size of the data. Cross validation basically brings out an average performance of a model. This can help eliminate choosing a model that performs good specific to the data or in other words avoid over-fitting. It is often a good practice to randomize data before cross validation.
A good technique to compare performance of different models is ROC curves. ROC curves help you visualize performance of different models across different thresholds. While ROC curves give a holistic sense of model performance, based on the business decision, you must choose the performance metric like Accuracy, True Positive Rate, False Positive Rate, F1-Score etc.
**Step 9**: Error analysis
At this point, you have tried a bunch of machine learning models and got the results. It is a good usage of time to not just look at the results like accuracy or True Positive Rate but to look at the set of data points that failed in some of the models. This will help you understand the data better and improve the models faster than trying all possible combinations of models. This is the time to try ensemble models like Random Forest, Gradient Boosting or a meta model of your own [Eg. Decision tree + Logistic Regression]. Ensemble models are almost always guaranteed to perform better than any standard model.
**Step 10**: Improving your best model
Once I have the best model, I usually plot training vs testing accuracy [or the right metric] against the number of parameters. Usually, it is easy to check training and testing accuracy against number of data points. Basically this plot will tell you whether your model is over-fitting or under-fitting. The article "Detecting over-fitting vs under-fitting" explains this concept clearly.
Understanding if your model is over-fitting or under-fitting will tell you how to proceed with the next steps. If the model is over-fitting, you might consider collecting more data. If the model is under-fitting, you might consider making the models more complex. [Eg. Adding higher order terms to a linear / logistic regression]
**Step 11**: Deploying the model
Once you have your final model, you would want the model to be deployed so that it automatically predicts output for new data point without retraining. While you can derive a formula for simple models like Linear Regression, Logistic Regression, Decision Tree etc. , it is not so straight forward for complex models like SVM, Neural Networks, Random Forest etc. I’m not very familiar with other languages but Python has a library called pickle which allows you to save models and use it to predict output for new data.
**Step 12**: Adding feedback
Usually, data for any data science problem is historical data. While this might be similar to the current data up-to a certain degree, it might not be able to capture the current trends or changes. For Eg. If you are using population as an input parameter, while population from 2015–2016 might vary slightly, if you use the model after 5 years, it might give incorrect results.
One way to deal with this problem is to keep retraining your model with additional data. This might be a good option but retraining a model might be time consuming. Also, if you have applications in which data inflow is huge, this might need to be done at regular intervals. An alternative and a better option would be to use active learning. Active learning basically tries to use real time data as feedback and automatically update the model. The most common approaches to do this are Batch Gradient Descent and Stochastic Gradient Descent. It might be appropriate to use the right approach based on the application.
Concluding remarks
The field of data science is really vast. People spend their lifetime researching on individual topics discussed above. As a data scientist, you would mostly have to solve business problems than researching on individual subtopics. Additionally, you will have to explain the technical process and results to business teams who might not have enough technical knowledge. Thus, while you might not need a very in-depth knowledge of every technique, you need to have enough clarity to abstract the technical process and results and explain it in business terms.[3]
<a id="3"></a> <br>
## 3- Problem Definition
I think one of the important things when you start a new machine learning project is Defining your problem.
Problem Definition has four steps that have illustrated in the picture below:
<img src="http://s8.picofile.com/file/8338227734/ProblemDefination.png">
<a id="4"></a> <br>
### 3-1 Problem Feature
we will use the house prices data set. This dataset contains information about house prices and the target value is:
* SalePrice
<img src="https://kaggle2.blob.core.windows.net/competitions/kaggle/5407/media/housesbanner.png"></img>
**Why am I using House price dataset:**
1- This is a good project because it is so well understood.
2- Attributes are numeric and categorical so you have to figure out how to load and handle data.
3- It is a Regression problem, allowing you to practice with perhaps an easier type of supervised learning algorithm.
4- This is a perfect competition for data science students who have completed an online course in machine learning and are looking to expand their skill set before trying a featured competition.
5-Creative feature engineering .
<a id="5"></a> <br>
### 3-2 Aim
It is your job to predict the sales price for each house. For each Id in the test set, you must predict the value of the SalePrice variable.
<img src="https://totalbitcoin.org/wp-content/uploads/2015/12/Bitcoin-price-projections-for-2016.png"></img>
<a id="6"></a> <br>
### 3-3 Variables
The variables are :
* SalePrice - the property's sale price in dollars. This is the target variable that you're trying to predict.
* MSSubClass: The building class
* MSZoning: The general zoning classification
* LotFrontage: Linear feet of street connected to property
* LotArea: Lot size in square feet
* Street: Type of road access
* Alley: Type of alley access
* LotShape: General shape of property
* LandContour: Flatness of the property
* Utilities: Type of utilities available
* LotConfig: Lot configuration
* LandSlope: Slope of property
* Neighborhood: Physical locations within Ames city limits
* Condition1: Proximity to main road or railroad
* Condition2: Proximity to main road or railroad (if a second is present)
* BldgType: Type of dwelling
* HouseStyle: Style of dwelling
* OverallQual: Overall material and finish quality
* OverallCond: Overall condition rating
* YearBuilt: Original construction date
* YearRemodAdd: Remodel date
* RoofStyle: Type of roof
* RoofMatl: Roof material
* Exterior1st: Exterior covering on house
* Exterior2nd: Exterior covering on house (if more than one material)
* MasVnrType: Masonry veneer type
* MasVnrArea: Masonry veneer area in square feet
* ExterQual: Exterior material quality
* ExterCond: Present condition of the material on the exterior
* Foundation: Type of foundation
* BsmtQual: Height of the basement
* BsmtCond: General condition of the basement
* BsmtExposure: Walkout or garden level basement walls
* BsmtFinType1: Quality of basement finished area
* BsmtFinSF1: Type 1 finished square feet
* BsmtFinType2: Quality of second finished area (if present)
* BsmtFinSF2: Type 2 finished square feet
* BsmtUnfSF: Unfinished square feet of basement area
* TotalBsmtSF: Total square feet of basement area
* Heating: Type of heating
* HeatingQC: Heating quality and condition
* CentralAir: Central air conditioning
* Electrical: Electrical system
* 1stFlrSF: First Floor square feet
* 2ndFlrSF: Second floor square feet
* LowQualFinSF: Low quality finished square feet (all floors)
* GrLivArea: Above grade (ground) living area square feet
* BsmtFullBath: Basement full bathrooms
* BsmtHalfBath: Basement half bathrooms
* FullBath: Full bathrooms above grade
* HalfBath: Half baths above grade
* Bedroom: Number of bedrooms above basement level
* Kitchen: Number of kitchens
* KitchenQual: Kitchen quality
* TotRmsAbvGrd: Total rooms above grade (does not include bathrooms)
* Functional: Home functionality rating
* Fireplaces: Number of fireplaces
* FireplaceQu: Fireplace quality
* GarageType: Garage location
* GarageYrBlt: Year garage was built
* GarageFinish: Interior finish of the garage
* GarageCars: Size of garage in car capacity
* GarageArea: Size of garage in square feet
* GarageQual: Garage quality
* GarageCond: Garage condition
* PavedDrive: Paved driveway
* WoodDeckSF: Wood deck area in square feet
* OpenPorchSF: Open porch area in square feet
* EnclosedPorch: Enclosed porch area in square feet
* 3SsnPorch: Three season porch area in square feet
* ScreenPorch: Screen porch area in square feet
* PoolArea: Pool area in square feet
* PoolQC: Pool quality
* Fence: Fence quality
* MiscFeature: Miscellaneous feature not covered in other categories
* MiscVal: $Value of miscellaneous feature
* MoSold: Month Sold
* YrSold: Year Sold
* SaleType: Type of sale
* SaleCondition: Condition of sale
<a id="7"></a> <br>
## 4- Inputs & Outputs
<img src="https://upload.wikimedia.org/wikipedia/commons/b/bc/Input-Output.JPG"></img>
<a id="8"></a> <br>
### 4-1 Inputs
* train.csv - the training set
<a id="9"></a> <br>
### 4-2 Outputs
* sale prices for every record in test.csv
<a id="10"></a> <br>
## 5-Installation
#### Windows:
* Anaconda (from https://www.continuum.io) is a free Python distribution for SciPy stack. It is also available for Linux and Mac.
* Canopy (https://www.enthought.com/products/canopy/) is available as free as well as commercial distribution with full SciPy stack for Windows, Linux and Mac.
* Python (x,y) is a free Python distribution with SciPy stack and Spyder IDE for Windows OS. (Downloadable from http://python-xy.github.io/)
#### Linux
Package managers of respective Linux distributions are used to install one or more packages in SciPy stack.
For Ubuntu Users:
sudo apt-get install python-numpy python-scipy python-matplotlib ipython ipython-notebook
python-pandas python-sympy python-nose
<a id="11"></a> <br>
## 5-1 Jupyter notebook
I strongly recommend installing **Python** and **Jupyter** using the **[Anaconda Distribution](https://www.anaconda.com/download/)**, which includes Python, the Jupyter Notebook, and other commonly used packages for scientific computing and data science.
<img src="https://s3-ap-south-1.amazonaws.com/av-blog-media/wp-content/uploads/2018/04/jupyter-768x382.png"></img>
First, download Anaconda. We recommend downloading Anaconda’s latest Python 3 version.
Second, install the version of Anaconda which you downloaded, following the instructions on the download page.
Congratulations, you have installed Jupyter Notebook! To run the notebook, run the following command at the Terminal (Mac/Linux) or Command Prompt (Windows):
> jupyter notebook
>
<a id="12"></a> <br>
## 5-2 Kaggle Kernel
Kaggle kernel is an environment just like you use jupyter notebook, it's an **extension** of the where in you are able to carry out all the functions of jupyter notebooks plus it has some added tools like forking et al.
<a id="13"></a> <br>
## 5-3 Colab notebook
**Colaboratory** is a research tool for machine learning education and research. It’s a Jupyter notebook environment that requires no setup to use.
### 5-3-1 What browsers are supported?
Colaboratory works with most major browsers, and is most thoroughly tested with desktop versions of Chrome and Firefox.
### 5-3-2 Is it free to use?
Yes. Colaboratory is a research project that is free to use.
### 5-3-3 What is the difference between Jupyter and Colaboratory?
Jupyter is the open source project on which Colaboratory is based. Colaboratory allows you to use and share Jupyter notebooks with others without having to download, install, or run anything on your own computer other than a browser.
<a id="15"></a> <br>
## 5-5 Loading Packages
In this kernel we are using the following packages:
<img src="http://s8.picofile.com/file/8338227868/packages.png">
Now we import all of them
```
# packages to load
# Check the versions of libraries
# Python version
import warnings
warnings.filterwarnings('ignore')
import sys
print('Python: {}'.format(sys.version))
# scipy
import scipy
print('scipy: {}'.format(scipy.__version__))
import numpy
# matplotlib
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
# numpy
import numpy as np # linear algebra
print('numpy: {}'.format(np.__version__))
# pandas
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
print('pandas: {}'.format(pd.__version__))
import seaborn as sns
print('seaborn: {}'.format(sns.__version__))
sns.set(color_codes=True)
import matplotlib.pyplot as plt
print('matplotlib: {}'.format(matplotlib.__version__))
%matplotlib inline
# scikit-learn
import sklearn
print('sklearn: {}'.format(sklearn.__version__))
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
%matplotlib inline
from sklearn.metrics import accuracy_score
# Importing metrics for evaluation
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from scipy import stats
from scipy.stats import norm, skew #for some statistics
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) #Limiting floats output to 3 decimal points
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8")) #check the files available in the directory
```
<a id="16"></a> <br>
## 6- Exploratory Data Analysis(EDA)
In this section, you'll learn how to use graphical and numerical techniques to begin uncovering the structure of your data.
* Which variables suggest interesting relationships?
* Which observations are unusual?
By the end of the section, you'll be able to answer these questions and more, while generating graphics that are both insightful and beautiful. then We will review analytical and statistical operations:
* 5-1 Data Collection
* 5-2 Visualization
* 5-3 Data Preprocessing
* 5-4 Data Cleaning
<img src="http://s9.picofile.com/file/8338476134/EDA.png">
<a id="17"></a> <br>
## 6-1 Data Collection
**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypothesis and evaluate outcomes of the particular collection.[techopedia]
> **<< Note >>**
> **The rows being the samples and the columns being attributes**
```
# import Dataset to play with it
train = pd.read_csv('../input/train.csv')
test= pd.read_csv('../input/test.csv')
```
**<< Note 1 >>**
* Each row is an observation (also known as : sample, example, instance, record)
* Each column is a feature (also known as: Predictor, attribute, Independent Variable, input, regressor, Covariate)
After loading the data via **pandas**, we should checkout what the content is, description and via the following:
```
type(train)
type(test)
```
## 6-1-1 Statistical Summary
1- Dimensions of the dataset.
2- Peek at the data itself.
3- Statistical summary of all attributes.
4- Breakdown of the data by the class variable.[7]
Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects.
```
# shape
print(train.shape)
```
Train has one column more than test why? (yes ==>> **target value**)
```
# shape
print(test.shape)
#columns*rows
train.size
#columns*rows
test.size
```
We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property.
**You should see 1460 instances and 81 attributes for train and 1459 instances and 80 attributes for test**
for getting some information about the dataset you can use **info()** command
```
print(train.info())
```
**if you want to see the type of the data and its unique values, you can use the following script**
```
train['Fence'].unique()
train["HouseStyle"].value_counts()
```
**to check the first 5 rows of the data set, we can use head(5).**
```
train.head(5)
```
**to check out last 5 row of the data set, we use tail() function**
```
train.tail()
```
to pop up 5 random rows from the data set, we can use **sample(5)** function
```
train.sample(5)
```
to give a statistical summary about the dataset, we can use **describe()
```
train.describe()
```
to check out how many null info are on the dataset, we can use **isnull().sum()
```
train.isnull().sum()
train.groupby('SaleType').count()
```
to print the dataset **columns**, we can use the columns attribute
```
train.columns
type((train.columns))
```
**<< Note 2 >>**
in pandas's data frame you can perform some query such as "where"
```
train[train['SalePrice']>700000]
```
## 6-1-2 Select numerical features and categorical features
```
# Split the training columns by dtype so numeric and categorical
# features can be inspected and handled separately later on.
numberic_features = train.select_dtypes(include=[np.number])
# NOTE: np.object was deprecated in NumPy 1.20 and removed in 1.24;
# the plain string 'object' selects the same pandas object-dtype columns.
categorical_features = train.select_dtypes(include=['object'])
```
## 6-1-3 Target Value Analysis
as you know **SalePrice** is our target value that we should predict it then now we take a look at it
```
train['SalePrice'].describe()
```
Flexibly plot a univariate distribution of observations.
```
sns.distplot(train['SalePrice']);
#skewness and kurtosis
print("Skewness: %f" % train['SalePrice'].skew())
print("Kurtosis: %f" % train['SalePrice'].kurt())
```
<a id="18"></a> <br>
## 6-2 Visualization
**Data visualization** is the presentation of data in a pictorial or graphical format. It enables decision makers to see analytics presented visually, so they can grasp difficult concepts or identify new patterns.
With interactive visualization, you can take the concept a step further by using technology to drill down into charts and graphs for more detail, interactively changing what data you see and how it’s processed.[SAS]
In this section I show you **11 plots** with **matplotlib** and **seaborn** that is listed in the blew picture:
<img src="http://s8.picofile.com/file/8338475500/visualization.jpg" />
<a id="19"></a> <br>
### 6-2-1 Scatter plot
Scatter plot Purpose To identify the type of relationship (if any) between two quantitative variables
```
# Modify the graph above by assigning each species an individual color.
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
sns.FacetGrid(train[columns], hue="OverallQual", size=5) \
.map(plt.scatter, "OverallQual", "SalePrice") \
.add_legend()
plt.show()
```
<a id="20"></a> <br>
### 6-2-2 Box
In descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
```
train[columns].plot(y='SalePrice',x='OverallQual',kind='box')
plt.figure()
#This gives us a much clearer idea of the distribution of the input attributes:
data = pd.concat([train['SalePrice'], train['OverallQual']], axis=1)
f, ax = plt.subplots(figsize=(12, 8))
fig = sns.boxplot(x='OverallQual', y="SalePrice", data=data)
# To plot the species data using a box plot:
sns.boxplot(x="OverallQual", y="SalePrice", data=train[columns] )
plt.show()
# Use Seaborn's striplot to add data points on top of the box plot
# Insert jitter=True so that the data points remain scattered and not piled into a verticle line.
# Assign ax to each axis, so that each plot is ontop of the previous axis.
ax= sns.boxplot(x="OverallQual", y="SalePrice", data=train[columns])
ax= sns.stripplot(x="OverallQual", y="SalePrice", data=train[columns], jitter=True, edgecolor="gray")
plt.show()
# Tweek the plot above to change fill and border color color using ax.artists.
# Assing ax.artists a variable name, and insert the box number into the corresponding brackets
ax= sns.boxplot(x="OverallQual", y="SalePrice", data=train[columns])
ax= sns.stripplot(x="OverallQual", y="SalePrice", data=train[columns], jitter=True, edgecolor="gray")
boxtwo = ax.artists[2]
boxtwo.set_facecolor('red')
boxtwo.set_edgecolor('black')
boxthree=ax.artists[1]
boxthree.set_facecolor('yellow')
boxthree.set_edgecolor('black')
plt.show()
```
<a id="21"></a> <br>
### 6-2-3 Histogram
We can also create a **histogram** of each input variable to get an idea of the distribution.
```
# histograms
train.hist(figsize=(15,20))
plt.figure()
```
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
<a id="22"></a> <br>
### 6-2-4 Multivariate Plots
Now we can look at the interactions between the variables.
First, let’s look at scatterplots of all pairs of attributes. This can be helpful to spot structured relationships between input variables.
```
# scatter plot matrix
pd.plotting.scatter_matrix(train[columns],figsize=(10,10))
plt.figure()
```
Note the diagonal grouping of some pairs of attributes. This suggests a high correlation and a predictable relationship.
<a id="23"></a> <br>
### 6-2-5 violinplots
```
# violinplots on petal-length for each species
sns.violinplot(data=train,x="Functional", y="SalePrice")
```
<a id="24"></a> <br>
### 6-2-6 pairplot
```
# Using seaborn pairplot to see the bivariate relation between each pair of features
sns.set()
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
sns.pairplot(train[columns],size = 2 ,kind ='scatter')
plt.show()
```
From the plot, we can see the pairwise relationships between the selected features, e.g. how strongly OverallQual and GrLivArea track SalePrice.
We can also replace the histograms shown in the diagonal of the pairplot by kde.
<a id="25"></a> <br>
### 6-2-7 kdeplot
```
# seaborn's kdeplot, plots univariate or bivariate density estimates.
#Size can be changed by tweeking the value used
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
sns.FacetGrid(train[columns], hue="OverallQual", size=5).map(sns.kdeplot, "YearBuilt").add_legend()
plt.show()
```
<a id="26"></a> <br>
### 6-2-8 jointplot
```
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
sns.jointplot(x="OverallQual", y="SalePrice", data=train[columns], size=10,ratio=10, kind='hex',color='green')
plt.show()
```
<a id="27"></a> <br>
### 6-2-9 andrews_curves
```
#In Pandas use Andrews Curves to plot and visualize data structure.
#Each multivariate observation is transformed into a curve and represents the coefficients of a Fourier series.
#This useful for detecting outliers in times series data.
#Use colormap to change the color of the curves
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
# pandas.tools was removed in pandas 0.24; andrews_curves now lives in pandas.plotting.
from pandas.plotting import andrews_curves
andrews_curves(train[columns], "YearBuilt", colormap='rainbow')
plt.show()
# we will use seaborn jointplot shows bivariate scatterplots and univariate histograms with Kernel density
# estimation in the same figure
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
sns.jointplot(x="SalePrice", y="YearBuilt", data=train[columns], size=6, kind='kde', color='#800000', space=0)
```
<a id="28"></a> <br>
### 6-2-10 Heatmap
```
plt.figure(figsize=(7,4))
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
sns.heatmap(train[columns].corr(),annot=True,cmap='cubehelix_r') #draws heatmap with input as the correlation matrix calculted by(iris.corr())
plt.show()
```
<a id="29"></a> <br>
### 6-2-11 radviz
```
# A final multivariate visualization technique pandas has is radviz
# Which puts each feature as a point on a 2D plane, and then simulates
# having each sample attached to those points through a spring weighted
# by the relative value for that feature
# pandas.tools was removed in pandas 0.24; radviz now lives in pandas.plotting.
from pandas.plotting import radviz
columns = ['SalePrice','OverallQual','TotalBsmtSF','GrLivArea','GarageArea','FullBath','YearBuilt','YearRemodAdd']
radviz(train[columns], "OverallQual")
```
### 6-2-12 Conclusion
we have used Python to apply data visualization tools to the House prices dataset. Color and size changes were made to the data points in scatterplots. I changed the border and fill color of the boxplot and violin, respectively.
<a id="30"></a> <br>
## 6-3 Data Preprocessing
**Data preprocessing** refers to the transformations applied to our data before feeding it to the algorithm.
Data Preprocessing is a technique that is used to convert the raw data into a clean data set. In other words, whenever the data is gathered from different sources it is collected in raw format which is not feasible for the analysis.
there are plenty of steps for data preprocessing and we just listed some of them :
* removing Target column (id)
* Sampling (without replacement)
* Making part of iris unbalanced and balancing (with undersampling and SMOTE)
* Introducing missing values and treating them (replacing by average values)
* Noise filtering
* Data discretization
* Normalization and standardization
* PCA analysis
* Feature selection (filter, embedded, wrapper)
## 6-3-1 removing ID
```
# Save Id and drop it
train_ID=train['Id']
test_ID=test['Id']
train.drop('Id',axis=1,inplace=True)
test.drop('Id',axis=1,inplace=True)
```
## 6-3-2 Noise filtering
```
fig, ax = plt.subplots()
ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
```
We can see at the bottom right two points with extremely large GrLivArea but a low price. These values are huge outliers. Therefore, we can safely delete them.
```
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
fig, ax = plt.subplots()
ax.scatter(train['GrLivArea'], train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
```
## 6-3-3 Target Variable
SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
```
sns.distplot(train['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
```
The target variable is right skewed. As (linear) models love normally distributed data , we need to transform this variable and make it more normally distributed.
Log-transformation of the target variable
```
#We use the numpy fuction log1p which applies log(1+x) to all elements of the column
train["SalePrice"] = np.log1p(train["SalePrice"])
#Check the new distribution
sns.distplot(train['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
```
The skew seems now corrected and the data appears more normally distributed.
## 6-3-4 Feature selection
let's first concatenate the train and test data in the same dataframe
```
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("all_data size is : {}".format(all_data.shape))
```
<a id="31"></a> <br>
## 6-4 Data Cleaning
When dealing with real-world data, dirty data is the norm rather than the exception. We continuously need to predict correct values, impute missing ones, and find links between various data artefacts such as schemas and records. We need to stop treating data cleaning as a piecemeal exercise (resolving different types of errors in isolation), and instead leverage all signals and resources (such as constraints, available statistics, and dictionaries) to accurately predict corrective actions.
The primary goal of data cleaning is to detect and remove errors and anomalies to increase the value of data in analytics and decision making. While it has been the focus of many researchers for several years, individual problems have been addressed separately. These include missing value imputation, outliers detection, transformations, integrity constraints violations detection and repair, consistent query answering, deduplication, and many other related problems such as profiling and constraints mining.[8]
```
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data
f, ax = plt.subplots(figsize=(15, 12))
plt.xticks(rotation='90')
sns.barplot(x=all_data_na.index, y=all_data_na)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True)
```
## 6-4-1 Imputing missing values
We impute them by proceeding sequentially through features with missing values
PoolQC : data description says NA means "No Pool". That make sense, given the huge ratio of missing value (+99%) and majority of houses have no Pool at all in general.
```
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
```
MiscFeature : data description says NA means "no misc feature"
```
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
```
Alley : data description says NA means "no alley access"
```
all_data["Alley"] = all_data["Alley"].fillna("None")
```
Fence : data description says NA means "no fence"
```
all_data["Fence"] = all_data["Fence"].fillna("None")
```
FireplaceQu : data description says NA means "no fireplace"
LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood , we can fill in missing values by the median LotFrontage of the neighborhood.
```
#Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median()))
```
GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
```
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
```
GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
```
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
```
BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath : missing values are likely zero for having no basement
```
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
```
BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 : For all these categorical basement-related features, NaN means that there is no basement.
```
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
```
MasVnrArea and MasVnrType : NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
```
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
```
MSZoning (The general zoning classification) : 'RL' is by far the most common value. So we can fill in missing values with 'RL'
```
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
```
Functional : data description says NA means typical
```
all_data["Functional"] = all_data["Functional"].fillna("Typ")
```
Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
```
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
```
KitchenQual: Only one NA value, and same as Electrical, we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
```
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
```
Exterior1st and Exterior2nd : Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string
```
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
```
SaleType : Fill in again with most frequent which is "WD"
```
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
```
MSSubClass : Na most likely means No building class. We can replace missing values with None
```
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
```
FireplaceQu : data description says NA means "no fireplace"
```
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
```
Is there any remaining missing value ?
```
#Check remaining missing values if any
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
```
## 6-4-2 More feature engineering
Transforming some numerical variables that are really categorical
```
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
```
Label Encoding some categorical variables that may contain information in their ordering set
```
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
```
Adding one more important feature
Since area related features are very important to determine house prices, we add one more feature which is the total area of basement, first and second floor areas of each house
```
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
```
## 6-5 Skewed features
```
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head(10)
```
Box Cox Transformation of (highly) skewed features
We use the scipy function boxcox1p which computes the Box-Cox transformation of
1+x
1+x
.
Note that setting
λ=0
λ=0
is equivalent to log1p used above for the target variable.
See this page for more details on Box Cox Transformation as well as the scipy function's page
```
# Keep only the highly skewed features (|skew| > 0.75).
# BUG FIX: the original `skewness[abs(skewness) > 0.75]` indexes the
# DataFrame with a boolean DataFrame, which keeps EVERY row (it only
# NaN-masks values), so the filter silently selected all features.
# Masking on the 'Skew' column gives the intended row filter.
skewness = skewness[abs(skewness['Skew']) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15  # fixed lambda for the Box Cox transform of 1+x
for feat in skewed_features:
    # boxcox1p already operates on 1+x, so no manual shift is needed:
    # all_data[feat] += 1
    all_data[feat] = boxcox1p(all_data[feat], lam)
# Alternative (equivalent to lam=0): all_data[skewed_features] = np.log1p(all_data[skewed_features])
```
Getting dummy categorical features
```
all_data = pd.get_dummies(all_data)
print(all_data.shape)
```
Getting the new train and test sets.
```
train = all_data[:ntrain]
test = all_data[ntrain:]
```
<a id="32"></a> <br>
## 7- Model Deployment
In this section have been applied more than **20 learning algorithms** that play an important rule in your experiences and improve your knowledge in case of ML technique.
> **<< Note 3 >>** : The results shown here may be slightly different for your analysis because, for example, the neural network algorithms use random number generators for fixing the initial value of the weights (starting points) of the neural networks, which often result in obtaining slightly different (local minima) solutions each time you run the analysis. Also note that changing the seed for the random number generator used to create the train, test, and validation samples can change your results.
```
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
```
## 7-1 Define a cross validation strategy
We use the cross_val_score function of **Sklearn**. However, this function does not have a shuffle attribute, so we add one line of code in order to shuffle the dataset prior to cross-validation.
```
#Validation function
n_folds = 5

def rmsle_cv(model):
    """Return the per-fold cross-validated RMSE of `model` on the global train / y_train.

    BUG FIX: the original did `KFold(...).get_n_splits(train.values)`, which
    returns the plain integer 5; passing an int as `cv` makes cross_val_score
    build its own *unshuffled* KFold, silently discarding shuffle=True and
    random_state=42. Passing the KFold object itself preserves the intended
    shuffled, reproducible folds.
    """
    kf = KFold(n_folds, shuffle=True, random_state=42)
    # scoring is negated MSE, so flip the sign before taking the square root.
    rmse = np.sqrt(-cross_val_score(model, train.values, y_train,
                                    scoring="neg_mean_squared_error", cv=kf))
    return rmse
```
## 7-2 Model
## LASSO Regression
In statistics and machine learning, lasso (least absolute shrinkage and selection operator) is a **regression analysis** method that performs both variable selection and regularization in order to enhance the prediction accuracy and interpretability of the statistical model it produces. Lasso was originally formulated for least squares models and this simple case reveals a substantial amount about the behavior of the estimator, including its relationship to ridge regression and best subset selection and the connections between lasso coefficient estimates and so-called soft thresholding. It also reveals that (like standard linear regression) the coefficient estimates need not be unique if covariates are collinear.
This model may be very sensitive to outliers, so we need to make it more robust to them. For that we use sklearn's RobustScaler() method in the pipeline.
```
# LASSO with a small alpha; RobustScaler centers/scales with medians and IQR,
# so outliers do not dominate the feature scaling.
lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))
```
## Elastic Net Regression
the elastic net is a regularized regression method that linearly combines the L1 and L2 penalties of the lasso and ridge methods.
again made robust to outliers
```
# Elastic Net: mostly-L1 penalty (l1_ratio=.9) with the same robust scaling.
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
```
## Kernel Ridge Regression
Kernel ridge regression (KRR) combines Ridge Regression (linear least squares with l2-norm regularization) with the kernel trick. It thus learns a linear function in the space induced by the respective kernel and the data. For non-linear kernels, this corresponds to a non-linear function in the original space.
```
# Kernel ridge regression with a degree-2 polynomial kernel.
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
```
## Gradient Boosting Regression
With huber loss that makes it robust to outliers
```
# Gradient boosting with Huber loss for robustness to outliers;
# shallow trees + small learning rate + many estimators.
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
                                   max_depth=4, max_features='sqrt',
                                   min_samples_leaf=15, min_samples_split=10,
                                   loss='huber', random_state =5)
```
## XGBoost
```
# XGBoost regressor with pre-tuned hyperparameters.
# NOTE(review): `silent` was deprecated and later removed from xgboost
# (replaced by `verbosity`) — confirm against the pinned xgboost version.
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
                             learning_rate=0.05, max_depth=3,
                             min_child_weight=1.7817, n_estimators=2200,
                             reg_alpha=0.4640, reg_lambda=0.8571,
                             subsample=0.5213, silent=1,
                             random_state =7, nthread = -1)
```
## LightGBM
```
# LightGBM regressor with pre-tuned hyperparameters.
# NOTE(review): bagging_* / feature_fraction / min_data_in_leaf are native
# LightGBM parameter aliases; newer lightgbm versions warn about aliases
# passed through the sklearn wrapper — confirm against the pinned version.
model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,
                              learning_rate=0.05, n_estimators=720,
                              max_bin = 55, bagging_fraction = 0.8,
                              bagging_freq = 5, feature_fraction = 0.2319,
                              feature_fraction_seed=9, bagging_seed=9,
                              min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)
```
## Base models scores
Let's see how these base models perform on the data by evaluating the cross-validation rmsle error
```
# Cross-validated RMSE (mean and std over folds) for each base model.
score = rmsle_cv(lasso)
print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(KRR)
print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_xgb)
print("Xgboost score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_lgb)
print("LGBM score: {:.4f} ({:.4f})\n" .format(score.mean(), score.std()))
```
Stacking models
Simplest Stacking approach : Averaging base models
We begin with this simple approach of averaging base models. We build a new class to extend scikit-learn with our model and also to leverage encapsulation and code reuse (inheritance).
Averaged base models class
```
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Ensemble that fits clones of several base models and predicts their plain average."""

    def __init__(self, models):
        # Keep the user-supplied estimators untouched; clones are made in fit().
        self.models = models

    def fit(self, X, y):
        """Fit a fresh clone of every base model on (X, y)."""
        self.models_ = []
        for estimator in self.models:
            fitted = clone(estimator)
            fitted.fit(X, y)
            self.models_.append(fitted)
        return self

    def predict(self, X):
        """Return the sample-wise mean of all fitted clones' predictions."""
        stacked = np.column_stack([m.predict(X) for m in self.models_])
        return stacked.mean(axis=1)
```
Averaged base models score
We just average four models here ENet, GBoost, KRR and lasso. Of course we could easily add more models in the mix.
```
# Simple average of four base models, scored with the same CV harness.
averaged_models = AveragingModels(models = (ENet, GBoost, KRR, lasso))
score = rmsle_cv(averaged_models)
print(" Averaged base models score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Out-of-fold stacking: base models' OOF predictions become the
    meta-model's training features, avoiding target leakage."""

    def __init__(self, base_models, meta_model, n_folds=5):
        self.base_models = base_models
        self.meta_model = meta_model
        self.n_folds = n_folds

    # We again fit the data on clones of the original models
    def fit(self, X, y):
        """Fit fold-wise clones of each base model, then fit the meta-model
        on their out-of-fold predictions."""
        # One list of fitted clones per base model (one clone per fold).
        self.base_models_ = [list() for x in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
        # Train cloned base models then create out-of-fold predictions
        # that are needed to train the cloned meta-model
        out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
        for i, model in enumerate(self.base_models):
            # fixed random_state => every model sees the same fold partition
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred
        # Now train the cloned meta-model using the out-of-fold predictions as new feature
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self

    # Do the predictions of all base models on the test data and use the
    # fold-averaged predictions as meta-features for the final prediction,
    # which is done by the meta-model.
    def predict(self, X):
        meta_features = np.column_stack([
            np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
            for base_models in self.base_models_ ])
        return self.meta_model_.predict(meta_features)
# Stack ENet / GBoost / KRR as base learners with lasso as the meta-model.
stacked_averaged_models = StackingAveragedModels(base_models = (ENet, GBoost, KRR),
                                                 meta_model = lasso)
score = rmsle_cv(stacked_averaged_models)
print("Stacking Averaged models score: {:.4f} ({:.4f})".format(score.mean(), score.std()))
def rmsle(y, y_pred):
    """Root-mean-squared error between `y` and `y_pred`.

    The target in this notebook is already log1p-transformed, so this RMSE
    acts as an RMSLE on the original SalePrice scale.

    Implemented with plain numpy — numerically equivalent to
    np.sqrt(mean_squared_error(y, y_pred)) but without the sklearn call.
    """
    diff = np.asarray(y, dtype=float) - np.asarray(y_pred, dtype=float)
    return np.sqrt(np.mean(diff ** 2))
#StackedRegressor
#Final Training and Prediction
# Fit each model on the full training set; test predictions are expm1'd
# because the target was log1p-transformed earlier in the notebook.
stacked_averaged_models.fit(train.values, y_train)
stacked_train_pred = stacked_averaged_models.predict(train.values)
stacked_pred = np.expm1(stacked_averaged_models.predict(test.values))
print(rmsle(y_train, stacked_train_pred))
#XGBoost
model_xgb.fit(train, y_train)
xgb_train_pred = model_xgb.predict(train)
xgb_pred = np.expm1(model_xgb.predict(test))
print(rmsle(y_train, xgb_train_pred))
#lightGBM
model_lgb.fit(train, y_train)
lgb_train_pred = model_lgb.predict(train)
lgb_pred = np.expm1(model_lgb.predict(test.values))
print(rmsle(y_train, lgb_train_pred))
'''RMSE on the entire Train data when averaging'''
print('RMSLE score on train data:')
# Weighted blend: 70% stacked model, 15% XGBoost, 15% LightGBM.
print(rmsle(y_train,stacked_train_pred*0.70 +
            xgb_train_pred*0.15 + lgb_train_pred*0.15 ))
```
## Ensemble prediction
```
# Final blended prediction (same 70/15/15 weights) and Kaggle submission file.
ensemble = stacked_pred*0.70 + xgb_pred*0.15 + lgb_pred*0.15
sub = pd.DataFrame()
sub['Id'] = test_ID
sub['SalePrice'] = ensemble
sub.to_csv('submission.csv',index=False)
```
-----------------
<a id="53"></a> <br>
## 8- Conclusion
In this kernel, I have tried to cover all the parts related to the process of ML with a variety of Python packages and I know that there are still some problems then I hope to get your feedback to improve it.
you can follow me on:
> ###### [ GitHub](https://github.com/mjbahmani)
> ###### [LinkedIn](https://www.linkedin.com/in/bahmani/)
> ###### [Kaggle](https://www.kaggle.com/mjbahmani/)
--------------------------------------
**I hope you find this kernel helpful and some upvotes would be very much appreciated**
----------
<a id="54"></a> <br>
# 9- References
* [1] [https://skymind.ai/wiki/machine-learning-workflow](https://skymind.ai/wiki/machine-learning-workflow)
* [2] [Problem-define](https://machinelearningmastery.com/machine-learning-in-python-step-by-step/)
* [3] [Sklearn](http://scikit-learn.org/)
* [4] [machine-learning-in-python-step-by-step](https://machinelearningmastery.com/machine-learning-in-python-step-by-step/)
* [5] [Data Cleaning](http://wp.sigmod.org/?p=2288)
* [6] [kaggle kernel](https://www.kaggle.com/serigne/stacked-regressions-top-4-on-leaderboard)
please write your idea!
| github_jupyter |
# [ATM 623: Climate Modeling](../index.ipynb)
[Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany
# Lecture 23: The surface energy balance
## Warning: content out of date and not maintained
You really should be looking at [The Climate Laboratory book](https://brian-rose.github.io/ClimateLaboratoryBook) by Brian Rose, where all the same content (and more!) is kept up to date.
***Here you are likely to find broken links and broken code.***
### About these notes:
This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways:
- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware
- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)
- A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html).
[Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).
Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab
```
# Ensure compatibility with Python 2 and 3
from __future__ import print_function, division
```
## Contents
1. [Energy exchange mechanisms at the Earth's surface](#section1)
2. [Surface energy budget in CESM simulations](#section2)
3. [Sensible and Latent Heat Fluxes in the boundary layer](#section3)
4. [Bowen ratio in CESM simulations](#section4)
____________
<a id='section1'></a>
## 1. Energy exchange mechanisms at the Earth's surface
____________
The surface of the Earth is the boundary between the atmosphere and the land, ocean, or ice. Understanding the energy fluxes across the surface are very important for three main reasons:
1. We are most interested in the climate at the surface because we live at the surface.
2. The surface energy budget determines how much energy is available to evaporate water and moisten the atmosphere.
3. Air-sea energy fluxes set the thermal structure of the oceans, which in turn act to redistribute energy around the planet, with many important consequences for climate.
The energy budget at the surface is more complex than the budget at the top of the atmosphere. At the TOA the only energy transfer mechanisms are radiative (shortwave and longwave). At the surface, in addition to radiation we need to consider fluxes of energy by conduction and by convection of heat and moisture through turbulent fluid motion.
### Major terms in the surface energy budget
We will denote the **net upward energy flux at the surface** as $F_S$.
As we mentioned back in [Lecture 15 on heat transport](./Lecture15 -- Heat transport.ipynb), there are four principal contributions to $F_S$:
1. Shortwave radiation
2. Longwave radiation
3. Sensible heat flux
4. Evaporation or latent heat flux
Wherever $F_S \ne 0$, there is a net flux of energy between the atmosphere and the surface below. This implies either that there is heat storage / release occuring below the surface (e.g. warming or cooling of water, melting of snow and ice), and/or there is horizontal heat transport by fluid motions occuring below the surface (ocean circulation, groundwater flow).
### Minor terms in the surface energy budget
All of these terms are small globally but can be significant locally or seasonally.
- Latent heat of fusion required for melting ice and snow
- Conversion of the kinetic energy of winds and waves to thermal energy
- Heat transport by precipitation, if precipitation is at a different temperature than the surface
- Biological uptake of solar energy through photosynthesis
- Biological release of energy through oxidation (respiration, decay, fires)
- Geothermal heat sources (hot springs, volcanoes, etc.)
- Anthropogenic heat released through fossil fuel burning and nuclear power generation.
____________
<a id='section2'></a>
## 2. The surface energy budget in CESM simulations
____________
We will examine the surface budget in the CESM slab ocean simulations. The advantage of looking at surface fluxes in a model rather than observations is that the model fluxes are completely consistent with the model climate, so that the net flux $F_S$ will be a meaningful measure of the heat storage in the system.
The model also gives us an opportunity to look at how the surface budget responds to global warming under a doubling of CO$_2$.
### First, load the data
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
from climlab import constants as const
# Open the CESM slab-ocean climatology runs remotely over OPeNDAP.
datapath = "http://thredds.atmos.albany.edu:8080/thredds/dodsC/cesm/"
topo = xr.open_dataset(datapath+'som_input/USGS-gtopo30_1.9x2.5_remap_c050602.nc', decode_times=False)
runlist = ['control', '2xCO2']
runs = {}
for run in runlist:
    runstr = 'som_' + run
    path = datapath + runstr + '/' + runstr + '.cam.h0.clim.nc'
    runs[run] = xr.open_dataset(path, decode_times=False)
# Coordinate axes shared by both runs.
lat = runs['control'].lat
lon = runs['control'].lon
lev = runs['control'].lev
```
### Annual mean surface energy budget
```
# Surface energy budget terms, all defined as positive up (from ocean to atmosphere)
surface_budget = {}
for (name, run) in runs.items():
    budget = xr.Dataset()
    budget['LHF'] = run.LHFLX
    budget['SHF'] = run.SHFLX
    budget['LWsfc'] = run.FLNS
    budget['LWsfc_clr'] = run.FLNSC
    # Negated so positive = upward, matching the other terms
    # (presumably FSNS/FSNSC are positive down in CESM — confirm in model docs).
    budget['SWsfc'] = -run.FSNS
    budget['SWsfc_clr'] = -run.FSNSC
    # Latent heat of fusion carried by falling snow.
    budget['SnowFlux'] = ((run.PRECSC+run.PRECSL)
                          *const.rho_w*const.Lhfus)
    # net upward radiation from surface
    budget['NetRad'] = budget['LWsfc'] + budget['SWsfc']
    budget['NetRad_clr'] = budget['LWsfc_clr'] + budget['SWsfc_clr']
    # net upward surface heat flux
    budget['Net'] = (budget['NetRad'] + budget['LHF'] +
                     budget['SHF'] + budget['SnowFlux'])
    surface_budget[name] = budget
```
### Compute anomalies for all terms
```
# Here we take advantage of xarray!
# We can simply subtract the two xarray.Dataset objects
# to get anomalies for every term
surface_budget['anom'] = surface_budget['2xCO2'] - surface_budget['control']
# Also compute zonal averages (mean over longitude) for each run and the anomaly.
zonal_budget = {}
for run, budget in surface_budget.items():
    zonal_budget[run] = budget.mean(dim='lon')
```
### Plot the annual mean net upward flux $F_S$ (control and anomaly after warming)
```
# Maps of annual-mean net surface flux: control (left) and 2xCO2 anomaly (right).
fig, axes = plt.subplots(1,2, figsize=(16,5))
cax1 = axes[0].pcolormesh(lon, lat, surface_budget['control'].Net.mean(dim='time'),
                          cmap=plt.cm.seismic, vmin=-200., vmax=200. )
axes[0].set_title('Annual mean net surface heat flux (+ up) - CESM control')
cax2 = axes[1].pcolormesh(lon, lat, surface_budget['anom'].Net.mean(dim='time'),
                          cmap=plt.cm.seismic, vmin=-20., vmax=20. )
fig.colorbar(cax1, ax=axes[0]); fig.colorbar(cax2, ax=axes[1])
axes[1].set_title('Anomaly after CO2 doubling')
# Overlay coastlines via the 0.5 land-fraction contour.
for ax in axes:
    ax.set_xlim(0, 360); ax.set_ylim(-90, 90); ax.contour( lon, lat, topo.LANDFRAC, [0.5], colors='k');
```
Some notable points about the control state:
- The net flux over all land surfaces is very close to zero!
- In the long-term annual mean, a non-zero $F_S$ must be balanced by heat transport.
- The spatial pattern of $F_S$ over the oceans is essentially just the prescribed q-flux that we have imposed on the slab ocean to represent ocean heat transport.
- We have looked at maps like this before, back in [Lecture 15](./Lecture15 -- Heat transport.ipynb) and [Assignment 2](../Assignments/Assignment02 -- Introducing CESM.ipynb).
- Net heat uptake by the oceans occurs mostly along the equator and the cold tongues on the eastern sides of the tropical basins.
- Net heat release from oceans to atmosphere occurs mostly in mid- to high latitudes. Hot spots include the Gulf Stream and Kuroshio regions on the western sides of the mid-latitude basins, as well as the subpolar North Atlantic. These features are largely determined by ocean dynamics.
**After greenhouse warming**:
- The net change in $F_S$ is very small in most locations.
- This indicates that the model has reached quasi-equilibrium. Non-zero changes in $F_S$ would indicate either
- heat storage below the surface
- changes in ocean heat transport (not permitted in a slab ocean model).
- Non-zero changes are found in areas where the sea ice cover is changing in the model.
### Variation of energy balance components with latitude
```
# Zonal-mean annual budget components: control (left) and anomaly (right).
fieldlist = ['SWsfc', 'LWsfc', 'LHF', 'SHF', 'Net']
fig, axes = plt.subplots(1,2, figsize=(16,5))
for ax, run in zip(axes, ['control', 'anom']):
    for field in fieldlist:
        ax.plot(lat, zonal_budget[run][field].mean(dim='time'), label=field)
    ax.set_xlim(-90, 90); ax.grid(); ax.legend()
axes[0].set_title('Components of ANNUAL surface energy budget (+ up) - CESM control')
axes[1].set_title('Anomaly after CO2 doubling');
```
In these graphs, the curve labeled "Net" is the net flux $F_S$. It is just the zonal average of the maps from the previous figure, and shows the ocean heat uptake at the equator and release in mid- to high latitudes.
More interestingly, these graphs show the contribution of the various terms to $F_S$. They are all plotted as positive up. A **negative** value thus indicates **heating of the surface**, and a **positive** value indicates a **cooling of the surface**.
Key points about the control simulation:
- Solar radiation acts to warm the surface everywhere.
- Note that this is a net shortwave flux, so it is the amount that is actually absorbed by the surface after accounting for the reflected fraction.
- All other mechanisms act to cool the surface.
- The dominant balance across the **tropics** is between **warming by solar radiation** and **cooling by evaporation** (latent heat flux or LHF).
- The latent heat flux decreases poleward.
- Latent heat flux is dominant over sensible heat flux at most latitudes except close to the poles.
- The net longwave radiation also acts to cool the surface.
- This is the residual between the surface emissions (essentially $\sigma~T_s^4$) and the back-radiation from the atmosphere.
**After greenhouse warming**
- The anomaly in net upward longwave radiation is negative at most latitudes.
- This implies that the downwelling back-radiation increases faster than the upwelling surface emissions.
- This is a signature of greenhouse warming! Both the upwelling and downwelling beams increase with warming. But the downwelling beam also increases because of the CO$_2$-induced increase in emissivity of the atmosphere.
- So the surface is warmed by excess longwave radiation.
- This warming is largely balanced by increased evaporation (red curve)!
- There are also significant changes in shortwave radiation. We could use the modeled clear-sky diagnostics to infer which of these changes are due to clouds.
- The negative shortwave anomalies in high latitudes are consistent with surface albedo feedback and loss of ice and snow.
### Seasonal variations
We will compute the budgets for the months of January and July, and plot their differences.
```
# July minus January: seasonal amplitude of each surface budget term.
julminusjan_budget = {}
for name, budget in surface_budget.items():
    # xarray.Dataset objects let you "select" a subset in various ways;
    # here by integer time index: 6 = July, 0 = January of the climatology.
    julminusjan_budget[name] = budget.isel(time=6) - budget.isel(time=0)

fieldlist = ['SWsfc', 'LWsfc', 'LHF', 'SHF', 'Net']
fig, axes = plt.subplots(1, 2, figsize=(16, 5))
titles = ['Components of JUL-JAN surface energy budget (+ up) - CESM control',
          'Anomaly after CO2 doubling']
# Same zip-over-axes pattern as the annual-mean plot above.
for ax, run, title in zip(axes, ['control', 'anom'], titles):
    for field in fieldlist:
        ax.plot(lat, julminusjan_budget[run][field].mean(dim='lon'), label=field)
    ax.set_title(title)
for ax in axes:
    ax.set_xlim(-90, 90)
    ax.grid()
    ax.legend()
```
Seasonally, the dominant balance by far is between solar radiation and heat storage!
____________
<a id='section3'></a>
## 3. Sensible and Latent Heat Fluxes in the boundary layer
____________
These notes largely follow Chapter 4 of Hartmann (1994) "Global Physical Climatology", Academic Press.
Turbulent fluxes of heat: eddy fluxes of heat and moisture at some level in the atmospheric boundary layer
$$ \text{SH} = c_p ~\rho ~ \overline{w^\prime T^\prime} $$
$$ \text{LE} = L ~\rho ~\overline{w^\prime q^\prime} $$
where $c_p$ is the specific heat of air at constant pressure, $L$ is the latent heat of vaporization, $\text{SH}$ is the sensible heat flux and $\text{LE}$ is the latent heat flux.
### Bulk aerodynamic formulas
From theory of boundary layer turbulence, we suppose that the eddy heat fluxes is related to boundary layer temperature gradients, as well as the mean wind speed:
$$ \text{SH} = c_p ~\rho ~ C_D ~ U \left( T_s - T_a \right) $$
where $T_s$ is the surface temperature and $T_a$ is the air temperature at some reference height above the surface. $U$ is the wind speed at the reference height, and $C_D$ is a dimensionless aerodynamic drag coefficient.
$C_D$ will depend, among other things, on the roughness of the surface.
Similarly, we assume that the latent heat flux is related to boundary layer moisture gradients:
$$ \text{LE} = L ~\rho ~ C_D ~ U \left( q_s - q_a \right) $$
where $q_s$ is the specific humidity of air immediately above the surface, and $q_a$ is the specific humidity at the reference height.
In general the transfer coefficients $C_D$ could be different for sensible and latent heat flux, but empirically they are found to be very similar to each other. We will assume they are equal here.
### The Bowen ratio
The **Bowen ratio** is a dimensionless number defined as
$$ B_o = \frac{\text{SH}}{\text{LE}} $$
i.e. the ratio of **sensible heat loss** to **evaporative cooling**.
From the above plots, the Bowen ratio tends to be small in the low latitudes.
### The Bowen ratio for wet surfaces
Over a water surface or a very wet land surface, we may assume that the mixing ratio of water vapor at the surface is equal to the saturation mixing ratio $q^*$ at the temperature of the surface:
$$ q_s = q^*(T_s) $$
Recall that the saturation vapor pressure $q^*$ is a sensitive function of temperature through the Clausius-Clapeyron relation. (It also depends on pressure.)
Let's approximate the mixing ratio for **saturated air** at the reference height through a first-order Taylor series expansion:
$$ q_a^* \approx q_s^*(T_s) + \frac{\partial q^*}{\partial T} \left( T_a - T_s \right) $$
The actual mixing ratio at the reference height can be expressed as
$$ q_a = r ~ q_a^* $$
where $r$ is the relative humidity at that level.
Then we have an appoximation for $q_a$ in terms of temperature gradients:
$$ q_a \approx r \left( q_s^*(T_s) + \frac{\partial q^*}{\partial T} \left( T_a - T_s \right) \right) $$
Substituting this into the bulk formula for latent heat flux, we get
$$ \text{LE} \approx L ~\rho ~ C_D ~ U \left( q_s^* - r \left( q_s^* + \frac{\partial q^*}{\partial T} \left( T_a - T_s \right) \right) \right) $$
or, rearranging a bit,
$$ \text{LE} \approx L ~\rho ~ C_D ~ U \left( (1-r) ~ q_s^* + r \frac{\partial q^*}{\partial T} \left( T_s - T_a \right) \right) $$
The Bowen ratio is thus
$$ B_o = \frac{c_p}{ L \left( \frac{(1-r)}{\left( T_s - T_a \right)} q_s^* + r \frac{\partial q^*}{\partial T} \right)} $$
### The equilibrium Bowen ratio (for saturated air)
Notice that **if the boundary layer air is saturated**, then $r=1$ and the Bowen ratio takes on a special value
$$ B_e = \frac{c_p}{ L \frac{\partial q^*}{\partial T} } $$
When the surface and the air at the reference level are saturated, the Bowen ratio approaches the value $B_e$, which is called the equilibrium Bowen ratio. We presume that the flux of moisture from the boundary layer to the free atmosphere is sufficient to just balance the upward flux of moisture from the surface so that the humidity at the reference height is in equilibrium at the saturation value.
Recall that from the Clausius-Clapeyron relation, the rate of change of the saturation mixing ratio is itself a strong function of temperature:
$$ \frac{\partial q^*}{\partial T} = q^*(T) \frac{L}{R_v ~ T^2} $$
Here the quasi-exponential dependence of $q^*$ on $T$ far outweighs the inverse square dependence, so the **equilibrium Bowen ratio decreases roughly exponentially with temperature**.
The following code reproduces Figure 4.10 of Hartmann (1994).
```
from climlab.utils.thermo import qsat
# Temperatures from -40 to +40 degC, converted to Kelvin.
T = np.linspace(-40, 40) + const.tempCtoK
qstar = qsat(T, const.ps) # in kg / kg

def Be(T):
    """Equilibrium Bowen ratio B_e = c_p / (L * dq*/dT) at temperature T (K)."""
    qstar = qsat(T, const.ps) # in kg / kg
    # Clausius-Clapeyron: dq*/dT = q* L / (R_v T^2)
    dqstardT = qstar * const.Lhvap / const.Rv / T**2
    return const.cp / const.Lhvap / dqstardT

# Reproduce Hartmann (1994) Fig. 4.10: q* and B_e vs temperature, log y-axis.
fig, ax = plt.subplots()
ax.semilogy(T + const.tempKtoC, qstar*1000, label='$q^*$')
ax.semilogy(T + const.tempKtoC, Be(T), label='$B_e$')
ax.grid()
ax.set_xlabel('Temperature (degC)')
ax.legend(loc='upper center')
ax.set_title('Saturation specific humidity (g/kg) and equilibrium Bowen ratio');
```
- Equilibrium Bowen ratio is near 1 at 0ºC, and decreases to about 0.2 at 30ºC.
- As relative humidity is decreased from 1 to smaller values, **evaporative cooling increases**.
- The equilibrium Bowen ratio is the **maximum possible Bowen ratio for a wet surface**.
- Actual Bowen ratio over a wet surface will generally be smaller than $B_e$, because the air is usually not saturated.
- Because of the strong temperature dependence of the saturation specific humidity:
- Evaporative cooling (latent heat flux) dominates over sensible cooling of wet surfaces at **tropical** temperatures.
- Sensible heat flux becomes important wherever the surface is either **cold** or **dry**.
____________
<a id='section4'></a>
## 4. Bowen ratio in CESM simulations
____________
```
Bo_control = (surface_budget['control'].SHF.mean(dim='time') /
surface_budget['control'].LHF.mean(dim='time'))
Be_control = Be(runs['control'].TS.mean(dim='time'))
fig,axes = plt.subplots(1,3,figsize=(16,4))
cax1 = axes[0].pcolormesh(lon, lat, Bo_control,
vmin=0., vmax=5. )
fig.colorbar(cax1, ax=axes[0])
axes[0].set_title('$B_o$ (CESM control)', fontsize=20)
cax2 = axes[1].pcolormesh(lon, lat, Be_control,
vmin=0., vmax=5. )
fig.colorbar(cax2, ax=axes[1])
axes[1].set_title('$B_e$ (CESM control)', fontsize=20)
cax3 = axes[2].pcolormesh(lon, lat, (Bo_control - Be_control),
cmap='seismic', vmin=-10., vmax=10. )
fig.colorbar(cax3, ax=axes[2])
axes[2].set_title('$B_o - B_e$ (CESM control)', fontsize=20)
for ax in axes:
ax.set_xlim(0, 360)
ax.set_ylim(-90, 90)
ax.contour( lon, lat, topo.variables['LANDFRAC'][:], [0.5], colors='k');
```
On the difference plot, the blue colors indicate the actual Bowen ratio is smaller than the equilibrium Bowen ratio. This will typically occur for **wet surfaces** with **undersaturated air**.
The red colors indicate the actual Bowen ratio is larger than the equilibrium Bowen ratio. This typically occurs for **dry surfaces** where there is not enough water available to satisfy the energetic demand for evaporation.
<div class="alert alert-success">
[Back to ATM 623 notebook home](../index.ipynb)
</div>
____________
## Version information
____________
```
# IPython magics: report library versions for reproducibility.
%load_ext version_information
%version_information numpy, matplotlib, xarray, climlab
```
____________
## Credits
The author of this notebook is [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php)
Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation.
____________
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_03_2_keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 3: Introduction to TensorFlow**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 3 Material
* Part 3.1: Deep Learning and Neural Network Introduction [[Video]](https://www.youtube.com/watch?v=zYnI4iWRmpc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_1_neural_net.ipynb)
* **Part 3.2: Introduction to Tensorflow and Keras** [[Video]](https://www.youtube.com/watch?v=PsE73jk55cE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_2_keras.ipynb)
* Part 3.3: Saving and Loading a Keras Neural Network [[Video]](https://www.youtube.com/watch?v=-9QfbGM1qGw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_3_save_load.ipynb)
* Part 3.4: Early Stopping in Keras to Prevent Overfitting [[Video]](https://www.youtube.com/watch?v=m1LNunuI2fk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_4_early_stop.ipynb)
* Part 3.5: Extracting Weights and Manual Calculation [[Video]](https://www.youtube.com/watch?v=7PWgx16kH8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_5_weights.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
try:
    # %tensorflow_version is a Colab-only magic; outside Colab this raises,
    # which is how we detect the environment.
    %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False
```
# Part 3.2: Introduction to Tensorflow and Keras
TensorFlow is an open-source software library for machine learning in various kinds of perceptual and language understanding tasks. It is currently used for both research and production by different teams in many commercial Google products, such as speech recognition, Gmail, Google Photos, and search, many of which had previously used its predecessor DistBelief. TensorFlow was originally developed by the Google Brain team for Google's research and production purposes and later released under the Apache 2.0 open source license on November 9, 2015.
* [TensorFlow Homepage](https://www.tensorflow.org/)
* [TensorFlow GitHib](https://github.com/tensorflow/tensorflow)
* [TensorFlow Google Groups Support](https://groups.google.com/forum/#!forum/tensorflow)
* [TensorFlow Google Groups Developer Discussion](https://groups.google.com/a/tensorflow.org/forum/#!forum/discuss)
* [TensorFlow FAQ](https://www.tensorflow.org/resources/faq)
### What version of TensorFlow do you have?
TensorFlow is very new and changing rapidly. You must run the same version of it that I am using. For this semester, we will use a specific version of TensorFlow (mentioned in the last class notes).
```
# Report the installed TensorFlow version so the environment can be verified.
import tensorflow as tf
print("Tensor Flow Version: {}".format(tf.__version__))
```
Installing TensorFlow was covered in Module 1.
## Why TensorFlow
* Supported by Google
* Works well on Windows, Linux, and Mac
* Excellent GPU support
* Python is an easy to learn programming language
* Python is extremely popular in the data science community
## Deep Learning Tools
TensorFlow is not the only game in town. The biggest competitor to TensorFlow/Keras is PyTorch. Listed below are some of the deep learning toolkits actively being supported:
* [TensorFlow](https://www.tensorflow.org/) Google's deep learning API. The focus of this class, along with Keras.
* [Keras](https://keras.io/) - Also by Google, higher level framework that allows the use of TensorFlow, MXNet and Theano interchangeably.
* [PyTorch](https://pytorch.org/) - PyTorch is an open source machine learning library based on the Torch library, used for applications such as computer vision and natural language processing. It is primarily developed by Facebook's AI Research lab.
Other deep learning tools:
* [MXNet](https://mxnet.incubator.apache.org/) Apache foundation's deep learning API. Can be used through Keras.
* [Torch](http://torch.ch/) is used by Google DeepMind, the Facebook AI Research Group, IBM, Yandex and the Idiap Research Institute. It has been used for some of the most advanced deep learning projects in the world. However, it requires the [LUA](https://en.wikipedia.org/wiki/Lua_(programming_language)) programming language. It is very advanced, but it is not mainstream. I have not worked with Torch (yet!).
* [PaddlePaddle](https://github.com/baidu/Paddle) - [Baidu](http://www.baidu.com/)'s deep learning API.
* [Deeplearning4J](http://deeplearning4j.org/) - Java based. Supports all major platforms. GPU support in Java!
* [Computational Network Toolkit (CNTK)](https://github.com/Microsoft/CNTK) - Microsoft. Support for Windows/Linux, command line only. Bindings for predictions for C#/Python. GPU support.
* [H2O](http://www.h2o.ai/) - Java based. Supports all major platforms. Limited support for computer vision. No GPU support.
## Using TensorFlow Directly
Most of the time in the course, we will communicate with TensorFlow using Keras [[Cite:franccois2017deep]](https://www.manning.com/books/deep-learning-with-python), which allows you to specify the number of hidden layers and create the neural network. TensorFlow is a low-level mathematics API, similar to [Numpy](http://www.numpy.org/). However, unlike Numpy, TensorFlow is built for deep learning. TensorFlow compiles these compute graphs into highly efficient C++/[CUDA](https://en.wikipedia.org/wiki/CUDA) code.
### TensorFlow Linear Algebra Examples
TensorFlow is a library for linear algebra. Keras is a higher-level abstraction for neural networks that you build upon TensorFlow. In this section, I will demonstrate some basic linear algebra that employs TensorFlow directly and does not make use of Keras. First, we will see how to multiply a row and column matrix.
```
import tensorflow as tf
# Create a Constant op that produces a 1x2 matrix. The op is
# added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
matrix1 = tf.constant([[3., 3.]])
# Create another Constant that produces a 2x1 matrix.
matrix2 = tf.constant([[2.],[2.]])
# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
# The returned value, 'product', represents the result of the matrix
# multiplication: (1x2) @ (2x1) -> a 1x1 tensor.
product = tf.matmul(matrix1, matrix2)
print(product)
# float() extracts the single scalar value from the 1x1 result tensor.
print(float(product))
```
This example multiplied two TensorFlow constant tensors. Next, we will see how to subtract a constant from a variable.
```
import tensorflow as tf
# A Variable can be reassigned later; a constant cannot.
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
# Add an op to subtract 'a' from 'x'. Run it and print the result
sub = tf.subtract(x, a)
print(sub)
# .numpy() converts the eager tensor into a NumPy array for display.
print(sub.numpy())
# ==> [-2. -1.]
```
Of course, variables are only useful if their values can be changed. The program can accomplish this change in value by calling the assign function.
```
x.assign([4.0, 6.0])
```
The program can now perform the subtraction with this new value.
```
# Recompute the subtraction with the updated variable value ([4,6] - [3,3]).
sub = tf.subtract(x, a)
print(sub)
print(sub.numpy())
```
In the next section, we will see a TensorFlow example that has nothing to do with neural networks.
### TensorFlow Mandelbrot Set Example
Next, we examine another example where we use TensorFlow directly. To demonstrate that TensorFlow is mathematical and does not only provide neural networks, we will also first use it for a non-machine learning rendering task. The code presented here is capable of rendering a [Mandelbrot set](https://en.wikipedia.org/wiki/Mandelbrot_set). Note, I based this example on a Mandelbrot example that I found [here]( https://chromium.googlesource.com/external/github.com/tensorflow/tensorflow/+/r0.10/tensorflow/g3doc/tutorials/mandelbrot/index.md). I've updated the code slightly to comply with current versions of TensorFlow.
```
# Import libraries for simulation
import tensorflow as tf
import numpy as np
# Imports for visualization
import PIL.Image
from io import BytesIO
from IPython.display import Image, display
def DisplayFractal(a, fmt='jpeg'):
    """Display an array of iteration counts as a
    colorful picture of a fractal."""
    # Map the iteration counts onto a repeating color cycle (period ~20).
    a_cyclic = (6.28*a/20.0).reshape(list(a.shape)+[1])
    # Build RGB channels from phase-shifted sinusoids of the cycle.
    img = np.concatenate([10+20*np.cos(a_cyclic),
                          30+50*np.sin(a_cyclic),
                          155-80*np.cos(a_cyclic)], 2)
    # Points with the maximum count (never diverged) are drawn black.
    img[a==a.max()] = 0
    a = img
    a = np.uint8(np.clip(a, 0, 255))
    # Encode in-memory and hand off to IPython for inline display.
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
# Use NumPy to create a 2D array of complex numbers covering the region
# of the complex plane where the Mandelbrot set lives.
Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
Z = X+1j*Y
# xs holds the constant grid of starting points c; zs is the iterated value
# z; ns counts, per point, how many iterations passed before divergence.
xs = tf.constant(Z.astype(np.complex64))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, tf.float32))
# Operation to update the zs and the iteration count.
#
# Note: We keep computing zs after they diverge! This
# is very wasteful! There are better, if a little
# less simple, ways to do this.
#
for i in range(200):
    # Compute the new values of z: z^2 + x
    zs_ = zs*zs + xs
    # Have we diverged with this new value?
    not_diverged = tf.abs(zs_) < 4
    # BUG FIX: the original line ended with a stray trailing comma, which
    # wrapped the assign op in a throwaway 1-tuple; the comma is removed.
    zs.assign(zs_)
    # Count one more iteration for every point that has not yet diverged.
    ns.assign_add(tf.cast(not_diverged, tf.float32))
DisplayFractal(ns.numpy())
```
The Mandelbrot render is both simple and infinitely complex at the same time. This view shows the entire Mandelbrot universe at the same time, as it is completely zoomed out. However, if you zoom in on any non-black portion of the plot, you will find infinite hidden complexity.
### Introduction to Keras
[Keras](https://keras.io/) is a layer on top of Tensorflow that makes it much easier to create neural networks. Rather than define the graphs, as you see above, you set the individual layers of the network with a much more high-level API. Unless you are performing research into entirely new structures of deep neural networks, it is unlikely that you need to program TensorFlow directly.
**For this class, we will usually use TensorFlow through Keras, rather than direct TensorFlow**
### Simple TensorFlow Regression: MPG
This example shows how to encode the MPG dataset for regression. This dataset is slightly more complicated than Iris, because:
* Input has both numeric and categorical
* Input has missing values
This example uses functions defined above in this notepad, the "helpful functions". These functions allow you to build the feature vector for a neural network. Consider the following:
* Predictors/Inputs
* Fill any missing inputs with the median for that column. Use **missing_median**.
* Encode textual/categorical values with **encode_text_dummy**.
* Encode numeric values with **encode_numeric_zscore**.
* Output
* Discard rows with missing outputs.
* Encode textual/categorical values with **encode_text_index**.
* Do not encode output numeric values.
* Produce final feature vectors (x) and expected output (y) with **to_xy**.
To encode categorical values that are part of the feature vector, use the functions from above if the categorical value is the target (as was the case with Iris, use the same technique as Iris). The iris technique allows you to decode back to Iris text strings from the predictions.
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
import pandas as pd
import io
import os
import requests
import numpy as np
from sklearn import metrics
# Load the MPG dataset; 'NA' and '?' are parsed as missing values.
df = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/auto-mpg.csv",
    na_values=['NA', '?'])
# Keep the car names for labeling individual predictions later.
cars = df['name']
# Handle missing value: fill missing horsepower with the column median.
df['horsepower'] = df['horsepower'].fillna(df['horsepower'].median())
# Pandas to Numpy: feature matrix x and regression target y.
x = df[['cylinders', 'displacement', 'horsepower', 'weight',
        'acceleration', 'year', 'origin']].values
y = df['mpg'].values # regression
# Build the neural network: two ReLU hidden layers, one linear output.
model = Sequential()
model.add(Dense(25, input_dim=x.shape[1], activation='relu')) # Hidden 1
model.add(Dense(10, activation='relu')) # Hidden 2
model.add(Dense(1)) # Output
# MSE loss + Adam optimizer: the standard setup for regression.
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x,y,verbose=2,epochs=100)
```
### Introduction to Neural Network Hyperparameters
If you look at the above code, you will see that the neural network contains four layers. The first layer is the input layer because it contains the **input_dim** parameter that the programmer sets to be the number of inputs that the dataset has. The network needs one input neuron for every column in the data set (including dummy variables).
There are also several hidden layers, with 25 and 10 neurons each. You might be wondering how the programmer chose these numbers. Selecting a hidden neuron structure is one of the most common questions about neural networks. Unfortunately, there is not a right answer. These are hyperparameters. They are settings that can affect neural network performance, yet there are not a clearly defined means of setting them.
In general, more hidden neurons mean more capability to fit complex problems. However, too many neurons can lead to overfitting and lengthy training times. Too few can lead to underfitting the problem and will sacrifice accuracy. Also, how many layers you have is another hyperparameter. In general, more layers allow the neural network to be able to perform more of its feature engineering and data preprocessing. But this also comes at the expense of training times and the risk of overfitting. In general, you will see that neuron counts start larger near the input layer and tend to shrink towards the output layer in a sort of triangular fashion.
Some techniques use machine learning to optimize these values. These will be discussed in [Module 8.3](t81_558_class_08_3_keras_hyperparameters.ipynb).
### Controlling the Amount of Output
The program produces one line of output for each training epoch. You can eliminate this output by setting the verbose setting of the fit command:
* **verbose=0** - No progress output (use with Jupyter if you do not want output)
* **verbose=1** - Display progress bar, does not work well with Jupyter
* **verbose=2** - Summary progress output (use with Jupyter if you want to know the loss at each epoch)
### Regression Prediction
Next, we will perform actual predictions. The program assigns these predictions to the **pred** variable. These are all MPG predictions from the neural network. Notice that this is a 2D array? You can always see the dimensions of what Keras returns by printing out **pred.shape**. Neural networks can return multiple values, so the result is always an array. Here the neural network only returns one value per prediction (there are 398 cars, so 398 predictions). However, a 2D range is needed because the neural network has the potential of returning more than one value.
```
# Predict MPG for every row; Keras returns a 2D array (n_samples, 1).
pred = model.predict(x)
print(f"Shape: {pred.shape}")
print(pred[0:10])
```
We would like to see how good these predictions are. We know what the correct MPG is for each car, so we can measure how close the neural network was.
```
# Measure RMSE error. RMSE is common for regression.
# (MSE is symmetric in its two arguments, so the pred/y order is harmless.)
score = np.sqrt(metrics.mean_squared_error(pred,y))
print(f"Final score (RMSE): {score}")
```
The number printed above is the average amount that the predictions were above or below the expected output. We can also print out the first ten cars, with predictions and actual MPG.
```
# Sample predictions: the first ten cars with actual vs. predicted MPG.
# BUG FIX: the second string literal lacked the f prefix, so "{pred[i]}"
# was printed literally instead of the predicted value.
for i in range(10):
    print(f"{i+1}. Car name: {cars[i]}, MPG: {y[i]}, "
          + f"predicted MPG: {pred[i]}")
```
### Simple TensorFlow Classification: Iris
Classification is the process by which a neural network attempts to classify the input into one or more classes. The simplest way of evaluating a classification network is to track the percentage of training set items that were classified incorrectly. We typically score human results in this manner. For example, you might have taken multiple-choice exams in school in which you had to shade in a bubble for choices A, B, C, or D. If you chose the wrong letter on a 10-question exam, you would earn a 90%. In the same way, we can grade computers; however, most classification algorithms do not merely choose A, B, C, or D. Computers typically report a classification as their percent confidence in each class. Figure 3.EXAM shows how a computer and a human might both respond to question number 1 on an exam.
**Figure 3.EXAM: Classification Neural Network Output**

As you can see, the human test taker marked the first question as "B." However, the computer test taker had an 80% (0.8) confidence in "B" and was also somewhat sure with 10% (0.1) on "A." The computer then distributed the remaining points on the other two. In the simplest sense, the machine would get 80% of the score for this question if the correct answer were "B." The computer would get only 5% (0.05) of the points if the correct answer were "D."
What we just saw is a straightforward example of how to perform the Iris classification using TensorFlow. The iris.csv file is used, rather than using the built-in data that many of the Google examples require.
**Make sure that you always run previous code blocks. If you run the code block below, without the code block above, you will get errors**
```
import pandas as pd
import io
import requests
import numpy as np
from sklearn import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping
# Load the iris dataset; 'NA' and '?' are parsed as missing values.
df = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/iris.csv",
    na_values=['NA', '?'])
# Convert to numpy - Classification
x = df[['sepal_l', 'sepal_w', 'petal_l', 'petal_w']].values
# One-hot encode the target species for categorical crossentropy.
dummies = pd.get_dummies(df['species']) # Classification
species = dummies.columns
y = dummies.values
# Build neural network: softmax output yields one probability per class.
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu')) # Hidden 1
model.add(Dense(25, activation='relu')) # Hidden 2
model.add(Dense(y.shape[1],activation='softmax')) # Output
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(x,y,verbose=2,epochs=100)
# Print out number of species found:
print(species)
```
Now that you have a neural network training, we would like to be able to use it. The following code makes use of our neural network. Exactly like before, we will generate predictions. Notice that three values come back for each of the 150 iris flowers. There were three types of iris (Iris-setosa, Iris-versicolor, and Iris-virginica).
```
# Class-probability predictions: one row per flower, one column per species.
pred = model.predict(x)
print(f"Shape: {pred.shape}")
print(pred[0:10])
```
If you would like to turn off scientific notation, the following line can be used:
```
np.set_printoptions(suppress=True)
```
Now we see these values rounded up.
```
print(y[0:10])
```
Usually, the program considers the column with the highest prediction to be the prediction of the neural network. It is easy to convert the predictions to the expected iris species. The argmax function finds the index of the maximum prediction for each row.
```
# argmax over columns converts probability / one-hot rows to class indices.
predict_classes = np.argmax(pred,axis=1)
expected_classes = np.argmax(y,axis=1)
print(f"Predictions: {predict_classes}")
print(f"Expected: {expected_classes}")
```
Of course, it is straightforward to turn these indexes back into iris species. We use the species list that we created earlier.
```
print(species[predict_classes[1:10]])
```
Accuracy might be a more easily understood error metric. It is essentially a test score. For all of the iris predictions, what percent were correct? The downside is it does not consider how confident the neural network was in each prediction.
```
from sklearn.metrics import accuracy_score
# Fraction of flowers whose predicted class matches the expected class.
correct = accuracy_score(expected_classes,predict_classes)
print(f"Accuracy: {correct}")
```
The code below performs two ad hoc predictions. The first prediction is simply a single iris flower, and the second predicts two iris flowers. Notice that the argmax in the second prediction requires **axis=1**? Since we have a 2D array now, we must specify which axis to take the argmax over. The value **axis=1** specifies we want the max column index for each row.
```
# Ad hoc prediction for a single flower (2D input: one row, four features).
sample_flower = np.array( [[5.0,3.0,4.0,2.0]], dtype=float)
pred = model.predict(sample_flower)
print(pred)
# Only one row here, so a plain argmax (no axis argument) suffices.
pred = np.argmax(pred)
print(f"Predict that {sample_flower} is: {species[pred]}")
```
You can also predict two sample flowers.
```
# Predict two flowers at once; axis=1 takes the argmax within each row.
sample_flower = np.array( [[5.0,3.0,4.0,2.0],[5.2,3.5,1.5,0.8]], dtype=float)
pred = model.predict(sample_flower)
print(pred)
pred = np.argmax(pred,axis=1)
print(f"Predict that these two flowers {sample_flower} are: {species[pred]}")
```
| github_jupyter |
<center>
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DL0110EN-SkillsNetwork/Template/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
<h1>Linear Regression Multiple Outputs</h1>
<h2>Objective</h2><ul><li> How to create complicated models using PyTorch built-in functions.</li></ul>
<h2>Table of Contents</h2>
<p>In this lab, you will create a model the PyTorch way. This will help you build more complicated models.</p>
<ul>
<li><a href="https://#Makeup_Data">Make Some Data</a></li>
<li><a href="https://#Model_Cost">Create the Model and Cost Function the PyTorch way</a></li>
<li><a href="https://#BGD">Train the Model: Batch Gradient Descent</a></li>
</ul>
<p>Estimated Time Needed: <strong>20 min</strong></p>
<hr>
<h2>Preparation</h2>
We'll need the following libraries:
```
# Import the libraries we need for this lab
from torch import nn,optim
import torch
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from torch.utils.data import Dataset, DataLoader
```
Set the random seed:
```
# Set the random seed to 1 so the noisy synthetic dataset is reproducible.
torch.manual_seed(1)
```
Use this function for plotting:
```
# The function for plotting the data and the model's estimated plane in 3D.
def Plot_2D_Plane(model, dataset, n=0):
    """Scatter-plot *dataset* and draw the plane implied by *model*'s weights.

    model   -- a module whose state_dict has 'linear.weight' (1x2) and 'linear.bias'
    dataset -- an object with .x (N,2 tensor) and .y (N,1 tensor)
    n       -- iteration number shown in the plot title
    """
    w1 = model.state_dict()['linear.weight'].numpy()[0][0]
    w2 = model.state_dict()['linear.weight'].numpy()[0][1]
    b = model.state_dict()['linear.bias'].numpy()
    # Data -- BUG FIX: the original read the global `data_set` and silently
    # ignored its `dataset` argument; use the argument so the function plots
    # whatever dataset it is actually given.
    x1 = dataset.x[:, 0].view(-1, 1).numpy()
    x2 = dataset.x[:, 1].view(-1, 1).numpy()
    y = dataset.y.numpy()
    # Make plane: evaluate yhat = w1*X + w2*Y + b over a grid of the data range.
    X, Y = np.meshgrid(np.arange(x1.min(), x1.max(), 0.05), np.arange(x2.min(), x2.max(), 0.05))
    yhat = w1 * X + w2 * Y + b
    # Plotting -- fig.gca(projection='3d') was removed in Matplotlib 3.6;
    # add_subplot is the supported way to create a 3D axes.
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.plot(x1[:, 0], x2[:, 0], y[:, 0],'ro', label='y') # Scatter plot
    ax.plot_surface(X, Y, yhat) # Plane plot
    ax.set_xlabel('x1 ')
    ax.set_ylabel('x2 ')
    ax.set_zlabel('y')
    plt.title('estimated plane iteration:' + str(n))
    ax.legend()
    plt.show()
```
<!--Empty Space for separating topics-->
<h2 id=" #Makeup_Data" > Make Some Data </h2>
Create a dataset class with two-dimensional features:
```
# Synthetic 2D regression dataset: y = x1 + x2 + 1 + Gaussian noise.
class Data2D(Dataset):
    """20-sample dataset with two linearly ramped features and noisy targets."""

    # Constructor: build features, true plane parameters, and noisy targets.
    def __init__(self):
        # Both feature columns ramp from -1 to 1 (exclusive) in steps of 0.1.
        ramp = torch.arange(-1, 1, 0.1)
        self.x = torch.zeros(20, 2)
        self.x[:, 0] = ramp
        self.x[:, 1] = ramp
        # True parameters of the underlying plane.
        self.w = torch.tensor([[1.0], [1.0]])
        self.b = 1
        # Noise-free targets, then additive Gaussian noise (sigma = 0.1).
        self.f = torch.mm(self.x, self.w) + self.b
        self.y = self.f + 0.1 * torch.randn((self.x.shape[0], 1))
        self.len = self.x.shape[0]

    # Getter: return the (features, target) pair at the given index.
    def __getitem__(self, index):
        return self.x[index], self.y[index]

    # Get Length: number of samples.
    def __len__(self):
        return self.len
```
Create a dataset object:
```
# Create the dataset object used for training below.
data_set = Data2D()
```
<h2 id="Model_Cost">Create the Model, Optimizer, and Total Loss Function (Cost)</h2>
Create a customized linear regression module:
```
# A custom linear-regression module: a single nn.Linear layer.
class linear_regression(nn.Module):
    """Linear regression expressed as a one-layer PyTorch module."""

    # Constructor: input_size features in, output_size values out.
    def __init__(self, input_size, output_size):
        super(linear_regression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    # Prediction: yhat = x @ W.T + b.
    def forward(self, x):
        return self.linear(x)
```
Create a model. Use two features: make the input size 2 and the output size 1:
```
# Create the linear regression model (2 input features, 1 output) and
# print its randomly initialized parameters.
model = linear_regression(2,1)
print("The parameters: ", list(model.parameters()))
```
Create an optimizer object. Set the learning rate to 0.1. <b>Don't forget to enter the model parameters in the constructor.</b>
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter2/2.6.2paramater_hate.png" width = "100" alt="How the optimizer works" />
```
# Create the optimizer: plain SGD over the model's parameters, lr = 0.1.
optimizer = optim.SGD(model.parameters(), lr=0.1)
```
Create the criterion function that calculates the total loss or cost:
```
# Create the cost function: mean squared error for regression.
criterion = nn.MSELoss()
```
Create a data loader object. Set the batch_size equal to 2:
```
# Create the data loader; batches of 2 samples drive mini-batch SGD.
train_loader = DataLoader(dataset=data_set, batch_size=2)
```
<!--Empty Space for separating topics-->
<h2 id="BGD">Train the Model via Mini-Batch Gradient Descent</h2>
Run 100 epochs of Mini-Batch Gradient Descent and store the total loss or cost for every iteration. Remember that this is an approximation of the true total loss or cost:
```
# Train the model, recording the mini-batch loss at every iteration.
LOSS = []
print("Before Training: ")
Plot_2D_Plane(model, data_set)
epochs = 100
def train_model(epochs):
    """Run *epochs* passes of mini-batch gradient descent.

    Uses the module-level model, train_loader, criterion, optimizer and
    LOSS; each recorded value is the cost of a single mini-batch, so it
    is only an approximation of the total training loss.
    """
    for epoch in range(epochs):
        for x,y in train_loader:
            yhat = model(x)
            loss = criterion(yhat, y)
            LOSS.append(loss.item())
            # Standard PyTorch step: clear grads, backprop, update weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
train_model(epochs)
print("After Training: ")
Plot_2D_Plane(model, data_set, epochs)
# Plot out the Loss and iteration diagram
plt.plot(LOSS)
plt.xlabel("Iterations ")
plt.ylabel("Cost/total loss ")
```
<h3>Practice</h3>
Create a new <code>model1</code>. Train the model with a batch size 30 and learning rate 0.1, store the loss or total cost in a list <code>LOSS1</code>, and plot the results.
```
# Practice create model1. Train the model with batch size 30 and learning rate 0.1, store the loss in a list <code>LOSS1</code>. Plot the results.
# (Regenerating the dataset here draws fresh noise from the current RNG state.)
data_set = Data2D()
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
train_loader = DataLoader(dataset = data_set, batch_size = 30)
model1 = linear_regression(2, 1)
optimizer = optim.SGD(model1.parameters(), lr = 0.1)
LOSS1 = []
epochs = 100
def train_model(epochs):
for epoch in range(epochs):
for x,y in train_loader:
yhat = model1(x)
loss = criterion(yhat,y)
LOSS1.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_model(epochs)
Plot_2D_Plane(model1 , data_set)
plt.plot(LOSS1)
plt.xlabel("iterations ")
plt.ylabel("Cost/total loss ")
-->
Use the following validation data to calculate the total loss or cost for both models:
```
# Re-seed so the validation noise differs from the training data's noise.
torch.manual_seed(2)
validation_data = Data2D()
Y = validation_data.y
X = validation_data.x
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
print("total loss or cost for model: ",criterion(model(X),Y))
print("total loss or cost for model: ",criterion(model1(X),Y))
-->
<a href="https://dataplatform.cloud.ibm.com/registration/stepone?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0110ENSkillsNetwork20647811-2021-01-01&context=cpdaas&apps=data_science_experience%2Cwatson_machine_learning"><img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DL0110EN-SkillsNetwork/Template/module%201/images/Watson_Studio.png"/></a>
<!--Empty Space for separating topics-->
<h2>About the Authors:</h2>
<a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0110ENSkillsNetwork20647811-2021-01-01">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0110ENSkillsNetwork20647811-2021-01-01">Michelle Carey</a>, <a href="https://www.linkedin.com/in/jiahui-mavis-zhou-a4537814a?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0110ENSkillsNetwork20647811-2021-01-01">Mavis Zhou</a>
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ---------- | ----------------------------------------------------------- |
| 2020-09-23 | 2.0 | Shubham | Migrated Lab to Markdown and added to course repo in GitLab |
<hr>
## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
| github_jupyter |
# ROC Curves and AUC results
### Imports
```
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, auc
import pickle
import matplotlib.pyplot as plt
```
### Data import
```
# Ground-truth labels for the test set.
with open('data/y_test.pkl', 'rb') as f:
    y_test = pickle.load(f)
# Prediction scores: XGBoost plus four unsupervised link-prediction
# heuristics (resource allocation, Adamic-Adar, Jaccard, common neighbors).
with open('data/xgb_pred.pkl', 'rb') as f:
    xgb_pred = pickle.load(f)
with open('data/ra_pred.pkl', 'rb') as f:
    ra_pred = pickle.load(f)
with open('data/aa_pred.pkl', 'rb') as f:
    aa_pred = pickle.load(f)
with open('data/jc_pred.pkl', 'rb') as f:
    jc_pred = pickle.load(f)
with open('data/cn_pred.pkl', 'rb') as f:
    cn_pred = pickle.load(f)
# XGBoost feature importances; indexed later as fi_xgb[0] (names) and
# fi_xgb[1] (values). NOTE(review): exact pickle layout assumed -- confirm.
with open('data/fi.pkl', 'rb') as f:
    fi_xgb = pickle.load(f)
```
### Feature importance
#### Features description
1. $CN(u,v) = \mid\Gamma(u)\cap\Gamma(v)\mid$<br><br>
1. $JC(u,v) = \frac{\mid\Gamma(u)\cap\Gamma(v)\mid}{\mid\Gamma(u)\cup\Gamma(v)\mid}$<br><br>
1. $AA(u,v) = \sum\limits_{w \in \Gamma(u)\cap\Gamma(v)}\frac{1}{log(\mid\Gamma(w)\mid)}$<br><br>
1. $RA(u,v) = \sum\limits_{w \in \Gamma(u)\cap\Gamma(v)}\frac{1}{\mid\Gamma(w)\mid}$<br><br>
1. $PA(u,v) = \mid\Gamma(u)\mid \times \mid\Gamma(v)\mid$<br><br>
1. $AR(u,v) = \frac{2(ad-bc)}{(a+b)(b+d)+(a+c)(c+d)}$<br><br>
1. $ND(u,v) = \frac{\mid\Gamma(u)\cap\Gamma(v)\mid}{\sqrt{\mid\Gamma(u)\mid \times \mid\Gamma(v)\mid}}$<br><br>
1. $TN(u,v) = |\Gamma(u)\cup\Gamma(v)|$<br><br>
1. $UD = \mid\Gamma(u)\mid$<br><br>
1. $VD = \mid\Gamma(v)\mid$<br><br>
1. $SC(u,v) =\left\{
\begin{array}{ll}
1 & \quad if \quad u \quad and \quad v \quad \in \quad same \quad community \\
0 & \quad otherwise
\end{array}
\right.
$<br><br>
*$\Gamma(u) =$ {neighbours of u}<br><br>
```
import matplotlib.pyplot as plt
from matplotlib import font_manager
class PlotStyle:
    """Cdiscount brand palette plus a helper that applies the matching
    matplotlib style via rcParams."""
    PRIMARY = '#293847'
    SECONDARY = '#E23B13'
    TERTIARY = '#F3F2E0'
    COLORS = [PRIMARY, SECONDARY, TERTIARY]
    BLUE_1 = '#476079'
    BLUE_2 = '#6F93B4'
    BLUE_3 = '#92B6CC'

    @classmethod
    def cdiscount(cls):
        """
        Turns on `cdiscount <https://cdiscount.com/>`_ style drawing mode.
        This will only have effect on things drawn after this function is
        called.
        For best results, the "Montserrat" font should be installed: it is
        not included with matplotlib.
        Notes
        -----
        This function works by a number of rcParams, so it will probably
        override others you have set before.
        If you want the effects of this function to be temporary, it can
        be used as a context manager, for example::
            with PlotStyle.cdiscount():
                # This figure will be in Cdiscount-style
                fig1 = plt.figure()
                # ...
            # This figure will be in regular style
            fig2 = plt.figure()
        """
        if plt.rcParams['text.usetex']:
            raise RuntimeError(
                "cdiscount mode is not compatible with text.usetex = True")
        # BUG FIX: font_manager._rebuild() was a private API removed in
        # Matplotlib 3.4; guard it so newer versions do not crash here.
        try:
            font_manager._rebuild()
        except AttributeError:
            pass
        cdiscount_ctx = plt.rc_context({
            'font.family': ['Montserrat', 'Arial', 'DejaVu Sans'],
            'font.serif': ['Montserrat', 'serif', 'DejaVu Serif'],
            'font.size': 11.0,
            'lines.linewidth': 2.0,
            'figure.facecolor': 'white',
            'grid.linewidth': 1.0,
            'grid.color': '#F0F0F0',
            'axes.grid': True,
            'axes.unicode_minus': False,
            'axes.edgecolor': cls.PRIMARY,
            'axes.labelcolor': cls.PRIMARY,
            'axes.facecolor': 'white',
            'axes.linewidth': 1.5,
            'axes.axisbelow': True,
            'xtick.major.size': 8,
            'xtick.major.width': 3,
            'xtick.color': cls.PRIMARY,
            'ytick.color': cls.PRIMARY,
            'ytick.major.size': 8,
            'ytick.major.width': 3,
        })
        cdiscount_ctx.__enter__()
        # In order to make the call to `cdiscount` that does not use a context manager
        # (cm) work, we need to enter into the cm ourselves, and return a dummy
        # cm that does nothing on entry and cleans up the cdiscount context on exit.
        # Additionally, we need to keep a reference to the dummy cm because it
        # would otherwise be exited when GC'd.
        class dummy_ctx(object):
            def __enter__(self):
                pass
            __exit__ = cdiscount_ctx.__exit__
        return dummy_ctx()
# Unpack (names, values) and sort features by decreasing importance.
importance = fi_xgb[1]
features = fi_xgb[0]
indices = np.argsort(fi_xgb[1])[::-1]
with PlotStyle.cdiscount():
    fig=plt.figure(figsize=(13,8))
    plt.bar(range(fi_xgb.shape[1]),height=importance[indices], align='center',color='r')
    plt.title('Features importance (XGBoost)')
    plt.xticks(range(fi_xgb.shape[1]), features[indices])
    plt.xlabel('Features')
    plt.ylabel('Importance')
    plt.ylim((0, 0.35))
    fig.savefig('feature_importance_xgb.png')
    plt.show()
```
### Accuracy summary
```
# ROC curve and AUC per scorer. xgb_pred holds class probabilities, so
# column 1 (positive class) is used; the heuristic scores are used as-is.
fpr_xgb, tpr_xgb, threshold = roc_curve(y_test, xgb_pred[:,1])
roc_auc_xgb = auc(fpr_xgb, tpr_xgb)
fpr_ra, tpr_ra, threshold = roc_curve(y_test, ra_pred)
roc_auc_ra = auc(fpr_ra, tpr_ra)
fpr_aa, tpr_aa, threshold = roc_curve(y_test, aa_pred)
roc_auc_aa = auc(fpr_aa, tpr_aa)
fpr_jc, tpr_jc, threshold = roc_curve(y_test, jc_pred)
roc_auc_jc = auc(fpr_jc, tpr_jc)
fpr_cn, tpr_cn, threshold = roc_curve(y_test, cn_pred)
roc_auc_cn = auc(fpr_cn, tpr_cn)
with PlotStyle.cdiscount():
    fig=plt.figure(figsize=(13,8))
    plt.title('ROC Curves : XGBoost vs Unsupervised methods')
    plt.plot(fpr_xgb, tpr_xgb, 'r', label = 'XGBoost (AUC = %0.2f)' % roc_auc_xgb)
    plt.plot(fpr_ra, tpr_ra, 'g', label = 'Resource Allocation (AUC = %0.2f)' % roc_auc_ra)
    plt.plot(fpr_aa, tpr_aa, 'xkcd:aqua blue', label = 'Adamic-Adar Index (AUC = %0.2f)' % roc_auc_aa)
    plt.plot(fpr_jc, tpr_jc, 'b', label = 'Jaccard Coefficient (AUC = %0.2f)' % roc_auc_jc)
    plt.plot(fpr_cn, tpr_cn, 'y', label = 'Common Neighbors (AUC = %0.2f)' % roc_auc_cn)
    # Diagonal = performance of a random classifier (AUC 0.5).
    plt.plot([0, 1], [0, 1], color='navy', linestyle='--', label='Random score (AUC = 0.50)')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.legend(loc = 'lower right')
    fig.savefig('roc_curves.png')
    plt.show()
```
| github_jupyter |
```
import tqdm
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.losses import *
from tensorflow.keras.optimizers import *
from tensorflow.keras import backend as K
### PLOT ONE SAMPLE OF IMAGE IN THE DATA ###
# Load one example triplet (photo, dress mask, body mask), resize each to the
# 224x224 network input size, and display them side by side.
# NOTE(review): root_path is assumed to be defined in an earlier notebook cell.
original = cv.imread(root_path+'multiclass_segmentation/original77.jpg')
original = cv.resize(original,(224,224))
dress = cv.imread(root_path+'multiclass_segmentation/dress77.jpg')
dress = cv.resize(dress,(224,224))
body = cv.imread(root_path+'multiclass_segmentation/body77.jpg')
body = cv.resize(body,(224,224))
plt.figure(figsize=(16,8))
plt.subplot(1,3,1)
plt.title('Original')
# OpenCV loads BGR; convert so matplotlib shows correct colours.
plt.imshow(cv.cvtColor(original, cv.COLOR_BGRA2RGB))
plt.subplot(1,3,2)
plt.title('Person')
plt.imshow(cv.cvtColor(body, cv.COLOR_BGRA2RGB))
plt.subplot(1,3,3)
plt.title('Dress')
plt.imshow(cv.cvtColor(dress, cv.COLOR_BGRA2RGB))
# Reload the two masks as single-channel grayscale (flag 0) for thresholding.
dress = cv.imread(root_path+'multiclass_segmentation/dress77.jpg',0)
body = cv.imread(root_path+'multiclass_segmentation/body77.jpg',0)
### ENCODE DRESS ###
# Binarize: pure white (255) is treated as background and cleared first,
# then every remaining non-zero pixel becomes foreground (255).
dress[dress == 255] = 0
dress[dress > 0] = 255
dress = cv.resize(dress,(224,224))
### ENCODE BODY ###
body[body == 255] = 0
body[body > 0] = 255
body = cv.resize(body,(224,224))
### ENCODE SKIN ###
# Skin = body pixels that are not dress pixels.
# NOTE(review): uint8 subtraction wraps around where dress is set but body
# is not — assumes the dress mask is contained in the body mask; verify.
skin = body - dress
plt.figure(figsize=(16,8))
plt.subplot(1,3,1)
plt.title('Person/Background')
# Invert and scale each mask to [0, 1] (region of interest -> 0).
bg = (255 - body)/255
plt.imshow(bg)
plt.subplot(1,3,2)
plt.title('Skin')
skin = (255 - skin)/255
plt.imshow(skin)
plt.subplot(1,3,3)
plt.title('Dress')
dress = (255 - dress)/255
plt.imshow(dress)
### COMBINE BACKGROUND, SKIN, DRESS ###
# Stack the three masks into a 3-channel ground-truth target:
# channel 0 = skin, channel 1 = dress, channel 2 = background.
gt = np.zeros((224,224,3))
gt[:,:,0] = (1-skin)
gt[:,:,1] = (1-dress)
gt[:,:,2] = bg
plt.figure(figsize=(6,6))
plt.imshow(gt)
### ENCODE BACKGROUND, SKIN, DRESS FOR ALL TRAIN IMAGES ###
# Build the training arrays: RGB inputs, 3-channel ground-truth masks, and a
# running sum used to compute the per-pixel mean image for preprocessing.
images_original = []
images_gt = []
mean = np.zeros((224,224,3))
n_img = 81  # number of training triplets on disk
for i in tqdm.tqdm(range(1,n_img+1)):
    original = cv.imread(root_path+'data/original/original'+str(i)+'.jpg')
    original = cv.resize(original,(224,224))
    images_original.append(original)
    # Accumulate per-channel pixel sums; divided by n_img after the loop.
    mean[:,:,0]=mean[:,:,0]+original[:,:,0]
    mean[:,:,1]=mean[:,:,1]+original[:,:,1]
    mean[:,:,2]=mean[:,:,2]+original[:,:,2]
    body = cv.imread(root_path+'data/body/body'+str(i)+'.jpg',0)
    dress = cv.imread(root_path+'data/dress/dress'+str(i)+'.jpg',0)
    # Same binarize / invert / combine encoding as demonstrated on the
    # single sample above.
    dress[dress == 255] = 0
    dress[dress > 0] = 255
    dress = cv.resize(dress,(224,224))
    body[body == 255] = 0
    body[body > 0] = 255
    body = cv.resize(body,(224,224))
    skin = body - dress
    bg = (255 - body)/255
    skin = (255 - skin)/255
    dress = (255 - dress)/255
    gt = np.zeros((224,224,3))
    gt[:,:,0] = (1-skin)
    gt[:,:,1] = (1-dress)
    gt[:,:,2] = bg
    images_gt.append(gt)
# Finish the mean image and give it a leading batch axis for broadcasting.
mean = mean / n_img
mean = mean.reshape((-1,224,224,3))
images_original = np.asarray(images_original)
images_gt = np.asarray(images_gt)
def get_unet(mean_pixels, do_rate=0):
    """Build and compile a small U-Net for 3-channel per-pixel prediction.

    Arguments:
    mean_pixels -- per-pixel mean image (broadcastable to the input) that is
                   subtracted from the input as preprocessing
    do_rate -- dropout rate applied after every convolution (0 disables it)

    Returns:
    A compiled Keras model mapping an RGB image (any spatial size divisible
    by 16, given the four 2x2 poolings) to a 3-channel sigmoid output.
    """
    def conv_pair(filters, x):
        # Two 3x3 same-padded ReLU convolutions, each followed by dropout.
        x = Dropout(do_rate)(Conv2D(filters, (3, 3), padding='same', activation='relu')(x))
        return Dropout(do_rate)(Conv2D(filters, (3, 3), padding='same', activation='relu')(x))

    def up_merge(filters, x, skip):
        # Upsample via 2x2 transposed convolution and concatenate the
        # matching encoder feature map (U-Net skip connection).
        return concatenate([Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x), skip], axis=3)

    inputs = Input((None, None, 3))
    # Center the input by subtracting the dataset mean computed at train time.
    preproc = Lambda(lambda x: (x - tf.constant(mean_pixels, dtype=tf.float32)))(inputs)

    # Encoder: double-conv blocks separated by 2x2 max pooling.
    conv1 = conv_pair(32, preproc)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_pair(64, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_pair(128, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_pair(256, pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    # Bottleneck.
    conv5 = conv_pair(512, pool4)

    # Decoder: upsample, merge the skip connection, then double-conv.
    conv6 = conv_pair(256, up_merge(256, conv5, conv4))
    conv7 = conv_pair(128, up_merge(128, conv6, conv3))
    conv8 = conv_pair(64, up_merge(64, conv7, conv2))
    conv9 = conv_pair(32, up_merge(32, conv8, conv1))

    # Per-pixel 3-channel output (skin / dress / background encoding above).
    conv10 = Conv2D(3, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    # 'lr' is a deprecated alias removed in Keras 3; use 'learning_rate'.
    model.compile(optimizer=Adam(learning_rate=1e-3), loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Build the network with the dataset mean image, train on the encoded
# arrays, and persist the weights for later inference.
model = get_unet(mean)
model.fit(images_original, images_gt, epochs=120)
### SAVE TRAINED MODEL ###
model.save(root_path+'trained_models/fashion_unet.h5')
```
| github_jupyter |
# Optimization Methods
Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
<img src="images/cost.jpg" style="width:650px;height:300px;">
<caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
**Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
To get started, run the following code to import the libraries you will need.
### <font color='darkblue'> Updates to Assignment <font>
#### If you were working on a previous version
* The current notebook filename is version "Optimization_methods_v1b".
* You can find your work in the file directory as version "Optimization methods".
* To see the file directory, click on the Coursera logo at the top left of the notebook.
#### List of Updates
* op_utils is now opt_utils_v1a. Assertion statement in `initialize_parameters` is fixed.
* opt_utils_v1a: `compute_cost` function now accumulates total cost of the batch without taking the average (average is taken for entire epoch instead).
* In `model` function, the total cost per mini-batch is accumulated, and the average of the entire epoch is taken as the average cost. So the plot of the cost function over time is now a smooth downward curve instead of an oscillating curve.
* Print statements used to check each function are reformatted, and "expected output" is reformatted to match the format of the print statements (for easier visual comparisons).
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
```
## 1 - Gradient Descent
A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
**Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Apply one vanilla gradient-descent step to every layer's weights and biases.

    Arguments:
    parameters -- dict mapping "W1".."WL" / "b1".."bL" to numpy arrays
    grads -- dict mapping "dW1".."dWL" / "db1".."dbL" to matching gradients
    learning_rate -- step size, scalar

    Returns:
    parameters -- the dict with every entry stepped opposite its gradient
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    # Layers are 1-indexed ("W1", "b1", ...), so iterate 1..L directly.
    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["dW" + str(layer)]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["db" + str(layer)]

    return parameters
# Sanity check: run one GD step on the course-provided test case and print
# the updated parameters for comparison against the Expected Output below.
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
```
**Expected Output**:
```
W1 =
[[ 1.63535156 -0.62320365 -0.53718766]
[-1.07799357 0.85639907 -2.29470142]]
b1 =
[[ 1.74604067]
[-0.75184921]]
W2 =
[[ 0.32171798 -0.25467393 1.46902454]
[-2.05617317 -0.31554548 -0.3756023 ]
[ 1.1404819 -1.09976462 -0.1612551 ]]
b2 =
[[-0.88020257]
[ 0.02561572]
[ 0.57539477]]
```
A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
- **(Batch) Gradient Descent**:
``` python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
# Forward propagation
a, caches = forward_propagation(X, parameters)
# Compute cost.
cost += compute_cost(a, Y)
# Backward propagation.
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
- **Stochastic Gradient Descent**:
```python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
for j in range(0, m):
# Forward propagation
a, caches = forward_propagation(X[:,j], parameters)
# Compute cost
cost += compute_cost(a, Y[:,j])
# Backward propagation
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
<img src="images/kiank_sgd.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
**Note** also that implementing SGD requires 3 for-loops in total:
1. Over the number of iterations
2. Over the $m$ training examples
3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
In practice, you'll often get faster results if you use neither the whole training set nor only one training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
<img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
<font color='blue'>
**What you should remember**:
- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
- You have to tune a learning rate hyperparameter $\alpha$.
- With a well-tuned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
## 2 - Mini-Batch Gradient descent
Let's learn how to build mini-batches from the training set (X, Y).
There are two steps:
- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
<img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
<img src="images/kiank_partition.png" style="width:550px;height:300px;">
**Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
```python
first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
...
```
Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represents $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$).
```
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Build a list of random, synchronized (mini_batch_X, mini_batch_Y) pairs.

    Arguments:
    X -- input data, shape (input size, number of examples)
    Y -- label vector (1 blue / 0 red), shape (1, number of examples)
    mini_batch_size -- examples per mini-batch, integer
    seed -- RNG seed so shuffles are reproducible across runs

    Returns:
    mini_batches -- list of (mini_batch_X, mini_batch_Y) tuples; the last
                    batch may hold fewer than mini_batch_size examples
    """
    np.random.seed(seed)  # make the "random" minibatches reproducible
    m = X.shape[1]        # number of training examples

    # Shuffle the columns of X and Y with one shared permutation so example
    # i in X still lines up with label i in Y.
    order = list(np.random.permutation(m))
    X_shuf = X[:, order]
    Y_shuf = Y[:, order].reshape((1, m))

    # Slice consecutive chunks off the shuffled data; the stepped range
    # naturally yields a short final chunk when m % mini_batch_size != 0.
    batches = []
    for start in range(0, m, mini_batch_size):
        stop = start + mini_batch_size
        batches.append((X_shuf[:, start:stop], Y_shuf[:, start:stop]))

    return batches
# Sanity check the partitioning on the course-provided test case: with
# m = 148 and mini_batch_size = 64 we expect two full batches plus one of 20.
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td > **shape of the 1st mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_X** </td>
<td > (12288, 20) </td>
</tr>
<tr>
<td > **shape of the 1st mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_Y** </td>
<td > (1, 20) </td>
</tr>
<tr>
<td > **mini batch sanity check** </td>
<td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
</tr>
</table>
<font color='blue'>
**What you should remember**:
- Shuffling and Partitioning are the two steps required to build mini-batches
- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
## 3 - Momentum
Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
<img src="images/opt_momentum.png" style="width:400px;height:250px;">
<caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
**Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
for $l =1,...,L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
**Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
```
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Create the momentum "velocity" dictionary, one zero array per gradient.

    Arguments:
    parameters -- dict with entries "W1".."WL" and "b1".."bL"

    Returns:
    v -- dict with entries "dW1".."dWL" and "db1".."dbL", each a zero array
         shaped like the corresponding parameter
    """
    num_layers = len(parameters) // 2  # each layer contributes a W and a b

    # One zero array per gradient key, mirroring the parameter shapes.
    v = {
        "d" + name + str(layer): np.zeros_like(parameters[name + str(layer)])
        for layer in range(1, num_layers + 1)
        for name in ("W", "b")
    }
    return v
# Sanity check: velocities must be all-zero arrays matching the parameter
# shapes from the course-provided test case.
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
```
**Expected Output**:
```
v["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db1"] =
[[ 0.]
[ 0.]]
v["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
```
**Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
$$ \begin{cases}
v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
\end{cases}\tag{3}$$
$$\begin{cases}
v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
\end{cases}\tag{4}$$
where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Perform one momentum update on every layer's parameters.

    The velocity is an exponentially weighted average of past gradients:
        v = beta * v + (1 - beta) * grad
        param = param - learning_rate * v

    Arguments:
    parameters -- dict with "W1".."WL" / "b1".."bL"
    grads -- dict with "dW1".."dWL" / "db1".."dbL"
    v -- dict of current velocities, same keys as grads
    beta -- momentum coefficient, scalar
    learning_rate -- step size, scalar

    Returns:
    parameters -- dict of updated parameters
    v -- dict of updated velocities
    """
    num_layers = len(parameters) // 2

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            g_key = "d" + prefix + str(layer)
            p_key = prefix + str(layer)
            # Blend the previous velocity with the fresh gradient ...
            v[g_key] = beta * v[g_key] + (1 - beta) * grads[g_key]
            # ... then step the parameter along the smoothed direction.
            parameters[p_key] = parameters[p_key] - learning_rate * v[g_key]

    return parameters, v
# Sanity check one momentum step against the Expected Output below.
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
# NOTE(review): the stray 'v' before the value below looks like a typo, but
# the course's Expected Output reproduces it, so it is left unchanged.
print("v[\"db2\"] = v" + str(v["db2"]))
```
**Expected Output**:
```
W1 =
[[ 1.62544598 -0.61290114 -0.52907334]
[-1.07347112 0.86450677 -2.30085497]]
b1 =
[[ 1.74493465]
[-0.76027113]]
W2 =
[[ 0.31930698 -0.24990073 1.4627996 ]
[-2.05974396 -0.32173003 -0.38320915]
[ 1.13444069 -1.0998786 -0.1713109 ]]
b2 =
[[-0.87809283]
[ 0.04055394]
[ 0.58207317]]
v["dW1"] =
[[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]]
v["db1"] =
[[-0.01228902]
[-0.09357694]]
v["dW2"] =
[[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]]
v["db2"] = v[[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]
```
**Note** that:
- The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
- If $\beta = 0$, then this just becomes standard gradient descent without momentum.
**How do you choose $\beta$?**
- The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much.
- Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
- Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$.
<font color='blue'>
**What you should remember**:
- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
## 4 - Adam
Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
**How does Adam work?**
1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
3. It updates parameters in a direction based on combining information from "1" and "2".
The update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
\end{cases}$$
where:
- t counts the number of steps taken of Adam
- L is the number of layers
- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
- $\alpha$ is the learning rate
- $\varepsilon$ is a very small number to avoid dividing by zero
As usual, we will store all parameters in the `parameters` dictionary
**Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
for $l = 1, ..., L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
```
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
    """
    Create Adam's two moment dictionaries, one zero array per gradient.

    Arguments:
    parameters -- dict with entries "W1".."WL" and "b1".."bL"

    Returns:
    v -- dict of zeros for the exponentially weighted average gradient
    s -- dict of zeros for the exponentially weighted average squared gradient
         (both keyed "dW1".."dWL" / "db1".."dbL")
    """
    num_layers = len(parameters) // 2  # each layer contributes a W and a b

    v = {}
    s = {}
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            template = parameters[prefix + str(layer)]
            # First and second moment estimates both start at zero.
            v["d" + prefix + str(layer)] = np.zeros_like(template)
            s["d" + prefix + str(layer)] = np.zeros_like(template)

    return v, s
# Sanity check: both Adam moment dictionaries must be all-zero arrays
# matching the parameter shapes from the course-provided test case.
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
```
**Expected Output**:
```
v["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db1"] =
[[ 0.]
[ 0.]]
v["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
s["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
s["db1"] =
[[ 0.]
[ 0.]]
s["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
s["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
```
**Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
\end{cases}$$
**Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_adam

def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam time step (number of updates performed so far, starting at 1);
         used in the bias-correction factors 1 - beta1**t and 1 - beta2**t
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """

    L = len(parameters) // 2                 # number of layers in the neural networks
    v_corrected = {}                         # bias-corrected first moment estimate
    s_corrected = {}                         # bias-corrected second moment estimate

    # Perform Adam update on all parameters
    for l in range(L):
        # Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
        v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1) * grads['dW' + str(l+1)]
        v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1) * grads['db' + str(l+1)]

        # Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
        v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1**t)
        v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1**t)

        # Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
        # (removed a stray duplicated '+' operator that was present in the original)
        s["dW" + str(l+1)] = beta2 * s["dW" + str(l+1)] + (1 - beta2) * (grads['dW' + str(l+1)]**2)
        s["db" + str(l+1)] = beta2 * s["db" + str(l+1)] + (1 - beta2) * (grads['db' + str(l+1)]**2)

        # Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
        s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2**t)
        s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2**t)

        # Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon".
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - (learning_rate * v_corrected["dW" + str(l+1)]) / (np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon)
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - (learning_rate * v_corrected["db" + str(l+1)]) / (np.sqrt(s_corrected["db" + str(l+1)]) + epsilon)

    return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
```
**Expected Output**:
```
W1 =
[[ 1.63178673 -0.61919778 -0.53561312]
[-1.08040999 0.85796626 -2.29409733]]
b1 =
[[ 1.75225313]
[-0.75376553]]
W2 =
[[ 0.32648046 -0.25681174 1.46954931]
[-2.05269934 -0.31497584 -0.37661299]
[ 1.14121081 -1.09245036 -0.16498684]]
b2 =
[[-0.88529978]
[ 0.03477238]
[ 0.57537385]]
v["dW1"] =
[[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]]
v["db1"] =
[[-0.01228902]
[-0.09357694]]
v["dW2"] =
[[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]]
v["db2"] =
[[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]
s["dW1"] =
[[ 0.00121136 0.00131039 0.00081287]
[ 0.0002525 0.00081154 0.00046748]]
s["db1"] =
[[ 1.51020075e-05]
[ 8.75664434e-04]]
s["dW2"] =
[[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
[ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
[ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]
s["db2"] =
[[ 5.49507194e-05]
[ 2.75494327e-03]
[ 5.50629536e-04]]
```
You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
## 5 - Model with different optimization algorithms
Lets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
```
train_X, train_Y = load_dataset()
```
We have already implemented a 3-layer neural network. You will train it with:
- Mini-batch **Gradient Descent**: it will call your function:
- `update_parameters_with_gd()`
- Mini-batch **Momentum**: it will call your functions:
- `initialize_velocity()` and `update_parameters_with_momentum()`
- Mini-batch **Adam**: it will call your functions:
- `initialize_adam()` and `update_parameters_with_adam()`
```
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum" or "adam"; selects the parameter update rule
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters
    """

    costs = []                       # cost history, sampled every 100 epochs for plotting
    t = 0                            # Adam time step; must start at 0 and grow by 1 per update
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]                   # number of training examples

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state. Fail fast on an unknown optimizer name:
    # previously an unrecognized value silently skipped every parameter update.
    if optimizer == "gd":
        pass  # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)
    else:
        raise ValueError("Unknown optimizer: {!r}. Expected 'gd', 'momentum' or 'adam'.".format(optimizer))

    # Optimization loop
    for i in range(num_epochs):

        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0

        for minibatch in minibatches:

            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch

            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)

            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)

            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # Update parameters with the selected optimizer
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam counter: bias correction requires t >= 1
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)
        cost_avg = cost_total / m

        # Print the cost every 1000 epochs; record it every 100 for the learning curve
        if print_cost and i % 1000 == 0:
            print("Cost after epoch %i: %f" % (i, cost_avg))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
```
You will now run this 3 layer neural network with each of the 3 optimization methods.
### 5.1 - Mini-batch Gradient descent
Run the following code to see how the model does with mini-batch gradient descent.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.2 - Mini-batch gradient descent with momentum
Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momemtum are small; but for more complex problems you might see bigger gains.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.3 - Mini-batch with Adam mode
Run the following code to see how the model does with Adam.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.4 - Summary
<table>
<tr>
<td>
**optimization method**
</td>
<td>
**accuracy**
</td>
<td>
**cost shape**
</td>
</tr>
<td>
Gradient descent
</td>
<td>
79.7%
</td>
<td>
oscillations
</td>
<tr>
<td>
Momentum
</td>
<td>
79.7%
</td>
<td>
oscillations
</td>
</tr>
<tr>
<td>
Adam
</td>
<td>
94%
</td>
<td>
smoother
</td>
</tr>
</table>
Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
Adam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
Some advantages of Adam include:
- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
- Usually works well even with little tuning of hyperparameters (except $\alpha$)
**References**:
- Adam paper: https://arxiv.org/pdf/1412.6980.pdf
| github_jupyter |
# Create and edit synsets
This demo shows how to create and edit synsets & relations.
The building block of a WordNet is the synset, which expresses a unique concept. A synset (a synonym set) contains, as the name implies, a number of synonym words known as literals. A synset has further properties, such as a definition and links to other synsets. Synsets also have a part-of-speech (pos) that groups them into four categories: nouns, verbs, adverbs and adjectives. Synsets are interlinked by semantic relations such as hypernymy ("is-a"), meronymy ("is-part"), antonymy, and others.
Let's start by creating a synset.
```
from rowordnet import Synset
id = "my_id"
synset = Synset(id)
```
Print the synset.
```
print(synset)
```
Each synset belongs to a part of speech: NOUN, VERB, ADVERB, ADJECTIVE. We can set it by using the property ``pos`` and provide a member of the enumeration Pos.
```
# set a pos of type verb
pos = Synset.Pos.VERB
synset.pos = pos
```
Just as we modified the ``pos`` property, we can modify any other property by simply accessing it.
As you can see, this synset has no literals. A literal is a word(synonym) that belongs to this synset. We can add a new literal with the ``add_literal()`` function.
```
literal = "tigru"
synset.add_literal(literal)
print("Number of literals for synset with id '{}': {}".format(synset.id, len(synset.literals)))
```
We can also remove the previous literal from the synset.
```
synset.remove_literal(literal)
print("Number of literals for synset with id '{}': {}".format(synset.id, len(synset.literals)))
```
To generate an id, it is recommended to use the built-in function ``generate_synset_id`` of the RoWordNet class. You can use the default prefix and suffix, or you can provide your own.
```
import rowordnet
# generate a new id with default prefix and suffix
wn = rowordnet.RoWordNet()
id = wn.generate_synset_id()
print("New id '{}' generated with default prefix 'ENG30-' and suffix '-n'".format(id))
# generate a new id with custom prefix and suffix
prefix = 'ENG31-'
suffix = '-v'
new_id = wn.generate_synset_id(prefix=prefix, suffix=suffix)
print("New id '{}' generated with prefix '{}' and suffix '{}'".format(new_id, prefix, suffix))
# create a synset with previous id
synset = Synset(id)
```
Add the previous synset to the wordnet.
```
wn.add_synset(synset)
```
Whenever you want to modify the literals of a synset inside the wordnet you have to use the function ``reindex_literals``, so the literals have a correct internal representation. The following cell will contain functions from the 'Basic ops with a wordnet' tutorial. If you find it difficult to understand, try to read it and come back afterwards.
```
# create a new literal
literal = 'iepure'
sense = '1'
# get a synset
synset_id = wn.synsets()[0]
synset = wn(synset_id)
# add a new literal to wordnet
synset.add_literal(literal, sense)
# tell the rowordnet that synsets's literals have been changed.
wn.reindex_literals()
print("Added literal with literal '{}' and sense '{}' to the synset '{}'. "
"Number of synsets containing literal '{}': {}"
.format(literal, sense, synset.id, literal, len(wn.synsets(literal))))
# remove the previous literal from synset.
synset.remove_literal(literal)
# again, we have to tell the rowordnet that synset's literals have been changed.
wn.reindex_literals()
print("Removed literal with literal '{}' from the synset '{}'. Number of synsets containing literal '{}': {}"
.format(literal, synset.id, literal, len(wn.synsets(literal))))
```
Synsets are linked by relations, encoded as directed edges in a graph. We can add/remove relations between synsets by using the ``add_relation`` and ``remove_relation`` functions. You have to provide, as parameters, the ids of the synsets and the type of relation between them.
```
# generate a new synset
prefix = 'ENG31-'
suffix = '-n'
new_id = wn.generate_synset_id(prefix, suffix)
new_synset = Synset(new_id)
wn.add_synset(new_synset)
print("Added new synset with id '{}' to the rowordnet".format(new_synset.id))
# add a relation of type 'hypernym' from 'synset' to 'new_synset'
relation = 'hypernym'
wn.add_relation(synset.id, new_synset.id, relation)
print("Added '{}' relation from synset with id '{}' to synset with id '{}'"
.format(relation, synset.id, new_synset.id))
# remove relation of type 'hypernym' from 'synset' to 'new_synset'
wn.remove_relation(synset.id, new_synset.id)
print("Removed relation from synset with id '{}' to synset with id '{}'".format(synset.id, new_synset.id))
```
| github_jupyter |
# Steady-State Growth and Optimal Resource Allocation
Griffin Chure - January 26, 2021
```
import numpy as np
import pandas as pd
import altair as alt
import sympy as sp
import panel as pn
import param
import scipy.stats
import diaux.viz
pn.extension('vega')
colors, palette = diaux.viz.altair_style()
```
In this notebook, I derive and explore a simple model which describes steady-state balanced bacterial growth on a single nutrient (i.e. carbon) source.
## Protein Synthesis
We begin by considering the influence of peptide bond formation (translation or *protein synthesis*) as the most resource-intensive process involved in bacterial growth. In balanced, exponential steady-state, the cellular composition at any point in time remains the same, with the synthesis of new proteins being balanced by dilution via cell division. In this mode of growth, the total protein mass of the cell $M$ accumulates at a rate proportional to the cellular growth rate $\lambda$,
$$
\frac{dM}{dt} = \lambda M. \tag{1}
$$
Assuming we have an unlimited pool of precursors (meaning, tRNAs charged with amino acids) at our disposal, we can write Equation 1 in terms of the total number of translating ribosomes $N_R^{(active)}$ as
$$
\frac{dM}{dt} = N_R^{(active)} k_R, \tag{2}
$$
where the rate $k_R$ is the effective translation rate per active ribosome with dimensions of [MT$^{-1}$]. Here, we cast this in terms of *actively translating ribosomes*, but that does not reflect the *total* number of ribosomes. There will always exist some minimum number of ribosomes $N_R^\text{(inactive)}$ that are not actively translating. Together, the sum of the actively translating and inactive ribosome pools yields the total ribosome copy number,
$$
N_R = N_R^\text{(active)} + N_R^\text{(inactive)}. \tag{3}
$$
To speak in terms of total ribosome copy number $N_R$, we can restate Equation 2 using the conservation of mass in Equation 3 to yield
$$
\frac{dM}{dt} = (N_R - N_R^\text{(inactive)}) k_R. \tag{4}.
$$
By Equation 1, we can relate this to the growth rate $\lambda$ by simple rearrangement to yield
$$
\lambda = \frac{1}{M}\frac{dM}{dt} = \frac{\left(N_R - N_R^\text{(inactive)}\right) k_R}{M}. \tag{4}
$$
Rather than working in terms of ribosome copy numbers (which can be difficult to measure), we can define the fraction of the entire protein mass that is ribosomal $\phi_R$, defined as
$$
\phi_R = \frac{M_R}{M}, \tag{5}
$$
where $M_R$ is the mass of *all* ribosomes in the cell. If each ribosome has a proteinaceous mass of $m_R$, the total ribosomal mass can be calculated as $M_R = m_R N_R$. Thus, by computing the mass fraction, we can restate Equation 5 as
$$
\lambda = \frac{\left(M_R - M_R^\text{(inactive)}\right)}{m_R}\frac{k_R}{M} = \frac{k_R}{m_R}\left(\phi_R - \phi_R^\text{(inactive)}\right) = \gamma\left(\phi_R - \phi_R^\text{(inactive)}\right), \tag{6}
$$
where we have introduced the term $\gamma = \frac{k_R}{m_R}$. This term, commonly referred to as the *translational efficiency*, is the ratio of the average translation rate $k_R$ and the mass of a single ribosome $m_R$ and thus has dimensions of [T$^{-1}$]. This term captures what fraction of a ribosome's mass is synthesized per unit time; alternatively, its inverse $\frac{1}{\gamma}$ is the length of time it takes to synthesize one ribosome's worth of peptide mass. For *E. coli*, a single ribosome consists of ≈ 7500 amino acids and has a maximal translation rate of ≈ 17 amino acids / second, yielding
$$
\gamma = \frac{k_R}{m_R} \approx \frac{17\,\text{amino acids / s}}{7500\,\text{amino acids}} \approx 2.2 \times 10^{-3} \text{ribosomes / sec}; \frac{1}{\gamma} \approx 400 \frac{\text{s}}{\text{ribosome}}.
$$.
Equation 6 provides a means to calculate the cellular growth rate so long as one knows the translational efficiency $\gamma$ and the proteome mass fraction $\phi_R$. Alternatively, knowing the growth rate $\lambda$ and having measured the proteome mass fraction $\phi_R$, one can easily infer the inactive ribosomal mass fraction and the translational efficiency $\gamma$ by performing a simple linear regression of
$$
\phi_R = \phi_R^\text{(inactive)} + \frac{\lambda}{\gamma}. \tag{7}
$$
Below, we load data from a variety of proteomic and other studies where the ribosomal mass fraction (either by RNA/protein ratio or via proteomics) and the growth rate were measured and perform this simple inference.
```
# Load the compiled ribosomal mass fraction vs. growth rate measurements
mass_fraction = pd.read_csv('../../data/other/mass_fraction_compiled.csv')

# Fit the linear relation of Equation 7: phi_R = phi_R^(inactive) + lambda / gamma.
# linregress returns (slope, intercept, ...): slope = 1/gamma, intercept = phi_R^(inactive).
popt = scipy.stats.linregress(mass_fraction['growth_rate_hr'], mass_fraction['mass_fraction'])

# Compute the trendline over a growth-rate range of 0 to 2 per hour
growth_rate = np.linspace(0, 2, 100)
trend = popt[1] + popt[0] * growth_rate
fit_df = pd.DataFrame(np.array([growth_rate, trend]).T,
                      columns=['growth_rate_hr', 'mass_fraction'])
fit = alt.Chart(fit_df).mark_line(color=colors['black'], size=2).encode(
        x=alt.X(field='growth_rate_hr', type='quantitative', title='growth rate [hr\u207b\u00B9]'),
        y=alt.Y(field='mass_fraction', type='quantitative', title='ribosomal mass fraction'))

# Set up the scatter plot of the measurements, colored/shaped by data source
base = alt.Chart(mass_fraction).encode(
        x=alt.X(field='growth_rate_hr', type='quantitative', title='growth rate [hr\u207B\u00B9]'),
        y=alt.Y(field='mass_fraction', type='quantitative', title='ribosomal mass fraction'),
        color=alt.Color(field='source', type='nominal', title='data source'),
        shape=alt.Shape(field='source', type='nominal'))
points = base.mark_point(size=100,opacity=0.75)

# Report the inferred translational efficiency (1/slope) and inactive fraction (intercept)
print(f"""
Estimated Parameters
---------------------
γ ≈ {1/popt[0]:0.0f} hr^-1
Φ_R (inactive) = {popt[1]:0.2f}
""")
(points + fit)
```
The linear relation given in Equation 7, and subsequent parameteter estimates, provide some more context for what the parameter $\phi_R$ $^\text{(inactive)}$ means. When the growth rate is 0, protein synthesis is no longer proceeding meaning that *all* ribosomes in the cell are inactive. Thus, this implies that the parameter $\phi_R^\text{(inactive)}$ represents the *minimum* fraction of the proteome that can be occupied by ribosomal mass. Thus, going forward, we will make the definition
$$
\phi_R^\text{(inactive)} \equiv \phi_R^\text{(min)}. \tag{8}
$$
such that the meaning of $\phi_R^\text{(min)}$ is clear.
## Nutrient Transport and General Metabolism
Thus far we have only considered the synthesis of proteins as the rate-limiting step of growth (sometimes referred to as *translation limited growth*). In doing so, we have made the assumption *a priori* that the concentration of precursors (think amino acids or charged tRNA) are in sufficient abundance. Here, we extend the model to consider the transport or synthesis of nutrients, which we will approximate as being a type of amino acid.
Consider some amino acid $a$ (not specifically *alanine*), whose total mass in the cell is $M_a$. As the cell grows, it will need to transport or synthesize $a$ at some flux $J_a$ to at least match the rate at which it is being consumed. The dynamics of this process can be written as
$$
\frac{dM_a}{dt} = J_a - \beta \frac{dM}{dt}, \tag{9}
$$
where $\beta$, defined on the range [0, 1], corresponds to the frequency at which $a$ is being incorporated into the new cell mass. For example, if we consider that all 20 amino acids are used in equal measure, the parameter $\beta = \frac{1}{20}$ to indicate that one out of every 20 amino acids is the nutrient $a$. In addition, we can stipulate that there must be a residual pool of mass $M_a^\text{(residual)}$ such that the kinetics of the translation isn't slowed. Noting that rather than working in the space of absolute mass $M_a$, we can again work in the language of mass fraction by defining
$$
\theta_a = \frac{M_a}{M}, \tag{10}
$$
where we have chosen $\theta$ as to note that it is possible for $\theta > 1$ where as it is not the case for $\phi$. Thus, to impose the limit that there must be a residual concentration of $a$ at a given mass fraction $\theta_a$, we can amend Equation 9 to read
$$
\frac{dM_a}{dt} = J_a - \beta \frac{dM}{dt} - \theta_a \frac{dM}{dt} = J_a - \left( \beta + \theta_a\right)\frac{dM}{dt}. \tag{11}
$$
We can express the entire dynamics enumerated in Equation 11 in terms of the nutrient mass fraction $\theta_a$ by dividing Equation 11 by $M$, yielding
$$
\frac{d\theta_a}{dt} = \frac{J_a}{M} - \frac{\beta + \theta_a}{M}\frac{dM}{dt}. \tag{12}
$$
Using the relation given in Equation 1, we can state Equation 12 in terms of the growth rate as
$$
\frac{d\theta_a}{dt} = \frac{J_a}{M} - \lambda\left(\beta + \theta_a\right). \tag{13}
$$
In steady-state, the mass fraction of the nutrient $\theta_a$ is constant, meaning that Equation 13 is equal to zero. Therefore, we can state that
$$
\lambda = \frac{J_a}{M\left(\beta + \theta_a\right)}. \tag{14}
$$
The flux or synthesis parameter $J_a$ reflects the concerted action of an array of metabolic proteins involving enzymatic pathways and transporters. We can abstract this complicated process by stating that all of the metabolic proteins involved in this synthesis and transport have a combined mass $M_P$, proceeding with a generalized rate $k_P$ which has units of nutrient mass produced/transported per unit mass of metabolic protein. This allows us to define the flux as
$$
J_a = k_P M_P. \tag{15}
$$
Plugging Equation 15 into Equation 14 yields a complete expression for the growth rate,
$$
\lambda = \frac{k_P}{\beta + \theta_a} \frac{M_P}{M} = \frac{k_P}{\beta + \theta_a} \phi_P. \tag{16}
$$
where $\phi_P$ is the mass fraction of the proteome occupied by metabolic proteins. In our section on *Protein Synthesis* we noted that there existed a maximum and minimum fraction of the total cellular proteome that can be occupied by ribosomes, $\phi_R^\text{(min)}$ and $\phi_R^\text{(max)}$. In order for the ribosomal mass fraction of the proteome to be able to change, there must be a commensurate change in the mass fraction in the rest of the proteome. We can state this mathematically by noting that the mass of the proteome must be partitioned between metabolic and ribosomal proteins,
$$
\phi_P + \phi_R = \phi_R^\text{(max)}. \tag{17}
$$
Thus, as $\phi_P$ gets smaller and smaller, the fraction of the proteome occupied by the ribosomes grows larger. Using this simple constraint, we can define expression for the growth rate given by Equation 16 in terms of the ribosomal mass fraction as
$$
\lambda = \frac{k_P}{\beta + \theta_a} \left(\phi_R^\text{(max)} - \phi_R\right). \tag{18}
$$
In standard physiological conditions, the standing mass fraction pool of amino acids is small compared to the number being incorporated into protein at any point in time. In this regime, we can make the mathematical approximation that
$$
\beta + \theta_a \approx \beta, \tag{19}
$$
allowing us to simplify Equation 18 to
$$
\lambda = \nu\left(\phi_R^\text{(max)} - \phi_R\right), \tag{20}
$$
where we have introduced $\nu = \frac{k_P}{\beta}$ which defines the synthesis/transport rate of nutrients to their frequency of usage. As we did in the previous section, we can rewrite Equation 20 to define the ribosomal mass fraction $\phi_R$ as a function of the growth rate as
$$
\phi_R = \phi_R^\text{(max)} - \frac{\lambda}{\nu}. \tag{21}
$$
Equation 21 immediately illustrates that, for a fixed value of $\nu$, the ribosomal mass fraction decreases as the growth rate increases. In their seminal 2010 work, Matthew Scott and company decreased the growth rate of *E. coli* in a single growth medium by titrating in different amounts of chloramphenicol, an antibiotic which reversibly inactivates ribosomes. Thus, as $\nu$ is dictated by the identity of the growth medium, adding chloramphenicol acts as a means to decrease $\lambda$, theoretically *increasing* the ribosomal mass fraction. Below we plot these data and estimate the relevant parameters $\nu$ and $\phi_R^\text{(max)}$ for each minimal growth medium.
```
# Load data for Scott et al 2010, Fig 2AC
scott_data = pd.read_csv('../../data/other/Scott2010_chlor_inhibition_minimal.csv')
scott_data['mass_fraction'] = scott_data['RNA_protein_ratio'] * 0.4855 # Converts RNA/Protein to ribosome mass fraction

# Set up the scatter plot of the measurements, colored by growth medium and
# shaped by chloramphenicol concentration
points = alt.Chart(scott_data).mark_point(size=80).encode(
            x=alt.X(field='growth_rate_hr', type='quantitative', title='growth rate [hr\u207b\u00b9]'),
            y=alt.Y(field='mass_fraction', type='quantitative', title='ribosomal mass fraction',
                    scale=alt.Scale(domain=[0, 0.4])),
            color=alt.Color(field='medium', type='nominal',
                            title='growth medium'),
            shape=alt.Shape(field='chloramphenicol_conc_uM', type='nominal', title='chloramphenicol [µM]'))

# For each growth medium, fit Equation 21: phi_R = phi_R^(max) - lambda / nu.
# linregress gives slope = -1/nu and intercept = phi_R^(max).
fit_dfs = []
growth_rate = np.linspace(0, 1, 100)
print("""
Estimated Parameters
--------------------
""")
for g, d in scott_data.groupby(['medium']):
    popt = scipy.stats.linregress(d['growth_rate_hr'],d['mass_fraction'])
    print(f'Growth medium {g}: Φ_R max ≈ {popt[1]:0.2f}; ν ≈ {-1 * popt[0]**-1:0.1f}')
    # Build the trendline for this medium over the growth-rate range
    _df = pd.DataFrame([])
    _df['growth_rate_hr'] = growth_rate
    _df['mass_fraction'] = popt[1] + popt[0] * growth_rate
    _df['medium'] = g
    fit_dfs.append(_df)
fit_df = pd.concat(fit_dfs, sort=False)
# Drop extrapolated points where the line crosses below zero mass fraction
fit_df = fit_df[fit_df['mass_fraction']>=0]

# Generate the plot with the fits.
fit = alt.Chart(fit_df).mark_line(size=2).encode(
        x=alt.X(field='growth_rate_hr', type='quantitative', title='growth rate [hr\u207b\u00b9]'),
        y=alt.Y(field='mass_fraction', type='quantitative', title='ribosomal mass fraction'),
        color=alt.Color(field='medium', type='nominal', title='growth medium')
)
(points + fit)
```
From this inference, we can come to the conclusion that the maximum ribosomal mass fraction $\phi_R^\text{(max)}$ is largely independent of the growth medium and is around $\approx$ 0.3 - 0.4. The nutritional capacity $\nu$ is (as expected), dependent on the growth medium.
We now have in hand two "growth laws" for exponential steady-state growth. Using Equations 7 and 20, we can enumerate a mathematical expression for the growth rate as a function of $\nu$, $\gamma$, $\phi_R^\text{max}$ and $\phi_R^\text{min}$ as
$$
\lambda = \frac{\phi_R^\text{(max)} - \phi_R^\text{(min)}}{\frac{1}{\nu} + \frac{1}{\gamma}}. \tag{22}
$$
```
# Define the parameter ranges for the nutritional (nu) and translational (gamma) capacities.
nu_range = np.round(np.linspace(1, 5, 20), decimals=2)
gamma_range = np.linspace(20, 1, 20)

# Define the maximum and minimum bounds of the ribosomal mass fraction
phi_R_max = 0.35
phi_R_min = 0.04

# Compute the growth rate over the (nu, gamma) grid via Equation 22:
# lambda = (phi_R_max - phi_R_min) / (1/nu + 1/gamma)
nu_mesh, gamma_mesh = np.meshgrid(nu_range, gamma_range)
growth_rate = (phi_R_max - phi_R_min) / (nu_mesh**-1 + gamma_mesh**-1)

# Assemble the tidy dataframe of (nu, gamma, growth rate) triplets for plotting
df = pd.DataFrame({'nu': nu_mesh.ravel(),
                   'gamma': gamma_mesh.ravel(),
                   'growth_rate_hr': growth_rate.ravel()})

# Plot the heatmap of growth rate as a function of nu and gamma
heatmap = alt.Chart(df).mark_rect().encode(
            x=alt.X(field='nu', type='ordinal', title='nutritional capacity; ν'),
            y=alt.Y(field='gamma', type='ordinal', title='translational capacity; γ'),
            color=alt.Color(field='growth_rate_hr', type='quantitative',
                            title='growth rate; λ [hr\u207b\u00b9]', scale=alt.Scale(scheme='viridis'))
)
heatmap.interactive()
```
The above heatmap shows that growth rate monotonically increases as $\nu$ and $\gamma$ get larger. However, this assumes that for any combination of $\gamma$ and $\nu$, their values are fixed and independent of the nutrients in the growth medium, a strict assumption we relax below.
## Michaelis-Menten Kinetics of $\gamma$ and $\nu$
Up to this point, we have considered the translational capacity $\gamma$ and the nutritional capacity $\nu$ to be constants or at least independent of the nutrient concentration $\theta_0$ in the growth medium (as made by the approximation Equation 19). However, it is not unreasonable to assume that these parameters will be dependent on the nutrient concentrations in the cell. Take for example the translational capacity $\gamma$ which is defined as a function of the effective translation rate $k_t$. To proceed at its maximal rate of $k_t \approx 17.1\,$ amino acids / s, charged tRNAs (read, nutrients) need to be in a sufficient excess such that it is the actual enzymatic reaction of forming new peptide bonds that is limiting and not finding the correct tRNA to match the codon. However, as the concentration of charged tRNAs drop, the opposite can be true where the kinetics is determined by the association with the correct tRNA.
Another way to think about this is that the translational capacity is tuned such that the formation of new peptide bonds (consumption of nutrients) does not outpace the synthesis of new charged tRNAs (production of nutrients). In similar situations, it's not unreasonable to model the concentration-dependent rate as following Michaelis-Menten kinetics, mathematized as
$$
\gamma(\theta_a) = \gamma^\text{(max)} \frac{\theta_a}{\theta_a + \theta_{0}} = \frac{\gamma^\text{(max)}}{1 + \frac{\theta_{0}}{\theta_a}}, \tag{23}
$$
where $\theta_{0}$ is the Michaelis-Menten constant and represents the mass fraction of nutrients where the translational capacity is half maximal. Equation 23 states that so long as the mass fraction of nutrients $\theta_a >> \theta_0$, the translational capacity will remain high and growth will be rapid. However, as $\theta_a << \theta_0$, $\gamma$ plummets, slowing translation as to not exhaust nutrients and leave ribosomes stalled, unable to synthesize new proteins.
In principle, we can take a similar approach to modeling the nutrient dependence on the nutritional capacity $\nu$. If nutrient conditions are high $\theta_a >> \theta_0$, then it is not optimal to devote a large swath of these resources towards making metabolic machinery. Rather, devoting more resources to making more ribosomes would lead to faster growth. Thus, it's reasonable to assume that there is a negative dependence of the nutritional capacity $\nu$ on the nutrient mass fraction $\theta_a$, such that $\nu$ is maximal when $\theta_a << \theta_0$,
$$
\nu(\theta_a) = \frac{\nu^\text{(max)}}{1 + \frac{\theta_a}{\theta_0}}. \tag{24}
$$
Plots of these functions are given below. Of course, Equations 23 and 24 are very simplistic and biological reality may impose more parameterization, such as a cooperativity that we have not considered here.
```
# Define the parameter ranges.
t_t0 = np.logspace(-3, 3, 200)  # ratio theta_a / theta_0, spanning six decades
gamma_max = [1E-5, 1E-4, 1E-3, 1E-2, 1E-1]
nu_max = [1E-5, 1E-4, 1E-3, 1E-2, 1E-1]
# Compute the equations and add to dataframe.
# gamma_max and nu_max are paired element-wise via zip (one curve per pair).
dfs = []
for g, n in zip(gamma_max, nu_max):
    # Eq. 23: translational capacity rises with nutrient abundance
    gamma = g / (1 + t_t0**-1)
    # Eq. 24: nutritional capacity falls with nutrient abundance
    nu = n / (1 + t_t0)
    _df = pd.DataFrame(np.array([t_t0, gamma, nu]).T,
                       columns=['t_t0', 'gamma', 'nu'])
    _df['gamma_param'] = g
    _df['nu_param'] = n
    dfs.append(_df)
df = pd.concat(dfs, sort=False)
# Set up the plots: log-log curves of each capacity vs the nutrient ratio.
gamma_plot = alt.Chart(df, title='translational capacity, γ', width=300).mark_line(size=2).encode(
    x=alt.X(field='t_t0', type='quantitative', scale=alt.Scale(type='log'),
            title='θ\u2090 / θ\u2080', axis=alt.Axis(values=[0.001, 0.01, 0.1, 1, 10, 100, 1000])),
    y=alt.Y(field='gamma', type='quantitative', scale=alt.Scale(type='log'),
            title='γ [T\u207b\u00b9]', axis=alt.Axis(values=[1E-8, 1E-6, 1E-4, 1E-2, 1])),
    color=alt.Color(field='gamma_param', type='quantitative', title='γ max [T\u207b\u00b9]'))
nu_plot = alt.Chart(df, title='nutritional capacity, ν', width=300).mark_line(size=2).encode(
    x=alt.X(field='t_t0', type='quantitative', scale=alt.Scale(type='log'),
            title='θ\u2090 / θ\u2080', axis=alt.Axis(values=[0.001, 0.01, 0.1, 1, 10, 100, 1000])),
    y=alt.Y(field='nu', type='quantitative', scale=alt.Scale(type='log'),
            title='ν [T\u207b\u00b9]', axis=alt.Axis(values=[1E-8, 1E-6, 1E-4, 1E-2, 1])),
    color=alt.Color(field='nu_param', type='quantitative', title='ν max'))
gamma_plot | nu_plot
```
## Modeling Steady-State Growth and Nutrient Concentrations
With these theoretical underpinnings in hand, we can now turn to the real meat of this notebook which is presenting a complete model of steady state growth rates as a function of the nutrient concentration, or mass fraction $\theta_a$ as we have done thus far.
Given the equations derived above, we have two expressions for the growth rate as a function of the ribosomal mass fraction $\phi_R$ and the nutrient concentration $\theta_a$,
$$
\lambda = \frac{\gamma^\text{(max)}}{1 + \frac{\theta_0}{\theta_a}}\left(\phi_R - \phi_R^\text{(min)}\right), \tag{25}
$$
and
$$
\lambda = \frac{k_P}{\beta + \theta_a}\left(\phi_R^\text{(max)} - \phi_R\right). \tag{26}
$$
Equation 26 can be rearranged to solve for the nutrient mass fraction pool $\theta_a$ to yield
$$
\theta_a = \beta\left[\frac{\nu}{\lambda}\left(\phi_R^\text{(max)} - \phi_R\right) - 1 \right]. \tag{27}
$$
Substituting Equation 24 for $\nu$ in Equation 27 and solving for $\theta_a$ results in one physically meaningful positive root, whose form I calculated symbolically below. It has an unwieldy form which we will neglect enumerating here for now.
However, with a closed-form solution for $\theta_a$ in hand, we can solve Equation 25 analytically for $\lambda$, which again is just a quadratic equation. Computing symbolically results in a single physically meaningful root with a nasty form that we don't present here for clarity. What is more insightful is to choose a set of parameters and plot how the growth rate changes as a function of the parameters, which we implement below
```
# Enable pretty (LaTeX) rendering of sympy expressions in the notebook
sp.init_printing()
# Define the symbols
# NOTE(review): this rebinds the module-level names nu_max, gamma_max,
# theta_0, beta, etc. (numeric in earlier cells) to sympy Symbols --
# any later cell that reads these globals will see Symbols instead of
# numbers; confirm execution order before relying on them.
nu_max = sp.Symbol('{{\\nu^{(max)}}}')
gamma_max = sp.Symbol('{{\\gamma^{(max)}}}')
theta_0 = sp.Symbol('\\theta_0')
theta_a = sp.Symbol('\\theta_a')
beta = sp.Symbol('\\beta')
lam = sp.Symbol('\\lambda')
# Composite symbols for the mass-fraction differences, kept opaque so the
# solved roots stay readable.
phir_max = sp.Symbol('{{(\\phi_R^{(max)} - \\phi_R)}}')
phir_min = sp.Symbol('{{(\\phi_R - \\phi_R^{(min)}}}')
# Define Eq 27 with the Michaelis-Menten form of nu (Eq. 24) substituted
eq27 = (beta / lam) * ((nu_max / (1 + (theta_a/theta_0))) * phir_max - lam) - theta_a
# Take the first root of the quadratic in theta_a
theta_a_soln = sp.solve(eq27, theta_a)[0]
# Plug in this solution for theta_a into Equation 25 and solve for lambda.
eq25 = (gamma_max * phir_min) / (1 + theta_0/theta_a_soln) - lam
lam_soln = sp.solve(eq25, lam)
# lam_soln[1].simplify().expand().factor()
theta_a_soln
# Define the function for the steady state growth rate
def steady_state_growth(phi_R, gamma_max, nu_max, theta_0, beta=1,
phi_R_max=0.4, phi_R_min=0.04):
# Compute the mass fraction differences
phi_min = phi_R - phi_R_min
phi_max = phi_R_max - phi_R
# Compute the numerator piecewise
prefix = beta * phi_min**2 * gamma_max**2 + 2 * beta * phi_min * phi_max * gamma_max * nu_max
sqrt = np.sqrt(beta**2 * phi_min**4 * gamma_max**4 + 4 * beta * theta_0 * phi_min **3 * phi_max * gamma_max**3 * nu_max)
numer = prefix - sqrt
# Compute the denominator and return
denom = 2 * (beta * phi_min * gamma_max + beta * phi_max * nu_max - theta_0 * phi_min * gamma_max)
return numer / denom
def steady_state_theta_a(lam, theta_0, beta, phi_R_max, phi_R, nu_max=None):
    """Steady-state nutrient mass fraction theta_a for a given growth rate.

    Evaluates the positive root of the quadratic arising from Eq. 27 with
    the Michaelis-Menten form of nu (Eq. 24) substituted.

    Parameters
    ----------
    lam : float or ndarray
        Steady-state growth rate lambda (must be nonzero).
    theta_0 : float
        Michaelis-Menten constant.
    beta : float
        Conversion factor between nutrient and protein mass fractions.
    phi_R_max : float
        Maximum ribosomal mass fraction.
    phi_R : float or ndarray
        Ribosomal mass fraction.
    nu_max : float, optional
        Maximal nutritional capacity. BUG FIX: the original implementation
        silently read the module-level ``nu_max``, which other notebook
        cells rebind to a list and then to a sympy Symbol -- pass it
        explicitly. Defaults to the global for backward compatibility.

    Returns
    -------
    float or ndarray
        Steady-state nutrient mass fraction (negative values are
        non-physical and are flagged downstream).
    """
    if nu_max is None:
        # Backward-compatible fallback to the (fragile) module-level name.
        nu_max = globals()['nu_max']
    phi_max = (phi_R_max - phi_R)
    prefix = -lam * (beta + theta_0)
    sqrt = np.sqrt(lam * (beta**2 * lam - 2 * beta * lam * theta_0
                          + 4 * beta * theta_0 * phi_max * nu_max
                          + lam * theta_0**2))
    numer = prefix + sqrt
    denom = 2 * lam
    return numer / denom
def nutritional_capacity(nu_max, theta_a, theta_0):
    """Nutrient-dependent nutritional capacity ν (Eq. 24).

    Decreases from ``nu_max`` as the nutrient mass fraction ``theta_a``
    grows relative to the Michaelis-Menten constant ``theta_0``.
    """
    saturation = 1 + theta_a / theta_0
    return nu_max * saturation**-1
def translational_capacity(gamma_max, theta_a, theta_0):
    """Nutrient-dependent translational capacity γ (Eq. 23).

    Approaches ``gamma_max`` when the nutrient mass fraction ``theta_a``
    is large relative to the Michaelis-Menten constant ``theta_0``.
    """
    saturation = 1 + theta_0 / theta_a
    return gamma_max * saturation**-1
# Define the parameters
# beta = 1
# gamma_max = 10
# nu_max = 5.5
# phi_R_max = 0.5
# phi_R_min = 0.025
# delta = 0.001 # To avoid division by zero
# # Typical AA concentration is ≈ 1mM. Here, we convert that to mass frac
# m_aa = 110 # mass of an aa in Da
# N_aa = 20 * 1E6 # total number of alll free amino acids
# M = 1E9 * m_aa # mass of whole proteome
# theta_0 = (N_aa * m_aa)/M #
# phi_R = np.linspace(phi_R_min+delta, phi_R_max-delta, 100)
# print(theta_0)
# Define the panel parameterized function for plot generation.
# Sliders for the three free model parameters; note the theta_0 slider is
# in log10 units (generate_plot converts it with 10**theta_0).
gamma_slider = pn.widgets.FloatSlider(name='γ max', start=0.1, end=10, step=0.1, value=8)
nu_slider = pn.widgets.FloatSlider(name='ν max', start=0.1, end=10, step=0.1, value=6)
theta_0_slider = pn.widgets.FloatSlider(name='log10 θ\u2080', start=-4, end=0, step=0.01)
@pn.depends(gamma_slider.param.value, nu_slider.param.value, theta_0_slider.param.value)
def generate_plot(gamma_max, nu_max, theta_0, beta=1, phi_R_max=0.5, phi_R_min=0.025):
    """Render the four steady-state panels for the current slider values.

    Parameters
    ----------
    gamma_max, nu_max : float
        Maximal translational / nutritional capacities (slider values).
    theta_0 : float
        log10 of the Michaelis-Menten constant (the slider is in log
        units); converted to linear units below.
    beta : float, optional
        Conversion factor between nutrient and protein mass fractions.
    phi_R_max, phi_R_min : float, optional
        Bounds of the ribosomal mass fraction.
    """
    # The slider supplies log10(theta_0); convert to linear units.
    theta_0 = 10**theta_0
    # Define the mass fraction range, padded by delta so we stay strictly
    # inside (phi_R_min, phi_R_max) and avoid the singular endpoints.
    delta = 0.0001
    phi_R_range = np.linspace(phi_R_min + delta, phi_R_max - delta, 200)
    # Compute the quantities and set up the dataframe.
    # Fix: forward beta explicitly -- previously this function's beta
    # argument was silently ignored and steady_state_growth always used
    # its own default (beta=1).
    ss_gr = steady_state_growth(phi_R_range, gamma_max, nu_max, theta_0, beta=beta,
                                phi_R_max=phi_R_max, phi_R_min=phi_R_min)
    # NOTE(review): steady_state_theta_a reads the module-level nu_max,
    # not the slider value passed here -- confirm and pass nu_max through
    # once its signature accepts it.
    ss_ta = steady_state_theta_a(ss_gr, theta_0, beta, phi_R_max, phi_R_range)
    ss_nu = nutritional_capacity(nu_max, ss_ta, theta_0)
    ss_gamma = translational_capacity(gamma_max, ss_ta, theta_0)
    ss_df = pd.DataFrame(np.array([phi_R_range, ss_gr, ss_ta,
                                   ss_nu, ss_gamma]).T,
                         columns=['mass_fraction', 'growth_rate_hr',
                                  'theta_a', 'nu', 'gamma'])
    # Negative theta_a roots are mathematically valid but physically
    # meaningless; flag them so the charts can dash those segments.
    ss_df['status'] = 'physical'
    ss_df.loc[ss_df['theta_a'] < 0, 'status'] = 'non-physical'
    ss_df['theta_a'] = np.log10(ss_df['theta_a'].values)
    # Set up the plots
    ss_gr_plot = alt.Chart(ss_df, width=200, height=200, title='steady-state growth rate').mark_line().encode(
        x=alt.X(field='mass_fraction', type='quantitative', title='ribosomal mass fraction',
                scale=alt.Scale(domain=[phi_R_min, phi_R_max])),
        y=alt.Y(field='growth_rate_hr', type='quantitative', title='growth rate [hr\u207b\u00b9]',
                scale=alt.Scale(domain=[0, 1.5])),
        strokeDash=alt.StrokeDash(field='status', type='nominal', legend=None, sort='descending'))
    ss_ta_plot = alt.Chart(ss_df, width=200, height=200, title='steady-state θ_A').mark_line().encode(
        x=alt.X(field='mass_fraction', type='quantitative', title='ribosomal mass fraction',
                scale=alt.Scale(domain=[phi_R_min, phi_R_max])),
        y=alt.Y(field='theta_a', type='quantitative', title='log10 nutrient mass fraction; θ_a',
                scale=alt.Scale(domain=[-8, 1])),
        strokeDash=alt.StrokeDash(field='status', type='nominal', legend=None, sort='descending'))
    ss_gamma_plot = alt.Chart(ss_df, width=200, height=200, title='steady-state translational capacity γ').mark_line().encode(
        x=alt.X(field='mass_fraction', type='quantitative', title='ribosomal mass fraction'),
        y=alt.Y(field='gamma', type='quantitative', title='translational capacity γ [hr\u207b\u00b9]',
                scale=alt.Scale(domain=[0, 10])),
        strokeDash=alt.StrokeDash(field='status', type='nominal', legend=None, sort='descending'))
    ss_nu_plot = alt.Chart(ss_df, width=200, height=200, title='steady-state nutritional capacity ν').mark_line().encode(
        x=alt.X(field='mass_fraction', type='quantitative', title='ribosomal mass fraction'),
        y=alt.Y(field='nu', type='quantitative', title='nutritional capacity ν'),
        strokeDash=alt.StrokeDash(field='status', type='nominal', legend=None, sort='descending'))
    # Return the plotting canvas
    return (ss_gr_plot | ss_ta_plot | ss_gamma_plot | ss_nu_plot)
# Set up the panel object: sliders in a row above the linked plots
widgets = pn.Row(gamma_slider, nu_slider, theta_0_slider)
plots = pn.Column(widgets, generate_plot)
plots
```
| github_jupyter |
# Many Particle Model (MPM)
The Many Particle Model (MPM) of a lithium-ion battery is an extension of the Single Particle Model to account for a continuous distribution of active particle sizes in each electrode $\text{k}=\text{n},\text{p}$. Therefore, many of the same model assumptions hold, e.g., the transport in the electrolyte is instantaneous and hence the through-cell variation (in $x$) is neglected. The full set of assumptions and description of the particle size geometry is given in [[4]](#References). Note that the MPM in [[4]](#References) is for a half cell and the version implemented in PyBaMM is for a full cell and uses the notation and scaling given in [[5]](#References).
## Particle size geometry
In this notebook we state the dimensional model first, and the dimensionless version at the end. In each electrode $\text{k}=\text{n},\text{p}$, there are spherical particles of each radius $R_\text{k}$ in the range $R_\text{k,min}<R_\text{k}<R_\text{k,max}$, with the fraction of all particles of a given radius $R_\text{k}$ given by the particle-size distribution (based on number)
$f_\text{k,num}(R_\text{k})$. However, it is more convenient to deal with the fraction
of _surface area_ contributed by particles of radius $R_\text{k}$, which we denote $f_{\text{k},a}(R_\text{k})$ and refer to as the _area-weighted_ particle-size distribution. The area and number-based distributions are related via
$$
f_{\text{k},a}(R_\text{k}) = \frac{4\pi R_\text{k}^2 f_\text{k,num}(R_\text{k})}{\int_{R_\text{k,min}}^{R_\text{k,max}} 4\pi R_\text{k}^2 f_\text{k,num}(R_\text{k})\,\text{d}R_\text{k}}
$$
The total amount of surface area (per unit volume) $a_\text{k,tot}$ accounting for all particles is expressed in terms of the active material volume fraction $\epsilon_{s,\text{k}}$, similar to the other models in PyBaMM (SPM, DFN):
$$
\epsilon_{s,\text{k}}= \int \frac{1}{3} R_\text{k} \underbrace{a_\text{k,tot}f_{\text{k},a}(R_\text{k})}_{\text{area }a_\text{k}(R_\text{k})\text{ of particles size }R_\text{k}}\,\text{d}R_\text{k}
$$
Rearranging and defining $\bar{R}_{\text{k},a}=\int R_\text{k} f_{\text{k},a}(R_\text{k})\,\text{d}R_\text{k}$ as the mean of the area-weighted distribution, we find
$$
a_\text{k,tot}=\frac{3\epsilon_{s,\text{k}}}{\int R_\text{k} f_{\text{k},a}(R_\text{k})\,\text{d}R_\text{k}} = \frac{3\epsilon_{s,\text{k}}}{\bar{R}_{\text{k},a}}.
$$
Then $a_\text{k,tot}$ is the aggregate surface area of the particle population and analogous to the variables `"X-averaged negative electrode surface area to volume ratio [m-1]"`, etc. in the SPM, SPMe, and DFN models, and can be calculated in a similar way as shown above using the _area-weighted mean radius_ $\bar{R}_{\text{k},a}$ (other mean radii do not have this property). See [[4]](#References) for more details on the different types of distribution and mean radii.
Another common way to express the size distribution is via particle volume. The fraction of volume contributed by the particles of radius $R_\text{k}$, denoted the _volume-weighted_ particle-size distribution is related to the number and area ones via
$$
f_{\text{k},v}(R_\text{k}) = \frac{\frac{1}{3} R_\text{k} f_{\text{k},a}(R_\text{k})}{\int_{R_\text{k,min}}^{R_\text{k,max}} \frac{1}{3} R_\text{k} f_{\text{k},a}(R_\text{k})\,\text{d}R_\text{k}} =\frac{\frac{4}{3}\pi R_\text{k}^3 f_\text{k,num}(R_\text{k})}{\int_{R_\text{k,min}}^{R_\text{k,max}} \frac{4}{3}\pi R_\text{k}^3 f_\text{k,num}(R_\text{k})\,\text{d}R_\text{k}}
$$
It is sufficient to specify $f_{\text{k},a}(R_\text{k})$, which is the input required by the MPM.
## Model equations
In each electrode, only one representative particle of each size $R_\text{k}$ needs to be modelled. The concentration of lithium in the solid particles is denoted $c_{\text{s,k}}(t,R_\text{k}, r_\text{k})$, which varies with time, particle radius $R_\text{k}$, and the radial coordinate $r_{\text{k}} \in[0,R_{\text{k}}]$ within the spherical particle. The potential is uniform across all particles in the electrode, $\phi_{\text{s,k}}(t)$.
The equations for molar conservation of lithium ($c_{\text{s,k}}$) are then:
$$
\frac{\partial c_{\text{s,k}}}{\partial t} = -\frac{1}{r_{\text{k}}^2} \frac{\partial}{\partial r_{\text{k}}} \left(r_{\text{k}}^2 N_{\text{s,k}}\right), \\
N_{\text{s,k}} = -D_{\text{s,k}}(c_{\text{s,k}}) \frac{\partial c_{\text{s,k}}}{\partial r_{\text{k}}}, \quad \text{k} \in \text{n, p},
$$
$$
N_{\text{s,k}}\big|_{r_{\text{k}}=0} = 0, \ \ N_{\text{s,k}}\big|_{r_{\text{k}}=R_{\text{k}}} =
\frac{j_{\text{k}}}{F} \quad \text{k} \in \text{n, p}, \quad\\
c_{\text{s,k}}(0,R_\text{k},r_{\text{k}}) = c_{\text{s,k,0}}, \quad \text{k} \in \text{n, p},$$
where $D_{\text{s,k}}$ is the diffusion coefficient in the solid, $N_{\text{s,k}}$ denotes the flux of lithium ions in the solid particle, $F$ is Faraday's constant. The interfacial current density is given by $j_\text{k}$, which also varies with particle size.
### Algebraic equations for the potentials
The potentials $\phi_{\text{s,k}}(t)$ are determined via the integral constraint that the total current flowing across the electrode interface must equal (up to a minus sign) the through-cell current density $i$. Writing this in terms of the potential differences $\Delta \phi_{\text{s,k}} = \phi_{\text{s,k}} - \phi_{\text{e}}$,
$$
L_\text{k}a_\text{k,tot}\int_{R_\text{k,min}}^{R_\text{k,max}} f_{\text{k},a}(R_\text{k})j_\text{k}\,\text{d}R_\text{k} = \begin{cases}
i,\quad \text{k}=\text{n}\\
-i,\quad \text{k}=\text{p}
\end{cases}
$$
with Butler-Volmer kinetics
$$
j_\text{k}=j_{\text{0,k}} \sinh\left[\frac{F}{2R_g T}(\Delta \phi_{\text{s,k}}-U_{\text{k}}(c_{\text{s},\text{k}}))\right], \ \ j_{\text{0,k}} = m_{\text{k}}(c_{\text{e}}c_{\text{s,k}})^{1/2}(c_\text{k,max}-c_{\text{s,k}})^{1/2}.
$$
This gives an integral (or algebraic once discretized) equation for $\Delta \phi_{\text{s,k}}$ which is coupled to the concentration equations above.
The terminal voltage is then obtained from
$$
V = \Delta \phi_{\text{s,p}} - \Delta \phi_{\text{s,n}}
$$
### Dimensionless equations
The dimensionless scheme can be found in the appendix of [[5]](#References), giving similar dimensionless variables and parameters to those in the SPM.
## Example solving MPM
```
%pip install pybamm -q # install PyBaMM if it is not installed
import pybamm
import numpy as np
import matplotlib.pyplot as plt
```
Create an instance of the model
```
model = pybamm.lithium_ion.MPM()
```
First, let's inspect some variables (e.g. the lithium concentration and interfacial current densities) that depend on particle size $R_\text{k}$. The variables of interest are `X-averaged` versions as there is no dependence on $x$.
```
model.variables.search("X-averaged negative particle concentration")
```
The concentration that is being solved for in the MPM, and which varies with particle size is the one ending in `"distribution"`.
```
c_n_R_dependent = model.variables["X-averaged negative particle concentration distribution [mol.m-3]"]
c_n_R_dependent.domains
```
Notice that the secondary domain is `'negative particle size'`, which is treated as another (microscale) domain in PyBaMM.
The variable without the `"distribution"` has been "size averaged" and can be compared to the variable with the same name from the other lithium-ion models in PyBaMM with only a single particle size. The concentration within the particles is a volume-based quantity and is thus averaged by volume (to preserve the total amount of lithium):
$$
\left<c_{\text{s,k}}\right>_v = \int_{R_\text{k,min}}^{R_\text{k,max}} f_{\text{k},v}(R_\text{k})c_{\text{s,k}}(t,R_\text{k}, r_\text{k})\,\text{d}R_\text{k}
$$
In particular, if the variance of the particle-size distribution $f_{\text{k},a}$ is shrunk to zero and all particles become concentrated at its mean radius $\bar{R}_{\text{k},a}$, the variable `"X-averaged negative particle concentration [mol.m-3]"` will coincide with the same variable from an SPM with particle radius $R_\text{k}=\bar{R}_{\text{k},a}$. However, `"X-averaged negative particle concentration distribution [mol.m-3]"` will remain "particle-size dependent".
The convention of adding `"distribution"` to the end of a variable name to indicate particle-size dependence has been used for other variables, such as the interfacial current density:
```
model.variables.search("X-averaged negative electrode interfacial current density")
```
As the interfacial current density is a flux per unit area on the particle surface, the "size averaging" is done by area (to preserve the total flux of lithium):
$$
\left<j_{\text{k}}\right>_a = \int_{R_\text{k,min}}^{R_\text{k,max}} f_{\text{k},a}(R_\text{k})j_{\text{k}}(t,R_\text{k})\,\text{d}R_\text{k}
$$
The averaging is merely done to allow comparison to variables from other models with only a single size; the averaged quantities are not necessarily used within the MPM itself, nor are they necessarily physically meaningful.
Note: not all variables have a "distribution" version, such as the potentials or temperature variables, as they do not vary with particle size in the MPM as implemented here.
### Mesh points
By default, the size domain is discretized into 30 grid points on a uniform 1D mesh.
```
# Inspect the default discretisation: the submesh type and the number of
# grid points for each spatial variable (including the particle-size domain).
for k, t in model.default_submesh_types.items():
    print(k, 'is of type', t.__name__)
for var, npts in model.default_var_pts.items():
    print(var, 'has', npts, 'mesh points')
```
## Solve
Now solve the MPM with the default parameters and size distributions.
```
# Build and solve a one-hour discharge with the default parameters and
# default size distributions.
sim = pybamm.Simulation(model)
sim.solve(t_eval=[0, 3600])
# plot some variables that depend on R
output_variables = [
    "X-averaged negative particle surface concentration distribution",
    "X-averaged positive particle surface concentration distribution",
    "X-averaged positive electrode interfacial current density distribution",
    "X-averaged negative area-weighted particle-size distribution",
    "X-averaged positive area-weighted particle-size distribution",
    "Terminal voltage [V]",
]
sim.plot(output_variables=output_variables)
```
We can also visualise the concentration within the particles. Note that we use the dimensionless radial coordinate $r_\text{k}/R_\text{k}$ which always lies in the range $0<r_\text{k}/R_\text{k}<1$, rather than $r_\text{k}$ itself, whose range changes with $R_\text{k}$.
```
# Concentrations as a function of t, r and R
c_s_n = sim.solution["X-averaged negative particle concentration distribution"]
c_s_p = sim.solution["X-averaged positive particle concentration distribution"]
# dimensionless r_n, r_p (radial coordinate scaled by particle size)
r_n = sim.solution["r_n"].entries[:, 0, 0]
r_p = sim.solution["r_p"].entries[:, 0, 0]
# dimensional R_n, R_p (the discrete particle sizes)
R_n = sim.solution["Negative particle sizes [m]"].entries[:, 0]
R_p = sim.solution["Positive particle sizes [m]"].entries[:, 0]
t = sim.solution["Time [s]"].entries

def plot_concentrations(t):
    """Plot colormaps of particle concentration vs size R (x-axis) and
    scaled radial position r/R (y-axis) at a single time t [s]."""
    f, axs = plt.subplots(1, 2, figsize=(10, 3))
    plot_c_n = axs[0].pcolormesh(
        R_n, r_n, c_s_n(r=r_n, R=R_n, t=t), vmin=0.15, vmax=0.8
    )
    plot_c_p = axs[1].pcolormesh(
        R_p, r_p, c_s_p(r=r_p, R=R_p, t=t), vmin=0.6, vmax=0.95
    )
    # NOTE(review): labels say [µm] but R_n/R_p are in metres here --
    # confirm whether the x-axes should be scaled by 1e6.
    axs[0].set_xlabel(r'$R_n$ [$\mu$m]')
    axs[1].set_xlabel(r'$R_p$ [$\mu$m]')
    axs[0].set_ylabel(r'$r_n / R_n$')
    axs[1].set_ylabel(r'$r_p / R_p$')
    axs[0].set_title('Concentration in negative particles')
    axs[1].set_title('Concentration in positive particles')
    plt.colorbar(plot_c_n, ax=axs[0])
    plt.colorbar(plot_c_p, ax=axs[1])
    plt.show()

# initial time
plot_concentrations(t[0])
# final time
plot_concentrations(t[-1])
```
## Input custom particle-size distributions
In order to solve the MPM, one must input the area-weighted particle-size distribution $f_{\text{k},a}$ for each electrode $\text{k}=\text{n,p}$ and the minimum and maximum radius limits $R_\text{k,min}$, $R_\text{k,max}$. The default distributions $f_{\text{k},a}$, usable with the Marquis et al. [[6]](#References) parameter set, are lognormals with means equal to the `"Negative particle radius [m]"` and `"Positive particle radius [m]"` values, and standard deviations equal to 0.3 times the mean.
You can input any size distribution $f_{\text{k},a}(R_\text{k})$ as a function of $R_\text{k}$, which we will now demonstrate.
Note: $f_{\text{k},a}(R_\text{k})$ should ideally integrate to 1 over the specified $R_\text{k}$ range, although it is automatically normalized within PyBaMM anyway. A distribution such as a lognormal, once restricted to $[R_\text{k,min},R_\text{k,max}]$, discretized, and then renormalized, strictly will not integrate to 1 or have the originally desired mean or variance. The mean and variance of the final discretized distribution can be checked as output variables (see below). Having a sufficient number of mesh points in $R_\text{k}$ or a sufficiently wide interval $[R_\text{k,min},R_\text{k,max}]$ should alleviate this issue, however.
```
# Parameter set (no distribution parameters by default)
params = pybamm.ParameterValues("Marquis2019")
# Extract the radii values. We will choose these to be the means of our area-weighted distributions
R_a_n_dim = params["Negative particle radius [m]"]
R_a_p_dim = params["Positive particle radius [m]"]
# Standard deviations (dimensional): narrow in the negative electrode,
# wide in the positive electrode
sd_a_n_dim = 0.2 * R_a_n_dim
sd_a_p_dim = 0.6 * R_a_p_dim
# Minimum and maximum particle sizes (dimensional)
R_min_n = 0
R_min_p = 0
R_max_n = 2 * R_a_n_dim
R_max_p = 3 * R_a_p_dim
# Set the area-weighted particle-size distributions.
# Choose a lognormal (but any pybamm function could be used)

def f_a_dist_n_dim(R):
    """Area-weighted size distribution for the negative electrode (lognormal)."""
    return pybamm.lognormal(R, R_a_n_dim, sd_a_n_dim)

def f_a_dist_p_dim(R):
    """Area-weighted size distribution for the positive electrode (lognormal)."""
    return pybamm.lognormal(R, R_a_p_dim, sd_a_p_dim)
# Note: the only argument must be the particle size R

# input distribution params to the dictionary
distribution_params = {
    "Negative minimum particle radius [m]": R_min_n,
    "Positive minimum particle radius [m]": R_min_p,
    "Negative maximum particle radius [m]": R_max_n,
    "Positive maximum particle radius [m]": R_max_p,
    "Negative area-weighted "
    + "particle-size distribution [m-1]": f_a_dist_n_dim,
    "Positive area-weighted "
    + "particle-size distribution [m-1]": f_a_dist_p_dim,
}
# check_already_exists=False because these keys are new to the parameter set
params.update(distribution_params, check_already_exists=False)
sim = pybamm.Simulation(model, parameter_values=params)
sim.solve(t_eval=[0, 3600])
sim.plot(output_variables=output_variables)
```
The discretized size distributions can be plotted as histograms. Only the area-weighted distribution has been input, but the corresponding number and volume-weighted ones are also given as output variables.
```
# The discrete sizes or "bins" used, and the distributions
R_p = sim.solution["Positive particle sizes [m]"].entries[:,0] # const in the current collector direction
# The distributions
f_a_p = sim.solution["X-averaged positive area-weighted particle-size distribution [m-1]"].entries[:,0]
f_num_p = sim.solution["X-averaged positive number-based particle-size distribution [m-1]"].entries[:,0]
f_v_p = sim.solution["X-averaged positive volume-weighted particle-size distribution [m-1]"].entries[:,0]
# plot
width_p = (R_p[-1] - R_p[-2])/ 1e-6
plt.bar(R_p / 1e-6, f_a_p * 1e-6, width=width_p, alpha=0.3, color="tab:blue",
label="area-weighted")
plt.bar(R_p / 1e-6, f_num_p * 1e-6, width=width_p, alpha=0.3, color="tab:red",
label="number-weighted")
plt.bar(R_p / 1e-6, f_v_p * 1e-6, width=width_p, alpha=0.3, color="tab:green",
label="volume-weighted")
plt.xlim((0,30))
plt.xlabel("Particle size $R_{\mathrm{p}}$ [$\mu$m]", fontsize=12)
plt.ylabel("[$\mu$m$^{-1}$]", fontsize=12)
plt.legend(fontsize=10)
plt.title("Discretized distributions (histograms) in positive electrode")
plt.show()
```
### Vary standard deviation as an input parameter
You may define the standard deviation (or other distribution parameters except for the min or max radii) of the distribution as a pybamm "input" parameter, to quickly change the distribution at the solve stage.
```
# Define standard deviation in negative electrode to vary
# NOTE(review): despite the comment above, this targets the *positive*
# electrode -- confirm which electrode was intended.
sd_a_p_dim = pybamm.Parameter("Positive electrode area-weighted particle-size standard deviation [m]")
# Set the area-weighted particle-size distribution

def f_a_dist_p_dim(R):
    """Lognormal area-weighted distribution with an input-parameter std dev."""
    return pybamm.lognormal(R, R_a_p_dim, sd_a_p_dim)
# input to param dictionary: marking the std dev as "[input]" lets it be
# varied at solve time without rebuilding the simulation
distribution_params = {
    "Positive electrode area-weighted particle-size "
    + "standard deviation [m]": "[input]",
    "Positive area-weighted "
    + "particle-size distribution [m-1]": f_a_dist_p_dim,
}
params.update(distribution_params, check_already_exists=False)
# Experiment with a relaxation period, to see the effect of distribution width
experiment = pybamm.Experiment(["Discharge at 1 C for 3400 s", "Rest for 1 hours"])
sim = pybamm.Simulation(model, parameter_values=params, experiment=experiment)
solutions = []
# Solve once per relative width (std dev as a fraction of the mean radius)
for sd_a_p in [0.4, 0.6, 0.8]:
    solution = sim.solve(
        inputs={
            "Positive electrode area-weighted particle-size "
            + "standard deviation [m]": sd_a_p * R_a_p_dim
        }
    )
    solutions.append(solution)
pybamm.dynamic_plot(
    solutions,
    output_variables=output_variables,
    labels=["MPM, sd_a_p=0.4", "MPM, sd_a_p=0.6", "MPM, sd_a_p=0.8"]
)
```
## Check the distribution statistics
The mean and standard deviations of the final discretized distributions can be investigated using the output variables `"Negative area-weighted mean particle radius"` and `"Negative area-weighted particle-size standard deviation"`, etc.
```
# Compare the input (continuum) lognormal statistics against those of the
# discretized distributions actually used in each solve.
print("The mean of the input lognormal was:", R_a_p_dim)
print("The means of discretized distributions are:")
for solution in solutions:
    R = solution["Positive area-weighted mean particle radius [m]"]
    # entries[0]: the statistic is uniform in space, so the first entry suffices
    print("Positive area-weighted mean particle radius [m]", R.entries[0])
print("The standard deviations of the input lognormal were:")
print(0.4 * R_a_p_dim)
print(0.6 * R_a_p_dim)
print(0.8 * R_a_p_dim)
print("The standard deviations of discretized distributions are:")
for solution in solutions:
    sd = solution["Positive area-weighted particle-size standard deviation [m]"]
    print("Positive area-weighted particle-size standard deviation [m]", sd.entries[0])
```
## Compare to SPM and DFN
The MPM can also be easily compared to PyBaMM models with a single particle size. The standard output variables are computed in the MPM, averaging over the particle size domain.
```
# Compare the MPM against the single-particle-size models (SPM, DFN)
models = [
    pybamm.lithium_ion.SPM(),
    pybamm.lithium_ion.MPM(),
    pybamm.lithium_ion.DFN()
]
# solve each model over the same discharge window
sims = []
for model in models:
    sim = pybamm.Simulation(model)
    sim.solve(t_eval=[0, 3500])
    sims.append(sim)
# plot
pybamm.dynamic_plot(sims)
```
## Model options
The MPM is compatible with the current collector and thermal models (except the "x-full" thermal option). Currently, the MPM is not compatible with the various degradation submodels in PyBaMM (i.e. SEI models, particle cracking/swelling, or lithium plating).
### Fickian diffusion vs Uniform profile
One can choose between Fickian diffusion and a uniform concentration profile within the particles. The default is "Fickian diffusion".
```
# Compare Fickian diffusion (the default particle submodel) against a
# uniform concentration profile within the particles.
model_Fickian = pybamm.lithium_ion.MPM(name="MPM Fickian")
model_Uniform = pybamm.lithium_ion.MPM(
    name="MPM Uniform",
    options={"particle": "uniform profile"}
)
sim_Fickian = pybamm.Simulation(model_Fickian)
sim_Uniform = pybamm.Simulation(model_Uniform)
sim_Fickian.solve(t_eval=[0, 3500])
sim_Uniform.solve(t_eval=[0, 3500])
pybamm.dynamic_plot([sim_Fickian, sim_Uniform], output_variables=output_variables)
```
### 1D current collector model
Add another macroscale dimension "z", employing the "potential pair" option solving for the potential in the current collectors.
```
# choose model options: "potential pair" adds a macroscale dimension z
# along the current collectors and solves for their potentials
model_cc = pybamm.lithium_ion.MPM(
    options={
        "current collector": "potential pair",
        "dimensionality": 1,
        "particle": "uniform profile",  # to reduce computation time
    }
)
# solve
sim_cc = pybamm.Simulation(model_cc)
sim_cc.solve(t_eval=[0, 3600])
# variables to plot (now including the current collector potentials)
output_variables = [
    "X-averaged negative particle surface concentration distribution",
    "X-averaged positive particle surface concentration distribution",
    "X-averaged positive electrode interfacial current density distribution",
    "Negative current collector potential [V]",
    "Positive current collector potential [V]",
    "Terminal voltage [V]",
]
pybamm.dynamic_plot(sim_cc, output_variables=output_variables)
```
## References
The relevant papers for this notebook are:
```
pybamm.print_citations()
```
| github_jupyter |
# Day 3 of 100 Days of Machine Learning
# Python Basics — 4: Functions, Working with functions, Classes, Working with Class, Inheritance
## Content
- Functions
- How to define a function
- Passing value in function
- Classes
- How to make an instance of class and call methods from class
- Inheritance
## Functions
### How to define a function
Let’s have a look at how to define a function in code.
```
def welcome_to_blog():
    """Print a fixed greeting for blog visitors."""
    greeting = "Welcome to my blog."
    print(greeting)

welcome_to_blog()
# expected output:
# Welcome to my blog.
### Passing value in function
```
def welcome_to_blog(name):
    """Greet *name* (title-cased) with a welcome message."""
    print("Hello " + name.title() + ", Welcome to blog.")

welcome_to_blog('Durgesh')

# The tutorial redefines the same function, then greets several visitors.
def welcome_to_blog(name):
    """Greet *name* (title-cased) with a welcome message."""
    print("Hello " + name.title() + ", Welcome to blog.")

for visitor in ('Durgesh', 'John', 'Jessica'):
    welcome_to_blog(visitor)
```
## Classes
```
class class_name():
def __init__(self,..,..):
... do something ..
.. do something ..
```
```
class users():
    """A blog user identified by a name."""

    def __init__(self, name):
        # Store the raw name; greetings() title-cases it on output.
        self.name = name

    def greetings(self):
        # print() inserts a separator space between its two arguments,
        # so the output contains a double space after "Hello".
        display_name = self.name.title()
        print("Hello ", display_name)
```
## Making Instance and Call Methods from Class
```
class users():
    """A blog user who can be greeted by name."""

    def __init__(self, name):
        self.name = name

    def greetings(self):
        # print() inserts a separator space between its two arguments.
        display_name = self.name.title()
        print("Hello ", display_name)


user = users('Durgesh')
user.greetings()
# expected output: Hello  Durgesh

user1 = users('James')
user1.greetings()
# expected output: Hello  James

user1 = users('Jessica')
user1.greetings()
# expected output: Hello  Jessica
```
## Inheritance
```
class users:
    """Base user with a personalised greeting."""

    def __init__(self, name):
        self.name = name

    def greetings(self):
        print("Hello ", self.name.title())


class admin(users):
    """A user flagged as an administrator."""

    def __init__(self, name):
        # Delegate attribute initialisation to the parent class.
        super().__init__(name)

    def describe_admin(self):
        print(self.name.title() + " is an admin.")


admin1 = admin('James')
admin1.greetings()
admin1.describe_admin()
# output
# Hello, James
# James is admin.
user1 = users('Jessica')
user1.greetings()
# output
# Hello, Jessica
```
Today we learned the Basics of python. My goal for this 100 Days of Machine Learning Challenge is to learn machine learning from scratch and help others who want to start their journey in machine learning. Many concepts are known to me, but I am starting from scratch to help beginners of the Machine Learning community and revise concepts.
Thanks for Reading!
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import pandas_datareader as web
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.mlab as mlab
from pandas.plotting import register_matplotlib_converters
import datetime
from tqdm import tqdm
import sqlalchemy as db
import datetime
register_matplotlib_converters()
```
## Stock analyzer
### Goals:
1. Read historic stock price data
- Todo: Find source of historic index data
- Todo: Read data for all of Daniels stocks
- Todo: Write data into db
2. Visualize stock charts
3. Write functions to obtain empirical stock indicators (e.g. Uwe Lang)
4. Run accurate test using above empirical stock indicators using historic data
## 1. Read stock data (pandas_datareader)
```
# Ticker symbols keyed by company/index name, grouped by data source.
yahoo_data = {
    "Apple": "AAPL",
    "Google": "GOOG",
    "Coca Cola": "KO",
}
stooq_data = {
    "S&P 500 - U.S.": "^SPX",
    "Dow Jones": "^DJI",
}
# One bucket per pandas_datareader source name.
source_bucket = {"yahoo": yahoo_data, "stooq": stooq_data}
source_bucket
start = datetime.datetime(2000, 1, 1)
end = datetime.datetime(2019, 10, 29)

# Collect the per-ticker frames in a list and concatenate once at the end.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# concatenating once is also much faster than appending in a loop.
frames = []
for source, data_list in source_bucket.items():
    print("Source: %s" % (source))
    for name, ticker_symbol in data_list.items():
        # Query stock data from the remote source
        # TODO: error handling if symbol is not found
        current_stock_data = web.DataReader(ticker_symbol, source, start, end)
        # Add info about stock in new columns
        current_stock_data["Ticker symbol"] = ticker_symbol
        current_stock_data["Name"] = name
        print(name, ticker_symbol)
        # Add date as column and not just index
        current_stock_data["Date"] = current_stock_data.index
        frames.append(current_stock_data)
stock_data_dataframe = pd.concat(frames, sort=False) if frames else pd.DataFrame()
stock_data_dataframe.head()
```
## 2. Save in database
```
# Open (or create) a local SQLite database file next to the notebook.
db_engine = db.create_engine('sqlite:///stock_data.db')
db_connection = db_engine.connect()
# Create table
db_metadata = db.MetaData()
# NOTE(review): only Open/Close/Volume are stored; the High/Low/Adj Close
# columns present in the downloaded frames are not part of this schema --
# confirm that is intended.
db_stocks_table = db.Table('stock_data', db_metadata,
                           db.Column('Date', db.Date()),
                           db.Column('Open', db.Float(255)),
                           db.Column('Close', db.Float(255)),
                           db.Column('Volume', db.Float(255)),
                           db.Column('Ticker symbol', db.String(255)),
                           db.Column('Name', db.String(255))
                           )
# Emits CREATE TABLE; a no-op if the table already exists.
db_metadata.create_all(db_engine)
# Insert one hand-written sample row to exercise the schema.
db_query = db.insert(db_stocks_table)
values = [{"Date" : datetime.date(2019,10,30), "Open": 27712.1, "Close": 1232143, "Volume": 1233, "Ticker symbol": "ABC", "Name": "Bla"}]
ResultProxy = db_connection.execute(db_query, values)
print(repr(db_metadata.tables['stock_data']))
```
## 3. Plot stock data
```
# Plot stocks: opening price on the left y-axis, trade volume on the right.
plt.rcParams.update({'font.size': 14})
for stock_str in stock_data_dataframe["Name"].unique():
    stock_data_dataframe_subset = stock_data_dataframe[stock_data_dataframe["Name"] == stock_str]
    fig, ax1 = plt.subplots(figsize=(12, 4))
    x = stock_data_dataframe_subset["Date"]
    y1 = stock_data_dataframe_subset["Open"]
    y2 = stock_data_dataframe_subset["Volume"]
    # Address the axes objects explicitly instead of relying on pyplot's
    # "current axes" state, which silently switches to ax2 after twinx() --
    # the original plt.title/plt.ylabel calls only worked by accident.
    ax1.set_title(stock_str)
    ax1.set_ylabel("Opening price (1 / stock)")
    ax2 = ax1.twinx()
    ax1.plot(x, y1, ',-', alpha=0.9)
    ax2.plot(x, y2, ',-', color=(1, 0, 0), alpha=0.5)
    ax2.set_ylabel("Trade volume (1 / day)")
```
| github_jupyter |
# Description
GAM/NGAM calculation for consensus iCBI model
## Background
In this notebook we train GAM/NGAM parameters using the most comprehensive metabolic flux data set collection we could gather.
The GAM represents ATP used in biomass synthesis, and NGAM corresponds to ATP used for maintenance functions. Biomass composition and maintenance requirement can vary significantly between conditions and genotypes. Thus, the purpose of this value in the model is to establish a baseline for realistic quantitative growth rate predictions, but not to create an accurate description under every possible condition.
## Approach
__Model configuration__
We start with the iCBI curated model with the final stoichiometry.
- All secretion products are allowed. While this configuration often leads to the secretion of products which are not commonly observed, it allows the model to accommodate the greatest number of experimental points. If more restrictive secretion constraints are used, the most descriptive data points (i.e. those in AN7 medium which measure amino acids, etc.) may become infeasible. While not shown in this notebook, setting the model to only common secretion products usually does not change the GAM/NGAM values significantly.
- Experimental fluxes are reported by mean and standard deviation (std), thus they can be constrained in multiple ways, as described in the table below. The "both" approach is preferred since it allows for the most flexibility, and it sets upper bounds, preventing rates highly dissimilar to those measured.
|Type| lb | ub|
|----|----|---|
|min| mean-std| 1000|
|mean| mean| 1000|
|max | mean+std| 1000|
|both | mean-std| mean+std|
__Calculation__
1. The NGAM and GAM values are set to 0.
2. The bounds are constrainted to the experimental fluxes, including growth rate, substrate uptake rate, and product secretion rates.
3. The ATP hydrolysis (ATPM) reaction is maximized.
4. The maximum flux of ATPM is plotted against the mean measured growth rate. Linear regression is performed to determine GAM and NGAM which correspond to the slope and intercept of this line, respectively.
## Outcome
- A linear correlation is observed for three different conditions: cellobiose chemostat, cellulose chemostat, and batch reactor (both cellulose and cellobiose). These are saved accordingly in a ATP configuration file.
- A new model, iCBI665_v4 is saved with the batch GAM/NGAM configuration and cellobiose as a substrate.
# Setup
```
import os, sys
# Make the project root importable from this notebook's directory.
sys.path.append(os.path.dirname(os.path.abspath('.')))
#plt.style.use('seaborn')
#plt.style.use('ggplot')
#import matplotlib as mpl
#mpl.style.use('ggplot')
import csv
from tools.train_ATP_costs import *
import tools.conf_model
import cobra as cb
import string
import pandas as pd
from scipy import stats
import tools.ms2bigg
# plotting
import matplotlib.pyplot as plt
import seaborn as sns
# Global seaborn/matplotlib styling: ticks, Arial, 4x4-inch figures.
sns.set(style='ticks',
        font='Arial', font_scale=1,
        rc={'figure.figsize':(4,4)})
sns.set_palette('pastel')
plt.rcParams["figure.figsize"] = (4,4)
# (extra)
# Enlarge fonts for all subsequent figures.
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (4, 4),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
%matplotlib inline
def load_model():
    """Load the intermediate iCBI665_v6 metabolic model from the project tree."""
    model_path = os.path.join(settings.PROJECT_ROOT, 'iCBI', 'intermediate',
                              'iCBI665_v6.json')
    return cb.io.load_json_model(model_path)


model = load_model()
```
# GAM calculation
Create a table with the different options for maximum ATP estimation.
```
# Run the training once per constraint mode (see the table in the text above).
t_min = train(model, constraint_mode='min')
t_mean = train(model, constraint_mode='mean')
t_max = train(model, constraint_mode='max')
t_both = train(model, constraint_mode='both')
# Experimental extracellular flux measurements, one row per dataset.
ds = pd.read_csv(settings.EXTRACELLULAR_FLUX_DATA)
ds = ds.set_index('index')
ds = ds[['Strain', 'Medium', 'Reactor', 'GR']]
# Collect the maximum-ATP column from each training run into one table.
dfs = {'ATP_min': t_min['gamdf'], 'ATP_mean': t_mean['gamdf'], 'ATP_max': t_max['gamdf'], 'ATP_both': t_both['gamdf']}
for col_id, tdf in dfs.items():
    tdf = tdf.set_index('training_dataset_index')
    tdf.index = tdf.index.map(int)  # ds uses integer indices, while gamdf's are not guaranteed to
    atp = tdf.loc[:,['ATP']]
    atp.columns = [col_id]
    atp.index.rename('index', inplace=True)
    ds = ds.join(atp)
# NOTE(review): this std is taken across ALL numeric columns of each row,
# which includes GR as well as the four ATP_* columns -- confirm intended.
std = ds.std(axis=1)
ds['std'] = std
ds
```
- There is a small difference between methods.
## Raw plot of all computed points
```
# Keep only the preferred 'both' estimate for the rest of the analysis.
ds.drop(columns=['ATP_min','ATP_mean','ATP_max','std'],inplace=True)
# Plot all data sets
ax = ds.plot.scatter('GR', 'ATP_both')
ax.set_xlabel('Experimental growth rate (1/h)')
ax.set_ylabel('Maximum predicted ATP (mmol/gCDW/h)')
```
- For the chemostat cellodextrin experiments there is a large amount of available ATP. These excess amounts of ATP are generated by the cleavage of cellodextrins; this ATP may actually be wasted, or the model may not be capturing its use.
## Define each type of point and omit cellodextrins
```
# Scatter each condition separately: colour = substrate, marker = reactor type.
cdf = ds.loc[(ds.Reactor=='Batch') & ds.Medium.str.contains('cellb')]
cb2 = plt.scatter(cdf['GR'], cdf['ATP_both'], c='b', marker='*')
cdf = ds.loc[(ds.Reactor=='Chemostat') & ds.Medium.str.contains('cellb')]
cc = plt.scatter(cdf['GR'], cdf['ATP_both'], c='b', marker='o')
cdf = ds.loc[(ds.Reactor=='Batch') & ds.Medium.str.contains('avcell')]
ab = plt.scatter(cdf['GR'], cdf['ATP_both'], c='g', marker='*')
cdf = ds.loc[(ds.Reactor=='Chemostat') & ds.Medium.str.contains('avcell')]
ac = plt.scatter(cdf['GR'], cdf['ATP_both'], c='g', marker='o')
plt.legend([cb2,cc,ab,ac], ['Cellobiose batch', 'Cellobiose chemostat', 'Avicell batch', 'Avicell chemostat'])
# NOTE(review): `ax` here is the axes object created in the PREVIOUS cell, so
# these labels land on the earlier figure, not this one. plt.xlabel/plt.ylabel
# (or plt.gca()) was likely intended -- confirm.
ax.set_xlabel('Experimental growth rate (1/h)')
ax.set_ylabel('Maximum predicted ATP (mmol/gCDW/h)');
```
## Plot conditions independently
### Cellobiose conditions
```
# Cellobiose-grown datasets only.
cellb = ds.loc[ds.Medium.str.contains('cellb'), :]
cellb.plot.scatter('GR', 'ATP_both')
# Inspect the fast-growing points (the batch reactors discussed below).
cellb.loc[cellb.GR>0.2, :]
```
- The high growth region corresponds to the batch reactors. This can be due to several reasons:
1) They belong to two different WT strains (ATCC vs DSM)
2) They are two different conditions with different maintenance requirements. Notably in batch cultures multiple fermentation products are measured, not only acetate and ethanol.
```
# Train GAM/NGAM using chemostats
cellb_chem_idx = ds.loc[ds.Medium.str.contains('cellb') & ds.Reactor.str.contains('Chemostat'),:].index
excl_idx = list(set(ds.index) - set(cellb_chem_idx))
with model:
trainout = train(model, exclude_data_index=excl_idx , constraint_mode='both')
print(trainout)
```
There is a good R^2 coefficient, however the values of GAM and NGAM are significantly higher than other models.
(iML uses a GAM of 75.5 and NGAM of 6.8)
The issue with such value is that it will render batch data unusable. In particular, at a substrate uptake rate of ~ -4 cellobiose, the model will predict a maximum growth rate of around 0.1.
### Cellulose conditions
```
# Print cellobiose points
cellb = ds.loc[ds.Medium.str.contains('avcell'), :]
cellb.plot.scatter('GR', 'ATP_both')
ds.loc[ds.Reactor.str.contains('Batch') & ds.Medium.str.contains('avcell') , :]
```
In this case the high substrate batch reactor, with a slower growth rate, lies close to the chemostat dataset.
```
# Train using GAM/NGAM using chemostats
cell_idx = ds.loc[ds.Medium.str.contains('avcell') & (ds.GR <0.2),:].index
excl_idx = list(set(ds.index) - set(cell_idx)) + [7] # 7 is NaN dataset
with model:
trainout_cellulose = train(model, exclude_data_index=excl_idx , constraint_mode='both')
#trainout_cellulose.pop('gamdf')
print(trainout_cellulose)
```
# Batch conditions
It has become apparent that the energetic requirements of chemostat and batch cultures are different. Instead of thinking about cellulose vs. cellobiose, a batch-specific energy maintenance can be determined.
```
bds = ds.loc[ds.Reactor=='Batch',:]
bds = bds.drop(8) # The high loading avicell vented datapoint is not consistent with the batch trend
bds = bds.drop(2) # The hydG-ech mutant also seems to be an outlier
batch_outlier_idx = [2,8]
bds
x = bds['GR'].as_matrix()
y = bds['ATP_both'].as_matrix()
s = stats.linregress(x,y)
print(s)
batch_gam = s[0]
batch_ngam = s[1]
batch_r = s[2]
```
# Final plot
```
# Combined figure: all data points plus the three fitted GAM/NGAM lines.
plt.figure(figsize=(4,4))
#sns.set_style("ticks")
#plt.style.use('seaborn-ticks')
#palette = sns.p
sns.set_palette("pastel")
palette = sns.color_palette()
palette_dark = sns.color_palette("deep")
## Scatter plot
#plt.rcParams['font.size'] = 12
marker_size = 50
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (6, 6), # TODO Try 4x4
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
text_fontisze = 12
# One scatter series per condition (colour = substrate, marker = reactor type).
cdf = ds.loc[(ds.Reactor=='Batch') & ds.Medium.str.contains('cellb')]
cb1 = plt.scatter(cdf['GR'], cdf['ATP_both'], c='b', marker='*', s=marker_size)
cdf = ds.loc[(ds.Reactor=='Chemostat') & ds.Medium.str.contains('cellb')]
cc = plt.scatter(cdf['GR'], cdf['ATP_both'], c='b', marker='o', s=marker_size)
cdf = ds.loc[(ds.Reactor=='Batch') & ds.Medium.str.contains('avcell')]
ab = plt.scatter(cdf['GR'], cdf['ATP_both'], marker='*', s=marker_size, facecolors='none', edgecolors='g')
cdf = ds.loc[(ds.Reactor=='Chemostat') & ds.Medium.str.contains('avcell')]
ac = plt.scatter(cdf['GR'], cdf['ATP_both'], marker='o', s=marker_size, facecolors='none', edgecolors='g')
# Circle around outliers
# NOTE(review): batch_outlier_idx = [2, 8] were dropped BY LABEL earlier, but
# .iloc here selects by POSITION within the batch subset -- confirm these
# circles land on the intended points.
cdf = ds.loc[(ds.Reactor=='Batch')]
cdf = cdf.iloc[batch_outlier_idx]
ol = plt.scatter(cdf['GR'], cdf['ATP_both'], s=140, facecolors='none', edgecolors='r',linewidth=2)
# Regression
## Cellobiose
gamdf = trainout['gamdf']
x = gamdf['growth_rate']
y = gamdf['ATP']
def line(x_val):
    # GAM = slope, NGAM = intercept of the fitted line.
    return trainout['GAM']*x_val+ trainout['NGAM']
plt.plot([0, x[-1:]], [line(0), line(x[-1:])], '--',c=palette_dark[0])
equation_str = '$y={}x + {}$\n$R^2={}$'.format(round(trainout['GAM'], 2), round(trainout['NGAM'], 1),
                                               round(trainout['rsquared'], 2))
plt.text(0.13, 35, equation_str, color=palette_dark[0], fontsize=text_fontisze)
## Cellulose
gamdf = trainout_cellulose['gamdf']
x = gamdf['growth_rate']
y = gamdf['ATP']
def line(x_val):
    return trainout_cellulose['GAM']*x_val+ trainout_cellulose['NGAM']
plt.plot([0, x[-1:]], [line(0), line(x[-1:])], '--', c=palette_dark[2])
equation_str = '$y={}x + {}$\n$R^2={}$'.format(round(trainout_cellulose['GAM'], 2), round(trainout_cellulose['NGAM'], 2),
                                               round(trainout_cellulose['rsquared'], 2))
plt.text(0.14, 19, equation_str, color=palette_dark[2], fontsize=text_fontisze)
## Batch
x = bds['GR']
y = bds['ATP_both']
def line(x_val):
    return batch_gam*x_val+ batch_ngam
# NOTE(review): x[5] indexes the Series by LABEL 5, not by position, and
# batch_r is linregress's rvalue (r) yet is typeset below as R^2 -- confirm.
plt.plot([0, x[5]], [line(0), line(x[5])], '--', c=palette_dark[4])
equation_str = '$y={}x + {}$\n$R^2={}$'.format(round(batch_gam, 2), round(batch_ngam, 2),
                                               round(batch_r, 2))
plt.text(0.22, 9, equation_str, color=palette_dark[4], fontsize=text_fontisze)
## Legend
legend = plt.legend([cb1,cc,ab,ac, ol], ['Cellobiose batch', 'Cellobiose chemostat', 'Avicell batch', 'Avicell chemostat','Outliers'], fontsize=12, frameon=1, ncol=1)
legend.get_frame().set_facecolor('white')
## Labels
ax = plt.gca()
ax.set_xlabel('Experimental growth rate (1/h)')
ax.set_ylabel('Maximum predicted ATP \n(mmol/gCDW/h)')
ax.set_xlim(left=0)
ax.set_ylim(bottom=0)
## lines
sns.despine()
## save
plt.tight_layout()
plt.savefig('atp_training.svgz')
```
# Model setup
The three different GAM/NGAM configurations will be stored in the file atp_param.csv and read into the model when the method tools.conf_model.set_conditions is used.
```
# Create ATP parameter files
# newline='' is required by the csv module to avoid blank rows on Windows.
with open(os.path.join(settings.MEDIA_ROOT, 'atp_param.csv'), 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['parameter', 'cellobiose_chemostat','cellulose_chemostat','batch'])
    writer.writerow(['GAM', trainout['GAM'], trainout_cellulose['GAM'], batch_gam])
    writer.writerow(['NGAM', trainout['NGAM'], trainout_cellulose['NGAM'], batch_ngam])
# Create the iCBI model file with batch parameters as default
model = load_model()
# NOTE(review): `conf_model` is used unqualified although the setup cell runs
# `import tools.conf_model`; this only works if the wildcard import above
# re-exports it -- confirm.
conf_model.set_conditions(model, 'comp_minimal_cellobiose', secretion='common_secretion', reactor_type='batch')
#cb.io.save_json_model(model, os.path.join('iCBI655bigg_cellb_batch.json'))
cb.io.save_json_model(model,os.path.join('intermediate','iCBI665_v7.json'))
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Removing `if` Statements from Expressions
## Author: Patrick Nelson
### NRPy+ Source Code for this module:
* [Min_Max_and_Piecewise_Expressions.py](../edit/Min_Max_and_Piecewise_Expressions.py) Contains functions that can be used to compute the minimum or maximum of two values and to implement piecewise-defined expressions
## Introduction:
Conditional statements are a critical tool in programming, allowing us to control the flow through a program to avoid pitfalls, code piecewise-defined functions, and so forth. However, there are times when it is useful to work around them. It takes processor time to evaluate whether or not to execute a code block, so for some expressions, performance can be improved by rewriting the expression to use an absolute value function in a manner upon which we will expand in this tutorial. Even more relevant to NRPy+ are piecewise-defined functions. These inherently involve `if` statements, but NRPy+'s automatic code generation cannot handle these by itself, requiring hand-coding to be done. However, if it is possible to rewrite the expression in terms of absolute values, then NRPy+ can handle the entire thing itself.
The absolute value is a function that simply returns the magnitude of its argument, a positive value. That is,
\begin{align}
|x|&= \left \{ \begin{array}{lll}x & \mbox{if} & x \geq 0 \\
-x & \mbox{if} & x \leq 0 \end{array} \right. \\
\end{align}
In C, this is implemented as `fabs()`, which merely has to make the first bit of a double-precision floating point number 0, and is thus quite fast.
There are myriad uses for these tricks in practice. One example comes from GRMHD (and, by extension, the special cases of GRFFE and GRHD), in which it is necessary to limit the velocity of the plasma in order to keep the simulations stable. This is done by calculating the Lorentz factor $\Gamma$ of the plasma and comparing to some predefined maximum $\Gamma_\max$. Then, if
$$
R = 1-\frac{1}{\Gamma^2} > 1-\frac{1}{\Gamma_{\max}^2} = R_\max,
$$
we rescale the velocities by $\sqrt{R_\max/R}$. In NRPy+, we instead always rescale by
$$
\sqrt{\frac{\min(R,R_\max)}{R+\epsilon}},
$$
which has the same effect while allowing the entire process to be handled by NRPy+'s automatic code generation. ($\epsilon$ is some small number chosen to avoid division by zero without affecting the results otherwise.) See [here](Tutorial-GRHD_Equations-Cartesian.ipynb#convertvtou) for more information on this.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#min_max): Minimum and Maximum
1. [Step 1.a](#confirm): Confirm that these work for real numbers
1. [Step 2](#piecewise): Piecewise-defined functions
1. [Step 3](#sympy): Rewrite functions to work with symbolic expressions
1. [Step 4](#validation): Validation against `Min_Max_and_Piecewise_Expressions` NRPy+ module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='min_max'></a>
# Step 1: Minimum and Maximum \[Back to [top](#toc)\]
$$\label{min_max}$$
Our first job will be to rewrite minimum and maximum functions without if statements. For example, the typical implementation of `min(a,b)` will be something like this:
```python
def min(a,b):
    # NOTE: illustrative only -- shadows Python's builtin min(). This is the
    # branching form that the |.|-based expressions below replace.
    if a<b:
        return a
    else:
        return b
```
However, to take full advantage of NRPy+'s automated function generation capabilities, we want to write this without the `if` statements, replacing them with calls to `fabs()`. We will define these functions in the following way:
$$\boxed{
\min(a,b) = \tfrac{1}{2} \left( a+b - \lvert a-b \rvert \right)\\
\max(a,b) = \tfrac{1}{2} \left( a+b + \lvert a-b \rvert \right).}
$$
<a id='confirm'></a>
## Step 1.a: Confirm that these work for real numbers \[Back to [top](#toc)\]
$$\label{confirm}$$
For real numbers, these operate exactly as expected. In the case $a>b$,
\begin{align}
\min(a,b) &= \tfrac{1}{2} \left( a+b - (a-b) \right) = b \\
\max(a,b) &= \tfrac{1}{2} \left( a+b + (a-b) \right) = a, \\
\end{align}
and in the case $a<b$, the reverse holds:
\begin{align}
\min(a,b) &= \tfrac{1}{2} \left( a+b - (b-a) \right) = a \\
\max(a,b) &= \tfrac{1}{2} \left( a+b + (b-a) \right) = b, \\
\end{align}
In code, we will represent this as:
```
min_noif(a,b) = sp.Rational(1,2)*(a+b-nrpyAbs(a-b))
max_noif(a,b) = sp.Rational(1,2)*(a+b+nrpyAbs(a-b))
```
For demonstration purposes, we will use `np.absolute()` and floating point numbers.
```
import numpy as np # NumPy: Python module specializing in numerical computations
import matplotlib.pyplot as plt # matplotlib: Python module specializing in plotting capabilities
thismodule = "Min_Max_and_Piecewise_Expressions"

# NumPy demonstration versions of the branch-free min/max; the SymPy/NRPy+
# forms are defined later in this notebook.
def min_noif(a, b):
    """Branch-free minimum: (a + b - |a - b|) / 2."""
    return 0.5 * (a + b - np.absolute(a - b))


def max_noif(a, b):
    """Branch-free maximum: (a + b + |a - b|) / 2."""
    return 0.5 * (a + b + np.absolute(a - b))


# Exercise the helpers on a concrete pair of values.
a_number = 5.0
another_number = 10.0
print("The minimum of " + str(a_number) + " and " + str(another_number)
      + " is " + str(min_noif(a_number, another_number)))
```
Feel free to test other cases above if you'd like. Note that we use a suffix, `_noif`, to avoid conflicts with other functions. When using this in NRPy+, make sure you use `sp.Rational()` and the `nrpyAbs()` function, which will always be interpreted as the C function `fabs()` (Sympy's `sp.Abs()` may get interpreted as $\sqrt{zz^*}$, for instance).
<a id='piecewise'></a>
# Step 2: Piecewise-defined functions \[Back to [top](#toc)\]
$$\label{piecewise}$$
Next, we'll define functions to represent branches of a piecewise-defined function. For example, consider the function
\begin{align}
f(x) &= \left \{ \begin{array}{lll} \frac{1}{10}x^2+1 & \mbox{if} & x \leq 0 \\
\exp(\frac{x}{5}) & \mbox{if} & x > 0 \end{array} \right. , \\
\end{align}
which is continuous, but not differentiable at $x=0$.
To solve this problem, let's add the two parts together, multiplying each part by a function that is either one or zero depending on $x$. To define $x \leq 0$, this can be done by multiplying by the minimum of $x$ and $0$. We also will need to normalize this. To avoid putting a zero in the denominator, however, we will add some small $\epsilon$ to the denominator, i.e.,
$$
\frac{\min(x,0)}{x-\epsilon}
$$
This $\epsilon$ corresponds `TINYDOUBLE` in NRPy+; so, we will define the variable here with its default value, `1e-100`. Additionally, to get the correct behavior on the boundary, we shift the boundary by $\epsilon$, giving us
$$
\frac{\min(x-\epsilon,0)}{x-\epsilon}
$$
The corresponding expression for $x > 0$ can be written as
$$
\frac{\max(x,0)}{x+\epsilon},
$$
using a positive small number to once again avoid division by zero.
When using these for numerical relativity codes, it is important to consider the relationship between $\epsilon$, or `TINYDOUBLE`, and the gridpoints in the simulation. As long as $\epsilon$ is positive and large enough to avoid catastrophic cancellation, these functional forms avoid division by zero, as proven [below](#proof).
So, we'll code NumPy versions of these expressions below. Naturally, there are many circumstances in which one will want the boundary between two pieces of a function to be something other than 0; if we let that boundary be $x^*$, this can easily be done by passing $x-x^*$ to the maximum/minimum functions. For the sake of code readability, we will write the functions to pass $x$ and $x^*$ as separate arguments. Additionally, we code separate functions for $\leq$ and $<$, and likewise for $\geq$ and $>$. The "or equal to" versions add a small offset to the boundary to give the proper behavior on the desired boundary.
```
# Small positive offset: shifts the comparison boundary and keeps all
# denominators nonzero without otherwise affecting results.
TINYDOUBLE = 1.0e-100


def coord_leq_bound(x, xstar):
    """Return 1.0 if x <= xstar, 0.0 otherwise (requires TINYDOUBLE)."""
    shifted = x - xstar - TINYDOUBLE
    return min_noif(shifted, 0.0) / shifted


def coord_geq_bound(x, xstar):
    """Return 1.0 if x >= xstar, 0.0 otherwise (requires TINYDOUBLE)."""
    shifted = x - xstar + TINYDOUBLE
    return max_noif(shifted, 0.0) / shifted


def coord_less_bound(x, xstar):
    """Return 1.0 if x < xstar, 0.0 otherwise (requires TINYDOUBLE)."""
    return min_noif(x - xstar, 0.0) / (x - xstar - TINYDOUBLE)


def coord_greater_bound(x, xstar):
    """Return 1.0 if x > xstar, 0.0 otherwise (requires TINYDOUBLE)."""
    return max_noif(x - xstar, 0.0) / (x - xstar + TINYDOUBLE)
# Now define the equation and plot it.
# Sample x on [-10, 10] and stitch the two branches together using the
# branch-free indicator functions defined above.
x_data = np.arange(start = -10.0, stop = 11.0, step = 1.0)
y_data = coord_less_bound(x_data,0.0)*(0.1*x_data**2.0+1.0)\
        +coord_geq_bound(x_data,0.0)*np.exp(x_data/5.0)
plt.figure()
a = plt.plot(x_data,y_data,'k',label="Piecewise function")
b = plt.plot(x_data,0.1*x_data**2.0+1.0,'b.',label="y=0.1*x^2+1")
c = plt.plot(x_data,np.exp(x_data/5.0),'g.',label="y=exp(x/5)")
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
plt.show()
```
The plot above shows the expected piecewise-defined function. It is important in applying these functions that each greater-than be paired with a less-than-or-equal-to, or vice versa. Otherwise, the way these are written, a point on the boundary will be set to zero or twice the expected value.
These functions can be easily combined for more complicated piecewise-defined functions; if a piece of a function is defined as $f(x)$ on $x^*_- \leq x < x^*_+$, for instance, simply multiply by both functions, e.g.
```
coord_geq_bound(x,x_star_minus)*coord_less_bound(x,x_star_plus)*f(x)
```
<a id='sympy'></a>
# Step 3: Rewrite functions to work with symbolic expressions \[Back to [top](#toc)\]
$$\label{sympy}$$
In order to use this with sympy expressions in NRPy+, we will need to rewrite the `min` and `max` functions with slightly different syntax. Critically, we will change `0.5` to `sp.Rational(1,2)` and calls to `np.absolute()` to `nrpyAbs()`. We will also need to import `outputC.py` here for access to `nrpyAbs()`. The other functions will not require redefinition, because they only call specific combinations of the `min` and `max` function.
In practice, we want to use `nrpyAbs()` and *not* `sp.Abs()` with our symbolic expressions, which will force `outputC` to use the C function `fabs()`, and not try to multiply the argument by its complex conjugate and then take the square root.
```
from outputC import nrpyAbs # NRPy+: Core C code output module
def min_noif(a, b):
    """Symbolic branch-free minimum of a and b: (a + b - |a - b|)/2.

    The zero-argument special cases emit shorter expressions.
    """
    half = sp.Rational(1, 2)
    zero = sp.sympify(0)
    if a == zero:
        return half * (b - nrpyAbs(b))
    if b == zero:
        return half * (a - nrpyAbs(a))
    return half * (a + b - nrpyAbs(a - b))


def max_noif(a, b):
    """Symbolic branch-free maximum of a and b: (a + b + |a - b|)/2."""
    half = sp.Rational(1, 2)
    zero = sp.sympify(0)
    if a == zero:
        return half * (b + nrpyAbs(b))
    if b == zero:
        return half * (a + nrpyAbs(a))
    return half * (a + b + nrpyAbs(a - b))
```
<a id='validation'></a>
# Step 4: Validation against `Min_Max_and_Piecewise_Expressions` NRPy+ module \[Back to [top](#toc)\]
$$\label{validation}$$
As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between
1. this tutorial and
2. the NRPy+ [Min_Max_and_Piecewise_Expressions](../edit/Min_Max_and_Piecewise_Expressions.py) module.
```
# Reset & redefine TINYDOUBLE for proper comparison
%reset_selective -f TINYDOUBLE
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: parameter interface
TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100)
import Min_Max_and_Piecewise_Expressions as noif
# NOTE: despite its name, all_passed counts FAILURES; 0 means everything agreed.
all_passed=0

def comp_func(expr1,expr2,basename,prefixname2="noif."):
    # Return 0 if the two expressions agree symbolically, 1 (with a printed
    # diff) otherwise.
    passed = 0
    if str(expr1-expr2)!="0":
        print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
        passed = 1
    return passed

a,b = sp.symbols("a b")
# Compare each notebook-defined function against the module implementation.
here = min_noif(a,b)
there = noif.min_noif(a,b)
all_passed += comp_func(here,there,"min_noif")
here = max_noif(a,b)
there = noif.max_noif(a,b)
all_passed += comp_func(here,there,"max_noif")
here = coord_leq_bound(a,b)
there = noif.coord_leq_bound(a,b)
all_passed += comp_func(here,there,"coord_leq_bound")
here = coord_geq_bound(a,b)
there = noif.coord_geq_bound(a,b)
all_passed += comp_func(here,there,"coord_geq_bound")
here = coord_less_bound(a,b)
there = noif.coord_less_bound(a,b)
all_passed += comp_func(here,there,"coord_less_bound")
here = coord_greater_bound(a,b)
there = noif.coord_greater_bound(a,b)
all_passed += comp_func(here,there,"coord_greater_bound")
import sys
if all_passed==0:
    print("ALL TESTS PASSED!")
else:
    print("ERROR: AT LEAST ONE TEST DID NOT PASS")
    sys.exit(1)
```
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Min_Max_and_Piecewise_Expressions.pdf](Tutorial-Min_Max_and_Piecewise_Expressions.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Convert this notebook into a LaTeX'd PDF in the NRPy+ tutorial root directory.
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Min_Max_and_Piecewise_Expressions")
```
| github_jupyter |
```
import numpy as np  # useful for many scientific computing in Python
import pandas as pd # primary data structure library
import folium       # map-rendering library built on Leaflet.js
print('Folium installed and imported!')
# define the world map
world_map = folium.Map()
# display world map
world_map
# define the world map centered around Canada with a low zoom level
world_map = folium.Map(location=[56.130, -106.35], zoom_start=4)
# display world map
world_map
# define the world map centered around Canada with a higher zoom level
world_map = folium.Map(location=[56.130, -106.35], zoom_start=8)
# display world map
world_map
# create a Stamen Toner map of the world centered around Canada
# NOTE(review): recent folium versions require an explicit attribution (attr=)
# for Stamen tile sets and may reject these tile names outright -- confirm the
# installed folium version.
world_map = folium.Map(location=[56.130, -106.35], zoom_start=4, tiles='Stamen Toner')
# display map
world_map
# create a Stamen Terrain map of the world centered around Canada
# (the original comment said "Toner"; this cell uses the Terrain tile set)
world_map = folium.Map(location=[56.130, -106.35], zoom_start=4, tiles='Stamen Terrain')
# display map
world_map
# create a world map with a Mapbox Bright style.
# NOTE(review): 'Mapbox Bright' was removed from newer folium releases --
# confirm this still renders with the installed version.
world_map = folium.Map(tiles='Mapbox Bright')
# display the map
world_map
# Download the San Francisco police-incident dataset (CSV) into a dataframe.
df_incidents = pd.read_csv('https://ibm.box.com/shared/static/nmcltjmocdi8sd5tk93uembzdec8zyaq.csv')
print('Dataset downloaded and read into a pandas dataframe!')
df_incidents.head()
df_incidents.shape
# get the first 100 crimes in the df_incidents dataframe
limit = 100
df_incidents = df_incidents.iloc[0:limit, :]
df_incidents.shape
# San Francisco latitude and longitude values
latitude = 37.77
longitude = -122.42
# create map and display it
sanfran_map = folium.Map(location=[latitude, longitude], zoom_start=12)
# display the map of San Francisco
sanfran_map
# instantiate a feature group for the incidents in the dataframe
incidents = folium.map.FeatureGroup()
# loop through the 100 crimes and add each to the incidents feature group
# (columns Y and X hold latitude and longitude respectively)
for lat, lng, in zip(df_incidents.Y, df_incidents.X):
    incidents.add_child(
        folium.features.CircleMarker(
            [lat, lng],
            radius=5, # define how big you want the circle markers to be
            color='yellow',
            fill=True,
            fill_color='blue',
            fill_opacity=0.6
        )
    )
# add incidents to map
sanfran_map.add_child(incidents)
# NOTE(review): the cell below repeats the block above verbatim, adding a
# second identical feature group to the same map (every marker is drawn
# twice) -- confirm the duplication is intentional.
# instantiate a feature group for the incidents in the dataframe
incidents = folium.map.FeatureGroup()
# loop through the 100 crimes and add each to the incidents feature group
for lat, lng, in zip(df_incidents.Y, df_incidents.X):
    incidents.add_child(
        folium.features.CircleMarker(
            [lat, lng],
            radius=5, # define how big you want the circle markers to be
            color='yellow',
            fill=True,
            fill_color='blue',
            fill_opacity=0.6
        )
    )
# add incidents to map
sanfran_map.add_child(incidents)
# create map and display it
sanfran_map = folium.Map(location=[latitude, longitude], zoom_start=12)
# loop through the 100 crimes and add each to the map
# popup=label shows the crime category when a marker is clicked
for lat, lng, label in zip(df_incidents.Y, df_incidents.X, df_incidents.Category):
    folium.features.CircleMarker(
        [lat, lng],
        radius=5, # define how big you want the circle markers to be
        color='yellow',
        fill=True,
        popup=label,
        fill_color='blue',
        fill_opacity=0.6
    ).add_to(sanfran_map)
# show map
sanfran_map
from folium import plugins
# let's start again with a clean copy of the map of San Francisco
sanfran_map = folium.Map(location = [latitude, longitude], zoom_start = 12)
# instantiate a mark cluster object for the incidents in the dataframe
# (nearby markers are grouped and expand as the user zooms in)
incidents = plugins.MarkerCluster().add_to(sanfran_map)
# loop through the dataframe and add each data point to the mark cluster
for lat, lng, label, in zip(df_incidents.Y, df_incidents.X, df_incidents.Category):
    folium.Marker(
        location=[lat, lng],
        icon=None,
        popup=label,
    ).add_to(incidents)
# display map
sanfran_map
# Canada immigration workbook; skip the 20 header rows and the 2 footer rows.
df_can = pd.read_excel('https://ibm.box.com/shared/static/lw190pt9zpy5bd1ptyg2aw15awomz9pu.xlsx',
                       sheet_name='Canada by Citizenship',
                       skiprows=range(20),
                       skipfooter=2)  # fix: 'skip_footer' was deprecated and later removed from pandas; 'skipfooter' is the supported spelling
print('Data downloaded and read into a dataframe!')
df_can.head()
# print the dimensions of the dataframe
print(df_can.shape)
# clean up the dataset to remove unnecessary columns (eg. REG)
df_can.drop(['AREA','REG','DEV','Type','Coverage'], axis=1, inplace=True)
# let's rename the columns so that they make sense
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True)
# for sake of consistency, let's also make all column labels of type string
df_can.columns = list(map(str, df_can.columns))
# add total column (row-wise sum over the numeric yearly columns)
df_can['Total'] = df_can.sum(axis=1)
# years that we will be using in this lesson - useful for plotting later on
years = list(map(str, range(1980, 2014)))
print ('data dimensions:', df_can.shape)
df_can.head()
# download countries geojson file (IPython shell escape -- notebook only)
!wget --quiet https://ibm.box.com/shared/static/cto2qv7nx6yq19logfcissyy4euo8lho.json -O world_countries.json
print('GeoJSON file downloaded!')
world_geo = r'world_countries.json' # geojson file
# create a plain world map
# NOTE(review): the 'Mapbox Bright' tileset requires an API key in newer
# folium releases -- confirm, or fall back to the default OpenStreetMap tiles.
world_map = folium.Map(location=[0, 0], zoom_start=2, tiles='Mapbox Bright')
# generate choropleth map using the total immigration of each country to Canada from 1980 to 2013
# (country names in df_can['Country'] are matched against the geojson
# 'feature.properties.name' field)
world_map.choropleth(
    geo_data=world_geo,
    data=df_can,
    columns=['Country', 'Total'],
    key_on='feature.properties.name',
    fill_color='YlOrRd',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Immigration to Canada'
)
# display map
world_map
world_geo = r'world_countries.json'
# create a numpy array of length 6 that has linear spacing from the minimum total immigration to the maximum total immigration
threshold_scale = np.linspace(df_can['Total'].min(),
                              df_can['Total'].max(),
                              6, dtype=int)
threshold_scale = threshold_scale.tolist() # change the numpy array to a list
threshold_scale[-1] = threshold_scale[-1] + 1 # make sure that the last value of the list is greater than the maximum immigration
# let Folium determine the scale.
world_map = folium.Map(location=[0, 0], zoom_start=2, tiles='Mapbox Bright')
world_map.choropleth(
    geo_data=world_geo,
    data=df_can,
    columns=['Country', 'Total'],
    key_on='feature.properties.name',
    threshold_scale=threshold_scale,  # custom colour bins instead of folium's defaults
    fill_color='YlOrRd',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Immigration to Canada',
    reset=True
)
world_map
```
| github_jupyter |
## Dependencies
```
import json, warnings, shutil
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
# Fix all RNG seeds for reproducibility (seed_everything comes from
# tweet_utility_scripts) and silence library warnings for cleaner output.
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
```
# Load data
```
# Pre-tokenized 5-fold split of the Tweet Sentiment Extraction dataset.
database_base_path = '/kaggle/input/tweet-dataset-split-roberta-base-96/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
display(k_fold.head())
# Unzip files (IPython shell escapes; each archive holds one fold's .npy arrays)
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_1.tar.gz
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_2.tar.gz
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_3.tar.gz
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_4.tar.gz
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_5.tar.gz
```
# Model parameters
```
# Tokenizer files shipped with the preprocessed dataset, plus the pretrained
# RoBERTa weights/config from the qa-transformers dataset.
vocab_path = database_base_path + 'vocab.json'
merges_path = database_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
# Run hyper-parameters. 'smooth_factor' drives the custom label smoothing;
# 'question_size' is the token offset passed to decode() -- presumably the
# length of the question prefix prepended to each sample (verify against the
# preprocessing scripts).
config = {
    'MAX_LEN': 96,
    'BATCH_SIZE': 32,
    'EPOCHS': 5,
    'LEARNING_RATE': 3e-5,
    'ES_PATIENCE': 1,
    'question_size': 4,
    'N_FOLDS': 5,
    'smooth_factor': .5,
    'base_model_path': base_path + 'roberta-base-tf_model.h5',
    'config_path': base_path + 'roberta-base-config.json'
}
# Persist the run configuration next to the model outputs.
# fix: dropped the redundant json.loads(json.dumps(config)) round-trip --
# config is already a plain JSON-serializable dict.
with open('config.json', 'w') as json_file:
    json.dump(config, json_file)
```
# Tokenizer
```
# Byte-level BPE tokenizer matching the pretrained RoBERTa vocabulary.
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
                                  lowercase=True, add_prefix_space=True)
# NOTE(review): newer `tokenizers` releases expect a file path (not a
# directory) for save(); confirm the installed version accepts './'.
tokenizer.save('./')
```
## Learning rate schedule
```
# Exponential learning-rate decay from the configured LEARNING_RATE, with a floor.
LR_MIN = 1e-6
LR_MAX = config['LEARNING_RATE']
LR_EXP_DECAY = .5

@tf.function
def lrfn(epoch):
    # LR_MAX * 0.5**epoch, clipped below at LR_MIN. `epoch` may be a tf scalar
    # tensor (see the optimizer's learning_rate lambda in the training loop),
    # so the clamp is kept in autograph-friendly statement form.
    lr = LR_MAX * LR_EXP_DECAY**epoch
    if lr < LR_MIN:
        lr = LR_MIN
    return lr

# Preview the schedule over the configured number of epochs.
rng = [i for i in range(config['EPOCHS'])]
y = [lrfn(x) for x in rng]
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)

def model_fn(MAX_LEN):
    """Build the RoBERTa span-extraction model.

    Two softmax heads over token positions predict the start and end indices
    of the selected span. Inputs/outputs and layer names are unchanged:
    inputs 'input_ids' and 'attention_mask', outputs 'y_start' and 'y_end'.
    """
    token_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    token_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')

    encoder = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    sequence_output, _ = encoder({'input_ids': token_ids, 'attention_mask': token_mask})
    sequence_output = layers.Dropout(.1)(sequence_output)

    def span_head(head_name):
        # One logit per token -> (batch, MAX_LEN) -> softmax over positions.
        logits = layers.Flatten()(layers.Dense(1)(sequence_output))
        return layers.Activation('softmax', name=head_name)(logits)

    start_probs = span_head('y_start')
    end_probs = span_head('y_end')
    return Model(inputs=[token_ids, token_mask], outputs=[start_probs, end_probs])
```
# Train
```
import scipy
# Geometric label-smoothing weights: smooth_factor**1 ... smooth_factor**(MAX_LEN-1).
dist = [pow(config['smooth_factor'], i) for i in range(1, config['MAX_LEN'])]

def smooth_labels_dist_start(y, dist=dist):
    """Smooth a one-hot start-index label vector.

    Positions after the argmax receive geometrically decaying mass from
    `dist`; positions before it receive the mirrored (increasing) tail.
    The slice assignments mutate `y` in place; the renormalized result
    (summing to 1) is a NEW array that is returned -- the in-place `y`
    itself is left unnormalized.
    """
    center = y.argmax()
    y[center+1:] = dist[1:len(y[center:])]
    # fix: when the peak is at index 0 the original assigned the full reversed
    # `dist` to the empty slice y[:0], which raises ValueError; skip instead.
    if center > 0:
        y[:center] = dist[::-1][-center:]
    # Re-scale
    y = y / np.sum(y)
    return y
def smooth_labels_dist_end(y, dist=dist):
    """Smooth a one-hot end-index label vector (mirror of ..._start).

    Mass decays geometrically after the argmax and ramps up toward it from
    the left. The slice assignments mutate `y` in place; the renormalized
    result is returned as a NEW array (the in-place `y` stays unnormalized).
    """
    center = y.argmax()
    y[center+1:] = dist[:len(y[center+1:])]
    # fix: the original left-side assignment crashed at the edges -- for
    # center == 0 it assigned a non-empty sequence to the empty slice y[:0],
    # and for center > len(dist) - 1 the sequence was shorter than the slice.
    # Align the assignment window to the actual sequence length instead.
    left = dist[:0:-1][-center:] if center > 0 else []
    y[center - len(left):center] = left
    # Re-scale
    y = y / np.sum(y)
    return y
# tf.data tuning constant and the current (possibly single-device) strategy.
AUTO = tf.data.experimental.AUTOTUNE
strategy = tf.distribute.get_strategy()
history_list = []
# Custom distributed training loop (no Keras .fit), one pass per fold.
for n_fold in range(config['N_FOLDS']):
    n_fold +=1
    print('\nFOLD: %d' % (n_fold))
    # Load data (pre-tokenized arrays unpacked from fold_<n>.tar.gz above)
    base_data_path = 'fold_%d/' % (n_fold)
    x_train = np.load(base_data_path + 'x_train.npy')
    y_train = np.load(base_data_path + 'y_train.npy')
    x_valid = np.load(base_data_path + 'x_valid.npy')
    y_valid = np.load(base_data_path + 'y_valid.npy')
    # Apply custom smoothing
    # NOTE(review): np.apply_along_axis returns a NEW array and the return
    # values are discarded here, so the smoothing most likely never reaches
    # y_train -- verify, and assign the results back if smoothing is intended.
    np.apply_along_axis(smooth_labels_dist_start, -1, y_train[0])
    np.apply_along_axis(smooth_labels_dist_end, -1, y_train[1])
    # steps per epoch; assumes axis 1 of x_* is the sample axis -- TODO confirm
    step_size = x_train.shape[1] // config['BATCH_SIZE']
    valid_step_size = x_valid.shape[1] // config['BATCH_SIZE']
    ### Delete data dir (arrays are already in memory; frees disk space)
    shutil.rmtree(base_data_path)
    # Build TF datasets (helpers come from the tweet utility scripts)
    train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED))
    valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid, y_valid, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))
    train_data_iter = iter(train_dist_ds)
    valid_data_iter = iter(valid_dist_ds)
    # Step functions -- redefined per fold so they close over this fold's
    # model, optimizer and metric objects (tf.function retraces each fold).
    @tf.function
    def train_step(data_iter):
        def train_step_fn(x, y):
            with tf.GradientTape() as tape:
                probabilities = model(x, training=True)
                loss_start = loss_fn_start(y['y_start'], probabilities[0])
                loss_end = loss_fn_end(y['y_end'], probabilities[1])
                loss = tf.math.add(loss_start, loss_end)
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            # update metrics
            train_acc_start.update_state(y['y_start'], probabilities)
            train_acc_end.update_state(y['y_end'], probabilities)
            train_loss.update_state(loss)
            train_loss_start.update_state(loss_start)
            train_loss_end.update_state(loss_end)
        for _ in tf.range(step_size):
            strategy.experimental_run_v2(train_step_fn, next(data_iter))
    @tf.function
    def valid_step(data_iter):
        def valid_step_fn(x, y):
            probabilities = model(x, training=False)
            loss_start = loss_fn_start(y['y_start'], probabilities[0])
            loss_end = loss_fn_end(y['y_end'], probabilities[1])
            loss = tf.math.add(loss_start, loss_end)
            # update metrics
            valid_acc_start.update_state(y['y_start'], probabilities)
            valid_acc_end.update_state(y['y_end'], probabilities)
            valid_loss.update_state(loss)
            valid_loss_start.update_state(loss_start)
            valid_loss_end.update_state(loss_end)
        for _ in tf.range(valid_step_size):
            strategy.experimental_run_v2(valid_step_fn, next(data_iter))
    # Train model
    model_path = 'model_fold_%d.h5' % (n_fold)
    model = model_fn(config['MAX_LEN'])
    # learning_rate is a callable: decays once per epoch (iterations//step_size).
    # The lambda late-binds `optimizer`, which exists by the time it is invoked.
    optimizer = optimizers.Adam(learning_rate=lambda: lrfn(tf.cast(optimizer.iterations, tf.float32)//step_size))
    loss_fn_start = losses.categorical_crossentropy
    loss_fn_end = losses.categorical_crossentropy
    train_acc_start = metrics.CategoricalAccuracy()
    valid_acc_start = metrics.CategoricalAccuracy()
    train_acc_end = metrics.CategoricalAccuracy()
    valid_acc_end = metrics.CategoricalAccuracy()
    # Sum (not Mean) accumulators -- presumably custom_fit normalizes them; verify.
    train_loss = metrics.Sum()
    valid_loss = metrics.Sum()
    train_loss_start = metrics.Sum()
    valid_loss_start = metrics.Sum()
    train_loss_end = metrics.Sum()
    valid_loss_end = metrics.Sum()
    metrics_dict = {'loss': train_loss, 'loss_start': train_loss_start, 'loss_end': train_loss_end,
                    'acc_start': train_acc_start, 'acc_end': train_acc_end,
                    'val_loss': valid_loss, 'val_loss_start': valid_loss_start, 'val_loss_end': valid_loss_end,
                    'val_acc_start': valid_acc_start, 'val_acc_end': valid_acc_end}
    history = custom_fit(model, metrics_dict, train_step, valid_step, train_data_iter, valid_data_iter,
                         step_size, valid_step_size, config['BATCH_SIZE'], config['EPOCHS'], config['ES_PATIENCE'], model_path)
    history_list.append(history)
    # Reload the checkpointed weights before predicting.
    model.load_weights(model_path)
    # Make predictions (list(x_*) splits the stacked array into the two model inputs)
    train_preds = model.predict(list(x_train))
    valid_preds = model.predict(list(x_valid))
    # Write argmax start/end token indices back into the k_fold frame, clip
    # them into a valid range (start <= end <= text_len), then decode the
    # token spans back to text and score them with Jaccard.
    k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'train', 'start_fold_%d' % (n_fold)] = train_preds[0].argmax(axis=-1)
    k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'train', 'end_fold_%d' % (n_fold)] = train_preds[1].argmax(axis=-1)
    k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'validation', 'start_fold_%d' % (n_fold)] = valid_preds[0].argmax(axis=-1)
    k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'validation', 'end_fold_%d' % (n_fold)] = valid_preds[1].argmax(axis=-1)
    k_fold['end_fold_%d' % (n_fold)] = k_fold['end_fold_%d' % (n_fold)].astype(int)
    k_fold['start_fold_%d' % (n_fold)] = k_fold['start_fold_%d' % (n_fold)].astype(int)
    k_fold['end_fold_%d' % (n_fold)].clip(0, k_fold['text_len'], inplace=True)
    k_fold['start_fold_%d' % (n_fold)].clip(0, k_fold['end_fold_%d' % (n_fold)], inplace=True)
    k_fold['prediction_fold_%d' % (n_fold)] = k_fold.apply(lambda x: decode(x['start_fold_%d' % (n_fold)], x['end_fold_%d' % (n_fold)], x['text'], config['question_size'], tokenizer), axis=1)
    k_fold['prediction_fold_%d' % (n_fold)].fillna(k_fold["text"], inplace=True)
    k_fold['jaccard_fold_%d' % (n_fold)] = k_fold.apply(lambda x: jaccard(x['selected_text'], x['prediction_fold_%d' % (n_fold)]), axis=1)
```
# Model loss graph
```
# Plot per-fold training curves collected by custom_fit.
sns.set(style="whitegrid")
for n_fold in range(config['N_FOLDS']):
    print('Fold: %d' % (n_fold+1))
    plot_metrics(history_list[n_fold])
```
# Model evaluation
```
# Per-fold metric summary table, colour-coded by color_map.
display(evaluate_model_kfold(k_fold, config['N_FOLDS']).style.applymap(color_map))
```
# Visualize predictions
```
# Show only the text, per-fold prediction and jaccard columns (drop the
# bookkeeping columns) for the first 15 rows.
display(k_fold[[c for c in k_fold.columns if not (c.startswith('textID') or
                                                  c.startswith('text_len') or
                                                  c.startswith('selected_text_len') or
                                                  c.startswith('text_wordCnt') or
                                                  c.startswith('selected_text_wordCnt') or
                                                  c.startswith('fold_') or
                                                  c.startswith('start_fold_') or
                                                  c.startswith('end_fold_'))]].head(15))
```
| github_jupyter |
# Automating the MUF
This notebook is an example of using the MUF as a scriptable service from python. Requires modifications to the MUF source code to replace the variable Application.StartupPath, as it will be registered as the path to python.exe instead of the MUF application. Written by Aric Sanders 10/2017.
This example requires the following dependencies:
1. The MUF
2. pythonnet
3. pyMez only for the bottom example.
## Automating a VNAUncert process
### Step 1. Importing packages and .net assemblies
```
# to use the python-.net bridge
import clr
# import sys for path
import sys
import os
# add the file paths to the .net assemblies (either dll or .exe)
# this is the location of my altered MUF version with the Application.StartupPath replaced
# with a hard coded location
sys.path.append(r"C:\Share\MUF-develop\VNAUncertainty\bin\Debug")
sys.path.append(r"C:\Share\MUF-develop\PostProcessor\bin\Debug")
#For some reason it cannot find PNAGrabber, is there unmanaged c code?
#sys.path.append(r"C:\Share\MUF-develop\PNAGrabber\bin\Debug")
#print(sys.path)
# create a reference to the applications that you want to use
# (clr.AddReference loads the named .NET assembly found on sys.path)
clr.AddReference("Measurement")
clr.AddReference("PostProcessor")
clr.AddReference("VNAUncertainty")
#clr.AddReference("PNAGrabber")
# Now they can be imported as python libraries
import Measurement
import VNAUncertainty
import PostProcessor
#import PNAGrabber
```
### Step 2. Creating a class instance and initializing it
```
# now we can call the classes as python classes
# but we must trigger the OnLoad event to create the proper variables
# We must create a .net event first (System comes from the loaded .NET runtime)
from System import EventArgs
event = EventArgs()
# now if we want to run VNAUncertainty we create an instance and initialize it
vna = VNAUncertainty.VNAUncertainty()
vna.OnLoad(event)
```
### Step 3. Opening a menu
```
# the basic command that loads a menu is .myOpenMenu
# We choose a menu that exists
vna.myOpenMenu(r"C:\Share\MUF_FILES\SOL_new.vnauncert_archive")
```
### Step 4. Running the calibration
```
# We then call RunCalibration in order to run VNAUncertainty
# This takes ~3 seconds and will create a set of MUF files in a default location
vna.RunCalibration(0)
```
### Step 5. Closing the class instance
```
# we close the application after we are done (releases the .NET instance)
vna.Close()
```
# Repeating for an automated Measurement.exe
```
# If we wanted to do the same thing with Measurement.exe we would
# initialize the class and we can export a plot remotely
measurement = Measurement.Measurement()
measurement.OnLoad(event)
# we open an existing .meas file
measurement.myOpenMenu(r"C:\8510calfiles\35CalComp\MUF_results\DUTs\M105P1.meas")
#measurement.Run()
# we can use the method .SavePlotToDisk to export data.
# The integers are parameter,plot_type, curve_type
# parameter S11=0,..
# plot_type Linear Magnitude = 1
# curve_type Sensitvity Analysis Result = 0
# NOTE(review): the call below passes curve_type=1, not the 0 documented
# above -- confirm which curve type is intended.
# This function will not overwrite an existing file with the same name
measurement.SavePlotToDisk(os.path.join(os.getcwd(),"new_plot4.png"),0,1,1)
measurement.Close()
```
# Repeating for an automated PostProcessor.exe
```
# we can also run a post processor in the same way: instantiate, fire the
# OnLoad event, open a .post menu, run it, and close the instance.
post = PostProcessor.PostProcessor()
post.OnLoad(event)
post.myOpenMenu(r"C:\Share\MUF_FILES\Step 9 - Correct DUTs to 50 ohm\AtRefHalfThru_HF.post")
post.RunCalculation()
post.Close()
# fix: removed the stray, incomplete line `pna=PNAGr` -- PNAGrabber is never
# loaded (its AddReference is commented out above) and the truncated name
# raised NameError.
## Implementing a full solution using xml manipulation
From here down we use a package I have developed to make things easier currently called pyMez.
### Step 1. Importing my modules
```
# to load the pyMez API as configured
from pyMez import *
# this module is still experimental so it is not in the main import
from pyMez.Code.DataHandlers.MUFModels import *
```
### Step 2. Using my XML menu handler to change values
```
# now we can load the xml based vnauncert menu and manipulate it
# (MUFVNAUncert comes from pyMez.Code.DataHandlers.MUFModels)
vna_menu = MUFVNAUncert(r"C:\Share\MUF_FILES\SOL_new.vnauncert_archive")
# there are several helper functions for doing the most common things
vna_menu.get_results_directory()
# to check the action of a set/get
vna_menu.set_results_directory("C:\\Data")
print(vna_menu.get_results_directory())
# we can save the new menu in a different location using .save(new_path)
vna_menu.save("new_menu.vnauncert_archive")
```
### Step 3. Use my script to run the vnauncert menu
```
# if we wanted to run it we can use a script version of the above automation
# (run_muf_script is provided by pyMez's MUFModels module)
run_muf_script("new_menu.vnauncert_archive")
```
### Step 4. Change something and run again
```
# we can change the number of montecarlos, the results directory and run again
vna_menu.set_number_montecarlo(1000)
vna_menu.set_results_directory("C:\\Data\\UncertDemo")
# re-save the edited menu, then run it through the scripted automation
vna_menu.save("new_menu.vnauncert_archive")
run_muf_script("new_menu.vnauncert_archive")
```
## Example of testing the effect of the number of Monte Carlo Trials in a Script
```
# If we wanted to automate a series to investigate the effect of different number of trials,
# we can write a script that sets the number of trails, runs then repeats
# by putting this into a function it allows others to call it later
def monte_carlo_trials_test_script(menu_or_archive, top_level_results_folder,
                                   number_trials_list=(10, 15, 20, 30, 100, 1000)):
    """Run a VNAUncert menu repeatedly with different Monte Carlo trial counts.

    For each N in number_trials_list a sub-folder N_<N> is created under
    top_level_results_folder, the menu is re-saved with that trial count and
    results directory, and the calibration is run. Timing for each run and
    for the whole sweep is printed.

    Args:
        menu_or_archive: path to a .vnauncert / .vnauncert_archive menu file.
        top_level_results_folder: existing folder that will hold the N_* runs.
        number_trials_list: iterable of Monte Carlo trial counts to test
            (tuple default avoids the mutable-default pitfall).
    """
    import traceback  # local import: only needed for failure reporting

    vna_menu = MUFVNAUncert(menu_or_archive)
    # create and initialize a VNAUncert instance (.NET OnLoad event required)
    from System import EventArgs
    event = EventArgs()
    vna = VNAUncertainty.VNAUncertainty()
    vna.OnLoad(event)
    [directory_name, menu_name] = os.path.split(menu_or_archive)
    prefix_name = menu_name.split(".")[0]
    extension = menu_name.split(".")[-1]
    total_start = datetime.datetime.utcnow()
    for number in number_trials_list:
        try:
            print("Beginning N = {0}".format(number))
            start = datetime.datetime.utcnow()
            # per-N results sub-folder
            folder_name = os.path.join(top_level_results_folder, "N_{}".format(number))
            os.mkdir(folder_name)
            vna_menu.set_number_montecarlo(number)
            vna_menu.set_results_directory(folder_name)
            # rename and save the new menu
            new_menu_name = os.path.join(folder_name, prefix_name + "_N_{0}".format(number) + "." + extension)
            vna_menu.save(new_menu_name)
            vna.myOpenMenu(new_menu_name)
            vna.RunCalibration(0)
            stop = datetime.datetime.utcnow()
            runtime = stop - start
            print("Finished running N = {0} it took {1} seconds".format(number, runtime.seconds))
            print(" The new menu is {0} and results are in {1}".format(new_menu_name, folder_name))
            print("*" * 80)
        except Exception:
            # fix: was a bare `except:` that silently swallowed everything
            # (including KeyboardInterrupt); report the cause and continue.
            traceback.print_exc()
            print("Trial N = {0} failed moving to next trial".format(number))
    vna.Close()  # fix: release the VNAUncertainty instance (was leaked)
    total_stop = datetime.datetime.utcnow()
    total_runtime = total_stop - total_start
    print("Montecarlo trial Script is finished, total runtime is {0} seconds".format(total_runtime.seconds))
# Example sweep over Monte Carlo trial counts for the 35CalComp setup.
monte_carlo_trials_test_script(r"C:\Share\8510calfiles\35CalComp\35CalComp_setup.vnauncert_archive",
                               r"C:\Share\Montecarlo_Trial_20171004_2")
```
### Functional Plot.
This sometimes falls a little short so here is where an inline script is really useful.
```
def plot_montecarlo_trial_S11(top_level_results_folder, dut_name, number_trials_list=(10, 15, 20, 30, 100, 1000)):
    """Plot nominal-minus-MonteCarlo |S11| for each trial count against the
    sensitivity-analysis uncertainty band of one DUT, and save the figure.

    Args:
        top_level_results_folder: folder containing the N_<trials> run folders.
        dut_name: DUT file prefix, e.g. "M105P1".
        number_trials_list: the Monte Carlo trial counts that were run
            (tuple default avoids the mutable-default pitfall).
    """
    # The sensitivity reference is the same for every run; take it from the
    # first N folder.
    first_folder = os.path.join(top_level_results_folder, "N_{}".format(number_trials_list[0]))
    nominal_file_path = os.path.join(first_folder, "DUTs", "{0}_Support".format(dut_name), "{0}_0.s2p".format(dut_name))
    covariance_directory = os.path.join(first_folder, "DUTs", "{0}_Support".format(dut_name), "Covariance")
    sensitvity_reference = create_sensitivity_reference_curve(nominal_file_path=nominal_file_path,
                                                              sensitivity_directory=covariance_directory,
                                                              format="MA")
    # One Monte Carlo reference curve per trial count.
    montecarlo_list = []
    for number in number_trials_list:
        folder_name = os.path.join(top_level_results_folder, "N_{}".format(number))
        montecarlo_folder_name = os.path.join(folder_name, "DUTs", "{0}_Support".format(dut_name), "MonteCarlo")
        montecarlo_list.append(create_monte_carlo_reference_curve(montecarlo_folder_name, format="MA"))
    plt.close()
    plt.rcParams.update({'font.size': 22, 'figure.figsize': (12, 6)})
    # fix: these reference arrays are loop-invariant and were rebuilt on every
    # iteration; the unused Monte Carlo uncertainty array was also dropped.
    sensitivity_magS11 = np.array(sensitvity_reference["magS11"])
    uncert_sensitvity = np.array(sensitvity_reference["umagS11"])
    f = np.array(sensitvity_reference["Frequency"])
    for index, montecarlo in enumerate(montecarlo_list):
        montecarlo_magS11 = np.array(montecarlo["magS11"])
        plt.plot(f, sensitivity_magS11 - montecarlo_magS11, '-', label="N = {0}".format(number_trials_list[index]), alpha=.3)
    plt.fill_between(f, -1 * uncert_sensitvity, uncert_sensitvity,
                     color="blue",
                     alpha=.25,
                     edgecolor="black", label="Sensitivity Uncertainty")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    # plt.ylim([-.004,.004])
    plt.xlabel("Frequency(GHz)")
    plt.title("{0} Nominal-Montecarlo |S11| ".format(dut_name))
    plt.savefig(os.path.join(top_level_results_folder,
                             "MonteCarlo_Trial_Size_Test.png"), bbox_inches='tight')
    plt.show()

plot_montecarlo_trial_S11(r"C:\Share\Montecarlo_Trial_20171004", "M105P1")
```
### Inline script.
Notice in the above plot we can't really see the information of interest, we need to rescale the y-axis. There are a lot of options. We could use the `%matplotlib` magic to change the mode to notebook or wx. But I just copied and pasted the function above and made the cell into a script.
```
# Inline (non-function) version of the comparison plot so the axis limits can
# be tweaked interactively in the notebook.
top_level_results_folder = r"C:\Share\Montecarlo_Trial_20171004_2"
dut_name = "M105P1"
number_trials_list = [10, 15, 20, 30, 100, 1000]
# Sensitivity reference (identical across runs) from the first N folder.
folder_name = os.path.join(top_level_results_folder, "N_{}".format(number_trials_list[0]))
nominal_file_path = os.path.join(folder_name, "DUTs", "{0}_Support".format(dut_name), "{0}_0.s2p".format(dut_name))
covariance_directory = os.path.join(folder_name, "DUTs", "{0}_Support".format(dut_name), "Covariance")
sensitvity_reference = create_sensitivity_reference_curve(nominal_file_path=nominal_file_path,
                                                          sensitivity_directory=covariance_directory,
                                                          format="MA")
montecarlo_list = []
for index, number in enumerate(number_trials_list):
    folder_name = os.path.join(top_level_results_folder, "N_{}".format(number))
    montecarlo_folder_name = os.path.join(folder_name, "DUTs", "{0}_Support".format(dut_name), "MonteCarlo")
    montecarlo_list.append(create_monte_carlo_reference_curve(montecarlo_folder_name, format="MA"))
# Figure 1: nominal minus Monte Carlo mean vs the sensitivity band.
plt.close()
plt.rcParams.update({'font.size': 22, 'figure.figsize': (12, 6)})
for index, montecarlo in enumerate(montecarlo_list):
    montecarlo_magS11 = np.array(montecarlo["magS11"])
    sensitivity_magS11 = np.array(sensitvity_reference["magS11"])
    uncert_sensitvity = np.array(sensitvity_reference["umagS11"])
    uncert_montecarlo = np.array(montecarlo["umagS11"])
    f = np.array(sensitvity_reference["Frequency"])
    plt.plot(f, sensitivity_magS11 - montecarlo_magS11, '--', label="N = {0}".format(number_trials_list[index]), alpha=1)
plt.fill_between(f, -1 * uncert_sensitvity, uncert_sensitvity,
                 color="red",
                 alpha=.25,
                 edgecolor="black", label="Sensitivity Uncertainty")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim([-.004, .004])
plt.xlabel("Frequency(GHz)")
plt.title("{0} Nominal-Montecarlo |S11| ".format(dut_name))
plt.savefig(os.path.join(top_level_results_folder,
                         "MonteCarlo_Trial_Size_Test2.png"), bbox_inches='tight')
plt.show()
# Figure 2: the Monte Carlo uncertainty itself vs the sensitivity band.
# NOTE(review): this savefig uses the SAME filename as figure 1 above, so the
# first image on disk is overwritten -- confirm and rename if unintended.
plt.close()
for index, montecarlo in enumerate(montecarlo_list):
    montecarlo_magS11 = np.array(montecarlo["magS11"])
    sensitivity_magS11 = np.array(sensitvity_reference["magS11"])
    uncert_sensitvity = np.array(sensitvity_reference["umagS11"])
    uncert_montecarlo = np.array(montecarlo["umagS11"])
    f = np.array(sensitvity_reference["Frequency"])
    plt.plot(f, uncert_montecarlo, '--', label="N = {0}".format(number_trials_list[index]), alpha=1)
plt.fill_between(f, -1 * uncert_sensitvity, uncert_sensitvity,
                 color="red",
                 alpha=.25,
                 edgecolor="black", label="Sensitivity Uncertainty")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim([-.004, .004])
plt.xlabel("Frequency(GHz)")
plt.title("{0} Uncertainty Montecarlo |S11| ".format(dut_name))
plt.savefig(os.path.join(top_level_results_folder,
                         "MonteCarlo_Trial_Size_Test2.png"), bbox_inches='tight')
plt.show()
## Analysis
The plot above shows that it is rare for the difference of the statistically biased mean and nominal value to exceed the uncertainty from the sensitivity analysis. Here the difference between 100 trials and 1000 is very little (except a factor of ten in time). In addition, we see a strange stability issue in the statistically biased mean, that exists for all number of Monte Carlo trials. Early tests indicate this is a stability issue in TRL for the distribution widths chosen for certain parameters.
| github_jupyter |
```
import numpy as np
# Directory holding the VGG16 weights exported layer-by-layer as .npy files
# (one '<layer>kernel.npy' and '<layer>bias.npy' pair per layer).
dir_name = './weights_npy/'
# VGG16 layer names in forward order: 13 conv layers, 2 FC layers, classifier.
layer_names = ['block1_conv1',
               'block1_conv2',
               'block2_conv1',
               'block2_conv2',
               'block3_conv1',
               'block3_conv2',
               'block3_conv3',
               'block4_conv1',
               'block4_conv2',
               'block4_conv3',
               'block5_conv1',
               'block5_conv2',
               'block5_conv3',
               'fc1',
               'fc2',
               'predictions']
# Sanity check: load every layer's kernel/bias and print the shapes.
# NOTE: bare `print` statements -- this whole section is Python 2 only.
for i in range(len(layer_names)):
    kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
    bias = np.load(dir_name + layer_names[i] + 'bias.npy')
    print kernel.shape
    print bias.shape
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
# Load and resize the test image to VGG's 224x224 input (cv2.imread gives BGR).
im = cv2.resize(cv2.imread('persian_cat.jpg'), (224, 224)).astype(np.float32)
# NOTE(review): the standard VGG per-channel mean subtraction (and channel
# transpose) is commented out -- without it predictions may be unreliable.
# im[:,:,0] -= 103.939
# im[:,:,1] -= 116.779
# im[:,:,2] -= 123.68
# im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)  # add batch dimension -> (1, 224, 224, 3)
print im.shape
print im
from layer_funcs import affine_forward, max_pool_forward, conv_forward, relu_forward
x = im
print x
stride = 1
# VGG block 1: two 3x3 conv+ReLU layers (pad 1 keeps the spatial size),
# followed by a 2x2 stride-2 max-pool that halves the resolution.
for i in range(0, 2):
    kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
    bias = np.load(dir_name + layer_names[i] + 'bias.npy')
    pad_width = 1
    x = conv_forward(x, kernel, bias, pad_width, stride)
    x, _ = relu_forward(x)
    print("layer{}".format(i))
    print(x.shape)
    print x
x = max_pool_forward(x, pool_size=2, stride=2)
print("maxpool")
print x.shape
print x
# VGG blocks 2-5: each block is 2 or 3 conv+ReLU layers (3x3 kernels, pad 1,
# stride 1) followed by a 2x2 stride-2 max-pool.
# block 2 (layers 2-3)
for i in range(2, 4):
    kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
    bias = np.load(dir_name + layer_names[i] + 'bias.npy')
    pad_width = 1
    x = conv_forward(x, kernel, bias, pad_width, stride)
    x, _ = relu_forward(x)
    print("layer{}".format(i))
    print(x.shape)
    print x
x = max_pool_forward(x, pool_size=2, stride=2)
print("maxpool")
print x.shape
print x
# block 3 (layers 4-6)
for i in range(4, 7):
    kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
    bias = np.load(dir_name + layer_names[i] + 'bias.npy')
    pad_width = 1
    x = conv_forward(x, kernel, bias, pad_width, stride)
    x, _ = relu_forward(x)
    print("layer{}".format(i))
    print(x.shape)
    print x
x = max_pool_forward(x, pool_size=2, stride=2)
print("maxpool")
print x.shape
print x
# block 4 (layers 7-9)
for i in range(7, 10):
    kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
    bias = np.load(dir_name + layer_names[i] + 'bias.npy')
    pad_width = 1
    x = conv_forward(x, kernel, bias, pad_width, stride)
    x, _ = relu_forward(x)
    print("layer{}".format(i))
    print(x.shape)
    print x
x = max_pool_forward(x, pool_size=2, stride=2)
print("maxpool")
print x.shape
print x
# block 5 (layers 10-12)
for i in range(10, 13):
    kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
    bias = np.load(dir_name + layer_names[i] + 'bias.npy')
    pad_width = 1
    x = conv_forward(x, kernel, bias, pad_width, stride)
    x, _ = relu_forward(x)
    print("layer{}".format(i))
    print(x.shape)
    print x
x = max_pool_forward(x, pool_size=2, stride=2)
print("maxpool")
print x.shape
print x
for i in range(13, 16):
kernel = np.load(dir_name + layer_names[i] + 'kernel.npy')
bias = np.load(dir_name + layer_names[i] + 'bias.npy')
x, _ = affine_forward(x, kernel, bias)
x, _ = relu_forward(x)
print("layer{}".format(i))
print x.shape
print x
result = x
print np.argmax(result)
print result.shape
print result
print result[0][np.argmax(result)]
from layer_funcs import softmax
# Convert the final logits to class probabilities and report the top class
# index and its probability.
sresult = softmax(result)
print sresult.shape
print np.argmax(sresult)
print sresult[0, np.argmax(sresult)]
print sresult
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.