text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
## Dependencies
```
# Stdlib + project helpers.  jigsaw_utility_scripts / scripts_step_lr_schedulers
# are Kaggle utility scripts that also supply names used below (pd, np, tf, sns,
# plt, seed_everything, set_up_strategy, ...) -- presumably via their own
# star imports; confirm against those scripts.
import json, warnings, shutil, glob
from jigsaw_utility_scripts import *
from scripts_step_lr_schedulers import *
from transformers import TFXLMRobertaModel, XLMRobertaConfig
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
SEED = 0
seed_everything(SEED)  # helper from the utility scripts; seeds the RNGs for reproducibility
warnings.filterwarnings("ignore")
```
## TPU configuration
```
# Initialize the TPU (behavior on fallback is defined by the helper) and get
# the tf.distribute strategy; AUTO lets tf.data tune pipeline parallelism.
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
```
# Load data
```
# Pre-tokenized data split produced by a separate preprocessing kernel
# (per the dataset name: RoBERTa tokenization, max_len 192, ratio-2, tail);
# k_fold carries the 5-fold assignments plus labels.
database_base_path = '/kaggle/input/jigsaw-data-split-roberta-192-ratio-2-clean-tail4/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
valid_df = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv",
                       usecols=['comment_text', 'toxic', 'lang'])
print('Train samples: %d' % len(k_fold))
display(k_fold.head())
print('Validation samples: %d' % len(valid_df))
display(valid_df.head())
# This notebook trains fold 1 only.
base_data_path = 'fold_1/'
fold_n = 1
# Unzip files
!tar -xvf /kaggle/input/jigsaw-data-split-roberta-192-ratio-2-clean-tail4/fold_1.tar.gz
```
# Model parameters
```
# Pretrained XLM-RoBERTa weights/config provided as a Kaggle dataset.
base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'
# Run hyper-parameters; ES_PATIENCE=None disables early stopping in custom_fit.
config = {
    "MAX_LEN": 192,
    "BATCH_SIZE": 128,
    "EPOCHS": 4,
    "LEARNING_RATE": 1e-5,
    "ES_PATIENCE": None,
    "base_model_path": base_path + 'tf-xlm-roberta-large-tf_model.h5',
    "config_path": base_path + 'xlm-roberta-large-config.json'
}
# Persist the run configuration for reproducibility.  The original code did
# json.dump(json.loads(json.dumps(config)), ...) -- a redundant
# serialize/parse round-trip; dumping the dict directly is equivalent.
with open('config.json', 'w') as json_file:
    json.dump(config, json_file)
```
## Learning rate schedule
```
# Exponential-decay LR schedule with one epoch of warmup; plotted as a preview.
lr_min = 1e-7
lr_start = 1e-7
lr_max = config['LEARNING_RATE']
step_size = len(k_fold[k_fold[f'fold_{fold_n}'] == 'train']) // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * step_size
hold_max_steps = 0
warmup_steps = step_size * 1  # warm up for one epoch worth of steps
decay = .9997
# NOTE(review): the schedule is sampled only every BATCH_SIZE-th step here,
# purely for plotting; the optimizer evaluates it at every step (see the
# training-setup cell).
rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]
y = [exponential_schedule_with_warmup(tf.cast(x, tf.float32), warmup_steps, hold_max_steps,
                                      lr_start, lr_max, lr_min, decay) for x in rng]
sns.set(style="whitegrid")
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
```
# Model
```
# Load the XLM-R config once at module level so model_fn can reuse it.
module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build the classifier: XLM-RoBERTa encoder + sigmoid head on the CLS token.

    Returns an uncompiled Keras Model taking (input_ids, attention_mask),
    both int32 of shape (MAX_LEN,), and outputting one toxicity probability
    per example.
    """
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)
    # NOTE(review): tuple-unpacking the model output relies on older
    # transformers versions returning a tuple; newer versions return an
    # output object -- confirm against the pinned transformers version.
    last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    cls_token = last_hidden_state[:, 0, :]  # embedding of the first (<s>) token
    output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)
    model = Model(inputs=[input_ids, attention_mask], outputs=output)
    return model
```
# Train
```
# Load data
# Tokenized arrays.  The sample axis is axis 1 here (axis 0 presumably holds
# [input_ids, attention_mask] -- confirm against the preprocessing kernel),
# hence the .shape[1] reshapes and the hstack below.
x_train = np.load(base_data_path + 'x_train.npy')
y_train = np.load(base_data_path + 'y_train_int.npy').reshape(x_train.shape[1], 1).astype(np.float32)
x_valid_ml = np.load(database_base_path + 'x_valid.npy')
y_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1).astype(np.float32)
#################### ADD TAIL ####################
# "Tail" arrays are an alternative tokenization of the same comments
# (presumably the trailing tokens of long texts -- confirm upstream); they
# carry their own labels and are appended as extra training samples.
x_train_tail = np.load(base_data_path + 'x_train_tail.npy')
y_train_tail = np.load(base_data_path + 'y_train_int_tail.npy').reshape(x_train_tail.shape[1], 1).astype(np.float32)
x_train = np.hstack([x_train, x_train_tail])
y_train = np.vstack([y_train, y_train_tail])
step_size = x_train.shape[1] // config['BATCH_SIZE']
valid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE']
# Build TF datasets
train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED))
valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))
train_data_iter = iter(train_dist_ds)
valid_data_iter = iter(valid_dist_ds)
# Step functions
@tf.function
def train_step(data_iter):
    """Run `step_size` distributed training steps, updating the loss/AUC trackers."""
    def train_step_fn(x, y):
        with tf.GradientTape() as tape:
            probabilities = model(x, training=True)
            loss = loss_fn(y, probabilities)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_auc.update_state(y, probabilities)
        train_loss.update_state(loss)
    # tf.range keeps the whole epoch loop inside the traced graph.
    for _ in tf.range(step_size):
        # NOTE(review): experimental_run_v2 was renamed strategy.run and
        # removed in TF >= 2.2 -- update if the runtime is upgraded.
        strategy.experimental_run_v2(train_step_fn, next(data_iter))
@tf.function
def valid_step(data_iter):
    """Run `valid_step_size` distributed evaluation steps (no gradient updates)."""
    def valid_step_fn(x, y):
        probabilities = model(x, training=False)
        loss = loss_fn(y, probabilities)
        valid_auc.update_state(y, probabilities)
        valid_loss.update_state(loss)
    for _ in tf.range(valid_step_size):
        strategy.experimental_run_v2(valid_step_fn, next(data_iter))
# Train model
# Build the model and all training state under the strategy scope so the
# variables are mirrored across TPU replicas.
with strategy.scope():
    model = model_fn(config['MAX_LEN'])
    # The lambda closes over `optimizer` itself; it is only invoked after
    # construction, when optimizer.iterations exists, so the self-reference
    # is safe and yields a per-step warmup + exponential-decay LR.
    optimizer = optimizers.Adam(learning_rate=lambda:
                                exponential_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32),
                                                                 warmup_steps, hold_max_steps, lr_start,
                                                                 lr_max, lr_min, decay))
    loss_fn = losses.binary_crossentropy
    train_auc = metrics.AUC()
    valid_auc = metrics.AUC()
    train_loss = metrics.Sum()  # summed per epoch; averaging is left to custom_fit
    valid_loss = metrics.Sum()
metrics_dict = {'loss': train_loss, 'auc': train_auc,
                'val_loss': valid_loss, 'val_auc': valid_auc}
# custom_fit comes from the utility scripts; semantics of save_last=False are
# defined there -- confirm against that module.
history = custom_fit(model, metrics_dict, train_step, valid_step, train_data_iter, valid_data_iter,
                     step_size, valid_step_size, config['BATCH_SIZE'], config['EPOCHS'],
                     config['ES_PATIENCE'], save_last=False)
# model.save_weights('model.h5')
# Make predictions
# Reload the (non-tail) arrays and predict on the fold's train split, its
# English validation split, and the multilingual validation set.
x_train = np.load(base_data_path + 'x_train.npy')
x_valid = np.load(base_data_path + 'x_valid.npy')
x_valid_ml_eval = np.load(database_base_path + 'x_valid.npy')
train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO))
valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO))
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))
# Store hard 0/1 predictions per fold for the evaluation cells below.
k_fold.loc[k_fold[f'fold_{fold_n}'] == 'train', f'pred_{fold_n}'] = np.round(train_preds)
k_fold.loc[k_fold[f'fold_{fold_n}'] == 'validation', f'pred_{fold_n}'] = np.round(valid_preds)
valid_df[f'pred_{fold_n}'] = valid_ml_preds
# Fine-tune on validation set
#################### ADD TAIL ####################
# One extra epoch on the multilingual validation data, with head + tail
# tokenizations concatenated; the labels are duplicated to match the
# doubled sample count.
x_valid_ml_tail = np.hstack([x_valid_ml, np.load(database_base_path + 'x_valid_tail.npy')])
y_valid_ml_tail = np.vstack([y_valid_ml, y_valid_ml])
valid_step_size_tail = x_valid_ml_tail.shape[1] // config['BATCH_SIZE']
# Build TF datasets
train_ml_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_valid_ml_tail, y_valid_ml_tail,
                                                                                 config['BATCH_SIZE'], AUTO, seed=SEED))
train_ml_data_iter = iter(train_ml_dist_ds)
# NOTE(review): train_step's internal loop count is the global `step_size`,
# not valid_step_size_tail -- confirm custom_fit accounts for this during
# the shorter fine-tuning epoch.
history_ml = custom_fit(model, metrics_dict, train_step, valid_step, train_ml_data_iter, valid_data_iter,
                        valid_step_size_tail, valid_step_size, config['BATCH_SIZE'], 1,
                        config['ES_PATIENCE'], save_last=False)
# Join history
for key in history_ml.keys():
    history[key] += history_ml[key]
model.save_weights('model_ml.h5')
# Make predictions
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))
valid_df[f'pred_ml_{fold_n}'] = valid_ml_preds
### Delete data dir
shutil.rmtree(base_data_path)
```
## Model loss graph
```
plot_metrics(history)
```
# Model evaluation
```
display(evaluate_model_single_fold(k_fold, fold_n, label_col='toxic_int').style.applymap(color_map))
```
# Confusion matrix
```
# Confusion matrices for this fold's English train / validation splits.
train_set = k_fold[k_fold[f'fold_{fold_n}'] == 'train']
validation_set = k_fold[k_fold[f'fold_{fold_n}'] == 'validation']
plot_confusion_matrix(train_set['toxic_int'], train_set[f'pred_{fold_n}'],
                      validation_set['toxic_int'], validation_set[f'pred_{fold_n}'])
```
# Model evaluation by language
```
# Per-language metrics on the multilingual validation set, before and after
# the multilingual fine-tuning pass.
display(evaluate_model_single_fold_lang(valid_df, fold_n).style.applymap(color_map))
# ML fine-tuned preds
display(evaluate_model_single_fold_lang(valid_df, fold_n, pred_col='pred_ml').style.applymap(color_map))
```
# Visualize predictions
```
# Widen column display so full comment texts are visible alongside predictions.
pd.set_option('max_colwidth', 120)
print('English validation set')
display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10))
print('Multilingual validation set')
display(valid_df[['comment_text', 'toxic'] + [c for c in valid_df.columns if c.startswith('pred')]].head(10))
```
# Test set predictions
```
# Predict on the pre-tokenized test set and write the Kaggle submission file.
x_test = np.load(database_base_path + 'x_test.npy')
test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO))
submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')
submission['toxic'] = test_preds
submission.to_csv('submission.csv', index=False)
display(submission.describe())
display(submission.head(10))
```
| github_jupyter |
# CS446/546 - Class Session 19 - Correlation networks
In this class session we are going to analyze gene expression data from a human bladder cancer cohort. We will load a data matrix of expression measurements of 4,473 genes in 414 different bladder cancer samples. These genes have been selected because they are differentially expressed between normal bladder and bladder cancer (thus more likely to have a function in bladder cancer specifically), but the columns in the data matrix are restricted to bladder cancer samples (not normal bladder) because we want to obtain a network representing variation across cancers. The measurements in the matrix have already been normalized to account for inter-sample heterogeneity and then log2 transformed. Our job is to compute Pearson correlation coefficients between all pairs of genes, obtain Fisher-transformed *z*-scores for all pairs of genes, test each pair of genes for significance of the z score, adjust for multiple hypothesis testing, filter to eliminate any pair for which *R* < 0.75 or *P*adj > 0.01, load the graph into an `igraph::Graph`, and plot the degree distribution on log-log scale. We will then answer two questions: (1) does the network look to be scale-free? and (2) what is its best-fit scaling exponent?
Let's start by loading the packages that we will need for this notebook. Note the difference in language-design philosophy between R (which requires one package for this analysis) and python (where we have to load eight modules). Python keeps its core minimal, whereas R has a lot of statistical and plotting functions in the base language (or in packages that are loaded by default).
```
# Load igraph quietly; it is the only package this notebook needs.
suppressPackageStartupMessages(
    library(igraph)
)
```
Read the tab-delimited text file of gene expression measurements (rows correspond to genes, columns correspond to bladder tumor samples). (use `read.table` with `row.names=1`). As always, sanity check that the file that you loaded has the expected dimensions (4,473 x 414)
```
# Expression matrix: genes in rows, bladder-cancer samples in columns,
# already normalized and log2-transformed upstream.
gene_matrix_for_network <- read.table("shared/bladder_cancer_genes_tcga.txt",
                                      sep="\t",
                                      header=TRUE,
                                      row.names=1,
                                      stringsAsFactors=FALSE)
# Sanity check: expect 4473 x 414.
dim(gene_matrix_for_network)
```
Look up the online help for the `cor(x)` function, using the command `?cor`. When the `x` argument is a matrix, does `cor` compute the correlation coefficient of pairs of columns or pairs of rows?
```
?cor
```
Since `cor(x)` computes the correlation coefficients of pairs of *columns* of its matrix argument but we want to compute correlation coefficients of pairs of *genes* (and recall that genes are *rows* in our matrix `gene_matrix_for_network`), use the `t` function to transpose the matrix. Then use the `cor` function to compute the 4,473 x 4,473 matrix of gene-gene Pearson correlation coefficients, and call this matrix `gene_matrix_for_network_cor`. It is in your interest to do this as a one-line command (by composing the function calls), so we store only one big matrix in the environment:
```
gene_matrix_for_network_cor <- cor(t(gene_matrix_for_network))
```
Look up the online help for the `upper.tri` function. Does it return the matrix's upper triangle entries or the *index values* of the upper triangle entries of the matrix?
```
?upper.tri
```
Set the upper-triangle of the matrix `gene_matrix_for_network_cor` to zero, using (from left to right) three things: array indexing `[`, the function `upper.tri`, and the assignment operator `<-`. In the call to `upper.tri`, specify `diag=TRUE` to also zero out the diagonal.
```
gene_matrix_for_network_cor[upper.tri(gene_matrix_for_network_cor, diag=TRUE)] <- 0
```
Use the function `which` in order to obtain a vector `inds_correl_above_thresh` containing the indices of the entries of the matrix `gene_matrix_for_network_cor` for which *R* >= 0.75. Use array indexing to obtain the *R* values for these matrix entries, as a numpy array `cor_coeff_values_above_thresh`.
```
# Linear indices of matrix entries with R >= 0.75 (candidate network edges),
# and the corresponding correlation values.
inds_correl_above_thresh <- which(gene_matrix_for_network_cor >= 0.75)
cor_coeff_values_above_thresh <- gene_matrix_for_network_cor[inds_correl_above_thresh]
```
Refer to Eq. (13.5) in the assigned reading for today's class (p9 of the PDF). Obtain a vector of the correlation coefficients that exceeded 0.75, and Fisher-transform the correlation coefficient values to get a vector `z_scores` of *z* scores. Each of these *z* scores will correspond to an **edge** in the network, unless the absolute *z* score is too small such that we can't exclude the null hypothesis that the corresponding two genes' expression values are independent (we will perform that check in the next step).
```
# Fisher z-transformation of the above-threshold correlations.
# atanh(r) == 0.5 * log((1 + r) / (1 - r)); reusing the already-extracted
# vector avoids indexing the large correlation matrix two more times.
z_scores <- atanh(cor_coeff_values_above_thresh)
```
Delete the correlation matrix object in order to save memory (we won't need it from here on out).
```
rm(gene_matrix_for_network_cor)
```
Now we are going to perform a statistical test on *each* correlation coefficient that was >= 0.75. Assume that under the null hypothesis that two genes are independent, then sqrt(M-3)z for the pair of genes is an independent sample from the normal distribution with zero mean and unit variance, where M is the number of samples used to compute the Pearson correlation coefficient (i.e., M = 414). For each entry in `z_scores` compute a P value as the area under two tails of the normal distribution N(x), where the two tails are x < -sqrt(M-3)z and x > sqrt(M-3)z. (You'll know you are doing it right if z=0 means you get a P value of 1). You will want to use the functions `sqrt` and `pnorm`.
```
# Two-tailed P value under H0 (independence): sqrt(M-3)*z ~ N(0, 1), where
# M is the number of samples (414).  z = 0 gives P = 1, as expected.
M <- ncol(gene_matrix_for_network)
P_values <- 2*(pnorm(-z_scores*sqrt(M-3)))
```
Look up the function `p.adjust` in the online help
```
?p.adjust
```
Adjust the P values for multiple hypothesis testing, using the `p.adjust` function with `method="hochberg"`
```
P_values_adj <- p.adjust(P_values, method="hochberg")
```
Verify that we don't need to drop any entries due to the adjusted P value not being small enough (use `which` and `length`); this should produce zero since we have M=414 samples per gene.
```
length(which(P_values_adj > 0.01))
```
Make an undirected graph from the row/column indices of the (upper-triangle) gene pairs whose correlations were above our threshold (you'll want to use the `arrayInd`, `cbind`, and `graph_from_data_frame` functions for this). Print a summary of the network, as a sanity check. Make sure to specify `directed=FALSE` when you construct the graph.
```
# Convert the linear indices back to (row, col) gene pairs and build an
# undirected graph whose vertices are gene row/column numbers.
N <- nrow(gene_matrix_for_network)
final_network <- graph_from_data_frame(cbind(arrayInd(inds_correl_above_thresh,
                                                      .dim=c(N,N))), directed=FALSE)
summary(final_network)
```
Plot the degree distribution on log-log scale; does it appear to be scale-free?
```
# Degree distribution on log-log axes; warnings from log of zero-frequency
# bins are suppressed.
suppressWarnings(
    plot(degree.distribution(final_network), log="xy", xlab="k", ylab="Pk")
)
```
Use the `igraph::power.law.fit` function to estimate the scaling exponent *alpha* of the degree distribution:
```
power.law.fit(degree(final_network))$alpha
```
## extra challenge:
If you got this far, see if you can scatter plot the relationship between R (as the independent variable) and -log10(P) value (as the dependent variable). When the effect size variable (e.g., *R*) can range from negative to positive, this plot is sometimes called a "volcano plot".
```
# Volcano plot; drop entries with P == 0 since -log10(0) is infinite.
inds_use = which(P_values_adj > 0)
plot(cor_coeff_values_above_thresh[inds_use], -log10(P_values_adj[inds_use]), xlab="R", ylab="-log10(P)")
```
## extra-extra challenge
For each of the gene pairs for which R>0.75, see if you can compute the t-test P value for each correlation coefficient (don't bother adjusting for false discovery rate control). Compare to the (un-adjusted) P values that you got using the Fisher transformation, using a scatter plot. How do they compare? Which test has better statistical power, for this case where M = 414? (If you are wondering, *general* advice is to use Fisher if M>=10; for very small numbers of samples, use the Student t test).
```
# Student t statistic for each correlation coefficient (df = M - 2) and its
# two-tailed P value, for comparison with the Fisher-transform P values.
ts = cor_coeff_values_above_thresh * sqrt(M-2) / sqrt((1 - cor_coeff_values_above_thresh^2))
P_values_studentT = 2*pt(-ts, M-2)
# Compare the two tests where both P values are representable (> 0).
inds_use = which(P_values > 0 & P_values_studentT > 0)
plot(-log10(P_values[inds_use]), -log10(P_values_studentT[inds_use]), xlab="Fisher transformation", ylab="Student t")
```
| github_jupyter |
<h1> Time series prediction using RNNs, with TensorFlow and Cloud ML Engine </h1>
This notebook illustrates:
<ol>
<li> Creating a Recurrent Neural Network in TensorFlow
<li> Creating a Custom Estimator in tf.contrib.learn
<li> Training on Cloud ML Engine
</ol>
<p>
<h3> Simulate some time-series data </h3>
Essentially a set of sinusoids with random amplitudes and frequencies.
```
# Python 2 notebook (bare print statements, xrange below).
# NOTE(review): tensorflow is imported twice; the second import is redundant.
!pip install --upgrade tensorflow
import tensorflow as tf
print tf.__version__
import numpy as np
import tensorflow as tf
import seaborn as sns
import pandas as pd
SEQ_LEN = 10  # length of every simulated series
def create_time_series():
    """One synthetic series: a sinusoid with randomly drawn frequency and amplitude."""
    freq = 0.1 + 0.5 * np.random.random()   # uniform in [0.1, 0.6)
    ampl = 0.5 + np.random.random()         # uniform in [0.5, 1.5)
    return np.sin(np.arange(0, SEQ_LEN) * freq) * ampl
# Preview five randomly generated series (xrange: Python 2).
for i in xrange(0, 5):
    sns.tsplot( create_time_series() ); # 5 series
def to_csv(filename, N):
    # Write N synthetic sequences, one comma-separated line per sequence.
    with open(filename, 'w') as ofp:
        for lineno in xrange(0, N):  # xrange: Python 2
            seq = create_time_series()
            line = ",".join(map(str, seq))
            ofp.write(line + '\n')
# Generate the train/validation CSVs and peek at the first lines of each.
to_csv('train.csv', 1000) # 1000 sequences
to_csv('valid.csv', 50)
!head -5 train.csv valid.csv
```
<h2> RNN </h2>
For more info, see:
<ol>
<li> http://colah.github.io/posts/2015-08-Understanding-LSTMs/ for the theory
<li> https://www.tensorflow.org/tutorials/recurrent for explanations
<li> https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb for sample code
</ol>
Here, we are trying to predict from 8 values of a timeseries, the next two values.
<p>
<h3> Imports </h3>
Several tensorflow packages and shutil
```
# TF 1.x imports: tf.contrib.learn Estimator/Experiment APIs (removed in
# TF 2.x) plus shutil for cleaning the output directory between runs.
import tensorflow as tf
import shutil
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn.python.learn import learn_runner
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
```
<h3> Input Fn to read CSV </h3>
Our CSV file structure is quite simple -- a bunch of floating point numbers (note the type of DEFAULTS). We ask for the data to be read BATCH_SIZE sequences at a time. The Estimator API in tf.contrib.learn wants the features returned as a dict. We'll just call this timeseries column 'rawdata'.
<p>
Our CSV file sequences consist of 10 numbers. We'll assume that 8 of them are inputs and we need to predict the next two.
```
DEFAULTS = [[0.0] for x in xrange(0, SEQ_LEN)]  # CSV column defaults: every field is a float
BATCH_SIZE = 20
TIMESERIES_COL = 'rawdata'  # single feature key holding the whole input sequence
N_OUTPUTS = 2  # in each sequence, 1-8 are features, and 9-10 is label
N_INPUTS = SEQ_LEN - N_OUTPUTS
```
Reading data using the Estimator API in tf.learn requires an input_fn. This input_fn needs to return a dict of features and the corresponding labels.
<p>
So, we read the CSV file. The Tensor format here will be batchsize x 1 -- entire line. We then decode the CSV. At this point, all_data will contain a list of Tensors. Each tensor has a shape batchsize x 1. There will be 10 of these tensors, since SEQ_LEN is 10.
<p>
We split these 10 into 8 and 2 (N_OUTPUTS is 2). Put the 8 into a dict, call it features. The other 2 are the ground truth, so labels.
```
# read data and convert to needed format
def read_dataset(filename, mode=tf.contrib.learn.ModeKeys.TRAIN):
    """Return an input_fn yielding (features, labels) from CSV sequence files.

    Each CSV row holds SEQ_LEN floats; the first N_INPUTS become the feature
    tensor (under TIMESERIES_COL) and the last N_OUTPUTS the label.
    """
    def _input_fn():
        # Many passes for training, a single pass otherwise.
        num_epochs = 100 if mode == tf.contrib.learn.ModeKeys.TRAIN else 1
        # could be a path to one file or a file pattern.
        input_file_names = tf.train.match_filenames_once(filename)
        filename_queue = tf.train.string_input_producer(
            input_file_names, num_epochs=num_epochs, shuffle=True)
        reader = tf.TextLineReader()
        _, value = reader.read_up_to(filename_queue, num_records=BATCH_SIZE)
        # Shape becomes [batch, 1] so decode_csv sees one line per row.
        value_column = tf.expand_dims(value, -1)
        print 'readcsv={}'.format(value_column)
        # all_data is a list of tensors
        all_data = tf.decode_csv(value_column, record_defaults=DEFAULTS)
        inputs = all_data[:len(all_data)-N_OUTPUTS] # first few values
        label = all_data[len(all_data)-N_OUTPUTS : ] # last few values
        # from list of tensors to tensor with one more dimension
        inputs = tf.concat(inputs, axis=1)
        label = tf.concat(label, axis=1)
        print 'inputs={}'.format(inputs)
        return {TIMESERIES_COL: inputs}, label # dict of features, label
    return _input_fn
```
<h3> Define RNN </h3>
A recursive neural network consists of possibly stacked LSTM cells.
<p>
The RNN has one output per input, so it will have 8 output cells. We use only the last output cell, but rather than use it directly, we do a matrix multiplication of that cell by a set of weights to get the actual predictions. This allows for a degree of scaling between inputs and predictions if necessary (we don't really need it in this problem).
<p>
Finally, to supply a model function to the Estimator API, you need to return a ModelFnOps. The rest of the function creates the necessary objects.
```
LSTM_SIZE = 3 # number of hidden units in the LSTM cell
# create the inference model
def simple_rnn(features, targets, mode):
    """model_fn for tflearn.Estimator: LSTM over the input sequence, then a
    linear layer on the last output; MSE loss and SGD for train/eval."""
    # 0. Reformat input shape to become a sequence
    x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
    #print 'x={}'.format(x)
    # 1. configure the RNN
    lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
    outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # slice to keep only the last cell of the RNN
    outputs = outputs[-1]
    #print 'last outputs={}'.format(outputs)
    # output is result of linear activation of last layer of RNN
    weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
    bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
    predictions = tf.matmul(outputs, weight) + bias
    # 2. loss function, training/eval ops (not needed for prediction mode)
    if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
        loss = tf.losses.mean_squared_error(targets, predictions)
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.01,
            optimizer="SGD")
        eval_metric_ops = {
            "rmse": tf.metrics.root_mean_squared_error(targets, predictions)
        }
    else:
        loss = None
        train_op = None
        eval_metric_ops = None
    # 3. Create predictions
    predictions_dict = {"predicted": predictions}
    # 4. return ModelFnOps
    return tflearn.ModelFnOps(
        mode=mode,
        predictions=predictions_dict,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
```
<h3> Experiment </h3>
Distributed training is launched off using an Experiment. The key line here is that we use tflearn.Estimator rather than, say tflearn.DNNRegressor. This allows us to provide a model_fn, which will be our RNN defined above. Note also that we specify a serving_input_fn -- this is how we parse the input data provided to us at prediction time.
```
def get_train():
    # Training input_fn over the generated train.csv (many epochs, shuffled).
    return read_dataset('train.csv', mode=tf.contrib.learn.ModeKeys.TRAIN)
def get_valid():
    # Evaluation input_fn over valid.csv (single epoch).
    return read_dataset('valid.csv', mode=tf.contrib.learn.ModeKeys.EVAL)
def serving_input_fn():
    """Describe prediction-time input: a [None, N_INPUTS] float32 placeholder."""
    feature_placeholders = {
        TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
    }
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }
    # expand_dims added a trailing size-1 axis; squeeze it back off so the
    # served feature matches the training-time shape.
    features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis=[2])
    print 'serving: features={}'.format(features[TIMESERIES_COL])
    return tflearn.utils.input_fn_utils.InputFnOps(
        features,
        None,
        feature_placeholders
    )
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
def experiment_fn(output_dir):
    """Build the Experiment: custom-model Estimator, train/eval inputs, RMSE
    metric, and a SavedModel export strategy for serving."""
    # run experiment
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=simple_rnn, model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'rmse': tflearn.MetricSpec(
                metric_fn=metrics.streaming_root_mean_squared_error
            )
        },
        export_strategies=[saved_model_export_utils.make_export_strategy(
            serving_input_fn,
            default_output_alternative_key=None,
            exports_to_keep=1
        )]
    )
shutil.rmtree('outputdir', ignore_errors=True) # start fresh each time
learn_runner.run(experiment_fn, 'outputdir')
```
<h3> Standalone Python module </h3>
To train this on Cloud ML Engine, we take the code in this notebook and make a standalone Python module.
```
%bash
# run module as-is
REPO=$(pwd)
echo $REPO
rm -rf outputdir
export PYTHONPATH=${PYTHONPATH}:${REPO}/simplernn
python -m trainer.task \
--train_data_paths="${REPO}/train.csv*" \
--eval_data_paths="${REPO}/valid.csv*" \
--output_dir=${REPO}/outputdir \
--job-dir=./tmp
```
Try out online prediction. This is how the REST API will work after you train on Cloud ML Engine
```
%writefile test.json
{"rawdata": [0.0,0.0527,0.10498,0.1561,0.2056,0.253,0.2978,0.3395]}
%bash
MODEL_DIR=$(ls ./outputdir/export/Servo/)
gcloud ml-engine local predict --model-dir=./outputdir/export/Servo/$MODEL_DIR --json-instances=test.json
```
<h3> Cloud ML Engine </h3>
Now to train on Cloud ML Engine.
```
%bash
# run module on Cloud ML Engine
REPO=$(pwd)
BUCKET=cloud-training-demos-ml # CHANGE AS NEEDED
OUTDIR=gs://${BUCKET}/simplernn/model_trained
JOBNAME=simplernn_$(date -u +%y%m%d_%H%M%S)
REGION=us-central1
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${REPO}/simplernn/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=1.2 \
-- \
--train_data_paths="gs://${BUCKET}/train.csv*" \
--eval_data_paths="gs://${BUCKET}/valid.csv*" \
--output_dir=$OUTDIR \
--num_epochs=100
```
<h2> Variant: long sequence </h2>
To create short sequences from a very long sequence.
```
import tensorflow as tf
import numpy as np
def breakup(sess, x, lookback_len):
    """Split 1-D tensor x into overlapping length-lookback_len windows
    (stride 1), stacked into a [N - lookback_len, lookback_len] tensor."""
    N = sess.run(tf.size(x))
    # NOTE(review): xrange(0, N-lookback_len) excludes the final possible
    # window starting at N-lookback_len -- confirm the off-by-one is intended.
    windows = [tf.slice(x, [b], [lookback_len]) for b in xrange(0, N-lookback_len)]
    windows = tf.stack(windows)
    return windows
# Demo on the sequence 1..10 (Python 2 prints; TF 1.x Session API).
x = tf.constant(np.arange(1,11, dtype=np.float32))
with tf.Session() as sess:
    print 'input=', x.eval()
    seqx = breakup(sess, x, 5)
    print 'output=', seqx.eval()
```
Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
# The Monty Hall problem, with lists
For inspiration, see this simulation of [the Monty Hall
Problem](../more-simulation/monty_hall) using arrays.
We use arrays often in data science, but sometimes, it is more efficient to use
Python [lists](../data-types/lists).
To follow along in this section, you will also need [more on
lists](more_on_lists).
## Simulating one trial
To operate on lists we use the Python standard `random` module, instead of the Numpy `random` module. The Numpy module always returns arrays, but in our case, we want to return lists.
```
import random
```
In particular, we are going to use the `shuffle` function in the Python
standard `random` module.
```
# Put the car behind a random door by shuffling the prizes in place.
doors = ['car', 'goat', 'goat']
random.shuffle(doors)
doors
```
Here we chose a door at random. We use the standard `random.choice` instead of
`np.random.choice`.
```
my_door_index = random.choice([0, 1, 2])
my_door_index
```
We get the result of staying with our original choice, and remove that option from the list of available doors.
```
stay_result = doors.pop(my_door_index)
stay_result
```
We are left with the two doors that Monty has to choose from.
```
doors
```
Behind one of these doors, Monty knows there is a goat. He opens the door. We simulate that by removing the first door with a goat behind it.
[Remember](more_on_lists), `remove` removes only the first instance of "goat", leaving the second, if there is one.
```
doors.remove('goat')
doors
```
Now we have only one remaining door. The item behind that door is the result from switching from our original door.
```
switch_result = doors[0]
switch_result
```
## Many trials.
That's one trial. Now let's do that 100000 times.
Here we are using `range` instead of `np.arange`. `range` is the standard
Python equivalent of `np.arange`; it has the same effect, in this case, when we
use it in a loop.
```
# Make 100000 trials.  (The original comment said 10000; n_tries is 100000.)
n_tries = 100000
# Lists to store results from stay and switch strategy
stay_results = []
switch_results = []
for i in range(n_tries):
    # Same code as above, for one trial
    doors = ['car', 'goat', 'goat']
    random.shuffle(doors)
    my_door_index = random.choice([0, 1, 2])
    stay_result = doors.pop(my_door_index)
    doors.remove('goat')  # Monty opens a goat door
    switch_result = doors[0]  # the one remaining door
    # Put results into result lists
    stay_results.append(stay_result)
    switch_results.append(switch_result)
```
We use the `count` method of the list to count the number of "car" element in
each list, and divide by the length of the list, to get the proportion of
successes.
```
# Proportion of wins for each strategy: expect about 1/3 for staying and
# about 2/3 for switching.
stay_results.count('car') / n_tries
switch_results.count('car') / n_tries
```
Compare this solution to the [solution using arrays](../more-simulation/monty_hall). Which solution is easier to read and understand?
| github_jupyter |
# *CoNNear*: A convolutional neural-network model of human cochlear mechanics and filter tuning for real-time applications
Python notebook for reproducing the evaluation results of the proposed CoNNear model.
## Prerequisites
- First, let us compile the cochlea_utils.c file that is used for solving the transmission line (TL) model of the cochlea. This requires a C compiler, which should be installed beforehand. Then go to the connear folder from the terminal and run:
```
gcc -shared -fpic -O3 -ffast-math -o tridiag.so cochlea_utils.c
```
- Install numpy, scipy, keras and tensorflow
## Import required python packages and functions
Import required python packages and load the connear model.
```
# Third-party imports plus the transmission-line (TL) reference model helper.
# NOTE(review): signal, CustomObjectScope and glorot_uniform are imported but
# unused in the cells shown here.
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import keras
from keras.models import model_from_json
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from tlmodel.get_tl_vbm_and_oae import tl_vbm_and_oae
# Load the trained CoNNear model: architecture from JSON, weights from H5.
json_file = open("connear/Gmodel.json", "r")
loaded_model_json = json_file.read()
json_file.close()
connear = model_from_json(loaded_model_json)
connear.load_weights("connear/Gmodel.h5")
connear.summary()
```
Define some functions here
```
def rms(x):
    """Return the root-mean-square of `x` along axis 0.

    For a 2-D matrix this yields the per-column RMS; for a 1-D array,
    a single scalar.
    """
    return np.sqrt((np.asarray(x) ** 2).mean(axis=0))
# Define model specific variables
down_rate = 2  # stride used to subsample the cochlear channels
fs = 20e3  # CoNNear sample rate [Hz]
fs_tl = 100e3  # TL reference model sample rate [Hz]
p0 = 2e-5  # reference sound pressure, 20 uPa [Pa]
factor_fs = int(fs_tl / fs)  # resampling factor between the two rates (5)
right_context = 256  # context samples around each CoNNear window
left_context = 256
# load CFs
CF = np.loadtxt('tlmodel/cf.txt')  # characteristic frequencies of the cochlear sections
```
## Click response
Compare the responses of the models to a click stimulus.
**Notice that for all the simulations, TL model operates at 100kHz and the CoNNear model operates at 20kHz.**
```
#Define the click stimulus
dur = 128.0e-3 # for 2560 samples #CONTEXT
click_duration = 2 # 100 us click
stim = np.zeros((1, int(dur * fs)))
L = 70.0  # stimulus level in dB SPL
samples = dur * fs
click_duration = 2 # 100 us click
click_duration_tl = factor_fs * click_duration  # click length at the TL rate (100 kHz)
silence = 60 #samples in silence
# Number of stimulus samples once the context windows are excluded.
samples = int(samples - right_context - left_context)
# The TL-model reference simulation below is disabled (triple-quoted out);
# it requires the compiled cochlea_utils solver (see Prerequisites).
'''
# GET TL model response
stim = np.zeros((1, (samples + right_context + left_context)*factor_fs))
stim[0, (factor_fs * (right_context+silence)) : (factor_fs * (right_context+silence)) + click_duration_tl] = 2 * np.sqrt(2) * p0 * 10**(L/20)
output = tl_vbm_and_oae(stim , L)
CF = output[0]['cf'][::down_rate]
# basilar membrane motion for click response
# the context samples (first and last 256 samples)
# are removed. Also downsample it to 20kHz
bmm_click_out_full = np.array(output[0]['v'])
stimrange = range(right_context*factor_fs, (right_context*factor_fs) + (factor_fs*samples))
bmm_click_tl = sp_sig.resample_poly(output[0]['v'][stimrange,::down_rate], fs, fs_tl)
bmm_click_tl = bmm_click_tl.T
'''
# Prepare the same for CoNNear model
stim = np.zeros((1, int(dur * fs)))
# Place the click after the context + silence offset, scaled to L dB SPL (peak).
stim[0, right_context + silence : right_context + silence + click_duration] = 2 * np.sqrt(2) * p0 * 10**(L/20)
# Get the CoNNear response
stim = np.expand_dims(stim, axis=2)
# NOTE(review): stim.T reverses the (1, samples, 1) axes; the tone cell below
# passes stim without transposing -- confirm which layout the model expects.
connear_pred_click = connear.predict(stim.T, verbose=1)
# Scale the network output back to physical units (factor 1e-6); presumably the
# model was trained on targets scaled up by 1e6 -- verify against training code.
bmm_click_connear = connear_pred_click[0,:,:].T * 1e-6
```
Plotting the results.
```
# Plot the click stimulus with the context samples trimmed; zoom to 2000 samples.
plt.plot(stim[0,256:-256]), plt.xlim(0,2000)
plt.show()
# TL-model plot disabled (requires the TL simulation above to have run).
'''
plt.imshow(bmm_click_tl, aspect='auto', cmap='jet')
plt.xlim(0,2000), plt.clim(-4e-7,5e-7)
plt.colorbar()
plt.show()
'''
# CoNNear basilar-membrane click response: channel (CF) x time heat map.
plt.imshow(bmm_click_connear, aspect='auto', cmap='jet')
plt.xlim(0,2000), plt.clim(-4e-7,5e-7)
plt.colorbar()
plt.show()
```
## Cochlear Excitation Patterns
Here, we plot the simulated RMS levels of basilar membrane (BM) displacement across CF for tone stimuli presented at SPLs between 0 and 90 dB SPL.
```
f_tone = 1e3 # You can change this tone frequency to see how the excitation pattern changes
# with stimulus frequency
fs = 20e3  # CoNNear sampling rate (Hz), redefined for this cell
p0 = 2e-5  # reference sound pressure (20 uPa)
dur = 102.4e-3 # for 2048 samples
window_len = int(fs * dur)
L = np.arange(0., 91.0, 10.) # SPLs from 0 to 90dB
#CoNNear
t = np.arange(0., dur, 1./fs)  # time axis for the tone
hanlength = int(10e-3 * fs) # 10ms length hanning window
stim_sin = np.sin(2 * np.pi * f_tone * t)
han = signal.windows.hann(hanlength)
# Apply half-Hann onset/offset ramps to avoid abrupt edges.
stim_sin[:int(hanlength/2)] = stim_sin[:int(hanlength/2)] * han[:int(hanlength/2)]
stim_sin[-int(hanlength/2):] = stim_sin[-int(hanlength/2):] * han[int(hanlength/2):]
stim = np.zeros((len(L), int(len(stim_sin))))  # NOTE(review): superseded by the allocation two lines below
#total_length = 2560 #CONTEXT
total_length = window_len + right_context + left_context # CONTEXT
stim = np.zeros((len(L), total_length)) #CONTEXT
# One row per level: scale the unit-amplitude tone to L[j] dB SPL (rms).
for j in range(len(L)):
    stim[j,right_context:window_len+right_context] = p0 * np.sqrt(2) * 10**(L[j]/20) * stim_sin
# prepare for feeding to the DNN
stim = np.expand_dims(stim, axis=2)
connear_pred_tone = connear.predict(stim, verbose=1)
bmm_tone_connear = connear_pred_tone# * 1e-6  NOTE(review): the 1e-6 scaling used in the click cell is disabled here -- confirm which is intended
bmm_tone_connear.shape
# Compute rms for each level
cochlear_pred_tone_rms = np.vstack([rms(bmm_tone_connear[i]) for i in range(len(L))])
# Plot the RMS
cftile=np.tile(CF, (len(L),1))
plt.semilogx(cftile.T, 20.*np.log10(cochlear_pred_tone_rms.T))
plt.xlim(0.25,8.), plt.grid(which='both'),
plt.xticks(ticks=(0.25, 0.5, 1., 2., 4., 8.) , labels=(0.25, 0.5, 1., 2., 4., 8.))
plt.ylim(-80, 20)
plt.xlabel('CF (kHz)')
plt.ylabel('RMS of y_bm (dB)')
plt.title('CoNNear Predicted')
plt.show()
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# MLMD Model Card Toolkit Demo
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/responsible_ai/model_card_toolkit/examples/MLMD_Model_Card_Toolkit_Demo"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/documentation/examples/MLMD_Model_Card_Toolkit_Demo.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/documentation/examples/MLMD_Model_Card_Toolkit_Demo.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/model-card-toolkit/model_card_toolkit/documentation/examples/MLMD_Model_Card_Toolkit_Demo.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Background
This notebook demonstrates how to generate a model card using the Model Card Toolkit with MLMD and TFX pipeline in a Jupyter/Colab environment. You can learn more about model cards at https://modelcards.withgoogle.com/about.
## Setup
We first need to a) install and import the necessary packages, and b) download the data.
### Upgrade to Pip 20.2 and Install TFX
```
!pip install --upgrade pip==20.2
!pip install "tfx==0.26.0"
!pip install model-card-toolkit
```
### Did you restart the runtime?
If you are using Google Colab, the first time that you run the cell above, you must restart the runtime (Runtime > Restart runtime ...). This is because of the way that Colab loads packages.
### Import packages
We import necessary packages, including standard TFX component classes and check the library versions.
```
import os
import pprint
import tempfile
import urllib
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
tf.get_logger().propagate = False
pp = pprint.PrettyPrinter()
import tfx
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.base import executor_spec
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
import ml_metadata as mlmd
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.version.__version__))
print('MLMD version: {}'.format(mlmd.__version__))
```
### Set up pipeline paths
```
# This is the root directory for your TFX pip package installation.
_tfx_root = tfx.__path__
# Set up logging.
absl.logging.set_verbosity(absl.logging.INFO)
```
### Download example data
We download the example dataset for use in our TFX pipeline.
```
# UCI Adult ("Census Income") dataset: comma-separated records, no header row.
DATA_PATH = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/' \
            'adult.data'
_data_root = tempfile.mkdtemp(prefix='tfx-data')  # throwaway dir for the CSV
_data_filepath = os.path.join(_data_root, "data.csv")
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
# Column names for the header line prepended below.
columns = [
  "Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital-Status",
  "Occupation", "Relationship", "Race", "Sex", "Capital-Gain", "Capital-Loss",
  "Hours-per-week", "Country", "Over-50K"]
with open(_data_filepath, 'r') as f:
  content = f.read()
# Binarize the label column in place: ", <=50K" -> 0 and ", >50K" -> 1.
content = content.replace(", <=50K", ', 0').replace(", >50K", ', 1')
with open(_data_filepath, 'w') as f:
  f.write(','.join(columns) + '\n' + content)
```
Take a quick look at the CSV file.
```
!head {_data_filepath}
```
### Create the InteractiveContext
Last, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook.
```
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext. Calls to InteractiveContext are no-ops outside of the
# notebook.
context = InteractiveContext()
```
## Run TFX components interactively
In the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts. In this notebook, we won’t provide detailed explanations of each TFX component, but you can see what each does at [TFX Colab workshop](https://github.com/tensorflow/workshops/blob/master/tfx_labs/Lab_1_Pipeline_in_Colab.ipynb).
### ExampleGen
Create the `ExampleGen` component to split data into training and evaluation sets, convert the data into `tf.Example` format, and copy data into the `_tfx_root` directory for other components to access.
```
example_gen = CsvExampleGen(input=external_input(_data_root))
context.run(example_gen)
artifact = example_gen.outputs['examples'].get()[0]
print(artifact.split_names, artifact.uri)
```
Let’s take a look at the first three training examples:
```
# Get the URI of the output artifact representing the training examples, which is a directory
train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
```
### StatisticsGen
`StatisticsGen` takes as input the dataset we just ingested using `ExampleGen` and allows you to perform some analysis of your dataset using TensorFlow Data Validation (TFDV).
```
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
```
After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots!
```
context.show(statistics_gen.outputs['statistics'])
```
### SchemaGen
`SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default.
```
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
context.run(schema_gen)
context.show(schema_gen.outputs['schema'])
```
To learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen).
### Transform
`Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code.
Let's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)).
```
_census_income_constants_module_file = 'census_income_constants.py'
%%writefile {_census_income_constants_module_file}
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [20]
CATEGORICAL_FEATURE_KEYS = ["Education-Num"]
DENSE_FLOAT_FEATURE_KEYS = ["Capital-Gain", "Hours-per-week", "Capital-Loss"]
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = ["Age"]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 200
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = ["Workclass", "Education", "Marital-Status", "Occupation",
"Relationship", "Race", "Sex", "Country"]
# Keys
LABEL_KEY = "Over-50K"
def transformed_name(key):
  """Return the feature key with the '_xf' suffix marking a transformed feature."""
  return '{}_xf'.format(key)
_census_income_transform_module_file = 'census_income_transform.py'
%%writefile {_census_income_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
import census_income_constants
_DENSE_FLOAT_FEATURE_KEYS = census_income_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = census_income_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = census_income_constants.VOCAB_SIZE
_OOV_SIZE = census_income_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = census_income_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = census_income_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = census_income_constants.CATEGORICAL_FEATURE_KEYS
_LABEL_KEY = census_income_constants.LABEL_KEY
_transformed_name = census_income_constants.transformed_name
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.
  Args:
    inputs: map from feature keys to raw not-yet-transformed features.
  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature (top_k terms + OOV hash buckets).
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Bucketize numeric features into _FEATURE_BUCKET_COUNT buckets.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Already-integerized categoricals pass through after missing-value fill.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # The label is not transformed beyond densification.
  label = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = label
  return outputs
def _fill_in_missing(x):
  """Densify a rank-2 SparseTensor (second dim size <= 1) into a rank-1 tensor.

  Missing entries become '' for string tensors and 0 otherwise.

  Args:
    x: A `SparseTensor` of rank 2 whose dense shape has size at most 1 in the
      second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  if x.dtype == tf.string:
    filler = ''
  else:
    filler = 0
  dense = tf.sparse.to_dense(
      tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
      filler)
  return tf.squeeze(dense, axis=1)
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_census_income_transform_module_file))
context.run(transform)
transform.outputs
```
### Trainer
Let's see an example of user-defined model code below (for an introduction to the TensorFlow Keras APIs, [see the tutorial](https://www.tensorflow.org/guide/keras)):
```
_census_income_trainer_module_file = 'census_income_trainer.py'
%%writefile {_census_income_trainer_module_file}
from typing import List, Text
import os
import absl
import datetime
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.executor import TrainerFnArgs
import census_income_constants
_DENSE_FLOAT_FEATURE_KEYS = census_income_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = census_income_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = census_income_constants.VOCAB_SIZE
_OOV_SIZE = census_income_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = census_income_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = census_income_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = census_income_constants.CATEGORICAL_FEATURE_KEYS
_MAX_CATEGORICAL_FEATURE_VALUES = census_income_constants.MAX_CATEGORICAL_FEATURE_VALUES
_LABEL_KEY = census_income_constants.LABEL_KEY
_transformed_name = census_income_constants.transformed_name
def _transformed_names(keys):
  """Apply the transformed-feature naming convention to every key in `keys`."""
  return list(map(_transformed_name, keys))
def _gzip_reader_fn(filenames):
  """Return a TFRecord reader configured for gzip-compressed files."""
  reader = tf.data.TFRecordDataset(filenames, compression_type='GZIP')
  return reader
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""
  # Attach the transform graph to the model so it is saved/exported with it.
  model.tft_layer = tf_transform_output.transform_features_layer()
  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not provided at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    # Drop the transformed label if the transform graph produced one.
    if _transformed_name(_LABEL_KEY) in transformed_features:
      transformed_features.pop(_transformed_name(_LABEL_KEY))
    return model(transformed_features)
  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.
  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch
  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # Copy so later mutation cannot affect the spec cached by tf_transform_output.
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,  # transformed examples are gzip'ed TFRecords
      label_key=_transformed_name(_LABEL_KEY))
  return dataset
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
  """Creates a DNN Keras model.
  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
  Returns:
    A keras Model.
  """
  # Dense (z-scored) numeric features feed the deep part directly.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocabulary-encoded features: integer ids in [0, _VOCAB_SIZE + _OOV_SIZE).
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  # Bucketized features: integer ids in [0, _FEATURE_BUCKET_COUNT).
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # Integerized categoricals, each with its own maximum value.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  # One-hot wrap the categorical columns for the wide (linear) part.
  indicator_column = [
      tf.feature_column.indicator_column(categorical_column)
      for categorical_column in categorical_columns
  ]
  model = _wide_and_deep_classifier(
      # TODO(b/139668410) replace with premade wide_and_deep keras model
      wide_columns=indicator_column,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
  return model
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.

  Returns:
    A Wide and Deep Keras model
  """
  # Following values are hard coded for simplicity in this example,
  # however preferably they should be passed in as hparams.

  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)
  })
  # TODO(b/161816639): SparseFeatures for feature columns + Keras.
  # Deep part: dense features through a stack of fully-connected layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  # Wide part: one-hot (indicator) features feeding the output directly.
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)
  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))
  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      # `learning_rate` is the supported kwarg; `lr` is a deprecated alias.
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Trainer will call this function.
def run_fn(fn_args: TrainerFnArgs):
  """Train the model based on given args.
  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  # Batched datasets of transformed examples (batch size 40).
  train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)
  eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)
  model = _build_keras_model(
      # Construct layers sizes with exponential decay
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ])
  # This log path might change in the future.
  log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=log_dir, update_freq='batch')
  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])
  # Export with a serving signature that parses serialized tf.Examples.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
trainer = Trainer(
module_file=os.path.abspath(_census_income_trainer_module_file),
custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(num_steps=100),
eval_args=trainer_pb2.EvalArgs(num_steps=50))
context.run(trainer)
```
### Evaluator
The `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library.
`Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values. See an example of this configuration below:
```
from google.protobuf.wrappers_pb2 import BoolValue
eval_config = tfma.EvalConfig(
model_specs=[
# This assumes a serving model with signature 'serving_default'. If
# using estimator based EvalSavedModel, add signature_name: 'eval' and
# remove the label_key.
tfma.ModelSpec(label_key="Over-50K")
],
metrics_specs=[
tfma.MetricsSpec(
# The metrics added here are in addition to those saved with the
# model (assuming either a keras model or EvalSavedModel is used).
# Any metrics added into the saved model (for example using
# model.compile(..., metrics=[...]), etc) will be computed
# automatically.
# To add validation thresholds for metrics saved with the model,
# add them keyed by metric name to the thresholds map.
metrics=[
tfma.MetricConfig(class_name='ExampleCount'),
tfma.MetricConfig(class_name='BinaryAccuracy'),
tfma.MetricConfig(class_name='FairnessIndicators',
config='{ "thresholds": [0.5] }'),
]
)
],
slicing_specs=[
# An empty slice spec means the overall slice, i.e. the whole dataset.
tfma.SlicingSpec(),
# Data can be sliced along a feature column. In this case, data is
# sliced by feature column Race and Sex.
tfma.SlicingSpec(feature_keys=['Race']),
tfma.SlicingSpec(feature_keys=['Sex']),
tfma.SlicingSpec(feature_keys=['Race', 'Sex']),
],
options = tfma.Options(compute_confidence_intervals=BoolValue(value=True))
)
# Use TFMA to compute a evaluation statistics over features of a model and
# validate them against a baseline.
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
eval_config=eval_config)
context.run(evaluator)
evaluator.outputs
```
Using the `evaluation` output we can show the default visualization of global metrics on the entire evaluation set.
```
context.show(evaluator.outputs['evaluation'])
```
## Populate Properties from ModelCard with Model Card Toolkit
Now that we’ve set up our TFX pipeline, we will use the Model Card Toolkit to extract key artifacts from the run and populate a Model Card.
### Connect to the MLMD store used by the InteractiveContext
```
from ml_metadata.metadata_store import metadata_store
from IPython import display
mlmd_store = metadata_store.MetadataStore(context.metadata_connection_config)
model_uri = trainer.outputs["model"].get()[0].uri
```
### Use Model Card Toolkit
#### Initialize the Model Card Toolkit.
```
from model_card_toolkit import ModelCardToolkit
mct = ModelCardToolkit(mlmd_store=mlmd_store, model_uri=model_uri)
```
#### Create Model Card workspace.
```
model_card = mct.scaffold_assets()
```
#### Annotate more information into Model Card.
It is also important to document model information that might be important to downstream users, such as its limitations, intended use cases, trade offs, and ethical considerations. For each of these sections, we can directly add new JSON objects to represent this information.
```
model_card.model_details.name = 'Census Income Classifier'
model_card.model_details.overview = (
'This is a wide and deep Keras model which aims to classify whether or not '
'an individual has an income of over $50,000 based on various demographic '
'features. The model is trained on the UCI Census Income Dataset. This is '
'not a production model, and this dataset has traditionally only been used '
'for research purposes. In this Model Card, you can review quantitative '
'components of the model’s performance and data, as well as information '
'about the model’s intended uses, limitations, and ethical considerations.'
)
model_card.model_details.owners = [
{'name': 'Model Cards Team', 'contact': 'model-cards@google.com'}
]
model_card.considerations.use_cases = [
'This dataset that this model was trained on was originally created to '
'support the machine learning community in conducting empirical analysis '
'of ML algorithms. The Adult Data Set can be used in fairness-related '
'studies that compare inequalities across sex and race, based on '
'people’s annual incomes.'
]
model_card.considerations.limitations = [
'This is a class-imbalanced dataset across a variety of sensitive classes.'
' The ratio of male-to-female examples is about 2:1 and there are far more'
' examples with the “white” attribute than every other race combined. '
'Furthermore, the ratio of $50,000 or less earners to $50,000 or more '
'earners is just over 3:1. Due to the imbalance across income levels, we '
'can see that our true negative rate seems quite high, while our true '
'positive rate seems quite low. This is true to an even greater degree '
'when we only look at the “female” sub-group, because there are even '
'fewer female examples in the $50,000+ earner group, causing our model to '
'overfit these examples. To avoid this, we can try various remediation '
'strategies in future iterations (e.g. undersampling, hyperparameter '
'tuning, etc), but we may not be able to fix all of the fairness issues.'
]
model_card.considerations.ethical_considerations = [{
'name':
'We risk expressing the viewpoint that the attributes in this dataset '
'are the only ones that are predictive of someone’s income, even '
'though we know this is not the case.',
'mitigation_strategy':
'As mentioned, some interventions may need to be performed to address '
'the class imbalances in the dataset.'
}]
```
#### Filter and Add Graphs.
We can filter the graphs generated by the TFX components to include those most relevant for the Model Card using the function defined below. In this example, we filter for `race` and `sex`, two potentially sensitive attributes.
Each Model Card will have up to three sections for graphs -- training dataset statistics, evaluation dataset statistics, and quantitative analysis of our model’s performance.
```
# These are the graphs that will appear in the Quantitative Analysis portion of
# the Model Card. Feel free to add or remove from this list.
TARGET_EVAL_GRAPH_NAMES = [
  'fairness_indicators_metrics/false_positive_rate@0.5',
  'fairness_indicators_metrics/false_negative_rate@0.5',
  'binary_accuracy',
  'example_count | Race_X_Sex',
]
# These are the graphs that will appear in both the Train Set and Eval Set
# portions of the Model Card. Feel free to add or remove from this list.
TARGET_DATASET_GRAPH_NAMES = [
  'counts | Race',
  'counts | Sex',
]
def filter_graphs(graphics, target_graph_names):
  """Keep graphs whose name starts with any target prefix, sorted by name.

  A graph is included once per matching prefix, mirroring the nested-loop
  semantics of the original implementation.
  """
  matched = [graph
             for graph in graphics
             for prefix in target_graph_names
             if graph.name.startswith(prefix)]
  return sorted(matched, key=lambda g: g.name)
# Populating the three different sections using the filter defined above. To
# see all the graphs available in a section, we can iterate through each of the
# different collections.
model_card.quantitative_analysis.graphics.collection = filter_graphs(
model_card.quantitative_analysis.graphics.collection, TARGET_EVAL_GRAPH_NAMES)
model_card.model_parameters.data.eval.graphics.collection = filter_graphs(
model_card.model_parameters.data.eval.graphics.collection, TARGET_DATASET_GRAPH_NAMES)
model_card.model_parameters.data.train.graphics.collection = filter_graphs(
model_card.model_parameters.data.train.graphics.collection, TARGET_DATASET_GRAPH_NAMES)
```
We then add (optional) descriptions for each of the graph sections.
```
model_card.model_parameters.data.train.graphics.description = (
'This section includes graphs displaying the class distribution for the '
'“Race” and “Sex” attributes in our training dataset. We chose to '
'show these graphs in particular because we felt it was important that '
'users see the class imbalance.'
)
model_card.model_parameters.data.eval.graphics.description = (
'Like the training set, we provide graphs showing the class distribution '
'of the data we used to evaluate our model’s performance. '
)
model_card.quantitative_analysis.graphics.description = (
'These graphs show how the model performs for data sliced by “Race”, '
'“Sex” and the intersection of these attributes. The metrics we chose '
'to display are “Accuracy”, “False Positive Rate”, and “False '
'Negative Rate”, because we anticipated that the class imbalances might '
'cause our model to underperform for certain groups.'
)
mct.update_model_card_json(model_card)
```
#### Generate the Model Card.
We can now display the Model Card in HTML format.
```
html = mct.export_format()
display.display(display.HTML(html))
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# 過学習と学習不足について知る
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/r1/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/r1/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [docs-ja@tensorflow.org メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。
いつものように、この例のプログラムは`tf.keras` APIを使用します。詳しくはTensorFlowの[Keras guide](https://www.tensorflow.org/r1/guide/keras)を参照してください。
これまでの例、つまり、映画レビューの分類と燃費の推定では、検証用データでのモデルの正解率が、数エポックでピークを迎え、その後低下するという現象が見られました。
言い換えると、モデルが訓練用データを**過学習**したと考えられます。過学習への対処の仕方を学ぶことは重要です。**訓練用データセット**で高い正解率を達成することは難しくありませんが、我々は、(これまで見たこともない)**テスト用データ**に汎化したモデルを開発したいのです。
過学習の反対語は**学習不足**(underfitting)です。学習不足は、モデルがテストデータに対してまだ改善の余地がある場合に発生します。学習不足の原因は様々です。モデルが十分強力でないとか、正則化のしすぎだとか、単に訓練時間が短すぎるといった理由があります。学習不足は、訓練用データの中の関連したパターンを学習しきっていないということを意味します。
モデルの訓練をやりすぎると、モデルは過学習を始め、訓練用データの中のパターンで、テストデータには一般的ではないパターンを学習します。我々は、過学習と学習不足の中間を目指す必要があります。これから見ていくように、ちょうどよいエポック数だけ訓練を行うというのは必要なスキルなのです。
過学習を防止するための、最良の解決策は、より多くの訓練用データを使うことです。多くのデータで訓練を行えば行うほど、モデルは自然により汎化していく様になります。これが不可能な場合、次善の策は正則化のようなテクニックを使うことです。正則化は、モデルに保存される情報の量とタイプに制約を課すものです。ネットワークが少数のパターンしか記憶できなければ、最適化プロセスにより、最も主要なパターンのみを学習することになり、より汎化される可能性が高くなります。
このノートブックでは、重みの正則化とドロップアウトという、よく使われる2つの正則化テクニックをご紹介します。これらを使って、IMDBの映画レビューを分類するノートブックの改善を図ります。
```
import tensorflow.compat.v1 as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## IMDBデータセットのダウンロード
以前のノートブックで使用したエンベディングの代わりに、ここでは文をマルチホットエンコードします。このモデルは、訓練用データセットをすぐに過学習します。このモデルを使って、過学習がいつ起きるかということと、どうやって過学習と戦うかをデモします。
リストをマルチホットエンコードすると言うのは、0と1のベクトルにするということです。具体的にいうと、例えば`[3, 5]`というシーケンスを、インデックス3と5の値が1で、それ以外がすべて0の、10,000次元のベクトルに変換するということを意味します。
```
# Keep only the NUM_WORDS most frequent words; rarer words are dropped from the sequences.
NUM_WORDS = 10000
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)
def multi_hot_sequences(sequences, dimension):
    """Multi-hot encode each index sequence into a (len(sequences), dimension) 0/1 matrix."""
    encoded = np.zeros((len(sequences), dimension))
    for row, indices in enumerate(sequences):
        # Flip on every position named in this sequence; all other entries stay 0.
        encoded[row, indices] = 1.0
    return encoded
# Multi-hot encode the integer word-index sequences into fixed-width 0/1 vectors.
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
```
結果として得られるマルチホットベクトルの1つを見てみましょう。単語のインデックスは頻度順にソートされています。このため、インデックスが0に近いほど1が多く出現するはずです。分布を見てみましょう。
```
# Word indices are sorted by frequency, so the 1s cluster near index 0.
plt.plot(train_data[0])
```
## 過学習のデモ
過学習を防止するための最も単純な方法は、モデルのサイズ、すなわち、モデル内の学習可能なパラメータの数を小さくすることです(学習パラメータの数は、層の数と層ごとのユニット数で決まります)。ディープラーニングでは、モデルの学習可能なパラメータ数を、しばしばモデルの「キャパシティ」と呼びます。直感的に考えれば、パラメータ数の多いモデルほど「記憶容量」が大きくなり、訓練用のサンプルとその目的変数の間の辞書のようなマッピングをたやすく学習することができます。このマッピングには汎化能力がまったくなく、これまで見たことが無いデータを使って予測をする際には役に立ちません。
ディープラーニングのモデルは訓練用データに適応しやすいけれど、本当のチャレンジは汎化であって適応ではないということを、肝に銘じておく必要があります。
一方、ネットワークの記憶容量が限られている場合、前述のようなマッピングを簡単に学習することはできません。損失を減らすためには、より予測能力が高い圧縮された表現を学習しなければなりません。同時に、モデルを小さくしすぎると、訓練用データに適応するのが難しくなります。「多すぎる容量」と「容量不足」の間にちょうどよい容量があるのです。
残念ながら、(層の数や、層ごとの大きさといった)モデルの適切なサイズやアーキテクチャを決める魔法の方程式はありません。一連の異なるアーキテクチャを使って実験を行う必要があります。
適切なモデルのサイズを見つけるには、比較的少ない層の数とパラメータから始めるのがベストです。それから、検証用データでの損失値の改善が見られなくなるまで、徐々に層の大きさを増やしたり、新たな層を加えたりします。映画レビューの分類ネットワークでこれを試してみましょう。
比較基準として、```Dense```層だけを使ったシンプルなモデルを構築し、その後、それより小さいバージョンと大きいバージョンを作って比較します。
### 比較基準を作る
```
# Baseline: two 16-unit ReLU hidden layers feeding a sigmoid output for binary classification.
baseline_model = keras.Sequential([
# `input_shape` is required here so that `.summary` can be displayed
keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
# binary_crossentropy is also tracked as a metric so train/validation loss can be plotted later.
baseline_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
baseline_model.summary()
# The test split is reused as validation data purely to illustrate over/underfitting curves.
baseline_history = baseline_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
### より小さいモデルの構築
今作成したばかりの比較基準となるモデルに比べて隠れユニット数が少ないモデルを作りましょう。
```
# Lower-capacity variant of the baseline: only 4 hidden units per layer.
smaller_model = keras.Sequential()
smaller_model.add(keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)))
smaller_model.add(keras.layers.Dense(4, activation=tf.nn.relu))
smaller_model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
smaller_model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy', 'binary_crossentropy'],
)
smaller_model.summary()
```
同じデータを使って訓練します。
```
# Identical training regime to the baseline so the learning curves are directly comparable.
smaller_history = smaller_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
### より大きなモデルの構築
練習として、より大きなモデルを作成し、どれほど急速に過学習が起きるかを見ることもできます。次はこのベンチマークに、この問題が必要とするよりはるかに容量の大きなネットワークを追加しましょう。
```
# High-capacity variant (512 units per hidden layer) used to demonstrate rapid overfitting.
bigger_model = keras.models.Sequential()
bigger_model.add(keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(NUM_WORDS,)))
bigger_model.add(keras.layers.Dense(512, activation=tf.nn.relu))
bigger_model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
bigger_model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy', 'binary_crossentropy'],
)
bigger_model.summary()
```
このモデルもまた同じデータを使って訓練します。
```
# Same training regime as the other models for an apples-to-apples comparison.
bigger_history = bigger_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
### 訓練時と検証時の損失をグラフにする
<!--TODO(markdaoust): This should be a one-liner with tensorboard -->
実線は訓練用データセットの損失、破線は検証用データセットでの損失です(検証用データでの損失が小さい方が良いモデルです)。これをみると、小さいネットワークのほうが比較基準のモデルよりも過学習が始まるのが遅いことがわかります(4エポックではなく6エポック後)。また、過学習が始まっても性能の低下がよりゆっくりしています。
```
def plot_history(histories, key='binary_crossentropy'):
    """Plot train (solid) vs. validation (dashed) curves of `key` for each (name, history) pair."""
    plt.figure(figsize=(16, 10))
    for name, history in histories:
        dashed = plt.plot(history.epoch, history.history['val_' + key],
                          '--', label=name.title() + ' Val')
        # Reuse the validation line's colour so each model's train/val pair matches.
        plt.plot(history.epoch, history.history[key], color=dashed[0].get_color(),
                 label=name.title() + ' Train')
    plt.xlabel('Epochs')
    plt.ylabel(key.replace('_', ' ').title())
    plt.legend()
    plt.xlim([0, max(history.epoch)])
# Compare capacities: the smaller net overfits later and more gently; the bigger net much sooner.
plot_history([('baseline', baseline_history),
('smaller', smaller_history),
('bigger', bigger_history)])
```
より大きなネットワークでは、すぐに、1エポックで過学習が始まり、その度合も強いことに注目してください。ネットワークの容量が大きいほど訓練用データをモデル化するスピードが早くなり(結果として訓練時の損失値が小さくなり)ますが、より過学習しやすく(結果として訓練時の損失値と検証時の損失値が大きく乖離しやすく)なります。
## 戦略
### 重みの正則化を加える
「オッカムの剃刀」の原則をご存知でしょうか。何かの説明が2つあるとすると、最も正しいと考えられる説明は、仮定の数が最も少ない「一番単純な」説明だというものです。この原則は、ニューラルネットワークを使って学習されたモデルにも当てはまります。ある訓練用データとネットワーク構造があって、そのデータを説明できる重みの集合が複数ある時(つまり、複数のモデルがある時)、単純なモデルのほうが複雑なものよりも過学習しにくいのです。
ここで言う「単純なモデル」とは、パラメータ値の分布のエントロピーが小さいもの(あるいは、上記で見たように、そもそもパラメータの数が少ないもの)です。したがって、過学習を緩和するための一般的な手法は、重みが小さい値のみをとることで、重み値の分布がより整然となる(正則)様に制約を与えるものです。これは「重みの正則化」と呼ばれ、ネットワークの損失関数に、重みの大きさに関連するコストを加えることで行われます。このコストには2つの種類があります。
* [L1正則化](https://developers.google.com/machine-learning/glossary/#L1_regularization) 重み係数の絶対値に比例するコストを加える(重みの「L1ノルム」と呼ばれる)。
* [L2正則化](https://developers.google.com/machine-learning/glossary/#L2_regularization) 重み係数の二乗に比例するコストを加える(重み係数の二乗「L2ノルム」と呼ばれる)。L2正則化はニューラルネットワーク用語では重み減衰(Weight Decay)と呼ばれる。呼び方が違うので混乱しないように。重み減衰は数学的にはL2正則化と同義である。
`tf.keras`では、重みの正則化をするために、重み正則化のインスタンスをキーワード引数として層に加えます。ここでは、L2正則化を追加してみましょう。
```
# Same architecture as the baseline, plus an L2 (weight-decay) penalty on each hidden layer's kernel.
l2_model = keras.models.Sequential([
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
l2_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
# The regularization penalty is added to the loss during training only.
l2_model_history = l2_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
```l2(0.001)```というのは、層の重み行列の係数全てに対して```0.001 * 重み係数の値 **2```をネットワークの損失値合計に加えることを意味します。このペナルティは訓練時のみに加えられるため、このネットワークの損失値は、訓練時にはテスト時に比べて大きくなることに注意してください。
L2正則化の影響を見てみましょう。
```
# L2-regularized model vs. baseline: same parameter count, visibly less overfitting.
plot_history([('baseline', baseline_history),
('l2', l2_model_history)])
```
ご覧のように、L2正則化ありのモデルは比較基準のモデルに比べて過学習しにくくなっています。両方のモデルのパラメータ数は同じであるにもかかわらずです。
### ドロップアウトを追加する
ドロップアウトは、ニューラルネットワークの正則化テクニックとして最もよく使われる手法の一つです。この手法は、トロント大学のヒントンと彼の学生が開発したものです。ドロップアウトは層に適用するもので、訓練時に層から出力された特徴量に対してランダムに「ドロップアウト(つまりゼロ化)」を行うものです。例えば、ある層が訓練時にある入力サンプルに対して、普通は`[0.2, 0.5, 1.3, 0.8, 1.1]` というベクトルを出力するとします。ドロップアウトを適用すると、このベクトルは例えば`[0, 0.5, 1.3, 0, 1.1]`のようにランダムに散らばったいくつかのゼロを含むようになります。「ドロップアウト率」はゼロ化される特徴の割合で、通常は0.2から0.5の間に設定します。テスト時は、どのユニットもドロップアウトされず、代わりに出力値がドロップアウト率と同じ比率でスケールダウンされます。これは、訓練時に比べてたくさんのユニットがアクティブであることに対してバランスをとるためです。
`tf.keras`では、Dropout層を使ってドロップアウトをネットワークに導入できます。ドロップアウト層は、その直前の層の出力に対してドロップアウトを適用します。
それでは、IMDBネットワークに2つのドロップアウト層を追加しましょう。
```
# Baseline architecture with 50% dropout after each hidden layer.
dpt_model = keras.models.Sequential([
keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dropout(rate=0.5),
keras.layers.Dense(16, activation=tf.nn.relu),
keras.layers.Dropout(rate=0.5),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
dpt_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
dpt_model_history = dpt_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
# Dropout model vs. baseline.
plot_history([('baseline', baseline_history),
('dropout', dpt_model_history)])
```
ドロップアウトを追加することで、比較対象モデルより明らかに改善が見られます。
まとめ:ニューラルネットワークにおいて過学習を防ぐ最も一般的な方法は次のとおりです。
* 訓練データを増やす
* ネットワークの容量をへらす
* 重みの正則化を行う
* ドロップアウトを追加する
このガイドで触れていない2つの重要なアプローチがあります。データ拡張とバッチ正規化です。
| github_jupyter |
# Extractive QA to build structured data
_This notebook is part of a tutorial series on [txtai](https://github.com/neuml/txtai), an AI-powered semantic search platform._
Traditional ETL/data parsing systems establish rules to extract information of interest. Regular expressions, string parsing and similar methods define fixed rules. This works in many cases but what if you are working with unstructured data containing numerous variations? The rules can be cumbersome and hard to maintain over time.
This notebook uses machine learning and extractive question-answering (QA) to utilize the vast knowledge built into large language models. These models have been trained on extremely large datasets, learning the many variations of natural language.
# Install dependencies
Install `txtai` and all dependencies.
```
%%capture
!pip install git+https://github.com/neuml/txtai
```
# Train a QA model with few-shot learning
The code below trains a new QA model using a few examples. These examples give the model hints on the type of questions that will be asked and the type of answers to look for. It doesn't take a lot of examples to do this, as shown below.
```
import pandas as pd
from txtai.pipeline import HFTrainer, Questions, Labels
# Few-shot training data: each field type gets one positive example and at least
# one negative (answers=None) example, so the model also learns when to abstain.
data = [
{"question": "What is the url?",
"context": "Faiss (https://github.com/facebookresearch/faiss) is a library for efficient similarity search.",
"answers": "https://github.com/facebookresearch/faiss"},
# NOTE(review): this question omits the trailing "?" unlike the other url question — confirm intentional.
{"question": "What is the url", "context": "The last release was Wed Sept 25 2021", "answers": None},
{"question": "What is the date?", "context": "The last release was Wed Sept 25 2021", "answers": "Wed Sept 25 2021"},
{"question": "What is the date?", "context": "The order total comes to $44.33", "answers": None},
{"question": "What is the amount?", "context": "The order total comes to $44.33", "answers": "$44.33"},
{"question": "What is the amount?", "context": "The last release was Wed Sept 25 2021", "answers": None},
]
# Fine-tune a SQuAD-distilled model on the few-shot examples above.
trainer = HFTrainer()
model, tokenizer = trainer("distilbert-base-cased-distilled-squad", data, task="question-answering")
```
# Parse data into a structured table
The next section takes a series of rows of text and runs a set of questions against each row. The answers are then used to build a pandas DataFrame.
```
# Input data: one unstructured text row per future table row.
context = ["Released on 6/03/2021",
"Release delayed until the 11th of August",
"Documentation can be found here: neuml.github.io/txtai",
"The stock price fell to three dollars",
"Great day: closing price for March 23rd is $33.11, for details - https://finance.google.com"]
# Define column queries: each query becomes one output column.
queries = ["What is the url?", "What is the date?", "What is the amount?"]
# Extract fields: run every query against every row with the fine-tuned QA model.
questions = Questions(path=(model, tokenizer), gpu=True)
results = [questions([question] * len(context), context) for question in queries]
# Keep the raw text as the final column.
results.append(context)
# Load into DataFrame (rows = context entries, columns = extracted fields + text)
pd.DataFrame(list(zip(*results)), columns=["URL", "Date", "Amount", "Text"])
```
# Add additional columns
This method can be combined with other models to categorize, group or otherwise derive additional columns. The code below derives an additional sentiment column.
```
# Add sentiment as a derived column using a fixed-label classifier.
labels = Labels(path="distilbert-base-uncased-finetuned-sst-2-english", dynamic=False)
# Map each prediction to a label name; note this rebinds `labels` from the pipeline to a list.
labels = ["POSITIVE" if x[0][0] == 1 else "NEGATIVE" for x in labels(context)]
# Insert the sentiment column just before the trailing Text column.
results.insert(len(results) - 1, labels)
# Load into DataFrame
pd.DataFrame(list(zip(*results)), columns=["URL", "Date", "Amount", "Sentiment", "Text"])
```
| github_jupyter |
# cadCAD Tutorials: The Robot and the Marbles, part 2
In [Part 1](../robot-marbles-part-1/robot-marbles-part-1.ipynb) we introduced the 'language' in which a system must be described in order for it to be interpretable by cadCAD and some of the basic concepts of the library:
* State Variables
* Timestep
* State Update Functions
* Partial State Update Blocks
* Simulation Configuration Parameters
This article will introduce the concept of __Policies__. But first let's copy the base configuration from Part 1. As a reminder, here's the description of the simple system we are using for illustration purposes.
__The robot and the marbles__
* Picture a box (`box_A`) with ten marbles in it; an empty box (`box_B`) next to the first one; and a robot arm capable of taking a marble from any one of the boxes and dropping it into the other one.
* The robot is programmed to take one marble at a time from the box containing the largest number of marbles and drop it in the other box. It repeats that process until the boxes contain an equal number of marbles.
```
%%capture
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# List of all the state variables in the system and their initial values
# Initial state: all ten marbles start in box_A; box_B starts empty.
genesis_states = {
'box_A': 10, # as per the description of the example, box_A starts out with 10 marbles in it
'box_B': 0 # as per the description of the example, box_B starts out empty
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def update_A(params, step, sH, s, _input):
    """State update for box_A: move one marble toward balance with box_B."""
    a, b = s['box_A'], s['box_B']
    # Give a marble away when A is fuller, take one when B is fuller, else hold.
    if a > b:
        delta = -1
    elif a < b:
        delta = 1
    else:
        delta = 0
    return ('box_A', a + delta)
def update_B(params, step, sH, s, _input):
    """State update for box_B: move one marble toward balance with box_A."""
    b, a = s['box_B'], s['box_A']
    # Mirror image of update_A: shed a marble when B is fuller, gain one when A is.
    if b > a:
        delta = -1
    elif b < a:
        delta = 1
    else:
        delta = 0
    return ('box_B', b + delta)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# In the Partial State Update Blocks, the user specifies if state update functions will be run in series or in parallel
# One block: no policies yet; both box variables update simultaneously each timestep.
partial_state_update_blocks = [
{
'policies': { # We'll ignore policies for now
},
'variables': { # The following state variables will be updated simultaneously
'box_A': update_A,
'box_B': update_B
}
}
]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Settings of general simulation parameters, unrelated to the system itself
# `T` is a range with the number of discrete units of time the simulation will run for;
# `N` is the number of times the simulation will be run (Monte Carlo runs)
# In this example, we'll run the simulation once (N=1) and its duration will be of 10 timesteps
# We'll cover the `M` key in a future article. For now, let's omit it
# 10 timesteps, single Monte Carlo run; `M` (params) intentionally omitted for now.
sim_config_dict = {
'T': range(10),
'N': 1,
#'M': {}
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#imported some addition utilities to help with configuration set-up
from cadCAD.configuration.utils import config_sim
from cadCAD.configuration import Experiment
exp = Experiment()
c = config_sim(sim_config_dict)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
exp.append_configs(initial_state=genesis_states, #dict containing variable names and initial values
partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
sim_configs=c #preprocessed dictionaries containing simulation parameters
)
# Execute in local (single-process) mode.
from cadCAD.engine import ExecutionMode, ExecutionContext
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(exec_mode.local_mode)
from cadCAD.engine import Executor
from cadCAD import configs
simulation = Executor(exec_context=local_mode_ctx, configs=configs) # Pass the configuration object inside an array
raw_system_events, tensor_field, sessions = simulation.execute() # The `execute()` method returns a tuple; its first element contains the raw results
%matplotlib inline
import pandas as pd
simulation_result = pd.DataFrame(raw_result)
simulation_result.plot('timestep', ['box_A', 'box_B'], grid=True,
colormap = 'RdYlGn',
xticks=list(simulation_result['timestep'].drop_duplicates()),
yticks=list(range(1+(simulation_result['box_A']+simulation_result['box_B']).max())));
```
# Policies
In part 1, we ignored the `_input` argument of state update functions. That argument is a signal passed to the state update function by another set of functions: Policy Functions.
Policy Functions are most commonly used as representations of the behavior of agents that interact with the components of the system we're simulating in cadCAD. But more generally, they describe the logic of some component or mechanism of the system. It is possible to encode the functionality of a policy function in the state update functions themselves (as we did in part 1, where we had the robot's algorithm reside in the `update_A` and `update_B` functions), but as systems grow more complex this approach makes the code harder to read and maintain, and in some cases more inefficient because of unnecessary repetition of computational steps.
The general structure of a policy function is:
```python
def policy_function(params, step, sL, s):
...
return {'value1': value1, 'value2': value2, ...}
```
Just like State Update Functions, policies can read the current state of the system from argument `s`, a Python `dict` where the `dict_keys` are the __names of the variables__ and the `dict_values` are their __current values__. The Policy Function must return a dictionary, which will be passed as an argument (`_input`) to the state update functions.

Let's update our simulation so that the robot arm's logic is encoded in a Policy instead of in the State Update Functions.
```
%%capture
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# We specify the robot arm's logic in a Policy Function
def robot_arm(params, step, sH, s):
    """Policy: signal a one-marble transfer from the fuller box to the emptier one."""
    diff = s['box_A'] - s['box_B']
    # Negative moves a marble out of A; zero when the boxes are already balanced.
    move = -1 if diff > 0 else (1 if diff < 0 else 0)
    return {'add_to_A': move, 'add_to_B': -move}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# We make the state update functions less "intelligent",
# ie. they simply add the number of marbles specified in _input
# (which, per the policy function definition, may be negative)
def increment_A(params, step, sH, s, _input):
    """State update for box_A: apply the signed marble delta signalled by the policies."""
    return ('box_A', s['box_A'] + _input['add_to_A'])
def increment_B(params, step, sH, s, _input):
    """State update for box_B: apply the signed marble delta signalled by the policies."""
    return ('box_B', s['box_B'] + _input['add_to_B'])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# In the Partial State Update Blocks,
# the user specifies if state update functions will be run in series or in parallel
# and the policy functions that will be evaluated in that block
partial_state_update_blocks = [
    {
        'policies': { # The following policy functions will be evaluated and their returns will be passed to the state update functions
            'robot_arm': robot_arm
        },
        # CONSISTENCY FIX: the other two configuration cells in this notebook use the
        # 'variables' key for the state-update map; 'states' was a stale alias here.
        'variables': { # The following state variables will be updated simultaneously
            'box_A': increment_A,
            'box_B': increment_B
        }
    }
]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
del configs[:] # Clear any prior configs so only this experiment's config is executed
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
exp.append_configs(initial_state=genesis_states, #dict containing variable names and initial values
partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
sim_configs=c #preprocessed dictionaries containing simulation parameters
)
executor = Executor(local_mode_ctx, configs) # Pass the configuration object inside an array
raw_result, tensor, sessions = executor.execute() # The `execute()` method returns a tuple; its first element contains the raw results
%matplotlib inline
simulation_result = pd.DataFrame(raw_result)
# Same plot as before: both box counts per timestep.
simulation_result.plot('timestep', ['box_A', 'box_B'], grid=True,
xticks=list(simulation_result['timestep'].drop_duplicates()),
colormap = 'RdYlGn',
yticks=list(range(1+(simulation_result['box_A']+simulation_result['box_B']).max())));
```
As expected, the results are the same as when the robot arm logic was encoded within the state update functions.
Several policies may be evaluated within a Partial State Update Block. When that's the case, cadCAD's engine aggregates the outputs of the policies and passes them as a single signal to the state update functions.

Aggregation of policies is defined in cadCAD as __key-wise sum (+) of the elements of the outputted `dict`s__.
```python
>policy_1_output = {'int': 1, 'str': 'abc', 'list': [1, 2], '1-only': 'Specific to policy 1'}
>policy_2_output = {'int': 2, 'str': 'def', 'list': [3, 4], '2-only': 'Specific to policy 2'}
>print(aggregate([policy_1_output, policy_2_output]))
```
```
{'int': 3, 'str': 'abcdef', 'list': [1, 2, 3, 4], '1-only': 'Specific to policy 1', '2-only': 'Specific to policy 2'}
```
To illustrate, let's add to the system another robot arm, identical to the first one, that acts in tandem with it. All it takes is adding a policy to the `dict` that describes the partial state update block.
```
%%capture
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# In the Partial State Update Blocks,
# the user specifies if state update functions will be run in series or in parallel
# and the policy functions that will be evaluated in that block
partial_state_update_blocks = [
{
'policies': { # The following policy functions will be evaluated and their returns will be passed to the state update functions
# Two identical arms: their outputs are aggregated key-wise, so the state updates see moves of +/-2 or 0.
'robot_arm_1': robot_arm,
'robot_arm_2': robot_arm
},
'variables': { # The following state variables will be updated simultaneously
'box_A': increment_A,
'box_B': increment_B
}
}
]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
del configs[:] # Clear any prior configs
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
exp.append_configs(initial_state=genesis_states, #dict containing variable names and initial values
partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
sim_configs=c #preprocessed dictionaries containing simulation parameters
)
executor = Executor(local_mode_ctx, configs) # Pass the configuration object inside an array
raw_result, tensor, sessions = executor.execute() # The `execute()` method returns a tuple; its first element contains the raw results
%matplotlib inline
simulation_result = pd.DataFrame(raw_result)
simulation_result.plot('timestep', ['box_A', 'box_B'], grid=True,
xticks=list(simulation_result['timestep'].drop_duplicates()),
colormap = 'RdYlGn',
yticks=list(range(1+(simulation_result['box_A']+simulation_result['box_B']).max())));
```
Because we have made it so that both robots read and update the state of the system at the same time, the equilibrium we had before (with 5 marbles in each box) is never reached. Instead, the system oscillates around that point.
---
_About BlockScience_
[BlockScience](http://bit.ly/github_articles_M_02) is a research and engineering firm specialized in complex adaptive systems and applying practical methodologies from engineering design, development and testing to projects in emerging technologies such as blockchain. Follow us on [Medium](http://bit.ly/bsci-medium) or [Twitter](http://bit.ly/bsci-twitter) to stay in touch.
| github_jupyter |
```
#Toy example: this does not mean anything really, just write out some random returns series and find the maximum level (this was born as a unit test basically, for the logic of a private application).
#requires py3.7 scipy==1.1.0 (conda)
#requires pyDOE (pip)
from dnlcb import DynamicNegativeLowerConfidenceBound
from emukit.bayesian_optimization.loops import BayesianOptimizationLoop
from emukit.core import ParameterSpace, ContinuousParameter
from emukit.core.optimization import RandomSearchAcquisitionOptimizer
from emukit.model_wrappers import GPyModelWrapper
from GPy.models import GPRegression
from GPy.kern.src.brownian import Brownian
from matplotlib import pyplot as plt
import numpy as np
from pandas import Series
import random
# Time horizon: one continuous parameter spanning the 252 day indices 0..251.
parameter_space = ParameterSpace([ContinuousParameter('days', 0, 251)])
#Sample some arithmetic brownian motion, e.g. returns.
def ABM(
    X0: float = 100,
    T: int = 252,
    n: int = 1,
    alpha: float = 0,
    sigma: float = 100
) -> np.ndarray:
    """Sample one arithmetic-Brownian-motion path.

    Starts at `X0` and takes T-1 Euler steps of size dt = n/T with drift
    `alpha` and volatility `sigma`.

    Returns:
        A float64 numpy array of length `T`. (The previous annotation said
        `Series`, but the function has always returned `.to_numpy()`;
        fixed, and the O(n) `Series.loc` growth replaced with a list.)
    """
    dt = n / T
    path = [X0]
    # One standard-normal draw per step keeps the RNG stream identical to
    # the original Series-based implementation.
    for _ in np.arange(1, T):
        ei = np.random.normal(0, 1)
        path.append(path[-1] + alpha * dt + sigma * ei * (dt ** 0.5))
    return np.asarray(path, dtype=float)
# Sample one ABM path with strong drift; its levels are the series to maximize.
data = ABM(sigma=100, alpha=200)
# Objective: round the continuous `days` coordinate to an index and negate the level.
def f(x):
i=np.round(x).astype(int)
return -data[i]
# Draw one uniformly random day and evaluate the objective there.
def XY():
x = np.array(np.array([random.uniform(0, 251)]))
y = f(x)
return (x, y)
# Five random initial design points.
X = np.zeros(5)
Y = np.zeros(5)
for i in range(5):
x,y = XY()
X[i] = x
Y[i] = y
# Reshape to the (n, 1) column form expected by GPy/Emukit.
X_init = X[:, None]
Y_init = Y[:, None]
#Kernel choice: brownian. This kernel is hardly used in applications, the most common non-smooth kernel is a fractional Matérn I guess. Something very inconvenient with a brownian kernel is its not-differentiability, and the fact that it is strictly one-dimensional (at least in its classic definition): this forces you to marginalize on the dimensions, with an overhead linear in the number of dimensions of course.
#This said it is technically the best assumption if your underlying process does have a brownian nature, like in this example.
kernel = Brownian(variance=np.var(X_init))
#Negate Y_init results as we are solving the dual problem (see below)
#Train and wrap the model in Emukit
model_gpy = GPRegression(X_init,-Y_init,kernel=kernel)
model_emukit = GPyModelWrapper(model_gpy)
#Attention: DNLCB does *not* have convergence guarantees for non-smooth kernel surfaces (see paper), like a brownian ones; this basically means we have no guarantee to find the optimum no matter the number of iterations. As this is a toy example on a conveniently discretized space it's all good, but with real applications be careful on the acquisition choice.
#The input space size must be the same as the parameter space range (see above); starting with a low delta.
dynamic_lcb = DynamicNegativeLowerConfidenceBound(model = model_emukit,input_space_size=252, delta=0.2)
#A brownian motion is nowhere differentiable so its gradient function https://gpy.readthedocs.io/en/deploy/tuto_creating_new_kernels.html#gradients-x-self-dl-dk-x-x2 is undefined; this also means we cannot use any gradient-based acquisition optimizer
acquisition_optimizer = RandomSearchAcquisitionOptimizer(parameter_space, 30)
bayesopt_loop_cust = BayesianOptimizationLoop(
model = model_emukit,
space = parameter_space,
acquisition = dynamic_lcb,acquisition_optimizer=acquisition_optimizer,
batch_size = 1
)
# Dual problem: the loop minimizes, so maximizing f means minimizing -f.
def f_opt(x):
return -f(x)
bayesopt_loop_cust.run_loop(f_opt, 30)
#The optimization engine can only minimize, apparently; so to maximize the original function we will minimize its inverse, by duality max(f)=min(-f).
# Evaluate both the original f and its negation over the whole plotting grid.
smin=parameter_space.parameters[0].min
smax=parameter_space.parameters[0].max
x_plot = np.linspace(smin, smax , 200)[:, None]
y_plot = f(x_plot)
z_plot = f_opt(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(x_plot, y_plot, "k", label="Original Function")
plt.plot(x_plot, z_plot, "c", label="Optimization Function")
plt.legend(loc=2, prop={'size': 10})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(smin, smax)
plt.show()
# GP posterior mean/variance over the same grid, plus observations and key points.
mu_plot, var_plot = bayesopt_loop_cust.model.predict(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(bayesopt_loop_cust.loop_state.X, bayesopt_loop_cust.loop_state.Y, "ro", markersize=10, label="Observations")
plt.plot(x_plot, y_plot, "k", label="Original Function")
plt.plot(x_plot, z_plot, "c", label="Optimization Function")
plt.plot(x_plot, mu_plot, "C0", label="Model")
x_mean = np.mean(bayesopt_loop_cust.loop_state.X)
plt.scatter(x_mean, f(np.array(x_mean)), label="Average", color='b', s=500)
# NOTE(review): both scatter coordinates below are `minimum_location`; the y-value was
# probably meant to be the objective value at that location (e.g. f(minimum_location)) — confirm.
plt.scatter(bayesopt_loop_cust.get_results().minimum_location,
bayesopt_loop_cust.get_results().minimum_location, label="Maximum original function / minimum optimization function", color='g', s=500)
plt.legend(loc=2, prop={'size': 10})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(smin, smax)
plt.show()
#Variance parameter gets automatically optimized; you may set it as fixed too (see GPy docs).
print(np.var(X_init)) #Initial value we set it to; this should be meaningful with respect to the variance across the year levels.
bayesopt_loop_cust.model.model.parameters[0].variance #final iteration value
#This is how we ask for the next best guess for x, evaluate its y offline (example evaluation: 42) and come back to update the model
from emukit.core.loop import UserFunctionResult
print(bayesopt_loop_cust.loop_state.iteration)
state = bayesopt_loop_cust.loop_state
next_point = bayesopt_loop_cust.candidate_point_calculator.compute_next_points(state)
print(next_point[0])
evaluation_result = [UserFunctionResult(next_point[0], np.array([42]))]
state.update(evaluation_result)
bayesopt_loop_cust.model_updaters[0].update(state)
print(bayesopt_loop_cust.loop_state.iteration)
```
| github_jupyter |
# Preprocess Docs
```
# load dependency libraries
import os
import re
import pickle
from bs4 import BeautifulSoup
from bs4.element import Comment
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
# English stop words used to filter tokens
stop_words = stopwords.words('english')
# Porter stemmer used to normalize tokens
st = PorterStemmer()
# Initializing regex to remove words with one or two characters length
# shortword = re.compile(r'\W*\b\w{1,2}\b')
# folder to store pickle files
pickle_folder = "../PickleFiles/"
os.makedirs(pickle_folder, exist_ok=True)
# folder holding the previously crawled web pages
pages_folder = "../FetchedPages/"
filenames = os.listdir(pages_folder)
# Filenames of all stored crawled web pages. `os.listdir` already returns a
# list, so the previous element-by-element copy loop was redundant.
files = list(filenames)
# len(files)
# for file in files[:1]:
# web_page = open(pages_folder + file, "r", encoding="utf-8")
# code = web_page.read()
# # print(code)
# soup = BeautifulSoup(code, "html.parser")
# [s.extract() for s in soup(['style', 'script', '[document]', 'head'])]
# visible_text = soup.getText()
# print(visible_text)
# Predicate used to keep only text nodes that render on the page
# (excludes style/script/meta containers, comments, and whitespace-only nodes).
def tag_visible(element):
    """Return True when a BeautifulSoup text node would be visible on the rendered page."""
    hidden_parents = ('style', 'script', 'head', 'meta', '[document]')
    if element.parent.name in hidden_parents:
        return False
    if isinstance(element, Comment):  # HTML comments never render
        return False
    # Nodes that start with whitespace/newlines carry no visible text here.
    if re.match(r"[\s\r\n]+", str(element)):
        return False
    return True
# Extract only the user-visible text from a page's HTML code.
def get_text_from_code(page):
    """Return the visible text of an HTML document as one space-joined string."""
    soup = BeautifulSoup(page, "lxml")
    all_text_nodes = soup.find_all(text=True)
    # Keep only nodes that render on the page, stripping surrounding whitespace.
    visible = (node.strip() for node in all_text_nodes if tag_visible(node))
    return " ".join(visible)
# dict to create inverted index
inverted_index = {}
# dict to store tokens in each web page
webpage_tokens = {}
for file in files:
web_page = open(pages_folder + file, "r", encoding="utf-8")
code = web_page.read()
# print(code)
text = get_text_from_code(code) # get all text actually visible on web page
# print(text,"\n")
text = text.lower()
text = re.sub('[^a-z]+', ' ', text) # remove all punctuations and digits
tokens = text.split()
# print(tokens, "\n")
# # removing stop words from the tokens
# clean_tokens = [word for word in tokens if word not in stop_words]
# # stemming the tokens
# stem_tokens = [st.stem(word) for word in clean_tokens]
# # checking for stopwords again
# clean_stem_tokens = [word for word in stem_tokens if word not in stop_words]
# # converting list of tokens to string
# clean_stem_tokens = ' '.join(map(str, clean_stem_tokens))
# # removing tokens with one or two characters length
# clean_stem_tokens = shortword.sub('', clean_stem_tokens)
# print(clean_stem_tokens, "\n")
# removing stop words and stemming each token while only accepting stemmed tokens with length greater than 2
clean_stem_tokens = [
st.stem(token) for token in tokens
if (token not in stop_words and st.stem(token) not in stop_words) and len(st.stem(token))>2
]
# print(clean_stem_tokens, "\n")
webpage_tokens[file] = clean_stem_tokens # add tokens in web page to dict
for token in clean_stem_tokens:
freq = inverted_index.setdefault(token,{}).get(file,0) # get frequency of token and set to 0 if token not in dict
inverted_index.setdefault(token,{})[file] = freq + 1 # add 1 to frequency of token in current webpage
# inverted_index.setdefault(token,{})[file] = inverted_index.setdefault(token,{})
# x = inverted_index.setdefault(token,{})[file]
# print(x)
# pickling inverted index and tokens
with open(pickle_folder + '6000_inverted_index.pickle', 'wb') as f:
pickle.dump(inverted_index,f)
with open(pickle_folder + '6000_webpages_tokens.pickle', 'wb') as f:
pickle.dump(webpage_tokens,f)
```
| github_jupyter |
```
import CNN2Head_input
import os
import tensorflow as tf
import numpy as np
import BKNetStyle
from const import *
''' PREPARE DATA '''
''' PREPARE DATA '''
# Load (train, test) image/label pairs for each of the three tasks.
smile_train, smile_test = CNN2Head_input.getSmileImage()
gender_train, gender_test = CNN2Head_input.getGenderImage()
age_train, age_test = CNN2Head_input.getAgeImage()
def one_hot(index, num_classes):
    """Return a float32 one-hot vector of length `num_classes` with a 1 at `index`.

    Raises ValueError for an out-of-range index. The original used `assert`,
    which is silently stripped when Python runs with -O, so invalid labels
    would have produced a wrong vector instead of an error.
    """
    if not 0 <= index < num_classes:
        raise ValueError('index %d out of range for %d classes' % (index, num_classes))
    tmp = np.zeros(num_classes, dtype=np.float32)
    tmp[index] = 1.0
    return tmp
# TF1-style session and graph construction: shared trunk, three output heads.
sess = tf.InteractiveSession()
global_step = tf.contrib.framework.get_or_create_global_step()
x, y_, mask = BKNetStyle.Input()
y_smile_conv, y_gender_conv, y_age_conv, phase_train, keep_prob = BKNetStyle.BKNetModel(x)
smile_loss, gender_loss, age_loss, l2_loss, loss = BKNetStyle.selective_loss(y_smile_conv, y_gender_conv,
y_age_conv, y_, mask)
train_step = BKNetStyle.train_op(loss, global_step)
# Per-task masks and one-hot labels are published via TF collections by BKNetStyle.
smile_mask = tf.get_collection('smile_mask')[0]
gender_mask = tf.get_collection('gender_mask')[0]
age_mask = tf.get_collection('age_mask')[0]
y_smile = tf.get_collection('y_smile')[0]
y_gender = tf.get_collection('y_gender')[0]
y_age = tf.get_collection('y_age')[0]
smile_correct_prediction = tf.equal(tf.argmax(y_smile_conv, 1), tf.argmax(y_smile, 1))
gender_correct_prediction = tf.equal(tf.argmax(y_gender_conv, 1), tf.argmax(y_gender, 1))
age_correct_prediction = tf.equal(tf.argmax(y_age_conv, 1), tf.argmax(y_age, 1))
# Multiplying by the task mask restricts each correct-count to that task's samples.
smile_true_pred = tf.reduce_sum(tf.cast(smile_correct_prediction, dtype=tf.float32) * smile_mask)
gender_true_pred = tf.reduce_sum(tf.cast(gender_correct_prediction, dtype=tf.float32) * gender_mask)
age_true_pred = tf.reduce_sum(tf.cast(age_correct_prediction, dtype=tf.float32) * age_mask)
train_data = []
# Task mask: Smile -> 0, Gender -> 1, Age -> 2
# Oversample the smile data 10x; wrap around with modulo over the actual
# dataset size instead of the hard-coded 3000 the original used (which was
# wrong for any smile set whose length != 3000).
num_smile = len(smile_train)
for i in range(num_smile * 10):
    img = (smile_train[i % num_smile][0] - 128) / 255.0
    label = smile_train[i % num_smile][1]
    train_data.append((img, one_hot(label, 4), 0.0))
for i in range(len(gender_train)):
    img = (gender_train[i][0] - 128) / 255.0
    label = int(gender_train[i][1])
    train_data.append((img, one_hot(label, 4), 1.0))
for i in range(len(age_train)):
    img = (age_train[i][0] - 128) / 255.0
    label = int(age_train[i][1])
    train_data.append((img, one_hot(label, 4), 2.0))
saver = tf.train.Saver()
# Resume from a checkpoint if one exists, otherwise initialize fresh variables.
if not os.path.isfile(SAVE_FOLDER + 'model.ckpt.index'):
    print('Create new model')
    sess.run(tf.global_variables_initializer())
    print('OK')
else:
    print('Restoring existed model')
    saver.restore(sess, SAVE_FOLDER + 'model.ckpt')
    print('OK')
# Scalar summary fed with the per-epoch average total loss via a placeholder.
loss_summary_placeholder = tf.placeholder(tf.float32)
tf.summary.scalar('loss', loss_summary_placeholder)
merge_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("./summary/")
learning_rate = tf.get_collection('learning_rate')[0]
# Derive the epoch to resume from out of the restored global step.
current_epoch = (int)(global_step.eval() / (len(train_data) // BATCH_SIZE))
for epoch in range(current_epoch + 1, NUM_EPOCHS):
    print('Epoch:', str(epoch))
    # Reshuffle the mixed-task pool each epoch, then split into parallel lists.
    np.random.shuffle(train_data)
    train_img = []
    train_label = []
    train_mask = []
    for i in range(len(train_data)):
        train_img.append(train_data[i][0])
        train_label.append(train_data[i][1])
        train_mask.append(train_data[i][2])
    # Integer division: a trailing partial batch is dropped.
    number_batch = len(train_data) // BATCH_SIZE
    # Per-epoch accumulators for losses and masked accuracy counts.
    avg_ttl = []
    avg_rgl = []
    avg_smile_loss = []
    avg_gender_loss = []
    avg_age_loss = []
    smile_nb_true_pred = 0
    gender_nb_true_pred = 0
    age_nb_true_pred = 0
    smile_nb_train = 0
    gender_nb_train = 0
    age_nb_train = 0
    print("Learning rate: %f" % learning_rate.eval())
    for batch in range(number_batch):
        # print('Training on batch {0}/{1}'.format(str(batch + 1), str(number_batch)))
        top = batch * BATCH_SIZE
        bot = min((batch + 1) * BATCH_SIZE, len(train_data))
        batch_img = np.asarray(train_img[top:bot])
        batch_label = np.asarray(train_label[top:bot])
        batch_mask = np.asarray(train_mask[top:bot])
for i in range(BATCH_SIZE):
if batch_mask[i] == 0.0:
smile_nb_train += 1
else:
if batch_mask[i] == 1.0:
gender_nb_train += 1
else:
age_nb_train += 1
batch_img = CNN2Head_input.augmentation(batch_img, 48)
ttl, sml, gel, agl, l2l, _ = sess.run([loss, smile_loss, gender_loss, age_loss, l2_loss, train_step],
feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
smile_nb_true_pred += sess.run(smile_true_pred, feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
gender_nb_true_pred += sess.run(gender_true_pred,
feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
age_nb_true_pred += sess.run(age_true_pred,
feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
'''--------------------------------------- DEBUG -----------------------------------------------------'''
'''
sm_mask, em_mask, ge_mask = sess.run([smile_mask, gender_mask, age_mask],
feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
print('Smile mask: ', sm_mask)
print('Gender mask', ge_mask)
print('Age mask', ag_mask)
print('Batch mask', batch_mask)
y_true_sm, y_true_ge, y_true_ag = sess.run([y_smile, y_gender, y_age],
feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
print('Smile label', y_true_sm)
print('Gender label', y_true_ge)
print('Age label', y_true_ag)
print('Batch label', batch_label)
y_conv_sm, y_conv_ge, y_conv_ag = sess.run([y_smile_conv, y_gender_conv, y_age_conv],
feed_dict={x: batch_img, y_: batch_label, mask: batch_mask,
phase_train: True,
keep_prob: 0.5})
print('Smile conv', y_conv_sm)
print('Gender conv', y_conv_ge)
print('Age conv', y_conv_ag)
'''
'''---------------------------------- END OF DEBUG ----------------------------------------------------'''
avg_ttl.append(ttl)
avg_smile_loss.append(sml)
avg_gender_loss.append(gel)
avg_age_loss.append(agl)
avg_rgl.append(l2l)
smile_train_accuracy = smile_nb_true_pred * 1.0 / smile_nb_train
gender_train_accuracy = gender_nb_true_pred * 1.0 / gender_nb_train
age_train_accuracy = age_nb_true_pred * 1.0 / age_nb_train
avg_smile_loss = np.average(avg_smile_loss)
avg_gender_loss = np.average(avg_gender_loss)
avg_age_loss = np.average(avg_age_loss)
avg_rgl = np.average(avg_rgl)
avg_ttl = np.average(avg_ttl)
# print('Avg_ttl: ' + str(avg_ttl))
# print('loss_summary_placeholder: ' + str(loss_summary_placeholder))
# print('merge_summary: ' + str(merge_summary))
summary = sess.run(merge_summary, feed_dict={loss_summary_placeholder: avg_ttl})
writer.add_summary(summary, global_step=epoch)
with open('log.csv', 'w+') as f:
# epochs, smile_train_accuracy, gender_train_accuracy, age_train_accuracy,
# avg_smile_loss, avg_gender_loss, avg_age_loss, avg_ttl, avg_rgl
f.write('{0},{1},{2},{3},{4},{5},{6},{7},{8}\n'.format(current_epoch, smile_train_accuracy, gender_train_accuracy, age_train_accuracy, avg_smile_loss, avg_gender_loss, avg_age_loss, avg_ttl, avg_rgl))
print('Smile task train accuracy: ' + str(smile_train_accuracy * 100))
print('Gender task train accuracy: ' + str(gender_train_accuracy * 100))
print('Age task train accuracy: ' + str(age_train_accuracy * 100))
print('Total loss: ' + str(avg_ttl) + '. L2-loss: ' + str(avg_rgl))
print('Smile loss: ' + str(avg_smile_loss))
print('Gender loss: ' + str(avg_gender_loss))
print('Age loss: ' + str(avg_age_loss))
print('\n')
saver.save(sess, SAVE_FOLDER + 'model.ckpt')
```
| github_jupyter |
# High-level RNN MXNet Example
```
import os
import sys
import numpy as np
import mxnet as mx
from mxnet.io import DataDesc
from common.params_lstm import *
from common.utils import *
# Force one-gpu
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Numpy: ", np.__version__)
print("MXNet: ", mx.__version__)
print("GPU: ", get_gpu_name())
print(get_cuda_version())
print("CuDNN Version ", get_cudnn_version())
def create_symbol(CUDNN=True,
                  maxf=MAXFEATURES, edim=EMBEDSIZE, nhid=NUMHIDDEN, maxl=MAXLEN):
    """Build the embedding -> GRU -> FC(2) -> softmax sentiment symbol.

    CUDNN selects FusedRNNCell (all time steps fused into one kernel: faster,
    less flexible, requires cuDNN on GPU) over the plain GRUCell.
    https://mxnet.incubator.apache.org/api/python/rnn.html
    """
    data = mx.symbol.Variable('data')
    embedded_step = mx.symbol.Embedding(data=data, input_dim=maxf, output_dim=edim)
    if not CUDNN:
        gru_cell = mx.rnn.GRUCell(num_hidden=nhid)
    else:
        gru_cell = mx.rnn.FusedRNNCell(num_hidden=nhid, num_layers=1, mode='gru')
    # NOTE(review): the original called gru_cell.begin_state() and discarded
    # the result; unroll() builds its own zero initial state when none is
    # passed, so the call is dropped.
    # TODO: TNC layout (sequence length, batch size, feature dims) is faster for RNN
    outputs, states = gru_cell.unroll(length=maxl, inputs=embedded_step, merge_outputs=False)
    # Classify from the last time step's output only.
    fc1 = mx.symbol.FullyConnected(data=outputs[-1], num_hidden=2)
    input_y = mx.symbol.Variable('softmax_label')
    m = mx.symbol.SoftmaxOutput(data=fc1, label=input_y, name="softmax")
    return m
def init_model(m, batchs=BATCHSIZE, maxl=MAXLEN, lr=LR, b1=BETA_1, b2=BETA_2, eps=EPS):
    """Bind the symbol to one GPU, Glorot-uniform init, Adam optimizer."""
    module = mx.mod.Module(context=[mx.gpu(0)], symbol=m)
    data_desc = DataDesc(name='data', shape=(batchs, maxl))
    label_desc = DataDesc(name='softmax_label', shape=(batchs,))
    module.bind(data_shapes=[data_desc], label_shapes=[label_desc])
    # Glorot-uniform initializer
    module.init_params(initializer=mx.init.Xavier(rnd_type='uniform'))
    adam_params = (('learning_rate', lr),
                   ('beta1', b1),
                   ('beta2', b2),
                   ('epsilon', eps))
    module.init_optimizer(optimizer='Adam', optimizer_params=adam_params)
    return module
%%time
# Data into format for library
x_train, x_test, y_train, y_test = imdb_for_library(seq_len=MAXLEN, max_features=MAXFEATURES)
# TNC layout faster for RNN
# Train iterator
train_iter = mx.io.NDArrayIter(x_train, y_train, BATCHSIZE, shuffle=True)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)
%%time
# Load symbol
# See Notebook "MXNet_RNN_TNC.ipynb" for example with TNC layout
sym = create_symbol()
%%time
# Initialise model
model = init_model(sym)
%%time
# Main training loop: 12.7s
# Accuracy metric, reset at the start of each epoch.
metric = mx.metric.create('acc')
for j in range(EPOCHS):
    train_iter.reset()
    metric.reset()
    for batch in train_iter:
        # Standard module loop: forward, score, backward, parameter update.
        model.forward(batch, is_train=True)
        model.update_metric(metric, batch.label)
        model.backward()
        model.update()
    print('Epoch %d, Training %s' % (j, metric.get()))
%%time
# Main evaluation loop: 1.52s
y_guess = model.predict(mx.io.NDArrayIter(x_test, batch_size=BATCHSIZE, shuffle=False))
y_guess = np.argmax(y_guess.asnumpy(), axis=-1)
print("Accuracy: ", 1.*sum(y_guess == y_test)/len(y_guess))
```
| github_jupyter |
# k-Nearest Neighbor (kNN) exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
The kNN classifier consists of two stages:
- During training, the classifier takes the training data and simply remembers it
- During testing, kNN classifies every test image by comparing it to all training images and transferring the labels of the k most similar training examples
- The value of k is cross-validated
In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.
```
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print X_train.shape, X_test.shape
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
```
We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
1. First we must compute the distances between all test examples and all train examples.
2. Given these distances, for each test example we find the k nearest examples and have them vote for the label
Lets begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.
First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
```
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print dists.shape
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
```
**Inline Question #1:** Notice the structured patterns in the distance matrix, where some rows or columns are visibly brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
- What in the data is the cause behind the distinctly bright rows?
- What causes the columns?
**Your Answer**:
*- If the ith test example is similar to a large number of training examples, the ith row would be black. Otherwise, the ith row would be white.
- If the jth training example is similar to a large number of test examples, the jth column would be black. Otherwise, the jth column would be white.*
```
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
```
You should expect to see approximately `27%` accuracy. Now lets try out a larger `k`, say `k = 5`:
```
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
```
You should expect to see a slightly better performance than with `k = 1`.
```
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Let's compare how fast the implementations are
def time_function(f, *args):
    """Return how many seconds it takes to execute f(*args)."""
    import time
    start = time.time()
    f(*args)
    return time.time() - start
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print 'Two loop version took %f seconds' % two_loop_time
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print 'One loop version took %f seconds' % one_loop_time
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print 'No loop version took %f seconds' % no_loop_time
# you should see significantly faster performance with the fully vectorized implementation
```
### Cross-validation
We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
```
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
################################################################################
# TODO: #
# Split up the training data into folds. After splitting, X_train_folds and #
# y_train_folds should each be lists of length num_folds, where #
# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #
# Hint: Look up the numpy array_split function. #
################################################################################
# Reshape labels to a column so the folds can be vstack-ed back together below.
y_train_ = y_train.reshape(-1, 1)
# Split by num_folds rather than the hard-coded 5 the original used, so the
# split tracks the num_folds setting above.
X_train_folds, y_train_folds = np.array_split(X_train, num_folds), np.array_split(y_train_, num_folds)
################################################################################
# END OF YOUR CODE #
################################################################################
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
################################################################################
# TODO: #
# Perform k-fold cross validation to find the best value of k. For each #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #
# where in each case you use all but one of the folds as training data and the #
# last fold as a validation set. Store the accuracies for all fold and all #
# values of k in the k_to_accuracies dictionary. #
################################################################################
# pass
for k_ in k_choices:
k_to_accuracies.setdefault(k_, [])
for i in range(num_folds):
classifier = KNearestNeighbor()
X_val_train = np.vstack(X_train_folds[0:i] + X_train_folds[i+1:])
y_val_train = np.vstack(y_train_folds[0:i] + y_train_folds[i+1:])
y_val_train = y_val_train[:,0]
classifier.train(X_val_train, y_val_train)
for k_ in k_choices:
y_val_pred = classifier.predict(X_train_folds[i], k=k_)
num_correct = np.sum(y_val_pred == y_train_folds[i][:,0])
accuracy = float(num_correct) / len(y_val_pred)
k_to_accuracies[k_] = k_to_accuracies[k_] + [accuracy]
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
for accuracy in k_to_accuracies[k]:
print 'k = %d, accuracy = %f' % (k, accuracy)
# plot the raw observations
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 10
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
```
| github_jupyter |
# Intro
SQL is the programming language used with databases, and it is an important skill for any data scientist. You'll build your SQL skills in this course and apply those skills using BigQuery, a database system that lets you apply SQL to huge datasets.
This lesson describes basics about connecting to the database and running your first SQL query. After you have a handle on these basics, we'll come back to build your SQL skills.
# Your First BigQuery Commands
We'll access BigQuery using a Python package called `bq_helper` that puts BigQuery results into Pandas DataFrames. This is valuable if you are familiar with Pandas. In case you aren't, we have a separate [Pandas course](https://www.kaggle.com/learn/pandas).
You can import `bq_helper` in the standard way.
```
import bq_helper
```
We also need to create a BigQueryHelper object pointing to a specific dataset.
For now, we will give you the names of the datasets you will connect to. The current example uses a dataset of posts to HackerNews.
```
# create a helper object for our bigquery dataset
hacker_news = bq_helper.BigQueryHelper(active_project= "bigquery-public-data",
dataset_name = "hacker_news")
```
# Database Schemas
The structure of a dataset is called its **schema**.
We need to understand a database's schema to effectively pull out the data we want (called "querying the database"). The `BigQueryHelper.list_tables()` method lists the tables in the dataset. A table is composed of rows and columns, like a spreadsheet table. The database itself can hold multiple tables, much as a spreadsheet file can hold multiple tables.
```
# print a list of all the tables in the hacker_news dataset
hacker_news.list_tables()
```
Now that we know what tables are in this dataset, we can explore the columns in individual tables. In this example, we'll look at table called "full". Note that other data sets have different table names, so you will not always use "full."
```
# print information on all the columns in the "full" table
# in the hacker_news dataset
hacker_news.table_schema("full")
```
Each SchemaField tells us about a specific column. In order, the information is:
* The name of the column
* The datatype in the column
* [The mode of the column](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.mode) (NULLABLE means that a column allows NULL values, and is the default)
* A description of the data in that column
The first field has the SchemaField:
`SchemaField('by', 'string', 'NULLABLE', "The username of the item's author.",())`
This tells us
- the field is called "by"
- the data in this field is strings
- NULL values are allowed
- It contains the "username" of the item's author.
We can use the `BigQueryHelper.head()` method to check just the first couple of lines of the "full" table to make sure this is right. (Sometimes databases out there have outdated descriptions, so it's good to check.)
```
# preview the first couple lines of the "full" table
hacker_news.head("full")
```
The `BigQueryHelper.head()` method will also let us look at just the information in a specific column. If we want to see the first ten entries in the "by" column, for example, we can do that!
```
# preview the first ten entries in the by column of the full table
hacker_news.head("full", selected_columns="by", num_rows=10)
```
# Wrap Up
You've seen how to:
- Set up a helper function to access your database (`BigQueryHelper`)
- List the tables in your database (`list_tables`)
- Review the schema for any table (`table_schema`)
- Inspect the top few rows in a table (`head`)
You're about to get a chance to try these out.
Before we go into the coding exercise, a quick disclaimer for those who already know some SQL:
**Each Kaggle user can scan 5TB every 30 days for free. Once you hit that limit, you'll have to wait for it to reset.**
The commands you've seen so far won't demand a meaningful fraction of that limit. But some BigQuery datasets are huge. So, if you already know SQL, wait to run `SELECT` queries until you've seen how to use your allotment effectively. If you are like most people reading this, you don't know how to write these queries yet, so you don't need to worry about this disclaimer.
# Your Turn
Practice the commands you've seen to **[Explore The Structure of a Dataset](#$NEXT_NOTEBOOK_URL$)** with crimes in the city of Chicago.
| github_jupyter |
```
%load_ext memory_profiler
```
# Iterators, generators and itertools
```
for i in range(5): print(i, end=" ")
print()
for i in (0, 1, 2, 3, 4): print(i, end=" ")
print()
for i in {0, 1, 2, 3, 4}: print(i, end=" ")
list(map(type, (range(5), (0, 1, 2, 3, 4), {0, 1, 2, 3, 4})))
range(5).__sizeof__(), (0, 1, 2, 3, 4).__sizeof__(), {0, 1, 2, 3, 4}.__sizeof__()
mil = range(10**6)
mil.__sizeof__(), tuple(mil).__sizeof__()
```
## Collections, sequences and containers
<img src="https://miro.medium.com/max/1200/1*X3GmUh7dqAMJLM5KwGBKYQ.png">
## Iterable
```
hasattr(range(5), "__iter__"), hasattr(tuple(), "__iter__"), hasattr(set(), "__iter__")
```
```python
iterable.__iter__() -> iterator
```
## Iterator protocol
- `__iter__` - return the iterator object itself
- `__next__` - return the next item from the iterator
Return the next item from the container. If there are no further items, raise the `StopIteration` exception. Once an iterator’s `__next__` method raises `StopIteration`, it must continue to do so on subsequent calls.
```
for i in range(5):
print(i, end=" ")
print()
iterator = iter(range(5))
while True:
try:
i = next(iterator)
print(i, end=" ")
except StopIteration:
break
type(range(5)), type(iterator)
from collections import namedtuple
from typing import Iterator
Page = namedtuple("Page", ["text", "number"])
class Book:
    """A sequence of numbered pages, iterated via a dedicated BookIter."""

    def __init__(self) -> None:
        # Pages are numbered starting at 1, in insertion order.
        self.pages = []

    def add_page(self, text: str) -> None:
        """Append a page whose number is its 1-based position."""
        next_number = len(self.pages) + 1
        self.pages.append(Page(text, number=next_number))

    def __iter__(self) -> Iterator[Page]:
        return BookIter(self)
class BookIter:
    """Iterator over a Book's pages, front to back."""

    def __init__(self, book: Book) -> None:
        self.pages = book.pages
        self._cursor = -1  # index of the page yielded last; advanced before use

    def __iter__(self) -> "BookIter":
        # An iterator is its own iterable.
        return self

    def __next__(self) -> Page:
        self._cursor += 1
        try:
            return self.pages[self._cursor]
        except IndexError:
            raise StopIteration
book = Book()
for i in range(1, 5):
book.add_page(f"page_{i}")
for page in book:
print(page)
type(book), type(iter(book))
```
## Why do we need BookIter?
```
class LazyBook(Book):
    """Book that hands out the underlying list's own iterator instead of a BookIter."""
    def __iter__(self) -> Iterator[Page]:
        return iter(self.pages)
lazy_book = LazyBook()
for i in range(1, 5):
lazy_book.add_page(f"page_{i}")
for page in lazy_book:
print(page)
type(lazy_book), type(iter(lazy_book))
class PurchasableBook(Book):
    """Book whose iterator stops after the first page until it is purchased."""
    def __init__(self, purchased: bool = False) -> None:
        self.purchased = purchased
        super().__init__()
    def __iter__(self) -> "PurchasableBookIter":
        # The gating logic lives in the dedicated iterator class.
        return PurchasableBookIter(self)
class BookIter:
    """Iterator over a Book's pages; also keeps a reference to the book."""

    def __init__(self, book: Book) -> None:
        self.pages = book.pages
        self.book = book
        self._cursor = 0  # index of the next page to yield

    def __iter__(self) -> "BookIter":
        return self

    def __next__(self) -> Page:
        if self._cursor >= len(self.pages):
            raise StopIteration
        page = self.pages[self._cursor]
        self._cursor += 1
        return page
class PurchasableBookIter(BookIter):
    """BookIter that refuses to advance past page 1 for unpurchased books.

    Note: `purchased` is snapshotted at iterator creation, so flipping the
    flag on the book afterwards does not unlock an already-created iterator
    (demonstrated in the cells below).
    """
    def __init__(self, book: Book):
        self.purchased = book.purchased
        super().__init__(book)
    def __next__(self) -> Page:
        if not self.purchased and self._cursor > 0:
            print("Buy the book to view next pages!")
            raise StopIteration
        return super().__next__()
purchased_book = PurchasableBook()
for i in range(1, 5):
purchased_book.add_page(f"page_{i}")
for page in purchased_book:
print(page)
it = iter(purchased_book)
for page in it:
print(page)
purchased_book.purchased = True
for page in it:
print(page)
purchased_book.purchased = True
for page in purchased_book:
print(page)
```
## Is PurchasableBookIter fully match the iterator protocol?
<h2 align=center>Quiz time</h2>
$$
iterators \supset iterable
$$
<center>or</center>
$$
iterators \subset iterable
$$
## Recup
What should an iterator do?
1. Track the current state
1. Know how to return next element
1. ?????
1. <strike>PROFIT</strike> `StopIteration`
Do we really need a collection for an iterator?
Do we really need to stop?
```
class RecurrentSequence:
    """Infinite additive recurrence (a_1, a_2, a_1 + a_2, ...)."""
    def __init__(self, a_1: int, a_2: int) -> None:
        self.a_1 = a_1
        self.a_2 = a_2
    def __iter__(self) -> Iterator[int]:
        # A fresh iterator each time, so the sequence is re-iterable.
        return RecurrentSequenceIterator(self.a_1, self.a_2)
class RecurrentSequenceIterator:
    """Stateful iterator producing an endless additive recurrence."""

    def __init__(self, a_1: int, a_2: int) -> None:
        self.a_1 = a_1
        self.a_2 = a_2

    def __iter__(self) -> Iterator[int]:
        return self

    def __next__(self) -> int:
        current, upcoming = self.a_1, self.a_2
        self.a_1, self.a_2 = upcoming, current + upcoming
        return current
fib = RecurrentSequence(1, 1)
for i, f in zip(range(1, 20), fib):
print(f"{i} - {f}", end="; ")
print()
for i, f in zip(range(1, 20), fib):
print(f"{i} - {f}", end="; ")
fib_iter = iter(fib)
for i, f in zip(range(1, 10), fib_iter):
print(f"{i} - {f}", end="; ")
print()
for i, f in zip(range(1, 10), fib_iter):
print(f"{i} - {f}", end="; ")
fib.__sizeof__(), fib_iter.__sizeof__()
```
## Any side effects?
- one can exhaust an iterator:
```
iterator = iter([1, 2, 3, 4])
print(sum(iterator))
print(sum(iterator))
not_iterator = [1, 2, 3, 4]
print(sum(not_iterator))
print(sum(not_iterator))
```
- an iterable has `__contains__` method
```
print(list(book))
Page("page_2", 2) in book, Page("page_2", 2) in book, Page("page_5", 5) in book, 3 in book
iterator = iter(book)
Page("page_2", 2) in iterator, Page("page_2", 2) in iterator
5 in fib
# 6 in fib
```
## Iterables with `__getitem__`
```
class HiddenList:
    """Wraps a list while exposing no iteration protocol at all."""
    def __init__(self, lst):
        self._lst = lst
h_list = HiddenList([1, 2, 3])
iter(h_list)
class IterableHiddenList(HiddenList):
    """Iterable via the legacy __getitem__ protocol: iter() probes indexes
    0, 1, 2, ... until IndexError ends the iteration."""
    def __getitem__(self, index):
        print(f"Index: {index}")
        return self._lst[index]
ih_list = IterableHiddenList([1, 2, 3])
iter(ih_list)
# for i in ih_list:
# print(i)
# pass
# 5 in ih_list
```
How might it work?
```
print(dir(IterableHiddenList))
print(dir(RecurrentSequence))
```
```python
print(index)
```
## Any questions so far?
# GENERATORS
Are generators iterators?
```
def gen():
    # Minimal generator function: the mere presence of `yield` makes
    # calling gen() return a generator iterator instead of running the body.
    yield 1
print(dir(gen()))
```
Are generators iterators?
\- Yes*
```
gen, gen(), (1 for _ in [])
```
- **Generator functions** - a function or method which uses the yield statement
- **Generator iterator** - an object created by a generator function
- **Generator expression** - an expression that returns an iterator
```
def recurrent_sequence(a1: int, a2: int):
    """Endlessly yield the recurrence a(n+2) = a(n) + a(n+1), starting at a1."""
    current, upcoming = a1, a2
    while True:
        yield current
        current, upcoming = upcoming, current + upcoming
fib = recurrent_sequence(0, 1)
for i, f in zip(range(1, 20), fib):
print(f"{i} - {f}", end="; ")
```
## Generators and `return`
```
def gen():
    yield 1
    # A `return` in a generator does not hand 2 back to the caller:
    # it raises StopIteration with the value attached (StopIteration.value).
    return 2
g = gen()
next(g)
next(g)
```
What will happen here?
```
def gen():
    # Returning before any yield: the first next() immediately raises
    # StopIteration carrying 2 as its .value; the yield below is unreachable,
    # but its presence still makes this a generator function.
    return 2
    yield 1
gen().__next__()
try:
next(gen())
except StopIteration as e:
print(e.value)
```
## Preserving operations order
```
def do_in_order():
    # Each `yield` is a pause point: the three printed steps only run as
    # the caller advances the generator with next().
    x = 1
    print(f"Do first, {x}")
    yield
    x += 1
    print(f"Do second, {x}")
    yield
    # The third step runs on the final next(), which then raises StopIteration
    # (hence next(gen, "Stop") in the demo to absorb it).
    x *= x
    print(f"Do third, {x}")
gen = do_in_order()
next(gen)
next(gen)
next(gen, "Stop")
```
## Send
```
def do_in_order_2():
    x = 1
    print(f"Do first, {x}")
    # A bare `yield` produces None to the caller; the value bound to y
    # is whatever the caller passes via gen.send(...) — plain next() sends None.
    y = yield
    print(f"Do second, {y}")
    # This yield both produces 42 and receives the next sent value into z.
    z = yield 42
    print(f"Do third, {z}")
gen = do_in_order_2()
for _ in gen:
print(f"step {_}")
gen = do_in_order_2()
next(gen)
next(gen)
next(gen, "Stop")
```
## Send
```
def do_in_order_2():
    x = 1
    print(f"Do first, {x}")
    # Yields "123" out; the value sent back in (e.g. gen.send("Hello"))
    # rebinds x. The first advance must be send(None)/next() — you cannot
    # send a non-None value into a just-started generator.
    x = yield "123"
    print(f"Do second, {x}")
    x = yield 42
    print(f"Do third, {x}")
gen = do_in_order_2()
gen.send(None)
gen.send("Hello")
try:
gen.send("World")
except StopIteration:
print("I'm out!")
```
## Throw
```
def g():
    # gen.throw(...) raises the exception at the paused yield; catching it
    # here lets the generator yield the exception object itself as a value.
    try:
        yield 42
    except Exception as e:
        yield e
gen = g()
next(gen)
gen.throw(NotImplementedError, "Exception text")
gen.throw(NotImplementedError, "Exception text returns")
```
## Close
```
def do_in_order_2():
    x = 1
    print(f"Do first, {x}")
    # gen.close() raises GeneratorExit at this paused yield; since the
    # generator doesn't catch it, it finishes. Any later send() hits an
    # already-closed generator and raises StopIteration.
    x = yield
    print(f"Do second, {x}")
    x = yield 42
    print(f"Do third, {x}")
gen = do_in_order_2()
gen.send(None)
gen.close()
gen.send(None)
gen.throw(NotImplementedError, "Exception text")
```
How does `close` work?
```
BaseException.__subclasses__()
def tricky_gen():
    yield "first"
    try:
        yield "second"
    finally:
        # Yielding inside `finally` is legal but dangerous: close() injects
        # GeneratorExit at the paused yield above, and yielding here instead
        # of exiting triggers RuntimeError("generator ignored GeneratorExit").
        yield "from finally"
gen = tricky_gen()
for i in gen:
print(i, end=" ")
gen = tricky_gen()
next(gen), next(gen)
gen.close()
```
https://amir.rachum.com/blog/2017/03/03/generator-cleanup/
## yield from
- pass the execution to another generator
- pass `send` and `throw`
```
def chain(*iterables):
    """Yield every item of every argument, left to right."""
    for source in iterables:
        it = iter(source)
        while True:
            try:
                item = next(it)
            except StopIteration:
                break
            yield item
list(chain(
[1, 2], ("4", "5"), {"key1": "val1", "key2": "val2"}, "iter", {("a",), ("b",)}
))
```
Can we do better?
```
def chain(*iterables):
    # `yield from` delegates to each sub-iterable, forwarding send()/throw()
    # as well as plain iteration.
    for iterable in iterables:
        yield from iterable
    # The return value never appears in the iterated items; it only surfaces
    # as StopIteration.value (or as the value of an enclosing `yield from`).
    return "Stop"
list(chain(
[1, 2], ("4", "5"), {"key1": "val1", "key2": "val2"}, "iter", {("a",), ("b",)}
))
```
## Example: Sieve of Eratosthenes
In mathematics, the sieve of Eratosthenes is an ancient algorithm for finding all prime numbers up to any given limit.
<img src="https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif">
1. Create a list of consecutive integers from 2 through n: (2, 3, 4, ..., n).
1. Initially, let p equal 2, the smallest prime number.
1. Enumerate the multiples of p by counting in increments of p from 2p to n, and mark them in the list (these will be 2p, 3p, 4p, ...; the p itself should not be marked).
1. Find the smallest number in the list greater than p that is not marked. If there was no such number, stop. Otherwise, let p now equal this new number (which is the next prime), and repeat from step 3.
1. When the algorithm terminates, the numbers remaining not marked in the list are all the primes below n.
Step 1: generate all natural numbers:
```
def natural_numbers(start=1):
    """Count upward from `start` forever, one integer at a time."""
    current = start
    while True:
        yield current
        current += 1
for _, number in zip(range(10), natural_numbers(1)):
print(number, end=" ")
```
Step 2: ~draw the rest of the owl~
```
# Fix: the original cell called `sieve(...)` on the line *before* the
# function was defined, which raises NameError; the stray call is removed.
def sieve(numbers):
    """Recursively sieve primes from an ascending iterator of candidates.

    Takes the first element as prime, yields it, then delegates to a new
    sieve over the remaining numbers with that prime's multiples filtered
    out. Recursion depth grows with each prime produced, so this is a
    teaching construction, not a practical sieve.
    """
    prime = next(numbers)
    yield prime
    yield from sieve(p for p in numbers if p % prime != 0)
for _, prime in zip(range(10), sieve(natural_numbers(2))):
print(f"{_} - {prime}")
```
# Lazy evaluations
```
n = 10**7
%%memit
sum([i for i in range(n)])
%%memit
sum(i for i in range(n))
%%memit
sum(list(map(lambda x: x**2, [i for i in range(n)])))
%%memit
sum(map(lambda x: x**2, [i for i in range(n)]))
%%memit
sum(map(lambda x: x**2, (i for i in range(n))))
```
# Context managers
- `__enter__(self)`
- `__exit__(self, exception_type, exception_value, traceback)`
Patterns:
- acquisition/release of the resource
- doing something in different context
```
import os
class cd:
    """Context manager that runs the with-body inside another directory."""

    def __init__(self, path):
        self._target = path

    def __enter__(self):
        # Remember where we were so __exit__ can undo the change.
        self._origin = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, *exc_info):
        # Restore unconditionally; exceptions from the body still propagate
        # because we return None (falsy) from __exit__.
        os.chdir(self._origin)
print(os.getcwd())
with cd(".."):
print(os.getcwd())
print(os.getcwd())
from contextlib import contextmanager
@contextmanager
def cd_gen(path):
    """Generator-based equivalent of the class-based `cd` context manager."""
    # Setup (the would-be __init__/__enter__): capture and switch directory.
    previous = os.getcwd()
    try:
        os.chdir(path)
        # The with-body executes while we are suspended at this yield.
        yield
    finally:
        # Teardown (the would-be __exit__): runs on normal exit *and*
        # when the with-body raises.
        os.chdir(previous)
print(os.getcwd())
with cd_gen("/home"):
print(os.getcwd())
print(os.getcwd())
```
# `itertools`
This module implements a number of iterator building blocks inspired by constructs from APL, Haskell, and SML. Each has been recast in a form suitable for Python.
The module standardizes a core set of fast, memory efficient tools that are useful by themselves or in combination. Together, they form an “iterator algebra” making it possible to construct specialized tools succinctly and efficiently in pure Python.
```
from itertools import islice
def take(iterable, n=10):
    """Materialize at most the first n items of iterable as a list."""
    head = islice(iterable, n)
    return list(head)
take(fib, 5)
from itertools import count, repeat, cycle
take(count())
take(repeat([1, 2]), 3)
take(cycle([1, 2]), 11)
from itertools import dropwhile, takewhile
list(dropwhile(lambda x: x < 3, range(6)))
list(takewhile(lambda x: x < 3, range(6)))
from itertools import chain
list(chain(
[1, 2], ("4", "5"), {"key1": "val1", "key2": "val2"}, "iter", {("a",), ("b",)}
))
collection = [[1, 2], ("4", "5"), {"key1": "val1", "key2": "val2"}, "iter", {("a",), ("b",)}]
list(chain(*collection))
list(chain.from_iterable(collection))
```
What is the difference?
```
from itertools import tee
iterator = iter(range(4))
a = b = iterator
list(a), list(b)
iterator = range(4)
a, b = tee(iterator, 2)
list(iterator)
list(a), list(b)
iterator = iter(range(4))
a, b = tee(iterator, 2)
list(iterator)
list(a), list(b)
```
## Combinatoric iterators
```
from itertools import product
list(product("12", repeat=3))
list(product("12", "abc"))
sum(1 for a, b, c, d in product(l1, l2, l3, l4) if a + b + c + d == 0)
# or
sum(a + b + c + d == 0 for a, b, c, d in product(l1, l2, l3, l4))
from itertools import permutations, combinations, combinations_with_replacement
list(permutations("BAC", 2))
list(combinations("ABC", 2))
list(combinations_with_replacement("ABC", 2))
```
# Links
More itertools:
https://docs.python.org/3/library/itertools.html#itertools-recipes
David Beazley:
https://www.dabeaz.com/tutorials.html
# Fin.
| github_jupyter |
## Notebook 0 - Labeling Languages of Texts
For our project, we will be using the Dota dataset: https://www.kaggle.com/romovpa/gosuai-dota-2-game-chats
This dataset contains multiple languages that our group cannot interpret. For this case, we will be using the English portion of the dataset. If we have more time by the end of this project, we may find a way to translate different languages.
```
import numpy as np
import pandas as pd
import warnings
import fasttext
# To load the large dataset (21 million rows) with AWS
!pip install boto3
# Installing the language labeler
!pip install fasttext
# Loading data into Colab via AWS https://medium.com/python-in-plain-english/how-to-load-data-from-aws-s3-into-google-colab-7e76fbf534d2
import boto3
BUCKET_NAME = 'ggwp-project'

# SECURITY: these AWS credentials are hard-coded in a shared notebook.
# They should be revoked and loaded from the environment or an AWS profile
# (boto3's default credential chain) instead of being committed here.
s3 = boto3.resource('s3', aws_access_key_id = 'AKIAJ52OAEJJXOEMTRVA',
          aws_secret_access_key= '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')
KEY = 'dota2_chat_messages.csv'

# Fix: `botocore` is referenced in the except clause below but was never
# imported, so a download failure would raise NameError instead of being
# handled. boto3 depends on botocore, so this import is always available.
import botocore

try:
    # Downloading training dataset from s3 with name `dota2_chat_messages.csv`
    # to colab dir as `dota2_chat_messages.csv`
    s3.Bucket(BUCKET_NAME).download_file(KEY, 'dota2_chat_messages.csv')
except botocore.exceptions.ClientError as e:
    # 404 means the object is missing; anything else is a real failure.
    if e.response['Error']['Code'] == "404":
        print("The object does not exist.")
    else:
        raise
# Loading the Full Dota dataset (all languages)
df = pd.read_csv('dota2_chat_messages.csv')
df.head()
df.shape
# Checking null values
for col in df.columns:
print(col, "NA:", sum(df[col].isna()))
```
There are only null values in the `text` column. While the dataset description suggests there are no missing values, pandas does not identify empty strings as NA/NaN. This suggests the missing values are other invalid characters, such as emojis, which we won't be considering for our purposes. As such, we dropped the null values.
```
# Dropping nulls since they are only in the text column (nothing to analyze w/ no text)
df = df.dropna()
df.head()
# Converting the single monolith dataframe into a dictionary of multiple smaller dataframes to allow batch processing
# Split the full dataframe into 10 roughly equal chunks so language
# detection can be run (and inspected) batch by batch.
DataFrameDict = {}
# +1 makes the chunk size a ceiling, so the 10 slices cover every row.
countEach = int(df.shape[0] / 10) + 1
for i in range(0,10):
    df_temp = df[(i * countEach) : ((i + 1) * countEach)]
    # .copy() detaches each chunk from the parent frame, avoiding
    # SettingWithCopy warnings when the language column is added later.
    DataFrameDict[str(i)] = df_temp.copy()
# Loading the FastText model
!wget https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin
model = fasttext.load_model("lid.176.bin")
# Using FastText to run a batch language detection process on the small dfs that are part of the above dict
# Label each chunk's messages with fastText; xCt counts finished chunks.
xCt = 0
for key in DataFrameDict:
    thisDf = DataFrameDict[key]
    i = -1
    # np.zeros(...).astype(str) fills the array with the string "0.0",
    # which therefore remains as the sentinel label for any row whose
    # prediction fails (see the bare except below).
    langs = np.zeros(len(thisDf)).astype(str)
    for message in thisDf['text'].values:
        i += 1
        try:
            predictions = model.predict(message)
            # fastText labels look like "__label__en"; keep the last two
            # characters as the language code. NOTE(review): any 3-letter
            # codes would be truncated — confirm acceptable.
            langs[i] = predictions[0][0][-2:]
        except:
            # Bare except silently skips unpredictable rows, leaving "0.0".
            continue
    print("FINISHED!" + str(xCt))
    thisDf['language'] = langs
    print(thisDf.head())
    xCt += 1
# Recreating the dataframe, now with languages identified
newDf = pd.DataFrame()
for key in DataFrameDict:
newDf = pd.concat([newDf, DataFrameDict[key]])
print(newDf.shape)
# # Saving the dataframe with the language column
# newDf.to_csv('dataSetWithLang.csv', index=False)
```
| github_jupyter |
```
import tensorflow as tf
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.45)
tf.enable_eager_execution(config=tf.ConfigProto(gpu_options=gpu_options))
import time
from pathlib import Path
import matplotlib.pyplot as plt
from IPython.display import clear_output
from shared import make_dataset, random_jitter, Generator, Discriminator, normalize, \
train_step, generate_images, generate_plot
%matplotlib inline
PATH = Path("/scratch/datasets/astro_deconv_2019/")
CHECKPOINT_PREFIX = Path('training_checkpoints/gan')
BUFFER_SIZE = 200
BATCH_SIZE = 1
IMG_SIZE = 256
OUTPUT_CHANNELS = 1
LAMBDA = 100
EPOCHS = 5
LR = 0.001
TEST_INTERVAL = 100
CHECKPOINT_INTERVAL = 5000
train_dirty_dataset = make_dataset(PATH / 'train/*-dirty.fits')
train_skymodel_dataset = make_dataset(PATH / 'train/*-skymodel.fits')
train_psf_dataset = make_dataset(PATH / 'train/*-psf.fits')
train_clean_beam_dataset = make_dataset(PATH / 'train/*-clean-beam.fits')
train_dataset = tf.data.Dataset.zip((train_dirty_dataset, train_skymodel_dataset, train_psf_dataset, train_clean_beam_dataset))
train_dataset = train_dataset.map(random_jitter)
train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.batch(1)
test_dirty_dataset = make_dataset(PATH / 'test/*-dirty.fits')
test_wsclean_dataset = make_dataset(PATH / 'test/*-wsclean-model.fits')
test_skymodel_dataset = make_dataset(PATH / 'test/*-skymodel.fits')
test_psf_dataset = make_dataset(PATH / 'test/*-psf.fits')
test_clean_beam_dataset = make_dataset(PATH / 'test/*-clean-beam.fits')
test_dataset = tf.data.Dataset.zip((test_dirty_dataset, test_skymodel_dataset, test_psf_dataset, test_clean_beam_dataset, test_wsclean_dataset))
test_dataset = test_dataset.shuffle(BUFFER_SIZE)
test_dataset = test_dataset.batch(1)
generator = Generator(IMG_SIZE=IMG_SIZE, OUTPUT_CHANNELS=OUTPUT_CHANNELS)
discriminator = Discriminator()
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_optimizer = tf.train.AdamOptimizer(learning_rate=LR, beta1=0.5)
discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=LR, beta1=0.5)
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
```
# Train the model
```
# Running history of test losses, appended to every TEST_INTERVAL steps.
l1s = []
gans = []
for epoch in range(EPOCHS):
    step = 0
    start = time.time()
    for input_, target, psf, clean_beam in train_dataset:
        # normalize returns scaling bounds plus rescaled tensors;
        # psf/clean_beam are unpacked from the dataset but unused here.
        min_, max_, input_, target, = normalize(input_, target)
        train_step(loss_object, generator, generator_optimizer,
                   discriminator_optimizer, discriminator, input_, target, LAMBDA)
        step += 1
        print(".", end = '')
        # Periodic evaluation on a single test batch.
        if (step + 1) % TEST_INTERVAL == 0:
            clear_output(wait=True)
            for test_input, test_target, test_psf, test_cleanbeam, test_wsclean in test_dataset.take(1):
                r = normalize(test_input, test_target, test_wsclean)
                min_, max_, test_input, test_target, wsclean = r
                # NOTE(review): training=True at eval time keeps dropout /
                # normalization layers in train mode — confirm intentional.
                test_prediction = generator(test_input, training=True)
                disc_real_output = discriminator([test_input, test_target], training=True)
                disc_generated_output = discriminator([test_input, test_prediction], training=True)
                # Generator's adversarial loss: how "real" the fake looks.
                gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
                # L1 reconstruction error between prediction and target.
                l1_loss = tf.reduce_mean(tf.abs(test_target - test_prediction))
                print(f"l1_loss: {l1_loss:.4f} gan_loss: {gan_loss:.4f}")
                l1s.append(l1_loss)
                gans.append(gan_loss)
                generate_images(test_prediction, test_input, test_target)
                generate_plot([i.numpy() for i in l1s], 'l1')
                generate_plot([i.numpy() for i in gans], 'gan')
            duration = time.time()-start
            speed = step / duration
            print(f"step: {step + 1} epoch: {epoch + 1} duration: {duration:.2f}s step/s: {speed:.2f}\n")
        # Periodic checkpointing of both optimizers and both models.
        if (step + 1) % CHECKPOINT_INTERVAL == 0:
            checkpoint.save(file_prefix=str(CHECKPOINT_PREFIX))
```
| github_jupyter |
- title: Cox's Theorem: Establishing Probability Theory
- summary: Cox's theorem is the strongest argument for the use of standard probability theory. Here we examine the axioms to establish a firm foundation for the interpretation of probability theory as the unique extension of true-false logic to degrees of belief.
- author: Daniel Cox
- date: 2019-11-03
- category: arXiv highlights
- image: /static/images/arXiv.gif
# Ranging farther afield
Today I'll be taking advantage of my stated intention to pull back from the stream of _recent_ papers, and look at some papers for their impact or fundamental importance as I see it. So today I'm doing something unusual, highlighting a paper not from last week, but from _four years_ ago, and not directly from AI, but from the field of probability theory: [Cox's Theorem and the Jaynesian Interpretation of Probability](https://arxiv.org/abs/1507.06597).
I've been reading a book by E. T. Jaynes, called [Probability Theory: The Logic of Science](https://www.amazon.com/Probability-Theory-Science-T-Jaynes/dp/0521592712), a brilliant and practical exposition of the Bayesian view of probability theory, partially on [the recommendation of another AI researcher](https://www.lesswrong.com/posts/kXSETKZ3X9oidMozA/the-level-above-mine). The thoughts of an ideal reasoner would have Bayesian structure, so I am both personally and professionally interested in mastering the concepts.
# Overview
Cox's theorem is an attempt to derive probability theory from a small, common-sense set of uncontroversial desiderata, and to demonstrate its uniqueness as an extension of two-valued (true/false) logic to degrees of belief. That's a big deal. As today's paper mentions, Peter Cheeseman [has called](https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1467-8640.1988.tb00091.x) Cox's theorem the "strongest argument for the use of standard (Bayesian) probability theory". But Cox's theorem is non-rigorous as originally formulated, and many people have patched up the holes for use in their various fields. Often today, if someone refers to "Cox's theorem", they usually mean one of the fixed-up versions.
Jaynes' version unfortunately contains a mistake, and today's paper fixes it by replacing some of the axioms with the simple requirement that probability theory remain consistent with respect to repeated events.
It may be difficult without reading the book to see why this paper is important to AI, so perhaps in the near future I'll discuss that at greater length. For today, however, I'll simply be explaining each of the axioms, and setting you up to read the paper more easily. It is certainly worth a close reading, to ground your confidence in the interpretation of probability theory as a _logical system_ that extends true-false logic to handle uncertainty, so you can reap the associated benefits.
# Abstract
> There are multiple proposed interpretations of probability theory: one such interpretation is true-false logic under uncertainty. Cox's Theorem is a representation theorem that states, under a certain set of axioms describing the meaning of uncertainty, that every true-false logic under uncertainty is isomorphic to conditional probability theory. This result was used by Jaynes to develop a philosophical framework in which statistical inference under uncertainty should be conducted through the use of probability, via Bayes' Rule. Unfortunately, most existing correct proofs of Cox's Theorem require restrictive assumptions: for instance, many do not apply even to the simple example of rolling a pair of fair dice. We offer a new axiomatization by replacing various technical conditions with an axiom stating that our theory must be consistent with respect to repeated events. We discuss the implications of our results, both for the philosophy of probability and for the philosophy of statistics.
# Axioms $\newcommand{\P}{\mathbb{P}} \newcommand{\F}{\mathscr{F}}$
This paper proposes a new axiomatization of probability theory, with five axioms. As a variant of Cox's theorem, these axioms are supposed to represent a set of "common sense" desiderata for a logical system under uncertainty. That is, each of these axioms are things we naturally want to be true of any logical system under uncertainty. Cox's original axioms were more intuitively essential to me, however, so I'll also try to give justifications for demanding each of the following axioms, as well as explaining them technically.
Remember the ultimate goal is to _build_ probability theory up from a minimal set of absolute requirements for _any_ logical system. The punchline is that probability theory as described historically by greats like Kolmogorov turns out to be the _unique_ extension of true-false logic under uncertainty, and we can derive it from "common sense".
To emphasize the point that while we're writing these axioms we haven't yet got _probability_, following Jaynes I'll refer to our measure of certainty/uncertainty as "plausibility".
## 1. Plausibility must be representable by a real number
> Let $\Omega$ be a set and $\mathscr{F}$ be a $\sigma$-algebra on $\Omega$.
>
> Let $\P: \F \times (\F \setminus \emptyset) \rightarrow R \subseteq \mathbb{R}$ be a function, written using notation $\P(A|B)$.
It makes intuitive sense that we should be able to measure our uncertainty on a smooth, finite scale, so it makes sense to demand that our plausibility scale be chosen from some definite subset of the reals.
$\F$ being "[a $\sigma$-algebra on $\Omega$](https://en.wikipedia.org/wiki/Sigma-algebra)" means that it is a collection of subsets of $\Omega$ (including $\Omega$ and $\emptyset$) that is closed under complement and closed under countable unions. (Being "closed under" some operation means that taking that operation on any element in the set yields an element that's also defined to be in the set.) The idea is that $\Omega$ comprises all primitive events, and $\F$ therefore includes every possible logical combination of these primitive events, in a way that makes it equivalent to a Boolean algebra.
I found it clarifying that $\P(\Omega)=1$. That's what made it click for me that a set in $\F$ represents a disjunction of primitive events, and $\Omega$ contains _all_ primitive events, so $\P(\Omega)$ is the probability that _anything_ happens.
$\P(A|B)$ is a function of two arguments $A,B \in \mathscr{F}$, and B cannot be empty. The interpretation is, "The probability of some event A, given that event B is true." The second argument cannot be empty, Jaynes often describes it as "the background information", including everything else known (such as the rules of probability themselves, and the number of penguins in Antarctica).
The arguments of $\P$ are sets, but as the paper mentions, "by [Stone's Representation Theorem](https://www.jstor.org/stable/1989664), every Boolean algebra is isomorphic to an algebra of sets".
## 2. Sequential continuity
> We have that
> $$A_1 \subseteq A_2 \subseteq A_3 \subseteq\ldots \text{ such that } A_i \nearrow A \text{ implies } \P (A_i | B)\nearrow \P(A | B )$$
> for all $A, A_i, B$.
Another intuitive requirement for a system of logical inference is that our plausibility measure return arbitrarily small differences in plausibility for arbitrarily small changes in truth value. This concept is also known as "continuity".
If you can arrange a sequence of events (sets) so that earlier events (e.g., $A_1$) are included in later events (e.g., $A_3$), then there is "sequential continuity" between earlier sets and later sets in this sequence. In the notation of the paper, $A_1 \nearrow A_3$.
What this axiom is saying is that as long as there is sequential continuity between two logical propositions, there is also sequential continuity between their plausibilities. This formalizes our requirement for continuity. Also notice that if $\P (A_i | B)\nearrow \mathbb{P}(A | B )$ then $\P (A_i | B) \leq \mathbb{P}(A | B )$, because our definition of sequential continuity also implies that the cardinality of the sets is non-decreasing. This will be useful reading the proof.
## 3. Decomposability
> $\P(AB | C )$ can be written as
> $$\P(A | C ) \circ \P(B | AC)$$
> for some some function $\circ : (R \times R) \rightarrow R$.
This is the first axiom that I had trouble seeing as intuitive, and in fact I thought it was a bit question-begging at first because it looks like the product rule. It represents the demand that plausibilities of compound propositions be decomposable into plausibilities of the their constituents, and that that decomposition has a particular form. It's the demand that it follow a particular form that seems somewhat arbitrary to me at first. Of course we would want to be able to decompose compound uncertainty into more fundamental elements, or else probability theory wouldn't be very useful. But why should it take the form described of $\circ$?
The answer is that this form is _minimal_ for decomposability. That is, it's the weakest statement that could be made about the details of decomposition. In English: "The plausibility of A _and_ B is a function of the plausibility of one of those (say, $A$), and the plausibility of the other ($B$) once we can assume $A$ is true."
Note that logical conjunctions are commutative ($AB = BA$), so by this axiom $\P(AB | C )$ can _also_ be written as $\P(B | C ) \circ \P(A | BC)$. They prove later also that $\circ$ is commutative, but that is not assumed in the axioms.
## 4. Negation
> There exists a function $N : R \rightarrow R$ such that
> $$
\P(A^c | B)= N[ \P(A | B)]
$$
> for all $A,B$.
This axiom also seemed a bit question-begging to me, because it looks like the sum rule of probability theory, and because it seemed arbitrary that you would want uniquely determined probabilities for the negations of propositions.
Upon further reflection, however, this seems like a reasonable demand to be consistent with two-valued logic. Every proposition $A$ in true-false logic has a unique proposition $A^c$ representing its negation (this superscript complement notation emphasizes the representation of propositions as sets, but is equivalent to $\bar A$, $\neg A$, etc.), so it makes sense that an extension of true-false logic to uncertainty would also include a method of determining the opposite.
In actual fact, this _may_ be the most controversial axiom, since there are logics other than true-false logic that don't require the "law of the excluded middle" (they allow "maybe"). But if you are willing to accept that all well-formed propositions are either true or false, and our system of plausibility represents levels of certainty about their truth or falsehood, then this axiom represents a reasonable and necessary demand.
## 5. Consistency under extension
> If $(\Omega, \mathscr{F}, \P)$ satisfies the axioms above, then $(\Omega \times \Omega, \mathscr{F} \otimes \mathscr{F}, \P \operatorname{\circ} \P)$ must as well, i.e., the definition $\P(A \times B | C \times D) = \P(A | C) \circ \P(B | D)$ is consistent.
This axiom represents the core of the authors' contribution. Although there were many correct variants of Cox's theorem, and many ways to axiomatize probability theory, they all had either disappointingly narrow scope, or had lost their intuitive nature in the formalization. The authors of our paper replace several technical axioms from other axiomatizations with this one demand _that their rules be consistent under extension to repeated events_.
In English, this axiom is, "If the rules apply to a single trial (e.g., a single coinflip), then they also apply to a system of two independent trials (e.g., two coinflips)." To me, that's obviously intuitive, so it's delightful to find that it covers so much ground.
Examining their formal expression, with the coinflips example, with $A$ meaning "heads on the first coinflip" and B meaning "tails on the second coinflip":
$\P(A \times B | C \times D)$ means "the plausibility of heads-then-tails given two piles of background information $C$ and $D$". The axiom states this must equal $\P(A | C) \circ \P(B | D)$, meaning that the plausibility of a pair of coinflips coming up heads-tails is equal to the plausibility of a single coinflip coming up heads (given background information $C$), composed (using $\circ$) with another coinflip coming up tails (given background information $D$).
# Parting thoughts
1. I hope this exposition of the axioms helps you read the paper yourself, though I realize I may not have provided sufficient motivation to do so yet. That would make it a bit like [my post deriving something surprising about Boltzmann machines](https://computable.ai/articles/2019/Mar/10/boltzmann-machines-differentiation-work.html) without first explaining what Boltzmann machines _are_. I intend to rectify this in the future for both posts.
2. I could make this a lot clearer for people with less set theory, group theory, or probability theory background. If that would be helpful to you, please leave me a comment on what specifically didn't make sense so I can get a feel for my audience.
3. To memorize these and make reading the proof easier, I labeled each of the five axioms with some relevant symbol, and combined them into a mnemonic. In case that helps you too, here it is: $\mathbb{R}$ $\nearrow$ $\circ$ $N$ $\times$.
| github_jupyter |
# Example Gawain notebook
In this notebook I show how to set up, run, and plot a simple simulation using the gawain plasma physics module.
```
import numpy as np
from gawain.main import run_gawain
from gawain.io import Reader
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from IPython.display import HTML
```
# Set up run
Here we define the simulation parameters and initial and boundary conditions.
For this simple example, I use the Sod shock tube problem. This is a 1D hydrodynamics problem, and so mhd routines are turned off.
First define the run_name and output directory, this will create a directory containing the output from the simulation.
```
run_name = "sod_shock_tube"
output_dir = "."
```
Here I choose whether to run an MHD or Hydro simulation, and whether to turn on thermal conductivity and resistivity. As the Sod shock tube is a hydrodynamic problem, MHD and resistivity are turned off. I also do not turn on thermal conductivity.
```
with_mhd = False
with_thermal_conductivity = False
with_resistivity = False
```
These cells define the cfl number, the total simulation time, and which time integrator and flux calculation methods are to be used.
Currently the supported time integration methods are
- euler forward step
- 2nd order Runge-Kutta
- Leapfrog
- Predictor-Corrector
The currently supported flux calculation methods are
- Lax-Wendroff (two-step Richtmeyer form)
- Lax-Friedrichs
- HLLE with MUSCL reconstruction
For all but the simplest simulations it is strongly advised to use HLL, as Lax-Wendroff is susceptible to oscillations about sharp discontinuities and Lax-Friedrichs is very diffusive.
```
cfl = 0.5
t_max = 0.25
# "euler", "rk2", "leapfrog", "predictor-corrector"
integrator = "euler"
# "lax-wendroff", "lax-friedrichs", "hll"
fluxer = "hll"
```
## Define mesh
This cell defines the mesh shape (number of cells in each direction), dimensions (length of each dimension) and the number of output dumps to use.
```
nx, ny, nz = 200, 1, 1
mesh_shape = (nx, ny, nz)
n_outputs = 100
lx, ly, lz = 1.0, 0.001, 0.001
mesh_size = (lx, ly, lz)
x = np.linspace(0.0, lx,num=nx)
y = np.linspace(0.0, ly,num=ny)
z = np.linspace(0.0, lz,num=nz)
X,Y,Z =np.meshgrid(x,y,z, indexing='ij')
```
## Define initial condition
The mesh information is used to create an initial condition. If this were an mhd simulation, the magnetic field initial condition would also need to be included.
```
adiabatic_idx = 7.0/5.0
rho = np.piecewise(X, [X < 0.5, X >= 0.5], [1.0, 0.125])
pressure = np.piecewise(X, [X < 0.5, X >= 0.5], [1.0, 0.1])
mx = np.zeros(X.shape)
my = np.zeros(X.shape)
mz = np.zeros(X.shape)
e = pressure/(adiabatic_idx-1) + 0.5*mx*mx/rho
initial_condition = np.array([rho, mx, my, mz, e])
source = 0.0*np.ones(initial_condition.shape)
```
adiabatic_idx = 7.0/5.0
rho = np.ones(mesh_shape)
pressure = np.ones(mesh_shape)
mx = np.zeros(mesh_shape)
my = np.zeros(mesh_shape)
mz = np.zeros(mesh_shape)
e = pressure/(adiabatic_idx-1) + 0.5*mx*mx/rho
initial_condition = np.array([rho, mx, my, mz, e])
rho_s= np.zeros(mesh_shape)
mx_s= np.zeros(mesh_shape)
my_s= np.zeros(mesh_shape)
mz_s= np.zeros(mesh_shape)
e_s=np.zeros(mesh_shape)
e_s[80:120, :, :]=1.0
source = np.array([rho_s, mx_s, my_s, mz_s, e_s])
## Define boundary conditions
The available boundary conditions are
- periodic
- fixed (to the value specified in the initial condition)
- reflective
```
boundary_conditions = ['fixed', 'periodic', 'periodic']
config = {
"run_name": run_name,
"cfl": cfl,
"mesh_shape": mesh_shape,
"mesh_size": mesh_size,
"t_max": t_max,
"n_dumps": n_outputs,
"initial_condition": initial_condition,
"boundary_type": boundary_conditions,
"adi_idx": adiabatic_idx,
"integrator": integrator,
"fluxer": fluxer,
"output_dir": output_dir,
"with_mhd": with_mhd,
"source":source,
}
```
# Run Simulation
Combine all the above simulation parameters into a parameter dictionary. This dictionary is then fed to the run_gawain function, which begins the simulation. Ensure all keys for this dictionary are defined, and ensure the names are spelled correctly.
```
run_gawain(config)
```
# Plot Results
One can create simple plots to visualise the results using the Reader object
```
data = Reader(run_name)
data.variables
data.plot('density', timesteps=[0,10,20,50,90])
```
One can also create animations from the raw data using the method below
```
raw_data = data.get_data('energy')
raw_data.shape
fig, ax = plt.subplots()
ax.set_xlim(( 0, 200))
ax.set_ylim((0, 1))
line, = ax.plot([], [], lw=2)
# initialization function: plot the background of each frame
def init():
    # Clear the Line2D's data; `line` is the module-level artist created
    # by ax.plot above. Returning a tuple of artists enables blitting.
    line.set_data([], [])
    return (line,)
# animation function. This is called sequentially
def animate(i):
x = np.linspace(0, 200, 200)
y = raw_data[i].reshape(200,)
line.set_data(x, y)
return (line,)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=100, interval=20,
blit=True)
HTML(anim.to_jshtml())
```
| github_jupyter |
# Verifying that the matrix DWPC method generates results similar to the Neo4j method
The matrix-based DWPC calculation method does not provide results exactly equal to the Neo4j-based method for all metapaths. We would like to verify that these differences in DWPC calculation do not result in significant differences in the resulting predictions.
```
import pandas as pd
import matplotlib
import seaborn as sns
%matplotlib inline
```
---
## Data
Data files are from fold 3 of the full size network (no rare disease data).
```
# Load prediction results from both pipelines (tab-separated):
# orig = Neo4j-based DWPC method, matx = matrix-based DWPC method.
orig = pd.read_csv("orig_pred_res_for_roc.tsv", sep='\t')
matx = pd.read_csv("matrix_pred_res_for_roc.tsv", sep='\t')
# Quick sanity checks on shape and content
orig.shape
matx.shape
orig.head(2)
matx.head(2)
```
---
## Check that the pairs are equal
Check that the predictions involve the same chemical-disease pairs.
```
# Verify both result sets cover exactly the same chemical-disease pairs.
# Note the column naming difference: the Neo4j output says 'compound_id'
# while the matrix output says 'chemical_id'.
assert (
    set((r["compound_id"], r["disease_id"]) for i, r in orig.iterrows())
    ==
    set((r["chemical_id"], r["disease_id"]) for i, r in matx.iterrows())
)
```
---
## Extract out the relevant information
We will exclude the actual DWPC values when comparing the ranks of the predicted results.
```
# Keep only the key columns, predicted value, and label from each result
# set, renaming so the two frames can be merged without column clashes.
tempa = (orig
    [["compound_id", "disease_id", "predicted_value", "true_label"]]
    .rename(columns={
        "compound_id": "chemical_id",
        "predicted_value": "orig_value",
        "true_label": "orig_label"
    })
)
tempb = (matx
    [["chemical_id", "disease_id", "predicted_value", "true_label"]]
    .rename(columns={
        "predicted_value": "matx_value",
        "true_label": "matx_label"
    })
)
# Inner join on the (chemical, disease) pair keys
res = tempa.merge(tempb, how="inner", on=["chemical_id", "disease_id"])
res.shape
res.head()
```
### Check that the true labels are equal
```
# Confirm both methods agree on the ground-truth label for every pair
(res["matx_label"] == res["orig_label"]).all()
# Labels match, so collapse to a single `true_label` column
res = (res
    .drop("matx_label", axis=1)
    .rename(columns={"orig_label": "true_label"})
)
res.head()
```
---
## Calculate the ranks of the predictions
Ranks are assigned in descending order (rank of 1 means the top prediction).
```
# Rank both methods' predicted values in descending order (rank 1 = top
# prediction). true_label is dropped so only the value columns are ranked.
ranks = (res
    .rank(numeric_only=True, ascending=False)
    .drop("true_label", axis=1)
    .rename(columns={
        "orig_value": "orig_rank",
        "matx_value": "matx_rank"
    })
)
ranks.head()
# Attach the ranks back onto the merged predictions by row index
fin = res.merge(ranks, left_index=True, right_index=True)
fin.head()
```
---
## Calculate the difference in rank for a prediction between the two methods
Calculate how the ranks differ between the two methods.
```
# Per-pair absolute rank difference between the two methods, and that
# difference expressed as a percentage of the total number of predictions.
fin = (fin
    .assign(
        rank_diff = lambda df: pd.Series.abs(df["orig_rank"] - df["matx_rank"])
    )
    .assign(
        diff_pct = lambda df: df["rank_diff"] / len(fin) * 100
    )
)
fin.head()
# Worst-case disagreement between the two rankings
fin["diff_pct"].max()
```
## How much variance in ranking is there?
```
# Distribution of rank differences, as a percent of total predictions
sns.boxplot(y=fin["diff_pct"])
```
## Does variance vary based on the true label?
```
# Compare the rank-difference distributions for positive vs negative labels
sns.violinplot(data=fin, x="true_label", y="diff_pct")
```
---
## Visual comparison of ranks
```
# Joint scatter of the two rank assignments; points near the y = x diagonal
# mean both methods rank a given chemical-disease pair similarly.
sns.jointplot(
    data=fin,
    x="orig_rank", y="matx_rank",
    height=9  # seaborn renamed `size` to `height` in v0.9; `size` is ignored/removed in later versions
)
```
For the most part this seems to show that the matrix method is comparable to the original neo4j method at ranking the predictions.
There don't seem to be any major deviations from the y=x line on the predictions. The medium amount of fuzziness is probably due to the fact that the matrix method cannot provide exact matches in DWPC values to the original neo4j method, and therefore this influences the final predicted value, which changes the calculated rank.
```
# Same rank-vs-rank comparison, split by true label: positives first,
# then negatives.
sns.jointplot(
    data=fin.query("true_label == 1"),
    x="orig_rank", y="matx_rank",
    height=9  # seaborn renamed `size` to `height` in v0.9
)
sns.jointplot(
    data=fin.query("true_label == 0"),
    x="orig_rank", y="matx_rank",
    height=9
)
```
No obvious outliers in the prediction ranks, which implies that the matrix method is generating similar results.
Next: look at the model being built, and the features being selected.
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3_ModelFitting/W1D3_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week 1, Day 3, Tutorial 3
# Model Fitting: Confidence intervals and bootstrapping
**Content creators**: Pierre-Étienne Fiquet, Anqi Wu, Alex Hyafil with help from Byron Galbraith
**Content reviewers**: Lina Teichmann, Saeed Salehi, Patrick Mineault, Ella Batty, Michael Waskom
#Tutorial Objectives
This is Tutorial 3 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).
In this tutorial, we will discuss how to gauge how good our estimated model parameters are.
- Learn how to use bootstrapping to generate new sample datasets
- Estimate our model parameter on these new sample datasets
- Quantify the variance of our estimate using confidence intervals
```
#@title Video 1: Confidence Intervals & Bootstrapping
from IPython.display import YouTubeVideo
# Embed the lecture video for this tutorial and print a fallback link
video = YouTubeVideo(id="hs6bVGQNSIs", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Up to this point we have been finding ways to estimate model parameters to fit some observed data. Our approach has been to optimize some criterion, either minimize the mean squared error or maximize the likelihood while using the entire dataset. How good is our estimate really? How confident are we that it will generalize to describe new data we haven't seen yet?
One solution to this is to just collect more data and check the MSE on this new dataset with the previously estimated parameters. However this is not always feasible and still leaves open the question of how quantifiably confident we are in the accuracy of our model.
In Section 1, we will explore how to implement bootstrapping. In Section 2, we will build confidence intervals of our estimates using the bootstrapping method.
---
# Setup
```
import numpy as np
import matplotlib.pyplot as plt
#@title Figure Settings
# Render figures at retina resolution and apply the NMA matplotlib style
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
#@title Helper Functions
def solve_normal_eqn(x, y):
  """Solve the normal equations to produce the value of theta_hat that
  minimizes MSE for the no-intercept linear model y = theta * x.

  Args:
    x (ndarray): An array of shape (samples,) that contains the input values.
    y (ndarray): An array of shape (samples,) that contains the corresponding
      measurement values to the inputs.

  Returns:
    float: the value for theta_hat arrived from minimizing MSE
  """
  # Closed-form least-squares solution for a single slope parameter:
  # theta_hat = (x . y) / (x . x)
  theta_hat = (x.T @ y) / (x.T @ x)
  return theta_hat
```
---
# Section 1: Bootstrapping
[Bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)) is a widely applicable method to assess confidence/uncertainty about estimated parameters, it was originally [proposed](https://projecteuclid.org/euclid.aos/1176344552) by [Bradley Efron](https://en.wikipedia.org/wiki/Bradley_Efron). The idea is to generate many new synthetic datasets from the initial true dataset by randomly sampling from it, then finding estimators for each one of these new datasets, and finally looking at the distribution of all these estimators to quantify our confidence.
Note that each new resampled dataset will be the same size as our original one, with the new data points sampled with replacement, i.e. we can repeat the same data point multiple times. Also note that in practice we need a lot of resampled datasets; here we use 2000.
To explore this idea, we will start again with our noisy samples along the line $y_n = 1.2x_n + \epsilon_n$, but this time only use half the data points as last time (15 instead of 30).
```
#@title
#@markdown Execute this cell to simulate some data
# setting a fixed seed to our random number generator ensures we will always
# get the same pseudorandom number sequence
np.random.seed(121)
# Let's set some parameters
theta = 1.2
n_samples = 15
# Draw x and then calculate y
x = 10 * np.random.rand(n_samples) # sample from a uniform distribution over [0,10)
noise = np.random.randn(n_samples) # sample from a standard normal distribution
y = theta * x + noise
fig, ax = plt.subplots()
ax.scatter(x, y) # produces a scatter plot
ax.set(xlabel='x', ylabel='y');
```
### Exercise 1: Resample Dataset with Replacement
In this exercise you will implement a method to resample a dataset with replacement. The method accepts $x$ and $y$ arrays. It should return a new set of $x'$ and $y'$ arrays that are created by randomly sampling from the originals.
We will then compare the original dataset to a resampled dataset.
TIP: The [numpy.random.choice](https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html) method would be useful here.
```
def resample_with_replacement(x, y):
  """Resample data points with replacement from the dataset of `x` inputs and
  `y` measurements.

  Args:
    x (ndarray): An array of shape (samples,) that contains the input values.
    y (ndarray): An array of shape (samples,) that contains the corresponding
      measurement values to the inputs.

  Returns:
    ndarray, ndarray: The newly resampled `x` and `y` data points.
  """
  #######################################################
  ## TODO for students: resample dataset with replacement
  # Fill out function and remove
  # NOTE: this raise is intentional exercise scaffolding; the solved
  # version appears in a later cell.
  raise NotImplementedError("Student exercise: resample dataset with replacement")
  #######################################################
  # Get array of indices for resampled points
  sample_idx = ...
  # Sample from x and y according to sample_idx
  x_ = ...
  y_ = ...
  return x_, y_
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
ax1.scatter(x, y)
ax1.set(title='Original', xlabel='x', ylabel='y')
# Uncomment below to test your function
#x_, y_ = resample_with_replacement(x, y)
#ax2.scatter(x_, y_, color='c')
ax2.set(title='Resampled', xlabel='x', ylabel='y',
        xlim=ax1.get_xlim(), ylim=ax1.get_ylim());
# to_remove solution
def resample_with_replacement(x, y):
  """Draw one bootstrap sample from the paired dataset (`x`, `y`).

  Args:
    x (ndarray): An array of shape (samples,) that contains the input values.
    y (ndarray): An array of shape (samples,) that contains the corresponding
      measurement values to the inputs.

  Returns:
    ndarray, ndarray: The newly resampled `x` and `y` data points.
  """
  # Pick len(x) indices uniformly at random, with replacement, then index
  # both arrays with the same draw so the (x, y) pairs stay matched.
  idx = np.random.choice(len(x), size=len(x), replace=True)
  return x[idx], y[idx]
# Show the original data next to one bootstrap resample, side by side
# (xkcd sketch style, as used throughout the NMA solution cells)
with plt.xkcd():
  fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
  ax1.scatter(x, y)
  ax1.set(title='Original', xlabel='x', ylabel='y')
  x_, y_ = resample_with_replacement(x, y)
  ax2.scatter(x_, y_, color='c')
  # Share axis limits so the two panels are directly comparable
  ax2.set(title='Resampled', xlabel='x', ylabel='y',
          xlim=ax1.get_xlim(), ylim=ax1.get_ylim());
```
In the resampled plot on the right, the actual number of points is the same, but some have been repeated so they only display once.
Now that we have a way to resample the data, we can use that in the full bootstrapping process.
### Exercise 2: Bootstrap Estimates
In this exercise you will implement a method to run the bootstrap process of generating a set of $\hat\theta$ values from a dataset of $x$ inputs and $y$ measurements. You should use `resample_with_replacement` here, and you may also invoke helper function `solve_normal_eqn` from Tutorial 1 to produce the MSE-based estimator.
We will then use this function to look at the theta_hat from different samples.
```
def bootstrap_estimates(x, y, n=2000):
  """Generate a set of theta_hat estimates using the bootstrap method.

  Args:
    x (ndarray): An array of shape (samples,) that contains the input values.
    y (ndarray): An array of shape (samples,) that contains the corresponding
      measurement values to the inputs.
    n (int): The number of estimates to compute

  Returns:
    ndarray: An array of estimated parameters with size (n,)
  """
  theta_hats = np.zeros(n)
  ##############################################################################
  ## TODO for students: implement bootstrap estimation
  # Fill out function and remove
  # NOTE: this raise is intentional exercise scaffolding; the solved
  # version appears in a later cell.
  raise NotImplementedError("Student exercise: implement bootstrap estimation")
  ##############################################################################
  # Loop over number of estimates
  for i in range(n):
    # Resample x and y
    x_, y_ = ...
    # Compute theta_hat for this sample
    theta_hats[i] = ...
  return theta_hats
np.random.seed(123) # set random seed for checking solutions
# Uncomment below to test function
# theta_hats = bootstrap_estimates(x, y, n=2000)
# print(theta_hats[0:5])
# to_remove solution
def bootstrap_estimates(x, y, n=2000):
  """Generate a set of theta_hat estimates using the bootstrap method.

  Args:
    x (ndarray): An array of shape (samples,) that contains the input values.
    y (ndarray): An array of shape (samples,) that contains the corresponding
      measurement values to the inputs.
    n (int): The number of estimates to compute

  Returns:
    ndarray: An array of estimated parameters with size (n,)
  """
  estimates = np.zeros(n)
  # One least-squares slope fit per bootstrap resample of the data
  for draw in range(n):
    xs, ys = resample_with_replacement(x, y)
    estimates[draw] = solve_normal_eqn(xs, ys)
  return estimates
np.random.seed(123) # set random seed for checking solutions
# 2000 bootstrap slope estimates; first five should match the values
# quoted in the text below this cell
theta_hats = bootstrap_estimates(x, y, n=2000)
print(theta_hats[0:5])
```
You should see `[1.27550888 1.17317819 1.18198819 1.25329255 1.20714664]` as the first five estimates.
Now that we have our bootstrap estimates, we can visualize all the potential models (models computed with different resampling) together to see how distributed they are.
```
#@title
#@markdown Execute this cell to visualize all potential models
fig, ax = plt.subplots()
# For each theta_hat, plot model
theta_hats = bootstrap_estimates(x, y, n=2000)
# alpha=0.01 so 2000 overlaid lines render as a translucent band
for i, theta_hat in enumerate(theta_hats):
  y_hat = theta_hat * x
  ax.plot(x, y_hat, c='r', alpha=0.01, label='Resampled Fits' if i==0 else '')
# Plot observed data
ax.scatter(x, y, label='Observed')
# Plot true fit data (theta is the known ground-truth slope)
y_true = theta * x
ax.plot(x, y_true, 'g', linewidth=2, label='True Model')
ax.set(
  title='Bootstrapped Slope Estimation',
  xlabel='x',
  ylabel='y'
)
# Change legend line alpha property so the legend entry is visible
handles, labels = ax.get_legend_handles_labels()
handles[0].set_alpha(1)
ax.legend();
```
This looks pretty good! The bootstrapped estimates spread around the true model, as we would have hoped. Note that here we have the luxury to know the ground truth value for $\theta$, but in applications we are trying to guess it from data. Therefore, assessing the quality of estimates based on finite data is a task of fundamental importance in data analysis.
---
# Section 2: Confidence Intervals
Let us now quantify how uncertain our estimated slope is. We do so by computing [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval) (CIs) from our bootstrapped estimates. The most direct approach is to compute percentiles from the empirical distribution of bootstrapped estimates. Note that this is widely applicable as we are not assuming that this empirical distribution is Gaussian.
```
#@title
#@markdown Execute this cell to plot bootstrapped CI
theta_hats = bootstrap_estimates(x, y, n=2000)
print(f"mean = {np.mean(theta_hats):.2f}, std = {np.std(theta_hats):.2f}")
fig, ax = plt.subplots()
# Empirical distribution of the bootstrapped slope estimates
ax.hist(theta_hats, bins=20, facecolor='C1', alpha=0.75)
ax.axvline(theta, c='g', label=r'True $\theta$')
ax.axvline(np.percentile(theta_hats, 50), color='r', label='Median')
# 95% CI: 2.5th and 97.5th percentiles of the bootstrap distribution
ax.axvline(np.percentile(theta_hats, 2.5), color='b', label='95% CI')
ax.axvline(np.percentile(theta_hats, 97.5), color='b')
ax.legend()
ax.set(
  title='Bootstrapped Confidence Interval',
  xlabel=r'$\hat{{\theta}}$',
  ylabel='count',
  xlim=[1.0, 1.5]
);
```
Looking at the distribution of bootstrapped $\hat{\theta}$ values, we see that the true $\theta$ falls well within the 95% confidence interval, which is reassuring. We also see that the value $\theta = 1$ does not fall within the confidence interval. From this we would reject the hypothesis that the slope was 1.
---
# Summary
- Bootstrapping is a resampling procedure that allows to build confidence intervals around inferred parameter values
- It is a widely applicable and very practical method that relies on computational power and pseudo-random number generators (as opposed to more classical approaches that depend on analytical derivations)
**Suggested readings**
Computer Age Statistical Inference: Algorithms, Evidence and Data Science, by Bradley Efron and Trevor Hastie
| github_jupyter |
# Lab Three - Clustering
Team Members
* Chance Robinson
* Dan Crouthamel
* Shane Weinstock
# Business Understanding 1
_Describe the purpose of the data set you selected (i.e., why was this data collected in the first place?). How will you measure the effectiveness of a good algorithm? Why does your chosen validation method make sense for this specific dataset and the stakeholders needs?_
```
# Base Imports
import pandas as pd
import numpy as np
import time
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
import seaborn as sns; sns.set()
%matplotlib inline
# Pre-Processing
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
# Metrics and Evaluation
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import plot_roc_curve
# Train/ Test Split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
# Imbalanced Data
# from imblearn.over_sampling import SMOTE
# from imblearn.over_sampling import BorderlineSMOTE
# from imblearn.pipeline import make_pipeline, Pipeline
# Estimators
# from sklearn.naive_bayes import MultinomialNB
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
# from sklearn.tree import DecisionTreeClassifier
# Hyper Parameter Tuning
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
# T-Tests
# from mlxtend.evaluate import paired_ttest_5x2cv
# Machine Learning Visualizations
# from yellowbrick.classifier import ROCAUC
# from yellowbrick.classifier import PrecisionRecallCurve
# from yellowbrick.classifier import ClassificationReport
# Use ggplot styling for all subsequent matplotlib figures
plt.style.use("ggplot")
```
# Data Understanding 1
_Describe the meaning and type of data (scale, values, etc.) for each attribute in the data file. Verify data quality: Are there missing values? Duplicate data? Outliers? Are those mistakes? How do you deal with these problems?_
## Load Data
```
# Load the cardiovascular-disease dataset (semicolon-delimited CSV)
df = pd.read_csv('../../../../../../data/cardio_train.csv', delimiter=';')
# set id as index
df.set_index("id", inplace=True)
# copy original data so the raw frame stays untouched
df_clean = df.copy(deep=True)
# drop duplicates
df_clean.drop_duplicates(inplace=True)
```
## Apply Transformations
```
# %%time
# re-encode gender to male (1) and female (0)
# (assumes the source coding is 1 = female, 2 = male -- TODO confirm)
df_clean['gender'] = np.where((df_clean.gender == 2), 1, 0)
# Cap implausible blood-pressure readings by replacing them with the
# stated column medians (data-entry outliers, e.g. negative ap_lo).
# If > 200, replace with ap_hi median (120)
# If < 80 (median for ap_lo), replace with ap_hi median (120)
df_clean['ap_hi'] = np.where(df_clean['ap_hi'] > 200, 120, df_clean['ap_hi'])
df_clean['ap_hi'] = np.where(df_clean['ap_hi'] < 80, 120, df_clean['ap_hi'])
# If > 120 (median for hi), replace with ap_lo median (80)
# If < 0 replace with ap_lo median (80)
df_clean['ap_lo'] = np.where(df_clean['ap_lo'] > 120, 80, df_clean['ap_lo'])
df_clean['ap_lo'] = np.where(df_clean['ap_lo'] < 0, 80, df_clean['ap_lo'])
```
**Table 1: Cardiovascular Dataset - Attribute Descriptions**
| Column Description | Feature Type | Column Name | Data Type |
|:---|:---|:---|:---|
| **Age** | Objective | age | int (days) |
| **Height** | Objective | height | int (cm) |
| **Weight** | Objective | weight | float (kg) |
| **Gender** | Objective | gender | 0: female, 1: male |
| **Systolic blood pressure** | Examination | ap_hi | int |
| **Diastolic blood pressure** | Examination | ap_lo | int |
| **Cholesterol** | Examination | cholesterol | 1: normal, 2: above normal, 3: well above normal |
| **Glucose** | Examination | gluc | 1: normal, 2: above normal, 3: well above normal |
| **Smoking** | Subjective | smoke | binary |
| **Alcohol intake** | Subjective | alco | binary |
| **Physical activity** | Subjective | active | binary |
| **Has CVD?** | Target * | cardio | binary |
```
# Summary statistics of the cleaned dataset
df_clean.describe()
```
# Data Understanding 2
_Visualize the any important attributes appropriately. Important: Provide an interpretation for any charts or graphs._
```
# Pairwise correlations among the body-measurement features
corr_features = ['height', 'weight', 'ap_hi', 'ap_lo',]
plt.figure(figsize=(8,6))
# Use an easier to see colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Mask
correlation = df_clean[corr_features].corr()
# correlation[np.abs(correlation)<.2] = 0
sns.heatmap(correlation, annot = True, cmap=cmap).set(title = 'Correlation Heatmap')
plt.show()
```
### Baseline Classification Performance
```
from sklearn.model_selection import StratifiedKFold, cross_val_score
numeric_features = ['age', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'gluc']
categorical_features = ['gender', 'smoke', 'alco', 'active']
# Impute Numeric Features with the mean value
# One Hot Encode Categorical Features
# Robust Scaler
from sklearn.preprocessing import RobustScaler
rs = RobustScaler()
# NOTE(review): the scaler is fit on the full dataset before
# cross-validation, which leaks information across folds; fitting it
# inside a Pipeline per fold would be cleaner -- confirm intent.
df_clean[["age", "height", "weight", "ap_hi", "ap_lo"]] = rs.fit_transform(df_clean[["age", "height", "weight", "ap_hi", "ap_lo"]])
X_cols = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'gluc', 'smoke', 'alco', 'active']
y = df_clean['cardio']
X = df_clean[X_cols]
# Baseline: 10-fold stratified CV of an L2 logistic regression, ROC AUC
cv = StratifiedKFold(n_splits=10)
clf_logreg = LogisticRegression(random_state=1, penalty='l2', C=.01)
roc = cross_val_score(clf_logreg, X, y=y, cv=cv, scoring='roc_auc')
print ("Average ROC (AUC) = ", roc.mean()*100, "+-", roc.std()*100)
```
# Modeling and Evaluation 1
_Train and adjust parameters_
```
from kneed import KneeLocator
# NOTE(review): the elbow-detection and scatter code below is entirely
# commented out; `sse`, `X2`, and `new_feature` would need to be defined
# before uncommenting.
# kl = KneeLocator(range(1, 11), sse, curve="convex", direction="decreasing")
# n_clusters = kl.elbow
# n_clusters
# plt.figure()
# # plt.subplot(1,2,1)
# X2=X2.values
# plt.scatter(X2[:, 0], X2[:, 1]+np.random.random(X2[:, 1].shape)/2, c=new_feature, cmap=plt.cm.rainbow, s=20, linewidths=0)
# plt.xlabel('ap_hi (normalized)'), plt.ylabel('ap_lo (normalized)')
# plt.grid()
```
## Spectral Clustering
```
%%time
# an example using SpectralClustering, which clusters via a similarity
# graph built over the samples (here: a nearest-neighbors graph)
from sklearn.cluster import SpectralClustering
X_cols = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'gluc', 'smoke', 'alco', 'active']
y = df_clean['cardio']
X = df_clean[X_cols]
# Cluster on half the data to keep the affinity graph tractable
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.50, random_state=1)
# Cluster only on the two blood-pressure features; keep the rest for the classifier
X2 = X_train[['ap_hi','ap_lo']]
y = y_train
X = X_train[['age', 'gender', 'height', 'weight', 'cholesterol', 'gluc', 'smoke', 'alco', 'active']]
nclust = 3
# If a string, this may be one of
# ‘nearest_neighbors’, ‘precomputed’, ‘rbf’
# or one of the kernels supported by sklearn.metrics.pairwise_kernels
spc = SpectralClustering(n_clusters=nclust, affinity='nearest_neighbors', n_jobs=-1, random_state=1, assign_labels="kmeans")
labels = spc.fit_predict(X2)
# Append one-hot encoded cluster labels as extra model features
X = np.column_stack((X, pd.get_dummies(labels)))
# cv and clf_logreg are defined in the baseline cell above
roc = cross_val_score(clf_logreg, X, y=y, cv=cv, scoring='roc_auc')
print ("Average ROC (AUC) = ", roc.mean()*100, "+-", roc.std()*100)
# plt.scatter(X2[:, 0], X2[:, 1], c=labels,
#             cmap=plt.cm.rainbow, s=5, linewidths=0)
# plt.show()
```
# Modeling and Evaluation 2
_Evaluate and Compare_
# Modeling and Evaluation 3
_Visualize Results_
# Modeling and Evaluation 4
_Summarize the Ramifications_
# Deployment
_Be critical of your performance and tell the reader how your current model might be usable by other parties. Did you achieve your goals? If not, can you reign in the utility of your modeling? How useful is your model for interested parties (i.e., the companies or organizations that might want to use it)? How would you deploy your model for interested parties? What other data should be collected? How often would the model need to be updated, etc.?_
# Exceptional Work
_You have free reign to provide additional analyses or combine analyses._
```
X_cols = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'gluc', 'smoke', 'alco', 'active']
y = df_clean['cardio']
X = df_clean[X_cols]
clf_logreg = LogisticRegression(random_state=1)
pipe_logreg = Pipeline([['clf', clf_logreg]])
# NOTE(review): every entry in model_params is commented out, so the dict
# is empty and the search loop below never executes. Uncomment an entry
# to actually run the randomized search.
model_params = {
#     "logisticregression": {
#         "model": pipe_logreg,
#         "params": {
#             "clf__C": [.01, .1, 1, 5, 10, 25, 50],
#             "clf__penalty": ["l1", "l2"]
#         }
#     }
}
scores = []
for model_name, mp in model_params.items():
    start = time.time()
#     clf = GridSearchCV(estimator = mp["model"], param_grid=mp["params"], cv=10, scoring="roc_auc", n_jobs=-1)
    clf = RandomizedSearchCV(estimator = mp["model"], param_distributions=mp["params"], cv=10, scoring="roc_auc", n_jobs=-1)
    clf.fit(X, y)
    elapsed_time = (time.time() - start)
    scores.append({"Model": model_name,
                   "Best ROC AUC": clf.best_score_, # Mean cross-validated score of the best_estimator
                   "Best Params": clf.best_params_,
                   "results": clf.cv_results_,
                   "Cross Validation Time": elapsed_time,
                   "Best Estimator": clf.best_estimator_
                  })
print('10 Fold Cross Validation Scores (CVD):')
for model in scores:
    print()
    for key, value in model.items():
        if key == 'Best Estimator':
            # NOTE(review): this scores the refit estimator on the full
            # training data, so it is not a held-out accuracy estimate
            print("Prediction Accuracy",': ',value.score(X, y))
        elif key == 'results':
            print('Mean Fit Time: ', value['mean_fit_time'].mean())
            print('Mean Score Time: ', value['mean_score_time'].mean())
        else:
            print(key,': ',value)
```
| github_jupyter |
## Importing and prepping data
```
import pandas as pd
import numpy as np
import diff_classifier.aws as aws
import diff_classifier.pca as pca
import os
# Configuration for loading the per-video trajectory feature files.
features = []
remote_folder = 'Gel_studies' #Folder in AWS S3 containing files to be analyzed
bucket = 'dtoghani.data'
vids = 10
mws = ['5k_PEG', 'PS_COOH', '5k_PEG_NH2', 'PS_NH2']
# Non-numeric / metadata columns to exclude from the PCA later on
nonnum = ['Particle Type', 'Video Number', 'Track_ID', 'Deff2',
          'Mean Mean_Intensity', 'Std Mean_Intensity',
          'X', 'Y', 'Mean X', 'Mean Y', 'Std X', 'Std Y']
calcs = [2]
counter = 0
# Concatenate every available features CSV into one combined DataFrame,
# tagging each row with its particle type and video number.
for calc in calcs:
    for mw in mws:
        for num in range(1, vids+1):
            try:
                filename = 'features_{}_{}mM_XY{}.csv'.format(mw, calc, '%02d' % num)
                #os.remove(filename)
                #aws.download_s3('{}/{}'.format(remote_folder, filename), filename, bucket_name=bucket)
                fstats = pd.read_csv(filename, encoding = "ISO-8859-1", index_col='Unnamed: 0')
                fstats['Particle Type'] = pd.Series(fstats.shape[0]*[mw], index=fstats.index)
                fstats['Video Number'] = pd.Series(fstats.shape[0]*[num], index=fstats.index)
                print(filename)
                counter = counter + 1
                if counter == 1:
                    fstats_tot = fstats
                else:
                    # pd.concat replaces the deprecated DataFrame.append
                    fstats_tot = pd.concat([fstats_tot, fstats], ignore_index=True)
            except Exception:
                # Best-effort load: skip files that are missing or unreadable
                print('skip filename: {}'.format(filename))
# Fix: the combined frame is `fstats_tot`; `fstats_new` was never defined
# in this notebook and would raise a NameError here.
fstats_tot.to_csv('features.csv')
fstats_tot.shape
#PCA analyses with too many datapoints fail. You get rows with lots of NAs. I'm going to try making a subset of the data first
#and then do a PCA analysis on that.
#include all in analysis
import random
# Fix: sample from the combined frame `fstats_tot`; `fstats_new` is not
# defined in this notebook and would raise a NameError.
# Sorted positional sample of 500k rows, without replacement.
subset = np.sort(np.array(random.sample(range(fstats_tot.shape[0]), 500000)))
fstats_sub = fstats_tot.loc[subset, :].reset_index(drop=True)
#with equal sample sizes for each particle type
# Draw a balanced subsample: 11000 randomly chosen rows per particle type,
# concatenated into fstats_sub.
import random
counter = 0
#mws = ['10k_PEG', '5k_PEG', '1k_PEG', 'PS_COOH']
for mw in mws:
    fstats_type = fstats_tot[fstats_tot['Particle Type']==mw].reset_index(drop=True)
    print(fstats_type.shape)
    subset = np.sort(np.array(random.sample(range(fstats_type.shape[0]), 11000)))
    if counter == 0:
        fstats_sub = fstats_type.loc[subset, :].reset_index(drop=True)
    else:
        fstats_sub = fstats_sub.append(fstats_type.loc[subset, :].reset_index(drop=True), ignore_index=True)
    counter = counter + 1
# Report how many rows each particle type contributed overall
for mw in mws:
    print(fstats_tot[fstats_tot['Particle Type'] == mw].shape)
#fstats = pd.read_csv(filename, encoding = "ISO-8859-1", index_col='Unnamed: 0')
# Restrict to the particle types of interest and drop metadata columns so
# only numeric trajectory features remain for PCA.
fstats_totMW = fstats_sub[fstats_sub['Particle Type'].isin(mws)].reset_index(drop=True)
#nonnum = ['Particle Type', 'Video Number', 'Track_ID', 'Calcium Concentration', 'Deff2']
fstats_num = fstats_totMW.drop(nonnum, axis=1)
fstats_raw = fstats_num.values
#fstats
```
## PCA analysis
The pca.pca_analysis function provides a completely contained PCA analysis of the input trajectory features dataset. It includes options to impute NaN values (fill in with average values or drop them), and to scale features. Read the docstring for more information.
```
# Run the self-contained PCA pipeline (imputation + scaling handled
# internally) and keep the first `ncomp` principal components.
ncomp = 10
pcadataset = pca.pca_analysis(fstats_totMW, dropcols=nonnum, n_components=ncomp)
```
The pca.kmo function calculates the Kaiser-Meyer-Olkin statistic, a measure of sampling adequacy. Check the docstring for more information.
```
# Kaiser-Meyer-Olkin sampling-adequacy statistic on the scaled features
kmostat = pca.kmo(pcadataset.scaled)
```
## Visualization
Users can then compare average principle component values between subgroups of the data. In this case, all particles were taken from the same sample, so there are no experimental subgroups. I chose to compare short trajectories to long trajectories, as I would expect differences between the two groups.
```
import numpy as np
#ncomp = 10
# Mean score for each of the last `ncomp` principal-component columns,
# computed separately per particle type, for the radar-style comparison plot.
dicti = {}
#test = np.exp(np.nanmean(np.log(pcadataset.final[pcadataset.final['Particle Size']==200].as_matrix()), axis=0))[-6:]
#test1 = np.exp(np.nanmean(np.log(pcadataset.final[pcadataset.final['Particle Size']==500].as_matrix()), axis=0))[-6:]
dicti[0] = np.nanmean(pcadataset.final[pcadataset.final['Particle Type']=='5k_PEG'].values[:, -ncomp:], axis=0)
dicti[1] = np.nanmean(pcadataset.final[pcadataset.final['Particle Type']=='PS_COOH'].values[:, -ncomp:], axis=0)
dicti[2] = np.nanmean(pcadataset.final[pcadataset.final['Particle Type']=='5k_PEG_NH2'].values[:, -ncomp:], axis=0)
dicti[3] = np.nanmean(pcadataset.final[pcadataset.final['Particle Type']=='PS_NH2'].values[:, -ncomp:], axis=0)
dicti[3]
labels = mws
pca.plot_pca(dicti, savefig=True, labels=labels, rticks=np.linspace(-5, 5, 11))
```
The variable pcadataset.prcomps shows the user the major contributions to each of the new principle components. When observing the graph above, users can see that there are some differences between short trajectories and long trajectories in component 0 (asymmetry1 being the major contributor) and component 1 (elongation being the major contributor).
```
# Feature loadings of each principal component
pcadataset.prcomps
# Per-type distributions and 2D/3D scatter of the top components
feats = pca.feature_violin(pcadataset.final, label='Particle Type', lvals=labels, fsubset=ncomp, yrange=[-12, 12])
fstats1 = pca.feature_plot_2D(pcadataset.final, label='Particle Type', lvals=labels, randcount=400, yrange=[-6, 6],
                              xrange=[-4, 4])
fstats1 = pca.feature_plot_3D(pcadataset.final, label='Particle Type', lvals=labels, randcount=400, ylim=[-12, 12],
                              xlim=[-12, 12], zlim=[-12, 12], features=[0, 1, 3])
#ncomp = 12
# Train an MLP classifier 20 times on balanced subsamples; record accuracy
# on the training subsample (trainp) and on the full dataset (testp).
trainp = np.array([])
testp = np.array([])
for i in range(0, 20):
    KNNmod, X, y = pca.build_model(pcadataset.final, 'Particle Type', labels, equal_sampling=True,
                                   tsize=400, input_cols=ncomp, model='MLP', NNhidden_layer=(6, 2))
    trainp = np.append(trainp, pca.predict_model(KNNmod, X, y))
    X2 = pcadataset.final.values[:, -ncomp:]
    y2 = pcadataset.final['Particle Type'].values
    testp = np.append(testp, pca.predict_model(KNNmod, X2, y2))
    print('Run {}: {}'.format(i, testp[i]))
print('{} +/ {}'.format(np.mean(trainp), np.std(trainp)))
print('{} +/ {}'.format(np.mean(testp), np.std(testp)))
# NOTE(review): the three bare expressions below look like leftover
# scratch; `fstats_new` and `bitesize` are not defined in this notebook
# and will raise NameError if executed -- confirm before rerunning.
subset
fstats_new
bitesize['Particle Type']
# Persist the component loadings for later reference
pcadataset.components.to_csv('components.csv')
pcadataset.prcomps
```
| github_jupyter |
```
#this allows plots to be displayed inline with the notebook
%matplotlib inline
```
Generally, you want to put your import statements at the top of the code, whether in notebooks or code files.
These first two import statements bring in the matplotlib plotting library and the numpy library, two core components of the "Scipy Stack", using a common convention among scientific Python developers (numpy as np, pyplot as plt).
```
import matplotlib.pyplot as plt
import numpy as np
```
These import statements bring in the PAGER models (population growth, exposure, fatality, etc.)
```
from losspager.models.emploss import LognormalModel,EmpiricalLoss
from losspager.models.growth import PopulationGrowth
from losspager.models.exposure import Exposure
from losspager.models.econexposure import EconExposure
```
These imports bring in other useful modules in the Python standard library, and other oddments.
```
import os.path
import time
from mpl_toolkits.axes_grid1 import make_axes_locatable
import fiona
```
LognormalModel objects contain properties and methods for the lognormal fatality models. This object contains a number of methods to calculate losses (fatalities and economic losses), calculate the loss rates for given MMI values, and even override the lognormal model with arbitrary rates.
To construct a LognormalModel, you provide it with a name (usually two letter country code) theta, beta, L2G, and (for economic losses) alpha values. Here we're ignoring the L2G values and simply assigning zero, as these are not used in loss calculations.
Fatalities
----------
```
# Construct per-country lognormal fatality models: LognormalModel(name, theta,
# beta, l2g).  The L2G value is set to 0.0 here because it is not used in loss
# calculations (see the explanation above).
iran = LognormalModel('IR',9.318099,0.100001,0.0)
california = LognormalModel('XF',37.729406,0.360337,0.0)
afghanistan = LognormalModel('AF',11.613073,0.180683,0.0)
china = LognormalModel('CN',10.328811,0.100058,0.0)
japan = LognormalModel('JP',11.862534,0.100779,0.0)
```
A lognormal fatality model "knows" how deadly it is (by calculating the area under the curve defined by MMI values from 5-9 and the fatality rates.) This allows the user to compare two models to each other, and even sort a list of them.
```
# Models are comparable/sortable by their "deadliness" (area under the rate
# curve over MMI 5-9), so `>` and list.sort() order them by vulnerability.
print('Iran is more deadly than California: %s\n' % (iran > california))
mlist = [iran,california,afghanistan,china,japan]
mlist.sort()
print('Sorted list of country models:')
print('%5s %6s %6s %-6s %14s' % ('Name','Theta','Beta','Area','Deaths'))
for model in mlist:
    # Hypothetical exposure: one million people at each MMI level 5 through 9.
    exp_pop = np.array([1e6,1e6,1e6,1e6,1e6])
    mmirange = np.arange(5,10)
    deaths = model.getLosses(exp_pop,mmirange)
    print('%5s %6.3f %6.3f %6.4f %14.4f' % (model.name,model.theta,model.beta,model.getArea(),deaths))
```
You can plot the fatality rates of each of these models:
```
# Plot each country's fatality rate vs. MMI on a log-scaled y-axis so the
# orders-of-magnitude differences between countries are visible.
mmirange = np.arange(5,10)
f = plt.figure(figsize=(8,6))
colors = ['k','b','r','c','m']
for i in range(0,len(mlist)):
    rates = mlist[i].getLossRates(mmirange)
    plt.semilogy(mmirange,rates,colors[i],lw=2);
names = [m.name for m in mlist]
plt.legend(names,loc='lower right',numpoints=2);
plt.xlabel('MMI');
plt.ylabel('Loss Ratio');
```
You can see the per-MMI fatality rates by simply printing the LognormalModel object:
```
# Print each model's per-MMI fatality rates via its __str__ representation.
print('California Model:\n%s' % california)
# Fix: the original printed `japan` under a (misspelled) "Afhanistan" label.
print('Afghanistan Model:\n%s' % afghanistan)
```
You can obtain the name, theta/beta/L2G values as properties from the LognormalModel object:
```
# The model's fit parameters are exposed as plain read-only properties.
print('California:')
print('\tName %s' % california.name)
print('\tTheta %f' % california.theta)
print('\tBeta %f' % california.beta)
print('\tL2G %f' % california.l2g)
```
Up to this point, we've defined each model manually. We have all of these models for PAGER in one XML data file, which is included in this code repository. The EmpiricalLoss() class exists to handle large numbers of LognormalModel() objects, and has a method loadFromXML() to read in the XML file and create an EmpiricalLoss() instance.
```
# Load the full set of per-country fatality models from the repository's XML
# file; the path is relative to the current working directory.
xmlfile = os.path.join(os.getcwd(),'..','test','data','fatality.xml')
empfat = EmpiricalLoss.loadFromXML(xmlfile)
```
You can retrieve LognormalModel() objects from EmpiricalLoss() objects using the getModel() method.
```
# Look up one country's LognormalModel by its two-letter code; the bare
# expression displays the model in the notebook output.
chile = empfat.getModel('CL')
chile
```
And this is what PAGER will do...
(Example: Northridge)
```
# End-to-end PAGER fatality calculation for the 1994 Northridge event:
# population growth rates + population grid + country grid + ShakeMap -> per-
# country exposure -> per-country fatalities.
t1 = time.time()
growthfile = os.path.join(os.getcwd(),'..','test','data','WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')
popgrowth = PopulationGrowth.loadFromUNSpreadsheet(growthfile)
sampledir = os.path.join(os.getcwd(),'..','test','data','eventdata','northridge')
popfile = os.path.join(sampledir,'northridge_gpw.flt')
isofile = os.path.join(sampledir,'northridge_isogrid.bil')
shakefile = os.path.join(sampledir,'northridge_grid.xml')
# 2012 is the base year of the population grid being projected.
expmodel = Exposure(popfile,2012,isofile,popgrowth)
expdict = expmodel.calcExposure(shakefile)
# expdict maps country code -> per-MMI exposed population (index i == MMI i+1).
for key,exp_pop in expdict.items():
    print('Exposure for %s' % key)
    for i in range(0,len(exp_pop)):
        mmi = i+1
        print('\tMMI %i: %s' % (mmi,format(int(exp_pop[i]),',d')))
#call fatality model
fat_results = empfat.getLosses(expdict)
for key,value in fat_results.items():
    print('\nFatalities for %s: %i' % (key,value))
t2 = time.time()
print('\nTotal elapsed time for loss calculations: %.2f seconds' % (t2-t1))
```
In addition, the fatality module provides the ability to make a gridded fatality map. Note that this map may not be a reliable way to determine the distribution of fatalities, particularly as you zoom in.
```
# Build a gridded fatality map from the MMI, population, and country grids,
# then display a zoomed view with the color scale capped at mean + 3*std so a
# few extreme cells do not wash out the rest of the image.
mmidata = expmodel.getShakeGrid().getLayer('mmi').getData()
popdata = expmodel.getPopulationGrid().getData()
isodata = expmodel.getCountryGrid().getData()
fatgrid = empfat.getLossGrid(mmidata,popdata,isodata)
f = plt.figure(figsize=(8,8))
dmin = np.nanmin(fatgrid)
dmax = np.nanmax(fatgrid)
dmean = np.nanmean(fatgrid)
dstd = np.nanstd(fatgrid)
#Here we're zooming in on the affected regions...
# NOTE(review): the row/column window below is hard-coded for this grid.
plt.imshow(fatgrid[125:250,125:300],vmin=dmin,vmax=dmean+(3*dstd));
ax = plt.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(cax=cax);
```
We can also split up the fatalities from the above grid into input polygons. Here we take a shapefile of Los Angeles County city boundaries, and use it to divide up the fatalities.
```
# Load LA County city-boundary polygons and apportion the gridded fatalities
# among them, printing every city with a non-zero estimate.
shapefile = os.path.join(sampledir,'City_BoundariesWGS84','City_Boundaries.shp')
popdict = expmodel.getPopulationGrid().getGeoDict()
# Use fiona's context manager so the shapefile handle is closed even if reading
# fails (the original open/close pair also shadowed the matplotlib figure `f`).
with fiona.open(shapefile, 'r') as shp:
    shapes = list(shp)
fatshapes,totfat = empfat.getLossByShapes(mmidata,popdata,isodata,shapes,popdict)
for shape in fatshapes:
    if shape['properties']['fatalities'] > 0:
        cname = shape['properties']['CITY_NAME']
        deaths = shape['properties']['fatalities']
        print('%s: %i fatalities' % (cname,deaths))
```
With all the fatality models at our disposal, it's easy to see what fatalities Northridge exposures would bring in other countries:
```
# Apply every country's fatality model to the California ('XF') exposure.
for name in names:
    fmodel = empfat.getModel(name)
    # np.copy avoids mutating expdict: a bare numpy slice is a *view*, so the
    # in-place += below would otherwise re-add the MMI-10 population to the
    # shared exposure array on every pass through this loop, inflating the
    # numbers for each successive country.
    exp_pop = np.copy(expdict['XF'][4:9])
    exp_pop[-1] += expdict['XF'][-1] #MMI 10 is folded into MMI 9 for loss modeling
    deaths = fmodel.getLosses(exp_pop,np.arange(5,10))
    dstr = format(int(deaths),",d")
    print('%s fatalities from Northridge exposure: %8s' % (name,dstr))
```
We can also manually override the rates for a model with a custom rates array, which need not be lognormal in form. Here we are verifying that doubling the default rates doubles the fatalities.
```
#Testing modifying rates and stuffing them back in...
# Sanity check: doubling the per-MMI rates should double the fatalities.
chile = LognormalModel('CL',19.786773,0.259531,0.0)
rates = chile.getLossRates(np.arange(5,10))
modrates = rates * 2 #does this make event twice as deadly?
#roughly the exposures from 2015-9-16 CL event
# Index i holds the population exposed at MMI i+1 (10 bins, MMI 1-10).
expo_pop = np.array([0,0,0,1047000,7314000,1789000,699000,158000,0,0])
mmirange = np.arange(5,10)
chile_deaths = chile.getLosses(expo_pop[4:9],mmirange)
# The `rates` keyword overrides the model's lognormal rates with custom ones.
chile_double_deaths = chile.getLosses(expo_pop[4:9],mmirange,rates=modrates)
print('Chile model fatalities: %f' % chile_deaths)
print('Chile model x2 fatalities: %f' % chile_double_deaths)
```
Economic Losses
---------------
All of the above fatality methods apply in a very similar way to economic losses. The difference between calculating fatalities and dollars lost is by use of a subclass of the Exposure() class, EconExposure().
```
# Economic-loss models reuse LognormalModel with an extra `alpha` (exposure
# correction factor) keyword; positional args are (name, theta, beta, l2g).
# These rebind the names used earlier for the fatality models.
iran = LognormalModel('IR',9.483180,0.100000,7.949160,alpha=15.614500)
california = LognormalModel('XF',9.592240,0.100117,9.753500,alpha=14.433700)
afghanistan = LognormalModel('AF',9.013810,0.100000,4.113200,alpha=15.065400)
china = LognormalModel('CN',7.511120,0.100328,9.340890,alpha=9.794960)
japan = LognormalModel('JP',10.290800,0.100015,10.068600,alpha=13.389900)
```
Again, we can sort the country models by vulnerability.
```
# Sort the economic-loss models by vulnerability and tabulate the dollar loss
# each would produce for one million people exposed at every MMI level 5-9.
mlist = [iran, california, afghanistan, china, japan]
mlist.sort()
print('Sorted list of country models:')
print('%5s %6s %6s %-6s %14s' % ('Name','Theta','Beta','Area','Dollars'))
# The exposure and MMI range are the same for every model, so build them once.
exposure = np.array([1e6] * 5)
mmi_bins = np.arange(5, 10)
for model in mlist:
    dollars = model.getLosses(exposure, mmi_bins)
    print('%5s %6.3f %6.3f %6.4f %14.4f' % (model.name, model.theta, model.beta, model.getArea(), dollars))
```
Again, we can plot the loss rates for each of these countries against each other.
```
# Plot each country's economic loss ratio vs. MMI on a log-scaled y-axis.
mmirange = np.arange(5,10)
f = plt.figure(figsize=(8,6))
colors = ['k','b','r','c','m']
for i in range(0,len(mlist)):
    rates = mlist[i].getLossRates(mmirange)
    plt.semilogy(mmirange,rates,colors[i],lw=2);
names = [m.name for m in mlist]
plt.legend(names,loc='lower right',numpoints=2);
plt.xlabel('MMI');
plt.ylabel('Loss Ratio');
```
Print the properties of the California economic loss model (now includes alpha).
```
# Same properties as the fatality model, plus `alpha` for economic models.
print('California:')
print('\tName %s' % california.name)
print('\tTheta %f' % california.theta)
print('\tBeta %f' % california.beta)
print('\tAlpha %f' % california.alpha)
print('\tL2G %f' % california.l2g)
```
We can provide an XML file with economic models defined in it - the format is very similar to the fatality XML file.
```
# The same EmpiricalLoss loader handles the economic-model XML file.
xmlfile = os.path.join(os.getcwd(),'..','test','data','economy.xml')
empeco = EmpiricalLoss.loadFromXML(xmlfile)
```
We can print out the rates of an economic model as well - recall that these rates include GDP and alpha, an exposure correction factor.
```
# Retrieve and display Chile's economic-loss model (rebinds `chile` from the
# fatality section above).
chile = empeco.getModel('CL')
chile
```
And again, here is how PAGER would calculate dollar losses:
```
# End-to-end PAGER economic-loss calculation for Northridge: EconExposure adds
# a GDP spreadsheet and the economy XML to the inputs used for fatalities.
t1 = time.time()
growthfile = os.path.join(os.getcwd(),'..','test','data','WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')
popgrowth = PopulationGrowth.loadFromUNSpreadsheet(growthfile)
sampledir = os.path.join(os.getcwd(),'..','test','data','eventdata','northridge')
popfile = os.path.join(sampledir,'northridge_gpw.flt')
isofile = os.path.join(sampledir,'northridge_isogrid.bil')
gdpfile = os.path.join(os.getcwd(),'..','test','data','API_NY.GDP.PCAP.CD_DS2_en_excel_v2.xls')
xmlfile = os.path.join(os.getcwd(),'..','test','data','economy.xml')
shakefile = os.path.join(sampledir,'northridge_grid.xml')
expmodel = EconExposure(popfile,2012,isofile,popgrowth,gdpfile,xmlfile)
expdict = expmodel.calcExposure(shakefile)
for key,exp_pop in expdict.items():
    print('Economic Exposure for %s' % key)
    for i in range(0,len(exp_pop)):
        mmi = i+1
        print('\tMMI %i: %s' % (mmi,format(int(exp_pop[i]),',d')))
#call fatality model
eco_results = empeco.getLosses(expdict)
# NOTE(review): format(value, ",d") requires an integer -- presumably
# getLosses returns int dollars here; confirm, otherwise wrap in int().
for key,value in eco_results.items():
    print('\nEconomic losses for %s: $%s' % (key,format(value,",d")))
t2 = time.time()
print('\nTotal elapsed time for loss calculations: %.2f seconds' % (t2-t1))
```
Create an economic loss grid.
```
# Build a gridded economic-loss map and display a zoomed view, capping the
# color scale at mean + 3*std so extreme cells do not wash out the image.
mmidata = expmodel.getShakeGrid().getLayer('mmi').getData()
popdata = expmodel.getEconPopulationGrid().getData()
isodata = expmodel.getCountryGrid().getData()
ecogrid = empeco.getLossGrid(mmidata,popdata,isodata)
f = plt.figure(figsize=(8,8))
# Fix: the original cell computed `ecogrid` but then scaled and plotted the
# stale `fatgrid` from the fatality section (copy-paste error).
dmin = np.nanmin(ecogrid)
dmax = np.nanmax(ecogrid)
dmean = np.nanmean(ecogrid)
dstd = np.nanstd(ecogrid)
#Here we're zooming in on the affected regions...
plt.imshow(ecogrid[125:250,125:300],vmin=dmin,vmax=dmean+(3*dstd));
ax = plt.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(cax=cax);
```
Divide that economic loss grid by cities in LA County.
```
# Apportion the gridded dollar losses among LA County city polygons and print
# cities in descending order of loss.
shapefile = os.path.join(sampledir,'City_BoundariesWGS84','City_Boundaries.shp')
popdict = expmodel.getPopulationGrid().getGeoDict()
# Use fiona's context manager so the shapefile handle is closed even if reading
# fails (the original open/close pair also shadowed the matplotlib figure `f`).
with fiona.open(shapefile, 'r') as shp:
    shapes = list(shp)
ecoshapes,toteco = empeco.getLossByShapes(mmidata,popdata,isodata,shapes,popdict)
# Sort by descending dollar loss before printing.
ecoshapes = sorted(ecoshapes,key=lambda shape:shape['properties']['dollars_lost'],reverse=True)
for shape in ecoshapes:
    if shape['properties']['dollars_lost'] > 0:
        cname = shape['properties']['CITY_NAME']
        dollars = shape['properties']['dollars_lost']
        print('%s: $%s dollars lost' % (cname,format(dollars,",d")))
```
Compare Northridge losses to losses in other countries using the same economic exposure.
```
# Apply every country's economic model to the California ('XF') exposure.
for name in names:
    fmodel = empeco.getModel(name)
    # np.copy avoids mutating expdict: a bare numpy slice is a *view*, so the
    # in-place += below would otherwise re-add the MMI-10 exposure to the
    # shared array on every pass through this loop, inflating each successive
    # country's result.
    exp_pop = np.copy(expdict['XF'][4:9])
    exp_pop[-1] += expdict['XF'][-1] #MMI 10 is folded into MMI 9 for loss modeling
    dollars = fmodel.getLosses(exp_pop,np.arange(5,10))
    dstr = format(int(dollars),",d")
    print('%s dollars lost from Northridge exposure: $%s' % (name,dstr))
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Adversarial example using FGSM
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/generative/adversarial_fgsm"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial creates an *adversarial example* using the Fast Gradient Signed Method (FGSM) attack as described in [Explaining and Harnessing Adversarial Examples](https://arxiv.org/abs/1412.6572) by Goodfellow *et al*. This was one of the first and most popular attacks to fool a neural network.
## What is an adversarial example?
Adversarial examples are specialised inputs created with the purpose of confusing a neural network, resulting in the misclassification of a given input. These notorious inputs are indistinguishable to the human eye, but cause the network to fail to identify the contents of the image. There are several types of such attacks, however, here the focus is on the fast gradient sign method attack, which is a *white box* attack whose goal is to ensure misclassification. A white box attack is where the attacker has complete access to the model being attacked. One of the most famous examples of an adversarial image shown below is taken from the aforementioned paper.

Here, starting with the image of a panda, the attacker adds small perturbations (distortions) to the original image, which results in the model labelling this image as a gibbon, with high confidence. The process of adding these perturbations is explained below.
## Fast gradient sign method
The fast gradient sign method works by using the gradients of the neural network to create an adversarial example. For an input image, the method uses the gradients of the loss with respect to the input image to create a new image that maximises the loss. This new image is called the adversarial image. This can be summarised using the following expression:
$$adv\_x = x + \epsilon*\text{sign}(\nabla_xJ(\theta, x, y))$$
where
* adv_x : Adversarial image.
* x : Original input image.
* y : Original input label.
* $\epsilon$ : Multiplier to ensure the perturbations are small.
* $\theta$ : Model parameters.
* $J$ : Loss.
An intriguing property here is the fact that the gradients are taken with respect to the input image. This is done because the objective is to create an image that maximises the loss. A method to accomplish this is to find how much each pixel in the image contributes to the loss value, and add a perturbation accordingly. This works pretty fast because it is easy to find how each input pixel contributes to the loss by using the chain rule to obtain the required gradients. Hence, the gradients are taken with respect to the image. In addition, since the model is no longer being trained (thus the gradient is not taken with respect to the trainable variables, i.e., the model parameters), the model parameters remain constant. The only goal is to fool an already trained model.
So let's try and fool a pretrained model. In this tutorial, the model is [MobileNetV2](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/applications/MobileNetV2) model, pretrained on [ImageNet](http://www.image-net.org/).
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
    # %tensorflow_version only exists in Colab.
    # Import the TF2-compatible API from a TF1 installation if present.
    import tensorflow.compat.v2 as tf
except Exception:
    # NOTE(review): if this import fails, `tf` is undefined and the next line
    # raises NameError anyway -- the bare pass only helps in Colab where a
    # different import path succeeds earlier.
    pass
# Turn on TF2 eager-execution semantics when running under a TF1 runtime.
tf.enable_v2_behavior()
import matplotlib as mpl
import matplotlib.pyplot as plt
# Default figure size and no gridlines for all plots in this notebook.
mpl.rcParams['figure.figsize'] = (8, 8)
mpl.rcParams['axes.grid'] = False
```
Let's load the pretrained MobileNetV2 model and the ImageNet class names.
```
# Load ImageNet-pretrained MobileNetV2 and freeze it: we attack the model, we
# do not train it.
pretrained_model = tf.keras.applications.MobileNetV2(include_top=True,
                                                     weights='imagenet')
pretrained_model.trainable = False
# ImageNet labels
decode_predictions = tf.keras.applications.mobilenet_v2.decode_predictions
# Helper function to preprocess the image so that it can be inputted in MobileNetV2
def preprocess(image):
    """Cast to float32, scale to [0, 1], resize to 224x224, add batch dim.

    NOTE(review): MobileNetV2's canonical preprocessing is
    mobilenet_v2.preprocess_input, which scales to [-1, 1]; this cell scales
    to [0, 1] instead -- confirm this is intended for the attack demo.
    """
    image = tf.cast(image, tf.float32)
    image = image/255
    image = tf.image.resize(image, (224, 224))
    # [None, ...] prepends a batch axis of size 1.
    image = image[None, ...]
    return image
# Helper function to extract labels from probability vector
def get_imagenet_label(probs):
    """Return the (class_id, name, confidence) triple of the top-1 prediction."""
    return decode_predictions(probs, top=1)[0][0]
```
## Original image
Let's use a sample image of a [Labrador Retriever](https://commons.wikimedia.org/wiki/File:YellowLabradorLooking_new.jpg) -by Mirko [CC-BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/) from Wikimedia Common and create adversarial examples from it. The first step is to preprocess it so that it can be fed as an input to the MobileNetV2 model.
```
# Download the sample Labrador image (cached by get_file), decode it,
# preprocess it for MobileNetV2, and get the model's class probabilities.
image_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')
image_raw = tf.io.read_file(image_path)
image = tf.image.decode_image(image_raw)
image = preprocess(image)
image_probs = pretrained_model.predict(image)
```
Let's have a look at the image.
```
# Display the (batched) image with its predicted label and confidence.
plt.figure()
plt.imshow(image[0])
_, image_class, class_confidence = get_imagenet_label(image_probs)
plt.title('{} : {:.2f}% Confidence'.format(image_class, class_confidence*100))
plt.show()
```
## Create the adversarial image
### Implementing fast gradient sign method
The first step is to create perturbations which will be used to distort the original image resulting in an adversarial image. As mentioned, for this task, the gradients are taken with respect to the image.
```
loss_object = tf.keras.losses.CategoricalCrossentropy()
def create_adversarial_pattern(input_image, input_label):
    """Return sign(dLoss/dImage): the FGSM perturbation direction.

    Args:
      input_image: batched image tensor; must be watched explicitly because
        it is a plain tensor, not a trainable variable.
      input_label: target label/probability vector for the loss.
    """
    with tf.GradientTape() as tape:
        tape.watch(input_image)
        prediction = pretrained_model(input_image)
        loss = loss_object(input_label, prediction)
    # Get the gradients of the loss w.r.t to the input image.
    gradient = tape.gradient(loss, input_image)
    # Get the sign of the gradients to create the perturbation
    signed_grad = tf.sign(gradient)
    return signed_grad
```
The resulting perturbations can also be visualised.
```
# Visualize the signed-gradient pattern. Its values lie in {-1, 0, 1};
# NOTE(review): imshow clips to [0, 1] here, so negative entries render as
# black -- rescaling with *0.5+0.5 would show the full pattern. Confirm the
# current rendering is intended.
perturbations = create_adversarial_pattern(image, image_probs)
plt.imshow(perturbations[0])
```
Let's try this out for different values of epsilon and observe the resultant image. You'll notice that as the value of epsilon is increased, it becomes easier to fool the network, however, this comes as a trade-off which results in the perturbations becoming more identifiable.
```
def display_images(image, description):
    """Show a batched image with its top-1 label, confidence, and a caption."""
    _, label, confidence = get_imagenet_label(pretrained_model.predict(image))
    plt.figure()
    plt.imshow(image[0])
    plt.title('{} \n {} : {:.2f}% Confidence'.format(description,
                                                     label, confidence*100))
    plt.show()
# Sweep epsilon; 0 shows the unmodified input for comparison.
epsilons = [0, 0.01, 0.1, 0.15]
descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input')
                for eps in epsilons]
for i, eps in enumerate(epsilons):
    # FGSM step: x_adv = x + eps * sign(grad), clipped back to valid [0, 1].
    adv_x = image + eps*perturbations
    adv_x = tf.clip_by_value(adv_x, 0, 1)
    display_images(adv_x, descriptions[i])
```
## Next steps
Now that you know about adversarial attacks, try this out on different datasets and different architectures. You may also create and train your own model, and then attempt to fool it using the same method. You can also try and see how the confidence in predictions vary as you change epsilon.
Though powerful, the attack shown in this tutorial was just the start of research into adversarial attacks, and there have been multiple papers creating more powerful attacks since then. In addition to adversarial attacks, research has also led to the creation of defenses, which aims at creating robust machine learning models. You may review this [survey paper](https://arxiv.org/abs/1810.00069) for a comprehensive list of adversarial attacks and defences.
For many more implementations of adversarial attacks and defenses, you may want to see the adversarial example library [CleverHans](https://github.com/tensorflow/cleverhans).
| github_jupyter |
Exercise 2 - Simple Linear Regression
===
In Exercise 1, we used R within Jupyter Notebooks to load information about chocolate bars, and stored it in a variable named `choc_data`. We checked the structure of `choc_data`, and explored some of the variables we have about chocolate bars using graphs.
In this exercise, we want to know how to make our chocolate-bar customers happier. To do this, we need to know whether chocolate bar _features_ can predict customer happiness. For example, customers may be happier when chocolate bars are bigger, or when they contain more cocoa.
We have data on customer happiness when eating chocolate bars with different features. Let's explore the relationship between customer happiness and the different features we have available.
Step 1
---
First, we need to load the required libraries and data we will use in this exercise.
Below, we'll also use the functions `str`, `head`, and `tail` to inspect the structure of `choc_data`.
** In the cell below replace: **
** 1. `<structureFunction>` with `str` **
** 2. `<headFunction>` with `head` **
** 3. `<tailFunction>` with `tail` **
** then __run the code__. **
```
# Exercise template: the <...> placeholders below are intentional and must be
# replaced by the student before this cell will run.
# Load `ggplot2` library for graphing capabilities
library(ggplot2)
# Load the chocolate data and save it to the variable name `choc_data`
choc_data <- read.delim("Data/chocolate data.txt")
###
# REPLACE <structureFunction> <headFunction> <tailFunction> WITH str, head, and tail
###
# Check the structure of `choc_data` using `str(choc_data)`
<structureFunction>(choc_data)
# Inspect the start of the data by typing `head(choc_data)`
<headFunction>(choc_data)
# Inspect the end of the data by typing `tail(choc_data)`
<tailFunction>(choc_data)
```
Our object `choc_data` contains 100 different chocolate bar observations for 5 variables: weight, cocoa percent, sugar percent, milk percent, and customer happiness.
Step 2
---
We want to know which chocolate bar features make customers happy.
The example below shows a linear regression between __cocoa percentage__ and __customer happiness__.
** Run the code below to visualise this. You do not need to edit the code block below, just run it. **
```
# Run this box
# DO NOT EDIT THIS CODE
# Create our own function to generate a linear regression model then graph the result
# Arguments: x, y are column names (strings); my_data is the data frame.
lin_reg_choc <- function(x, y, my_data){
    # Extract the two columns by name (x and y arrive as strings).
    x_arg <- my_data[ , substitute(x)]
    y_arg <- my_data[ , substitute(y)]
    # Perform linear regression using `lm` (stands for linear models) function
    lm_choc <- lm(formula = y_arg ~ x_arg, data = my_data)
    # Create scatter plot of choc_data together with linear model
    # NOTE(review): aes_string is deprecated in newer ggplot2 releases --
    # confirm the installed version still supports it.
    ggplot(data = my_data, aes_string(x = x, y = y)) +
    geom_point() +
    # Add line based on linear model
    geom_abline(intercept = lm_choc$coefficients[1],
                slope = lm_choc$coefficients[2],
                colour = "red") +
    # x-axis label remains constant
    xlab("Customer happiness") +
    # y-axis label; use `gsub` function to remove underscore from the column name
    ylab(gsub("_", " ", y)) +
    # graph title
    ggtitle(paste("Customer satisfaction with chocolate bars given", gsub("_", " ", y))) +
    theme(plot.title = element_text(hjust = 0.5))
}
# This performs the linear regression steps listed above
lin_reg_choc(x = "customer_happiness", y = "cocoa_percent", my_data = choc_data)
```
In the scatter plot above, each point represents an observation for a single chocolate bar.
It seems that __a higher percentage of cocoa increases customer happiness__. We think this because as we increase the amount of cocoa (y-axis), the amount of customer happiness (x-axis) increases, as shown by our linear model (red line).
Step 3
---
** In the cell below: **
** 1. replace the text `<addFeatureHere>` with __`weight`__ to see if heavier chocolate bars make people happier. **
** 2. Also try the variables `sugar_percent` and `milk_percent` to see if these improve customers' experiences. **
** Remember to run each box when you are ready.**
```
# Exercise template: replace each <addFeatureHere> placeholder (quotes
# included) before running; the cell errors otherwise.
###
# CHANGE <addFeatureHere> TO "weight" IN THE LINE BELOW (INCLUDING THE QUOTATION MARKS)
###
lin_reg_choc(x = "customer_happiness", y = <addFeatureHere>, my_data = choc_data)
###
###
# CHANGE <addFeatureHere> TO "sugar_percent" IN THE LINE BELOW (INCLUDING THE QUOTATION MARKS)
###
lin_reg_choc(x = "customer_happiness", y = <addFeatureHere>, my_data = choc_data)
###
###
# CHANGE <addFeatureHere> TO "milk_percent" IN THE LINE BELOW (INCLUDING THE QUOTATION MARKS)
###
lin_reg_choc(x = "customer_happiness", y = <addFeatureHere>, my_data = choc_data)
###
```
It looks like heavier chocolate bars make customers happier, whereas larger amounts of sugar or milk don't seem to make customers happier.
We can draw this conclusion based on the slope of our linear regression models (red line):
* Our linear regression model for "weight vs. customer happiness" reveals that as chocolate bar weight increases, customer happiness also increases;
* Our linear regression models for "sugar percent vs. customer happiness" and "milk percent vs. customer happiness" reveal that as the percentage of sugar or milk increases, customer happiness decreases.
> *N.B. It is possible to perform linear regression directly with `ggplot2` using the following function and arguments: `stat_smooth(method = "lm")`. However, we want to show you how to create linear models without the dependency of `ggplot2`.*
Conclusion
---
Well done! You have run a simple linear regression that revealed chocolate bars heavier in weight and with higher percentages of cocoa make customers happy.
You can now go back to the course and click __'Next Step'__ to move onto using linear regression with multiple features.
| github_jupyter |
##### Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Introduction to TensorFlow Part 3 - Advanced Tensor Manipulation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/google/tf-quant-finance/blob/master/tf_quant_finance/examples/jupyter_notebooks/Introduction_to_TensorFlow.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/google/tf-quant-finance/blob/master/tf_quant_finance/examples/jupyter_notebooks/Introduction_to_TensorFlow.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
```
#@title Upgrade to TensorFlow 2.5+
!pip install --upgrade tensorflow
#@title Install and import Libraries for this colab. RUN ME FIRST!
import matplotlib.pyplot as plt
import tensorflow as tf
```
# What this notebook covers
This notebook carries on from [part 2](https://colab.research.google.com/github/google/tf-quant-finance/blob/master/tf_quant_finance/examples/jupyter_notebooks/Introduction_to_TensorFlow_Part_2_-_Debugging_and_Control_Flow.ipynb
), and covers various advanced ways of manipulating tensors, including
* Gather
* Updating tensor entries
* Sparse Tensors
* Various functional ops:
* tf.foldl
* tf.foldr
* tf.map_fn
* tf.vectorized_map
* XLA compilation
# Scatter / Gather
## tf.gather_nd
[Full documentation](https://www.tensorflow.org/api_docs/python/tf/gather_nd)
This operation allows you to take a multi-dimensional tensor and extract a list of subsets of data from it, according to a list of indices.
```
# A 2x3x3 tensor of distinct values, so every lookup result is easy to trace
# back to its position.
source = tf.constant([[[111,112,113], [121,122,123], [131,132,133]],
                      [[211,212,213], [221,222,223], [231,232,233]]])
# if we specify all values for all of source's dimensions, then we get a
# single value
indices = [[1,1,1]]
print("Looking up %s gives us\n%s" %(
    indices, tf.gather_nd(source, indices)))
# we can look up multiple sets of indices
indices = [[1,1,1], [0,0,0], [0,0,1]]
print("\nLooking up %s gives us\n%s" %(
    indices, tf.gather_nd(source, indices)))
# if we don't specify values for all of source's dimensions, then we get
# results of larger shape
indices = [[0,0]]  # each index selects a whole row (innermost dim omitted)
print("\nLooking up %s gives us\n%s" %(
    indices, tf.gather_nd(source, indices)))
indices = [[1]]  # each index selects a whole 3x3 slice
print("\nLooking up %s gives us\n%s" %(
    indices, tf.gather_nd(source, indices)))
```
The indices can easily be generated with tf.where:
```
# tf.where returns the coordinates of every True element -- exactly the
# index format that gather_nd expects.
source = tf.constant([[[111,112,113], [121,122,123], [131,132,133]],
                      [[211,212,213], [221,222,223], [231,232,233]]])
values_divisible_by_three = tf.gather_nd(
    source, tf.where(tf.equal(0, source % 3)))
print(values_divisible_by_three)
```
## Updating elements of a Tensor
Tensors are immutable objects. Often there is a need to update certain values of a Tensor. In order to achieve this, one can use `tf.tensor_scatter_nd`, which creates a copy of the input Tensor along with updated values at the specified indices.
For user convenience a number of similar methods are available, such as `tf.tensor_scatter_nd_add/sub/min/max`.
```
x = tf.constant([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                 [[11, 12, 13], [14, 15, 16], [17, 18, 19]]])
# Original Tensor. Tensors are immutable: every update below returns a new
# tensor and leaves x untouched.
print("Original Tensor:\n%s"%x)
print("Updating a single value:\n%s"%
      tf.tensor_scatter_nd_update(
          x,
          indices = [[0, 1, 2]],
          updates = [-1]))
print("\nUpdating multiple values:\n%s"%
      tf.tensor_scatter_nd_update(
          x,
          indices = [[0, 0, 0], [0, 1, 1], [0, 2, 2]],
          updates = [-1, -2, -3]))
# You can reduce the dimensions of indices and increase the dimensions of
# updates
print("\nScattering entire rows:\n%s"%
      tf.tensor_scatter_nd_update(
          x,
          indices = [[0,0], [0,1]],
          updates = [[-1, -2, -3], [-4, -5, -6]]))
print("\nUpdating the entire matrix:\n%s"%
      tf.tensor_scatter_nd_update(
          x,
          indices = [[0]],
          updates = [[[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]]]))
# Note that if `indices` contains duplicate or overlapping values, the
# clashing updates overwrite one another in an indeterminate order: the
# result is *one* of the updates, not their sum, and which one wins may
# vary between devices and runs. (Summing is the behaviour of the separate
# tf.tensor_scatter_nd_add op.)
print("\nUpdating single value multiple times:\n%s"%
      tf.tensor_scatter_nd_update(
          x,
          indices = [[0,0,0], [0,0,0], [0,0,0]],
          updates = [-1, -2, -3]))
```
##tf.scatter_nd
[Full documentation](https://www.tensorflow.org/api_docs/python/tf/scatter_nd)
`scatter_nd` is similar to `tf.tensor_scatter_nd_update`. It creates a zero-initialised tensor of a given shape, and then writes a series of specified values at specified positions in that tensor.
### Gather then Update
In some cases, you will want `tensor_scatter_nd` to act as a "setter" to `gather_nd`'s "getter": i.e. you have a tensor, you extract a subset of values that meet a certain criteria using `gather_nd`, you calculate new values for that subset, and then create a new tensor based on the original that replaces the elements that met the criteria with the new values.
```
source = tf.constant([[[111,112,113], [121,122,123], [131,132,133]],
                      [[211,212,213], [221,222,223], [231,232,233]]])
# Create a list of indices where is_divisible_by_three is true (we no longer
# need to keep a reference to the result of tf.equal)
indices = tf.where(tf.equal(0, source % 3))
# Extract a list of values that need updating (the "getter")
values_divisible_by_three = tf.gather_nd(source, indices)
# Perform a (pretend) really expensive operation on those values
new_values = values_divisible_by_three % 100
# Write the new values back at the same indices (the "setter"); this
# returns a new tensor, the original is unchanged
new_tensor = tf.tensor_scatter_nd_update(
    source, indices, new_values)
# Updated Tensor
print(new_tensor)
```
## Exercise: Mandelbrot set
Let's revisit the Mandelbrot set from the previous training course. In that solution, we ran the z=z*z+c calculation for all co-ordinates, even the ones whose magnitude had already gone over 2.
For the purpose of this exercise, we will pretend that the complex calculation is very expensive and that we should eliminate the calculation where possible. In actual fact, the calculation is utterly trivial and swamped by the cost of the gather/scatter operations, but the same methods can be used in situations rather more expensive than a complex add and multiply.
```
MAX_ITERATIONS = 64  # bail-out limit for the z = z*z + c iteration
NUM_PIXELS = 512     # output image is NUM_PIXELS x NUM_PIXELS

def generate_grid(nX, nY, bottom_left=(-1.0, -1.0), top_right=(1.0, 1.0)):
    """Generates a complex matrix of shape [nX, nY].

    Generates an evenly spaced grid of complex numbers spanning the rectangle
    between the supplied diagonal points.

    Args:
      nX: A positive integer. The number of points in the horizontal direction.
      nY: A positive integer. The number of points in the vertical direction.
      bottom_left: The coordinates of the bottom left corner of the rectangle to
        cover.
      top_right: The coordinates of the top right corner of the rectangle to
        cover.

    Returns:
      A constant tensor of type complex128 and shape [nX, nY].
    """
    x = tf.linspace(bottom_left[0], top_right[0], nX)
    y = tf.linspace(bottom_left[1], top_right[1], nY)
    real, imag = tf.meshgrid(x, y)
    return tf.cast(tf.complex(real, imag), tf.complex128)

c_values = generate_grid(NUM_PIXELS, NUM_PIXELS)
initial_Z_values = tf.zeros_like(c_values, dtype=tf.complex128)
# MAX_ITERATIONS acts as a sentinel meaning "has not diverged (yet)"
initial_diverged_after = tf.ones_like(c_values, dtype=tf.int32) * MAX_ITERATIONS
# You need to put the various values you want to change inside the loop here
loop_vars = (0, initial_Z_values, initial_diverged_after)
# this needs to take the same number of arguments as loop_vars contains and
# return a tuple of equal size with the next iteration's values
def body(iteration_count, Z_values, diverged_after):
    # a matrix of bools showing all the co-ordinates that haven't diverged yet
    not_diverged = tf.equal(diverged_after, MAX_ITERATIONS)
    # a list of the indices in not_diverged that are true
    not_diverged_indices = tf.where(not_diverged)
    # you now need to gather just the Z and c values covered by
    # not_diverged_indices, calculate the new Z values, and then scatter the
    # values back into a new Z_values matrix to pass to the next iteration.
    new_Z_values = # TODO -- exercise: fill this in (see the solution cell)
    # And now we're back to the original code
    has_diverged = tf.abs(new_Z_values) > 2.0
    new_diverged_after = tf.minimum(diverged_after, tf.where(
        has_diverged, iteration_count, MAX_ITERATIONS))
    return (iteration_count+1, new_Z_values, new_diverged_after)
# this just needs to take the same number of arguments as loop_vars contains and
# return true (we'll use maximum_iterations to exit the loop)
def cond(iteration_count, Z_values, diverged_after):
    return True
results = tf.while_loop(
    loop_vars=loop_vars,
    body = body,
    cond = cond,
    maximum_iterations=MAX_ITERATIONS)
## extract the final value of diverged_after from the tuple
final_diverged_after = results[-1]
plt.matshow(final_diverged_after)
pass
#@title Solution: Mandelbrot set (Double-click to reveal)
MAX_ITERATIONS = 64  # bail-out limit for the z = z*z + c iteration
NUM_PIXELS = 512     # output image is NUM_PIXELS x NUM_PIXELS

def GenerateGrid(nX, nY, bottom_left=(-1.0, -1.0), top_right=(1.0, 1.0)):
    """Generates a complex matrix of shape [nX, nY].

    Generates an evenly spaced grid of complex numbers spanning the rectangle
    between the supplied diagonal points.

    Args:
      nX: A positive integer. The number of points in the horizontal direction.
      nY: A positive integer. The number of points in the vertical direction.
      bottom_left: The coordinates of the bottom left corner of the rectangle to
        cover.
      top_right: The coordinates of the top right corner of the rectangle to
        cover.

    Returns:
      A constant tensor of type complex128 and shape [nX, nY].
    """
    x = tf.linspace(bottom_left[0], top_right[0], nX)
    y = tf.linspace(bottom_left[1], top_right[1], nY)
    real, imag = tf.meshgrid(x, y)
    return tf.cast(tf.complex(real, imag), tf.complex128)

c_values = GenerateGrid(NUM_PIXELS, NUM_PIXELS)
initial_Z_values = tf.zeros_like(c_values, dtype=tf.complex128)
# MAX_ITERATIONS acts as a sentinel meaning "has not diverged (yet)"
initial_diverged_after = tf.ones_like(c_values, dtype=tf.int32) * MAX_ITERATIONS
# You need to put the various values you want to change inside the loop here
loop_vars = (0, initial_Z_values, initial_diverged_after)
# this needs to take the same number of arguments as loop_vars contains and
# return a tuple of equal size with the next iteration's values
def body(iteration_count, Z_values, diverged_after):
    # a matrix of bools showing all the co-ordinates that haven't diverged yet
    not_diverged = tf.equal(diverged_after, MAX_ITERATIONS)
    # a list of the indices in not_diverged that are true
    not_diverged_indices = tf.where(not_diverged)
    # Gather the values for just the undiverged co-ordinates, and generate the
    # next iteration's values
    not_diverged_c_values_array = tf.gather_nd(c_values, not_diverged_indices)
    not_diverged_Z_values_array = tf.gather_nd(Z_values, not_diverged_indices)
    new_Z_values_array = (not_diverged_Z_values_array * not_diverged_Z_values_array
                          + not_diverged_c_values_array)
    # merge the new values with the already-diverged: scatter_nd fills
    # unspecified entries with zero, so tf.where restores the old Z for
    # co-ordinates that had already diverged
    new_Z_values_or_zeroes = tf.scatter_nd(
        not_diverged_indices,
        new_Z_values_array,
        tf.shape(Z_values, out_type=tf.dtypes.int64))
    new_Z_values = tf.where(not_diverged, new_Z_values_or_zeroes, Z_values)
    # And now we're back to the original code
    has_diverged = tf.abs(new_Z_values) > 2.0
    new_diverged_after = tf.minimum(diverged_after, tf.where(
        has_diverged, iteration_count, MAX_ITERATIONS))
    return (iteration_count+1, new_Z_values, new_diverged_after)
# this just needs to take the same number of arguments as loop_vars contains and
# return true (we'll use maximum_iterations to exit the loop)
def cond(iteration_count, Z_values, diverged_after):
    return True
results = tf.while_loop(
    loop_vars=loop_vars,
    body = body,
    cond = cond,
    maximum_iterations=MAX_ITERATIONS)
## extract the final value of diverged_after from the tuple
final_diverged_after = results[-1]
plt.matshow(final_diverged_after)
plt.show()
```
## SparseTensor
[Full documentation](https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor)
A sparse tensor is created from a list of indices, a list of values and a shape: the same as the arguments to scatter_nd. Any element within the tensor that doesn't have an explicit value will be treated as zero. So a sparse tensor can be viewed as a deferred call to scatter_nd.
For large tensors where most of the values are zero, sparse tensors can grant major savings in memory. The [tf.sparse module](https://www.tensorflow.org/api_docs/python/tf/sparse) contains several specialised operations that know how to work with a sparse tensor's internals and skip all the zero values, thus granting major savings in processing speed as well.
Similarly sparse tensors can be efficiently divided or multiplied by a tensor or scalar. But attempts to perform inefficient operations on a sparse tensor (i.e. ones likely to set most elements to a non-zero value) are not allowed. You need to convert the sparse tensor to a normal, or "dense", tensor with the ```tf.sparse.to_dense``` function.
```
source = tf.constant([[[111,112,113], [121,122,123], [131,132,133]],
                      [[211,212,213], [221,222,223], [231,232,233]]])
# create a list of indices where is_divisible_by_three is true
indices = tf.where(tf.equal(0, source % 3))
# extract a matching list of values
values_divisible_by_three = tf.gather_nd(source, indices)
# indices + values + dense shape: the same triple that scatter_nd takes
sparse = tf.sparse.SparseTensor(
    indices,
    values_divisible_by_three,
    tf.shape(source, out_type=tf.dtypes.int64))
print ("sparse =")
print(sparse)
# We can efficiently multiply sparse by a dense tensor
print ("\nsparse * dense =")
print(sparse * source)
# We can efficiently multiply a dense tensor by a sparse
print ("\ndense * sparse")
print(source * sparse)
# We can efficiently divide sparse by a dense tensor
print ("\nsparse / dense")
print(sparse / source)
# But attempts to perform inefficient operations on a sparse tensor (i.e. ones
# likely to set most elements to a non-zero value) are not allowed.
# You need to convert the sparse tensor into a dense tensor first.
try:
    not_allowed = sparse + source
except ValueError:
    # expected: addition would densify the result
    # NOTE(review): some TF versions appear to raise TypeError here -- confirm
    pass
# Running to_dense is exactly the same as calling scatter_nd:
print ("\nto_dense gives")
print(tf.sparse.to_dense(sparse))
print ("\nscatter_nd gives")
print(tf.scatter_nd(sparse.indices, sparse.values, sparse.dense_shape))
```
# Functional ops
## tf.foldl and tf.foldr
[Full documentation](https://www.tensorflow.org/api_docs/python/tf/foldl)
These two functions split a given tensor across its first dimension. The resulting subtensors are then each passed to an op along with an "accumulator".
For most iterations, the value of the accumulator will be the result of the previous iteration. But for the first iteration, the accumulator will either be the initial value passed into the foldl/foldr call, or the first subtensor. The final iteration's result then becomes the overall result of the op.
So the rough pseudo code of ```x = tf.foldl(op, [[1,1], [2,2], [3,3]], initializer)``` would be
``` python
result_iteration1 = op(initializer, [1,1])
result_iteration2 = op(result_iteration1, [2,2])
result_iteration3 = op(result_iteration2, [3,3])
x = result_iteration3
```
Whereas the rough pseudo-code of ```x = tf.foldl(op, [[1,1], [2,2], [3,3]])``` (i.e. no initializer supplied) would be
``` python
result_iteration1 = op([1,1], [2,2])
result_iteration2 = op(result_iteration1, [3,3])
x = result_iteration2
```
```foldr``` is identical to ```foldl```, except that the order the tensor is iterated through is reversed. So the rough pseudo code of ```x = tf.foldr(op, [[1,1], [2,2], [3,3]], initializer)``` would be
``` python
result_iteration1 = op(initializer, [3,3])
result_iteration2 = op(result_iteration1, [2,2])
result_iteration3 = op(result_iteration2, [1,1])
x = result_iteration3
```
The only complication of this method is that the op is defined by a python callable. Note that the callable is only called once, at execution time, to build the operation. **Your python callable is not called for every row in the input**, nor can it see the individual values. It is the op created by your python code that will be repeatedly called.
Note that despite this, use of these methods still eliminates several optimisation opportunities that are present in tensorflow built-in operations. So if you can use something like `tf.math.reduce_sum` instead of these ops then your code may well run significantly faster.
```
source = tf.constant([[1,2],[3,4],[5,6]])
# my_function is traced once to build the op; it is the generated op (not
# this Python function) that then runs once per row of `source`
@tf.function
def my_function(previous_iterations_result, element):
    print("In my_function.")
    # this depends on the previous values, thus highlighting the difference
    # between foldl and foldr
    return tf.math.maximum(
        previous_iterations_result, element) + previous_iterations_result
print("Executing foldl")
print("foldl result:\n%s"%
      tf.foldl(my_function, source))
print("\nExecuting foldr")
print("foldr result:\n%s"%
      tf.foldr(my_function, source))
```
## tf.map_fn
This op is similar to `foldl`, but the python function only takes a single argument; it lacks the accumulator argument containing the result of the previous iteration. Again the callable is called just once, and is used to generate a TensorFlow op. It is this generated op that is executed once per row. And again, be aware that replacing the map_fn call with a built-in op - if possible - can result in significant increases in speed.
```
source = tf.constant([[1,2],[3,4],[5,6]])
# my_function is traced once; the generated op is applied to each row
@tf.function
def my_function(element):
    print("In my_function")
    return tf.math.reduce_sum(element)
# Label fixed: it previously said "foldr result" (copy-paste from the
# foldl/foldr cell) although this cell demonstrates map_fn.
print("map_fn result:\n%s"%
      tf.map_fn(my_function, source))
```
## tf.vectorized_map
This op is similar to `map_fn`, but has a much better performance due to vectorization. `map_fn` is serial since it is based on `tf.while_loop`, while `tf.vectorized_map` relies on [`pfor`](https://github.com/tensorflow/tensorflow/blob/a4dfb8d1a71385bd6d122e4f27f86dcebb96712d/tensorflow/python/ops/parallel_for/control_flow_ops.py#L546) (parallel for) in its implementation. The potential speed-up can match that of manual batching.
The op is useful to parallelize tasks where batching is hard to achieve (e.g., Jacobian calculation).
See [official documentation](https://www.tensorflow.org/api_docs/python/tf/vectorized_map) for more details.
Again the callable is called just once, and is used to generate a tensorflow op.
```
source = tf.constant([[1,2],[3,4],[5,6]])
# my_function is traced once; vectorized_map parallelises the generated op
# across the rows of `source`
@tf.function
def my_function(element):
    print("In my_function")
    return tf.math.reduce_sum(element)
# Label fixed: it previously said "foldr result" (copy-paste from the
# foldl/foldr cell) although this cell demonstrates vectorized_map.
print("vectorized_map result:\n%s"%
      tf.vectorized_map(my_function, source))
# Vectorization map vs map_fn vs batching
@tf.function
def square_map(x):
    return tf.map_fn(lambda x: x**2, x)
@tf.function
def square_vectorized_map(x):
    return tf.vectorized_map(lambda x: x**2, x)
dtype = tf.float64
x = tf.random.uniform([1_000], dtype=dtype)
```
When timing, we call `.numpy()` to ensure the result is copied to memory.
```
%%timeit
# map_fn speed (serial tf.while_loop under the hood)
square_map(x).numpy()
%%timeit
# vectorized_map speed
square_vectorized_map(x).numpy()
%%timeit
# Batched version (plain vectorised tensor op: the baseline to beat)
(x**2).numpy()
```
# XLA compilation
One of the main TensorFlow concepts is the computational graph. One can imagine that knowing the graph should provide enough information to create an efficient low-level code targeting a specific device (i.e., CPU/GPU/TPU). XLA (Accelerated Linear Algebra) is a compiler that does precisely that - it creates an LLVM representation from the computational graph, potentially bringing a significant speed-up to the calculation. Compilation can be done in either Ahead-of-time (AOT) or Just-in-time (JIT) modes.
Refer to the [official XLA page](https://www.tensorflow.org/xla) for more details. XLA Architecture details can be found [here](https://www.tensorflow.org/xla/architecture).
From a user perspective, using JIT compilation is easy: simply set the `jit_compile=True` argument of `tf.function`.
To use AOT compilation mode please refer to the [documentation](https://www.tensorflow.org/xla/tfcompile).
**NB**
* At the moment not every function can be XLA-compiled. For example, inputs and output shapes of `tf.while_loop` should be the same. Also, some of the ops might be missing an XLA implementation.
* JIT-compilation means that compilation happens at the first function call. For the successive calls, the compiled function is used. If an input has a different shape from the one used during the compilation, JIT-compilation happens again for the new input shapes.
```
@tf.function
def square_map(x):
return tf.map_fn(lambda x: x**2, x)
@tf.function(jit_compile=True)
def square_map_xla(x):
return tf.map_fn(lambda x: x**2, x)
@tf.function(jit_compile=True)
def square_vectorized_map_xla(x):
return tf.vectorized_map(lambda x: x**2, x)
dtype = tf.float64
x = tf.random.uniform([1_000], dtype=dtype)
%%timeit
square_map(x).numpy()
%%timeit
# Compare time to the non-compiled code above
square_map_xla(x).numpy()
# Now compare with compiled vectorized_map
x = tf.random.uniform([500_000], dtype=dtype)
%%timeit
# map_fn + XLA
square_map_xla(x).numpy()
%%timeit
# vectorized_map + XLA
square_vectorized_map(x).numpy()
```
| github_jupyter |
# Accelerate pretraining of BERT model using ONNX Runtime
This notebook contains a walkthrough of using ONNX Runtime in Azure Machine Learning service to pretrain [BERT: Bidirectional Encoder Representations from Transformers](https://arxiv.org/abs/1810.04805) models. This example shows how ONNX Runtime training can accelerate BERT pretraining implementation in PyTorch maintained at https://github.com/NVIDIA/DeepLearningExamples.
Steps:
- Intialize an AzureML workspace
- Register a datastore to use preprocessed data for training
- Create an AzureML experiment
- Provision a compute target
- Create an Estimator
- Configure and Run
Prerequisites
If you are using an Azure Machine Learning [Compute Instance](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-instance) you are all set. Otherwise, you need to setup your environment by installing AzureML Python SDK to run this notebook. Refer to [How to use Estimator in Azure ML](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training-with-deep-learning/how-to-use-estimator/how-to-use-estimator.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
Refer to instructions at https://github.com/microsoft/onnxruntime-training-examples/blob/master/nvidia-bert/README.md before running the steps below.
### Check SDK installation
```
import os
import requests
import sys
# AzureML libraries
import azureml.core
from azureml.core import Experiment, Workspace, Datastore, Run
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.container_registry import ContainerRegistry
from azureml.core.runconfig import MpiConfiguration, RunConfiguration, DEFAULT_GPU_IMAGE
from azureml.train.dnn import PyTorch
from azureml.train.estimator import Estimator
from azureml.widgets import RunDetails
# Check core SDK version number
print("SDK version:", azureml.core.VERSION)
```
### AzureML Workspace setup
```
# Create or retrieve Azure machine learning workspace
# see https://docs.microsoft.com/en-us/python/api/overview/azure/ml/?view=azure-ml-py
# Replace the placeholder name / subscription id / resource group with your own.
ws = Workspace.get(name="myworkspace", subscription_id='<azure-subscription-id>', resource_group='myresourcegroup')
# Print workspace attributes
print('Workspace name: ' + ws.name,
      'Workspace region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep = '\n')
```
### Register Datastore
Before running the step below, data prepared using the instructions at https://github.com/microsoft/onnxruntime-training-examples/blob/master/nvidia-bert/README.md should be transferred to an Azure Blob container referenced in the `Datastore` registration step. Refer to the documentation at https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data for details on using data in Azure ML experiments.
```
# Create a datastore from blob storage containing training data.
# Consult README.md for instructions downloading and uploading training data.
# NOTE: prefer fetching the account key from a secret store (e.g. Key Vault)
# rather than pasting it into the notebook.
ds = Datastore.register_azure_blob_container(workspace=ws,
                                             datastore_name='<datastore-name>',
                                             account_name='<storage-account-name>',
                                             account_key='<storage-account-key>',
                                             container_name='<storage-container-name>')
# Print datastore attributes
print('Datastore name: ' + ds.name,
      'Container name: ' + ds.container_name,
      'Datastore type: ' + ds.datastore_type,
      'Workspace name: ' + ds.workspace.name, sep = '\n')
```
### Create AzureML Compute Cluster
This recipe is supported on Azure Machine Learning Service using 16 x Standard_NC24rs_v3 or 8 x Standard_ND40rs_v2 VMs. In the next step, you will create an AzureML Compute cluster of Standard_ND40rs_v2 GPU VMs with the specified name, if it doesn't already exist in your workspace.
```
# Create GPU cluster (reuses an existing cluster with this name if present)
gpu_cluster_name = "ortbertpretrain"
try:
    gpu_compute_target = ComputeTarget(workspace=ws, name=gpu_cluster_name)
    print('Found existing compute target.')
except ComputeTargetException:
    print('Creating a new compute target...')
    # min_nodes=0 lets the cluster autoscale down to zero cost when idle
    compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_ND40rs_v2', min_nodes=0, max_nodes=8)
    gpu_compute_target = ComputeTarget.create(ws, gpu_cluster_name, compute_config)
    gpu_compute_target.wait_for_completion(show_output=True)
# Create experiment for phase 1
experiment_name = 'nvbert-ort-pretraining-phase1'
experiment = Experiment(ws, name=experiment_name)
```
### Create Estimator
Notes before running the following step:
* Update the following step to replace two occurrences of `<blob-path-to-phase1-training-data>` with the actual path in the datastore that contains the training files.
* If you followed instructions at https://github.com/microsoft/onnxruntime-training-examples/blob/master/nvidia-bert/README.md to prepare data, make sure that the data and others files that are not code or config are moved out `workspace` directory. Data files should have been moved to a `Datastore` to use in training.
To fully utilize capacity, we suggest parameters from below table for phase 1.
| VM SKU | node_count | gpu_memory_limit_gb | train_batch_size | gradient_accumulation_steps |
| ------------------ |:------------------:|-----------------:|-----------------:| ---------------------------:|
| Standard_ND40rs_v2 | 1 (8 GPUs total) | 32 | 8192 | 64 |
| Standard_ND40rs_v2 | 2 (16 GPUs total) | 32 | 4096 | 32 |
| Standard_ND40rs_v2 | 4 (32 GPUs total) | 32 | 2048 | 16 |
| Standard_ND40rs_v2 | 8 (64 GPUs total) | 32 | 1024 | 8 |
| Standard_NC24rs_v3 | 1 (4 GPUs total) | 16 | 16320 | 340 |
| Standard_NC24rs_v3 | 2 (8 GPUs total) | 16 | 8160 | 170 |
| Standard_NC24rs_v3 | 4 (16 GPUs total) | 16 | 4080 | 85 |
| Standard_NC24rs_v3 | 8 (32 GPUs total) | 16 | 2016 | 42 |
| Standard_NC24rs_v3 | 16 (64 GPUs total) | 16 | 1008 | 21 |
Refer to [README.md](../README.md) for an in-depth explanation of batch sizes and gradient accumulation steps.
```
# this directory should contain run_pretraining_ort.py, ort_supplement directory and other files copied over based on the instructions at https://github.com/microsoft/onnxruntime-training-examples/blob/master/nvidia-bert/README.md
project_folder = '../../workspace/BERT'
image_name = 'mcr.microsoft.com/azureml/onnxruntime-training:0.1-rc3.1-openmpi4.0-cuda10.2-cudnn8.0-nccl2.7-for-bert'
# set MPI configuration
# set processes per node to be equal to GPU count on SKU.
mpi = MpiConfiguration()
mpi.process_count_per_node = 8  # 8 GPUs per Standard_ND40rs_v2 node
import uuid
output_id = uuid.uuid1().hex  # unique suffix so reruns don't clobber output
# Define training estimator for phase 1
# Consult https://docs.microsoft.com/en-us/azure/machine-learning/how-to-train-ml-models
# Fill in blob path to phase 1 training data in argument below
# (batch size / accumulation steps below assume node_count=4 of
# Standard_ND40rs_v2 -- see the table above for other SKUs)
estimator_ph1 = Estimator(source_directory=project_folder,
                          # Compute configuration
                          compute_target = gpu_compute_target,
                          node_count=4,
                          process_count_per_node=1, # separate MPI jobs
                          distributed_training = mpi,
                          use_gpu = True,
                          # supply Docker image
                          use_docker = True,
                          custom_docker_image = image_name,
                          user_managed = True,
                          # Training script parameters
                          script_params = {
                              "--config_file": "bert_config.json",
                              '--input_dir' : ds.path('<blob-path-to-phase1-training-data>').as_mount(),
                              '--output_dir': ds.path(f'output/{experiment_name}/{output_id}/').as_mount(),
                              '--bert_model' : 'bert-large-uncased',
                              '--train_batch_size' : 2048,
                              '--max_seq_length': 128,
                              '--max_predictions_per_seq': 20,
                              '--max_steps' : 7038,
                              '--warmup_proportion' : '0.2843',
                              '--num_steps_per_checkpoint' : 200,
                              '--learning_rate' : '6e-3',
                              '--seed': 42,
                              '--fp16' : '',
                              '--gradient_accumulation_steps' : 16,
                              '--allreduce_post_accumulation' : '',
                              '--allreduce_post_accumulation_fp16' : '',
                              '--do_train' : '',
                              '--use_ib' : '', # pass if infiniband available on SKU
                              '--gpu_memory_limit_gb' : 32 # set to per GPU memory in GB (check SKU)
                          },
                          entry_script = 'run_pretraining_ort.py',
                          inputs = [ds.path('<blob-path-to-phase1-training-data>').as_mount()]
                          )
```
### Run AzureML experiment - Phase 1 of pretraining
```
# Submit phase 1 (check logs from Outputs + logs tab of corresponding link)
run = experiment.submit(estimator_ph1)
RunDetails(run).show()
print(run.get_portal_url())
# Create experiment for phase 2 (max sequence length 512)
experiment_name = 'nvbert-ort-pretraining-phase2'
experiment = Experiment(ws, name=experiment_name)
```
### Create Estimator - Phase 2
Notes before running the following step:
* Update the following step to replace two occurrences of `<blob-path-to-phase2-training-data>` with the actual path in the datastore that contains the training files.
* If you followed instructions at https://github.com/microsoft/onnxruntime-training-examples/blob/master/nvidia-bert/README.md to prepare data, make sure that the data and others files that are not code or config are moved out `workspace` directory. Data files should have been moved to a `Datastore` to use in training.
To fully utilize capacity, we suggest parameters from below table for phase 2.
| VM SKU | node_count | gpu_memory_limit_gb | train_batch_size | gradient_accumulation_steps |
| ------------------ |:------------------:|-----------------:|-----------------:| ---------------------------:|
| Standard_ND40rs_v2 | 1 (8 GPUs total) | 32 | 4096 | 256 |
| Standard_ND40rs_v2 | 2 (16 GPUs total) | 32 | 2048 | 128 |
| Standard_ND40rs_v2 | 4 (32 GPUs total) | 32 | 1024 | 64 |
| Standard_ND40rs_v2 | 8 (64 GPUs total) | 32 | 512 | 32 |
| Standard_NC24rs_v3 | 1 (4 GPUs total) | 16 | 8192 | 1024 |
| Standard_NC24rs_v3 | 2 (8 GPUs total) | 16 | 4096 | 512 |
| Standard_NC24rs_v3 | 4 (16 GPUs total) | 16 | 2048 | 256 |
| Standard_NC24rs_v3 | 8 (32 GPUs total) | 16 | 1024 | 128 |
| Standard_NC24rs_v3 | 16 (64 GPUs total) | 16 | 512 | 64 |
```
# Define training estimator for phase 2
# Fill in blob path to phase 2 training data as well as the phase 1 checkpoint
# in the arguments below
# (batch size / accumulation steps below assume node_count=4 of
# Standard_ND40rs_v2 -- see the table above for other SKUs)
estimator_ph2 = Estimator(source_directory=project_folder,
                          # Compute configuration
                          compute_target = gpu_compute_target,
                          node_count=4,
                          process_count_per_node=1, # separate MPI jobs
                          distributed_training = mpi,
                          use_gpu = True,
                          # Docker image
                          use_docker = True,
                          custom_docker_image = image_name,
                          user_managed = True,
                          # Training script parameters
                          script_params = {
                              # Required Params
                              "--config_file": "bert_config.json",
                              '--input_dir' : ds.path('<blob-path-to-phase2-training-data>').as_mount(),
                              '--output_dir': ds.path(f'output/{experiment_name}/{output_id}/').as_mount(),
                              '--bert_model' : 'bert-large-uncased',
                              '--train_batch_size' : 1024,
                              '--max_seq_length': 512,
                              '--max_predictions_per_seq': 80,
                              '--max_steps' : 1563,
                              '--warmup_proportion' : '0.128',
                              '--num_steps_per_checkpoint' : 200,
                              '--learning_rate' : '4e-3',
                              '--seed': 42,
                              '--fp16' : '',
                              '--gradient_accumulation_steps' : 64,
                              '--allreduce_post_accumulation' : '',
                              '--allreduce_post_accumulation_fp16' : '',
                              '--do_train' : '',
                              '--phase2' : '',
                              '--resume_from_checkpoint' : '',
                              # phase 1 ran for 7038 steps; phase 2 resumes from there
                              '--phase1_end_step' : '7038',
                              '--init_checkpoint' : ds.path('<path-to-checkpoint-from-phase-1>'),
                              '--use_ib' : '', # pass if infiniband available on SKU
                              '--gpu_memory_limit_gb' : 32 # set to per GPU memory in GB (check SKU)
                          },
                          entry_script='run_pretraining_ort.py',
                          inputs=[ds.path('<blob-path-to-phase2-training-data>').as_mount()])
```
Run AzureML experiment - Phase 2 of pretraining
```
# Submit phase 2 run (check logs from Outputs + logs tab of corresponding link)
# Phase 2 resumes from the phase 1 checkpoint configured in estimator_ph2.
run = experiment.submit(estimator_ph2)
RunDetails(run).show()
print(run.get_portal_url())
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Start-to-Finish Example: $\text{GiRaFFE_HO}$ 1D tests
### Author: Patrick Nelson
### Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb)
## This module implements a basic GRFFE code to evolve one-dimensional GRFFE waves.
### NRPy+ Source Code for this module:
1. [GiRaFFEfood_HO/GiRaFFEfood_HO_1D_tests.py](../edit/GiRaFFEfood_HO/GiRaFFEfood_HO_1D_tests.py); [\[**tutorial**\]](Tutorial-GiRaFFEfood_HO_1D_tests.ipynb): Alfvén wave (1D test) initial data, sets all FFE variables in a Cartesian basis.
1. [GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py); [\[**tutorial**\]](Tutorial-GiRaFFE_Higher_Order_v2.ipynb): Generates the right-hand sides for the GRFFE evolution equations in Cartesian coordinates.
We will also borrow C code from the ETK implementation of $\text{GiRaFFE_HO}$
Here we use NRPy+ to generate the C source code necessary to set up initial data for the 1D Alfvén wave test (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).
The entire algorithm is outlined below, with NRPy+-based components highlighted in <font color='green'>green</font>.
1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.
1. (**Step 2** below) <font color='green'>Set gridfunction values to initial data (**[documented in previous module](Tutorial-GiRaFFEfood_HO_1D_tests.ipynb)**).</font>
1. Evolve the initial data forward in time using RK4 time integration. At each RK4 substep, do the following:
1. (**Step 3A** below) <font color='green'>Evaluate GRFFE RHS expressions.</font>
1. (**Step 4** below) Apply singular, curvilinear coordinate boundary conditions [*a la* the SENR/NRPy+ paper](https://arxiv.org/abs/1712.07658)
1. (**Step 3B** below) At the end of each iteration in time, output the <font color='green'>FFE variables</font>. (This is in Step 3B, because Step 4 requires that *all* gridfunctions be defined.)
1. Repeat above steps at two numerical resolutions to confirm convergence to the expected value.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
1. [Step 1](#writec): Use NRPy+ to generate necessary C code to solve the GRFFE equations in Cartesian coordinates
1. [Step 1.a](#mol_timestep): Use NRPy+ to generate the RK4 timestepping method
1. [Step 1.b](#c_headers): Use NRPy+ to generate C headers
1. [Step 1.c](#copy_c): Copy code over from the GiRaFFE C code library
1. [Step 1.d](#initial_data): Import Alfvén Wave initial data C function
1. [Step 1.e](#poynting): Import densitized Poynting flux initial data conversion C function
1. [Step 1.f](#rhs): Output GiRaFFE RHS expressions
1. [Step 2](#mainc): `GiRaFFE_standalone.c`: The Main C Code
1. [Step 2.a](#import_headers): Import needed header files
1. [Step 2.b](#data_type): Set data type
1. [Step 2.c](#free_params): Set free parameters
1. [Step 2.d](#idx4): Declare the IDX4 macro
1. [Step 2.e](#gridfuncs): Define gridfunctions
1. [Step 2.f](#bcs): Boundary Conditions, the A-to-B driver, and the conservative-to-primitive solver
1. [Step 2.g](#timestep): Find the CFL-constrained timestep
1. [Step 2.h](#initial_data_c): Declare the function for the exact solution
1. [Step 2.i](#rhsC): Declare the functions to evaluate the GRFFE RHSs
1. [Step 2.j](#main): The `main()` function
1. [Step 3](#convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order
1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='writec'></a>
# Step 1: Use NRPy+ to generate necessary C code to solve the GRFFE equations in Cartesian coordinates \[Back to [top](#toc)\]
$$\label{writec}$$
We first begin by importing the needed NRPy+ modules; we then create the directory to which we wish to write our C code. Note that we first remove it and all its contents to make sure we have a fresh start. We set the FD-order to two and our spatial dimension to 3. We set our coordinate system to Cartesian and set up the reference metric.
We also set the symmetry axes parameter from `indexedexp` to `2`. This is done because the Alfvén wave is completely independent of $z$, which is direction `2` in Cartesian coordinates; so, we can speed up the code in this case by assuming that *any* derivative in the $z$ direction is 0.
```
import os,sys
# Make the parent NRPy+ directory importable without installing it as a package.
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)

# Step P1: First we import needed core NRPy+ modules
from outputC import *            # NRPy+: Core C code output module
import finite_difference as fin  # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par   # NRPy+: Parameter interface
import grid as gri               # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp         # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm   # NRPy+: Reference metric support

# Step P2: Create C code output directory:
import cmdline_helper as cmd
import shutil, os  # (os already imported above; harmless)
cmd.delete_existing_files("GiRaFFE_standalone_Ccodes/*")  # fresh start: remove stale generated code
Ccodesdir = os.path.join("GiRaFFE_standalone_Ccodes/")
cmd.mkdir(Ccodesdir)

# Set the finite-differencing order to 2 (second-order-accurate centered stencils).
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 2)

# Set spatial dimension (must be 3 for BSSN)
DIM = 3
par.set_parval_from_str("grid::DIM",DIM)

# Then we set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.

# Declare the z axis (axis "2", i.e. the i2 direction in Cartesian coordinates) a
# symmetry axis: the 1D wave data are z-independent, so every z-derivative is set
# to zero, which speeds up the generated code.
par.set_parval_from_str("indexedexp::symmetry_axes","2")
```
<a id='mol_timestep'></a>
## Step 1.a: Use NRPy+ to generate the RK4 timestepping method \[Back to [top](#toc)\]
$$\label{mol_timestep}$$
Next, we will use the `MoLtimestepping` module to write the code for our timestepping algorithm. Here, we will use it to write RK4, but note that by changing the variable `RK_method` we can easily and immediately use many other algorithms.
It is also imperative to pass the correct strings `RHS_string` and `post_RHS_string`. *Any* functions that we want to call between each step need to be included here.
```
# Choices are: Euler, "RK2 Heun", "RK2 MP", "RK2 Ralston", RK3, "RK3 Heun", "RK3 Ralston",
#              SSPRK3, RK4, DP5, DP5alt, CK5, DP6, L6, DP8
RK_method = "RK4"

# Generate timestepping code. At each RK substep:
#   RHS_string:      compute u^0, evaluate finite-difference prerequisites, then the GRFFE RHSs;
#   post_RHS_string: conservative-to-primitive solve, apply boundary conditions,
#                    then recompute B^i from A_i (A-to-B driver).
import MoLtimestepping.C_Code_Generation as MoL
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
RK_order = Butcher_dict[RK_method][1]  # convergence order of the chosen method
cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/"))
MoL.MoL_C_Code_Generation(RK_method,
    RHS_string = """
calc_u0(Nxx_plus_2NGHOSTS,aux_gfs);
quantities_to_FD_for_rhs_eval(Nxx_plus_2NGHOSTS,dxx,xx,RK_INPUT_GFS,aux_gfs);
rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, RK_INPUT_GFS, aux_gfs, RK_OUTPUT_GFS);""",
    post_RHS_string = """
GiRaFFE_HO_conserv_to_prims_FFE(Nxx, Nxx_plus_2NGHOSTS, dxx,xx, RK_OUTPUT_GFS, aux_gfs);
apply_bcs(Nxx, Nxx_plus_2NGHOSTS, RK_OUTPUT_GFS, aux_gfs);
driver_A_to_B(Nxx, Nxx_plus_2NGHOSTS, dxx, RK_OUTPUT_GFS, aux_gfs);
//apply_bcs_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,RK_OUTPUT_GFS,aux_gfs);\n""",
    outdir = os.path.join(Ccodesdir,"MoLtimestepping/"))
```
<a id='c_headers'></a>
## Step 1.b: Use NRPy+ to generate C headers \[Back to [top](#toc)\]
$$\label{c_headers}$$
We will also need to output C header files that are related to the numerical grids and the coordinate system we set up using the reference metric. These end up as very simple files in Cartesian, but they will be more complex in other coordinate systems.
```
#################
# Next output C headers related to the numerical grids we just set up:
#################
# First output the coordinate bounds xxmin[] and xxmax[]:
with open(os.path.join(Ccodesdir,"xxminmax.h"), "w") as file:
    file.write("const REAL xxmin[3] = {"+str(rfm.xxmin[0])+","+str(rfm.xxmin[1])+","+str(rfm.xxmin[2])+"};\n")
    file.write("const REAL xxmax[3] = {"+str(rfm.xxmax[0])+","+str(rfm.xxmax[1])+","+str(rfm.xxmax[2])+"};\n")

# Next output the proper distance between gridpoints in given coordinate system.
# This is used to find the minimum timestep (consumed by find_timestep() in the main C code).
dxx = ixp.declarerank1("dxx",DIM=3)
ds_dirn = rfm.ds_dirn(dxx)
outputC([ds_dirn[0],ds_dirn[1],ds_dirn[2]],["ds_dirn0","ds_dirn1","ds_dirn2"],os.path.join(Ccodesdir,"ds_dirn.h"))

# Generic coordinate NRPy+ file output, Part 2: output the conversion from (x0,x1,x2) to Cartesian (x,y,z)
outputC([rfm.xx_to_Cart[0],rfm.xx_to_Cart[1],rfm.xx_to_Cart[2]],["xCart[0]","xCart[1]","xCart[2]"],
        os.path.join(Ccodesdir,"xx_to_Cart.h"))
```
<a id='copy_c'></a>
## Step 1.c: Copy code over from the GiRaFFE C code library \[Back to [top](#toc)\]
$$\label{copy_c}$$
There are some important C codes that we have already written that are stored elsewhere. We will now copy them to our working directory. More detail about these codes can be found here:
* [Tutorial-GiRaFFE_HO_C_code_library-A2B.ipynb](Tutorial-GiRaFFE_HO_C_code_library-A2B.ipynb)
* [Tutorial-GiRaFFE_HO_C_code_library-C2P_P2C.ipynb](Tutorial-GiRaFFE_HO_C_code_library-C2P_P2C.ipynb)
* [Tutorial-GiRaFFE_HO_C_code_library-BCs.ipynb](Tutorial-GiRaFFE_HO_C_code_library-BCs.ipynb)
```
# Create the destination subdirectories first; cmdline_helper's mkdir works on any
# OS and is a no-op if the directory already exists.
for subdir in ("boundary_conditions/", "A2B/"):
    cmd.mkdir(os.path.join(Ccodesdir, subdir))

# Copy the hand-written GiRaFFE C-code library files into the working directory,
# each into its matching subdirectory ("" = the top of Ccodesdir).
GiRaFFE_library = "GiRaFFE_HO/GiRaFFE_Ccode_library"
for rel_src, dst_subdir in (("A2B/driver_AtoB.c",                                "A2B/"),
                            ("driver_conserv_to_prims_FFE.C",                    ""),
                            ("compute_conservatives_FFE.C",                      ""),
                            ("boundary_conditions/GiRaFFE_boundary_conditions.h","boundary_conditions/")):
    shutil.copy(os.path.join(GiRaFFE_library, rel_src),
                os.path.join(Ccodesdir, dst_subdir))
```
<a id='initial_data'></a>
## Step 1.d: Import Alfvén Wave initial data C function
$$\label{initial_data}$$
The [GiRaFFEfood_HO.GiRaFFEfood_HO_1D_tests.py](../edit/GiRaFFEfood_HO/GiRaFFEfood_HO_AlignedRotator.py) NRPy+ module does the following:
1. Set up Alfvén Wave initial data quantities in the **Cartesian basis**, as [documented here](Tutorial-GiRaFFEfood_HO_1D_tests.ipynb).
```
import GiRaFFEfood_HO.GiRaFFEfood_HO_1D_tests as gf1D
gf1D.GiRaFFEfood_HO_1D_tests()

# Step 2: Build the list of gridfunction assignments (A_i and the Valencia
# 3-velocity) for one region of the wave from the given symbolic expressions.
def _A_v_printlist(AD, ValenciavU):
    """Return the lhrh list writing AD{0,1,2} and ValenciavU{0,1,2} to out_gfs."""
    printlist = [lhrh(lhs=gri.gfaccess("out_gfs","AD"+str(i)), rhs=AD[i]) for i in range(3)]
    printlist += [lhrh(lhs=gri.gfaccess("out_gfs","ValenciavU"+str(i)), rhs=ValenciavU[i]) for i in range(3)]
    return printlist

# Step 4: Generate each region's C kernel and write it to file, converting the
# deprecated IDX4 macro calls to the scalar-parameter IDX4S form.
for region, AD, ValenciavU in (("left",   gf1D.AleftD,   gf1D.ValenciavleftU),
                               ("center", gf1D.AcenterD, gf1D.ValenciavcenterU),
                               ("right",  gf1D.ArightD,  gf1D.ValenciavrightU)):
    CKernel = fin.FD_outputC("returnstring", _A_v_printlist(AD, ValenciavU),
                             params="outCverbose=False")
    with open(os.path.join(Ccodesdir, "GiRaFFEfood_A_v_1D_tests_"+region+".h"), "w") as file:
        file.write(str(CKernel).replace("IDX4","IDX4S"))

# We will also need to declare some C parameters for this initial data
lbound,rbound = par.Cparameters("REAL","GiRaFFEfood_HO_1D",["lbound","rbound"], [-0.1,0.1])
```
<a id='poynting'></a>
## Step 1.e: Import densitized Poynting flux initial data conversion C function
$$\label{poynting}$$
The [GiRaFFEfood_HO.GiRaFFEfood_HO.py](../edit/GiRaFFEfood_HO/GiRaFFEfood_HO.py) NRPy+ module does the following:
1. Set up Exact Wald initial data quantities in the **Cartesian basis**, as [documented here](Tutorial-GiRaFFEfood_HO_Aligned_Rotator.ipynb).
2. Convert initial magnetic fields and Valencia 3-velocities into densitized Poynting flux initial data.
We only use the second functionality here (for now).
```
# Step 2: Create the C code output kernel for the densitized Poynting flux.
gri.glb_gridfcs_list = []  # clear gridfunction registrations left over from the previous cell
import GiRaFFEfood_HO.GiRaFFEfood_HO_Exact_Wald as gfho
gfho.GiRaFFEfood_HO_ID_converter()

# To best format this for the ETK, we'll need to register this gridfunction.
StildeD = ixp.register_gridfunctions_for_single_rank1("EVOL","StildeD")

GiRaFFE_S_to_print = [lhrh(lhs=gri.gfaccess("out_gfs","StildeD"+str(i)), rhs=gfho.StildeD[i])
                      for i in range(3)]
GiRaFFE_S_CKernel = fin.FD_outputC("returnstring",GiRaFFE_S_to_print,params="outCverbose=False")

# Step 4: Write the C code kernel to file.
with open(os.path.join(Ccodesdir,"GiRaFFEfood_HO_Stilde.h"), "w") as file:
    file.write(str(GiRaFFE_S_CKernel))
```
<a id='rhs'></a>
## Step 1.f: Output GiRaFFE RHS expressions
$$\label{rhs}$$
```
gri.glb_gridfcs_list = [] # This is necessary because, since this was originally designed as two ETK thorns,
                          # some gridfunctions are registered twice.
import GiRaFFE_HO.GiRaFFE_Higher_Order_v2 as gho
gho.GiRaFFE_Higher_Order_v2()

# StildeD was declared in GiRaFFEfood_HO, but those registrations were just cleared
# to avoid conflict; so, we re-declare it here.
StildeD = ixp.register_gridfunctions_for_single_rank1("EVOL","StildeD")

# Create the C code output kernel.
# Here, "Prereqs" refers to quantities that must be finite-differenced to construct the RHSs.
Prereqs_to_print = [\
    lhrh(lhs=gri.gfaccess("out_gfs","AevolParen"),rhs=gho.AevolParen),\
    lhrh(lhs=gri.gfaccess("out_gfs","PevolParenU0"),rhs=gho.PevolParenU[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","PevolParenU1"),rhs=gho.PevolParenU[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","PevolParenU2"),rhs=gho.PevolParenU[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD00"),rhs=gho.SevolParenUD[0][0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD01"),rhs=gho.SevolParenUD[0][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD02"),rhs=gho.SevolParenUD[0][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD10"),rhs=gho.SevolParenUD[1][0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD11"),rhs=gho.SevolParenUD[1][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD12"),rhs=gho.SevolParenUD[1][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD20"),rhs=gho.SevolParenUD[2][0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD21"),rhs=gho.SevolParenUD[2][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","SevolParenUD22"),rhs=gho.SevolParenUD[2][2]),\
]

# Only the six independent components of the symmetric inverse 3-metric, plus its determinant.
metric_quantities_to_print = [\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU00"),rhs=gho.gammaUU[0][0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU01"),rhs=gho.gammaUU[0][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU02"),rhs=gho.gammaUU[0][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU11"),rhs=gho.gammaUU[1][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU12"),rhs=gho.gammaUU[1][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU22"),rhs=gho.gammaUU[2][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammadet"),rhs=gho.gammadet),\
]

# Kreiss-Oliger dissipation terms for the StildeD RHS — currently disabled:
# thismodule = __name__
# diss_strength = par.Cparameters("REAL", thismodule, "diss_strength", 1e300) # diss_strength must be set in C, and
#                                                                             # we set it crazy high to ensure this.
# StildeD_dKOD = ixp.declarerank2("StildeD_dKOD","nosym")
# for k in range(DIM):
#     for i in range(DIM):
#         gho.Stilde_rhsD[i] += diss_strength * StildeD_dKOD[i][k]

# To best format this for the ETK, we'll need to register these gridfunctions.
#Stilde_rhsD = ixp.register_gridfunctions_for_single_rank1("AUX","Stilde_rhsD")
#A_rhsD = ixp.register_gridfunctions_for_single_rank1("AUX","A_rhsD")
#psi6Phi_rhs = gri.register_gridfunctions("AUX","psi6Phi_rhs")

# The actual evolution-equation right-hand sides: Stilde_i, A_i, and psi6Phi.
Conservs_to_print = [\
    lhrh(lhs=gri.gfaccess("rhs_gfs","StildeD0"),rhs=gho.Stilde_rhsD[0]),\
    lhrh(lhs=gri.gfaccess("rhs_gfs","StildeD1"),rhs=gho.Stilde_rhsD[1]),\
    lhrh(lhs=gri.gfaccess("rhs_gfs","StildeD2"),rhs=gho.Stilde_rhsD[2]),\
    lhrh(lhs=gri.gfaccess("rhs_gfs","AD0"),rhs=gho.A_rhsD[0]),\
    lhrh(lhs=gri.gfaccess("rhs_gfs","AD1"),rhs=gho.A_rhsD[1]),\
    lhrh(lhs=gri.gfaccess("rhs_gfs","AD2"),rhs=gho.A_rhsD[2]),\
    lhrh(lhs=gri.gfaccess("rhs_gfs","psi6Phi"),rhs=gho.psi6Phi_rhs),\
]

import time
print("Generating C code for GiRaFFE RHSs in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.")
start = time.time()

desc="Evaluate quantities to FD for RHSs"
name="quantities_to_FD_for_rhs_eval"
outCfunction(
    outfile = os.path.join(Ccodesdir,"Prereqs.h"), desc=desc, name=name,
    params = """const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3],
const REAL *in_gfs, REAL *aux_gfs""",
    body = fin.FD_outputC("returnstring",Prereqs_to_print,params="outCverbose=False"),
    loopopts = "AllPoints,Enable_rfm_precompute")

desc="Calculate the metric determinant and inverse"
name="update_metric_det_inverse"
outCfunction(
    outfile = os.path.join(Ccodesdir,"metric_quantities.h"), desc=desc, name=name,
    params = """const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3],REAL *xx[3],REAL *aux_gfs""",
    body = fin.FD_outputC("returnstring",metric_quantities_to_print,params="outCverbose=False"),
    loopopts = "AllPoints,Enable_rfm_precompute")

desc="Evaluate the RHSs"
name="rhs_eval"
# NOTE(review): these params do not match the rhs_eval(Nxx, Nxx_plus_2NGHOSTS, dxx, xx, ...)
# call in RHS_string above — confirm which signature outCfunction actually emits.
outCfunction(
    outfile = os.path.join(Ccodesdir,"Conservs.h"), desc=desc, name=name,
    params = """rfm_struct *restrict rfmstruct,const paramstruct *restrict params,
REAL *restrict in_gfs, REAL *restrict aux_gfs""",
    preloop = """REAL invdx0 = 1.0/dxx[0];
REAL invdx1 = 1.0/dxx[1];
REAL invdx2 = 1.0/dxx[2];""",
    body = fin.FD_outputC("returnstring",Conservs_to_print,params="outCverbose=False"),
    loopopts = "InteriorPoints,Enable_rfm_precompute",
    # Inside the disable radius (used for the aligned rotator; -1 disables it for 1D tests),
    # zero out every RHS so the fields are held fixed there.
    postloop = """LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,Nxx_plus_2NGHOSTS[1],0,Nxx_plus_2NGHOSTS[2]){
if (sqrt(sq_radial_coord(xx[0][i0],xx[1][i1],xx[2][i2])) < min_radius_inside_of_which_conserv_to_prims_FFE_and_FFE_evolution_is_DISABLED){
idx = IDX3(i0,i1,i2);
rhs_gfs[IDX4pt(STILDED0GF, idx)] = 0.0;
rhs_gfs[IDX4pt(STILDED1GF, idx)] = 0.0;
rhs_gfs[IDX4pt(STILDED2GF, idx)] = 0.0;
rhs_gfs[IDX4pt(AD0GF, idx)] = 0.0;
rhs_gfs[IDX4pt(AD1GF, idx)] = 0.0;
rhs_gfs[IDX4pt(AD2GF, idx)] = 0.0;
rhs_gfs[IDX4pt(PSI6PHIGF, idx)] = 0.0;
}
}""")
end = time.time()

# Step 5: Import the function to calculate u0 and write it to a file.
import u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0etc
#u0etc.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU)
with open(os.path.join(Ccodesdir,"computeu0_Cfunction.h"), "w") as file:
    file.write(u0etc.computeu0_Cfunction)
```
<a id='cparams_rfm_and_domainsize'></a>
## Step 1.g: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`
$$\label{cparams_rfm_and_domainsize}$$
```
# Step 3.d.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))

# Step 3.d.ii: Set free_parameters.h — this snippet is pasted verbatim into the
# generated C code, so everything between the triple quotes must remain valid C.
with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file:
    file.write("""
// Step P3: Set free parameters
// Step P3a: Free parameters for the numerical grid
// Cartesian coordinates parameters
params.xmin = -4.0;params.xmax=4.0;
params.ymin = -4.0;params.ymax=4.0;
params.zmin = -4.0;params.zmax=4.0;
/*params.ymin = -0.0125;params.ymax=0.0125;
params.zmin = -0.0125;params.zmax=0.0125;*/
// Step P3b: Free parameters for the spacetime evolution
params.B_p_aligned_rotator = 1.0e-5;
params.Omega_aligned_rotator = 0.2;
// Disable these when doing 1D tests!
params.min_radius_inside_of_which_conserv_to_prims_FFE_and_FFE_evolution_is_DISABLED = -1.0; // Must be equal! v
params.R_NS_aligned_rotator = -1.0; // Must be equal! ^
params.xi = 0.1;
params.diss_strength = 0.3;
params.GAMMA_SPEED_LIMIT = 2000.0;
params.current_sheet_null_v = 0; // Boolean: 1=true,0=false
// Step P3c: Free parameters defining a 1D wave
//const REAL mu_AW = -0.5; // The wave speed of the Alfven wave
#define mu_AW -0.5
params.lbound = -0.1*sqrt(1-mu_AW*mu_AW); // The left -most edge of the wave: divide by the
params.rbound = 0.1*sqrt(1-mu_AW*mu_AW); // The right-most edge of the wave: Lorentz Factor
// Time coordinate parameters
params.t_final = 2.0; /* Final time is set so that at t=t_final,
* data at the origin have not been corrupted
* by the approximate outer boundary condition */
params.CFL_FACTOR = 0.5; // Set the CFL Factor
""")
```
## Step 4: Apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)
```
# Generate the curvilinear boundary-condition C code into the boundary_conditions/ subdirectory.
import CurviBoundaryConditions.CurviBoundaryConditions as cbcs
cmd.mkdir(os.path.join(Ccodesdir,"boundary_conditions/"))  # no-op if it already exists
cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"))
```
## Step 3B:
Now, we will generate the header files used to calculate the magnetic field $B^i$ from the vector potential $A_i$. We can use the function `GiRaFFE_HO_A2B()` that we wrote for this.
```
# Generate the header files that compute the magnetic field B^i from the vector
# potential A_i (the A-to-B driver), into the A2B/ subdirectory.
gri.glb_gridfcs_list = []  # clear registrations left over from the previous cell
import GiRaFFE_HO.GiRaFFE_HO_A2B as A2B
A2B.GiRaFFE_HO_A2B("GiRaFFE_standalone_Ccodes/A2B/")
# StildeD's registration was cleared along with the others above, so re-declare it here.
StildeD = ixp.register_gridfunctions_for_single_rank1("EVOL","StildeD")
```
<a id='mainc'></a>
# Step 2: GiRaFFE_standalone.c: The Main C Code \[Back to [top](#toc)\]
$$\label{mainc}$$
```
# Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
with open(os.path.join(Ccodesdir,"NGHOSTS.h"), "w") as file:
    file.write("// Part P0: Set the number of ghost zones, from NRPy+'s FD_CENTDERIVS_ORDER\n")
    # We do not need upwinding in GiRaFFE, so NGHOSTS = FD_order/2 + 1.
    file.write("#define NGHOSTS "+str(int(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER")/2+1))+"\n")
```
<a id='import_headers'></a>
# Step 2.a: Import needed header files \[Back to [top](#toc)\]
$$\label{import_headers}$$
```
%%writefile $Ccodesdir/GiRaFFE_standalone.c
// Step P1: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "string.h" // Needed for strncmp, etc.
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
// M_PI and M_SQRT1_2 are POSIX extensions, not guaranteed by the C standard's
// <math.h>, so define them ourselves if the compiler does not provide them.
#ifndef M_PI
#define M_PI 3.141592653589793238462643383279502884L
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524400844362104849039L
#endif
#include "time.h"
#include "NGHOSTS.h" // A NRPy+-generated file, which is set based on FD_CENTDERIVS_ORDER.
```
<a id='data_type'></a>
# Step 2.b: Set data type \[Back to [top](#toc)\]
$$\label{data_type}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P2: Add needed #define's to set data type, the IDX4() macro, and the gridfunctions
// Step P2a: set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits.
#define REAL double
```
<a id='free_params'></a>
# Step 2.c: Set free parameters \[Back to [top](#toc)\]
$$\label{free_params}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Declare the struct holding all NRPy+ C parameters (file generated in Step 1.g above).
#include "declare_Cparameters_struct.h"
```
<a id='idx4'></a>
# Step 2.d: Declare the IDX4 macro \[Back to [top](#toc)\]
$$\label{idx4}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P6: Declare the IDX4(gf,i,j,k) macro, which enables us to store 4-dimensions of
//          data in a 1D array. In this case, consecutive values of "i"
//          (all other indices held to a fixed value) are consecutive in memory, where
//          consecutive values of "j" (fixing all other indices) are separated by
//          Nxx_plus_2NGHOSTS[0] elements in memory. Similarly, consecutive values of
//          "k" are separated by Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1] in memory, etc.
// The "S" variants use the scalar constants Nxx_plus_2NGHOSTS{0,1,2}; the array-based
// versions further below are kept for older generated code.
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
// IDX3 indexes one gridfunction's 3D data; IDX4pt(g,idx) assumes idx = IDX3(i,j,k).
// Much faster if idx can be reused over and over:
#define IDX3(i,j,k) ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * (k) ) )
// To be deprecated soon:
#define IDX4(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * ( (k) + Nxx_plus_2NGHOSTS[2] * (g) ) ) )
#define IDX4pt(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2]) * (g) )
```
<a id='gridfuncs'></a>
# Step 2.e: Define gridfunctions \[Back to [top](#toc)\]
$$\label{gridfuncs}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P7: Set #define's for GRFFE gridfunctions. C code generated above
#include "boundary_conditions/gridfunction_defines.h"
// Triple-nested loop over [i0min,i0max) x [i1min,i1max) x [i2min,i2max);
// i0 is innermost, matching the memory layout chosen by IDX3/IDX4 above.
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
// OpenMP-parallel flat loop over every point of every evolved gridfunction.
#define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \
for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)
#include "boundary_conditions/EigenCoord_xx_to_Cart.h"
```
<a id='bcs'></a>
# Step 2.f: Boundary Conditions, the A-to-B driver, and the conservative-to-primitive solver \[Back to [top](#toc)\]
$$\label{bcs}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P8: Include basic functions needed to impose boundary conditions.
//#include "../CurviBoundaryConditions/curvilinear_parity_and_outer_boundary_conditions.h"
#include "boundary_conditions/GiRaFFE_boundary_conditions.h"
// Step P8c: Import C files for the A-to-B driver and the conservative-to-primitive solver.
// Note these are .c/.C source files included directly, so the whole program is one translation unit.
#include "A2B/driver_AtoB.c"
#include "driver_conserv_to_prims_FFE.C"
```
<a id='timestep'></a>
# Step 2.g: Find the CFL-constrained timestep \[Back to [top](#toc)\]
$$\label{timestep}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P9: Find the CFL-constrained timestep.
// Scans the grid interior for the smallest proper distance between neighboring
// gridpoints (ds_dirn.h, generated from the reference metric, sets ds_dirn{0,1,2}
// from dxx{0,1,2} and xx{0,1,2}) and returns CFL_FACTOR times that minimum.
REAL find_timestep(const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3],REAL *xx[3], const REAL CFL_FACTOR) {
  const REAL dxx0 = dxx[0], dxx1 = dxx[1], dxx2 = dxx[2];
  REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision.
  LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) {
    const REAL xx0 = xx[0][i0], xx1 = xx[1][i1], xx2 = xx[2][i2];
    REAL ds_dirn0, ds_dirn1, ds_dirn2;
#include "ds_dirn.h"
    //#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) // Provided by driver_conserv_to_prims_FFE.C
    // Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2);
    dsmin = MIN(dsmin,MIN(ds_dirn0,MIN(ds_dirn1,ds_dirn2)));
  }
  return dsmin*CFL_FACTOR;
}
```
<a id='initial_data_c'></a>
# Step 2.h: Declare the function for the exact solution \[Back to [top](#toc)\]
$$\label{initial_data_c}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P10: Declare the function for the exact solution. time==0 corresponds to the initial data.
// At every gridpoint this sets the flat-spacetime metric quantities (gamma_ij = delta_ij,
// beta^i = 0, alpha = 1), then fills A_i and the Valencia 3-velocity from the
// left/center/right wave kernels according to where x lies relative to [lbound, rbound].
void initial_data(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *out_gfs, REAL *aux_gfs) {
#pragma omp parallel for
  LOOP_REGION(0,Nxx_plus_2NGHOSTS[0], 0,Nxx_plus_2NGHOSTS[1], 0,Nxx_plus_2NGHOSTS[2]) {
    const int idx = IDX3(i0,i1,i2);
    // Flat spatial metric, zero shift, unit lapse:
    aux_gfs[IDX4pt(GAMMADD00GF, idx)] = 1.0;
    aux_gfs[IDX4pt(GAMMADD01GF, idx)] = 0.0;
    aux_gfs[IDX4pt(GAMMADD02GF, idx)] = 0.0;
    aux_gfs[IDX4pt(GAMMADD11GF, idx)] = 1.0;
    aux_gfs[IDX4pt(GAMMADD12GF, idx)] = 0.0;
    aux_gfs[IDX4pt(GAMMADD22GF, idx)] = 1.0;
    aux_gfs[IDX4pt(BETAU0GF, idx)] = 0.0;
    aux_gfs[IDX4pt(BETAU1GF, idx)] = 0.0;
    aux_gfs[IDX4pt(BETAU2GF, idx)] = 0.0;
    aux_gfs[IDX4pt(ALPHAGF, idx)] = 1.0;
    REAL xx0 = xx[0][i0];
    REAL xx1 = xx[1][i1];
    REAL xx2 = xx[2][i2];
    // The NRPy+-generated kernels below read xx0,xx1,xx2 and write A_i and ValenciavU^i:
    if(xx0<=lbound) {
#include "GiRaFFEfood_A_v_1D_tests_left.h"
    }
    else if (xx0<rbound) {
#include "GiRaFFEfood_A_v_1D_tests_center.h"
    }
    else {
#include "GiRaFFEfood_A_v_1D_tests_right.h"
    }
    out_gfs[IDX4pt(PSI6PHIGF, idx)] = 0.0; // scalar potential starts at zero
  }
}
// Compute the conserved Stilde_i at every point from the initial-data primitives
// (GiRaFFEfood_HO_Stilde.h is the NRPy+-generated kernel written in Step 1.e).
void initial_Stilde_from_ID(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],const REAL *aux_gfs, REAL *out_gfs) {
  LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,Nxx_plus_2NGHOSTS[1],0,Nxx_plus_2NGHOSTS[2]){
#include "GiRaFFEfood_HO_Stilde.h"
  }
}
```
<a id='rhsC'></a>
# Step 2.i: Declare the functions to evaluate the GRFFE RHSs \[Back to [top](#toc)\]
$$\label{rhsC}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// Step P11: Declare the functions to evaluate the GRFFE RHSs
// Step P11a: Create the function to calculate u4upperZero:
// Compute u^0, the zeroth (time) component of the 4-velocity, at every grid
// point (ghost zones included) from the Valencia 3-velocity, the lapse alpha,
// and the spatial metric gammaDD stored in aux_gfs.
//
// The pointwise algebra is generated code in "computeu0_Cfunction.h", which
// reads the local variables declared below (ValenciavU*, alpha, gammaDD*) and
// sets u0. The Valencia velocities are written back to aux_gfs afterward --
// presumably because the included code may modify them (e.g. a velocity
// limiter); confirm against computeu0_Cfunction.h.
void calc_u0(const int Nxx_plus_2NGHOSTS[3],REAL *aux_gfs)
{
  int idx;
  LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,Nxx_plus_2NGHOSTS[1],0,Nxx_plus_2NGHOSTS[2]){
    idx = IDX3(i0,i1,i2);
    REAL u0;
    // Gather the pointwise inputs expected by computeu0_Cfunction.h into
    // local variables with the exact names that header uses:
    REAL ValenciavU0 = aux_gfs[IDX4pt(VALENCIAVU0GF,idx)];
    REAL ValenciavU1 = aux_gfs[IDX4pt(VALENCIAVU1GF,idx)];
    REAL ValenciavU2 = aux_gfs[IDX4pt(VALENCIAVU2GF,idx)];
    REAL alpha = aux_gfs[IDX4pt(ALPHAGF,idx)];
    REAL gammaDD00 = aux_gfs[IDX4pt(GAMMADD00GF,idx)];
    REAL gammaDD01 = aux_gfs[IDX4pt(GAMMADD01GF,idx)];
    REAL gammaDD02 = aux_gfs[IDX4pt(GAMMADD02GF,idx)];
    REAL gammaDD11 = aux_gfs[IDX4pt(GAMMADD11GF,idx)];
    REAL gammaDD12 = aux_gfs[IDX4pt(GAMMADD12GF,idx)];
    REAL gammaDD22 = aux_gfs[IDX4pt(GAMMADD22GF,idx)];
    #include "computeu0_Cfunction.h"
    // Store the result, plus the (possibly modified) Valencia velocities.
    aux_gfs[IDX4pt(U4UPPERZEROGF,idx)] = u0;
    aux_gfs[IDX4pt(VALENCIAVU0GF,idx)] = ValenciavU0;
    aux_gfs[IDX4pt(VALENCIAVU1GF,idx)] = ValenciavU1;
    aux_gfs[IDX4pt(VALENCIAVU2GF,idx)] = ValenciavU2;
  }
}
// Step P11b: Set the quantities to be differentiated by finite difference for the RHSs--ALWAYS run immediately
// before rhs_eval()
#include "Prereqs.h"
// While this code is generally Cartesian, we will need a radial coordinate for the evolution.
// NOTE: despite the "radial coordinate" wording above, this returns the
// SQUARED radius r^2 = x^2 + y^2 + z^2 (as the function name indicates).
REAL sq_radial_coord(const REAL x,const REAL y,const REAL z) { return x*x+y*y+z*z; }
// Step P11c: Set the RHSs themselves.
#include "Conservs.h"
```
<a id='main'></a>
# Step 2.j: The `main()` function \[Back to [top](#toc)\]
$$\label{main}$$
```
%%writefile -a $Ccodesdir/GiRaFFE_standalone.c
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up scalar wave initial data
// Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
// applying quadratic extrapolation outer boundary conditions.
// Step 3: Output relative error between numerical and exact solution.
// Step 4: Free all allocated memory
// main() driver function. Program flow:
//   Step 0: Read command-line input, set up the grid structure, allocate
//           memory for gridfunctions, and set up coordinates.
//   Step 1: Set up the GRFFE initial data (A_i, B^i, Valencia v^i, StildeD).
//   Step 2: Evolve the initial data forward in time using the Method of Lines
//           with the chosen RK-like algorithm, applying boundary conditions.
//   Step 3: Every 10 iterations, output the numerical solution alongside the
//           exact (advected) solution along a 1D slice, for convergence tests.
//   Step 4: Free all allocated memory.
int main(int argc, const char *argv[]) {
  // Step 0a: Read command-line input, error out if nonconformant.
  if((argc != 4 && argc != 5) || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < 2 /* FIXME; allow for axisymmetric sims */) {
    // FIX: usage message now matches the actual checks above: three required
    // grid dimensions plus an optional CFL factor, and Nx[] must be at least
    // (not strictly larger than) NGHOSTS.
    fprintf(stderr,"Error: Expected three command-line arguments plus an optional CFL factor: ./BrillLindquist_Playground Nx0 Nx1 Nx2 [CFL_FACTOR],\n");
    fprintf(stderr,"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n");
    fprintf(stderr,"Nx[] MUST BE no smaller than NGHOSTS (= %d)\n",NGHOSTS);
    exit(1);
  }
  if(argc == 5) {
    CFL_FACTOR = strtod(argv[4],NULL);
    if(CFL_FACTOR > 0.5 && atoi(argv[3])!=2) {
      fprintf(stderr,"WARNING: CFL_FACTOR was set to %e, which is > 0.5.\n",CFL_FACTOR);
      fprintf(stderr,"         This will generally only be stable if the simulation is purely axisymmetric\n");
      fprintf(stderr,"         However, Nx2 was set to %d>2, which implies a non-axisymmetric simulation\n",atoi(argv[3]));
    }
  }
  // Step 0b: Set up numerical grid structure, first in space...
  const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) };
  if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) {
    fprintf(stderr,"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n");
    fprintf(stderr,"       For example, in case of angular directions, proper symmetry zones will not exist.\n");
    exit(1);
  }
  const int Nxx_plus_2NGHOSTS[3] = { Nxx[0]+2*NGHOSTS, Nxx[1]+2*NGHOSTS, Nxx[2]+2*NGHOSTS };
  const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2];
  #include "xxminmax.h"
  // Step 0c: Allocate memory for gridfunctions.
  REAL *aux_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUX_GFS * Nxx_plus_2NGHOSTS_tot);
  #include "../MoLtimestepping/RK_Allocate_Memory.h"
  // Initialize all gridfunctions to +infinity (1.0/0.0) so that any use of
  // uninitialized data is conspicuous in the output.
  for(int i=0;i<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;i++) {
    y_n_gfs[i] = 1.0/0.0;
  }
  for(int i=0;i<NUM_AUX_GFS * Nxx_plus_2NGHOSTS_tot;i++) {
    aux_gfs[i] = 1.0/0.0;
  }
  // Separate storage for the exact (advected) solution used for convergence
  // testing inside the main loop below.
  REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
  REAL *aux_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUX_GFS * Nxx_plus_2NGHOSTS_tot);
  // Step 0c: Set free parameters, overwriting Cparameters defaults
  //          by hand or with command-line input, as desired.
  #include "free_parameters.h"
  #include "set_Cparameters-nopointer.h"
  // Step 0d: Set up space and time coordinates.
  // Step 0d.i: Set \Delta x^i on uniform grids.
  REAL dxx[3];
  for(int i=0;i<3;i++) dxx[i] = (xxmax[i] - xxmin[i]) / ((REAL)Nxx[i]);
  for(int i=0;i<3;i++) printf("dxx[%d] = %.15e\n",i,dxx[i]);
  // Step 0d.ii: Set up uniform coordinate grids.
  REAL *xx[3];
  for(int i=0;i<3;i++) {
    xx[i] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS[i]);
    for(int j=0;j<Nxx_plus_2NGHOSTS[i];j++) {
      xx[i][j] = xxmin[i] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*dxx[i]; // Cell-centered grid.
    }
  }
  // Step 0d.iii: Set timestep based on smallest proper distance between gridpoints and CFL factor.
  REAL dt = find_timestep(Nxx_plus_2NGHOSTS, dxx,xx, CFL_FACTOR);
  printf("# Timestep set to = %e\n",(double)dt);
  int N_final = (int)(t_final / dt + 0.5); // The number of iterations in time.
                                           // Add 0.5 to account for C rounding down integers.
  // Step 1: Set up initial data to an exact solution at time=0:
  // Step 1a: Set up the exact initial data:
  initial_data(Nxx_plus_2NGHOSTS, xx, y_n_gfs, aux_gfs);
  // Step 1b: Run the initial A-to-B driver:
  driver_A_to_B(Nxx, Nxx_plus_2NGHOSTS, dxx, y_n_gfs, aux_gfs);
  // Step 1c: Solve for StildeD from BU and ValenciavU:
  initial_Stilde_from_ID(Nxx_plus_2NGHOSTS, xx, aux_gfs, y_n_gfs);
  // Step 1d: Apply boundary conditions, as initial data
  //          are sometimes ill-defined in ghost zones.
  //          E.g., spherical initial data might not be
  //          properly defined at points where r=-1.
  // Step 1e: Run the conservative-to-primitive solver:
  GiRaFFE_HO_conserv_to_prims_FFE(Nxx, Nxx_plus_2NGHOSTS, dxx,xx, y_n_gfs, aux_gfs);
  //apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions,NUM_EVOL_GFS,evol_gf_parity, y_n_gfs);
  apply_bcs(Nxx, Nxx_plus_2NGHOSTS, y_n_gfs, aux_gfs);
  // Rerun AtoB for consistency:
  driver_A_to_B(Nxx, Nxx_plus_2NGHOSTS, dxx, y_n_gfs, aux_gfs);
  // Step 3: Start the timer, for keeping track of how fast the simulation is progressing.
  struct timespec start, end;
  clock_gettime(CLOCK_REALTIME, &start);
  // Step 4: Integrate the initial data forward in time using the Method of Lines.
  for(int n=0;n<=N_final;n++) { // Main loop to progress forward in time.
    /* Step 5: Output 1D slice data files, for visualization. Do this first to get initial data. */
    // For convergence testing, shift the grid x -> x - mu_AW*n*dt and recompute
    // the initial data there, giving the exact (advected-wave) solution at t = n*dt.
    LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,1,0,1) {
      xx[0][i0] += -mu_AW*(n)*dt;
    }
    // Recalculate the initial data on the shifted grid, using the same process as before for consistency.
    initial_data(Nxx_plus_2NGHOSTS, xx, evol_gfs_exact, aux_gfs_exact);
    driver_A_to_B(Nxx, Nxx_plus_2NGHOSTS, dxx, evol_gfs_exact, aux_gfs_exact);
    initial_Stilde_from_ID(Nxx_plus_2NGHOSTS, xx, aux_gfs_exact, evol_gfs_exact);
    GiRaFFE_HO_conserv_to_prims_FFE(Nxx, Nxx_plus_2NGHOSTS, dxx,xx, evol_gfs_exact, aux_gfs_exact);
    driver_A_to_B(Nxx, Nxx_plus_2NGHOSTS, dxx, evol_gfs_exact, aux_gfs_exact);
    apply_bcs(Nxx, Nxx_plus_2NGHOSTS, evol_gfs_exact, aux_gfs_exact);
    // And now, we'll set the grid back to rights.
    LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,1,0,1) {
      xx[0][i0] -= -mu_AW*(n)*dt;
    }
    if(n%10 == 0) {
      printf("Writing output...\n");
      char filename[100];
      sprintf(filename,"out%d-%08d_numer.txt",Nxx[0],n);
      FILE *out2D_numer = fopen(filename, "w");
      // Output a 1D slice along x through the midpoint of the y and z directions.
      LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],Nxx_plus_2NGHOSTS[1]/2,Nxx_plus_2NGHOSTS[1]/2+1,Nxx_plus_2NGHOSTS[2]/2,Nxx_plus_2NGHOSTS[2]/2+1) {
        const int idx = IDX3(i0,i1,i2);
        REAL xx0 = xx[0][i0];
        REAL xx1 = xx[1][i1];
        REAL xx2 = xx[2][i2];
        REAL xCart[3];
        #include "xx_to_Cart.h"
        fprintf(out2D_numer,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n",
                xCart[0],xCart[1],xCart[2],
                aux_gfs[IDX4pt(BU0GF,idx)],aux_gfs[IDX4pt(BU1GF,idx)],aux_gfs[IDX4pt(BU2GF,idx)],
                y_n_gfs[IDX4pt(AD0GF,idx)],y_n_gfs[IDX4pt(AD1GF,idx)],y_n_gfs[IDX4pt(AD2GF,idx)],
                y_n_gfs[IDX4pt(STILDED0GF,idx)],y_n_gfs[IDX4pt(STILDED1GF,idx)],y_n_gfs[IDX4pt(STILDED2GF,idx)],
                aux_gfs[IDX4pt(VALENCIAVU0GF,idx)],aux_gfs[IDX4pt(VALENCIAVU1GF,idx)],aux_gfs[IDX4pt(VALENCIAVU2GF,idx)]);
      }
      fclose(out2D_numer);
      // Now write the exact solution along the same slice.
      printf("Writing EXACT output...\n");
      sprintf(filename,"out%d-%08d_exact.txt",Nxx[0],n);
      FILE *out2D_exact = fopen(filename, "w");
      LOOP_REGION(0,Nxx_plus_2NGHOSTS[0], Nxx_plus_2NGHOSTS[1]/2,Nxx_plus_2NGHOSTS[1]/2+1,Nxx_plus_2NGHOSTS[2]/2,Nxx_plus_2NGHOSTS[2]/2+1) {
        const int idx = IDX3(i0,i1,i2);
        REAL xx0 = xx[0][i0];
        REAL xx1 = xx[1][i1];
        REAL xx2 = xx[2][i2];
        REAL xCart[3];
        #include "xx_to_Cart.h"
        fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n",
                xCart[0],xCart[1],xCart[2],
                aux_gfs_exact[IDX4pt(BU0GF,idx)],aux_gfs_exact[IDX4pt(BU1GF,idx)],aux_gfs_exact[IDX4pt(BU2GF,idx)],
                evol_gfs_exact[IDX4pt(AD0GF,idx)],evol_gfs_exact[IDX4pt(AD1GF,idx)],evol_gfs_exact[IDX4pt(AD2GF,idx)],
                evol_gfs_exact[IDX4pt(STILDED0GF,idx)],evol_gfs_exact[IDX4pt(STILDED1GF,idx)],evol_gfs_exact[IDX4pt(STILDED2GF,idx)],
                aux_gfs_exact[IDX4pt(VALENCIAVU0GF,idx)],aux_gfs_exact[IDX4pt(VALENCIAVU1GF,idx)],aux_gfs_exact[IDX4pt(VALENCIAVU2GF,idx)]);
      }
      fclose(out2D_exact);
    }
    // Step 6: Step forward one timestep (t -> t+dt) in time using
    //         chosen RK-like MoL timestepping algorithm.
    #include "../MoLtimestepping/RK_MoL.h"
    // Abort immediately if any evolved field has gone NaN in the grid interior.
    for(int gf=0;gf<NUM_EVOL_GFS;gf++) {
      LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS,NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS,NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS){
        if(isnan(y_n_gfs[IDX4(gf,i0,i1,i2)])) {
          printf("ERROR, FOUND A NAN ON GF %d AT POINT %d %d %d\n",gf,i0,i1,i2);
          exit(1);
        }
      }
    }
    // This function will now write the Exact solution for StildeD to the boundaries.
    //apply_bcs_EXACT_StildeD(Nxx, Nxx_plus_2NGHOSTS, xx,y_n_gfs,evol_gfs_exact);
    // Progress indicator: measure average time per iteration.
    clock_gettime(CLOCK_REALTIME, &end);
    // FIX: use the LL suffix so the multiplication is done in 64 bits even on
    // platforms where long is 32 bits (1e9 * seconds overflows 32-bit long).
    const long long unsigned int time_in_ns = 1000000000LL * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
    // FIX: guard against division by zero on the very first iteration (n==0),
    // which previously produced inf/NaN in the progress statistics.
    const REAL s_per_iteration_avg = ((REAL)time_in_ns / (REAL)(n > 0 ? n : 1)) / 1.0e9;
    const int iterations_remaining = N_final - n;
    const REAL time_remaining_in_mins = s_per_iteration_avg * (REAL)iterations_remaining / 60.0;
    const REAL num_RHS_pt_evals = (REAL)(Nxx[0]*Nxx[1]*Nxx[2]) * 4.0 * (REAL)n; // 4 RHS evals per gridpoint for RK4
    const REAL RHS_pt_evals_per_sec = num_RHS_pt_evals / ((REAL)time_in_ns / 1.0e9);
    // Progress indicator printing to stderr:
    fprintf(stderr,"%c[2K", 27); // Clear the line
    fprintf(stderr,"It: %d t=%.2f | %.1f%%; ETA %.0f s | t/h %.2f | gp/s %.2e\r", // \r is carriage return, move cursor to the beginning of the line
            n, n * (double)dt, (double)(100.0 * (REAL)n / (REAL)N_final),
            (double)time_remaining_in_mins*60, (double)(dt * 3600.0 / s_per_iteration_avg), (double)RHS_pt_evals_per_sec);
    fflush(stderr); // Flush the stderr buffer
  } // End main loop to progress forward in time.
  fprintf(stderr,"\n"); // Clear the line.
  /* Step 7: Free all allocated memory */
  #include "../MoLtimestepping/RK_Free_Memory.h"
  free(aux_gfs);
  free(aux_gfs_exact);
  free(evol_gfs_exact);
  for(int i=0;i<3;i++) free(xx[i]);
  return 0;
}
```
Now that the C code is put together, we will compile and run the code.
```
import cmdline_helper as cmd
print("Now compiling, should take ~2 seconds...\n")
start = time.time()
cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_standalone.c"), "GiRaFFE_standalone_1D")
# Switching back so I can use a nan checker
#!gcc -g -O2 -fopenmp GiRaFFE_standalone/GiRaFFE_standalone.c -o GiRaFFE_standalone_1D -lm
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
cmd.delete_existing_files("out128*.txt")
cmd.delete_existing_files("out2560*.txt")
print("Now running at low resolution. Should take ~20 seconds...\n")
start = time.time()
#cmd.Execute("GiRaFFE_standalone_1D", "1280 2 2 0.5")
# Switching back to see more output
!taskset -c 0,1 ./GiRaFFE_standalone_1D 1280 2 2 0.5
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
print("Now running at medium resolution. Should take ~300 seconds...\n")
start = time.time()
#cmd.Execute("GiRaFFE_standalone_1D", "2560 8 8 0.5")
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
```
<a id='convergence'></a>
# Step 3: Code validation: Verify that relative error in numerical solution converges to zero at the expected order \[Back to [top](#toc)\]
$$\label{convergence}$$
Now, we will load the data generated by the simulation and plot it in order to test for convergence.
```
import numpy as np
import matplotlib.pyplot as plt
Data_numer = np.loadtxt("out256-00000010_numer.txt")
#Data_oldbc = np.loadtxt("oldbc_out1280-00000030_numer.txt")
#Data_num_2 = np.loadtxt("out2560-00000600_numer.txt")
Data_exact = np.loadtxt("out256-00000010_exact.txt")
#Data_exa_2 = np.loadtxt("out2560-00000600_exact.txt")
#path = "/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave/"
#Data_oldG = np.loadtxt(path + "giraffe-grmhd_primitives_bi.x.asc")
#n=32
#Data_oldG_atT = Data_oldG[1285*n:1285*(n+1)-1,:]
#predicted_order = 4.0
column = 5
plt.figure()
#plt.plot(Data_numer[:,0],Data_numer[:,column]-Data_oldbc[:,column],'.')
#plt.plot(Data_num_2[:,0],(Data_num_2[:,column]-Data_exa_2[:,column])*(2**predicted_order),'.')
plt.plot(Data_numer[:,0],Data_numer[:,column],'.')
plt.plot(Data_exact[:,0],Data_exact[:,column],'.')
#plt.plot(Data_oldG_atT[1:-1,9],)
#plt.plot(Data_numer[1:-2,1],(Data_numer[0:-3,column]-Data_numer[2:-1,column])/3.125e-3,'.-')
#plt.plot(Data_numer[1:-2,1],(Data_oldbc[0:-3,column]-Data_oldbc[2:-1,column])/3.125e-3,'.')
#plt.plot(Data_numer[0,0],Data_numer[0,column],'o')
#plt.xlim(-0.15,0.15)
#plt.ylim(-0.2e-10,0.2e-10)
plt.xlabel("y")
plt.ylabel("BU2")
plt.show()
```
This code will create an animation of the wave over time to hopefully show us where things go wrong.
```
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
from IPython.display import HTML
import matplotlib.image as mgimg
import glob
import sys
from matplotlib import animation
# Render one PNG frame per (numerical, exact) output-file pair for the movie.
globby = glob.glob('out1280-00*.txt')
file_list = []
for x in sorted(globby):
    file_list.append(x)

# BUG FIX: under Python 3, `/` is true division and yields a float, which
# range() rejects with a TypeError; use integer (floor) division instead.
number_of_files = len(file_list) // 2

for timestep in range(number_of_files):
    fig = plt.figure()
    # Sorted names pair up as ..._exact.txt / ..._numer.txt per timestep;
    # even index = first of the pair, odd index = second.
    numer_filename = file_list[2 * timestep]
    exact_filename = file_list[2 * timestep + 1]
    Numer = np.loadtxt(numer_filename)
    Exact = np.loadtxt(exact_filename)
    plt.title("Alfven Wave")
    plt.xlabel("x")
    plt.ylabel("BU2")
    plt.xlim(-4.0, 4.0)
    plt.ylim(1.14, 1.57)
    plt.plot(Numer[:, 0], Numer[:, 5])
    plt.plot(Exact[:, 0], Exact[:, 5], '.')
    savefig(numer_filename + ".png", dpi=150)
    plt.close(fig)
    sys.stdout.write("%c[2K" % 27)  # ANSI escape sequence: clear the current line
    sys.stdout.write("Processing file " + numer_filename + "\r")
    sys.stdout.flush()
## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ##
# https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame
# https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation
!rm -f GiRaFFE_HO-1D_tests.mp4
# Combine the per-timestep PNGs into an MP4 animation.
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
myimages = []
# BUG FIX: integer (floor) division `//` -- Python 3 `/` yields a float that
# range() cannot consume.
for i in range(len(file_list) // 2):
    img = mgimg.imread(file_list[2 * i] + ".png")
    imgplot = plt.imshow(img)
    myimages.append([imgplot])
ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000)
plt.close()
ani.save('GiRaFFE_HO-1D_tests.mp4', fps=5, dpi=150)
%%HTML
<video width="480" height="360" controls>
<source src="GiRaFFE_HO-1D_tests.mp4" type="video/mp4">
</video>
```
<a id='latex_pdf_output'></a>
# Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Start_to_Finish-GiRaFFE_HO-1D_tests.pdf](Tutorial-Start_to_Finish-GiRaFFE_HO-1D_tests.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-Start_to_Finish-GiRaFFE_HO-1D_tests.ipynb
!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-GiRaFFE_HO-1D_tests.tex
!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-GiRaFFE_HO-1D_tests.tex
!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-GiRaFFE_HO-1D_tests.tex
!rm -f Tut*.out Tut*.aux Tut*.log
```
| github_jupyter |
# **Multiple Sequence Alignment Workflow**
In this notebook the actual multiple sequence alignment (MSA) analysis work flow is explained.
## **Goals**
1. To conduct a MSA on the combined African Insecta data sets listed below
>1. **enafroCOI_Under500_data.fasta: 6,715 sequences**
>2. **enafroCOI_Over700_data.fasta: 1,607 sequences**
>3. **enafroCOI_650to660_data.fasta: 99,698 sequences**
>4. **enafroCOI_500to700_data-650to660.fasta: 85,157 sequences**
2. For the above listed data sets; conduct a rigorous subsetting and alignment until a good quality sequence alignment is generated.
3. Extract only sequences from these alignments that fit the right loci and length of the 658 bp 5' cytochrome c oxidase subunit 1 gene; they must extend to both ends of the gene, with or without gaps within the sequence. Only a maximum of ten gaps, '-', and ten unidentified nucleotides, 'N', are allowed at the terminals, 3' or 5', of the sequence.
4. Conduct the final alignment of combined cleaned up data set from all the four sets of data above in one set.
## **Tasks**
1. Perform an MSA on eafroCOI_650to660_data.fasta (*24,475 sequences*), East African insect sequences, and visualise it in SeaView to assess the quality of the alignment. This alignment will be used to define the 3' and 5' ends of the COI-5P sequences. It is on this basis that trimming will be done and comparisons to other data sets will be made.
Refine the alignment if necessary, or use fewer sequences — but enough of them to be accurately representative of all possible COI-5P lengths.
2. Conduct MSA on enafroCOI_Under500_data.fasta and refine when needed to acquire a good quality alignment. Compare the alignment to the reference eafroCOI_650to660_data.fasta alignment and trim at the determined 3' and 5' positions. Take the output and delete sequences that have in excess of ten end gaps "-" in the aligned nucleotide blocks/columns, excluding largely gappy columns from the counting of the ten gaps. save the output.
3. carry out task 2 above on enafroCOI_Over700_data.fasta
4. carry out task 2 on enafroCOI_650to660_data.fasta
5. Then finally on enafroCOI_500to700_data-650to660.fasta: There are a lot of sequences and possibly a lot of impurities in this set. Will require a lot of subsetting and iterations.
6. Concatenate all the outputs from 2, 3, 4, and 5; then align them again and refine the alignment further.
### **1. MSA alignment on eafroCOI_650to660_data.fasta (24,475 sequences)**
### **2. MSA alignment on enafroCOI_Under500_data.fasta (6,715 sequences)**
### **3. MSA alignment on enafroCOI_Over700_data.fasta (1,607 sequences)**
### **4. MSA alignment on enafroCOI_650to660_data.fasta (99,698 sequences)**
### **5. MSA alignment on enafroCOI_500to700_data-650to660.fasta (85,157 sequences)**
### **6. Concatenate all the outputs from 2, 3, 4, and 5;** then align them again and refine the alignment further.
#### **6.1.**
#### **6._**
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
wc -l enafroCOI_all_clean*
%%bash
cd ../data/output/alignment/pasta_output/aligned/
source ../../../../../code/process_all_input_files.sh
delete_shortseqs_N enafroCOI_all_clean.aln << EOF
10
10
EOF
```
Visualise the output file "enafroCOI_all_clean_sN10-eN10.aln" in Seaview and remove gap sites only, possibly created by the removal of some sequences in the step above. Save the output file.
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
seaview enafroCOI_all_clean_sN10-eN10.aln
```
Delete Outgroups (Crustacea, Arachnida and Chilopoda), they were initially included in the alignment of enafroCOI_all_clean_sN10-eN10.aln.
See the code below:
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
source ../../../../../code/process_all_input_files.sh
delete_unwanted enafroCOI_all_clean_sN10-eN10.aln << EOF
1
Crustacea
1
Arachnida
1
Chilopoda
2
EOF
#Renaming
mv enafroCOI_all_clean_sN10-eN10_generaNA_undesired.fasta outgroups.aln
```
Then sort the "enafroCOI_all_clean_sN10-eN10.aln" into two files:
**1. enafroCOI_all_clean_sN10-eN10_genera.aln** - contains African COI sequences with genus names
**2. enafroCOI_all_clean_sN10-eN10_generaNA.aln** - Contains African COI sequences without genus names
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
source ../../../../../code/process_all_input_files.sh
cp enafroCOI_all_clean_sN10-eN10.aln enafroCOI_all_clean_sN10-eN10.aln2
delete_unwanted enafroCOI_all_clean_sN10-eN10.aln2 << EOF
1
gs-NA
2
EOF
#Renaming
mv enafroCOI_all_clean_sN10-eN10.aln2 enafroCOI_all_clean_sN10-eN10_genera.aln
mv enafroCOI_all_clean_sN10-eN10_undesired.aln enafroCOI_all_clean_sN10-eN10_generaNA.aln
```
Then sort the data into two files:
**1. enafroCOI_all_clean_sN10-eN10_eafro.aln** - Contains East African sequences; Kenya, Uganda, Tanzania, Rwanda, Burundi, South Sudan and Ethiopia
**2. enafroCOI_all_clean_sN10-eN10_eafroNA.aln** - Contains non East African Data
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
source ../../../../../code/process_all_input_files.sh
cp enafroCOI_all_clean_sN10-eN10.aln enafroCOI_all_clean_sN10-eN10.aln2
delete_unwanted enafroCOI_all_clean_sN10-eN10.aln2 << EOF
1
Kenya
1
Tanzania
1
Uganda
1
Rwanda
1
Burundi
1
South_Sudan
1
Ethiopia
2
EOF
#Renaming
mv enafroCOI_all_clean_sN10-eN10_undesired.fasta enafroCOI_all_clean_sN10-eN10_eafro.aln
mv enafroCOI_all_clean_sN10-eN10.aln2 enafroCOI_all_clean_sN10-eN10_eafroNA.aln
```
Then sort enafroCOI_all_clean_sN10-eN10_eafro.aln into two files:
**1. enafroCOI_all_clean_sN10-eN10_eafro_genera.aln** - Contains East African COI records with genus names
**2. enafroCOI_all_clean_sN10-eN10_eafro_generaNA.aln** - Contains East African COI records without genus names
Those without genus names do not have species names either; however, those with species names may lack genus names.
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
cp enafroCOI_all_clean_sN10-eN10_eafro.aln enafroCOI_all_clean_sN10-eN10_eafro.aln2
delete_unwanted enafroCOI_all_clean_sN10-eN10_eafro.aln2 << EOF
1
gs-NA
2
EOF
#Renaming
mv enafroCOI_all_clean_sN10-eN10_eafro.aln2 enafroCOI_all_clean_sN10-eN10_eafro_genera.aln
mv enafroCOI_all_clean_sN10-eN10_eafro_undesired.fasta enafroCOI_all_clean_sN10-eN10_eafro_generaNA.aln
%%bash
cd ../data/output/alignment/pasta_output/aligned/
ls enafroCOI_all_clean*sN10-eN10*.aln
#cat $(ls enafroCOI_all_clean*sN10-eN10*.aln2) | head -5
```
#### **Replacing illegal characters.**
Replacing the characters that are **illegal in taxon names — tabulators, carriage returns, spaces, ":", ",", ")", "(", ";", "]", "\[", and "'"** — since they break the interpretation of the alignment in RAxML.
This has to be done, otherwise RAxML will throw up an error and exit
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
for i in $(ls enafroCOI_all_clean*sN10-eN10*.aln); do vim $i -n << EOF
:%s/\[//g
:%s/\]//g
:%s/ /_/g
:%s/://g
:%s/;//g
:%s/,/__/g
:%s/(/__/g
:%s/)//g
:%s/'//g
:wq
EOF
echo -e "`basename -- ${i}` completed"
done
```
Updating the remote server (hpc01.icipe.org) ready for execution
```
%%bash
cd ../data/output/alignment/pasta_output/aligned/
scp ./enafroCOI_all_clean* gilbert@hpc01.icipe.org:/home/gilbert/bioinformatics/github/co1_metaanalysis/data/output/alignment/pasta_output/aligned/ << EOF
<password>
EOF
```
| github_jupyter |
#### Copyright 2017 Google LLC.
本课程原版地址:https://colab.research.google.com/notebooks/mlcc/multi-class_classification_of_handwritten_digits.ipynb?utm_source=mlcc&utm_campaign=colab-external&utm_medium=referral&utm_content=multiclass-colab&hl=en
采用Apache 2.0协议
# Classifying Handwritten Digits with Neural Networks

**Learning Objectives:**
* Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set
* Compare the performance of the linear and neural network classification models
* Visualize the weights of a neural-network hidden layer
Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class.
## Setup
First, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
```
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
```
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.

Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following text block to print out the values in column 72.
```
mnist_dataframe.loc[:, 72:72]
```
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
```
def parse_labels_and_features(dataset):
    """Split a raw MNIST frame into (labels, features).

    This is a good place to scale or transform the features if needed.

    Args:
        dataset: A Pandas `DataFrame` whose column 0 holds the digit label
            and whose remaining columns hold monochrome pixel values in
            row-major order.

    Returns:
        A `tuple` `(labels, features)`:
            labels: A Pandas `Series` of digit classes.
            features: A Pandas `DataFrame` of pixel values rescaled from
                [0, 255] into [0, 1].
    """
    digit_labels = dataset[0]
    # Label-based .loc slicing is inclusive at both ends, so 1:784 selects
    # every pixel column.
    pixel_values = dataset.loc[:, 1:784]
    # Normalize the 0-255 gray scale into [0, 1].
    pixel_values = pixel_values / 255
    return digit_labels, pixel_values
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
```
Show a random example and its corresponding label.
```
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
```
## Task 1: Build a Linear Model for MNIST
First, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.
You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?
Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
```
def construct_feature_columns():
    """Construct the TensorFlow Feature Columns.

    Returns:
        A set holding a single numeric feature column that covers all
        784 pixels of a flattened 28x28 image.
    """
    # Each image is flattened into one 'pixels' vector of length 784.
    return {tf.feature_column.numeric_column('pixels', shape=784)}
```
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
```
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
    """A custom input_fn for sending MNIST data to the estimator for training.

    Args:
        features: The training features.
        labels: The training labels.
        batch_size: Batch size to use during training.
        num_epochs: Number of passes over the data; `None` repeats forever.
        shuffle: Whether to shuffle batches while training.

    Returns:
        A function that returns batches of training features and labels during
        training.
    """
    # BUG FIX: the inner function previously re-declared `num_epochs` and
    # `shuffle` as parameters with their own defaults, shadowing -- and thus
    # silently ignoring -- the values passed to create_training_input_fn().
    # It now closes over the outer arguments (the estimator invokes input_fn
    # with no arguments, so the inner parameters were never supplied anyway).
    def _input_fn():
        # Input pipelines are reset with each call to .train(). To ensure model
        # gets a good sampling of data, even when number of steps is small, we
        # shuffle all the data before creating the Dataset object.
        idx = np.random.permutation(features.index)
        raw_features = {"pixels": features.reindex(idx)}
        raw_targets = np.array(labels[idx])

        ds = Dataset.from_tensor_slices((raw_features, raw_targets))  # warning: 2GB limit
        ds = ds.batch(batch_size).repeat(num_epochs)

        if shuffle:
            ds = ds.shuffle(10000)

        # Return the next batch of data.
        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
        return feature_batch, label_batch

    return _input_fn
def create_predict_input_fn(features, labels, batch_size):
    """A custom input_fn for sending mnist data to the estimator for predictions.

    Args:
        features: The features to base predictions on.
        labels: The labels of the prediction examples.
        batch_size: Number of examples per emitted batch.

    Returns:
        A function that returns features and labels for predictions.
    """
    def _input_fn():
        raw_features = {"pixels": features.values}
        raw_targets = np.array(labels)

        # warning: Dataset.from_tensor_slices has a 2GB limit.
        ds = Dataset.from_tensor_slices((raw_features, raw_targets)).batch(batch_size)

        # Hand back the next batch of data.
        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
        return feature_batch, label_batch

    return _input_fn
def train_linear_classification_model(
        learning_rate,
        steps,
        batch_size,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    """Trains a linear classification model for the MNIST digits dataset.

    In addition to training, this function also prints training progress
    information, a plot of the training and validation loss over time, and a
    confusion matrix.

    Args:
      learning_rate: A `float`, the learning rate to use.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      training_examples: A `DataFrame` containing the training features.
      training_targets: A `DataFrame` containing the training labels.
      validation_examples: A `DataFrame` containing the validation features.
      validation_targets: A `DataFrame` containing the validation labels.

    Returns:
      The trained `LinearClassifier` object.
    """
    periods = 10
    steps_per_period = steps / periods

    # Create the input functions.
    predict_training_input_fn = create_predict_input_fn(
        training_examples, training_targets, batch_size)
    predict_validation_input_fn = create_predict_input_fn(
        validation_examples, validation_targets, batch_size)
    training_input_fn = create_training_input_fn(
        training_examples, training_targets, batch_size)

    # Create a LinearClassifier object; gradient clipping keeps updates stable.
    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    classifier = tf.estimator.LinearClassifier(
        feature_columns=construct_feature_columns(),
        n_classes=10,
        optimizer=my_optimizer,
        config=tf.estimator.RunConfig(keep_checkpoint_max=1)
    )

    # Train the model inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("LogLoss error (on validation data):")
    training_errors = []
    validation_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        classifier.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute probabilities.
        training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
        training_probabilities = np.array([item['probabilities'] for item in training_predictions])
        training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
        training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id, 10)

        validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
        validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
        validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
        validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id, 10)

        # Compute training and validation errors.
        training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
        validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, validation_log_loss))
        # Add the loss metrics from this period to our list.
        training_errors.append(training_log_loss)
        validation_errors.append(validation_log_loss)
    print("Model training finished.")

    # Remove event files to save disk space.
    # FIX: `map` is lazy in Python 3, so the original `_ = map(os.remove, ...)`
    # never deleted anything; iterate explicitly instead.
    for event_file in glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')):
        os.remove(event_file)

    # Calculate final predictions (not probabilities, as above).
    final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
    final_predictions = np.array([item['class_ids'][0] for item in final_predictions])

    accuracy = metrics.accuracy_score(validation_targets, final_predictions)
    print("Final accuracy (on validation data): %0.2f" % accuracy)

    # Output a graph of loss metrics over periods.
    plt.ylabel("LogLoss")
    plt.xlabel("Periods")
    plt.title("LogLoss vs. Periods")
    plt.plot(training_errors, label="training")
    plt.plot(validation_errors, label="validation")
    plt.legend()
    plt.show()

    # Output a plot of the confusion matrix, normalized by row (i.e. by the
    # number of samples in each class).
    cm = metrics.confusion_matrix(validation_targets, final_predictions)
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    ax = sns.heatmap(cm_normalized, cmap="bone_r")
    ax.set_aspect(1)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()
    return classifier
```
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate and steps.**
Stop if you get anything above about 0.9 accuracy.
```
# Baseline linear-classifier run; tune learning_rate/steps/batch_size by hand
# (the exercise targets roughly 0.9 validation accuracy).
classifier = train_linear_classification_model(
    learning_rate=0.02,
    steps=100,
    batch_size=10,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
```
### Solution
Click below for one possible solution.
Here is a set of parameters that should attain roughly 0.9 accuracy.
```
# Suggested solution hyperparameters: reaches roughly 0.9 validation accuracy.
_ = train_linear_classification_model(
    learning_rate=0.03,
    steps=1000,
    batch_size=30,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
```
## Task 2: Replace the Linear Classifier with a Neural Network
**Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.**
You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
```
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
```
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
```
# Download the held-out MNIST test set for the final overfitting check.
mnist_test_dataframe = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
    sep=",",
    header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
```
### Solution
Click below for a possible solution.
The code below is almost identical to the original `LinearClassifier` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
```
def train_nn_classification_model(
        learning_rate,
        steps,
        batch_size,
        hidden_units,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    """Trains a neural network classification model for the MNIST digits dataset.

    In addition to training, this function also prints training progress
    information, a plot of the training and validation loss over time, as well
    as a confusion matrix.

    Args:
      learning_rate: A `float`, the learning rate to use.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      hidden_units: A `list` of int values, specifying the number of neurons
        in each layer.
      training_examples: A `DataFrame` containing the training features.
      training_targets: A `DataFrame` containing the training labels.
      validation_examples: A `DataFrame` containing the validation features.
      validation_targets: A `DataFrame` containing the validation labels.

    Returns:
      The trained `DNNClassifier` object.
    """
    periods = 10
    # Caution: input pipelines are reset with each call to train.
    # If the number of steps is small, your model may never see most of the
    # data. So with multiple `.train` calls like this you may want to control
    # the length of training with num_epochs passed to the input_fn, do a
    # really-big shuffle, or, since it's in-memory data, shuffle all the data
    # in the `input_fn`.
    steps_per_period = steps / periods

    # Create the input functions. (The original created each of these twice
    # back to back; the redundant second set has been removed.)
    predict_training_input_fn = create_predict_input_fn(
        training_examples, training_targets, batch_size)
    predict_validation_input_fn = create_predict_input_fn(
        validation_examples, validation_targets, batch_size)
    training_input_fn = create_training_input_fn(
        training_examples, training_targets, batch_size)

    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]

    # Create a DNNClassifier object. Use tf.estimator.RunConfig (as in the
    # linear model above) instead of the deprecated tf.contrib.learn.RunConfig.
    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        n_classes=10,
        hidden_units=hidden_units,
        optimizer=my_optimizer,
        config=tf.estimator.RunConfig(keep_checkpoint_max=1)
    )

    # Train the model inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("LogLoss error (on validation data):")
    training_errors = []
    validation_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        classifier.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute probabilities.
        training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
        training_probabilities = np.array([item['probabilities'] for item in training_predictions])
        training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
        training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id, 10)

        validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
        validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
        validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
        validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id, 10)

        # Compute training and validation errors.
        training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
        validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, validation_log_loss))
        # Add the loss metrics from this period to our list.
        training_errors.append(training_log_loss)
        validation_errors.append(validation_log_loss)
    print("Model training finished.")

    # Remove event files to save disk space.
    # FIX: `map` is lazy in Python 3, so the original `_ = map(os.remove, ...)`
    # never deleted anything; iterate explicitly instead.
    for event_file in glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')):
        os.remove(event_file)

    # Calculate final predictions (not probabilities, as above).
    final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
    final_predictions = np.array([item['class_ids'][0] for item in final_predictions])

    accuracy = metrics.accuracy_score(validation_targets, final_predictions)
    print("Final accuracy (on validation data): %0.2f" % accuracy)

    # Output a graph of loss metrics over periods.
    plt.ylabel("LogLoss")
    plt.xlabel("Periods")
    plt.title("LogLoss vs. Periods")
    plt.plot(training_errors, label="training")
    plt.plot(validation_errors, label="validation")
    plt.legend()
    plt.show()

    # Output a plot of the confusion matrix, normalized by row (i.e. by the
    # number of samples in each class).
    cm = metrics.confusion_matrix(validation_targets, final_predictions)
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    ax = sns.heatmap(cm_normalized, cmap="bone_r")
    ax.set_aspect(1)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()
    return classifier
# Train the NN classifier with two hidden layers of 100 units each; keep the
# returned estimator for the test-set evaluation and weight visualization below.
classifier = train_nn_classification_model(
    learning_rate=0.05,
    steps=1000,
    batch_size=30,
    hidden_units=[100, 100],
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
```
Next, we verify the accuracy on the test set.
```
# Re-download the held-out MNIST test set and score the trained classifier on
# it, checking that validation accuracy was not just overfitting.
mnist_test_dataframe = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
    sep=",",
    header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
    test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
```
## Task 3: Visualize the weights of the first hidden layer.
Let's take a few minutes to dig into our neural network and see what it has learned by accessing the `weights_` attribute of our model.
The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.
Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
```
# List the trainable variables, then grab the first hidden layer's kernel.
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)

# One image per hidden unit, laid out ten per row.
node_count = weights0.shape[1]
row_count = int(math.ceil(node_count / 10.0))
fig, axes = plt.subplots(row_count, 10, figsize=(20, 2 * row_count))
for unit_weights, axis in zip(weights0.T, axes.ravel()):
    # Each 1x784 weight vector is reshaped back into a 28x28 image.
    axis.matshow(unit_weights.reshape(28, 28), cmap=plt.cm.pink)
    axis.set_xticks(())
    axis.set_yticks(())
plt.show()
```
The first hidden layer of the neural network should be modeling some pretty low level features, so visualizing the weights will probably just show some fuzzy blobs or possibly a few parts of digits. You may also see some neurons that are essentially noise -- these are either unconverged or they are being ignored by higher layers.
It can be interesting to stop training at different numbers of iterations and see the effect.
**Train the classifier for 10, 100, and 1000 steps, respectively. Then run this visualization again.**
What differences do you see visually for the different levels of convergence?
| github_jupyter |
```
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution1D, Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import callbacks
import time
import logging
import keras
print (keras.__version__)
class printbatch(callbacks.Callback):
    """Keras callback that logs batch and epoch progress to stdout.

    Fix: the original used Python 2 `print` statements, which are a
    SyntaxError under Python 3; they are now print() calls.
    """

    def on_batch_end(self, batch, logs={}):
        # if batch%10 == 0:
        print("Batch " + str(batch) + " ends")

    def on_epoch_begin(self, epoch, logs={}):
        print(logs)

    def on_epoch_end(self, epoch, logs={}):
        print(logs)
def myGenerator():
    """Infinite generator yielding (images, one-hot labels) batches of 32.

    NOTE(review): relies on module-level img_rows/img_cols that are defined
    *after* this function in the cell -- this works only because the generator
    body runs lazily, after those globals exist.
    """
    # Dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images.
    # X_train, X_test: uint8 array of grayscale image data with shape (nb_samples, 28, 28).
    # y_train, y_test: uint8 array of digit labels (integers in range 0-9) with shape (nb_samples,).
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # ((60000, 28, 28), (60000,), (10000, 28, 28), (10000,))
    y_train = np_utils.to_categorical(y_train,10)
    # Reshape to (samples, 1, H, W) and scale pixels to [0, 1].
    # NOTE(review): X_test is prepared here but never yielded.
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    count = 0
    while 1:
        for i in range(20): # 1875 * 32 = 60000 -> # of training samples
            print('i:' + str(i) + ' count:' + str(count))
            count = count + 1
            print('i*32:' + str(i*32) + ' (i+1)*32:' + str((i+1)*32))
            yield X_train[i*32:(i+1)*32], y_train[i*32:(i+1)*32] # hand over 32 samples at a time
            print("-------")
# Hyperparameters for the small Keras CNN demo.
# NOTE(review): this cell uses the Keras 1.x API (Convolution2D positional
# kernel sizes, border_mode, fit_generator with samples_per_epoch/nb_epoch/
# max_q_size/nb_worker) -- it will not run unmodified on Keras 2+; confirm
# the target Keras version.
batch_size = 32
nb_classes = 10
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# Two 3x3 conv layers -> max-pool -> dropout -> dense head with softmax.
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta')
pb = printbatch()
# Train from the generator; 640 samples/epoch keeps the demo fast.
model.fit_generator(myGenerator(),
                    #samples_per_epoch = 60000,
                    samples_per_epoch = 640,
                    nb_epoch = 2,
                    verbose=1,
                    callbacks=[pb],
                    validation_data=None,
                    class_weight=None,
                    max_q_size = 1,
                    nb_worker=1)
# Print digits 0-9. FIX: the original used Python 2 statement syntax
# (`print i`), which is a SyntaxError under Python 3.
for i in range(10):
    print(i)
```
| github_jupyter |
```
# Experiment counter; incremented before each wandb run to name it test-<n>.
test_index = 0
```
#### testing
```
from load_data import *
# load_data()
```
## Loading the data
```
from load_data import *
# Load the train/test split from the project-local load_data helper.
X_train,X_test,y_train,y_test = load_data()
# Sanity-check sizes (notebook cells display the tuples).
len(X_train),len(y_train)
len(X_test),len(y_test)
```
## Test Modelling
```
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
class Test_Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.c1 = nn.Conv2d(1,64,5)
self.c2 = nn.Conv2d(64,128,5)
self.c3 = nn.Conv2d(128,256,5)
self.fc4 = nn.Linear(256*10*10,256)
self.fc6 = nn.Linear(256,128)
self.fc5 = nn.Linear(128,4)
def forward(self,X):
preds = F.max_pool2d(F.relu(self.c1(X)),(2,2))
preds = F.max_pool2d(F.relu(self.c2(preds)),(2,2))
preds = F.max_pool2d(F.relu(self.c3(preds)),(2,2))
# print(preds.shape)
preds = preds.view(-1,256*10*10)
preds = F.relu(self.fc4(preds))
preds = F.relu(self.fc6(preds))
preds = self.fc5(preds)
return preds
# Training configuration for the weather-classification experiments.
device = torch.device('cuda')
BATCH_SIZE = 32
IMG_SIZE = 112
model = Test_Model().to(device)
optimizer = optim.SGD(model.parameters(),lr=0.1)
criterion = nn.CrossEntropyLoss()
EPOCHS = 12
from tqdm import tqdm
PROJECT_NAME = 'Weather-Clf'
import wandb
# First training run kept for reference (commented out); the second
# Test_Model defined below is trained with the same loop.
# test_index += 1
# wandb.init(project=PROJECT_NAME,name=f'test-{test_index}')
# for _ in tqdm(range(EPOCHS)):
#     for i in range(0,len(X_train),BATCH_SIZE):
#         X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
#         y_batch = y_train[i:i+BATCH_SIZE].to(device)
#         model.to(device)
#         preds = model(X_batch.float())
#         preds.to(device)
#         loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         wandb.log({'loss':loss.item()})
# wandb.finish()
# for index in range(10):
#     print(torch.argmax(preds[index]))
#     print(y_batch[index])
#     print('\n')
class Test_Model(nn.Module):
    """Deeper CNN variant: three conv blocks plus a five-layer fc head, 6 classes.

    Fix: the original applied ReLU to the final fc5 output. nn.CrossEntropyLoss
    expects raw (possibly negative) logits; clamping them at zero destroys the
    gradient for any class whose score is negative. The last layer now returns
    logits directly, matching the first Test_Model in this file.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1,64,5)
        self.conv2 = nn.Conv2d(64,128,5)
        self.conv3 = nn.Conv2d(128,256,5)
        self.fc1 = nn.Linear(256*10*10,64)
        self.fc2 = nn.Linear(64,128)
        self.fc3 = nn.Linear(128,256)
        self.fc4 = nn.Linear(256,128)
        self.fc5 = nn.Linear(128,6)

    def forward(self,X):
        # Conv stages reduce 112 -> 54 -> 25 -> 10 spatial resolution.
        preds = F.max_pool2d(F.relu(self.conv1(X)),(2,2))
        preds = F.max_pool2d(F.relu(self.conv2(preds)),(2,2))
        preds = F.max_pool2d(F.relu(self.conv3(preds)),(2,2))
        preds = preds.view(-1,256*10*10)
        preds = F.relu(self.fc1(preds))
        preds = F.relu(self.fc2(preds))
        preds = F.relu(self.fc3(preds))
        preds = F.relu(self.fc4(preds))
        # Raw logits for CrossEntropyLoss -- no activation on the output layer.
        preds = self.fc5(preds)
        return preds
# Rebuild the model/optimizer and run the logged training experiment.
model = Test_Model().to(device)
optimizer = optim.SGD(model.parameters(),lr=0.1)
criterion = nn.CrossEntropyLoss()
test_index += 1
wandb.init(project=PROJECT_NAME,name=f'test-{test_index}')
for _ in tqdm(range(EPOCHS)):
    for i in range(0,len(X_train),BATCH_SIZE):
        # Reshape the raw batch to NCHW and move it to the GPU.
        X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
        y_batch = y_train[i:i+BATCH_SIZE].to(device)
        model.to(device)
        preds = model(X_batch.float())
        preds.to(device)
        loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Logs only the last batch's loss of each epoch.
    wandb.log({'loss':loss.item()})
wandb.finish()
# Spot-check predictions from the final batch against their labels.
for index in range(10):
    print(torch.argmax(preds[index]))
    print(y_batch[index])
    print('\n')
```
| github_jupyter |
# Quantum Machine Learning with Amazon Braket: Binary Classifiers
This post details an approach taken by Aioi to build an exploratory
quantum machine learning application using Amazon Braket. Quantum
machine learning has been defined as "a research area that explores the
interplay of ideas from quantum computing and machine learning." Specifically, we explore how to use quantum computers to build a proof-of-principle classifier for risk assessment in a hypothetical car insurance use case. We use a hybrid quantum-classical approach and train a so-called quantum neural network to perform binary classification.
## Background
This demonstration is a result of collaboration with Aioi USA -
subsidiary of Aioi Nissay Dowa Insurance which is a member of MS&AD
Insurance Group Holdings - a major worldwide insurance organization
with close ties to the Toyota group, offering Toyota Insurance in 37
countries. Aioi USA is a full-service "insurtech" insurance agency
that develops data science-based products and services for the
transportation industry. Aioi was one of the first insurance companies
to work with Amazon Braket.
Aioi analyzes telematics data from self-driving vehicles to predict
driving risks. The vehicles are equipped with a multitude of sensors and
the goal is to use the sensor data to assign each vehicle a binary score
(safe or fail) that indicates the health of the vehicle. The problem can
be formalized computationally as a binary classification task in which
the driving risk score is a binary label attached to the vehicle's sensor data.
To learn label assignments for each data point, classical machine learning
techniques such as e.g., linear regression (LR) or deep learning (DL)
can be applied. LR is a popular approach when the data-label mapping
is described by a linear function. For large and complex data structures, DL offers a way to capture
nonlinear behavior in data-label mapping.
So, we have powerful classical methods to perform classification tasks; how can quantum computers help here? The short answer is, we don't quite know yet. There are results ([arXiv:1204.5242](https://arxiv.org/abs/1204.5242), [arXiv:1601.07823](https://arxiv.org/abs/1601.07823) ) indicating that quantum LR algorithms applied to quantum data under specific assumptions can be exponentially faster than their classical counterparts operating on classical data. The flip side is that these quantum algorithms output a solution in the form of a quantum state which may not be immediately useful for further processing on a classical computer. On the DL front, quantum neural networks (QNNs) emerged as a potential replacement for classical neural nets ([arXiv:quant-ph/0201144](https://arxiv.org/abs/quant-ph/0201144)) . QNN designs to perform binary classification tasks were proposed recently (see e.g., [arXiv:1802.06002](https://arxiv.org/abs/1802.06002)) as well. An advantage of QNNs is that they can directly output a classical label value, though one still has to input data in the form of a quantum state. Whether or not QNNs have practical computational advantage over classical neural nets in DL task is very much an area of active research and the jury is not out yet on QNNs. This motivated us to explore how QNNs can be utilized for the driving risk
assignment in the case of binary sensor data with an eye towards near-term hardware implementation that constrains QNN's circuit depth due to decoherence.
In this post we build quantum machine learning applications using [Amazon Braket](https://aws.amazon.com/braket/). To run the example applications developed here, you need access to the [Amazon Braket SDK](https://github.com/aws/amazon-braket-sdk-python). You can either install the Braket SDK locally from the [Amazon Braket GitHub repo](https://github.com/aws/amazon-braket-sdk-python) or, alternatively, create a managed notebook in the [Amazon Braket console](https://aws.amazon.com/console/). (Please note that you need an AWS account, if you would like to run this demo on one of the quantum hardware backends offered by Amazon Braket.)
## Problem Setting
Binary classification is an example of supervised machine learning. It
requires a training data set to build a model that can be used to predict
labels (driving risk scores). We assume that we are given a training set
$T$ that consists of $M$ data-label pairs ${\bf x}, {\bf y}$
$(T=\{{\bf x}_i, {\bf y}_i\}$,$i=1,M)$. Here, ${\bf x}_i$ represents vehicle sensor data as a $N$-bit string
${\bf x}_i=\{x_{i0},\cdots,x_{iN-1}\}$ ($x_{ij}=\{0,1\}$). A label
${\bf y}_i=\{0,1\}$ represents the driving risk score associated with ${\bf x}_i$.
Before we proceed with a quantum solution, it is instructive to recall
the main steps of constructing a classical neural net (NN) based
solution. A classical NN takes data ${\bf x}$ and a set of
parameters $\vec{\theta}$ (so-called weights) as an input and transforms it into an output
label ${\bf z}$ such that $\hat{{\bf y} }= f({\bf x},\vec{\theta})$ where
$f$ is determined by NN. The goal is then
to use a training set to train the NN, i.e. to determine the values of
$\vec{\theta}$ for which the discrepancy between the output labels and
the training set labels is minimized. You achieve this by minimizing a
suitably chosen loss function $L(\hat{{\bf y}},{\bf y})$ over the NN
parameters $\vec{\theta}$ using e.g., a gradient-based optimizer.
To construct a quantum binary classifier we follow a similar procedure
with a couple of modifications
- We map our classical $N$-bit data $\{{\bf x}_i\}$ onto $N$-qubit quantum states $\{|\psi_i\rangle \}$. For example, a classical bit string $\{{\bf x}_i\}=0010$ maps onto $|\psi_i\rangle = |0010\rangle$
- Instead of a classical NN we construct a QNN - a $N+1$-qubit circuit $\mathcal{C}(\{\vec{\theta}\})$ (a sequence of elementary single- and two-qubit gates) that transforms the input states $\{|\psi_i\rangle|0\rangle \}$ into output states $\{|\phi_i \rangle \}$ $|\phi_i\rangle = \mathcal{C}|\psi_i\rangle $. The QNN circuit $\mathcal{C}(\{\vec{\theta}\})$ depends on classical parameters $\{\vec{\theta}\}$ that can be adjusted to change the output $\{|\phi_i\rangle \}$
- We use the $N+1$-th qubit to read out labels after the QNN acted on the input state. Every time we run the QNN with the same input state and parameters $\{\vec{\theta}\}$, we measure in what quantum state the $N+1$-th qubit ends up ($|0\rangle$ or $|1\rangle$). We denote the frequency of observing the state $|0\rangle$ ($|1\rangle$ ) as $p_0$ ($p_1$). We define the observed label $\hat{{\bf y}}$ as $\hat{{\bf y}} = \frac{1 - (p_0-p_1)}{2}$. (Note: in the language of quantum computing the difference $p_0-p_1$ equals the expected value of the Pauli $\hat{Z}$ operator measured on the $N+1$-th qubit.) By definition, $p_0-p_1$ is a function of the QNN parameters $\{\vec{\theta}\}$ in the range $ [-1,1] $ and, thus, $\hat{{\bf y}}$ has the range $ [0,1] $ .
In the training of the QNN circuit $\mathcal{C}$ our goal is to find a set of parameters $\{\vec{\theta}_o\}$ such that for each data point in the training set $T$ the label value ${\bf y}_i$ is close
to $\hat{{\bf y}}_i$.
To achieve this, we minimize the log loss function $L(\{\vec{\theta}\})$ defined as,
$L(\{\vec{\theta}\})=-(\sum\limits_{i=1}^{M}{\bf y}_i\log(\hat{{\bf y}}_i)+(1-{\bf y}_i)\log(1-\hat{{\bf y}}_i))$.
We use the Amazon Braket local simulator to evaluate $L(\{\vec{\theta}\})$ and a classical optimizer from $\verb+scipy.optimize+$ to minimize it.
## Mapping classical data onto quantum states.
The first step in the implementation of a quantum binary classifier is to specify a quantum circuit that maps classical data onto quantum states. We map classical bit values "0" and "1" onto quantum states
$|0\rangle$ and $|1\rangle$, respectively. By convention, the
initial state of a qubit is always assumed to be $|0\rangle$. If the
input quantum state is $|1\rangle$ then we obtain it from
$|0\rangle$ by applying a qubit flip gate $X$ i.e.
$|1\rangle = X|0\rangle$. Similarly, a quantum circuit to prepare an
input state, corresponding to classical data, consists of $X$
gates acting on qubits that are in state $|1\rangle$. For example, a
quantum circuit to prepare $|\psi_i\rangle =|101\rangle$ will consist
of two $X$ gates acting on qubits 0 and 2. Below we provide code that
generates a quantum circuit for preparing an arbitrary computational basis state
$|\psi_i\rangle$ using Amazon Braket.
```
# Import Braket libraries
from braket.circuits import Circuit
from braket.aws import AwsDevice
# A function that converts a bit string bitStr into a quantum circuit
def bit_string_to_circuit(bitStr):
    """Return a Braket Circuit preparing the basis state encoded by bitStr."""
    circuit = Circuit()
    # Qubits start in |0>; apply an X gate at every position whose bit is '1'.
    for qubit, bit in enumerate(bitStr):
        if bit == '1':
            circuit.x(qubit)
    return circuit
# provide a feature string to test the function above
feature = '00101010'
# print quantum circuit that prepares corresponding quantum state
# (for this feature, X gates land on qubits 2, 4 and 6)
print(bit_string_to_circuit(feature))
```
## Designing Quantum Neural Networks and Training
Now that we know how to prepare input quantum states that correspond to classical data, the next step is to define and construct a QNN circuit $\mathcal{C}(\{\vec{\theta}\})$ that we will train to
perform binary classification. We use the QNN design layout depicted in
the figure below. It has $2N+1$ classical parameters defining:
$N$ two-qubit gates
$XX(\theta_k) = e^{-i\frac{\theta_k}{2} \hat{X}_j\hat{X}_{N+1}}$, $N$
single-qubit gates $R_{y}(\theta_m) = e^{-i\frac{\theta_m}{2}\hat{Y}_j}$, and one single-qubit gate $R_{x}(\theta) = e^{-i\frac{\theta}{2}\hat{X}_N}$ acting on the $N+1$-th qubit.

The code below implements this QNN, applies it to an arbitrary input state defined by a classical bit string, and measures the values of the label qubit using Amazon Braket.
```
# import standard numpy libraries and optimizers
import numpy as np
from scipy.optimize import minimize
# Braket imports
from braket.circuits import Circuit, Gate, Instruction, circuit, Observable
from braket.aws import AwsDevice, AwsQuantumTask
from braket.devices import LocalSimulator
# set Braket backend to local simulator (can be changed to other backends);
# the local simulator runs circuits on the notebook host, so no AWS calls.
device = LocalSimulator()
# Quantum Neural Net from the QNN figure implemented in Braket
# Inputs: bitStr - data bit string (e.g. '01010101')
#         pars - array of parameters theta (see the QNN figure for more details)
def QNN(bitStr,pars):
    """Build the QNN circuit for input bitStr with parameter vector pars."""
    # One qubit per data bit, plus one extra qubit for the label readout.
    n_qubits = len(bitStr) + 1
    qnn = Circuit()

    # Single-qubit X rotation on the label qubit.
    qnn.rx(n_qubits - 1, pars[0])

    # For every data qubit: prepare the input bit, rotate about Y, then
    # entangle it with the label qubit via an XX gate.
    for qubit in range(n_qubits - 1):
        theta_y = pars[2 * qubit + 1]
        theta_xx = pars[2 * qubit + 2]
        if bitStr[qubit] == '1':
            # Braket initializes qubits to |0>; an X gate flips this one to |1>.
            qnn.x(qubit)
        qnn.ry(qubit, theta_y)
        qnn.xx(qubit, n_qubits - 1, theta_xx)

    # Measure the Z expectation value on the label qubit.
    qnn.expectation(Observable.Z(), target=[n_qubits - 1])
    return qnn
```
With the QNN defined, we need to code up the loss function $L(\{\vec{\theta}\})$ that we minimize in order to train
the QNN to perform binary classification. Below is the code that computes $L(\{\vec{\theta}\})$ using the local simulator in Amazon Braket.
```
## Function that computes the label of a given feature bit sting bitStr
def parity(bitStr):
    """Label of a feature string: 0 for an even number of '1' bits, 1 for odd."""
    ones = sum(1 for ch in bitStr if ch == '1')
    return ones % 2
## Log loss function L(theta,phi) for a given training set trainSet
## inputs: trainSet - array of feature bit strings e.g. ['0101','1110','0000']
## pars - quantum neural net parameters theta (See the QNN figure)
## device - Braket backend that will compute the log loss
def loss(trainSet, pars, device):
    """Log loss of the QNN over `trainSet`, evaluated on the given Braket device.

    trainSet -- iterable of feature bit strings, e.g. ['0101', '1110']
    pars     -- QNN angle parameters (see the QNN figure)
    device   -- Braket backend used to compute the expectation values
    """
    total = 0.0
    for sample in trainSet:
        # Run the QNN and retrieve <Z> of the label qubit (shots=0 -> exact value).
        expZ = device.run(QNN(sample, pars), shots=0).result().values[0]
        p_one = 0.5 * (1.0 - expZ)  # probability that the predicted label is 1
        if parity(sample) == 0:
            total += -np.log2(1.0 - p_one)
        else:
            total += -np.log2(p_one)
    print("Current value of the loss function: ", total)
    return total
```
Putting it all together we are now ready to train our QNN circuit to reproduce binary classification of a training set $T$. For the example below, we assume that labels ${\bf y}_i$ are generated by a Boolean function $\hat{f}({\bf x}_i) = (\sum\limits_{j=0}^{N-1}x_{ij})\ {\rm mod}\ 2$. To emulate data in the training set $T$, we generated $11$ random $10$-bit strings (data) and assign them labels according to $\hat{f}$.
```
## Training the QNN using gradient-based optimizer
nBits = 10 # number of bits per feature
## Random training set consisting of 11 10-bit features
## Please explore other training sets
trainSet = ['1101011010',
            '1000110011',
            '0101001001',
            '0010000110',
            '0101111010',
            '0000100010',
            '1001010000',
            '1100110001',
            '1000010001',
            '0000111101',
            '0000000001']
## Initial assignment of QNN parameters theta and phi (random angles in [-pi,pi])
## 2*nBits Ry/XX angles plus one Rx angle for the label qubit.
pars0 = 2 * np.pi * np.random.rand(2*nBits+1) - np.pi
## Run minimization
## BFGS estimates gradients numerically, so each iteration evaluates loss() many times.
res = minimize(lambda pars: loss(trainSet, pars, device), pars0, method='BFGS', options={'disp':True})
```
Run the code and wait for the optimizer to converge. It outputs a message that looks like this when the optimizer finishes.
```
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 55
Function evaluations: 1430
Gradient evaluations: 65
```
We note that our QNN circuit is designed to compute the parity of input data exactly for an appropriate choice of the parameters $\{\vec{\theta}\}$. Thus, the global minimum of the loss function using this QNN is zero. This is generally not the case in DL applications, however. Note also that $L(\{\vec{\theta}\})$ is not convex
with respect to the parameters $\{\vec{\theta}\}$. This means that if the final value of the loss function value is not zero, the optimizer got stuck in a local minimum. Do not panic. Try running the optimizer with a
different set of initial parameters \verb+pars0+. You can also explore various minimization algorithms by
specifying $\verb+method=' '+$ in the minimize function.
Calling $\verb+res.x+$ outputs the optimal values of the parameters $\{\vec{\theta}\}$
and you can use them to run the "optimal" QNN and perform binary classification on the data that is not a part of the training set. Try that and compute the mean square error of the classifier.
For our 10-bit data example there are $2^{10}=1024$ possible
10-bit strings, we chose a training set that has only 11 data points. Yet it is
sufficiently large to train the QNN to act as a perfect
binary classifier for all 1024 possible features. Can you demonstrate
that?
```
## Print the predicted label values for all N-bit data points using the optimal QNN parameters res.x
for ind in range(2**nBits):
    # zero-padded binary representation of ind, e.g. 5 -> '0000000101'
    data = format(ind, '0'+str(nBits)+'b')
    task = device.run(QNN(data, res.x), shots=100)
    result = task.result()
    if (data in trainSet):
        inSet = 'in the training set'
    else:
        inSet = 'NOT in the training set'
    # 0.5*(1-<Z>) maps the label-qubit expectation value onto a parity estimate in [0,1]
    print('Feature:', data, '| QNN predicted parity: ', 0.5*(1-result.values[0]), ' | ', inSet)
    print('---------------------------------------------------')
```
As an exercise, use the optimal QNN parameters in $\verb+res.x+$ and apply the
resulting QNN to all 10-bit strings that are not in the training set.
Record the mean square error between the predicted and computed label
values.
### Conclusion
This post explored the use case of binary classification to analyze
binary (telematic) data by combining QNNs with Amazon Braket. The QNN binary classifier designed in this post
requires the number of two-qubit gates that scales linearly with the
feature size. This is advantageous for Noisy Intermediate Scale Quantum
(NISQ) devices that are limited in the circuit depth due to noise. A
future area of investigation for the team is to apply more complex
feature sets, and constructing QNNs to classify them. You can download and play with the code from this post here.
| github_jupyter |
```
from quchem.Hamiltonian_Generator_Functions import *
from quchem.Graph import *
### HAMILTONIAN start
# Build the molecular Hamiltonian for LiH in a minimal (STO-3G) basis.
Molecule = 'LiH'
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]  # bond length 1.45 -- presumably angstrom; TODO confirm units
basis = 'sto-3g'
### Get Hamiltonian
Hamilt = Hamiltonian(Molecule,
                     run_scf=1, run_mp2=1, run_cisd=1, run_ccsd=1, run_fci=1,
                     basis=basis,
                     multiplicity=1,
                     geometry=geometry) # normally None!
# Jordan-Wigner transformed qubit Hamiltonian; threshold=None keeps every term.
QubitHamiltonian = Hamilt.Get_Qubit_Hamiltonian(threshold=None, transformation='JW')
### HAMILTONIAN end
#####################################
# Partition the Hamiltonian terms into anti-commuting ('AC') cliques via graph colouring.
Hamiltonian_graph_obj = Openfermion_Hamiltonian_Graph(QubitHamiltonian)
commutativity_flag = 'AC' ## <- defines relationship between sets!!!
plot_graph = False
Graph_colouring_strategy='largest_first'
anti_commuting_sets = Hamiltonian_graph_obj.Get_Clique_Cover_as_QubitOp(commutativity_flag, Graph_colouring_strategy=Graph_colouring_strategy, plot_graph=plot_graph)
anti_commuting_sets
from quchem.Ansatz_Generator_Functions import *
##
# Natural-orbital occupation numbers; used below to drop (near-)fully-occupied
# and (near-)empty orbitals from the UCCSD excitation list.
NOON_spins_combined, NMO_basis = Hamilt.Get_NOON()
##
Hamilt.Get_CCSD_Amplitudes()
ansatz_obj = Ansatz(Hamilt.molecule.n_electrons, Hamilt.molecule.n_qubits)
# Remove single (ia) and double (ijab) excitation operators acting on orbitals
# whose NOON is above occ_threshold or below unocc_threshold.
reduced_Sec_Quant_CC_ops_ia, reduced_Sec_Quant_CC_ops_ijab, reduced_theta_parameters_ia, reduced_theta_parameters_ijab =ansatz_obj.Remove_NOON_terms(
    NOON=NOON_spins_combined,
    occ_threshold= 1.999,
    unocc_threshold=1e-4,
    indices_to_remove_list_manual=None,
    single_cc_amplitudes=Hamilt.molecule.single_cc_amplitudes,
    double_cc_amplitudes=Hamilt.molecule.double_cc_amplitudes,
    singles_hamiltonian=Hamilt.singles_hamiltonian,
    doubles_hamiltonian=Hamilt.doubles_hamiltonian,
    tol_filter_small_terms=None)
reduced_Sec_Quant_CC_ops_ijab
# Report how many excitation terms the NOON filter removed.
ia_terms, ijab_terms, ia_theta, ijab_theta = ansatz_obj.Get_ia_and_ijab_terms()
print('REDUCTION')
print('ia_terms', len(ia_terms), 'TO', len(reduced_Sec_Quant_CC_ops_ia))
print('ijab_terms', len(ijab_terms), 'TO', len(reduced_Sec_Quant_CC_ops_ijab))
# Map the remaining fermionic excitation operators to qubit operators (single Trotter step).
Qubit_Op_list_Second_Quant_CC_Ops_ia, Qubit_Op_list_Second_Quant_CC_Ops_ijab = ansatz_obj.UCCSD_single_trotter_step(reduced_Sec_Quant_CC_ops_ia,
                                                                                                                    reduced_Sec_Quant_CC_ops_ijab)
# Build the HF + UCCSD circuit and print it (transposed diagram reads top-to-bottom).
full_ansatz_Q_Circ = Ansatz_Circuit(Qubit_Op_list_Second_Quant_CC_Ops_ia, Qubit_Op_list_Second_Quant_CC_Ops_ijab,
                                    Hamilt.molecule.n_qubits, Hamilt.molecule.n_electrons)
ansatz_cirq_circuit = full_ansatz_Q_Circ.Get_Full_HF_UCCSD_QC(reduced_theta_parameters_ia, reduced_theta_parameters_ijab)
print(ansatz_cirq_circuit.to_text_diagram(transpose=True))
# Sanity check: full state-vector amplitudes of the ansatz circuit.
simulator = cirq.Simulator()
result = simulator.compute_amplitudes(ansatz_cirq_circuit, bitstrings=[i for i in range(2 ** len(ansatz_cirq_circuit.all_qubits()))])
result=np.around(result, 5)
print(result.reshape([(2 ** len(ansatz_cirq_circuit.all_qubits())), 1]))
from quchem.LCU_method import *
from quchem.Simulating_Quantum_Circuit import *
# LCU (linear combination of unitaries) reduction of one anti-commuting set (index 10).
R_uncorrected, Pn, gamma_l = Get_R_linear_combination(anti_commuting_sets[10], 1)
R_corrected_Op_list, R_corr_list, ancilla_amplitudes, l1 = absorb_complex_phases(R_uncorrected)
full_circ = Full_Q_Circuit(Pn, R_corrected_Op_list, R_corr_list, ancilla_amplitudes, Hamilt.molecule.n_qubits, ansatz_cirq_circuit)
# 100000-shot projective estimate of <Pn>, rescaled by gamma_l to give the set's energy contribution.
output_bin_dict = Get_binary_dict_project(full_circ, Pn, 100000, Hamilt.molecule.n_qubits, ancilla_amplitudes, l1)
expectation_value_by_parity(output_bin_dict) * gamma_l
# from timeit import default_timer as timer
# n_shots = 1000
# start = timer()
# simulator = cirq.Simulator()
# raw_result = simulator.run(full_circ, repetitions=10*n_shots * int(np.ceil(1 / (1/l1)**2)))
# end = timer()
# print('time taken = ', end - start ,'seconds')
from timeit import default_timer as timer
n_shots=100
###
# Time one full VQE energy evaluation over all anti-commuting sets.
start = timer()
testing = VQE_Experiment_LCU_UP(anti_commuting_sets,
                                ansatz_cirq_circuit,
                                n_shots,
                                Hamilt.molecule.n_qubits,
                                N_indices_dict=None)
print(testing.Calc_Energy())
end = timer()
###
print('time taken = ', end - start ,'seconds')
Hamilt.molecule.fci_energy  # exact (FCI) reference energy for comparison
n_shots=100  # shots per circuit used by GIVE_ENERGY below
def GIVE_ENERGY(theta_ia_theta_jab_list):
    """Objective for the optimizer: ansatz energy for the given concatenated angles.

    The list holds the ia angles first, then the ijab angles.
    """
    n_ia = len(reduced_theta_parameters_ia)
    theta_ia = theta_ia_theta_jab_list[:n_ia]
    theta_ijab = theta_ia_theta_jab_list[n_ia:]
    # Rebuild the ansatz circuit with the requested angles and measure it.
    circuit = full_ansatz_Q_Circ.Get_Full_HF_UCCSD_QC(theta_ia, theta_ijab)
    experiment = VQE_Experiment_LCU_UP(anti_commuting_sets,
                                       circuit,
                                       n_shots,
                                       Hamilt.molecule.n_qubits,
                                       N_indices_dict=None)  # e.g. {7:0, 8:1, 9:0, 10:1}
    return experiment.Calc_Energy().real
### optimizer
from quchem.Scipy_Optimizer import *
# Start from all-zero angles (the Hartree-Fock reference state).
THETA_params=[0 for _ in range(len(reduced_theta_parameters_ia)+len(reduced_theta_parameters_ijab))]
GG = Optimizer(GIVE_ENERGY, THETA_params, 'Nelder-Mead', store_values=True, display_iter_steps=True,
               tol=1e-5,
               display_convergence_message=True)
GG.get_env(50)  # run at most 50 iterations
GG.plot_convergence()
plt.show()
```
### Tensorflow
```
from quchem.TensorFlow_Opt import *
```
**gradient is given by**
https://arxiv.org/pdf/1906.08728.pdf
$$\frac{\partial O(\theta)}{\partial \theta}=\left\langle\overrightarrow{0}\left|\hat{U}^{\dagger} \hat{R}_{y}^{C \dagger}(\theta+\pi / 4) \hat{V}^{\dagger} \hat{O} \hat{V} \hat{R}_{y}^{C}(\theta+\pi / 4) \hat{U}\right| \overrightarrow{0}\right\rangle -\left\langle\overrightarrow{0}\left|\hat{U}^{\dagger} \hat{R}_{y}^{C \dagger}(\theta-\pi / 4) \hat{V}^{\dagger} \hat{O} \hat{V} \hat{R}_{y}^{C}(\theta-\pi / 4) \hat{U}\right| \overrightarrow{0}\right\rangle$$
$$\frac{\partial O(\theta)}{\partial \theta} =O(\theta+\pi / 4)-O(\theta-\pi / 4)$$
```
def calc_gradient(theta_ia_theta_jab_list):
    """Parameter-shift gradient: O(theta + pi/4) - O(theta - pi/4) per parameter.

    Returns a list of (gradient, theta) pairs, one for each input angle.
    """
    gradients = []
    for idx, theta in enumerate(theta_ia_theta_jab_list):
        shifted = theta_ia_theta_jab_list.copy()
        # Evaluate the objective at the two shifted angle settings.
        shifted[idx] = theta + np.pi / 4
        plus = GIVE_ENERGY(shifted)
        shifted[idx] = theta - np.pi / 4
        minus = GIVE_ENERGY(shifted)
        gradients.append((plus - minus, theta))
    return gradients
```
note:
this is very SLOW as it has to run a separate experiment TWICE for each parameter before taking a step!
```
import random
# Random starting angles in [0, 2*pi) for every remaining UCCSD parameter.
X0 = [random.uniform(0, 2*np.pi) for _ in range(len(reduced_Sec_Quant_CC_ops_ia) + len(reduced_Sec_Quant_CC_ops_ijab))]
GG = Tensor_Flow_Optimizer(GIVE_ENERGY, X0, 'Adam', calc_gradient, learning_rate=0.1, beta1=0.9,
                           beta2=0.999, store_values=True, display_iter_steps=True)
GG.optimize(50)  # at most 50 Adam steps
GG.plot_convergence()
from quchem.Adam_Optimizer import *
import random
def calc_gradient_ADAM(theta_ia_theta_jab_list):
    """Parameter-shift gradient as a numpy array (the shape Adam_Opt expects)."""
    gradients = []
    for idx, theta in enumerate(theta_ia_theta_jab_list):
        shifted = theta_ia_theta_jab_list.copy()
        # O(theta + pi/4) and O(theta - pi/4) for this single parameter.
        shifted[idx] = theta + np.pi / 4
        plus = GIVE_ENERGY(shifted)
        shifted[idx] = theta - np.pi / 4
        minus = GIVE_ENERGY(shifted)
        gradients.append(plus - minus)
    return np.array(gradients)
# Random start, then 50 Adam steps against the sampled (shot-based) energy.
X0 = np.array([random.uniform(0, 2*np.pi) for _ in range(len(reduced_Sec_Quant_CC_ops_ia) + len(reduced_Sec_Quant_CC_ops_ijab))])
opt_params, list_of_inputs, list_of_outputs = Adam_Opt(X0, GIVE_ENERGY,
                                                       calc_gradient_ADAM,
                                                       learning_rate=0.1,
                                                       beta_1=0.9,
                                                       beta_2=0.999,
                                                       epsilon=1e-8,
                                                       max_iter=50,
                                                       disp=True,
                                                       tolerance=1e-3,
                                                       store_steps=True)
# NOTE(review): VQE_experiment_ENERGY is not defined anywhere in this notebook --
# presumably this should be GIVE_ENERGY(opt_params); verify before running.
VQE_experiment_ENERGY(opt_params)
def LinAlgEnergy(theta_ia_theta_jab_list):
    """Exact (state-vector / sparse linear algebra) energy for the given angles.

    Same angle layout as GIVE_ENERGY: ia angles first, then ijab angles.
    """
    n_ia = len(reduced_theta_parameters_ia)
    theta_ia = theta_ia_theta_jab_list[:n_ia]
    theta_ijab = theta_ia_theta_jab_list[n_ia:]
    H_sparse = Hamilt.Get_sparse_Qubit_Hamiltonian_matrix(QubitHamiltonian)
    ansatz_mat = Ansatz_MATRIX(Hamilt.molecule.n_electrons, Hamilt.molecule.n_qubits,
                               reduced_Sec_Quant_CC_ops_ia, reduced_Sec_Quant_CC_ops_ijab)
    # Single-Trotter-step ansatz state; Calc_ansatz_state_withOUT_trot is the
    # non-Trotterised alternative.
    ket = ansatz_mat.Calc_ansatz_state_WITH_trot_SINGLE_STEP(theta_ia, theta_ijab, 'JW')
    return ansatz_mat.Calc_energy_of_state(ket, H_sparse)
# Fresh random start; evaluate the exact (linear-algebra) energy once.
X0 = np.array([random.uniform(0, 2*np.pi) for _ in range(len(reduced_Sec_Quant_CC_ops_ia) + len(reduced_Sec_Quant_CC_ops_ijab))])
LinAlgEnergy(X0)
def calc_gradient_ADAM_linAlg(theta_ia_theta_jab_list):
    """Parameter-shift gradient of the exact (LinAlgEnergy) objective as a numpy array."""
    gradients = []
    for idx, theta in enumerate(theta_ia_theta_jab_list):
        shifted = theta_ia_theta_jab_list.copy()
        # Shift this one parameter by +/- pi/4 and difference the objective.
        shifted[idx] = theta + np.pi / 4
        plus = LinAlgEnergy(shifted)
        shifted[idx] = theta - np.pi / 4
        minus = LinAlgEnergy(shifted)
        gradients.append(plus - minus)
    return np.array(gradients)
calc_gradient_ADAM_linAlg(X0)  # sanity check: gradient at the random start
# Adam run 1: cold start from the random X0.
opt_params, list_of_inputs, list_of_outputs = Adam_Opt(X0, LinAlgEnergy,
                                                       calc_gradient_ADAM_linAlg,
                                                       learning_rate=0.1,
                                                       beta_1=0.9,
                                                       beta_2=0.999,
                                                       epsilon=1e-8,
                                                       max_iter=50,
                                                       disp=True,
                                                       tolerance=1e-3,
                                                       store_steps=True)
LinAlgEnergy(opt_params)
# Adam run 2: warm restart from previously-found angles, smaller step, tighter tolerance.
INPUT = np.array([4.52776195, 4.42573835, 1.72722763, 3.14817708, 0.28249851,
                  1.43945444, 3.90949061, 7.84202741, 4.38961051, 6.35661061,
                  3.28588646, 6.25964932])
opt_params, list_of_inputs, list_of_outputs = Adam_Opt(INPUT, LinAlgEnergy,
                                                       calc_gradient_ADAM_linAlg,
                                                       learning_rate=0.01,
                                                       beta_1=0.9,
                                                       beta_2=0.999,
                                                       epsilon=1e-8,
                                                       max_iter=50,
                                                       disp=True,
                                                       tolerance=1e-5,
                                                       store_steps=True)
LinAlgEnergy(opt_params)
# Adam run 3: another warm restart, back to the larger learning rate.
INPUT = np.array([4.55383355, 4.55428763, 1.56073891, 3.16001904, 0.30119385,
                  1.55034305, 3.83016092, 7.89648636, 4.41184851, 6.29877879,
                  3.2903192 , 6.29409434])
opt_params, list_of_inputs, list_of_outputs = Adam_Opt(INPUT, LinAlgEnergy,
                                                       calc_gradient_ADAM_linAlg,
                                                       learning_rate=0.1,
                                                       beta_1=0.9,
                                                       beta_2=0.999,
                                                       epsilon=1e-8,
                                                       max_iter=50,
                                                       disp=True,
                                                       tolerance=1e-5,
                                                       store_steps=True)
LinAlgEnergy(opt_params)
Hamilt.molecule.ccsd_energy  # classical CCSD reference energy for comparison
| github_jupyter |
# M² Experimental Design
**Scott Prahl**
**Mar 2021**
The basic idea for measuring M² is simple. Use a CCD imager to capture the changing beam profile at different points along the direction of propagation. Doing this accurately is a challenge because the beam must always fit within the camera sensor and the measurement locations should include both points near the focus and far from the focus. Moreover, in most situations, the focus is not accessible. In this case a lens is used to create an artificial focus that can be measured.
One of the nice properties of M² is that it is not affected by refocusing: the artificially focused beam will have different beam waist and Rayleigh distances but the M² value will be the same as the original beam.
This notebook describes a set of constraints for selection of an imaging lens and then gives an example of a successful measurement and an unsuccessful measurement.
---
*If* `` laserbeamsize `` *is not installed, uncomment the following cell (i.e., delete the initial #) and execute it with* `` shift-enter ``. *Afterwards, you may need to restart the kernel/runtime before the module will import successfully.*
```
#!pip install --user laserbeamsize
import numpy as np
import matplotlib.pyplot as plt
try:
    import laserbeamsize as lbs
except ModuleNotFoundError:
    print('laserbeamsize is not installed. To install, uncomment and run the cell above.')
    print('Once installation is successful, rerun this cell again.')
# Camera pixel pitch, expressed in m, mm, and um for convenience below.
pixel_size = 3.75e-6 # pixel size in m
pixel_size_mm = pixel_size * 1e3
pixel_size_µm = pixel_size * 1e6
```
## Designing an M² measurement
We first need to figure out the focal length of the lens that will be used. The design example that we will use is for a low divergence beam. (High divergence lasers (e.g. diodes) are more suited to other techniques.)
Obviously, we do not want to introduce experimental artifacts into the measurement and therefore we want to minimize introducing wavefront aberrations with the lens. In general, to avoid spherical aberrations the f-number (the focal length divided by the beam diameter) of the lens should be over 20. For a low divergence beam the beam diameter will be about 1mm at the lens and, as we will see below, the allowed f-numbers will all be much greater than 20 and we don't need to worry about it further (as long as a plano-convex lens or doublet is used in the right orientation).
### Creating an artificial focus
An example of beam propagation is shown below. The beam waist is at -500mm and a lens is located at 0mm. The beam cross section is exaggerated because the aspect ratio on the axes is 1000:1.
```
# HeNe-like beam focused by a 300 mm lens; original waist sits 500 mm before the lens.
lambda0 = 632.8e-9 # wavelength of light [m]
w0 = 450e-6 # radius at beam waist [m]
f = 300e-3 # focal length of lens [m]
lbs.M2_focus_plot(w0, lambda0, f, z0=-500e-3, M2=2)
plt.show()
```
### Axial measurement positions
The ISO 11146-1 document, [Lasers and laser-related equipment - Test methods for laser beam widths, divergence angles and beam propagation, Part 1: Stigmatic and simple astigmatic beams](https://www.iso.org/obp/ui/#iso:std:iso:11146:-1:ed-1:v1:en) gives specific instructions for how to measure the M² value.
> If the beam waist is accessible for direct measurement, the beam waist location, beam widths, divergence angles and beam propagation ratios shall be determined by a hyperbolic fit to different measurements of the beam width along the propagation axis $z$. Hence, measurements at at least 10 different $z$ positions shall be taken. Approximately half of the measurements shall be distributed within one Rayleigh length on either side of the beam waist, and approximately half of them shall be distributed beyond two Rayleigh lengths from the beam waist. For simple astigmatic beams this procedure shall be applied separately for both principal directions.
In the picture above, the artificial beam waist is at 362mm and the Rayleigh distance for the artificial beam is 155mm. Therefore, to comply with the requirements above, five measurements should be made between 207 and 517mm of the lens and then five more at distances greater than 672mm. One possibility might be the ten measurements shown below.
```
lambda0 = 632.8e-9 # wavelength of light [m]
w0 = 450e-6 # radius at beam waist [m]
f = 300e-3 # focal length of lens [m]
# Ten candidate measurement planes: five within one Rayleigh length of the
# artificial waist and five beyond two Rayleigh lengths, per ISO 11146-1.
z = np.array([250, 300, 350, 400, 450, 675, 725, 775, 825, 875])*1e-3
lbs.M2_focus_plot(w0, lambda0, f, z0=-500e-3, M2=2)
# Beam radii at those planes, overlaid as red dots on the propagation plot.
r = lbs.beam_radius(250e-6, lambda0, z, z0=362e-3, M2=2)
plt.plot(z*1e3,r*1e6,'or')
plt.show()
```
### Camera sensor size constraints
If the beam is centered on the camera sensor then it should be larger than 20 pixels and it should be less than 1/4 of the narrower sensor dimension. The first constraint is critical for weakly divergent beams (e.g., HeNe) and the second is critical for strongly divergent beams (e.g., diode laser).
For a HeNe, this ensures that the focal length of the lens should be greater than 100mm. If we want 40 pixel diameters then the requirement is that the focal length must be more than 190mm.
(Use M²=1 so that the beam size is smallest possible.)
```
w0 = (1e-3)/2  # 1 mm beam diameter at the lens -> 0.5 mm radius
lambda0 = 632.8e-9
f = np.linspace(10,250)*1e-3  # candidate focal lengths [m]
s = -400e-3  # laser waist sits 400 mm before the lens
# Beam image must span at least 20 pixels and at most 1/4 of the 960-pixel sensor.
# (max_size is computed here but not plotted in this cell.)
max_size = 960 * 0.25 * pixel_size_µm
min_size = 20 * pixel_size_µm
# Artificial waist radius for each focal length; M2=1 gives the smallest possible beam.
w0_artificial = w0 * lbs.magnification(w0,lambda0,s,f,M2=1)
plt.plot(f*1e3, w0_artificial*1e6)
plt.axhspan(min_size, 0, color='blue', alpha=0.1)
plt.text(70, 20, "Image too small")
plt.xlabel("Focal Length (mm)")
plt.ylabel("Beam Radius (µm)")
plt.axvline(190,color='black')  # minimum acceptable focal length
plt.show()
```
### Working size constraints (i.e., the optical table is only so big)
The measurements must be made on an optical table. Now, while mirrors could be used to bounce the light around the table, this makes exact measurements of the lens to the camera sensor difficult. Thus we would like the distance from the lens to the focus + 4 Rayleigh distances to be less than a meter.
Longer focal length lenses reduce the relative error in the positioning of the camera sensor relative to the lens. If one is doing these measurements by hand then ±1mm might be a typical positioning error. A motorized stage could minimize such errors, but who has the money for a stage that moves half of a meter!
This means the focal distance needs to be less than 320mm. However, at this distance, the beam becomes too large and the largest focal length lens is now about 275mm.
```
w0 = 1e-3 / 2
lambda0 = 632.8e-9
f = np.linspace(50,500)*1e-3
s = -400e-3
M2 = 2
# Waist radius, waist location, and Rayleigh length of the artificial focus.
w0_artificial = w0 * lbs.magnification(w0,lambda0,s,f,M2=M2)
z0_artificial = lbs.image_distance(w0,lambda0,s,f,M2=M2)
zR_artificial = lbs.z_rayleigh(w0_artificial, lambda0, M2=M2)
# Farthest required measurement plane: artificial waist + 4 Rayleigh lengths.
lens_to_4zr_distance = z0_artificial + 4 * zR_artificial
plt.plot(f*1e3, lens_to_4zr_distance*1e3)
plt.axhspan(1000, lens_to_4zr_distance[-1]*1e3, color='blue', alpha=0.1)
plt.text(350, 1050, "Axial distance too far")
plt.xlabel("Focal Length (mm)")
plt.ylabel("$z_0+4z_R$ (mm)")
plt.axvline(320,color='black')  # focal lengths past this exceed the 1 m table budget
plt.show()
# The beam at that farthest plane must still fit within 1/4 of the sensor.
radius_at_4zr = lbs.beam_radius(w0_artificial, lambda0, lens_to_4zr_distance, z0=z0_artificial, M2=M2)
max_size = 960 * 0.25 * pixel_size_µm
plt.plot(f*1e3, radius_at_4zr*1e6)
plt.axhspan(1600, max_size, color='blue', alpha=0.1)
plt.text(350, 1000, "Beam too big")
plt.axvline(275,color='black')  # maximum acceptable focal length
plt.xlabel("Focal Length (mm)")
plt.ylabel("Beam Radius (mm)")
plt.show()
```
### Putting it all together
The focal length of the lens to measure a multimode HeNe beam should then be between 190 and 275 mm. Here is what a reasonable set of measurements should be for a f=250mm lens.
```
lambda0 = 632.8e-9 # wavelength of light [m]
w0 = 500e-6 # radius at beam waist [m]
f = 250e-3 # focal length of lens [m]
s = -400e-3 # beam waist in laser to lens distance [m]
M2 = 2
lbs.M2_focus_plot(w0, lambda0, f, z0=s, M2=M2)
# Properties of the artificial focus created by the lens.
z0_after = lbs.image_distance(w0,lambda0,s,f,M2=M2)
w0_after = w0 * lbs.magnification(w0,lambda0,s,f,M2=M2)
zR_after = lbs.z_rayleigh(w0_after,lambda0,M2=M2)
# ISO 11146-1 sampling: five points within one Rayleigh length of the waist (red),
# five points two-to-four Rayleigh lengths past the waist (blue).
zn = np.linspace(z0_after-zR_after,z0_after+zR_after,5)
zf = np.linspace(z0_after+2*zR_after,z0_after+4*zR_after,5)
rn = lbs.beam_radius(w0_after, lambda0, zn, z0=z0_after, M2=2)
rf = lbs.beam_radius(w0_after, lambda0, zf, z0=z0_after, M2=2)
plt.plot(zn*1e3,rn*1e6,'or')
plt.plot(zf*1e3,rf*1e6,'ob')
plt.show()
```
## Good spacing of beam size measurements
```
# datapoints digitized by hand from the graph at https://www.rp-photonics.com/beam_quality.html
lambda1=308e-9
z1_all=np.array([-200,-180,-160,-140,-120,-100,-80,-60,-40,-20,0,20,40,60,80,99,120,140,160,180,200])*1e-3  # axial positions [m]
d1_all=2*np.array([416,384,366,311,279,245,216,176,151,120,101,93,102,120,147,177,217,256,291,316,348])*1e-6  # beam diameters [m]
lbs.M2_radius_plot(z1_all, d1_all, lambda1, strict=True)
```
## Poor spacing of beam size measurements
A nice fit of the beam is achieved, however the fitted value for M²<1. This is impossible. Basically the problem boils down to the fact that the measurements in the beam waist are terrible for determining the actual divergence of the beam. The fit then severely underestimates the divergence of the beam and claims that the beam diverges more slowly than a simple Gaussian beam!!
```
## Some Examples
f=500e-3 # m
lambda2 = 632.8e-9 # m
# Per the discussion above, these samples are poorly distributed around the
# waist, so the hyperbolic fit underestimates divergence (fitted M2 < 1).
z2_all = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770]) * 1e-3 # [m]
d2_all = 2*np.array([597, 572, 547, 554, 479, 404, 415, 399, 377, 391, 326, 397]) * 1e-6 # [m]
lbs.M2_radius_plot(z2_all, d2_all, lambda2, strict=True)
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/darshvaghasia12/Awesome-Web-Art/blob/master/Music_Genre_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
pip install python_speech_features
from python_speech_features import mfcc
import scipy.io.wavfile as wav
import numpy as np
from tempfile import TemporaryFile
import os
import pickle
import random
import operator
import math
# K-nearest-neighbour search over the Gaussian feature models.
def getNeighbors(trainingSet, instance, k):
    """Return the class labels of the k training samples closest to `instance`.

    Distances are symmetrised as d(a, b) + d(b, a); each training sample is a
    (mean, covariance, label) tuple, with the label at index 2.
    """
    scored = []
    for sample in trainingSet:
        sym = distance(sample, instance, k) + distance(instance, sample, k)
        scored.append((sample[2], sym))
    scored.sort(key=lambda pair: pair[1])
    return [scored[i][0] for i in range(k)]
def nearestClass(neighbors):
    """Majority vote: the label occurring most often among `neighbors`.

    Ties are broken in favour of the label encountered first.
    """
    votes = {}
    for label in neighbors:
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(), key=lambda item: item[1], reverse=True)
    return ranked[0][0]
# Model evaluation
def getAccuracy(testSet, predictions):
    """Fraction of samples whose stored label (last tuple entry) matches the prediction.

    testSet     -- iterable of (mean, covariance, label) feature tuples
    predictions -- predicted labels, aligned index-by-index with testSet

    Returns 0.0 for an empty test set (the original raised ZeroDivisionError).
    """
    if not testSet:
        return 0.0
    correct = sum(1 for sample, pred in zip(testSet, predictions) if sample[-1] == pred)
    return correct / len(testSet)
#Extraction of Features
# Walk the first 10 genre folders, compute per-track MFCC summary statistics
# (mean vector + covariance matrix), and pickle them with a 1-based genre index.
directory="/content/drive/MyDrive/genres"
f=open("my.dat",'wb')
i=0
for folder in os.listdir(directory):
    # NOTE(review): os.listdir order is arbitrary, so the label <-> genre mapping
    # depends on directory enumeration order -- keep it consistent when predicting.
    i+=1
    if i==11:
        break  # only the first 10 folders are used
    for file in os.listdir(directory+"/"+folder):
        (rate,sig)=wav.read(directory+"/"+folder+"/"+file)
        mfcc_feat=mfcc(sig,rate,winlen=0.020,appendEnergy=False)
        covariance=np.cov(np.matrix.transpose(mfcc_feat))
        mean_matrix=mfcc_feat.mean(0)
        feature=(mean_matrix,covariance,i)  # i is the genre label
        pickle.dump(feature,f)
f.close()  # NOTE(review): f leaks if an exception occurs above -- consider 'with open(...)'
dataset=[]
def loadDataset(filename, split, trSet, teSet):
    """Load pickled (mean, covariance, label) features and split them train/test.

    Fixes two bugs in the original: it caught the misspelled name ``EOFerror``
    (a NameError the moment the end of file was reached) and it ignored
    ``filename``, always reading the hard-coded "my.dat".

    filename -- path of the pickle file written by the feature-extraction step
    split    -- probability that a sample goes to trSet (the rest go to teSet)
    trSet/teSet -- lists mutated in place; the global ``dataset`` also
                   accumulates every loaded feature, as before
    """
    with open(filename, 'rb') as f:
        while True:
            try:
                dataset.append(pickle.load(f))
            except EOFError:
                break
    for feature in dataset:
        if random.random() < split:
            trSet.append(feature)
        else:
            teSet.append(feature)
trainingSet=[]
testSet=[]
# ~66% of samples go to the training set, the rest to the test set.
loadDataset("my.dat",0.66,trainingSet,testSet)
def distance(instance1, instance2, k):
    """KL-divergence-style distance between two Gaussian feature models, minus k.

    Each instance is a (mean_vector, covariance_matrix, label) tuple; the label
    entry is ignored here.  Note the measure is asymmetric in its arguments.
    """
    mean1, cov1 = instance1[0], instance1[1]
    mean2, cov2 = instance2[0], instance2[1]
    inv_cov2 = np.linalg.inv(cov2)
    diff = mean2 - mean1
    total = np.trace(np.dot(inv_cov2, cov1))
    total += np.dot(np.dot(diff.transpose(), inv_cov2), diff)
    total += np.log(np.linalg.det(cov2)) - np.log(np.linalg.det(cov1))
    return total - k
#prediction on Accuracy
leng = len(testSet)
predictions = []
# 5-nearest-neighbour prediction for every test sample
for x in range (leng):
    predictions.append(nearestClass(getNeighbors(trainingSet ,testSet[x] , 5)))
accuracy1 = getAccuracy(testSet , predictions)
print(accuracy1)
from python_speech_features import mfcc
import scipy.io.wavfile as wav
import numpy as np
from tempfile import TemporaryFile
import os
import pickle
import random
import operator
import math
import numpy as np
from collections import defaultdict
dataset = []
def loadDataset(filename):
    """Append every pickled feature stored in `filename` to the global `dataset`.

    Fix: the original ignored `filename` and always read the hard-coded
    "my.dat"; it also left the file object open on the success path until the
    EOFError fired.  A context manager now guarantees the file is closed.
    """
    with open(filename, 'rb') as f:
        while True:
            try:
                dataset.append(pickle.load(f))
            except EOFError:
                break
loadDataset("my.dat")  # read the saved features into the global dataset list
def distance(instance1, instance2, k):
    """Asymmetric Gaussian-model distance (trace + Mahalanobis + log-det terms) minus k.

    Instances are (mean_vector, covariance_matrix, label) tuples; labels are unused.
    """
    mu_a, cov_a = instance1[0], instance1[1]
    mu_b, cov_b = instance2[0], instance2[1]
    cov_b_inv = np.linalg.inv(cov_b)
    delta = mu_b - mu_a
    result = np.trace(np.dot(cov_b_inv, cov_a))
    result += np.dot(np.dot(delta.transpose(), cov_b_inv), delta)
    result += np.log(np.linalg.det(cov_b)) - np.log(np.linalg.det(cov_a))
    return result - k
def getNeighbors(trainingSet, instance, k):
    """Labels of the k nearest training samples, using the symmetrised distance.

    Each training sample is a (mean, covariance, label) tuple (label at index 2).
    """
    labelled_dists = []
    for sample in trainingSet:
        pairwise = distance(sample, instance, k) + distance(instance, sample, k)
        labelled_dists.append((sample[2], pairwise))
    labelled_dists.sort(key=lambda entry: entry[1])
    return [labelled_dists[i][0] for i in range(k)]
def nearestClass(neighbors):
    """Return the most frequent label in `neighbors` (first-seen label wins ties)."""
    tally = {}
    for vote in neighbors:
        tally[vote] = tally.get(vote, 0) + 1
    ordered = sorted(tally.items(), key=lambda kv: kv[1], reverse=True)
    return ordered[0][0]
# Map the 1-based genre index back to the genre folder name.
# NOTE(review): defaultdict(int) is an odd choice since the values are strings;
# a plain dict would do.  The iteration order must match the one used when the
# labels were assigned during feature extraction.
results=defaultdict(int)
i=1
for folder in os.listdir("/content/drive/MyDrive/genres"):
    results[i]=folder
    i+=1
# Classify one new track: extract the same MFCC summary features, then vote
# among the 5 nearest training samples.
(rate,sig)=wav.read("/content/drive/MyDrive/genres/Baarishein (DARSH MUSIC).wav")
mfcc_feat=mfcc(sig,rate,winlen=0.020,appendEnergy=False)
covariance = np.cov(np.matrix.transpose(mfcc_feat))
mean_matrix = mfcc_feat.mean(0)
feature=(mean_matrix,covariance,0)  # label 0 is a dummy; distance() ignores it
pred=nearestClass(getNeighbors(dataset ,feature , 5))
print(results[pred])
```
| github_jupyter |
# Convolutional Layer
In this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer.
In this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights.
<img src='notebook_ims/conv_layer.gif' height=60% width=60% />
### Import the image
```
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
# NOTE(review): cv2.imread returns None when the path is missing; the cvtColor
# call below would then fail -- verify the file exists.
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
```
### Define and visualize the filters
```
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
# 4x4 kernel: -1 on the left half, +1 on the right half (vertical-edge detector).
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1   # opposite polarity
filter_3 = filter_1.T  # transposed: responds to horizontal edges
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
    ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
    ax.imshow(filters[i], cmap='gray')
    ax.set_title('Filter %s' % str(i+1))
    width, height = filters[i].shape
    # annotate each cell with its value; white text on dark (negative) cells
    for x in range(width):
        for y in range(height):
            ax.annotate(str(filters[i][x][y]), xy=(y,x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if filters[i][x][y]<0 else 'black')
```
## Define a convolutional layer
The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a:
* Convolutional layer
Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!
#### `__init__` and `forward`
To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the forward behavior of a network that applies those initialized layers to an input (`x`) in the function `forward`. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a list data type in Python.
Below, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
# A minimal network: one convolutional layer holding four fixed filters.
class Net(nn.Module):
    """Single conv layer whose weights are supplied, not learned.

    `weight` must have shape (4, 1, k_height, k_width): four filters over a
    single grayscale input channel.
    """

    def __init__(self, weight):
        super(Net, self).__init__()
        # Kernel size comes from the supplied weight tensor; no bias term.
        k_height, k_width = weight.shape[2:]
        self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
        # Replace the random initialisation with the hand-crafted filters.
        self.conv.weight = torch.nn.Parameter(weight)

    def forward(self, x):
        """Return the (pre-activation, post-ReLU) outputs of the conv layer."""
        pre_activation = self.conv(x)
        post_activation = F.relu(pre_activation)
        return pre_activation, post_activation
# instantiate the model and set the weights
# filters has shape (4, 4, 4); unsqueeze(1) inserts the single input channel,
# giving the (out_channels=4, in_channels=1, 4, 4) layout nn.Conv2d expects.
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
```
### Visualize the output of each filter
First, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
```
# Helper that plots the first few channels of a layer's output side by side.
def viz_layer(layer, n_filters=4):
    """Show `n_filters` activation maps from `layer` (batch index 0), grayscale."""
    fig = plt.figure(figsize=(20, 20))
    for idx in range(n_filters):
        axis = fig.add_subplot(1, n_filters, idx + 1, xticks=[], yticks=[])
        # Pull channel idx of the first batch element out as a 2-D numpy image.
        channel = np.squeeze(layer[0, idx].data.numpy())
        axis.imshow(channel, cmap='gray')
        axis.set_title('Output %s' % str(idx + 1))
```
Let's look at the output of a convolutional layer, before and after a ReLu activation function is applied.
```
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
```
#### ReLu activation
In this model, we've used an activation function that scales the output of the convolutional layer. We've chosen a ReLU function to do this, and this function simply turns all negative pixel values into 0's (black). See the equation pictured below for input pixel values, `x`.
<img src='notebook_ims/relu_ex.png' height=50% width=50% />
```
# after a ReLu is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Train Your Own Model and Convert It to TFLite
# 0. Setup
Uninstall TensorFlow เวอร์ชันที่อยู่ใน Colab, Install Version nightly แล้ว Restart Runtime
```
# !pip3 uninstall tensorflow
# !pip3 install tf-nightly
```
ใน ep นี้ เราจะใช้ TensorFlow 2 ด้วยคำสั่ง Magic %tensorflow_version 2.x (สำหรับ Google Colab)
```
try:
%tensorflow_version 2.x
except:
pass
```
# 1. Import
Import Library ที่เกี่ยวข้อง และ Print เลข Version
```
import pathlib
import numpy as np
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
print("\u2022 Using TensorFlow Version:", tf.__version__)
print('\u2022 GPU Device Found.' if tf.test.is_gpu_available() else '\u2022 GPU Device Not Found. Running on CPU')
```
# 2. Dataset
## 2.1 Split Data to Training / Validation / Test Set
เราจะใช้ TensorFlow Dataset `tfds` โหลดชุดข้อมูล [Fashion MNIST Dataset](https://www.bualabs.com/archives/3398/what-is-fashion-mnist-dataset/) ขึ้นมา แล้ว [Split Training / Validation / Test Set](https://www.bualabs.com/archives/532/what-is-training-set-why-train-test-split-training-set-validation-set-test-set/) ด้วยสัดส่วน 80/10/10
```
splits = tfds.Split.ALL.subsplit(weighted=(80, 10, 10))
splits, info = tfds.load('fashion_mnist', with_info=True, as_supervised=True, split = splits)
(train_examples, validation_examples, test_examples) = splits
num_examples = info.splits['train'].num_examples
num_classes = info.features['label'].num_classes
num_examples, num_classes
```
จะได้ [Dataset](https://www.bualabs.com/archives/1994/dataset-dataloader-feed-data-x-y-batch-to-neural-network-refactor-training-loop-neural-network-ep-5/) ที่มีข้อมูล Training Set 60,000 ตัวอย่าง มี 10 Class เนื่องจาก ชื่อ Class Name ไม่มีใน Dataset เราจะประกาศใส่ List ไว้ดังด้านล่าง
```
class_names = ['T-shirt_top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
สร้างไฟล์เก็บ Label ตั้งชื่อว่า labels.txt
```
# Create a labels.txt file with the class names
with open('labels.txt', 'w') as f:
f.write('\n'.join(class_names))
```
ขนาดรูปของ Fashion MNIST เหมือนกับ MNIST คือ 28 x 28 Pixel
```
# The images in the dataset are 28 by 28 pixels.
IMG_SIZE = 28
```
# 2.2 Preprocessing data
ประกาศฟังก์ชัน ใช้ `tf.image` เพื่อแปลงรูปใน Dataset ให้อยู่ในรูปแบบที่โมเดลต้องการ ในที่นี้คือ Resize เป็นขนาดที่กำหนด และ Rescale ค่าสี จาก 0-255 หาร 255 ให้เป็น Float 0-1
```
def format_example(image, label):
    """Cast an image to float32 and rescale its pixels from [0, 255] to [0, 1].

    The label passes through unchanged, so the function can be mapped
    directly over a (image, label) tf.data pipeline.
    """
    rescaled = tf.dtypes.cast(image, tf.float32) / 255.0
    return rescaled, label
```
กำหนดขนาด [Batch Size](https://www.bualabs.com/archives/729/what-is-batch-size-in-deep-neural-networks-how-to-adjust-machine-learning-model-accuracy-deep-learning-hyperparameter-tuning-ep-2/) ให้ [DataLoader](https://www.bualabs.com/archives/1994/dataset-dataloader-feed-data-x-y-batch-to-neural-network-refactor-training-loop-neural-network-ep-5/)
```
# Specify the batch size
BATCH_SIZE = 256
```
Shuffle สับไพ่ข้อมูล และแบ่งข้อมูลเป็น Batch ตาม Batch Size ที่กำหนดด้านบน
```
# Create Datasets
train_batches = train_examples.cache().shuffle(num_examples//4).batch(BATCH_SIZE).map(format_example).prefetch(1)
validation_batches = validation_examples.cache().batch(BATCH_SIZE).map(format_example)
test_batches = test_examples.batch(1).map(format_example)
```
# 3. Building the Model
สร้างโมเดล Convolutional Neural Network ที่มี 10 Class
```
model = tf.keras.Sequential([
# Set the input shape to (28, 28, 1), kernel size=3, filters=16 and use ReLU activation,
tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
# Set the number of filters to 32, kernel size to 3 and use ReLU activation
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
# Flatten the output layer to 1 dimension
tf.keras.layers.Flatten(),
# Add a fully connected layer with 64 hidden units and ReLU activation
tf.keras.layers.Dense(64, activation='relu'),
# Attach a final softmax classification head
tf.keras.layers.Dense(10, activation='softmax')])
```
ใช้ Adam [Optimizer](https://www.bualabs.com/archives/2042/refactor-parameter-optimizer-neural-network-train-deep-learning-machine-learning-neural-network-ep-6/) ใช้ Categorical [Cross Entropy Loss](https://www.bualabs.com/archives/1945/what-is-cross-entropy-loss-logistic-regression-log-loss-loss-function-ep-3/) สำหรับงาน [Multi-class Classification](https://www.bualabs.com/archives/3396/tensorflow-js-fashion-mnist-dataset-convolutional-neural-network-convnet-cnn-visualization-tfvis-tfjs-ep-5/) และ ใช้ Accuracy เป็น [Metrics](https://www.bualabs.com/archives/1968/what-is-metrics-confusion-matrix-accuracy-precision-recall-f1-score-difference-metrics-ep-1/) วัดผล
```
# Set the appropriate loss function and use accuracy as your metric
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
ดู [Model Architecture](https://www.bualabs.com/archives/2703/how-to-read-model-convolutional-neural-network-shape-activation-map-model-architecture-convnet-ep-7/) ที่เราสร้างด้านบน
```
model.summary()
```
# 4. Training the Model
```
model.fit(train_batches,
epochs=10,
validation_data=validation_batches)
```
# 5. Export the Model
Export โมเดลที่เทรนเสร็จเรียบร้อยแล้ว ในรูปแบบ ไฟล์ SavedModel Format
```
export_dir = 'saved_model/1'
tf.saved_model.save(model, export_dir)
```
# 6. Convert ไฟล์ SavedModel ด้วย TFLite Converter
ใช้ [TFLite Converter](https://www.bualabs.com/archives/3595/what-is-tensorflow-lite-converter-convert-mobilenet-transfer-learning-classifier-head-deploy-mobile-iot-edge-device-microcontroller-tflite-ep-3/) โหลดไฟล์โมเดล SavedModel ที่เรา Export ไว้ด้านบน
```
#@title เลือกโหมดที่จะทำ optimization
mode = "Speed" #@param ["Default", "Storage", "Speed"]
if mode == 'Storage':
optimization = tf.lite.Optimize.OPTIMIZE_FOR_SIZE
elif mode == 'Speed':
optimization = tf.lite.Optimize.OPTIMIZE_FOR_LATENCY
else:
optimization = tf.lite.Optimize.DEFAULT
```
Convert โมเดลเป็นไฟล์ tflite แล้ว Save ลง Disk
```
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
converter.experimental_new_converter = True
# Set the optimizations.
# BUG FIX: `optimizations` expects a list of tf.lite.Optimize values. The
# previous code assigned the mode *string* ("Speed"/"Storage"/"Default"),
# silently discarding the tf.lite.Optimize selected in the cell above.
converter.optimizations = [optimization]
# Invoke the converter to finally generate the TFLite model
tflite_model = converter.convert()
```
ได้ไฟล์รวม ขนาด 1015768 Bytes
```
tflite_model_file = pathlib.Path('./model.tflite')
tflite_model_file.write_bytes(tflite_model)
```
# 7. Test โมเดล tflite ด้วย TFLite Interpreter
ใช้ TFLite Interpreter โหลดไฟล์ tflite ขึ้นมา
```
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
```
สุ่มเลือกรูปจาก Test Set มาให้โมเดล ทำ Inference จำนวน 50 รูป
```
# Gather results for the randomly sampled test images
predictions = []
test_labels = []
test_images = []
for img, label in test_batches.take(50):
interpreter.set_tensor(input_index, img)
interpreter.invoke()
predictions.append(interpreter.get_tensor(output_index))
test_labels.append(label[0])
test_images.append(np.array(img))
```
ฟังก์ชันสำหรับการพล็อตแสดงผล
```
#@title Utility functions for plotting
# Utilities for plotting
def plot_image(i, predictions_array, true_label, img):
    """Show test image ``i`` with its predicted class, confidence and true class.

    The x-label is drawn green when the prediction matches the true label,
    red otherwise.
    """
    # Select the i-th prediction vector, label and image from the collected lists.
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Drop singleton batch/channel dimensions so imshow gets a 2-D array.
    img = np.squeeze(img)
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    # true_label is a TF tensor here (collected from the dataset), hence .numpy().
    if predicted_label == true_label.numpy():
        color = 'green'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]), color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the ten class probabilities for sample ``i``.

    The predicted class bar is painted red and the true class bar green;
    when they coincide the green wins because it is applied last.
    """
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks(list(range(10)), class_names, rotation='vertical')
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array[0], color="#777777")
    plt.ylim([0, 1])
    top_class = np.argmax(predictions_array[0])
    bars[top_class].set_color('red')
    bars[true_label].set_color('green')
```
นำผลลัพธ์ที่ได้ มาพล็อตแสดงรูป เปรียบเทียบ label และ prediction เราสามารถเลื่อนดู ตัวอย่างต่าง ๆ ได้ทั้ง 50 ตัวอย่าง
```
#@title Visualize the outputs { run: "auto" }
index = 44 #@param {type:"slider", min:1, max:50, step:1}
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(index, predictions, test_labels, test_images)
plt.show()
plot_value_array(index, predictions, test_labels)
plt.show()
```
# 8. Save และ Download ไฟล์ tflite
Save ไฟล์ และ Download โมเดล และ Label มาที่ Local Disk เพื่อนำไปใส่ Device ที่ต้องการต่อไป
หมายเหตุ: เราอาจจะต้อง กดอนุญาตให้ Web Browser สามารถ Download หลาย ๆ ไฟล์ได้พร้อมกัน
```
try:
from google.colab import files
files.download(tflite_model_file)
files.download('labels.txt')
except:
pass
```
# 9. Credit
* https://www.coursera.org/learn/device-based-models-tensorflow/
* https://github.com/lmoroney/dlaicourse/tree/master/TensorFlow%20Deployment
* https://www.tensorflow.org/lite/convert
| github_jupyter |
## Set Root Directory and Out Directory
```
import os
import time
ROOT_DIR = os.path.abspath('')
OUT_DIR = os.path.join(ROOT_DIR, 'out')
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
```
## Load WebDriver for Chrome
https://sites.google.com/a/chromium.org/chromedriver/downloads
```
DRIVER = os.path.join(ROOT_DIR, 'chromedriver')
from selenium import webdriver
driver = webdriver.Chrome(DRIVER)
driver.implicitly_wait(5)
window = {}
window['main'] = driver.window_handles[-1]
```
## Open Page
```
ECAMPUS = 'https://ecampus.ut.ac.kr'
driver.get(ECAMPUS)
```
## Load Authentication Info
```
import json
SECRET_JSON = os.path.join(ROOT_DIR, 'secrets.json')
with open(SECRET_JSON) as f:
secrets = json.load(f)
login_id = secrets["ID"]
login_pw = secrets["PW"]
```
## Login
```
def logout():
    """Click the site's logout button; raises if no such element is present."""
    driver.find_element_by_id('btn_logout').click()
def login(_id, _pw):
    """Log in to eCampus with the given credentials via the page's login_proc()."""
    # Best-effort logout first; failure just means nobody was logged in.
    try:
        logout()
    except:
        pass
    # Enter Info
    driver.find_element_by_id('id').send_keys(_id)
    driver.find_element_by_id('pass').send_keys(_pw)
    # Login -- the site exposes a JS login routine instead of a submit button.
    driver.execute_script('login_proc()')
    time.sleep(5)
login(login_id, login_pw)
login_id, login_pw = (None, None)
time.sleep(10)
panel = driver.find_element_by_id('selfInfoAfter')
lecture_list = panel.find_element_by_class_name('lecInfo')
print(lecture_list)
lectures = lecture_list.find_elements_by_xpath("//a[contains(., '2020')]")
print(lectures)
```
## Enter The Lecture
```
sample_lecture = lectures[0]
sample_lecture.click()
```
You can enter the 'lecture room' with path below.
It will lead you to the lecture room of the last lecture you entered...
```
lecture_room_url = "https://ecampus.ut.ac.kr/lms/class/courseSchedule/doListView.dunet"
driver.get(lecture_room_url)
time.sleep(2)
```
## Get courses list
```
# not_progressed_list = driver.find_elements_by_xpath("//td[contains(., 'not progressed')]")
current_courses_link = driver.find_elements_by_xpath("//a[contains(., 'Lecture view')]")
current_courses = [course_link.find_element_by_xpath("../..") for course_link in current_courses_link]
current_courses_data = []
titles = []
links = []
mins = []
for course in current_courses:
datas = course.find_elements_by_tag_name('td')
title = datas[1].text
lecture_time = datas[2].text
period = datas[3].text
status = datas[4].text
link = datas[5].find_element_by_class_name('lectureWindow')
# link = datas[5]
print(title, lecture_time, period, status, link)
print()
if status != "Complete":
titles.append(title)
links.append(link)
mins.append(int(lecture_time[:-6]))
print(titles)
```
## Check Study Time and Open Lecture Window
```
import tqdm

print("{} courses.".format(len(links)))
# Convert each lecture length from minutes to seconds.
seconds = [minute * 60 for minute in mins]
for sec, title, link in tqdm.tqdm(zip(seconds, titles, links)):
    print("{} for {}minutes...".format(title, sec//60))
    link.click()
    # The lecture opens in a new window; it is the most recently created handle.
    window_lecture = driver.window_handles[-1]
    # Wait the full lecture duration plus a safety margin before closing.
    time.sleep(sec + 100)
    driver.switch_to.window(window_lecture)
    driver.close()
    # BUG FIX: after close() the driver still targets the dead lecture window,
    # so the next iteration's link.click() would fail. Return focus to the
    # main course-list window saved earlier in window['main'].
    driver.switch_to.window(window['main'])
    print("Course End")
print("Finished.")
# sample_link = links[-1]
# sample_course = sample_link.find_element_by_xpath("../..")
# tds = sample_course.find_elements_by_tag_name('td')
# # sample_lecture_time = abcs[-1].find_element_by_name('td')[2][:-7]
# print(tds[1].text)
#TODO: "not progressed and lecture VIEW'
# sample_link.click()
# window['lecture'] = driver.window_handles[-1]
# time.sleep(5)
window['lecture']
```
| github_jupyter |
# Scene Classification
## 3. Build Model-InceptionV3 BatchTrain Top2Layer
- Import pkg
- Load sample data, only first 1000 objects
-
Reference:
- https://challenger.ai/competitions
- https://github.com/jupyter/notebook/issues/2287
**Tensorboard**
1. Input at command: **tensorboard --logdir=./log**
2. Input at browser: **http://127.0.0.1:6006**
### Import pkg
```
import numpy as np
import pandas as pd
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler, TensorBoard
# import zipfile
import os
import zipfile
import math
import time
from IPython.display import display
import pdb
import json
from PIL import Image
import glob
import pickle
```
### Load sample data, only first 1000 objects
```
input_path = './input'
datasetName = 'train'
date = '20170904'
zip_path = input_path + '/ai_challenger_scene_{0}_{1}.zip'.format(datasetName, date)
extract_path = input_path + '/ai_challenger_scene_{0}_{1}'.format(datasetName, date)
image_path = extract_path + '/scene_{0}_images_{1}'.format(datasetName, date)
scene_classes_path = extract_path + '/scene_classes.csv'
scene_annotations_path = extract_path + '/scene_{0}_annotations_{1}.json'.format(datasetName, date)
print(input_path)
print(zip_path)
print(extract_path)
print(image_path)
print(scene_classes_path)
print(scene_annotations_path)
scene_classes = pd.read_csv(scene_classes_path, header=None)
display(scene_classes.head())
# NOTE(review): the parameter name contains a typo ("lable"); kept as-is so
# any keyword-argument callers keep working.
def get_scene_name(lable_number, scene_classes_path):
    """Look up the scene name for a numeric label.

    The classes CSV has no header row; column 2 holds the human-readable name.
    """
    classes_table = pd.read_csv(scene_classes_path, header=None)
    return classes_table.loc[lable_number, 2]
print(get_scene_name(0, scene_classes_path))
def load_pickle_data(dataset, index):
    """Load the ``index``-th pickled (x, y) arrays for the given dataset split.

    Returns (x_data, y_data) on success, or None (implicit) when either
    pickle file is missing. Paths and array shapes are printed for logging.
    """
    folder = './input/pickle_{0}'.format(dataset)
    print(folder)
    x_path = folder + '/x_data{0}.p'.format(index)
    y_path = folder + '/y_data{0}.p'.format(index)
    print(x_path)
    print(y_path)
    # Bail out (returning None) at the first missing file.
    for path in (x_path, y_path):
        if not os.path.exists(path):
            print(path + ' do not exist!')
            return
    with open(x_path, mode='rb') as x_file:
        x_data = pickle.load(x_file)
    with open(y_path, mode='rb') as y_file:
        y_data = pickle.load(y_file)
    print(x_data.shape)
    print(y_data.shape)
    return (x_data, y_data)
x_train, y_train = load_pickle_data("train", 0)
print(x_train.shape)
print(y_train.shape)
del x_train
del y_train
%%time
x_train, y_train = load_pickle_data("train", 0)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].imshow(x_train[0])
ax[0].set_title(get_scene_name(y_train[0], scene_classes_path))
ax[1].imshow(x_train[1])
ax[1].set_title(get_scene_name(y_train[1], scene_classes_path))
del x_train
del y_train
%%time
x_val, y_val = load_pickle_data("validation", 0)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].imshow(x_val[0])
ax[0].set_title(get_scene_name(y_val[0], scene_classes_path))
ax[1].imshow(x_val[1])
ax[1].set_title(get_scene_name(y_val[1], scene_classes_path))
del x_val
del y_val
```
### Load model
```
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from keras.applications.inception_v3 import InceptionV3
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes
predictions = Dense(80, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer = Adam(lr=1e-4), metrics=["accuracy"])
for i, layer in enumerate(model.layers):
print(i, layer.name)
def saveModel(model, middleName):
    """Persist a Keras model under ./model with a timestamped file name.

    Writes the architecture as both JSON and YAML, and the full model
    (architecture + weights) as an HDF5 file via ``model.save``.
    """
    modelPath = './model'
    if not os.path.isdir(modelPath):
        os.mkdir(modelPath)
    fileName = middleName + time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
    base = modelPath + '/' + fileName
    with open(base + '.json', 'w') as file:
        file.write(model.to_json())
    with open(base + '.yaml', 'w') as file:
        file.write(model.to_yaml())
    # One self-contained HDF5 file holding weights and architecture.
    model.save(base + '.h5')
# saveModel(model, 'ModelSaveTest')
```
**Train top 2 inception**
```
train_datagen = ImageDataGenerator(rescale=1./255,
rotation_range = 20,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1,
horizontal_flip = True,
vertical_flip = True)
train_generator = train_datagen.flow_from_directory('./input/data_train',
target_size=(224, 224),
batch_size=64,
class_mode = "categorical")
print(train_generator.classes[0:1000])
# annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
tensorBoard = TensorBoard(log_dir='./log_Top2Inc_171001')
x_val, y_val = load_pickle_data("validation", 0)
y_val = to_categorical(y_val)
# model.compile(loss='categorical_crossentropy', optimizer = Adam(lr=1e-4), metrics=["accuracy"])
hist = model.fit_generator(train_generator,
steps_per_epoch=128,
epochs=32, #Increase this when not on Kaggle kernel
verbose=2, #1 for ETA, 0 for silent
validation_data=(x_val, y_val),
callbacks=[tensorBoard])
saveModel(model, 'TrainImageFolder')
final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
plt.plot(hist.history['loss'], color='b')
plt.plot(hist.history['val_loss'], color='r')
plt.show()
plt.plot(hist.history['acc'], color='b')
plt.plot(hist.history['val_acc'], color='r')
plt.show()
print('Done!')
```
| github_jupyter |
# Deep Learning & Art: Neural Style Transfer
Welcome to the second assignment of this week. In this assignment, you will learn about Neural Style Transfer. This algorithm was created by Gatys et al. (2015) (https://arxiv.org/abs/1508.06576).
**In this assignment, you will:**
- Implement the neural style transfer algorithm
- Generate novel artistic images using your algorithm
Most of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values!
```
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from nst_utils import *
import numpy as np
import tensorflow as tf
%matplotlib inline
```
## 1 - Problem Statement
Neural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely, a "content" image (C) and a "style" image (S), to create a "generated" image (G). The generated image G combines the "content" of the image C with the "style" of image S.
In this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by Claude Monet, a leader of the impressionist movement (style image S).
<img src="images/louvre_generated.png" style="width:750px;height:200px;">
Let's see how you can do this.
## 2 - Transfer Learning
Neural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning.
Following the original NST paper (https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the earlier layers) and high level features (at the deeper layers).
Run the following code to load parameters from the VGG model. This may take a few seconds.
```
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
print(model)
```
The model is stored in a python dictionary where each variable name is the key and the corresponding value is a tensor containing that variable's value. To run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this:
```python
model["input"].assign(image)
```
This assigns the image as an input to the model. After this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows:
```python
sess.run(model["conv4_2"])
```
## 3 - Neural Style Transfer
We will build the NST algorithm in three steps:
- Build the content cost function $J_{content}(C,G)$
- Build the style cost function $J_{style}(S,G)$
- Put it together to get $J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$.
### 3.1 - Computing the content cost
In our running example, the content image C will be the picture of the Louvre Museum in Paris. Run the code below to see a picture of the Louvre.
```
content_image = scipy.misc.imread("images/louvre.jpg")
imshow(content_image)
```
The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.
**3.1.1 - How do you ensure the generated image G matches the content of the image C?**
As we saw in lecture, the earlier (shallower) layers of a ConvNet tend to detect lower-level features such as edges and simple textures, and the later (deeper) layers tend to detect higher-level features such as more complex textures as well as object classes.
We would like the "generated" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image. In practice, you'll get the most visually pleasing results if you choose a layer in the middle of the network--neither too shallow nor too deep. (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.)
So, suppose you have picked one particular hidden layer to use. Now, set the image C as the input to the pretrained VGG network, and run forward propagation. Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be a $n_H \times n_W \times n_C$ tensor. Repeat this process with the image G: Set G as the input, and run forward propagation. Let $a^{(G)}$ be the corresponding hidden layer activation. We will define as the content cost function as:
$$J_{content}(C,G) = \frac{1}{4 \times n_H \times n_W \times n_C}\sum _{ \text{all entries}} (a^{(C)} - a^{(G)})^2\tag{1} $$
Here, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost. For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the volumes corresponding to a hidden layer's activations. In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll these 3D volumes into a 2D matrix, as shown below. (Technically this unrolling step isn't needed to compute $J_{content}$, but it will be good practice for when you do need to carry out a similar operation later for computing the style cost $J_{style}$.)
<img src="images/NST_LOSS.png" style="width:800px;height:400px;">
**Exercise:** Compute the "content cost" using TensorFlow.
**Instructions**: The 3 steps to implement this function are:
1. Retrieve dimensions from a_G:
- To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
2. Unroll a_C and a_G as explained in the picture above
- If you are stuck, take a look at [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape).
3. Compute the content cost:
- If you are stuck, take a look at [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract).
```
# GRADED FUNCTION: compute_content_cost

def compute_content_cost(a_C, a_G):
    """
    Computes the content cost

    Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G

    Returns:
    J_content -- scalar that you compute using equation 1 above.
    """

    ### START CODE HERE ###
    # Retrieve dimensions from a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()

    # Reshape a_C and a_G (≈2 lines)
    # Unroll each (1, n_H, n_W, n_C) volume into a (n_C, n_H*n_W) matrix.
    a_C_unrolled = tf.transpose(tf.reshape(a_C, [n_H * n_W, n_C]))
    a_G_unrolled = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]))

    # compute the cost with tensorflow (≈1 line)
    # Sum of squared element-wise differences, normalized by 4 * n_H * n_W * n_C
    # exactly as in equation (1) above.
    J_content = tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled,a_G_unrolled))) / (4 * n_H * n_W * n_C)
    ### END CODE HERE ###

    return J_content
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
```
**Expected Output**:
<table>
<tr>
<td>
**J_content**
</td>
<td>
6.76559
</td>
</tr>
</table>
<font color='blue'>
**What you should remember**:
- The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are.
- When we minimize the content cost later, this will help make sure $G$ has similar content as $C$.
### 3.2 - Computing the style cost
For our running example, we will use the following style image:
```
style_image = scipy.misc.imread("images/monet_800600.jpg")
imshow(style_image)
```
This painting was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.
Let's see how you can now define a "style" cost function $J_{style}(S,G)$.
### 3.2.1 - Style matrix
The style matrix is also called a "Gram matrix." In linear algebra, the Gram matrix G of a set of vectors $(v_{1},\dots ,v_{n})$ is the matrix of dot products, whose entries are ${\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$. In other words, $G_{ij}$ compares how similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product, and thus for $G_{ij}$ to be large.
Note that there is an unfortunate collision in the variable names used here. We are following common terminology used in the literature, but $G$ is used to denote the Style matrix (or Gram matrix) as well as to denote the generated image $G$. We will try to make sure which $G$ we are referring to is always clear from the context.
In NST, you can compute the Style matrix by multiplying the "unrolled" filter matrix with their transpose:
<img src="images/NST_GM.png" style="width:900px;height:300px;">
The result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters. The value $G_{ij}$ measures how similar the activations of filter $i$ are to the activations of filter $j$.
One important part of the gram matrix is that the diagonal elements such as $G_{ii}$ also measures how active filter $i$ is. For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{ii}$ measures how common vertical textures are in the image as a whole: If $G_{ii}$ is large, this means that the image has a lot of vertical texture.
By capturing the prevalence of different types of features ($G_{ii}$), as well as how much different features occur together ($G_{ij}$), the Style matrix $G$ measures the style of an image.
**Exercise**:
Using TensorFlow, implement a function that computes the Gram matrix of a matrix A. The formula is: The gram matrix of A is $G_A = AA^T$. If you are stuck, take a look at [Hint 1](https://www.tensorflow.org/api_docs/python/tf/matmul) and [Hint 2](https://www.tensorflow.org/api_docs/python/tf/transpose).
```
# GRADED FUNCTION: gram_matrix

def gram_matrix(A):
    """
    Argument:
    A -- matrix of shape (n_C, n_H*n_W)

    Returns:
    GA -- Gram matrix of A, of shape (n_C, n_C)
    """

    ### START CODE HERE ### (≈1 line)
    # G_A = A A^T; transpose_b lets TensorFlow fold the transpose into the matmul.
    GA = tf.matmul(A, A, transpose_b=True)
    ### END CODE HERE ###

    return GA
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
A = tf.random_normal([3, 2*1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = " + str(GA.eval()))
```
**Expected Output**:
<table>
<tr>
<td>
**GA**
</td>
<td>
[[ 6.42230511 -4.42912197 -2.09668207] <br>
[ -4.42912197 19.46583748 19.56387138] <br>
[ -2.09668207 19.56387138 20.6864624 ]]
</td>
</tr>
</table>
### 3.2.2 - Style cost
After generating the Style matrix (Gram matrix), your goal will be to minimize the distance between the Gram matrix of the "style" image S and that of the "generated" image G. For now, we are using only a single hidden layer $a^{[l]}$, and the corresponding style cost for this layer is defined as:
$$J_{style}^{[l]}(S,G) = \frac{1}{4 \times {n_C}^2 \times (n_H \times n_W)^2} \sum _{i=1}^{n_C}\sum_{j=1}^{n_C}(G^{(S)}_{ij} - G^{(G)}_{ij})^2\tag{2} $$
where $G^{(S)}$ and $G^{(G)}$ are respectively the Gram matrices of the "style" image and the "generated" image, computed using the hidden layer activations for a particular hidden layer in the network.
**Exercise**: Compute the style cost for a single layer.
**Instructions**: The 3 steps to implement this function are:
1. Retrieve dimensions from the hidden layer activations a_G:
- To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above.
- You may find [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape) useful.
3. Compute the Style matrix of the images S and G. (Use the function you had previously written.)
4. Compute the Style cost:
- You may find [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract) useful.
```
# GRADED FUNCTION: compute_layer_style_cost

def compute_layer_style_cost(a_S, a_G):
    """
    Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G

    Returns:
    J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
    """

    ### START CODE HERE ###
    # Retrieve dimensions from a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()

    # Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
    a_S = tf.transpose(tf.reshape(a_S, [n_H * n_W, n_C]))
    a_G = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]))

    # Computing gram_matrices for both images S and G (≈2 lines)
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)

    # Computing the loss (≈1 line)
    # Squared Frobenius distance between the two Gram matrices, normalized
    # by 4 * n_C^2 * (n_H*n_W)^2 as in equation (2).
    J_style_layer = tf.reduce_sum(tf.square(tf.subtract(GS, GG))) / (4 * n_C **2 * (n_W * n_H) ** 2)
    ### END CODE HERE ###

    return J_style_layer
# Smoke-test compute_layer_style_cost on small random activations (TF1 graph mode).
tf.reset_default_graph()

with tf.Session() as test:
    tf.set_random_seed(1)  # fixed seed so the printed value matches the expected output
    a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_style_layer = compute_layer_style_cost(a_S, a_G)
    print("J_style_layer = " + str(J_style_layer.eval()))
```
**Expected Output**:
<table>
<tr>
<td>
**J_style_layer**
</td>
<td>
9.19028
</td>
</tr>
</table>
### 3.2.3 Style Weights
So far you have captured the style from only one layer. We'll get better results if we "merge" style costs from several different layers. After completing this exercise, feel free to come back and experiment with different weights to see how it changes the generated image $G$. But for now, this is a pretty reasonable default:
```
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
```
You can combine the style costs for different layers as follows:
$$J_{style}(S,G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S,G)$$
where the values for $\lambda^{[l]}$ are given in `STYLE_LAYERS`.
We've implemented a compute_style_cost(...) function. It simply calls your `compute_layer_style_cost(...)` several times, and weights their results using the values in `STYLE_LAYERS`. Read over it to make sure you understand what it's doing.
<!--
2. Loop over (layer_name, coeff) from STYLE_LAYERS:
a. Select the output tensor of the current layer. As an example, to call the tensor from the "conv1_1" layer you would do: out = model["conv1_1"]
b. Get the style of the style image from the current layer by running the session on the tensor "out"
c. Get a tensor representing the style of the generated image from the current layer. It is just "out".
d. Now that you have both styles. Use the function you've implemented above to compute the style_cost for the current layer
e. Add (style_cost x coeff) of the current layer to overall style cost (J_style)
3. Return J_style, which should now be the sum of the (style_cost x coeff) for each layer.
-->
```
def compute_style_cost(model, STYLE_LAYERS):
    """
    Computes the overall style cost from several chosen layers

    Arguments:
    model -- our tensorflow model
    STYLE_LAYERS -- A python list containing:
                        - the names of the layers we would like to extract style from
                        - a coefficient for each of them

    Returns:
    J_style -- tensor representing a scalar value, style cost defined above by equation (2)
    """
    # Accumulator for the weighted per-layer style costs
    J_style = 0

    for layer_name, coeff in STYLE_LAYERS:
        # Output tensor of the currently selected layer
        layer_out = model[layer_name]

        # Style-image activations: evaluated NOW, while the style image is the
        # current model input (set by the caller before invoking this function)
        a_S = sess.run(layer_out)

        # Generated-image activations: kept symbolic. They are evaluated later,
        # after the image G has been assigned as the model input.
        a_G = layer_out

        # Weighted contribution of this layer to the total style cost
        J_style += coeff * compute_layer_style_cost(a_S, a_G)

    return J_style
```
**Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below.
<!--
How do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. In contrast, if you want the generated image to strongly follow the style image, try choosing smaller weights for deeper layers and larger weights for the first layers
-->
<font color='blue'>
**What you should remember**:
- The style of an image can be represented using the Gram matrix of a hidden layer's activations. However, we get even better results combining this representation from multiple different layers. This is in contrast to the content representation, where usually using just a single hidden layer is sufficient.
- Minimizing the style cost will cause the image $G$ to follow the style of the image $S$.
</font>
### 3.3 - Defining the total cost to optimize
Finally, let's create a cost function that minimizes both the style and the content cost. The formula is:
$$J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$$
**Exercise**: Implement the total cost function which includes both the content cost and the style cost.
```
# GRADED FUNCTION: total_cost
def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """
    Computes the total cost function

    Arguments:
    J_content -- content cost coded above
    J_style -- style cost coded above
    alpha -- hyperparameter weighting the importance of the content cost
    beta -- hyperparameter weighting the importance of the style cost

    Returns:
    J -- total cost as defined by the formula above.
    """
    ### START CODE HERE ### (≈1 line)
    # Weighted linear combination: J = alpha * J_content + beta * J_style
    weighted_content = alpha * J_content
    weighted_style = beta * J_style
    J = weighted_content + weighted_style
    ### END CODE HERE ###

    return J
# Check total_cost with scalar pseudo-costs drawn from a seeded RNG.
tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(3)  # fixed seed so J matches the expected output
    J_content = np.random.randn()
    J_style = np.random.randn()
    J = total_cost(J_content, J_style)
    print("J = " + str(J))
```
**Expected Output**:
<table>
<tr>
<td>
**J**
</td>
<td>
35.34667875478276
</td>
</tr>
</table>
<font color='blue'>
**What you should remember**:
- The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$
- $\alpha$ and $\beta$ are hyperparameters that control the relative weighting between content and style
## 4 - Solving the optimization problem
Finally, let's put everything together to implement Neural Style Transfer!
Here's what the program will have to do:
<font color='purple'>
1. Create an Interactive Session
2. Load the content image
3. Load the style image
4. Randomly initialize the image to be generated
5. Load the VGG-19 model
6. Build the TensorFlow graph:
- Run the content image through the VGG-19 model and compute the content cost
- Run the style image through the VGG-19 model and compute the style cost
- Compute the total cost
- Define the optimizer and the learning rate
7. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated image at every step.
</font>
Lets go through the individual steps in detail.
You've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with respect to $G$. To do so, your program has to reset the graph and use an "[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)". Unlike a regular session, the "Interactive Session" installs itself as the default session to build a graph. This allows you to run variables without constantly needing to refer to the session object, which simplifies the code.
Lets start the interactive session.
```
# Reset the graph
tf.reset_default_graph()

# Start interactive session (installs itself as the default session, so
# tensor.eval() / sess-free runs work in the cells below)
sess = tf.InteractiveSession()
```
Let's load, reshape, and normalize our "content" image (the Louvre museum picture):
```
# Load and preprocess the content image (the Louvre).
# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 -- this cell
# assumes an older SciPy with Pillow installed; confirm the environment.
content_image = scipy.misc.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
```
Let's load, reshape and normalize our "style" image (Claude Monet's painting):
```
# Load and preprocess the style image (the Monet painting).
style_image = scipy.misc.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
```
Now, we initialize the "generated" image as a noisy image created from the content_image. By initializing the pixels of the generated image to be mostly noise but still slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image. (Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.)
```
# Initialize G as the content image plus noise, and preview it.
generated_image = generate_noise_image(content_image)
imshow(generated_image[0])
```
Next, as explained in part (2), let's load the VGG-19 model (the pretrained weights file is "imagenet-vgg-verydeep-19.mat").
```
# Load the pretrained VGG network. NOTE(review): the weights file is
# VGG-19 ("imagenet-vgg-verydeep-19"), even where the surrounding text says "VGG16".
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
```
To get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following:
1. Assign the content image to be the input to the VGG model.
2. Set a_C to be the tensor giving the hidden layer activation for layer "conv4_2".
3. Set a_G to be the tensor giving the hidden layer activation for the same layer.
4. Compute the content cost using a_C and a_G.
```
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))

# Select the output tensor of layer conv4_2
out = model['conv4_2']

# Set a_C to be the hidden layer activation from the layer we have selected
# (evaluated NOW, while the content image is the model input)
a_C = sess.run(out)

# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out

# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
```
**Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.
```
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))

# Compute the style cost (the style activations are evaluated inside this
# call, while the style image is still the model input)
J_style = compute_style_cost(model, STYLE_LAYERS)
```
**Exercise**: Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`. Use `alpha = 10` and `beta = 40`.
```
### START CODE HERE ### (1 line)
# Combine content and style costs with alpha = 10, beta = 40
J = total_cost(J_content, J_style, 10, 40)
### END CODE HERE ###
```
You'd previously learned how to set up the Adam optimizer in TensorFlow. Lets do that here, using a learning rate of 2.0. [See reference](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
```
# define optimizer (1 line) -- Adam with learning rate 2.0
optimizer = tf.train.AdamOptimizer(2.0)

# define train_step (1 line) -- minimizing J updates the pixels of the
# generated image (the only trainable variable is model['input'])
train_step = optimizer.minimize(J)
```
**Exercise**: Implement the model_nn() function which initializes the variables of the tensorflow graph, assigns the input image (initial generated image) as the input of the VGG-19 model and runs the train_step for a large number of steps.
```
def model_nn(sess, input_image, num_iterations = 200):
    """
    Run the style-transfer optimization: initialize the graph variables,
    feed the initial generated image into the model, and repeatedly run
    train_step, snapshotting the image every 20 iterations.
    """
    # Initialize global variables (you need to run the session on the initializer)
    sess.run(tf.global_variables_initializer())

    # Run the noisy input image (initial generated image) through the model. Use assign().
    sess.run(model['input'].assign(input_image))

    for i in range(num_iterations):
        # One optimizer step on the total cost
        sess.run(train_step)

        # The current state of the generated image lives in model['input']
        current_image = sess.run(model['input'])

        # Print every 20 iteration.
        if i % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))

            # save current generated image in the "/output" directory
            save_image("output/" + str(i) + ".png", current_image)

    # save last generated image
    save_image('output/generated_image.jpg', current_image)

    return current_image
```
Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.
```
# Run the optimization loop; snapshots are written to /output every 20 iterations.
model_nn(sess, generated_image)
```
**Expected Output**:
<table>
<tr>
<td>
**Iteration 0 : **
</td>
<td>
total cost = 5.05035e+09 <br>
content cost = 7877.67 <br>
style cost = 1.26257e+08
</td>
</tr>
</table>
You're done! After running this, in the upper bar of the notebook click on "File" and then "Open". Go to the "/output" directory to see all the saved images. Open "generated_image" to see the generated image! :)
You should see something like the image presented below on the right:
<img src="images/louvre_generated.png" style="width:800px;height:300px;">
We didn't want you to wait too long to see an initial result, and so had set the hyperparameters accordingly. To get the best looking results, running the optimization algorithm longer (and perhaps with a smaller learning rate) might work better. After completing and submitting this assignment, we encourage you to come back and play more with this notebook, and see if you can generate even better looking images.
Here are few other examples:
- The beautiful ruins of the ancient city of Persepolis (Iran) with the style of Van Gogh (The Starry Night)
<img src="images/perspolis_vangogh.png" style="width:750px;height:300px;">
- The tomb of Cyrus the great in Pasargadae with the style of a Ceramic Kashi from Ispahan.
<img src="images/pasargad_kashi.png" style="width:750px;height:300px;">
- A scientific study of a turbulent fluid with the style of an abstract blue fluid painting.
<img src="images/circle_abstract.png" style="width:750px;height:300px;">
## 5 - Test with your own image (Optional/Ungraded)
Finally, you can also rerun the algorithm on your own images!
To do so, go back to part 4 and change the content image and style image with your own pictures. In detail, here's what you should do:
1. Click on "File -> Open" in the upper tab of the notebook
2. Go to "/images" and upload your images (requirement: (WIDTH = 300, HEIGHT = 225)), rename them "my_content.png" and "my_style.png" for example.
3. Change the code in part (3.4) from :
```python
content_image = scipy.misc.imread("images/louvre.jpg")
style_image = scipy.misc.imread("images/claude-monet.jpg")
```
to:
```python
content_image = scipy.misc.imread("images/my_content.jpg")
style_image = scipy.misc.imread("images/my_style.jpg")
```
4. Rerun the cells (you may need to restart the Kernel in the upper tab of the notebook).
You can also tune your hyperparameters:
- Which layers are responsible for representing the style? STYLE_LAYERS
- How many iterations do you want to run the algorithm? num_iterations
- What is the relative weighting between content and style? alpha/beta
## 6 - Conclusion
Great job on completing this assignment! You are now able to use Neural Style Transfer to generate artistic images. This is also your first time building a model in which the optimization algorithm updates the pixel values rather than the neural network's parameters. Deep learning has many different types of models and this is only one of them!
<font color='blue'>
What you should remember:
- Neural Style Transfer is an algorithm that given a content image C and a style image S can generate an artistic image
- It uses representations (hidden layer activations) based on a pretrained ConvNet.
- The content cost function is computed using one hidden layer's activations.
- The style cost function for one layer is computed using the Gram matrix of that layer's activations. The overall style cost function is obtained using several hidden layers.
- Optimizing the total cost function results in synthesizing new images.
This was the final programming exercise of this course. Congratulations--you've finished all the programming exercises of this course on Convolutional Networks! We hope to also see you in Course 5, on Sequence models!
### References:
The Neural Style Transfer algorithm was due to Gatys et al. (2015). Harish Narayanan and Github user "log0" also have highly readable write-ups from which we drew inspiration. The pre-trained network used in this implementation is a VGG network, which is due to Simonyan and Zisserman (2015). Pre-trained weights were from the work of the MathConvNet team.
- Leon A. Gatys, Alexander S. Ecker, Matthias Bethge, (2015). A Neural Algorithm of Artistic Style (https://arxiv.org/abs/1508.06576)
- Harish Narayanan, Convolutional neural networks for artistic style transfer. https://harishnarayanan.org/writing/artistic-style-transfer/
- Log0, TensorFlow Implementation of "A Neural Algorithm of Artistic Style". http://www.chioka.in/tensorflow-implementation-neural-algorithm-of-artistic-style
- Karen Simonyan and Andrew Zisserman (2015). Very deep convolutional networks for large-scale image recognition (https://arxiv.org/pdf/1409.1556.pdf)
- MatConvNet. http://www.vlfeat.org/matconvnet/pretrained/
| github_jupyter |
# Recap
We started by learning about permutation importance and partial dependence plots for an overview of what the model has learned.
We then learned about SHAP values to break down the components of individual predictions.
Now we'll expand on SHAP values, seeing how aggregating many SHAP values can give more detailed alternatives to permutation importance and partial dependence plots.
# SHAP Values Review
Shap values show how much a given feature changed our prediction (compared to if we made that prediction at some baseline value of that feature).
For example, consider an ultra-simple model:
$$y = 4 * x1 + 2 * x2$$
If $x1$ takes the value 2, instead of a baseline value of 0, then our SHAP value for $x1$ would be 8 (from 4 times 2).
These are harder to calculate with the sophisticated models we use in practice. But through some algorithmic cleverness, Shap values allow us to decompose any prediction into the sum of effects of each feature value, yielding a graph like this:

[Link to larger view](https://i.imgur.com/JVD2U7k.png)*
In addition to this nice breakdown for each prediction, the [Shap library](https://github.com/slundberg/shap) offers great visualizations of groups of Shap values. We will focus on two of these visualizations. These visualizations have conceptual similarities to permutation importance and partial dependence plots. So multiple threads from the previous exercises will come together here.
# Summary Plots
[Permutation importance](https://www.kaggle.com/dansbecker/permutation-importance) is great because it created simple numeric measures to see which features mattered to a model. This helped us make comparisons between features easily, and you can present the resulting graphs to non-technical audiences.
But it doesn't tell you how each feature matters. If a feature has medium permutation importance, that could mean it has
- a large effect for a few predictions, but no effect in general, or
- a medium effect for all predictions.
SHAP summary plots give us a birds-eye view of feature importance and what is driving it. We'll walk through an example plot for the soccer data:

This plot is made of many dots. Each dot has three characteristics:
- Vertical location shows what feature it is depicting
- Color shows whether that feature was high or low for that row of the dataset
- Horizontal location shows whether the effect of that value caused a higher or lower prediction.
For example, the point in the upper left was for a team that scored few goals, reducing the prediction by 0.25.
Some things you should be able to easily pick out:
- The model ignored the `Red` and `Yellow & Red` features.
- Usually `Yellow Card` doesn't affect the prediction, but there is an extreme case where a high value caused a much lower prediction.
- High values of Goal scored caused higher predictions, and low values caused low predictions
If you look for long enough, there's a lot of information in this graph. You'll face some questions to test how you read them in the exercise.
# Summary Plots in Code
You have already seen the code to load the soccer/football data:
```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier

# Load the match statistics and build the binary target.
data = pd.read_csv('../input/fifa-2018-match-statistics/FIFA 2018 Statistics.csv')
y = (data['Man of the Match'] == "Yes")  # Convert from string "Yes"/"No" to binary

# Keep only the integer-typed columns as features.
# FIX: the original membership list was [np.int64, np.int64] -- the second
# element was a duplicate of the first; a single-element test selects the
# exact same columns.
feature_names = [i for i in data.columns if data[i].dtype in [np.int64]]
X = data[feature_names]

# Hold out a validation split and fit the classifier.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
my_model = RandomForestClassifier(random_state=0).fit(train_X, train_y)
```
We get the SHAP values for all validation data with the following code. It is short enough that we explain it in the comments.
```
import shap  # package used to calculate Shap values

# Create object that can calculate shap values
explainer = shap.TreeExplainer(my_model)

# calculate shap values. This is what we will plot.
# Calculate shap_values for all of val_X rather than a single row, to have more data for plot.
shap_values = explainer.shap_values(val_X)

# Make plot. Index of [1] selects the SHAP values for the "True" class
# (for classification, shap_values holds one array per possible outcome).
shap.summary_plot(shap_values[1], val_X)
```
The code isn't too complex. But there are a few caveats.
- When plotting, we call `shap_values[1]`. For classification problems, there is a separate array of SHAP values for each possible outcome. In this case, we index in to get the SHAP values for the prediction of "True".
- Calculating SHAP values can be slow. It isn't a problem here, because this dataset is small. But you'll want to be careful when running these to plot with reasonably sized datasets. The exception is when using an `xgboost` model, which SHAP has some optimizations for and which is thus much faster.
This provides a great overview of the model, but we might want to delve into a single feature. That's where SHAP dependence contribution plots come into play.
# SHAP Dependence Contribution Plots
We've previously used Partial Dependence Plots to show how a single feature impacts predictions. These are insightful and relevant for many real-world use cases. Plus, with a little effort, they can be explained to a non-technical audience.
But there's a lot they don't show. For instance, what is the distribution of effects? Is the effect of having a certain value pretty constant, or does it vary a lot depending on the values of other features? SHAP dependence contribution plots provide a similar insight to PDPs, but they add a lot more detail.

Start by focusing on the shape, and we'll come back to color in a minute. Each dot represents a row of the data. The horizontal location is the actual value from the dataset, and the vertical location shows what having that value did to the prediction. The fact this slopes upward says that the more you possess the ball, the higher the model's prediction is for winning the *Man of the Match* award.
The spread suggests that other features must interact with Ball Possession %. For example, here we have highlighted two points with similar ball possession values. That value caused one prediction to increase, and it caused the other prediction to decrease.

For comparison, a simple linear regression would produce plots that are perfect lines, without this spread.
This suggests we delve into the interactions, and the plots include color coding to help do that. While the primary trend is upward, you can visually inspect whether that varies by dot color.
Consider the following very narrow example for concreteness.

These two points stand out spatially as being far away from the upward trend. They are both colored purple, indicating the team scored one goal. You can interpret this to say **In general, having the ball increases a team's chance of having their player win the award. But if they only score one goal, that trend reverses and the award judges may penalize them for having the ball so much if they score that little.**
Outside of those few outliers, the interaction indicated by color isn't very dramatic here. But sometimes it will jump out at you.
# Dependence Contribution Plots in Code
We get the dependence contribution plot with the following code. The only line that's different from the `summary_plot` is the last line.
```
import shap  # package used to calculate Shap values

# Create object that can calculate shap values
explainer = shap.TreeExplainer(my_model)

# calculate shap values. This is what we will plot.
shap_values = explainer.shap_values(X)

# make plot: SHAP dependence of Ball Possession % (class "True"), colored by Goal Scored
shap.dependence_plot('Ball Possession %', shap_values[1], X, interaction_index="Goal Scored")
```
If you don't supply an argument for `interaction_index`, Shapley uses some logic to pick one that may be interesting.
This didn't require writing a lot of code. But the trick with these techniques is in thinking critically about the results rather than writing code itself.
# Your Turn
**[Test yourself](#$NEXT_NOTEBOOK_URL$)** with some questions to develop your skill with these techniques.
| github_jupyter |
# NESTS algorithm **Kopuru Vespa Velutina Competition**
Purpose: Bring together weather data, geographic data, food availability data, and identified nests in each municipality of Biscay in order to have a dataset suitable for analysis and potential predictions in a Machine Learning model.
Outputs: QUEENtrain and QUEENpredict datasets *(WBds03_QUEENtrain.csv & WBds03_QUEENpredict.csv)*
@authors:
* mario.bejar@student.ie.edu
* pedro.geirinhas@student.ie.edu
* a.berrizbeitia@student.ie.edu
* pcasaverde@student.ie.edu
## Libraries
```
import pandas as pd
import numpy as np
import math
from plotnine import *
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn import preprocessing
```
## Functions
```
def silueta(iterations_int, features_df):
    """Silhouette analysis for k-means over k = 2 .. iterations_int - 1.

    Fits one KMeans model per candidate cluster count and records the
    silhouette score of its labeling.

    Returns a DataFrame with columns 'Labels' (silhouette score for each k)
    and 'NumberOfClusters' (the corresponding k).
    """
    scores = []
    for k in range(2, iterations_int, 1):
        clusterer = KMeans(n_clusters=k)
        clusterer.fit(features_df)
        scores.append(silhouette_score(features_df, clusterer.labels_))

    result = pd.DataFrame()
    result['Labels'] = scores
    result['NumberOfClusters'] = range(2, iterations_int, 1)
    return result
def codos(numClusters_int, features_df):
    """Elbow-method helper: k-means inertia for k = 1 .. numClusters_int - 1.

    Returns a DataFrame with columns 'Inertia' (within-cluster sum of squares
    for each k) and 'NumberOfClusters' (the corresponding k).
    """
    inertia_values = []
    for k in range(1, numClusters_int, 1):
        clusterer = KMeans(n_clusters=k)
        clusterer.fit(features_df)
        inertia_values.append(clusterer.inertia_)

    result = pd.DataFrame()
    result['Inertia'] = inertia_values
    result['NumberOfClusters'] = range(1, numClusters_int, 1)
    return result
def kmedias(numClusters_int, features_df):
    """Fit k-means with numClusters_int clusters on features_df.

    Returns a Series with the cluster label of each row, indexed like
    features_df so it can be joined back onto the source frame.
    """
    model = KMeans(n_clusters=numClusters_int)
    model.fit(features_df)
    # FIX: removed the unused `modelCenters` local (cluster_centers_ was
    # computed but never used or returned) and the pointless `aux` alias.
    return pd.Series(model.labels_, index=features_df.index)
```
## Get the data
```
# Competition template (ds01), nest sightings (ds02), beehive census (ds03)
# and declared orchards (ds04) from the open-data inputs.
df01 = pd.read_csv('../../../Input_open_data/ds01_PLANTILLA-RETO-AVISPAS-KOPURU.csv', sep=";")
df02 = pd.read_csv('../../../Input_open_data/ds02_datos-nidos-avispa-asiatica.csv', sep=",")
df03 = pd.read_csv('../../../Input_open_data/ds03_APICULTURA_COLMENAS_KOPURU.csv', sep=";")
df04 = pd.read_csv('../../../Input_open_data/ds04_FRUTALES-DECLARADOS-KOPURU.csv', sep=";")

# Pre-built geographic and weather feature sets, plus municipal population.
WBdf01 = pd.read_csv('./WBds01_GEO.csv', sep=',')
WBdf02 = pd.read_csv('./WBds02_METEO.csv', sep=',')
df_population = pd.read_csv('../../../Other_open_data/population.csv', sep=',')
```
## Data cleanup
### Getting the names right
```
# Dropping and Renaming columns in accordance to the DataMap
# DataMap's URL: https://docs.google.com/spreadsheets/d/1Ad7s4IOmj9Tn2WcEOz4ArwedTzDs9Y0_EaUSm6uRHMQ/edit#gid=0
df01.columns = ['municip_code', 'municip_name', 'nests_2020']
df01.drop(columns=['nests_2020'], inplace=True) # just note that this is the final variable to predict in the competition
df02.drop(columns=['JARDUERA_ZENBAKIA/NUM_ACTUACION', 'ERABILTZAILEA_EU/USUARIO_EU', 'ERABILTZAILEA_CAS/USUARIO_CAS', 'HELBIDEA/DIRECCION', 'EGOERA_EU/ESTADO_EU', 'ITXIERA_DATA/FECHA CIERRE', 'ITXIERAKO AGENTEA_EU/AGENTE CIERRE_EU', 'ITXIERAKO AGENTEA_CAS/AGENTE CIERRE_CAS'], inplace=True)
df02.columns = ['waspbust_id', 'year', 'nest_foundDate', 'municip_name', 'species', 'nest_locType', 'nest_hight', 'nest_diameter', 'nest_longitude', 'nest_latitude', 'nest_status']
df03.drop(columns=['CP'], inplace=True)
df03.columns = ['municip_name','municip_code','colonies_amount']
df04.columns = ['agriculture_type','municip_code','municip_name']

# We don't have the "months" specified for any of the records in 2017 ('nest_foundDate' is incorrect for this year), so we'll drop those records
df02 = df02.drop(df02[df02['year'] == 2017].index, inplace = False)

# Cleaning municipality names in ds02 with names from ds01:
# the two lists below map every misspelled/uppercase variant seen in ds02
# (positionally) to the canonical name used in the official template ds01.
df02_wrong_mun = ['ABADIÑO' ,'ABANTO Y CIERVANA' ,'ABANTO Y CIERVANA-ABANTO ZIERBENA' ,'AJANGIZ' ,'ALONSOTEGI' ,'AMOREBIETA-ETXANO' ,'AMOROTO' ,'ARAKALDO' ,'ARANTZAZU' ,'AREATZA' ,'ARRANKUDIAGA' ,'ARRATZU' ,'ARRIETA' ,'ARRIGORRIAGA' ,'ARTEA' ,'ARTZENTALES' ,'ATXONDO' ,'AULESTI' ,'BAKIO' ,'BALMASEDA' ,'BARAKALDO' ,'BARRIKA' ,'BASAURI' ,'BEDIA' ,'BERANGO' ,'BERMEO' ,'BERRIATUA' ,'BERRIZ' ,'BUSTURIA' ,'DERIO' ,'DIMA' ,'DURANGO' ,'EA' ,'ELANTXOBE' ,'ELORRIO' ,'ERANDIO' ,'EREÑO' ,'ERMUA' ,'ERRIGOITI' ,'ETXEBARRI' ,'ETXEBARRIA', 'ETXEBARRIa','FORUA' ,'FRUIZ' ,'GALDAKAO' ,'GALDAMES' ,'GAMIZ-FIKA' ,'GARAI' ,'GATIKA' ,'GAUTEGIZ ARTEAGA' ,'GERNIKA-LUMO' ,'GETXO' ,'GETXO ' ,'GIZABURUAGA' ,'GORDEXOLA' ,'GORLIZ' ,'GUEÑES' ,'IBARRANGELU' ,'IGORRE' ,'ISPASTER' ,'IURRETA' ,'IZURTZA' ,'KARRANTZA HARANA/VALLE DE CARRANZA' ,'KARRANTZA HARANA-VALLE DE CARRANZA' ,'KORTEZUBI' ,'LANESTOSA' ,'LARRABETZU' ,'LAUKIZ' ,'LEIOA' ,'LEKEITIO' ,'LEMOA' ,'LEMOIZ' ,'LEZAMA' ,'LOIU' ,'MALLABIA' ,'MAÑARIA' ,'MARKINA-XEMEIN' ,'MARURI-JATABE' ,'MEÑAKA' ,'MENDATA' ,'MENDEXA' ,'MORGA' ,'MUNDAKA' ,'MUNGIA' ,'MUNITIBAR-ARBATZEGI' ,'MUNITIBAR-ARBATZEGI GERRIKAITZ' ,'MURUETA' ,'MUSKIZ' ,'MUXIKA' ,'NABARNIZ' ,'ONDARROA' ,'OROZKO' ,'ORTUELLA' ,'OTXANDIO' ,'PLENTZIA' ,'PORTUGALETE' ,'SANTURTZI' ,'SESTAO' ,'SONDIKA' ,'SOPELA' ,'SOPUERTA' ,'SUKARRIETA' ,'TRUCIOS-TURTZIOZ' ,'UBIDE' ,'UGAO-MIRABALLES' ,'URDULIZ' ,'URDUÑA/ORDUÑA' ,'URDUÑA-ORDUÑA' ,'VALLE DE TRAPAGA' ,'VALLE DE TRAPAGA-TRAPAGARAN' ,'ZALDIBAR' ,'ZALLA' ,'ZAMUDIO' ,'ZARATAMO' ,'ZEANURI' ,'ZEBERIO' ,'ZIERBENA' ,'ZIORTZA-BOLIBAR' ]
df02_correct_mun = ['Abadiño' ,'Abanto y Ciérvana-Abanto Zierbena' ,'Abanto y Ciérvana-Abanto Zierbena' ,'Ajangiz' ,'Alonsotegi' ,'Amorebieta-Etxano' ,'Amoroto' ,'Arakaldo' ,'Arantzazu' ,'Areatza' ,'Arrankudiaga' ,'Arratzu' ,'Arrieta' ,'Arrigorriaga' ,'Artea' ,'Artzentales' ,'Atxondo' ,'Aulesti' ,'Bakio' ,'Balmaseda' ,'Barakaldo' ,'Barrika' ,'Basauri' ,'Bedia' ,'Berango' ,'Bermeo' ,'Berriatua' ,'Berriz' ,'Busturia' ,'Derio' ,'Dima' ,'Durango' ,'Ea' ,'Elantxobe' ,'Elorrio' ,'Erandio' ,'Ereño' ,'Ermua' ,'Errigoiti' ,'Etxebarri' , 'Etxebarria', 'Etxebarria','Forua' ,'Fruiz' ,'Galdakao' ,'Galdames' ,'Gamiz-Fika' ,'Garai' ,'Gatika' ,'Gautegiz Arteaga' ,'Gernika-Lumo' ,'Getxo' ,'Getxo' ,'Gizaburuaga' ,'Gordexola' ,'Gorliz' ,'Güeñes' ,'Ibarrangelu' ,'Igorre' ,'Ispaster' ,'Iurreta' ,'Izurtza' ,'Karrantza Harana/Valle de Carranza' ,'Karrantza Harana/Valle de Carranza' ,'Kortezubi' ,'Lanestosa' ,'Larrabetzu' ,'Laukiz' ,'Leioa' ,'Lekeitio' ,'Lemoa' ,'Lemoiz' ,'Lezama' ,'Loiu' ,'Mallabia' ,'Mañaria' ,'Markina-Xemein' ,'Maruri-Jatabe' ,'Meñaka' ,'Mendata' ,'Mendexa' ,'Morga' ,'Mundaka' ,'Mungia' ,'Munitibar-Arbatzegi Gerrikaitz' ,'Munitibar-Arbatzegi Gerrikaitz' ,'Murueta' ,'Muskiz' ,'Muxika' ,'Nabarniz' ,'Ondarroa' ,'Orozko' ,'Ortuella' ,'Otxandio' ,'Plentzia' ,'Portugalete' ,'Santurtzi' ,'Sestao' ,'Sondika' ,'Sopela' ,'Sopuerta' ,'Sukarrieta' ,'Trucios-Turtzioz' ,'Ubide' ,'Ugao-Miraballes' ,'Urduliz' ,'Urduña/Orduña' ,'Urduña/Orduña' ,'Valle de Trápaga-Trapagaran' ,'Valle de Trápaga-Trapagaran' ,'Zaldibar' ,'Zalla' ,'Zamudio' ,'Zaratamo' ,'Zeanuri' ,'Zeberio' ,'Zierbena' ,'Ziortza-Bolibar',]
df02.municip_name.replace(to_replace = df02_wrong_mun, value = df02_correct_mun, inplace = True)
df02.shape

# Translate the `species` variable contents to English
df02.species.replace(to_replace=['AVISPA ASIÁTICA', 'AVISPA COMÚN', 'ABEJA'], value=['Vespa Velutina', 'Common Wasp', 'Wild Bee'], inplace=True)

# Translate the contents of the `nest_locType` and `nest_status` variables to English
# But note that this data makes is of no use from a "forecastoing" standpoint eventually, since we will predict with a one-year offset (and thus, use thigs like weather mostly)
df02.nest_locType.replace(to_replace=['CONSTRUCCIÓN', 'ARBOLADO'], value=['Urban Environment', 'Natural Environment'], inplace=True)
df02.nest_status.replace(to_replace=['CERRADA - ELIMINADO', 'CERRADA - NO ELIMINABLE', 'PENDIENTE DE GRUPO'], value=['Nest Terminated', 'Cannot Terminate', 'Pending classification'], inplace=True)
```
### Getting the dates right
Including the addition of a `year_offset` variable to comply with the competition's rules
```
# Changing 'nest_foundDate' to the "datetime" format
df02['nest_foundDate'] = pd.to_datetime(df02['nest_foundDate'])

# Create a "month" variable in the main dataframe
df02['month'] = pd.DatetimeIndex(df02['nest_foundDate']).month

# Create a "year_offset" variable in the main dataframe
# IMPORTANT: THIS REFLECTS OUR ASSUMPTION THAT `YEAR-1` DATA CAN BE USE TO PREDICT `YEAR` DATA, AS MANDATED BY THE COMPETITION'S BASE REQUIREMENTS
df02['year_offset'] = pd.DatetimeIndex(df02['nest_foundDate']).year - 1

df02.columns
```
### Creating distinct dataFrames for each `species`
```
# Frequency of each species in the sightings data
df02.species.value_counts()

# One dataframe per species; the Vespa Velutina subset is the modeling target
df02_vespas = df02.loc[df02.species == 'Vespa Velutina', :]
df02_wasps = df02.loc[df02.species == 'Common Wasp', :]
df02_bees = df02.loc[df02.species == 'Wild Bee', :]

df02_vespas.shape
```
## Create a TEMPLATE dataframe with the missing municipalities and months
```
# TEMPLATE: the competition's official municipality list, expanded to one row per
# (municipality, year_offset, month), so combinations with no recorded nests can
# later be filled with zeroes through an outer merge.
template = pd.read_csv('../../../Input_open_data/ds01_PLANTILLA-RETO-AVISPAS-KOPURU.csv', sep=";")
# 'NIDOS 2020' is the template's target column; not needed here
template.drop(columns='NIDOS 2020', inplace=True)
template.columns = ['municip_code', 'municip_name']
# One constant helper column per year, then melt them into a single 'year_offset' column
template['year2019'] = 2019
template['year2018'] = 2018
template['year2017'] = 2017
template = pd.melt(template, id_vars=['municip_code', 'municip_name'], value_vars=['year2019', 'year2018', 'year2017'], value_name = 'year_offset')
template.drop(columns='variable', inplace=True)
# Same trick for the 12 months: columns named 1..12 holding their own month number
for i in range(1,13,1):
    template[i] = i
template = pd.melt(template, id_vars=['municip_code', 'municip_name', 'year_offset'],\
                   value_vars=[1,2,3,4,5,6,7,8,9,10,11,12], value_name = 'month')
template.drop(columns='variable', inplace=True)
template.shape
# sanity check: 112 municipalities x 12 months x 3 years
112*12*3 == template.shape[0]
template.columns
```
## Merge the datasets
### Match each `municip_name` to its `municip_code` as per the competition's official template (i.e. `df01`)
```
# Merge dataFrames df01 and df02 by 'municip_name', in order to identify every wasp nest with its 'municip_code'
# The intention is that 'all_the_queens_wasps' will be the final dataFrame to use in the ML model eventually
all_the_queens_wasps = pd.merge(df02_vespas, df01, how = 'left', on = 'municip_name')
# check if there are any municipalities missing from the df02 dataframe, and add them if necessary
# (this shows the codes present in df01 but absent from the merged data)
df01.municip_code[~df01.municip_code.isin(all_the_queens_wasps.municip_code.unique())]
```
### Input municipalities and months missing from the dataset
```
# Outer-merge against the TEMPLATE so every (municipality, year_offset, month)
# combination exists, even where/when no nest was recorded.
all_the_queens_wasps = pd.merge(all_the_queens_wasps, template,\
                                how = 'outer', left_on = ['municip_code', 'municip_name', 'year_offset', 'month'],\
                                right_on = ['municip_code', 'municip_name', 'year_offset', 'month'])
all_the_queens_wasps.isnull().sum()
# Template-only rows have no 'year'; tag them so their NESTS can be zeroed later
all_the_queens_wasps.year.fillna(value='no registers', inplace=True)
all_the_queens_wasps.shape
```
### Discarding some variables
Namely: **species** (since by now they are all Vespa Velutina only), **nest_foundDate**,**nest_longitude**, and **nest_latitude**.
```
all_the_queens_wasps.drop(columns=['nest_foundDate', 'nest_longitude', 'nest_latitude', 'species'], inplace=True)
```
### Creating a new categorical variable for Nest Size
[Formula for nest volume](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6723431/)
[Example calculation in cubic meters](https://www.easycalculation.com/shapes/volume-of-prolate-spheroid.php)
```
#ggplot(aes(x='nest_hight', y='nest_diameter'), all_the_queens_wasps) + geom_point(stat='identity')
#all_the_queens_wasps['nest_volume_l'] = 4/3 * math.pi * (all_the_queens_wasps['nest_hight']/100/2)**2 * (all_the_queens_wasps['nest_diameter']/100/2) * 1000
#all_the_queens_wasps['nest_volume_l'].fillna(0, inplace=True)
# Proxy for nest size: height x diameter (the spheroid-volume formula above was discarded)
all_the_queens_wasps['nest_size'] = all_the_queens_wasps['nest_hight'] * all_the_queens_wasps['nest_diameter']
all_the_queens_wasps['nest_size'].fillna(0, inplace=True)
all_the_queens_wasps['nest_size'].describe()
# Mean nest size per municipality
vespaVoluminous = all_the_queens_wasps.loc[:, ['municip_code', 'nest_size']].groupby(by='municip_code', as_index=False).mean()
ggplot(aes(x='nest_size'), vespaVoluminous) + geom_histogram()
#vespaVoluminous['nest_size_equals'] = pd.qcut(vespaVoluminous['nest_size'], 3, labels=['small', 'mid', 'large'])
#vespaVoluminous['nest_size_equals'].value_counts()
# Equal-width binning into a 3-level categorical (qcut alternative kept commented above)
vespaVoluminous['nest_size'] = pd.cut(vespaVoluminous['nest_size'], bins=3, labels=['small', 'mid', 'large'])
vespaVoluminous['nest_size'].value_counts()
# This merge creates nest_size_x (numeric, row level) and nest_size_y (categorical, municipality level)
all_the_queens_wasps = pd.merge(all_the_queens_wasps, vespaVoluminous, how = 'left', on= 'municip_code')
#all_the_queens_wasps
```
### Converting categoricals to dummy variables
... and dropping some variables. Namely: **nest_locType**, **nest_hight**, **nest_diameter**, **nest_size_x**, **nest_size_y**, and **nest_status**.
```
# One-hot encode the three categoricals and append the dummy columns, in the same
# order as before: nest_size_y, nest_locType, nest_status.
for _cat in ('nest_size_y', 'nest_locType', 'nest_status'):
    _dummies = pd.get_dummies(all_the_queens_wasps[_cat])
    all_the_queens_wasps = pd.concat([all_the_queens_wasps, _dummies], axis=1)
# Drop the raw categoricals plus the size helpers now encoded as dummies.
all_the_queens_wasps.drop(columns=['nest_locType', 'nest_hight', 'nest_diameter', 'nest_size_y', 'nest_size_x', 'nest_status'], inplace=True)
# Prefix the dummy columns with fv_ ("future variable") to flag leakage-prone features.
all_the_queens_wasps.rename(columns = {"small":"fv_size_small", "mid":"fv_size_mid", "large":"fv_size_large",\
                                       "Natural Environment":"fv_type_natural", "Urban Environment":"fv_type_urban",\
                                       "Cannot Terminate":"fv_status_cantkill", "Nest Terminated":"fv_status_dead", "Pending classification":"fv_status_pending"}, inplace = True)
#all_the_queens_wasps
#all_the_queens_wasps.isnull().sum()
```
### Counting the amount of wasp nests in each municipality, for each year, ~not for the months~
```
# Aggregate nest-level rows to one row per (year, municipality, year_offset):
# 'waspbust_id' is counted (number of nests) while the fv_* dummies are summed.
all_the_queens_wasps = all_the_queens_wasps.loc[:, ['waspbust_id', 'fv_size_small', 'fv_size_mid', 'fv_size_large', 'fv_type_natural', 'fv_type_urban',\
                                                    'fv_status_cantkill', 'fv_status_dead', 'fv_status_pending',\
                                                    'year', 'municip_name', 'municip_code', 'year_offset']]\
                                           .groupby(by =['year', 'municip_name', 'municip_code', 'year_offset'], as_index=False)\
                                           .agg({'waspbust_id':'count', 'fv_size_small':'sum', 'fv_size_mid':'sum', 'fv_size_large':'sum', 'fv_type_natural':'sum', 'fv_type_urban':'sum',\
                                                 'fv_status_cantkill':'sum', 'fv_status_dead':'sum', 'fv_status_pending':'sum'})
# let's rename the id to NESTS, now that it has been counted
all_the_queens_wasps.rename(columns = {"waspbust_id":"NESTS"}, inplace = True)
all_the_queens_wasps.columns
# for all those "outer merge" rows with no associated year, set their NESTS to zero
all_the_queens_wasps.loc[all_the_queens_wasps.year == 'no registers', ['NESTS']] = 0
# sanity check: total nest count must equal the number of Vespa Velutina records
all_the_queens_wasps.NESTS.sum() == df02_vespas.shape[0]
# grouping by 'year_offset' and making the former 'year' variable disappear
all_the_queens_wasps = all_the_queens_wasps.loc[:, ['municip_name', 'municip_code', 'year_offset', 'NESTS', 'fv_size_small', 'fv_size_mid', 'fv_size_large', 'fv_type_natural', 'fv_type_urban', 'fv_status_cantkill', 'fv_status_dead', 'fv_status_pending']]\
                                           .groupby(by =['municip_name', 'municip_code', 'year_offset'], as_index = False).sum()
# verifying that the DataFrame has the right number of rows (112 municipalities x 3 years)
all_the_queens_wasps.shape[0] == 112*3
#all_the_queens_wasps.isnull().sum()
```
### Food sources
```
# Group df03 by 'municip_code' because there are multiple rows for each municipality (and we need a 1:1 relationship)
# Group df03 by 'municip_code' because there are multiple rows for each municipality (and we need a 1:1 relationship)
df03 = df03.groupby(by = 'municip_code', as_index= False).colonies_amount.sum()
# Now merge df03 to add number of bee hives (which is a food source for the wasp) in each municipality
# Note that NaNs (unknown amount of hives) are replaced with zeroes for the 'colonies_amount' variable
all_the_queens_wasps = pd.merge(all_the_queens_wasps, df03, how = 'left', on = 'municip_code')
all_the_queens_wasps.colonies_amount.fillna(value=0, inplace=True)
all_the_queens_wasps.shape
#all_the_queens_wasps.isnull().sum()
#all_the_queens_wasps.isnull().sum()
# Group df04 (agricultural food sources) by municipality code, after appending variables with the amount of each type of agricultural product
# Group df04 (agricultural food sources) by municipality code, after adding one
# 0/1 indicator column per type of agricultural product.
aux = df04.copy(deep=True)
aux.drop(columns=['municip_name'], inplace=True)
# Txakoli's label is read positionally because it contains characters that are
# awkward to hard-code reliably (df04 is still unmodified at this point).
txakoli_string = df04.agriculture_type[45]
# Indicator column name -> 'agriculture_type' label it flags. Insertion order matches
# the original column-creation order, so the resulting frame is identical.
food_labels = {
    'food_fruit': 'FRUTALES',
    'food_apple': 'MANZANO',
    'food_txakoli': txakoli_string,
    'food_kiwi': 'AKTINIDIA (KIWI)',
    'food_pear': 'PERAL',
    'food_blueberry': 'ARANDANOS',
    'food_raspberry': 'FRAMBUESAS',
}
for col, label in food_labels.items():
    # boolean Series -> 0/1 ints, equivalent to np.where(cond, '1', '0').astype('int')
    aux[col] = (aux['agriculture_type'] == label).astype('int')
# One row per municipality, summing the indicators (several products per municipality)
aux = aux.groupby(by='municip_code', as_index=False).sum()
df04 = aux.copy(deep=True)
# Now merge df04 to add number of each type of food source ('agriculture_type') present in each municipality
# Any municipality not present in df04 will get assigned 'zero' food sources for any given type of fruit
all_the_queens_wasps = pd.merge(all_the_queens_wasps, df04, how = 'left', on= 'municip_code')
for col in food_labels:
    all_the_queens_wasps[col].fillna(value=0, inplace=True)
all_the_queens_wasps.shape
#all_the_queens_wasps.isnull().sum()
```
### Geographic
Here, a very important assumption regarding which station corresponds to each municipality is being brought from the HONEYCOMB script
```
# Adding weather station code to each municipality in all_the_queens_wasps. "No municipality left behind!"
# WBdf01 carries the municipality -> weather-station assignment built in the HONEYCOMB script.
all_the_queens_wasps = pd.merge(all_the_queens_wasps, WBdf01, how = 'left', on= 'municip_code')
all_the_queens_wasps.shape
#all_the_queens_wasps.isnull().sum()
# expect the same number of municipalities per year_offset
all_the_queens_wasps.year_offset.value_counts()
```
### Weather
MANDATORY ASSUMPTION: As per the competition's rules. 2020 weather data cannot be used to predict 2020's number of wasp nests.
Therefore, **this merge links 2018's wasp nests to 2017's weather data** (all of which falls under the $2017$ value for `year_offset`).
Likewise, **2019's wasp nests are linked to 2018's weather data** (all of which falls under the $2018$ value for `year_offset`).
Finally, the $2019$ value for `year_offset` contains zero NESTS and the year 2019's weather which we will use to predict 2020's number of NESTS (the target variable of the competition)
```
# Now, merge the Main 'all_the_queens_wasps' dataFrame with the weather data 'WBdf02' dataFrame
# Joining (station_code, year_offset) to (codigo, year) attaches the PREVIOUS year's
# weather to each year's nests, as mandated by the competition's rules.
all_the_queens_wasps = pd.merge(all_the_queens_wasps, WBdf02, how = 'left',\
                                left_on = ['station_code', 'year_offset'],\
                                right_on = ['codigo', 'year'])
all_the_queens_wasps.columns
# TRAIN: offsets 2017-2018 (nests of 2018-2019); PREDICT: offset 2019 (target year 2020)
all_the_queens_wasps_TRAIN = all_the_queens_wasps.loc[all_the_queens_wasps.year_offset.isin([2017, 2018]),:]
all_the_queens_wasps_PREDICT = all_the_queens_wasps.loc[all_the_queens_wasps.year_offset.isin([2019]),:]
```
### Adding `Population`, a publicly available dataset
```
# Adding population by municipality (matched on the offset year, like the weather)
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, df_population, how = 'left',\
                                      left_on= ['municip_code', 'year_offset'],\
                                      right_on = ['municip_code', 'year'])
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, df_population, how = 'left',\
                                        left_on= ['municip_code', 'year_offset'],\
                                        right_on = ['municip_code', 'year'])
all_the_queens_wasps_TRAIN.shape
all_the_queens_wasps_PREDICT.shape
```
## Further cleanup
```
# Drop merge artifacts ('year_x'/'year_y' from the suffixed joins, 'codigo' from the
# weather join) and relabel BOTH frames from one shared canonical column list, so the
# TRAIN and PREDICT schemas cannot drift apart (the original duplicated the 40-name
# list verbatim for each frame).
final_columns = ['municip_name', 'municip_code', 'year_offset', 'NESTS',
                 'fv_size_small', 'fv_size_mid', 'fv_size_large', 'fv_type_natural',
                 'fv_type_urban', 'fv_status_cantkill', 'fv_status_dead',
                 'fv_status_pending', 'colonies_amount', 'food_fruit', 'food_apple',
                 'food_txakoli', 'food_kiwi', 'food_pear', 'food_blueberry',
                 'food_raspberry', 'station_code', 'weath_days_frost',
                 'weath_humidity', 'weath_maxLevel', 'weath_midLevel', 'weath_minLevel',
                 'weath_days_rain', 'weath_days_rain1mm', 'weath_accuRainfall',
                 'weath_10minRainfall', 'weath_1dayRainfall', 'weath_solar',
                 'weath_meanTemp', 'weath_maxTemp', 'weath_maxMeanTemp', 'weath_minTemp',
                 'weath_meanWindM', 'weath_maxWindM', 'weath_meanDayMaxWind',
                 'population']
all_the_queens_wasps_TRAIN.drop(columns=['year_x', 'year_y', 'codigo'], inplace=True)
all_the_queens_wasps_TRAIN.columns
# this relabeling shouldn't strictly be necessary, but it guarantees consistent names
all_the_queens_wasps_TRAIN.columns = final_columns
all_the_queens_wasps_TRAIN.columns
all_the_queens_wasps_PREDICT.drop(columns=['year_x', 'year_y', 'codigo'], inplace=True)
all_the_queens_wasps_PREDICT.columns
all_the_queens_wasps_PREDICT.columns = final_columns
# Sanity checks: TRAIN keeps every recorded Vespa Velutina nest; PREDICT has none yet
all_the_queens_wasps_TRAIN.NESTS.sum() == df02_vespas.shape[0]
all_the_queens_wasps_PREDICT.NESTS.sum() == 0
```
## Clustering municipalities
### by the size of its Vespa Velutina nests (`fv_...`)
```
# Mean fv_size_* profile per municipality -> k-means clustering on nest sizes.
sizeMatters = all_the_queens_wasps_TRAIN.loc[:, ['municip_code', 'fv_size_small', 'fv_size_mid', 'fv_size_large']].groupby(by='municip_code', as_index=True).mean()
# silueta() sweeps candidate k values and returns silhouette scores to pick k
sizeSilhouette = silueta(15, sizeMatters)
ggplot(aes(x='NumberOfClusters', y='Labels'), sizeSilhouette) + geom_line() + geom_point()
clustersby_size = 5
sizeClusters = pd.DataFrame()
# kmedias() fits k-means and returns one label per municipality
sizeClusters['cluster_size'] = kmedias(clustersby_size, sizeMatters)
sizeClusters['cluster_size'].reset_index()  # NOTE(review): result discarded - this line has no effect
sizeClusters['cluster_size'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, sizeClusters['cluster_size'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, sizeClusters['cluster_size'], how = 'left', on= 'municip_code')
```
### by the usual environment of its wasp nests (`fv_...`)
```
# Mean fv_type_* profile per municipality -> k-means clustering on nest environment.
cosmopolitan = all_the_queens_wasps_TRAIN.loc[:, ['municip_code', 'fv_type_natural', 'fv_type_urban']].groupby(by='municip_code', as_index=True).mean()
cosmoSilhouette = silueta(10, cosmopolitan)
ggplot(aes(x='NumberOfClusters', y='Labels'), cosmoSilhouette) + geom_line() + geom_point()
clustersby_cosmo = 2
cosmoClusters = pd.DataFrame()
cosmoClusters['cluster_cosmo'] = kmedias(clustersby_cosmo, cosmopolitan)
cosmoClusters['cluster_cosmo'].reset_index()  # NOTE(review): result discarded - no effect
cosmoClusters['cluster_cosmo'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, cosmoClusters['cluster_cosmo'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, cosmoClusters['cluster_cosmo'], how = 'left', on= 'municip_code')
```
### by the usual status its wasp nests are left in (`fv_...`)
```
# Mean fv_status_* profile per municipality -> k-means clustering on the nests' final status.
survivalists = all_the_queens_wasps_TRAIN.loc[:, ['municip_code', 'fv_status_cantkill', 'fv_status_dead', 'fv_status_pending']].groupby(by='municip_code', as_index=True).mean()
surviveSilhouette = silueta(10, survivalists)
ggplot(aes(x='NumberOfClusters', y='Labels'), surviveSilhouette) + geom_line() + geom_point()
clustersby_survive = 2
surviveClusters = pd.DataFrame()
# BUG FIX: the original passed clustersby_cosmo here (copy-paste slip from the
# previous cell); it happened to hold the same value (2), so results are unchanged,
# but changing one constant would have silently desynchronized them.
surviveClusters['cluster_survive'] = kmedias(clustersby_survive, survivalists)
surviveClusters['cluster_survive'].reset_index()
surviveClusters['cluster_survive'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, surviveClusters['cluster_survive'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, surviveClusters['cluster_survive'], how = 'left', on= 'municip_code')
```
### Dropping all that future information (aka, future variables (`fv_...`)) from the dataset
```
# The fv_* columns describe nests observed in the year being predicted (future
# information); only their per-municipality cluster labels may remain as features.
all_the_queens_wasps_TRAIN.drop(columns=['fv_size_small', 'fv_size_mid', 'fv_size_large', 'fv_type_natural', 'fv_type_urban', 'fv_status_cantkill', 'fv_status_dead', 'fv_status_pending'], inplace=True)
all_the_queens_wasps_PREDICT.drop(columns=['fv_size_small', 'fv_size_mid', 'fv_size_large', 'fv_type_natural', 'fv_type_urban', 'fv_status_cantkill', 'fv_status_dead', 'fv_status_pending'], inplace=True)
```
### by the availability of food sources (`food_`)
```
# Mean food-availability profile per municipality -> k-means clustering.
foodies = all_the_queens_wasps_TRAIN.loc[:, ['municip_code', 'colonies_amount', 'food_fruit', 'food_apple', 'food_txakoli', 'food_kiwi', 'food_pear', 'food_blueberry', 'food_raspberry']].groupby(by='municip_code', as_index=True).mean()
slimSilhouette = silueta(10, foodies)
ggplot(aes(x='NumberOfClusters', y='Labels'), slimSilhouette) + geom_line() + geom_point()
clustersby_foodie = 2
foodieClusters = pd.DataFrame()
foodieClusters['cluster_food'] = kmedias(clustersby_foodie, foodies)
foodieClusters['cluster_food'].reset_index()  # NOTE(review): result discarded - no effect
foodieClusters['cluster_food'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, foodieClusters['cluster_food'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, foodieClusters['cluster_food'], how = 'left', on= 'municip_code')
```
### Exploring clustering of weather variables (`weath_...`)
#### Humidity-related variables
```
# scale the dataset using MinMaxScaler, the most common approach
#scalators = ['weath_days_frost', 'weath_humidity', 'weath_maxLevel', 'weath_midLevel', 'weath_minLevel', 'weath_days_rain', 'weath_days_rain1mm', 'weath_accuRainfall', 'weath_10minRainfall', 'weath_1dayRainfall', 'weath_solar', 'weath_meanTemp', 'weath_maxTemp', 'weath_maxMeanTemp', 'weath_minTemp', 'weath_meanWindM', 'weath_maxWindM', 'weath_meanDayMaxWind']
scalators_wet = ['municip_code', 'weath_days_frost', 'weath_humidity', 'weath_days_rain', 'weath_days_rain1mm', 'weath_accuRainfall', 'weath_10minRainfall', 'weath_1dayRainfall', 'weath_solar']
weathercock_water = all_the_queens_wasps_TRAIN[scalators_wet].copy()
# min-max scale every column except 'municip_code' (column 0), then average per municipality
weathercock_water.iloc[:,1:] = preprocessing.minmax_scale(weathercock_water.iloc[:,1:])
weathercock_water = weathercock_water.groupby(by='municip_code', as_index=True).mean()
wetSilhouette = silueta(15, weathercock_water)
ggplot(aes(x='NumberOfClusters', y='Labels'), wetSilhouette) + geom_line() + geom_point()
clustersby_weather_humid = 2
weatherWetClusters = pd.DataFrame()
weatherWetClusters['cluster_weather_wet'] = kmedias(clustersby_weather_humid, weathercock_water)
weatherWetClusters['cluster_weather_wet'].reset_index()  # NOTE(review): result discarded - no effect
weatherWetClusters['cluster_weather_wet'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, weatherWetClusters['cluster_weather_wet'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, weatherWetClusters['cluster_weather_wet'], how = 'left', on= 'municip_code')
```
#### Temperature-related variables
```
# scale the dataset using MinMaxScaler, the most common approach
#scalators = ['weath_days_frost', 'weath_humidity', 'weath_maxLevel', 'weath_midLevel', 'weath_minLevel', 'weath_days_rain', 'weath_days_rain1mm', 'weath_accuRainfall', 'weath_10minRainfall', 'weath_1dayRainfall', 'weath_solar', 'weath_meanTemp', 'weath_maxTemp', 'weath_maxMeanTemp', 'weath_minTemp', 'weath_meanWindM', 'weath_maxWindM', 'weath_meanDayMaxWind']
scalators_temp = ['municip_code', 'weath_meanTemp', 'weath_maxTemp', 'weath_maxMeanTemp', 'weath_minTemp']
weathercock_temp = all_the_queens_wasps_TRAIN[scalators_temp].copy()
# min-max scale every column except 'municip_code' (column 0), then average per municipality
weathercock_temp.iloc[:,1:] = preprocessing.minmax_scale(weathercock_temp.iloc[:,1:])
weathercock_temp = weathercock_temp.groupby(by='municip_code', as_index=True).mean()
tempSilhouette = silueta(10, weathercock_temp)
ggplot(aes(x='NumberOfClusters', y='Labels'), tempSilhouette) + geom_line() + geom_point()
clustersby_weather_temp = 2
weatherTempClusters = pd.DataFrame()
weatherTempClusters['cluster_weather_temp'] = kmedias(clustersby_weather_temp, weathercock_temp)
weatherTempClusters['cluster_weather_temp'].reset_index()  # NOTE(review): result discarded - no effect
weatherTempClusters['cluster_weather_temp'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, weatherTempClusters['cluster_weather_temp'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, weatherTempClusters['cluster_weather_temp'], how = 'left', on= 'municip_code')
```
#### Wind-related variables
```
# scale the dataset using MinMaxScaler, the most common approach
#scalators = ['weath_days_frost', 'weath_humidity', 'weath_maxLevel', 'weath_midLevel', 'weath_minLevel', 'weath_days_rain', 'weath_days_rain1mm', 'weath_accuRainfall', 'weath_10minRainfall', 'weath_1dayRainfall', 'weath_solar', 'weath_meanTemp', 'weath_maxTemp', 'weath_maxMeanTemp', 'weath_minTemp', 'weath_meanWindM', 'weath_maxWindM', 'weath_meanDayMaxWind']
scalators_wind = ['municip_code', 'weath_meanWindM', 'weath_maxWindM', 'weath_meanDayMaxWind']
weathercock_wind = all_the_queens_wasps_TRAIN[scalators_wind].copy()
# min-max scale every column except 'municip_code' (column 0), then average per municipality
weathercock_wind.iloc[:,1:] = preprocessing.minmax_scale(weathercock_wind.iloc[:,1:])
weathercock_wind = weathercock_wind.groupby(by='municip_code', as_index=True).mean()
windSilhouette = silueta(15, weathercock_wind)
ggplot(aes(x='NumberOfClusters', y='Labels'), windSilhouette) + geom_line() + geom_point()
clustersby_weather_wind = 2
weatherWindClusters = pd.DataFrame()
weatherWindClusters['cluster_weather_wind'] = kmedias(clustersby_weather_wind, weathercock_wind)
weatherWindClusters['cluster_weather_wind'].reset_index()  # NOTE(review): result discarded - no effect
weatherWindClusters['cluster_weather_wind'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, weatherWindClusters['cluster_weather_wind'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, weatherWindClusters['cluster_weather_wind'], how = 'left', on= 'municip_code')
```
#### Other weather variables
```
# scale the dataset using MinMaxScaler, the most common approach
#scalators = ['weath_days_frost', 'weath_humidity', 'weath_maxLevel', 'weath_midLevel', 'weath_minLevel', 'weath_days_rain', 'weath_days_rain1mm', 'weath_accuRainfall', 'weath_10minRainfall', 'weath_1dayRainfall', 'weath_solar', 'weath_meanTemp', 'weath_maxTemp', 'weath_maxMeanTemp', 'weath_minTemp', 'weath_meanWindM', 'weath_maxWindM', 'weath_meanDayMaxWind']
scalators_level = ['municip_code', 'weath_maxLevel', 'weath_midLevel', 'weath_minLevel']
weathercock_level = all_the_queens_wasps_TRAIN[scalators_level].copy()
# min-max scale every column except 'municip_code' (column 0), then average per municipality
weathercock_level.iloc[:,1:] = preprocessing.minmax_scale(weathercock_level.iloc[:,1:])
weathercock_level = weathercock_level.groupby(by='municip_code', as_index=True).mean()
levelSilhouette = silueta(10, weathercock_level)
# BUG FIX: the original plotted slimSilhouette (the food-cluster curve from an earlier
# cell) here; plot the level silhouette that was just computed.
ggplot(aes(x='NumberOfClusters', y='Labels'), levelSilhouette) + geom_line() + geom_point()
clustersby_weather_level = 2
weatherLevelClusters = pd.DataFrame()
weatherLevelClusters['cluster_weather_level'] = kmedias(clustersby_weather_level, weathercock_level)
weatherLevelClusters['cluster_weather_level'].reset_index()
weatherLevelClusters['cluster_weather_level'].value_counts()
all_the_queens_wasps_TRAIN = pd.merge(all_the_queens_wasps_TRAIN, weatherLevelClusters['cluster_weather_level'], how = 'left', on= 'municip_code')
all_the_queens_wasps_PREDICT = pd.merge(all_the_queens_wasps_PREDICT, weatherLevelClusters['cluster_weather_level'], how = 'left', on= 'municip_code')
```
### Cluster table
## Final check
```
# Final sanity checks before exporting
all_the_queens_wasps_TRAIN.isnull().sum()
# check how many rows (municipalities) are there in the dataframe for each year/month combination
pd.crosstab(all_the_queens_wasps.municip_code, all_the_queens_wasps.year_offset)
# TRAIN must account for every recorded Vespa Velutina nest; PREDICT must be all zeroes
all_the_queens_wasps_TRAIN.NESTS.sum() == df02_vespas.shape[0]
all_the_queens_wasps_PREDICT.NESTS.sum() == 0
```
## Export the TRAINING dataset for the model
A dataset which relates the weather from a previous year (12 months ago) to an amount of NESTS in any given year (and month).
```
#all_the_queens_wasps.to_csv('WBds03_QUEEN.csv', index=False)
# TRAIN: one row per municipality per year_offset (2017, 2018), with previous-year weather
all_the_queens_wasps_TRAIN.to_csv('WBds03_QUEENtrainYEARS.csv', index=False)
```
## Export the PREDICTION dataset for the model
```
all_the_queens_wasps_PREDICT.to_csv('WBds03_QUEENpredictYEARS.csv', index=False)
```
| github_jupyter |
# Training Models
We will practice training machine learning models for both regression and for classification problems.
# 1) Regression Models
We will start by fitting regression models. We will download the time series of the GPS station deployed on Montague Island.
<img src="AC29_map.png" alt="AC29 GPS stations on Montague Island" width="600"/>
```
import requests, zipfile, io, gzip, glob, os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
%matplotlib inline
# Download data: daily GPS position time series for one station from the Nevada
# Geodetic Laboratory (plain-text .tenv format, one line per day).
sta="AC29"
file_url="http://geodesy.unr.edu/gps_timeseries/tenv/IGS14/"+ sta + ".tenv"
r = requests.get(file_url).text.splitlines() # download, read text, split lines into a list
ue=[];un=[];uv=[];se=[];sn=[];sv=[];date=[];date_year=[];df=[]
for iday in r: # this loops through the days of data
    fields = iday.split()  # renamed from the original throwaway name 'crap'
    if len(fields) < 10:   # skip header or truncated lines
        continue
    date.append((fields[1]))
    date_year.append(float(fields[2]))
    # east/north/up displacements, converted from meters to millimeters
    ue.append(float(fields[6])*1000)
    un.append(float(fields[7])*1000)
    uv.append(float(fields[8])*1000)
    # 1-sigma errors, also converted to millimeters
    se.append(float(fields[10])*1000)
    sn.append(float(fields[11])*1000)
    sv.append(float(fields[12])*1000)
# make dataframe (append if df already holds data from a previous station)
record = {'station':sta,'date':date,'date_year':date_year,'east':ue,'north':un,'up':uv}
if len(df)==0:
    df = pd.DataFrame(record, columns = ['station', 'date','date_year','east','north','up'])
else:
    df=pd.concat([df,pd.DataFrame(record, columns = ['station', 'date','date_year','east','north','up'])])
df.head()
# select the first 5 years of data from the east component
# NOTE(review): the original comment said "2 years" while the slice takes 365*5 days;
# the slice is kept as-is since this y is overwritten in the next section anyway.
y = ue[0:365*5]
plt.plot(df['date_year'],df['east']);plt.grid(True)
```
### 1.1 Linear regression
Let $y$ be the data, and $\hat{y}$ be the predicted value of the data. A general linear regression can be formulated as
$\hat{y} = w_0 + w_1 x_1 + ... + w_n x_n = h_w (\mathbf{x})$.
$\mathbf{\hat{y}} = \mathbf{G} \mathbf{w}$.
$y$ is a data vector of length $m$, $\mathbf{x}$ is a feature vector of length $n$. $\mathbf{w}$ is a vector of model parameters, $h_w$ is referred to as the *hypothesis function* or the *model* using the model parameters $w$. In the simplest case of a linear regression with time, the formulation becomes:
$\hat{y} = w_0 + w_1 t$,
where $x_1 = t$ the time feature.
To evaluate how well the model performs, we will compute a *loss score*, or a *residual*. It is the result of applying a *loss* or *cost* or *objective* function to the prediction and the data. The most basic *cost function* is the **Mean Square Error (MSE)**:
$MSE(\mathbf{x},h_w) = \frac{1}{m} \sum_{i=1}^{m} \left( h_w(\mathbf{x})_i - y_i \right)^2 = \frac{1}{m} \sum_{i=1}^{m} \left( \hat{y}_i - y_i \right)^2 $, in the case of a linear regression.
The *Normal Equation* is the solution to the linear regression that minimize the MSE.
$\mathbf{w} = \left( \mathbf{x}^T\mathbf{x} \right)^{-1} \mathbf{x}^T \mathbf{y}$
This compares with the classic inverse problem framed by $\mathbf{d} = \mathbf{G} \mathbf{m}$.
$\mathbf{m} = \left( \mathbf{G}^T\mathbf{G} \right)^{-1} \mathbf{G}^T \mathbf{d} $
It can be solved using NumPy's linear algebra module. If $\left( \mathbf{x}^T\mathbf{x} \right)$ is singular and cannot be inverted, a lower-rank matrix called the *pseudoinverse* can be calculated using singular value decomposition. In a previous class we also used the Scikit-learn function ``sklearn.linear_model.LinearRegression``, which is an implementation of the *pseudoinverse*. We practice below how to use these standard inversions:
```
# Fit a straight line to the first two years of east displacement with three
# equivalent solvers: the normal equation, the pseudoinverse, and scikit-learn.
x = np.asarray(date_year[0:2*365])
x = x-np.min(x)  # time in years since the first sample
y = np.asarray(ue[0:2*365])
G = np.c_[np.ones((2*365,1)),x]  # design matrix [1, t]
m = len(y)  # number of samples
print(G)
# normal equation: w = (G^T G)^-1 G^T y
w1 = np.linalg.inv(G.T.dot(G)).dot(G.T).dot(y)
# Pseudoinverse (SVD-based; robust when G^T G is singular)
w2 = np.linalg.pinv(G).dot(y)
# scikitlearn LinearRegression
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
# BUG FIX: sklearn expects X of shape (n_samples, n_features) and y of (n_samples,).
# The original reshape(1,-1) passed ONE sample with 730 features, fitting a
# meaningless model; reshape(-1,1) gives 730 samples of the single time feature.
lin_reg.fit(x.reshape(-1,1), y)
print(lin_reg)
w3 = [lin_reg.intercept_, lin_reg.coef_]
y_predict1=G.dot(w1)
y_predict2=G.dot(w2)
y_predict3=lin_reg.predict(x.reshape(-1,1))
plt.plot(x,y);plt.grid(True)
plt.plot(x,y_predict1,'r',linewidth=3);
plt.plot(x,y_predict2,'g--');
plt.plot(x,y_predict3,'k');
plt.xlabel("Time (years)")
plt.ylabel('East displacement (mm) at AC29')
print("modeled parameters. Normal equation")
print(w1)
print("modeled parameters. pseudoinverse")
print(w2)
```
## 1.2 Loss functions for regressions
Loss functions are used to measure the difference between the data and the predictions. Loss functions $\mathcal{L}(\mathbf{w}) $ are differentiable with respect to models.
In the previous example, we used the MSE as a loss function:
$ MSE(\mathbf{x},h_w) = \frac{1}{m} \sum_{i=1}^m \left( \hat{y}_i - y_i \right) ^2 $
The regression aims to find $h_w$ that minimizes the loss function $\mathcal{L}(\mathbf{w}) $. Other examples of loss functions are:
$MAE(\mathbf{x},h_w) = \frac{1}{m} \sum_{i=1}^m |\hat{y}_i - y_i|$
You can find interesting comparisons of Loss functions for regression problems here: https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
# 2) Gradient Descent
Gradient Descent is used to *train* machine learning models.
Gradient Descent marches down the misfit function through the parameter space: it evaluates the loss function and attempting to find its global minimum. The model $\mathbf{w}$ is updated iteratively in the direction that reduces the loss/misfit:
$w_j^{(k + 1)} = w_j^{(k)} - \alpha \frac{\partial \mathcal{L}}{\partial w_j}$ for $j = 1 , \cdots , n ,$
where $\alpha$ is the **learning rate**.
<table><tr>
<td> <img src="GD_cartoon.jpeg" alt="Gradient Descent" style="width: 400px;"/> </td>
<td> <img src="GD_non_global.png" alt="Gradient Descent non convex" style="width: 400px;"/> </td>
</tr>
<tr>
<td>Gradient descent for a convex, well behaved loss function. </td>
<td> Gradient descent in a poorly behaved loss function with local minima. <td>
</tr>
</table>
## 2.1 Batch Gradient Descent
Batch GD is performing the GD over the entire data and taking the steps to go down the gradient by finding the appropriate learning rate $\alpha$.
<table><tr>
<td> <img src="GD_AlphaTooSmall.png" alt="Learning rate too small" style="width: 400px;"/> </td>
<td> <img src="GD_AlphaTooLarge.png" alt="Learning rate too large" style="width: 400px;"/> </td>
</tr>
<tr>
<td>Learning rate $\alpha$ is too small. It will take longer to converge. </td>
<td> Learning rate $\alpha$ is too large. It may overshoot and fail to converge to the global minimum. </td>
</tr>
</table>
The iteration in GD can be stopped by imposing a convergence tolerance: a threshold below which further improvement of the error is considered negligible. Gradient Descent requires re-scaling the data.
```
# Batch gradient descent on the linear model, using min-max normalized data.
# normalize the data. Without normalization this will fail!
x = np.asarray(date_year[0:3*365]).reshape(-1,1)
y = np.asarray(ue[0:3*365]).reshape(-1,1)
x = x-np.min(x)
G = np.c_[np.ones((len(x),1)),x]
# BUG FIX: m must be the sample count of THIS dataset (3*365); the original silently
# reused the stale m = 2*365 from the earlier cell inside the 2/m gradient factor,
# which scaled the effective learning rate incorrectly.
m = len(y)
scale = (np.max(y)-np.min(y)) # minmax scaling
newy = y / scale
plt.plot(x,newy*scale);plt.grid(True)
alpha = 0.1  # learning rate
n_iterations =1000
for k in range(100): # perform 100 times the random initialization
    w = np.random.rand(2,1) # initialize the model parameters.
    for iteration in range(n_iterations):
        # MSE gradient: (2/m) G^T (G w - y)
        gradients = 2/m *G.T.dot(G.dot(w)-newy.reshape(-1,1))
        w = w - alpha * gradients
    plt.plot(x,G.dot(w)*scale,'r')
# Now let's vary the learning rate
n_iterations =1000
for alpha in [0.001,0.01,0.1]:
    fig,ax=plt.subplots(1,1)
    ax.plot(x,newy*scale);ax.grid(True)
    for k in range(100): # perform 100 times the random initialization
        w = np.random.rand(2,1) # initialize the model parameters.
        for iteration in range(n_iterations):
            gradients = 2/m *G.T.dot(G.dot(w)-newy.reshape(-1,1))
            w = w - alpha * gradients
        ax.plot(x,G.dot(w)*scale,'r')
    ax.set_title("alpha = "+str(alpha))
```
## 2.2 Stochastic Gradient Descent
SGD takes the gradient for each single instance. By default, SGD in Scikit-learn will minimize the MSE cost function. The advantages of SGD are:
* Efficiency.
* Ease of implementation (lots of opportunities for code tuning).
The disadvantages of Stochastic Gradient Descent include:
* SGD requires a number of hyperparameters such as the regularization parameter and the number of iterations.
* SGD is sensitive to feature scaling.
```
from sklearn.linear_model import SGDRegressor
alpha = 0.01 # learning rate
# tol=1e-2: stop when the loss improvement falls below this; penalty=None: no regularization
sgd_reg = SGDRegressor(max_iter=1000,tol=1e-2,penalty=None,eta0=alpha)
sgd_reg.fit(x,y)
w=[sgd_reg.intercept_[0],sgd_reg.coef_[0]]   # fitted [intercept, slope]
print(w)
fig,ax=plt.subplots(1,1)
ax.plot(x,y);ax.grid(True)
ax.plot(x,G.dot(w),'r')   # predicted line via the design matrix
```
## 2.3 Mini Batch Gradient Descent
It is a combination of Batch GD and SGD. Minibatch computes the gradient over a subset of instances (as against a single one in SGD or the full one in Batched GD). At each step, using one minibatch randomly drawn from our dataset, we will estimate the gradient of the loss with respect to our parameters. Next, we will update our parameters in the direction that may reduce the loss.
# 2) Under-fitting and Overfitting
**Bias**
This part of the generalization error is due to wrong assumptions, such as assuming that the data is linear when it is actually quadratic. A high-bias model is most likely to underfit the training data. Biased is reduced by adjusting, optimizing the model to get the best performance possible on the training data.
**Variance**
This part is due to the model’s excessive sensitivity to small variations in the training data. A model with many degrees of freedom (such as a high-degree polynomial model) is likely to have high variance, and thus to overfit the training data. Variance is reduced
**Irreducible error**
This part is due to the noisiness of the data itself. The only way to reduce this part of the error is to clean up the data (e.g., fix the data sources, such as broken sensors, or detect and remove outliers).
**Underfitting**: the model is too simple, the bias is high but the model variance is low. This occurs in most cases at the beginning of training, where the model has not yet learned to fit the data. With iterative training, the algorithm starts by underfitting the data (high loss for both validation and training data) and progressively "learn" and improve the fit. It remains a problem with the loss in both training and validation have high values.
The solution is to increase the complexity of the model, or to design better feature from the data (feature engineering), and to reduce the constrains on the model (such as the parameterization of model regularization). Underfitting is identified by having a high bias and low variance of the residuals. It is usually obvious and rarely a problem because the training and validation errors are high.
**Overfitting**: the model is too complex, the bias is low but the model variance is high. Data may contain noise that should not be fit by the algorithm. It happens when the model is too complex relative to the amount and the noisiness of the training data. Overfitting is a common problem in geoscience machine learning problems. Overfitting can be detected when the model performs perfectly on the training data, but poorly on the validation and test data. It can also be detected using **cross-validation metrics** and **learning curves**.
Some solutions are to reduce the model size, reduce the number of attributes in the training data, gather more training data, to reduce the noise in the training data (fix data errors and remove outliers). Another way to keep the model complexity but constrain its variance is called **regularization**.
***You do not know if you overfit, until you do***. The model may not be complex enough until your reached overfitting. Once reached, back up a little bit to find the best tradeoff in optimization and generalization.
**Assessing Overfitting**
To evaluate the model's ability to generalize to other data sets, and have the appropriate level of variance, we plot **learning curves**. These plots the model performance on the training and validation set as a function of the training set size.
```
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y, c1="b+", c2="b"):
    '''
    Plot train and validation RMSE as a function of training-set size.

    Parameters
    ----------
    model : sklearn-style estimator with fit/predict
    X : array-like
        feature data; reshaped to 2-D (n_samples, n_features) as sklearn expects,
        so 1-D (raveled) input from callers still works.
    y : array-like
        target data.
    c1, c2 : str
        matplotlib format strings for the train and validation curves.
    '''
    # BUG FIX: the original called train_test_split(x, y, ...) on the *global*
    # x and y, silently ignoring the X and y arguments - every call plotted
    # curves for the same global data. Use the arguments instead.
    X = np.asarray(X).reshape(len(X), -1)   # sklearn requires 2-D features
    y = np.asarray(y)
    # Setting the random_state variable in the train_test_split is necessary to reproduce the results.
    # When tuning parameters such as test_size, you need to set the random state otherwise too many parameters change.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
    train_errors, val_errors = [], []
    # refit the model on progressively larger prefixes of the training set
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    plt.plot(np.sqrt(train_errors), c1, linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), c2, linewidth=3, label="val")
    plt.legend(['training','validation'])
    plt.grid(True)
    plt.title("Learning curve")
    plt.ylabel("RMSE")
    plt.xlabel('Training size')
# Fit the first 2 years with SGD, then plot the fit and its learning curves.
x = np.asarray(date_year[0:2*365]).reshape(-1,1)
y = np.asarray(ue[0:2*365]).reshape(-1,1)
x = x-np.min(x)   # shift the time origin to zero
# G = np.c_[np.ones((len(x),1)),x]
scale = (np.max(y)-np.min(y)) # minmax scaling
newy = y / scale
alpha = 0.01 # learning rate
sgd_reg = SGDRegressor(max_iter=1000,tol=1e-2,penalty=None,eta0=alpha)
sgd_reg.fit(x,newy)
y_predict=sgd_reg.predict(x)
plt.plot(x,y);plt.grid(True)
plt.plot(x,y_predict*scale,"m",linewidth=3)   # undo the minmax scaling for display
plt.ylabel('East displacement (mm) at AC29')
plot_learning_curves(sgd_reg, x.ravel(), y.ravel())
plt.ylim([1,2])
```
Let's read and interpret these curves.
You will notice that when you re-run the cell with ``plot_learning_curves`` that you will get different answers: this is because the initialization of the SGD will give different answers. This is a first reason why one should run these multiple times and then average over the curves.
* **The good signs**:
Loss curves plateau at low value for both training and validation. Training loss should be smaller, but not by much, than the validation loss. Low loss values are signs of good fit and good generalization.
* **The bad signs: underfitting**:
RMSE are high for both training and validation.
* **The bad signs: overfitting**:
RMSE is low for training but high for validation.
# 3) Regularization
Constraining a model of a given complexity to make it simpler is called **regularization**.
## 3.1 Ridge Regression
To regularize the model, we can reduce model parameter variance by imposing that the norm of the model parameters is small. Assuming that the model parameters follow a normal (Gaussian) distribution, we want to minimize the L2 norm (equivalent to the mean square of the model parameters:
$\mathcal{L}(\mathbf{w}) = MSE(\mathbf{w}) + \lambda \frac{1}{2} || \mathbf{w} ||_2^2$,
where $|| \mathbf{w} ||_2^2 = \sum_{i=1}^n w_i^2$ is the squared L2 norm of the model parameters, and $\lambda$ is a hyperparameter to tune to balance the contribution of the model norm as against the residual norms. The L2 norm is sensitive to outliers in the distributions.
Ridge Regression is sensitive to data scale, so do not forget to scale input data.
## 3.2 Lasso Regression
Lasso Regression is, just like Ridge Regression, a way to minimize model variance. Instead of minimizing the L2 norm, we minimize the L1 norm:
$\mathcal{L}(\mathbf{w}) = MSE(\mathbf{w}) + \lambda || \mathbf{w} ||_1$,
The L1 norm $|| \mathbf{w} ||_1 = \sum_{i=1}^n | w_i |$ is appropriate for exponential (Laplace) distribution, and allow to not be penalized by outliers. It tends to eliminate the weights of the least important features. It effectively performs a *feature reduction* and output a *sparse model*. It can be called in SGD by using the argument ``penalty="l1"``.
## 3.3 Elastic Net
Combine Ridge and Lasso, weigh the contribution of each norm (L1 and L2) using the hyperparameter $r$, and the contribution of the regularization in the loss function with $\lambda$.
$\mathcal{L}(\mathbf{w}) = MSE(\mathbf{w}) + r \lambda|| \mathbf{w} ||_1 + \frac{1-r}{2} \lambda|| \mathbf{w} ||_2^2$,
```
from sklearn.linear_model import SGDRegressor, ElasticNet, Lasso, Ridge
# Compare four linear regressors with different regularization schemes
# (alpha here is the regularization strength, not a learning rate).
sgd_reg = SGDRegressor()
ridge_reg = Ridge(alpha=0.1)
lasso_reg = Lasso(alpha=0.1)
ela_reg = ElasticNet(alpha=0.1,l1_ratio=0.5)   # l1_ratio=0.5: equal L1/L2 mix
# prep the data again
x = np.asarray(date_year[0:4*365]).reshape(-1,1)
y = np.asarray(ue[0:4*365]).reshape(-1,1)
x = x-np.min(x)
G = np.c_[np.ones((len(x),1)),x]   # design matrix [1, x] used below to plot the lines
scale = (np.max(y)-np.min(y)) # minmax scaling
# Fit
sgd_reg.fit(x,y)
ridge_reg.fit(x,y)
lasso_reg.fit(x,y)
ela_reg.fit(x,y)
# make prediction
y_sgd=sgd_reg.predict(x)
y_sridge=ridge_reg.predict(x)
y_lasso=lasso_reg.predict(x)
y_ela=ela_reg.predict(x)
# collect [intercept, slope] for each fitted model
w_sgd=[sgd_reg.intercept_[0],sgd_reg.coef_[0]]
w_ridge=[ridge_reg.intercept_[0],ridge_reg.coef_[0]]
w_lasso=[lasso_reg.intercept_[0],lasso_reg.coef_[0]]
w_ela=[ela_reg.intercept_[0],ela_reg.coef_[0]]
print(w_sgd,w_ridge,w_lasso,w_ela)
fig,ax=plt.subplots(1,1)
ax.plot(x,y);ax.grid(True)
ax.plot(x,G.dot(w_sgd))
ax.plot(x,G.dot(w_ridge))
ax.plot(x,G.dot(w_lasso))
ax.plot(x,G.dot(w_ela))
ax.legend(['data','SGD','Ridge','Lasso','Elastic'])
# perform the regressions
plot_learning_curves(sgd_reg, x.ravel(), y.ravel(),"r-+","r")
plot_learning_curves(ridge_reg, x.ravel(), y.ravel(),"g-+","g")
plot_learning_curves(lasso_reg, x.ravel(), y.ravel(),"m-+","m")
plot_learning_curves(ela_reg, x.ravel(), y.ravel(),"y-+","y")
plt.ylim([0,6])
plt.xlim([0,30])
```
We see that there needs to be at least 10 samples in the training set for the models to generalize reasonably well. We also see that all of the regularization mechanisms yield seemingly similar behavior at the training. After a sufficient number of samples, validation loss goes below training loss.
**model complexity**
Now we will try and fit the step in the data.
```
# Prepare year 4 of the series (contains a step) for the complexity demo below.
x = np.asarray(date_year[3*365:4*365]).reshape(-1,1)
y = np.asarray(ue[3*365:4*365]).reshape(-1,1)
x = x-np.min(x)   # shift the time origin to zero
# G = np.c_[np.ones((len(x),1)),x]
scale = (np.max(y)-np.min(y)) # minmax scaling
newy = y / scale
plt.plot(x,newy*scale);plt.grid(True)
```
The data looks complex, with the superposition of a linear trend and oscillatory signals. Let's fit a general polynomial form. We will start with a simple model.
```
from sklearn.preprocessing import PolynomialFeatures
# Compare a low-degree and a very high-degree polynomial Ridge fit.
#Let's start with a simple
poly_features = PolynomialFeatures(degree=2)
G = poly_features.fit_transform(x) # G now contains the original feature of X plus the power of the features.
ridge_reg = Ridge(alpha=0.1)
ridge_reg.fit(G,y)
y_ridge=ridge_reg.predict(G)
print(G.shape)
plt.plot(x,y);plt.grid(True)
plt.plot(x,y_ridge)
# NOTE(review): G.ravel() flattens the 2-D design matrix into one long vector;
# presumably G itself was intended here - confirm against plot_learning_curves.
plot_learning_curves(ridge_reg, G.ravel(), y.ravel(),"b-+","b");plt.xlim([0,100])
# Let's make it complex
# NOTE(review): degree=400 raw polynomial features are numerically extreme;
# expect overflow/conditioning warnings - intended only as an overfitting demo.
poly_features = PolynomialFeatures(degree=400)
G2 = poly_features.fit_transform(x) # G now contains the original feature of X plus the power of the features.
ridge_reg2 = Ridge(alpha=0.001)
ridge_reg2.fit(G2,y)
y_ridge2=ridge_reg2.predict(G2)
fix,ax=plt.subplots(1,2,figsize=(20,8))
ax[0].plot(x,y);ax[0].grid(True)
ax[0].plot(x,y_ridge,"m",linewidth=3)
ax[0].plot(x,y_ridge2,"y",linewidth=3)
# ax[0].set_ylim([-10,20])
ax[0].set_ylabel('Vertical displacement (mm)at AC29')
plot_learning_curves(ridge_reg, G.ravel(), y.ravel(),"m-+","m");#plt.xlim([0,200])
plot_learning_curves(ridge_reg2, G2.ravel(), y.ravel(),"y-+","y");#plt.xlim([0,200])
plt.ylim([2,4])
```
# 4) Early stopping
In gradient descent, "learning" means that we are training the algorithm iteratively: the fit improves as we keep training the model.
Another strategy to regularize the learning is to stop training as soon as the validation error reaches a minimum. Now instead of looking at the errors as a function of training size, we look at them as a function of epoch.
```
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Early-stopping demo: track train/validation MSE per epoch and remember
# the epoch with the lowest validation error.
x = np.asarray(date_year[:]).reshape(-1,1)
x=x-np.min(x)
y = np.asarray(uv[:]).reshape(-1,1)
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.3,random_state=42)
# use the Pipeline function from sklearn to prepare your data.
poly_scaler = Pipeline([
    ("poly_features", PolynomialFeatures(degree=50)),
    ("std_scaler", StandardScaler()) ])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
# NOTE(review): refitting the pipeline on the full series means the scaler used
# for X_val below is no longer the one fitted on X_train only (mild leakage) -
# confirm this is intended for the demo.
X_poly = poly_scaler.fit_transform(x)
X_val_poly_scaled = poly_scaler.transform(X_val)
# set the gradient with a single iteration since we will iterate over epochs.
# warm_start=True says that you should keep the previous state of the model to retrain.
# FIX: np.infty was removed in NumPy 2.0 - np.inf is the canonical spelling.
sgd_reg = SGDRegressor(max_iter=1, tol=-np.inf, warm_start=True,
                       penalty=None, learning_rate="constant", eta0=0.0005)
minimum_val_error = float("inf")
best_epoch = None
best_model = None
val_error=np.zeros(1000)
train_error=np.zeros(1000)
for epoch in range(1000):
    sgd_reg.fit(X_train_poly_scaled, y_train.ravel()) # continues where it left off
    y_val_predict = sgd_reg.predict(X_val_poly_scaled)
    y_train_predict = sgd_reg.predict(X_train_poly_scaled)
    val_error[epoch] = mean_squared_error(y_val, y_val_predict)
    train_error[epoch] = mean_squared_error(y_train, y_train_predict)
    if val_error[epoch] < minimum_val_error: # you will stop and save the best model
        minimum_val_error = val_error[epoch]
        best_epoch = epoch
        # NOTE(review): clone() returns an *unfitted* copy of the estimator's
        # hyperparameters, not a snapshot of the fitted weights - confirm intent.
        best_model = clone(sgd_reg)
best_y = sgd_reg.predict(X_poly)
fig,ax=plt.subplots(1,2,figsize=(16,6))
ax[0].plot(x,y);
ax[0].plot(x,best_y)
ax[1].plot(np.arange(1000),val_error)
ax[1].plot(np.arange(1000),train_error)
plt.legend(["validation error","training error"])
plt.xlim([0,100]);plt.ylim([0,30])
```
You may also consider the parameter ``early_stopping=True`` in SGD to automatically implement early stopping and deal with overfitting.
# 5) Training Classification algorithms
Last week, we explored the ***logistic regression***, a classification method to estimate the probability that an instance belongs to a particular class. Here we take the example of a binary classification. The Logistic regression estimates the probability that an instance belongs to the positive class. If the probability is above a threshold, then the instance is classified in the positive class. The probability is estimated using a **logistic sigmoid function**:
$\sigma(x) = \frac{1}{1+ \exp(-x)}$
Training a logistic regression is to tune the model such that the output score is low for a negative instance and high for a positive instance. The loss function associated with logistic regression is the $\log$ function due to its property that it is really high at low values of $x$ and really low at high values of $x$. The cost function over a batch of $m$ instances is the sum of the individual instance cost functions, which is called the ***Log Loss***:
$ \mathcal{L}(\mathbf{w}) = - \frac{1}{m} \sum_{i=1}^m \left[ y_i \log(\hat{p}_i(\mathbf{w})) + (1 - y_i) \log(1-\hat{p}_i(\mathbf{w}))\right] $,
where $m$ is the number of instances, $\hat{p}_i = \sigma(\mathbf{w}(x))$ is the probability output by the model for the instance $x$, and $y_i$ is the class of the instance. The log loss is differentiable with respect to the model parameters, and one can use Gradient Descent to optimize the model parameters.
In Scikit-learn, ``LogisticRegression`` is equivalent to training an logistic regression using a log loss ``SGDClassifier(loss='log')``.
The K-class version of logistical regression is the ***softmax or multinomial regression***. The softmax regression model first computes scores $s_k$ for each class, which are computing using a simple linear regression prediction. The probabilities are calculated using the softmax function:
$\hat{p}_k = \sigma(s_k) = \frac{\exp(s_k)}{ \sum_{i=1}^K \exp(s_i)}$
An appropriate loss function to use is called ***Cross Entropy*** cost function:
$ \mathcal{L}(\mathbf{w}) = - \frac{1}{m} \sum_{i=1}^m \sum_{k=1}^K y_{ik} \log(\hat{p}_{ik}(\mathbf{w})) $.
The rest of the training requires similar tricks than the regression model training. The performance metrics are precision, recall, F1 scores etc etc as seen in previous notes.
# Checklist for training an ML model
1. Set the test set aside.
2. Initialize model parameters for optimizer (e.g. SGD)
3. Identify and define machine learning methods
4. Define the Loss Function
There are loss functions for classification (most of them use logs) and for regressions (they may use exponentials). Follow the documentation of your ML API: https://keras.io/api/losses/, https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics, https://pytorch.org/docs/stable/nn.html#loss-functions
5. Define the optimization algorithm
The most popular optimizer algorithms compute the first derivative (gradient) of the loss functions. They include Gradient Descent, Momentum, Adagrad, RMSProp, Adam.
6. Model training
Prepare the folds for K-fold cross validation. Scale the data.
Define the model parameters in a dictionary. Define the number of epochs, learning rate, batch size.
For each fold:
Initialize the model parameters.
for each epoch (iteration), train the algorithm on a minibatch of training examples. Training consists in 1) passing the training data through our model to obtain a set of predictions, 2) calculating the loss, 3) computing the gradient (either known, or using backward passes in neural networks), and 4) updating the model parameters using an optimization algorithm (e.g. Stochastic Gradient Descent).
7. Fine tune the training
Compute learning rate as a function of training size to get a sense for the batch size desired to properly train.
Compute the validation and training error as a function of epochs. Find the minimum of the validation error and stop the training there.
| github_jupyter |
# Autoregressive models using a feedforward neural network
## PART 2: Applying the methods to health care time series
In this notebook we will use a feedforward neural network to fit a single and ensemble linear and non-linear models to real time series data.
<div class="alert alert-info">
1. Most of the work we will do is data manipulation: preprocessing data and making sure it is the right shape for the neural networks.
2. The ensemble learning method can be computationally expensive. We have included some pre-trained models that can be loaded from file if needed.
</div>
---
**LEARNING OBJECTIVES**
* Learn how to apply feedforward neural networks to real health data.
* Methods to preprocess nn input data.
* Recognise the stochastic nature of neural network training
* Use an ensemble of neural networks to provide a more reliable point forecast
---
# Python dependencies
It is recommended that you use the forecasting course conda environment provided. We are again going to implement neural networks using `tensorflow` and `keras`. You should be using at least `tensorflow` version `2.1.0`.
```
import statsmodels.api as sm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# tensorflow imports
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import EarlyStopping
from statsmodels.tools.eval_measures import rmse
tf.__version__
```
# Forecasting emergency admissions in England
We will now use feedforward neural networks to predict the number of monthly emergency admissions in England.
## Load the data
**Task**:
* Execute the code below to read the emergency admissions data into pandas
```
# Download the monthly emergency admissions time series from GitHub.
url = 'https://raw.githubusercontent.com/health-data-science-OR/data/master' \
      + '/em_admits_ts.csv'
em_admits = pd.read_csv(url)
em_admits.head(3)
em_admits.shape
```
## 2.3. Preprocessing
### 2.3.1 Datetime format
Notice that the `month_year` column in `em_admits` holds a string in an invalid date format e.g. 'Aug-10'. Pandas cannot handle this as-is because '10' could refer to any century! So let's do a bit of preprocessing to get it into a valid datetime format.
*Optional Task:*
* Take some time to understand the code that preprocesses the dates. This is real health data and it is likely you will need to deal with formatting issues as experienced here.
First we will format the string to something pandas can parse i.e. 'Aug 2010'. Then we will call the `pd.to_datetime()` function to parse the string and return a `datetime`. We will assign the result to our dataframe's index and set the freq to monthly start 'MS'
```
# Build a proper datetime index from strings like 'Aug-10'.
date_str = em_admits['month_year'].str[:3] + ' 20' \
           + em_admits['month_year'].str[-2:]   # 'Aug-10' -> 'Aug 2010' (assumes all years are 20xx)
date_str.name = 'date'
em_admits = em_admits.set_index(pd.to_datetime(date_str))
em_admits.index.freq = 'MS'   # monthly frequency, anchored at month start
em_admits = em_admits.drop(columns=['month_year'])
em_admits.head()
```
## Visualise the training data
We will be forecasting the last 12 months of the series. Let's take a look at the training data (being careful to exclude the last 12 months)
```
# Plot the training period only: exclude the final 12 months reserved for testing.
holdout_length = 12
em_admits[:len(em_admits)-holdout_length].plot(figsize=(12,4));
```
## Calender adjustment
This is monthly data so a useful preprocessing step is to transform the data into a daily rate by dividing by the number of days in the month. When we plot this the troughs we saw in Feb each year disappear.
**execute the code below which:**:
* Calculates the average admissions per day series
* Plots the training data (holding back 12 months for testing)
```
# Calendar adjustment: admissions per day removes month-length effects (e.g. short Februaries).
admit_rate = em_admits['em_admits'] / em_admits.index.days_in_month
admit_rate[:len(admit_rate)-12].plot(figsize=(12,4));
```
# **Exercise 1**: Convert the time series to format suitable for supervised learning.
The function `sliding_window` has been provided below for you to create your training data.
**Task**:
* Using a sliding window approach convert the time series into a tabular format.
* Use a window size of 12 and assume you are predicting a scalar value of y (1-step ahead).
* Conduct a train test split holding back 12 windows as a test set.
```
def sliding_window(train, window_size=2, horizon=1):
    '''
    Convert a univariate time series into a supervised-learning table
    using a sliding window.

    Parameters:
    --------
    train: array-like
        training data for time series method
    window_size: int, optional (default=2)
        lookback - how much lagged data to include.
    horizon: int, optional (default=1)
        number of observations ahead to predict

    Returns:
    -------
    array-like, array-like
        preprocessed X with shape (n_windows, window_size),
        preprocessed y with shape (n_windows,)
    '''
    tabular_X = []
    tabular_y = []
    # BUG FIX: the original loop stopped at len(train) - window_size - horizon,
    # discarding the last usable window/target pair.  The final valid target
    # index is len(train) - 1, reached at i = len(train) - window_size - horizon,
    # so iterate up to that index inclusive (hence the + 1).
    for i in range(0, len(train) - window_size - horizon + 1):
        X_train = train[i:window_size+i]              # lagged input window
        y_train = train[i+window_size+horizon-1]      # value to predict
        tabular_X.append(X_train)
        tabular_y.append(y_train)
    return np.asarray(tabular_X), np.asarray(tabular_y).reshape(-1, )
# your code here...
# get data in tabular format for NN
#X_data, y_data = sliding_window(...)
#you will need to use these variable names for the next section.
#X_train, y_train, X_test, y_test = ... train test split code
# example solution
def ts_train_test_split(*arrays, train_size):
    '''
    Temporal (no shuffling) train/test split.

    Each input array is cut at position `train_size`: the first
    `train_size` observations form the training part, the remainder
    forms the test part.

    Parameters:
    --------
    *arrays: array-like
        one or more arrays to split identically (e.g. X and y).
    train_size: int
        number of leading observations to keep for training.

    Returns:
    -------
    tuple
        (a1_train, a1_test, a2_train, a2_test, ...) in argument order.
    '''
    pieces = []
    for data in arrays:
        pieces.append(data[:train_size])
        pieces.append(data[train_size:])
    return tuple(pieces)
WINDOW_SIZE = 12   # 12 monthly lags = one full seasonal cycle
X_data, y_data = sliding_window(admit_rate,
                                window_size=WINDOW_SIZE)
#train test split
train_size = len(y_data) - 12   # hold back the last 12 windows for testing
X_train, X_test, y_train, y_test = ts_train_test_split(X_data,
                                                       y_data,
                                                       train_size=train_size)
```
# Scaling the features and target to be between -1 and 1
In many machine learning applications data are scaled to be between 0 and 1. For neural network forecasting, *Ord, Fildes and Kourentzes (2017)* recommend scaling to be between -1 and 1. This is what we will do here. To do the scaling we will use
```python
sklearn.preprocessing.MinMaxScaler
```
> Execute the code below to transform the data.
```
from sklearn.preprocessing import MinMaxScaler
# Scale everything to [-1, 1], as recommended for NN forecasting.
scaler = MinMaxScaler(feature_range=(-1, 1))
# scale on training data
# Fit on the training portion only (all but the last 12 months) so no
# test information leaks into the scaling parameters.
scaler.fit(admit_rate.iloc[:-12].to_numpy().reshape(-1, 1))
y_train = scaler.transform(y_train.reshape(-1, 1))
# NOTE(review): the scaler was fitted on a single column but is applied to the
# 12-column lag matrices below; this relies on broadcasting and recent
# scikit-learn versions may reject the feature-count mismatch - confirm.
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
y_test = scaler.transform(y_test.reshape(-1, 1))
```
# **Exercise 2**: A Linear regression model benchmark
The first model we will try is the linear model. Its will serve as our neural network baseline. (In practice we would also check this is better than a naive method such as seasonal naive).
## Exercise 2a Train the model
**Task:**
* Using `Keras`, construct a neural network that mimics a simple linear regression model (see previous notebook).
* Optional: To get comparable results, set the tensorflow random number seed to 1234
* Train the model for 100 epochs.
* Optionally you can use an early stopping callback with patience set to 10.
```
# your code here ...
def get_linear_model(ws):
    '''
    Exercise placeholder: build and compile a Sequential Keras model that
    mimics an autoregressive (AR) linear model.

    Parameters
    ----------
    ws : int
        window size - the number of lagged observations (input features).
    '''
    pass
# example answer
def get_linear_model(ws, lr=0.01, metrics=None):
    '''
    Build and compile a Sequential Keras model that mimics an
    autoregressive (AR) linear model: one Dense unit with no activation,
    i.e. a linear combination of the `ws` lagged inputs plus a bias.

    Parameters
    ----------
    ws : int
        window size (number of lagged input features).
    lr : float, optional (default=0.01)
        learning rate for the Adam optimiser.
    metrics : list, optional (default=None -> ['mae', 'mse'])
        metrics reported during training.

    Returns
    -------
    compiled keras Sequential model
    '''
    metrics = ['mae', 'mse'] if metrics is None else metrics
    model = Sequential()
    model.add(Dense(1, input_shape=(ws,)))
    model.compile(optimizer=Adam(learning_rate=lr),
                  loss='mse',
                  metrics=metrics)
    return model
# set tensorflow random seed for repeatability
tf.random.set_seed(1234)
N_EPOCHS = 100
# stop if validation loss has not improved for 10 consecutive epochs
es = EarlyStopping(monitor='val_loss', patience=10)
#call the linear model function create earlier.
model_lm = get_linear_model(ws=12, metrics=['mae'])
#fit model silently (verbose=0)
results = model_lm.fit(x=X_train,
                       y=y_train,
                       epochs=N_EPOCHS,
                       validation_data=(X_test, y_test),
                       verbose=0,
                       callbacks=[es])
# training vs validation loss curves per epoch
plt.plot(results.history['loss'], label='loss')
plt.plot(results.history['val_loss'], label='val_loss')
plt.legend()
```
## Extra: Plot the fitted values
```
# In-sample fit: invert the [-1, 1] scaling so both series are in admissions/day.
plt.plot(scaler.inverse_transform(y_train), label='ground truth')
plt.plot(scaler.inverse_transform(model_lm.predict(X_train)), label='NN fitted')
plt.legend();
```
## Exercise 2b. Generate and evaluate a multi-step forecast
**Task:**
* Using the iterative method produce a 12 step forecast. Save the predictions in a variable called `y_preds_lm`
* Calculate the RMSE
* Optional: Plot the results -> predictions versus validation.
**Hints:**
* A function `autoregressive_iterative_forecast` is provided below. (you could use this function or write your own if you prefer!)
```
def autoregressive_iterative_forecast(model, exog, h):
    '''
    h-step ahead forecast for an autoregressive model using the
    iterative (recursive) prediction method.

    Performs h one-step forecasts; after each step the lag vector is
    shifted left by one position and the newest prediction replaces the
    oldest value.

    Parameters:
    ------
    model: forecast object
        model that has a .predict(X) interface
    exog: array-like
        initial vector of lagged values (X)
    h: int
        forecast horizon. assumed to be > 0

    Returns:
    ------
    numpy.ndarray
        y_predictions
    '''
    lags = exog
    forecasts = []
    for _ in range(h):
        one_step = model.predict(lags.reshape(1, -1))[0, 0]
        forecasts.append(one_step)
        # np.roll returns a copy, so the caller's exog is never mutated;
        # drop the oldest lag and append the new prediction.
        lags = np.roll(lags, shift=-1)
        lags[-1] = one_step
    return np.array(forecasts)
##### your code here ...
# example solution
def plot_nn_prediction_results(model, X_train, y_train, y_test, y_preds):
    '''
    Utility function to plot in-sample fit, ground truth, test data and
    the out-of-sample forecast on one set of axes.

    Relies on the module-level `scaler` (to invert the [-1, 1] scaling)
    and `admit_rate` (the raw series) defined earlier in the notebook.

    Parameters
    ----------
    model : fitted Keras model
    X_train, y_train : scaled training inputs/targets
    y_test : scaled test targets
    y_preds : forecast already on the original (inverse-transformed) scale
    '''
    # create series back on the original scale
    fitted_values = scaler.inverse_transform(model.predict(X_train))
    ground_truth = scaler.inverse_transform(y_train)
    ground_truth_val = scaler.inverse_transform(y_test)
    # FIX: np.NAN was removed in NumPy 2.0 - use the canonical np.nan.
    padding = np.full(len(fitted_values), np.nan)
    # left-pad test/forecast with NaN so they align after the training period
    validation = np.concatenate([padding.reshape(-1, 1), ground_truth_val])
    forecast = np.concatenate([padding.reshape(-1, 1), y_preds])
    plt.plot(ground_truth, label='ground truth')
    plt.plot(validation, label='test')
    plt.plot(fitted_values, label='in-sample', linestyle='-.')
    plt.plot(forecast, label='out-of-sample', linestyle='-.')
    plt.plot(admit_rate.to_numpy()[12:])
    plt.legend();
# predict next 12 months and plot
H = 12
# seed the iterative forecast with the first test window (the last 12 training months)
y_preds_lm = autoregressive_iterative_forecast(model_lm, X_test[0], h=H)
y_preds_lm = scaler.inverse_transform(y_preds_lm.reshape(-1, 1))   # back to admissions/day
plot_nn_prediction_results(model_lm, X_train, y_train, y_test, y_preds_lm)
rmse(y_preds_lm, scaler.inverse_transform(y_test))[0]
```
# **Exercise 3:** Training a non-linear deep network
Now that you have got the basic structure and mechanics of the code you need for forecasting let's build a more complex model and compare the RMSE on the validation set to your simple linear model.
**Task:**
* Create a new neural network model with 2 hidden layers
* Try 32 and 64 neurons for layer 1 and 2 respectively
* Use a ReLU activation function.
* Use the Adam optimiser with a learning rate of 0.01
* Predict the next 12 months ahead
* Calculate the RMSE
**Hints:**
* Feel free to experiment with the number of hidden layers, neurons and learning rate.
* Perhaps try a dropout layer(s) if you feel your model is overfitting.
* Set a tensorflow random seed if you want to be able to reproduce your results e.g. 45676
```
# your code here ...
def get_network_model(ws, n_neurons_l1=32, n_neurons_l2=64,
                      include_layer_two=False, include_drop_out=False,
                      drop_out_rate=0.2, lr=0.01, metrics=None):
    '''
    Build and compile a configurable feedforward network for forecasting.

    Parameters
    ----------
    ws : int
        window size (number of lagged input features).
    n_neurons_l1, n_neurons_l2 : int
        neurons in hidden layers 1 and 2.
    include_layer_two : bool
        add the second ReLU hidden layer.
    include_drop_out : bool
        add a Dropout layer before the output.
    drop_out_rate : float
        fraction of units dropped when dropout is enabled.
    lr : float
        Adam learning rate.
    metrics : list, optional (default=None -> ['mse'])
        metrics reported during training.

    Returns
    -------
    compiled keras Sequential model with a single linear output unit
    '''
    metrics = ['mse'] if metrics is None else metrics
    stack = [Flatten(input_shape=(ws,)),
             Dense(n_neurons_l1, activation='relu')]
    if include_layer_two:
        stack.append(Dense(n_neurons_l2, activation='relu'))
    if include_drop_out:
        stack.append(Dropout(drop_out_rate))
    stack.append(Dense(1))
    model = Sequential(stack)
    model.compile(optimizer=Adam(learning_rate=lr),
                  loss='mse',
                  metrics=metrics)
    return model
# set tensorflow random seed
tf.random.set_seed(45676)
N_EPOCHS = 100
# early stopping monitors *training* loss here: no validation data is passed to fit
es = EarlyStopping(monitor='loss', patience=10)
#single layer nn
mlp = get_network_model(ws=12, n_neurons_l1=5, include_layer_two=True,
                        n_neurons_l2=32)
#fit model silently
results_mlp = mlp.fit(x=X_train,
                      y=y_train,
                      epochs=N_EPOCHS,
                      verbose=0, callbacks=[es])
# predict next 12 months and plot
H = 12
y_preds_mlp = autoregressive_iterative_forecast(mlp, X_test[0], h=H)
y_preds_mlp = scaler.inverse_transform(y_preds_mlp.reshape(-1, 1))
plot_nn_prediction_results(mlp, X_train, y_train, y_test, y_preds_mlp)
# compare the deep network against the linear benchmark on the held-out year
rmse_lm = rmse(scaler.inverse_transform(y_test), y_preds_lm)[0]
rmse_mlp = rmse(scaler.inverse_transform(y_test), y_preds_mlp)[0]
print(f'rmse lm: {rmse_lm:.2f}\nrmse mlp: {rmse_mlp:.2f}')
# try changing the network parameters to see the impact on the
# rmse relative to the linear model
# set tensorflow random seed
tf.random.set_seed(45676)
#tf.random.set_seed(1234)
N_EPOCHS = 100
H = 12
es = EarlyStopping(monitor='loss', patience=10)
#single layer nn
# same architecture as above but with a 10x larger learning rate
mlp = get_network_model(ws=12,
                        n_neurons_l1=5,
                        include_layer_two=True,
                        n_neurons_l2=32,
                        lr=0.1)
#fit model silently
history = mlp.fit(x=X_train,
                  y=y_train,
                  epochs=N_EPOCHS,
                  verbose=0, callbacks=[es])
y_preds_mlp = autoregressive_iterative_forecast(mlp, X_test[0], h=H)
y_preds_mlp = scaler.inverse_transform(y_preds_mlp.reshape(-1, 1))
rmse_lm = rmse(scaler.inverse_transform(y_test), y_preds_lm)[0]
rmse_mlp = rmse(scaler.inverse_transform(y_test), y_preds_mlp)[0]
print(f'rmse lm: {rmse_lm:.2f}\nrmse mlp: {rmse_mlp:.2f}')
plot_nn_prediction_results(mlp, X_train, y_train, y_test, y_preds_mlp)
```
# Ensemble Learning
In all of the examples above we have been setting a random seed for tensorflow. This 'suggests' that if we used a different random number seed we would get a slightly different result (this is due to both random initialisation of weights/biases and stochastic gradient descent). Neural networks are extremely flexible and have many parameters. This leads to one of the key challenges with neural networks - overfitting. There are multiple ways to deal with overfitting. In forecasting a common approach is to use an **ensemble** of models.
In an ensemble we train multiple models.
## Training an ensemble
We will train an ensemble of neural networks that mimic a linear model.
The code below has been provided for you to work through.
* We set some parameters e.g. number of models in an the ensemble: 20 to 30 should be plenty.
* We use a python loop to create and train each model and store the model in a python list.
* Optionally we can save the models to file and load pre-trained versions at a later date.
* To predict we the need to loop through the collection of models.
```
def load_pretrained_ensemble(n_models):
    """Load `n_models` pre-trained ensemble members from disk.

    Only use this if the saved .h5 model files actually exist!
    """
    url = '/input'
    # one keras model per saved file, in index order
    return [tf.keras.models.load_model(f'{url}/ensemble_model_{n}.h5')
            for n in range(n_models)]
# script to train the models.
################# Parameters for the ensemble #################################
# set random seed so that ensemble can be repeated.
tf.random.set_seed(1085)
# number of models to create...
N_MODELS = 20
# max no. of epochs for training of each model.
N_EPOCHS = 100
# no. of autoregressive lags
WINDOW_SIZE = 12
# early stopping regularisation: stop after 10 epochs without improvement
es = EarlyStopping(monitor='loss', patience=10)
# I've pretrained 50 models you can load them from file if wanted.
LOAD_FROM_FILE = False
###############################################################################
if LOAD_FROM_FILE:
    # it will take a few seconds to load.
    models = load_pretrained_ensemble(N_MODELS)
else:
    models = []
    for n in range(N_MODELS):
        # linear model (no hidden layers): this ensemble mimics linear regression
        model_n = get_linear_model(WINDOW_SIZE)
        # fit model silently (verbose=0)
        history = model_n.fit(x=X_train,
                              y=y_train,
                              epochs=N_EPOCHS,
                              verbose=0,
                              callbacks=[es],
                              batch_size=32)
        # NOTE: this will overwrite pre-trained models.
        model_n.save(f'input/ensemble_model_{n}.h5')
        models.append(model_n)
```
### Predictions in an ensemble
In an ensemble, we predict in a loop. In python this is straightforward as we simply loop through the models we have trained and call `autoregressive_iterative_forecast`. We will store the predictions of each forecast in a python `list` called `e_preds`
<div class="alert alert-info">
In an ensemble we end up with a distribution of forecasts! For point forecasts we could then take the median of the forecasts. We can also get a measure of variability in the forecasts by calculating the quantiles.
</div>
```
# create the forecasts: one H-step forecast per trained ensemble member
# this code will take a few seconds to execute
H = 12
e_preds = []
for model in models:
    y_preds = autoregressive_iterative_forecast(model, X_test[0], h=H)
    e_preds.append(y_preds)
# stack the member forecasts into a single (n_models, H) array
e_preds = np.array(e_preds)
```
Inverse transform the data and calculate the median and 0.025 and 0.975 percentiles of the point forecasts
Remember we can use `scaler.inverse_transform()`
```
# e_preds was already built as an ndarray above, so asarray is a no-op here
e_preds = np.asarray(e_preds)
# back-transform to the original scale; after .T rows index the forecast
# horizon and columns index the ensemble members
e_preds_tran = scaler.inverse_transform(e_preds).T
# percentiles are taken ACROSS members (the second .T undoes the first,
# so axis=0 runs over models): median = point forecast,
# 2.5 / 97.5 = middle 95% of the ensemble
y_preds_mdn = np.percentile(e_preds_tran.T, 50, axis=0)
y_preds_2_5 = np.percentile(e_preds_tran.T, 2.5, axis=0)
y_preds_97_5 = np.percentile(e_preds_tran.T, 97.5, axis=0)
y_preds_mdn.shape
# plot the individual forecasts and the median
fig,ax = plt.subplots(1, 2, sharey=True, figsize=(12, 4))
# left panel: every individual member forecast against the test data
ax[0].plot(e_preds_tran)
ax[0].plot(scaler.inverse_transform(y_test), label='test', linestyle='--',
           color='red')
ax[0].plot(y_preds_mdn, label='median', linestyle='-', color='black')
ax[0].legend()
ax[0].set_title(f'Point forecasts: {N_MODELS} models')
# right panel: median forecast with the middle 95% interval
ax[1].plot(scaler.inverse_transform(y_test), label='test', linestyle='--',
           color='red')
ax[1].plot(y_preds_mdn, label='median', linestyle='-', color='black')
ax[1].plot(y_preds_2_5, label='0.025 percentile', linestyle='-.', color='black')
ax[1].plot(y_preds_97_5, label='0.975 percentile', linestyle='--', color='black')
#ax[1].plot(y_preds_lm, label='original lmforecast', linestyle='--', color='green')
ax[1].set_title(f'Middle 95% of point forecasts ')
ax[1].legend();
# compare the single linear model against the ensemble median
rmse_lm = rmse(scaler.inverse_transform(y_test), y_preds_lm)[0]
rmse_mdn = rmse(scaler.inverse_transform(y_test), y_preds_mdn)[0]
print(f'rmse lm: {rmse_lm:.2f}\nrmse ensemble: {rmse_mdn:.2f}')
rmse_25 = rmse(scaler.inverse_transform(y_test), y_preds_2_5)[0]
rmse_75 = rmse(scaler.inverse_transform(y_test), y_preds_97_5)[0]
print(f'95% of linear models will have rmse between: {rmse_75:.2f} - {rmse_25:.2f}')
```
**Question**: Is the ensemble approach useful? What does it tell us about our original linear model?
## Exercise 4: Create an ensemble of non-linear models.
Is the two layer model more accurate than the simple linear regression model and its ensemble counterpart?
**Task:**
* Create an ensemble of 20 models.
* Each model should be based on your solution to exercise 2 (e.g. a neural network with 2 hidden layers)
* Optional: save your models to file. (recommended)
* Forecast the next 12 periods.
* Calculate the RMSE of the forecast.
**Hints:**
* You have **all of the code** you need to complete this task!
* Remember to back transform your forecasts
* Use the median of the ensemble.
* Look carefully at the previous ensemble example.
**Questions**
* Which out of the simple linear, multi-layer and ensemble models do you think is best in this instance?
```
# your code here ...
# set tensorflow random seed for repeatability
tf.random.set_seed(1066)
# FIX: this was misspelled `N_MODLES`, so the training loop below silently
# reused the N_MODELS value defined in an earlier cell instead of this one
N_MODELS = 20
N_EPOCHS = 100
H = 12
es = EarlyStopping(monitor='loss', patience=10)
BATCH_SIZE = 32
models = []
for n in range(N_MODELS):
    # multi-layer model: same architecture as the exercise 2 solution
    model_n = get_network_model(ws=12,
                                n_neurons_l1=5,
                                include_layer_two=True,
                                n_neurons_l2=32,
                                lr=0.1)
    # fit model silently
    # FIX: `es` was created above but never passed to fit(), so early
    # stopping did not actually run for the MLP ensemble
    history = model_n.fit(x=X_train,
                          y=y_train,
                          epochs=N_EPOCHS,
                          verbose=0,
                          callbacks=[es],
                          batch_size=BATCH_SIZE)
    # NOTE: this will overwrite previously saved models.
    model_n.save(f'output/mlp_ensemble_{n}.h5')
    models.append(model_n)
# create the H-step forecasts, one per ensemble member
# this code will take a few seconds to execute
H = 12
e_preds = []
for model in models:
    y_preds = autoregressive_iterative_forecast(model, X_test[0], h=H)
    e_preds.append(y_preds)
e_preds = np.array(e_preds)
# back-transform and summarise the ensemble distribution
# (rows of e_preds_tran index the horizon, columns the members)
e_preds_tran = scaler.inverse_transform(e_preds).T
y_preds_mdn = np.percentile(e_preds_tran.T, 50, axis=0)
y_preds_2_5 = np.percentile(e_preds_tran.T, 2.5, axis=0)
y_preds_97_5 = np.percentile(e_preds_tran.T, 97.5, axis=0)
y_preds_mdn.shape
# plot the individual forecasts and the median
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(12, 4))
ax[0].plot(e_preds_tran)
ax[0].plot(scaler.inverse_transform(y_test), label='test', linestyle='--',
           color='red')
ax[0].plot(y_preds_mdn, label='median', linestyle='-', color='black')
ax[0].legend()
ax[0].set_title(f'Point forecasts: {N_MODELS} models')
ax[1].plot(scaler.inverse_transform(y_test), label='test', linestyle='--',
           color='red')
ax[1].plot(y_preds_mdn, label='median', linestyle='-', color='black')
ax[1].plot(y_preds_2_5, label='0.025 percentile', linestyle='-.', color='black')
ax[1].plot(y_preds_97_5, label='0.975 percentile', linestyle='--', color='black')
ax[1].set_title(f'Middle 95% of point forecasts ')
ax[1].legend();
# FIX: the single-MLP rmse was stored in a variable misleadingly called
# rmse_lm, the ensemble rmse transposed y_test (inconsistent with every
# other cell), and the printout labelled the MLP value "lm"
rmse_mlp = rmse(scaler.inverse_transform(y_test), y_preds_mlp)[0]
rmse_mdn = rmse(scaler.inverse_transform(y_test), y_preds_mdn)[0]
print(f'rmse mlp: {rmse_mlp:.2f}\nrmse ensemble: {rmse_mdn:.2f}')
```
## Optional Extra exercise for you to think about.
* How would you use a ensemble method with a model that predicts a vector?
# End of lab
| github_jupyter |
<a href="https://colab.research.google.com/github/Dmitri9149/Transformer_From_Scratch/blob/main/Final_Working_Transformer_MXNet_76800_128_22_10_20.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install -U mxnet-cu101==1.7.0
!pip install d2l==0.14.4
### !pip install ipython-autotime
### %load_ext autotime
import math
from d2l import mxnet as d2l
from mxnet import np, npx
from mxnet.gluon import nn
from mxnet import np, npx, init, gluon, autograd
import collections
import os
import time
npx.set_np()
from mxnet import autograd, np, npx
```
The code for Transformer from scratch is collected here. The code is mostly from http://d2l.ai/chapter_attention-mechanisms/transformer.html . I did many comments to the code at most difficult points. I hope my additional code and comments will help in better understanding of the Transformer.
This is the original article for the Transformer :
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., … Polosukhin, I. (2017). Attention is all you need. Advances in neural information processing systems (pp. 5998–6008).
The future work:
1. To learn the Transformer on big data set.
2. Translation from (to) English to the Finnish language.
3. Modify the architecture of the Transformer.
4. Better tokenization and preprocessing.
### Attention Mechanism
#### Masked softmax
This is importand auxiliary function.
""" The masked softmax takes a 3-dimensional input and enables us to filter out some elements by specifying a valid length for the last dimension.... As a result, any value outside the valid length will be masked as 0.""" (citation from d2l.ai).
The notion of valid length comes from the need to add special <pad> tokens if a sentence is shorter than the length we use for all sentences in a batch. The <pad> tokens will not participate in prediction.
My comments are started with ### ,
the comments with one # are from the original d2l.ai code.
Some functions for plotting and downloading of specific files from specific places are still taken from the d2l.ai library on GitHub : https://github.com/d2l-ai/d2l-en/blob/master/d2l/mxnet.py But the biggest part of the code is collected here (and commented).
```
### from d2l.ai
def masked_softmax(X, valid_len):
"""Perform softmax by filtering out some elements."""
# X: 3-D tensor, valid_len: 1-D or 2-D tensor
### why 3-D tensor ?
### first dimention; we will quantify samples within batch,
### so, the first dimention determines the number of samples in the batch
###
### second dimention; we will quantify queries,
### we may have several queries,
### the second dimention determines the number of queries
###
### we may set up the valid lengths same for every sample in the
### batch, i.e 1-D valid-lengh with size (batch_size, )
### the same means : independent of the queries
### On the contarry: we may set up valid lengths individually for every
### sample in a batch and for every query,
### in this case it will be 2-D valid length
### with size (batch size, number of queries)
###
### Third parameter will correspond to the number of key/value pairs
###
### We may need the valid_length when: 1. we <pad> the end of a sentence: it is too
### short, shorter than num_steps ; 2. when we use the valid_lenght in decoder
### via training, and every word in target sentence is used as query: the query
### can (or may ?) see the all words to the left, but not to the right (see the
### encoder decoder code below). To handle the case we use valid_length too.
###
if valid_len is None:
return npx.softmax(X)
else:
shape = X.shape
if valid_len.ndim == 1:
valid_len = valid_len.repeat(shape[1], axis=0)
else:
valid_len = valid_len.reshape(-1)
# Fill masked elements with a large negative, whose exp is 0
X = npx.sequence_mask(X.reshape(-1, shape[-1]), valid_len, True,
axis=1, value=-1e6)
return npx.softmax(X).reshape(shape)
### from d2l.ai
### batch of 2 samples, 2 queries per sample, 4 key/value pairs each;
### valid lengths [2, 3]: sample 1 keeps only its first 2 pairs and
### sample 2 its first 3 -- the rest are masked to 0 because those
### pairs correspond to <pad> tokens.  With a 1-D valid length the
### mask does not depend on the query.
masked_softmax(np.random.uniform(size=(2, 2, 4)), np.array([2, 3]))
### from d2l.ai: batched matrix product sanity check
npx.batch_dot(np.ones((2, 1, 3)), np.ones((2, 3, 2)))
### one more example with a 1-D valid length
valid_length = np.array([2,3])
### the shape is (2,): one-dimensional valid length
print('valid_length shape= ', valid_length.shape)
masked_softmax(np.random.uniform (size =(2, 3, 5)), valid_length )
### and now a 2-D valid_length: one length per sample AND per query
valid_length = np.array([[3, 5, 4], [2,4, 1], [1,4, 3],[1,2,3]])
print('valid_length shape= ', valid_length.shape)
masked_softmax(np.random.uniform(size = (4, 3, 5)), valid_length)
### Let us consider the first sample in our batch
### [[0.21225105, 0.31475353, 0.4729953 , 0. , 0. ,
### 0. ],
### [0.19417836, 0.20596693, 0.16711308, 0.15453914, 0.27820238,
### 0. ],
### [0.2753876 , 0.21671425, 0.30811197, 0.19978616, 0. ,
### 0. ]],
### from third dimention in np.random.uniform(size = (4, 3, 5)) we may see it correspond to
### 5 key/value pairs (that is why the length of the lines is 5)
### second dimention in np.random.uniform(size = (4, 3, 5)) means the results are obtained from
### 3 queries, that is why there are 3 lines corresponding to every batch
###
### Below we may see there are 4 groups, because the first dimention, the
### number of samples, is 4 (batch size)
###
### np.array([[3, 5, 4], [2,4, 1], [1,4, 3],[1,2,3]])
### is 2-D array (of size 4 * 3 in our case))
### 4 is batch size, 3 is number of queries : we have 4 groups with 3 lines in each;
### the [3,5,4] subarray correspond to the first sample in the batch,
### in the first group : the first line has first 3 non zero elements,
### the second line 5 first non zero and third line 4 first non zero elements.
```
### Dot product attention
#### Why we need it, how it is calculated
We have query with dimension `d`.
We have #kv_pairs key/value pairs. Every key and value is a vector of dimension `d`. We pass the query through the 'grid' of #kv_pairs keys and obtain #kv_pairs scores: within the pass we take the dot product of the query with each of the #kv_pairs keys in the 'grid'. We also normalize the scores by dividing by $\sqrt{d}$.
If we have batch with size batch_size and number of queries = #queries, we will get tensor of scores of size (batch_size, #queries, #kv_pairs).
In this way we receive the attention_weights tensor.
We also have tensor 'value' of values of size (batch_size, #kv_pairs, dim_v).
Finally, using npx.batch_dot(attention_weights, value) we will get a tensor of size (batch_size, #queries, dim_v) which corresponds to 'passing' our queries through the 'grid' of key/value pairs: for every query, for every sample in the batch, we will get a transformed vector of size dim_v.
```
### from d2l.ai book
class DotProductAttention(nn.Block):
    """Scaled dot-product attention (from d2l.ai).

    Scores each query against every key with a dot product, scales by
    1/sqrt(d), masks out invalid (padded) positions, and returns the
    attention-weighted sum of the values.
    """

    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    # `query`: (`batch_size`, #queries, `d`)
    # `key`: (`batch_size`, #kv_pairs, `d`)
    # `value`: (`batch_size`, #kv_pairs, `dim_v`)
    # `valid_len`: either (`batch_size`, ) or (`batch_size`, xx)
    def forward(self, query, key, value, valid_len=None):
        # scale by sqrt of the query/key dimension
        dim = query.shape[-1]
        raw_scores = npx.batch_dot(query, key, transpose_b=True) / math.sqrt(dim)
        weights = self.dropout(masked_softmax(raw_scores, valid_len))
        # (`batch_size`, #queries, `dim_v`)
        return npx.batch_dot(weights, value)
if False:
    ### the code from d2l.ai
    atten = DotProductAttention(dropout=0.5)
    atten.initialize()
    ### batch size of 2, #kv_pairs = 10, every key is the vector (1., 1.)
    keys = np.ones((2, 10, 2))
    ### values: floats 0..39 reshaped to one sample of 10 key/value pairs
    ### with dim_v = 4, then repeated to give a batch of 2 identical samples
    values = np.arange(40).reshape(1, 10, 4).repeat(2, axis=0)
    atten(np.ones((2, 1, 2)), keys, values, np.array([2, 6]))
if False:
    atten = DotProductAttention(dropout=0.5)
    atten.initialize()
    keys = np.ones((3,10,5)) # keys in batch of size 3; for every sample
    ### there are 10 key/value pairs; each key is a 5-dimensional vector
    values = np.arange(70).reshape(1,10,7).repeat(3, axis =0) # values in batch of
    ### size 3; 10 values, each a 7-dimensional vector;
    ### the 3 samples in the batch are identical by construction
    queries = np.ones((3,4,5)) # 4 queries per sample, each a vector of
    ### size 5 (same size as the keys)
    atten(queries, keys, values, np.array([3, 8, 6])) # 1-D valid_len:
    ### lengths 3, 8 and 6 for the three samples respectively
### 4 quiry per every sample in batch where every query is vector of size 5
### the valid_len is 1-D
### for the 3 samples the valid_length have size 3 , 8 , 6 ;
### size 3 for first sample , ....., ..... size 6 for the last sample
### the outputs are:
### for every entry in the batch (for every of the 3 samples)
### for every of 4 queries
### total : 3*4 = 12 final values: vectors of size 7
### the values are different for different samples in the batch ,
### because we used different valid length,
### but for every sample group in the batch (same sample, different queries),
### all 4 final values are the same:
### even we use 4 queries, all the quiries are equal in our case
```
### Multihead Attention
""" The *multi-head attention* layer consists of $h$ parallel self-attention layers, each one is called a *head*. For each head, before feeding into the attention layer, we project the queries, keys, and values with three dense layers with hidden sizes $p_q$, $p_k$, and $p_v$, respectively. The outputs of these $h$ attention heads are concatenated and then processed by a final dense layer.

Assume that the dimension for a query, a key, and a value are $d_q$, $d_k$, and $d_v$, respectively. Then, for each head $i=1,\ldots, h$, we can train learnable parameters
$\mathbf W_q^{(i)}\in\mathbb R^{p_q\times d_q}$,
$\mathbf W_k^{(i)}\in\mathbb R^{p_k\times d_k}$,
and $\mathbf W_v^{(i)}\in\mathbb R^{p_v\times d_v}$. Therefore, the output for each head is
$$\mathbf o^{(i)} = \mathrm{attention}(\mathbf W_q^{(i)}\mathbf q, \mathbf W_k^{(i)}\mathbf k,\mathbf W_v^{(i)}\mathbf v),$$
where $\textrm{attention}$ can be any attention layer, such as the `DotProductAttention` and `MLPAttention` as we introduced in :numref:`sec_attention`.
After that, the output with length $p_v$ from each of the $h$ attention heads are concatenated to be an output of length $h p_v$, which is then passed the final dense layer with $d_o$ hidden units. The weights of this dense layer can be denoted by $\mathbf W_o\in\mathbb R^{d_o\times h p_v}$. As a result, the multi-head attention output will be
$$\mathbf o = \mathbf W_o \begin{bmatrix}\mathbf o^{(1)}\\\vdots\\\mathbf o^{(h)}\end{bmatrix}.$$
Now we can implement the multi-head attention. Assume that the multi-head attention contain the number heads `num_heads` $=h$, the hidden size `num_hiddens` $=p_q=p_k=p_v$ are the same for the query, key, and value dense layers. In addition, since the multi-head attention keeps the same dimensionality between its input and its output, we have the output feature size $d_o =$ `num_hiddens` as well. """ (citation from d2l.ai book).
There are some problems in the d2l.ai text, there is stated :
$p_q$ = $p_k$ = $p_v$ = num_hiddens,
and
$d_o =$ `num_hiddens` as well.
So, we have $W_o$ transformation from input of size (num_heads * num_hiddens) to output of size (num_hiddens). If h > 1, the input size and output size can not be equal. But in the PyTorch code in the d2l.ai we have:
self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)
with equal input and output. It is hidden in the d2l.ai
MXNet code: self.W_o = nn.Dense(num_hiddens, use_bias=use_bias, flatten=False), because in the
case of Gluon Dense layer we state only output dimension (num_hiddens in the case). The input dimension is not stated.
There is also assumed in the code below (from d2l.ai book), the num_hiddens is multiple of num_heads. No assumptions about it in the main text of the book. But in the d2l.ai code the assumption is used.
The only interpretation of the code below I can give now:
$p_v$ * num_heads=num_hiddens (same for $p_q$ = $p_k$ = $p_v$),
but not $p_v$=num_hiddens.
I will interpret the code with the assumption.
```
### from d2l.ai
class MultiHeadAttention(nn.Block):
    """h parallel dot-product attention heads with learned projections
    W_q, W_k, W_v for the inputs and W_o for the concatenated output."""

    def __init__(self, num_hiddens, num_heads, dropout, use_bias=False, **kwargs):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.num_heads = num_heads
        self.attention = d2l.DotProductAttention(dropout)
        ### here num_hiddens = num_heads * p_v, where p_v (see the text
        ### above) is the per-head output dimension, i.e.
        ### p_v = num_hiddens / num_heads; that assumption explains the
        ### layer sizes below
        self.W_q = nn.Dense(num_hiddens, use_bias=use_bias, flatten=False)
        self.W_k = nn.Dense(num_hiddens, use_bias=use_bias, flatten=False)
        self.W_v = nn.Dense(num_hiddens, use_bias=use_bias, flatten=False)
        ### if every head maps its slice to p_q = p_k = p_v =
        ### num_hiddens / num_heads, concatenating num_heads such outputs
        ### gives a vector of size num_hiddens again; hence W_o has equal
        ### input and output dimension num_hiddens
        self.W_o = nn.Dense(num_hiddens, use_bias=use_bias, flatten=False)

    ### Each query produces num_heads outputs that are concatenated into a
    ### vector of dimension num_hiddens.  To run attention per head, the
    ### projections are de-concatenated into num_heads pieces of size
    ### num_hiddens / num_heads and folded into the batch axis, growing the
    ### batch to batch_size * num_heads; valid_len is tiled to match.
    ### After attention the reverse reshape concatenates the head outputs
    ### back together.  These are the roles of the transpose_qkv /
    ### transpose_output helpers below.
    def forward(self, query, key, value, valid_len):
        # For self-attention, `query`, `key`, and `value` shape:
        # (`batch_size`, `seq_len`, `dim`), where `seq_len` is the length of
        # input sequence. `valid_len` shape is either (`batch_size`, ) or
        # (`batch_size`, `seq_len`).

        # Project and transpose `query`, `key`, and `value` from
        # (`batch_size`, `seq_len`, `num_hiddens`) to
        # (`batch_size` * `num_heads`, `seq_len`, `num_hiddens` / `num_heads`)
        query = transpose_qkv(self.W_q(query), self.num_heads)
        key = transpose_qkv(self.W_k(key), self.num_heads)
        value = transpose_qkv(self.W_v(value), self.num_heads)

        if valid_len is not None:
            # Copy `valid_len` by `num_heads` times
            if valid_len.ndim == 1:
                valid_len = np.tile(valid_len, self.num_heads)
            else:
                valid_len = np.tile(valid_len, (self.num_heads, 1))

        # For self-attention, `output` shape:
        # (`batch_size` * `num_heads`, `seq_len`, `num_hiddens` / `num_heads`)
        output = self.attention(query, key, value, valid_len)

        # `output_concat` shape: (`batch_size`, `seq_len`, `num_hiddens`)
        output_concat = transpose_output(output, self.num_heads)
        return self.W_o(output_concat)
### from d2l.ai
def transpose_qkv(X, num_heads):
# Input `X` shape: (`batch_size`, `seq_len`, `num_hiddens`).
# Output `X` shape:
# (`batch_size`, `seq_len`, `num_heads`, `num_hiddens` / `num_heads`)
X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
# `X` shape:
# (`batch_size`, `num_heads`, `seq_len`, `num_hiddens` / `num_heads`)
X = X.transpose(0, 2, 1, 3)
# `output` shape:
# (`batch_size` * `num_heads`, `seq_len`, `num_hiddens` / `num_heads`)
output = X.reshape(-1, X.shape[2], X.shape[3])
return output
### from d2l.ai
def transpose_output(X, num_heads):
# A reversed version of `transpose_qkv`
X = X.reshape(-1, num_heads, X.shape[1], X.shape[2])
X = X.transpose(0, 2, 1, 3)
return X.reshape(X.shape[0], X.shape[1], -1)
if False:
    ### from d2l.ai
    ### num_hiddens = 100, num_heads = 10 -> 10 heads of width 10
    cell = MultiHeadAttention(100, 10, 0.5)
    cell.initialize()
    X = np.ones((2, 4, 5))
    valid_len = np.array([2, 3])
    ### output shape matches the input batch/seq axes with num_hiddens last
    cell(X, X, X, valid_len).shape
if False:
    ### corresponds to the scenario embedding size = 512, num_heads = 8,
    ### num_hiddens = 512
    cell = MultiHeadAttention(512, 8, 0.5)
    cell.initialize()
    # num of batches is 3 ; seq_len is 20 ; size of embedding is 512
    X = np.ones((3, 20, 512))
    valid_len = np.array([15,17,12])
    cell(X, X, X, valid_len).shape
```
### Position-wise encoding
Two 1 * 1 convolutional layers are applied. Extract
position independent features of word representations (in the same way the convolution layers are applied in image recognition networks).
""" Similar to the multi-head attention, the position-wise feed-forward network will only change the last dimension size of the input—the feature dimension. In addition, if two items in the input sequence are identical, the according outputs will be identical as well. """ (citation from d2l.ai)
```
### from d2l.ai
class PositionWiseFFN(nn.Block):
    """Position-wise feed-forward network (from d2l.ai).

    Two dense layers applied identically and independently at every
    sequence position; only the last (feature) axis changes size.
    """

    def __init__(self, ffn_num_hiddens, pw_num_outputs, **kwargs):
        super(PositionWiseFFN, self).__init__(**kwargs)
        # hidden projection with ReLU, then a linear output projection
        self.dense1 = nn.Dense(ffn_num_hiddens, flatten=False,
                               activation='relu')
        self.dense2 = nn.Dense(pw_num_outputs, flatten=False)

    def forward(self, X):
        hidden = self.dense1(X)
        return self.dense2(hidden)
if False:
    ### demo: map the last axis of a (2, 3, 4) input to size 8
    ffn = PositionWiseFFN(4, 8)
    ffn.initialize()
    ffn(np.ones((2, 3, 4)))[0]
```
### Add and Norm
""" we add a layer that contains a residual structure and a layer normalization after both the multi-head attention layer and the position-wise FFN network. Layer normalization is similar to batch normalization ........ One difference is that the mean and variances for the layer normalization are calculated along the last dimension, e.g X.mean(axis=-1) instead of the first batch dimension, e.g., X.mean(axis=0). Layer normalization prevents the range of values in the layers from changing too much, which allows faster training and better generalization ability. """ (citation from d2l.ai)
```
if False:
    ### from d2l.ai: contrast layer norm (statistics over the last axis)
    ### with batch norm (statistics over the batch axis)
    layer = nn.LayerNorm()
    layer.initialize()
    batch = nn.BatchNorm()
    batch.initialize()
    X = np.array([[1, 2], [2, 3]])
    # Compute mean and variance from `X` in the training mode
    with autograd.record():
        print('layer norm:', layer(X), '\nbatch norm:', batch(X))
```
"""AddNorm accepts two inputs X and Y. We can deem X as the original input in the residual network, and Y as the outputs from either the multi-head attention layer or the position-wise FFN network. In addition, we apply dropout on Y for regularization.""" citation from d2l.ai
```
### from d2l.ai
class AddNorm(nn.Block):
    """Residual connection followed by layer normalisation (from d2l.ai).

    `X` is the residual input; `Y` is the sublayer output, which is
    dropout-regularised before being added back to `X`.
    """

    def __init__(self, dropout, **kwargs):
        super(AddNorm, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm()

    def forward(self, X, Y):
        residual = self.dropout(Y) + X
        return self.ln(residual)
if False:
    ### d2l.ai demo: the output shape matches the input shape
    add_norm = AddNorm(0.5)
    add_norm.initialize()
    add_norm(np.ones((2, 3, 4)), np.ones((2, 3, 4))).shape
```
### Positional Encoding
```
### Alternative to the sinusoidal positional encoding below: encode only
### the raw token position.  It changes the results, but they are still
### quite good.  (Disabled; kept for experimentation.)
if False:
    ### from d2l.ai (modified)
    class PositionalEncoding(nn.Block):
        def __init__(self, num_hiddens, dropout, max_len=100):
            super(PositionalEncoding, self).__init__()
            self.dropout = nn.Dropout(dropout)
            # Create a long enough `P`
            ### max_len corresponds to sequence length;
            ### num_hiddens corresponds to embedding size
            self.P = np.zeros((1, max_len, num_hiddens))
            ### original d2l.ai computation, kept for reference:
            ### X = np.arange(0, max_len).reshape(-1, 1) / np.power(
            ###     10000, np.arange(0, num_hiddens, 2) / num_hiddens)
            ### self.P[:, :, 0::2] = np.sin(X)
            ### self.P[:, :, 1::2] = np.cos(X)
            ###################### my code, be careful !!!!!
            ### position scaled to [0, 1), same sine value in EVERY
            ### embedding dimension (0::1 fills all of them)
            X = np.arange(0, max_len).reshape(-1, 1) / max_len
            self.P[:, :, 0::1] = np.sin(X)
            ################################
        def forward(self, X):
            ### add the first seq_len position codes, on X's device
            X = X + self.P[:, :X.shape[1], :].as_in_ctx(X.ctx)
            return self.dropout(X)
### from d2l.ai
class PositionalEncoding(nn.Block):
def __init__(self, num_hiddens, dropout, max_len=1000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(dropout)
# Create a long enough `P`
### max_len correspond to sequence length ;
### num_hiddens correspond to embedding size
self.P = np.zeros((1, max_len, num_hiddens))
X = np.arange(0, max_len).reshape(-1, 1) / np.power(
10000, np.arange(0, num_hiddens, 2) / num_hiddens)
self.P[:, :, 0::2] = np.sin(X)
self.P[:, :, 1::2] = np.cos(X)
def forward(self, X):
X = X + self.P[:, :X.shape[1], :].as_in_ctx(X.ctx)
return self.dropout(X)
if False:
    ### from d2l.ai
    ### num_hiddens = 20, dropout = 0
    pe = PositionalEncoding(20, 0)
    pe.initialize()
    ### batch_size = 1; 100 token positions; embedding size 20
    Y = pe(np.zeros((1, 100, 20)))
    ### each curve is one embedding coordinate plotted across positions
    d2l.plot(np.arange(100), Y[0, :, 4:8].T, figsize=(6, 2.5),
             legend=["dim %d" % p for p in [4, 5, 6, 7]])
```
### Encoder
"""Armed with all the essential components of Transformer, let us first build a Transformer encoder block. This encoder contains a multi-head attention layer, a position-wise feed-forward network, and two “add and norm” connection blocks. As shown in the code, for both of the attention model and the positional FFN model in the EncoderBlock, their outputs’ dimension are equal to the num_hiddens. This is due to the nature of the residual block, as we need to add these outputs back to the original value during “add and norm”. """ (citation from d2l.ai)
```
### from d2l.ai
### this block will not change the input shape
class EncoderBlock(nn.Block):
def __init__(self, num_hiddens, ffn_num_hiddens, num_heads, dropout,
use_bias=False, **kwargs):
super(EncoderBlock, self).__init__(**kwargs)
self.attention = MultiHeadAttention(num_hiddens, num_heads, dropout,
use_bias)
self.addnorm1 = AddNorm(dropout)
self.ffn = PositionWiseFFN(ffn_num_hiddens, num_hiddens)
self.addnorm2 = AddNorm(dropout)
def forward(self, X, valid_len):
### we sum the original input to the attention block and the output from the
### block + we normilize the result using AddNorm
Y = self.addnorm1(X, self.attention(X, X, X, valid_len))
return self.addnorm2(Y, self.ffn(Y))
```
""" Now it comes to the implementation of the entire Transformer encoder. With the Transformer encoder, $n$ blocks of `EncoderBlock` stack up one after another. Because of the residual connection, the embedding layer size $d$ is same as the Transformer block output size. Also note that we multiply the embedding output by $\sqrt{d}$ to prevent its values from being too small. """ (citation from d2l.ai)
```
### from d2l.ai
class Encoder(nn.Block):
"""The base encoder interface for the encoder-decoder architecture."""
def __init__(self, **kwargs):
super(Encoder, self).__init__(**kwargs)
def forward(self, X, *args):
raise NotImplementedError
### from d2l.ai
class TransformerEncoder(Encoder):
def __init__(self, vocab_size, num_hiddens, ffn_num_hiddens,
num_heads, num_layers, dropout, use_bias=False, **kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self.num_hiddens = num_hiddens
self.embedding = nn.Embedding(vocab_size, num_hiddens)
self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
self.blks = nn.Sequential()
for _ in range(num_layers):
self.blks.add(
EncoderBlock(num_hiddens, ffn_num_hiddens, num_heads, dropout,
use_bias))
### the order of steps:
### firstly we apply Positinal Encoding to initial word vectors
### FROM HERE: Then several times do:
### apply Multihead Attention
### apply Add Norm
### apply PositionWise transformation
### apply Add Norm
### and again... Go To FROM HERE
def forward(self, X, valid_len, *args):
X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
for blk in self.blks:
X = blk(X, valid_len)
return X
```
### Decoder
""" During training, the output for the $t$-query could observe all the previous key-value pairs. It results in an different behavior from prediction. Thus, during prediction we can eliminate the unnecessary information by specifying the valid length to be $t$ for the $t^\textrm{th}$ query. """
(citation from d2l.ai)
```
### from d2l.ai
class DecoderBlock(nn.Block):
    """The `i`-th Transformer decoder layer.

    Pipeline: masked self-attention + add&norm, encoder-decoder attention
    + add&norm, position-wise FFN + add&norm.  The index `i` is assigned
    by TransformerDecoder so this block can find its own key/value cache
    inside the shared decoder state.
    """
    def __init__(self, num_hiddens, ffn_num_hiddens, num_heads,
                 dropout, i, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)
        self.i = i
        # Masked self-attention over the (cached) decoder inputs.
        self.attention1 = MultiHeadAttention(num_hiddens, num_heads, dropout)
        self.addnorm1 = AddNorm(dropout)
        # Encoder-decoder attention: queries come from the decoder,
        # keys/values from the encoder outputs.
        self.attention2 = MultiHeadAttention(num_hiddens, num_heads, dropout)
        self.addnorm2 = AddNorm(dropout)
        self.ffn = PositionWiseFFN(ffn_num_hiddens, num_hiddens)
        self.addnorm3 = AddNorm(dropout)
    def forward(self, X, state):
        # state[0], state[1]: encoder outputs and source valid lengths,
        # produced by TransformerDecoder.init_state and shared by every
        # decoder block.
        enc_outputs, enc_valid_len = state[0], state[1]
        # state[2][i] caches the inputs this block has already seen.  It
        # starts as None (see init_state: [None] * num_layers).  During
        # step-by-step prediction, each new token is appended to the cache
        # so self-attention can look at the whole generated prefix.
        if state[2][self.i] is None:
            key_values = X
        else:
            key_values = np.concatenate((state[2][self.i], X), axis=1)
        state[2][self.i] = key_values
        if autograd.is_training():
            # Training feeds the whole target sentence at once, so the
            # causal mask is encoded as per-query valid lengths: query t
            # may only attend to key/value pairs 1..t.
            batch_size, seq_len, _ = X.shape
            # Shape (batch_size, seq_len); the j-th column holds j+1.
            valid_len = np.tile(np.arange(1, seq_len + 1, ctx=X.ctx),
                                (batch_size, 1))
        else:
            # At prediction time X is a single step and the cache already
            # holds only past tokens, so no mask is needed.
            valid_len = None
        # Masked self-attention over the target-side key/value cache.
        X2 = self.attention1(X, key_values, key_values, valid_len)
        Y = self.addnorm1(X, X2)
        # Attend over the encoder outputs (source sentence), limited to
        # each source sample's valid length.
        Y2 = self.attention2(Y, enc_outputs, enc_outputs, enc_valid_len)
        Z = self.addnorm2(Y, Y2)
        return self.addnorm3(Z, self.ffn(Z)), state
### from d2l.ai
class Decoder(nn.Block):
    """Abstract base class: the decoder half of an encoder-decoder model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # Turn the encoder outputs into the decoder's initial state.
        raise NotImplementedError

    def forward(self, X, state):
        raise NotImplementedError
### from d2l.ai
class TransformerDecoder(Decoder):
    """Stack of `num_layers` Transformer decoder blocks plus a final
    dense projection onto the target vocabulary."""

    def __init__(self, vocab_size, num_hiddens, ffn_num_hiddens,
                 num_heads, num_layers, dropout, **kwargs):
        super().__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        # Each block gets its index `i` so it can locate its own
        # key/value cache inside the shared decoder state.
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add(DecoderBlock(num_hiddens, ffn_num_hiddens,
                                       num_heads, dropout, i))
        self.dense = nn.Dense(vocab_size, flatten=False)

    def init_state(self, enc_outputs, env_valid_len, *args):
        # state = [encoder outputs, source valid lengths,
        #          one (initially empty) key/value cache per block].
        return [enc_outputs, env_valid_len, [None] * self.num_layers]

    def forward(self, X, state):
        # Same sqrt(d) embedding scaling as the encoder side.
        out = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
        for blk in self.blks:
            out, state = blk(out, state)
        return self.dense(out), state
### from d2l.ai
### this block couples together TransformerEncoder and TransformerDecoder
###
class EncoderDecoder(nn.Block):
    """Glue module that chains an encoder and a decoder end to end."""

    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # Encode the source batch, derive the decoder's initial state
        # from the encoder outputs, then run the decoder over dec_X.
        enc_outputs = self.encoder(enc_X, *args)
        dec_state = self.decoder.init_state(enc_outputs, *args)
        return self.decoder(dec_X, dec_state)
```
### Training
```
### from d2l.ai
### because of the padding (and valid_length) we have to filter out some entries
class MaskedSoftmaxCELoss(gluon.loss.SoftmaxCELoss):
    """Softmax cross-entropy that ignores padded target positions.

    Padding makes every label row `seq_len` long; positions past a
    sample's `valid_len` get weight 0 before the parent loss runs.
    """
    # `pred` shape: (`batch_size`, `seq_len`, `vocab_size`)
    # `label` shape: (`batch_size`, `seq_len`)
    # `valid_len` shape: (`batch_size`, )
    def forward(self, pred, label, valid_len):
        # Start with weight 1 everywhere, shape (batch_size, seq_len, 1) ...
        weights = np.expand_dims(np.ones_like(label), axis=-1)
        # ... then zero out the weights beyond each sample's valid length.
        weights = npx.sequence_mask(weights, valid_len, True, axis=1)
        return super(MaskedSoftmaxCELoss, self).forward(pred, label, weights)
# Dead-code demo kept from the book: flip `if False` to True to run the
# loss on a toy batch with valid lengths 4, 2 and 0.
if False:
    loss = MaskedSoftmaxCELoss()
    loss(np.ones((3, 4, 10)), np.ones((3, 4)), np.array([4, 2, 0]))
### from d2l.ai
### prevents too high gradients
def grad_clipping(model, theta):
    """Rescale every gradient in-place so the global L2 norm is at most `theta`."""
    # Gluon blocks expose their parameters via collect_params(); scratch
    # models are expected to carry a plain `params` list instead.
    if isinstance(model, gluon.Block):
        params = [p.data() for p in model.collect_params().values()]
    else:
        params = model.params
    total = sum((p.grad ** 2).sum() for p in params)
    norm = math.sqrt(total)
    if norm > theta:
        scale = theta / norm
        for p in params:
            p.grad[:] *= scale
### from d2l.ai
### accumulate results in one array, auxiliary function
class Accumulator:
    """Accumulate running sums over a fixed number of variables."""

    def __init__(self, n):
        # One float slot per tracked quantity.
        self.data = [0.0] * n

    def add(self, *args):
        # Element-wise add the new values onto the running totals.
        self.data = [total + float(value)
                     for total, value in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
### from d2l.ai
def train_s2s_ch9(model, data_iter, lr, num_epochs, device):
    """Train a sequence-to-sequence model with Adam and masked CE loss.

    `data_iter` yields (X, X_vlen, Y, Y_vlen) batches as produced by
    load_data_nmt; loss and throughput are plotted every 10 epochs.
    """
    model.initialize(init.Xavier(), force_reinit=True, ctx=device)
    trainer = gluon.Trainer(model.collect_params(),
                            'adam', {'learning_rate': lr})
    loss = MaskedSoftmaxCELoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[1, num_epochs], ylim=[0, 1.00])
    for epoch in range(1, num_epochs + 1):
        timer = d2l.Timer()
        metric = d2l.Accumulator(2)  # running (loss_sum, num_tokens)
        for batch in data_iter:
            X, X_vlen, Y, Y_vlen = [x.as_in_ctx(device) for x in batch]
            # Teacher forcing: feed Y without its last token and predict
            # Y shifted by one; valid lengths shrink by 1 accordingly.
            Y_input, Y_label, Y_vlen = Y[:, :-1], Y[:, 1:], Y_vlen-1
            with autograd.record():
                Y_hat, _ = model(X, Y_input, X_vlen, Y_vlen)
                l = loss(Y_hat, Y_label, Y_vlen)
            l.backward()
            # Keep the global gradient norm at <= 1 to stabilize training.
            grad_clipping(model, 1)
            num_tokens = Y_vlen.sum()
            # Normalizing the step by token count averages the loss per token.
            trainer.step(num_tokens)
            metric.add(l.sum(), num_tokens)
        if epoch % 10 == 0:
            animator.add(epoch, (metric[0]/metric[1],))
    print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} '
          f'tokens/sec on {str(device)}')
```
### Reading and Processing the Text
```
### from d2l.ai
def download_extract(name, folder=None):
    """Download dataset `name` and extract its zip/tar archive in place.

    Returns the extraction directory: `folder` joined onto the archive's
    directory when given, otherwise the archive path without its extension.

    Raises:
        ValueError: if the archive is neither a zip nor a tar file.
    """
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    # Pick the right archive reader.  Note that for 'x.tar.gz' splitext
    # yields ext == '.gz', which tarfile.open handles transparently.
    if ext == '.zip':
        archive = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        archive = tarfile.open(fname, 'r')
    else:
        # Raise instead of `assert`: asserts vanish under `python -O`.
        raise ValueError('Only zip/tar files can be extracted: %s' % fname)
    # Close the archive handle even if extraction fails (the original
    # leaked the open file object).
    with archive:
        archive.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir
```
""" ... a dataset that contains a set of English sentences with the corresponding French translations. As can be seen that each line contains an English sentence with its French translation, which are separated by a TAB.""" (citation from d2l.ai)
```
### d2l.ai
### the data for the translation are prepared by the d2l.ai project (book)
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
'94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
    """Return the raw English-French corpus ('fra.txt') as one string."""
    data_dir = d2l.download_extract('fra-eng')
    # The corpus is UTF-8; be explicit so reading does not depend on the
    # platform's default locale encoding (e.g. cp1252 on Windows).
    with open(os.path.join(data_dir, 'fra.txt'), 'r', encoding='utf-8') as f:
        return f.read()
raw_text = read_data_nmt()
print(raw_text[0:106])
### from d2l.ai
def preprocess_nmt(text):
    """Lower-case the corpus and put a space before glued , . ! punctuation."""
    def needs_space(i, ch):
        # True when `ch` is punctuation stuck to the previous character.
        return i > 0 and ch in ',.!' and text[i - 1] != ' '
    # Replace the narrow/non-breaking spaces used in the raw file with
    # ordinary spaces, then lower-case everything.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    chars = [' ' + ch if needs_space(i, ch) else ch
             for i, ch in enumerate(text)]
    return ''.join(chars)
### from d2l.ai
text = preprocess_nmt(raw_text)
print(text[0:95])
### from d2l.ai
def tokenize_nmt(text, num_examples=None):
    """Split the tab-separated corpus into (source, target) token lists.

    Each line is "english<TAB>french"; malformed lines (no TAB, or more
    than one TAB) are skipped.  When `num_examples` is given, scanning
    stops once the line index exceeds it.
    """
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        if num_examples and i > num_examples:
            break
        eng, sep, fra = line.partition('\t')
        # `sep` is '' when there is no TAB; a second TAB would survive
        # inside `fra`, so reject that case too (== exactly one TAB).
        if sep and '\t' not in fra:
            source.append(eng.split(' '))
            target.append(fra.split(' '))
    return source, target
### from d2l.ai
source, target = tokenize_nmt(text)
source[0:3], target[0:3]
```
#### Histogram of the number of tokens per sentence
Most sentences have about 5 tokens, and the number of tokens per
sentence is usually below 10–15.
```
### from d2l.ai
d2l.set_figsize()
d2l.plt.hist([[len(l) for l in source], [len(l) for l in target]],
label=['source', 'target'])
d2l.plt.legend(loc='upper right');
```
### Vocabulary
```
### from d2l.ai
def count_corpus(tokens):
    """Return a Counter of token frequencies for a 1D or 2D token list."""
    if tokens and isinstance(tokens[0], list):
        # 2D input: flatten the list of lines into one token stream.
        return collections.Counter(tok for line in tokens for tok in line)
    return collections.Counter(tokens)
### from d2l.ai
class Vocab:
    """Map tokens to contiguous integer ids, with '<unk>' fixed at id 0.

    Tokens are ranked by descending frequency (ties broken alphabetically);
    reserved tokens come right after '<unk>'.  Unknown lookups return 0.
    """

    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        tokens = tokens or []
        reserved_tokens = reserved_tokens or []
        # Count frequencies, flattening a 2D list of token lines first.
        if tokens and isinstance(tokens[0], list):
            stream = [tok for line in tokens for tok in line]
        else:
            stream = tokens
        counter = collections.Counter(stream)
        # Alphabetical pre-sort, then a stable sort by frequency
        # descending, so equal-frequency tokens stay alphabetical.
        pairs = sorted(counter.items())
        pairs.sort(key=lambda item: item[1], reverse=True)
        self.token_freqs = pairs
        self.unk = 0
        uniq_tokens = ['<unk>'] + reserved_tokens
        for token, freq in self.token_freqs:
            if freq >= min_freq and token not in uniq_tokens:
                uniq_tokens.append(token)
        self.idx_to_token = []
        self.token_to_idx = {}
        for token in uniq_tokens:
            self.token_to_idx[token] = len(self.idx_to_token)
            self.idx_to_token.append(token)

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        # A list/tuple of tokens maps element-wise; a single token maps
        # to its id, defaulting to `unk` (0) when absent.
        if isinstance(tokens, (list, tuple)):
            return [self[token] for token in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, indices):
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]
### from d2l.ai
src_vocab = Vocab(source, min_freq=3,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
len(src_vocab)
```
### Loading the dataset
```
### from d2l.ai
def truncate_pad(line, num_steps, padding_token):
    """Force `line` to exactly `num_steps` tokens by trimming or padding."""
    if len(line) >= num_steps:
        return line[:num_steps]
    # Append padding tokens until the requested length is reached.
    return line + [padding_token] * (num_steps - len(line))
### the <pad> is represented by number 1 in Vocabuary
### from d2l.ai
truncate_pad(src_vocab[source[0]], 10, src_vocab['<pad>'])
### from d2l.ai
def build_array(lines, vocab, num_steps, is_source):
    """Convert token lines into a fixed-width id array plus valid lengths.

    Target-side lines (`is_source` False) are wrapped in <bos>/<eos>
    before padding/trimming to `num_steps`.  `valid_len` counts the
    non-<pad> entries per row (so <bos>/<eos> count toward it).
    """
    lines = [vocab[l] for l in lines]
    if not is_source:
        lines = [[vocab['<bos>']] + l + [vocab['<eos>']] for l in lines]
    array = np.array([truncate_pad(
        l, num_steps, vocab['<pad>']) for l in lines])
    valid_len = (array != vocab['<pad>']).sum(axis=1)
    return array, valid_len
### from d2l.ai
def load_array(data_arrays, batch_size, is_train=True):
    """Wrap in-memory arrays in a Gluon DataLoader (shuffled when training)."""
    dataset = gluon.data.ArrayDataset(*data_arrays)
    return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)
### from d2l.ai
### quite importand function to construct dataset for training (data_iter)
### from original data
def load_data_nmt(batch_size, num_steps, num_examples=76800):
    """Build vocabularies and a batched iterator for the fra-eng corpus.

    Returns (src_vocab, tgt_vocab, data_iter); each batch holds
    (src_array, src_valid_len, tgt_array, tgt_valid_len).
    """
    text = preprocess_nmt(read_data_nmt())
    source, target = tokenize_nmt(text, num_examples)
    # Separate vocabularies for each language; rare tokens (<3) map to <unk>.
    src_vocab = Vocab(source, min_freq=3,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
    tgt_vocab = Vocab(target, min_freq=3,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
    src_array, src_valid_len = build_array(
        source, src_vocab, num_steps, True)
    tgt_array, tgt_valid_len = build_array(
        target, tgt_vocab, num_steps, False)
    data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    data_iter = load_array(data_arrays, batch_size)
    return src_vocab, tgt_vocab, data_iter
### from d2l.ai
def try_gpu(i=0):
    """Return gpu(i) if that many GPUs exist, otherwise fall back to cpu()."""
    return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()
```
### Model: training and prediction
```
### the code from d2l.ai
### estimate the execution time for the cell in seconds
start = time.time()
# Small model for a quick demo run; dropout disabled.
num_hiddens, num_layers, dropout, batch_size, num_steps = 32, 2, 0.0, 128, 15
lr, num_epochs, device = 0.001, 350, try_gpu()
ffn_num_hiddens, num_heads = 64, 4  # num_hiddens must be a multiple of num_heads
src_vocab, tgt_vocab, train_iter = load_data_nmt(batch_size, num_steps, 76800)
encoder = TransformerEncoder(
    len(src_vocab), num_hiddens, ffn_num_hiddens, num_heads, num_layers,
    dropout)
# BUG FIX: the decoder embeds and predicts *target* tokens, so its vocab
# size must come from tgt_vocab (the original passed len(src_vocab),
# which breaks whenever the two vocabularies differ in size).
decoder = TransformerDecoder(
    len(tgt_vocab), num_hiddens, ffn_num_hiddens, num_heads, num_layers,
    dropout)
model = EncoderDecoder(encoder, decoder)
train_s2s_ch9(model, train_iter, lr, num_epochs, device)
### estimate the execution time for the cell
end = time.time()
print(end - start)
### from d2l.ai
def predict_s2s_ch9(model, src_sentence, src_vocab, tgt_vocab, num_steps,
                    device):
    """Greedy-decode `src_sentence` with a trained seq2seq model.

    Decoding feeds the model's own argmax prediction back as the next
    input, for at most `num_steps` steps or until <eos> is produced.
    """
    src_tokens = src_vocab[src_sentence.lower().split(' ')]
    enc_valid_len = np.array([len(src_tokens)], ctx=device)
    src_tokens = truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
    enc_X = np.array(src_tokens, ctx=device)
    # expand_dims adds the batch dimension (batch size 1).
    enc_outputs = model.encoder(np.expand_dims(enc_X, axis=0),
                                enc_valid_len)
    dec_state = model.decoder.init_state(enc_outputs, enc_valid_len)
    # Decoding starts from the <bos> token.
    dec_X = np.expand_dims(np.array([tgt_vocab['<bos>']], ctx=device), axis=0)
    predict_tokens = []
    for _ in range(num_steps):
        Y, dec_state = model.decoder(dec_X, dec_state)
        # Greedy choice: the highest-scoring token becomes the next input.
        dec_X = Y.argmax(axis=2)
        py = dec_X.squeeze(axis=0).astype('int32').item()
        if py == tgt_vocab['<eos>']:
            break
        predict_tokens.append(py)
    return ' '.join(tgt_vocab.to_tokens(predict_tokens))
# Translate a few sample sentences with the freshly trained model.
for sentence in ['Go .', 'Wow !', "I'm OK .", 'I won !',
                 'Let it be !', 'How are you ?', 'How old are you ?',
                 'Cats are cats, dogs are dogs .', 'My friend lives in US .',
                 'He is fifty nine years old .', 'I like music and science .',
                 'I love you .', 'The dog is chasing the cat .',
                 'Somewhere on the earth .', 'Do not worry !',
                 'Sit down, please !', 'Not at all !', 'It is very very strange .',
                 'Take it into account .', 'The dark side of the moon .',
                 'Come on !', 'We are the champions, my friends .']:
    print(sentence + ' => ' + predict_s2s_ch9(
        model, sentence, src_vocab, tgt_vocab, num_steps, device))
```
| github_jupyter |
# Support Vector Machine
```
!pip install six
!pip install pandas
!pip install numpy
!pip install sklearn
!pip install matplotlib
!pip install imbalanced-learn
import pandas as pd
import numpy as np
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from imblearn.under_sampling import RandomUnderSampler
train_set = pd.read_csv('train_set_with_features.csv')
```
## Data Prep
```
# Undersample the majority class down to the minority-class count so the
# SVM trains on a balanced set.
undersample = RandomUnderSampler(sampling_strategy='majority')
# Keep only the engineered feature columns in x; the label in y.
x = train_set.drop(['id', 'qid1', 'qid2', 'question1', 'question2', 'is_duplicate', 'Unnamed: 0'], axis=1)
y = train_set[['is_duplicate']]
# Grid search is slow, so parameter tuning uses only 50% of the data.
x_grid_train, x_grid_test, y_grid_train, y_grid_test = train_test_split(x, y, test_size = 0.5, random_state = 42)
# The final model uses an 80/20 train/test split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42)
# Standardize (fit on train only), then undersample the final training set.
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train, y_train = undersample.fit_resample(x_train, y_train)
# Same treatment for the grid-search split.
# NOTE(review): re-fitting the same `scaler` here overwrites the final
# model's fit; x_test above was scaled with the earlier fit, so the grid
# model's later predictions on x_test mix two scalings — verify intent.
x_grid_train = scaler.fit_transform(x_grid_train)
x_grid_test = scaler.transform(x_grid_test)
x_grid_train, y_grid_train = undersample.fit_resample(x_grid_train, y_grid_train)
# GridSearchCV expects 1-D label arrays.
y_grid_train = y_grid_train.to_numpy().reshape(-1)
y_grid_test = y_grid_test.to_numpy().reshape(-1)
```
## Parameter tuning
```
# Grid-search C, gamma and kernel for the SVM (refit=True retrains the
# best combination on the full grid-training set).
param_grid = {'C': [0.1,1, 10, 100], 'gamma': [1,0.1,0.01,0.001],'kernel': ['rbf', 'sigmoid']}
grid = GridSearchCV(SVC(),param_grid,refit=True,verbose=2, n_jobs=3)
grid.fit(x_grid_train ,y_grid_train)
# Show the winning estimator's parameters.
print(grid.best_estimator_)
# Evaluate the tuned model on the held-out 20% test split.
# NOTE(review): x_test was standardized with the final-model scaler fit,
# not the grid-search fit the model was trained under — confirm this
# mismatch is acceptable before trusting these numbers.
grid_predictions = grid.predict(x_test)
print(confusion_matrix(y_test,grid_predictions))
print(classification_report(y_test,grid_predictions))
```
## Fitting model based on tuned parameters
```
# Train the final SVM on more data (80% split) with the grid-searched
# parameters; 'rbf' was picked by manually trying the kernel types.
# NOTE(review): `degree` only affects the 'poly' kernel per the sklearn
# SVC docs, so degree=3 is inert here — confirm and consider dropping.
SVM = SVC(C=10, kernel='rbf', degree=3, gamma=0.01)
clf = SVM.fit(x_train,y_train)
predictions_SVM = SVM.predict(x_test)
# Report precision/recall/F1 on the held-out test split.
print(classification_report(y_test,predictions_SVM))
```
### Process:
1. Normalize feature engineered training data
2. Parameter tuning using GridSearchCV which fits the SVM model using several values of each parameter and evaluating it with a 5-fold cross validation. (10000 rows)
3. Resulting parameters are C = 100, gamma = 0.01.
4. Upon testing, best kernel for those parameters is rbf.
Results suggest that the model is better used to predict that a question is NOT a duplicate.
### Advantages:
1. By using a kernel, there can be separation of the classes even if the data provided is not linearly separable. (https://core.ac.uk/reader/6302770)
2. SVM provides good out of sample generalization as it makes use of regularization which helps to prevent overfitting on the dataset.
3. SVM can classify data points faster than some other models because it only relies on the support vectors to decide the decision boundary and not all of the data points used to train the model (like kNN).
### Disadvantages:
1. Does not perform too well with skewed dataset, as in our case. There would be high variance of the decision boundary as the under represented class can skew the decision boundary by a lot.
https://www.quora.com/Why-does-SVM-not-perform-well-for-imbalanced-data
2. Takes a long time to train the model if the data set is large. "As you mention, storing the kernel matrix requires memory that scales quadratically with the number of data points. Training time for traditional SVM algorithms also scales superlinearly with the number of data points. So, these algorithms aren't feasible for large data sets."
https://stats.stackexchange.com/questions/314329/can-support-vector-machine-be-used-in-large-data
| github_jupyter |
# Problem Statement:
Given profiles representing fictional customers from an e-commerce company. The profiles contain information about the customer, their orders, their transactions ,what payment methods they used and whether the customer is fraudulent or not. We need to predict the given customer is fraudulent or not based on the above factors
##The data given below represents fictional customers from an e-commerce website
The data contain information about the customerEmail,orders,transaction they have made,what payment method they have used,through which card the payment has been done and whether the customer is fraudulent or not
1)The first thing is loading the dataset
```
import pandas as pd
import numpy as np
data1 = pd.read_csv('Customer_DF (1).csv')
data2 = pd.read_csv('cust_transaction_details (1).csv')
#this code is just to make the copy of the dataset
data_copy_1 = pd.read_csv('Customer_DF (1).csv')
data_copy_2 = pd.read_csv('cust_transaction_details (1).csv')
data1.head()
data2.head()
```
Checking whether if they are some null values
```
data1.isnull().sum()
data2.isnull().sum()
```
Printing the columns of both the table
```
data1.columns
data2.columns
```
Shape of the datasets
```
data1.shape
data2.shape
print('total customers records',data1.shape[0], 'and total unique customers',len(data1.customerEmail.unique()))
```
Duplicates customersEmail ID's are....
```
data1[data1['customerEmail'].duplicated()]
```
We see that all of the duplicated records are fraudulent.
So our next job is to remove all duplicate entries from the dataset.
```
# Drop id-like columns that carry no signal.  NOTE(review): "transactionId"
# is listed twice — redundant but harmless.
data2=data2.drop(["transactionId","transactionId","paymentMethodId","orderId","Unnamed: 0"],axis=1)
# Mark emails without a '.' as "f" so they can be dropped below.
data2["customerEmail"]=data2.apply(lambda x:x.customerEmail if("." in x.customerEmail) else "f",axis=1)
# Index by email so the "f" rows can be removed by label.
data2 = data2.set_index("customerEmail")
# Drop the rows whose email lacked a '.'.
data2=data2.drop("f",axis=0)
# Collapse duplicate emails by taking per-email means (truncated to int).
n1=data2.groupby("customerEmail")["paymentMethodRegistrationFailure"].mean().astype(int)
n2=data2.groupby("customerEmail")["transactionAmount"].mean().astype(int)
n3=data2.groupby("customerEmail")["transactionFailed"].mean().astype(int)
data2=data2.drop(["transactionFailed","transactionAmount","paymentMethodRegistrationFailure"],axis=1)
data2=data2.drop(["paymentMethodProvider"],axis=1)
# One-hot encode the remaining categoricals (orderState, paymentMethodType).
data2= pd.get_dummies(data2)
data2
# Per-email means of the one-hot columns (again truncated to int).
m1=data2.groupby("customerEmail")["orderState_failed"].mean().astype(int)
m2=data2.groupby("customerEmail")["orderState_fulfilled"].mean().astype(int)
m3=data2.groupby("customerEmail")["orderState_pending"].mean().astype(int)
l1=data2.groupby("customerEmail")["paymentMethodType_card"].mean().astype(int)
l2=data2.groupby("customerEmail")["paymentMethodType_paypal"].mean().astype(int)
l3=data2.groupby("customerEmail")["paymentMethodType_apple pay"].mean().astype(int)
l4=data2.groupby("customerEmail")["paymentMethodType_bitcoin"].mean().astype(int)
# Re-assemble one deduplicated row per email.
nresult = pd.concat([m1,m2,m3,l1,l2,l3,l4,n1,n2,n3], axis=1, join='inner')
# Drop PII-style columns from the customer table.
data1=data1.drop(["customerPhone","customerDevice","customerIPAddress","customerBillingAddress","Unnamed: 0"],axis=1)
# Encode the target as 0/1 (False/True -> 0/1).
data1['Fraud'] = data1['Fraud'].astype(int)
# Join customer info with the aggregated transaction features.
result = pd.merge(data1,nresult, on='customerEmail')
result.isnull().sum()
# Sanity check: unique emails after the merge.
len(result["customerEmail"].unique())
# The email served only as a join key; drop it before modeling.
result=result.drop(["customerEmail"],axis=1)
result.columns
# One-hot encode whatever categoricals remain in the merged frame.
result2= pd.get_dummies(result)
result2
```
Now exploring the data and analysing it
```
#maximum number of transaction done by the customer
data1[data1['No_Transactions']==data1['No_Transactions'].max()]
#maximum number of orders done by the customer
data1[data1['No_Orders']==data1['No_Orders'].max()]
#maximum number of payments done by the customer
data1[data1['No_Payments']==data1['No_Payments'].max()]
data_copy_2['paymentMethodRegistrationFailure'].value_counts()
import seaborn as sns
import matplotlib.pyplot as plt
sns.countplot(x='paymentMethodRegistrationFailure',data=data_copy_2,palette='hls')
plt.show()
```
Count of payment-method registration failure occurrences
INFERENCE --> A payment-method registration is very unlikely to fail
```
data_copy_2['paymentMethodType'].value_counts()
sns.countplot(x='paymentMethodType',data=data_copy_2,palette='hls')
plt.show()
```
PREFERRED PAYMENT METHOD
INFERENCE --> People prefer Card over other payment methods types
```
data_copy_2['paymentMethodProvider'].value_counts()
sns.countplot(y="paymentMethodProvider",data=data_copy_2)
```
Payment Method Provider
INFERENCE --> JCB 16 DIGIT is widely used followed by VISA 16 DIGIT and rest
```
data_copy_2['transactionFailed'].value_counts()
sns.countplot(x='transactionFailed',data=data_copy_2,orient='vertical',palette='hls')
plt.show()
```
transaction failed
INFERENCE --> after the payment is completed , the probability of transaction to fail is low
```
data_copy_2['orderState'].value_counts()
sns.countplot(x='orderState',data=data_copy_2,orient='vertical',palette='hls')
plt.show()
```
Order State
INFERENCE --> most of the orders were fulfilled
```
result['Fraud'].value_counts()
sns.countplot(x='Fraud',data=data1,orient='vertical',palette='hls')
plt.show()
```
FRAUD
INFERENCE --> the number of fraud cases is nearly half the number of non-fraud cases
```
result
#number of transaction that went fraud and not fraud
plt.scatter(result['No_Transactions'],result['Fraud'],color='#2ca02c')
#number of orders that went fraud and not fraud
plt.scatter(result['No_Orders'],result['Fraud'],color='#2ca02c')
#number of payments that went fraud and not fraud
plt.scatter(result['No_Payments'],result['Fraud'],color='#2ca02c')
sns.catplot(x="No_Payments",y="No_Transactions",data=result,kind="box")
```
INFERENCE --> although there is no strong trend, it seems that as the number of payments increases, the number of transactions tends to decrease
```
sns.barplot(y="No_Payments",x="No_Orders",data=result)
```
INFERENCE --> as the number of orders increases, the number of payments tends to increase
```
data1[data1['No_Payments']==0]
#No. number of fullfilled orders
len(result[result['orderState_fulfilled'] == 1])
#No. number of pending orders
len(result[result['orderState_pending'] == 1])
#No. number of failed orders
len(result[result['orderState_failed'] == 1])
%matplotlib inline
pd.crosstab(result['paymentMethodType_card'],result['Fraud']).plot(kind='bar')
plt.title('paymentMethodProvider for card vs fraud')
plt.xlabel('paymentMethodProvider')
plt.ylabel('Fraud')
```
INFERENCE --> when the payment method is not card then it is seen that the not fraud and fraud cases are nearly same and when card is used the non fraud case is higher than the fraud case
```
%matplotlib inline
pd.crosstab(result['paymentMethodType_paypal'],result['Fraud']).plot(kind='bar')
plt.title('paymentMethodProvider for paypal vs fraud')
plt.xlabel('paymentMethodProvider')
plt.ylabel('Fraud')
```
INFERENCE --> when the payment method is not paypal then the cases of not fraud is higher than cases of fraud and when paypal is used there is no case of fraud at all
```
%matplotlib inline
pd.crosstab(result['paymentMethodType_bitcoin'],result['Fraud']).plot(kind='bar')
plt.title('paymentMethodProvider for bitcoin vs fraud')
plt.xlabel('paymentMethodProvider')
plt.ylabel('Fraud')
```
INFERENCE --> when the payment type is not bitcoin it is found that the cases of non fraud is higher than the cases of fraud and when bitcoin is used it is found that fraud and non fraud cases is almost same
Till Now we have done some EDA for our datasets
Now we have to construct our model to predict if the customer if fraudulent or not
```
result.describe(include='all')
# Features = everything except the target column; labels = 'Fraud' (0/1).
features = result2.drop('Fraud',axis=1) #->independent variables
labels = result2['Fraud'] #->dependent variable
# 80/20 train/test split, then fit a logistic regression baseline.
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(features,labels,test_size=0.20,random_state=0)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train,y_train)
# Predict on the held-out test set.
ypred = lr.predict(X_test)
ypred
from sklearn.metrics import confusion_matrix
# Confusion matrix of predictions vs true labels.
confusion_matrix(y_test,ypred)
#normalizing the data and plotting the confusion matrix
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Oranges):
    """Print and plot the confusion matrix `cm`.

    `classes` supplies the axis tick labels and must follow the same
    order as the rows/columns of `cm`.  With `normalize=True` each row
    is divided by its sum before plotting.
    """
    if normalize:
        # Row-normalize: each row then sums to 1 (per-true-class rates).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.figure(figsize = (10, 10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, size = 24)
    plt.colorbar(aspect=4)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45, size = 14)
    plt.yticks(tick_marks, classes, size = 14)
    # Floats get 2 decimals when normalized, raw counts otherwise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Write each cell's value, white-on-dark for contrast.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt), fontsize = 20,
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.grid(None)
    plt.tight_layout()
    plt.ylabel('True label', size = 18)
    plt.xlabel('Predicted label', size = 18)
cm = confusion_matrix(y_test, ypred)
# BUG FIX: sklearn orders confusion-matrix rows/columns by sorted label
# value, and Fraud was encoded 0 = not fraud, 1 = fraud — so 'not fraud'
# must come first or the plot's axes are mislabeled.
plot_confusion_matrix(cm, classes = ['not fraud', 'fraud'],
                      title = 'FRAUD DETECTION CONFUSION MATRIX')
#finding out the accuracy_score for the model
#the below accuracy for the model is 68%,hyperparameters are yet to be applied
from sklearn.metrics import accuracy_score
print("Logistic regression")
accuracy_score(y_test,ypred) * 100
from sklearn.metrics import classification_report
print(classification_report(y_test,ypred))
# Build a single pipeline that standardizes the features, reduces them with
# PCA and fits a logistic-regression classifier, then grid-searches its
# hyper-parameters.
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
pca = decomposition.PCA(n_components=2)
# liblinear supports both penalties searched below ('l1' and 'l2'); the
# modern default solver (lbfgs) rejects 'l1' and would make every l1
# candidate in the grid search fail.
logistic = linear_model.LogisticRegression(solver='liblinear')
pipe = Pipeline(steps=[('sc', sc),
                       ('pca', pca),
                       ('logistic', logistic)])

# Candidate number of PCA components: 1 .. n_features.
n_components = list(range(1, features.shape[1] + 1, 1))
# Candidate values of the (inverse) regularization strength.
C = np.logspace(-4, 4, 15)
# Candidate regularization penalties.
penalty = ['l1', 'l2']
# Pipeline step parameters are addressed with the '<step>__<param>' syntax.
parameters = dict(pca__n_components=n_components,
                  logistic__C=C,
                  logistic__penalty=penalty)

clf = GridSearchCV(pipe, parameters,verbose = True,n_jobs=-1,scoring='accuracy')
# Fit the grid search on the full feature matrix.
clf.fit(features, labels)
print('Best Penalty:', clf.best_estimator_.get_params()['logistic__penalty'])
print('Best C:', clf.best_estimator_.get_params()['logistic__C'])
print('Best Number Of Components:', clf.best_estimator_.get_params()['pca__n_components'])
# Best hyper-parameter combination found by the grid search.
clf.best_params_
# Mean cross-validated accuracy of the best combination.
clf.best_score_
clf.best_estimator_
# The fitted logistic-regression step of the best pipeline.
clf.best_estimator_.get_params()['logistic']
# cross_val_score splits the whole data set into train/test folds and
# performs cross-validation (cv=3 folds here).
cross_val = cross_val_score(clf,features,labels,cv=3,scoring='accuracy',n_jobs=-1)
# One accuracy per fold, as percentages.
cross_val * 100
print('the mean accuracy of our model is',(cross_val * 100).mean())
print('the maximum accuracy of our model is',max(cross_val * 100))
# Predict on the held-out test split with the tuned model.
ypred_new = clf.predict(X_test)
ypred_new
accuracy_score(y_test,ypred_new) * 100
```
# INFERENCES
* There is a very less probability of payment to fail
* People prefer Card over other payment methods types
* JCB 16 DIGIT is widely used followed by VISA 16 DIGIT and rest
* After the payment is completed , the probability of transaction to fail is low
* It is found that most of the orders were fulfilled
* It is seen that the cases of fraud is nearly half of those that are not fraud
* It seems that as the no. of payments increases, the no. of transactions tends to decrease
* As the no.of orders increase the no.of payments tend to increase
* When the payment method is not card then it is seen that the not fraud and fraud cases are nearly same and when card is used the non fraud case is higher than the fraud case
* When the payment method is not paypal then the cases of not fraud is higher than cases of fraud and when paypal is used there is no case of fraud at all
* When the payment type is not bitcoin it is found that the cases of non fraud is higher than the cases of fraud and when bitcoin is used it is found that fraud and non fraud cases is almost same
# Model_Selection
* Initially , we decided to use Logistic Regression as it seems to appear as a binary problem and achieved an accuracy of 75%
*
* Implemented a pipeline such that it will Normalize the data and perform PCA
*
* Applied CrossValidation and fitting the model via GridSearch
*
* The mean accuracy of our model is 77.13536848596978 %
The highest accuracy of our model is 82.97872340425532 %
The testing accuracy of our model is 78.57142857142857 %
```
# Applying an ANN (Keras MLP) to the same fraud-detection features.
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
# Fit the scaler on the training split only, then reuse its statistics for
# the test split.  The original called fit_transform on X_test as well,
# which re-fits the scaler on test data (data leakage and inconsistent
# scaling between the two splits).
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Flatten, LeakyReLU, BatchNormalization, Dropout
from keras.activations import relu, sigmoid
def create_model(layers,activation):
    """Build a Keras MLP for binary classification.

    `layers` lists the hidden-layer widths and `activation` names the
    activation applied after every hidden layer; each hidden layer is
    followed by 30% dropout and the output is a single sigmoid unit
    compiled with binary cross-entropy and the Adam optimizer.

    NOTE(review): relies on the notebook-global `X_train` for the input
    dimensionality — confirm it is defined before the grid search runs.
    """
    model = Sequential()
    for position, width in enumerate(layers):
        # Only the first hidden layer declares the input dimensionality.
        if position == 0:
            model.add(Dense(width, input_dim=X_train.shape[1]))
        else:
            model.add(Dense(width))
        model.add(Activation(activation))
        model.add(Dropout(0.3))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Wrap the Keras builder so it can be used with sklearn's GridSearchCV.
model = KerasClassifier(build_fn=create_model,verbose=0)
# Candidate architectures (hidden-layer widths) and activations.
layers = [[20,15],[40,20,15],[45,25,20,15]]
activation = ['sigmoid','relu']
param_grid = dict(layers=layers, activation=activation, batch_size = [64,128,150,175], epochs=[35])
grid = GridSearchCV(estimator=model, param_grid=param_grid,cv=3)
grid_result = grid.fit(X_train, y_train)
# Evaluate the best network on the held-out test split.
ypred_new = grid.predict(X_test)
ypred_new
accuracy_score(ypred_new,y_test)
grid_result.best_params_
grid_result.best_score_
```
| github_jupyter |
```
# Remove any stale copy and clone a fresh HMP accelerometer data set.
!rm -Rf HMP_Dataset
!git clone https://github.com/wchill/HMP_Dataset
#!ls HMP_Dataset/Brush_teeth
import os
# Get the list of folders/files in folder HMP_Dataset.
file_list = os.listdir('HMP_Dataset')
# Keep only the entries containing '_' — the per-activity data folders.
file_list_filtered = [s for s in file_list if '_' in s]
import pandas as pd
# Build one pandas data frame holding all files of all activity folders.
data_frames = []
for category in file_list_filtered:
    data_files = os.listdir('HMP_Dataset/'+category)
    # Create a temporary pandas data frame for each data file.
    for data_file in data_files:
        print(data_file)
        # Each file holds space-separated x/y/z accelerometer readings.
        temp_df = pd.read_csv('HMP_Dataset/'+category+'/'+data_file, header=None, names=['x','y','z'], sep=' ')
        # Number of records, taken as the length of the first column.
        length = len(temp_df.iloc[:,0])
        # "source" records the originating CSV file for every row.
        temp_df['source'] = pd.Series([data_file]*length, index=temp_df.index)
        # "class" records the activity folder (the label) for every row.
        temp_df['class'] = pd.Series([category]*length, index=temp_df.index)
        # Append to the list of per-file frames.
        data_frames = data_frames + [temp_df]
# Concatenate all small frames into one big one (row indices repeat).
df = pd.concat(data_frames)
from sklearn import preprocessing

# Integer-encode the activity labels.  `Series.as_matrix()` was removed in
# pandas 1.0; `to_numpy()` is the supported replacement and returns the
# same ndarray, so this cell now runs on modern pandas as well.
le = preprocessing.LabelEncoder()
le.fit(df['class'].to_numpy())
encoded_classes = le.transform(df['class'].to_numpy())
encoded_classes
# NOTE(review): `df` carries duplicated row indices (it is a concat of many
# per-file frames) while the frame below has a fresh RangeIndex; the join is
# index-based — confirm this alignment is the intended one.
df_encoded = df.join(pd.DataFrame(encoded_classes,columns=['class_factorized']))
df_encoded
# One-hot encode the integer labels (one column per activity class).
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
one_hot_encoded_classes = ohe.fit_transform(encoded_classes.reshape(-1, 1)).toarray()
one_hot_encoded_classes
display(pd.DataFrame(one_hot_encoded_classes))
df_one_hot = pd.DataFrame(one_hot_encoded_classes,dtype=int)
df_one_hot
#df = pd.concat([df, dfOneHot], axis=1)
df_final = df_encoded.join(df_one_hot)
from sklearn import preprocessing
from pandas import Series
# Standardize each axis column (zero mean, unit variance) individually.
x_scaled = preprocessing.scale(df_encoded['x'])
y_scaled = preprocessing.scale(df_encoded['y'])
z_scaled = preprocessing.scale(df_encoded['z'])
df_encoded['x_scaled'] = Series(x_scaled, index=df_encoded.index)
df_encoded['y_scaled'] = Series(y_scaled, index=df_encoded.index)
df_encoded['z_scaled'] = Series(z_scaled, index=df_encoded.index)
df_encoded
# Alternative: scale all three columns at once via sklearn_pandas.
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import StandardScaler
mapper = DataFrameMapper([
    (['x','y','z'], StandardScaler())
])
# Returns a plain ndarray, not a DataFrame.
df_scaled = mapper.fit_transform(df)
df_scaled
!pip install ibex
# Third alternative: a column-wise preprocessing pipeline with ibex, which
# wraps sklearn transformers so they operate on named DataFrame columns.
from ibex.sklearn.preprocessing import StandardScaler
from ibex.sklearn.preprocessing import LabelEncoder
from ibex.sklearn.preprocessing import OneHotEncoder
from ibex import trans
# '+' concatenates the per-column transformer outputs side by side.
# NOTE(review): 'functiontransformer_0' looks like an ibex-generated column
# name produced by the LabelEncoder step — confirm against the ibex docs.
pipeline = (trans(LabelEncoder(), in_cols='class') +
            trans(StandardScaler(), in_cols=['x', 'y', 'z']) +
            trans(OneHotEncoder(), in_cols=['functiontransformer_0'][0]) +
            trans(None, in_cols='source')
            )
df_scaled = pipeline.fit_transform(df)
df_scaled
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# 3-D scatter of all samples, colored by encoded class.
# NOTE(review): `df_pd_tools` is not defined anywhere in this notebook —
# presumably it should be `df_encoded` (which has the same columns); verify.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df_pd_tools['x'], df_pd_tools['y'], df_pd_tools['z'],c=df_pd_tools['class_factorized'])
ax.view_init(30, 130)
plt.show()
#for angle in range(0, 360):
# ax.view_init(30, angle)
# plt.draw()
# plt.pause(.001)
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# Same scatter restricted to two activities for easier visual separation.
df_pd_tools_filtered1 = df_pd_tools[df_pd_tools['class']=='Standup_chair']
df_pd_tools_filtered2 = df_pd_tools[df_pd_tools['class']=='Pour_water']
df_pd_tools_filtered = pd.concat([df_pd_tools_filtered1,df_pd_tools_filtered2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df_pd_tools_filtered['x'], df_pd_tools_filtered['y'], df_pd_tools_filtered['z'],c=df_pd_tools_filtered['class_factorized'])
ax.view_init(30, 130)
plt.show()
```
| github_jupyter |
## Divide y vencerás
Este es un método de diseño de algoritmos que se basa en *subdividir* el problema en sub-problemas, resolverlos *recursivamente*, y luego *combinar* las soluciones de los sub-problemas para construir la solución del problema original. Es necesario que los subproblemas tengan la misma estructura que el problema original, de modo que se pueda aplicar la recursividad.
## Ejemplo: Obtener la subsecuencia de suma máxima
Dada una sucesión de enteros $$ a_{1}, a_{2}, …, a_{n}$$
encontrar e identificar el valor máximo de la suma de una porción consecutiva de la secuencia.
Cuando todos los enteros son negativos entendemos que la subsecuencia de suma máxima es la vacía, siendo su suma cero.
Ejemplos:
1. { -2, 11, -4, 13, -5, 2}
2. {1, -3, 4, -2, -1, 6}
Si revisamos el problema de forma intuitiva, mediante el uso de esquemas por ejemplo, para 1 y 2 las subsecuencias de suma máxima están marcadas en negrita:
1. { -2, **11, -4, 13**, -5, 2}
y la suma de esta es 20.
2. {1, -3, **4, -2, -1, 6**}
y la suma de esta es 7.
Esta solución intuitiva utiliza del orden de $n^2$ comparaciones: $\Theta(n^2)$.
## Usando divide y vencerás
Supongamos ahora que la sucesión dada es {4, -3, 5, -2, -1, 2, 6, -2}. Dividiremos esta secuencia en dos partes iguales, como se muestra en la figura a continuación.

Entonces la subsecuencia de suma máxima puede aparecer en una de estas tres formas:
* *Caso 1*: está totalmente incluida en la primera mitad.
* *Caso 2*: está totalmente incluida en la segunda mitad.
* *Caso 3*: comienza en la primera mitad, pero termina en la segunda.
La figura anterior muestra que podemos calcular, para cada elemento de la primera mitad, la suma de la subsecuencia contigua que termina en el elemento situado más a la derecha. Hacemos esto con un recorrido de derecha a izquierda, partiendo del elemento situado entre las dos mitades. Análogamente, podemos calcular la suma de todas las subsecuencias contiguas que comiencen con el primer elemento de la segunda mitad. Entonces se puede combinar estas dos subsecuencias para formar la subsecuencia de suma máxima que cruza la línea divisoria. En el ejemplo de la figura, la secuencia resultante va desde el primer elemento de la primera mitad hasta el penúltimo elemento de la segunda mitad. La suma total es la suma de las dos subsecuencias, 4+7 = 11. Esto nos muestra que el caso 3 se puede resolver en tiempo lineal.
Tanto para el caso 1 como el caso 2, tenemos el mismo problema original, pero para una secuencia de tamaño $n/2$, es decir el mismo $\Theta(n^2)$ (recordemos que se obvían los coeficientes).
Sin embargo, podemos aplicar la misma estrategia de división por la mitad en los casos 1 y 2. Podemos continuar dividiendo hasta que sea imposible dividir más. Esto equivale, más concretamente, a resolver los casos 1 y 2 recursivamente. Se puede demostrar que esto reduce el tiempo de ejecución por debajo de cuadrático, pues los ahorros se acumulan a lo largo de la ejecución del algoritmo. Mostramos a continuación un esquema del algoritmo:
1. Calcular recursivamente la subsecuencia de suma máxima que está totalmente contenida en la primera mitad.
2. Calcular recursivamente la subsecuencia de suma máxima que está totalmente contenida en la segunda mitad.
3. Calcular, usando dos bucles consecutivos, la subsecuencia de suma máxima que comienza en la primera mitad pero termina en la segunda.
4. Elegir la mayor de las tres sumas.
El método resultante aparece a continuación. Un algoritmo recursivo nos exige definir un caso base. Naturalmente, cuando el dato es un solo elemento, no usamos recursión.
A la llamada recursiva se le pasa el vector de entrada junto con los límites izquierdo y derecho, los cuales delimitan la porción de vector sobre la que se está operando. Una rutina guía de una línea inicializa los parámetros límite a 0 y N - 1.
```
def max3(a, b, c):
    """Return the largest of the three values a, b and c.

    The original implementation seeded a manual scan with the sentinel
    -1000, which silently returned -1000 whenever all three arguments were
    smaller than that; delegating to the builtin max() removes the bug and
    works for any comparable values.
    """
    return max(a, b, c)
def maxSumaRec(a, izq, der):
    """Return the maximum sum of a contiguous subsequence of a[izq..der].

    Divide-and-conquer: the best subsequence lies entirely in the left half,
    entirely in the right half, or crosses the midpoint.  The empty
    subsequence (sum 0) is a valid answer, so the result is never negative.

    Fixes over the original:
    * the midpoint uses integer floor division instead of int((izq+der)/2),
      which goes through a float and can lose precision for huge indices;
    * the builtin max replaces the ad-hoc max3 helper, whose -1000 sentinel
      made it unsafe for general inputs (here all candidates are >= 0, so
      the returned values are unchanged).
    """
    # BASE CASE: a single element (an empty subsequence contributes 0).
    if izq == der:
        return a[izq] if a[izq] > 0 else 0

    centro = (izq + der) // 2

    # Cases 1 and 2: best sum fully inside each half, solved recursively.
    max_sum_izq = maxSumaRec(a, izq, centro)
    max_sum_der = maxSumaRec(a, centro + 1, der)

    # Case 3, left part: best sum ending exactly at `centro`
    # (scan right-to-left from the divider).
    max_izq_borde = suma = 0
    for i in range(centro, izq - 1, -1):
        suma += a[i]
        if suma > max_izq_borde:
            max_izq_borde = suma

    # Case 3, right part: best sum starting exactly at `centro + 1`.
    max_der_borde = suma = 0
    for j in range(centro + 1, der + 1):
        suma += a[j]
        if suma > max_der_borde:
            max_der_borde = suma

    return max(max_sum_izq, max_sum_der, max_izq_borde + max_der_borde)
# Smoke-test the three sequences discussed above; per the text, the
# expected outputs are 11, 20 and 7 respectively.
a = [4,-3,5,-2,-1,2,6,-2]
print(maxSumaRec(a,0,len(a)-1))
b = [-2, 11, -4, 13, -5, 2]
print(maxSumaRec(b,0,len(b)-1))
c = [1, -3, 4, -2, -1, 6]
print(maxSumaRec(c,0,len(c)-1))
```
### Ejercicio de divide y vencerás
Si tenemos dos números complejos
$$
\begin{align}
u&=a+bi\\
v&=c+di
\end{align}
$$
podemos calcular su producto
$$
uv=(ac-bd)+(ad+bc)i
$$
haciendo 4 multiplicación de números reales.
Encuentre una forma de realizar este cálculo haciendo solo 3 multiplicaciones de números reales.
## Referencias
1. Weiss, M. A., & Marroquín, O. (2000). Estructuras de datos en JavaTM. Addison-Wesley.
2. Apuntes de Patricio Poblete (U. de Chile) disponible en: https://github.com/ivansipiran/AED-Apuntes#Algoritmos-y-Estructuras-de-Datos (visitado en mayo 2021)
| github_jupyter |
# Sequence classification model for IMDB Sentiment Analysis
(c) Deniz Yuret, 2019
* Objectives: Learn the structure of the IMDB dataset and train a simple RNN model.
* Prerequisites: [RNN models](60.rnn.ipynb)
```
# Set display width, load packages, import symbols
ENV["COLUMNS"] = 72
using Statistics: mean
using IterTools: ncycle
using Knet: Knet, AutoGrad, RNN, param, dropout, minibatch, nll, accuracy, progress!, adam, save, load, gc
# Set constants for the model and training
# (these globals are read by every later cell in this notebook).
EPOCHS=3 # Number of training epochs
BATCHSIZE=64 # Number of instances in a minibatch
EMBEDSIZE=125 # Word embedding size
NUMHIDDEN=100 # Hidden layer size
MAXLEN=150 # maximum size of the word sequence, pad shorter sequences, truncate longer ones
VOCABSIZE=30000 # maximum vocabulary size, keep the most frequent 30K, map the rest to UNK token
NUMCLASS=2 # number of output classes
DROPOUT=0.5 # Dropout rate
LR=0.001 # Learning rate
BETA_1=0.9 # Adam optimization parameter
BETA_2=0.999 # Adam optimization parameter
EPS=1e-08 # Adam optimization parameter
```
## Load and view data
```
include(Knet.dir("data","imdb.jl")) # defines imdb loader
@doc imdb
# Load train/test splits plus the word->id dictionary.
@time (xtrn,ytrn,xtst,ytst,imdbdict)=imdb(maxlen=MAXLEN,maxval=VOCABSIZE);
println.(summary.((xtrn,ytrn,xtst,ytst,imdbdict)));
# Words are encoded with integers
rand(xtrn)'
# Each word sequence is padded or truncated to length 150
length.(xtrn)'
# Define a function that can print the actual words:
# Invert the dictionary into an id->word array, then patch the three
# reserved ids at the top of the vocabulary.
imdbvocab = Array{String}(undef,length(imdbdict))
for (k,v) in imdbdict; imdbvocab[v]=k; end
imdbvocab[VOCABSIZE-2:VOCABSIZE] = ["<unk>","<s>","<pad>"]
# Render an encoded review `x` as readable text; `y` selects the prefix
# (0 = unlabeled "Sample", 1 = "Negative", 2 = "Positive").
function reviewstring(x,y=0)
    x = x[x.!=VOCABSIZE] # remove pads (VOCABSIZE is the "<pad>" token id)
    """$(("Sample","Negative","Positive")[y+1]) review:\n$(join(imdbvocab[x]," "))"""
end
# Hit Ctrl-Enter to see random reviews:
r = rand(1:length(xtrn))
println(reviewstring(xtrn[r],ytrn[r]))
# Here are the labels: 1=negative, 2=positive
ytrn'
```
## Define the model
```
# Embedding -> GRU -> linear head for sequence classification.
struct SequenceClassifier; input; rnn; output; pdrop; end
# Convenience constructor: sizes in, parameter arrays out.
SequenceClassifier(input::Int, embed::Int, hidden::Int, output::Int; pdrop=0) =
    SequenceClassifier(param(embed,input), RNN(embed,hidden,rnnType=:gru), param(output,hidden), pdrop)
# Forward pass over a batch of equal-length sequences; only the hidden
# state at the final time step feeds the output layer.
function (sc::SequenceClassifier)(input)
    embed = sc.input[:, permutedims(hcat(input...))]
    embed = dropout(embed,sc.pdrop)
    hidden = sc.rnn(embed)
    hidden = dropout(hidden,sc.pdrop)
    return sc.output * hidden[:,:,end]
end
# Loss form: negative log likelihood of the gold labels.
(sc::SequenceClassifier)(input,output) = nll(sc(input),output)
```
## Experiment
```
dtrn = minibatch(xtrn,ytrn,BATCHSIZE;shuffle=true)
dtst = minibatch(xtst,ytst,BATCHSIZE)
length.((dtrn,dtst))
# For running experiments: either train a fresh model (answer 'y') or
# download and load a pre-trained one.
# NOTE(review): closes over the global `dtrn` rather than taking the data
# as an argument.
function trainresults(file,maker; o...)
    if (print("Train from scratch? "); readline()[1]=='y')
        model = maker()
        progress!(adam(model,ncycle(dtrn,EPOCHS);lr=LR,beta1=BETA_1,beta2=BETA_2,eps=EPS))
        Knet.save(file,"model",model)
        GC.gc(true) # To save gpu memory
    else
        isfile(file) || download("http://people.csail.mit.edu/deniz/models/tutorial/$file",file)
        model = Knet.load(file,"model")
    end
    return model
end
# Factory producing a fresh classifier with the notebook's hyperparameters.
maker() = SequenceClassifier(VOCABSIZE,EMBEDSIZE,NUMHIDDEN,NUMCLASS,pdrop=DROPOUT)
# model = maker()
# nll(model,dtrn), nll(model,dtst), accuracy(model,dtrn), accuracy(model,dtst)
# (0.69312066f0, 0.69312423f0, 0.5135817307692307, 0.5096153846153846)
model = trainresults("imdbmodel132.jld2",maker);
# ┣████████████████████┫ [100.00%, 1170/1170, 00:15/00:15, 76.09i/s]
# nll(model,dtrn), nll(model,dtst), accuracy(model,dtrn), accuracy(model,dtst)
# (0.05217469f0, 0.3827392f0, 0.9865785256410257, 0.8576121794871795)
```
## Playground
```
# Map a single encoded review through the model to a label string.
predictstring(x)="\nPrediction: " * ("Negative","Positive")[argmax(Array(vec(model([x]))))]
# Ids >= UNK are the reserved tokens; unknown words map to UNK.
UNK = VOCABSIZE-2
str2ids(s::String)=[(i=get(imdbdict,w,UNK); i>=UNK ? UNK : i) for w in split(lowercase(s))]
# Here we can see predictions for random reviews from the test set; hit Ctrl-Enter to sample:
r = rand(1:length(xtst))
println(reviewstring(xtst[r],ytst[r]))
println(predictstring(xtst[r]))
# Here the user can enter their own reviews and classify them:
println(predictstring(str2ids(readline(stdin))))
```
| github_jupyter |
$\newcommand{\mb}[1]{\mathbf{ #1 }}$
$\newcommand{\bb}[1]{\mathbb{ #1 }}$
$\newcommand{\bs}[1]{\boldsymbol{ #1 }}$
$\newcommand{\norm}[1]{\left\Vert #1 \right\Vert}$
$\newcommand{\der}[2]{\frac{ \mathrm{d} #1 }{ \mathrm{d} #2 }}$
$\newcommand{\derp}[2]{\frac{ \partial #1 }{ \partial #2 }}$
$\newcommand{\R}{\bb{R}}$
# Learning Dynamics
```
from matplotlib.pyplot import show, subplots
```
## Robotic Systems
Let $\mathcal{Q} \subseteq \R^n$ be a configuration space. Consider a robotic system governed by:
\begin{equation}
\mb{D}(\mb{q})\ddot{\mb{q}} + \mb{C}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \mb{G}(\mb{q}) = \mb{B}\mb{u},
\end{equation}
for generalized coordinates $\mb{q} \in \mathcal{Q}$, coordinate rates $\dot{\mb{q}} \in \R^n$, actions $\mb{u} \in \R^m$, inertia matrix function $\mb{D}: \mathcal{Q} \to \bb{S}^n_{++}$ (the space of $n \times n$ positive definite matrices), Coriolis terms $\mb{C}: \mathcal{Q} \times \R^n \to \R^{n \times n}$, potential terms $\mb{G}: \mathcal{Q} \to \R^n$, and static actuation matrix $\mb{B} \in \R^{n \times m}$. Assume $m \leq n$ and $\mb{B}$ is full rank.
### Inverted Pendulum
```
from numpy import array, identity, linspace
from core.controllers import FBLinController, LQRController
from core.systems import InvertedPendulum
# True ("oracle") pendulum parameters.
ip = InvertedPendulum(m=0.25, l=0.5)
Q_ip = identity(2)
R_ip = identity(1)
# LQR gains wrapped in a feedback-linearizing controller (project-local API).
lqr_ip = LQRController.build(ip, Q_ip, R_ip)
fb_lin_ip = FBLinController(ip, lqr_ip)
# Simulate 10 s from theta = 1 rad at rest.
x_0_ip = array([1, 0])
ts_ip = linspace(0, 10, 1000 + 1)
xs_ip, _ = ip.simulate(x_0_ip, fb_lin_ip, ts_ip)
fig_ip, ax_ip = subplots(figsize=(6, 4))
ax_ip.plot(ts_ip, xs_ip[:, 0], linewidth=3, label='Oracle')
ax_ip.grid()
ax_ip.legend(fontsize=16)
ax_ip.set_title('Inverted Pendulum', fontsize=16)
ax_ip.set_xlabel('$t$ (sec)', fontsize=16)
ax_ip.set_ylabel('$\\theta$ (rad)', fontsize=16)
show()
```
### Double Inverted Pendulum
```
from core.systems import DoubleInvertedPendulum
# True ("oracle") double-pendulum parameters.
dip = DoubleInvertedPendulum(m_1=0.25, m_2=0.25, l_1=0.5, l_2=0.5)
Q_dip = identity(4)
R_dip = identity(2)
lqr_dip = LQRController.build(dip, Q_dip, R_dip)
fb_lin_dip = FBLinController(dip, lqr_dip)
# Simulate 10 s from theta_1 = 1 rad, everything else at rest.
x_0_dip = array([1, 0, 0, 0])
ts_dip = linspace(0, 10, 1000 + 1)
xs_dip, _ = dip.simulate(x_0_dip, fb_lin_dip, ts_dip)
# One axis per joint angle.
fig_dip, (ax_dip_1, ax_dip_2) = subplots(2, figsize=(6, 8))
ax_dip_1.set_title('Double Inverted Pendulum', fontsize=16)
ax_dip_1.plot(ts_dip, xs_dip[:, 0], linewidth=3, label='Oracle')
ax_dip_1.grid()
ax_dip_1.legend(fontsize=16)
ax_dip_1.set_xlabel('$t$ (sec)', fontsize=16)
ax_dip_1.set_ylabel('$\\theta_1$ (rad)', fontsize=16)
ax_dip_2.plot(ts_dip, xs_dip[:, 1], linewidth=3, label='Oracle')
ax_dip_2.grid()
ax_dip_2.legend(fontsize=16)
ax_dip_2.set_xlabel('$t$ (sec)', fontsize=16)
ax_dip_2.set_ylabel('$\\theta_2$ (rad)', fontsize=16)
show()
```
## Uncertain Robotic Systems
Suppose $\mb{D}$, $\mb{C}$, $\mb{G}$, and $\mb{B}$ are unknown, and instead we have access to corresponding estimates $\hat{\mb{D}}$, $\hat{\mb{C}}$, $\hat{\mb{G}}$, and $\hat{\mb{B}}$ satisfying:
\begin{equation}
\hat{\mb{D}}(\mb{q})\ddot{\mb{q}} + \hat{\mb{C}}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \hat{\mb{G}}(\mb{q}) = \hat{\mb{B}}\mb{u}.
\end{equation}
Assume that $\hat{\mb{B}}$ is also full rank.
The system dynamics can be in terms of the estimated terms as:
\begin{equation}
\der{}{t} \begin{bmatrix} \mb{q} \\ \dot{\mb{q}} \end{bmatrix} = \begin{bmatrix} \dot{\mb{q}} \\ -\hat{\mb{D}}(\mb{q})^{-1}(\hat{\mb{C}}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \hat{\mb{G}}(\mb{q})) \end{bmatrix} + \begin{bmatrix} \mb{0}_{n \times m} \\ \hat{\mb{D}}(\mb{q})^{-1}\hat{\mb{B}} \end{bmatrix} \mb{u} + \begin{bmatrix} \mb{0}_n \\ \hat{\mb{D}}(\mb{q})^{-1}(\hat{\mb{C}}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \hat{\mb{G}}(\mb{q}))-\mb{D}(\mb{q})^{-1}(\mb{C}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \mb{G}(\mb{q})) \end{bmatrix} + \begin{bmatrix} \mb{0}_{n \times m} \\ \mb{D}(\mb{q})^{-1}\mb{B} - \hat{\mb{D}}(\mb{q})^{-1}\hat{\mb{B}} \end{bmatrix} \mb{u}
\end{equation}
### Inverted Pendulum
```
# Controller designed from deliberately mis-estimated parameters
# (m, l slightly below the true values), applied to the TRUE plant `ip`.
ip_est = InvertedPendulum(m=0.24, l=0.48)
lqr_ip_est = LQRController.build(ip_est, Q_ip, R_ip)
fb_lin_ip_est = FBLinController(ip_est, lqr_ip_est)
xs_ip, us_ip = ip.simulate(x_0_ip, fb_lin_ip_est, ts_ip)
ax_ip.plot(ts_ip, xs_ip[:, 0], linewidth=3, label='Estimated')
ax_ip.legend(fontsize=16)
fig_ip
```
### Double Inverted Pendulum
```
# Controller designed from deliberately mis-estimated parameters
# (masses and lengths slightly below truth), applied to the TRUE plant `dip`.
dip_est = DoubleInvertedPendulum(m_1=0.24, m_2=0.24, l_1=0.48, l_2=0.48)
lqr_dip_est = LQRController.build(dip_est, Q_dip, R_dip)
fb_lin_dip_est = FBLinController(dip_est, lqr_dip_est)
xs_dip, us_dip = dip.simulate(x_0_dip, fb_lin_dip_est, ts_dip)
# Label typo fixed ('Esimated' -> 'Estimated'), matching the single-pendulum
# cell's legend.
ax_dip_1.plot(ts_dip, xs_dip[:, 0], linewidth=3, label='Estimated')
ax_dip_1.legend(fontsize=16)
ax_dip_2.plot(ts_dip, xs_dip[:, 1], linewidth=3, label='Estimated')
ax_dip_2.legend(fontsize=16)
fig_dip
```
## Learning Dynamics
```
# Silence TensorFlow's info/warning log spam during Keras training.
# NOTE(review): `tensorflow.logging` is a TensorFlow 1.x API — this cell
# will not run under TF 2.x; confirm the pinned TF version.
from tensorflow.logging import ERROR, set_verbosity
set_verbosity(ERROR)
```
### Inverted Pendulum
```
from core.dynamics import LearnedFBLinDynamics
from core.learning.keras import KerasResidualAffineModel
# Residual-model sizes for the single pendulum (3 inputs, 1 output).
d_drift_in_ip = 3
d_act_in_ip = 3
d_hidden_ip = 20
d_out_ip = 1
res_model_ip = KerasResidualAffineModel(d_drift_in_ip, d_act_in_ip, d_hidden_ip, 1, d_out_ip)
# Learned correction stacked on top of the estimated dynamics.
ip_learned = LearnedFBLinDynamics(ip_est, res_model_ip)
# Fit the residual model on the episode recorded with the estimated controller.
data = ip_learned.process_episode(xs_ip, us_ip, ts_ip)
ip_learned.fit(data, num_epochs=10, validation_split=0.1)
# Compare true state derivatives against the learned model's predictions.
x_dots = array([ip.eval_dot(x, u, t) for x, u, t in zip(xs_ip, us_ip, ts_ip)])
_, (ax_ip_1, ax_ip_2) = subplots(2, figsize=(6, 8))
ax_ip_1.set_title('Inverted Pendulum', fontsize=16)
ax_ip_1.plot(ts_ip[:-1], x_dots[:, 0], linewidth=3, label='True')
ax_ip_1.grid()
ax_ip_1.set_xlabel('$t$ (sec)', fontsize=16)
ax_ip_1.set_ylabel('$\\dot{\\theta}$ (rad / sec)', fontsize=16)
ax_ip_2.plot(ts_ip[:-1], x_dots[:, 1], linewidth=3, label='True')
ax_ip_2.grid()
ax_ip_2.set_xlabel('$t$ (sec)', fontsize=16)
ax_ip_2.set_ylabel('$\\ddot{\\theta}$ (rad / sec$^2$)', fontsize=16)
x_dots = array([ip_learned.eval_dot(x, u, t) for x, u, t in zip(xs_ip, us_ip, ts_ip)])
ax_ip_1.plot(ts_ip[:-1], x_dots[:, 0], linewidth=3, label='Learned')
ax_ip_1.legend(fontsize=16)
ax_ip_2.plot(ts_ip[:-1], x_dots[:, 1], linewidth=3, label='Learned')
ax_ip_2.legend(fontsize=16)
show()
```
### Double Inverted Pendulum
```
# Residual-model sizes for the double pendulum (5 inputs, 2 outputs).
d_drift_in_dip = 5
d_act_in_dip = 5
d_hidden_dip = 40
d_out_dip = 2
res_model_dip = KerasResidualAffineModel(d_drift_in_dip, d_act_in_dip, d_hidden_dip, 2, d_out_dip)
dip_learned = LearnedFBLinDynamics(dip_est, res_model_dip)
data = dip_learned.process_episode(xs_dip, us_dip, ts_dip)
dip_learned.fit(data, num_epochs=10, validation_split=0.1)
# Compare true state derivatives against the learned model, one axis per
# state-derivative component.
x_dots = array([dip.eval_dot(x, u, t) for x, u, t in zip(xs_dip, us_dip, ts_dip)])
_, axs_dip = subplots(4, figsize=(6, 16))
axs_dip[0].set_title('Double Inverted Pendulum', fontsize=16)
ylabels = ['$\\dot{\\theta}_1$ (rad / sec)', '$\\dot{\\theta}_2$ (rad / sec)', '$\\ddot{\\theta}_1$ (rad / sec$^2$)', '$\\ddot{\\theta}_2$ (rad / sec$^2$)']
# Note: the loop variable `data` shadows the training data above.
for ax, data, ylabel in zip(axs_dip, x_dots.T, ylabels):
    ax.plot(ts_dip[:-1], data, linewidth=3, label='True')
    ax.grid()
    ax.set_xlabel('$t$ (sec)', fontsize=16)
    ax.set_ylabel(ylabel, fontsize=16)
x_dots = array([dip_learned.eval_dot(x, u, t) for x, u, t in zip(xs_dip, us_dip, ts_dip)])
for ax, data in zip(axs_dip, x_dots.T):
    ax.plot(ts_dip[:-1], data, linewidth=3, label='Learned')
    ax.legend(fontsize=16)
show()
```
## Overfitting
### Inverted Pendulum
```
# Close the loop with a controller designed from the LEARNED dynamics and
# run it on the true pendulum (the "Overfitting" check from the heading).
lqr_learned_ip = LQRController.build(ip_learned, Q_ip, R_ip)
fb_lin_learned_ip = FBLinController(ip_learned, lqr_learned_ip)
xs, _ = ip.simulate(x_0_ip, fb_lin_learned_ip, ts_ip)
_, ax = subplots(figsize=(6, 4))
ax.plot(ts_ip, xs[:, 0], linewidth=3)
ax.grid()
ax.set_title('Inverted Pendulum', fontsize=16)
ax.set_xlabel('$t$ (sec)', fontsize=16)
ax.set_ylabel('$\\theta$ (rad)', fontsize=16)
show()
```
### Double Inverted Pendulum
```
# Same overfitting check for the double pendulum: controller designed from
# the learned dynamics, simulated on the true plant.
lqr_learned_dip = LQRController.build(dip_learned, Q_dip, R_dip)
fb_lin_learned_dip = FBLinController(dip_learned, lqr_learned_dip)
xs, _ = dip.simulate(x_0_dip, fb_lin_learned_dip, ts_dip)
_, (ax_1, ax_2) = subplots(2, figsize=(6, 8))
ax_1.set_title('Double Inverted Pendulum', fontsize=16)
ax_1.plot(ts_dip, xs[:, 0], linewidth=3)
ax_1.grid()
ax_1.set_xlabel('$t$ (sec)', fontsize=16)
ax_1.set_ylabel('$\\theta_1$ (rad)', fontsize=16)
ax_2.plot(ts_dip, xs[:, 1], linewidth=3)
ax_2.grid()
ax_2.set_xlabel('$t$ (sec)', fontsize=16)
ax_2.set_ylabel('$\\theta_2$ (rad)', fontsize=16)
show()
```
| github_jupyter |
# Introduction to MLOps
## Environment setup
```
import platform

print(f"Python version: {platform.python_version()}")
# Compare numerically: python_version_tuple() yields STRINGS, and the
# previous string-tuple comparison wrongly rejected Python 3.10+ because
# ("3", "10") >= ("3", "6") is False lexicographically.
assert tuple(int(part) for part in platform.python_version_tuple()[:2]) >= (3, 6)
from IPython.display import YouTubeVideo
```
## The Machine Learning workflow
[](https://www.redhat.com/files/summit/session-assets/2019/T957A0.pdf)
### Codifying problems and metrics
- Main questions:
- What is the business objective?
- How to measure success?
- What are the technical, temporal and organisational constraints?
- Possible solutions: communicate with PO and stakeholders, knowing product and client needs.
### Data collection and cleaning
- Main questions:
- Which data?
- Is it free/in adequate quantity/noisy/labelled/biased?
- Is it stable or evolving?
- Possible solutions: [public datasets](https://github.com/awesomedata/awesome-public-datasets), [DVC](https://dvc.org/), [Doccano](https://github.com/doccano/doccano), manual work.
### Feature engineering
- Main questions:
- What is the format of my input data?
- What features could potentially be useful for my models?
- How are they retrieved during training and production?
- Possible solutions: data pipelines, feature stores, domain experts.
[](https://www.tecton.ai/blog/what-is-a-feature-store/)
### Model training and tuning
- Main questions:
- Which model(s)?
- How to optimize its performance?
- How to track model versions?
- Possible solutions: starting simple, hyperparameter tuning, [MLflow](https://mlflow.org).
### Model validation
- Main questions:
- Does the model address the business objective?
- How to measure its performance?
- Are there uptime constraints for my model?
- Possible solutions: testing set, [continuous integration](https://en.wikipedia.org/wiki/Continuous_integration), [memoization](https://en.wikipedia.org/wiki/Memoization).
### Model deployment
- Main questions:
- How to serve my model?
- How to handle model versioning?
- How to handle scaling?
- Possible solutions: [FastAPI](https://fastapi.tiangolo.com/), [Docker](https://www.docker.com/), [Kubernetes](https://kubernetes.io/), [Cortex](https://www.cortex.dev/), [Databricks](https://databricks.com/), stress tests.
### Monitoring, validation
- Main questions:
- How to check model performance in production?
- How to prevent [model drifting](https://c3.ai/glossary/data-science/model-drift/)?
- How to explain model results?
- Possible solutions: [A/B testing](https://en.wikipedia.org/wiki/A/B_testing), [canary release](https://martinfowler.com/bliki/CanaryRelease.html), [explainability tools](https://github.com/EthicalML/awesome-production-machine-learning#explaining-black-box-models-and-datasets).
## From DevOps to MLOps
### Motivation
> "The real challenge isn't building an ML model, the challenge is building an integrated ML system and to continuously operate it in production."
### Elements of a ML system
[](https://cloud.google.com/architecture/mlops-continuous-delivery-and-automation-pipelines-in-machine-learning)
### DevOps
DevOps is a set of practices that combines software development (*Dev*) and IT operations (*Ops*). Its goal is to shorten the product delivery loop while maintaining high quality.
It implies constant collaboration between the development and infrastructure teams, as well as the use of several tools to automate and streamline the push to production and monitoring of a project.

### MLOps
MLOps is the process of automating and productionalizing Machine Learning-based systems. MLOps integrates data- and model-specific tasks into the DevOps workflow cycle to obtain a unified release process. Like DevOps, it combines ML system development (*Dev*) and ML system operation (*Ops*).
[](https://www.phdata.io/blog/mlops-vs-devops-whats-the-difference/)
[](https://ml-ops.org/content/mlops-principles)
### MLOps core principles
Like DevOps, MLOps is built on the following [principles](https://ml-ops.org/content/mlops-principles):
- Automation.
- Continuous X (integration, delivery and training).
- Versioning.
- Testing.
### Manual process
[](https://cloud.google.com/architecture/mlops-continuous-delivery-and-automation-pipelines-in-machine-learning#mlops_level_2_cicd_pipeline_automation)
### ML pipeline automation
[](https://cloud.google.com/architecture/mlops-continuous-delivery-and-automation-pipelines-in-machine-learning#mlops_level_1_ml_pipeline_automation)
### MLOps resources
- [MLOps: Continuous delivery and automation pipelines in machine learning](https://cloud.google.com/architecture/mlops-continuous-delivery-and-automation-pipelines-in-machine-learning)
- [Machine Learning Operations](https://ml-ops.org/)
- [MLOps and DevOps: Why Data Makes It Different](https://www.oreilly.com/radar/mlops-and-devops-why-data-makes-it-different/)
- [Practitioners guide to MLOps](https://services.google.com/fh/files/misc/practitioners_guide_to_mlops_whitepaper.pdf)
- [Awesome MLOps](https://github.com/visenger/awesome-mlops)
- [The 2021 Machine Learning, AI and Data Landscape](https://mattturck.com/data2021/)
## Overview of some MLOps tools
### Flask & FastAPI
[Flask](https://flask.palletsprojects.com) is a fast and lightweight web framework written in Python. It can be used to deploy Machine Learning models as APIs.
[FastAPI](https://fastapi.tiangolo.com/) is another lightweight web framework for building APIs. It has become the standard for deploying Python ML models on the web.
### DVC
[DVC](https://dvc.org/) ("Data Version Control") is an open source project that extends [Git](https://git-scm.com/) version control to data and model files. DVC uses a remote repository (including supports all major cloud providers) to store all the data and models for a project. In the actual code repository, a pointer to this remote location is stored to access the actual artifacts.
```
# Embed a short explainer video in the notebook output (a DVC overview,
# per the surrounding section).
YouTubeVideo("UbL7VUpv1Bs")
```
### MLflow
[MLflow](https://mlflow.org) is an open source platform for managing the end-to-end machine learning lifecycle. It can:
- track ML experiments (training runs) to record and compare parameters and results;
- package ML code in a reusable, reproducible form;
- manage and deploy models from a variety of ML libraries to a variety of model serving and inference platforms.
MLflow can be used with any ML library, in any programming language. Python, R and Java are supported out-of-the-box. It is included in several ML cloud platforms, like [Databricks](https://databricks.com/product/managed-mlflow).
[](https://www.datanami.com/2018/06/05/databricks-launches-mlflow-to-simplify-machine-learning-lifecycle/)
| github_jupyter |
# Gaussian 中的 PUHF/PMP2 结果的重新实现
> 创建时间:2019-08-31,最后修改:2019-09-01
在这一份笔记中,我们将使用 PySCF 的功能与 NumPy 重复 Gaussian 中计算的 PUHF 与 PMP2 能量结果;并对 PUHF 与 PMP2 的推导作简单的说明。
```
from pyscf import gto, scf, mp
```
## 参考结果与体系定义
### Gaussian 结果
在 Gaussian 中,我们使用以下输入卡可以得到 PUHF/PMP2 能量:
```
#p UMP2(Full)/6-31G nosymm
H2O
3 4
O 0. 0. 0.
H 1. 0. 0.
H 0. 1. 0.
```
对于上述分子,其中一些重要的输出结果是
1. $E_\mathrm{UHF}$:-73.0451423839
2. $E_\mathrm{UMP2, corr}$: -0.02646719276
3. $E_\mathrm{UMP2}$: -73.071609576661
4. $\langle S_z \rangle$: 1.5
5. $\langle S^{2(0)} \rangle$: 3.7531
6. $\langle S^{2(0)} \rangle + \langle S^{2(1)} \rangle$: 3.7504
7. $E_\mathrm{PUHF}$:-73.046146318
8. $E_\mathrm{PMP2}$: -73.072180589
输出文件参见 {download}`assets/PUHF_and_PMP2.out`;其中,有效的数据可以通过下述的代码获得:
```
# Scan the Gaussian output and print every line that carries one of the
# quantities of interest, skipping the initial-guess <S**2> report.
keywords = ["SCF Done", "EUMP2", "<S**2>", "(S**2,1)", "E(PMP2)"]
with open("assets/PUHF_and_PMP2.out", "r") as output:
    output_lines = output.read().split("\n")
for line_num, line_text in enumerate(output_lines):
    matched = any(keyword in line_text for keyword in keywords)
    if matched and "Initial guess" not in line_text:
        print("line {:03d}: {}".format(line_num, line_text))
```
我们的目标就是近乎准确无误地重复上述八个结果。
### PySCF 体系定义
为了获得与 Gaussian 相同的结果,我们需要定义相同的分子与电荷、多重度环境:
```
# Define the same system as the Gaussian input card: water geometry with
# charge +3 and spin 3 (three unpaired electrons, multiplicity 4).
mol = gto.Mole()
mol.atom = """
O 0. 0. 0.
H 1. 0. 0.
H 0. 1. 0.
"""
mol.charge = 3
mol.spin = 3  # n_alpha - n_beta
mol.basis = "6-31G"
mol.build()
```
通过 PySCF 计算 UHF 能量:
```
# Run UHF with a tight convergence threshold so the reference energy
# matches the Gaussian value closely.
scf_eng = scf.UHF(mol)
scf_eng.conv_tol = 1e-10
scf_eng.run();
```
上述结果应当能与 $E_\mathrm{UHF}$ 和 $\langle S^{2(0)} \rangle$ 对应。$\langle S_z \rangle = 1.5$ 几乎是显然的。不过,我们仍然不了解 $\langle S^{2(0)} \rangle$ 是如何生成的。
通过 PySCF 计算 UMP2 能量:
```
# UMP2 on top of the converged UHF reference; mp2_eng.t2 holds the
# doubles amplitudes used throughout the rest of the notebook.
mp2_eng = mp.UMP2(scf_eng)
mp2_eng.run();
```
上述结果应当能与 $E_\mathrm{UMP2, corr}$ 和 $E_\mathrm{UMP2}$ 对应。
因此,当前的问题将是回答:如何重复
1. $\langle S^{2(0)} \rangle$: 3.7531
2. $\langle S^{2(0)} \rangle + \langle S^{2(1)} \rangle$: 3.7504
3. $E_\mathrm{PUHF}$:-73.046146318
4. $E_\mathrm{PMP2}$: -73.072180589
### 部分变量定义
首先,我们遵从大多数量化文章中的记号
- $i, j$ 代表占据分子轨道
- $a, b$ 代表非占分子轨道
- $p, q$ 代表任意分子轨道
- $\alpha, \beta$ 代表任意原子轨道
<center><b>Table 1. 分子相关变量</b></center>
| 变量名 | 元素记号 | 意义与注解 | 标量或区间 |
|-|-|-|-|
| `nocc_a` | $n_\mathrm{occ}^\alpha$ | $\alpha$ 自旋电子数 | $5$ |
| `nocc_b` | $n_\mathrm{occ}^\beta$ | $\beta$ 自旋电子数 | $2$ |
| `N` | $N$ | 总电子数 | $7$ |
| `nmo` | $n_\mathrm{MO}$ | 分子轨道数 | $13$ |
| `nao` | $n_\mathrm{AO}$ | 原子轨道数 | $13$ |
| `S` | $S_{\mu \nu}$ | 原子轨道重叠积分 | |
| `so_a` | | $\alpha$ 占据轨道分割 | $[0, 5)$ |
| `so_b` | | $\beta$ 占据轨道分割 | $[0, 2)$ |
| `sv_a` | | $\alpha$ 非占轨道分割 | $[5, 13)$ |
| `sv_b` | | $\beta$ 非占轨道分割 | $[2, 13)$ |
| `Sx` | $S_x$ | $x$ 分量自旋 | $0$ |
| `Sy` | $S_y$ | $y$ 分量自旋 | $0$ |
| `Sz` | $S_z$ | $z$ 分量自旋 | $3/2$ |
<center><b>Table 2. UHF 计算相关变量</b></center>
| 变量名 | 元素记号 | 意义与注解 |
|-|-|-|
| `C_a` | $C_{\mu p}$ | $\alpha$ 系数矩阵 |
| `C_b` | $C_{\mu \bar p}$ | $\beta$ 系数矩阵 |
| `e_a` | $e_{p}$ | $\alpha$ 轨道能 |
| `e_b` | $e_{\bar p}$ | $\beta$ 轨道能 |
| `eo_a` | $e_{i}$ | $\alpha$ 占据轨道能 |
| `eo_b` | $e_{\bar i}$ | $\beta$ 占据轨道能 |
| `ev_a` | $e_{a}$ | $\alpha$ 非占轨道能 |
| `ev_b` | $e_{\bar a}$ | $\beta$ 非占轨道能 |
| `D2_aa` | $D_{ij}^{ab}$ | $\alpha, \alpha$ 轨道能差 |
| `D2_ab` | $D_{i \bar j}^{a \bar b}$ | $\alpha, \beta$ 轨道能差 |
| `D2_bb` | $D_{\bar i \bar j}^{\bar a \bar b}$ | $\beta, \beta$ 轨道能差 |
<center><b>Table 3. UMP2 计算相关变量</b></center>
| 变量名 | 元素记号 | 意义与注解 |
|-|-|-|
| `t2_aa` | $t_{ij}^{ab}$ | $\alpha, \alpha$ MP2 激发系数 |
| `t2_ab` | $t_{i \bar j}^{a \bar b}$ | $\alpha, \beta$ MP2 激发系数 |
| `t2_bb` | $t_{\bar i \bar j}^{\bar a \bar b}$ | $\beta, \beta$ MP2 激发系数 |
| `D2_aa` | $D_{ij}^{ab}$ | $\alpha, \alpha$ MP2 激发系数分母 |
| `D2_ab` | $D_{i \bar j}^{a \bar b}$ | $\alpha, \beta$ MP2 激发系数分母 |
| `D2_bb` | $D_{\bar i \bar j}^{\bar a \bar b}$ | $\beta, \beta$ MP2 激发系数分母 |
上述需要补充说明的公式有:
$$
S_z = \frac{1}{2} (n_\mathrm{occ}^\alpha - n_\mathrm{occ}^\beta)
$$
$$
D_{i \bar j}^{a \bar b} = e_i + e_{\bar j} - e_a - e_{\bar b}
$$
对于 MP2 激发系数分母,另外两种自旋情况的 $D_{ij}^{ab}$ 与 $D_{\bar i \bar j}^{\bar a \bar b}$ 也可以类似地生成。
```
# === Molecular
# --- Definition
nocc_a, nocc_b = mol.nelec  # alpha / beta electron counts (5, 2)
N = nocc_a + nocc_b  # total electron count
nmo = nao = mol.nao  # MO count equals AO count here
S = mol.intor("int1e_ovlp")  # AO overlap matrix S_{mu nu}
# --- Derivative
so_a, so_b = slice(0, nocc_a), slice(0, nocc_b)  # occupied-orbital slices
sv_a, sv_b = slice(nocc_a, nmo), slice(nocc_b, nmo)  # virtual-orbital slices
Sx, Sy, Sz = 0, 0, 0.5 * (nocc_a - nocc_b)  # spin projections; Sz = 3/2 here
# === UHF Calculation
# --- Definition
C_a, C_b = scf_eng.mo_coeff  # MO coefficients per spin
e_a, e_b = scf_eng.mo_energy  # orbital energies per spin
# --- Derivative
eo_a, eo_b = e_a[so_a], e_b[so_b]  # occupied orbital energies
ev_a, ev_b = e_a[sv_a], e_b[sv_b]  # virtual orbital energies
# D2_xy[i, j, a, b] = e_i + e_j - e_a - e_b (MP2 denominators per spin case)
D2_aa = eo_a[:, None, None, None] + eo_a[None, :, None, None] - ev_a[None, None, :, None] - ev_a[None, None, None, :]
D2_ab = eo_a[:, None, None, None] + eo_b[None, :, None, None] - ev_a[None, None, :, None] - ev_b[None, None, None, :]
D2_bb = eo_b[:, None, None, None] + eo_b[None, :, None, None] - ev_b[None, None, :, None] - ev_b[None, None, None, :]
# === MP2 Calculation
t2_aa, t2_ab, t2_bb = mp2_eng.t2  # UMP2 doubles amplitudes (aa, ab, bb)
```
作为对四脚标张量性质的验证,我们计算 MP2 相关能 $E_\mathrm{MP2, corr}$ 如下:
$$
E_\mathrm{MP2, corr} =
\frac{1}{4} \sum_{ijab} (t_{ij}^{ab})^2 D_{ij}^{ab} +
\frac{1}{4} \sum_{\bar i \bar j \bar a \bar b} (t_{\bar i \bar j}^{\bar a \bar b})^2 D_{\bar i \bar j}^{\bar a \bar b} +
\sum_{i \bar j a \bar b} (t_{i \bar j}^{a\bar b})^2 D_{i \bar j}^{a \bar b}
$$
```
# E_MP2,corr = 1/4 t_aa^2 D_aa + 1/4 t_bb^2 D_bb + t_ab^2 D_ab
(+ 0.25 * (t2_aa**2 * D2_aa).sum()
 + 0.25 * (t2_bb**2 * D2_bb).sum()
 + (t2_ab**2 * D2_ab).sum())
```
PySCF 所给出的 $E_\mathrm{MP2, corr}$ 可以给出相同的结果:
```
# PySCF's reference value for the MP2 correlation energy
mp2_eng.e_corr
```
## $\langle S^2 \rangle$ 相关计算
### 分子轨道基组重叠矩阵 `S_pq` $S_{p \bar q}$
$$
S_{p \bar q} = \sum_{\mu \nu} C_{\mu p} S_{\mu \nu} C_{\nu \bar q}
$$
若用量子力学记号,上述矩阵元素可能表示为
$$
S_{p \bar q} = \int \phi_p (\boldsymbol{r}) \phi_{\bar q} (\boldsymbol{r}) \, \mathrm{d} \boldsymbol{r}
$$
注意上述的积分是空间坐标的积分,不包含自旋部分的积分。
```
# MO-basis overlap between alpha and beta orbitals: S_pq = C_a^T S C_b
S_pq = C_a.T @ S @ C_b
S_pq.shape
```
我们以后还会使用上述矩阵的占据-占据部分 `S_ij` $S_{i \bar j}$、占据-非占部分 `S_ia` $S_{i \bar a}$ 与非占-占据部分 `S_ai` $S_{a \bar i} = S_{\bar i a}$:
```
# occ-occ, occ-vir and vir-occ sub-blocks of the alpha/beta MO overlap
S_ij, S_ia, S_ai = S_pq[so_a, so_b], S_pq[so_a, sv_b], S_pq[sv_a, so_b]
[S_ij.shape, S_ia.shape, S_ai.shape]
```
### `S2_0` $\langle S^{2(0)} \rangle$
$\langle S^{2(0)} \rangle$ 在程序中通常写为 `<S^2>` 或 `<S**2>`。在 Gaussian 计算 PUHF 处,还写为 `(S**2,0)`。这意味着是 UHF 波函数的 $\langle S^2 \rangle_\mathrm{UHF}$。相对地,UMP2 波函数给出的对 $\langle S^2 \rangle$ 的矫正将记作 $\langle S^{2(1)} \rangle$。
参考 Chen and Schlegel [^Chen-Schlegel.JCP.1994.101] Table 1, $0 \rightarrow 0$ 或等价地,Szabo and Ostlund [^Szabo-Ostlund.Dover.1996] eq (2.271)
$$
\langle S^{2(0)} \rangle = \langle \Psi_0 | \hat S^2 | \Psi_0 \rangle = S_z (S_z + 1) + n_\mathrm{occ}^\beta - \sum_{i \bar j} (S_{i \bar j})^2
$$
```
# <S^2> of the UHF determinant: Sz(Sz+1) + n_occ^beta - sum_ij |S_ij|^2
S2_0 = Sz * (Sz + 1) + nocc_b - (S_ij**2).sum()
S2_0
```
Gaussian 的参考值是 3.7531。
为了以后的记号便利,我们在这里定义 `L`
$$
L = \sum_{i \bar j} (S_{i \bar j})^2
$$
```
# L = sum_ij |S_ij|^2, reused by the S4SD expression below
L = (S_ij**2).sum()
```
### `S2_1` $\langle S^{2(1)} \rangle$
$$
\begin{align}
\langle S^{2(1)} \rangle &= 2 \langle \Psi_0 | \hat S^2 | \Psi^{(1)} \rangle = 2 \sum_{i \bar j a \bar b} t_{i \bar j}^{a \bar b} \langle \Psi_0 | \hat S^2 | \Psi_{i \bar j}^{a \bar b} \rangle \\
&= - 2 \sum_{i \bar j a \bar b} t_{i \bar j}^{a \bar b} \langle i | \bar b \rangle \langle a | \bar j \rangle = - 2 \sum_{i \bar j a \bar b} t_{i \bar j}^{a \bar b} S_{i \bar b} S_{a \bar j}
\end{align}
$$
上式的第一个等号是 Chen and Schlegel [^Chen-Schlegel.JCP.1994.101] eq (5) 所给出的;而第三个等号是 Table 1 $0 \rightarrow \alpha \beta (i, a: \alpha; j, b: \beta)$ 给出的。
上式的推导中有一处关于 $| \Psi^{(1)} \rangle$ 的展开的推导省略。我们知道
$$
| \Psi^{(1)} \rangle = \hat T_2 | \Psi_0 \rangle
= \frac{1}{4} \sum_{ijab} t_{ij}^{ab} | \Psi_{ij}^{ab} \rangle + \frac{1}{4} \sum_{\bar i \bar j \bar a \bar b} t_{\bar i \bar j}^{\bar a \bar b} | \Psi_{\bar i \bar j}^{\bar a \bar b} \rangle + \sum_{i \bar j a \bar b} t_{i \bar j}^{a \bar b} | \Psi_{i \bar j}^{a \bar b} \rangle
$$
但由于利用到 $\langle 0 | \hat S^2 | \Psi_{ij}^{ab} \rangle = \langle 0 | \hat S^2 | \Psi_{\bar i \bar j}^{\bar a \bar b} \rangle = 0$,因此在第二个等号时只从三个 $| \Psi^{(1)} \rangle$ 中留下了一项。关于 $\hat S^2$ 作用在 UHF 波函数与轨道下的性质,可以参考 Schlegel [^Schlegel-Schlegel.JCP.1986.84] eq (5) 的说明。
```
# First-order correction to <S^2>: -2 sum_{ijab} t_ij^ab S_ib S_aj
# (only the alpha-beta amplitudes contribute)
S2_1 = - 2 * (t2_ab * S_ia[:, None, None, :] * S_ai.T[None, :, :, None]).sum()
S2_1
```
因此,UMP2 矫正过的 $\langle S^2 \rangle_\mathrm{UMP2} = \langle S^{2(0)} \rangle + \langle S^{2(1)} \rangle$ 的结果是
```
# UMP2-corrected expectation value of S^2
S2_0 + S2_1
```
Gaussian 的参考值是 3.7504。
### `S4SD` $\texttt{S4SD}$
`S4SD` 的表达式较为复杂,我们也直接使用 $\texttt{S4SD}$ 而不用其他记号表示该项:
$$
\begin{align}
\texttt{S4SD} = (n_\mathrm{occ}^\alpha - L) (n_\mathrm{occ}^\beta - L) + 2 L - 2 \sum_{i \bar j k \bar l} S_{i \bar j} S_{\bar j k} S_{k \bar l} S_{\bar l i} + \langle S^{2(0)} \rangle^2
\end{align}
$$
```
# S4SD = (na - L)(nb - L) + 2L - 2 Tr[(S_ij S_ij^T)^2] + <S^2(0)>^2
S4SD = (nocc_a - L) * (nocc_b - L) + 2 * L - 2 * (S_ij @ S_ij.T @ S_ij @ S_ij.T).trace() + S2_0**2
S4SD
```
该表达式的来源可能是 Amos and Hall [^Amos-Hall.PRSLA.1961.263]。该文的 eq (7·02) 下方公式中,有通过稍高阶的投影而获得的 $\langle S^2 \rangle$ 的计算方式
$$
\langle S^2 \rangle \simeq \langle S^{2(0)} \rangle + \frac{\texttt{S4SD} - \langle S^{2(0)} \rangle^2}{\langle S^{2(0)} \rangle - (S_z + 1) (S_z + 2)}
$$
通过这种方式获得的 $\langle S^2 \rangle$ 近似值可以相当精确,比 $\langle S^{2(0)} \rangle + \langle S^{2(1)} \rangle$ 还要接近精确值 $3.75$:
```
# Higher-order projected estimate of <S^2> (Amos & Hall)
S2_0 + (S4SD - S2_0**2) / (S2_0 - (Sz + 1) * (Sz + 2))
```
相信 $\texttt{S4SD}$ 的存在意义是用于计算 Schlegel [^Schlegel-Schlegel.JCP.1986.84] 式 eq (24) 中的 $\langle \tilde \Phi_1 | \tilde \Phi_1 \rangle = \langle \Phi_0 | A_{s + 1}^\dagger A_{s + 1} | \Phi_0 \rangle$;但关于这一关系我还不确定是否正确。后面计算 PMP2 能量时会使用上 $\texttt{S4SD}$。
## 自旋污染矫正能量计算
### `EPUHF` $E_\mathrm{PUHF}$
根据 Schlegel [^Schlegel-Schlegel.JCP.1986.84] eq (22),PUHF 能量可以表达为
$$
E_\mathrm{PUHF} = E_\mathrm{UHF} + \frac{1}{\langle \Psi_0 | \hat P_s | \Psi_0 \rangle} \sum_{i \bar j a \bar b} \langle \Psi_0 | \hat H | \Psi_{i \bar j}^{a \bar b} \rangle \langle \Psi_{i \bar j}^{a \bar b} | \hat P_s | \Psi_0 \rangle
$$
其中,$\hat P_s$ 算符称为 Löwdin 算符 [^Lowdin-Lowdin.PR.1955.97] eq (7),
$$
\hat P_s = \prod_{k \neq s}^{N / 2} \frac{\hat S^2 - k (k + 1)}{s (s + 1) - k (k + 1)}
$$
相当于将自旋不纯的波函数纯化为自旋量子数为 $s$ 的态。在实际使用中,通常使用 $\hat A_{s + 1} \simeq \hat P_s$ 替代;关于这段讨论可以参考 Rossky and Karplus [^Rossky-Karplus.JCP.1980.73] section V.A 的讨论,而下面公式的形式参考 Schlegel [^Schlegel-Schlegel.JCP.1986.84] eq (14);其中,$s$ 一般取 $S_z$:
$$
\hat A_{s + 1} = \frac{\hat S^2 - (s + 1)(s + 2)}{\langle S^{2(0)} \rangle - (s + 1)(s + 2)}
$$
关于 $\hat A_{s + 1}$,一个显然的性质是 $\langle \Psi_0 | \hat A_{s + 1} | \Psi_0 \rangle = 1$。
为了程序方便,定义下述临时变量 `Y`
$$
Y = \langle S^{2(0)} \rangle - (S_z + 1) (S_z + 2)
$$
那么 `D_EPUHF`
$$
\begin{align}
\Delta E_\mathrm{PUHF} &= \sum_{i \bar j a \bar b} t_{i \bar j}^{a \bar b} D_{i \bar j}^{a \bar b} \cdot \langle \Psi_{i \bar j}^{a \bar b} | \frac{\hat S^2}{Y} | \Psi_0 \rangle \\
&= - \frac{1}{Y} \sum_{i \bar j a \bar b} t_{i \bar j}^{a \bar b} D_{i \bar j}^{a \bar b} S_{i \bar b} S_{\bar j a}
\end{align}
$$
```
# Y = <S^2(0)> - (s+1)(s+2): normalization denominator of the annihilator
Y = S2_0 - (Sz + 1) * (Sz + 2)
# PUHF correction: -(1/Y) sum_{ijab} t_ij^ab D_ij^ab S_ib S_ja
D_EPUHF = - 1 / Y * (t2_ab * D2_ab * S_ia[:, None, None, :] * S_ai.T[None, :, :, None]).sum()
D_EPUHF
```
因而 $E_\mathrm{PUHF} = E_\mathrm{UHF} + \Delta E_\mathrm{PUHF}$:
```
# E_PUHF = E_UHF + Delta E_PUHF
scf_eng.e_tot + D_EPUHF
```
Gaussian 的参考值是 -73.046146318。
### `EPMP2` $E_\mathrm{PMP2}$
根据 Schlegel [^Schlegel-Schlegel.JCP.1986.84] eq (24),PMP2 能量可以表达为
$$
\begin{align}
\Delta E_\mathrm{PMP2} = \Delta E_\mathrm{PUHF} \left( 1 - \frac{\langle \Phi^{(1)} | \hat A_{s + 1} | \Psi_0 \rangle}{\langle \Phi_0 | \hat A_{s + 1}^\dagger \hat A_{s + 1} | \Psi_0 \rangle} \right)
\end{align}
$$
关于上式的分数项,分子部分可以写为
$$
\begin{align}
\langle \Phi^{(1)} | \hat A_{s + 1} | \Psi_0 \rangle
= \langle \Phi^{(1)} | \frac{\hat S^2}{Y} - \frac{(s + 1)(s + 2)}{Y} | \Psi_0 \rangle = \frac{1}{2} \frac{\langle S^{2(1)} \rangle}{Y}
\end{align}
$$
而关于分母项,参考前文 $\texttt{S4SD}$ 处的讨论,
$$
\langle \Phi_0 | \hat A_{s + 1}^\dagger \hat A_{s + 1} | \Psi_0 \rangle \simeq \langle S^2 \rangle - \langle S^{2(0)} \rangle = \frac{\texttt{S4SD} - \langle S^{2(0)} \rangle^2}{Y^2}
$$
但作者不能断定上述论断的正确性。
将分子、分母的结果代入 $\Delta E_\mathrm{PMP2}$ 的算式中,可以得到 `D_EPMP2`
$$
\Delta E_\mathrm{PMP2} = \Delta E_\mathrm{PUHF} \left( 1 - \frac{1}{2} \frac{\langle S^{2(1)} \rangle \cdot Y}{\texttt{S4SD} - \langle S^{2(0)} \rangle^2} \right)
$$
```
# PMP2 correction: Delta E_PUHF scaled by the spin-annihilation factor
D_EPMP2 = D_EPUHF * (1 - 0.5 * S2_1 * Y / (S4SD - S2_0**2))
D_EPMP2
```
因而 $E_\mathrm{PMP2} = E_\mathrm{UMP2} + \Delta E_\mathrm{PMP2}$:
```
# E_PMP2 = E_UMP2 + Delta E_PMP2
mp2_eng.e_tot + D_EPMP2
```
Gaussian 的参考值是 -73.072180589。
至此,我们已经完成了使用 PySCF 的功能与 NumPy 重复 Gaussian 的 PUHF、PMP2 的能量结果了。
## 修订时间轴
- 2019/08/30 写完文档;文档基于 2019/08/13 的一份笔记。
- 2019/09/01 补充一部分推导。
[^Chen-Schlegel.JCP.1994.101]: Chen, W.; Schlegel, H. B. Evaluation of S2 for Correlated Wave Functions and Spin Projection of Unrestricted Moller–Plesset Perturbation Theory. *J. Chem. Phys.* **1994**, *101* (7), 5957–5968. doi: [10.1063/1.467312](https://doi.org/10.1063/1.467312).
[^Szabo-Ostlund.Dover.1996]: Szabo, A.; Ostlund, N. S. *Modern Quantum Chemistry: Introduction to Advanced Electronic Structure Theory (Dover Books on Chemistry)*; Dover Publications, 1996.
[^Schlegel-Schlegel.JCP.1986.84]: Schlegel, H. B. Potential Energy Curves Using Unrestricted Mo/ller–Plesset Perturbation Theory with Spin Annihilation. *J. Chem. Phys.* **1986**, *84* (8), 4530–4534. doi: [10.1063/1.450026](https://doi.org/10.1063/1.450026).
[^Amos-Hall.PRSLA.1961.263]: Amos, A. T.; Hall, G. G. Single Determinant Wave Functions. *Proc. R. Soc. Lond. A* **1961**, *263* (1315), 483–493. doi: [10.1098/rspa.1961.0175](https://doi.org/10.1098/rspa.1961.0175).
[^Lowdin-Lowdin.PR.1955.97]: Lowdin, P.-O. Quantum Theory of Many-Particle Systems. III. Extension of the Hartree-Fock Scheme to Include Degenerate Systems and Correlation Effects. *Phys. Rev.* **1955**, *97* (6), 1509–1520. [10.1103/physrev.97.1509](https://doi.org/10.1103/physrev.97.1509).
[^Rossky-Karplus.JCP.1980.73]: Rossky, P. J.; Karplus, M. Spin Dependent Properties of Perturbed Wave Functions: An Analytic Comparison of the Exact, UHF, and Spin-Projected UHF States. *J. Chem. Phys.* **1980**, *73* (12), 6196–6214. [10.1063/1.440115](https://doi.org/10.1063/1.440115).
| github_jupyter |
In all our analyses, we used estimations for either simple or logarithmic rates of return. <br/>
The formula for simple returns is
$$
\frac{P_t - P_{t-1}}{P_{t-1}}
,$$
while the formula for log returns is
$$
ln( \frac{P_t}{P_{t-1}} )
.$$
<br/>
If our dataset is simply called "data", in Python, we could write the first formula as <br/>
*(data / data.shift(1)) - 1,*
and the second one as
*np.log(data / data.shift(1)).*
<br/>
Instead of coding it this way, some professionals prefer using **Pandas.DataFrame.pct_change()** method, as it computes simple returns directly. We will briefly introduce it to you in this notebook document.
First, let's import NumPy, Pandas, and pandas_datareader.
```
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
```
We will calculate returns of the Procter and Gamble stock, based on adjusted closing price data since the 1st of January 2007.
```
# Download P&G adjusted close prices from Yahoo Finance from 2007-01-01 onward
ticker = 'PG'
data = pd.DataFrame()
data[ticker] = wb.DataReader(ticker, data_source='yahoo', start='2007-1-1')['Adj Close']
```
So far, we estimated simple returns in the following way.
```
# Simple returns via the explicit formula: P_t / P_{t-1} - 1
s_rets_1 = (data / data.shift(1)) - 1
s_rets_1.head()
```
Observe the .pct_change() method can obtain an identical result.
```
# Identical simple returns via the built-in pct_change() method
s_rets_2 = data.pct_change()
s_rets_2.head()
```
Now, if you multiply the obtained values by 100, you will see the percentage change:
```
# Scale to percentage points for readability
s_rets_2.head() * 100
```
This means the close price on 2007-01-04 was 0.76% lower than the price on 2007-01-03, the price on 2007-01-05 was 0.85% lower than the price on 2007-01-04, and so on.
A few arguments can be used in the percentage change method. The most important one is 'period' as it specifies the difference between prices in the nominator. By default, it equals one, and that's why we obtained the same result for s_rets_1 and s_rets_2. Let's assume we would like to calculate simple returns with the following formula:
$$
\frac{P_t - P_{t-2}}{P_{t-2}}
,$$
Then, we should specify 'periods = 2' in parentheses:
```
# Two-period simple returns: (P_t - P_{t-2}) / P_{t-2}
s_rets_3 = data.pct_change(periods=2)
s_rets_3.head()
```
You can see there was no value obtained not only for the first, but also for the second observation. If we use the "old" formula, and not this method, *shift(2)* would lead us to the same output:
```
# Equivalent explicit formula using a two-period shift
s_rets_4 = (data / data.shift(2)) - 1
s_rets_4.head()
```
Great! <br/>
Now, let's consider logarithmic returns. To this moment, we applied the following formula:
```
# Log returns via the explicit formula: ln(P_t / P_{t-1})
log_rets_1 = np.log(data / data.shift(1))
log_rets_1.tail()
```
You can calculate the same formula for log returns with the help of the .pct_change() method. Just be careful with the way you apply the formula! Mathematically, it will look like this:
$$
ln(\frac{P_t}{P_{t-1}}) = ln(\frac{P_t - P_{t-1}}{P_{t-1}} + \frac{P_{t-1}}{P_{t-1}}) = ln(\text{simple returns} + 1)
.$$
```
# Log returns derived from simple returns: ln(1 + r_simple)
log_rets_2 = np.log(data.pct_change() + 1)
log_rets_2.tail()
```
***
The .pct_change() method is very popular. Whether you include it in your code or you go the other way around and type the formulas as we did in our analyses, you should obtain the correct value for the returns you need.
| github_jupyter |
## Linear Regression using pytorch
Linear regression is one of the must have tools in any data scientists toolkit. It attempts to fit the input data using a solution like:
* y is our measured output
* X is our input data, there are m measurements each of n values
Using linear regression we find coefficients θ<sub>0</sub> ... θ<sub>n</sub>
ŷ = θ<sub>0</sub> + θ<sub>1</sub>X<sub>1</sub> + θ <sub>2</sub>X<sub>2</sub> + ... + θ<sub>n</sub>X<sub>n</sub>
* ŷ is our predicted output
We minimize the error (loss function) between ŷ and y. A very common way is to minimize the squared distance between the each ŷ and y pair.
An example may help:
### We survey 10 people, based on 3 facts about a car we ask what would they pay
- m = 10 ( 10 samples of data )
- n = 3 ( 3 observations in each sample )
- X is m × n matrix
- y is a m × 1 matrix ( vector )
We want to find θ<sub>0</sub>, θ<sub>1</sub>, θ<sub>2</sub> & θ<sub>3</sub>. That will allow us to find the price of any car (OK so we may need more than 3 things to really price a car but ... )
The 3 questions may be:
* Top speed in mph.
* Fuel consumption in mpg.
* Cargo capacity in cuft.
Let's consider some cases for out θs
* θ<sub>0</sub>=10000.0 θ<sub>1</sub>=0.0 θ<sub>2</sub>=0.0 θ<sub>3</sub>=0.0
- all cars would cost 10000, non of the three factors make any difference
* θ<sub>0</sub>= 1000.0 θ<sub>1</sub>=150.0 θ<sub>2</sub>=120.0 θ<sub>3</sub>=0.0
- People like fast cars with low fuel consumption
- cars with a top speed of 100mph, consuming 5mpg cost 1000 + 100×150 + 120×5 = 16,600
- cars with a top speed of 90mph, consuming 25mpg cost 1000 + 90×150 + 120×25 = 17,500
- cars with a top speed of 10mph, consuming 1mpg cost 1000 + 10×150 + 120×1 = 1,620
That's linear regression!
```
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
```
```
# Ground-truth slope/intercept and noise level for the synthetic data
m = -4
c = 12
noisiness = 7
num_points = 20
# x uniform in [-5, 5); y = m*x + c plus uniform noise in [-noisiness/2, noisiness/2)
x = ( torch.rand( num_points, 1 ) - 0.5 ) * 10
y = ( ( torch.rand( num_points, 1 ) - 0.5 ) * noisiness ) + ( x * m + c )
plt.scatter( x.tolist(), y.tolist(), color='red' )
plt.show()
```
We want to solve for 2 variables ( m & c ) so we have to synthesize a bias ( the c ). Bias is independent of the input data, so we'll prepend a column of ones to our input values ( x ).
if x is
```
1
2
3
```
It will become
```
1 1
1 2
1 3
```
```
# Prepend a column of ones so the bias c is learned as an ordinary coefficient
xplusone = torch.cat( ( torch.ones( x.size(0),1 ), x) , 1 )
```
Now we use the pytorch built-in solver [gels](https://pytorch.org/docs/stable/torch.html#torch.gels). This is the least squares solver. The returns includes two parts:
- the first n items are the co-officients we want
- the remainder are error terms
We have two dimensions ( the 1D x data and the prepended ones ), so read the first two items from the result.
```
# Least-squares solve; the first n rows of R are the coefficients,
# the remaining rows are residual terms.
# NOTE(review): torch.gels was deprecated and later removed from PyTorch —
# on recent releases use torch.linalg.lstsq; confirm the installed version.
R, _ = torch.gels( y, xplusone )
R = R[0:xplusone.size(1)]
```
Let's plot the original points and the best fit line. The best fit line comes from the co-efficients.
Multiply the original inputs by R ( the coefficients ) to get the line
```
# Predicted values yh = X_aug @ R; plot the fitted line over the noisy points
yh = xplusone.mm( R )
plt.plot( x.tolist(), yh.tolist() )
plt.scatter( x.tolist(), y.tolist(), color='red' )
plt.show()
```
### An example using more than one dimensional input
This example shows the method above works for higher dimensional data.
First define the x & y arrays of inputs and results
```
# Two-dimensional inputs: true coefficient column vector m and intercept c
m = torch.tensor( [ [-2.0], [-2.0] ] )
c = 12
noisiness = 10
num_points = 100
x = ( torch.rand( num_points, 2 ) - 0.5 ) * 10
y = ( ( torch.rand( num_points, 1 ) - 0.5 ) * noisiness ) + ( x.mm( m ) + c )
```
Find the best fitting plane of the input data points. Compare the code to the 2D case above: it's the same.
```
# Same recipe as the 1D case: augment with ones, solve, predict
xplusone = torch.cat( ( torch.ones( x.size(0),1 ), x) , 1 )
R, _ = torch.gels( y, xplusone )
R = R[0:xplusone.size(1)]
yh = xplusone.mm( R )
```
Plot the results
It's harder to see a 3D plot. The red dots are the data points, the green plane is the best fit solution.
```
# Red dots: data points; green surface: fitted plane
fig = plt.figure()
ax = fig.add_subplot( 111, projection='3d')
ax.scatter( x[:,0].tolist(), x[:,1].tolist(), y[:,0].tolist(), color='red' )
ax.plot_trisurf( x[:,0].tolist(), x[:,1].tolist(), yh[:,0].tolist(), color='green', shade=False )
plt.show()
```
| github_jupyter |
# 量子神经网络在自然语言处理中的应用
[](https://mindspore.cn/mindquantum/api/zh-CN/master/index.html) [](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/master/mindquantum/zh_cn/mindspore_qnn_for_nlp.ipynb) [](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/master/mindquantum/zh_cn/mindspore_qnn_for_nlp.py) [](https://gitee.com/mindspore/docs/blob/master/docs/mindquantum/docs/source_zh_cn/qnn_for_nlp.ipynb)
## 概述
在自然语言处理过程中,词嵌入(Word embedding)是其中的重要步骤,它是一个将高维度空间的词向量嵌入到一个维数更低的连续向量空间的过程。当给予神经网络的语料信息不断增加时,网络的训练过程将越来越困难。利用量子力学的态叠加和纠缠等特性,我们可以利用量子神经网络来处理这些经典语料信息,加入其训练过程,并提高收敛精度。下面,我们将简单地搭建一个量子经典混合神经网络来完成一个词嵌入任务。
## 环境准备
导入本教程所依赖模块
```
import numpy as np
import time
from mindquantum.core import QubitOperator
import mindspore.ops as ops
import mindspore.dataset as ds
from mindspore import nn
from mindspore.train.callback import LossMonitor
from mindspore import Model
from mindquantum.framework import MQLayer
from mindquantum import Hamiltonian, Circuit, RX, RY, X, H, UN
```
本教程实现的是一个[CBOW模型](https://blog.csdn.net/u010665216/article/details/78724856),即利用某个词所处的环境来预测该词。例如对于“I love natural language processing”这句话,我们可以将其切分为5个词,\["I", "love", "natural", "language", "processing”\],在所选窗口为2时,我们要处理的问题是利用\["I", "love", "language", "processing"\]来预测出目标词汇"natural"。这里我们以窗口为2为例,搭建如下的量子神经网络,来完成词嵌入任务。

这里,编码线路会将"I"、"love"、"language"和"processing"的编码信息编码到量子线路中,待训练的量子线路由四个Ansatz线路构成,最后我们在量子线路末端对量子比特做$\text{Z}$基矢上的测量,具体所需测量的比特的个数由所需嵌入空间的维数确定。
## 数据预处理
我们对所需要处理的语句进行处理,生成关于该句子的词典,并根据窗口大小来生成样本点。
```
def GenerateWordDictAndSample(corpus, window=2):
    """Build a word -> index dictionary and CBOW samples from a corpus.

    Each sample pairs the 2 * window surrounding words with the center word.

    Args:
        corpus: whitespace-separated text.
        window: number of context words taken on each side of the center.

    Returns:
        (word_dict, sampling): the vocabulary mapping (sorted order) and a
        list of [surrounding_words, center_word] pairs.
    """
    tokens = corpus.split()
    vocabulary = sorted(set(tokens))
    word_dict = {word: idx for idx, word in enumerate(vocabulary)}
    sampling = []
    for start, _ in enumerate(tokens[window:-window]):
        center_pos = start + window
        context_words = [tokens[pos]
                         for pos in range(start, start + 2 * window + 1)
                         if pos != center_pos]
        sampling.append([context_words, tokens[center_pos]])
    return word_dict, sampling
# Build the dictionary and the single CBOW sample for the demo sentence
word_dict, sample = GenerateWordDictAndSample("I love natural language processing")
print(word_dict)
print('word dict size: ', len(word_dict))
print('samples: ', sample)
print('number of samples: ', len(sample))
```
根据如上信息,我们得到该句子的词典大小为5,能够产生一个样本点。
## 编码线路
为了简单起见,我们使用的编码线路由$\text{RX}$旋转门构成,结构如下。

我们对每个量子比特都作用一个$\text{RX}$旋转门。
```
def GenerateEncoderCircuit(n_qubits, prefix=''):
    """Return an encoder circuit with one parameterized RX gate per qubit.

    Parameter names are '<prefix>_<qubit index>'; a trailing underscore is
    appended to a non-empty prefix if it is missing.
    """
    if prefix and not prefix.endswith('_'):
        prefix += '_'
    encoder = Circuit()
    for qubit in range(n_qubits):
        encoder += RX(prefix + str(qubit)).on(qubit)
    return encoder
# Display a 3-qubit encoder with parameter prefix 'e'
GenerateEncoderCircuit(3, prefix='e')
```
我们通常用$\left|0\right>$和$\left|1\right>$来标记二能级量子比特的两个状态,由态叠加原理,量子比特还可以处于这两个状态的叠加态:
$$\left|\psi\right>=\alpha\left|0\right>+\beta\left|1\right>$$
对于$n$比特的量子态,其将处于$2^n$维的希尔伯特空间中。对于上面由5个词构成的词典,我们只需要$\lceil \log_2 5 \rceil=3$个量子比特即可完成编码,这也体现出量子计算的优越性。
例如对于上面词典中的"love",其对应的标签为2,2的二进制表示为`010`,我们只需将编码线路中的`e_0`、`e_1`和`e_2`分别设为$0$、$\pi$和$0$即可。下面来验证一下。
```
from mindquantum.simulator import Simulator
from mindspore import context
from mindspore import Tensor
n_qubits = 3 # number of qubits of this quantum circuit
label = 2 # label need to encode
# bin(label)[-1:1:-1] reverses the binary digits (little-endian qubit order);
# ljust pads with '0' up to n_qubits.
label_bin = bin(label)[-1: 1: -1].ljust(n_qubits, '0') # binary form of label
# every '1' bit becomes an RX angle of pi (flips |0> to |1> up to a phase)
label_array = np.array([int(i)*np.pi for i in label_bin]).astype(np.float32) # parameter value of encoder
encoder = GenerateEncoderCircuit(n_qubits, prefix='e') # encoder circuit
encoder_params_names = encoder.params_name # parameter names of encoder
print("Label is: ", label)
print("Binary label is: ", label_bin)
print("Parameters of encoder is: \n", np.round(label_array, 5))
print("Encoder circuit is: \n", encoder)
print("Encoder parameter names are: \n", encoder_params_names)
# quantum state evolution operator
state = encoder.get_qs(pr=dict(zip(encoder_params_names, label_array)))
# squared amplitudes: the basis state with probability 1 should equal the label
amp = np.round(np.abs(state)**2, 3)
print("Amplitude of quantum state is: \n", amp)
print("Label in quantum state is: ", np.argmax(amp))
```
通过上面的验证,我们发现,对于标签为2的数据,最后得到量子态的振幅最大的位置也是2,因此得到的量子态正是对输入标签的编码。我们将对数据编码生成参数数值的过程总结成如下函数。
```
def GenerateTrainData(sample, word_dict):
    """Encode CBOW samples into encoder parameter arrays and a label array.

    Each surrounding word's index is written in reversed (little-endian)
    binary on n_qubits bits; every '1' bit becomes an RX angle of pi.

    Args:
        sample: list of [surrounding_words, center_word] pairs.
        word_dict: mapping from word to integer label.

    Returns:
        (data_x, data_y): float32 array of shape
        (n_samples, len(surrounding_words) * n_qubits) of encoder angles,
        and the int32 array of center-word labels.
    """
    # np.int was removed in NumPy 1.24; the builtin int is the correct spelling.
    n_qubits = int(np.ceil(np.log2(1 + max(word_dict.values()))))
    data_x = []
    data_y = []
    for around, center in sample:
        data_x.append([])
        for word in around:
            label = word_dict[word]
            # reversed binary string, zero-padded to n_qubits bits
            label_bin = bin(label)[-1: 1: -1].ljust(n_qubits, '0')
            label_array = [int(i)*np.pi for i in label_bin]
            data_x[-1].extend(label_array)
        data_y.append(word_dict[center])
    return np.array(data_x).astype(np.float32), np.array(data_y).astype(np.int32)
# Encode the demo sample; returns (data_x, data_y)
GenerateTrainData(sample, word_dict)
```
根据上面的结果,我们将4个输入的词编码的信息合并为一个更长向量,便于后续神经网络调用。
## Ansatz线路
Ansatz线路的选择多种多样,我们选择如下的量子线路作为Ansatz线路,它的一个单元由一层$\text{RY}$门和一层$\text{CNOT}$门构成,对此单元重复$p$次构成整个Ansatz线路。

定义如下函数生成Ansatz线路。
```
def GenerateAnsatzCircuit(n_qubits, layers, prefix=''):
    """Hardware-efficient ansatz: per layer, an RY on every qubit followed
    by a brick-wall pattern of CNOTs whose starting offset alternates
    between 0 and 1 with the layer index."""
    if prefix and not prefix.endswith('_'):
        prefix += '_'
    ansatz = Circuit()
    for layer in range(layers):
        for qubit in range(n_qubits):
            ansatz += RY(prefix + str(layer) + '_' + str(qubit)).on(qubit)
        # CNOT on each (control, control + 1) pair inside the register
        for control in range(layer % 2, n_qubits - 1, 2):
            ansatz += X.on(control + 1, control)
    return ansatz
# Display a 2-layer, 5-qubit ansatz with parameter prefix 'a'
GenerateAnsatzCircuit(5, 2, 'a')
```
## 测量
我们把对不同比特位上的测量结果作为降维后的数据。具体过程与比特编码类似,例如当我们想将词向量降维为5维向量时,对于第3维的数据可以如下产生:
- 3对应的二进制为`00011`。
- 测量量子线路末态对$Z_0Z_1$哈密顿量的期望值。
下面函数将给出产生各个维度上数据所需的哈密顿量(hams),其中`n_qubits`表示线路的比特数,`dims`表示词嵌入的维度:
```
def GenerateEmbeddingHamiltonian(dims, n_qubits):
    """Return one measurement Hamiltonian per embedding dimension.

    Dimension d is read out as the product of Pauli-Z operators on the
    qubits whose bit is set in the reversed binary form of d + 1.
    (n_qubits is kept for interface compatibility; it is not used.)
    """
    hams = []
    for dim in range(dims):
        bits = bin(dim + 1)[-1:1:-1]  # reversed binary digits of dim + 1
        pauli_word = ''.join('Z' + str(pos) + ' '
                             for pos, bit in enumerate(bits) if bit == '1')
        hams.append(Hamiltonian(QubitOperator(pauli_word)))
    return hams
# Hamiltonians for a 5-dimensional embedding
GenerateEmbeddingHamiltonian(5, 5)
```
## 量子版词向量嵌入层
量子版词向量嵌入层结合前面的编码量子线路和待训练量子线路,以及测量哈密顿量,将`num_embedding`个词嵌入为`embedding_dim`维的词向量。这里我们还在量子线路的最开始加上了Hadamard门,将初态制备为均匀叠加态,用以提高量子神经网络的表达能力。
下面,我们定义量子嵌入层,它将返回一个量子线路模拟算子。
```
def QEmbedding(num_embedding, embedding_dim, window, layers, n_threads):
    """Build the quantum word-embedding layer as an MQLayer.

    num_embedding words are encoded on ceil(log2(num_embedding)) qubits;
    measuring embedding_dim Hamiltonians yields the embedded vector.
    """
    n_qubits = int(np.ceil(np.log2(num_embedding)))
    hams = GenerateEmbeddingHamiltonian(embedding_dim, n_qubits)
    circ = Circuit()
    # Start from the uniform superposition (H on every qubit) to improve
    # the expressive power of the circuit.
    circ = UN(H, n_qubits)
    encoder_param_name = []
    ansatz_param_name = []
    # One encoder + ansatz pair per surrounding word (2 * window of them)
    for w in range(2 * window):
        encoder = GenerateEncoderCircuit(n_qubits, 'Encoder_' + str(w))
        ansatz = GenerateAnsatzCircuit(n_qubits, layers, 'Ansatz_' + str(w))
        encoder.no_grad()  # encoder parameters carry data, not trainables
        circ += encoder
        circ += ansatz
        encoder_param_name.extend(encoder.params_name)
        ansatz_param_name.extend(ansatz.params_name)
    # Expectation-with-gradient operator over the full circuit
    grad_ops = Simulator('projectq', circ.n_qubits).get_expectation_with_grad(hams,
                                                                              circ,
                                                                              None,
                                                                              None,
                                                                              encoder_param_name,
                                                                              ansatz_param_name,
                                                                              n_threads)
    return MQLayer(grad_ops)
```
整个训练模型跟经典网络类似,由一个嵌入层和两个全连通层构成,然而此处的嵌入层是由量子神经网络构成。下面定义量子神经网络CBOW。
```
class CBOW(nn.Cell):
    """CBOW model whose embedding layer is a quantum circuit (MQLayer)."""

    def __init__(self, num_embedding, embedding_dim, window, layers, n_threads,
                 hidden_dim):
        super(CBOW, self).__init__()
        self.embedding = QEmbedding(num_embedding, embedding_dim, window,
                                    layers, n_threads)
        self.dense1 = nn.Dense(embedding_dim, hidden_dim)
        self.dense2 = nn.Dense(hidden_dim, num_embedding)
        self.relu = ops.ReLU()

    def construct(self, x):
        # quantum embedding -> hidden layer -> ReLU -> output logits
        return self.dense2(self.relu(self.dense1(self.embedding(x))))
```
下面我们对一个稍长的句子来进行训练。首先定义`LossMonitorWithCollection`用于监督收敛过程,并搜集收敛过程的损失。
```
class LossMonitorWithCollection(LossMonitor):
    """LossMonitor that also records every step loss and the total wall time."""
    def __init__(self, per_print_times=1):
        super(LossMonitorWithCollection, self).__init__(per_print_times)
        self.loss = []  # per-step loss values collected during training
    def begin(self, run_context):
        # Wall-clock start of the whole training run
        self.begin_time = time.time()
    def end(self, run_context):
        self.end_time = time.time()
        print('Total time used: {}'.format(self.end_time - self.begin_time))
    def epoch_begin(self, run_context):
        self.epoch_begin_time = time.time()
    def epoch_end(self, run_context):
        cb_params = run_context.original_args()
        self.epoch_end_time = time.time()
        # Move to a fresh console line after the in-place (\r) progress output
        if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
            print('')
    def step_end(self, run_context):
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs
        # net_outputs may be a (loss, ...) tuple/list or a Tensor; reduce to a scalar
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]
        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = np.mean(loss.asnumpy())
        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
        # Abort on divergence (NaN/Inf loss)
        if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
            raise ValueError("epoch: {} step: {}. Invalid loss, terminating training.".format(
                cb_params.cur_epoch_num, cur_step_in_epoch))
        self.loss.append(loss)
        # Overwrite the current console line with the latest progress
        if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
            print("\repoch: %+3s step: %+3s time: %5.5s, loss is %5.5s" % (cb_params.cur_epoch_num, cur_step_in_epoch, time.time() - self.epoch_begin_time, loss), flush=True, end='')
```
接下来,利用量子版本的`CBOW`来对一个长句进行词嵌入。运行之前请在终端运行`export OMP_NUM_THREADS=4`,将量子模拟器的线程数设置为4个,当所需模拟的量子系统比特数较多时,可设置更多的线程数来提高模拟效率。
```
import mindspore as ms
from mindspore import context
from mindspore import Tensor
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
# Training corpus (runtime string: kept verbatim)
corpus = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells."""
ms.set_seed(42)  # reproducibility
window_size = 2
embedding_dim = 10
hidden_dim = 128
word_dict, sample = GenerateWordDictAndSample(corpus, window=window_size)
train_x, train_y = GenerateTrainData(sample, word_dict)
# Batch the (around, center) pairs without shuffling
train_loader = ds.NumpySlicesDataset({
    "around": train_x,
    "center": train_y
}, shuffle=False).batch(3)
# 3 ansatz layers, 4 simulator threads
net = CBOW(len(word_dict), embedding_dim, window_size, 3, 4, hidden_dim)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
net_opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
loss_monitor = LossMonitorWithCollection(500)
model = Model(net, net_loss, net_opt)
model.train(350, train_loader, callbacks=[loss_monitor], dataset_sink_mode=False)
```
打印收敛过程中的损失函数值:
```
import matplotlib.pyplot as plt
# Loss curve collected by the callback during training
plt.plot(loss_monitor.loss, '.')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.show()
```
通过如下方法打印量子嵌入层的量子线路中的参数:
```
# Trained ansatz parameters of the quantum embedding layer
net.embedding.weight.asnumpy()
```
## 经典版词向量嵌入层
这里我们利用经典的词向量嵌入层来搭建一个经典的CBOW神经网络,并与量子版本进行对比。
首先,搭建经典的CBOW神经网络,其中的参数跟量子版本的类似。
```
class CBOWClassical(nn.Cell):
    """Classical CBOW baseline using an nn.Embedding lookup layer."""

    def __init__(self, num_embedding, embedding_dim, window, hidden_dim):
        super(CBOWClassical, self).__init__()
        # 2 * window surrounding words, each embedded into embedding_dim values
        self.dim = 2 * window * embedding_dim
        self.embedding = nn.Embedding(num_embedding, embedding_dim, True)
        self.dense1 = nn.Dense(self.dim, hidden_dim)
        self.dense2 = nn.Dense(hidden_dim, num_embedding)
        self.relu = ops.ReLU()
        self.reshape = ops.Reshape()

    def construct(self, x):
        # Flatten the per-word embeddings, then run the two-layer classifier
        flat = self.reshape(self.embedding(x), (-1, self.dim))
        return self.dense2(self.relu(self.dense1(flat)))
```
生成适用于经典CBOW神经网络的数据集。
```
# Build integer-label inputs for the classical CBOW: each row of train_x holds
# the dictionary indices of the surrounding words, train_y the center word index.
# (Comprehensions replace the original manual append loops.)
train_x = np.array([[word_dict[word] for word in around]
                    for around, center in sample]).astype(np.int32)
train_y = np.array([word_dict[center] for around, center in sample]).astype(np.int32)
print("train_x shape: ", train_x.shape)
print("train_y shape: ", train_y.shape)
```
我们对经典CBOW网络进行训练。
```
# Same training setup as the quantum model, but with the classical network
# and graph mode (static compilation) on CPU.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
train_loader = ds.NumpySlicesDataset({
    "around": train_x,
    "center": train_y
}, shuffle=False).batch(3)
net = CBOWClassical(len(word_dict), embedding_dim, window_size, hidden_dim)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
net_opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
loss_monitor = LossMonitorWithCollection(500)
model = Model(net, net_loss, net_opt)
model.train(350, train_loader, callbacks=[loss_monitor], dataset_sink_mode=False)
```
打印收敛过程中的损失函数值:
```
import matplotlib.pyplot as plt

# Plot the loss curve of the classical CBOW run for comparison with the
# quantum version above.
plt.plot(loss_monitor.loss, '.')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.show()
```
由上可知,通过量子模拟得到的量子版词嵌入模型也能很好的完成嵌入任务。当数据集大到经典计算机算力难以承受时,量子计算机将能够轻松处理这类问题。
## 参考文献
[1] Tomas Mikolov, Kai Chen, Greg Corrado, Jeffrey Dean. [Efficient Estimation of Word Representations in
Vector Space](https://arxiv.org/pdf/1301.3781.pdf)
| github_jupyter |
```
import numpy as np
import pandas as pd

# Code to read csv file into colaboratory:
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate the Colab user and build an authorized Drive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# The triple-quoted string below is deliberately disabled example code for
# downloading the movie-reviews dataset from Drive.
'''
downloaded = drive.CreateFile({'id':'1q9Yh9GorYkl_xf3O_P4zBbPYBXtTcuWx'})
downloaded.GetContentFile('moviereviews.tsv')
df= pd.read_csv("moviereviews.tsv", sep='\t')
df.head()
'''
```
# Sentiment Analysis
Now that we've seen word vectors we can start to investigate sentiment analysis. The goal is to find commonalities between documents, with the understanding that similarly *combined* vectors should correspond to similar sentiments.
While the scope of sentiment analysis is very broad, we will focus our work in two ways.
### 1. Polarity classification
We won't try to determine if a sentence is objective or subjective, fact or opinion. Rather, we care only if the text expresses a *positive*, *negative* or *neutral* opinion.
### 2. Document level scope
We'll also try to aggregate all of the sentences in a document or paragraph, to arrive at an overall opinion.
### 3. Coarse analysis
We won't try to perform a fine-grained analysis that would determine the degree of positivity/negativity. That is, we're not trying to guess how many stars a reviewer awarded, just whether the review was positive or negative.
## Broad Steps:
* First, consider the text being analyzed. A model trained on paragraph-long movie reviews might not be effective on tweets. Make sure to use an appropriate model for the task at hand.
* Next, decide the type of analysis to perform. In the previous section on text classification we used a bag-of-words technique that considered only single tokens, or *unigrams*. Some rudimentary sentiment analysis models go one step further, and consider two-word combinations, or *bigrams*. In this section, we'd like to work with complete sentences, and for this we're going to import a trained NLTK lexicon called *VADER*.
## NLTK's VADER module | Valence Aware Dictionary and sEntiment Reasoner
VADER is an NLTK module that provides sentiment scores based on words used ("completely" boosts a score, while "slightly" reduces it), on capitalization & punctuation ("GREAT!!!" is stronger than "great."), and negations (words like "isn't" and "doesn't" affect the outcome).
<br>To view the source code visit https://www.nltk.org/_modules/nltk/sentiment/vader.html
**Download the VADER lexicon.** You only need to do this once.
```
import nltk

# One-time download of the VADER lexicon used by SentimentIntensityAnalyzer.
nltk.download('vader_lexicon')
```
<div class="alert alert-danger">NOTE: At the time of this writing there's a <a href='https://github.com/nltk/nltk/issues/2053'>known issue</a> with SentimentIntensityAnalyzer that raises a harmless warning on loading<br>
<tt><font color=black> UserWarning: The twython library has not been installed.<br> Some functionality from the twitter package will not be available.</tt>
This is due to be fixed in an upcoming NLTK release. For now, if you want to avoid it you can (optionally) install the NLTK twitter library with<br>
<tt><font color=black> conda install nltk[twitter]</tt><br>or<br>
<tt><font color=black> pip3 install -U nltk[twitter]</tt></div>
```
# !pip3 install -U nltk[twitter]
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
!pip3 install -U nltk[twitter]
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
```
VADER's `SentimentIntensityAnalyzer()` takes in a string and returns a dictionary of scores in each of four categories:
* negative [0,1]
* neutral [0,1]
* positive [0,1]
* compound *(computed by normalizing the scores above)* [-1,1]
```
# A mildly positive sentence.
a = 'This was a good movie.'
sid.polarity_scores(a)
# Capitalization and "!!!" strengthen the score (see the VADER notes above).
a = 'This was the best, most awesome movie EVER MADE!!!'
sid.polarity_scores(a)
# A strongly negative sentence.
a = 'This was the worst film to ever disgrace the screen.'
sid.polarity_scores(a)
```
## Use VADER to analyze Amazon Reviews
For this exercise we're going to apply `SentimentIntensityAnalyzer` to a dataset of 10,000 Amazon reviews. Like our movie reviews datasets, these are labeled as either "pos" or "neg". At the end we'll determine the accuracy of our sentiment analysis with VADER.
```
import numpy as np
import pandas as pd

# Download the 10,000-review Amazon dataset from Drive (uses the `drive`
# client authorized earlier in the notebook).
downloaded = drive.CreateFile({'id':'1kb-mL5Dl-5VoV-ZREdKqwG_FCWCXO1uj'})
downloaded.GetContentFile('amazonreviews.tsv')
df= pd.read_csv("amazonreviews.tsv", sep='\t')
df.head()
df.shape
# Class balance of the pos/neg labels.
df['label'].value_counts()
```
### Clean the data:
Recall that our moviereviews.tsv file contained empty records. Let's check to see if any exist in amazonreviews.tsv.
```
# REMOVE NaN VALUES AND EMPTY STRINGS:
df.dropna(inplace=True)

# Collect the index of every review that is a string made only of whitespace.
blanks = [index for index, label, review in df.itertuples()
          if type(review) == str and review.isspace()]

df.drop(blanks, inplace=True)
df['label'].value_counts()
blanks  # empty for this dataset
# if blanks had not been empty, df.drop(blanks, inplace=True) above removed them
```
In this case there were no empty records. Good!
## Let's run the first review through VADER
```
# Raw text of the first review.
df.iloc[0]['review']
# Below we are displaying the text as a script which is more readable (not like above)
from IPython.display import Markdown, display
display(Markdown('> '+df['review'][0]))
# VADER scores for the first review, and its ground-truth label.
sid.polarity_scores(df.loc[0]['review'])
df.loc[0]['label']
```
Great! Our first review was labeled "positive", and earned a positive compound score.
## Adding Scores and Labels to the DataFrame
In this next section we'll add columns to the original DataFrame to store polarity_score dictionaries, extracted compound scores, and new "pos/neg" labels derived from the compound score. We'll use this last column to perform an accuracy test.
```
# The lambda applies the VADER polarity scores to each review.
df['scores'] = df['review'].apply(lambda review: sid.polarity_scores(review))
df.head()
# compound is usually useful, so adding that as a column as well
df['compound'] = df['scores'].apply(lambda score_dict: score_dict['compound'])
df.head()
# translating the compound scores and creating a new column:
# compound score >= 0 -> 'pos', otherwise 'neg'
# (note: a compound of exactly 0 is counted as positive here)
df['comp_score'] = df['compound'].apply(lambda score: 'pos' if score >=0 else 'neg')
df.head()
```
## Report on Accuracy
Finally, we'll use scikit-learn to determine how close VADER came to our original 10,000 labels.
```
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# comparing "label" (the ground-truth label) with "comp_score"
# (the label we derived afterwards from the VADER compound score)
accuracy_score(df['label'],df['comp_score'])
print(classification_report(df['label'],df['comp_score']))
# VADER is not good at detecting sarcasm
print(confusion_matrix(df['label'],df['comp_score']))
# 2622 correctly classified as positive
# 434 incorrectly classified as positive
# 2475 incorrectly classified as negative
# 4469 correctly classified as negative
# You can make the confusion matrix less confusing by adding labels:
#from sklearn import metrics
#df = pd.DataFrame(metrics.confusion_matrix(y_test,predictions), index=['negative','positive'], columns=['negative','positive'])
#df
# but here we hadn't split the data
```
This tells us that VADER correctly identified an Amazon review as "positive" or "negative" roughly 71% of the time.
# Sentiment Analysis Project
## Task #1: Perform vector arithmetic on your own words
Write code that evaluates vector arithmetic on your own set of related words. The goal is to come as close to an expected word as possible.
```
!python -m spacy download en_core_web_lg
# !python -m spacy download en_vectors_web_lg
# Import spaCy and load the language library. Remember to use a larger model!
import spacy
nlp = spacy.load('en_core_web_lg')
# Choose the words you wish to compare, and obtain their vectors
word1 = nlp.vocab['wolf'].vector
word2 = nlp.vocab['dog'].vector
word3 = nlp.vocab['cat'].vector
# Import spatial and define a cosine_similarity function
from scipy import spatial
cosine_similarity = lambda x, y: 1 - spatial.distance.cosine(x, y)
# Write an expression for vector arithmetic
# For example: new_vector = word1 - word2 + word3
new_vector = word1 - word2 + word3
# List the top ten closest vectors in the vocabulary to the result of the expression above
computed_similarities = []
for word in nlp.vocab:
if word.has_vector: #not all words have vectors in spacy
if word.is_lower:
if word.is_alpha: # if they are alphabetic
similarity = cosine_similarity(new_vector, word.vector)
computed_similarities.append((word, similarity))
computed_similarities = sorted(computed_similarities, key=lambda item: -item[1]) #in descending indexing
print([w[0].text for w in computed_similarities[:10]])
```
### CHALLENGE: Write a function that takes in 3 strings, performs a-b+c arithmetic, and returns a top-ten result
```
def vector_math(a, b, c):
    """Return the 10 vocabulary words closest to vector(a) - vector(b) + vector(c).

    Only lowercase, alphabetic vocabulary entries that actually carry a
    vector are considered. Relies on the module-level `nlp` model and
    `cosine_similarity` helper defined earlier in the notebook.
    """
    new_vector = nlp.vocab[a].vector - nlp.vocab[b].vector + nlp.vocab[c].vector
    # Flattened the original triple-nested ifs into a single condition.
    computed_similarities = [
        (word, cosine_similarity(new_vector, word.vector))
        for word in nlp.vocab
        if word.has_vector and word.is_lower and word.is_alpha
    ]
    computed_similarities.sort(key=lambda item: item[1], reverse=True)
    return [w[0].text for w in computed_similarities[:10]]

# Test the function on known words:
vector_math('king','man','woman')
```
## Task #2: Perform VADER Sentiment Analysis on your own review
Write code that returns a set of SentimentIntensityAnalyzer polarity scores based on your own written review.
```
# Import SentimentIntensityAnalyzer and create an sid object
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# Write a review as one continuous string (multiple sentences are ok)
my_review = 'This movie portrayed real people, and was based on actual events.'
# Obtain the sid scores for your review
sid.polarity_scores(my_review)
```
### CHALLENGE: Write a function that takes in a review and returns a score of "Positive", "Negative" or "Neutral"
```
def review_rating(string):
    """Map a VADER compound score to 'Positive', 'Negative' or 'Neutral'."""
    compound = sid.polarity_scores(string)['compound']
    if compound > 0:
        return 'Positive'
    if compound < 0:
        return 'Negative'
    return 'Neutral'

# Test the function on your review above:
review_rating(my_review)
```
| github_jupyter |
# T1574.008 - Path Interception by Search Order Hijacking
Adversaries may execute their own malicious payloads by hijacking the search order used to load other programs. Because some programs do not call other programs using the full path, adversaries may place their own file in the directory where the calling program is located, causing the operating system to launch their malicious software at the request of the calling program.
Search order hijacking occurs when an adversary abuses the order in which Windows searches for programs that are not given a path. Unlike [DLL Search Order Hijacking](https://attack.mitre.org/techniques/T1574/001), the search order differs depending on the method that is used to execute the program. (Citation: Microsoft CreateProcess) (Citation: Windows NT Command Shell) (Citation: Microsoft WinExec) However, it is common for Windows to search in the directory of the initiating program before searching through the Windows system directory. An adversary who finds a program vulnerable to search order hijacking (i.e., a program that does not specify the path to an executable) may take advantage of this vulnerability by creating a program named after the improperly specified program and placing it within the initiating program's directory.
For example, "example.exe" runs "cmd.exe" with the command-line argument <code>net user</code>. An adversary may place a program called "net.exe" within the same directory as example.exe, "net.exe" will be run instead of the Windows system utility net. In addition, if an adversary places a program called "net.com" in the same directory as "net.exe", then <code>cmd.exe /C net user</code> will execute "net.com" instead of "net.exe" due to the order of executable extensions defined under PATHEXT. (Citation: Microsoft Environment Property)
Search order hijacking is also a common practice for hijacking DLL loads and is covered in [DLL Search Order Hijacking](https://attack.mitre.org/techniques/T1574/001).
## Atomic Tests:
Currently, no tests are available for this technique.
## Detection
Monitor file creation for files named after partial directories and in locations that may be searched for common processes through the environment variable, or otherwise should not be user writable. Monitor the executing process for process executable paths that are named for partial directories. Monitor file creation for programs that are named after Windows system programs or programs commonly executed without a path (such as "findstr," "net," and "python"). If this activity occurs outside of known administration activity, upgrades, installations, or patches, then it may be suspicious.
Data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as network connections made for Command and Control, learning details about the environment through Discovery, and Lateral Movement.
| github_jupyter |
```
import numpy as np
import pandas as pd

# 5_a.csv: first column presumably holds the actual labels and 'proba' the
# predicted probabilities (custom_metrics below reads columns by position --
# verify against the file).
data = pd.read_csv('5_a.csv')
data
list(data.iloc[:,1])
# Binarize with a 0.5 threshold: probability >= 0.5 -> class 1.
y_predicted = [0 if i<0.5 else 1 for i in list(data['proba']) ]
# Sanity check: was anything predicted as class 0 at all?
0 in y_predicted
# confusion matrix
# (numpy/pandas are re-imported here; harmless but redundant)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
def custom_metrics(data, plot=True):
    """Compute confusion matrix, F1, accuracy and AUC from scratch.

    Parameters
    ----------
    data : pandas.DataFrame
        Column 0: actual binary labels (0/1), column 1: predicted
        probabilities in [0, 1].
    plot : bool, optional
        If True (default), draw the ROC curve with matplotlib.

    Returns
    -------
    dict
        Keys: 'confusion_matrix' ([[tn, fp], [fn, tp]]), 'f1_score',
        'accuracy', 'auc'. (The original returned None; callers that
        ignore the return value are unaffected.)
    """
    y_actual = [int(v) for v in data.iloc[:, 0]]
    probabilities = np.asarray(data.iloc[:, 1], dtype=float)

    # Binarize with a fixed 0.5 threshold: prob >= 0.5 -> 1, else 0.
    y_predicted = [0 if p < 0.5 else 1 for p in probabilities]

    # Confusion-matrix cells:
    # TrueNegative(TN), FalseNegative(FN), TruePositive(TP), FalsePositive(FP)
    tn = fn = tp = fp = 0
    for actual, pred in zip(y_actual, y_predicted):
        if actual == 0 and pred == 0:
            tn += 1
        elif actual == 0 and pred == 1:
            fp += 1
        elif actual == 1 and pred == 1:
            tp += 1
        else:  # actual == 1 and pred == 0
            fn += 1
    confusion_matrix = [[tn, fp],
                        [fn, tp]]
    print(np.array(confusion_matrix))

    # precision = TP / predicted positives, recall = TP / actual positives.
    # (Raises ZeroDivisionError when a class is absent, as the original did.)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = 2 * ((precision * recall) / (precision + recall))
    print('f1-score', f1_score)

    accuracy = (tp + tn) / (tp + tn + fp + fn)
    print('accuracy', accuracy)

    # ROC / AUC: sweep every unique probability as a threshold, in
    # DESCENDING order so that FPR increases monotonically and the
    # trapezoidal rule integrates a positive area.
    # BUG FIXES vs the original version:
    #   * y_predicted was re-binarized in place inside the loop, so every
    #     threshold after the first saw 0/1 values instead of probabilities;
    #   * tpr/fpr were never recomputed per threshold (the 0.5-threshold
    #     tp/fp were reused, producing constant arrays);
    #   * tpr used tp/(tp+fp), which is precision, not the true positive
    #     rate tp/(tp+fn).
    thresholds = np.unique(probabilities)[::-1]
    positives = sum(y_actual)
    negatives = len(y_actual) - positives
    tpr_values = []
    fpr_values = []
    for thresh in thresholds:
        preds = probabilities >= thresh  # predict positive at this threshold
        tp_t = sum(1 for a, p in zip(y_actual, preds) if a == 1 and p)
        fp_t = sum(1 for a, p in zip(y_actual, preds) if a == 0 and p)
        tpr_values.append(tp_t / positives)
        fpr_values.append(fp_t / negatives)
    tpr_array = np.array(tpr_values)
    fpr_array = np.array(fpr_values)
    print('fpr:', fpr_array)
    print('tpr:', tpr_array)

    # np.trapz was renamed np.trapezoid in NumPy 2.0.
    try:
        integrate = np.trapezoid
    except AttributeError:
        integrate = np.trapz
    auc_score = integrate(tpr_array, fpr_array)
    if plot:
        plt.plot(fpr_array, tpr_array)  # ROC convention: FPR on x, TPR on y
        plt.show()
    print('auc score:', auc_score)

    return {'confusion_matrix': confusion_matrix,
            'f1_score': f1_score,
            'accuracy': accuracy,
            'auc': auc_score}
custom_metrics(data)
# --- scratch cells: quick sanity checks of numpy/matplotlib behavior ---
x = np.array([1,2,3,4,4,5,4,4,4,5,5,5,5,4,4,43,3,4,3443,45,6])
np.unique(x)  # sorted unique values
1 == 1.0  # True: Python compares int and float by value
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(np.array([1,2,3,4,5]),np.array([3,4,6,4,3]))
5/2
# trapezoidal integration demo
np.trapz(np.array([3.4,3.6,2.5]),np.array([5.3,6.6,7.6]))
plt.plot(np.array([0.400,0.401,0.402]),np.array([0.401,0.402,0.403]))
```
| github_jupyter |
# Scraping Reddit Data

Using the PRAW library, a wrapper for the Reddit API, everyone can easily scrape data from Reddit or even create a Reddit bot.
```
import praw
```
Before it can be used to scrape data we need to authenticate ourselves. For this we need to create a Reddit instance and provide it with a client_id , client_secret and a user_agent . To create a Reddit application and get your id and secret you need to navigate to [this page](https://www.reddit.com/prefs/apps).
```
# Replace the placeholder strings with your own Reddit app credentials
# (created at https://www.reddit.com/prefs/apps, as described above).
reddit = praw.Reddit(client_id='my_client_id',
                     client_secret='my_client_secret',
                     user_agent='my_user_agent')
```
We can get information or posts from a specifc subreddit using the reddit.subreddit method and passing it a subreddit name.
```
# get 10 hot posts from the MachineLearning subreddit
# (presumably lazy -- the posts are fetched when iterated; verify in PRAW docs)
hot_posts = reddit.subreddit('MachineLearning').hot(limit=10)
```
Now that we scraped 10 posts we can loop through them and print some information.
```
# Print the titles of the posts fetched above.
for post in hot_posts:
    print(post.title)

# get hot posts from all subreddits
hot_posts = reddit.subreddit('all').hot(limit=10)
for post in hot_posts:
    print(post.title)

# get MachineLearning subreddit data
ml_subreddit = reddit.subreddit('MachineLearning')
print(ml_subreddit.description)
```
Because we only have a limited amount of requests per day, it is a good idea to save the scraped data in some kind of variable or file.
```
import pandas as pd

# Persist the scraped submissions locally so we do not spend API quota twice.
ml_subreddit = reddit.subreddit('MachineLearning')
rows = [
    [post.title, post.score, post.id, post.subreddit, post.url,
     post.num_comments, post.selftext, post.created]
    for post in ml_subreddit.hot(limit=10)
]
posts = pd.DataFrame(rows, columns=['title', 'score', 'id', 'subreddit', 'url', 'num_comments', 'body', 'created'])
posts
posts.to_csv('top_ml_subreddit_posts.csv')
```
PRAW also allows us to get information about a specifc post/submission
```
# A submission can be looked up by its full URL...
submission = reddit.submission(url="https://www.reddit.com/r/MapPorn/comments/a3p0uq/an_image_of_gps_tracking_of_multiple_wolves_in/")
# or
# ...or directly by its id.
submission = reddit.submission(id="a3p0uq") #id comes after comments/
# Print each top-level comment (may raise AttributeError on MoreComments
# placeholders -- handled in the next cells).
for top_level_comment in submission.comments:
    print(top_level_comment.body)
```
This will work for some submissions, but for others that have more comments this code will throw an AttributeError saying:
``AttributeError: 'MoreComments' object has no attribute 'body'``
These MoreComments object represent the “load more comments” and “continue this thread” links encountered on the websites, as described in more detail in the comment documentation.
To get rid of the MoreComments objects, we can check the datatype of each comment before printing its body.
```
from praw.models import MoreComments

# Only print real comments; MoreComments placeholders have no .body.
for top_level_comment in submission.comments:
    if not isinstance(top_level_comment, MoreComments):
        print(top_level_comment.body)
```
The below cell is another way of getting rid of the MoreComments objects
```
# replace_more(limit=0) removes all MoreComments placeholders up front,
# so the loop below only ever sees real comments.
submission.comments.replace_more(limit=0)
for top_level_comment in submission.comments:
    print(top_level_comment.body)
```
The above code blocks only got the top-level comments. If we want to get the complete ``CommentForest`` we need to use the ``.list`` method.
```
# limit=None keeps resolving until no MoreComments remain, and .list()
# flattens the whole comment tree (not just the top-level comments).
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
    print(comment.body)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Nutritiousfacts/DS-Unit-2-Regression-Classification/blob/master/module3/Gabe_flomo_assignment_regression_classification_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science, Unit 2: Predictive Modeling
# Regression & Classification, Module 3
## Assignment
We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
But not just for condos in Tribeca...
Instead, predict property sales prices for **One Family Dwellings** (`BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'`) using a subset of the data where the **sale price was more than \\$100 thousand and less than $2 million.**
The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
- [ ] Do exploratory visualizations with Seaborn.
- [ ] Do one-hot encoding of categorical features.
- [ ] Do feature selection with `SelectKBest`.
- [ ] Fit a linear regression model with multiple features.
- [ ] Get mean absolute error for the test set.
- [ ] As always, commit your notebook to your fork of the GitHub repo.
## Stretch Goals
- [ ] Add your own stretch goal(s) !
- [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).
- [ ] Learn more about feature selection:
- ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
- [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
- [mlxtend](http://rasbt.github.io/mlxtend/) library
- scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
- [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way (without an excessive amount of formulas or academic pre-requisites).
(That book is good regardless of whether your cultural worldview is inferential statistics or predictive machine learning)
- [ ] Read Leo Breiman's paper, ["Statistical Modeling: The Two Cultures"](https://projecteuclid.org/download/pdf_1/euclid.ss/1009213726)
- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html):
> Pipeline can be used to chain multiple estimators into one. This is useful as there is often a fixed sequence of steps in processing the data, for example feature selection, normalization and classification. Pipeline serves multiple purposes here:
> - **Convenience and encapsulation.** You only have to call fit and predict once on your data to fit a whole sequence of estimators.
> - **Joint parameter selection.** You can grid search over parameters of all estimators in the pipeline at once.
> - **Safety.** Pipelines help avoid leaking statistics from your test data into the trained model in cross-validation, by ensuring that the same samples are used to train the transformers and predictors.
```
# If you're in Colab...
import os, sys
# Detect Colab by the presence of the google.colab module.
in_colab = 'google.colab' in sys.modules

if in_colab:
    # Install required python packages:
    # category_encoders, version >= 2.0
    # pandas-profiling, version >= 2.0
    # plotly, version >= 4.0
    !pip install --upgrade category_encoders pandas-profiling plotly

    # Pull files from Github repo
    os.chdir('/content')
    !git init .
    !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
    !git pull origin master

    # Change into directory for module
    os.chdir('module3')

# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import pandas as pd
import pandas_profiling

# Read New York City property sales data
df = pd.read_csv('../data/NYC_Citywide_Rolling_Calendar_Sales.csv')

# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# SALE_PRICE was read as strings: strip '$', '-' and ',' then cast to int.
# BUG FIX: regex=False makes the replacements literal. Without it pandas
# treats the pattern as a regex, where '$' is a zero-width end-of-string
# anchor -- literal dollar signs were never removed (and newer pandas warns
# about the regex default).
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$', '', regex=False)
    .str.replace('-', '', regex=False)
    .str.replace(',', '', regex=False)
    .astype(int)
)
df.tail()
# Keep sales between $100k and $2M, per the assignment brief.
df = df.query("SALE_PRICE > 100000 and SALE_PRICE < 2000000")
# Restrict to single-family dwellings.
mask = df['BUILDING_CLASS_CATEGORY'].str.contains("01 ONE FAMILY DWELLINGS")
sub = df[mask]
sub.tail()
sub.columns
# NOTE(review): Index.drop returns a NEW Index and the result is discarded,
# so this line does NOT remove the 'EASE-MENT' column (it is dropped
# explicitly after encoding, later in the notebook).
sub.columns.drop("EASE-MENT")
# Parse sale dates. sub is a slice of df, so this assignment may trigger a
# SettingWithCopyWarning -- presumably intentional here; verify.
sub["SALE_DATE"] = pd.to_datetime(df['SALE_DATE'], infer_datetime_format=True)
sub["SALE_DATE"].describe()
import numpy as np
# Trim price/area outliers by percentile, then require positive square footage.
sub = sub[(sub['SALE_PRICE'] >= np.percentile(sub['SALE_PRICE'], 0.5)) &
          (sub['SALE_PRICE'] <= np.percentile(sub['SALE_PRICE'], 99.5)) &
          (sub['GROSS_SQUARE_FEET'] >= np.percentile(sub['GROSS_SQUARE_FEET'], 0.05)) &
          (sub['GROSS_SQUARE_FEET'] < np.percentile(sub['GROSS_SQUARE_FEET'], 99.95))]
sub = sub.query("GROSS_SQUARE_FEET > 0")
sub["SALE_DATE"].dt.month.value_counts()
# Presumably the Jan+Feb+Mar row counts from value_counts above -- verify.
shape = 907 + 763 + 734
shape
# Train on Jan-Mar 2019, test on April 2019 (per the assignment).
train = sub[sub["SALE_DATE"].dt.month < 4]
test = sub[sub["SALE_DATE"].dt.month == 4]
print(train.shape,test.shape)
# checking to see if they were combined correctly
#assert train.shape[0] == shape
# visualize the data
import plotly.express as px
px.scatter(train, y = "SALE_PRICE", x = "GROSS_SQUARE_FEET",trendline = "ols")
# NOTE(review): both comparisons use '>', so the first is redundant and the
# flag reduces to GROSS_SQUARE_FEET > 4000. If a 500..4000 band was intended,
# the second comparison should be '< 4000' -- confirm intent.
train["Gross_feet_binned"] = (train["GROSS_SQUARE_FEET"] > 500) & (train["GROSS_SQUARE_FEET"] > 4000)
train.groupby("Gross_feet_binned").SALE_PRICE.describe()
# cluster the location
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 20,n_jobs = -1)
train["cluster"] = kmeans.fit_predict(train[["GROSS_SQUARE_FEET","SALE_PRICE"]])
# NOTE(review): fit_predict RE-FITS KMeans on the test rows, so test cluster
# ids are not comparable to the train ids; kmeans.predict(...) would reuse
# the centroids learned on train -- confirm intent.
test["cluster"] = kmeans.fit_predict(test[["GROSS_SQUARE_FEET","SALE_PRICE"]])
#train.columns.drop("Gross_feet_binned")
px.scatter(train, y = "SALE_PRICE", x = "GROSS_SQUARE_FEET",color = "cluster")
train.groupby("cluster").SALE_PRICE.describe()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.catplot(x = "cluster",y = "SALE_PRICE",data = train, kind = "bar", color = "grey");
train.columns
# exploring other data compared to the price
# (only plot categoricals with few levels to keep the charts readable)
for col in sorted(train.columns.drop(["EASE-MENT","SALE_PRICE"])):
    if train[col].nunique() <= 20:
        sns.catplot(x = col ,y = "SALE_PRICE",data = train, kind = "bar", color = "#843B62");
        plt.show()
# Treat the numeric borough codes as categorical strings.
train["BOROUGH"] = train["BOROUGH"].astype(str)
test["BOROUGH"] = test["BOROUGH"].astype(str)
# cardinality for categorical data
train.describe(exclude = "number").T.sort_values(by = "unique")
target = "SALE_PRICE"
numerics = train.select_dtypes(include = "number").columns.drop(target).tolist()
categoricals = train.select_dtypes(exclude = "number").columns.tolist()
# Keep only low-cardinality categoricals so one-hot encoding stays manageable.
low_cardinalality = [col for col in categoricals if train[col].nunique() <= 50]
features = numerics + low_cardinalality
# Exclude the exploratory boolean added earlier.
features.remove("Gross_feet_binned")
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
X_train.shape, y_train.shape, X_test.shape,y_test.shape
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names=True)
# Fit the encoder on train only, then apply the same mapping to test.
X_train_encoded = encoder.fit_transform(X_train)
X_test_encoded = encoder.transform(X_test)
X_train_encoded.head()
X_train_encoded = X_train_encoded.drop(columns='EASE-MENT')
X_test_encoded = X_test_encoded.drop(columns='EASE-MENT')
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler

# Fit the scaler on the TRAINING data only, then apply the same learned
# statistics to the test data.
# BUG FIX: the original called fit_transform on the test set, re-fitting the
# scaler on test statistics -- a form of leakage that also makes train and
# test features inconsistently scaled.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_encoded)
X_test_scaled = scaler.transform(X_test_encoded)

# Try every possible number of features with SelectKBest + linear regression
# and report the test-set MAE for each k.
for k in range(1, len(X_train_encoded.columns) + 1):
    print(f"{k} features")

    selector = SelectKBest(score_func=f_regression, k=k)
    X_train_selected = selector.fit_transform(X_train_scaled, y_train)
    X_test_selected = selector.transform(X_test_scaled)

    model = LinearRegression()
    model.fit(X_train_selected, y_train)
    y_pred = model.predict(X_test_selected)
    mae = mean_absolute_error(y_test, y_pred)
    print(f"Test MAE: ${mae:,.0f}")
    print()
```
| github_jupyter |
```
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Author(s): Kevin P. Murphy (murphyk@gmail.com) and Mahmoud Soliman (mjs@aucegypt.edu)
```
<a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/figures//chapter20_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Figure 20.1:<a name='20.1'></a> <a name='pcaDemo2d'></a>
An illustration of PCA where we project from 2d to 1d. Circles are the original data points, crosses are the reconstructions. The red star is the data mean.
Figure(s) generated by [pcaDemo2d.py](https://github.com/probml/pyprobml/blob/master/scripts/pcaDemo2d.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
# Clone the pyprobml figure scripts and the colab_powertoys helper repo.
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
# Execute the figure script and display its output inline.
pmlt.show_and_run("/pyprobml/scripts/pcaDemo2d.py")
```
## Figure 20.2:<a name='20.2'></a> <a name='pcaDigits'></a>
An illustration of PCA applied to MNIST digits from class 9. Grid points are at the 5, 25, 50, 75, 95 \% quantiles of the data distribution along each dimension. The circled points are the closest projected images to the vertices of the grid. Adapted from Figure 14.23 of <a href='#HastieBook'>[HTF09]</a> .
Figure(s) generated by [pca_digits.py](https://github.com/probml/pyprobml/blob/master/scripts/pca_digits.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
# Clone the pyprobml figure scripts and the colab_powertoys helper repo.
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
# Execute the figure script and display its output inline.
pmlt.show_and_run("/pyprobml/scripts/pca_digits.py")
```
## Figure 20.3:<a name='20.3'></a> <a name='eigenFace'></a>
a) Some randomly chosen $64 \times 64$ pixel images from the Olivetti face database. (b) The mean and the first three PCA components represented as images.
Figure(s) generated by [pcaImageDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaImageDemo.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaImages-faces-images.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaImages-faces-basis.png")
```
## Figure 20.4:<a name='20.4'></a> <a name='pcaProjVar'></a>
Illustration of the variance of the points projected onto different 1d vectors. $v_1$ is the first principal component, which maximizes the variance of the projection. $v_2$ is the second principal component which is direction orthogonal to $v_1$. Finally $v'$ is some other vector in between $v_1$ and $v_2$. Adapted from Figure 8.7 of <a href='#Geron2019'>[Aur19]</a> .
Figure(s) generated by [pca_projected_variance.py](https://github.com/probml/pyprobml/blob/master/scripts/pca_projected_variance.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/pca_projected_variance.py")
```
## Figure 20.5:<a name='20.5'></a> <a name='heightWeightPCA'></a>
Effect of standardization on PCA applied to the height/weight dataset. (Red=female, blue=male.) Left: PCA of raw data. Right: PCA of standardized data.
Figure(s) generated by [pcaStandardization.py](https://github.com/probml/pyprobml/blob/master/scripts/pcaStandardization.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/pcaStandardization.py")
```
## Figure 20.6:<a name='20.6'></a> <a name='pcaErr'></a>
Reconstruction error on MNIST vs number of latent dimensions used by PCA. (a) Training set. (b) Test set.
Figure(s) generated by [pcaOverfitDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaOverfitDemo.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaOverfitReconTrain.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaOverfitReconTest.png")
```
## Figure 20.7:<a name='20.7'></a> <a name='pcaFrac'></a>
(a) Scree plot for the training set, corresponding to Figure 20.6(a). (b) Fraction of variance explained.
Figure(s) generated by [pcaOverfitDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaOverfitDemo.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaOverfitScree.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaOverfitVar.png")
```
## Figure 20.8:<a name='20.8'></a> <a name='pcaProfile'></a>
Profile likelihood corresponding to the PCA model in Figure 20.6(a).
Figure(s) generated by [pcaOverfitDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaOverfitDemo.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaOverfitProfile.png")
```
## Figure 20.9:<a name='20.9'></a> <a name='sprayCan'></a>
Illustration of the FA generative process, where we have $L=1$ latent dimension generating $D=2$ observed dimensions; we assume $\boldsymbol{\Psi} = \sigma^2 \mathbf{I}$. The latent factor has value $z \in \mathbb{R}$, sampled from $p(z)$; this gets mapped to a 2d offset $\boldsymbol{\delta} = z\mathbf{w}$, where $\mathbf{w} \in \mathbb{R}^2$, which gets added to $\boldsymbol{\mu}$ to define a Gaussian $p(\mathbf{x}|z) = \mathcal{N}(\mathbf{x}|\boldsymbol{\mu} + \boldsymbol{\delta}, \sigma^2 \mathbf{I})$. By integrating over $z$, we "slide" this circular Gaussian "spray can" along the principal component axis $\mathbf{w}$, which induces elliptical Gaussian contours in $\mathbf{x}$ space centered on $\boldsymbol{\mu}$. Adapted from Figure 12.9 of <a href='#BishopBook'>[Bis06]</a> .
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/PPCAsprayCan.png")
```
## Figure 20.10:<a name='20.10'></a> <a name='pcaSpring'></a>
Illustration of EM for PCA when $D=2$ and $L=1$. Green stars are the original data points, black circles are their reconstructions. The weight vector $\mathbf w $ is represented by the blue line. (a) We start with a random initial guess of $\mathbf w $. The E step is represented by the orthogonal projections. (b) We update the rod $\mathbf w $ in the M step, keeping the projections onto the rod (black circles) fixed. (c) Another E step. The black circles can 'slide' along the rod, but the rod stays fixed. (d) Another M step. Adapted from Figure 12.12 of <a href='#BishopBook'>[Bis06]</a> .
Figure(s) generated by [pcaEmStepByStep.m](https://github.com/probml/pmtk3/blob/master/demos/pcaEmStepByStep.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaEmStepByStepEstep1.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaEmStepByStepMstep1.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaEmStepByStepEstep2.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/pcaEmStepByStepMstep2.png")
```
## Figure 20.11:<a name='20.11'></a> <a name='mixFAdgm'></a>
Mixture of factor analyzers as a PGM.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/mixFAdgmC.png")
```
## Figure 20.12:<a name='20.12'></a> <a name='ppcaMixNetlab'></a>
Mixture of PPCA models fit to a 2d dataset, using $L=1$ latent dimensions and $K=1$ and $K=10$ mixture components.
Figure(s) generated by [mixPpcaDemoNetlab.m](https://github.com/probml/pmtk3/blob/master/demos/mixPpcaDemoNetlab.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/mixPpcaAnnulus1.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/mixPpcaAnnulus10.png")
```
## Figure 20.13:<a name='20.13'></a> <a name='MFAGANsamples'></a>
Random samples from the MixFA model fit to CelebA. From Figure 4 of <a href='#Richardson2018'>[EY18]</a> . Used with kind permission of Yair Weiss.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/MFAGAN-samples.png")
```
## Figure 20.14:<a name='20.14'></a> <a name='binaryPCA'></a>
(a) 150 synthetic 16-dimensional bit vectors. (b) The 2d embedding learned by binary PCA, fit using variational EM. We have color coded points by the identity of the true "prototype" that generated them. (c) Predicted probability of being on. (d) Thresholded predictions.
Figure(s) generated by [binaryFaDemoTipping.m](https://github.com/probml/pmtk3/blob/master/demos/binaryFaDemoTipping.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/binaryPCAinput.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/binaryPCAembedding.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/binaryPCApostpred.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/binaryPCArecon.png")
```
## Figure 20.15:<a name='20.15'></a> <a name='PLS'></a>
Gaussian latent factor models for paired data. (a) Supervised PCA. (b) Partial least squares.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/eSPCAxy.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ePLSxy.png")
```
## Figure 20.16:<a name='20.16'></a> <a name='CCA'></a>
Canonical correlation analysis as a PGM.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/eCCAxy.png")
```
## Figure 20.17:<a name='20.17'></a> <a name='autoencoder'></a>
An autoencoder with one hidden layer.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/autoencoder.png")
```
## Figure 20.18:<a name='20.18'></a> <a name='aeFashion'></a>
Results of applying an autoencoder to the Fashion MNIST data. Top row are first 5 images from validation set. Bottom row are reconstructions. (a) MLP model (trained for 20 epochs). The encoder is an MLP with architecture 784-100-30. The decoder is the mirror image of this. (b) CNN model (trained for 5 epochs). The encoder is a CNN model with architecture Conv2D(16, 3x3, same, selu), MaxPool2D(2x2), Conv2D(32, 3x3, same, selu), MaxPool2D(2x2), Conv2D(64, 3x3, same, selu), MaxPool2D(2x2). The decoder is the mirror image of this, using transposed convolution and without the max pooling layers. Adapted from Figure 17.4 of <a href='#Geron2019'>[Aur19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/ae_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/ae_fashion_mlp_recon.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae_fashion_cnn_recon.png")
```
## Figure 20.19:<a name='20.19'></a> <a name='aeFashionTSNE'></a>
tSNE plot of the first 2 latent dimensions of the Fashion MNIST validation set computed using an MLP-based autoencoder. Adapted from Figure 17.5 of <a href='#Geron2019'>[Aur19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/ae_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-mlp-fashion-tsne.png")
```
## Figure 20.20:<a name='20.20'></a> <a name='DAEfashion'></a>
Denoising autoencoder (MLP architecture) applied to some noisy Fashion MNIST images from the validation set. (a) Gaussian noise. (b) Bernoulli dropout noise. Top row: input. Bottom row: output Adapted from Figure 17.9 of <a href='#Geron2019'>[Aur19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/ae_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-denoising-gaussian.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-denoising-dropout.png")
```
## Figure 20.21:<a name='20.21'></a> <a name='DAEfield'></a>
The residual error from a DAE, $\mathbf{e}(\mathbf{x}) = r(\tilde{\mathbf{x}}) - \mathbf{x}$, can learn a vector field corresponding to the score function. Arrows point towards higher probability regions. The length of the arrow is proportional to $||\mathbf{e}(\mathbf{x})||$, so points near the 1d data manifold (represented by the curved line) have smaller arrows. From Figure 5 of <a href='#Alain2014'>[GY14]</a> . Used with kind permission of Guillaume Alain.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/DAE.png")
```
## Figure 20.22:<a name='20.22'></a> <a name='sparseAE'></a>
Neuron activity (in the bottleneck layer) for an autoencoder applied to Fashion MNIST. We show results for three models, with different kinds of sparsity penalty: no penalty (left column), $\ell _1$ penalty (middle column), KL penalty (right column). Top row: Heatmap of 300 neuron activations (columns) across 100 examples (rows). Middle row: Histogram of activation levels derived from this heatmap. Bottom row: Histogram of the mean activation per neuron, averaged over all examples in the validation set. Adapted from Figure 17.11 of <a href='#Geron2019'>[Aur19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/ae_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-noreg-heatmap.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-L1reg-heatmap.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-KLreg-heatmap.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-noreg-act.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-L1reg-act.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-KLreg-act.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-noreg-neurons.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-L1reg-neurons.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-sparse-KLreg-neurons.png")
```
## Figure 20.23:<a name='20.23'></a> <a name='vaeSchematic'></a>
Schematic illustration of a VAE. From a figure from http://krasserm.github.io/2018/07/27/dfc-vae/ . Used with kind permission of Martin Krasser.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/vae-krasser.png")
```
## Figure 20.24:<a name='20.24'></a> <a name='VAEcelebaRecon'></a>
Comparison of reconstruction abilities of an autoencoder and VAE. Top row: Original images. Middle row: Reconstructions from a VAE. Bottom row: Reconstructions from an AE. We see that the VAE reconstructions (middle) are blurrier. Both models have the same shallow convolutional architecture (3 hidden layers, 200 latents), and are trained on identical data (20k images of size $64 \times 64$ extracted from CelebA) for the same number of epochs (20).
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/vae_celeba_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-celeba-orig.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/vae-celeba-recon.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-celeba-recon.png")
```
## Figure 20.25:<a name='20.25'></a> <a name='VAEcelebaSamples'></a>
Unconditional samples from a VAE (top row) or AE (bottom row) trained on CelebA. Both models have the same structure and both are trained for 20 epochs.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/vae_celeba_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/vae-celeba-samples.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/ae-celeba-samples.png")
```
## Figure 20.26:<a name='20.26'></a> <a name='VAEcelebaInterpGender'></a>
Interpolation between two real images (first and last columns) in the latent space of a VAE. Adapted from Figure 3.22 of <a href='#Foster2019'>[Dav19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/vae_celeba_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/vae-celeba-interp-gender.png")
```
## Figure 20.27:<a name='20.27'></a> <a name='VAEcelebaAddGlasses'></a>
Adding or removing the "sunglasses" vector to an image using a VAE. The first column is an input image, with embedding $\mathbf{z}$. Subsequent columns show the decoding of $\mathbf{z} + s\boldsymbol{\Delta}$, where $s \in \{-4,-3,-2,-1,0,1,2,3,4\}$ and $\boldsymbol{\Delta} = \bar{\mathbf{z}}^{+} - \bar{\mathbf{z}}^{-}$ is the difference in the average embeddings of images of people with or without sunglasses. Adapted from Figure 3.21 of <a href='#Foster2019'>[Dav19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/vae_celeba_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/vae-celeba-glasses-scale.png")
```
## Figure 20.28:<a name='20.28'></a> <a name='tangentSpace'></a>
Illustration of the tangent space and tangent vectors at two different points on a 2d curved manifold. From Figure 1 of <a href='#Bronstein2017'>[MM+17]</a> . Used with kind permission of Michael Bronstein.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/tangentSpace.png")
```
## Figure 20.29:<a name='20.29'></a> <a name='manifold-6-rotated'></a>
Illustration of the image manifold. (a) An image of the digit 6 from the USPS dataset, of size $64 \times 57 = 3,648$. (b) A random sample from the space $\{0,1\}^{3648}$ reshaped as an image. (c) A dataset created by rotating the original image by one degree 360 times. We project this data onto its first two principal components, to reveal the underlying 2d circular manifold. From Figure 1 of <a href='#Lawrence2012'>[Nei12]</a> . Used with kind permission of Neil Lawrence.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/manifold-6-original.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/manifold-6-rnd.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/manifold-6-rotated.png")
```
## Figure 20.30:<a name='20.30'></a> <a name='manifoldData'></a>
Illustration of some data generated from low-dimensional manifolds. (a) The 2d Swiss-roll manifold embedded into 3d.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.31:<a name='20.31'></a> <a name='metricMDS'></a>
Metric MDS applied to (a) Swiss roll.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.32:<a name='20.32'></a> <a name='KNNgraph'></a>
(a) If we measure distances along the manifold, we find $d(1,6) > d(1,4)$, whereas if we measure in ambient space, we find $d(1,6) < d(1,4)$. The plot at the bottom shows the underlying 1d manifold. (b) The $K$-nearest neighbors graph for some datapoints; the red path is the shortest distance between A and B on this graph. From <a href='#HintonEmbedding'>[Hin13]</a> . Used with kind permission of Geoff Hinton.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/hinton-isomap1.png")
pmlt.show_image("/pyprobml/notebooks/figures/images/hinton-isomap2.png")
```
## Figure 20.33:<a name='20.33'></a> <a name='isomap'></a>
Isomap applied to (a) Swiss roll.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.34:<a name='20.34'></a> <a name='isomapNoisy'></a>
(a) Noisy version of Swiss roll data. We perturb each point by adding $\mathcal N (0, 0.5^2)$ noise. (b) Results of Isomap applied to this data.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
```
## Figure 20.35:<a name='20.35'></a> <a name='kpcaScholkopf'></a>
Visualization of the first 8 kernel principal component basis functions derived from some 2d data. We use an RBF kernel with $\sigma ^2=0.1$.
Figure(s) generated by [kpcaScholkopf.m](https://github.com/probml/pmtk3/blob/master/demos/kpcaScholkopf.m)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/kpcaScholkopfNoShade.png")
```
## Figure 20.36:<a name='20.36'></a> <a name='kPCA'></a>
Kernel PCA applied to (a) Swiss roll.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.37:<a name='20.37'></a> <a name='LLE'></a>
LLE applied to (a) Swiss roll.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.38:<a name='20.38'></a> <a name='eigenmaps'></a>
Laplacian eigenmaps applied to (a) Swiss roll.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.39:<a name='20.39'></a> <a name='graphLaplacian'></a>
Illustration of the Laplacian matrix derived from an undirected graph. From https://en.wikipedia.org/wiki/Laplacian_matrix . Used with kind permission of Wikipedia author AzaToth.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/graphLaplacian.png")
```
## Figure 20.40:<a name='20.40'></a> <a name='graphFun'></a>
Illustration of a (positive) function defined on a graph. From Figure 1 of <a href='#Shuman2013'>[DI+13]</a> . Used with kind permission of Pascal Frossard.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/graphFun.png")
```
## Figure 20.41:<a name='20.41'></a> <a name='tSNE'></a>
tSNE applied to (a) Swiss roll.
Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/manifold_swiss_sklearn.py")
pmlt.show_and_run("/pyprobml/scripts/manifold_digits_sklearn.py")
```
## Figure 20.42:<a name='20.42'></a> <a name='tsneWattenberg'></a>
Illustration of the effect of changing the perplexity parameter when t-SNE is applied to some 2d data. From <a href='#Wattenberg2016how'>[MFI16]</a> . See http://distill.pub/2016/misread-tsne for an animated version of these figures. Used with kind permission of Martin Wattenberg.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_image("/pyprobml/notebooks/figures/images/tSNE-wattenberg0.png.png")
```
## References:
<a name='Geron2019'>[Aur19]</a> G. Aurélien "Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques for Building Intelligent Systems (2nd edition)". (2019).
<a name='BishopBook'>[Bis06]</a> C. Bishop "Pattern recognition and machine learning". (2006).
<a name='Shuman2013'>[DI+13]</a> S. DI, N. SK, F. P, O. A and V. P. "The emerging field of signal processing on graphs: Extending high-dimensional data analysis to networks and other irregular domains". In: IEEE Signal Process. Mag. (2013).
<a name='Foster2019'>[Dav19]</a> F. David "Generative Deep Learning: Teaching Machines to Paint, Write, Compose, and Play". (2019).
<a name='Richardson2018'>[EY18]</a> R. Eitan and W. Yair. "On GANs and GMMs". (2018).
<a name='Alain2014'>[GY14]</a> A. Guillaume and B. Yoshua. "What Regularized Auto-Encoders Learn from the Data-Generating Distribution". In: jmlr (2014).
<a name='HastieBook'>[HTF09]</a> T. Hastie, R. Tibshirani and J. Friedman. "The Elements of Statistical Learning". (2009).
<a name='HintonEmbedding'>[Hin13]</a> G. Hinton "CSC 2535 Lecture 11: Non-linear dimensionality reduction". (2013).
<a name='Wattenberg2016how'>[MFI16]</a> W. Martin, V. Fernanda and J. Ian. "How to Use t-SNE Effectively". In: Distill (2016).
<a name='Bronstein2017'>[MM+17]</a> B. MM, B. J, L. Y, S. A and V. P. "Geometric Deep Learning: Going beyond Euclidean data". In: IEEE Signal Process. Mag. (2017).
<a name='Lawrence2012'>[Nei12]</a> L. Neil D. "A Unifying Probabilistic Perspective for Spectral Dimensionality Reduction: Insights and New Models". In: jmlr (2012).
| github_jupyter |
```
%matplotlib inline
```
배포를 위한 비전 트랜스포머(Vision Transformer) 모델 최적화하기
=================================================================
Authors : `Jeff Tang <https://github.com/jeffxtang>`_, `Geeta Chauhan <https://github.com/gchauhan/>`_
번역 : `김태영 <https://github.com/Taeyoung96/>`_
비전 트랜스포머(Vision Transformer)는 자연어 처리 분야에서 소개된
최고 수준의 결과를 달성한 최신의 어텐션 기반(attention-based) 트랜스포머 모델을
컴퓨터 비전 분야에 적용을 한 모델입니다.
FaceBook에서 발표한 Data-efficient Image Transformers는 `DeiT <https://ai.facebook.com/blog/data-efficient-image-transformers-a-promising-new-technique-for-image-classification>`_
이미지 분류를 위해 ImageNet 데이터셋을 통해 훈련된
비전 트랜스포머 모델입니다.
이번 튜토리얼에서는, DeiT가 무엇인지 그리고 어떻게 사용하는지 다룰 것입니다.
그 다음 스크립팅, 양자화, 최적화, 그리고 iOS와 안드로이드 앱 안에서
모델을 사용하는 전체적인 단계를 수행해 볼 것입니다.
또한, 양자화와 최적화가 된 모델과 양자화와 최적화가 되지 않은 모델을 비교해 볼 것이며,
단계를 수행해 가면서 양자화와 최적화를 적용한 모델이 얼마나 이점을 가지는지 볼 것입니다.
DeiT란 무엇인가
--------------------
합성곱 신경망(CNNs)은 2012년 딥러닝이 시작된 이후
이미지 분류를 수행할 때 주요한 모델이었습니다. 그러나 합성곱 신경망은 일반적으로
최첨단의 결과를 달성하기 위해 훈련에 수억 개의 이미지가 필요했습니다.
DeiT는 훈련에 더 적은 데이터와 컴퓨팅 자원을 필요로 하는 비전 트랜스포머 모델이며,
최신 CNN 모델과 이미지 분류를 수행하는데 경쟁을 합니다.
이는 DeiT의 두 가지 주요 구성 요소에 의해 가능하게 되었습니다.
- 훨씬 더 큰 데이터 세트에 대한 훈련을 시뮬레이션하는 데이터 증강(augmentation)
- 트랜스포머 네트워크에 CNN의 출력값을 그대로 증류(distillation)하여 학습할 수 있도록 하는 기법
DeiT는 제한된 데이터와 자원을 활용하여 컴퓨터 비전 태스크(task)에 트랜스포머 모델을
성공적으로 적용할 수 있음을 보여줍니다.
DeiT의 좀 더 자세한 내용을 원한다면, `저장소 <https://github.com/facebookresearch/deit>`_
와 `논문 <https://arxiv.org/abs/2012.12877>`_ 을 참고하시길 바랍니다.
DeiT를 활용한 이미지 분류
-------------------------------
DeiT를 사용하여 이미지를 분류하는 방법에 대한 자세한 정보는 DeiT 저장소에 README를 참고하시길 바랍니다.
빠른 테스트를 위해서, 먼저 필요한 패키지들을
설치합니다:
pip install torch torchvision timm pandas requests
Google Colab에서는 아래와 같이 실행합니다:
```
# !pip install timm pandas requests
```
그런 다음 아래 스크립트를 실행합니다:
```
# Quick DeiT sanity check: load a pretrained model from torch.hub and
# classify a single image downloaded over HTTP.
from PIL import Image
import torch
import timm
import requests
import torchvision.transforms as transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

print(torch.__version__)
# PyTorch version should be 1.8.0.

model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
model.eval()  # inference mode (disables dropout etc.)

# Standard ImageNet eval preprocessing: resize, center-crop to 224x224,
# convert to tensor, normalize with ImageNet statistics.
transform = transforms.Compose([
    transforms.Resize(256, interpolation=3),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
])

# Fetch a sample image and add a leading batch dimension before inference.
img = Image.open(requests.get("https://raw.githubusercontent.com/pytorch/ios-demo-app/master/HelloWorld/HelloWorld/HelloWorld/image.png", stream=True).raw)
img = transform(img)[None,]
out = model(img)
clsidx = torch.argmax(out)  # index of the highest-scoring ImageNet class
print(clsidx.item())
```
ImageNet 목록에 따라 `라벨(labels) 파일 <https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a>`_
클래스 인덱스의 출력은 269여야 하며, 이는 ‘timber wolf, grey wolf, gray wolf, Canis lupus’에 매핑됩니다.
이제 DeiT 모델을 사용하여 이미지들을 분류할 수 있음을 확인했습니다.
iOS 및 Android 앱에서 실행할 수 있도록 모델을 수정하는 방법을 살펴보겠습니다.
DeiT 스크립팅
----------------------
모바일에서 이 모델을 사용하려면, 우리는 첫번째로 모델 스크립팅이 필요합니다.
전체적인 개요는 `스크립트 그리고 최적화 레시피 <https://tutorials.pytorch.kr/recipes/script_optimized.html>`_
에서 확인할 수 있습니다. 아래 코드를 실행하여 이전 단계에서 사용한 DeiT 모델을
모바일에서 실행할 수 있는 TorchScript 형식으로 변환합니다.
```
model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
model.eval()
scripted_model = torch.jit.script(model)
scripted_model.save("fbdeit_scripted.pt")
```
약 346MB 크기의 스크립팅된 모델 파일 fbdeit_scripted.pt가 생성됩니다.
DeiT 양자화
---------------------
추론 정확도를 거의 동일하게 유지하면서 훈련된 모델 크기를 크게 줄이기 위해
모델에 양자화를 적용할 수 있습니다.
DeiT에서 사용된 트랜스포머 모델 덕분에,
모델에 동적 양자화를 쉽게 적용할 수 있습니다.
왜냐하면 동적 양자화는 LSTM 모델과 트랜스포머 모델에서 가장 잘 적용되기 때문입니다.
(자세한 내용은 `여기 <https://pytorch.org/docs/stable/quantization.html?highlight=quantization#dynamic-quantization>`_
를 참고하세요.)
아래의 코드를 실행시켜 봅시다.
```
# Use 'fbgemm' for server inference and 'qnnpack' for mobile inference.
backend = "fbgemm" # replaced with qnnpack, which caused slower inference of the quantized model in this notebook
model.qconfig = torch.quantization.get_default_qconfig(backend)
torch.backends.quantized.engine = backend

# Dynamic quantization: only the nn.Linear layers are converted to int8;
# activations are quantized on the fly at inference time.
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
# Script the quantized model so it can run on mobile, then save it to disk.
scripted_quantized_model = torch.jit.script(quantized_model)
scripted_quantized_model.save("fbdeit_scripted_quantized.pt")
```
fbdeit_quantized_scripted.pt 모델의 스크립팅과 양자화가 적용된 버전이 만들어졌습니다.
모델의 크기는 단지 89MB 입니다.
양자화가 적용되지 않은 모델의 크기인 346MB보다 74%나 감소했습니다!
동일한 추론 결과를 만들기 위해 ``scripted_quantized_model``을
사용해 봅시다.
```
# Verify the quantized model predicts the same class as the original model.
out = scripted_quantized_model(img)
clsidx = torch.argmax(out)
print(clsidx.item())
# The same result, 269, should be printed.
```
DeiT 최적화
---------------------
모바일에 스크립트 되고 양자화된 모델을 사용하기 위한
마지막 단계는 최적화입니다.
```
from torch.utils.mobile_optimizer import optimize_for_mobile
optimized_scripted_quantized_model = optimize_for_mobile(scripted_quantized_model)
optimized_scripted_quantized_model.save("fbdeit_optimized_scripted_quantized.pt")
```
생성된 fbdeit_optimized_scripted_quantized.pt 파일은
양자화되고 스크립트되지만 최적화되지 않은 모델과 크기가 거의 같습니다.
추론 결과는 동일하게 유지됩니다.
```
out = optimized_scripted_quantized_model(img)
clsidx = torch.argmax(out)
print(clsidx.item())
# 다시 한번, 동일한 출력 결과인 269가 출력 되어야 합니다.
```
라이트 인터프리터(Lite interpreter) 사용
-----------------------------------------
라이트 인터프리터를 사용하면 얼마나 모델의 사이즈가 작아지고, 추론 시간이 짧아지는지
결과를 확인해 봅시다. 이제 좀 더 가벼운 버전의 모델을 만들어 봅시다.
```
optimized_scripted_quantized_model._save_for_lite_interpreter("fbdeit_optimized_scripted_quantized_lite.ptl")
ptl = torch.jit.load("fbdeit_optimized_scripted_quantized_lite.ptl")
```
가벼운 모델의 크기는 그렇지 않은 버전의 모델 크기와 비슷하지만,
모바일에서 가벼운 버전을 실행하면 추론 속도가 빨라질 것으로 예상됩니다.
추론 속도 비교
---------------------------
네 가지 모델(원본 모델, 스크립트된 모델, 스크립트와 양자화를 적용한 모델,
스크립트와 양자화를 적용한 후 최적화한 모델)의 추론 속도가 어떻게 다른지 확인해 봅시다.
아래의 코드를 실행해 봅시다.
```
# Measure CPU inference latency of each model variant on the same input
# using the autograd profiler.
with torch.autograd.profiler.profile(use_cuda=False) as prof1:
    out = model(img)
with torch.autograd.profiler.profile(use_cuda=False) as prof2:
    out = scripted_model(img)
with torch.autograd.profiler.profile(use_cuda=False) as prof3:
    out = scripted_quantized_model(img)
with torch.autograd.profiler.profile(use_cuda=False) as prof4:
    out = optimized_scripted_quantized_model(img)
with torch.autograd.profiler.profile(use_cuda=False) as prof5:
    out = ptl(img)

# self_cpu_time_total is reported in microseconds; divide by 1000 for ms.
print("original model: {:.2f}ms".format(prof1.self_cpu_time_total/1000))
print("scripted model: {:.2f}ms".format(prof2.self_cpu_time_total/1000))
print("scripted & quantized model: {:.2f}ms".format(prof3.self_cpu_time_total/1000))
print("scripted & quantized & optimized model: {:.2f}ms".format(prof4.self_cpu_time_total/1000))
print("lite model: {:.2f}ms".format(prof5.self_cpu_time_total/1000))
```
Google Colab에서 실행 시킨 결과는 다음과 같습니다.
::
original model: 1236.69ms
scripted model: 1226.72ms
scripted & quantized model: 593.19ms
scripted & quantized & optimized model: 598.01ms
lite model: 600.72ms
다음 결과는 각 모델이 소요한 추론 시간과
원본 모델에 대한 각 모델의 감소율을 요약한 것입니다.
```
import pandas as pd
import numpy as np
df = pd.DataFrame({'Model': ['original model','scripted model', 'scripted & quantized model', 'scripted & quantized & optimized model', 'lite model']})
df = pd.concat([df, pd.DataFrame([
["{:.2f}ms".format(prof1.self_cpu_time_total/1000), "0%"],
["{:.2f}ms".format(prof2.self_cpu_time_total/1000),
"{:.2f}%".format((prof1.self_cpu_time_total-prof2.self_cpu_time_total)/prof1.self_cpu_time_total*100)],
["{:.2f}ms".format(prof3.self_cpu_time_total/1000),
"{:.2f}%".format((prof1.self_cpu_time_total-prof3.self_cpu_time_total)/prof1.self_cpu_time_total*100)],
["{:.2f}ms".format(prof4.self_cpu_time_total/1000),
"{:.2f}%".format((prof1.self_cpu_time_total-prof4.self_cpu_time_total)/prof1.self_cpu_time_total*100)],
["{:.2f}ms".format(prof5.self_cpu_time_total/1000),
"{:.2f}%".format((prof1.self_cpu_time_total-prof5.self_cpu_time_total)/prof1.self_cpu_time_total*100)]],
columns=['Inference Time', 'Reduction'])], axis=1)
print(df)
"""
Model Inference Time Reduction
0 original model 1236.69ms 0%
1 scripted model 1226.72ms 0.81%
2 scripted & quantized model 593.19ms 52.03%
3 scripted & quantized & optimized model 598.01ms 51.64%
4 lite model 600.72ms 51.43%
"""
```
더 읽을거리
~~~~~~~~~~~~~~~~~
- `Facebook Data-efficient Image Transformers <https://ai.facebook.com/blog/data-efficient-image-transformers-a-promising-new-technique-for-image-classification>`__
- `Vision Transformer with ImageNet and MNIST on iOS <https://github.com/pytorch/ios-demo-app/tree/master/ViT4MNIST>`__
- `Vision Transformer with ImageNet and MNIST on Android <https://github.com/pytorch/android-demo-app/tree/master/ViT4MNIST>`__
| github_jupyter |
```
!cp drive/My\ Drive/time-series-analysis/london_bike_sharing_dataset.csv .
```
### Importing libraries
```
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import seaborn as sns
from matplotlib import rc
from pylab import rcParams
rcParams['figure.figsize'] = 22, 6
```
### Load data
```
df = pd.read_csv('london_bike_sharing_dataset.csv',parse_dates=['timestamp'],index_col='timestamp')
df.head()
```
#### Get a copy of the data
```
df_copy = df.copy()
```
## Exploratory data analysis
### Extracting extra features from timestamps
```
# Derive calendar features from the DatetimeIndex so the model can pick up
# daily, weekly, and monthly seasonality in bike-share demand.
df['hour'] = df.index.hour                # 0-23
df['day_of_week'] = df.index.dayofweek    # 0 = Monday ... 6 = Sunday
df['day_of_month'] = df.index.day         # 1-31
df['month'] = df.index.month              # 1-12
df.head()
```
### total numbers of bike shared during the period
```
sns.lineplot(x=df.index, y=df.cnt);
```
### total numbers of bike shared during each month
```
df_by_month = df.resample('M').sum()
sns.lineplot(x=df_by_month.index, y='cnt', data=df_by_month, color='b');
```
### total numbers of bike shared in each hour in comparison with holidays
```
sns.pointplot(x='hour',y='cnt', data=df, hue='is_holiday');
```
### total numbers of bike shared during each day of the week
```
sns.pointplot(x='day_of_week',y='cnt', data=df, color='b');
```
## Splitting train & test
```
# Chronological 90/10 split: earlier rows train, later rows test.
# No shuffling, as required for time-series forecasting.
train_size = int(len(df) * 0.9)  # fixed: was len(df_), an undefined name (NameError)
test_size = len(df) - train_size
train, test = df.iloc[:train_size], df.iloc[train_size:]
print(train.shape, test.shape)
```
## Feature scaling
```
from sklearn.preprocessing import RobustScaler

# Silence SettingWithCopyWarning: train/test are iloc slices of df, and we
# deliberately write scaled values back into them below.
pd.options.mode.chained_assignment = None

# Continuous weather features to scale alongside the target count.
f_columns = ['t1', 't2', 'hum', 'wind_speed']

f_transformer = RobustScaler()
cnt_transformer = RobustScaler()

# Fit scalers on the TRAINING split only, to avoid leaking test statistics.
f_transformer = f_transformer.fit(train[f_columns].to_numpy())
cnt_transformer = cnt_transformer.fit(train[['cnt']])

# Apply the train-fitted scalers to both splits in place.
train.loc[:, f_columns] = f_transformer.transform(train[f_columns].to_numpy())
train['cnt'] = cnt_transformer.transform(train[['cnt']])
test.loc[:, f_columns] = f_transformer.transform(test[f_columns].to_numpy())
test['cnt'] = cnt_transformer.transform(test[['cnt']])
```
### Converting the data to a time series format
```
def to_sequence(X, y, time_steps=1):
    """Slice a feature frame and target series into supervised windows.

    Each sample is a window of ``time_steps`` consecutive rows of ``X``;
    its label is the ``y`` value immediately following that window.

    Args:
        X: DataFrame of input features, one row per time step.
        y: Series of target values aligned with ``X``.
        time_steps (int): length of each input window.

    Returns:
        tuple: (windows, targets) as numpy arrays with shapes
        (n_windows, time_steps, n_features) and (n_windows,).
    """
    n_windows = len(X) - time_steps
    windows = [X.iloc[start:start + time_steps].to_numpy()
               for start in range(n_windows)]
    targets = [y.iloc[start + time_steps] for start in range(n_windows)]
    return np.asarray(windows), np.asarray(targets)
TIMESTEPS = 24
x_train, y_train = to_sequence(train, train['cnt'], TIMESTEPS)
x_test, y_test = to_sequence(test, test['cnt'], TIMESTEPS)
print(f"X_train shape is {x_train.shape}, and y_train shape is {y_train.shape}")
```
## Defining a model
```
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Bidirectional, Dense

# Bidirectional LSTM regressor: one recurrent layer over the
# (time_steps, n_features) windows, dropout for regularization, and a
# single linear unit predicting the (scaled) bike-share count.
model = Sequential()
model.add(Bidirectional(LSTM(units=128), input_shape=(x_train.shape[1], x_train.shape[2])))
model.add(Dropout(rate=0.3))
model.add(Dense(1))
# Fixed: metrics=['acc'] (classification accuracy) is meaningless for a
# continuous regression target; track mean absolute error instead.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()
```
### Fitting the model on data
```
history = model.fit(x_train, y_train, batch_size=16, validation_split=0.1, epochs=100, shuffle=False)
```
### Model loss visualization
```
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.legend();
```
### Model prediction
```
y_pred = model.predict(x_test)
# Undo the RobustScaler so predictions/targets are back in raw bike counts.
# y_test/y_train are 1-D, so they are reshaped before inverse_transform;
# y_pred is already 2-D ((n, 1)) coming out of Dense(1), so no reshape.
# NOTE(review): reshape(1, -1) yields a (1, n) row rather than the scaler's
# conventional (n, 1) column; with a single fitted feature the scalar
# center/scale broadcasts so the values come out right — verify on the
# installed sklearn version, or use reshape(-1, 1).
y_test_inv = cnt_transformer.inverse_transform(y_test.reshape(1,-1))
y_train_inv = cnt_transformer.inverse_transform(y_train.reshape(1,-1))
y_pred_inv = cnt_transformer.inverse_transform(y_pred)
```
### Model prediction visualization
```
plt.plot(y_test_inv.flatten(), marker='.', label='True')
plt.plot(y_pred_inv, marker='.', label='Prediction')
plt.legend();
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3_ModelFitting/W1D3_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <a href="https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D3_ModelFitting/W1D3_Tutorial4.ipynb" target="_parent"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open in Kaggle"/></a>
# Tutorial 4: Multiple linear regression and polynomial regression
**Week 1, Day 3: Model Fitting**
**By Neuromatch Academy**
**Content creators**: Pierre-Étienne Fiquet, Anqi Wu, Alex Hyafil with help from Byron Galbraith, Ella Batty
**Content reviewers**: Lina Teichmann, Saeed Salehi, Patrick Mineault, Michael Waskom
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
---
# Tutorial Objectives
*Estimated timing of tutorial: 35 minutes*
This is Tutorial 4 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).
In this tutorial, we will generalize the regression model to incorporate multiple features.
- Learn how to structure inputs for regression using the 'Design Matrix'
- Generalize the MSE for multiple features using the ordinary least squares estimator
- Visualize data and model fit in multiple dimensions
- Fit polynomial regression models of different complexity
- Plot and evaluate the polynomial regression fits
```
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/2mkq4/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# @title Video 1: Multiple Linear Regression and Polynomial Regression
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV11Z4y1u7cf", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="d4nfTki6Ejc", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
---
# Setup
```
# Imports
import numpy as np
import matplotlib.pyplot as plt
#@title Figure Settings
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Plotting Functions
def evaluate_fits(order_list, mse_list):
    """Compare the quality of multiple polynomial fits
    by plotting their MSE values.

    Draws one bar per polynomial order, with bar height giving that
    fit's mean squared error.

    Args:
      order_list (list): list of the order of polynomials to be compared
      mse_list (list): list of the MSE values for the corresponding polynomial fit
    """
    figure, axes = plt.subplots()
    axes.bar(order_list, mse_list)
    axes.set(title='Comparing Polynomial Fits',
             xlabel='Polynomial order',
             ylabel='MSE')
def plot_fitted_polynomials(x, y, theta_hat):
    """Plot polynomials of different orders

    Overlays the fitted curve for every order from 0 to the notebook-global
    ``max_order`` on a dense grid extending 0.5 beyond the data range,
    together with the raw data points.

    Relies on the globals ``max_order`` and ``make_design_matrix`` defined
    elsewhere in the notebook.

    Args:
      x (ndarray): input vector of shape (n_samples)
      y (ndarray): vector of measurements of shape (n_samples)
      theta_hat (dict): polynomial regression weights for different orders
    """
    # np.linspace with no num argument gives the default 50 grid points.
    x_grid = np.linspace(x.min() - .5, x.max() + .5)
    plt.figure()

    for order in range(0, max_order + 1):
        # Evaluate the order-`order` fit on the grid: X @ theta.
        X_design = make_design_matrix(x_grid, order)
        plt.plot(x_grid, X_design @ theta_hat[order]);

    plt.ylabel('y')
    plt.xlabel('x')
    plt.plot(x, y, 'C0.');
    plt.legend([f'order {o}' for o in range(max_order + 1)], loc=1)
    plt.title('polynomial fits')
    plt.show()
```
---
# Section 1: Multiple Linear Regression
*Estimated timing to here from start of tutorial: 8 min*
This video covers linear regression with multiple inputs (more than 1D) and polynomial regression.
<details>
<summary> <font color='blue'>Click here for text recap of video </font></summary>
Now that we have considered the univariate case and how to produce confidence intervals for our estimator, we turn to the general linear regression case, where we can have more than one regressor, or feature, in our input.
Recall that our original univariate linear model was given as
\begin{align}
y = \theta x + \epsilon
\end{align}
where $\theta$ is the slope and $\epsilon$ some noise. We can easily extend this to the multivariate scenario by adding another parameter for each additional feature
\begin{align}
y = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + ... +\theta_d x_d + \epsilon
\end{align}
where $\theta_0$ is the intercept and $d$ is the number of features (it is also the dimensionality of our input).
We can condense this succinctly using vector notation for a single data point
\begin{align}
y_i = \boldsymbol{\theta}^{\top}\mathbf{x}_i + \epsilon
\end{align}
and fully in matrix form
\begin{align}
\mathbf{y} = \mathbf{X}\boldsymbol{\theta} + \mathbf{\epsilon}
\end{align}
where $\mathbf{y}$ is a vector of measurements, $\mathbf{X}$ is a matrix containing the feature values (columns) for each input sample (rows), and $\boldsymbol{\theta}$ is our parameter vector.
This matrix $\mathbf{X}$ is often referred to as the "[design matrix](https://en.wikipedia.org/wiki/Design_matrix)".
We want to find an optimal vector of paramters $\boldsymbol{\hat\theta}$. Recall our analytic solution to minimizing MSE for a single regressor:
\begin{align}
\hat\theta = \frac{\sum_{i=1}^N x_i y_i}{\sum_{i=1}^N x_i^2}.
\end{align}
The same holds true for the multiple regressor case, only now expressed in matrix form
\begin{align}
\boldsymbol{\hat\theta} = (\mathbf{X}^\top\mathbf{X})^{-1}\mathbf{X}^\top\mathbf{y}.
\end{align}
This is called the [ordinary least squares](https://en.wikipedia.org/wiki/Ordinary_least_squares) (OLS) estimator.
</details>
For this tutorial we will focus on the two-dimensional case ($d=2$), which allows us to easily visualize our results. As an example, think of a situation where a scientist records the spiking response of a retinal ganglion cell to patterns of light signals that vary in contrast and in orientation. Then contrast and orientation values can be used as features / regressors to predict the cells response.
In this case our model can be writen for a single data point as:
\begin{align}
y = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \epsilon
\end{align}
or for multiple data points in matrix form where
\begin{align}
\mathbf{X} =
\begin{bmatrix}
1 & x_{1,1} & x_{1,2} \\
1 & x_{2,1} & x_{2,2} \\
\vdots & \vdots & \vdots \\
1 & x_{n,1} & x_{n,2}
\end{bmatrix},
\boldsymbol{\theta} =
\begin{bmatrix}
\theta_0 \\
\theta_1 \\
\theta_2 \\
\end{bmatrix}
\end{align}
When we refer to $x_{i, j}$, we mean that it is the i-th data point and the j-th feature of that data point.
For our actual exploration dataset we shall set $\boldsymbol{\theta}=[0, -2, -3]$ and draw $N=40$ noisy samples from $x \in [-2,2)$. Note that setting the value of $\theta_0 = 0$ effectively ignores the offset term.
```
# @markdown Execute this cell to simulate some data
# Set random seed for reproducibility
np.random.seed(1234)
# Set parameters
theta = [0, -2, -3]
n_samples = 40
# Draw x and calculate y
n_regressors = len(theta)
x0 = np.ones((n_samples, 1))
x1 = np.random.uniform(-2, 2, (n_samples, 1))
x2 = np.random.uniform(-2, 2, (n_samples, 1))
X = np.hstack((x0, x1, x2))
noise = np.random.randn(n_samples)
y = X @ theta + noise
ax = plt.subplot(projection='3d')
ax.plot(X[:,1], X[:,2], y, '.')
ax.set(
xlabel='$x_1$',
ylabel='$x_2$',
zlabel='y'
)
plt.tight_layout()
```
## Coding Exercise 1: Ordinary Least Squares Estimator
In this exercise you will implement the OLS approach to estimating $\boldsymbol{\hat\theta}$ from the design matrix $\mathbf{X}$ and measurement vector $\mathbf{y}$. You can use the `@` symbol for matrix multiplication, `.T` for transpose, and `np.linalg.inv` for matrix inversion.
```
def ordinary_least_squares(X, y):
    """Ordinary least squares estimator for linear regression.

    Student exercise: implement theta_hat = (X^T X)^{-1} X^T y.

    Args:
      X (ndarray): design matrix of shape (n_samples, n_regressors)
      y (ndarray): vector of measurements of shape (n_samples)

    Returns:
      ndarray: estimated parameter values of shape (n_regressors)
    """
    ######################################################################
    ## TODO for students: solve for the optimal parameter vector using OLS
    # Fill out function and remove
    raise NotImplementedError("Student exercise: solve for theta_hat vector using OLS")
    ######################################################################

    # Compute theta_hat using OLS
    theta_hat = ...

    return theta_hat


theta_hat = ordinary_least_squares(X, y)
print(theta_hat)
# to_remove solution
def ordinary_least_squares(X, y):
    """Ordinary least squares estimator for linear regression.

    Solves the normal equations theta_hat = (X^T X)^{-1} X^T y.

    Args:
      X (ndarray): design matrix of shape (n_samples, n_regressors)
      y (ndarray): vector of measurements of shape (n_samples)

    Returns:
      ndarray: estimated parameter values of shape (n_regressors)
    """
    gram = X.T @ X    # (n_regressors, n_regressors) Gram matrix
    moment = X.T @ y  # (n_regressors,) cross-moment vector
    return np.linalg.inv(gram) @ moment
theta_hat = ordinary_least_squares(X, y)
print(theta_hat)
```
After filling in this function, you should see that $\boldsymbol{\hat\theta}$ = [ 0.13861386, -2.09395731, -3.16370742]
Now that we have our $\boldsymbol{\hat\theta}$, we can obtain $\hat{\mathbf{y}}$ and thus our mean squared error.
```
# Compute predicted data
theta_hat = ordinary_least_squares(X, y)
y_hat = X @ theta_hat
# Compute MSE
print(f"MSE = {np.mean((y - y_hat)**2):.2f}")
```
Finally, the following code will plot a geometric visualization of the data points (blue) and fitted plane.
```
# @markdown Execute this cell to visualize data and predicted plane
theta_hat = ordinary_least_squares(X, y)
xx, yy = np.mgrid[-2:2:50j, -2:2:50j]
y_hat_grid = np.array([xx.flatten(), yy.flatten()]).T @ theta_hat[1:]
y_hat_grid = y_hat_grid.reshape((50, 50))
ax = plt.subplot(projection='3d')
ax.plot(X[:, 1], X[:, 2], y, '.')
ax.plot_surface(xx, yy, y_hat_grid, linewidth=0, alpha=0.5, color='C1',
cmap=plt.get_cmap('coolwarm'))
for i in range(len(X)):
ax.plot((X[i, 1], X[i, 1]),
(X[i, 2], X[i, 2]),
(y[i], y_hat[i]),
'g-', alpha=.5)
ax.set(
xlabel='$x_1$',
ylabel='$x_2$',
zlabel='y'
)
plt.tight_layout()
```
---
# Section 2: Polynomial Regression
So far today, you learned how to predict outputs from inputs by fitting a linear regression model. We can now model all sort of relationships, including in neuroscience!
One potential problem with this approach is the simplicity of the model. Linear regression, as the name implies, can only capture a linear relationship between the inputs and outputs. Put another way, the predicted outputs are only a weighted sum of the inputs. What if there are more complicated computations happening? Luckily, many more complex models exist (and you will encounter many more over the next 3 weeks). One model that is still very simple to fit and understand, but captures more complex relationships, is **polynomial regression**, an extension of linear regression.
<details>
<summary> <font color='blue'>Click here for text recap of relevant part of video </font></summary>
Since polynomial regression is an extension of linear regression, everything you learned so far will come in handy now! The goal is the same: we want to predict the dependent variable $y$ given the input values $x$. The key change is the type of relationship between inputs and outputs that the model can capture.
Linear regression models predict the outputs as a weighted sum of the inputs:
\begin{align}
y = \theta_0 + \theta x + \epsilon
\end{align}
With polynomial regression, we model the outputs as a polynomial equation based on the inputs. For example, we can model the outputs as:
\begin{align}
y & = \theta_0 + \theta_1 x + \theta_2 x^2 + \theta_3 x^3 + \epsilon
\end{align}
We can change how complex a polynomial is fit by changing the order of the polynomial. The order of a polynomial refers to the highest power in the polynomial. The equation above is a third order polynomial because the highest value x is raised to is 3. We could add another term ($+ \theta_4 x^4$) to model an order 4 polynomial and so on.
</details>
First, we will simulate some data to practice fitting polynomial regression models. We will generate random inputs $x$ and then compute y according to $y = x^2 - x - 2 $, with some extra noise both in the input and the output to make the model fitting exercise closer to a real life situation.
```
# @markdown Execute this cell to simulate some data

# setting a fixed seed to our random number generator ensures we will always
# get the same pseudorandom number sequence
np.random.seed(121)

n_samples = 30
x = np.random.uniform(-2, 2.5, n_samples)  # inputs uniformly sampled from [-2, 2.5)
y = x**2 - x - 2   # computing the outputs from the true quadratic relationship

# Noise is added to outputs first, then inputs — keep this order so the
# pseudorandom sequence (and hence the data) stays reproducible.
output_noise = 1/8 * np.random.randn(n_samples)
y += output_noise  # adding some output noise

input_noise = 1/2 * np.random.randn(n_samples)
x += input_noise  # adding some input noise

fig, ax = plt.subplots()
ax.scatter(x, y)  # produces a scatter plot
ax.set(xlabel='x', ylabel='y');
```
## Section 2.1: Design matrix for polynomial regression
*Estimated timing to here from start of tutorial: 16 min*
Now we have the basic idea of polynomial regression and some noisy data, let's begin! The key difference between fitting a linear regression model and a polynomial regression model lies in how we structure the input variables.
Let's go back to one feature for each data point. For linear regression, we used $\mathbf{X} = \mathbf{x}$ as the input data, where $\mathbf{x}$ is a vector where each element is the input for a single data point. To add a constant bias (a y-intercept in a 2-D plot), we use $\mathbf{X} = \big[ \boldsymbol 1, \mathbf{x} \big]$, where $\boldsymbol 1$ is a column of ones. When fitting, we learn a weight for each column of this matrix. So we learn a weight that multiplies with column 1 - in this case that column is all ones so we gain the bias parameter ($+ \theta_0$).
This matrix $\mathbf{X}$ that we use for our inputs is known as a **design matrix**. We want to create our design matrix so we learn weights for $\mathbf{x}^2, \mathbf{x}^3,$ etc. Thus, we want to build our design matrix $X$ for polynomial regression of order $k$ as:
\begin{align}
\mathbf{X} = \big[ \boldsymbol 1 , \mathbf{x}^1, \mathbf{x}^2 , \ldots , \mathbf{x}^k \big],
\end{align}
where $\boldsymbol{1}$ is the vector of the same length as $\mathbf{x}$ consisting of all ones, and $\mathbf{x}^p$ is the vector $\mathbf{x}$ with all elements raised to the power $p$. Note that $\boldsymbol{1} = \mathbf{x}^0$ and $\mathbf{x}^1 = \mathbf{x}$.
If we have inputs with more than one feature, we can use a similar design matrix but include all features raised to each power. Imagine that we have two features per data point: $\mathbf{x}_m$ is a vector of one feature per data point and $\mathbf{x}_n$ is another. Our design matrix for a polynomial regression would be:
\begin{align}
\mathbf{X} = \big[ \boldsymbol 1 , \mathbf{x}_m^1, \mathbf{x}_n^1, \mathbf{x}_m^2 , \mathbf{x}_n^2\ldots , \mathbf{x}_m^k , \mathbf{x}_n^k \big],
\end{align}
### Coding Exercise 2.1: Structure design matrix
Create a function (`make_design_matrix`) that structures the design matrix given the input data and the order of the polynomial you wish to fit. We will print part of this design matrix for our data and order 5.
```
def make_design_matrix(x, order):
    """Create the design matrix of inputs for use in polynomial regression

    Args:
        x (ndarray): input vector of shape (n_samples,)
        order (scalar): polynomial regression order (highest power of x)

    Returns:
        ndarray: design matrix for polynomial regression of shape (samples, order+1)
    """
    ########################################################################
    ## TODO for students: create the design matrix ##
    # Fill out function and remove
    raise NotImplementedError("Student exercise: create the design matrix")
    ########################################################################

    # Broadcast to shape (n x 1) so dimensions work
    if x.ndim == 1:
        x = x[:, None]

    #if x has more than one feature, we don't want multiple columns of ones so we assign
    # x^0 here
    design_matrix = np.ones((x.shape[0], 1))

    # Loop through rest of degrees and stack columns (hint: np.hstack)
    for degree in range(1, order + 1):
        design_matrix = ...

    return design_matrix

# Build the order-5 design matrix and print its top-left corner as a check.
order = 5
X_design = make_design_matrix(x, order)
print(X_design[0:2, 0:2])
# to_remove solution
def make_design_matrix(x, order):
    """Build the polynomial-regression design matrix for inputs `x`.

    Args:
        x (ndarray): input vector of shape (samples,) or (samples, features)
        order (scalar): polynomial regression order (highest power of x)

    Returns:
        ndarray: design matrix of shape (samples, order + 1) for 1-D input;
            columns are x**0 (all ones), x**1, ..., x**order
    """
    # Promote a 1-D vector to a column so per-sample powers broadcast cleanly.
    features = x if x.ndim > 1 else x[:, None]

    # x**0 contributes exactly one bias column of ones, even when the input
    # has multiple features.
    columns = [np.ones((features.shape[0], 1))]

    # One block of columns per power from 1 through `order`.
    columns.extend(features ** power for power in range(1, order + 1))

    return np.hstack(columns)
# Build the order-5 design matrix and print its top-left corner; the expected
# values are shown in the text below the cell.
order = 5
X_design = make_design_matrix(x, order)
print(X_design[0:2, 0:2])
```
You should see that the printed section of this design matrix is `[[ 1. -1.51194917]
[ 1. -0.35259945]]`
## Section 2.2: Fitting polynomial regression models
*Estimated timing to here from start of tutorial: 24 min*
Now that we have the inputs structured correctly in our design matrix, fitting a polynomial regression is the same as fitting a linear regression model! All of the polynomial structure we need to learn is contained in how the inputs are structured in the design matrix. We can use the same least squares solution we computed in previous exercises.
### Coding Exercise 2.2: Fitting polynomial regression models with different orders
Here, we will fit polynomial regression models to find the regression coefficients ($\theta_0, \theta_1, \theta_2,$ ...) by solving the least squares problem. Create a function `solve_poly_reg` that loops over different order polynomials (up to `max_order`), fits that model, and saves out the weights for each. You may invoke the `ordinary_least_squares` function.
We will then qualitatively inspect the quality of our fits for each order by plotting the fitted polynomials on top of the data. In order to see smooth curves, we evaluate the fitted polynomials on a grid of $x$ values (ranging between the largest and smallest of the inputs present in the dataset).
```
def solve_poly_reg(x, y, max_order):
    """Fit a polynomial regression model for each order 0 through max_order.

    Args:
        x (ndarray): input vector of shape (n_samples,)
        y (ndarray): vector of measurements of shape (n_samples,)
        max_order (scalar): max order for polynomial fits

    Returns:
        dict: fitted weights for each polynomial model (dict key is order)
    """
    # Create a dictionary with polynomial order as keys,
    # and np array of theta_hat (weights) as the values
    theta_hats = {}

    # Loop over polynomial orders from 0 through max_order
    for order in range(max_order + 1):
        ##################################################################################
        ## TODO for students: Create design matrix and fit polynomial model for this order
        # Fill out function and remove
        raise NotImplementedError("Student exercise: fit a polynomial model")
        ##################################################################################

        # Create design matrix
        X_design = ...

        # Fit polynomial model
        this_theta = ...

        theta_hats[order] = this_theta

    return theta_hats

# Fit orders 0 through 5 and plot each fitted polynomial over the data.
max_order = 5
theta_hats = solve_poly_reg(x, y, max_order)

# Visualize
plot_fitted_polynomials(x, y, theta_hats)
# to_remove solution
def solve_poly_reg(x, y, max_order):
    """Fit polynomial regression models of every order from 0 to max_order.

    Args:
        x (ndarray): input vector of shape (n_samples,)
        y (ndarray): vector of measurements of shape (n_samples,)
        max_order (scalar): max order for polynomial fits

    Returns:
        dict: fitted weights for each polynomial model (dict key is order)
    """
    # For each order, build the design matrix [1, x, ..., x**order] and solve
    # the corresponding least-squares problem; key the weights by order.
    return {
        order: ordinary_least_squares(make_design_matrix(x, order), y)
        for order in range(max_order + 1)
    }
# Fit orders 0 through 5, then plot each fitted polynomial over the data
# (xkcd style is cosmetic only).
max_order = 5
theta_hats = solve_poly_reg(x, y, max_order)

# Visualize
with plt.xkcd():
    plot_fitted_polynomials(x, y, theta_hats)
```
## Section 2.3: Evaluating fit quality
*Estimated timing to here from start of tutorial: 29 min*
As with linear regression, we can compute mean squared error (MSE) to get a sense of how well the model fits the data.
We compute MSE as:
\begin{align}
\mathrm{MSE} = \frac 1 N ||\mathbf{y} - \hat{\mathbf{y}}||^2 = \frac 1 N \sum_{i=1}^N (y_i - \hat y_i)^2
\end{align}
where the predicted values for each model are given by $ \hat{\mathbf{y}} = \mathbf{X}\boldsymbol{\hat\theta}$.
*Which model (i.e. which polynomial order) do you think will have the best MSE?*
### Coding Exercise 2.3: Compute MSE and compare models
We will compare the MSE for different polynomial orders with a bar plot.
```
# Compute the MSE of each fitted model (order 0 through max_order) on the data.
mse_list = []
order_list = list(range(max_order + 1))

for order in order_list:
    X_design = make_design_matrix(x, order)

    ########################################################################
    ## TODO for students
    # Fill out function and remove
    raise NotImplementedError("Student exercise: compute MSE")
    ########################################################################

    # Get prediction for the polynomial regression model of this order
    y_hat = ...

    # Compute the residuals
    residuals = ...

    # Compute the MSE
    mse = ...

    mse_list.append(mse)

# Visualize MSE of fits
evaluate_fits(order_list, mse_list)
# to_remove solution
# Compute the MSE of each fitted model (order 0 through max_order) on the data.
mse_list = []
order_list = list(range(max_order + 1))

for order in order_list:
    X_design = make_design_matrix(x, order)

    # Get prediction for the polynomial regression model of this order
    # (theta_hats comes from solve_poly_reg in the previous cell)
    y_hat = X_design @ theta_hats[order]

    # Compute the residuals
    residuals = y - y_hat

    # Compute the MSE
    mse = np.mean(residuals ** 2)

    mse_list.append(mse)

# Visualize MSE of fits (xkcd style is cosmetic only)
with plt.xkcd():
    evaluate_fits(order_list, mse_list)
```
---
# Summary
*Estimated timing of tutorial: 35 minutes*
* Linear regression generalizes naturally to multiple dimensions
* Linear algebra affords us the mathematical tools to reason and solve such problems beyond the two dimensional case
* To change from a linear regression model to a polynomial regression model, we only have to change how the input data is structured
* We can choose the complexity of the model by changing the order of the polynomial model fit
* Higher order polynomial models tend to have lower MSE on the data they're fit with
**Note**: In practice, multidimensional least squares problems can be solved very efficiently (thanks to numerical routines such as LAPACK).
---
# Notation
\begin{align}
x &\quad \text{input, independent variable}\\
y &\quad \text{response measurement, dependent variable}\\
\epsilon &\quad \text{measurement error, noise contribution}\\
\theta &\quad \text{slope parameter}\\
\hat{\theta} &\quad \text{estimated slope parameter}\\
\mathbf{x} &\quad \text{vector of inputs where each element is a different data point}\\
\mathbf{X} &\quad \text{design matrix}\\
\mathbf{y} &\quad \text{vector of measurements}\\
\mathbf{\hat y} &\quad \text{vector of estimated measurements}\\
\boldsymbol{\theta} &\quad \text{vector of parameters}\\
\boldsymbol{\hat\theta} &\quad \text{vector of estimated parameters}\\
d &\quad \text{dimensionality of input}\\
N &\quad \text{number of samples}\\
\end{align}
**Suggested readings**
[Introduction to Applied Linear Algebra – Vectors, Matrices, and Least Squares](http://vmls-book.stanford.edu/)
Stephen Boyd and Lieven Vandenberghe
| github_jupyter |
# Intro to Hidden Markov Models (optional)
---
### Introduction
In this notebook, you'll use the [Pomegranate](http://pomegranate.readthedocs.io/en/latest/index.html) library to build a simple Hidden Markov Model and explore the Pomegranate API.
<div class="alert alert-block alert-info">
**Note:** You are not required to complete this notebook and it will not be submitted with your project, but it is designed to quickly introduce the relevant parts of the Pomegranate library that you will need to complete the part of speech tagger.
</div>
The notebook already contains some code to get you started. You only need to add some new functionality in the areas indicated; you will not need to modify the included code beyond what is requested. Sections that begin with **'IMPLEMENTATION'** in the header indicate that you need to fill in code in the block that follows. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
<div class="alert alert-block alert-info">
**Note:** Code and Markdown cells can be executed using the `Shift + Enter` keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
</div>
<hr>
<div class="alert alert-block alert-warning">
**Note:** Make sure you have selected a **Python 3** kernel in Workspaces or the hmm-tagger conda environment if you are running the Jupyter server on your own machine.
</div>
```
# Jupyter "magic methods" -- only need to be run once per kernel restart
%load_ext autoreload
%aimport helpers
%autoreload 1
# import python modules -- this cell needs to be run again if you make changes to any of the files
import matplotlib.pyplot as plt
import numpy as np
from helpers import show_model
from pomegranate import State, HiddenMarkovModel, DiscreteDistribution
```
## Build a Simple HMM
---
You will start by building a simple HMM network based on an example from the textbook [Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu/).
> You are the security guard stationed at a secret under-ground installation. Each day, you try to guess whether it’s raining today, but your only access to the outside world occurs each morning when you see the director coming in with, or without, an umbrella.
A simplified diagram of the required network topology is shown below.

### Describing the Network
<div class="alert alert-block alert-warning">
$\lambda = (A, B)$ specifies a Hidden Markov Model in terms of a state transition probability distribution $A$ and an emission probability distribution $B$.
</div>
HMM networks are parameterized by two distributions: the emission probabilties giving the conditional probability of observing evidence values for each hidden state, and the transition probabilities giving the conditional probability of moving between states during the sequence. Additionally, you can specify an initial distribution describing the probability of a sequence starting in each state.
<div class="alert alert-block alert-warning">
At each time $t$, $X_t$ represents the hidden state, and $Y_t$ represents an observation at that time.
</div>
In this problem, $t$ corresponds to each day of the week and the hidden state represent the weather outside (whether it is Rainy or Sunny) and observations record whether the security guard sees the director carrying an umbrella or not.
For example, during some particular week the guard may observe an umbrella ['yes', 'no', 'yes', 'no', 'yes'] on Monday-Friday, while the weather outside is ['Rainy', 'Sunny', 'Sunny', 'Sunny', 'Rainy']. In that case, $t=Wednesday$, $Y_{Wednesday}=yes$, and $X_{Wednesday}=Sunny$. (It might be surprising that the guard would observe an umbrella on a sunny day, but it is possible under this type of model.)
### Initializing an HMM Network with Pomegranate
The Pomegranate library supports [two initialization methods](http://pomegranate.readthedocs.io/en/latest/HiddenMarkovModel.html#initialization). You can either explicitly provide the three distributions, or you can build the network line-by-line. We'll use the line-by-line method for the example network, but you're free to use either method for the part of speech tagger.
```
# create the HMM model — starts as an empty container; states and
# transitions are added line-by-line in the following cells
model = HiddenMarkovModel(name="Example Model")
```
### **IMPLEMENTATION**: Add the Hidden States
When the HMM model is specified line-by-line, the object starts as an empty container. The first step is to name each state and attach an emission distribution.
#### Observation Emission Probabilities: $P(Y_t | X_t)$
We need to assume that we have some prior knowledge (possibly from a data set) about the director's behavior to estimate the emission probabilities for each hidden state. In real problems you can often estimate the emission probabilities empirically, which is what we'll do for the part of speech tagger. Our imaginary data will produce the conditional probability table below. (Note that the rows sum to 1.0)
| | $yes$ | $no$ |
| --- | --- | --- |
| $Sunny$ | 0.10 | 0.90 |
| $Rainy$ | 0.80 | 0.20 |
```
# create the HMM model
model = HiddenMarkovModel(name="Example Model")

# emission probability distributions, P(umbrella | weather)
# Sunny row of the table above: P(yes|Sunny)=0.10, P(no|Sunny)=0.90
sunny_emissions = DiscreteDistribution({"yes": 0.1, "no": 0.9})
sunny_state = State(sunny_emissions, name="Sunny")

# TODO: create a discrete distribution for the rainy emissions from the probability table above & use that distribution to create a state named Rainy
# Rainy row of the table above: P(yes|Rainy)=0.80, P(no|Rainy)=0.20
rainy_emissions = DiscreteDistribution({"yes": 0.8, "no": 0.2})
rainy_state = State(rainy_emissions, name="Rainy")

# add the states to the model
model.add_states(sunny_state, rainy_state)

assert rainy_emissions.probability("yes") == 0.8, "The director brings his umbrella with probability 0.8 on rainy days"
print("Looks good so far!")
```
### **IMPLEMENTATION:** Adding Transitions
Once the states are added to the model, we can build up the desired topology of individual state transitions.
#### Initial Probability $P(X_0)$:
We will assume that we don't know anything useful about the likelihood of a sequence starting in either state. If the sequences start each week on Monday and end each week on Friday (so each week is a new sequence), then this assumption means that it's equally likely that the weather on a Monday may be Rainy or Sunny. We can assign equal probability to each starting state by setting $P(X_0=Rainy) = 0.5$ and $P(X_0=Sunny)=0.5$:
| $Sunny$ | $Rainy$ |
| --- | ---
| 0.5 | 0.5 |
#### State transition probabilities $P(X_{t} | X_{t-1})$
Finally, we will assume for this example that we can estimate transition probabilities from something like historical weather data for the area. In real problems you can often use the structure of the problem (like a language grammar) to impose restrictions on the transition probabilities, then re-estimate the parameters with the same training data used to estimate the emission probabilities. Under this assumption, we get the conditional probability table below. (Note that the rows sum to 1.0)
| | $Sunny$ | $Rainy$ |
| --- | --- | --- |
|$Sunny$| 0.80 | 0.20 |
|$Rainy$| 0.40 | 0.60 |
```
# create edges for each possible state transition in the model
# equal probability of a sequence starting on either a rainy or sunny day
model.add_transition(model.start, sunny_state, 0.5)
model.add_transition(model.start, rainy_state, 0.5)

# add sunny day transitions (we already know estimates of these probabilities from the problem statement)
model.add_transition(sunny_state, sunny_state, 0.8)  # 80% sunny->sunny
model.add_transition(sunny_state, rainy_state, 0.2)  # 20% sunny->rainy

# TODO: add rainy day transitions using the probabilities specified in the transition table
model.add_transition(rainy_state, sunny_state, 0.4)  # 40% rainy->sunny
model.add_transition(rainy_state, rainy_state, 0.6)  # 60% rainy->rainy

# finally, call the .bake() method to finalize the model
model.bake()

# The baked graph has the two weather states plus the implicit start/end
# nodes, and six edges (2 from start, 2 from Sunny, 2 from Rainy).
assert model.edge_count() == 6, "There should be two edges from model.start, two from Rainy, and two from Sunny"
assert model.node_count() == 4, "The states should include model.start, model.end, Rainy, and Sunny"
print("Great! You've finished the model.")
```
## Visualize the Network
---
We have provided a helper function called `show_model()` that generates a PNG image from a Pomegranate HMM network. You can specify an optional filename to save the file to disk. Setting the "show_ends" argument True will add the model start & end states that are included in every Pomegranate network.
```
# Render the network graph and save it to "example.png";
# show_ends=False hides the implicit start/end states from the figure.
show_model(model, figsize=(7, 7), filename="example.png", overwrite=True, show_ends=False)
```
### Checking the Model
The states of the model can be accessed using array syntax on the `HMM.states` attribute, and the transition matrix can be accessed by calling `HMM.dense_transition_matrix()`. Element $(i, j)$ encodes the probability of transitioning from state $i$ to state $j$. For example, with the default column order specified, element $(2, 1)$ gives the probability of transitioning from "Rainy" to "Sunny", which we specified as 0.4.
Run the next cell to inspect the full state transition matrix, then read the explanation below.
```
column_order = ["Example Model-start", "Sunny", "Rainy", "Example Model-end"]  # Override the Pomegranate default order
column_names = [s.name for s in model.states]  #['Rainy', 'Sunny', 'Example Model-start', 'Example Model-end']
order_index = [column_names.index(c) for c in column_order]  #[2, 1, 0, 3]

# re-order the rows/columns to match the specified column order
transitions = model.dense_transition_matrix()[:, order_index][order_index, :]
print("The state transition matrix, P(Xt|Xt-1):\n")
print(transitions)
# With the column_order above, element (2, 1) is Rainy -> Sunny.
print("\nThe transition probability from Rainy to Sunny is {:.0f}%".format(100 * transitions[2, 1]))
```
## Inference in Hidden Markov Models
---
Before moving on, we'll use this simple network to quickly go over the Pomegranate API to perform the three most common HMM tasks:
<div class="alert alert-block alert-info">
**Likelihood Evaluation**<br>
Given a model $\lambda=(A,B)$ and a set of observations $Y$, determine $P(Y|\lambda)$, the likelihood of observing that sequence from the model
</div>
We can use the weather prediction model to evaluate the likelihood of the sequence [yes, yes, yes, yes, yes] (or any other state sequence). The likelihood is often used in problems like machine translation to weight interpretations in conjunction with a statistical language model.
<div class="alert alert-block alert-info">
**Hidden State Decoding**<br>
Given a model $\lambda=(A,B)$ and a set of observations $Y$, determine $Q$, the most likely sequence of hidden states in the model to produce the observations
</div>
We can use the weather prediction model to determine the most likely sequence of Rainy/Sunny states for a known observation sequence, like [yes, no] -> [Rainy, Sunny]. We will use decoding in the part of speech tagger to determine the tag for each word of a sentence. The decoding can be further split into "smoothing" when we want to calculate past states, "filtering" when we want to calculate the current state, or "prediction" if we want to calculate future states.
<div class="alert alert-block alert-info">
**Parameter Learning**<br>
Given a model topography (set of states and connections) and a set of observations $Y$, learn the transition probabilities $A$ and emission probabilities $B$ of the model, $\lambda=(A,B)$
</div>
We don't need to learn the model parameters for the weather problem or POS tagging, but it is supported by Pomegranate.
### IMPLEMENTATION: Calculate Sequence Likelihood
Calculating the likelihood of an observation sequence from an HMM network is performed with the [forward algorithm](https://en.wikipedia.org/wiki/Forward_algorithm). Pomegranate provides the `HMM.forward()` method to calculate the full matrix showing the likelihood of aligning each observation to each state in the HMM, and the `HMM.log_probability()` method to calculate the cumulative likelihood over all possible hidden state paths that the specified model generated the observation sequence.
Fill in the code in the next section with a sample observation sequence and then use the `forward()` and `log_probability()` methods to evaluate the sequence.
```
# TODO: input a sequence of 'yes'/'no' values in the list below for testing
observations = ['yes', 'no', 'yes']

assert len(observations) > 0, "You need to choose a sequence of 'yes'/'no' observations to test"

# TODO: use model.forward() to calculate the forward matrix of the observed sequence,
# and then use np.exp() to convert from log-likelihood to likelihood
forward_matrix = np.exp(model.forward(observations))

# TODO: use model.log_probability() to calculate the all-paths likelihood of the
# observed sequence and then use np.exp() to convert log-likelihood to likelihood
probability_percentage = np.exp(model.log_probability(observations))

# Display the forward probabilities: one column per model state.
# Row 0 corresponds to the start distribution; row i aligns with observation i-1.
print(" " + "".join(s.name.center(len(s.name)+6) for s in model.states))
for i in range(len(observations) + 1):
    print(" <start> " if i==0 else observations[i - 1].center(9), end="")
    print("".join("{:.0f}%".format(100 * forward_matrix[i, j]).center(len(s.name) + 6)
                  for j, s in enumerate(model.states)))

print("\nThe likelihood over all possible paths " + \
      "of this model producing the sequence {} is {:.2f}%\n\n"
      .format(observations, 100 * probability_percentage))
```
### IMPLEMENTATION: Decoding the Most Likely Hidden State Sequence
The [Viterbi algorithm](https://en.wikipedia.org/wiki/Viterbi_algorithm) calculates the single path with the highest likelihood to produce a specific observation sequence. Pomegranate provides the `HMM.viterbi()` method to calculate both the hidden state sequence and the corresponding likelihood of the viterbi path.
This is called "decoding" because we use the observation sequence to decode the corresponding hidden state sequence. In the part of speech tagging problem, the hidden states map to parts of speech and the observations map to sentences. Given a sentence, Viterbi decoding finds the most likely sequence of part of speech tags corresponding to the sentence.
Fill in the code in the next section with the same sample observation sequence you used above, and then use the `model.viterbi()` method to calculate the likelihood and most likely state sequence. Compare the Viterbi likelihood against the forward algorithm likelihood for the observation sequence.
```
# TODO: input a sequence of 'yes'/'no' values in the list below for testing
observations = ['yes', 'no', 'yes']

# TODO: use model.viterbi to find the sequence likelihood & the most likely path
viterbi_likelihood, viterbi_path = model.viterbi(observations)

# viterbi_path[0] is the model's start state, so the display skips it;
# each remaining entry is a (index, state) pair along the best path.
print("The most likely weather sequence to have generated " + \
      "these observations is {} at {:.2f}%."
      .format([s[1].name for s in viterbi_path[1:]], np.exp(viterbi_likelihood)*100)
)
```
### Forward likelihood vs Viterbi likelihood
Run the cells below to see the likelihood of each sequence of observations with length 3, and compare with the viterbi path.
```
from itertools import product

observations = ['no', 'no', 'yes']

# Log transition probabilities p[s][s'] and log emission probabilities e[s][o],
# copied from the tables used to build the model above.
p = {'Sunny': {'Sunny': np.log(.8), 'Rainy': np.log(.2)}, 'Rainy': {'Sunny': np.log(.4), 'Rainy': np.log(.6)}}
e = {'Sunny': {'yes': np.log(.1), 'no': np.log(.9)}, 'Rainy':{'yes':np.log(.8), 'no':np.log(.2)}}
o = observations
k = []  # joint likelihood of each of the 2**3 possible hidden-state sequences

# Likelihood of the single best (Viterbi) path, for marking it in the listing.
vprob = np.exp(model.viterbi(o)[0])
print("The likelihood of observing {} if the weather sequence is...".format(o))
for s in product(*[['Sunny', 'Rainy']]*3):
    # log P(start=0.5) + alternating log emissions and log transitions along this path
    k.append(np.exp(np.log(.5)+e[s[0]][o[0]] + p[s[0]][s[1]] + e[s[1]][o[1]] + p[s[1]][s[2]] + e[s[2]][o[2]]))
    print("\t{} is {:.2f}% {}".format(s, 100 * k[-1], " <-- Viterbi path" if k[-1] == vprob else ""))

# The forward (all-paths) likelihood is the sum over every path's likelihood.
print("\nThe total likelihood of observing {} over all possible paths is {:.2f}%".format(o, 100*sum(k)))
```
### Congratulations!
You've now finished the HMM warmup. You should have all the tools you need to complete the part of speech tagger project.
| github_jupyter |
.. meta::
:description: A guide which introduces the most important steps to get started with pymoo, an open-source multi-objective optimization framework in Python.
.. meta::
:keywords: Multi-objective Optimization, Python, Evolutionary Computation, Optimization Test Problem, Hypervolume
# Getting Started
In the following, we like to introduce *pymoo* by presenting an example optimization scenario. This guide goes through the essential steps to get started with our framework. This guide is structured as follows:
1. Introduction to Multi-objective Optimization and an exemplary Test Problem
2. Implementation of a Problem (vectorized, element-wise or functional)
3. Initialization of an Algorithm (in our case NSGA2)
4. Definition of a Termination Criterion
5. Optimize (functional through `minimize` or object-oriented by calling `next()`)
6. Visualization of Results and Convergence
7. Summary
8. Source code (in one piece)
We try to cover the essential steps you have to follow to get started optimizing your own optimization problem and have also included some posteriori analysis which is known to be particularly important in multi-objective optimization.
## 1. Introduction
### Multi-Objective Optimization
In general, multi-objective optimization has several objective functions with subject to inequality and equality constraints to optimize <cite data-cite="multi_objective_book"></cite>. The goal is to find a set of solutions that do not have any constraint violation and are as good as possible regarding all its objectives values. The problem definition in its general form is given by:
\begin{align}
\begin{split}
\min \quad& f_{m}(x) \quad \quad \quad \quad m = 1,..,M \\[4pt]
\text{s.t.} \quad& g_{j}(x) \leq 0 \quad \; \; \, \quad j = 1,..,J \\[2pt]
\quad& h_{k}(x) = 0 \quad \; \; \quad k = 1,..,K \\[4pt]
\quad& x_{i}^{L} \leq x_{i} \leq x_{i}^{U} \quad i = 1,..,N \\[2pt]
\end{split}
\end{align}
The formulation above defines a multi-objective optimization problem with $N$ variables, $M$ objectives, $J$ inequality and $K$ equality constraints. Moreover, for each variable $x_i$ lower and upper variable boundaries ($x_i^L$ and $x_i^U$) are defined.
### Test Problem
In the following, we investigate an example bi-objective optimization problem with two constraints.
We tried to select a suitable optimization problem with enough complexity for demonstration purposes, but not too difficult to lose track of the overall idea. Its definition is given by:
\begin{align}
\begin{split}
\min \;\; & f_1(x) = (x_1^2 + x_2^2) \\
\max \;\; & f_2(x) = -(x_1-1)^2 - x_2^2 \\[1mm]
\text{s.t.} \;\; & g_1(x) = 2 \, (x_1 - 0.1) \, (x_1 - 0.9) \leq 0\\
& g_2(x) = 20 \, (x_1 - 0.4) \, (x_1 - 0.6) \geq 0\\[1mm]
& -2 \leq x_1 \leq 2 \\
& -2 \leq x_2 \leq 2
\end{split}
\end{align}
It consists of two objectives ($M=2$) where $f_1(x)$ is minimized and $f_2(x)$ maximized. The optimization is with subject to two inequality constraints ($J=2$) where $g_1(x)$ is formulated as a less than and $g_2(x)$ as a greater than constraint. The problem is defined with respect to two variables ($N=2$), $x_1$ and $x_2$, which both are in the range $[-2,2]$. The problem does not contain any equality constraints ($K=0$).
```
import numpy as np

# Dense grid over the variable domain [-2, 2]^2 for contour plotting.
X1, X2 = np.meshgrid(np.linspace(-2, 2, 500), np.linspace(-2, 2, 500))

# Objective surfaces evaluated on the grid (f2 in its minimization form).
F1 = X1**2 + X2**2
F2 = (X1-1)**2 + X2**2

# NOTE(review): G is computed but never used below; kept for reference.
G = X1**2 - X1 + 3/16

# Constraint parabolas evaluated along the x1 axis only (X1[0] is the
# 1-D vector of x1 grid values).
G1 = 2 * (X1[0] - 0.1) * (X1[0] - 0.9)
G2 = 20 * (X1[0] - 0.4) * (X1[0] - 0.6)

import matplotlib.pyplot as plt
plt.rc('font', family='serif')
levels = [0.02, 0.1, 0.25, 0.5, 0.8]
plt.figure(figsize=(7, 5))

# Contours of f1 (solid) and f2 (dashed); label only the first collection
# so each objective appears once in the legend.
CS = plt.contour(X1, X2, F1, levels, colors='black', alpha=0.5)
CS.collections[0].set_label("$f_1(x)$")
CS = plt.contour(X1, X2, F2, levels, linestyles="dashed", colors='black', alpha=0.5)
CS.collections[0].set_label("$f_2(x)$")

# g1: dotted where infeasible, solid where the constraint is active (< 0).
plt.plot(X1[0], G1, linewidth=2.0, color="green", linestyle='dotted')
plt.plot(X1[0][G1<0], G1[G1<0], label="$g_1(x)$", linewidth=2.0, color="green")

# g2 (a >= constraint): dotted curve plus solid feasible branches.
plt.plot(X1[0], G2, linewidth=2.0, color="blue", linestyle='dotted')
plt.plot(X1[0][X1[0]>0.6], G2[X1[0]>0.6], label="$g_2(x)$",linewidth=2.0, color="blue")
plt.plot(X1[0][X1[0]<0.4], G2[X1[0]<0.4], linewidth=2.0, color="blue")

# The two segments of the analytically known Pareto set (x2 = 0).
plt.plot(np.linspace(0.1,0.4,100), np.zeros(100),linewidth=3.0, color="orange")
plt.plot(np.linspace(0.6,0.9,100), np.zeros(100),linewidth=3.0, color="orange")

plt.xlim(-0.5, 1.5)
plt.ylim(-0.5, 1)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),
           ncol=4, fancybox=True, shadow=False)
plt.tight_layout()
plt.show()
```
The figure above shows the contours of the problem. The contour lines of the objective function $f_1(x)$ is represented by a solid and $f_2(x)$ by a dashed line. The constraints $g_1(x)$ and $g_2(x)$ are parabolas which intersect the $x_1$-axis at $(0.1, 0.9)$ and $(0.4, 0.6)$. A thick orange line illustrates the pareto-optimal set. Through the combination of both constraints, the pareto-set is split into two parts.
Analytically, the pareto-optimal set is given by $PS = \{(x_1, x_2) \,|\, \left((0.1 \leq x_1 \leq 0.4) \lor (0.6 \leq x_1 \leq 0.9)\right) \, \land \, x_2 = 0\}$ and the Pareto-front by $f_2 = (\sqrt{f_1} - 1)^2$ where $f_1$ is defined in $[0.01,0.16]$ and $[0.36,0.81]$.
## 2. Implementation of a Problem
In *pymoo*, we consider **minimization** problems for optimization in all our modules. However, without loss of generality, an objective that is supposed to be maximized can be multiplied by $-1$ and be minimized. Therefore, we minimize $-f_2(x)$ instead of maximizing $f_2(x)$ in our optimization problem. Furthermore, all constraint functions need to be formulated as a $\leq 0$ constraint.
The feasibility of a solution can, therefore, be expressed by:
$$ \begin{cases}
\text{feasible,} \quad \quad \sum_i^n \langle g_i(x)\rangle = 0\\
\text{infeasible,} \quad \quad \quad \text{otherwise}\\
\end{cases}
$$
$$
\text{where} \quad \langle g_i(x)\rangle =
\begin{cases}
0, \quad \quad \; \text{if} \; g_i(x) \leq 0\\
g_i(x), \quad \text{otherwise}\\
\end{cases}
$$
For this reason, $g_2(x)$ needs to be multiplied by $-1$ in order to flip the $\geq$ to a $\leq$ relation. We recommend the normalization of constraints to give equal importance to each of them.
For $g_1(x)$, the coefficient results in $2 \cdot (-0.1) \cdot (-0.9) = 0.18$ and for $g_2(x)$ in $20 \cdot (-0.4) \cdot (-0.6) = 4.8$, respectively. We achieve normalization of constraints by dividing $g_1(x)$ and $g_2(x)$ by its corresponding coefficient.
Finally, the optimization problem to be optimized using *pymoo* is defined by:
\begin{align}
\label{eq:getting_started_pymoo}
\begin{split}
\min \;\; & f_1(x) = (x_1^2 + x_2^2) \\
\min \;\; & f_2(x) = (x_1-1)^2 + x_2^2 \\[1mm]
\text{s.t.} \;\; & g_1(x) = 2 \, (x_1 - 0.1) \, (x_1 - 0.9) \, / \, 0.18 \leq 0\\
& g_2(x) = - 20 \, (x_1 - 0.4) \, (x_1 - 0.6) \, / \, 4.8 \leq 0\\[1mm]
& -2 \leq x_1 \leq 2 \\
& -2 \leq x_2 \leq 2
\end{split}
\end{align}
This getting started guide demonstrates **3** different ways of defining a problem:
- **By Class**
- **Vectorized evaluation:** A set of solutions is evaluated directly.
- **Elementwise evaluation:** Only one solution is evaluated at a time.
- **By Functions**: Functional interface as commonly defined in other optimization libraries.
**Optional**: Define a Pareto set and front for the optimization problem to track convergence to the analytically derived optimum/optima.
Please choose the most convenient implementation for your purpose.
### By Class
Defining a problem through a class allows defining the problem very naturally, assuming the metadata, such as the number of variables and objectives, are known.
The problem inherits from the [Problem](problems/index.ipynb) class. By calling the `super()` function in the constructor `__init__` the problem properties such as the number of variables `n_var`, objectives `n_obj` and constraints `n_constr` are supposed to be initialized. Furthermore, lower `xl` and upper variable boundaries `xu` are supplied as a NumPy array. Please note that most algorithms in our framework require the lower and upper boundaries to be provided and not equal to negative or positive infinity. Finally, the evaluation function `_evaluate` needs to be overridden to calculate the objective and constraint values.
#### Vectorized Evaluation
The `_evaluate` method takes a **two-dimensional** NumPy array `X` with *n* rows and *m* columns as an input. Each row represents an individual, and each column an optimization variable. After doing the necessary calculations, the objective values must be added to the dictionary out with the key `F` and the constraints with key `G`.
**Note**: This method is only called once per iteration for most algorithms. This gives you all the freedom to implement your own parallelization.
```
import numpy as np
from pymoo.model.problem import Problem
class MyProblem(Problem):
    """Bi-objective, bi-constraint test problem (vectorized evaluation).

    `X` arrives as a 2-D array with one candidate solution per row, so all
    objectives and constraints are computed for the whole population at once.
    """

    def __init__(self):
        # Two variables, each bounded by [-2, 2]; two objectives and two
        # inequality constraints in <= 0 form.
        super().__init__(n_var=2,
                         n_obj=2,
                         n_constr=2,
                         xl=np.array([-2, -2]),
                         xu=np.array([2, 2]))

    def _evaluate(self, X, out, *args, **kwargs):
        x1, x2 = X[:, 0], X[:, 1]
        # Both objectives are minimized (f2 was converted from a maximization).
        obj_near_origin = x1 ** 2 + x2 ** 2
        obj_near_one = (x1 - 1) ** 2 + x2 ** 2
        # Constraints normalized by their coefficients (0.18 and 4.8).
        con_outer = 2 * (x1 - 0.1) * (x1 - 0.9) / 0.18
        con_inner = -20 * (x1 - 0.4) * (x1 - 0.6) / 4.8
        out["F"] = np.column_stack([obj_near_origin, obj_near_one])
        out["G"] = np.column_stack([con_outer, con_inner])


vectorized_problem = MyProblem()
```
#### Elementwise Evaluation
The `_evaluate` method takes a **one-dimensional** NumPy array `x` with a number of entries equal to `n_var`. This behavior is enabled by setting `elementwise_evaluation=True` while calling the `super()` method.
**Note**: This method is called in each iteration for **each** solution exactly once.
```
import numpy as np
from pymoo.util.misc import stack
from pymoo.model.problem import Problem
class MyProblem(Problem):
    """Same bi-objective test problem, evaluated one solution at a time.

    With ``elementwise_evaluation=True`` the `_evaluate` method receives a
    1-D array `x` of length ``n_var`` instead of the whole population.
    """

    def __init__(self):
        super().__init__(n_var=2,
                         n_obj=2,
                         n_constr=2,
                         xl=np.array([-2, -2]),
                         xu=np.array([2, 2]),
                         elementwise_evaluation=True)

    def _evaluate(self, x, out, *args, **kwargs):
        x1, x2 = x
        # Two minimized objectives (f2 was flipped from a maximization).
        out["F"] = [x1 ** 2 + x2 ** 2,
                    (x1 - 1) ** 2 + x2 ** 2]
        # Normalized <= 0 constraints.
        out["G"] = [2 * (x1 - 0.1) * (x1 - 0.9) / 0.18,
                    -20 * (x1 - 0.4) * (x1 - 0.6) / 4.8]


elementwise_problem = MyProblem()
```
### By Functions
The definition by functions is a common way in Python and available in many other optimization frameworks. It reduces the problem's definition overhead, and the number of objectives and constraints is simply derived from the list of functions.
After having defined the functions, the problem object is created by initializing `FunctionalProblem`. Please note that the number of variables `n_var` must be passed as an argument.
**Note**: This definition is recommended to be used to define a problem through simple functions. It is worth noting that the evaluation can require many functions calls. For instance, for 100 individuals with 2 objectives and 2 constraints 400 function calls are necessary for evaluation. Whereas, a vectorized definition through the `Problem` class requires only a single function call. Moreover, if metrics are shared between objectives or constraints, they need to be calculated twice.
```
import numpy as np
from pymoo.model.problem import FunctionalProblem
objs = [
lambda x: x[0]**2 + x[1]**2,
lambda x: (x[0]-1)**2 + x[1]**2
]
constr_ieq = [
lambda x: 2*(x[0]-0.1) * (x[0]-0.9) / 0.18,
lambda x: - 20*(x[0]-0.4) * (x[0]-0.6) / 4.8
]
functional_problem = FunctionalProblem(2,
objs,
constr_ieq=constr_ieq,
xl=np.array([-2,-2]),
xu=np.array([2,2]))
```
### (Optional) Pareto front (pf) and Pareto set (ps)
In this case, we have a test problem where the optimum is **known**. For illustration, we like to measure the convergence of the algorithm to the known true optimum. Thus, we override the `_calc_pareto_front` and `_calc_pareto_set` methods for this purpose. Please note that both have to be mathematically derived.
**Note: This is not necessary if your goal is solely optimizing a function**. For test problems, this is usually done to measure and visualize the performance of an algorithm.
The implementation of `func_pf` and `func_ps` looks as follows:
```
from pymoo.util.misc import stack
def func_pf(flatten=True, **kwargs):
    """Analytical Pareto front: f2 = (sqrt(f1) - 1)^2 over two disjoint f1 ranges."""
    def segment(lo, hi):
        # Sample f1 on [lo^2, hi^2] and evaluate the closed-form f2.
        f1 = np.linspace(lo ** 2, hi ** 2, 100)
        return np.column_stack([f1, (np.sqrt(f1) - 1) ** 2])
    return stack(segment(0.1, 0.4), segment(0.6, 0.9), flatten=flatten)
def func_ps(flatten=True, **kwargs):
    """Analytical Pareto set: x2 = 0 with x1 in [0.1, 0.4] or [0.6, 0.9]."""
    zeros = np.zeros(50)
    seg_a = np.column_stack([np.linspace(0.1, 0.4, 50), zeros])
    seg_b = np.column_stack([np.linspace(0.6, 0.9, 50), zeros])
    return stack(seg_a, seg_b, flatten=flatten)
```
This information can be passed to the definition via class or functions as follows:
#### Add to Class
```
import numpy as np
from pymoo.util.misc import stack
from pymoo.model.problem import Problem
class MyTestProblem(MyProblem):
    # Extends MyProblem with the analytically derived optimum so that
    # convergence metrics (e.g. IGD) can be computed against the true front.

    def _calc_pareto_front(self, *args, **kwargs):
        # Delegate to the module-level helper that samples the true front.
        return func_pf(**kwargs)

    def _calc_pareto_set(self, *args, **kwargs):
        # Delegate to the module-level helper that samples the true set.
        return func_ps(**kwargs)


test_problem = MyTestProblem()
```
#### Add to Function
```
from pymoo.model.problem import FunctionalProblem
functional_test_problem = FunctionalProblem(2,
objs,
constr_ieq=constr_ieq,
xl=-2,
xu=2,
func_pf=func_pf,
func_ps=func_ps
)
```
### Initialize the object
Choose the way you have defined your problem and initialize it:
```
problem = test_problem
```
Moreover, we would like to mention that in many test optimization problems, implementation already exists. For example, the test problem *ZDT1* can be initiated by:
```
from pymoo.factory import get_problem
zdt1 = get_problem("zdt1")
```
Our framework has various single- and many-objective optimization test problems already implemented. Furthermore, a more advanced guide for custom problem definitions is available. In case problem functions are computationally expensive, more sophisticated parallelization of the evaluation functions might be worth looking at.
[Optimization Test Problems](problems/index.ipynb) |
[Define a Custom Problem](problems/custom.ipynb) |
[Parallelization](problems/parallelization.ipynb) |
[Callback](interface/callback.ipynb) |
[Constraint Handling](misc/constraint_handling.ipynb)
## 3. Initialization of an Algorithm
Moreover, we need to initialize a method to optimize the problem.
In *pymoo*, factory methods create an `algorithm` object to be used for optimization. For each of those methods, an API documentation is available, and through supplying different parameters, algorithms can be customized in a plug-and-play manner.
Depending on the optimization problem, different algorithms can be used to optimize the problem. Our framework offers various [Algorithms](algorithms/index.ipynb), which can be used to solve problems with different characteristics.
In general, the choice of a suitable algorithm for optimization problems is a challenge itself. Whenever problem characteristics are known beforehand, we recommended using those through customized operators.
However, in our case, the optimization problem is rather simple, but the aspect of having two objectives and two constraints should be considered. We decided to use [NSGA-II](algorithms/nsga2.ipynb) with its default configuration with minor modifications. We chose a population size of 40 (`pop_size=40`) and decided, instead of generating the same number of offspring, to create only 10 (`n_offsprings=10`). Such an implementation is a greedier variant and improves the convergence of rather simple optimization problems without difficulties regarding optimization, such as the existence of local Pareto fronts.
Moreover, we enable a duplicate check (`eliminate_duplicates=True`), making sure that the mating produces offsprings that are different from themselves and the existing population regarding their design space values. To illustrate the customization aspect, we listed the other unmodified default operators in the code snippet below.
```
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation

# NSGA-II with a greedier offspring ratio: 40 parents but only 10 offspring
# per generation, which improves convergence on this simple problem.
# Sampling/crossover/mutation below are the default operators, listed
# explicitly to illustrate the customization points.
algorithm = NSGA2(
    pop_size=40,
    n_offsprings=10,
    sampling=get_sampling("real_random"),
    crossover=get_crossover("real_sbx", prob=0.9, eta=15),
    mutation=get_mutation("real_pm", eta=20),
    eliminate_duplicates=True  # avoid duplicate designs in mating/population
)
```
The `algorithm` object contains the implementation of NSGA-II with the custom settings supplied to the factory method.
## 4. Definition of a Termination Criterion
Furthermore, a termination criterion needs to be defined to start the optimization procedure finally. Different kind of [Termination Criteria](interface/termination.ipynb) are available. Here, since the problem is rather simple, we run the algorithm for some number of generations.
```
from pymoo.factory import get_termination
termination = get_termination("n_gen", 40)
```
Instead of the number of generations (or iterations), other criteria such as the number of function evaluations or the improvement in design or objective space between generations can be used.
## 5. Optimize
Finally, we are solving the problem with the algorithm and termination criterion we have defined.
In *pymoo*, we provide two interfaces for solving an optimization problem:
- **Functional:** Commonly in Python, a function is used as a global interface. In pymoo, the `minimize` method is the most crucial method which is responsible for using an algorithm to solve a problem using
other attributes such as `seed`, `termination`, and others.
- **Object Oriented:** The object-oriented interface directly uses the algorithm object to perform an iteration.
This allows the flexibility of executing custom code very quickly between iterations. However, features already
implemented in the functional approach, such as displaying metrics, saving the history, or pre-defined callbacks, need to be incorporated manually.
Both ways have their benefits and drawbacks depending on the different use cases.
### Functional Interface
The functional interface is provided by the `minimize` method. By default, the method performs deep-copies of the algorithm and the termination object. Which means the objects are not altered during the function call. This ensures repetitive function calls end up with the same results. The `minimize` function returns the [Result](interface/result.ipynb) object, which provides attributes such as the optimum.
```
from pymoo.optimize import minimize

# minimize() deep-copies the algorithm and termination objects, so repeated
# calls with the same seed reproduce the same result. save_history snapshots
# the algorithm each generation for the convergence analysis below.
res = minimize(problem,
               algorithm,
               termination,
               seed=1,
               save_history=True,
               verbose=True)
```
The [Result](interface/result.ipynb) object provides the corresponding X and F values and some more information.
### Object-Oriented Interface
On the contrary, the object-oriented approach directly modifies the algorithm object by calling the `next` method. Thus, it makes sense to create a deepcopy of the algorithm object beforehand, as shown in the code below.
In the while loop, the algorithm object can be accessed to be modified or for other purposes.
**NOTE**: In this guide, we have used the functional interface because the history is used during analysis.
```
import copy
# perform a copy of the algorithm to ensure reproducibility
obj = copy.deepcopy(algorithm)
# let the algorithm know what problem we are intending to solve and provide other attributes
obj.setup(problem, termination=termination, seed=1)
# until the termination criterion has not been met
while obj.has_next():
# perform an iteration of the algorithm
obj.next()
# access the algorithm to print some intermediate outputs
print(f"gen: {obj.n_gen} n_nds: {len(obj.opt)} constr: {obj.opt.get('CV').min()} ideal: {obj.opt.get('F').min(axis=0)}")
# finally obtain the result object
result = obj.result()
```
## 6. Visualization of Results and Convergence
### Results
The optimization results are illustrated below (design and objective space). The solid lines represent the analytically derived Pareto set, and front in the corresponding space, and the circles represent solutions found by the algorithm. It can be observed that the algorithm was able to converge, and a set of nearly-optimal solutions was obtained.
```
from pymoo.visualization.scatter import Scatter
# get the pareto-set and pareto-front for plotting
ps = problem.pareto_set(use_cache=False, flatten=False)
pf = problem.pareto_front(use_cache=False, flatten=False)
# Design Space
plot = Scatter(title = "Design Space", axis_labels="x")
plot.add(res.X, s=30, facecolors='none', edgecolors='r')
if ps is not None:
plot.add(ps, plot_type="line", color="black", alpha=0.7)
plot.do()
plot.apply(lambda ax: ax.set_xlim(-0.5, 1.5))
plot.apply(lambda ax: ax.set_ylim(-2, 2))
plot.show()
# Objective Space
plot = Scatter(title = "Objective Space")
plot.add(res.F)
if pf is not None:
plot.add(pf, plot_type="line", color="black", alpha=0.7)
plot.show()
```
Visualization is a vital post-processing step in multi-objective optimization. Although it seems to be pretty easy for our example optimization problem, it becomes much more difficult in higher dimensions where trade-offs between solutions are not readily observable. For visualizations in higher dimensions, various more advanced [Visualizations](visualization/index.ipynb) are implemented in our framework.
### Convergence
A not negligible step is the post-processing after having obtained the results. We strongly recommend not only analyzing the final result but also the algorithm's behavior. This gives more insights into the convergence of the algorithm.
For such an analysis, intermediate steps of the algorithm need to be considered. This can either be achieved by:
- A `Callback` class storing the necessary information in each iteration of the algorithm.
- Enabling the `save_history` flag when calling the minimize method to store a deepcopy of the algorithm's objective each iteration.
We provide some more details about each variant in our [convergence](misc/convergence.ipynb) tutorial.
As you might have already seen, we have set `save_history=True` when calling the `minimize` method in this getting started guide and, thus, will use the `history` for our analysis. Moreover, we need to decide what metric should be used to measure the performance of our algorithm. In this tutorial, we are going to use `Hypervolume` and `IGD`. Feel free to look at our [performance indicators](misc/performance_indicator.ipynb) to find more information about metrics to measure the performance of multi-objective algorithms.
As a first step we have to extract the population in each generation of the algorithm. We extract the constraint violation (`cv`), the objective space values (`F`) and the number of function evaluations (`n_evals`) of the corresponding generation.
```
n_evals = []  # corresponding number of function evaluations
F = []        # the objective space values in each generation
cv = []       # constraint violation in each generation

# iterate over the per-generation deepcopies saved by save_history=True
for algorithm in res.history:
    # store the cumulative number of function evaluations
    n_evals.append(algorithm.evaluator.n_eval)
    # retrieve the optimum (current non-dominated set) from the algorithm
    opt = algorithm.opt
    # store the least constraint violation in this generation
    cv.append(opt.get("CV").min())
    # filter out only the feasible solutions and append their objectives
    feas = np.where(opt.get("feasible"))[0]
    _F = opt.get("F")[feas]
    F.append(_F)
```
**NOTE:** If your problem has different scales on the objectives (e.g. first objective in range of [0.1, 0.2] and the second objective [100, 10000] you **HAVE** to normalize to measure the performance in a meaningful way! This example assumes no normalization is necessary to keep things a bit simple.
### Constraint Violation (CV)
Here, in the first generation, a feasible solution was already found.
Since the constraints of the problem are rather simple, the constraints are already satisfied in the initial population.
```
import matplotlib.pyplot as plt
k = min([i for i in range(len(cv)) if cv[i] <= 0])
first_feas_evals = n_evals[k]
print(f"First feasible solution found after {first_feas_evals} evaluations")
plt.plot(n_evals, cv, '--', label="CV")
plt.scatter(first_feas_evals, cv[k], color="red", label="First Feasible")
plt.xlabel("Function Evaluations")
plt.ylabel("Constraint Violation (CV)")
plt.legend()
plt.show()
```
### Hypervolume (HV)
Hypervolume is a very well-known performance indicator for multi-objective problems. It is known to be pareto-compliant and is based on the volume between a predefined reference point and the solution provided. Hypervolume requires to define a reference point `ref_point` which shall be larger than the maximum value of the Pareto front.
**Note:** Hypervolume becomes computationally expensive with increasing dimensionality. The exact hypervolume can be calculated efficiently for 2 and 3 objectives. For higher dimensions, some researchers use a hypervolume approximation, which is not available yet in pymoo.
```
import matplotlib.pyplot as plt
from pymoo.performance_indicator.hv import Hypervolume
# MODIFY - this is problem dependend
ref_point = np.array([1.0, 1.0])
# create the performance indicator object with reference point
metric = Hypervolume(ref_point=ref_point, normalize=False)
# calculate for each generation the HV metric
hv = [metric.calc(f) for f in F]
# visualze the convergence curve
plt.plot(n_evals, hv, '-o', markersize=4, linewidth=2)
plt.title("Convergence")
plt.xlabel("Function Evaluations")
plt.ylabel("Hypervolume")
plt.show()
```
### IGD
For IGD the Pareto front needs to be known or to be approximated.
In our framework the Pareto front of **test problems** can be obtained by:
```
pf = problem.pareto_front(flatten=True, use_cache=False)
```
For real-world problems, you have to use an **approximation**. An approximation can be obtained by running an algorithm a couple of times and extracting the non-dominated solutions out of all solution sets. If you have only a single run, an alternative is to use the non-dominated set of solutions from that run as an approximation. However, the result then only indicates the algorithm's progress in converging to its final set.
```
import matplotlib.pyplot as plt
from pymoo.performance_indicator.igd import IGD
if pf is not None:
# for this test problem no normalization for post prcessing is needed since similar scales
normalize = False
metric = IGD(pf=pf, normalize=normalize)
# calculate for each generation the HV metric
igd = [metric.calc(f) for f in F]
# visualze the convergence curve
plt.plot(n_evals, igd, '-o', markersize=4, linewidth=2, color="green")
plt.yscale("log") # enable log scale if desired
plt.title("Convergence")
plt.xlabel("Function Evaluations")
plt.ylabel("IGD")
plt.show()
```
### Running Metric
Another way of analyzing a run when the true Pareto front is **not** known is using a recently proposed [running metric](https://www.egr.msu.edu/~kdeb/papers/c2020003.pdf). The running metric shows the difference in the objective space from one generation to another and uses the algorithm's survival to visualize the improvement.
This metric is also being used in pymoo to determine the termination of a multi-objective optimization algorithm if no default termination criteria have been defined.
For instance, this analysis reveals that the algorithm was able to improve from the 4th to the 5th generation significantly.
```
from pymoo.util.running_metric import RunningMetric
running = RunningMetric(delta_gen=5,
n_plots=3,
only_if_n_plots=True,
key_press=False,
do_show=True)
for algorithm in res.history[:15]:
running.notify(algorithm)
```
Plotting until the final population shows that the algorithm seems to have more or less converged, and only a small improvement has been made.
```
from pymoo.util.running_metric import RunningMetric
running = RunningMetric(delta_gen=10,
n_plots=4,
only_if_n_plots=True,
key_press=False,
do_show=True)
for algorithm in res.history:
running.notify(algorithm)
```
## 7. Summary
We hope you have enjoyed the getting started guide. For more topics we refer to each section covered by on the [landing page](https://pymoo.org). If you have any question or concern do not hesitate to [contact us](contact.rst).
### Citation
If you have used **pymoo** for research purposes please refer to our framework in your reports or publication by:
## 8. Source Code
In this guide, we have provided a couple of options on defining your problem and how to run the optimization.
You might have already copied the code into your IDE. However, if not, the following code snippets cover the problem definition, algorithm initializing, solving the optimization problem, and visualization of the non-dominated set of solutions altogether.
```
import numpy as np
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.model.problem import Problem
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
class MyProblem(Problem):
    """Bi-objective test problem with two normalized inequality constraints.

    Elementwise evaluation: `x` is a 1-D array of the two design variables,
    each bounded by [-2, 2].
    """

    def __init__(self):
        super().__init__(n_var=2,
                         n_obj=2,
                         n_constr=2,
                         xl=np.array([-2, -2]),
                         xu=np.array([2, 2]),
                         elementwise_evaluation=True)

    def _evaluate(self, x, out, *args, **kwargs):
        x1, x2 = x
        # Both objectives minimized (f2 converted from a maximization).
        first_obj = x1 ** 2 + x2 ** 2
        second_obj = (x1 - 1) ** 2 + x2 ** 2
        # Constraints in <= 0 form, divided by their coefficients.
        first_con = 2 * (x1 - 0.1) * (x1 - 0.9) / 0.18
        second_con = -20 * (x1 - 0.4) * (x1 - 0.6) / 4.8
        out["F"] = [first_obj, second_obj]
        out["G"] = [first_con, second_con]
problem = MyProblem()

# Default NSGA-II configuration with a larger population than the guide above.
algorithm = NSGA2(pop_size=100)

# Run for 100 generations with a fixed seed for reproducibility.
res = minimize(problem,
               algorithm,
               ("n_gen", 100),
               verbose=True,
               seed=1)

# Plot the obtained non-dominated solutions in objective space.
plot = Scatter()
plot.add(res.F, color="red")
plot.show()
```
| github_jupyter |
This notebook creates a VM in the user's project with the airflow scheduler and webserver. A default GCP zone for the VM has been chosen (below). Feel free to change this as desired.
## Airflow Dashboard
After successful setup of the Airflow VM, you will be able to view the Airflow Dashboard by creating an ssh tunnel to the VM. To do so, a sample command that you could execute:
gcloud compute ssh --zone us-central1-b datalab-airflow -- -N -p 22 -L localhost:5000:localhost:8080
Once this tunnel is open, you'd be able to view the dashboard by navigating to http://localhost:5000 on your browser.
```
# Get the latest datalab version. Restart the kernel.
!pip install --upgrade --force-reinstall datalab

# Default GCP zone for the Airflow VM; change as desired.
zone='us-central1-b'

from google.datalab import Context
import google.datalab.storage as storage

# Create the GCS bucket the VM's cron job will sync DAG files from.
project = Context.default().project_id
vm_name = 'datalab-airflow'
# The name of this GCS bucket follows a convention between this notebook and
# the 'BigQuery Pipeline' tutorial notebook, so don't change this.
gcs_dag_bucket_name = project + '-' + vm_name
gcs_dag_bucket = storage.Bucket(gcs_dag_bucket_name)
gcs_dag_bucket.create()
# Startup script executed on the VM at first boot: installs Airflow and its
# dependencies, launches the scheduler and webserver, and installs a cron job
# that syncs DAG files from the GCS bucket every minute.
# BUG FIX: removed a stray "EOF" line before the closing quote — a leftover
# heredoc terminator that bash would have tried to execute as a command.
vm_startup_script_contents = """#!/bin/bash
apt-get update
apt-get --assume-yes install python-pip
pip install datalab==1.1.2
pip install apache-airflow==1.9.0
pip install pandas-gbq==0.3.0
export AIRFLOW_HOME=/airflow
export AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION=False
export AIRFLOW__CORE__LOAD_EXAMPLES=False
airflow initdb
airflow scheduler &
airflow webserver -p 8080 &
# We append a gsutil rsync command to the cron file and have this run every minute to sync dags.
PROJECT_ID=$(gcloud info --format="get(config.project)")
GCS_DAG_BUCKET=$PROJECT_ID-datalab-airflow
AIRFLOW_CRON=temp_crontab.txt
crontab -l > $AIRFLOW_CRON
DAG_FOLDER="dags"
LOCAL_DAG_PATH=$AIRFLOW_HOME/$DAG_FOLDER
mkdir $LOCAL_DAG_PATH
echo "* * * * * gsutil rsync gs://$GCS_DAG_BUCKET/$DAG_FOLDER $LOCAL_DAG_PATH" >> $AIRFLOW_CRON
crontab $AIRFLOW_CRON
rm $AIRFLOW_CRON
"""
vm_startup_script_file_name = 'vm_startup_script.sh'
# Write the startup script to a local file that is passed to the VM via
# --metadata-from-file. The context manager guarantees the handle is flushed
# and closed even if the write raises (the original left it open on error).
with open(vm_startup_script_file_name, 'w') as script_file:
    script_file.write(vm_startup_script_contents)
import subprocess

# Create the Airflow VM with full cloud-platform scope and the startup script
# written above. BUG FIX: the original used the Python-2-only `print` statement;
# print() as a function with a single argument behaves the same on Python 2
# and is required on Python 3.
print(subprocess.check_output([
    'gcloud', 'compute', '--project', project, 'instances', 'create', vm_name,
    '--zone', zone,
    '--machine-type', 'n1-standard-1',
    '--network', 'default',
    '--maintenance-policy', 'MIGRATE',
    '--scopes', 'https://www.googleapis.com/auth/cloud-platform',
    '--image', 'debian-9-stretch-v20171025',
    '--min-cpu-platform', 'Automatic',
    '--image-project', 'debian-cloud',
    '--boot-disk-size', '10',
    '--boot-disk-type', 'pd-standard',
    '--boot-disk-device-name', vm_name,
    '--metadata-from-file', 'startup-script=' + vm_startup_script_file_name]))
```
# Cleanup
```
# The following cleans up the VM and associated GCS bucket. Uncomment and run.
#!gsutil rm -r gs://$gcs_dag_bucket_name
#!gcloud compute instances delete datalab-airflow --zone us-central1-b --quiet
# This just verifies that cleanup actually worked. Uncomment and run. Should
# show an error like "BucketNotFoundException: 404 ...".
#!gsutil ls gs://$gcs_dag_bucket_name
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
from datetime import datetime
import io
import csv
import shutil
import matplotlib.pyplot as plt
year = 2017
YEAR_FLAG = 'train'  # 'train' fills target_folder_train, anything else the test split

img_folder = '/datadrive/timelapse_images_fast'
timeseries_folder = '/datadrive/timeseries_derived_data_products_'+str(year)
target_folder_train = '/datadrive/train_data'
target_folder_test = '/datadrive/test_data'

# (year, image root) pairs; kept as a list so multiple years can be added.
year_path = [(year, img_folder)]  # [(y, img_folder) for y in range(2011, 2018)]


def _load_years(template):
    """Concatenate one sensor CSV per year, substituting the year into *template*."""
    frames = [pd.read_csv(os.path.join(timeseries_folder, template.format(y)))
              for y, _ in year_path]
    return pd.concat(frames, axis=0) if frames else pd.DataFrame()


# Rock resistivity, precipitation, and wind sensor time series.
df = _load_years('MH11_resistivity_rock_{}.csv')
df2 = _load_years('MH25_vaisalawxt520prec_{}.csv')
# BUG FIX: the wind file name hard-coded 2017 ('..._2017.csv'.format(y)
# ignored y); the year is now substituted like the other two sensors.
df3 = _load_years('MH25_vaisalawxt520windpth_{}.csv')
def interpolate_to_timestamps(df, time_stamps):
    """Resample *df* onto a regular 4-minute grid and pick, for each entry of
    time_stamps['time'], the resampled row nearest in time.

    df must carry a DatetimeIndex. Returns a sub-frame of the resampled data
    with one row per requested timestamp (duplicates are possible when two
    requests map to the same grid point).
    """
    resampled = df.resample('4T').interpolate()
    # get_indexer(..., method='nearest') is the vectorized equivalent of the
    # original per-timestamp get_loc() loop, and keeps working on pandas >= 2.0
    # where Index.get_loc(key, method=...) was removed.
    ind = resampled.index.get_indexer(pd.DatetimeIndex(time_stamps.time),
                                      method='nearest')
    return resampled.iloc[ind, :]
def extract_time_stamps(year_path):
    """Collect capture timestamps for all images of the given years.

    input: [(year, root_dir), ...] where root_dir contains sub-directories
    named '<year>...' (presumably one per day — confirm against the image
    dump layout) holding 'YYYYmmdd_HHMMSS.JPG' files.

    Returns a DataFrame with columns 'time' (datetime) and 'filename'.
    """
    dfs = pd.DataFrame()
    for y, root_dir in year_path:
        path_dict = {}
        # BUG FIX: the original glued the pattern together with '+' and no
        # path separator ('<root>**/<year>*/*'), which only matched by
        # accident; os.path.join builds a correct recursive pattern whether
        # or not root_dir has a trailing slash.
        pattern = os.path.join(root_dir, '**', str(y) + '*', '*')
        for filename in glob.iglob(pattern, recursive=True):
            _, fname = os.path.split(filename)
            path_dict[fname] = extract_time(fname)
        df = pd.DataFrame({'time': list(path_dict.values()),
                           'filename': list(path_dict.keys())})
        dfs = pd.concat((dfs, df), axis=0)
    return dfs


def extract_time(filname):
    """Parse the capture time encoded in an image file name like '20170601_120000.JPG'."""
    return datetime.strptime(filname, '%Y%m%d_%H%M%S.JPG')
def extract_summer_days(time_stamps):
    """Keep only daytime (08:00-20:00, inclusive) timestamps in May-July.

    time_stamps: DataFrame with a 'time' column of datetimes.
    Returns the filtered sub-frame (original index preserved).
    """
    day_start = pd.Timestamp(datetime(year=2000, month=1, day=1, hour=8)).time()
    day_end = pd.Timestamp(datetime(year=2000, month=1, day=1, hour=20)).time()
    times = time_stamps['time']
    is_day = (times.dt.time >= day_start) & (times.dt.time <= day_end)
    # NOTE(review): the original constants were named 'june'/'august' but were
    # built from month=5 and month=8, i.e. the window is May..July inclusive.
    # The numeric behavior is preserved here.
    first_month = 5
    past_month = 8
    is_summer = (times.dt.month >= first_month) & (times.dt.month < past_month)
    # BUG FIX: the original applied a full-length boolean mask (built from
    # `time_stamps`) to the already day-filtered frame, which misaligns and
    # raises an alignment error in modern pandas. Combining both masks and
    # indexing the original frame once is equivalent to the intended filter.
    return time_stamps[is_day & is_summer]
#time_stamps = pd.read_pickle('pd_time_stamps.pkl')
#time_stamps.head()
# Gather image timestamps and restrict them to summer daytime shots.
time_stamps = extract_time_stamps(year_path)
summer_days = extract_summer_days(time_stamps)

# Index both sensor frames by time, then pick the sensor reading nearest to
# each image timestamp (4-minute resampled/interpolated grid).
df = df.set_index(pd.DatetimeIndex(df.loc[:, 'time']))
df_interp = interpolate_to_timestamps(df, summer_days)
df2 = df2.set_index(pd.DatetimeIndex(df2.loc[:, 'time']))
df2_interp = interpolate_to_timestamps(df2, summer_days)

# Attach the image filename to each matched sensor row.
# NOTE(review): assumes interpolate_to_timestamps returns exactly one row per
# entry of summer_days, in the same order — verify before reuse.
df_interp['path'] = summer_days.filename.values

# Discretize 10 cm rock resistivity into 3 classes:
# class 0: < 300 Mohm, class 1: 300..1200 Mohm, class 2: > 1200 Mohm.
# NOTE(review): values exactly equal to 300 or 1200 match no class, which
# would make the np.where result shorter than the frame — TODO confirm this
# cannot occur in the data.
df_interp['label_thresh_rest10_1'] = df_interp.loc[:, 'resistivity_10cm [Mohm]'] < 300
df_interp['label_thresh_rest10_2'] = np.logical_and(300 < df_interp.loc[:, 'resistivity_10cm [Mohm]'],
                                                    df_interp.loc[:, 'resistivity_10cm [Mohm]'] < 1200)
df_interp['label_thresh_rest10_3'] = 1200 < df_interp.loc[:, 'resistivity_10cm [Mohm]']
# np.where over the three one-hot columns yields the class index 0/1/2.
df_interp['label_thresh_rest10'] = np.where(df_interp.loc[:, ['label_thresh_rest10_1',
                                                              'label_thresh_rest10_2',
                                                              'label_thresh_rest10_3']].values)[1]

# Flag samples recorded during rain (> 1 mm/h) so they can be filtered out.
df_interp['rain_label'] = df2_interp.loc[:, 'rain_intensity [mm/h]'] > 1

# Export label CSVs: a rain-free subset, and the full set used downstream.
df_interp.query('rain_label == False').loc[:, ['path', 'label_thresh_rest10']].to_csv('labels_rain_resist.csv', header=False)
df_interp.loc[:, ['path', 'label_thresh_rest10']].to_csv('/datadrive/labels.csv', header=False)
#df_interp.loc[:,'label_thresh_rest10'].hist(bins=3)
## Preprocessing
Run the following cell to generate the folder for the Torch ImageLoader class.
The cell requires a labels.csv file which contains the filenames
of the image files and corresponding resistivity labels (which can be extended
from binary to multiclass depending on resistivity threshold)
```
# Sort the cropped images into per-label folders for torch's ImageFolder.
# labels.csv rows are: timestamp, filename, class label (written header-less).
with open('/datadrive/labels.csv') as csvfile:
    label_rows = csv.reader(csvfile, delimiter=',')
    for row in label_rows:
        print(row)
        timestamp, img_name, label = row[0], row[1], row[2]
        # images live in per-day folders named by the date prefix of the stamp
        month_folder = timestamp[:10]
        target_root = target_folder_train if YEAR_FLAG == 'train' else target_folder_test
        shutil.copyfile(os.path.join(img_folder, month_folder, img_name),
                        os.path.join(target_root, label, img_name))
```
| github_jupyter |
# Running an example simulation
It's all very simple, mostly because this is a simple simulation.
First, let's import stuff we'll need later on:
```
import numpy as np
from pandemic_sim.simulation import Person, Simulation
from pandemic_sim.geometries import RectangleGeometry
from pandemic_sim.health_systems import SimpleHealthSystem
from pandemic_sim.particle_engines import (DefaultParticleEngine,
VelocityVerletIntegrator)
from pandemic_sim.transmission_models import DefaultTransmissionModel
from pandemic_sim.disease_models import DefaultPersonalDiseaseModel
from pandemic_sim.visualizations import (DefaultVisualization,
DefaultPersonsDrawer,
RectangleGeometryDrawer,
SimpleHealthSystemCurvesPlotter)
from pandemic_sim.animators import CelluloidAnimator
```
- `Person` is class representing a person with attributes such as position in the room, infection status and whether the person is dead or alive
- `RectangleGeometry` represents a rectangular room and implements the force the walls exert on persons
- `Simulation` is the class containing the actual simulation code
- `SimpleHealthSystem` models the influence of the health system on the probability of a person dying during their infection. Setting a threshold and a death rate factor, you can emulate the limited ICU bed capacity.
- `DefaultParticleEngine` is a class which performs the purely physics part of the simulation, meaning the movement of persons and the forces acting between them. It does that with the help of the `VelocityVerletIntegrator` class, which numerically integrates equations of motion
- `DefaultTransmissionModel` is a disease transmission model based on probabilities for exposing other people and for being susceptible
- `DefaultPersonalDiseaseModel` is a model which, for a single person, determines how likely that person is to die or to be cured from the disease in given simulation time step
- in `DefaultVisualization` is the... well... default visualization. It shows all persons as solid circles and additionally plots the timeline of infected and immune persons and fatalities
- `RectangeGeometryDrawer`, `DefaultPersonsDrawer`, `SimpleHealthSystemCurvesPlotter`: classes in which the drawing / plotting of different things is implemented
- `CelluloidAnimator` contains code to animate a visualization using the [`celluloid` package](https://github.com/jwkvam/celluloid)
If you want to change any of these components, the object-oriented design should make it relatively easy to, e.g., implement a new visualization. The visualization and animation code is currently not documented, though.
Preamble done, so let's start setting up our simulation:
```
# 200 people in a 25 x 25 room; transmission can occur only when two people
# come within distance 1 of each other (prob_dist is a step function)
n_persons = 200
room = RectangleGeometry(25, 25)
transmission_model = DefaultTransmissionModel(prob_dist=lambda d: d < 1)
# persons start with random positions and velocities
initial_positions = room.get_random_position_set(n_persons)
persons = [Person(pos=pos,
                  vel=np.random.uniform(low=(-2, -2),
                                        high=(2, 2)),
                  personal_disease_model=None, personal_transmission_model=None,
                  infected=False, immune=False)
           for pos in initial_positions]
# per-person models are attached afterwards because each needs a reference
# to the already-constructed person object
for p in persons:
    p.personal_transmission_model = transmission_model.personal_tm_factory(
        p, in_prob=1.0, out_prob=0.05)
    p.personal_disease_model = DefaultPersonalDiseaseModel(p, death_probability=0.00015, time_to_heal=150)
```
So we're going to have 200 persons moving around in a room with width 25 and height 25 (arbitrary units). The transmission model has an argument `prob_dist` which is a base probability for transmission to occur and which depends on the distance between persons. `death_probability` is the probability for an infected person to die in one unit of simulation time. The person-specific transmission model takes a probability for being susceptible (`in_prob`) and a probability for exposing others (`out_prob`). The latter is rather low, simulating the effect of a simple tissue face mask. `time_to_heal` is the number of simulation time units it takes for an infected person to get cured (and thus become immune)
Now we want some random persons to start out infected:
```
# Seed the epidemic: infect 2% of the population at random.
# BUG FIX: np.random.choice defaults to replace=True, so the same person
# could be drawn twice and fewer than n_persons // 50 distinct people would
# start infected; replace=False guarantees exactly that many.
chosen_ones = np.random.choice(np.arange(n_persons), n_persons // 50, replace=False)
for i in chosen_ones:
    persons[i].infected = True
```
Sorry, guys.
On to setting up the simulation object:
```
# ICU capacity model: above 50 simultaneous infections, the per-step death
# probability is multiplied by 5
health_system = SimpleHealthSystem(threshold=50, death_probability_factor=5.0)
pe = DefaultParticleEngine(cutoff=0.75, geometry_gradient=room.gradient,
                           integrator_params={'timestep': 0.1},
                           integrator_class=VelocityVerletIntegrator,
                           inter_particle_force_constant=20.0,
                           geometry_force_constant=20.0)
sim = Simulation(room, persons, health_system, transmission_model, particle_engine=pe)
```
The parameters with which the health system object is initialized mean that as soon as more than 50 persons are infected, the probability of a person dying in a time step is increased by a factor of five.
As for the simulaton object: lots of parameters here:
- `timestep`: the time step for the integration scheme, which approximately solves the equations of motions for persons. The smaller that parameter is, the more accurate and detailed the simulated movement, but the longer the simulation takes to cover some predefined time span
- `cutoff`: distance at which the repulsive force between two persons (which makes them bounce off each other) kicks in
- `inter_particle_force_constant`: determines how hard two people bounce off each other. If this were set to zero, they could just pass through each other unhindered
- `geometry_force_constant`: determines how hard persons bounce off walls
Now we can run the simulation for some number `n_steps`:
```
# run 500 integration steps (simulated time = n_steps * timestep)
n_steps = 500
simulation_results = sim.run(n_steps)
```
Done? Amazing. Let's use the above-discussed `DefaultVisualization` and the `CelluloidAnimator` to get a `.mp4` file with the animated simulation results:
```
# draw persons as circles of radius cutoff/2 so touching circles correspond
# to interacting persons
radius = pe.cutoff / 2
viz = DefaultVisualization(simulation_results, RectangleGeometryDrawer(room),
                           DefaultPersonsDrawer(radius), SimpleHealthSystemCurvesPlotter(health_system))
# render every 2nd step at 20 fps into output.mp4
animator = CelluloidAnimator(viz, out="output.mp4", frame_rate=20)
animator.animate(n_steps, interval=2)
```
The `radius` determines the radius of the circles representing the persons. You want to set this to `pe.cutoff / 2`, otherwise things will look weird (persons might not touch or overlap too much). The dashed red line in the "# infected" plot is the health system threshold.
Animating takes quite a while, too; comparable to the time it takes to run the simulation itself. In the animation, blue dots represent healthy persons, orange dots infected persons, and gray dots dead persons.
Let's view the results:
```
# display the rendered animation inline in the notebook
from IPython.display import Video
Video("output.mp4")
```
That's it—enjoy playing around with the parameters and the code and if you have suggestions on how to improve all this, feel free to open an issue and / or a pull request!
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from prml.utils.datasets import load_mnist,load_iris
from prml.kernel_method import BaseKernelMachine
```
# PCA
```
class PCA():
    """Principal component analysis via eigendecomposition of the covariance.

    Attributes:
        X_mean (1-D array): mean of the training data (subtracted before projecting)
        weight (2-D array): projection matrix; eigenvectors in columns, sorted
            by decreasing eigenvalue
        eig_val (1-D array): eigenvalues, sorted decreasingly
        importance (1-D array): contribution ratio of each component
    """
    def __init__(self):
        pass
    def fit(self,X):
        """Fit the projection matrix from data.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
        """
        N = X.shape[0]
        X_mean = X.mean(axis = 0)
        # biased (1/N) sample covariance
        S = (X - X_mean).T@(X - X_mean)/N
        eig_val,eig_vec = np.linalg.eigh(S)
        eig_val,eig_vec = np.real(eig_val),np.real(eig_vec)
        # sort components by decreasing eigenvalue
        idx = np.argsort(eig_val)[::-1]
        eig_val,eig_vec = eig_val[idx],eig_vec[:,idx]
        self.X_mean = X_mean
        self.eig_val = eig_val
        self.importance = eig_val/eig_val.sum()
        self.weight = eig_vec
    def transform(self,X,M,return_importance=False,whitening=False):
        """Project X onto the first M principal components.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): number of principal components; if M > N_dim, M = N_dim
            return_importance (bool): also return the retained variance ratio
            whitening (bool): scale each component to unit variance
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
            importance_rate (float): only if return_importance is True
        """
        # BUG FIX: data must be centred with the training mean in every
        # branch (previously only the whitening branch subtracted X_mean).
        X_centered = X - self.X_mean
        if whitening:
            # BUG FIX: whitening divides by sqrt(eigenvalue) so each projected
            # dimension has exactly unit variance (previously sqrt(importance),
            # which is off by a global sqrt(trace) factor).
            return X_centered@self.weight[:,:M]/np.sqrt(self.eig_val[:M])
        elif return_importance:
            return X_centered@self.weight[:,:M],self.importance[:M].sum()
        else:
            return X_centered@self.weight[:,:M]
    def fit_transform(self,X,M,return_importance=False,whitening=False):
        """Fit the model, then project X (see transform for arguments)."""
        self.fit(X)
        return self.transform(X,M,return_importance,whitening)
# project the 4-D iris data onto the first two (whitened) principal
# components and scatter-plot by class
X,y = load_iris()
pca = PCA()
X_proj = pca.fit_transform(X,2,whitening=True)
fig,axes = plt.subplots(1,1,figsize=(10,7))
for idx,label in enumerate(np.unique(y)):
    axes.scatter(x=X_proj[y == label,0],
                 y=X_proj[y == label,1],
                 alpha=0.8,
                 label=label)
axes.set_title("iris PCA (4dim -> 2dim)")
plt.legend()
plt.show()
```
### mnist image compression
```
# Rank-M reconstructions of an MNIST "3": fit PCA on 600 flattened digits and
# rebuild one image from its first M principal components.
X,y = load_mnist([3])
X = X[:600].reshape(-1,28*28)
X_mean = X.mean(axis=0)
pca = PCA()
pca.fit(X)
img = X[0]  # already a flat (784,) row after the reshape above
fig = plt.figure(figsize=(10,7))
ax = fig.add_subplot(231)
ax.imshow(img.reshape(28,28))
ax.set_title("original image")
ax.axis("off")
# per-component contributions: column j is (u_j . (img - mean)) * u_j, so the
# rank-M reconstruction is X_mean + the sum of the first M columns
weight = pca.weight
approximate = np.dot(weight.T,img - X_mean)*weight
for n,M in enumerate([1,10,50,100,250]):
    ax = fig.add_subplot(int(f"23{n+2}"))
    img_proj = X_mean + np.sum(approximate[:,:M],axis = 1)
    # DEAD-CODE FIX: removed `img = img.ravel()` (no-op on a 1-D row) and the
    # unused `i,j = (n+1)//2,(n+1)%3` assignment
    ax.imshow(img_proj.reshape(28,28))
    ax.set_title(f"M = {M}")
    ax.axis("off")
plt.show()
```
# ProbabilisticPCA
```
class ProbabilisticPCA():
    """Probabilistic PCA fitted by maximum likelihood (closed form, O(D^3)).

    Attributes:
        D (int): original dimensionality of the data
        mu (1-D array): mean of the data
        W (2-D array): ML factor loading matrix (set by transform)
        sigma (float): ML isotropic noise variance (set by transform)
        U (2-D array): eigenvectors of the data covariance (columns, sorted
            by decreasing eigenvalue)
        lamda (1-D array): corresponding eigenvalues
    """
    def __init__(self) -> None:
        pass
    def fit(self,X):
        """Eigendecompose the data covariance matrix.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
        """
        N = X.shape[0]
        X_mean = X.mean(axis = 0)
        # biased (1/N) sample covariance
        S = (X - X_mean).T@(X - X_mean)/N
        eig_val,eig_vec = np.linalg.eigh(S)
        eig_val,eig_vec = np.real(eig_val),np.real(eig_vec)
        idx = np.argsort(eig_val)[::-1]
        eig_val,eig_vec = eig_val[idx],eig_vec[:,idx]
        self.D = X.shape[1]
        self.mu = X_mean
        self.U = eig_vec
        self.lamda = eig_val
    def transform(self,X,M):
        """Project data to the posterior mean of the M-dim latent space.

        After this method is called the attributes W and sigma can be used.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): number of principal components; must be < X.shape[1]
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        Raises:
            ValueError: if M >= the data dimensionality D
        """
        # ROBUSTNESS FIX: also reject M > D (previously only M == D was
        # caught; larger M failed later with an obscure broadcast error)
        if M >= self.D:
            raise ValueError("M must be less than X.shape[1]")
        # ML noise variance: average of the discarded eigenvalues
        sigma = np.mean(self.lamda[M:])
        W = self.U[:,:M]@(np.diag((self.lamda[:M] - sigma)**0.5))
        # FIX: actually store W and sigma, as the docstrings promise
        self.W = W
        self.sigma = sigma
        Mat = W.T@W + sigma*np.eye(M)
        proj_weight = W@np.linalg.inv(Mat) # x -> z (posterior mean)
        return (X - self.mu)@proj_weight
    def fit_transform(self,X,M):
        """Fit the model, then project X.

        After this method is called the attributes W and sigma can be used.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): number of principal components; must be < X.shape[1]
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        """
        self.fit(X)
        return self.transform(X,M)
# reduce iris to 2-D with maximum-likelihood probabilistic PCA
X,y = load_iris()
ppca = ProbabilisticPCA()
X_proj = ppca.fit_transform(X,2)
fig,axes = plt.subplots(1,1,figsize=(10,7))
for idx,label in enumerate(np.unique(y)):
    axes.scatter(x=X_proj[y == label,0],
                 y=X_proj[y == label,1],
                 alpha=0.8,
                 label=label)
# LABEL FIX: this cell plots probabilistic PCA, but the title previously said
# plain "PCA", identical to the cell above
axes.set_title("iris Probabilistic PCA (4dim -> 2dim)")
plt.legend()
plt.show()
```
# Probabilistic PCA (EM algorithm)
```
class ProbabilisticPCAbyEM():
    """Probabilistic PCA fitted with the EM algorithm.

    Avoids the O(D^3) eigendecomposition of the covariance matrix, and can
    optionally prune latent dimensions (ARD-style) when find_M=True.

    Attributes:
        M (int): dimension of latent variables (possibly pruned)
        mu (1-D array): mean of data
        W (2-D array): factor loading matrix, shape (D, M)
        sigma (float): isotropic noise variance
    """
    def __init__(self,max_iter=100,threshold=1e-5) -> None:
        """
        Args:
            max_iter (int): maximum number of EM iterations
            threshold (float): early-stopping threshold on the parameter change
        """
        self.max_iter = max_iter
        self.threshold = threshold
    def fit(self,X,M,find_M=False,alpha_limit=10):
        """Fit W and sigma (and optionally prune M) by EM.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): dimension of latent variables
            find_M (bool): if appropriate M will be found or not, if this is True, appropriate_M <= M
            alpha_limit (float): if alpha is more than this, this component is removed
        """
        N = X.shape[0]
        D = X.shape[1]
        # init param (random start; results vary run to run)
        self.mu = X.mean(axis = 0)
        W = np.random.randn(D,M)
        sigma = np.random.rand() + 1e-1
        if find_M:
            alpha = np.random.rand(M) + 1e-1
        Y = X - self.mu
        Ysum = np.sum(Y**2)
        for _ in range(self.max_iter):
            # E step: posterior moments E[z] and E[z z^T] of the latents
            Mat = W.T@W + sigma*np.eye(M)
            Minv = np.linalg.inv(Mat)
            E_z = Y@W@Minv
            E_zz = sigma*Minv + E_z.reshape(-1,M,1)@E_z.reshape(-1,1,M)
            # M step: closed-form updates of W and sigma
            if find_M:
                W_new = Y.T@E_z@np.linalg.inv(E_zz.sum(axis = 0) + sigma*np.diag(alpha))
            else:
                W_new = Y.T@E_z@np.linalg.inv(E_zz.sum(axis = 0))
            sigma_new = (Ysum - 2*np.diag(E_z@W_new.T@Y.T).sum() + np.diag(np.sum(E_zz@W_new.T@W_new,axis=0)).sum())/(N*D)
            # stop once the parameters barely move
            diff = ((sigma_new - sigma)**2 + np.mean((W_new - W)**2)) ** 0.5
            if diff < self.threshold:
                W = W_new
                sigma = sigma_new
                break
            W = W_new
            sigma = sigma_new
            if find_M:
                # ARD-like precision update; a large alpha marks an unused
                # component, which is then dropped (shrinking M)
                alpha = D/np.diag(W.T@W)
                idx = alpha < alpha_limit
                alpha = alpha[idx]
                W = W[:,idx]
                M = idx.astype("int").sum()
        self.M = M
        self.W = W
        self.sigma = sigma
    def transform(self,X):
        """Project data to the posterior mean of the latent variables.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        Note:
            unlike the other classes, M is chosen when you call `fit()`
        """
        Mat = self.W.T@self.W + self.sigma*np.eye(self.M)
        proj_weight = self.W@np.linalg.inv(Mat) # x -> z
        return (X - self.mu)@proj_weight
    def fit_transform(self,X,M,find_M=False,alpha_limit=10):
        """Fit the model, then project X.

        After this method is called, the attributes W and sigma can be used.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): dimension of latent variables
            find_M (bool): if appropriate M will be found or not, if this is True, appropriate_M <= M
            alpha_limit (float): if alpha is more than this, this component is removed
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        """
        self.fit(X,M,find_M,alpha_limit)
        return self.transform(X)
```
you can find an appropriate `M` by the EM algorithm
```
X,y = load_iris()
em = ProbabilisticPCAbyEM(max_iter=1000)
# start from M=4 and let the ARD-style pruning choose the effective M
X_proj = em.fit_transform(X,4,find_M=True)
M = X_proj.shape[1]
if M == 1:
    fig,ax = plt.subplots(1,1,figsize=(10,7))
    for idx,label in enumerate(np.unique(y)):
        ax.hist(x=X_proj[y == label,0],
                alpha=0.8,
                label=label)
    ax.set_title("iris PCA by EM (4dim -> 1dim)")
    plt.legend()
    plt.show()
elif M == 2:
    fig,axes = plt.subplots(1,1,figsize=(10,7))
    for idx,label in enumerate(np.unique(y)):
        axes.scatter(x=X_proj[y == label,0],
                     y=X_proj[y == label,1],
                     alpha=0.8,
                     label=label)
    # LABEL FIX: the title previously said "mnist ... (10dim -> 2dim)"
    # although this cell plots the 4-dimensional iris data
    axes.set_title("iris PCA by EM (4dim -> 2dim)")
    plt.legend()
    plt.show()
else:
    print(f"M = {M} >= 3 ...")
```
# Factor Analysis
```
class FactorAnalysis():
    """Factor analysis fitted with the EM algorithm.

    Like probabilistic PCA, but the noise covariance is diagonal (one
    variance per observed dimension) instead of isotropic.

    Attributes:
        mu (1-D array): mean of data
        W (2-D array): factor loading matrix, shape (D, M)
        Sigma (1-D array): diagonal of the noise covariance
        G (2-D array): posterior latent covariance, used by transform()
    """
    def __init__(self,max_iter=100,threshold=1e-5) -> None:
        """
        Args:
            max_iter (int): maximum number of EM iterations
            threshold (float): early-stopping threshold on the parameter change
        """
        self.max_iter = max_iter
        self.threshold = threshold
    def fit(self,X,M):
        """Fit the loading matrix W and diagonal noise Sigma by EM.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): dimension of the latent space
        """
        N = X.shape[0]
        D = X.shape[1]
        self.mu = X.mean(axis = 0)
        # random initialization; results vary run to run
        W = np.random.randn(D,M)
        Sigma = np.random.rand(D) + 1e-1
        Y = X - self.mu
        S = Y.T@Y/N
        for _ in range(self.max_iter):
            # E step: posterior latent covariance G and means E[z]
            G = np.linalg.inv(np.eye(M) + (W.T/Sigma)@W)
            E_z = Y/Sigma@W@G.T
            E_zz = G + E_z.reshape(-1,M,1)@E_z.reshape(-1,1,M)
            # M step: closed-form updates of W and the noise diagonal
            W_new = Y.T@E_z@np.linalg.inv(E_zz.sum(axis = 0))
            Sigma_new = np.diag(S - W_new@E_z.T@Y/N)
            # stop once the parameters barely move
            diff = (np.mean((Sigma_new - Sigma)**2) + np.mean((W_new - W)**2))**0.5
            if diff < self.threshold:
                W = W_new
                Sigma = Sigma_new
                break
            W = W_new
            Sigma = Sigma_new
        self.W = W
        self.Sigma = Sigma
        # recompute G with the final parameters for use in transform()
        self.G = G = np.linalg.inv(np.eye(M) + (W.T/Sigma)@W)
    def transform(self,X):
        """Project data to the posterior mean of the latent variables.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        """
        return (X - self.mu)/self.Sigma@self.W@self.G.T
    def fit_transform(self,X,M):
        """Fit the model, then project X.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): dimension of the latent space
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        """
        self.fit(X,M)
        return self.transform(X)
# reduce iris to 2-D with factor analysis and scatter-plot by class
X,y = load_iris()
fa = FactorAnalysis()
X_proj = fa.fit_transform(X,M=2)
fig,axes = plt.subplots(1,1,figsize=(10,7))
for idx,label in enumerate(np.unique(y)):
    axes.scatter(x=X_proj[y == label,0],
                 y=X_proj[y == label,1],
                 alpha=0.8,
                 label=label)
axes.set_title("iris Factor Analysis (4dim -> 2dim)")
plt.legend()
plt.show()
```
# Kernel PCA
```
class KernelPCA(BaseKernelMachine):
    """Kernel PCA: eigendecomposition of the centred gram matrix.

    Attributes:
        a (2-D array): projection weights (eigenvectors of the centred gram
            matrix, columns sorted by decreasing eigenvalue and scaled by
            1/sqrt(eigenvalue))
        kernel_func (function) : kernel function k(x,y)
        gram_func (function) : function which makes the gram matrix
    """
    def __init__(self,kernel="Linear",sigma=0.1,a=1.0,b=0.0,h=None,theta=1.0):
        """
        Args:
            kernel (string) : kernel type (default "Linear"). you can choose "Linear","Gaussian","Sigmoid","RBF","Exponential"
            sigma (float) : for "Gaussian" kernel
            a,b (float) : for "Sigmoid" kernel
            h (function) : for "RBF" kernel
            theta (float) : for "Exponential" kernel
        """
        super(KernelPCA,self).__init__(kernel=kernel,sigma=sigma,a=a,b=b,h=h,theta=theta)
    def fit(self,X):
        """Compute the projection weights from the training data.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
        """
        # make gram mat
        N = X.shape[0]
        gram_mat = self.gram_func(X)
        # centre the features in kernel space: K_c = (I-1/N) K (I-1/N)
        divN = np.ones((N,N))/N
        K = gram_mat - divN@gram_mat - gram_mat@divN + divN@gram_mat@divN
        # eig
        eig_val,eig_vec = np.linalg.eigh(K)
        eig_val,eig_vec = np.real(eig_val),np.real(eig_vec.real)
        idx = np.argsort(eig_val)[::-1]
        eig_val,eig_vec = eig_val[idx],eig_vec[:,idx]
        plus = eig_val > 0
        eig_val,eig_vec = eig_val[plus],eig_vec[:,plus] # if dimension of kernel space is lower than N, K can have eigen values of 0
        # scale each eigenvector by 1/sqrt(eigenvalue)
        eig_vec /= eig_val**0.5
        self.a = eig_vec
        self.X = X
    def transform(self,X,M):
        """Project data onto the first M kernel principal components.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data

        NOTE(review): fit() centres the training gram matrix, but the
        train/test cross kernel built here is not centred the same way;
        textbook kernel PCA also centres the test kernel -- verify intended.
        """
        # cross gram matrix between training points and new points
        gram_mat = np.zeros((self.X.shape[0],X.shape[0]))
        for i in range(self.X.shape[0]):
            gram_mat[i] = np.array([self.kernel_func(self.X[i],X[j]) for j in range(X.shape[0])])
        return gram_mat.T@self.a[:,:M]
    def fit_transform(self,X,M):
        """Fit the model, then project X.

        Args:
            X (2-D array): shape = (N_samples,N_dim), data
            M (int): number of principal components
        Returns:
            X_proj (2-D array): shape = (N_samples,M), projected data
        """
        self.fit(X)
        return self.transform(X,M)
# reduce iris to 2-D with Gaussian-kernel PCA and scatter-plot by class
X,y = load_iris()
kpca = KernelPCA(kernel="Gaussian",sigma=3.0)
X_proj = kpca.fit_transform(X,2)
fig,axes = plt.subplots(1,1,figsize=(10,7))
for idx,label in enumerate(np.unique(y)):
    axes.scatter(x=X_proj[y == label,0],
                 y=X_proj[y == label,1],
                 alpha=0.8,
                 label=label)
# TYPO FIX: title previously read "4im"
axes.set_title("iris KPCA (4dim -> 2dim)")
plt.legend()
plt.show()
```
| github_jupyter |
when computing the rankings, group all cases in the same citing snapshot year and call get_edge_data once for each group. Ends up not making it faster...
```
top_directory = '/Users/iaincarmichael/Dropbox/Research/law/law-net/'
from __future__ import division
import os
import sys
import time
from math import *
import copy
import cPickle as pickle
# data
import numpy as np
import pandas as pd
# viz
import matplotlib.pyplot as plt
# graph
import igraph as ig
# NLP
from nltk.corpus import stopwords
# our code
sys.path.append(top_directory + 'code/')
from load_data import load_and_clean_graph, case_info
from pipeline.download_data import download_bulk_resource
from pipeline.make_clean_data import *
from viz import print_describe
sys.path.append(top_directory + 'explore/vertex_metrics_experiment/code/')
from make_snapshots import *
from make_edge_df import *
from attachment_model_inference import *
from compute_ranking_metrics import *
from pipeline_helper_functions import *
from make_case_text_files import *
from bag_of_words import *
from similarity_matrix import *
# directory set up
data_dir = top_directory + 'data/'
experiment_data_dir = data_dir + 'vertex_metrics_experiment/'
court_name = 'scotus'
# jupyter notebook settings
%load_ext autoreload
%autoreload 2
%matplotlib inline
# load the (cleaned) citation network and the years for which snapshots exist
G = load_and_clean_graph(data_dir, court_name)
active_years = range(1900, 2015 + 1)
```
# group by snapshot year
```
def compute_ranking_metrics_LR_group(G,
                                     LogReg,
                                     columns_to_use,
                                     experiment_data_dir,
                                     active_years,
                                     R,
                                     year_floor=1900,
                                     seed=None,
                                     print_progress=False):
    '''
    Computes the rank score metric for a given logistic regression object.

    Sample R test cases that have at least one citation. For each test case,
    rank the test case's ancestors, then compute the rank score for the test
    case's actual citations. Test cases are grouped by citing snapshot year
    so get_edge_data is called once per year instead of once per case.

    Parameters
    ------------
    G: network (so we can get each case's ancestor network)

    LogReg: a logistic regression object
    (i.e. the output of fit_logistic_regression)

    columns_to_use: list of column names of the edge metrics data frame that
    we should use to fit the logistic regression

    experiment_data_dir: folder holding the vertex metric snapshots
    (needed for prediction)

    active_years: years for which snapshots exist

    R: how many cases to compute ranking metrics for

    year_floor: sample only cases after this year

    seed: random seed for selecting cases whose ancestry to score

    print_progress: currently unused

    Output
    -------
    list of ranking scores, one per sampled test case
    '''
    # ranking scores for each test case
    test_case_rank_scores = []
    # get list of test cases
    test_vertices = get_test_cases(G, active_years, R, seed=seed)
    # load snapshots
    snapshots_dict = load_snapshots(experiment_data_dir)
    # maybe load the similarities
    if 'similarity' in columns_to_use:
        similarity_matrix, CLid_to_index = load_similarity_matrix(experiment_data_dir)
    else:
        similarity_matrix = None
        CLid_to_index = None
    # organize test cases by citing snapshot year
    case_dict = get_test_cases_by_snapshot_dict(G, test_vertices, active_years)
    for year in case_dict.keys():
        # get vertex metrics in the year before the citing year
        snapshot_year = year - 1
        # grab data frame of vertex metrics for test case's snapshot
        snapshot_df = snapshots_dict['vertex_metrics_' +
                                     str(int(snapshot_year))]
        # build edgelist for all cases in given year
        edgelist = get_combined_edgelist(G, case_dict[year], snapshot_year)
        # grab edge data for the whole year at once -- this is the point
        # of the grouping
        edge_data = get_edge_data(G, edgelist, snapshot_df, columns_to_use,
                                  similarity_matrix, CLid_to_index,
                                  edge_status=None)
        for test_case in case_dict[year]:
            # row labels of edge_data belonging to this test case
            # (must mirror the ordering used by get_combined_edgelist)
            df_indices = [test_case['name'] + '_' + v['name']
                          for v in G.vs.select(year_le=snapshot_year)]
            # grab test case edges
            case_edge_data = edge_data.loc[df_indices]
            # rank ancestors
            ancestor_ranking = get_case_ranking_logreg(case_edge_data,
                                                       LogReg, columns_to_use)
            # get cited cases
            cited_cases = get_cited_cases(G, test_case)
            # compute rank score for cited cases
            score = score_ranking(cited_cases, ancestor_ranking)
            test_case_rank_scores.append(score)
    # return test_case_rank_scores, case_ranks, test_cases
    return test_case_rank_scores
def get_cited_cases(G, citing_vertex):
    """
    Return the citations of a case whose cited year is strictly less than
    the citing case's year.

    Parameters
    ----------
    G: igraph object

    citing_vertex: igraph vertex

    Output
    ------
    list of CL ids of cited cases
    """
    citing_year = citing_vertex['year']
    # outgoing neighbors, as igraph indices
    cited_ig_ids = G.neighbors(citing_vertex.index, mode='OUT')
    # keep only strictly-older cited cases, returning their CL ids
    return [G.vs[ig_id]['name']
            for ig_id in cited_ig_ids
            if G.vs[ig_id]['year'] < citing_year]
def get_test_cases_by_snapshot_dict(G, test_cases, active_years):
    """
    Organizes test cases (igraph vertices) by their citing year.

    G is unused; it is kept for signature compatibility with callers.
    Returns {year: [test cases decided that year]} for every year that has
    at least one test case.
    """
    # get the citing year of each test case
    case_years = [case['year'] for case in test_cases]
    # dict that organizes cases by citing snapshot year
    case_dict = {y: [] for y in active_years}
    for i in range(len(test_cases)):
        case_dict[case_years[i]].append(test_cases[i])
    # BUG FIX: keep years with at least ONE case; the previous condition
    # `len(...) > 1` silently dropped years containing exactly one test
    # case, so those cases were never scored.
    return {k: case_dict[k] for k in case_dict.keys() if len(case_dict[k]) > 0}
def contact_lists(LOL):
    """
    Concatenate a list of lists into one flat list.

    FIX: iterative implementation -- the original recursion built a new list
    per level (O(n^2)) and raised IndexError on an empty input; this version
    is linear and returns [] for [].
    """
    flat = []
    for sub in LOL:
        flat.extend(sub)
    return flat
def get_combined_edgelist(G, test_cases, snapshot_year):
    """Build one combined edgelist pairing every test case with every
    potential ancestor (any vertex with year <= snapshot_year).

    NOTE(review): relies on Python 2 zip() returning a list; under Python 3
    zip objects cannot be concatenated by contact_lists.
    """
    # build edgelist for all cases in given year
    edgelists = []
    for test_case in test_cases:
        # restrict ourselves to ancestors of the citing
        # case strictly before the citing year
        ancentors = [v.index for v in G.vs.select(year_le=snapshot_year)]
        # append test case's edgelist to the combined edgelist
        edgelists.append(zip([test_case.index] * len(ancentors), ancentors))
    return contact_lists(edgelists)
```
# compare new vs old ranking metrics
```
# features used by the logistic attachment model
columns_to_use = ['indegree', 'similarity']
# number of test cases to score
R = 1000
seed_ranking = 3424
LogReg = fit_logistic_regression(experiment_data_dir, columns_to_use)
# time the original per-case implementation... (Python 2 print syntax)
start = time.time()
compute_ranking_metrics_LR(G, LogReg, columns_to_use, experiment_data_dir,
                           active_years, R, seed=seed_ranking,print_progress=True)
print 'new function took %d seconds for %d test cases' % (time.time() - start, R)
# ...against the grouped implementation defined above
start = time.time()
compute_ranking_metrics_LR_group(G, LogReg, columns_to_use, experiment_data_dir,
                                 active_years, R, seed=seed_ranking,print_progress=True)
print 'new and improved function took %d seconds for %d test cases' % (time.time() - start, R)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/PacktPublishing/Hands-On-Computer-Vision-with-PyTorch/blob/master/Chapter11/Generating_deep_fakes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import os
if not os.path.exists('Faceswap-Deepfake-Pytorch'):
!wget -q https://www.dropbox.com/s/5ji7jl7httso9ny/person_images.zip
!wget -q https://raw.githubusercontent.com/sizhky/deep-fake-util/main/random_warp.py
!unzip -q person_images.zip
!pip install -q torch_snippets torch_summary
from torch_snippets import *
from random_warp import get_training_data
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
def crop_face(img):
    """Detect a face with the Haar cascade and return a 256x256 crop.

    Returns (crop, True) when a face is found, otherwise (img, False).
    When several faces are detected, the crop of the last one is returned
    (same as the original loop, which overwrote the crop each iteration).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, False
    x, y, w, h = faces[-1]
    face_crop = cv2.resize(img[y:(y+h), x:(x+w), :], (256, 256))
    return face_crop, True
!mkdir cropped_faces_personA
!mkdir cropped_faces_personB
def crop_images(folder):
    """Run face cropping over every .jpg in `folder`, writing crops to
    cropped_faces_<folder>/<index>.jpg; images with no detected face are skipped."""
    images = Glob(folder+'/*.jpg')
    for i in range(len(images)):
        img = read(images[i],1)
        img2, face_detected = crop_face(img)
        if(face_detected==False):
            continue
        else:
            # read() yields RGB; cv2.imwrite expects BGR, hence the conversion
            cv2.imwrite('cropped_faces_'+folder+'/'+str(i)+'.jpg',cv2.cvtColor(img2, cv2.COLOR_RGB2BGR))
crop_images('personA')
crop_images('personB')
class ImageDataset(Dataset):
    """Random pairs of person-A / person-B face images, scaled to [0, 1]."""
    def __init__(self, items_A, items_B):
        self.items_A = np.concatenate([read(f,1)[None] for f in items_A])/255.
        self.items_B = np.concatenate([read(f,1)[None] for f in items_B])/255.
        # shift A's per-channel means onto B's so the two sets are colour-matched
        self.items_A += self.items_B.mean(axis=(0, 1, 2)) - self.items_A.mean(axis=(0, 1, 2))
    def __len__(self):
        return min(len(self.items_A), len(self.items_B))
    def __getitem__(self, ix):
        # NOTE(review): ix is ignored -- every access returns a random A/B pair
        a, b = choose(self.items_A), choose(self.items_B)
        return a, b
    def collate_fn(self, batch):
        # random-warp augmentation: produces (warped input, target) per person
        imsA, imsB = list(zip(*batch))
        imsA, targetA = get_training_data(imsA, len(imsA))
        imsB, targetB = get_training_data(imsB, len(imsB))
        # NHWC -> NCHW and move to `device` (global from torch_snippets)
        imsA, imsB, targetA, targetB = [torch.Tensor(i).permute(0,3,1,2).to(device) for i in [imsA, imsB, targetA, targetB]]
        return imsA, imsB, targetA, targetB
a = ImageDataset(Glob('cropped_faces_personA'), Glob('cropped_faces_personB'))
x = DataLoader(a, batch_size=32, collate_fn=a.collate_fn)
inspect(*next(iter(x)))
for i in next(iter(x)):
subplots(i[:8], nc=4, sz=(4,2))
def _ConvLayer(input_features, output_features):
return nn.Sequential(
nn.Conv2d(input_features, output_features, kernel_size=5, stride=2, padding=2),
nn.LeakyReLU(0.1, inplace=True)
)
def _UpScale(input_features, output_features):
return nn.Sequential(
nn.ConvTranspose2d(input_features, output_features, kernel_size=2, stride=2, padding=0),
nn.LeakyReLU(0.1, inplace=True)
)
class Reshape(nn.Module):
    """Unflatten a (B, 16384) tensor into (B, 1024, 4, 4) feature maps."""
    def forward(self, input):
        # -1 keeps the batch dimension; 1024 channels of 4 x 4 spatial maps
        return input.view(-1, 1024, 4, 4)
class Autoencoder(nn.Module):
    """Face-swap autoencoder: one shared encoder, one decoder per person."""
    def __init__(self):
        super(Autoencoder, self).__init__()
        # 64x64x3 -> 4x4x1024 -> 1024-d bottleneck -> 8x8x512
        self.encoder = nn.Sequential(
            _ConvLayer(3, 128),
            _ConvLayer(128, 256),
            _ConvLayer(256, 512),
            _ConvLayer(512, 1024),
            nn.Flatten(),
            nn.Linear(1024 * 4 * 4, 1024),
            nn.Linear(1024, 1024 * 4 * 4),
            Reshape(),
            _UpScale(1024, 512),
        )

        # the two decoders share an architecture (but not weights)
        def build_decoder():
            return nn.Sequential(
                _UpScale(512, 256),
                _UpScale(256, 128),
                _UpScale(128, 64),
                nn.Conv2d(64, 3, kernel_size=3, padding=1),
                nn.Sigmoid(),
            )

        self.decoder_A = build_decoder()
        self.decoder_B = build_decoder()

    def forward(self, x, select='A'):
        # shared encoding, person-specific decoding
        latent = self.encoder(x)
        decoder = self.decoder_A if select == 'A' else self.decoder_B
        return decoder(latent)
from torchsummary import summary
model = Autoencoder()
summary(model, torch.zeros(32,3,64,64), 'A');
def train_batch(model, data, criterion, optimizers):
    """Run one optimisation step on each autoencoder branch.

    Parameters:
        model: Autoencoder whose forward() selects decoder 'A' or 'B'.
        data: tuple (imgA, imgB, targetA, targetB) of batched tensors.
        criterion: reconstruction loss, e.g. nn.L1Loss().
        optimizers: pair (optA, optB) -- one optimizer per decoder branch.

    Returns:
        (lossA, lossB) as Python floats.
    """
    # BUG FIX: the parameter was misspelled 'optimizes', so the body silently
    # read the *global* 'optimizers' instead of the argument passed in.
    optA, optB = optimizers
    optA.zero_grad()
    optB.zero_grad()
    imgA, imgB, targetA, targetB = data
    _imgA, _imgB = model(imgA, 'A'), model(imgB, 'B')
    lossA = criterion(_imgA, targetA)
    lossB = criterion(_imgB, targetB)
    lossA.backward()
    lossB.backward()
    optA.step()
    optB.step()
    return lossA.item(), lossB.item()
# Fresh model and data pipeline for the real training run.
model = Autoencoder().to(device)
dataset = ImageDataset(Glob('cropped_faces_personA'), Glob('cropped_faces_personB'))
dataloader = DataLoader(dataset, 32, collate_fn=dataset.collate_fn)
# Two optimizers: each one trains the shared encoder plus ONE decoder branch.
optimizers = optim.Adam([{'params': model.encoder.parameters()},
                         {'params': model.decoder_A.parameters()}],
                        lr=5e-5, betas=(0.5, 0.999)), \
             optim.Adam([{'params': model.encoder.parameters()},
                         {'params': model.decoder_B.parameters()}],
                        lr=5e-5, betas=(0.5, 0.999))
criterion = nn.L1Loss()
n_epochs = 10000
log = Report(n_epochs)
!mkdir checkpoint
# Main training loop: one optimisation step per branch per batch.
for ex in range(n_epochs):
    N = len(dataloader)
    for bx, data in enumerate(dataloader):
        lossA, lossB = train_batch(model, data, criterion, optimizers)
        log.record(ex+(1+bx)/N, lossA=lossA, lossB=lossB, end='\r')
    log.report_avgs(ex+1)
    # Checkpoint every 100 epochs (the file is overwritten each time).
    if (ex+1)%100 == 0:
        state = {
            'state': model.state_dict(),
            'epoch': ex
        }
        torch.save(state, './checkpoint/autoencoder.pth')
    # Every 100 epochs, visualise reconstructions and cross-identity swaps
    # on the last batch of the epoch.
    if (ex+1)%100 == 0:
        bs = 5
        a, b, A, B = data
        line('A to B')
        _a = model(a[:bs], 'A')  # reconstruct A with its own decoder
        _b = model(a[:bs], 'B')  # swap: decode A's latent with decoder B
        x = torch.cat([A[:bs],_a,_b])
        subplots(x, nc=bs, figsize=(bs*2, 5))
        line('B to A')
        _a = model(b[:bs], 'A')
        _b = model(b[:bs], 'B')
        x = torch.cat([B[:bs],_a,_b])
        subplots(x, nc=bs, figsize=(bs*2, 5))
log.plot_epochs()
```
| github_jupyter |
# Introduction
.....
Check to see if jupyter lab uses the correct python interpreter with '!which python'.
It should be something like '/opt/anaconda3/envs/[environment name]/bin/python' (on Mac).
If not, try this: https://github.com/jupyter/notebook/issues/3146#issuecomment-352718675
```
!which python
```
# Install dependencies:
```
# install_packages = True
# if install_packages:
# !conda install tensorflow=2 -y
# !conda install -c anaconda pandas -y
# !conda install -c conda-forge tensorflow-hub -y
# !conda install -c akode html2text -y
# !conda install -c conda-forge tqdm -y
# !conda install -c anaconda scikit-learn -y
# !conda install -c conda-forge matplotlib -y
# !conda install -c anaconda seaborn -y
```
# Imports
```
#imports
import pandas as pd
import numpy as np
import os
import time
import tensorflow as tf
import tensorflow_hub as hub
import zipfile
from html2text import HTML2Text
from tqdm import tqdm
import re
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
import seaborn as sns
```
# Set pandas print options
This will improve readability of printed pandas dataframe.
```
# Show full dataframes when printing (no row/column/width truncation).
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
```
## Set global Parameters
Set your parameters here:
data_path: In this path put the data you have downloaded with YouTube Data Tools.
output_path: The files generated in this notebook will be saved here.
url_dict: URLs to models on Tensorflow hub are saved here. Other models are available there.
model_type: Define which model you would like to use. Choose one from url_dict
new_embeddings: If this is true, new embeddings will be generated and saved at output_path. Otherwise, embeddings are loaded from Disc.
```
# Input comment table (exported by YouTube Data Tools) and output directory.
data_path = './data/videoinfo_rcHCretMIZU_2020_11_30-16_16_17_comments.tab'
output_path = "./output/"
# True: compute embeddings from scratch; False: load cached ones from disk.
new_embeddings = True
# TF-Hub URLs of the available Universal Sentence Encoder variants.
url_dict = {
    'Transformer' : "https://tfhub.dev/google/universal-sentence-encoder-large/5",
    'DAN' : "https://tfhub.dev/google/universal-sentence-encoder/4",
    'Transformer_Multilingual': "https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3"
}
model_type = 'Transformer' #@param ['DAN','Transformer','Transformer_Multilingual']
```
## Create output directory
Try to create the directory defined by output_path
```
# Create the output directory; report (but do not abort) when it already
# exists or cannot be created.
try:
    os.mkdir(output_path)
except OSError:
    print ("Creation of the directory %s failed" % output_path)
else:
    print ("Successfully created the directory %s " % output_path)
```
# Load Data
Load your data as a pandas dataframe.
```
# Load the tab-separated comment export (only needed when recomputing embeddings).
if new_embeddings:
    data = pd.read_csv(data_path,sep='\t',header=(0))
data.head()
```
# Preprocessing
Preprocess your data:
- Drop empty rows
- Drop unused columns
```
# Basic cleaning: drop rows lacking text/author, then remove unused columns.
if new_embeddings:
    data = data.dropna(subset=['text', 'authorName']) # drop rows with no content
    data=data.drop(['id', 'replyCount','likeCount','authorChannelUrl','authorChannelId','isReplyTo','isReplyToName'],axis=1) # drop unused columns
data.head()
```
- remove HTML-tags, links and usernames
```
if new_embeddings:
    # Strip HTML markup from the raw comment text.
    tqdm.pandas()
    h = HTML2Text()
    h.ignore_links = True
    data['cleaned'] = data['text'].progress_apply(lambda x: h.handle(x))
    print( "Removed HTML Tags.")
    # Remove http/bit.ly links.
    # NOTE(review): str.replace treats these patterns as regexes here (old
    # pandas default); newer pandas needs regex=True -- confirm version.
    http_link_pattern = r'http\S+'
    bitly_link_pattern = r'bit.ly/\S+'
    data['cleaned'] = data['cleaned'].str.replace(http_link_pattern, '')
    data['cleaned'] = data['cleaned'].str.replace(bitly_link_pattern, '')
    print( "Removed Links.")
    # Remove user names longer than 3 chars that are not in the whitelist.
    keep_names = ["earth", "Tide", "Geologist", "A Person", "Titanic", "adventure", "Sun", "The United States Of America"] # user names we want to keep
    user_names = [name for name in data['authorName'].unique() if (len(name)> 3 and name not in keep_names)]
    data['cleaned'] = data['cleaned'].str.replace('|'.join(map(re.escape, user_names)), '')
    print( "Removed user names.")
```
# Save or Load preprocessed data
Save your data after preprocessing, or load preprocessed data from disk.
```
# Cache the cleaned dataframe, or restore it from an earlier run.
if new_embeddings:
    data.to_pickle(output_path+'data_preprocessed'+'.pkl')
else:
    data = pd.read_pickle(output_path+'data_preprocessed'+'.pkl')
data.head()
```
# Produce Text Embeddings with Universal Sentence Encoder
## Load Model
Load the model from TF-hub
```
# Resolve the chosen encoder variant and download it from TF-Hub.
hub_url = url_dict[model_type]
if new_embeddings:
    print("Loading model. This will take some time...")
    embed = hub.load(hub_url)
## Embed Documents
Produce embeddings of your documents.
```
# Embed the comments in chunks of 200 rows to bound memory use, concatenate
# the chunk embeddings, and cache the result to disk; otherwise reload it.
if new_embeddings:
    for k,g in data.groupby(np.arange(len(data))//200):
        if k == 0:
            embeddings = embed(g['cleaned'])
        else:
            embeddings_new = embed(g['cleaned'])
            embeddings = tf.concat(values=[embeddings,embeddings_new],axis = 0)
        print(k , end =" ")
    print("The embeddings vector is of fixed length {}".format(embeddings.shape[1]))
    np.save(output_path+'/embeddings'+model_type+'.npy', embeddings, allow_pickle=True, fix_imports=True)
else:
    embeddings = np.load(output_path+'/embeddings'+model_type+'.npy', mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII')
embeddings.shape
```
## Calculate Similarity Matrix with angular distance
'Following Cer et al. (2018), we first compute
the sentence embeddings u, v for an STS sentence
pair, and then score the sentence pair similarity
based on the angular distance between the two
embedding vectors d = − arccos (uv/||u|| ||v||).'
```
from sklearn.metrics.pairwise import cosine_similarity
def cos_sim(input_vectors):
    """Return the pairwise cosine-similarity matrix (n x n) of the vectors."""
    return cosine_similarity(input_vectors)
# Full n x n similarity matrix over all comment embeddings.
cosine_similarity_matrix = cos_sim(np.array(embeddings))
print(cosine_similarity_matrix)
```
# Plots Similarity
Plot and print a heat map showing the semantic contextual similarity between comments.
```
import seaborn as sns
def plot_similarity(labels, features, rotation):
    """Draw a heat-map of pairwise inner products between embedding vectors,
    labelling both axes with *labels* (x labels rotated by *rotation*)."""
    scores = np.inner(features, features)
    sns.set(font_scale=1.2)
    ax = sns.heatmap(
        scores,
        xticklabels=labels,
        yticklabels=labels,
        vmin=0,
        vmax=1,
        cmap="YlOrRd")
    ax.set_xticklabels(labels, rotation=rotation)
    ax.set_title("Semantic Textual Similarity")
# Heat-map of 5 consecutive comments starting at row 10000.
num_samples = 5
off_set = 10000
plot_similarity(data.iloc[off_set:off_set+num_samples]['cleaned'], embeddings[off_set:off_set+num_samples], 90)
```
# Show neighbours of a comment
Define which comment to analyze
```
# Pick the comment whose nearest neighbours we want to inspect.
comment_index = 13
comment = data["cleaned"][comment_index]
comment_list = data["cleaned"].tolist()
print(comment)
```
Print similar comments.
```
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
    """Return the topN entries of sentence_list most similar to *sentence*.

    The query sentence itself is included (its self-similarity is maximal);
    results are ordered from most to least similar.
    """
    row = sentence_list.index(sentence)
    scores = np.array(similarity_matrix[row, :])
    ranked = scores.argsort()[::-1][:topN]
    return [sentence_list[i] for i in ranked]
# Print the 20 nearest neighbours of the chosen comment (includes itself).
for i, value in enumerate(get_top_similar(comment, comment_list, cosine_similarity_matrix, 20)):
    print("Top similar comment {}: {}".format(i+1, value))
```
| github_jupyter |
## Using RNNs to add two binary strings ##
if two input binary strings say 010 and 011 are given your network should output the sum = 101
- How do you represent the data
- Defining a simple recurrent network to model the problem in a seq2seq fashion
- Train it on binary strings of a fixed length
- Test the network using binary strings of different lengths
```
# coding: utf-8
# =============================================================================
# Make a simple RNN learn binray addition
# ============================================================================
# author mineshmathew.github.io
# ==============================================================================
from __future__ import print_function
import numpy as np
from time import sleep
import random
import sys
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
random.seed( 10 )
```
## Preparing the input data ##
### Random binary strings of required length as training data ###
- The function <i>getSample()</i> takes a string-length as input and then returns the input vector and target vector that need to be fed to the RNN
- Say if your string-length is 2, lower and upper bounds would be 2 and 3.
- Then if the two random numbers picked from this range are 2 and 3 ( you have only 2 and 3 in that range :) )
- your inputs in binary would be 10 and 11 and your sum is 5 which is 101.
- <b>Padding:</b> Since your output is one bit longer, we rewrite the inputs in 3-bit form too, so 010 + 011 --> 101
### Training data as input sequence and target sequence pairs ###
Starting from the least significant bit (since the addition starts from the LSB), we concatenate the corresponding bits of each input binary string, and that forms our input sequence.
The target vector is the output binary string reversed (since you start from the LSB).
Hence your input at one timestep is the ordered pair of bits at that particular position, and the target for that timestep is the corresponding bit in the output string.
So the input dimension at each time step is 2 and the target dimension is 1.
in the above case so your input and target pairs would be
[1 0] - > 1 <br>
[1 1] -> 0 <br>
[0 0] -> 0

```
def getSample(stringLength, testFlag):
    """Generate one training sample for the binary-addition RNN.

    Two random integers with *stringLength* bits are drawn and added.
    Returns:
        x: float32 array of shape (T, 2) -- one bit from each addend per
           timestep, least-significant bit first (T = length of the sum).
        y: int array of shape (T,) -- the sum's bits, LSB first.
    If testFlag == 1 the numbers and their binary forms are printed.
    """
    lowerBound = pow(2, stringLength - 1) + 1
    upperBound = pow(2, stringLength)
    num1 = random.randint(lowerBound, upperBound)
    num2 = random.randint(lowerBound, upperBound)
    num3 = num1 + num2
    num3Binary = (bin(num3)[2:])
    num1Binary = (bin(num1)[2:])
    num2Binary = (bin(num2)[2:])
    if testFlag == 1:
        print('input numbers and their sum are', num1, ' ', num2, ' ', num3)
        print ('binary strings are', num1Binary, ' ' , num2Binary, ' ' , num3Binary)
    len_num3 = (len(num3Binary))
    # num3 is the widest, so left-pad the addends to the sum's width T.
    num1Binary = ('0'*(len(num3Binary)-len(num1Binary))+num1Binary)
    num2Binary = ('0'*(len(num3Binary)-len(num2Binary))+num2Binary)
    # Input sequence: timestep 0 carries the least significant bits; the MSBs
    # arrive last along the time axis.
    x = np.zeros((len_num3,2), dtype=np.float32)
    for i in range(0, len_num3):
        x[i,0] = num1Binary[len_num3-1-i]
        x[i,1] = num2Binary[len_num3-1-i]
    # BUG FIX: np.array(map(...)) creates a 0-d object array on Python 3
    # (map is a lazy iterator there); materialise it with list() to get a
    # proper 1-D int array.
    y = np.array(list(map(int, num3Binary[::-1])))
    return x, y
```
## How does the network look like ? ##
The figure below shows fully rolled network for our task for the input - target pair we took as an example earlier.
In the figure, for ease of drawing, hiddenDIm is chosen as 2

```
class Adder(nn.Module):
    """Seq2seq binary adder: an RNN reads one bit-pair per timestep (LSB
    first) and a sigmoid output layer emits the corresponding sum bit."""

    def __init__(self, inputDim, hiddenDim, outputDim):
        super(Adder, self).__init__()
        self.inputDim = inputDim
        self.hiddenDim = hiddenDim
        self.outputDim = outputDim
        # NOTE: the attribute is named 'lstm' but holds a plain nn.RNN.
        self.lstm = nn.RNN(inputDim, hiddenDim)
        self.outputLayer = nn.Linear(hiddenDim, outputDim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """x: (T, B, inputDim) with B == 1; returns sigmoid activations of
        shape (T, outputDim) after squeezing the batch dimension."""
        rnn_out, _ = self.lstm(x)
        T, B, D = rnn_out.size(0), rnn_out.size(1), rnn_out.size(2)
        # Squash time and batch together before the linear layer, then
        # restore (T, B, out) and drop the dummy batch axis.
        flat = rnn_out.contiguous().view(B * T, D)
        scores = self.outputLayer(flat).view(T, B, -1).squeeze(1)
        return self.sigmoid(scores)
```
### Training the network ###
- batch learning is not used; only one sequence is fed at a time
- runs purely on a cpu
- MSE loss is used
```
featDim=2 # two bits per timestep, one from each addend string
outputDim=1 # one output node which would output a zero or 1
lstmSize=10
lossFunction = nn.MSELoss()
model =Adder(featDim, lstmSize, outputDim)
print ('model initialized')
#optimizer = optim.SGD(model.parameters(), lr=3e-2, momentum=0.8)
optimizer=optim.Adam(model.parameters(),lr=0.001)
epochs=500
### epochs ##
totalLoss= float("inf")
# Train until the average loss over an 'epoch' of 500 samples drops below 1e-5.
while totalLoss > 1e-5:
    print(" Avg. Loss for last 500 samples = %lf"%(totalLoss))
    totalLoss=0
    for i in range(0,epochs): # average the loss over `epochs` fresh samples
        stringLen=4
        testFlag=0
        x,y=getSample(stringLen, testFlag)
        model.zero_grad()
        # unsqueeze(1) adds the mandatory batch dimension: (T, 1, featDim)
        x_var=autograd.Variable(torch.from_numpy(x).unsqueeze(1).float()) #convert to torch tensor and variable
        seqLen=x_var.size(0)
        x_var= x_var.contiguous()
        y_var=autograd.Variable(torch.from_numpy(y).float())
        finalScores = model(x_var)
        loss=lossFunction(finalScores,y_var)
        # NOTE(review): loss.data[0] only works on PyTorch <= 0.4; modern
        # PyTorch raises on 0-dim indexing -- use loss.item() instead.
        totalLoss+=loss.data[0]
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    totalLoss=totalLoss/epochs
### Testing the model ###
Remember that the network was trained purely on strings of length 4 <br>
now let's test the net on bit strings of length 5
```
# Generalisation check: the net was trained on 4-bit strings; here it is
# tested on 10 random additions of 5-bit strings.
stringLen=5
testFlag=1
for i in range (0,10):
    x,y=getSample(stringLen,testFlag)
    x_var=autograd.Variable(torch.from_numpy(x).unsqueeze(1).float())
    y_var=autograd.Variable(torch.from_numpy(y).float())
    seqLen=x_var.size(0)
    x_var= x_var.contiguous()
    finalScores = model(x_var).data.t()
    bits=finalScores.gt(0.5)   # threshold the sigmoid outputs at 0.5
    bits=bits[0].numpy()
    print ('sum predicted by RNN is ',bits[::-1])   # reverse back to MSB-first
    print('##################################################')
```
### Things to try out
- See that increasing the hidden size to say 100 worsens the performance
- Change the model slightly to use NLL loss or cross entropy loss (you may want to add two output nodes in this case; one for 1 and one for 0.)
| github_jupyter |
<a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/14_legends.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
Uncomment the following line to install [geemap](https://geemap.org) if needed.
```
# !pip install geemap
import ee
import geemap
# Play the companion tutorial video inside the notebook.
geemap.show_youtube('NwnW_qOkNRw')
```
## Add builtin legends from geemap Python package
https://github.com/giswqs/geemap/blob/master/geemap/legends.py
### Available builtin legends:
```
# List the names of all legends bundled with geemap.
legends = geemap.builtin_legends
for legend in legends:
    print(legend)
```
### Available Land Cover Datasets in Earth Engine
https://developers.google.com/earth-engine/datasets/tags/landcover
### National Land Cover Database (NLCD)
https://developers.google.com/earth-engine/datasets/catalog/USGS_NLCD
```
# NLCD 2016 land cover layer with geemap's builtin NLCD legend.
Map = geemap.Map()
Map.add_basemap('HYBRID')
landcover = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
Map.addLayer(landcover, {}, 'NLCD Land Cover')
Map.add_legend(builtin_legend='NLCD')
Map
```
### National Wetlands Inventory (NWI)
https://www.fws.gov/wetlands/data/mapper.html
```
# FWS National Wetlands Inventory basemap with its builtin legend.
Map = geemap.Map()
Map.add_basemap('HYBRID')
Map.add_basemap('FWS NWI Wetlands')
Map.add_legend(builtin_legend='NWI')
Map
```
### MODIS Land Cover Type Yearly Global 500m
https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
```
# MODIS MCD12Q1 2013 land cover layer with its builtin legend.
Map = geemap.Map()
Map.add_basemap('HYBRID')
landcover = ee.Image('MODIS/051/MCD12Q1/2013_01_01').select('Land_Cover_Type_1')
Map.setCenter(6.746, 46.529, 2)
Map.addLayer(landcover, {}, 'MODIS Land Cover')
Map.add_legend(builtin_legend='MODIS/051/MCD12Q1')
Map
```
## Add customized legends for Earth Engine data
There are three ways you can add customized legends for Earth Engine data
1. Define legend keys and colors
2. Define legend dictionary
3. Convert Earth Engine class table to legend dictionary
### Define legend keys and colors
```
# Custom legend built from parallel lists of labels and colors.
Map = geemap.Map()
legend_keys = ['One', 'Two', 'Three', 'Four', 'ect']
# Colors can be given as hex codes or as RGB triples (0-255 per channel).
legend_colors = ['#8DD3C7', '#FFFFB3', '#BEBADA', '#FB8072', '#80B1D3']
# legend_colors = [(255, 0, 0), (127, 255, 0), (127, 18, 25), (36, 70, 180), (96, 68 123)]
Map.add_legend(
    legend_keys=legend_keys, legend_colors=legend_colors, position='bottomleft'
)
Map
```
### Define a legend dictionary
```
# Custom legend from a {label: hex-color} dictionary (NLCD class table).
Map = geemap.Map()
legend_dict = {
    '11 Open Water': '466b9f',
    '12 Perennial Ice/Snow': 'd1def8',
    '21 Developed, Open Space': 'dec5c5',
    '22 Developed, Low Intensity': 'd99282',
    '23 Developed, Medium Intensity': 'eb0000',
    '24 Developed High Intensity': 'ab0000',
    '31 Barren Land (Rock/Sand/Clay)': 'b3ac9f',
    '41 Deciduous Forest': '68ab5f',
    '42 Evergreen Forest': '1c5f2c',
    '43 Mixed Forest': 'b5c58f',
    '51 Dwarf Scrub': 'af963c',
    '52 Shrub/Scrub': 'ccb879',
    '71 Grassland/Herbaceous': 'dfdfc2',
    '72 Sedge/Herbaceous': 'd1d182',
    '73 Lichens': 'a3cc51',
    '74 Moss': '82ba9e',
    '81 Pasture/Hay': 'dcd939',
    '82 Cultivated Crops': 'ab6c28',
    '90 Woody Wetlands': 'b8d9eb',
    '95 Emergent Herbaceous Wetlands': '6c9fb8',
}
landcover = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
Map.addLayer(landcover, {}, 'NLCD Land Cover')
Map.add_legend(legend_title="NLCD Land Cover Classification", legend_dict=legend_dict)
Map
```
### Convert an Earth Engine class table to legend
For example: MCD12Q1.051 Land Cover Type Yearly Global 500m
https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
```
# Build a legend dictionary from an Earth Engine class table pasted verbatim
# (value / color / description columns), then attach it to the map.
Map = geemap.Map()
ee_class_table = """
Value Color Description
0 1c0dff Water
1 05450a Evergreen needleleaf forest
2 086a10 Evergreen broadleaf forest
3 54a708 Deciduous needleleaf forest
4 78d203 Deciduous broadleaf forest
5 009900 Mixed forest
6 c6b044 Closed shrublands
7 dcd159 Open shrublands
8 dade48 Woody savannas
9 fbff13 Savannas
10 b6ff05 Grasslands
11 27ff87 Permanent wetlands
12 c24f44 Croplands
13 a5a5a5 Urban and built-up
14 ff6d4c Cropland/natural vegetation mosaic
15 69fff8 Snow and ice
16 f9ffa4 Barren or sparsely vegetated
254 ffffff Unclassified
"""
landcover = ee.Image('MODIS/051/MCD12Q1/2013_01_01').select('Land_Cover_Type_1')
Map.setCenter(6.746, 46.529, 2)
Map.addLayer(landcover, {}, 'MODIS Land Cover')
legend_dict = geemap.legend_from_ee(ee_class_table)
Map.add_legend(legend_title="MODIS Global Land Cover", legend_dict=legend_dict)
Map
```
| github_jupyter |
# Overfitting Figure Generation
We're going to generate `n_points` points distributed along a line, remembering that the formula for a line is $y = mx+b$. Modified (slightly) from [here](https://stackoverflow.com/a/35730618/8068638).
```
# Figure parameters: n_points samples around the line y = m*x + b, with
# uniform vertical jitter of +/- training_delta / test_delta.
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
n_points = 12
m = 1
b = 0
training_delta = 1.0
test_points_offset = 0.5
test_points_jitter = 0.1
test_delta = 1.0
np.random.seed(3)
```
Now, we need to generate the testing and training "data"
```
# Training data: integer x's with vertical jitter; test data: horizontally
# shifted/jittered x's with independent vertical noise.
points_x = np.arange(n_points)
# NOTE(review): this rebinds the scalar 'training_delta' to an array of offsets.
training_delta = np.random.uniform(-training_delta, training_delta, size=(n_points))
training_points_y = m*points_x + b + training_delta
testing_points_x = points_x + np.random.uniform(-test_points_jitter, test_points_jitter, size=(n_points)) + test_points_offset
testing_delta = np.random.uniform(-test_delta, test_delta, size=(n_points))
testing_points_y = m*testing_points_x + b + testing_delta
```
We'll overfit by generating a $n$-dimensional polynomial
```
# Overfit: a degree (n_points - 1) polynomial interpolates all training points.
overfitted = np.poly1d(np.polyfit(points_x, training_points_y, n_points - 1))
x_space = np.linspace(-(n_points/5), 2*n_points+(n_points/5), n_points*100)
# NOTE(review): identical to x_space and never used below.
overfitted_x_space = np.linspace(-(n_points/5), 2*n_points+(n_points/5), n_points*100)
y_overfitted = overfitted(x_space)
```
## Plot it
Colors chosen from [Wong, B. (2011). Points of view: Color blindness. *Nature Methods, 8*(6), 441–441. doi:10.1038/nmeth.1618](doi.org/10.1038/nmeth.1618). I had to do some magic to make the arrays colors play nicely with matplotlib
```
def rgb_to_np_rgb(r, g, b):
    """Convert an 8-bit RGB triple (0-255 per channel) to matplotlib's
    0-1 float tuple form."""
    return tuple(channel / 255 for channel in (r, g, b))
# Colour-blind-safe palette (Wong 2011, Nature Methods).
orange = rgb_to_np_rgb(230, 159, 0)
blueish_green = rgb_to_np_rgb(0, 158, 115)
vermillion = rgb_to_np_rgb(213, 94, 0)
blue = rgb_to_np_rgb(0, 114, 178)
# configure the plot: hide top/right spines and all tick labels
plt.rcParams["figure.figsize"] = (12.8 * 0.75, 9.6 * 0.75)
plt.rcParams['svg.fonttype'] = 'path'
plt.rcParams['axes.spines.left'] = True
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.bottom'] = True
plt.rcParams["xtick.labelbottom"] = False
plt.rcParams["xtick.bottom"] = False
plt.rcParams["ytick.left"] = False
plt.rcParams["ytick.labelleft"] = False
plt.xkcd() # for fun (see https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003858#s12)
# plot the data, the true line, and the interpolating polynomial
plt.scatter(points_x, training_points_y, zorder=3,label="Training data", s=100, c=[blue])
plt.scatter(testing_points_x, testing_points_y, zorder=3,label="Test data", s=100, c=[vermillion])
plt.plot(x_space, m*x_space + b, zorder=2, label="Properly fit model", c=blueish_green)
plt.plot(x_space, y_overfitted, zorder=1, label="Overfit model", c=orange)
plt.xlim(-(n_points/5) - 1, max(testing_points_x) + 1)
plt.ylim(-(n_points/5) - 1, max(testing_points_y)+(n_points/5) + 1)
# plt.rcParams["figure.figsize"] = [6.4*2, 4.8*2]
plt.legend(loc=2)
plt.savefig('overfitting.svg', bbox_inches='tight')
plt.savefig('overfitting.png', dpi=150, bbox_inches='tight')
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#EDA-and-pre-processing" data-toc-modified-id="EDA-and-pre-processing-1"><span class="toc-item-num">1 </span>EDA and pre-processing</a></span><ul class="toc-item"><li><span><a href="#Descriptive-statistics-(data-shape,-balance,-etc)" data-toc-modified-id="Descriptive-statistics-(data-shape,-balance,-etc)-1.1"><span class="toc-item-num">1.1 </span>Descriptive statistics (data shape, balance, etc)</a></span></li><li><span><a href="#Data-pre-processing" data-toc-modified-id="Data-pre-processing-1.2"><span class="toc-item-num">1.2 </span>Data pre-processing</a></span></li></ul></li><li><span><a href="#ML-template-starts---training-session" data-toc-modified-id="ML-template-starts---training-session-2"><span class="toc-item-num">2 </span>ML template starts - training session</a></span><ul class="toc-item"><li><span><a href="#Training-model-(LGBM)-with-stratisfied-CV" data-toc-modified-id="Training-model-(LGBM)-with-stratisfied-CV-2.1"><span class="toc-item-num">2.1 </span>Training model (LGBM) with stratisfied CV</a></span></li></ul></li><li><span><a href="#Model-evaluation" data-toc-modified-id="Model-evaluation-3"><span class="toc-item-num">3 </span>Model evaluation</a></span><ul class="toc-item"><li><span><a href="#Plot-of-the-CV-folds---F1-macro-and-F1-for-the-positive-class" data-toc-modified-id="Plot-of-the-CV-folds---F1-macro-and-F1-for-the-positive-class-3.1"><span class="toc-item-num">3.1 </span>Plot of the CV folds - F1 macro and F1 for the positive class</a></span></li><li><span><a href="#Scikit-learn---Classification-report" data-toc-modified-id="Scikit-learn---Classification-report-3.2"><span class="toc-item-num">3.2 </span>Scikit learn - Classification report</a></span></li><li><span><a href="#ROC-curve-with-AUC" data-toc-modified-id="ROC-curve-with-AUC-3.3"><span class="toc-item-num">3.3 </span>ROC curve with AUC</a></span></li><li><span><a 
href="#Confusion-Matrix-plot-(normalized-and-with-absolute-values)" data-toc-modified-id="Confusion-Matrix-plot-(normalized-and-with-absolute-values)-3.4"><span class="toc-item-num">3.4 </span>Confusion Matrix plot (normalized and with absolute values)</a></span></li><li><span><a href="#Feature-Importance-plot" data-toc-modified-id="Feature-Importance-plot-3.5"><span class="toc-item-num">3.5 </span>Feature Importance plot</a></span></li><li><span><a href="#Correlations-analysis-(on-top-features)" data-toc-modified-id="Correlations-analysis-(on-top-features)-3.6"><span class="toc-item-num">3.6 </span>Correlations analysis (on top features)</a></span></li><li><span><a href="#Anomaly-detection-on-the-training-set-(on-top-features-alone)" data-toc-modified-id="Anomaly-detection-on-the-training-set-(on-top-features-alone)-3.7"><span class="toc-item-num">3.7 </span>Anomaly detection on the training set (on top features alone)</a></span></li><li><span><a href="#Data-leakage-test" data-toc-modified-id="Data-leakage-test-3.8"><span class="toc-item-num">3.8 </span>Data leakage test</a></span></li>
<li><span><a href="##-Analysis-of-FPs/FNs" data-toc-modified-id="##-Analysis-of-FPs/FNs"><span class="toc-item-num">3.9 </span>Analysis of FPs/FNs</a></span></li></ul></li></ul></div>
```
import warnings
import pandas as pd
import numpy as np
from pandas_summary import DataFrameSummary
import octopus_ml as oc
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import re
import optuna
# Show full dataframes when printed.
pd.set_option('display.max_columns', None) # or 1000
pd.set_option('display.max_rows', None) # or 1000
# NOTE(review): -1 for max_colwidth is deprecated in newer pandas; use None.
pd.set_option('display.max_colwidth', -1) # or 199
%matplotlib inline
warnings.simplefilter("ignore")
```
### Read the Kaggle Titanic competition dataset
https://www.kaggle.com/c/titanic
```
pwd
# Kaggle Titanic train/test splits (https://www.kaggle.com/c/titanic).
XY_df=pd.read_csv('../../datasets/Kaggle_titanic_train.csv')
test_df=pd.read_csv('../../datasets/Kaggle_titanic_test.csv')
```
# EDA and pre-processing
## Descriptive statistics (data shape, balance, etc)
```
# Quick shape/content check of the training set.
XY_df.shape
XY_df.head(5)
```
### Target distribution
```
# Class balance of the binary target.
XY_df['Survived'].value_counts()
oc.target_pie(XY_df,'Survived')
XY_df.shape
def convert_to_categorical(df):
    """Cast every object/category column of *df* to pandas 'category' dtype.

    Returns the (mutated) dataframe together with the list of converted
    column names, in column order. Covers the case where the caller passes
    plain object columns instead of pre-typed categoricals.
    """
    categorical_features = [
        c for c in df.columns
        if df[c].dtype == "object" or df[c].dtype.name == "category"
    ]
    for c in categorical_features:
        df[c] = df[c].astype('category')
    return df, categorical_features
def lgbm_fast(X_train, y_train, num, params=None):
    """Train a LightGBM binary classifier with minimal ceremony.

    Object columns are auto-cast via convert_to_categorical(); *num* is the
    number of boosting rounds. When *params* is None a near-default gbdt
    config is used. NOTE(review): relies on the module-level `lgb` import
    that only appears further down the notebook -- confirm execution order.
    """
    X_train, categorical_features=convert_to_categorical(X_train)
    lgb_train = lgb.Dataset(X_train, y_train, categorical_feature=categorical_features)
    if params == None:
        params = {
            "objective": "binary",
            "boosting": "gbdt",
            "scale_pos_weight": 0.02,
            "learning_rate": 0.005,
            "seed": 100,
            "verbose":-1
            # 'categorical_feature': 'auto',
            # 'metric': 'auc',
            # 'scale_pos_weight':0.1,
            # 'learning_rate': 0.02,
            # 'num_boost_round':2000,
            # "min_sum_hessian_in_leaf":1,
            # 'max_depth' : 100,
            # "num_leaves":31,
            # "bagging_fraction" : 0.4,
            # "feature_fraction" : 0.05,
        }
    clf = lgb.train(
        params, lgb_train, num_boost_round=num
    )
    return clf
```
## Dataset comparisons
```
# Feature list: every column except the row id and the target.
features=XY_df.columns.to_list()
print ('number of features ', len(features))
features_remove=['PassengerId','Survived']
for f in features_remove:
    features.remove(f)
def dataset_comparison(df1,df2, top=3):
    """Adversarial-validation check: train a classifier to distinguish df1
    from df2 and plot which features separate them most.

    NOTE(review): mutates both inputs by adding a 'label' column, and that
    'label' column is itself part of the features handed to the classifier.
    """
    print ('Datasets shapes:\n df1: '+str(df1.shape)+'\n df2: '+str(df2.shape))
    df1['label']=0
    df2['label']=1
    df=pd.concat([df1,df2])
    print (df.shape)
    clf=lgbm_fast(df,df['label'], 100, params=None)
    oc.plot_imp( clf, df, title="Datasets differences", model="lgbm", num=12, importaince_type="split", save_path=None)
    return df
df=dataset_comparison(XY_df[features],test_df)
import lightgbm as lgb
# Second version (shadows the definition above): uses gain importance and
# also plots correlations of the top features with the label.
def dataset_comparison(df1,df2, top=3):
    print ('Datasets shapes:\n df1: '+str(df1.shape)+'\n df2: '+str(df2.shape))
    df1['label']=0
    df2['label']=1
    df=pd.concat([df1,df2])
    print (df.shape)
    clf=lgbm_fast(df,df['label'], 100, params=None)
    feature_imp_list=oc.plot_imp( clf, df, title="Datasets differences", model="lgbm", num=10, importaince_type="gain", save_path=None)
    oc.target_corr(df,df['label'],feature_imp_list)
    return df
df=dataset_comparison(XY_df[features],test_df)
df[1700:1800]
```
### Selected features vs target historgrams
```
# Per-feature histograms split by survival outcome.
oc.hist_target(XY_df, 'Sex', 'Survived')
oc.hist_target(XY_df, 'Fare', 'Survived')
```
### Data summary - and missing values analysis
```
import missingno as msno
from pandas_summary import DataFrameSummary
# Column-level summary statistics, then the 5 most-missing columns.
dfs = DataFrameSummary(XY_df)
dfs.summary()
# Fraction of missing values per column, top 5
pd.Series(1 - XY_df.count() / len(XY_df)).sort_values(ascending=False).head(5)
```
## Data pre-processing
```
# Feature engineering: deck letter extracted from the cabin code, plus
# family-size features derived from SibSp/Parch.
XY_df['Cabin'] = XY_df['Cabin'].astype('str').fillna("U0")
deck = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "U": 8}
XY_df['Deck'] = XY_df['Cabin'].map(lambda x: re.compile("([a-zA-Z]+)").search(x).group())
XY_df['Deck'] = XY_df['Deck'].map(deck)
XY_df['Deck'] = XY_df['Deck'].fillna(0)
XY_df['Deck'] = XY_df['Deck'].astype('category')
# relatives = siblings/spouses + parents/children aboard
XY_df['relatives'] = XY_df['SibSp'] + XY_df['Parch']
XY_df.loc[XY_df['relatives'] > 0, 'not_alone'] = 0
XY_df.loc[XY_df['relatives'] == 0, 'not_alone'] = 1
XY_df['not_alone'] = XY_df['not_alone'].astype(int)
def encodeAgeFare(train):
    """Bin Age into 'Age_fare' (0-4) and Fare into 'Fare_adj' (0-3), in place.

    Age bins: <=16, (16,32], (32,48], (48,64], (64,80].
    Fare bins: <=7.91, (7.91,14.454], (14.454,31.0], (31.0,512.329].
    Values outside all bins (e.g. NaN) are left unassigned (NaN).
    """
    train.loc[train['Age'] <= 16, 'Age_fare'] = 0
    train.loc[(train['Age'] > 16) & (train['Age'] <= 32), 'Age_fare'] = 1
    train.loc[(train['Age'] > 32) & (train['Age'] <= 48), 'Age_fare'] = 2
    train.loc[(train['Age'] > 48) & (train['Age'] <= 64), 'Age_fare'] = 3
    # BUG FIX: was (Age > 48) & (Age <= 80), which overwrote bin 3 for
    # every age in (48, 64].
    train.loc[(train['Age'] > 64) & (train['Age'] <= 80), 'Age_fare'] = 4
    # BUG FIX: the first fare bin wrote to 'Fare' (destroying the raw fares
    # below 7.91) instead of the new 'Fare_adj' column.
    train.loc[train['Fare'] <= 7.91, 'Fare_adj'] = 0
    train.loc[(train['Fare'] > 7.91) & (train['Fare'] <= 14.454), 'Fare_adj'] = 1
    train.loc[(train['Fare'] > 14.454) & (train['Fare'] <= 31.0), 'Fare_adj'] = 2
    train.loc[(train['Fare'] > 31.0) & (train['Fare'] <= 512.329), 'Fare_adj'] = 3
encodeAgeFare(XY_df)
# Categorical features pre-processing via octopus-ml helper.
cat_list ,XY_df=oc.cat_features_proccessing(XY_df)
print (cat_list)
# Rebuild the feature list after the new engineered columns were added.
features=XY_df.columns.to_list()
print ('number of features ', len(features))
features_remove=['PassengerId','Survived']
for f in features_remove:
    features.remove(f)
X=XY_df[features]
y=XY_df['Survived']
from IPython.display import Image
Image("../images/octopus_know_your_data.PNG", width=600, height=600)
# presumably down-samples w.r.t. the 'Survived' target -- confirm oc.sampling semantics
XY_sampled=oc.sampling(XY_df,'Survived',200)
```
# ML template starts - training session
## Training model (LGBM) with stratisfied CV
```
def create(hyperparams):
    """Create LGBM Classifier for a given set of hyper-parameters."""
    return LGBMClassifier(**hyperparams)
def kfold_evaluation(X, y, k, hyperparams, esr=50):
    """K-fold cross-validation: train/validation AUC per fold as a DataFrame."""
    fold_scores = []
    splitter = KFold(k)
    for fold_no, (tr_idx, va_idx) in enumerate(splitter.split(X)):
        X_tr, y_tr = X.iloc[tr_idx], y.iloc[tr_idx]
        X_va, y_va = X.iloc[va_idx], y.iloc[va_idx]
        # Train a fresh model per fold, monitoring the held-out split
        clf = fit_with_stop(create(hyperparams), X_tr, y_tr, X_va, y_va, esr)
        fold_scores.append((evaluate(clf, X_tr, y_tr), evaluate(clf, X_va, y_va)))
    return pd.DataFrame(fold_scores, columns=['train score', 'validation score'])
# Constant
K = 5

# Objective function
def objective(trial):
    """Optuna objective: mean validation AUC over K folds.

    Trial-sampled values are merged with the fixed settings in the global
    ``best_params`` dict (fixed settings take precedence), then scored with
    an early-stopping patience of 10 rounds.
    """
    sampled = {
        'reg_alpha': trial.suggest_float('reg_alpha', 0.001, 10.0),
        'reg_lambda': trial.suggest_float('reg_lambda', 0.001, 10.0),
        'num_leaves': trial.suggest_int('num_leaves', 5, 1000),
        'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
        'max_depth': trial.suggest_int('max_depth', 5, 64),
        'colsample_bytree': trial.suggest_float('colsample_bytree', 0.1, 0.5),
        'cat_smooth': trial.suggest_int('cat_smooth', 10, 100),
        'cat_l2': trial.suggest_int('cat_l2', 1, 20),
        'min_data_per_group': trial.suggest_int('min_data_per_group', 50, 200),
    }
    sampled.update(best_params)
    cv_scores = kfold_evaluation(X, y, K, sampled, 10)
    return cv_scores['validation score'].mean()
def create(hyperparams):
    """Create an LGBM Classifier for a given set of hyper-parameters.

    NOTE(review): this redefines the identical ``create`` from the CV cell
    above; one of the two definitions could be removed.
    """
    model = LGBMClassifier(**hyperparams)
    return model
def fit(model, X, y):
    """Fit ``model`` on the full training data with per-iteration logging off."""
    model.fit(X, y,verbose=-1)
    return model
def fit_with_stop(model, X, y, X_val, y_val, esr):
    """Fit ``model`` on (X, y) while monitoring (X_val, y_val).

    ``esr`` is the early-stopping patience in rounds. Recent LightGBM
    versions removed the ``early_stopping_rounds`` fit argument (which the
    original left commented out, silently ignoring ``esr``) in favor of
    callbacks, so the callback API is tried first, with a plain monitored
    fit as a fallback when it is unavailable.
    """
    try:
        from lightgbm import early_stopping
        model.fit(X, y,
                  eval_set=(X_val, y_val),
                  callbacks=[early_stopping(esr, verbose=False)])
    except (ImportError, TypeError):
        # Older/newer API mismatch: fit without early stopping, as before.
        model.fit(X, y,
                  eval_set=(X_val, y_val),
                  verbose=-1)
    return model
def evaluate(model, X, y):
    """Return the ROC-AUC of ``model``'s positive-class probabilities on (X, y)."""
    positive_probs = model.predict_proba(X)[:, 1]
    return roc_auc_score(y, positive_probs)
```
## Hyper Parameter Optimization
```
# Fixed (non-searched) LightGBM settings, merged into every trial's params
best_params = {
    'n_estimators': 1000,
    'learning_rate': 0.05,
    'metric': 'auc',
    'verbose': -1
}

from lightgbm import LGBMClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score

# Maximize mean validation AUC over 50 Optuna trials
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=50)
study.best_value
# Layer the winning hyper-parameters on top of the fixed settings
best_params.update(study.best_params)
best_params
#plot_param_importances(study)
#plot_optimization_history(study)
# NOTE(review): this hand-tuned params dict is defined but never used below —
# oc.cv_adv is called with ``best_params`` from the Optuna search. Kept for
# reference; consider deleting it or passing it explicitly.
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'auc',
    'learning_rate': 0.1,
    'n_estimators': 500,
    'verbose': -1,
    'max_depth': -1,
    'seed':100,
    'min_split_gain': 0.01,
    'num_leaves': 18,
    'reg_alpha': 0.01,
    'reg_lambda': 1.50,
    'feature_fraction':0.2,
    'bagging_fraction':0.84
}

# Cross-validated training via the project helper; presumably the positional
# args are (threshold=0.5, n_rounds=1000) — TODO confirm oc.cv_adv signature
metrics= oc.cv_adv(X,y,0.5,1000,shuffle=True,params=best_params)
```
# Model evaluation
### Plot of the CV folds - F1 macro and F1 for the positive class
(in this case it's an unbalanced dataset)
```
oc.cv_plot(metrics['f1_weighted'],metrics['f1_macro'],metrics['f1_positive'],'Titanic Kaggle competition')
```
## Scikit learn - Classification report
```
print(classification_report(metrics['y'], metrics['predictions_folds']))
```
## ROC curve with AUC
```
oc.roc_curve_plot(metrics['y'], metrics['predictions_proba'])
```
## Confusion Matrix plot (normalized and with absolute values)
```
oc.confusion_matrix_plot(metrics['y'], metrics['predictions_folds'])
```
## Feature Importance plot
```
# Feature importance of the final classifier (top 15 plotted).
# Fixed the plot title: the original said 'Mortality', copied from another
# notebook — this is the Titanic competition.
feature_imp_list=oc.plot_imp(metrics['final_clf'],X,'LightGBM Titanic Kaggle',num=15)
top_features=feature_imp_list.sort_values(by='Value', ascending=False).head(20)
top_features
```
## Correlations analysis (on top features)
```
list_for_correlations=top_features['Feature'].to_list()
list_for_correlations.append('Survived')
oc.correlations(XY_df,list_for_correlations)
```
## Data leakage test
```
oc.data_leakage(X,top_features['Feature'].to_list())
```
## Analysis of FPs/FNs
```
# Top-10 most confident false positives / false negatives (by predicted probability)
fps=oc.recieve_fps(XY_df, metrics['index'] ,metrics['y'], metrics['predictions_proba'],top=10)
fns=oc.recieve_fns(XY_df, metrics['index'] ,metrics['y'], metrics['predictions_proba'],top=10)
fps
fns
# Pull the full feature rows for those samples
filter_fps = XY_df[XY_df.index.isin(fps['index'])]
filter_fns = XY_df[XY_df.index.isin(fns['index'])]
# Attach the predicted probability to each row, joining on the original index
filter_fps_with_prediction=pd.merge(filter_fps,fps[['index','preds_proba']], left_on=[pd.Series(filter_fps.index.values)], right_on=fps['index'])
filter_fns_with_prediction=pd.merge(filter_fns,fns[['index','preds_proba']], left_on=[pd.Series(filter_fns.index.values)], right_on=fns['index'])
```
### Top FPs with full features
```
filter_fps_with_prediction
```
### Top FNs with full features
```
filter_fns_with_prediction
```
| github_jupyter |
```
import sys
sys.path.append('../scripts/')
from puddle_world import *
import itertools
import collections
class PolicyEvaluator:
    """Policy evaluation on a grid-discretized puddle world.

    The pose space (x, y, theta) is cut into cells of size ``widths``;
    repeated calls to ``policy_evaluation_sweep`` apply the Bellman
    expectation backup to every non-terminal cell, using Monte-Carlo
    estimated transition probabilities and per-cell mean puddle depths.
    """
    def __init__(self, widths, goal, puddles, time_interval, sampling_num, \
                 puddle_coef=100.0, lowerleft=np.array([-4, -4]).T, upperright=np.array([4, 4]).T): # puddle_coef added
        self.pose_min = np.r_[lowerleft, 0]           # lower bound of (x, y, theta)
        self.pose_max = np.r_[upperright, math.pi*2]  # upper bound; theta covers one full turn
        self.widths = widths
        self.goal = goal
        self.index_nums = ((self.pose_max - self.pose_min)/self.widths).astype(int)
        nx, ny, nt = self.index_nums
        self.indexes = list(itertools.product(range(nx), range(ny), range(nt)))
        self.value_function, self.final_state_flags = self.init_value_function()
        self.policy = self.init_policy()
        # the distinct control outputs the policy ever emits
        self.actions = list(set([tuple(self.policy[i]) for i in self.indexes]))
        self.state_transition_probs = self.init_state_transition_probs(time_interval, sampling_num)
        self.depths = self.depth_means(puddles, sampling_num)
        self.time_interval = time_interval # added (used in the reward term)
        self.puddle_coef = puddle_coef
    def policy_evaluation_sweep(self): # added
        """One in-place Bellman expectation backup over all non-terminal states."""
        for index in self.indexes:
            if not self.final_state_flags[index]:
                self.value_function[index] = self.action_value(tuple(self.policy[index]), index) # convert the action to a tuple before passing it
    def action_value(self, action, index): # added
        """Expected one-step return of taking ``action`` from discrete state ``index``."""
        value = 0.0
        for delta, prob in self.state_transition_probs[(action, index[2])]: # index[2]: heading (theta) index
            after = tuple(self.out_correction(np.array(index).T + delta) ) # add the increment delta to the index, wrap the heading, then make a tuple
            reward = - self.time_interval * self.depths[(after[0], after[1])] * self.puddle_coef - self.time_interval
            value += (self.value_function[after] + reward) * prob
        return value
    def out_correction(self, index): # added
        """Wrap the heading index so it stays within [0, index_nums[2])."""
        index[2] = (index[2] + self.index_nums[2])%self.index_nums[2] # heading wrap-around
        return index
    def depth_means(self, puddles, sampling_num):
        """Mean puddle depth per (x, y) cell, estimated from sampling_num**2 sample points."""
        ### sample sampling_num**2 points evenly inside each cell ###
        dx = np.linspace(0, self.widths[0], sampling_num)
        dy = np.linspace(0, self.widths[1], sampling_num)
        samples = list(itertools.product(dx, dy))

        tmp = np.zeros(self.index_nums[0:2]) # accumulates the summed depth per cell
        for xy in itertools.product(range(self.index_nums[0]), range(self.index_nums[1])):
            for s in samples:
                pose = self.pose_min + self.widths*np.array([xy[0], xy[1], 0]).T + np.array([s[0], s[1], 0]).T # pose of the sampled point inside the cell
                for p in puddles:
                    tmp[xy] += p.depth*p.inside(pose) # depth times indicator (1 or 0) of lying inside the puddle
            tmp[xy] /= sampling_num**2 # convert the summed depth to a mean
        return tmp
    def init_state_transition_probs(self, time_interval, sampling_num):
        """Monte-Carlo estimate of discrete transition probabilities per (action, heading)."""
        ### sample sampling_num**3 points evenly inside a cell ###
        dx = np.linspace(0.001, self.widths[0]*0.999, sampling_num) # stay off the cell edges so samples never spill into neighbors
        dy = np.linspace(0.001, self.widths[1]*0.999, sampling_num)
        dt = np.linspace(0.001, self.widths[2]*0.999, sampling_num)
        samples = list(itertools.product(dx, dy, dt))

        ### for each action and each heading, move the sampled points and record the index increments ###
        tmp = {}
        for a in self.actions:
            for i_t in range(self.index_nums[2]):
                transitions = []
                for s in samples:
                    before = np.array([s[0], s[1], s[2] + i_t*self.widths[2]]).T + self.pose_min # pose before the transition
                    before_index = np.array([0, 0, i_t]).T # index before the transition
                    after = IdealRobot.state_transition(a[0], a[1], time_interval, before) # pose after the transition
                    after_index = np.floor((after - self.pose_min)/self.widths).astype(int) # index after the transition
                    transitions.append(after_index - before_index) # record the index increment
                unique, count = np.unique(transitions, axis=0, return_counts=True) # tally how often each cell-to-cell transition occurred
                probs = [c/sampling_num**3 for c in count] # divide by the sample count to obtain probabilities
                tmp[a,i_t] = list(zip(unique, probs))
        return tmp
    def init_policy(self):
        """Tabulate PuddleIgnoreAgent's policy at the center of every cell."""
        tmp = np.zeros(np.r_[self.index_nums,2]) # the control output is 2-D, so the array becomes 4-D
        for index in self.indexes:
            center = self.pose_min + self.widths*(np.array(index).T + 0.5) # coordinates of the cell center
            tmp[index] = PuddleIgnoreAgent.policy(center, self.goal)
        return tmp
    def init_value_function(self):
        """Return the initial value function and the terminal-state flags."""
        v = np.empty(self.index_nums) # one entry per discrete state
        f = np.zeros(self.index_nums)
        for index in self.indexes:
            f[index] = self.final_state(np.array(index).T)
            v[index] = self.goal.value if f[index] else -100.0
        return v, f
    def final_state(self, index):
        """True when the whole (x, y) footprint of the cell lies inside the goal."""
        x_min, y_min, _ = self.pose_min + self.widths*index # lower-left corner in the xy plane
        x_max, y_max, _ = self.pose_min + self.widths*(index + 1) # upper-right corner (lower-left of the diagonal neighbor)
        # '_' is the unused theta component, reused as a dummy third coordinate
        corners = [[x_min, y_min, _], [x_min, y_max, _], [x_max, y_min, _], [x_max, y_max, _] ] # the four corners
        return all([self.goal.inside(np.array(c).T) for c in corners ])
import seaborn as sns ###policyevaluator6create

# Two overlapping rectangular puddles, depth 0.1 each
puddles = [Puddle((-2, 0), (0, 2), 0.1), Puddle((-0.5, -2), (2.5, 1), 0.1)]
# 0.2 m x 0.2 m x 10-degree cells; goal at (-3, -3); dt=0.1 s; 10 samples per axis
pe = PolicyEvaluator(np.array([0.2, 0.2, math.pi/18]).T, Goal(-3,-3), puddles, 0.1, 10)

counter = 0 # number of sweeps performed
for i in range(10):
    pe.policy_evaluation_sweep()
    counter += 1

v = pe.value_function[:, :, 18] # value slice for one fixed heading index
sns.heatmap(np.rot90(v), square=False) # rotate so +y points up in the plot
plt.show()

print(counter)
```
| github_jupyter |
# CORIOLIX REST API Documentation
## EXAMPLE 1: Query the CORIOLIX REST API - Get a list of all REST endpoints
```
"""Example script to query the CORIOLIX REST API."""
# Key concepts:
# Use the python requests module to query the REST API
# Use the python json module to parse and dump the json response
# Returns:
# Returns a list of all CORIOLIX REST Endpoints
import requests
import json
# Base URL for Datapresence REST API - MODIFY AS NEEDED
rest_url = 'https://coriolix.ceoas.oregonstate.edu/oceanus/api/?format=json'
# Make the query to the REST API
response = requests.get(rest_url, verify=False)
# Load the response as json data
responseJSON = json.loads(response.text)
# Print all the published endpoints
print(json.dumps(responseJSON, indent=4, sort_keys=True))
```
## EXAMPLE 2: Query the CORIOLIX REST API - Get the current sensor observation
```
"""Example script to query the CORIOLIX REST API."""
# Key concepts:
# Use the python requests module to query the REST API
# Use the python json module to parse and dump the json response
# Select a specific endpoint to query.
# Returns:
# Returns a list of all currently valid sensor values
import requests
import json
# URL for Datapresence REST endpoint for the current observations table.
rest_url = 'https://coriolix.ceoas.oregonstate.edu/oceanus/api/cur_obs/?format=json'
#rest_url = 'https://coriolix.ceoas.oregonstate.edu/oceanus/api/decimateData/?model=TsgFlth&date_0=2019-10-10%2002:06:55.353%2B00&decfactr=1&format=json'
# Make the query to the REST API
response = requests.get(rest_url, verify=False)
# Load the response as json data
responseJSON = json.loads(response.text)
# Print all current observations
print(json.dumps(responseJSON, indent=4, sort_keys=True))
## EXAMPLE 3: Query the CORIOLIX REST API - Get the Thermosalinograph data for a user specified window of time
"""Example script to query the CORIOLIX REST API."""
# Key concepts:
# Use the python requests module to query the REST API
# Use the python json module to parse and dump the json response
# Select a specific sensor endpoint to query.
# Filter results
# Returns:
# Returns a list of all currently valid sensor values
import requests
import json
# URL for Datapresence REST endpoint for the current thermosalinograph table.
base_url = 'https://coriolix.ceoas.oregonstate.edu/oceanus/api/decimateData/?model=TsgFlth'
# Set the start date and time using the ISO8601 format, data stored in UTC
start_date = '2019-10-08T20:00:00Z'
end_date = '2019-10-08T21:00:00Z'
# build the query string
query_url = base_url+'?date_0='+start_date+'&date_1='+end_date+'&format=json'
query_url = base_url+'&date_0=2019-10-10%2002:06:55.353%2B00&decfactr=1&format=json'
# Make the query to the REST API
response = requests.get(query_url, verify=False)
# Load the response as json data
responseJSON = json.loads(response.text)
# Print all thermosalinograph observations
print(json.dumps(responseJSON, indent=4, sort_keys=True))
```
| github_jupyter |
# Read Cloud Optimized Geotiffs
The following materials are based on [this tutorial](https://geohackweek.github.io/raster/04-workingwithrasters/). Read more from that tutorial until this one gets better updated.
- Let's read a Landsat TIF profile from AWS cloud storage:
```
import rasterio
import matplotlib.pyplot as plt
import numpy as np

# Specify the path for Landsat TIF on AWS
fp = 'http://landsat-pds.s3.amazonaws.com/c1/L8/042/034/LC08_L1TP_042034_20170616_20170629_01_T1/LC08_L1TP_042034_20170616_20170629_01_T1_B4.TIF'

# See the profile (driver, dtype, CRS, transform, size) — reading the profile
# of a COG does not download the pixel data
with rasterio.open(fp) as src:
    print(src.profile)
```
- Let's plot a low resolution overview:
```
%matplotlib inline

# Open the COG
with rasterio.open(fp) as src:
    # List of overviews from biggest to smallest
    oviews = src.overviews(1)
    # Retrieve the smallest thumbnail
    oview = oviews[-1]
    print('Decimation factor= {}'.format(oview))
    # NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html)
    # — only the overview is fetched, not the full-resolution raster
    thumbnail = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))

print('array type: ',type(thumbnail))
print(thumbnail)

plt.imshow(thumbnail)
plt.colorbar()
plt.title('Overview - Band 4 {}'.format(thumbnail.shape))
plt.xlabel('Column #')
plt.ylabel('Row #')
- Let's fix the NoData values to be `NaN` instead of 0:
```
# Open the file
with rasterio.open(fp) as src:
    # Access the overviews
    oviews = src.overviews(1)
    oview = oviews[-1]
    print('Decimation factor= {}'.format(oview))
    # Read the thumbnail
    thumbnail = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))
    # Convert the values into float (integer arrays cannot hold NaN)
    thumbnail = thumbnail.astype('f4')
    # Convert 0 values to NaNs
    # NOTE(review): assumes 0 is the nodata fill for this product — confirm via src.nodata
    thumbnail[thumbnail==0] = np.nan

plt.imshow(thumbnail)
plt.colorbar()
plt.title('Overview - Band 4 {}'.format(thumbnail.shape))
plt.xlabel('Column #')
plt.ylabel('Row #')
- Let's take a subset from high resolution image:
```
#https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
#rasterio.windows.Window(col_off, row_off, width, height)
window = rasterio.windows.Window(1024, 1024, 1280, 2560)
with rasterio.open(fp) as src:
subset = src.read(1, window=window)
plt.figure(figsize=(6,8.5))
plt.imshow(subset)
plt.colorbar(shrink=0.5)
plt.title(f'Band 4 Subset\n{window}')
plt.xlabel('Column #')
plt.ylabel('Row #')
```
These commands demonstrate the basics how to use COGs to retrieve data from the cloud.
| github_jupyter |
# Economics 101B Spring 2018 Pre-Semester Exercises
### Professor DeLong
## Our Computing Environment, Jupyter notebooks
This webpage is called a Jupyter notebook. A notebook is a place to write programs and view their results.
### Text cells
In a notebook, each rectangle containing text or code is called a *cell*.
Text cells (like this one) can be edited by double-clicking on them. They're written in a simple format called [Markdown](http://daringfireball.net/projects/markdown/syntax) to add formatting and section headings. You don't need to learn Markdown, but you might want to.
After you edit a text cell, click the "run cell" button at the top that looks like ▶| to confirm any changes. (Try not to delete the instructions of the lab.)
**Question 1.1.1.** This paragraph is in its own text cell. Try editing it so that this sentence is the last sentence in the paragraph, and then click the "run cell" ▶| button . This sentence, for example, should be deleted. So should this one.
### Code cells
Other cells contain code in the Python 3 language. Running a code cell will execute all of the code it contains.
To run the code in a code cell, first click on that cell to activate it. It'll be highlighted with a little green or blue rectangle. Next, either press ▶| or hold down the `shift` key and press `return` or `enter`.
Try running this cell:
```
print("Hello, World!")
```
And this one:
```
print("\N{WAVING HAND SIGN}, \N{EARTH GLOBE ASIA-AUSTRALIA}!")
```
The fundamental building block of Python code is an expression. Cells can contain multiple lines with multiple expressions. When you run a cell, the lines of code are executed in the order in which they appear. Every `print` expression prints a line. Run the next cell and notice the order of the output.
```
print("First this line is printed,")
print("and then this one.")
```
**Question 1** Change the cell above so that it prints out:
First this line,
then the whole 🌏,
and then this one.
*Hint:* If you're stuck on the Earth symbol for more than a few minutes, try talking to a neighbor or a TA. That's a good idea for any lab problem.
## Writing Jupyter notebooks
You can use Jupyter notebooks for your own projects or documents. When you make your own notebook, you'll need to create your own cells for text and code.
To add a cell, click the + button in the menu bar. It'll start out as a text cell. You can change it to a code cell by clicking inside it so it's highlighted, clicking the drop-down box next to the restart (⟳) button in the menu bar, and choosing "Code".
**Question 2** Add a code cell below this one. Write code in it that prints out:
A whole new cell! ♪🌏♪
(That musical note symbol is like the Earth symbol. Its long-form name is `\N{EIGHTH NOTE}`.)
Run your cell to verify that it works.
## 1.4. Errors
Python is a language, and like natural human languages, it has rules. It differs from natural language in two important ways:
1. The rules are *simple*. You can learn most of them in a few weeks and gain reasonable proficiency with the language in a semester.
2. The rules are *rigid*. If you're proficient in a natural language, you can understand a non-proficient speaker, glossing over small mistakes. A computer running Python code is not smart enough to do that.
Whenever you write code, you'll make mistakes. When you run a code cell that has errors, Python will sometimes produce error messages to tell you what you did wrong.
Errors are okay; even experienced programmers make many errors. When you make an error, you just have to find the source of the problem, fix it, and move on.
We have made an error in the next cell. Run it and see what happens.
```
print("This line is missing something."
```
You should see something like this (minus our annotations):
<img src="images/error.jpg"/>
The last line of the error output attempts to tell you what went wrong. The *syntax* of a language is its structure, and this `SyntaxError` tells you that you have created an illegal structure. "`EOF`" means "end of file," so the message is saying Python expected you to write something more (in this case, a right parenthesis) before finishing the cell.
There's a lot of terminology in programming languages, but you don't need to know it all in order to program effectively. If you see a cryptic message like this, you can often get by without deciphering it. (Of course, if you're frustrated, ask a neighbor or a TA for help.)
Try to fix the code above so that you can run the cell and see the intended message instead of an error.
## 1.5. Submitting your work
All assignments in the course will be distributed as notebooks like this one, and you will submit your work from the notebook. We will use a system called OK that checks your work and helps you submit. At the top of each assignment, you'll see a cell like the one below that prompts you to identify yourself. Run it and follow the instructions.
```
# Don't change this cell; just run it.
# The result will give you directions about how to log in to the submission system, called OK.
# Once you're logged in, you can run this cell again, but it won't ask you who you are because
# it remembers you. However, you will need to log in once per assignment.
from client.api.notebook import Notebook
ok = Notebook('intro.ok')
_ = ok.auth(force=True, inline=True)
```
When you finish an assignment, you need to submit it by running the submit command below. It's OK to submit multiple times, OK will only try to grade your final submission for each assignment. Don't forget to submit your lab assignment at the end of section, even if you haven't finished everything.
```
_ = ok.submit()
```
Now that you are comfortable with our computing environment, we are going to be moving into more of the fundamentals of Python, but first, run the cell below to ensure all the libraries needed for this notebook are installed.
```
!pip install numpy
!pip install pandas
!pip install sklearn
!pip install seaborn
!pip install matplotlib
!pip install -U okpy
# imports
import numpy as np
import pandas as pd
import seaborn as sns
import ipywidgets as widgets
from ipywidgets import interact
import matplotlib.pyplot as plt
%matplotlib inline
```
Here is how to make an interactive model:
```
# make a function that takes in parameter[s] and graphs
# stick that into interact
```
# Introduction to programming concepts
Welcome to 101B! This introductory notebook will familiarize you with some of the basic strategies for data analysis that will be useful to you throughout the course. Once you have completed setting up Python on your computer using `pip install`, move on to the next cells to begin.
## Part 1: Python basics
Before getting into the more advanced analysis techniques that will be required in this course, we need to brush up on a few of the foundational elements of programming in Python.
### A. Expressions
The departure point for all programming is the concept of the __expression__. An expression is a combination of variables, operators, and other Python elements that the language interprets and acts upon. Expressions act as a set of instructions to be fed through the interpreter, with the goal of generating specific outcomes. See below for some examples of basic expressions.
```
### Examples of expressions:
a = 4
b = 10/5
### The two expressions above do not return anything – they simply store values to the computer.
### An expression that returns an output:
print(a + b)
```
### B. Variables
In the examples above, `a` and `b` are specific Python objects known as __variables__. The first two lines set the variables equal to numerical (one `integer` and one `float`) values, while the final line asks the interpreter to `print` their sum. Variables are stored within the notebook's environment, meaning stored variable values carry over from cell to cell.
```
### Notice that 'a' retains its value.
print(a)
```
### Question 1: Variables
See if you can write a series of expressions that creates two new variables called __x__ and __y__, assigns them values of __10.5__ and __7.2__, then prints their product.
```
### Fill in the missing lines to complete the expressions.
x = ...
...
print()
```
### C. Lists
The next topic is particularly useful in the kind of data manipulation that you will see throughout 101B. The following few cells will introduce the concept of __lists__ (and their counterpart, `numpy arrays`). Read through the following cell to understand the basic structure of a list.
```
### A list is initialized like this:
# (note the comma between 'are' and 'fun' — without it Python silently
# concatenates the two adjacent string literals into a single 'arefun')
lst = [1, 3, 6, 'lists', 'are', 'fun', 4]

### And elements are selected like this:
example = lst[2]

### The above line selects the 3rd element of lst (list indices are 0-offset) and sets it to a variable named example.
print(example)
```
### Slicing lists
As you can see from above, lists do not have to be made up of elements of the same kind. Indices do not have to be taken one at a time, either. Instead, we can take a slice of indices and return the elements at those indices as a separate list.
```
### This line will store the first (inclusive) through fourth (exclusive) elements of lst as a new list called lst_2:
lst_2 = lst[1:4]
lst_2
```
### Question 2: Lists
Build a list of length 10 containing whatever elements you'd like. Then, slice it into a new list of length five using index slicing. Finally, print the last element in your sliced list.
```
### Fill in the ellipses to complete the question.
my_list = ...
my_list_sliced = my_list[...]
print(...)
```
Lists can also be operated on with a few built-in analysis functions. These include `min` and `max`, among others. Lists can also be concatenated together. Find some examples below.
```
### A list containing six integers.
a_list = [1, 6, 4, 8, 13, 2]

### Another list containing six integers.
b_list = [4, 5, 2, 14, 9, 11]

print('Max of a_list:', max(a_list))
# Fixed: the original computed min(a_list) while labeling it 'Min of b_list'.
print('Min of b_list:', min(b_list))

### Concatenate a_list and b_list:
c_list = a_list + b_list
print('Concatenated:', c_list)
```
### D. Numpy Arrays
Closely related to the concept of a list is the array, a nested sequence of elements that is structurally identical to a list. Arrays, however, can be operated on arithmetically with much more versatility than regular lists. For the purpose of later data manipulation, we'll access arrays through Numpy, which will require an installation and an import statement.
To install numpy, open your terminal and use the command:
> `pip install numpy`
Now run the next cell to import the numpy library into your notebook, and examine how numpy arrays can be used.
```
import numpy as np
### Initialize an array of integers 0 through 9.
example_array = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
### This can also be accomplished using np.arange
example_array_2 = np.arange(10)
### Double the values in example_array and print the new array.
double_array = example_array*2
double_array
```
### E. Looping
Loops are often useful in manipulating, iterating over, or transforming large lists and arrays. The first type we will discuss is the __for loop__. For loops are helpful in traversing a list and performing an action at each element. For example, the following code moves through every element in example_array, adds it to the previous element in example_array, and copies this sum to a new array.
```
new_list = []
for element in example_array:
new_element = element + 5
new_list.append(new_element)
new_list
```
The most important line in the above cell is the "`for element in...`" line. This statement sets the structure of our loop, instructing the machine to stop at every number in `example_array`, perform the indicated operations, and then move on. Once Python has stopped at every element in `example_array`, the loop is completed and the final line, which outputs `new_list`, is executed. It's important to note that "element" is an arbitrary variable name used to represent whichever index value the loop is currently operating on. We can change the variable name to whatever we want and achieve the same result, as long as we stay consistent. For example:
```
newer_list = []
for completely_arbitrary_name in example_array:
newer_element = completely_arbitrary_name + 5
newer_list.append(newer_element)
newer_list
```
For loops can also iterate over ranges of numerical values. If I wanted to alter `example_array` without copying it over to a new list, I would use a numerical iterator to access list indices rather than the elements themselves. This iterator, called `i`, would range from 0, the value of the first index, to 9, the value of the last. I can make sure of this by using the built-in `range` and `len` functions.
```
for i in range(len(example_array)):
example_array[i] = example_array[i] + 5
example_array
```
### Other types of loops
The __while loop__ repeatedly performs operations until its conditional is no longer satisfied. In the below example, an array of integers 0 to 9 is generated. When the program enters the while loop on the subsequent line, it notices that the maximum value of the array is less than 50. Because of this, it adds 1 to the fifth element, as instructed. Once the instructions embedded in the loop are complete, the program refers back to the conditional. Again, the maximum value is less than 50. This process repeats until the the fifth element, now the maximum value of the array, is equal to 50, at which point the conditional is no longer true and the loop breaks.
```
while_array = np.arange(10) # Generate our array of values
print('Before:', while_array)
while(max(while_array) < 50): # Set our conditional
while_array[4] += 1 # Add 1 to the fifth element if the conditional is satisfied
print('After:', while_array)
```
### Question 3: Loops
In the following cell, partial steps to manipulate an array are included. You must fill in the blanks to accomplish the following: <br>
1. Iterate over the entire array, checking if each element is a multiple of 5
2. If an element is not a multiple of 5, add 1 to it repeatedly until it is
3. Iterate back over the list and print each element.
> Hint: To check if an integer `x` is a multiple of `y`, use the modulus operator `%`. Typing `x % y` will return the remainder when `x` is divided by `y`. Therefore, (`x % y != 0`) will return `True` when `y` __does not divide__ `x`, and `False` when it does.
```
### Make use of iterators, range, length, while loops, and indices to complete this question.
question_3 = np.array([12, 31, 50, 0, 22, 28, 19, 105, 44, 12, 77])
for i in range(len(...)):
while(...):
question_3[i] = ...
for element in question_3:
print(...)
```
### F. Functions!
Functions are useful when you want to repeat a series of steps on multiple different objects, but don't want to type out the steps over and over again. Many functions are built into Python already; for example, you've already made use of `len()` to retrieve the number of elements in a list. You can also write your own functions, though, and at this point you already have the skills to do so. <br>
Functions generally take a set of __parameters__, which define the objects they will use when they are run. For example, the `len()` function takes a list or array as its parameter, and returns the length of that list. <br>
The following cell gives an example of an extremely simple function, called `add_two`, which takes as its parameter an integer and returns that integer with, you guessed it, 2 added to it.
```
# An adder function that adds 2 to the given n.
def add_two(n):
    """Return n + 2."""
    return n + 2

add_two(5)
```
Easy enough, right? Let's look at a function that takes two parameters, compares them somehow, and then returns a boolean value (`True` or `False`) depending on the comparison. The `is_multiple` function below takes as parameters an integer `m` and an integer `n`, checks if `m` is a multiple of `n`, and returns `True` if it is. Otherwise, it returns `False`.
```
def is_multiple(m, n):
    """Return True when m is evenly divisible by n, else False."""
    # The comparison already yields a bool — no if/else needed.
    return m % n == 0

is_multiple(12, 4)
is_multiple(12, 7)
```
Since functions are so easily replicable, we can include them in loops if we want. For instance, our `is_multiple` function can be used to check if a number is prime! See for yourself by testing some possible prime numbers in the cell below.
```
# Change possible_prime to any integer to test its primality
# NOTE: If you happen to stumble across a large (> 8 digits) prime number, the cell could take a very, very long time
# to run and will likely crash your kernel. Just click kernel>interrupt if it looks like it's caught.
# NOTE(review): simple trial division; for inputs below 3 the loop body never
# runs, so nothing is printed.
possible_prime = 9999991

for i in range(2, possible_prime):
    if (is_multiple(possible_prime, i)):
        print(possible_prime, 'is not prime')
        break
    if (i >= possible_prime/2):
        print(possible_prime, 'is prime')
        break
```
### Question 4: Writing functions
In the following cell, complete a function that will take as its parameters a list and two integers x and y, iterate through the list, and replace any number in the list that is a multiple of x with y.
> Hint: use the is_multiple() function to streamline your code.
```
def replace_with_y(lst, x, y):
"""Replace, in place, every element of lst that is a multiple of x with y, and return lst."""
# Exercise skeleton — fill in the three `...` placeholders: iterate over the
# list's indices, test each element with is_multiple, and assign y on a match.
for i in range(...):
if(...):
...
return lst
```
## Pandas Dataframes
We will be using Pandas dataframes for much of this class to organize and sort through economic data. Pandas is one of the most widely used Python libraries in data science. It is mainly used for data cleaning, and with good reason: it’s very powerful and flexible, among many other things.
### Creating dataframes
The rows and columns of a pandas dataframe are essentially a collection of lists stacked on top/next to each other. For example, if I wanted to store the top 10 movies and their ratings in a datatable, I could create 10 lists that each contain a rating and a corresponding title, and these lists would be the rows of the table:
```
# Build the table from a list of [rating, title] rows; each inner list
# becomes one row of the DataFrame.
# NOTE(review): np.array on mixed numbers/strings coerces every value to a
# string, so the "Rating" column holds strings here rather than floats —
# confirm this is acceptable before doing numeric work on that column.
top_10_movies = pd.DataFrame(data=np.array(
[[9.2, 'The Shawshank Redemption (1994)'],
[9.2, 'The Godfather (1972)'],
[9., 'The Godfather: Part II (1974)'],
[8.9, 'Pulp Fiction (1994)'],
[8.9, "Schindler's List (1993)"],
[8.9, 'The Lord of the Rings: The Return of the King (2003)'],
[8.9, '12 Angry Men (1957)'],
[8.9, 'The Dark Knight (2008)'],
[8.9, 'Il buono, il brutto, il cattivo (1966)'],
[8.8, 'The Lord of the Rings: The Fellowship of the Ring (2001)']]), columns=["Rating", "Movie"])
top_10_movies
```
Alternatively, we can store data in a dictionary instead of in lists. A dictionary keeps a mapping of keys to a set of values, and each key is unique. Using our top 10 movies example, we could create a dictionary that contains ratings as one key, and movie titles as another key.
```
top_10_movies_dict = {"Rating" : [9.2, 9.2, 9., 8.9, 8.9, 8.9, 8.9, 8.9, 8.9, 8.8],
"Movie" : ['The Shawshank Redemption (1994)',
'The Godfather (1972)',
'The Godfather: Part II (1974)',
'Pulp Fiction (1994)',
"Schindler's List (1993)",
'The Lord of the Rings: The Return of the King (2003)',
'12 Angry Men (1957)',
'The Dark Knight (2008)',
'Il buono, il brutto, il cattivo (1966)',
'The Lord of the Rings: The Fellowship of the Ring (2001)']}
```
Now, we can use this dictionary to create a table with columns `Rating` and `Movie`
```
top_10_movies_2 = pd.DataFrame(data=top_10_movies_dict, columns=["Rating", "Movie"])
top_10_movies_2
```
Notice how both ways return the same table! However, the list method created the table by essentially taking the lists and making up the rows of the table, while the dictionary method took the keys from the dictionary to make up the columns of the table. In this way, dataframes can be viewed as a collection of basic data structures, either through collecting rows or columns.
### Reading in Dataframes
Luckily for you, most datatables in this course will be premade and given to you in a form that is easily read into a pandas method, which creates the table for you. A common file type that is used for economic data is a Comma-Separated Values(.csv) file, which stores tabular data. It is not necessary for you to know exactly how .csv files store data, but you should know how to read a file in as a pandas dataframe.
We will read in a .csv file that contains quarterly real GDI, real GDP, and nominal GDP data in the U.S. from 1947 to the present.
```
### Run this cell to read in the table
accounts = pd.read_csv("data/Quarterly_Accounts.csv")
```
The `pd.read_csv` function expects a path to a .csv file as its input, and will return a datatable created from the data contained in the csv.
We have provided `Quarterly_Accounts.csv` in the data directory, which is itself contained in the current working directory (i.e. the folder this assignment lives in). For this reason, we must tell the `read_csv` function to look for the csv inside the data directory, and the `data/` prefix in the path indicates that `Quarterly_Accounts.csv` can be found there.
Here is a sample of some of the rows in this datatable:
```
accounts.head()
```
### Indexing Dataframes
Oftentimes, tables will contain a lot of extraneous data that muddles our datatables, making it more difficult to quickly and accurately obtain the data we need. To correct for this, we can select out columns or rows that we need by indexing our dataframes.
The easiest way to index into a table is with square bracket notation. Suppose you wanted to obtain all of the Real GDP data from the data. Using a single pair of square brackets, you could index the table for `"Real GDP"`
```
## Run this cell and see what it outputs
accounts["Real GDP"]
```
Notice how the above cell returns an array of all the real GDP values in their original order.
Now, if you wanted to get the first real GDP value from this array, you could index it with another pair of square brackets:
```
accounts["Real GDP"][0]
```
Keep in mind that pandas dataframes, as well as many other data structures, are zero-indexed, meaning indexes start at 0 and end at the number of elements minus one.
If you wanted to create a new datatable with select columns from the original table, you can index with double brackets.
```
## Note: .head() returns the first five rows of the table
accounts[["Year", "Quarter", "Real GDP", "Real GDI"]].head()
```
You can also use column indices instead of names.
```
# Position-based column selection must go through .iloc (rows, columns):
# modern pandas raises a KeyError for accounts[[0, 1, 2, 3]] because plain
# brackets look the integers up as column *labels*, not positions.
accounts.iloc[:, [0, 1, 2, 3]].head()
```
Alternatively, you can also get rid of columns you dont need using `.drop()`
```
accounts.drop("Nominal GDP", axis=1).head()
```
Finally, you can use square bracket notation to index rows by their indices with a single set of brackets. You must specify a range of values for which you want to index. For example, if I wanted the 20th to 30th rows of `accounts`:
```
accounts[20:31]
```
### Filtering Data
As you can tell from the previous section, indexing rows based on indices is only useful when you know the specific set of rows that you need, and you can only really get a range of entries. Working with data often involves huge datasets, making it inefficient and sometimes impossible to know exactly what indices to be looking at. On top of that, most data analysis concerns itself with looking for patterns or specific conditions in the data, which is impossible to look for with simple index based sorting.
Thankfully, you can also use square bracket notation to filter out data based on a condition. Suppose we only wanted real GDP and nominal GDP data from the 21st century:
```
accounts[accounts["Year"] >= 2000][["Real GDP", "Nominal GDP"]]
```
The `accounts` table is being indexed by the condition `accounts["Year"] >= 2000`, which returns a table where only rows that have a "Year" greater than $2000$ is returned. We then index this table with the double bracket notation from the previous section to only get the real GDP and nominal GDP columns.
Suppose now we wanted a table with data from the first quarter, and where the real GDP was less than 5000 or nominal GDP is greater than 15,000.
```
accounts[(accounts["Quarter"] == "Q1") & ((accounts["Real GDP"] < 5000) | (accounts["Nominal GDP"] > 15000))]
```
Many different conditions can be included to filter, and you can use `&` and `|` operators to connect them together. Make sure to include parentheses for each condition!
Another way to reorganize data to make it more convenient is to sort the data by the values in a specific column. For example, if we wanted to find the highest real GDP since 1947, we could sort the table for real GDP:
```
accounts.sort_values("Real GDP")
```
But wait! The table looks like it's sorted in increasing order. This is because `sort_values` defaults to ordering the column in ascending order. To correct this, add in the extra optional parameter
```
accounts.sort_values("Real GDP", ascending=False)
```
Now we can clearly see that the highest real GDP was attained in the first quarter of this year, and had a value of 16903.2
### Useful Functions for Numeric Data
Here are a few useful functions when dealing with numeric data columns.
To find the minimum value in a column, call `min()` on a column of the table.
```
accounts["Real GDP"].min()
```
To find the maximum value, call `max()`.
```
accounts["Nominal GDP"].max()
```
And to find the average value of a column, use `mean()`.
```
accounts["Real GDI"].mean()
```
## Part 2: Visualization and Regression
Now that you have completed the Python tutorial, you are now ready to learn about how to visualize data and how to analyze data with regression. To begin, run the cells below to import the required packages we will be using for this tutorial.
```
%matplotlib inline
import numpy as np
import pandas as pd
import sklearn as sk
import matplotlib.pyplot as plt
```
We will be using the US unemployment data from https://fred.stlouisfed.org/ to show what we can do with data. Let's start by importing the .csv file data with pandas. The statement below will put the csv file into a pandas DataFrame, a data structure for holding tabular (2D) data.
```
unemployment_data = pd.read_csv("data/detailed_unemployment.csv")
unemployment_data
```
We can start visualizing the data that we have in the table. First, we convert the table into a numpy array. Let's extract the columns that we are interested in and plot them with pyplot.
```
#Once this cell is run, the "data" variable will store the table and can be accessed from any cell
data = np.array(unemployment_data[:len(unemployment_data)-1])
#data[:, col_num] means take select all row values in that column number in numpy
total_unemployed = data[:, 1]
not_labor = data[:, 3]
#Plot the data by inputting the x and y axis
plt.scatter(total_unemployed, not_labor)
plt.xlabel("Percent Unemployed")
plt.ylabel("Total Not In Labor, Searched for Work")
plt.show()
```
## Question 5: Plotting
Try plotting the total percent of people unemployed vs those unemployed for more than 15 weeks.
```
total_unemployed = ...
unemp_15_weeks = ...
plt.scatter(total_unemployed, unemp_15_weeks)
plt.xlabel("Percent Unemployed")
plt.ylabel("Total Unemployed for > 15 Weeks")
plt.show()
ok.grade('q05')
```
Now that we know how to select and plot our data, we are ready to dive into regression. For our current task, let's use the total unemployed and housing price index columns.
```
total_unemployed = data[:,1]
hpi = data[:,7]
plt.scatter(total_unemployed, hpi)
plt.xlabel("Percent Unemployed")
plt.ylabel("Housing Price Index")
plt.show()
```
The Scikit Learn library has lots of helpful tools for regression tasks. We will perform linear regression on our data with the imported LinearRegression class. The .fit(x, y) method fits the model to the input data and y values and saves it to that instance of the model. We can then use the .predict(x) method to predict the values of the input data from the model.
```
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# We set x and y to our column values, and use np.reshape so that the data
# is formatted correctly (a 2-D column vector) for the model.
# (-1, 1) lets numpy infer the row count, so this works for any number of
# observations instead of the original hard-coded 73.
x = total_unemployed
x = np.reshape(x, (-1, 1))
y = hpi
model.fit(x, y)            # fit ordinary least squares to (x, y)
y_ = model.predict(x)      # fitted values, used to draw the regression line
plt.scatter(x, y)          # raw data points
plt.plot(x, y_)            # regression line
plt.xlabel("Percent Unemployed")
plt.ylabel("Housing Price Index")
plt.show()
```
You can also use your linear regression model to predict values based on your current data. For example, if we wanted to predict the housing price index at an 18% unemployment rate:
```
#The predict method returns an array of length 1, so let's take the first element since it's our predicted value
# predict() expects a 2-D array of shape (n_samples, n_features), so a single
# value must be wrapped as [[18]] — passing a bare scalar raises an error in
# modern scikit-learn.
prediction = model.predict([[18]])[0]
prediction
```
## Question 6: Regression
Let's make a regression model to predict the total number of people unemployed for more than 15 weeks based on the total percent of people unemployed. Then, let's make a prediction on the number of people unemployed for more than 15 weeks if the unemployment rate was 20%.
```
#Initialize the model
model = ...
#Set up our x and y variables using columns from earlier
x = ...
x = np.reshape(x, (73, 1))
y = ...
#Fit our model to the data and store y_ values for our regression line
#INSERT CODE HERE
y_ = ...
plt.scatter(x, y)
plt.plot(x, y_)
plt.xlabel("Percent Unemployed")
plt.ylabel("Total Unemployed for > 15 Weeks")
plt.show()
#Make your prediction here
# predict() expects a 2-D array of samples, so wrap the single value as [[20]].
prediction = model.predict([[20]])[0]
prediction
ok.grade('q06')
```
Congratulations! You have completed the regression tutorial. Try importing your own datasets and using regression to analyze your data.
## Chapter 1: Introduction to Macroeconomics
#### Suppose a quantity grows at a steady proportional rate of 3% per year.
How long will it take to double?
```
# ANSWER
TIME_TO_DOUBLE = ___
```
Quadruple?
```
# ANSWER
TIME_TO_QUADRUPLE = ___
```
Grow 1024-fold?
```
# ANSWER
TIME_TO_1024 = ___
```
#### Suppose we have a quantity x(t) that varies over time following the equation: $\frac{dx(t)}{dt} = -(0.06)t + 0.36$
Without integrating the equation:
$1.$ Tell me what the long-run steady-state value of $x$--that is, the limit of $x$ as $t$ approaches in infinity--is going to be.
```
steady_state_val = ___
```
$2.$ Suppose that the value of $x$ at time $t=0$, $x(0)$ equals 12. Once again, without integrating the equation, tell me how long it will take x to close half the distance between its initial value of 12 and its steady-state value.
```
half_dist_time = ___
```
$3.$ How long will it take to close 3/4 of the distance?
```
three_fourth_time = ___
```
$4.$ $7/8$ of the distance?
```
seven_eighth_time = ___
```
$5.$ $15/16$ of the distance?
```
fifteen_sixteenth = ___
```
Now you are allowed to integrate $\frac{dx(t)}{dt} = -(0.06)t + 0.36$.
$1.$ Write down and solve the indefinite integral.
<font color='blue'> ANSWER: here is an integral
$2.$ Write down and solve the definite integral for the initial condition $x(0) = 12$.
<font color='blue'> ANSWER:
$3.$ Write down and solve the definite integral for the initial condition $x(0) = 6$.
<font color='blue'> ANSWER:
#### Suppose we have a quantity $z = (\frac{x}{y})^\beta$
Suppose $x$ is growing at 4% per year and that $\beta=1/4$:
$1.$ How fast is $z$ growing if $y$ is growing at 0% per year?
```
zero_per_growth = ___
```
$2.$ If $y$ is growing at 2% per year?
```
two_per_growth = ___
```
$3.$ If $y$ is growing at 4% per year?
```
four_per_growth = ___
```
#### Rule of 72
1. If a quantity grows at about 3% per year, how long will it take to double?
```
time_to_double = ___
```
$2.$ If a quantity shrinks at about 4% per year, how long will it take it to halve itself?
```
time_to_half = ___
```
$3.$ If a quantity doubles five times, how large is it relative to its original value?
```
doubled_five_times_ratio = ___
```
$4.$ If a quantity halves itself three times, how large is it relative to its original value?
```
halved_three_times_ratio = ___
```
#### Show the relationship between the interest rate and the amount of time it takes to double graphically
```
def graph(interest_rate):
    """Plot the Rule-of-72 doubling-time curve and mark the chosen rate."""
    rates = np.linspace(1, 10, 30)
    doubling_times = 72 / rates
    print('Time to double:', 72 / interest_rate, 'years')
    plt.plot(rates, doubling_times)
    # Highlight the selected interest rate in red on the curve.
    plt.scatter(interest_rate, 72 / interest_rate, c='r')
    plt.xlabel('interest rate (%)')
    plt.ylabel('time (years)')
interact(graph, interest_rate=widgets.IntSlider(min=1,max=10,step=1))
```
#### How close is this to the actual formula? (EXPAND)
#### Why do DeLong and Olney think that the interest rate and the level of the stock market are important macroeconomic variables?
<font color='blue'> ANSWER:
#### What are the principal flaws in using national product per worker as a measure of material welfare? Given these flaws, why do we use it anyway?
<font color='blue'> ANSWER:
#### What is the difference between the nominal interest rate and the real interest rate? Why do DeLong and Olney think that the real interest rate is more important?
<font color='blue'> ANSWER:
## Chapter 2: Measuring the Macroeconomy
#### National Income and Product Accounting
Explain whether or not, why, and how the following items are included in the calculations of national product:
$1.$ Increases in business inventories.
<font color='blue'> ANSWER:
$2.$ Fees earned by real estate agents on selling existing homes.
<font color='blue'> ANSWER:
$3.$ Social Security checks written by the government.
<font color='blue'> ANSWER:
$4.$ Building of a new dam by the Army Corps of Engineers.
<font color='blue'> ANSWER:
$5.$ Interest that your parents pay on the mortgage they have on their house.
<font color='blue'> ANSWER:
$6.$ Purchases of foreign-made trucks by American residents
<font color='blue'> ANSWER:
#### In or Out of National Product? And Why
Explain whether or not, why, and how the following items are included in the calculation of national product:
$1.$ The sale for \$25,000 of an automobile that cost \$20,000 to manufacture that had been produced here at home last year and carried over in inventory.
<font color='blue'> ANSWER:
$2.$ The sale for \$35,000 of an automobile that cost \$25,000 to manufacture newly- made at home this year.
<font color='blue'> ANSWER:
$3.$ The sale for \$45,000 of an automobile that cost \$30,000 to manufacture that was newly-made abroad this year and imported.
<font color='blue'> ANSWER:
$4.$ The sale for \$25,000 of an automobile that cost \$20,000 to manufacture that was made abroad and imported last year.
<font color='blue'> ANSWER:
#### In or Out of National Product? And Why II
Explain whether or not, why, and how the following items are included in the calculation of GDP:
$1.$ The purchase for \$500 of a dishwasher produced here at home this year.
<font color='blue'> ANSWER:
$2.$ The purchase for $500 of a dishwasher made abroad this year.
<font color='blue'> ANSWER:
$3.$ The purchase for $500 of a used dishwasher.
<font color='blue'> ANSWER:
$4.$ The manufacture of a new dishwasher here at home for $500 of a dishwasher that
then nobody wants to buy.
<font color='blue'> ANSWER:
#### Components of National Income and Product
Suppose that the appliance store buys a refrigerator from the manufacturer on December 15, 2018 for \$600, and that you then buy that refrigerator on January 15, 2019 for \$1000:
$1.$ What is the contribution to GDP in 2018?
```
contribution_2018 = ___
```
$2.$ How is the refrigerator accounted for in the NIPA in 2018?
<font color='blue'> ANSWER:
$3.$ What is the contribution to GDP in 2019?
```
contribution_2019 = ___
```
$4.$ How is the refrigerator accounted for in the NIPA in 2019?
<font color='blue'> ANSWER:
```
## These lines are reading in CSV files and creating datatables from then, you don't have to worry about them! ##
unemployment = pd.read_csv("data/Unemployment.csv")
quarterly_acc = pd.read_csv("data/Quarterly_Accounts.csv")
from_2007 = quarterly_acc.loc[(quarterly_acc["Year"].isin(np.arange(2007, 2018)))]
```
### Estimating National Product
The Bureau of Economic Analysis measures national product in two different ways: as total expenditure on the economy’s output of goods and services and as the total income of everyone in the economy. Since – as you learned in earlier courses – these two things are the same, the two approaches should give the same answer. But in practice they do not.
We have provided a data table `quarterly_gdp` that contains quarterly data on real GDP measured on the expenditure side (referred to in the National Income and Product Accounts as “Real Gross Domestic Product, chained dollars”) and real GDP measured on the income side (referred to as “Real Gross Domestic Income, chained dollars”). The table refers to Real Gross Dometic Product as "Real GDP" and to Real Gross Dometic Income as "Real GDI", and they are measured in billions of dollars. (Note: You will not have to use Nominal GDP)
Another table, `from_2007`, has been created from `quarterly_gdp`, and includes information from 2007 to 2017.
Below is a snippet from `from_2007`:
```
from_2007.head(10)
```
$1.$ Compute the growth rate at an annual rate of each of the two series by quarter for
2007:Q1–2012:Q4.
```
gdi_rate = ___
gdp_rate = ___
```
$2.$ Describe any two things you see when you compare the two series that you find
interesting, and explain why you find them interesting.
<font color='blue'> ANSWER:
#### Calculating Real Magnitudes:
$1.$ When you calculate real national product, do you do so by dividing nominal national product by the price level or by subtracting the price level from nominal national product?
<font color='blue'> ANSWER:
$2.$ When you calculate the real interest rate, do you do so by dividing the nominal interest rate by the price level or by subtracting the inflation rate from the nominal interest rate?
<font color='blue'> ANSWER:
$3.$ Are your answers to (a) and (b) the same? Why or why not?
<font color='blue'> ANSWER:
### Unemployment Rate
Use the `unemployment` table provided to answer the following questions. ***All numbers (other than percents) are in the thousands.***
Here are the first five entries of the table.
```
unemployment.head()
```
#### What, roughly, was the highest level the U.S. unemployment rate (measured as Percent Unemployed of Labor Force in the table) reached in:
$1.$ The 20th century?
```
unemployment_20th = ___
```
$2.$ The past fifty years?
```
unemployment_past_50 = ___
```
$3.$ The twenty years before 2006?
```
unemployment_before_2006 = ___
```
$4.$ Given your answers to (1) through (3), Do you think there is a connection between your answer to the question above and the fact that Federal Reserve Chair Alan Greenspan received a five-minute standing ovation at the end of the first of many events marking his retirement in 2005?
<font color='blue'> ANSWER:
#### The State of the Labor Market
$1.$ About how many people lose or quit their jobs in an average year?
```
average_quitters = ___
```
$2.$ About how many people get jobs in an average year?
```
average_getters = ___
```
$3.$ About how many people are unemployed in an average year?
```
average_unemployed = ___
```
$4.$ About how many people are at work in an average year?
```
average_workers = ___
```
$5.$ About how many people are unemployed now?
```
unemployed_now = ___
```
#### National Income Accounting:
$1.$ What was the level of real GDP in 2005 dollars in 1970?
```
real_gdp_2005 = ___
```
$2.$ What was the rate of inflation in the United States in 2000?
```
inflation_rate_2000 = ___
```
$3.$ Explain whether or not, how, and why the following items are included in the calculation of GDP: (i) rent you pay on an apartment, (ii) purchase of a used textbook, (iii) purchase of a new tank by the Department of Defense, (iv) watching an advertisement on youtube.
<font color='blue'> ANSWER:
Congratulations, you have finished your first assignment for Econ 101B! Run the cell below to submit all of your work. Make sure to check on OK to make sure that it has uploaded.
```
_ = ok.submit()
```
Materials from this notebook were partly taken from [Data 8](http://data8.org/), [CS 61A](http://cs61a.org/), and [DS Modules](http://data.berkeley.edu/education/modules) lessons.
| github_jupyter |
# Dataframe modification
```
import os
import pandas as pd
import numpy as np
# Use a forward-slash relative path: it works on every OS, whereas the
# original backslash string ('..\Data\dataset_clean.csv') only resolved on
# Windows and contained invalid escape sequences (\D, \d).
filename = '../Data/dataset_clean.csv'
df = pd.read_csv(filename)
df_2=df[['Q1','Q4','Q5','Q10','Q16_Part_1','Q16_Part_2','Q16_Part_3','Q16_Part_4','Q16_Part_5','Q16_Part_6','Q16_Part_7','Q16_Part_8','Q16_Part_9','Q16_Part_10','Q18_Part_1','Q18_Part_2','Q18_Part_3','Q18_Part_4','Q18_Part_5','Q18_Part_6','Q18_Part_7','Q18_Part_8','Q18_Part_9','Q18_Part_10','Q23','Q24_Part_1','Q24_Part_2','Q24_Part_3','Q24_Part_4','Q24_Part_5','Q24_Part_6','Q24_Part_7','Q24_Part_8','Q24_Part_9','Q24_Part_10','Q28_Part_1','Q28_Part_2','Q28_Part_3','Q28_Part_4','Q28_Part_5','Q28_Part_6','Q28_Part_7','Q28_Part_8','Q28_Part_9','Q28_Part_10']]
df_2
cleanup_nums = {"Q1": {"18-21": 0, "22-24": 0,"25-29": 1,"30-34": 1,
"35-39": 1,"40-44": 2,"45-49": 2,"50-54": 2,
"55-59": 3,"60-69": 3,"70+": 3},
# "Q2": {"Prefer not to say": 0,
# "Prefer to self-describe": 0,
# "Male": 1, "Female": 2},
"Q4": {"I prefer not to answer": 0,
"No formal education past high school": 0,
"Some college/university study without earning a bachelors degree": 1,
"Bachelors degree": 2,
"Masters degree": 3,
"Doctoral degree": 4,
"Professional degree": 5},
"Q5": {"Not employed": 0,
"Other": 0,
"Student": 0,
"Data Scientist": 1,
"Software Engineer": 2,
"Data Analyst": 3,
"Data Engineer": 4,
"Statistician": 5,
"DBA/Database Engineer": 6,
"Research Scientist": 7,
"Product/Project Manager": 8,
"Business Analyst": 9},
"Q10": {"0-999": 0,
"1,000-1,999": 0,
"2,000-2,999": 0,
"3,000-3,999": 0,
"4,000-4,999": 0,
"5,000-7,499": 0,
"7,500-9,999": 0,
"10,000-14,999": 0,
"15,000-19,999": 0,
"20,000-24,999": 0,
"25,000-29,999": 0,
"30,000-39,999": 0,
"40,000-49,999": 0,
"50,000-59,999": 1,
"60,000-69,999": 2,
"70,000-79,999": 3,
"80,000-89,999": 4,
"90,000-99,999": 5,
"100,000-124,999": 6,
"125,000-149,999": 7,
"150,000-199,999": 8,
"200,000-249,999": 9,
"250,000-299,999": 9,
"300,000-500,000": 9,
"> $500,000": 9},
"Q23": {"< 1 years": 0,
"1-2 years": 1,
"2-3 years": 1,
"3-4 years": 2,
"4-5 years": 2,
"5-10 years": 3,
"10-15 years": 4,
"20+ years": 4
}
}
df_3=df_2.replace(cleanup_nums)
# from sklearn.preprocessing import LabelEncoder
# le=LabelEncoder()
# Iterating over all the common columns in train and test
# for col in df_3.columns.values:
# if df_3[col].dtypes==object:
# if df_3[col] !=0:
# le.fit(df_3[col].values)
# df_3[col]=le.transform(df_3[col])
# Coerce every multi-select column to numeric.  Non-numeric entries (the
# text of a selected answer) become NaN under errors='coerce', which
# fillna(1) then marks as 1 ("selected").
# NOTE(review): fillna(1) treats *any* non-numeric cell — including
# genuinely missing ones — as selected; confirm that is the intent.
cols=['Q16_Part_1','Q16_Part_2','Q16_Part_3','Q16_Part_4','Q16_Part_5','Q16_Part_6','Q16_Part_7','Q16_Part_8','Q16_Part_9','Q16_Part_10','Q18_Part_1','Q18_Part_2','Q18_Part_3','Q18_Part_4','Q18_Part_5','Q18_Part_6','Q18_Part_7','Q18_Part_8','Q18_Part_9','Q18_Part_10','Q23','Q24_Part_1','Q24_Part_2','Q24_Part_3','Q24_Part_4','Q24_Part_5','Q24_Part_6','Q24_Part_7','Q24_Part_8','Q24_Part_9','Q24_Part_10','Q28_Part_1','Q28_Part_2','Q28_Part_3','Q28_Part_4','Q28_Part_5','Q28_Part_6','Q28_Part_7','Q28_Part_8','Q28_Part_9','Q28_Part_10']
for col in cols:
df_3[col]=pd.to_numeric(df_3[col], errors='coerce').fillna(1).astype(int)
# Per-question counts: how many of the ten options each respondent selected.
cols_Q16=['Q16_Part_1','Q16_Part_2','Q16_Part_3','Q16_Part_4','Q16_Part_5','Q16_Part_6','Q16_Part_7','Q16_Part_8','Q16_Part_9','Q16_Part_10']
df_3['Q16_count'] = np.count_nonzero(df_3[cols_Q16],axis=1)
cols_Q18=['Q18_Part_1','Q18_Part_2','Q18_Part_3','Q18_Part_4','Q18_Part_5','Q18_Part_6','Q18_Part_7','Q18_Part_8','Q18_Part_9','Q18_Part_10']
df_3['Q18_count'] = np.count_nonzero(df_3[cols_Q18],axis=1)
cols_Q24=['Q24_Part_1','Q24_Part_2','Q24_Part_3','Q24_Part_4','Q24_Part_5','Q24_Part_6','Q24_Part_7','Q24_Part_8','Q24_Part_9','Q24_Part_10']
df_3['Q24_count'] = np.count_nonzero(df_3[cols_Q24],axis=1)
cols_Q28=['Q28_Part_1','Q28_Part_2','Q28_Part_3','Q28_Part_4','Q28_Part_5','Q28_Part_6','Q28_Part_7','Q28_Part_8','Q28_Part_9','Q28_Part_10']
df_3['Q28_count'] = np.count_nonzero(df_3[cols_Q28],axis=1)
df_4=df_3[['Q1','Q4','Q5','Q10','Q23','Q16_count','Q18_count','Q24_count','Q28_count']]
# df_4.to_csv('data_x_cleaned.csv', index=False)
```
# 1. Job Title Prediction Model - SVM
### input user data -> job_input = [x,x,x,x,x,x,x,x]
### output -> ans1 = [job1,job2,job3]
```
# Train the job-title classifier: 85/15 train/test split of the cleaned survey data.
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(df_4,test_size=0.15, random_state=9832)
# Q5 (job title) is the prediction target; every other column is a feature.
X1_train = train_df.drop(['Q5'], axis=1)
Y1_train = train_df['Q5']
X1_test = test_df.drop(['Q5'], axis=1)
Y1_test = test_df['Q5']
from sklearn.svm import SVC, LinearSVC
# probability=True is required for the predict_proba calls made later on.
svc = SVC(probability=True) # instantiate
svc.fit(X1_train, Y1_train) # fit
acc_svc = svc.score(X1_test, Y1_test) # predict + evaluate
print('Support Vector Machines labeling accuracy:', str(round(acc_svc*100,2)),'%')
# from sklearn.externals import joblib
from joblib import dump, load
dump(svc, 'svc_jobs.joblib')
# lr = joblib.load('model.pkl')
import joblib
joblib.__version__
# One user's feature vector.  NOTE(review): order presumably matches df_4
# minus the dropped 'Q5' target, i.e. [Q1 age, Q4 education, Q10 salary,
# Q23 experience, Q16/Q18/Q24/Q28 counts] — confirm against df_4's columns.
job_input = [[1,2,2,3,1,2,2,3]]
k1_unchanged=svc.predict_proba(job_input)[0]  # class probabilities, original class order
k1=svc.predict_proba(job_input)[0]            # second copy; sorted in place below
ynew_result1 = svc.predict(job_input)         # single most likely job code
k1.sort()  # ascending, so k1[-1] is the largest probability
print(k1_unchanged)
print(k1)
print(ynew_result1)
Second=k1[-2]
Third=k1[-3]
Fourth=k1[-4]
if ynew_result1==0:
# Predicted class 0 is "Not employed / Other / Student" — skip it and
# recommend the next three most probable job titles instead.
Highest = np.where(k1_unchanged ==k1[-2])[0]
Sec_high = (np.where(k1_unchanged ==k1[-3])[0])
Third_high = (np.where(k1_unchanged ==k1[-4])[0])
ans1 = [Highest[0], Sec_high[0], Third_high[0]]
print(ans1)
else:
# Take the four most probable classes, then drop class 0 if present so
# only real job titles remain; the top three are reported.
# NOTE(review): class indices are recovered by matching probability
# *values* against the sorted copy, so exact ties between classes would
# confuse the lookup — only the first matching index is kept.
Highest = np.where(k1_unchanged ==k1[-1])[0]
Sec_high = (np.where(k1_unchanged ==k1[-2])[0])
Third_high = (np.where(k1_unchanged ==k1[-3])[0])
Four_high = (np.where(k1_unchanged ==k1[-4])[0])
ans1 = [Highest[0], Sec_high[0], Third_high[0], Four_high[0]]
if 0 in ans1: ans1.remove(0)
print(ans1[0:3])
# The recommended job codes feed the salary model in the next section.
salary_input = ans1
# salary_input = [[1, 4, 10, 4, 1, 4, 6, 6]]
```
# 2. Salary Range Prediction Model - SVM
### model input -> salary_model_input = [x,x,job input from ans1,x,x,x,x,x]
### output -> ans2 = [salary1,salary2,salary3]
```
# Train the salary-bracket classifier on its own 85/15 split.
from sklearn.model_selection import train_test_split
train_df2, test_df2 = train_test_split(df_4, test_size=0.15, random_state=100)
# BUG FIX: the original created train_df2/test_df2 above but then built
# X2/Y2 from the *job* model's train_df/test_df, so this split was computed
# and never used.  Use the new split, with Q10 (salary bracket) as target.
X2_train = train_df2.drop(['Q10'], axis=1)
Y2_train = train_df2['Q10']
X2_test = test_df2.drop(['Q10'], axis=1)
Y2_test = test_df2['Q10']
svc = SVC(probability=True)           # probability=True enables predict_proba later
svc.fit(X2_train, Y2_train)           # fit
acc_svc = svc.score(X2_test, Y2_test) # predict + evaluate on the held-out split
print('Support Vector Machines labeling accuracy:', str(round(acc_svc*100,2)),'%')
from joblib import dump, load
dump(svc, 'svc_salary.joblib')        # persist the trained model to disk
# For each recommended job code, predict a salary bracket.
ans2 = []
for x in salary_input:
# Feature vector with only the job-title slot (third column) filled in;
# every other survey feature is zeroed out.
salary_model_input = [[0,0,x,0,0,0,0,0]]
k_unchanged=svc.predict_proba(salary_model_input)[0]  # class probs, original order
k=svc.predict_proba(salary_model_input)[0]            # copy, sorted in place below
ynew_result = svc.predict(salary_model_input)
#print(k_unchanged)
k.sort()  # ascending: k[-1] is the top probability
#print(k)
if ynew_result==0:
# Predicted bracket 0 ("0-49,999"): skip it and report the runner-up.
Highest = np.where(k_unchanged ==k[-2])[0]
print(Highest)
ans2.append(Highest[0])
else:
# Otherwise report the top-probability bracket.
# NOTE(review): index recovered by matching the probability value;
# exact ties would yield multiple indices — only the first is kept.
Highest = np.where(k_unchanged ==k[-1])[0]
print(Highest)
ans2.append(Highest[0])
# ans1 = job recommended
# ans2 = salary range predicted based on ans1
print(ans1,ans2)
```
SalaryRange:
"0-49,999": 0,
"50,000-59,999": 1,
"60,000-69,999": 2,
"70,000-79,999": 3,
"80,000-89,999": 4,
"90,000-99,999": 5,
"100,000-124,999": 6,
"125,000-149,999": 7,
"150,000-199,999": 8,
"200,000+": 9
| github_jupyter |
# MLP on Simulated ORFs
Start with ORF_MLP_118 which had the simulator bug fix.
Evaluate MLP with wide,deep network.
Train on copious simulated data.
Use uniform but longer RNA lengths: 1500
Run on Alien.
79% accuracy.
```
import time
def show_time():
    """Print the current local date and time, including the timezone name."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(time.time()))
    print(stamp)
show_time()
PC_TRAINS=50000
NC_TRAINS=50000
PC_TESTS=5000
NC_TESTS=5000
RNA_LEN=1500
MAX_K = 3
INPUT_SHAPE=(None,84) # 4^3 + 4^2 + 4^1
NEURONS=128
DROP_RATE=0.30
EPOCHS=200
SPLITS=3
FOLDS=3 # make this 5 for serious testing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Flatten,TimeDistributed
from keras.losses import BinaryCrossentropy
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import sys

# Detect CoLab by whether google.colab is importable.
IN_COLAB = False
try:
    from google.colab import drive
    IN_COLAB = True
except:
    pass

if IN_COLAB:
    print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
    PATH='/content/drive/'
    #drive.mount(PATH,force_remount=True) # hardly ever need this
    drive.mount(PATH) # Google will require login credentials
    DATAPATH=PATH+'My Drive/data/' # must end in "/"
    import requests

    def _fetch_simtool(module_file):
        """Download one SimTools module from GitHub into the working directory."""
        url = 'https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/' + module_file
        r = requests.get(url)
        with open(module_file, 'w') as f:
            f.write(r.text)

    _fetch_simtool('RNA_describe.py')
    from RNA_describe import ORF_counter
    _fetch_simtool('RNA_gen.py')
    # FIX: Collection_Generator and Transcript_Oracle live in RNA_gen (the
    # file just downloaded), not GenCodeTools; the old import failed on CoLab.
    from RNA_gen import Collection_Generator,Transcript_Oracle
    _fetch_simtool('KmerTools.py')
    from KmerTools import KmerTools
    _fetch_simtool('DataPrep.py')
    from DataPrep import DataPrep
else:
    print("CoLab not working. On my PC, use relative paths.")
    DATAPATH='data/' # must end in "/"
    sys.path.append("..") # append parent dir in order to use sibling dirs
    from SimTools.RNA_describe import ORF_counter
    from SimTools.RNA_gen import Collection_Generator,Transcript_Oracle
    from SimTools.KmerTools import KmerTools
    from SimTools.DataPrep import DataPrep

# Model checkpoint locations used by training below.
BESTMODELPATH=DATAPATH+"BestModel"
LASTMODELPATH=DATAPATH+"LastModel"
```
## Data Load
```
show_time()
def make_generators(seq_len):
    """Build paired simulators: a protein-coding (transcript) generator and a
    non-coding generator, both with the same mean sequence length."""
    coding_gen = Collection_Generator()
    noncoding_gen = Collection_Generator()
    for gen in (coding_gen, noncoding_gen):
        gen.get_len_oracle().set_mean(seq_len)
    # Only the PC generator emits transcript-like (ORF-bearing) sequences.
    coding_gen.set_seq_oracle(Transcript_Oracle())
    return coding_gen, noncoding_gen
# Simulate coding (PC) and non-coding (NC) RNA; train + test pools together.
pc_sim,nc_sim = make_generators(RNA_LEN)
pc_all = pc_sim.get_sequences(PC_TRAINS+PC_TESTS)
nc_all = nc_sim.get_sequences(NC_TRAINS+NC_TESTS)
print("Generated",len(pc_all),"PC seqs")
print("Generated",len(nc_all),"NC seqs")
# Release the generators; only the sequence lists are needed from here on.
pc_sim=None
nc_sim=None
print("Simulated sequence characteristics:")
oc = ORF_counter()
print("PC seqs")
oc.describe_sequences(pc_all)
print("NC seqs")
oc.describe_sequences(nc_all)
oc=None
show_time()
```
## Data Prep
```
# Combine and shuffle positives/negatives, then featurize as k-mer frequencies.
dp = DataPrep()
Xseq,y=dp.combine_pos_and_neg(pc_all,nc_all)
# Release the raw pools (fixed: nc_all was redundantly set to None twice).
pc_all=None
nc_all=None
print("The first few shuffled labels:")
print(y[:30])
show_time()
Xfrq=KmerTools.seqs_to_kmer_freqs(Xseq,MAX_K)
Xseq = None  # free the raw sequences once featurized
y=np.asarray(y)
show_time()
# Assume X and y were shuffled, so a simple head/tail split is unbiased.
train_size=PC_TRAINS+NC_TRAINS
X_train=Xfrq[:train_size]
X_test=Xfrq[train_size:]
y_train=y[:train_size]
y_test=y[train_size:]
print("Training set size=",len(X_train),"=",len(y_train))
print("Reserved test set size=",len(X_test),"=",len(y_test))
Xfrq=None
y=None
show_time()
```
## Neural network
```
def make_DNN():
    """Build and compile the wide/deep MLP over k-mer frequency inputs."""
    dt=np.float32
    print("make_DNN")
    print("input shape:",INPUT_SHAPE)
    dnn = Sequential()
    # Three sigmoid hidden layers, each followed by dropout regularization.
    for _ in range(3):
        dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt))
        dnn.add(Dropout(DROP_RATE))
    # Single sigmoid unit for binary (PC vs NC) classification.
    dnn.add(Dense(1,activation="sigmoid",dtype=dt))
    dnn.compile(optimizer='adam',
                loss=BinaryCrossentropy(from_logits=False),
                metrics=['accuracy'])  # accuracy in addition to the loss
    dnn.build(input_shape=INPUT_SHAPE)
    return dnn
# Build one model outside cross-validation just to display the architecture.
model = make_DNN()
print(model.summary())
def do_cross_validation(X,y):
    """Train one fresh model per CV fold; return the model from the last fold.

    The best model across all folds/epochs (by val_accuracy) is saved to
    BESTMODELPATH by the checkpoint callback shared across folds.
    """
    cv_scores = []
    fold=0
    mycallbacks = [ModelCheckpoint(
        filepath=BESTMODELPATH, save_best_only=True,
        monitor='val_accuracy', mode='max')]
    # When shuffle=True, the valid indices are a random subset.
    # No need to shuffle here assuming data was shuffled above.
    splitter = KFold(n_splits=SPLITS,shuffle=False)
    model = None
    for train_index,valid_index in splitter.split(X):
        if fold < FOLDS:
            fold += 1
            X_train=X[train_index] # inputs for training
            y_train=y[train_index] # labels for training
            X_valid=X[valid_index] # inputs for validation
            y_valid=y[valid_index] # labels for validation
            print("MODEL")
            # Call constructor on each CV. Else, continually improves the same model.
            model = make_DNN()  # fixed: redundant "model = model =" double assignment
            print("FIT") # model.fit() implements learning
            start_time=time.time()
            history=model.fit(X_train, y_train,
                              epochs=EPOCHS,
                              verbose=1, # ascii art while learning
                              callbacks=mycallbacks, # called at end of each epoch
                              validation_data=(X_valid,y_valid))
            end_time=time.time()
            elapsed_time=(end_time-start_time)
            print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
            # print(history.history.keys()) # all these keys will be shown in figure
            pd.DataFrame(history.history).plot(figsize=(8,5))
            plt.grid(True)
            plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale
            plt.show()
    return model # parameters at end of training (last fold)
show_time()
# Run CV training; the checkpoint callback saves the best model to disk.
last_model = do_cross_validation(X_train,y_train)
best_model = load_model(BESTMODELPATH)
def show_test_AUC(model,X,y):
    """Plot a ROC curve for model on (X, y) against a no-skill baseline."""
    # No-skill baseline: predict the constant 0 for every sample.
    ns_probs = [0 for _ in range(len(y))]
    bm_probs = model.predict(X)
    ns_auc = roc_auc_score(y, ns_probs)
    bm_auc = roc_auc_score(y, bm_probs)
    ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
    bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
    plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
    plt.title('ROC')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend()
    plt.show()
    print("%s: %.2f%%" %('AUC',bm_auc*100.0))
def show_test_accuracy(model, X, y):
    """Evaluate the model on (X, y) and print its second metric (accuracy)."""
    results = model.evaluate(X, y, verbose=0)
    metric_name = model.metrics_names[1]
    print("%s: %.2f%%" % (metric_name, results[1] * 100))
# Report performance on the training data (best checkpointed model).
print("Accuracy on training data.")
show_time()
show_test_AUC(best_model,X_train,y_train)
show_test_accuracy(best_model,X_train,y_train)
show_time()
# Report performance on the held-out test data.
print("Accuracy on test data.")
show_time()
# NOTE(review): AUC is computed with last_model but accuracy with best_model;
# this looks inconsistent -- confirm which model the test report should use.
show_test_AUC(last_model,X_test,y_test)
show_test_accuracy(best_model,X_test,y_test)
show_time()
```
| github_jupyter |
# Data Upload Tutorial
* This notebook is a tutorial on how to upload data using Graphistry's REST API.
- Our REST API is designed to be language agnostic. For our Python specific API, please review the other notebooks in <https://github.com/graphistry/pygraphistry>
* For permission to upload to our public service, you **must** have an API key. Go to <www.graphistry.com/api-request> to receive a key.
* For more details, visit https://graphistry.github.io/docs/legacy/api/0.9.2/api.html for a full API Reference
#### Import the necessary libaries
```
import graphistry
import pandas
import requests
import random
import time
```
#### Set your API key and Graphistry Server Location
- To use our public server at **labs.graphistry.com**, you must have a valid API key
```
API_KEY = 'Go to www.graphistry.com/api-request to get your key!'  # replace with your real key
SERVER = 'labs.graphistry.com'  # public Graphistry server
# Current time is used to create a unique dataset name
current_time = str(int(time.time()))
```
## Create a dictionary describing the graph
- Visit https://graphistry.github.io/docs/legacy/api/0.9.2/api.html for full API reference
```
# Minimal edge-list dataset: "bindings" names the columns, "graph" holds the
# edges (with arbitrary extra edge attributes), "labels" holds node attributes.
datasetName = 'RestUploadTutorial-' + current_time
data = {
    "name": datasetName,
    "type": "edgelist",
    "bindings": {
        "sourceField": "src",
        "destinationField": "dst",
        "idField": "node"
    },
    "graph": [
        {"src": "myNode1", "dst": "myNode2",
         "myEdgeField1": "I'm an edge!", "myCount": 7},
        {"src": "myNode2", "dst": "myNode3",
         "myEdgeField1": "I'm also an edge!", "myCount": 200}
    ],
    "labels": [
        {"node": "myNode1",
         "myNodeField1": "I'm a node!",
         "pointColor": 5},
        {"node": "myNode2",
         "myNodeField1": "I'm node 2",
         "pointColor": 4},
        {"node": "myNode3",
         "myNodeField1": "I'm a node three!",
         "pointColor": 4}
    ]
}
```
### Post the json data to construct a graph vizualization dataset, and upload it to the server
```
params = {
    'key': API_KEY
}
# Upload the dataset to the server's ETL endpoint.
resp = requests.post('http://'+ SERVER +'/etl', params=params, json=data)
print(resp.status_code)  # fixed: Python 2 print statement -> Python 3 call
resp.raise_for_status()  # raise on any non-2xx upload response
```
## Embed the uploaded graph vizualization into the notebook using an IFrame
```
# The ETL response returns the server-side dataset name used in the embed URL.
datasetName = resp.json()['dataset']
url = 'http://' + SERVER + '/graph/graph.html?dataset=' + datasetName + '&splashAfter=' + str(int(time.time()))
from IPython.display import IFrame
IFrame(url, width=1000, height=500)
```
# Upload a workbook programmatically using a PUT request
```
import json
from pprint import pprint

# Load a previously exported workbook definition from disk.
with open('lesMiserablesWorkbook.json') as data_file:
    wb = json.load(data_file)
print(wb)

workbook_id = wb[u'id']
print(workbook_id)  # fixed: Python 2 print statement -> Python 3 call

params = {
    'key': API_KEY
}
# Upload the workbook JSON to the server.
resp = requests.post('http://'+ SERVER +'/workbook', params=params, json=wb)
print(resp.status_code)  # fixed: Python 2 print statement -> Python 3 call
resp.raise_for_status()
```
## Using the workbook on the Les Miserables dataset
```
# Apply the uploaded workbook to the public Les Miserables dataset.
url = 'http://' + SERVER + '/graph/graph.html?dataset=Miserables&workbook=%s' % wb['id']
from IPython.display import IFrame
IFrame(url, width=1000, height=500)
```
## Download the previously uploaded workbook using a GET request
```
# Fetch the workbook back by id to verify the upload round trip.
resp = requests.get('http://'+ SERVER +'/workbook/' + workbook_id)
print(resp.status_code)  # fixed: Python 2 print statement -> Python 3 call
resp.raise_for_status()
downloadedWorkbook = resp.json()
print(downloadedWorkbook)
```
| github_jupyter |
## __PPSO__ (Parallel Particle Swarm Optimisation)
Now we are going to implement a faster, parallel version of PSO, i.e. PPSO.
Let us first use the code from the [previous notebook](/notebooks/Basic%20PSO.ipynb)
```
%%file particle.py
#dependencies
import random
import math
import copy # for array copying
import sys
class Particle:
    """One PSO particle: a candidate position, a velocity, and a memory of
    the best position this individual has visited so far."""

    def __init__(self, x0, num_dimensions):
        self.position_i = []    # particle position
        self.velocity_i = []    # particle velocity
        self.pos_best_i = []    # best position found by this individual
        self.err_best_i = -1    # best error found by this individual (-1 = unset)
        self.err_i = -1         # current error (-1 = unset)
        self.num_dimensions = num_dimensions
        for i in range(0, self.num_dimensions):
            self.velocity_i.append(random.uniform(-1, 1))
            self.position_i.append(x0[i])

    # evaluate current fitness
    def evaluate(self, costFunc):
        self.err_i = costFunc(self.position_i)
        # check to see if the current position is an individual best
        if self.err_i < self.err_best_i or self.err_best_i == -1:
            # BUG FIX: copy the list instead of aliasing it; otherwise
            # update_position() silently mutates pos_best_i too, so the
            # "personal best" always equaled the current position.
            self.pos_best_i = copy.copy(self.position_i)
            self.err_best_i = self.err_i

    # update new particle velocity
    def update_velocity(self, pos_best_g):
        w = 0.5   # constant inertia weight (how much to weigh the previous velocity)
        c1 = 1    # cognitive constant
        c2 = 2    # social constant
        for i in range(0, self.num_dimensions):
            r1 = random.random()
            r2 = random.random()
            vel_cognitive = c1 * r1 * (self.pos_best_i[i] - self.position_i[i])
            vel_social = c2 * r2 * (pos_best_g[i] - self.position_i[i])
            self.velocity_i[i] = w * self.velocity_i[i] + vel_cognitive + vel_social

    # update the particle position based off new velocity updates
    def update_position(self, bounds):
        for i in range(0, self.num_dimensions):
            self.position_i[i] = self.position_i[i] + self.velocity_i[i]
            # clamp to the maximum position if necessary
            if self.position_i[i] > bounds[i][1]:
                self.position_i[i] = bounds[i][1]
            # clamp to the minimum position if necessary
            if self.position_i[i] < bounds[i][0]:
                self.position_i[i] = bounds[i][0]
from particle import Particle
import numba
def PSO(costFunc, x0, bounds, num_particles, maxiter):
    """Minimize costFunc with a particle swarm.

    x0:     starting position for every particle (fixes the dimensionality)
    bounds: per-dimension (min, max) pairs used to clamp positions
    """
    global num_dimensions
    num_dimensions = len(x0)

    err_best_g = -1    # best error for group (-1 = unset)
    pos_best_g = []    # best position for group

    # establish the swarm
    # BUG FIX: Particle.__init__ requires the dimension count; the original
    # call Particle(x0) raised a TypeError before any optimization began.
    swarm = []
    for i in range(0, num_particles):
        swarm.append(Particle(x0, num_dimensions))

    # begin optimization loop
    i = 0
    while i < maxiter:
        # cycle through particles in swarm and evaluate fitness
        for j in range(0, num_particles):
            swarm[j].evaluate(costFunc)
            # determine if current particle is the best (globally)
            if swarm[j].err_i < err_best_g or err_best_g == -1:
                pos_best_g = list(swarm[j].position_i)
                err_best_g = float(swarm[j].err_i)
        # cycle through swarm and update velocities and position
        for j in range(0, num_particles):
            swarm[j].update_velocity(pos_best_g)
            swarm[j].update_position(bounds)
        i += 1

    # print final results
    print ('\nFINAL:')
    print (pos_best_g)
    print (err_best_g)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/06_other_models/00_decision_trees_and_random_forests/colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#### Copyright 2020 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Decision Trees and Random Forests
In this lab we will apply decision trees and random forests to perform machine learning tasks. These two model types are relatively easy to understand, but they are very powerful tools.
Random forests build upon decision tree models, so we'll start by creating a decision tree and then move to random forests.
## Load Data
Let's start by loading some data. We'll use the familiar iris dataset from scikit-learn.
```
import pandas as pd
from sklearn.datasets import load_iris

# Load the iris bunch and flatten it into a single labeled DataFrame.
iris_bunch = load_iris()

feature_names = iris_bunch.feature_names
target_name = 'species'

iris_df = pd.DataFrame(
    iris_bunch.data,
    columns=feature_names
)
iris_df[target_name] = iris_bunch.target

iris_df.head()
```
## Decision Trees
Decision trees are models that create a tree structure that has a condition at each non-terminal leaf in the tree. The condition is used to choose which branch to traverse down the tree.
Let's see what this would look like with a simple example.
Let's say we want to determine if a piece of fruit is a lemon, lime, orange, or grapefruit. We might have a tree that looks like:
```txt
----------
-----------| color? |-----------
| ---------- |
| | |
<green> <orange> <yellow>
| | |
| | |
======== | =========
| lime | | | lemon |
======== --------- =========
-----| size? |-----
| --------- |
| |
<small> <large>
| |
| |
========== ==============
| orange | | grapefruit |
========== ==============
```
This would roughly translate to the following code:
```python
def fruit_type(fruit):
if fruit.color == "green":
return "lime"
if fruit.color == "yellow":
return "lemon"
if fruit.color == "orange":
if fruit.size == "small":
return "orange"
if fruit.size == "large":
return "grapefruit"
```
As you can see, the decision tree is very easy to interpret. If you use a decision tree to make predictions and then need to determine why the tree made the decision that it did, it is very easy to inspect.
Also, decision trees don't benefit from scaling or normalizing your data, which is different from many types of models.
### Create a Decision Tree
Now that we have the data loaded, we can create a decision tree. We'll use the [`DecisionTreeClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) from scikit-learn to perform this task.
Note that there is also a [`DecisionTreeRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) that can be used for regression models. In practice, you'll typically see decision trees applied to classification problems more than regression.
To build and train the model, we create an instance of the classifier and then call the `fit()` method that is used for all scikit-learn models.
```
from sklearn import tree

# Fit an unconstrained decision tree on the full iris dataset.
dt = tree.DecisionTreeClassifier()
dt.fit(
    iris_df[feature_names],
    iris_df[target_name]
)
```
If this were a real application, we'd keep some data to the side for testing.
### Visualize the Tree
We now have a decision tree and can use it to make predictions. But before we do that, let's take a look at the tree itself.
To do this we create a [`StringIO`](https://docs.python.org/3/library/io.html) object that we can export dot data to. [DOT](https://www.graphviz.org/doc/info/lang.html) is a graph description language with Python-graphing utilities that we can plot with.
```
import io
import pydotplus
from IPython.display import Image

# Export the fitted tree as DOT text, then render it as an inline PNG.
dot_data = io.StringIO()
tree.export_graphviz(
    dt,
    out_file=dot_data,
    feature_names=feature_names
)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
```
That tree looks pretty complex. Many branches in the tree is a sign that we may have overfit the model. Let's create the tree again; this time we'll limit the depth.
```
from sklearn import tree

# Refit with a depth cap to curb the overfitting seen in the full tree.
dt = tree.DecisionTreeClassifier(max_depth=2)
dt.fit(
    iris_df[feature_names],
    iris_df[target_name]
)
```
And plot to see the branching.
```
import io
import pydotplus
from IPython.display import Image

# Render the depth-limited tree the same way as before.
dot_data = io.StringIO()
tree.export_graphviz(
    dt,
    out_file=dot_data,
    feature_names=feature_names
)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
```
This tree is less likely to be overfitting since we forced it to have a depth of 2. Holding out a test sample and performing validation would be a good way to check.
What are the `gini`, `samples`, and `value` items shown in the tree?
`gini` is the *Gini impurity*. This is a measure of the chance that you'll misclassify a random element in the dataset at this decision point. Smaller `gini` is better.
`samples` is a count of the number of samples that have met the criteria to reach this leaf.
Within `value` is the count of each class of data that has made it to this leaf. Summing `value` should equal `samples`.
### Hyperparameters
There are many hyperparameters you can tweak in your decision tree models. One of those is `criterion`. `criterion` determines the quality measure that the model will use to determine the shape of the tree.
The possible `criterion` values are `gini` and `entropy`. `gini` is the [Gini Impurity](https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity) while `entropy` is a measure of [Information Gain](https://en.wikipedia.org/wiki/Decision_tree_learning#Information_gain).
In the example below, we switch the classifier to use "entropy" for `criterion`. You'll see in the resultant tree that we now see "entropy" instead of "gini", but the resultant trees are the same. For more complex models, though, it may be worthwhile to test the different criterion.
```
import io
import pydotplus
from IPython.display import Image
from sklearn import tree

# Same depth-2 tree, but split quality measured by information gain
# ("entropy") instead of the default Gini impurity.
dt = tree.DecisionTreeClassifier(
    max_depth=2,
    criterion="entropy"
)
dt.fit(
    iris_df[feature_names],
    iris_df[target_name]
)

dot_data = io.StringIO()
tree.export_graphviz(
    dt,
    out_file=dot_data,
    feature_names=feature_names
)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
```
We've limited the depth of the tree using `max_depth`. We can also limit the number of samples required to be present in a node for it to be considered for splitting using `min_samples_split`. We can also limit the minimum size of a leaf node using `min_samples_leaf`. All of these hyperparameters help you to prevent your model from overfitting.
There are many other hyperparameters that can be found in the [`DecisionTreeClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) documentation.
### Exercise 1: Tuning Decision Tree Hyperparameters
In this exercise we will use a decision tree to classify wine quality in the [Red Wine Quality dataset](https://www.kaggle.com/uciml/red-wine-quality-cortez-et-al-2009).
The target column in the dataset is `quality`. Quality is an integer value between 1 and 10 (inclusive). You'll use the other columns in the dataset to build a decision tree to predict wine quality.
For this exercise:
* Hold out some data for final testing of model generalization.
* Use [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) to compare some hyperparameters for your model. You can choose which parameters to test.
* Print the hyperparameters of the best performing model.
* Print the accuracy of the best performing model and the holdout dataset.
* Visualize the best performing tree.
Use as many text and code cells as you need to perform this exercise. We'll get you started with the code to authenticate and download the dataset.
First upload your `kaggle.json` file, and then run the code block below.
```
! chmod 600 kaggle.json && (ls ~/.kaggle 2>/dev/null || mkdir ~/.kaggle) && mv kaggle.json ~/.kaggle/ && echo 'Done'
```
Next, download the wine quality dataset.
```
! kaggle datasets download uciml/red-wine-quality-cortez-et-al-2009
! ls
```
##### **Student Solution**
```
# Your Code Goes Here
```
---
## Random Forests
Random forests are a simple yet powerful machine learning tool based on decision trees. Random forests are easy to understand, yet they touch upon many advanced machine learning concepts, such as ensemble learning and bagging. These models can be used for both classification and regression. Also, since they are built from decision trees, they are not sensitive to unscaled data.
You can think of a random forest as a group decision made by a number of decision trees. For classification problems, the random forest creates multiple decision trees with different subsets of the data. When it is asked to classify a data point, it will ask all of the trees what they think and then take the majority decision.
For regression problems, the random forest will again use the opinions of multiple decision trees, but it will take the mean (or some other summation) of the responses and use that as the regression value.
This type of modeling, where one model consists of other models, is called *ensemble learning*. Ensemble learning can often lead to better models because taking the combined, differing opinions of a group of models can reduce overfitting.
### Create a Random Forest
Creating a random forest is as easy as creating a decision tree.
scikit-learn provides a [`RandomForestClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) and a [`RandomForestRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html), which can be used to combine the predictive power of multiple decision trees.
```
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

# Reload iris and fit a random forest (default: 100 bootstrapped trees).
iris_bunch = load_iris()

feature_names = iris_bunch.feature_names
target_name = 'species'

iris_df = pd.DataFrame(
    iris_bunch.data,
    columns=feature_names
)
iris_df[target_name] = iris_bunch.target

rf = RandomForestClassifier()
rf.fit(
    iris_df[feature_names],
    iris_df[target_name]
)
```
You can look at different trees in the random forest to see how their decision branching differs. By default there are `100` decision trees created for the model.
Let's view a few.
Run the code below a few times, and see if you notice a difference in the trees that are shown.
```
import pydotplus
import random
import io
from IPython.display import Image

# FIX: sklearn.externals.six was removed from modern scikit-learn; use the
# standard library's io.StringIO instead (matching the earlier cells).
dot_data = io.StringIO()
tree.export_graphviz(
    random.choice(rf.estimators_),   # inspect one randomly chosen tree
    out_file=dot_data,
    feature_names=feature_names
)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
```
### Make Predictions
Just like any other scikit-learn model, you can use the `predict()` method to make predictions.
```
# Predict the class of a single sample; note the extra list nesting because
# predict() expects a 2-D array of samples.
print(rf.predict([iris_df.iloc[121][feature_names]]))
```
### Hyperparameters
Many of the hyperparameters available in decision trees are also available in random forest models. There are, however, some hyperparameters that are only available in random forests.
The two most important are `bootstrap` and `oob_score`. These two hyperparameters are relevant to ensemble learning.
`bootstrap` determines if the model will use [bootstrap sampling](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)). When you bootstrap, only a sample of the dataset will be used for training each tree in the forest. The full dataset will be used as the source of the sampling for each tree, but each sample will have a different set of data points, perhaps with some repetition. In bootstrapping, there is also "replacement" of the data, which means a data point can occur in more than one tree.
`oob_score` stands for "Out of bag score." When you create a bootstrap sample, this is referred to as a *bag* in machine learning parlance. When the tree is being scored, only data points in the bag sampled for the tree will be used unless `oob_score` is set to true.
### Exercise 2: Feature Importance
In this exercise we will use the [UCI Abalone dataset](https://www.kaggle.com/hurshd0/abalone-uci) to determine the age of sea snails.
The target feature in the dataset is `rings`, which is a proxy for age in the snails. This is a numeric value, but it is stored as an integer and has a biological limit. So we can think of this as a classification problem and use a [`RandomForestClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html).
You will download the dataset and train a random forest classifier. After you have fit the classifier, the `feature_importances_` attribute of the model will be populated. Use the importance scores to print the least important feature.
*Note that some of the features are categorical string values. You'll need to convert these to numeric values to use them in the model.*
Use as many text and code blocks as you need to perform this exercise.
#### **Student Solution**
```
# Your Code Goes Here
```
---
| github_jupyter |
<a href="https://colab.research.google.com/github/pabair/rl-course-ss21/blob/main/solutions/S6_LunarLander_PolicyBased.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Install Dependencies
```
# source: https://medium.com/coinmonks/landing-a-rocket-with-simple-reinforcement-learning-3a0265f8b58c
!pip3 install box2d-py
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
import matplotlib.pyplot as plt
from collections import deque
torch.manual_seed(1)
np.random.seed(1)
```
# Neural Network
```
class Net(nn.Module):
    """Two-layer MLP policy head: observation -> hidden (ReLU) -> action logits."""

    def __init__(self, obs_size, hidden_size, n_actions):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(obs_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, n_actions)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        return logits
```
# Generate Episodes
```
def generate_batch(env, batch_size, t_max=5000):
    """Roll out batch_size episodes with the current policy.

    Returns (states, actions, total_rewards): per-episode lists of visited
    states, taken actions, and the scalar episode return.
    NOTE(review): relies on the module-level `net` defined later in the
    notebook -- confirm it exists before this is first called.
    """
    activation = nn.Softmax(dim=1)
    batch_actions,batch_states, batch_rewards = [],[],[]
    for b in range(batch_size):
        states,actions = [],[]
        total_reward = 0
        s = env.reset()
        for t in range(t_max):
            # Sample an action from the softmax policy distribution.
            s_v = torch.FloatTensor([s])
            act_probs_v = activation(net(s_v))
            act_probs = act_probs_v.data.numpy()[0]
            a = np.random.choice(len(act_probs), p=act_probs)
            new_s, r, done, info = env.step(a)
            #record sessions like you did before
            states.append(s)
            actions.append(a)
            total_reward += r
            s = new_s
            if done:
                # Only finished episodes are recorded in the batch.
                batch_actions.append(actions)
                batch_states.append(states)
                batch_rewards.append(total_reward)
                break
    return batch_states, batch_actions, batch_rewards
```
# Training
```
def filter_batch(states_batch, actions_batch, rewards_batch, percentile):
    """Keep only the (state, action) pairs from elite episodes.

    An episode is elite when its total reward is STRICTLY above the given
    percentile of all batch rewards.
    """
    reward_threshold = np.percentile(rewards_batch, percentile)
    elite_states, elite_actions = [], []
    for reward, states, actions in zip(rewards_batch, states_batch, actions_batch):
        if reward > reward_threshold:
            elite_states.extend(states)
            elite_actions.extend(actions)
    return elite_states, elite_actions
# Cross-entropy-method hyperparameters.
batch_size = 100          # episodes per training iteration
session_size = 500        # maximum training iterations
percentile = 80           # elite-episode cutoff
hidden_size = 200         # policy-network hidden width
completion_score = 100    # mean reward considered "solved"
learning_rate = 0.01

env = gym.make("LunarLander-v2")
n_states = env.observation_space.shape[0]
n_actions = env.action_space.n

#neural network
net = Net(n_states, hidden_size, n_actions)
#loss function
objective = nn.CrossEntropyLoss()
#optimisation function
optimizer = optim.Adam(params=net.parameters(), lr=learning_rate)

for i in range(session_size):
    #generate new sessions
    batch_states, batch_actions, batch_rewards = generate_batch(env, batch_size, t_max=500)
    elite_states, elite_actions = filter_batch(batch_states, batch_actions, batch_rewards, percentile)

    # Supervised step: push the policy to imitate the elite actions.
    optimizer.zero_grad()
    tensor_states = torch.FloatTensor(elite_states)
    tensor_actions = torch.LongTensor(elite_actions)
    action_scores_v = net(tensor_states)
    loss_v = objective(action_scores_v, tensor_actions)
    loss_v.backward()
    optimizer.step()

    #show results
    mean_reward, threshold = np.mean(batch_rewards), np.percentile(batch_rewards, percentile)
    print("%d: loss=%.3f, reward_mean=%.1f, reward_threshold=%.1f" % (
        i, loss_v.item(), mean_reward, threshold))

    # Stop early once the mean batch reward clears the completion score.
    if np.mean(batch_rewards)> completion_score:
        print("Environment has been successfullly completed!")
        break
```
# Evaluation
```
import time

FPS = 25                  # playback frame rate for rendering
record_folder="video"     # Monitor writes episode videos here

env = gym.make('LunarLander-v2')
env = gym.wrappers.Monitor(env, record_folder, force=True)
state = env.reset()
total_reward = 0.0
activation = nn.Softmax(dim=1)

# Run one full episode, sampling actions from the trained policy.
while True:
    start_ts = time.time()
    env.render()
    s_v = torch.FloatTensor([state])
    act_probs_v = activation(net(s_v))
    act_probs = act_probs_v.data.numpy()[0]
    a = np.random.choice(len(act_probs), p=act_probs)
    state, reward, done, _ = env.step(a)
    total_reward += reward
    if done:
        break
    # Sleep the remainder of the frame budget to render at roughly FPS.
    delta = 1/FPS - (time.time() - start_ts)
    if delta > 0:
        time.sleep(delta)
print("Total reward: %.2f" % total_reward)
env.close()
```
| github_jupyter |
# Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis.
>Using an RNN rather than a strictly feedforward network is more accurate since we can include information about the *sequence* of words.
Here we'll use a dataset of movie reviews, accompanied by sentiment labels: positive or negative.
<img src="assets/reviews_ex.png" width=40%>
### Network Architecture
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=40%>
>**First, we'll pass in words to an embedding layer.** We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the Word2Vec lesson. You can actually train an embedding with the Skip-gram Word2Vec model and use those embeddings as input, here. However, it's good enough to just have an embedding layer and let the network learn a different embedding table on its own. *In this case, the embedding layer is for dimensionality reduction, rather than for learning semantic representations.*
>**After input words are passed to an embedding layer, the new embeddings will be passed to LSTM cells.** The LSTM cells will add *recurrent* connections to the network and give us the ability to include information about the *sequence* of words in the movie review data.
>**Finally, the LSTM outputs will go to a sigmoid output layer.** We're using a sigmoid function because positive and negative = 1 and 0, respectively, and a sigmoid will output predicted, sentiment values between 0-1.
We don't care about the sigmoid outputs except for the **very last one**; we can ignore the rest. We'll calculate the loss by comparing the output at the last time step and the training label (pos or neg).
---
### Load in and visualize the data
```
import numpy as np

# read data from text files
with open('data/reviews.txt', 'r') as f:
    reviews = f.read()
with open('data/labels.txt', 'r') as f:
    labels = f.read()

# Peek at the raw text; reviews and labels are newline-delimited.
print(reviews[:1000])
print()
print(labels[:20])
```
## Data pre-processing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. Here are the processing steps, we'll want to take:
>* We'll want to get rid of periods and extraneous punctuation.
* Also, you might notice that the reviews are delimited with newline characters `\n`. To deal with those, I'm going to split the text into each review using `\n` as the delimiter.
* Then I can combined all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
```
from string import punctuation

# get rid of punctuation
reviews = reviews.lower() # lowercase, standardize
all_text = ''.join([c for c in reviews if c not in punctuation])

# split by new lines and spaces
reviews_split = all_text.split('\n')
all_text = ' '.join(reviews_split)

# create a list of words
words = all_text.split()
words[:30]
```
### Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
> **Exercise:** Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers **start at 1, not 0**.
> Also, convert the reviews to integers and store the reviews in a new list called `reviews_ints`.
```
# feel free to use this import
from collections import Counter

## Build a dictionary that maps words to integers.
# Most-frequent word gets index 1; index 0 is reserved for padding later.
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}

## use the dict to tokenize each review in reviews_split
## store the tokenized reviews in reviews_ints
reviews_ints = []
for review in reviews_split:
    reviews_ints.append([vocab_to_int[word] for word in review.split()])
```
**Test your code**
As a test that you've implemented the dictionary correctly, print out the number of unique words in your vocabulary and the contents of the first, tokenized review.
```
# Sanity checks on the encoding step.
# Vocabulary size: number of distinct words that received an id.
print('Unique words: ', len((vocab_to_int))) # should ~ 74000+
print()
# First review shown as its integer-id sequence.
print('Tokenized review: \n', reviews_ints[:1])
```
### Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
> **Exercise:** Convert labels from `positive` and `negative` to 1 and 0, respectively, and place those in a new list, `encoded_labels`.
```
# Convert the newline-separated label strings to a binary vector:
# 'positive' -> 1, anything else ('negative') -> 0.
labels_split = labels.split('\n')
encoded_labels = np.array([int(label == 'positive') for label in labels_split])
```
### Removing Outliers
As an additional pre-processing step, we want to make sure that our reviews are in good shape for standard processing. That is, our network will expect a standard input text size, and so, we'll want to shape our reviews into a specific length. We'll approach this task in two main steps:
1. Getting rid of extremely long or short reviews; the outliers
2. Padding/truncating the remaining data so that we have reviews of the same length.
Before we pad our review text, we should check for reviews of extremely short or long lengths; outliers that may mess with our training.
```
# Distribution of review lengths (in tokens), used to spot outliers.
review_lens = Counter([len(x) for x in reviews_ints])
# Zero-length reviews (e.g. from a trailing blank line) must be removed below.
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
```
Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. We'll have to remove any super short reviews and truncate super long reviews. This removes outliers and should allow our model to train more efficiently.
> **Exercise:** First, remove *any* reviews with zero length from the `reviews_ints` list and their corresponding label in `encoded_labels`.
```
print('Number of reviews before removing outliers: ', len(reviews_ints))

# Drop every zero-length review together with its label, keeping the two
# sequences aligned by filtering them in a single zipped pass.
kept = [(review, label) for review, label in zip(reviews_ints, encoded_labels)
        if len(review) != 0]
reviews_ints = [review for review, _ in kept]
encoded_labels = np.array([label for _, label in kept])

print('Number of reviews after removing outliers: ', len(reviews_ints))
```
---
## Padding sequences
To deal with both short and very long reviews, we'll pad or truncate all our reviews to a specific length. For reviews shorter than some `seq_length`, we'll pad with 0s. For reviews longer than `seq_length`, we can truncate them to the first `seq_length` words. A good `seq_length`, in this case, is 200.
> **Exercise:** Define a function that returns an array `features` that contains the padded data, of a standard size, that we'll pass to the network.
* The data should come from `reviews_ints`, since we want to feed integers to the network.
* Each row should be `seq_length` elements long.
* For reviews shorter than `seq_length` words, **left pad** with 0s. That is, if the review is `['best', 'movie', 'ever']`, `[117, 18, 128]` as integers, the row will look like `[0, 0, 0, ..., 0, 117, 18, 128]`.
* For reviews longer than `seq_length`, use only the first `seq_length` words as the feature vector.
As a small example, if the `seq_length=10` and an input review is:
```
[117, 18, 128]
```
The resultant, padded sequence should be:
```
[0, 0, 0, 0, 0, 0, 0, 117, 18, 128]
```
**Your final `features` array should be a 2D array, with as many rows as there are reviews, and as many columns as the specified `seq_length`.**
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
```
def pad_features(reviews_ints, seq_length):
    ''' Return features of review_ints, where each review is padded with 0's
        or truncated to the input seq_length.

        Shorter reviews are right-aligned (left-padded with 0); longer ones
        keep only their first seq_length tokens. Returns a 2D int ndarray of
        shape (len(reviews_ints), seq_length).
    '''
    features = np.zeros((len(reviews_ints), seq_length), dtype=int)
    for i, tokens in enumerate(reviews_ints):
        truncated = tokens[:seq_length]
        # write the (possibly truncated) review flush against the right edge
        features[i, seq_length - len(truncated):] = truncated
    return features
# Test your implementation!
seq_length = 200
features = pad_features(reviews_ints, seq_length=seq_length)

## test statements - do not change - ##
assert len(features)==len(reviews_ints), "Your features should have as many rows as reviews."
assert len(features[0])==seq_length, "Each feature row should contain seq_length values."

# print first 10 values of the first 30 batches
print(features[:30,:10])
```
## Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
> **Exercise:** Create the training, validation, and test sets.
* You'll need to create sets for the features and the labels, `train_x` and `train_y`, for example.
* Define a split fraction, `split_frac` as the fraction of data to **keep** in the training set. Usually this is set to 0.8 or 0.9.
* Whatever data is left will be split in half to create the validation and *testing* data.
```
# Fraction of the data kept for training; the remainder is split 50/50
# into validation and test sets.
split_frac = 0.8

n_train = int(len(features) * split_frac)
train_x, train_y = features[:n_train], encoded_labels[:n_train]
remaining_x, remaining_y = features[n_train:], encoded_labels[n_train:]

n_val = int(len(remaining_x) * 0.5)
val_x, val_y = remaining_x[:n_val], remaining_y[:n_val]
test_x, test_y = remaining_x[n_val:], remaining_y[n_val:]

## print out the shapes of your resultant feature data
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
      "\nValidation set: \t{}".format(val_x.shape),
      "\nTest set: \t\t{}".format(test_x.shape))
```
**Check your work**
With train, validation, and test fractions equal to 0.8, 0.1, 0.1, respectively, the final, feature data shapes should look like:
```
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
```
---
## DataLoaders and Batching
After creating training, test, and validation data, we can create DataLoaders for this data by following two steps:
1. Create a known format for accessing our data, using [TensorDataset](https://pytorch.org/docs/stable/data.html#) which takes in an input set of data and a target set of data with the same first dimension, and creates a dataset.
2. Create DataLoaders and batch our training, validation, and test Tensor datasets.
```
# Example usage (batch_size must already be defined):
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
train_loader = DataLoader(train_data, batch_size=batch_size)
```
This is an alternative to creating a generator function for batching our data into full batches.
```
import torch
from torch.utils.data import TensorDataset, DataLoader

# Wrap the numpy splits as tensor datasets.
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
valid_data = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))

# dataloaders
batch_size = 50

# make sure to SHUFFLE your training data
# NOTE(review): the training loop below builds a fixed-size hidden state, so
# every batch must have exactly batch_size rows — the split sizes here
# (20000/2500/2500) divide evenly by 50; pass drop_last=True if they ever don't.
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)

# obtain one batch of training data
# Fix: DataLoader iterators expose no .next() method on current PyTorch /
# Python 3 — use the builtin next() instead of dataiter.next().
dataiter = iter(train_loader)
sample_x, sample_y = next(dataiter)

print('Sample input size: ', sample_x.size()) # batch_size, seq_length
print('Sample input: \n', sample_x)
print()
print('Sample label size: ', sample_y.size()) # batch_size
print('Sample label: \n', sample_y)
```
---
# Sentiment Network with PyTorch
Below is where you'll define the network.
<img src="assets/network_diagram.png" width=40%>
The layers are as follows:
1. An [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) that converts our word tokens (integers) into embeddings of a specific size.
2. An [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) defined by a hidden_state size and number of layers
3. A fully-connected output layer that maps the LSTM layer outputs to a desired output_size
4. A sigmoid activation layer which turns all outputs into a value 0-1; return **only the last sigmoid output** as the output of this network.
### The Embedding Layer
We need to add an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) because there are 74000+ words in our vocabulary. It is massively inefficient to one-hot encode that many classes. So, instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using Word2Vec, then load it here. But, it's fine to just make a new layer, using it for only dimensionality reduction, and let the network learn the weights.
### The LSTM Layer(s)
We'll create an [LSTM](https://pytorch.org/docs/stable/nn.html#lstm) to use in our recurrent network, which takes in an input_size, a hidden_dim, a number of layers, a dropout probability (for dropout between multiple layers), and a batch_first parameter.
Most of the time, your network will have better performance with more layers; between 2-3. Adding more layers allows the network to learn really complex relationships.
> **Exercise:** Complete the `__init__`, `forward`, and `init_hidden` functions for the SentimentRNN model class.
Note: `init_hidden` should initialize the hidden and cell state of an lstm layer to all zeros, and move those state to GPU, if available.
```
# Detect whether CUDA is available; later cells use this flag to decide
# where tensors and the model live.
train_on_gpu = torch.cuda.is_available()
message = 'Training on GPU.' if train_on_gpu else 'No GPU available, training on CPU.'
print(message)
import torch.nn as nn

class SentimentRNN(nn.Module):
    """
    The RNN model that will be used to perform Sentiment analysis.

    Architecture: embedding -> stacked LSTM -> dropout -> linear -> sigmoid.
    Only the sigmoid output at the LAST time step of each sequence is
    returned as the prediction.
    """

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
        """
        Initialize the model by setting up the layers.
        """
        super(SentimentRNN, self).__init__()

        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim

        # embedding and LSTM layers
        # (drop_prob applies only BETWEEN stacked LSTM layers)
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True)

        # dropout layer applied to the LSTM outputs
        self.dropout = nn.Dropout(0.3)

        # linear and sigmoid layers
        self.fc = nn.Linear(hidden_dim, output_size)
        self.sig = nn.Sigmoid()

    def forward(self, x, hidden):
        """
        Perform a forward pass of our model on some input and hidden state.

        x: batch of token ids, shape (batch, seq_len); cast to long here.
        hidden: (h, c) tuple as produced by init_hidden().
        Returns (last-step sigmoid output of shape (batch,), new hidden).
        """
        batch_size = x.size(0)

        # embeddings and lstm_out
        x = x.long()
        embeds = self.embedding(x)
        lstm_out, hidden = self.lstm(embeds, hidden)

        # stack up lstm outputs: (batch*seq_len, hidden_dim)
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)

        # dropout and fully-connected layer
        out = self.dropout(lstm_out)
        out = self.fc(out)
        # sigmoid function
        sig_out = self.sig(out)

        # reshape to be batch_size first, then keep only the last time step
        sig_out = sig_out.view(batch_size, -1)
        sig_out = sig_out[:, -1] # get last batch of labels

        # return last sigmoid output and hidden state
        return sig_out, hidden

    def init_hidden(self, batch_size):
        ''' Initializes hidden state.

        Creates two zeroed tensors (hidden state and cell state) of size
        n_layers x batch_size x hidden_dim, moved to GPU when the
        module-level train_on_gpu flag (set in an earlier cell) is True.
        '''
        # new tensors created from the weights inherit their dtype
        weight = next(self.parameters()).data

        if (train_on_gpu):
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())

        return hidden
```
## Instantiate the network
Here, we'll instantiate the network. First up, defining the hyperparameters.
* `vocab_size`: Size of our vocabulary or the range of values for our input, word tokens.
* `output_size`: Size of our desired output; the number of class scores we want to output (pos/neg).
* `embedding_dim`: Number of columns in the embedding lookup table; size of our embeddings.
* `hidden_dim`: Number of units in the hidden layers of our LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
* `n_layers`: Number of LSTM layers in the network. Typically between 1-3
> **Exercise:** Define the model hyperparameters.
```
# Instantiate the model w/ hyperparams
vocab_size = len(vocab_to_int)+1 # +1 for the 0 padding + our word tokens
output_size = 1  # single sigmoid score per review
embedding_dim = 400
hidden_dim = 256
n_layers = 2  # stacked LSTM layers

net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
```
---
## Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. You can also add code to save a model by name.
>We'll also be using a new kind of cross entropy loss, which is designed to work with a single Sigmoid output. [BCELoss](https://pytorch.org/docs/stable/nn.html#bceloss), or **Binary Cross Entropy Loss**, applies cross entropy loss to a single value between 0 and 1.
We also have some data and training hyperparameters:
* `lr`: Learning rate for our optimizer.
* `epochs`: Number of times to iterate through the training dataset.
* `clip`: The maximum gradient value to clip at (to prevent exploding gradients).
```
# loss and optimization functions
lr = 0.001
# BCELoss expects probabilities — the network's final layer is a sigmoid.
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

# training params
epochs = 4 # 3-4 is approx where I noticed the validation loss stop decreasing
counter = 0  # global step count across all epochs
print_every = 100  # run validation and log every this many steps
clip = 5 # gradient clipping

# move model to GPU, if available
if (train_on_gpu):
    net.cuda()

net.train()
# train for some number of epochs
for e in range(epochs):
    # initialize hidden state (assumes every batch has exactly batch_size rows)
    h = net.init_hidden(batch_size)

    # batch loop
    for inputs, labels in train_loader:
        counter += 1

        if (train_on_gpu):
            inputs, labels = inputs.cuda(), labels.cuda()

        # Creating new variables for the hidden state, otherwise
        # we'd backprop through the entire training history
        h = tuple([each.data for each in h])

        # zero accumulated gradients
        net.zero_grad()

        # get the output from the model
        output, h = net(inputs, h)

        # calculate the loss and perform backprop
        loss = criterion(output.squeeze(), labels.float())
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(net.parameters(), clip)
        optimizer.step()

        # loss stats
        if counter % print_every == 0:
            # Get validation loss
            val_h = net.init_hidden(batch_size)
            val_losses = []
            net.eval()
            for inputs, labels in valid_loader:
                # Creating new variables for the hidden state, otherwise
                # we'd backprop through the entire training history
                val_h = tuple([each.data for each in val_h])

                if (train_on_gpu):
                    inputs, labels = inputs.cuda(), labels.cuda()

                output, val_h = net(inputs, val_h)
                val_loss = criterion(output.squeeze(), labels.float())
                val_losses.append(val_loss.item())

            # back to training mode after validation
            net.train()
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Step: {}...".format(counter),
                  "Loss: {:.6f}...".format(loss.item()),
                  "Val Loss: {:.6f}".format(np.mean(val_losses)))
```
---
## Testing
There are a few ways to test your network.
* **Test data performance:** First, we'll see how our trained model performs on all of our defined test_data, above. We'll calculate the average loss and accuracy over the test data.
* **Inference on user-generated data:** Second, we'll see if we can input just one example review at a time (without a label), and see what the trained model predicts. Looking at new, user input data like this, and predicting an output label, is called **inference**.
```
# Get test data loss and accuracy
test_losses = [] # track loss
num_correct = 0

# init hidden state (assumes every test batch has exactly batch_size rows)
h = net.init_hidden(batch_size)

net.eval()
# iterate over test data
for inputs, labels in test_loader:
    # Creating new variables for the hidden state, otherwise
    # we'd backprop through the entire training history
    h = tuple([each.data for each in h])

    if (train_on_gpu):
        inputs, labels = inputs.cuda(), labels.cuda()

    # get predicted outputs
    output, h = net(inputs, h)

    # calculate loss
    test_loss = criterion(output.squeeze(), labels.float())
    test_losses.append(test_loss.item())

    # convert output probabilities to predicted class (0 or 1)
    pred = torch.round(output.squeeze())  # rounds to the nearest integer

    # compare predictions to true label (move to CPU first when on GPU)
    correct_tensor = pred.eq(labels.float().view_as(pred))
    correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
    num_correct += np.sum(correct)

# -- stats! -- ##
# avg test loss
print("Test loss: {:.3f}".format(np.mean(test_losses)))

# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
```
### Inference on a test review
You can change this test_review to any text that you want. Read it and think: is it pos or neg? Then see if your model predicts correctly!
> **Exercise:** Write a `predict` function that takes in a trained net, a plain text_review, and a sequence length, and prints out a custom statement for a positive or negative review!
* You can use any functions that you've already defined or define any helper functions you want to complete `predict`, but it should just take in a trained net, a text review, and a sequence length.
```
# negative test review
test_review_neg = 'The worst movie I have seen; acting was terrible and I want my money back. This movie had bad acting and the dialogue was slow.'

from string import punctuation

def tokenize_review(test_review):
    """Lowercase, strip punctuation, and map words to their vocab ids.

    Words missing from vocab_to_int map to 0 (the padding id).
    Returns a list containing ONE list of ids, matching pad_features' input.
    """
    cleaned = test_review.lower().translate(str.maketrans('', '', punctuation))
    ids = [vocab_to_int.get(word, 0) for word in cleaned.split()]
    return [ids]

# test code and generate tokenized review
test_ints = tokenize_review(test_review_neg)
print(test_ints)

# test sequence padding
seq_length = 200
features = pad_features(test_ints, seq_length)
print(features)

# test conversion to tensor and pass into your model
feature_tensor = torch.from_numpy(features)
print(feature_tensor.size())
def predict(net, test_review, sequence_length=200):
    """Print whether the trained net classifies test_review as positive.

    net: trained SentimentRNN; test_review: raw review text;
    sequence_length: pad/truncate length (should match training).
    """
    net.eval()

    # tokenize review
    test_ints = tokenize_review(test_review)

    # pad tokenized sequence
    seq_length = sequence_length
    features = pad_features(test_ints, seq_length)

    # convert to tensor to pass into your model
    feature_tensor = torch.from_numpy(features)

    batch_size = feature_tensor.size(0)

    # initialize hidden state
    h = net.init_hidden(batch_size)

    if (train_on_gpu):
        feature_tensor = feature_tensor.cuda()

    # get the output from the model
    output, h = net(feature_tensor, h)

    # convert output probabilities to predicted class (0 or 1)
    pred = torch.round(output.squeeze())
    # printing output value, before rounding
    print('Prediction value, pre-rounding: {:.6f}'.format(output.item()))

    # print custom response
    if (pred.item() == 1):
        print("Positive review detected!")
    else:
        print("Negative review detected.")

# positive test review
test_review_pos = 'This movie had the best acting and the dialogue was so good. I loved it.'

# call function
seq_length = 200 # good to use the length that was trained on
predict(net, test_review_neg, seq_length)
```
### Try out test_reviews of your own!
Now that you have a trained model and a predict function, you can pass in _any_ kind of text and this model will predict whether the text has a positive or negative sentiment. Push this model to its limits and try to find what words it associates with positive or negative.
Later, you'll learn how to deploy a model like this to a production environment so that it can respond to any kind of user data put into a web app!
| github_jupyter |
```
# Building the CNN
# NOTE(review): this script uses the legacy Keras 1.x-style API throughout
# (Convolution2D(filters, 3, 3), Dense(output_dim=...), fit_generator with
# samples_per_epoch, predict_classes). These names are removed in modern
# tf.keras — confirm the pinned keras version before reusing this cell.
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import load_model
from keras.callbacks import EarlyStopping

# Initializing the CNN
classifier = Sequential()

# Step 1 - Convolution: 16 3x3 filters over 64x64 RGB input
classifier.add(Convolution2D(16, 3, 3, input_shape = (64, 64, 3),activation = 'relu'))

# Step 2 - Max Pooling
classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding a second convolution layer
# NOTE(review): 5120 filters is unusually wide for a second conv layer —
# possibly a typo for 512; verify before retraining.
classifier.add(Convolution2D(5120, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding a third convolution layer
classifier.add(Convolution2D(128, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding a 4th convolution layer
classifier.add(Convolution2D(512, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding a 5th convolution layer
#classifier.add(Convolution2D(64, 3, 3, activation = 'relu'))
#classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding a 6th convolution layer
#classifier.add(Convolution2D(256, 3, 3, activation = 'relu'))
#classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding a 5th convolution layer
#classifier.add(Convolution2D(512, 3, 3, activation = 'relu'))
#classifier.add(MaxPooling2D(pool_size = (2,2)))

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full Connection: 128 hidden units, 30 output classes
classifier.add(Dense(output_dim = 128, activation= 'relu'))
classifier.add(Dense(output_dim = 30,activation= 'softmax'))

# Compiling the CNN
classifier.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# Part 2 - Fitting the CNN to the image
from keras.preprocessing.image import ImageDataGenerator

# Augment the training images; only rescale the validation images.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    '/demo5/TrainingData',
    target_size=(64, 64),
    batch_size=10,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    '/demo5/ValidationData',
    target_size=(64, 64),
    batch_size=2,
    class_mode='categorical')

# Stop when validation accuracy has not improved for 15 epochs.
early_stopping = EarlyStopping(monitor='val_acc', patience=15, verbose=1, mode='max')

classifier.fit_generator(
    train_generator,
    samples_per_epoch=2000,
    epochs=500,
    validation_data=validation_generator,
    callbacks=[early_stopping],
    validation_steps=1000)

classifier.save('/demo5/all-model.h5')

# --- Prediction over remaining clips, results written to CSV ---
import glob
import numpy as np
import csv
from keras.models import load_model
from keras.preprocessing import image

classifier = load_model('/demo5/all-model.h5')

with open('/demo5/prediction.csv', "w") as csv_file:
    writer = csv.writer(csv_file, delimiter=',')
    for filename in glob.iglob('/demo5/Remaining Clips/*.jpg'):
        test_image = image.load_img( filename,target_size=(64, 64))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis = 0)
        result = classifier.predict_classes(test_image, verbose = 1)
        # map the predicted class index back to its directory name
        for item in train_generator.class_indices: # Python's for loops are a "for each" loop
            if (result[0] == train_generator.class_indices[item]):
                line = filename + ',' + item
                writer.writerow([filename, item])
```
| github_jupyter |
<a href="https://colab.research.google.com/github/graviraja/100-Days-of-NLP/blob/applications%2Fclassification/applications/classification/grammatically_correct_sentence/CoLA%20with%20DistilBERT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Installations
```
# Install HF transformers and the wget helper used to fetch the dataset.
!pip install transformers
!pip install wget
```
### CoLA (Corpus of Linguistic Acceptability) Dataset
```
import os
import wget

print('Downloading dataset')

# The URL for the dataset zip file.
url = 'https://nyu-mll.github.io/CoLA/cola_public_1.1.zip'

# Download the file (if we haven't already)
if not os.path.exists('./cola_public_1.1.zip'):
    wget.download(url, './cola_public_1.1.zip')

# Unzip once; skip when the extracted folder already exists.
if not os.path.exists('./cola_public'):
    !unzip cola_public_1.1.zip

!ls
```
### Imports
```
import time

import pandas as pd
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

import transformers
from transformers import AdamW, get_linear_schedule_with_warmup

from sklearn import model_selection
from sklearn import metrics

import matplotlib.pyplot as plt
import seaborn as sns

# Seed numpy and torch for reproducibility.
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.backends.cudnn.deterministic = True

# In-domain CoLA splits: tab-separated files with no header row.
train_file = "cola_public/raw/in_domain_train.tsv"
test_file = "cola_public/raw/in_domain_dev.tsv"
df_train = pd.read_csv(train_file, sep='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence'])
df_valid = pd.read_csv(test_file, sep='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence'])
```
### Data Analysis
```
df_train.head()

# Keep only the label and the sentence text.
df_train = df_train.drop(columns=['sentence_source', 'label_notes'])
df_train.head()

df_valid = df_valid.drop(columns=['sentence_source', 'label_notes'])

df_train.shape, df_valid.shape

# Shuffle the training rows.
df_train = df_train.sample(frac=1).reset_index(drop=True)
df_train.head()

# Class balance of the train / validation splits.
sns.countplot(df_train['label'].values)
plt.xlabel("Training Data Distribution")

sns.countplot(df_valid['label'].values)
plt.xlabel("Testing Data Distribution")
```
#### Choosing maximum sequence length
```
# Whitespace-token length of every training sentence, used to pick MAX_LEN.
token_lens = [len(sentence.split()) for sentence in df_train.sentence]

sns.distplot(token_lens)
plt.xlim([0, 512]);
plt.xlabel('Token lengths');
```
### Configurations
```
# Model / training configuration.
OUTPUT_DIM = 1  # single logit (binary acceptability)
MAX_LEN = 100  # chosen from the token-length histogram above
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 8
EPOCHS = 3

TEACHER_MODEL_NAME = "bert-base-uncased"
STUDENT_MODEL_NAME = "distilbert-base-uncased"
TEACHER_MODEL_PATH = "teacher_model.bin"
STUDENTSA_MODEL_PATH = "studentsa_model.bin"
STUDENT_MODEL_PATH = "student_model.bin"

# NOTE(review): the BERT (teacher) tokenizer is reused for DistilBERT inputs
# below; their uncased vocabularies are compatible — reconfirm if either
# model name changes.
TOKENIZER = transformers.BertTokenizer.from_pretrained(TEACHER_MODEL_NAME)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
```
### CoLA Dataset
```
class CoLADataset:
    """Torch-style dataset yielding tokenized CoLA sentences.

    Each item is a dict with 'ids' and 'mask' (LongTensors of length
    MAX_LEN) plus a float scalar 'targets' label.
    """

    def __init__(self, sentences, labels):
        self.sentences = sentences
        self.labels = labels
        # tokenizer and max length come from the module-level config cell
        self.tokenizer = TOKENIZER
        self.max_len = MAX_LEN

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, item):
        sentence = self.sentences[item]
        label = self.labels[item]

        # NOTE(review): pad_to_max_length is deprecated in newer
        # transformers releases (padding='max_length' replaces it) —
        # confirm against the pinned transformers version.
        encoding = self.tokenizer.encode_plus(
            sentence,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            pad_to_max_length=True,
            return_attention_mask=True,
            truncation=True,
            return_tensors='pt',
        )

        return {
            "ids": encoding["input_ids"].flatten(),
            "mask": encoding["attention_mask"].flatten(),
            "targets": torch.tensor(label, dtype=torch.float)
        }

train_dataset = CoLADataset(
    sentences=df_train.sentence.values,
    labels=df_train.label.values
)

valid_dataset = CoLADataset(
    sentences=df_valid.sentence.values,
    labels=df_valid.label.values
)
```
### DataLoaders
```
# Batch the datasets; only the training loader is shuffled.
train_data_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=TRAIN_BATCH_SIZE,
    shuffle=True,
)

valid_data_loader = torch.utils.data.DataLoader(
    valid_dataset,
    batch_size=VALID_BATCH_SIZE,
)

# Smoke-test one batch and inspect the tensor shapes.
sample = next(iter(train_data_loader))
sample["ids"].shape, sample["mask"].shape, sample["targets"].shape
```
## BERT Model (Teacher)
```
class BERTModel(nn.Module):
    """BERT encoder + dropout + linear head producing one logit per sentence."""

    def __init__(self):
        super().__init__()
        self.bert = transformers.BertModel.from_pretrained(TEACHER_MODEL_NAME)
        self.bert_drop = nn.Dropout(0.3)
        self.out = nn.Linear(768, OUTPUT_DIM)

    def forward(self, ids, mask):
        """Return raw (pre-sigmoid) logits of shape (batch, OUTPUT_DIM)."""
        # Fix: index the pooled output positionally instead of tuple-unpacking.
        # On transformers>=4 the model returns a ModelOutput whose ITERATION
        # yields key strings, so `_, o2 = self.bert(...)` silently binds the
        # wrong values; integer indexing works on both the old tuple return
        # and the new ModelOutput.
        pooled = self.bert(ids, attention_mask=mask)[1]
        bo = self.bert_drop(pooled)
        output = self.out(bo)
        return output

teacher_model = BERTModel()
teacher_model.to(device)
```
### Optimizer
```
# Build the optimizer parameter groups: weight decay is applied everywhere
# EXCEPT biases and LayerNorm parameters (standard BERT fine-tuning recipe).
param_optimizer = list(teacher_model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]

def _needs_decay(name):
    # True when none of the no-decay substrings occurs in the parameter name.
    return not any(nd in name for nd in no_decay)

optimizer_parameters = [
    {"params": [p for n, p in param_optimizer if _needs_decay(n)],
     "weight_decay": 0.001},
    {"params": [p for n, p in param_optimizer if not _needs_decay(n)],
     "weight_decay": 0.0},
]

# Total optimizer steps across all epochs (one step per batch).
num_train_steps = int(len(df_train) / TRAIN_BATCH_SIZE * EPOCHS)
num_train_steps

optimizer = AdamW(optimizer_parameters, lr=3e-5)
```
### Scheduler
```
# Linear decay from the initial LR down to 0 over all training steps,
# with no warmup phase.
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=num_train_steps
)
```
### Loss Criterion
```
# Binary cross-entropy on raw logits (combines sigmoid + BCE in one op).
criterion = nn.BCEWithLogitsLoss().to(device)
```
### Training Method
```
def train_fn(data_loader, model, optimizer, criterion, device, scheduler):
    """Run one training epoch and return the mean per-batch loss.

    Each batch is a dict with 'ids', 'mask' and 'targets'; the LR
    scheduler is stepped once per batch.
    """
    model.train()
    total_loss = 0.0

    for batch in data_loader:
        ids = batch['ids'].to(device)
        mask = batch["mask"].to(device)
        targets = batch["targets"].to(device)

        optimizer.zero_grad()
        outputs = model(ids=ids, mask=mask)

        # targets reshaped to (batch, 1) to match the single-logit output
        loss = criterion(outputs, targets.view(-1, 1))
        total_loss += loss.item()

        loss.backward()
        optimizer.step()
        scheduler.step()

    return total_loss / len(data_loader)
```
### Evaluation Method
```
def eval_fn(data_loader, model, criterion, device):
    """Evaluate the model; return (mean loss, accuracy, Matthews corr.).

    Accuracy and MCC are computed from sigmoid probabilities thresholded
    at 0.5.
    """
    model.eval()
    all_probs = []
    all_targets = []
    total_loss = 0.0

    with torch.no_grad():
        for batch in data_loader:
            ids = batch["ids"].to(device)
            mask = batch["mask"].to(device)
            targets = batch["targets"].to(device)

            outputs = model(ids=ids, mask=mask)
            total_loss += criterion(outputs, targets.view(-1, 1)).item()

            all_targets.extend(targets.cpu().detach().numpy().tolist())
            all_probs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist())

    predictions = np.array(all_probs) >= 0.5
    accuracy = metrics.accuracy_score(all_targets, predictions)
    mat_cor = metrics.matthews_corrcoef(all_targets, predictions)
    return total_loss / len(data_loader), accuracy, mat_cor
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock span into whole (minutes, seconds)."""
    elapsed = end_time - start_time
    minutes, seconds = divmod(int(elapsed), 60)
    return minutes, seconds
```
### Training
```
# Fine-tune the teacher, checkpointing the weights with the lowest
# validation loss.
best_valid_loss = float('inf')

for epoch in range(EPOCHS):
    start_time = time.time()

    train_loss = train_fn(train_data_loader, teacher_model, optimizer, criterion, device, scheduler)
    val_loss, val_acc, val_mat_cor = eval_fn(valid_data_loader, teacher_model, criterion, device)

    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)

    # save only on validation improvement
    if val_loss < best_valid_loss:
        best_valid_loss = val_loss
        torch.save(teacher_model.state_dict(), TEACHER_MODEL_PATH)

    print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
    print(f"\t Train Loss: {train_loss:.3f}")
    print(f"\t Valid Loss: {val_loss:.3f} | Valid Acc: {val_acc * 100:.2f} | Matthews Cor: {val_mat_cor:.3f}")

# restore the best checkpoint before running inference
teacher_model.load_state_dict(torch.load(TEACHER_MODEL_PATH))
```
### Inference
```
def inference(sentence, model, device):
    """Classify one raw sentence and print its acceptability prediction.

    Prints 1.0 for grammatically acceptable, 0.0 otherwise (rounded
    sigmoid of the model's single logit).
    """
    encoded = TOKENIZER.encode_plus(
        sentence,
        max_length=MAX_LEN,
        add_special_tokens=True,
        return_token_type_ids=False,
        pad_to_max_length=True,
        return_attention_mask=True,
        truncation=True,
        return_tensors='pt',
    )
    input_ids = encoded['input_ids'].to(device)
    attention_mask = encoded['attention_mask'].to(device)

    output = model(input_ids, attention_mask)
    # single logit -> probability -> hard 0/1 label
    prediction = torch.round(torch.sigmoid(output))
    print(f'Sentence: {sentence}')
    print(f'Grammatically Correct: {prediction.item()}')

sentence = "I like coding"
inference(sentence, teacher_model, device)

sentence = "I myself talking to"
inference(sentence, teacher_model, device)

sentence = "I am talking to myself"
inference(sentence, teacher_model, device)

# free cached GPU memory before building the student models
torch.cuda.empty_cache()
```
## DistilBERT Model (Standalone)
Without any teacher forcing from BERT Model
```
class DistilBERTModelSA(nn.Module):
    """Standalone DistilBERT classifier (no teacher distillation):
    [CLS] hidden state -> dropout -> linear head producing OUTPUT_DIM logits."""

    def __init__(self):
        super().__init__()
        self.bert = transformers.DistilBertModel.from_pretrained(STUDENT_MODEL_NAME)
        self.bert_drop = nn.Dropout(0.3)
        self.out = nn.Linear(768, OUTPUT_DIM)

    def forward(self, ids, mask):
        hidden_states = self.bert(ids, attention_mask=mask)[0]
        cls_repr = self.bert_drop(hidden_states[:, 0])
        return self.out(cls_repr)
# Instantiate the standalone student and move it to the training device.
student_model_sa = DistilBERTModelSA()
student_model_sa.to(device)
param_optimizer = list(student_model_sa.named_parameters())
# Standard BERT recipe: no weight decay on biases and LayerNorm parameters.
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
    {
        "params": [
            p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
        ],
        "weight_decay": 0.001,
    },
    {
        "params": [
            p for n, p in param_optimizer if any(nd in n for nd in no_decay)
        ],
        "weight_decay": 0.0
    }
]
# Total optimizer steps = batches per epoch * epochs (drives the LR schedule).
num_train_steps = int(len(df_train) / TRAIN_BATCH_SIZE * EPOCHS)
num_train_steps
optimizer = AdamW(optimizer_parameters, lr=3e-5)
# Linear decay to zero with no warmup phase.
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=num_train_steps
)
# Binary classification on a single output logit.
criterion = nn.BCEWithLogitsLoss().to(device)
def train_fn(data_loader, model, optimizer, criterion, device, scheduler):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0
    for batch in data_loader:
        ids = batch['ids'].to(device)
        mask = batch["mask"].to(device)
        targets = batch["targets"].to(device)
        optimizer.zero_grad()
        logits = model(ids=ids, mask=mask)
        # Targets arrive flat; reshape to (batch, 1) to match the logits.
        loss = criterion(logits, targets.view(-1, 1))
        running_loss += loss.item()
        loss.backward()
        optimizer.step()
        scheduler.step()
    return running_loss / len(data_loader)
def eval_fn(data_loader, model, criterion, device):
    """Evaluate the model; return (mean loss, accuracy, Matthews correlation)."""
    model.eval()
    all_probs = []
    all_targets = []
    running_loss = 0.0
    with torch.no_grad():
        for batch in data_loader:
            ids = batch["ids"].to(device)
            mask = batch["mask"].to(device)
            targets = batch["targets"].to(device)
            logits = model(ids=ids, mask=mask)
            running_loss += criterion(logits, targets.view(-1, 1)).item()
            all_targets.extend(targets.cpu().detach().numpy().tolist())
            all_probs.extend(torch.sigmoid(logits).cpu().detach().numpy().tolist())
    # Threshold the sigmoid probabilities at 0.5 for the classification metrics.
    preds = np.array(all_probs) >= 0.5
    accuracy = metrics.accuracy_score(all_targets, preds)
    mat_cor = metrics.matthews_corrcoef(all_targets, preds)
    return running_loss / len(data_loader), accuracy, mat_cor
def epoch_time(start_time, end_time):
    """Break the (non-negative) elapsed time between two timestamps into (minutes, seconds)."""
    elapsed = end_time - start_time
    minutes, seconds = divmod(int(elapsed), 60)
    return minutes, seconds
# Train the standalone student with the same best-validation-loss
# checkpointing used for the teacher.
best_valid_loss = float('inf')
for epoch in range(EPOCHS):
    start_time = time.time()
    train_loss = train_fn(train_data_loader, student_model_sa, optimizer, criterion, device, scheduler)
    val_loss, val_acc, val_mat_cor = eval_fn(valid_data_loader, student_model_sa, criterion, device)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Keep only the weights with the lowest validation loss so far.
    if val_loss < best_valid_loss:
        best_valid_loss = val_loss
        torch.save(student_model_sa.state_dict(), STUDENTSA_MODEL_PATH)
    print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
    print(f"\t Train Loss: {train_loss:.3f}")
    print(f"\t Valid Loss: {val_loss:.3f} | Valid Acc: {val_acc * 100:.2f} | Matthews Cor: {val_mat_cor:.3f}")
# Restore the best checkpoint.
student_model_sa.load_state_dict(torch.load(STUDENTSA_MODEL_PATH))
def inference(sentence, model, device):
    """Classify one sentence and print whether it is predicted grammatically correct."""
    tokenized = TOKENIZER.encode_plus(
        sentence,
        max_length=MAX_LEN,
        add_special_tokens=True,
        return_token_type_ids=False,
        pad_to_max_length=True,
        return_attention_mask=True,
        truncation=True,
        return_tensors='pt',
    )
    ids = tokenized['input_ids'].to(device)
    mask = tokenized['attention_mask'].to(device)
    pred = torch.round(torch.sigmoid(model(ids, mask)))
    print(f'Sentence: {sentence}')
    print(f'Grammatically Correct: {pred.item()}')
sentence = "I like coding"
inference(sentence, student_model_sa, device)
torch.cuda.empty_cache()
```
## DistilBERT Model (With Teacher Forcing)
```
class DistilBERTModel(nn.Module):
    """DistilBERT student used for knowledge distillation: the [CLS] token
    representation is regularised with dropout and projected to logits."""

    def __init__(self):
        super().__init__()
        self.bert = transformers.DistilBertModel.from_pretrained(STUDENT_MODEL_NAME)
        self.bert_drop = nn.Dropout(0.3)
        self.out = nn.Linear(768, OUTPUT_DIM)

    def forward(self, ids, mask):
        sequence_output = self.bert(ids, attention_mask=mask)[0]
        pooled = self.bert_drop(sequence_output[:, 0])
        return self.out(pooled)
# Distilled student: same architecture as the standalone one, but it will be
# trained with teacher guidance.
student_model = DistilBERTModel()
student_model.to(device)
param_optimizer = list(student_model.named_parameters())
# Exempt biases and LayerNorm parameters from weight decay (BERT convention).
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
    {
        "params": [
            p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
        ],
        "weight_decay": 0.001,
    },
    {
        "params": [
            p for n, p in param_optimizer if any(nd in n for nd in no_decay)
        ],
        "weight_decay": 0.0
    }
]
# Total optimizer steps for the linear LR schedule.
num_train_steps = int(len(df_train) / TRAIN_BATCH_SIZE * EPOCHS)
num_train_steps
optimizer = AdamW(optimizer_parameters, lr=3e-5)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=num_train_steps
)
# Hard-label loss on the single output logit.
criterion = nn.BCEWithLogitsLoss().to(device)
# Teacher-matching (soft) losses: logit matching via MSE and softened-output
# matching via KL divergence.
MSE_loss = nn.MSELoss(reduction='mean')
KLD_loss = nn.KLDivLoss(reduction="batchmean")
def train_fn(data_loader, model, teacher_model, optimizer, criterion, device, scheduler, alpha_clf=1.0, alpha_teacher=1.0, temperature=2.0):
    """One distillation training epoch.

    loss = alpha_clf * BCE(student, targets)
         + alpha_teacher * (MSE(student, teacher) + KLD(student/T, teacher/T))

    Returns (mean classification loss, mean total loss) over the epoch.
    """
    model.train()
    epoch_clf_loss = 0
    epoch_total_loss = 0
    for batch in data_loader:
        ids = batch['ids'].to(device)
        mask = batch["mask"].to(device)
        targets = batch["targets"].to(device)
        optimizer.zero_grad()
        student_logits = model(
            ids=ids,
            mask=mask
        )
        # Teacher is frozen for distillation: no gradients through its forward.
        with torch.no_grad():
            teacher_logits = teacher_model(
                ids=ids,
                mask=mask
            )
        mse_loss = MSE_loss(student_logits, teacher_logits)
        # NOTE(review): nn.KLDivLoss expects its first argument to be
        # log-probabilities and its second to be probabilities; raw
        # temperature-scaled logits are passed here, so this term is not a
        # true KL divergence -- confirm intent (see PyTorch KLDivLoss docs).
        kld_loss = KLD_loss(
            (student_logits / temperature),
            (teacher_logits / temperature),
        )
        clf_loss = criterion(student_logits, targets.view(-1, 1))
        teacher_loss = mse_loss + kld_loss
        loss = alpha_clf * clf_loss + alpha_teacher * teacher_loss
        epoch_clf_loss += clf_loss.item()
        epoch_total_loss += loss.item()
        loss.backward()
        optimizer.step()
        scheduler.step()
    return epoch_clf_loss / len(data_loader), epoch_total_loss / len(data_loader)
def eval_fn(data_loader, model, teacher_model, criterion, device, alpha_clf=1.0, alpha_teacher=1.0, temperature=2.0):
    """Evaluate the student under the distillation objective.

    Returns (mean classification loss, mean total loss, accuracy,
    Matthews correlation) over the loader.
    """
    model.eval()
    fin_outputs = []
    fin_targets = []
    epoch_clf_loss = 0
    epoch_total_loss = 0
    with torch.no_grad():
        for batch in data_loader:
            ids = batch["ids"].to(device)
            mask = batch["mask"].to(device)
            targets = batch["targets"].to(device)
            student_logits = model(
                ids=ids,
                mask=mask
            )
            # Redundant but harmless: the outer no_grad already disables autograd.
            with torch.no_grad():
                teacher_logits = teacher_model(
                    ids=ids,
                    mask=mask
                )
            mse_loss = MSE_loss(student_logits, teacher_logits)
            # NOTE(review): as in train_fn, nn.KLDivLoss expects
            # (log-probabilities, probabilities); raw scaled logits are passed,
            # so this term is not a true KL divergence -- confirm intent.
            kld_loss = KLD_loss(
                (student_logits / temperature),
                (teacher_logits / temperature),
            )
            clf_loss = criterion(student_logits, targets.view(-1, 1))
            teacher_loss = mse_loss + kld_loss
            loss = alpha_clf * clf_loss + alpha_teacher * teacher_loss
            epoch_clf_loss += clf_loss.item()
            epoch_total_loss += loss.item()
            targets = targets.cpu().detach()
            fin_targets.extend(targets.numpy().tolist())
            outputs = torch.sigmoid(student_logits).cpu().detach()
            fin_outputs.extend(outputs.numpy().tolist())
    # Threshold probabilities at 0.5 for the classification metrics.
    outputs = np.array(fin_outputs) >= 0.5
    accuracy = metrics.accuracy_score(fin_targets, outputs)
    mat_cor = metrics.matthews_corrcoef(fin_targets, outputs)
    return epoch_clf_loss / len(data_loader), epoch_total_loss / len(data_loader), accuracy, mat_cor
def epoch_time(start_time, end_time):
    """Convert an elapsed span of seconds into whole minutes and leftover seconds."""
    span = end_time - start_time
    full_minutes = int(span / 60)
    return full_minutes, int(span - full_minutes * 60)
# Reload the best teacher checkpoint so distillation uses the strongest teacher.
teacher_model.load_state_dict(torch.load(TEACHER_MODEL_PATH))
best_valid_loss = float('inf')
for epoch in range(EPOCHS):
    start_time = time.time()
    train_clf_loss, train_total_loss = train_fn(train_data_loader, student_model, teacher_model, optimizer, criterion, device, scheduler)
    val_clf_loss, val_total_loss, val_acc, val_mat_cor = eval_fn(valid_data_loader, student_model, teacher_model, criterion, device)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # BUG FIX: the original compared `val_loss`, a stale variable left over
    # from the standalone-student loop (this eval_fn does not define it), so
    # the checkpoint never tracked THIS model's validation loss. Track the
    # distillation total validation loss instead.
    if val_total_loss < best_valid_loss:
        best_valid_loss = val_total_loss
        torch.save(student_model.state_dict(), STUDENT_MODEL_PATH)
    print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
    print(f"\t Train CLF Loss: {train_clf_loss:.3f} | Train total Loss: {train_total_loss:.3f}")
    print(f"\t Valid CLF Loss: {val_clf_loss:.3f} | Valid total Loss: {val_total_loss:.3f}")
    print(f"\t Valid Acc: {val_acc * 100:.2f} | Matthews Cor: {val_mat_cor:.3f}")
# Restore the best student weights.
student_model.load_state_dict(torch.load(STUDENT_MODEL_PATH))
def inference(sentence, model, device):
    """Run the model on a single sentence and report the rounded sigmoid
    output as a grammaticality verdict."""
    features = TOKENIZER.encode_plus(
        sentence,
        max_length=MAX_LEN,
        add_special_tokens=True,
        return_token_type_ids=False,
        pad_to_max_length=True,
        return_attention_mask=True,
        truncation=True,
        return_tensors='pt',
    )
    input_ids = features['input_ids'].to(device)
    attention_mask = features['attention_mask'].to(device)
    verdict = torch.round(torch.sigmoid(model(input_ids, attention_mask)))
    print(f'Sentence: {sentence}')
    print(f'Grammatically Correct: {verdict.item()}')
sentence = "I like coding"
inference(sentence, student_model, device)
```
| github_jupyter |
# Lecture 2.0.1: Numpy Random and Random Graphs

NumPy is not only cool because it lets you handle arrays quite fast (btw, there is C under the hood); it also has submodules covering a variety of mathematical tools. Here we are going to learn about `random`, which will be of use for our network analysis.
## Random
[random](https://docs.scipy.org/doc/numpy/reference/routines.random.html) provides a big number of instances for random sampling
```
import numpy
# BUG FIX: later cells in this lecture reference `np` (e.g. np.arange), but
# only `import numpy` was done, leaving `np` undefined. Add the conventional
# alias while keeping the original imports.
import numpy as np
import numpy.random as random
```
### Random functions
Random is somewhat redundant...<br/> random.random(size), random.random_sample(size), random.ranf(size) and random.sample(size) all do exactly the same thing, i.e. sample an array of shape `size` from [0, 1).
```
# All three draw uniform samples from [0, 1); the argument sets the shape.
random.random()        # a single float
random.random(2)       # a length-2 vector
random.random([2,2])   # a 2x2 matrix
```
random.randint(low, high) instead sample from an interval of integers **[high is exclusive]**
```
random.randint(0,42)
# BUG FIX: the original cell used Python 2's `xrange` and the `print i`
# statement, both SyntaxErrors in Python 3.
# Note: randint's upper bound is exclusive, so randint(0, 1) is always 0 and
# this loop never prints -- it illustrates the [low, high) convention.
for i in range(10):
    if random.randint(0,1)!=0:
        print(i)
```
random.choice(array) random choose an element from the array
```
# NOTE(review): this cell references `np`, but the lecture's import cell only
# does `import numpy` / `import numpy.random as random` -- confirm numpy is
# imported as `np` somewhere, otherwise this raises NameError.
random.choice(np.arange(42))                  # one random element
random.choice(np.arange(42), size=42)         # 42 draws (with replacement)
out=random.choice(np.arange(42), size=42)
len(np.unique(out))==len(out)                 # almost surely False: duplicates occur
```
???
```
# replace=False samples without replacement, so all 42 values are distinct.
random.choice(np.arange(42), size=42, replace=False)
out=random.choice(np.arange(42), size=42, replace=False)
len(np.unique(out))==len(out)                 # True
```
This second feature is analogous to random.permutation()
```
# permutation returns a shuffled copy, so every value appears exactly once.
out=random.permutation(np.arange(42))
len(np.unique(out))==len(out)                 # True
```
Instead, random.shuffle(something) shuffles its argument in place (it returns None).
```
# shuffle works in place: it reorders `out` itself and returns None.
out=np.arange(42)
out
random.shuffle(out)
out
```
### Exercise: generate an instance of a (grand-canonical) Erdös-Renyi random graph for the monopartite network of the previous notebook
In an Erdös-Renyi random graph every link is given a probability equal to the link density, i.e. $p^\text{RG}=\dfrac{2L}{N(N-1)}$. In the grand-canonical approach each possible link is an independent event with probability of success equal to $p^\text{RG}$. Thus, the total number of links is conserved only on average (in the mean).
#### Load the monopartite adjacency matrix
#### Define the probability per couple of nodes
#### Generate the new adjacency matrix
In principle the number of links is not conserved on a single instance.
### Exercise: generate an instance of a (µ-canonical) Erdös-Renyi random graph for the monopartite network of the previous notebook
Here the solution is to use a trick: random.shuffle can reshuffle even matrices, but it does so by permuting only the rows.
### Exercise: generate an instance of a (grand-canonical) Configuration Model <br/>(Chung-Lu version) <br/>for the monopartite network of the previous notebook
In a configuration model a la Chung-Lu (better, in the Chung-Lu approximation, from now on CLA), the probability per link is $p^\text{CLA}_{ij}=\dfrac{k_ik_j}{2m}$, where $k_i$ is the degree of the real network of the node $i$. **Exercise in the exercise:** use a piece of paper and a pen to show that the mean degree over the ensemble is equal to the one of the real network.
### Exercise: compare the real $k^\text{nn}$ with the average and $\sigma$ over a sample of 1000 instances of the grand-canonical ER and CLA-CM
In order to have the name of the films
```
# Load the bipartite film-actor edge list (tab-separated unicode names) and
# count each film's degree (number of credited actors).
# NOTE(review): assumes numpy is available as `np`; the lecture's import cell
# only imports it as `numpy` -- confirm.
bip_el=np.genfromtxt('./data/imdb_2018_films_actors.txt', delimiter='\t', dtype=[('film', 'U50'),('actor', 'U50')])
films, k_films=np.unique(bip_el['film'], return_counts=True)
```
##### Define a function calculating the $k^\text{nn}$
##### Define a function generating an element of the grand canonical ER
##### Save the $k^\text{nn}$ on the real matrix somewhere
##### Generate a vector of $k^\text{nn}$s vectors, whose elements are the value from the sample of a grand-ER
By the way, what do I expect about the distribution of the $k^\text{nn}$? Why?
##### CLA-CM sampler
##### Generate a vector of $k^\text{nn}$s vectors, whose elements are the value from the sample of a grand-CLACM
#### Select movies whose $k^\text{nn}$ is significantly more than expected...
#### ...and those whose $k^\text{nn}$ is significantly less than expected.
| github_jupyter |
# Calculations with PmagPy
This notebook demonstrates many of the PmagPy calculation functions such as those that rotate directions, return statistical parameters, and simulate data from specified distributions.
## Guide to PmagPy
The notebook is one of a series of notebooks that demonstrate the functionality of PmagPy. The other notebooks are:
- [PmagPy_introduction.ipynb](PmagPy_introduction.ipynb) This notebook introduces PmagPy and lists the functions that are demonstrated in the other notebooks.
- [PmagPy_plots_analysis.ipynb](PmagPy_plots_analysis.ipynb) This notebook demonstrates PmagPy functions that can be used to visualize data as well as those that conduct statistical tests that have associated visualizations.
- [PmagPy_MagIC.ipynb](PmagPy_MagIC.ipynb) This notebook demonstrates how PmagPy can be used to read and write data to and from the MagIC database format including conversion from many individual lab measurement file formats.
## Customizing this notebook
If you want to make changes to this notebook, you should make a copy (see File menu). Otherwise each time you update **PmagPy**, your changes will be overwritten.
## Get started
To use the functions in this notebook, we have to import the **PmagPy** modules **pmagplotlib**, **pmag** and **ipmag** and some other handy functions for use in the notebook. This is done in the following code block which must be executed before running any other code block. To execute, click on the code block and then click on the "Run" button in the menu.
In order to access the example data, this notebook is meant to be run in the PmagPy-data directory (PmagPy directory for developers).
Try it! Run the code block below (click on the cell and then click 'Run'):
```
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.ipmag as ipmag
import matplotlib.pyplot as plt # our plotting buddy
from pmagpy import convert_2_magic as convert
import numpy as np # the fabulous NumPy package
import pandas as pd # and of course Pandas
has_basemap, Basemap = pmag.import_basemap()
has_cartopy, Cartopy = pmag.import_cartopy()
from IPython.display import Image
%matplotlib inline
```
## Functions demonstrated within this notebook:
- Functions in **PmagPy_calculations.ipynb**:
- [aarm_magic](#aarm_magic) : calculate AARM tensors
- [atrm_magic](#aarm_magic) : calculate ATRM tensors
- [angle](#angle) : calculates the angle between two vectors
- [apwp](#apwp) : returns predicted paleolatitudes, directions and pole latitude/longitude from apparent polar wander paths of Besse and Courtillot (2002).
- [b_vdm](#b_vdm) : converts B (in microT) and (magnetic) latitude to V(A)DM (see [vdm_b](#vdm_b))
- [bootams](#bootams) : calculates bootstrap statistics for tensor data
- [cart_dir](#cart_dir) : converts cartesian coordinates (x,y,z) to declination, inclination, intensity (see [dir_cart](#dir_cart))
- [di_eq](#di_eq) : maps declination, inclinatitions to X,Y for plotting in equal area projections
- [di_geo](#di_geo) : rotates declination, inclination in specimen coordinates to geographic coordinates
- [di_rot](#di_rot) : rotates directions to a coordinate system with D,I as center
- [di_tilt](#di_tilt) : rotates directions to stratigraphic coordinates
- [di_vgp](#di_vgp) : converts direction to Virtual Geomagnetic Pole (see [vgp_di](#vgp_di))
- [dia_vgp](#dia_vgp) : converts direction and $\alpha_{95}$ to Virtual Geomagnetic Pole and dp,dm
- [dipole_pinc](#dipole_pinc) : calculates inclination given latitude assuming geocentric axial dipole
- [dipole_plat](#dipole_plat) : calculates latitude given inclination assuming geocentric axial dipole
- [dir_cart](#dir_cart) : converts declination, inclination, intensity to cartesian coordinates (see [cart_dir](#cart_dir))
- [eigs_s](#eigs_s) : converts eigenparameters to equivalent 6 element tensor (see [s_eigs](#s_eigs))
- [eq_di](#eq_di) : takes X,Y from equal area projection (e.g., from digitized coordinates) and converts to declination, inclination
- [fcalc](#fcalc) : returns the value from an F table, given the degrees of freedom.
- [fisher](#fisher) : generates sets of directions drawn from Fisher distributions with vertical true mean
- [fishrot](#fishrot) : generates sets of directions drawn from Fisher distributions with arbitrary true mean
- [flip](#flip) : flips a second mode (reverse directions) to their antipodes
- [gaussian](#gaussian) : generates data drawn from a normal distribution
- [gobing](#gobing) : calculates Bingham statistics from a set of directions
- [gofish](#gofish) : calculates Fisher statistics from a set of directions
- [gokent](#gokent) : calculates Kent statistics from a set of directions
- [goprinc](#goprinc) : calculates principal directions statistics
- [igrf](#igrf) : calculates geomagnetic field vectors for location, age given a field model (e.g., IGRF) including paleofield models (e.g., cals10k)
- [incfish](#incfish) : estimates the true mean inclination from inclination only data
- [pca](#pca) : calculates the best-fit line or plane for demagnetization data and associated statistics
- [pt_rot](#pt_rot) : rotates point given finite rotation pole
- [s_eigs](#s_eigs) : takes a 6 element tensor and calculates eigen parameters (see [eigs_s](#eigs_s))
- [s_geo](#s_geo) : rotates 6 element tensors to geographic coordinates
- [s_hext](#s_hext) : calculates Hext statistics from 6 element tensors
- [s_tilt](#s_tilt) : rotates 6 element tensors to stratigraphic coordinates
- [s_magic](#s_tilt) :
- [scalc](#scalc) : calculates VGP scatter
- [scalc_magic](#scalc) : calculates VGP scatter
- [separate_directions](#separate_directions) : separates a set of directions into two modes (normal and reverse)
- [squish](#squish): flattens inclination data given flattening factor (see [unsquish](#unsquish))
- [sundec](#sundec) : calulates direction to sun for location, date, time and sun azimuth
- [tk03](#tk03) : generates sets of directions consistent with the TK03 field model
- [uniform](#uniform) : generates sets of uniformly distributed directions
- [unsquish](#unsquish) : unsquishes flattened inclinations, given flattening factor (see [squish](#squish))
- [vdm_b](#vdm_b) : calculates intensity at given location from specified virtual dipole moment (see [b_vdm](#b_vdm))
- [vector_mean](#vector_mean) : calculates vector mean for sets of vectors (declination, inclination, intensity)
- [vgp_di](#vgp_di) : calculates direction at given location from virtual geomagnetic pole (see [di_vgp](#di_vgp))
- [watsons_f](#watsons_f) : calculates Watson's F statistic for testing for common mean
## aarm_magic
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#aarm_magic.py)
Anisotropy of anhysteretic or other remanence can be converted to a tensor and used to correct natural remanence data for the effects of anisotropy remanence acquisition. For example, directions may be deflected from the geomagnetic field direction or intensities may be biased by strong anisotropies in the magnetic fabric of the specimen. By imparting an anhysteretic or thermal remanence in many specific orientations, the anisotropy of remanence acquisition can be characterized and used for correction. We do this for anisotropy of anhysteretic remanence (AARM) by imparting an ARM in 9, 12 or 15 positions. Each ARM must be preceded by an AF demagnetization step. The 15 positions are shown in the [k15_magic](#k15_magic) example.
For the 9 position scheme, **aarm_magic** assumes that the AARMs are imparted in positions 1,2,3, 6,7,8, 11,12,13. Someone (a.k.a. Josh Feinberg) has kindly made the measurements and saved them an SIO formatted measurement file named aarm_magic_example.dat in the datafile directory called aarm_magic. Note the special format of these files - the treatment column (column #2) has the position number (1,2,3,6, etc.) followed by either a “00” for the obligatory zero field baseline step or a “10” for the in-field step. These could also be ‘0‘ and ‘1’.
We need to first import these into the measurements format and then calculate the anisotropy tensors. These can then be plotted or used to correct paleointensity or directional data for anisotropy of remanence.
So, first follow the instructions in [sio_magic](#sio_magic) to import the AARM data into the MagIC format. The DC field was 50 μT, the peak AC field was 180 mT, the location was "Bushveld" and the lab protocol was AF and Anisotropy. The naming convention used Option # 3 (see help menu).
Then we need to calculate the best-fit tensor and write them out to the specimens.txt MagIC tables which can be used to correct remanence data for anisotropy.
The **aarm_magic** program takes a measurements.txt formatted file with anisotropy of ARM data in it and calculates the tensors, rotates it into the desired coordinate system and stores the data in a specimens.txt format file. To do this in a notebook, use **ipmag.aarm_magic()**.
```
# Convert the SIO-format AARM measurements into a MagIC measurements file.
# DC field 50 uT, peak AF 180 mT, location "Bushveld", lab protocol AF+ANI,
# sample naming convention option 3 (see the prose above and the sio help).
convert.sio('arm_magic_example.dat',dir_path='data_files/aarm_magic/',specnum=3,
            location='Bushveld',codelist='AF:ANI',samp_con='3',
            meas_file='aarm_measurements.txt',peakfield=180,labfield=50, phi=-1, theta=-1)
help(ipmag.aarm_magic)
# Fit the anisotropy-of-ARM tensors and write them to specimens.txt.
ipmag.aarm_magic('aarm_measurements.txt',dir_path='data_files/aarm_magic/')
# plot the data generated by aarm_magic:
ipmag.aniso_magic_nb(infile='data_files/aarm_magic/specimens.txt', save_plots=False)
```
## atrm_magic
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#atrm_magic.py)
Anisotropy of thermal remanence (ATRM) is similar to anisotropy of anhysteretic remanence (AARM) and the procedure for obtaining the tensor is also similar. Therefore, the **atrm_magic** is quite similar to [aarm_magic](#aarm_magic). However, the SIO lab procedures for the two experiments are somewhat different. In the ATRM experiment, there is a single, zero field step at the chosen temperature which is used as a baseline. We use only six positions (as opposed to nine for AARM) because of the additional risk of alteration at each temperature step. The positions are also different:
```
Image('data_files/Figures/atrm_meas.png')
```
The file atrm_magic_example.dat in the data_files/atrm_magic directory is an SIO formatted data file containing ATRM measurement data done in a temperature of 520∘C. Note the special format of these files - the treatment column (column 2) has the temperature in centigrade followed by either a “00” for the obligatory zero field baseline step or a “10” for the first postion, and so on. These could also be ‘0‘ and ‘1’, etc..
Follow the instructions for [sio_magic](#sio_magic) to import the ATRM data into the MagIC format. The DC field was 40 μT. The sample/site naming convention used option # 1 (see help menu) and the specimen and sample name are the same (specnum=0).
We will use **ipmag.atrm_magic()** to calculate the best-fit tensor and write out the MagIC tables which can be used to correct remanence data for the effects of remanent anisotropy.
```
# Import the SIO-format ATRM data into a MagIC measurements file.
# DC field 40 uT, naming convention option 1, specnum=0 (specimen == sample).
convert.sio('atrm_magic_example.dat',dir_path='data_files/atrm_magic/',specnum=0,
            location='unknown',codelist='T:ANI',samp_con='1',
            meas_file='measurements.txt',labfield=40, phi=-1, theta=-1)
help(ipmag.atrm_magic)
# Fit the ATRM tensors and write the specimens.txt table.
ipmag.atrm_magic('measurements.txt',dir_path='data_files/atrm_magic')
```
## angle
[\[Essentials Appendix A.3.4\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ap1.html#x20-215000A.3.4) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#angle.py)
**angle** calculates the angle $\alpha$ between two declination,inclination pairs.
There are several ways to use this function from the notebook - one loading the data into a Pandas dataframe, then converting to the desired arrays, or load directly into a **Numpy** array of desired shape.
```
help(pmag.angle)
# Pandas way:
# `sep=r'\s+'` replaces `delim_whitespace=True`, which is deprecated since
# pandas 2.2 (removed in 3.0); the parsing behaviour is identical.
di=pd.read_csv('data_files/angle/angle.dat',sep=r'\s+',header=None)
#rename column headers
di.columns=['Dec1','Inc1','Dec2','Inc2']
```
Here's the sort of data in the file:
```
di.head()
```
Now we will use **pmag.angle()** to calculate the angles.
```
# call pmag.angle
pmag.angle(di[['Dec1','Inc1']].values,di[['Dec2','Inc2']].values)
```
Here is the other (equally valid) way using **np.loadtxt()**.
```
# Numpy way:
di=np.loadtxt('data_files/angle/angle.dat').transpose() # read in file
D1=di[0:2].transpose() # assign to first array
D2=di[2:].transpose() # assign to second array
pmag.angle(D1,D2) # call pmag.angle
```
You can always save your output using **np.savetxt()**.
```
angles=pmag.angle(D1,D2) # assign the returned array to angles
```
## apwp
[\[Essentials Chapter 16\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch16.html#x15-15600016) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#apwp.py)
The program **apwp** calculates paleolatitude, declination, inclination from a pole latitude and longitude based on the paper Besse and Courtillot (2002; see [Essentials Chapter 16](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch16.html#x15-15600016) for complete discussion). Here we will calculate the expected direction for 100 million year old rocks at a locality in La Jolla Cove (Latitude: 33$^{\circ}$N, Longitude 117$^{\circ}$W). Assume that we are on the North American Plate! (Note that there IS no option for the Pacific plate in the program **apwp**, and that La Jolla was on the North American plate until a few million years ago.)
Within the notebook we will call **pmag.apwp**.
```
help(pmag.apwp)
# here are the desired plate, latitude, longitude and age:
data=['NA',33,-117,100] # North American plate, lat and lon of San Diego at 100 Ma
pmag.apwp(data,print_results=True)
```
## b_vdm
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html#x15-1560002) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#b_vdm.py)
**b_vdm** converts geomagnetic field intensity observed at the earth's surface at a particular (paleo)latitude and calculates the Virtual \[Axial\] Dipole Moment (vdm or vadm). We will call **pmag.b_vdm()** directly from within the notebook. \[See also [**vdm_b**](#vdm_b).\]
Here we use the function **pmag.b_vdm()** to convert an estimated paleofield value of 33 $\mu$T obtained from a lava flow at 22$^{\circ}$ N latitude to the equivalent Virtual Dipole Moment (VDM) in Am$^2$.
```
help(pmag.b_vdm)
print ('%7.1f'%(pmag.b_vdm(33e-6,22)*1e-21),' ZAm^2')
pmag.b_vdm(33e-6,22)*1e-21
```
## bootams
[\[Essentials Chapter 13\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch13.html#x15-15600013) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#bootams.py)
**bootams** calculates bootstrap statistics for anisotropy tensor data in the form of:
x11 x22 x33 x12 x23 x13
It does this by selecting para-data sets and calculating the Hext average eigenparameters. It has an optional parametric bootstrap whereby the $\sigma$ for the data set as a whole is used to draw new para data sets. The bootstrapped eigenparameters are assumed to be Kent distributed and the program calculates Kent error ellipses for each set of eigenvectors. It also estimates the standard deviations of the bootstrapped eigenvalues.
**bootams** reads in a file with data for the six tensor elements (x11 x22 x33 x12 x23 x13) for specimens and calls **pmag.s_boot()** using a parametric or non-parametric bootstrap as desired. If all that is desired is the bootstrapped eigenparameters, **pmag.s_boot()** has all we need, but if the Kent ellipses are required, we can call **pmag.sbootpars()** to calculate these more derived products and print them out.
Note that every time the bootstrap program gets called, the output will be slightly different because this depends on calls to random number generators. If the answers are different by a lot, then the number of bootstrap calculations is too low. The number of bootstraps can be changed with the nb option below.
We can do all this from within the notebook as follows:
```
help(pmag.s_boot)
```
So we will:
- read in the AMS tensor data
- get the bootstrapped eigenparameters
- print out the formatted results
```
# Read in the AMS tensor data (x11 x22 x33 x12 x23 x13, one specimen per row).
Ss=np.loadtxt('data_files/bootams/bootams_example.dat')

# One row format and one header shared by both bootstrap runs below.
row_fmt='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'
header="""tau tau_sigma V_dec V_inc V_eta V_eta_dec V_eta_inc V_zeta V_zeta_dec V_zeta_inc
"""

def show_boot_results(Tmean,Vmean,bpars):
    """Print the three eigenvalue/eigenvector rows for one bootstrap run."""
    print(header)
    for j in range(3):
        stem='v%d'%(j+1)  # key stem for this eigenvector in bpars
        print(row_fmt%(Tmean[j],bpars['t%d_sigma'%(j+1)],
                       Vmean[j][0],Vmean[j][1],
                       bpars[stem+'_zeta'],bpars[stem+'_zeta_dec'],bpars[stem+'_zeta_inc'],
                       bpars[stem+'_eta'],bpars[stem+'_eta_dec'],bpars[stem+'_eta_inc']))

# Non-parametric bootstrap: eigenparameters, then Kent ellipse parameters.
Tmean,Vmean,Taus,Vs=pmag.s_boot(Ss)
bpars=pmag.sbootpars(Taus,Vs)
show_boot_results(Tmean,Vmean,bpars)

# Parametric bootstrap with 5000 resamplings.
Ss=np.loadtxt('data_files/bootams/bootams_example.dat')
Tmean,Vmean,Taus,Vs=pmag.s_boot(Ss,ipar=1,nb=5000)
bpars=pmag.sbootpars(Taus,Vs)
show_boot_results(Tmean,Vmean,bpars)
```
## cart_dir
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html#x15-1560002) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#cart_dir.py)
**cart_dir** converts cartesian coordinates (X,Y,Z) to polar coordinates (Declination, Inclination, Intensity). We will call **pmag.cart2dir()**.
```
help(pmag.cart2dir)
# read in data file from example file
cart=np.loadtxt('data_files/cart_dir/cart_dir_example.dat')
print ('Input: \n',cart) # print out the cartesian coordinates
# print out the results
dirs = pmag.cart2dir(cart)
print ("Output: ")
for d in dirs:
print ('%7.1f %7.1f %8.3e'%(d[0],d[1],d[2]))
```
## di_eq
[\[Essentials Appendix B\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ap2.html#equal_area)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#di_eq.py)
Paleomagnetic data are frequently plotted in equal area projection. PmagPy has several plotting options which do this (e.g., [**eqarea**](#eqarea)), but occasionally it is handy to be able to convert the directions to X,Y coordinates directly, without plotting them at all. Here is an example transcript of a session using the datafile di_eq_example.dat:
The program **di_eq** program calls **pmag.dimap()** which we can do from within a Jupyter notebook.
```
help(pmag.dimap)
DIs=np.loadtxt('data_files/di_eq/di_eq_example.dat').transpose() # load in the data
print (pmag.dimap(DIs[0],DIs[1])) # call the function
```
## di_geo
[\[Essentials Chapter 9\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch9.html)
and [Changing coordinate systems](http://earthref.org/MAGIC/books/Tauxe/Essentials/WebBook3ap1.html#Changing_coordinate_systems)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#di_geo.py)
Here we will convert D = 8.1,I = 45.2 from specimen coordinates to geographic-adjusted coordinates. The orientation of laboratory arrow on the specimen was: azimuth = 347; plunge = 27. To do this we will call **pmag.dogeo()**. There is also **pmag.dogeo_V** for arrays of data.
So let's start with **pmag.dogeo()**.
```
help(pmag.dogeo)
# Convert D = 8.1, I = 45.2 from specimen to geographic coordinates, with
# laboratory arrow azimuth = 347 and plunge = 27 (the example stated above).
# NOTE(review): the original cell passed dec=81, contradicting the documented
# example direction of D = 8.1; corrected here.
pmag.dogeo(dec=8.1,inc=45.2,az=347,pl=27)
```
Now let's check out the version that takes many data points at once.
```
help(pmag.dogeo_V)
indata=np.loadtxt('data_files/di_geo/di_geo_example.dat')
print (indata)
```
Let's take a look at these data in equal area projection: (see [eqarea](#eqarea) for details)
```
ipmag.plot_net(1)
ipmag.plot_di(dec=indata.transpose()[0],inc=indata.transpose()[1],color='red',edge='black')
```
The data are highly scattered and we hope that the geographic coordinate system looks better! To find out try:
```
decs,incs=pmag.dogeo_V(indata)
ipmag.plot_net(1)
ipmag.plot_di(dec=decs,inc=incs,color='red',edge='black')
```
These data are clearly much better grouped.
And here they are printed out.
```
print(np.column_stack([decs,incs]))
```
## di_rot
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#di_rot.py)
**di_rot** rotates dec inc pairs to a new origin. We can call **pmag.dodirot()** for single [dec,inc,Dbar,Ibar] data or **pmag.dodirot_V()** for an array of Dec, Inc pairs. We can use the data from the [di_geo](#di_geo) example and rotate the geographic coordinate data such that the center of the distribution is the principal direction.
We do it like this:
- read in a data set with dec inc pairs
- make an equal area projection of the data to remind us what they look like
- calculate the principal component with **pmag.doprinc()**
- rotate the data to the principal direction
- plot the rotated data in an equal area projection.
```
di_block=np.loadtxt('data_files/di_rot/di_rot_example.txt') # read in some data
ipmag.plot_net(1) # make the plot
ipmag.plot_di(di_block=di_block,title='geographic',color='red',edge='black')
```
Now we calculate the principal direction using the method described in the [goprinc](#goprinc) section.
```
princ=pmag.doprinc(di_block)
```
And note we use **pmag.dodirot_V** to do the rotation.
```
help(pmag.dodirot_V)
rot_block=pmag.dodirot_V(di_block,princ['dec'],princ['inc'])
rot_block
```
And of course look at what we have done!
```
ipmag.plot_net(1) # make the plot
ipmag.plot_di(di_block=rot_block,color='red',title='rotated',edge='black')
```
## di_tilt
[\[Essentials Chapter 9\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch9.html) [\[Changing coordinate systems\]](http://earthref.org/MAGIC/books/Tauxe/Essentials/WebBook3ap1.html#Changing_coordinate_systems)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#di_tilt.py)
**di_tilt** can rotate a direction of Declination = 5.3 and Inclination = 71.6 to “stratigraphic” coordinates, assuming the strike was 135 and the dip was 21. The convention in this program is to use the dip direction, which is to the “right” of this strike.
We can perform this calculation by calling **pmag.dotilt** or **pmag.dotilt_V()** depending on if we have a single point or an array to rotate.
```
help(pmag.dotilt)
help(pmag.dotilt_V)
# read in some data
data=np.loadtxt('data_files/di_tilt/di_tilt_example.dat') # load up the data
di_block=data[:,[0,1]] # let's look at the data first!
ipmag.plot_net(1)
ipmag.plot_di(di_block=di_block)
```
Now we can rotate them
```
Dt,It=pmag.dotilt_V(data) # rotate them
ipmag.plot_net(1) # and take another look
ipmag.plot_di(dec=Dt,inc=It)
```
Use the handy function **np.column_stack** to pair the decs and incs together
```
np.column_stack((Dt,It)) # if you want to see the output:
```
## di_vgp
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#di_vgp.py)
**di_vgp** converts directions (declination,inclination) to Virtual Geomagnetic Pole positions. This is the inverse of [**vgp_di**](#vgp_di).
To do so, we will call **pmag.dia_vgp()** from within the notebook.
```
help(pmag.dia_vgp)
data=np.loadtxt('data_files/di_vgp/di_vgp_example.dat') # read in some data
print (data)
```
The data are almost in the correct format, but there is no a95 field, so that will have to be inserted (as zeros).
```
# dia_vgp expects an a95 column, which the file lacks - use zeros.
a95=np.zeros(len(data))
a95
DIs=data[:,0:2]       # declination/inclination columns
LatLons=data[:,2:]    # site latitude/longitude columns
newdata=np.column_stack((DIs,a95,LatLons))  # rebuild with a95 in the middle
print (newdata)
vgps=np.array(pmag.dia_vgp(newdata)) # get a tuple with lat,lon,dp,dm, convert to array
print (vgps.transpose()) # print out the vgps
```
## dipole_pinc
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#dipole_pinc.py)
If we assume a geocentric axial dipole, we can calculate an expected inclination at a given latitude and that is what **dipole_pinc** does. It calls **pmag.pinc()** and so will we to find the expected inclination at a paleolatitude of 24$^{\circ}$S!
```
help(pmag.pinc)
# Expected GAD inclination at 24 S (negative latitude = southern hemisphere).
lat=-24
# Use the variable so the comment and the call cannot drift apart
# (the original passed the literal -24 and left `lat` unused).
pmag.pinc(lat)
```
Or as an array
```
lats=range(-90,100,10)
incs=pmag.pinc(lats)
plt.plot(incs,lats)
plt.ylim(100,-100)
plt.xlabel('Latitude')
plt.ylabel('Inclination')
plt.axhline(0,color='black')
plt.axvline(0,color='black');
```
## dipole_plat
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#dipole_plat.py)
**dipole_plat** is similar to [dipole_pinc](#dipole_pinc) but calculates the paleolatitude from the inclination. We will call **pmag.plat()**:
```
help(pmag.plat)
inc=42
pmag.plat(inc)
```
## dir_cart
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html#x15-1560002) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#dir_cart.py)
**pmag.dir2cart()** converts directions (Declination, Inclination, Intensity) to cartesian coordinates (X,Y,Z).
```
help(pmag.dir2cart)
# read in data file from example file
dirs=np.loadtxt('data_files/dir_cart/dir_cart_example.dat')
print ('Input: \n',dirs) # print out the input directions (dec, inc, intensity)
# convert to cartesian coordinates and print out the results
carts = pmag.dir2cart(dirs)
print ("Output: ")
for c in carts:
print ('%8.4e %8.4e %8.4e'%(c[0],c[1],c[2]))
```
## eigs_s
[\[Essentials Chapter 13\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch13.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#eigs_s.py)
This program converts eigenparameters to the six tensor elements. This is the inverse of [s_eigs](#s_eigs).
There is a function **ipmag.eigs_s()** which will do this in a notebook:
```
help(ipmag.eigs_s)
Ss=ipmag.eigs_s(infile="eigs_s_example.dat", dir_path='data_files/eigs_s')
for s in Ss:
print (s)
```
## eq_di
[\[Essentials Appendix B\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ap2.html#x21-227000B#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#eq_di.py)
Data are frequently published as equal area projections and not listed in data tables. These data can be digitized as x,y data (assuming the outer rim is unity) and converted to approximate directions with the program **eq_di**. To use this program, install a graph digitizer (GraphClick from http://www.arizona-software.ch/graphclick/ works on Macs).
Digitize the data from the equal area projection saved in the file eqarea.png in the eq_di directory. You should only work on one hemisphere at a time (upper or lower) and save each hemisphere in its own file. Then you can convert the X,Y data to approximate dec and inc data - the quality of the data depends on your care in digitizing and the quality of the figure that you are digitizing.
Here we will try this out on a datafile already prepared, which are the digitized data from the lower hemisphere of a plot. You check your work with [eqarea](#eqarea).
To do this in a notebook, we can use **pmag.doeqdi()**.
```
help(pmag.doeqdi)
# read in the data into an array
# x is assumed first column, y, second
xy=np.loadtxt('data_files/eq_di/eq_di_example.dat').transpose()
decs,incs=pmag.doeqdi(xy[0],xy[1])
ipmag.plot_net(1)
ipmag.plot_di(dec=decs,inc=incs,color='r',edge='black')
```
## fcalc
**pmag.fcalc()** returns the values of an F-test from an F table.
```
help(pmag.fcalc)
```
## fisher
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#fisher.py)
**fisher** draws $N$ directions from a Fisher distribution with specified $\kappa$ and a vertical mean. (For other directions see [fishrot](#fishrot)). To do this, we can just call the function **pmag.fshdev()** $N$ times.
```
help(pmag.fshdev)
# set the number of draws, N, and the Fisher concentration parameter kappa
N,kappa=100,20
# a basket to put our fish in
fish=[]
# draw N Fisherian deviates (dec, inc) about a vertical mean
# (the original reused the loop index `i` as the inclination, shadowing it)
for _ in range(N):
    dec,inc=pmag.fshdev(kappa)
    fish.append([dec,inc])
ipmag.plot_net(1)
ipmag.plot_di(di_block=fish,color='r',edge='black')
```
## fishrot
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#fishrot.py)
This program is similar to [fisher](#fisher), but allows you to specify the mean direction.
This has been implemented as **ipmag.fishrot()**.
```
help(ipmag.fishrot)
rotdi=ipmag.fishrot(k=50,n=5,dec=33,inc=41)
for di in rotdi:
print ('%7.1f %7.1f'%(di[0],di[1]))
ipmag.plot_net(1)
ipmag.plot_di(di_block=rotdi)
```
## flip
Fisher statistics requires unimodal data (all in one direction with no reversals) but many paleomagnetic data sets are bimodal. To flip bimodal data into a single mode, we can use **pmag.flip( )**. This function calculates the principal direction and flips all the 'reverse' data to the 'normal' direction along the principal axis.
```
help(pmag.flip)
#read in the data into an array
vectors=np.loadtxt('data_files/eqarea_ell/tk03.out').transpose()
di_block=vectors[0:2].transpose() # decs are di_block[0], incs are di_block[1]
# flip the reverse directions to their normal antipodes
normal,flipped=pmag.flip(di_block)
# and plot them up
ipmag.plot_net(1)
ipmag.plot_di(di_block=di_block,color='red')
ipmag.plot_di(di_block=flipped,color='b')
```
## gaussian
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#gaussian.py)
This program generates sets of data drawn from a normal distribution with a given mean and standard deviation. It is just a wrapper for a call to **pmag.gaussdev()** which just calls **numpy.random.normal()** which we could do, but we would have to import it, so it is easiest just to call the **pmag** version which we have already imported.
```
help(pmag.gaussdev)
N=1000
bins=100
norm=pmag.gaussdev(10,3,N)
plt.hist(norm,bins=bins,color='black',histtype='step',density=True)
plt.xlabel('Gaussian Deviates')
plt.ylabel('Frequency');
# alternatively we can plot with ipmag.histplot:
ipmag.histplot(data=norm, xlab='Gaussian Deviates', save_plots=False, norm=-1)
```
## gobing
[\[Essentials Chapter 12\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch12.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#gobing.py)
**gobing** calculates Bingham statistics for sets of directional data (see the section for eqarea_ell in the PmagPy_plots_analysis documentation for nice examples). We do this by calling **pmag.dobingham()**.
```
help(pmag.dobingham)
di_block=np.loadtxt('data_files/gobing/gobing_example.txt')
pmag.dobingham(di_block)
```
## gofish
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#gofish.py)
**gofish** calculates Fisher statistics for sets of directional data. (see the section for eqarea_ell in the PmagPy_plots_analysis documentation for nice examples).
This can be done with **ipmag.fisher_mean()**.
```
help(ipmag.fisher_mean)
di_block=np.loadtxt('data_files/gofish/fishrot.out')
ipmag.fisher_mean(di_block=di_block)
```
### fisher mean on pandas DataFrames
There is also a function **pmag.dir_df_fisher_mean()** that calculates Fisher statistics on a Pandas DataFrame with directional data
```
help(pmag.dir_df_fisher_mean)
# make the data frame
dir_df=pd.read_csv('data_files/gofish/fishrot.out',delim_whitespace=True, header=None)
dir_df.columns=['dir_dec','dir_inc']
pmag.dir_df_fisher_mean(dir_df)
```
## gokent
[\[Essentials Chapter 12\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch12.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#gokent.py)
With **gokent** we can calculate Kent statistics on sets of directional data (see the section for eqarea_ell in the PmagPy_plots_analysis documentation for nice examples).
This calls **pmag.dokent()** (see also **eqarea_ell** example)
```
help(pmag.dokent)
di_block=np.loadtxt('data_files/gokent/gokent_example.txt')
pmag.dokent(di_block,di_block.shape[0])
```
## goprinc
[\[Essentials Chapter 12\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch12.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#goprinc.py)
**goprinc** calculates the principal directions (and their eigenvalues) for sets of paleomagnetic vectors. It doesn't do any statistics on them, unlike the other programs.
We will call **pmag.doprinc()**:
```
help(pmag.doprinc)
di_block=np.loadtxt('data_files/goprinc/goprinc_example.txt')
pmag.doprinc(di_block)
```
## igrf
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#igrf.py)
This program gives geomagnetic field vector data for a specified place at a specified time. It has many built in models including IGRFs, GUFM and several archeomagnetic models. It calls the function **ipmag.igrf()** for this so that is what we will do.
```
help(ipmag.igrf)
```
We will calculate the field for San Diego from 3000 BCE to 1950 in 50 year increments using the hfm.OL1.A1 model of Constable et al. (2016, doi: 10.1016/j.epsl.2016.08.015).
```
# make a list of desired dates
dates=range(-3000,1950,50) # list of dates in +/- Common Era
mod = 'hfm10k' # choose the desired model (the hfm.OL1.A1 model referenced above)
lat,lon,alt=33,-117,0 # desired latitude, longitude and altitude
Vecs=[] # list for Dec,Inc,Int outputs
for date in dates: # step through the dates
Vecs.append(ipmag.igrf([date,alt,lat,lon],mod=mod)) # append to list
vector_df = pd.DataFrame(Vecs) # make it into a Pandas dataframe
vector_df.columns=['dec','inc','int']
vector_df['vadms']=pmag.b_vdm(vector_df.int.values*1e-9, lat) # calculate the VADMs (intensity nT -> T)
vector_df['dec_adj']=vector_df['dec']
vector_df.loc[vector_df.dec>180,['dec_adj']]=vector_df.dec-360 # adjust declinations to be -180 => 180
fig=plt.figure(1,figsize=(7,9)) # set up the figure
fig.add_subplot(411) # make 4 rows of plots, this is the first
plt.plot(dates,vector_df.dec_adj) # plot the adjusted declinations
plt.ylabel('Declination ($^{\circ}$)')
plt.title('Geomagnetic field evaluated at Lat: '+str(lat)+' / Lon: '+str(lon))
fig.add_subplot(412) # this is the second
plt.plot(dates,vector_df.inc) # plot the inclinations
plt.ylabel('Inclination ($^{\circ}$)')
fig.add_subplot(413)
plt.plot(dates,vector_df.int*1e-3) # plot the intensites (in uT instead of nT)
plt.ylabel('Intensity ($\mu$T)')
fig.add_subplot(414) # plot the VADMs
plt.plot(dates,vector_df.vadms*1e-21) # plot as ZAm^2
plt.ylabel('VADM (ZAm$^2$)')
plt.xlabel('Dates (CE)');
```
## incfish
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#incfish.py)
You can't get a meaningful average inclination from inclination only data because of the exponential relationship between inclinations and the true mean inclination for Fisher distributions (except exactly at the pole and the equator). So, McFadden and Reid (1982, doi: 10.1111/j.1365-246X.1982.tb04950.x) developed a maximum likelihood estimate of the true mean inclination in the absence of declination data. **pmag.doincfish()** is an implementation of that concept.
```
help(pmag.doincfish)
incs=np.loadtxt('data_files/incfish/incfish_example_inc.dat')
pmag.doincfish(incs)
```
## pca
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#pca.py)
**pca** calculates best-fit lines, planes or Fisher means through selected treatment steps along with Kirschvink (1980, doi: 10.1111/j.1365-246X.1980.tb02601.x) MAD values. The file format is a simple space delimited file with specimen name, treatment step, intensity, declination and inclination. **pca.py** calls **pmag.domean()**, so that is what we will do here.
```
help(pmag.domean)
# read in data as space delimited file
# (columns: specimen name, treatment step, intensity, declination, inclination)
data=pd.read_csv('data_files/pca/pca_example.txt',\
delim_whitespace=True,header=None)
# we need to add a column for quality ('g' = good)
data['quality']='g'
# strip off the specimen name and reorder records
# from: int,dec,inc to: dec,inc,int
data=data[[1,3,4,2,'quality']].values.tolist()
# fit a best-fit line (DE-BFL) through steps 1 to 10
# NOTE(review): 1 and 10 look like treatment-step bounds for the fit - confirm against help(pmag.domean)
pmag.domean(data,1,10,'DE-BFL')
```
## pt_rot
[\[Essentials Chapter 16\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch16.html)
[\[Essentials Appendix A.3.5\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ap1.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#pt_rot.py)
This program finds rotation poles for a specified location, age and destination plate, then rotates the point into the destination plate coordinates using the rotations and methods described in Essentials Appendix A.3.5.
This can be done for you using the function **frp.get_pole()** in the finite rotation pole module called **pmagpy.frp**. You then call **pmag.pt_rot()** to do the rotation. Let's do this to rotate the Cretaceous poles from Europe (same data as in the polemap_magic example) into South African coordinates.
```
# need to load this special module
import pmagpy.frp as frp
help(frp.get_pole)
Prot=frp.get_pole('eur',100)
Prot
help(pmag.pt_rot)
data=pd.read_csv('data_files/polemap_magic/locations.txt',sep='\t',header=1)
lats=data['pole_lat'].values
lons=data['pole_lon'].values
RLats,RLons=rot_pts=pmag.pt_rot(Prot,lats,lons)
```
And now we can plot them using **pmagplotlib.plot_map()**
```
Opts={}
Opts['sym']='wo' # sets the symbol
Opts['symsize']=10
Opts['proj']='ortho'
Opts['edge']='black'
Opts['lat_0']=90
Opts['details']={}
Opts['details']['fancy']=True # warning : this option takes a few minutes
if has_cartopy:
plt.figure(1,(6,6)) # optional - make a map
pmagplotlib.plot_map(1, RLats, RLons, Opts)
elif has_basemap:
plt.figure(1,(6,6)) # optional - make a map
pmagplotlib.plot_map_basemap(1, RLats, RLons, Opts)
```
## s_eigs
[\[Essentials Chapter 13\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch13.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#s_eigs.py)
This program converts the six tensor elements to eigenparameters - the inverse of [eigs_s](#eigs_s).
We can call the function **pmag.doseigs()** from the notebook.
```
help(pmag.doseigs)
Ss=np.loadtxt('data_files/s_eigs/s_eigs_example.dat')
for s in Ss:
tau,V=pmag.doseigs(s)
print ('%f %8.2f %8.2f %f %8.2f %8.2f %f %8.2f %8.2f'%\
(tau[2],V[2][0],V[2][1],tau[1],V[1][0],V[1][1],tau[0],V[0][0],V[0][1]))
```
## s_geo
[\[Essentials Chapter 13\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch13.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#s_geo.py)
**s_geo** takes the 6 tensor elements in specimen coordinates and applies the rotation similar to [**di_geo**](#di_geo). To do this we will call **pmag.dosgeo()** from within the notebook.
```
help(pmag.dosgeo)
Ss=np.loadtxt('data_files/s_geo/s_geo_example.dat')
for s in Ss:
print(pmag.dosgeo(s[0:6],s[6],s[7]))
```
## s_hext
[\[Essentials Chapter 13\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch13.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#s_hext.py)
**s_hext** calculates Hext (1963, doi: 10.2307/2333905) statistics for anisotropy data in the six tensor element format.
It calls **pmag.dohext()**.
```
help(pmag.dohext)
```
We are working with data that have no sigmas attached to them and want to average all the values in the file together. Let's look at the rotated data from the [**s_geo**](#s_geo) example.
```
# read in the data
Ss=np.loadtxt('data_files/s_geo/s_geo_example.dat')
# make a container for the rotated S values
SGeos=[]
for s in Ss:
SGeos.append(pmag.dosgeo(s[0:6],s[6],s[7]))
nf,sigma,avs=pmag.sbar(SGeos) # get the average over all the data
hpars=pmag.dohext(nf,sigma,avs)
print(hpars)
```
## s_magic
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#s_magic.py)
NEED TO ADD THIS ONE....
## s_tilt
[\[Essentials Chapter 13\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch13.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#s_tilt.py)
**s_tilt** takes the 6 tensor elements in geographic coordinates and applies the rotation similar to [**di_tilt**](#di_tilt) into stratigraphic coordinates. It calls **pmag.dostilt()**. But be careful! **s_tilt.py** (the command line program) assumes that the bedding info is the strike, with the dip to the right of strike unlike **pmag.dostilt** which assumes that the azimuth is the dip direction.
```
help(pmag.dostilt)
# note that the data in this example are Ss and strike and dip (not bed_az,bed_pl)
Ss=np.loadtxt('data_files/s_tilt/s_tilt_example.dat')
for s in Ss:
print(pmag.dostilt(s[0:6],s[6]+90.,s[7])) # make the bedding azimuth dip direction, not strike.
```
## scalc
[\[Essentials Chapter 14\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch14.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#scalc.py)
This program reads in data files with vgp_lon, vgp_lat and optional kappa, N, and site latitude.
It allows some filtering based on the requirements of the study, such as:
- Fisher k cutoff
- VGP latitudinal cutoff
- Vandamme (1994, doi: 10.1016/0031-9201(94)90012-4) iterative cutoff
- flipping the reverse mode to antipodes
- rotating the principal direction to the spin axis
- bootstrap confidence bounds
- optionally calculates the scatter (Sp or Sf of McElhinny & McFadden, 1997) of VGPs
with correction for within site scatter.
The filtering is just what **Pandas** was designed for, so we can call **pmag.scalc_vgp_df()** which works on a suitably constructed **Pandas** DataFrame.
```
help(pmag.scalc_vgp_df)
```
To just calculate the value of S (without the within site scatter) we read in a data file and attach the correct headers to it depending on what is in it.
```
# Read in the VGP data; the file may have 2 columns (lon, lat) or 5.
vgp_df=pd.read_csv('data_files/scalc/scalc_example.txt',delim_whitespace=True,header=None)
if len(list(vgp_df.columns))==2:
    # lon/lat only: pad the remaining required columns with zeros
    vgp_df.columns=['vgp_lon','vgp_lat']
    # NOTE(review): this branch creates 'dir_k'/'dir_n' while the 5-column
    # branch uses 'dir_n_samples' - confirm which name scalc_vgp_df expects
    vgp_df['dir_k'],vgp_df['dir_n'],vgp_df['lat']=0,0,0
else:
    vgp_df.columns=['vgp_lon','vgp_lat','dir_k','dir_n_samples','lat']
# (removed a stray no-op expression `pmag.scalc_vgp_df` that did nothing here)
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
```
To apply a cutoff for the Fisher k value, we just filter the DataFrame prior to calculating S_B. Let's filter for kappa>50
```
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,kappa=50)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
```
To apply the Vandamme (1994) approach, we set v to True
```
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,v=True)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
```
To flip the "reverse" directions, we set anti to 1
```
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,anti=True)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
```
And, to do relative to the spin axis, set spin to True:
```
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,spin=True)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
```
## scalc_magic
[\[Essentials Chapter 14\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch14.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#scalc_magic.py)
This program does the same thing as [**scalc**](#scalc), but reads in a MagIC formatted file. So, we can do that easy-peasy.
```
vgp_df=pd.read_csv('data_files/scalc_magic/sites.txt',sep='\t',header=1)
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,anti=True)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
vgp_df=pd.read_csv('data_files/scalc_magic/sites.txt',sep='\t',header=1)
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,anti=True,spin=True)
print(N, '%7.1f %7.1f ' % (S_B, cutoff))
```
## separate_directions
Like [pmag.flip( )](#flip), **pmag.separate_directions** divides a directional data set into two modes. Unlike [pmag.flip( )](#flip), it returns the two separate modes (e.g., normal and reverse)
```
help(pmag.separate_directions)
#read in the data into an array
vectors=np.loadtxt('data_files/eqarea_ell/tk03.out').transpose()
di_block=vectors[0:2].transpose() # decs are di_block[0], incs are di_block[1]
# split the directions into their two modes (nothing is flipped here,
# unlike pmag.flip - see the flip section above)
normal,reverse=pmag.separate_directions(di_block)
# and plot them up
ipmag.plot_net(1)
ipmag.plot_di(di_block=normal,color='red')
ipmag.plot_di(di_block=reverse,color='b')
```
## squish
[\[Essentials Chapter 7\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch7.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#squish.py)
This program reads in dec/inc data and "squishes" the inclinations using the formula from King
(1955, doi: 10.1111/j.1365-246X.1955.tb06558.x) $\tan(I_o)=flat \tan(I_f)$. \[See also [unsquish](#unsquish)\].
We can call **pmag.squish()** from within the notebook.
```
help(pmag.squish)
di_block=np.loadtxt('data_files/squish/squish_example.dat').transpose()
decs=di_block[0]
incs=di_block[1]
flat=0.4
fincs=pmag.squish(incs,flat)
ipmag.plot_net(1)
ipmag.plot_di(dec=decs,inc=incs,title='Original',color='blue')
ipmag.plot_net(2)
ipmag.plot_di(dec=decs,inc=fincs,title='Squished',color='red')
```
## stats
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html#x15-156000813) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#/stats.py)
This program just calculates the N, mean, sum, sigma and sigma % for data. There are numerous ways to do that in **Numpy**, so let's just use those.
```
data=np.loadtxt('data_files/gaussian/gauss.out')
print (data.shape[0],data.mean(),data.sum(),data.std())
```
## strip_magic
[\[Essentials Chapter 15\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch15.html)
[\[MagIC Database\]](https://earthref.org/MagIC)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#strip_magic.py)
We can do this easily using the wonders of **Pandas** and **matplotlib** as demonstrated here.
```
# read in the data (MagIC tab-delimited file; the real header is on row 1)
data = pd.read_csv('data_files/strip_magic/sites.txt', sep='\t', header=1)
# see what's there
data.columns
# you might have to use **df.dropna()** to clean off unwanted NaN lines or other data massaging
# but not for this example
plt.figure(1, (10, 4))  # make the figure
plt.plot(data.age, data.vgp_lat, 'b-')  # plot as blue line
plt.plot(data.age, data.vgp_lat, 'ro', markeredgecolor="black")  # plot as red dots with black rims
plt.xlabel('Age (Ma)')  # label the time axis
plt.ylabel(r'VGP Lat.$^{\circ}$')  # raw string: '\c' is an invalid escape in a plain string
plt.ylim(-90, 90)  # set the plot limits
plt.axhline(color='black');  # put on a zero line
```
## sundec
[\[Essentials Chapter 9\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch9.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#sundec.py)
Paleomagnetists often use the sun to orient their cores, especially if the sampling site is strongly magnetic and would deflect the magnetic compass. The information required is: where are you (e.g., latitude and longitude), what day is it, what time is it in Greenwich Mean Time (a.k.a. Universal Time) and where is the sun (e.g., the antipode of the angle the shadow of a gnomon makes with the desired direction)?
This calculation is surprisingly accurate and is implemented in the function
**pmag.dosundec()**.
```
help(pmag.dosundec)
```
Say you (or your elderly colleague) were located at 35$^{\circ}$ N and 33$^{\circ}$ E. The local time was three hours ahead of Universal Time. The shadow angle for the drilling direction was 68$^{\circ}$ measured at 16:09 on May 23, 1994. **pmag.dosundec()** requires a dictionary with the necessary information:
```
sundata={'delta_u':3,'lat':35,'lon':33,\
'date':'1994:05:23:16:9','shadow_angle':68}
print ('%7.1f'%(pmag.dosundec(sundata)))
```
## tk03
[\[Essentials Chapter 16\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch16.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#tk03.py)
Sometimes it is useful to generate a distribution of synthetic geomagnetic field vectors that you might expect to find from paleosecular variation of the geomagnetic field. The program **tk03** generates distributions of field vectors from the PSV model of Tauxe and Kent (2004, doi: 10.1029/145GM08). This program was implemented for notebook use as **ipmag.tk03()**. \[See also [**find_ei**](#find_ei)\].
```
help(ipmag.tk03)
di_block=ipmag.tk03(lat=30)
ipmag.plot_net(1)
ipmag.plot_di(di_block=di_block,color='red',edge='black')
```
## uniform
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#uniform.py)
It is at times handy to be able to generate a uniformly distributed set of directions (or geographic locations). This is done using a technique described by Fisher et al. (Fisher, N. I., Lewis, T., & Embleton, B. J. J. (1987). Statistical Analysis of Spherical Data. Cambridge: Cambridge University Press). We do this by calling **pmag.get_unf()**.
```
help(pmag.get_unf)
di_block=pmag.get_unf()
ipmag.plot_net(1)
ipmag.plot_di(di_block=di_block,color='red',edge='black')
```
## unsquish
[\[Essentials Chapter 7\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch7.html#x15-156000813)
[\[Essentials Chapter 16\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch16.html#x15-156000813)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#unsquish.py)
This program is just the inverse of [**squish**](#squish) in that it takes "squished" data and "unsquishes" them, assuming a King (1955, doi: 10.1111/j.1365-246X.1955.tb06558.x) relationship: $\tan(I_o)=flat \tan(I_f)$. So, $\tan(I_f) = \tan(I_o)/flat$.
It calls **pmag.unsquish()**.
```
help(pmag.unsquish)
di_block=np.loadtxt('data_files/unsquish/unsquish_example.dat').transpose()
decs=di_block[0]
incs=di_block[1]
flat=.4
fincs=pmag.unsquish(incs,flat)
ipmag.plot_net(1)
ipmag.plot_di(dec=decs,inc=incs,title='Squished',color='red')
ipmag.plot_net(2)
ipmag.plot_di(dec=decs,inc=fincs,title='Unsquished',color='blue')
```
## vdm_b
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html#x15-1560002) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#vdm_b.py)
**vdm_b** is the inverse of [**b_vdm**](#b_vdm) in that it converts a virtual \[axial\] dipole moment (vdm or vadm) to a predicted geomagnetic field intensity observed at the earth's surface at a particular (paleo)latitude. This program calls **pmag.vdm_b()**.
```
help(pmag.vdm_b)
print ('%7.1f microtesla'%(pmag.vdm_b(7.159e22,22)*1e6))
```
## vector_mean
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html#x15-1560002) [\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#vector_mean.py)
**vector_mean** calculates the vector mean for a set of vectors in polar coordinates (e.g., declination, inclination, intensity). This is similar to the Fisher mean ([**gofish**](#gofish)) but uses vector length instead of unit vectors. It calls
calls **pmag.vector_mean()**.
```
help(pmag.vector_mean)
data=np.loadtxt('data_files/vector_mean/vector_mean_example.dat')
Dir,R=pmag.vector_mean(data)
print (('%i %7.1f %7.1f %f')%(data.shape[0],Dir[0],Dir[1],R))
```
## vgp_di
[\[Essentials Chapter 2\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch2.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#vgp_di.py)
We use **vgp_di** to convert virtual geomagnetic pole positions to predicted directions at a given location. \[See also [**di_vgp**](#di_vgp)\].
This program uses the function **pmag.vgp_di()**.
```
help(pmag.vgp_di)
d,i=pmag.vgp_di(68,191,33,243)
print ('%7.1f %7.1f'%(d,i))
```
## watsons_f
[\[Essentials Chapter 11\]](https://earthref.org/MagIC/books/Tauxe/Essentials/WebBook3ch11.html)
[\[command line version\]](https://pmagpy.github.io/PmagPy-cli.html#watsons_f.py)
There are several different ways of testing whether two sets of directional data share a common mean. One popular (although perhaps not the best) way is to use Watson's F test (Watson, 1956, doi: 10.1111/j.1365-246X.1956.tb05560.x). \[See also [**watsons_v**](#watsons_v) or Lisa Tauxe's bootstrap way: [**common_mean**](#common_mean)\].
If you still want to use Watson's F, then try
**pmag.watsons_f()** for this.
```
help(pmag.watsons_f)
DI1=np.loadtxt('data_files/watsons_f/watsons_f_example_file1.dat')
DI2=np.loadtxt('data_files/watsons_f/watsons_f_example_file2.dat')
F,Fcrit=pmag.watsons_f(DI1,DI2)
print ('%7.2f %7.2f'%(F,Fcrit))
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
%matplotlib inline
class MosaicDataset1(Dataset):
    """Mosaic dataset.

    Wraps pre-built mosaic images together with their class labels and the
    index of the foreground (label-carrying) patch inside each mosaic.

    Args:
        mosaic_list: sequence of mosaic images, one entry per sample.
        mosaic_label: sequence of class labels aligned with ``mosaic_list``.
        fore_idx: sequence giving, for each mosaic, the position of the
            foreground patch.
    """
    def __init__(self, mosaic_list, mosaic_label,fore_idx):
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx
    def __len__(self):
        # number of samples in the dataset
        return len(self.label)
    def __getitem__(self, idx):
        # returns (mosaic image, label, foreground-patch index)
        return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]
# data = [{"mosaic_list":mosaic_list_of_images, "mosaic_label": mosaic_label, "fore_idx":fore_idx}]
# np.save("mosaic_data.npy",data)
# Load the pre-generated mosaic dataset: a one-element object array whose
# single entry is a dict with the images, labels and foreground indices.
data = np.load("type4_data.npy",allow_pickle=True)
mosaic_list_of_images = data[0]["mosaic_list"]
mosaic_label = data[0]["mosaic_label"]
fore_idx = data[0]["fore_idx"]
batch = 250  # mini-batch size for training/evaluation
msd = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
class Focus_deep(nn.Module):
    """Deep focus (attention) network, averaged at the zeroth layer.

    For each of the K patches of a mosaic it computes a scalar attention
    score and a 300-d feature vector, then returns the attention-weighted
    average of the patch features together with the softmax-normalised
    attention weights.

    Args:
        inputs: dimensionality of one elemental patch.
        output: number of attention scores per patch (1 here).
        K: number of patches per mosaic.
        d: patch dimensionality used for flat-input slicing (kept for
            interface compatibility; unused when z is [batch, K, inputs]).
    """

    def __init__(self, inputs, output, K, d):
        super(Focus_deep, self).__init__()
        self.inputs = inputs
        self.output = output
        self.K = K
        self.d = d
        self.linear1 = nn.Linear(self.inputs, 300)
        self.linear2 = nn.Linear(300, self.output)

    def forward(self, z):
        # z: assumed shape [batch, K, inputs] -- TODO confirm with caller.
        batch = z.shape[0]
        x = torch.zeros([batch, self.K], dtype=torch.float64)
        y = torch.zeros([batch, 300], dtype=torch.float64)
        features = torch.zeros([batch, self.K, 300], dtype=torch.float64)
        # NOTE(review): device is hard-coded; this module only runs on CUDA.
        x, y = x.to("cuda"), y.to("cuda")
        features = features.to("cuda")
        for i in range(self.K):
            alp, ftrs = self.helper(z[:, i])
            x[:, i] = alp[:, 0]
            features[:, i] = ftrs
        x = F.softmax(x, dim=1)  # attention weights (alphas)
        for i in range(self.K):
            x1 = x[:, i]
            # accumulate the attention-weighted patch features
            y = y + torch.mul(x1[:, None], features[:, i])
        return y, x

    def helper(self, x):
        """Return (raw attention score, tanh feature vector) for one patch."""
        x = self.linear1(x)
        # F.tanh is deprecated; torch.tanh is the identical replacement.
        x1 = torch.tanh(x)
        x = F.relu(x)
        x = self.linear2(x)
        return x, x1
class Classification_deep(nn.Module):
    """Classification head applied to the attention-averaged features.

    A two-layer MLP: inputs -> 200 (ReLU) -> output logits.
    """

    def __init__(self, inputs, output):
        super(Classification_deep, self).__init__()
        self.inputs = inputs
        self.output = output
        self.linear1 = nn.Linear(self.inputs, 200)
        self.linear2 = nn.Linear(200, self.output)

    def forward(self, x):
        hidden = F.relu(self.linear1(x))
        logits = self.linear2(hidden)
        return logits
def calculate_attn_loss(dataloader, what, where, criter):
    """Evaluate the where/what network pair over a dataloader.

    Runs both networks in eval mode with gradients disabled, accumulates the
    criterion loss, and collects attentions, labels, predictions and
    foreground indices for the FTPT/FFPT analysis.

    Args:
        dataloader: yields (inputs, labels, foreground_idx) batches.
        what: classification network applied to the attention-averaged features.
        where: focus (attention) network.
        criter: loss criterion (e.g. nn.CrossEntropyLoss).

    Returns:
        (mean loss per batch, analysis list from analyse_data)
    """
    what.eval()
    where.eval()
    r_loss = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    n_batches = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels, fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"), labels.to("cuda")
            avg, alpha = where(inputs)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss = criter(outputs, labels)
            r_loss += loss.item()
            n_batches = i + 1
    alphas = np.concatenate(alphas, axis=0)
    pred = np.concatenate(pred, axis=0)
    lbls = np.concatenate(lbls, axis=0)
    fidices = np.concatenate(fidices, axis=0)
    analysis = analyse_data(alphas, lbls, pred, fidices)
    # BUG FIX: previously divided by the last batch *index* (i == count-1),
    # over-stating the mean loss; divide by the number of batches instead.
    return r_loss / n_batches, analysis
def analyse_data(alphas, lbls, predicted, f_idx):
    """Build the FTPT/FFPT analysis counts for one evaluation pass.

    Returns [ftpt, ffpt, ftpf, ffpf, amth, alth] where the first four count
    the (focus correct?, prediction correct?) combinations and the last two
    count samples whose maximum attention weight is >= 0.5 vs. below it.
    """
    ftpt = ffpt = ftpf = ffpf = 0
    amth = alth = 0
    for j in range(len(predicted)):
        focus = np.argmax(alphas[j])
        # did the attention concentrate (>= 0.5) on a single patch?
        if alphas[j][focus] >= 0.5:
            amth += 1
        else:
            alth += 1
        focus_ok = (focus == f_idx[j])
        pred_ok = (predicted[j] == lbls[j])
        if focus_ok and pred_ok:
            ftpt += 1
        elif pred_ok:
            ffpt += 1
        elif focus_ok:
            ftpf += 1
        else:
            ffpf += 1
    return [ftpt, ffpt, ftpf, ffpf, amth, alth]
# Repeat the whole experiment number_runs times with different seeds and
# record the final FTPT/FFPT breakdown of each run.
number_runs = 20
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"])
for n in range(number_runs):
    print("--"*40)
    # instantiate focus and classification Model (seeded per run for reproducibility)
    torch.manual_seed(n)
    where = Focus_deep(2,1,9,2).double()
    torch.manual_seed(n)
    what = Classification_deep(300,3).double()
    where = where.to("cuda")
    what = what.to("cuda")
    # instantiate one optimizer per network
    optimizer_where = optim.Adam(where.parameters(),lr =0.01)
    optimizer_what = optim.Adam(what.parameters(), lr=0.01)
    criterion = nn.CrossEntropyLoss()
    acti = []
    analysis_data = []
    loss_curi = []
    epochs = 1000  # upper bound; training stops early once the loss is small
    # calculate zeroth epoch loss and FTPT values (before any training)
    running_loss,anlys_data = calculate_attn_loss(train_loader,what,where,criterion)
    loss_curi.append(running_loss)
    analysis_data.append(anlys_data)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    # training starts
    for epoch in range(epochs): # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        what.train()
        where.train()
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            inputs, labels,_ = data
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients of both networks
            optimizer_where.zero_grad()
            optimizer_what.zero_grad()
            # forward + backward + optimize (gradients flow through both nets)
            avg, alpha = where(inputs)
            outputs = what(avg)
            loss = criterion(outputs, labels)
            # print statistics
            running_loss += loss.item()
            loss.backward()
            optimizer_where.step()
            optimizer_what.step()
        # re-evaluate over the full loader to log loss and FTPT per epoch
        running_loss,anls_data = calculate_attn_loss(train_loader,what,where,criterion)
        analysis_data.append(anls_data)
        print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
        loss_curi.append(running_loss) #loss per epoch
        if running_loss<=0.01:
            break  # early stop once the training loss is essentially zero
    print('Finished Training run ' +str(n))
    analysis_data = np.array(analysis_data)
    # keep only the final epoch's [ftpt, ffpt, ftpf, ffpf];
    # /30 presumably converts counts over 3000 samples to percentages -- TODO confirm
    FTPT_analysis.loc[n] = analysis_data[-1,:4]/30
# Final accuracy check of the last run's networks over the training set.
correct = 0
total = 0
with torch.no_grad():
    for data in train_loader:
        images, labels,_ = data
        images = images.double()
        images, labels = images.to("cuda"), labels.to("cuda")
        avg, alpha = where(images)
        outputs = what(avg)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 3000 train images: %d %%' % ( 100 * correct / total))
# plt.figure(figsize=(6,6))
# plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0],label="ftpt")
# plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1],label="ffpt")
# plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2],label="ftpf")
# plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3],label="ffpf")
# plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.plot(loss_curi)
np.mean(np.array(FTPT_analysis),axis=0)
FTPT_analysis.to_csv("synthetic_first_300_200.csv",index=False)
```
```
FTPT_analysis
```
| github_jupyter |
Classical probability distributions can be written as a stochastic vector, which can be transformed to another stochastic vector by applying a stochastic matrix. In other words, the evolution of stochastic vectors can be described by a stochastic matrix.
Quantum states also evolve and their evolution is described by unitary matrices. This leads to some interesting properties in quantum computing. Unitary evolution is true for a closed system, that is, a quantum system perfectly isolated from the environment. This is not the case in the quantum computers we have today: these are open quantum systems that evolve differently due to uncontrolled interactions with the environment. In this notebook, we take a glimpse at both types of evolution.
# Unitary evolution
A unitary matrix has the property that its conjugate transpose is its inverse. Formally, it means that a matrix $U$ is unitary if $UU^\dagger=U^\dagger U=\mathbb{1}$, where $^\dagger$ stands for conjugate transpose, and $\mathbb{1}$ is the identity matrix. A quantum computer is a machine that implements unitary operations.
As an example, we have seen the NOT operation before, which is performed by the X gate in a quantum computer. While the generic discussion on gates will only occur in a subsequent notebook, we can study the properties of the X gate. Its matrix representation is $X = \begin{bmatrix} 0 & 1\\ 1 & 0\end{bmatrix}$. Let's check if it is indeed unitary:
```
import numpy as np
X = np.array([[0, 1], [1, 0]])
# U is unitary iff U U-dagger = U-dagger U = I; verify both products for X.
print("XX^dagger")
print(X.dot(X.T.conj()))
print("X^daggerX")
print(X.T.conj().dot(X))
```
It looks like a legitimate unitary operation. The unitary nature ensures that the $l_2$ norm is preserved, that is, quantum states are mapped to quantum states.
```
print("The norm of the state |0> before applying X")
zero_ket = np.array([[1], [0]])
print(np.linalg.norm(zero_ket))
print("The norm of the state after applying X")
print(np.linalg.norm(X.dot(zero_ket)))
```
Furthermore, since the unitary operation is a matrix, it is linear. Measurements are also represented by matrices. These two observations imply that everything a quantum computer implements is actually linear. If we want to see some form of nonlinearity, that must involve some classical intervention.
Another consequence of the unitary operations is reversibility. Any unitary operation can be reversed. Quantum computing libraries often provide a function to reverse entire circuits. Reversing the X gate is simple: we just apply it again (its conjugate transpose is itself, therefore $X^2=\mathbb{1}$).
```
import numpy as np
from pyquil import Program, get_qc
from pyquil.gates import *
from forest_tools import *
%matplotlib inline
qvm_server, quilc_server, fc = init_qvm_and_quilc('/home/local/bin/qvm', '/home/local/bin/quilc')
qc = get_qc('1q-qvm', connection=fc)
circuit = Program()
circuit += X(0)
circuit += X(0)
results = qc.run_and_measure(circuit, trials=100)
plot_histogram(results)
```
which is exactly $|0\rangle$ as we would expect.
In the next notebook, you will learn about classical and quantum many-body systems and the Hamiltonian. In the notebook on adiabatic quantum computing, you will learn that a unitary operation is in fact the Schrödinger equation solved for a Hamiltonian for some duration of time. This connects the computer science way of thinking about gates and unitary operations to actual physics, but there is some learning to be done before we can make that connection. Before that, let us take another look at the interaction with the environment.
# Interaction with the environment: open systems
Actual quantum systems are seldom closed: they constantly interact with their environment in a largely uncontrolled fashion, which causes them to lose coherence. This is true for current and near-term quantum computers too.
<img src="figures/open_system.svg" alt="A quantum processor as an open quantum system" style="width: 400px;"/>
This also means that their actual time evolution is not described by a unitary matrix as we would want it, but some other operator (the technical name for it is a completely positive trace-preserving map).
Quantum computing libraries often offer a variety of noise models that mimic different types of interaction, and increasing the strength of the interaction with the environment leads to faster decoherence. The timescale for decoherence is often called $T_2$ time. Among a couple of other parameters, $T_2$ time is critically important for the number of gates or the duration of the quantum computation we can perform.
A very cheap way of studying the effects of decoherence is mixing a pure state with the maximally mixed state $\mathbb{1}/2^d$, where $d$ is the number of qubits, with some visibility parameter in $[0,1]$. This way we do not have to specify noise models or any other map modelling decoherence. For instance, we can mix the $|\phi^+\rangle$ state with the maximally mixed state:
```
def mixed_state(pure_state, visibility):
    """Mix a pure state with the maximally mixed state.

    Args:
        pure_state: column vector (shape (d, 1)) representing |psi>.
        visibility: weight in [0, 1]; 1 returns the pure density matrix,
            0 returns the maximally mixed state I/d.

    Returns:
        The density matrix visibility*|psi><psi| + (1-visibility)*I/d.
    """
    density_matrix = pure_state.dot(pure_state.T.conj())
    # Generalized: infer the Hilbert-space dimension from the state itself
    # instead of hard-coding d = 4 (two qubits).
    dim = pure_state.shape[0]
    maximally_mixed_state = np.eye(dim) / dim
    return visibility * density_matrix + (1 - visibility) * maximally_mixed_state
ϕ = np.array([[1],[0],[0],[1]])/np.sqrt(2)
print("Maximum visibility is a pure state:")
print(mixed_state(ϕ, 1.0))
print("The state is still entangled with visibility 0.8:")
print(mixed_state(ϕ, 0.8))
print("Entanglement is lost by 0.6:")
print(mixed_state(ϕ, 0.6))
print("Barely any coherence remains by 0.2:")
print(mixed_state(ϕ, 0.2))
```
Another way to look at what happens to a quantum state in an open system is through equilibrium processes. Think of a cup of coffee: left alone, it will equilibrate with the environment, eventually reaching the temperature of the environment. This includes energy exchange. A quantum state does the same thing, and its environment has a defined temperature, just like the environment of a cup of coffee.
The equilibrium state is called the thermal state. It has a very specific structure and we will revisit it, but for now, suffice to say that the energy of the samples pulled out of a thermal state follows a Boltzmann distribution. The Boltzmann -- also called Gibbs -- distribution is described as $P(E_i) = \frac {e^{-E_{i}/T}}{\sum _{j=1}^{M}{e^{-E_{j}/T}}}$, where $E_i$ is an energy, and $M$ is the total number of possible energy levels. Temperature enters the definition: the higher the temperature, the closer we are to the uniform distribution. In the infinite temperature limit, it recovers the uniform distribution. At high temperatures, all energy levels have an equal probability. In contrast, at zero temperature, the entire probability mass is concentrated on the lowest energy level, the ground state energy. To get a sense of this, let's plot the Boltzmann distribution with vastly different temperatures:
```
import matplotlib.pyplot as plt
temperatures = [.5, 5, 2000]
energies = np.linspace(0, 20, 100)
fig, ax = plt.subplots()
for i, T in enumerate(temperatures):
probabilities = np.exp(-energies/T)
Z = probabilities.sum()
probabilities /= Z
ax.plot(energies, probabilities, linewidth=3, label = "$T_" + str(i+1)+"$")
ax.set_xlim(0, 20)
ax.set_ylim(0, 1.2*probabilities.max())
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('Energy')
ax.set_ylabel('Probability')
ax.legend()
```
Here $T_1<T_2<T_3$. Notice that $T_1$ is a low temperature, and therefore it is highly peaked at low energy levels. In contrast, $T_3$ is a very high temperature and the probability distribution is almost completely flat.
```
qvm_server.terminate()
quilc_server.terminate()
```
| github_jupyter |
<h1>IndabaX Tanzania Mobile Banking Prediction Challenge by Tanzania IndabaX 2021 <h1><h2>by XVIII_6@zindi<h2>
<h2>OBJECTIVE OF THE CHALLENGE <h2>
<h4>The objective of this challenge is to build a machine learning model to predict which individuals across Africa and around the world use mobile or internet banking<h4>
<h2>IMPORTING THE IMPORTANT LIBRARIES <h2>
```
#Start by importing the modules
import pandas as pd
import numpy as np
import os
import sys
import gc
import random
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
import lightgbm as lgb
import seaborn as sns
from tqdm import tqdm_notebook
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import warnings
warnings.simplefilter('ignore')
```
<h2>LOADING THE DATA FROM CSV FILE <h2>
```
#Here loading the file from csv ,that is train,test,and sub and vd
pd.set_option('display.max_columns',50000) #this code help to display the full columns and rows of the data in cell
pd.set_option('display.max_rows',None) #
pd.set_option('display.width',70000) #
#read the train ,test,sub and variable definition.
train=pd.read_csv('train1.csv')
test=pd.read_csv('test1.csv')
sub=pd.read_csv('SampleSubmission1.csv')
vd=pd.read_csv('VariableDefinitions1.csv')
```
<h2>EXPLORE THE DATA <h2>
```
#check the train data file for the first 5 above data
train.head()
#check the test data file for the first 5 above data
test.head()
#check the sub file
sub.head()
#check the variable definitions file to gain more understanding
vd
#check the information like data type of the train data file
train.info()
#check information like datatype of the test data file
test.info()
#checking for the missing values in the train data file
train.isnull().sum()
#check for the missing values in the test data file
test.isnull().sum()
```
<h2>DATA VISUALIZATION <h2>
```
#here use graph to check the ditribution of the target
sns.countplot(train.Target)
plt.title('Distribution of the target', fontdict={'size':25})
#Also continue to check for missing values in the train data file by using graph to gain insight
graph= train.isna().sum().sort_values().plot(kind = 'barh', figsize = (15, 15))
plt.title('precentage of missing values', fontdict={'size':41})
for p in graph.patches:
percentage ='{:,.0f}%'.format((p.get_width()/train.shape[0])*100)
width, height =p.get_width(),p.get_height()
x=p.get_x()+width+0.02
y=p.get_y()+height/2
plt.annotate(percentage,(x,y))
#Also continue to check for missing values in the test data file by using graph to gain insight
graph = test.isna().sum().sort_values().plot(kind = 'barh', figsize = (15, 15))
plt.title('precentage of missing values', fontdict={'size':41})
for p in graph.patches:
percentage ='{:,.1f}%'.format((p.get_width()/test.shape[0])*100)
width, height =p.get_width(),p.get_height()
x=p.get_x()+width+0.02
y=p.get_y()+height/2
plt.annotate(percentage,(x,y))
```
<h2>DATA CLEANING UP <h2>
<h4>Since data visualization and exploration revealed a lot of NaN values, we clean them up for better modelling</h4>
```
#Here data cleaning up by using function clean and dirt rate and droping the columns with more dirt rate
def clean(train, thresh):
    """Drop every column of *train* whose fraction of NaNs is >= *thresh*.

    The frame is mutated in place and also returned for convenience.
    """
    n_rows = train.shape[0]
    doomed = [col for col in train.columns
              if train[col].isna().sum() / n_rows >= thresh]
    train.drop(doomed, axis=1, inplace=True)
    return train
#calling the function clean to clean and drop the data with more nans values from the train data and set the test data
train = clean(train,thresh=0.8)
test = test[train.columns[:-1]]
```
<h2>DATA IMPUTING FOR OTHER NANS VALUES <h2>
```
#As still there are missing values so here we imputing those missing values by using the function impute columns
def imputeColumns(train, test):
    """Impute the remaining NaNs jointly over train+test, then split back.

    - 'age' is filled with the mean age over the combined frames.
    - every FQ* column is filled with -1 (meaning "no answer") and cast to int.
    Rows are re-assigned to train/test by matching 'ID' values.
    """
    # concat train and test so fills/statistics are computed consistently
    total = pd.concat([train,test])
    total['age'].fillna(total.age.mean(),inplace=True) # fill age NaNs with the mean
    FQ = total.filter(like= 'FQ').columns
    for cl in FQ :
        total[cl] = total[cl].fillna(-1) # -1 marks a missing answer
    total[FQ] = total[FQ].astype('int') # these columns are integer-coded
    # split total back into train / test by ID membership
    # NOTE(review): assumes train and test IDs are disjoint -- verify upstream.
    train = total[total['ID'].isin(train['ID'].unique())]
    train['Target'] = train['Target'].astype('int')
    test = total[~total['ID'].isin(train['ID'].unique())]
    return train , test # return the cleaned train and test frames
#Calling the function imputeColumns to clean and perform the above work
train , test =imputeColumns(train , test)
#Explore the train data
train.head()
#check if there is other nans values in clumns in train data
train.isnull().sum()
#check if there is other missing values in clumns in test data
test.isnull().sum()
#Explore the shape of the train,test and sub
train.shape,sub.shape,test.shape
```
<h2>BUILDING THE MODEL <h2>
```
#Creating the class to capture the hyperparameter and droping the other unnecesary feature from the data
class model:
    """Configuration namespace: hyperparameters and feature lists for the
    per-region LightGBM mobile-banking classifier."""
    seedNumber = 42  # global RNG seed
    n_splits = 5  # StratifiedKFold splits
    remove_features = ['ID', 'country','Target'] # columns excluded from the feature set
    categorical_features = ['country_code','region'] # treated as categoricals by LightGBM
    TARGET_COL = 'Target'
    params = {'boosting_type': # LightGBM hyperparameters
              'gbdt','objective':
              'binary','metric': 'auc',
              'n_estimators': 500,
              'colsample_bytree' : 0.8,
              'seed': 42,
              'silent':False,
              'early_stopping_rounds': 100,
              'learning_rate' :0.1
              }
def random_stateNumber(state):
    """Seed both Python's and NumPy's global RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed):
        seeder(state)
random_stateNumber(model.seedNumber)
features_columns = [col for col in train.columns if col not in model.remove_features]
#train the model by using the function region to train the regions one by one in a data set
def region(X, y, Test, skf, reg):
    """Train K-fold LightGBM models for one region.

    Args:
        X, y: training features / target for the region.
        Test: the region's test frame; a 'target' column is initialised on it.
        skf: StratifiedKFold splitter.
        reg: region identifier, used only for logging.

    Returns:
        (mean of the per-fold test predictions, out-of-fold predictions on X)
    """
    oof_lgb = np.zeros((X.shape[0],)) # out-of-fold prediction buffer
    Test['target'] = 0
    lgb_preds = []
    # NOTE(review): folds are stratified on country_code, not on y -- confirm intent.
    for fold_, (trn_idx, val_idx) in enumerate(skf.split(X, X.country_code)):
        tr_x, tr_y = X.iloc[trn_idx,:], y[trn_idx]
        vl_x, vl_y = X.iloc[val_idx,:], y[val_idx]
        data_train = lgb.Dataset(tr_x, label=tr_y,categorical_feature=model.categorical_features)
        data_valid= lgb.Dataset(vl_x, label=vl_y,categorical_feature=model.categorical_features)
        estimator = lgb.train(model.params,data_train,valid_sets = [data_train,data_valid ],verbose_eval = 0)
        # predict the held-out fold at the best early-stopped iteration
        y_pred_val = estimator.predict(vl_x,num_iteration=estimator.best_iteration)
        oof_lgb[val_idx] = y_pred_val
        y_pred_test = estimator.predict(Test[features_columns],num_iteration=estimator.best_iteration)
        lgb_preds.append(y_pred_test)
    print(f'Region[{reg}] AUC : ',roc_auc_score(y, oof_lgb))
    return np.mean(lgb_preds,axis=0) , oof_lgb
#Then ,here i continue training the model to get the predictions for the validation set
def continue_training():
    """Train a model per region and gather train/test predictions.

    Returns train IDs, out-of-fold predictions, train targets, test IDs and
    averaged test predictions, all aligned element-wise.
    """
    train_ids = [] ; test_ids = [] ;
    train_target = [] ;custom_preds = [] ; test_preds = [] ;
    for reg in tqdm_notebook(np.sort(train.region.unique())) :
        skf = StratifiedKFold(n_splits=model.n_splits,shuffle=True, random_state=model.seedNumber)
        train_ = train[train['region']==reg].reset_index(drop=True) # this region's train rows
        Test = test[test['region']==reg].reset_index(drop=True) # this region's test rows
        train_ids.extend(train_['ID'].values.tolist()) ; test_ids.extend(Test['ID'].values.tolist())
        X , y = train_[features_columns] , train_[model.TARGET_COL]
        test_pred , oof_pred =region(X,y,Test,skf,reg=reg)
        train_target.extend(y) ; custom_preds.extend(oof_pred) ; test_preds.extend(test_pred)
    return train_ids , custom_preds ,train_target ,test_ids, test_preds
train_ids , oof_preds ,train_target ,test_ids, test_preds = continue_training()
#Evaluate the model by using the area under the curve from the metrics module
complete = pd.DataFrame({'ID' :train_ids ,'OOF_lgbm' :oof_preds , 'Target' :train_target})
print(f'AUC : ',roc_auc_score(complete['Target'], complete['OOF_lgbm']))
Submission = pd.DataFrame({'ID' :test_ids ,'Target' :test_preds})
#Complete and submit the files
Submission.to_csv('sub3.csv',index=False)
```
| github_jupyter |
## Learning Objectives
- How we can exctract keywords from corpus (collections of texts) using TF-IDF
- Explain what is TF-IDF
- Applications of keywords exctraction algorithm and Word2Vec
## Review: What are the pre-processings to apply a machine learning algorithm on text data?
1. The text must be parsed to words, called tokenization
2. Then the words need to be encoded as integers or floating point values
3. scikit-learn library offers easy-to-use tools to perform both tokenization and feature extraction of text data
## What is TF-IDF Vectorizer?
- Word counts are a good starting point, but are very basic
An alternative is to calculate word frequencies, and by far the most popular method is called TF-IDF.
**Term Frequency**: This summarizes how often a given word appears within a document
**Inverse Document Frequency**: This downscales words that appear a lot across documents
## Intuitive idea behind TF-IDF:
- If a word appears frequently in a document, it's important. Give the word a high score
- But if a word appears in many documents, it's not a unique identifier. Give the word a low score
<img src="Images/tfidf_slide.png" width="700" height="700">
## Activity: Obtain the keywords from TF-IDF
1- First obtain the TF-IDF matrix for given corpus
2- Do column-wise addition
3- Sort the score from highest to lowest
4- Return the associated words based on step 3
```
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import numpy as np
def keyword_sklearn(docs, k):
    """Return the top-k keywords of *docs* ranked by summed TF-IDF score.

    Parameters
    ----------
    docs : list of str
        The corpus, one document per string.
    k : int
        Number of top-scoring words to return.

    Returns
    -------
    list of (str, float)
        (word, score) pairs sorted by score, highest first.
    """
    vectorizer = TfidfVectorizer(stop_words='english')
    tfidf_matrix = vectorizer.fit_transform(docs)
    # get_feature_names() was removed in scikit-learn 1.2; use the
    # replacement API when available and fall back on older versions.
    # Computed once instead of twice as in the original.
    if hasattr(vectorizer, 'get_feature_names_out'):
        feature_names = list(vectorizer.get_feature_names_out())
    else:
        feature_names = vectorizer.get_feature_names()
    print(tfidf_matrix.toarray())
    print(feature_names)
    # Column-wise sum -> one aggregate score per vocabulary word.
    tfidf_scores = np.ravel(np.sum(tfidf_matrix, axis=0))
    ranked = sorted(zip(feature_names, tfidf_scores), key=lambda x: x[1], reverse=True)
    return ranked[:k]
# Demo corpus for keyword extraction.
# Fixed: variable misspelling "documnets" and the string typo "bule" -> "blue".
documents = ['The sky is blue', 'The sun is bright', 'The sun in the sky is bright',
             'we can see the shining sun, the bright sun']
print(keyword_sklearn(documents, 3))
```
## Word2Vec
- Data Scientists have assigned a vector to each English word
- This process of assigning vectors to each word is called Word2Vec
- In DS 2.4, we will learn how they accomplished the Word2Vec task
- Download this huge Word2Vec file: https://nlp.stanford.edu/projects/glove/
- Do not open the extracted file
## What is the property of vectors associated to each word in Word2Vec?
- Words with similar meanings would be closer to each other in Euclidean Space
- For example if $V_{pizza}$, $V_{food}$ and $V_{sport}$ represent the vector associated to pizza, food and sport then:
${\| V_{pizza} - V_{food}}\|$ < ${\| V_{pizza} - V_{sport}}\|$
## Activity: Obtain the vector associated with pizza in GloVe
```
import codecs

# Scan the GloVe file line by line and stop at the first 'pizza' entry.
with codecs.open('/Users/miladtoutounchian/Downloads/glove.840B.300d.txt', 'r') as glove_file:
    for line in glove_file:
        parts = line.split()
        if parts[0] == 'pizza':
            vector = [float(value) for value in parts[1:]]
            print(parts[0])
            print(vector)
            print(len(vector))
            break
```
## Activity: Obtain the vectors associated to pizza, food and sport in Glove
```
import codecs

# Collect the GloVe vectors for the three target words, stopping early
# once all of them have been found.  `ls` is reused by the next cell.
target_words = ('pizza', 'food', 'sport')
ls = {}
with codecs.open('/Users/miladtoutounchian/Downloads/glove.840B.300d.txt', 'r') as glove_file:
    for line in glove_file:
        parts = line.split()
        if parts[0] in target_words:
            ls[parts[0]] = [float(value) for value in parts[1:]]
            if len(ls) == 3:
                break
print(ls)
```
## Activity: Show that the vector of pizza is closer to the vector of food than to the vector of sport
```
import numpy as np

# Euclidean distances between the word vectors: pizza should come out
# closer to food than to sport.  Only the last expression is displayed
# by the notebook, matching the original cell.
pizza_vec = np.array(ls['pizza'])
food_vec = np.array(ls['food'])
sport_vec = np.array(ls['sport'])
np.linalg.norm(pizza_vec - food_vec)
np.linalg.norm(pizza_vec - sport_vec)
np.linalg.norm(food_vec - sport_vec)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.