<a href="https://colab.research.google.com/github/tushar-semwal/fedperf/blob/main/Santiago/Shakespeare/FedAvg.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# FedPerf - Shakespeare + FedAvg algorithm
## Setup & Dependencies Installation
```
%%capture
!pip install torchsummary torchsummaryX unidecode
%load_ext tensorboard
import copy
from functools import reduce
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import Sampler
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from torchsummaryX import summary as summaryx
from torchvision import transforms, utils, datasets
from tqdm.notebook import tqdm
from unidecode import unidecode
%matplotlib inline
# Check assigned GPU
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ')
print('and then re-execute this cell.')
else:
print(gpu_info)
# set manual seed for reproducibility
RANDOM_SEED = 42
# general reproducibility
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.cuda.manual_seed(RANDOM_SEED)
# gpu training specific
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
```
## Mount GDrive
```
BASE_DIR = '/content/drive/MyDrive/FedPerf/shakespeare/FedAvg'
try:
from google.colab import drive
drive.mount('/content/drive')
os.makedirs(BASE_DIR, exist_ok=True)
except:
print("WARNING: Results won't be stored on GDrive")
BASE_DIR = './'
```
## Loading Dataset
```
!rm -Rf data
!mkdir -p data scripts
GENERATE_DATASET = False # If False, download the dataset provided by the q-FFL paper
DATA_DIR = 'data/'
# Dataset generation params
SAMPLES_FRACTION = 1. # If using an already generated dataset
# SAMPLES_FRACTION = 0.2 # Fraction of total samples in the dataset - FedProx default script
# SAMPLES_FRACTION = 0.05 # Fraction of total samples in the dataset - qFFL
TRAIN_FRACTION = 0.8 # Train set size
MIN_SAMPLES = 0 # Min samples per client (for filtering purposes) - FedProx
# MIN_SAMPLES = 64 # Min samples per client (for filtering purposes) - qFFL
# Download raw dataset
# !wget https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt -O data/shakespeare.txt
!wget --adjust-extension http://www.gutenberg.org/files/100/100-0.txt -O data/shakespeare.txt
if not GENERATE_DATASET:
!rm -Rf data/train data/test
!gdown --id 1n46Mftp3_ahRi1Z6jYhEriyLtdRDS1tD # Download Shakespeare dataset used by the FedProx paper
!unzip shakespeare.zip
!mv -f shakespeare_paper/train data/
!mv -f shakespeare_paper/test data/
!rm -R shakespeare_paper/ shakespeare.zip
corpus = []
with open('data/shakespeare.txt', 'r') as f:
data = list(unidecode(f.read()))
    corpus = sorted(set(data))  # sorted so the char -> label mapping is reproducible across runs
print('Corpus Length:', len(corpus))
```
#### Dataset Preprocessing script
```
%%capture
if GENERATE_DATASET:
# Download dataset generation scripts
!wget https://raw.githubusercontent.com/ml-lab/FedProx/master/data/shakespeare/preprocess/preprocess_shakespeare.py -O scripts/preprocess_shakespeare.py
!wget https://raw.githubusercontent.com/ml-lab/FedProx/master/data/shakespeare/preprocess/shake_utils.py -O scripts/shake_utils.py
!wget https://raw.githubusercontent.com/ml-lab/FedProx/master/data/shakespeare/preprocess/gen_all_data.py -O scripts/gen_all_data.py
# Download data preprocessing scripts
!wget https://raw.githubusercontent.com/ml-lab/FedProx/master/utils/sample.py -O scripts/sample.py
!wget https://raw.githubusercontent.com/ml-lab/FedProx/master/utils/remove_users.py -O scripts/remove_users.py
# Running scripts
if GENERATE_DATASET:
!mkdir -p data/raw_data data/all_data data/train data/test
!python scripts/preprocess_shakespeare.py data/shakespeare.txt data/raw_data
!python scripts/gen_all_data.py
```
#### Dataset class
```
class ShakespeareDataset(Dataset):
def __init__(self, x, y, corpus, seq_length):
self.x = x
self.y = y
self.corpus = corpus
self.corpus_size = len(self.corpus)
super(ShakespeareDataset, self).__init__()
def __len__(self):
return len(self.x)
def __repr__(self):
return f'{self.__class__} - (length: {self.__len__()})'
def __getitem__(self, i):
input_seq = self.x[i]
next_char = self.y[i]
# print('\tgetitem', i, input_seq, next_char)
input_value = self.text2charindxs(input_seq)
target_value = self.get_label_from_char(next_char)
return input_value, target_value
def text2charindxs(self, text):
tensor = torch.zeros(len(text), dtype=torch.int32)
for i, c in enumerate(text):
tensor[i] = self.get_label_from_char(c)
return tensor
def get_label_from_char(self, c):
return self.corpus.index(c)
def get_char_from_label(self, l):
return self.corpus[l]
```
##### Federated Dataset
```
class ShakespeareFedDataset(ShakespeareDataset):
def __init__(self, x, y, corpus, seq_length):
super(ShakespeareFedDataset, self).__init__(x, y, corpus, seq_length)
def dataloader(self, batch_size, shuffle=True):
return DataLoader(self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=0)
```
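A quick sanity check of the dataset classes (the toy corpus and sequences below are made-up illustrations, not taken from the Shakespeare files):
```
toy_corpus = list('abcd ')
toy_ds = ShakespeareFedDataset(x=['abc', 'bcd'], y=['d', ' '], corpus=toy_corpus, seq_length=3)
print(toy_ds)         # ShakespeareFedDataset - (length: 2)
print(toy_ds[0])      # (tensor([0, 1, 2], dtype=torch.int32), 3)
for xb, yb in toy_ds.dataloader(batch_size=2, shuffle=False):
    print(xb.shape, yb)  # torch.Size([2, 3]) tensor([3, 4])
```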
## Partitioning & Data Loaders
### IID
```
def iid_partition_(dataset, clients):
"""
I.I.D paritioning of data over clients
Shuffle the data
Split it between clients
params:
- dataset (torch.utils.Dataset): Dataset
- clients (int): Number of Clients to split the data between
returns:
- Dictionary of image indexes for each client
"""
num_items_per_client = int(len(dataset)/clients)
client_dict = {}
image_idxs = [i for i in range(len(dataset))]
for i in range(clients):
client_dict[i] = set(np.random.choice(image_idxs, num_items_per_client, replace=False))
image_idxs = list(set(image_idxs) - client_dict[i])
return client_dict
def iid_partition(corpus, seq_length=80, val_split=False):
train_file = [os.path.join(DATA_DIR, 'train', f) for f in os.listdir(f'{DATA_DIR}/train') if f.endswith('.json')][0]
test_file = [os.path.join(DATA_DIR, 'test', f) for f in os.listdir(f'{DATA_DIR}/test') if f.endswith('.json')][0]
with open(train_file, 'r') as file:
data_train = json.loads(unidecode(file.read()))
with open(test_file, 'r') as file:
data_test = json.loads(unidecode(file.read()))
total_samples_train = sum(data_train['num_samples'])
data_dict = {}
x_train, y_train = [], []
x_test, y_test = [], []
# x_val, y_val = [], []
users = list(zip(data_train['users'], data_train['num_samples']))
# random.shuffle(users)
total_samples = int(sum(data_train['num_samples']) * SAMPLES_FRACTION)
print('Objective', total_samples, '/', sum(data_train['num_samples']))
sample_count = 0
for i, (author_id, samples) in enumerate(users):
if sample_count >= total_samples:
print('Max samples reached', sample_count, '/', total_samples)
break
if samples < MIN_SAMPLES: # or data_train['num_samples'][i] > 10000:
print('SKIP', author_id, samples)
continue
else:
udata_train = data_train['user_data'][author_id]
            max_samples = samples if (sample_count + samples) <= total_samples else (total_samples - sample_count)
sample_count += max_samples
# print('sample_count', sample_count)
x_train.extend(data_train['user_data'][author_id]['x'][:max_samples])
y_train.extend(data_train['user_data'][author_id]['y'][:max_samples])
author_data = data_test['user_data'][author_id]
test_size = int(len(author_data['x']) * SAMPLES_FRACTION)
if val_split:
x_test.extend(author_data['x'][:int(test_size / 2)])
y_test.extend(author_data['y'][:int(test_size / 2)])
# x_val.extend(author_data['x'][int(test_size / 2):])
# y_val.extend(author_data['y'][int(test_size / 2):int(test_size)])
else:
x_test.extend(author_data['x'][:int(test_size)])
y_test.extend(author_data['y'][:int(test_size)])
train_ds = ShakespeareDataset(x_train, y_train, corpus, seq_length)
test_ds = ShakespeareDataset(x_test, y_test, corpus, seq_length)
# val_ds = ShakespeareDataset(x_val, y_val, corpus, seq_length)
data_dict = iid_partition_(train_ds, clients=len(users))
return train_ds, data_dict, test_ds
```
### Non-IID
```
def noniid_partition(corpus, seq_length=80, val_split=False):
train_file = [os.path.join(DATA_DIR, 'train', f) for f in os.listdir(f'{DATA_DIR}/train') if f.endswith('.json')][0]
test_file = [os.path.join(DATA_DIR, 'test', f) for f in os.listdir(f'{DATA_DIR}/test') if f.endswith('.json')][0]
with open(train_file, 'r') as file:
data_train = json.loads(unidecode(file.read()))
with open(test_file, 'r') as file:
data_test = json.loads(unidecode(file.read()))
total_samples_train = sum(data_train['num_samples'])
data_dict = {}
x_test, y_test = [], []
users = list(zip(data_train['users'], data_train['num_samples']))
# random.shuffle(users)
total_samples = int(sum(data_train['num_samples']) * SAMPLES_FRACTION)
print('Objective', total_samples, '/', sum(data_train['num_samples']))
sample_count = 0
for i, (author_id, samples) in enumerate(users):
if sample_count >= total_samples:
print('Max samples reached', sample_count, '/', total_samples)
break
if samples < MIN_SAMPLES: # or data_train['num_samples'][i] > 10000:
print('SKIP', author_id, samples)
continue
else:
udata_train = data_train['user_data'][author_id]
            max_samples = samples if (sample_count + samples) <= total_samples else (total_samples - sample_count)
sample_count += max_samples
# print('sample_count', sample_count)
x_train = data_train['user_data'][author_id]['x'][:max_samples]
y_train = data_train['user_data'][author_id]['y'][:max_samples]
train_ds = ShakespeareFedDataset(x_train, y_train, corpus, seq_length)
x_val, y_val = None, None
val_ds = None
author_data = data_test['user_data'][author_id]
test_size = int(len(author_data['x']) * SAMPLES_FRACTION)
if val_split:
x_test += author_data['x'][:int(test_size / 2)]
y_test += author_data['y'][:int(test_size / 2)]
x_val = author_data['x'][int(test_size / 2):]
y_val = author_data['y'][int(test_size / 2):int(test_size)]
val_ds = ShakespeareFedDataset(x_val, y_val, corpus, seq_length)
else:
x_test += author_data['x'][:int(test_size)]
y_test += author_data['y'][:int(test_size)]
data_dict[author_id] = {
'train_ds': train_ds,
'val_ds': val_ds
}
test_ds = ShakespeareFedDataset(x_test, y_test, corpus, seq_length)
return data_dict, test_ds
```
## Models
### Shakespeare LSTM
```
class ShakespeareLSTM(nn.Module):
"""
"""
def __init__(self, input_dim, embedding_dim, hidden_dim, classes, lstm_layers=2, dropout=0.1, batch_first=True):
super(ShakespeareLSTM, self).__init__()
self.input_dim = input_dim
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.classes = classes
self.no_layers = lstm_layers
self.embedding = nn.Embedding(num_embeddings=self.classes,
embedding_dim=self.embedding_dim)
self.lstm = nn.LSTM(input_size=self.embedding_dim,
hidden_size=self.hidden_dim,
num_layers=self.no_layers,
batch_first=batch_first,
dropout=dropout if self.no_layers > 1 else 0.)
self.fc = nn.Linear(hidden_dim, self.classes)
def forward(self, x, hc=None):
batch_size = x.size(0)
x_emb = self.embedding(x)
out, (ht, ct) = self.lstm(x_emb.view(batch_size, -1, self.embedding_dim), hc)
dense = self.fc(ht[-1])
return dense
def init_hidden(self, batch_size):
        # torch.autograd.Variable is deprecated; plain tensors behave the same since PyTorch 0.4
        return (torch.zeros(self.no_layers, batch_size, self.hidden_dim),
                torch.zeros(self.no_layers, batch_size, self.hidden_dim))
```
#### Model Summary
```
batch_size = 10
seq_length = 80 # mcmahan17a, fedprox, qFFL
shakespeare_lstm = ShakespeareLSTM(input_dim=seq_length,
embedding_dim=8, # mcmahan17a, fedprox, qFFL
hidden_dim=256, # mcmahan17a, fedprox impl
# hidden_dim=100, # fedprox paper
classes=len(corpus),
lstm_layers=2,
dropout=0.1, # TODO:
batch_first=True
)
if torch.cuda.is_available():
shakespeare_lstm.cuda()
hc = shakespeare_lstm.init_hidden(batch_size)
x_sample = torch.zeros((batch_size, seq_length),
dtype=torch.long,
device=(torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')))
x_sample[0][0] = 1
x_sample
print("\nShakespeare LSTM SUMMARY")
print(summaryx(shakespeare_lstm, x_sample))
```
## FedAvg Algorithm
### Plot Utils
```
from sklearn.metrics import f1_score
def plot_scores(history, exp_id, title, suffix):
accuracies = [x['accuracy'] for x in history]
f1_macro = [x['f1_macro'] for x in history]
f1_weighted = [x['f1_weighted'] for x in history]
fig, ax = plt.subplots()
ax.plot(accuracies, 'tab:orange')
ax.set(xlabel='Rounds', ylabel='Test Accuracy', title=title)
ax.grid()
fig.savefig(f'{BASE_DIR}/{exp_id}/Test_Accuracy_{suffix}.jpg', format='jpg', dpi=300)
plt.show()
fig, ax = plt.subplots()
ax.plot(f1_macro, 'tab:orange')
ax.set(xlabel='Rounds', ylabel='Test F1 (macro)', title=title)
ax.grid()
fig.savefig(f'{BASE_DIR}/{exp_id}/Test_F1_Macro_{suffix}.jpg', format='jpg')
plt.show()
fig, ax = plt.subplots()
ax.plot(f1_weighted, 'tab:orange')
ax.set(xlabel='Rounds', ylabel='Test F1 (weighted)', title=title)
ax.grid()
fig.savefig(f'{BASE_DIR}/{exp_id}/Test_F1_Weighted_{suffix}.jpg', format='jpg')
plt.show()
def plot_losses(history, exp_id, title, suffix):
val_losses = [x['loss'] for x in history]
train_losses = [x['train_loss'] for x in history]
fig, ax = plt.subplots()
ax.plot(train_losses, 'tab:orange')
ax.set(xlabel='Rounds', ylabel='Train Loss', title=title)
ax.grid()
fig.savefig(f'{BASE_DIR}/{exp_id}/Train_Loss_{suffix}.jpg', format='jpg')
plt.show()
fig, ax = plt.subplots()
ax.plot(val_losses, 'tab:orange')
ax.set(xlabel='Rounds', ylabel='Test Loss', title=title)
ax.grid()
fig.savefig(f'{BASE_DIR}/{exp_id}/Test_Loss_{suffix}.jpg', format='jpg')
plt.show()
```
### Systems Heterogeneity Simulations
Generate the number of local epochs for each selected client, based on the percentage of devices that exhibit heterogeneity.
Assign a number of epochs (chosen uniformly at random from [1, E]) to 0%, 50%, or 90% of the selected devices, respectively. A setting where 0% of devices perform fewer than E epochs of work corresponds to an environment without systems heterogeneity, while 90% of devices sending partial solutions corresponds to a highly heterogeneous system.
```
def GenerateLocalEpochs(percentage, size, max_epochs):
''' Method generates list of epochs for selected clients
    to replicate systems heterogeneity
Params:
percentage: percentage of clients to have fewer than E epochs
size: total size of the list
max_epochs: maximum value for local epochs
Returns:
List of size epochs for each Client Update
'''
# if percentage is 0 then each client runs for E epochs
if percentage == 0:
return np.array([max_epochs]*size)
else:
# get the number of clients to have fewer than E epochs
heterogenous_size = int((percentage/100) * size)
# generate random uniform epochs of heterogenous size between 1 and E
epoch_list = np.random.randint(1, max_epochs, heterogenous_size)
# the rest of the clients will have E epochs
remaining_size = size - heterogenous_size
rem_list = [max_epochs]*remaining_size
epoch_list = np.append(epoch_list, rem_list, axis=0)
# shuffle the list and return
np.random.shuffle(epoch_list)
return epoch_list
```
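For example, with 10 selected clients, E = 20, and 50% stragglers (the printed values are illustrative; the output varies with the random seed):
```
epoch_list = GenerateLocalEpochs(percentage=50, size=10, max_epochs=20)
print(epoch_list)  # e.g. [ 7 20 20  3 20 20 12 20 15 20] -- half the clients run fewer than E epochs
```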
### Local Training (Client Update)
```
class CustomDataset(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
data, label = self.dataset[self.idxs[item]]
return data, label
class ClientUpdate(object):
def __init__(self, dataset, batchSize, learning_rate, epochs, idxs, mu, algorithm):
# self.train_loader = DataLoader(CustomDataset(dataset, idxs), batch_size=batchSize, shuffle=True)
        if hasattr(dataset, 'dataloader'):
            self.train_loader = dataset.dataloader(batch_size=batchSize, shuffle=True)
        else:
            self.train_loader = DataLoader(CustomDataset(dataset, idxs), batch_size=batchSize, shuffle=True)
self.algorithm = algorithm
self.learning_rate = learning_rate
self.epochs = epochs
self.mu = mu
def train(self, model):
# print("Client training for {} epochs.".format(self.epochs))
criterion = nn.CrossEntropyLoss()
proximal_criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=self.learning_rate, momentum=0.5)
# use the weights of global model for proximal term calculation
global_model = copy.deepcopy(model)
# calculate local training time
start_time = time.time()
e_loss = []
for epoch in range(1, self.epochs+1):
train_loss = 0.0
model.train()
for data, labels in self.train_loader:
if torch.cuda.is_available():
data, labels = data.cuda(), labels.cuda()
# clear the gradients
optimizer.zero_grad()
# make a forward pass
output = model(data)
# calculate the loss + the proximal term
_, pred = torch.max(output, 1)
if self.algorithm == 'fedprox':
proximal_term = 0.0
# iterate through the current and global model parameters
for w, w_t in zip(model.parameters(), global_model.parameters()) :
# update the proximal term
#proximal_term += torch.sum(torch.abs((w-w_t)**2))
proximal_term += (w-w_t).norm(2)
loss = criterion(output, labels) + (self.mu/2)*proximal_term
else:
loss = criterion(output, labels)
# do a backwards pass
loss.backward()
# perform a single optimization step
optimizer.step()
# update training loss
train_loss += loss.item()*data.size(0)
# average losses
train_loss = train_loss/len(self.train_loader.dataset)
e_loss.append(train_loss)
total_loss = sum(e_loss)/len(e_loss)
return model.state_dict(), total_loss, (time.time() - start_time)
```
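For reference, the `fedprox` branch above implements (a variant of) the FedProx local objective, in which each client $k$ minimizes its local loss $F_k$ plus a proximal penalty that keeps the local weights $w$ close to the current global weights $w^t$:

$$\min_w \; h_k(w) = F_k(w) + \frac{\mu}{2}\,\lVert w - w^t \rVert^2$$

Note that the active line uses the unsquared norm `(w - w_t).norm(2)`, while the commented-out line corresponds to the squared norm of the paper.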
### Server Side Training
```
def training(model, rounds, batch_size, lr, ds, data_dict, test_ds, C, K, E, mu, percentage, plt_title, plt_color, target_test_accuracy,
classes, algorithm="fedprox", history=[], eval_every=1, tb_logger=None):
"""
Function implements the Federated Averaging Algorithm from the FedAvg paper.
Specifically, this function is used for the server side training and weight update
Params:
- model: PyTorch model to train
- rounds: Number of communication rounds for the client update
- batch_size: Batch size for client update training
- lr: Learning rate used for client update training
- ds: Dataset used for training
- data_dict: Type of data partition used for training (IID or non-IID)
    - test_ds: Dataset used for testing the model
- C: Fraction of clients randomly chosen to perform computation on each round
- K: Total number of clients
- E: Number of training passes each client makes over its local dataset per round
- mu: proximal term constant
- percentage: percentage of selected client to have fewer than E epochs
Returns:
- model: Trained model on the server
"""
start = time.time()
# global model weights
global_weights = model.state_dict()
# training loss
train_loss = []
# test accuracy
test_acc = []
# store last loss for convergence
last_loss = 0.0
# total time taken
total_time = 0
print(f"System heterogeneity set to {percentage}% stragglers.\n")
print(f"Picking {max(int(C*K),1 )} random clients per round.\n")
users_id = list(data_dict.keys())
for curr_round in range(1, rounds+1):
w, local_loss, lst_local_train_time = [], [], []
m = max(int(C*K), 1)
heterogenous_epoch_list = GenerateLocalEpochs(percentage, size=m, max_epochs=E)
heterogenous_epoch_list = np.array(heterogenous_epoch_list)
# print('heterogenous_epoch_list', len(heterogenous_epoch_list))
S_t = np.random.choice(range(K), m, replace=False)
S_t = np.array(S_t)
print('Clients: {}/{} -> {}'.format(len(S_t), K, S_t))
# For Federated Averaging, drop all the clients that are stragglers
if algorithm == 'fedavg':
stragglers_indices = np.argwhere(heterogenous_epoch_list < E)
heterogenous_epoch_list = np.delete(heterogenous_epoch_list, stragglers_indices)
S_t = np.delete(S_t, stragglers_indices)
# for _, (k, epoch) in tqdm(enumerate(zip(S_t, heterogenous_epoch_list))):
for i in tqdm(range(len(S_t))):
# print('k', k)
k = S_t[i]
epoch = heterogenous_epoch_list[i]
key = users_id[k]
ds_ = ds if ds else data_dict[key]['train_ds']
idxs = data_dict[key] if ds else None
# print(f'Client {k}: {len(idxs) if idxs else len(ds_)} samples')
local_update = ClientUpdate(dataset=ds_, batchSize=batch_size, learning_rate=lr, epochs=epoch, idxs=idxs, mu=mu, algorithm=algorithm)
weights, loss, local_train_time = local_update.train(model=copy.deepcopy(model))
# print(f'Local train time for {k} on {len(idxs) if idxs else len(ds_)} samples: {local_train_time}')
# print(f'Local train time: {local_train_time}')
w.append(copy.deepcopy(weights))
local_loss.append(copy.deepcopy(loss))
lst_local_train_time.append(local_train_time)
# calculate time to update the global weights
global_start_time = time.time()
# updating the global weights
weights_avg = copy.deepcopy(w[0])
for k in weights_avg.keys():
for i in range(1, len(w)):
weights_avg[k] += w[i][k]
weights_avg[k] = torch.div(weights_avg[k], len(w))
global_weights = weights_avg
global_end_time = time.time()
# calculate total time
total_time += (global_end_time - global_start_time) + sum(lst_local_train_time)/len(lst_local_train_time)
# move the updated weights to our model state dict
model.load_state_dict(global_weights)
# loss
loss_avg = sum(local_loss) / len(local_loss)
print('Round: {}... \tAverage Loss: {}'.format(curr_round, round(loss_avg, 3)))
train_loss.append(loss_avg)
if tb_logger:
tb_logger.add_scalar(f'Train/Loss', loss_avg, curr_round)
# testing
# if curr_round % eval_every == 0:
test_scores = testing(model, test_ds, batch_size * 2, nn.CrossEntropyLoss(), len(classes), classes)
test_scores['train_loss'] = loss_avg
test_loss, test_accuracy = test_scores['loss'], test_scores['accuracy']
history.append(test_scores)
# print('Round: {}... \tAverage Loss: {} \tTest Loss: {} \tTest Acc: {}'.format(curr_round, round(loss_avg, 3), round(test_loss, 3), round(test_accuracy, 3)))
if tb_logger:
tb_logger.add_scalar(f'Test/Loss', test_scores['loss'], curr_round)
tb_logger.add_scalars(f'Test/Scores', {
'accuracy': test_scores['accuracy'], 'f1_macro': test_scores['f1_macro'], 'f1_weighted': test_scores['f1_weighted']
}, curr_round)
test_acc.append(test_accuracy)
# break if we achieve the target test accuracy
if test_accuracy >= target_test_accuracy:
rounds = curr_round
break
        # break on convergence for FedProx, i.e., when the loss change between two consecutive rounds is < 1e-5
if algorithm == 'fedprox' and abs(loss_avg - last_loss) < 1e-5:
rounds = curr_round
break
# update the last loss
last_loss = loss_avg
end = time.time()
# plot train loss
fig, ax = plt.subplots()
x_axis = np.arange(1, rounds+1)
y_axis = np.array(train_loss)
ax.plot(x_axis, y_axis)
ax.set(xlabel='Number of Rounds', ylabel='Train Loss', title=plt_title)
ax.grid()
# fig.savefig(plt_title+'.jpg', format='jpg')
# plot test accuracy
fig1, ax1 = plt.subplots()
x_axis1 = np.arange(1, rounds+1)
y_axis1 = np.array(test_acc)
ax1.plot(x_axis1, y_axis1)
ax1.set(xlabel='Number of Rounds', ylabel='Test Accuracy', title=plt_title)
ax1.grid()
# fig1.savefig(plt_title+'-test.jpg', format='jpg')
print("Training Done! Total time taken to Train: {}".format(end-start))
return model, history
```
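Note that the aggregation loop above takes a simple unweighted mean of the client state dicts. The FedAvg paper instead weights each client update by its local sample count $n_k$; a minimal sketch of that variant (assuming `w` is the list of client state dicts and `sample_counts[i]` holds the number of training samples of client `i` — names not taken from the code above) could look like:
```
def weighted_average(w, sample_counts):
    """Sample-count-weighted FedAvg aggregation (a sketch; not what training() does above)."""
    total = float(sum(sample_counts))
    weights_avg = copy.deepcopy(w[0])
    for key in weights_avg.keys():
        weights_avg[key] = w[0][key] * (sample_counts[0] / total)
        for i in range(1, len(w)):
            weights_avg[key] += w[i][key] * (sample_counts[i] / total)
    return weights_avg
```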
### Testing Loop
```
def testing(model, dataset, bs, criterion, num_classes, classes, print_all=False):
#test loss
test_loss = 0.0
correct_class = list(0. for i in range(num_classes))
total_class = list(0. for i in range(num_classes))
test_loader = DataLoader(dataset, batch_size=bs)
l = len(test_loader)
model.eval()
print('running validation...')
for i, (data, labels) in enumerate(tqdm(test_loader)):
if torch.cuda.is_available():
data, labels = data.cuda(), labels.cuda()
output = model(data)
loss = criterion(output, labels)
test_loss += loss.item()*data.size(0)
_, pred = torch.max(output, 1)
# For F1Score
y_true = np.append(y_true, labels.data.view_as(pred).cpu().numpy()) if i != 0 else labels.data.view_as(pred).cpu().numpy()
y_hat = np.append(y_hat, pred.cpu().numpy()) if i != 0 else pred.cpu().numpy()
correct_tensor = pred.eq(labels.data.view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not torch.cuda.is_available() else np.squeeze(correct_tensor.cpu().numpy())
#test accuracy for each object class
# for i in range(num_classes):
# label = labels.data[i]
# correct_class[label] += correct[i].item()
# total_class[label] += 1
        for i, lbl in enumerate(labels.data):
            correct_class[lbl] += correct[i].item()
            total_class[lbl] += 1
# avg test loss
test_loss = test_loss/len(test_loader.dataset)
print("Test Loss: {:.6f}\n".format(test_loss))
# Avg F1 Score
f1_macro = f1_score(y_true, y_hat, average='macro')
# F1-Score -> weigthed to consider class imbalance
f1_weighted = f1_score(y_true, y_hat, average='weighted')
print("F1 Score: {:.6f} (macro) {:.6f} (weighted) %\n".format(f1_macro, f1_weighted))
# print test accuracy
if print_all:
for i in range(num_classes):
if total_class[i]>0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' %
(classes[i], 100 * correct_class[i] / total_class[i],
np.sum(correct_class[i]), np.sum(total_class[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
overall_accuracy = np.sum(correct_class) / np.sum(total_class)
print('\nFinal Test Accuracy: {:.3f} ({}/{})'.format(overall_accuracy, np.sum(correct_class), np.sum(total_class)))
return {'loss': test_loss, 'accuracy': overall_accuracy, 'f1_macro': f1_macro, 'f1_weighted': f1_weighted}
```
## Experiments
```
seq_length = 80 # mcmahan17a, fedprox, qFFL
embedding_dim = 8 # mcmahan17a, fedprox, qFFL
# hidden_dim = 100 # fedprox paper
hidden_dim = 256 # mcmahan17a, fedprox impl
num_classes = len(corpus)
classes = list(range(num_classes))
lstm_layers = 2 # mcmahan17a, fedprox, qFFL
dropout = 0.1 # TODO
class Hyperparameters():
def __init__(self, total_clients):
# number of training rounds
self.rounds = 50
# client fraction
self.C = 0.5
# number of clients
self.K = total_clients
        # number of training passes on the local dataset for each round
# self.E = 20
self.E = 1
# batch size
self.batch_size = 10
# learning Rate
self.lr = 0.8
# proximal term constant
# self.mu = 0.0
self.mu = 0.001
# percentage of clients to have fewer than E epochs
self.percentage = 0
# self.percentage = 50
# self.percentage = 90
        # target test accuracy; note testing() reports accuracy as a fraction in [0, 1],
        # so 99.0 effectively disables this early-stopping criterion
        self.target_test_accuracy = 99.0
# self.target_test_accuracy=96.0
exp_log = dict()
```
### IID
```
train_ds, data_dict, test_ds = iid_partition(corpus, seq_length, val_split=True) # Not using val_ds but makes train eval periods faster
total_clients = len(data_dict.keys())
'Total users:', total_clients
hparams = Hyperparameters(total_clients)
hparams.__dict__
# Sweeping parameter
PARAM_NAME = 'clients_fraction'
PARAM_VALUE = hparams.C
exp_id = f'{PARAM_NAME}/{PARAM_VALUE}'
exp_id
EXP_DIR = f'{BASE_DIR}/{exp_id}'
os.makedirs(EXP_DIR, exist_ok=True)
# tb_logger = SummaryWriter(log_dir)
# print(f'TBoard logger created at: {log_dir}')
title = 'LSTM FedAvg on IID'
def run_experiment(run_id):
shakespeare_lstm = ShakespeareLSTM(input_dim=seq_length,
embedding_dim=embedding_dim,
hidden_dim=hidden_dim,
classes=num_classes,
lstm_layers=lstm_layers,
dropout=dropout,
batch_first=True
)
if torch.cuda.is_available():
shakespeare_lstm.cuda()
test_history = []
lstm_iid_trained, test_history = training(shakespeare_lstm,
hparams.rounds, hparams.batch_size, hparams.lr,
train_ds,
data_dict,
test_ds,
hparams.C, hparams.K, hparams.E, hparams.mu, hparams.percentage,
title, "green",
hparams.target_test_accuracy,
corpus, # classes
history=test_history,
algorithm='fedavg',
# tb_logger=tb_writer
)
final_scores = testing(lstm_iid_trained, test_ds, batch_size * 2, nn.CrossEntropyLoss(), len(corpus), corpus)
print(f'\n\n========================================================\n\n')
print(f'Final scores for Exp {run_id} \n {final_scores}')
log = {
'history': test_history,
'hyperparams': hparams.__dict__
}
with open(f'{EXP_DIR}/results_iid_{run_id}.pkl', 'wb') as file:
pickle.dump(log, file)
return test_history
exp_history = list()
for run_id in range(2): # TOTAL RUNS
print(f'============== RUNNING EXPERIMENT #{run_id} ==============')
exp_history.append(run_experiment(run_id))
print(f'\n\n========================================================\n\n')
exp_log[title] = {
'history': exp_history,
'hyperparams': hparams.__dict__
}
df = None
for i, e in enumerate(exp_history):
if i == 0:
df = pd.json_normalize(e)
continue
df = df + pd.json_normalize(e)
df_avg = df / len(exp_history)
avg_history = df_avg.to_dict(orient='records')
plot_scores(history=avg_history, exp_id=exp_id, title=title, suffix='IID')
plot_losses(history=avg_history, exp_id=exp_id, title=title, suffix='IID')
with open(f'{EXP_DIR}/results_iid.pkl', 'wb') as file:
pickle.dump(exp_log, file)
```
### Non-IID
```
exp_log = dict()
data_dict, test_ds = noniid_partition(corpus, seq_length=seq_length, val_split=True)
total_clients = len(data_dict.keys())
'Total users:', total_clients
hparams = Hyperparameters(total_clients)
hparams.__dict__
# Sweeping parameter
PARAM_NAME = 'clients_fraction'
PARAM_VALUE = hparams.C
exp_id = f'{PARAM_NAME}/{PARAM_VALUE}'
exp_id
EXP_DIR = f'{BASE_DIR}/{exp_id}'
os.makedirs(EXP_DIR, exist_ok=True)
# tb_logger = SummaryWriter(log_dir)
# print(f'TBoard logger created at: {log_dir}')
title = 'LSTM FedAvg on Non-IID'
def run_experiment(run_id):
shakespeare_lstm = ShakespeareLSTM(input_dim=seq_length,
embedding_dim=embedding_dim,
hidden_dim=hidden_dim,
classes=num_classes,
lstm_layers=lstm_layers,
dropout=dropout,
batch_first=True
)
if torch.cuda.is_available():
shakespeare_lstm.cuda()
test_history = []
lstm_non_iid_trained, test_history = training(shakespeare_lstm,
hparams.rounds, hparams.batch_size, hparams.lr,
None, # ds empty as it is included in data_dict
data_dict,
test_ds,
hparams.C, hparams.K, hparams.E, hparams.mu, hparams.percentage,
title, "green",
hparams.target_test_accuracy,
corpus, # classes
history=test_history,
algorithm='fedavg',
# tb_logger=tb_writer
)
final_scores = testing(lstm_non_iid_trained, test_ds, batch_size * 2, nn.CrossEntropyLoss(), len(corpus), corpus)
print(f'\n\n========================================================\n\n')
print(f'Final scores for Exp {run_id} \n {final_scores}')
log = {
'history': test_history,
'hyperparams': hparams.__dict__
}
with open(f'{EXP_DIR}/results_niid_{run_id}.pkl', 'wb') as file:
pickle.dump(log, file)
return test_history
exp_history = list()
for run_id in range(2): # TOTAL RUNS
print(f'============== RUNNING EXPERIMENT #{run_id} ==============')
exp_history.append(run_experiment(run_id))
print(f'\n\n========================================================\n\n')
exp_log[title] = {
'history': exp_history,
'hyperparams': hparams.__dict__
}
df = None
for i, e in enumerate(exp_history):
if i == 0:
df = pd.json_normalize(e)
continue
df = df + pd.json_normalize(e)
df_avg = df / len(exp_history)
avg_history = df_avg.to_dict(orient='records')
plot_scores(history=avg_history, exp_id=exp_id, title=title, suffix='nonIID')
plot_losses(history=avg_history, exp_id=exp_id, title=title, suffix='nonIID')
```
### Pickle Experiment Results
```
with open(f'{EXP_DIR}/results_niid.pkl', 'wb') as file:
pickle.dump(exp_log, file)
```
# Visualizing data using matplotlib and seaborn
```
import pandas as pd, csv, os, re
import numpy as np
#from nltk.stem.porter import PorterStemmer # an approximate method of stemming words
#stemmer = PorterStemmer()
# FOR VISUALIZATIONS
import matplotlib, seaborn as sns
import matplotlib.pyplot as plt
# Visualization parameters
%pylab inline
%matplotlib inline
matplotlib.style.use('ggplot')
sns.set_style("whitegrid")
sns.despine()
dir_prefix = '/home/jovyan/work/'
counts_file = dir_prefix + 'nowdata/charters_2015.pkl'
#bigfile = dir_prefix + 'Charter-school-identities/data/charters_parsed_03-08.csv'
#tempdf = pd.read_csv(bigfile, iterator="True", sep='\t')
#len(pd.read_csv(bigfile, sep="\t", usecols=["SURVYEAR", "NCESSCH"]))
#print(tempdf.get_chunk(1).keys())
#tempdf.get_chunk(5)[["WEBTEXT", "IDEOLOGY_TEXT", "KEYWORDS_TEXT"]] # There's data here! But how much?
#big_keepcols = ['MEMBER', 'FTE', 'YEAR_OPENED', 'AGE', 'YEAR_CLOSED', 'TOTETH', 'PCTETH', 'LEA_NAME', 'STATENAME', 'TOTFRL', 'ALL_RLA00PCTPROF_1415', 'ALL_MTH00PCTPROF_1415', 'LOCALE', 'PLACE', 'SCH_NAME', 'ADDRESS14', 'TITLEI', 'ESS_COUNT', 'PROG_COUNT', 'RIT_COUNT', 'ESS_STRENGTH', 'PROG_STRENGTH', 'WEBTEXT', 'KEYWORDS_TEXT', 'IDEOLOGY_TEXT'] # 'LON1516', 'LAT1516',
def convert_df(df):
"""Makes a Pandas DataFrame more memory-efficient through intelligent use of Pandas data types:
specifically, by storing columns with repetitive Python strings not with the object dtype for unique values
(entirely stored in memory) but as categoricals, which are represented by repeated integer values. This is a
net gain in memory when the reduced memory size of the category type outweighs the added memory cost of storing
one more thing. As such, this function checks the degree of redundancy for a given column before converting it.
# TO DO: Filter out non-object columns, make that more efficient by downcasting numeric types using pd.to_numeric(),
merge that with the converted object columns (see https://www.dataquest.io/blog/pandas-big-data/).
For now, since the current DF is ENTIRELY composed of object types, code is left as is.
But note that the current code will eliminate any non-object type columns."""
converted_df = pd.DataFrame() # Initialize DF for memory-efficient storage of strings (object types)
df_obj = df.select_dtypes(include=['object']).copy() # Filter to only those columns of object data type
for col in df.columns:
if col in df_obj:
num_unique_values = len(df_obj[col].unique())
num_total_values = len(df_obj[col])
if (num_unique_values / num_total_values) < 0.5: # Only convert data types if at least half of values are duplicates
converted_df.loc[:,col] = df[col].astype('category') # Store these columns as dtype "category"
else:
converted_df.loc[:,col] = df[col]
else:
converted_df.loc[:,col] = df[col]
converted_df.select_dtypes(include=['float']).apply(pd.to_numeric,downcast='float')
converted_df.select_dtypes(include=['int']).apply(pd.to_numeric,downcast='signed')
return converted_df
import gc; gc.disable()
schooldf = pd.read_pickle(counts_file)
gc.enable()
#schooldf = pd.read_csv(counts_file, sep=",", low_memory=False, encoding="utf-8", na_values={"TITLEI":["M","N"]})
# Generate any new columns:
schooldf["DISC_RATIO"] = schooldf["DISCIPLINE_COUNT"]/schooldf["NUMWORDS"]
schooldf["INQ_RATIO"] = schooldf["INQUIRY_COUNT"]/schooldf["NUMWORDS"]
schooldf["ESS_RATIO"] = schooldf["ESS_COUNT"]/schooldf["NUMWORDS"]
schooldf["PROG_RATIO"] = schooldf["PROG_COUNT"]/schooldf["NUMWORDS"]
schooldf["% Population 25 Years and Over: Bachelor's Degree or Higher"] = schooldf["% Population 25 Years and Over: Bachelor's Degree"] + schooldf["% Population 25 Years and Over: Master's Degree"] + schooldf['% Population 25 Years and Over: Professional School Degree'] + schooldf['% Population 25 Years and Over: Doctorate Degree']
schooldf['% Total Population: Nonwhite'] = 1 - schooldf['% Total Population: White Alone']
#schooldf["PCTFRL"] = schooldf["TOTFRL"]/schooldf["MEMBER"] # Percent receiving free/ reduced-price lunch
#schooldf["IDLEAN"] = schooldf["ess_strength"] - schooldf["prog_strength"]
schooldf[schooldf["WEBTEXT"].apply(len) == 0]
# Clean up DF:
print("Initial length: ", len(schooldf))
schooldf = schooldf.drop_duplicates(subset=["NCESSCH"], keep='first') # Drop duplicate rows
schooldf = schooldf[schooldf["WEBTEXT"].apply(len) > 0] # Drop rows where NO WEBTEXT (0 pages) was gathered (i.e., scrape failed)
schooldf = schooldf[schooldf["WEBTEXT"].apply(lambda school: len(school[0][3])) > 0] # Drop rows where first scraped page is empty
schooldf = schooldf[schooldf["WEBTEXT"].apply(lambda school: sum([len(page[3]) for page in school])) > 0] # Drop rows where all pages are empty
# Drop rows where dictionary counting failed (yielding strengths = -6, the filler to avoid intractable infinity values):
#schooldf = schooldf[schooldf["DISC_STR"] != -6]
#schooldf = schooldf[schooldf["INQU_STR"] != -6]
#schooldf = schooldf[schooldf["PROG_STR"] != -6]
#schooldf = schooldf[schooldf["ESS_STR"] != -6]
schooldf.drop(["WEBTEXT", "CMO_WEBTEXT"], axis=1, inplace=True) # Drop huge columns
schooldf = convert_df(schooldf) # Make DF more memory-efficient
print("Length after cleaning: ", len(schooldf))
#print(schooldf.keys())
list(schooldf)
# Inspect different dictionary variables
schooldf[["prog_strength_x", "prog_strength_y", "PROG_STR", "ess_strength_x", "ess_strength_y", "ESS_STR"]]
'''
# Important CRDC variables:
SCH_FTETEACH_TOT Total FTE of Teachers
SCH_FTESECURITY_GUA Number of FTE security guards
SCH_PSCORPINSTANCES_ALL Instances of corporal punishment: All preschool students
SCH_PSOOSINSTANCES_ALL Instances of out-of-school suspension: All preschool students
{'SCH_FTETEACH_TOT':'Total FTE of Teachers', 'SCH_FTESECURITY_GUA':'Number of FTE security guards', 'SCH_PSCORPINSTANCES_ALL':'Instances of corporal punishment: All preschool students','SCH_PSOOSINSTANCES_ALL':'Instances of out-of-school suspension: All preschool students'}
TOT_DISCWODIS_EXPZT_M Total Number of Students without disabilities who received an expulsion under zero tolerance policies: Calculated Male Total
TOT_DISCWODIS_EXPZT_F Total Number of Students without disabilities who received an expulsion under zero tolerance policies: Calculated Female Total
TOT_DISCWDIS_EXPZT_IDEA_M Total Number of Students with disabilities who received an expulsion under zero tolerance policies: Calculated IDEA Male Total
TOT_DISCWDIS_EXPZT_IDEA_F Total Number of Students with disabilities who received an expulsion under zero tolerance policies: Calculated IDEA Female Total
TOT_DISCWODIS_EXPWE_M Total Number of Students without Disabilities who received an expulsion with educational services: Calculated Male Total
TOT_DISCWODIS_EXPWE_F Total Number of Students without Disabilities who received an expulsion with educational services: Calculated Female Total
TOT_DISCWODIS_EXPWOE_M Total Number of Students without disabilities who received an expulsion without educational services: Calculated Male Total
TOT_DISCWODIS_EXPWOE_F Total Number of Students without disabilities who received an expulsion without educational services: Calculated Female Total
TOT_DISCWDIS_EXPWE_IDEA_M Total Number of Students with disabilities who received an expulsion with educational services: Calculated IDEA Male Total
TOT_DISCWDIS_EXPWE_IDEA_F Total Number of Students with disabilities who received an expulsion with educational services: Calculated IDEA Female Total
TOT_DISCWDIS_EXPWOE_IDEA_M Total Number of Students with disabilities who received an expulsion without educational services: Calculated IDEA Male Total
TOT_DISCWDIS_EXPWOE_IDEA_F Total Number of Students with disabilities who received an expulsion without educational services: Calculated IDEA Female Total
TOT_ABSENT_M Total Chronic Student Absenteeism: Calculated Male Total
TOT_ABSENT_F Total Chronic Student Absenteeism: Calculated Female Total
expul_cols = ['TOT_DISCWODIS_EXPWE_M', 'TOT_DISCWODIS_EXPWE_F', 'TOT_DISCWODIS_EXPWOE_M', 'TOT_DISCWODIS_EXPWOE_F', 'TOT_DISCWDIS_EXPWE_IDEA_M', 'TOT_DISCWDIS_EXPWE_IDEA_F', 'TOT_DISCWDIS_EXPWOE_IDEA_M', 'TOT_DISCWDIS_EXPWOE_IDEA_F']
schooldf["Total Number of Students who received an expulsion"] = schooldf[expul_cols.sum(axis=1)
schooldf["Total Number of Students who received an expulsion under zero tolerance policies"] = schooldf['TOT_DISCWODIS_EXPZT_M', 'TOT_DISCWODIS_EXPZT_F', 'TOT_DISCWDIS_EXPZT_IDEA_M', 'TOT_DISCWDIS_EXPZT_IDEA_F'].sum(axis=1)
schooldf["Total Chronic Student Absenteeism"] = schooldf['TOT_ABSENT_M', 'TOT_ABSENT_F'].sum(axis=1)
# Important CRDC variables:
{'SCH_FTETEACH_TOT': 'Total FTE of Teachers', 'SCH_FTESECURITY_GUA': 'Number of FTE security guards', 'SCH_PSCORPINSTANCES_ALL', 'Instances of corporal punishment: All preschool students', 'SCH_PSOOSINSTANCES_ALL', 'Instances of out-of-school suspension: All preschool students', ' ', 'TOT_DISCWODIS_EXPZT_M': 'Total Number of Students without disabilities who received an expulsion under zero tolerance policies: Calculated Male Total', 'TOT_DISCWODIS_EXPZT_F': 'Total Number of Students without disabilities who received an expulsion under zero tolerance policies: Calculated Female Total', 'TOT_DISCWDIS_EXPZT_IDEA_M': 'Total Number of Students with disabilities who received an expulsion under zero tolerance policies: Calculated IDEA Male Total', 'TOT_DISCWDIS_EXPZT_IDEA_F': 'Total Number of Students with disabilities who received an expulsion under zero tolerance policies: Calculated IDEA Female Total', ' ', 'TOT_DISCWODIS_EXPWE_M': 'Total Number of Students without Disabilities who received an expulsion with educational services: Calculated Male Total', 'TOT_DISCWODIS_EXPWE_F': 'Total Number of Students without Disabilities who received an expulsion with educational services: Calculated Female Total', 'TOT_DISCWODIS_EXPWOE_M': 'Total Number of Students without disabilities who received an expulsion without educational services: Calculated Male Total', 'TOT_DISCWODIS_EXPWOE_F': 'Total Number of Students without disabilities who received an expulsion without educational services: Calculated Female Total', 'TOT_DISCWDIS_EXPWE_IDEA_M': 'Total Number of Students with disabilities who received an expulsion with educational services: Calculated IDEA Male Total', 'TOT_DISCWDIS_EXPWE_IDEA_F': 'Total Number of Students with disabilities who received an expulsion with educational services: Calculated IDEA Female Total', 'TOT_DISCWDIS_EXPWOE_IDEA_M': 'Total Number of Students with disabilities who received an expulsion without educational services: Calculated IDEA Male Total', 'TOT_DISCWDIS_EXPWOE_IDEA_F': 'Total Number of Students with disabilities who received an expulsion without educational services: Calculated IDEA Female Total', ' ', 'TOT_ABSENT_M': 'Total Chronic Student Absenteeism: Calculated Male Total', 'TOT_ABSENT_F': 'Total Chronic Student Absenteeism: Calculated Female Total'}
# Important CRDC variables:
['SCH_FTETEACH_TOT', 'SCH_FTESECURITY_GUA', 'SCH_PSCORPINSTANCES_ALL', 'SCH_PSOOSINSTANCES_ALL', 'TOT_DISCWODIS_EXPZT_M', 'TOT_DISCWODIS_EXPZT_F', 'TOT_DISCWDIS_EXPZT_IDEA_M', 'TOT_DISCWDIS_EXPZT_IDEA_F', 'TOT_DISCWODIS_EXPWE_M', 'TOT_DISCWODIS_EXPWE_F', 'TOT_DISCWODIS_EXPWOE_M', 'TOT_DISCWODIS_EXPWOE_F', 'TOT_DISCWDIS_EXPWE_IDEA_M', 'TOT_DISCWDIS_EXPWE_IDEA_F', 'TOT_DISCWDIS_EXPWOE_IDEA_M', 'TOT_DISCWDIS_EXPWOE_IDEA_F', 'TOT_ABSENT_M', 'TOT_ABSENT_F']
'''
# Combine, relabel some CRDC columns
expul_cols = ['TOT_DISCWODIS_EXPWE_M', 'TOT_DISCWODIS_EXPWE_F', 'TOT_DISCWODIS_EXPWOE_M', 'TOT_DISCWODIS_EXPWOE_F', 'TOT_DISCWDIS_EXPWE_IDEA_M', 'TOT_DISCWDIS_EXPWE_IDEA_F', 'TOT_DISCWDIS_EXPWOE_IDEA_M', 'TOT_DISCWDIS_EXPWOE_IDEA_F']
schooldf["Total Number of Students who received an expulsion"] = schooldf[expul_cols].sum(axis=1)
schooldf["Total Number of Students who received an expulsion under zero tolerance policies"] = schooldf[['TOT_DISCWODIS_EXPZT_M', 'TOT_DISCWODIS_EXPZT_F', 'TOT_DISCWDIS_EXPZT_IDEA_M', 'TOT_DISCWDIS_EXPZT_IDEA_F']].sum(axis=1)
schooldf["Total Chronic Student Absenteeism"] = schooldf[['TOT_ABSENT_M', 'TOT_ABSENT_F']].sum(axis=1)
'''
schooldf = schooldf.rename(mapper = {'SCH_FTETEACH_TOT':'# teachers',
'SCH_FTESECURITY_GUA':'# security guards',
'SCH_PSCORPINSTANCES_ALL':'# corporal punishment events preschool',
'SCH_PSOOSINSTANCES_ALL':'# suspensions preschool',
'MEMBER':'# students',
'AGE':'school age',
'PCTETH':'% nonwhites students',
'PCTFRL':'% poor students',
'DISC_STR':'Discipline emphasis',
'INQU_STR':'IBL emphasis',
'close16':'% school closures last year',
'% Total Population: White Alone':'% nonwhites (school district)',
'% Families: Income in Below Poverty Level':'% poverty',
"Total Chronic Student Absenteeism":'absenteeism rate',
"Total Number of Students who received an expulsion":'# students expelled',
"Total Number of Students who received an expulsion under zero tolerance policies":'# students expelled (ZT)'},
axis=1)
'''
#keep_keys = ['MEMBER', 'LON1516', 'LAT1516', 'FTE', 'YEAR_OPENED', 'AGE', 'YEAR_CLOSED', 'TOTETH', 'PCTETH', 'LEA_NAME', 'STATENAME', 'TOTFRL', 'ALL_RLA00PCTPROF_1415', 'ALL_MTH00PCTPROF_1415', 'LOCALE', 'PLACE', 'SCH_NAME', 'ADDRESS14', 'TITLEI', 'ESS_COUNT', 'PROG_COUNT', 'RIT_COUNT', 'ESS_STR', 'PROG_STR', 'RIT_STR', 'ESS_NORMAL', 'PROG_NORMAL', 'RIT_NORMAL']
keep_keys = ['MEMBER', 'AGE', 'PCTETH', 'PCTFRL', 'DISC_RATIO', 'INQ_RATIO','% Total Population: Nonwhite', '% Families: Income in Below Poverty Level', "% Population 25 Years and Over: Bachelor's Degree or Higher", 'Population Density (Per Sq. Mile)', 'close16']
# 'DISC_STR', 'INQU_STR', 'PROG_STR', 'ESS_STR',
'''
['CHILDPOV_S16', 'FTE',
'ALL_RLA00PCTPROF_1516', 'ALL_MTH00PCTPROF_1516', 'ess_strength_x', 'prog_strength_x', 'SD_lv_PVI_2017',
'Population Density (Per Sq. Mile)', "% Population 25 Years and Over: Bachelor's Degree",
'% Civilian Population in Labor Force 16 Years and Over: Unemployed',
'Median Household Income (In 2016 Inflation Adjusted Dollars)',
'% Total Population: Foreign Born', 'close15',
"Total Number of Students who received an expulsion under zero tolerance policies",
'SCH_FTETEACH_TOT', 'SCH_FTESECURITY_GUA'
]
'''
CRDC_keep = [
"Total Chronic Student Absenteeism", "Total Number of Students who received an expulsion"
] # 'SCH_PSCORPINSTANCES_ALL', 'SCH_PSOOSINSTANCES_ALL',
'''
['TOT_DISCWODIS_EXPZT_M', 'TOT_DISCWODIS_EXPZT_F', 'TOT_DISCWDIS_EXPZT_IDEA_M',
'TOT_DISCWDIS_EXPZT_IDEA_F', 'TOT_DISCWODIS_EXPWE_M', 'TOT_DISCWODIS_EXPWE_F', 'TOT_DISCWODIS_EXPWOE_M',
'TOT_DISCWODIS_EXPWOE_F', 'TOT_DISCWDIS_EXPWE_IDEA_M', 'TOT_DISCWDIS_EXPWE_IDEA_F',
'TOT_DISCWDIS_EXPWOE_IDEA_M', 'TOT_DISCWDIS_EXPWOE_IDEA_F', 'TOT_ABSENT_M', 'TOT_ABSENT_F']
'''
keep_keys += CRDC_keep
drop_keys = [key for key in schooldf.keys() if key not in keep_keys]
print(drop_keys)
corr_df = schooldf.drop([key for key in drop_keys], axis=1)
corr_df = corr_df.rename(mapper = {'SCH_FTETEACH_TOT':'# teachers',
'SCH_FTESECURITY_GUA':'# security guards',
'SCH_PSCORPINSTANCES_ALL':'# corporal punishment Presch',
'SCH_PSOOSINSTANCES_ALL':'# suspensions Presch',
'MEMBER':'# students',
'AGE':'school age',
'PCTETH':'% nonwhite students',
'PCTFRL':'% poor students',
'DISC_RATIO':'Discipline emphasis',
'INQ_RATIO':'IBL emphasis',
'DISC_STR':'Discipline emph -6',
'INQU_STR':'IBL emph -6',
'PROG_STR':'Prog emphasis',
'ESS_STR':'Trad emphasis',
'close16':'% school closures last year (district)',
'% Total Population: White Alone':'% white (district)',
'% Total Population: Nonwhite':'% nonwhite (district)',
'% Families: Income in Below Poverty Level':'% poverty (district)',
"% Population 25 Years and Over: Bachelor's Degree or Higher":"% higher ed (district)",
'Population Density (Per Sq. Mile)':'pop. density (district)',
"Total Chronic Student Absenteeism":'absenteeism rate',
"Total Number of Students who received an expulsion":'# students expelled',
"Total Number of Students who received an expulsion under zero tolerance policies":'# students expelled (ZT)'},
axis=1)
orderkeys = ['IBL emphasis',
'Discipline emphasis',
'% nonwhite students',
'% nonwhite (district)',
'% poverty (district)',
'% poor students',
'% school closures last year (district)',
'% higher ed (district)',
'pop. density (district)',
'# students',
'school age',
'# students expelled',
'absenteeism rate',
]
# 'IBL emph -6', 'Discipline emph -6', '# corporal punishment Presch', '# suspensions Presch', 'Prog emphasis', 'Trad emphasis',
#corr_df = corr_df.sort_values(axis=0, by=orderkeys)
corr_df = corr_df[orderkeys]
list(corr_df)
len(corr_df)
corr = corr_df.corr(method='pearson')
cmap = sns.diverging_palette(5, 250, as_cmap=True)
corr = corr.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '10pt'})\
.set_precision(2)
corr
#fig = corr.gcf()
# !pip install openpyxl
import openpyxl
corr.to_excel(dir_prefix + "Charter-school-identities/data/corrshade_100318.xlsx")
corr = corr_df.corr(method='pearson')
plt.matshow(corr_df.corr(method='pearson'))
corr = corr_df.corr(method='pearson')
heatcorr = sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
heatcorr
fig = heatcorr.get_figure()
fig.savefig(dir_prefix + "Charter-school-identities/data/heatcorr_100318.png")
schooldf["PLACE"].value_counts().plot(kind='bar')
plt.show()
grouped_place = schooldf.groupby("PLACE")
#grouped_locale.mean().sort_values(by="LOCALE",ascending=False)
grouped_place["IDLEAN"].median().plot(kind='bar')  # note: requires the IDLEAN column (see the commented-out definition above)
plt.show()
corr_df = schooldf.drop(['YEAR_CLOSED', 'YEAR_OPENED', 'LATCODE', 'LONGCODE', 'ess_count', 'prog_count', 'rit_count', 'TOTETH', 'LEA_NAME', 'STATENAME', 'ADDRESS14', 'NCESSCH', 'LEAID'], axis=1)
print(corr_df.corr(method='spearman'))
#plt.scatter(schooldf.IDLEAN, schooldf.PCTETH, alpha=0.5, c="purple", marker='.')
plt.scatter(schooldf.prog_strength, schooldf.PCTETH, alpha=0.5, c="red", marker='.') #s=schooldf.MEMBER # label="% students receiving FRPL by school ideology"
plt.scatter(schooldf.ess_strength, schooldf.PCTETH, alpha=0.5, c="blue", marker='.')
#plt.plot(schooldf.prog_strength, schooldf.PCTETH, c="red") #s=schooldf.MEMBER # label="% students receiving FRPL by school ideology"
#plt.plot(schooldf.ess_strength, schooldf.PCTETH, c="blue")
plt.ylim(0.0, 1.0) ; plt.ylabel("% nonwhite students")  # PCTETH (% nonwhite) is plotted on the y-axis
plt.xlim(0.0, 1.0) ; plt.xlabel("Progressive ideology < > Traditional ideology")
plt.show()
plt.plot(np.unique(schooldf.prog_strength), np.poly1d(np.polyfit(schooldf.prog_strength, schooldf.PCTETH, 1))(np.unique(schooldf.prog_strength)))
sns_plot = sns.lmplot(x="prog_strength", y="PCTETH", data=schooldf)
sns_plot.savefig(dir_prefix + "Charter-school-identities/data/plot_output.png")
```
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
import pickle
data = pd.read_csv("diabetes-pima.csv")
data.head(10)
# to check if any null value is present
data.isnull().values.any()
## checking Correlation
# to get correlations of each features in dataset
correlation_matrix = data.corr()
corr_features = correlation_matrix.index
plt.figure(figsize=(10,5))
# plotting the heat map
g=sns.heatmap(data[corr_features].corr(),annot=True)
data.corr()
data.head(5)
diabetes_true_count = len(data.loc[data['Outcome'] == True])
diabetes_false_count = len(data.loc[data['Outcome'] == False])
(diabetes_true_count,diabetes_false_count)
dataX = data.iloc[:,[1, 4, 5, 7]].values # Glucose, Insulin, BMI and Age: the features most correlated with Outcome
dataY = data.iloc[:,8].values
dataX
dataY
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0,1))
dataset_scaled = sc.fit_transform(dataX)
dataset_scaled = pd.DataFrame(dataset_scaled)
X = dataset_scaled
X
Y=dataY
Y
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 45, stratify = data['Outcome'] )
from sklearn.impute import SimpleImputer  # sklearn.preprocessing.Imputer was removed in scikit-learn 0.22
fill_values = SimpleImputer(missing_values=0, strategy="mean")
X_train = fill_values.fit_transform(X_train)
X_test = fill_values.transform(X_test)  # fit on the training split only to avoid test-set leakage
X_train
X_test
import xgboost
classifier=xgboost.XGBClassifier()
classifier=xgboost.XGBClassifier(base_score=0.5, booster=None, colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.3, gamma=0.4, gpu_id=-1,
importance_type='gain', interaction_constraints=None,
learning_rate=0.04, max_delta_step=0, max_depth=3,
min_child_weight=5, missing=0, monotone_constraints=None,
n_estimators=100, n_jobs=0, num_parallel_tree=1,
objective='binary:logistic', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method=None,
validate_parameters=False, verbosity=None)
classifier.fit(X_train,Y_train)
y_pred=classifier.predict(X_test)
y_pred
from sklearn.metrics import confusion_matrix, accuracy_score
cm=confusion_matrix(Y_test,y_pred)
score=accuracy_score(Y_test,y_pred)
cm
score
pickle.dump(classifier, open('model.pkl','wb'))
model = pickle.load(open('model.pkl','rb'))
```
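As a quick sanity check, the reloaded model should reproduce the predictions of the original classifier:
```
assert (model.predict(X_test) == y_pred).all()
print("Reloaded model accuracy:", accuracy_score(Y_test, model.predict(X_test)))
```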
This notebook can be run on mybinder: [](https://mybinder.org/v2/git/https%3A%2F%2Fgricad-gitlab.univ-grenoble-alpes.fr%2Fai-courses%2Fautonomous_systems_ml/HEAD?filepath=notebooks%2F4_discriminant_analysis)
*Taken from scikit-learn example*
```
%matplotlib inline
```
# Linear and Quadratic Discriminant Analysis with covariance ellipsoid
This example plots the covariance ellipsoids of each class and the
decision boundary learned by LDA and QDA. The ellipsoids display
twice the standard deviation for each class. With LDA, the
standard deviation is the same for all classes, while each
class has its own standard deviation under the QDA model.
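Concretely, both classifiers model each class as a Gaussian with mean $\mu_k$ and prior $\pi_k$ and assign $x$ to the class with the largest discriminant score. A shared covariance $\Sigma$ (LDA) makes the score linear in $x$, while a per-class covariance $\Sigma_k$ (QDA) makes it quadratic:

$$\delta_k^{\mathrm{LDA}}(x) = x^\top \Sigma^{-1}\mu_k - \tfrac{1}{2}\,\mu_k^\top \Sigma^{-1}\mu_k + \log \pi_k$$

$$\delta_k^{\mathrm{QDA}}(x) = -\tfrac{1}{2}\log\lvert\Sigma_k\rvert - \tfrac{1}{2}(x-\mu_k)^\top \Sigma_k^{-1}(x-\mu_k) + \log \pi_k$$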
```
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# #############################################################################
# Colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
# #############################################################################
# Generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
# #############################################################################
# Plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with\n fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with\n varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
# class 0: dots
plt.scatter(X0_tp[:, 0], X0_tp[:, 1], marker='.', color='red')
plt.scatter(X0_fp[:, 0], X0_fp[:, 1], marker='x',
s=20, color='#990000') # dark red
# class 1: dots
plt.scatter(X1_tp[:, 0], X1_tp[:, 1], marker='.', color='blue')
plt.scatter(X1_fp[:, 0], X1_fp[:, 1], marker='x',
s=20, color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.), zorder=0)
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='white')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'*', color='yellow', markersize=15, markeredgecolor='grey')
plt.plot(lda.means_[1][0], lda.means_[1][1],
'*', color='yellow', markersize=15, markeredgecolor='grey')
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color,
edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariance_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariance_[1], 'blue')
plt.figure(figsize=(10, 8), facecolor='white')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis',
y=0.98, fontsize=15)
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariance=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.tight_layout()
plt.subplots_adjust(top=0.92)
plt.show()
```
## Exercise:
- Which model (LDA or QDA) should be preferred in the first example (first row)?
- What kind of functions define the decision boundary for QDA in the second example (second row)? Is this boundary in agreement with the data?
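A quick numerical check can help with these questions. The sketch below (assuming the dataset functions and discriminant analysis estimators imported earlier in this example) compares the training accuracy of LDA and QDA on both datasets:
```
from sklearn.metrics import accuracy_score

for name, (X, y) in [('fixed covariance', dataset_fixed_cov()),
                     ('varying covariances', dataset_cov())]:
    lda = LinearDiscriminantAnalysis(solver='svd', store_covariance=True).fit(X, y)
    qda = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X, y)
    print('{}: LDA accuracy={:.3f}, QDA accuracy={:.3f}'.format(
        name, accuracy_score(y, lda.predict(X)), accuracy_score(y, qda.predict(X))))
```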
# Lesson 2 Demo 3: Creating Fact and Dimension Tables with Star Schema
### Walk through the basics of modeling data using Fact and Dimension tables. In this demo, we will:
1. Create both Fact and Dimension tables
2. Show how this is a basic element of the Star Schema
### Import the library
Note: An error might pop up after this command has executed. If it does, read it carefully before ignoring it.
```
import psycopg2
```
### Create a connection to the database
```
try:
conn = psycopg2.connect(user = "postgres", password = "1234", host = "127.0.0.1",
port = "5432")
except psycopg2.Error as e:
print("Error: Could not make connection to the Postgres database")
print(e)
```
### Next use that connection to get a cursor that we will use to execute queries.
```
try:
cur = conn.cursor()
except psycopg2.Error as e:
print("Error: Could not get curser to the Database")
print(e)
```
### For this demo we will use automatic commit so that each action is committed without having to call conn.commit() after each command. The ability to roll back and commit transactions is a feature of Relational Databases.
```
conn.set_session(autocommit=True)
```
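If you prefer explicit transaction control instead, a minimal sketch (reusing the same `conn` and `cur`; the `demo_txn` table name is just illustrative) looks like this:
```
# Sketch: explicit transaction control instead of autocommit
conn.set_session(autocommit=False)
try:
    cur.execute("CREATE TABLE IF NOT EXISTS demo_txn (id int);")
    conn.commit()    # persist everything since the last commit
except psycopg2.Error as e:
    conn.rollback()  # undo the uncommitted statements
    print(e)
conn.set_session(autocommit=True)  # restore the setting used in this demo
```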
### Let's imagine we work at an online Music Store. There will be many tables in our database but let's just focus on 4 tables around customer purchases.
`Table Name: customer_transactions
column: Customer Id
column: Store Id
column: Spent`
`Table Name: Customer
column: Customer Id
column: Name
column: Rewards`
`Table Name: store
column: Store Id
column: State`
`Table Name: items_purchased
column: customer id
column: Item Name`
<img src="./images/starSchema.png" width="750" height="750">
#### From this representation we can already start to see the makings of a "STAR". We have one fact table (the center of the star) and 3 dimension tables radiating from it.
### Let's create the Fact Table and insert the data into the table
```
try:
cur.execute("CREATE TABLE IF NOT EXISTS customer_transactions (customer_id int, store_id int, spent numeric);")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
#Insert into all tables
try:
cur.execute("INSERT INTO customer_transactions (customer_id, store_id, spent) \
VALUES (%s, %s, %s)", \
(1, 1, 20.50))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO customer_transactions (customer_id, store_id, spent) \
VALUES (%s, %s, %s)", \
(2, 1, 35.21))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
```
### Let's create our Dimension Tables and insert data into those tables.
```
try:
cur.execute("CREATE TABLE IF NOT EXISTS items_purchased (customer_id int, item_number int, item_name varchar);")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
try:
cur.execute("INSERT INTO items_purchased (customer_id, item_number, item_name) \
VALUES (%s, %s, %s)", \
(1, 1, "Rubber Soul"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO items_purchased (customer_id, item_number, item_name) \
VALUES (%s, %s, %s)", \
(2, 3, "Let It Be"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("CREATE TABLE IF NOT EXISTS store (store_id int, state varchar);")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
try:
cur.execute("INSERT INTO store (store_id, state) \
VALUES (%s, %s)", \
(1, "CA"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO store (store_id, state) \
VALUES (%s, %s)", \
(2, "WA"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("CREATE TABLE IF NOT EXISTS customer (customer_id int, name varchar, rewards boolean);")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
try:
cur.execute("INSERT INTO customer (customer_id, name, rewards) \
VALUES (%s, %s, %s)", \
(1, "Amanda", True))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO customer (customer_id, name, rewards) \
VALUES (%s, %s, %s)", \
(2, "Toby", False))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
```
**We can do a variety of queries on this data easily because of utilizing the fact/dimension and Star Schema**
* _Query 1_: Find all the customers that spent more than 30 dollars: who they are, what they bought, and whether they are rewards members
* _Query 2_: How much did Store 1 sell?
_Query 1:_ Find all the customers that spent more than 30 dollars: who they are, what they bought, and whether they are rewards members
```
try:
cur.execute("SELECT name, item_name, rewards FROM ((customer_transactions \
JOIN customer ON customer.customer_id=customer_transactions.customer_id)\
JOIN items_purchased ON \
customer_transactions.customer_id=items_purchased.customer_id)\
WHERE spent > 30 ;")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
```
_Query 2:_ How much did Store 1 sell?
```
try:
cur.execute("SELECT store_id, SUM(spent) FROM customer_transactions GROUP BY store_id;")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
```
### Summary: From this elegant schema we were able to pull "facts/metrics" from our fact table (how much each store sold) as well as information about our customers, which lets us run more in-depth analytics and answer business questions by joining the fact and dimension tables.
### For the sake of the demo, I will drop the tables.
```
try:
cur.execute("DROP table customer_transactions")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table items_purchased")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table customer")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table store")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
```
### And finally close your cursor and connection.
```
cur.close()
conn.close()
```
```
import random
import torch.nn as nn
import torch
import time
import math
import pickle
import pandas as pd
from pandas import Series, DataFrame
from pandarallel import pandarallel
pandarallel.initialize(progress_bar=True)
import sys
import json
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from sklearn.metrics import roc_auc_score, roc_curve, accuracy_score, matthews_corrcoef, f1_score, precision_score, recall_score
import random
import pickle
from rdkit.Chem import rdchem, Lipinski
from rdkit import Chem
from rdkit.Chem.rdmolfiles import MolFromFASTA, MolToSmiles, MolFromSmiles
from sklearn.model_selection import GridSearchCV
import numpy as np
import torch.optim as optim
folder = "/data/AIpep-clean/"
import matplotlib.pyplot as plt
from Levenshtein import distance as lev_dist
from models import Classifier
import tmap as tm
from map4 import MAP4Calculator
import os
import pandas as pd
```
# Load Classifiers
```
n_embedding = 100
n_hidden = 400
n_layers = 2
epoch = 38
filename = folder + "models/RNN-classifier/em{}_hi{}_la{}_ep{}".format(n_embedding, n_hidden, n_layers, epoch)
model_activity = Classifier.load_from_file(filename)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
model_activity.to(device)
n_embedding = 100
n_hidden = 400
n_layers = 1
epoch = 95
filename = folder + "models/RNN-classifier-hem/em{}_hi{}_la{}_ep{}".format(n_embedding, n_hidden, n_layers, epoch)
model_hemolysis = Classifier.load_from_file(filename)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
model_hemolysis.to(device)
```
# Load data
```
if not os.path.exists(folder + "pickles/all_sequences-hem.pkl"):
df_training_test = pd.read_pickle(folder+"pickles/DAASP_RNN_dataset_with_hem_and_prediction_hem.plk")
df_generated = pd.read_pickle(folder+"pickles/Generated.pkl")
df_generated_tl_pos = pd.read_pickle(folder+"pickles/Generated-TL-grampos-hem.pkl")
df_generated_tl_neg = pd.read_pickle(folder+"pickles/Generated-TL-gramneg-hem.pkl")
df_generated_all = pd.concat([df_generated, df_generated_tl_pos, df_generated_tl_neg])
df_generated_all["prediction"] = df_generated_all.Sequence.map(lambda x: model_activity.predict_peptide_sequence(x)[:,1][0])
df_generated_all["isPredActive"] = df_generated_all["prediction"] > 0.99205756
df_generated_all["prediction_hem"] = df_generated_all.Sequence.map(lambda x: model_hemolysis.predict_peptide_sequence(x)[:,1][0])
df_generated_all["isPredNotHemolytic"] = df_generated_all["prediction_hem"] > 0.99981695
df_all = pd.concat([df_training_test, df_generated_all])
count_neg = 0
count_pos = 0
count = 0
def make_id(row):
global count_neg
global count_pos
global count
if row.Set == "generated-TL-GN-hem":
count_neg += 1
return f"gen_gramneg_{count_neg}"
elif row.Set == "generated-TL-GP-hem":
count_pos += 1
return f"gen_grampos_{count_pos}"
elif row.Set == "generated":
count += 1
return f"gen_{count}"
else:
return row.ID
new_ids = df_all.apply(make_id, axis=1)
df_all["ID"] = new_ids
df_all = df_all.reset_index(drop=True)
df_all.to_pickle(folder+"pickles/all_sequences-hem.pkl")
else:
df_all = pd.read_pickle(folder+"pickles/all_sequences-hem.pkl")
df_training_test_hem = pd.read_pickle(folder+"pickles/DAASP_RNN_dataset_with_hem_and_prediction_hem.plk")
df_training_test_act = pd.read_pickle(folder+"pickles/DAASP_RNN_dataset_with_prediction.plk")
len(df_all.query("activity == 0"))
```
# Find NN
```
def find_seqNN(seq, dataframe):
best_dist = float("inf")
dists = dataframe["Sequence"].map(lambda seq2 : lev_dist(seq,seq2))
NNi = np.argmin(dists)
best_dist = dists.iloc[NNi]
NN = dataframe["Sequence"].iloc[NNi]
return best_dist, NN
def seq_to_smiles(seq):
mol = MolFromFASTA(seq, flavor=True, sanitize = True)
smiles = MolToSmiles(mol, isomericSmiles=True)
return smiles
MAP4 = MAP4Calculator(dimensions=1024)
def calc_map4(smiles):
mol = Chem.MolFromSmiles(smiles)
map4 = MAP4.calculate(mol)
return np.array(map4)
def distance(a, b):
"""Estimates the Jaccard distance of two binary arrays based on their hashes.
Arguments:
a {numpy.ndarray} -- An array containing hash values.
b {numpy.ndarray} -- An array containing hash values.
Returns:
float -- The estimated Jaccard distance.
"""
# The Jaccard distance of MinHashed values is estimated by one minus
# the fraction of positions at which the two hash arrays agree.
return 1.0 - float(np.count_nonzero(a == b)) / float(len(a))
def find_map_seqNN(fp, dataframe):
best_dist = float("inf")
dists = dataframe["MAP4"].map(lambda fp2 : distance(fp,fp2))
NNi = np.argmin(dists)
best_dist = dists.iloc[NNi]
NN = dataframe["Sequence"].iloc[NNi]
return best_dist, NN
if not os.path.exists(folder+"pickles/all_sequences_with_NN_hem.pkl"):
df_all["dist-NN-Training_hem"] = df_all["Sequence"].parallel_map(lambda x: find_seqNN(x, df_training_test_hem[df_training_test_hem["Set"]=="training"]))
df_all["dist-NN-Test_hem"] = df_all["Sequence"].parallel_map(lambda x: find_seqNN(x, df_training_test_hem[df_training_test_hem["Set"]=="test"]))
df_all["dist_Training_hem"] = df_all["dist-NN-Training_hem"].map(lambda x: x[0])
df_all["NN_Training_hem"] = df_all["dist-NN-Training_hem"].map(lambda x: x[1])
df_all["dist_Test_hem"] = df_all["dist-NN-Test_hem_hem"].map(lambda x: x[0])
df_all["NN_Test_hem"] = df_all["dist-NN-Test_hem"].map(lambda x: x[1])
del df_all["dist-NN-Training_hem"]
del df_all["dist-NN-Test_hem"]
df_all["dist-NN-Training_act"] = df_all["Sequence"].parallel_map(lambda x: find_seqNN(x, df_training_test_act[df_training_test_act["Set"]=="training"]))
df_all["dist-NN-Test_act"] = df_all["Sequence"].parallel_map(lambda x: find_seqNN(x, df_training_test_act[df_training_test_act["Set"]=="test"]))
df_all["dist_Training_act"] = df_all["dist-NN-Training_act"].map(lambda x: x[0])
df_all["NN_Training_act"] = df_all["dist-NN-Training_act"].map(lambda x: x[1])
df_all["dist_Test_act"] = df_all["dist-NN-Test_act"].map(lambda x: x[0])
df_all["NN_Test_act"] = df_all["dist-NN-Test_act"].map(lambda x: x[1])
del df_all["dist-NN-Training_act"]
del df_all["dist-NN-Test_act"]
df_all["SMILES"] = df_all.Sequence.parallel_map(seq_to_smiles)
df_all["MAP4"] = df_all.SMILES.parallel_map(calc_map4)
df_all["map-dist-NN-Training_act"] = df_all["MAP4"].parallel_map(lambda x: find_map_seqNN(x, df_all[df_all["Set"]=="training"]))
df_all["map-dist-NN-Test_act"] = df_all["MAP4"].parallel_map(lambda x: find_map_seqNN(x, df_all[df_all["Set"]=="test"]))
df_all["map_dist_Training_act"] = df_all["map-dist-NN-Training_act"].map(lambda x: x[0])
df_all["map_NN_Training_act"] = df_all["map-dist-NN-Training_act"].map(lambda x: x[1])
df_all["map_dist_Test_act"] = df_all["map-dist-NN-Test_act"].map(lambda x: x[0])
df_all["map_NN_Test_act"] = df_all["map-dist-NN-Test_act"].map(lambda x: x[1])
del df_all["map-dist-NN-Training"]
del df_all["map-dist-NN-Test"]
df_all.to_pickle(folder+"pickles/all_sequences_with_NN_hem.pkl")
else:
df_all = pd.read_pickle(folder+"pickles/all_sequences_with_NN_hem.pkl")
```
# Calculate properties
```
def calc_neg(seq):
seq = seq.upper()
neg = (seq.count('D') + seq.count('E'))
return neg
def calc_pos(seq):
seq = seq.upper()
pos = (seq.count('K') + seq.count('R'))
return pos
def calc_aa(seq, aa):
seq = seq.upper()
aa_f = seq.count(aa)/len(seq)
return aa_f
def calc_hac(smiles):
mol = MolFromSmiles(smiles)
hac = Lipinski.HeavyAtomCount(mol)
return hac
def calc_hydr(seq):
seq = seq.upper()
hydr = (seq.count('A') + seq.count('L') + seq.count('I') + seq.count('V') \
+ seq.count('M') + seq.count('F') + seq.count('C'))
return hydr
def hydropatch(seq):
seq = seq.upper()
hydro = ["A", "L", "I", "V", "M", "F", "C"]
patch = ""
patches = []
for aa in seq:
if aa in hydro:
patch+=aa
else:
if patch != "":
patches.append(len(patch))
patch=""
if patch != "":
patches.append(len(patch))
return np.array(patches)
def calc_hba(smiles):
mol = MolFromSmiles(smiles)
hba = Lipinski.NumHAcceptors(mol)
return hba
def calc_hbd(smiles):
mol = MolFromSmiles(smiles)
hbd = Lipinski.NumHDonors(mol)
return hbd
def mean(patches):
if len(patches) == 0:
return 0
return round(patches.mean(),2)
d_aminoacids = ["a","c","d","e","f","g","h","i","l","m","n","p","k","q","r","s","t","v","w","y"]
def d_aa(seq):
for aa in d_aminoacids:
if aa in seq:
return True
return False
#!/usr/bin/env python
"""
Calculates a set of properties from a protein sequence:
- hydrophobicity (according to a particular scale)
- mean hydrophobic dipole moment assuming it is an alpha-helix.
- total charge (at pH 7.4)
- amino acid composition
- discimination factor according to Rob Keller (IJMS, 2011)
Essentially the same as HeliQuest (reproduces the same values).
Author:
Joao Rodrigues
j.p.g.l.m.rodrigues@gmail.com
"""
from __future__ import print_function
import argparse
import csv
import math
import os
import time
#
# Definitions
#
scales = {'Fauchere-Pliska': {'A': 0.31, 'R': -1.01, 'N': -0.60,
'D': -0.77, 'C': 1.54, 'Q': -0.22,
'E': -0.64, 'G': 0.00, 'H': 0.13,
'I': 1.80, 'L': 1.70, 'K': -0.99,
'M': 1.23, 'F': 1.79, 'P': 0.72,
'S': -0.04, 'T': 0.26, 'W': 2.25,
'Y': 0.96, 'V': 1.22},
'Eisenberg': {'A': 0.25, 'R': -1.80, 'N': -0.64,
'D': -0.72, 'C': 0.04, 'Q': -0.69,
'E': -0.62, 'G': 0.16, 'H': -0.40,
'I': 0.73, 'L': 0.53, 'K': -1.10,
'M': 0.26, 'F': 0.61, 'P': -0.07,
'S': -0.26, 'T': -0.18, 'W': 0.37,
'Y': 0.02, 'V': 0.54},
}
_supported_scales = list(scales.keys())
aa_charge = {'E': -1, 'D': -1, 'K': 1, 'R': 1}
#
# Functions
#
def assign_hydrophobicity(sequence, scale='Fauchere-Pliska'): # noqa: E302
"""Assigns a hydrophobicity value to each amino acid in the sequence"""
hscale = scales.get(scale, None)
if not hscale:
raise KeyError('{} is not a supported scale. '.format(scale))
hvalues = []
for aa in sequence:
sc_hydrophobicity = hscale.get(aa, None)
if sc_hydrophobicity is None:
raise KeyError('Amino acid not defined in scale: {}'.format(aa))
hvalues.append(sc_hydrophobicity)
return hvalues
def calculate_moment(array, angle=100):
"""Calculates the hydrophobic dipole moment from an array of hydrophobicity
values. Formula defined by Eisenberg, 1982 (Nature). Returns the average
moment (normalized by sequence length)
uH = sqrt(sum(Hi cos(i*d))**2 + sum(Hi sin(i*d))**2),
where i is the amino acid index and d (delta) is an angular value in
degrees (100 for alpha-helix, 180 for beta-sheet).
"""
sum_cos, sum_sin = 0.0, 0.0
for i, hv in enumerate(array):
rad_inc = ((i*angle)*math.pi)/180.0
sum_cos += hv * math.cos(rad_inc)
sum_sin += hv * math.sin(rad_inc)
if len(array) != 0:
return math.sqrt(sum_cos**2 + sum_sin**2) / len(array)
else:
print(array)
return 0
def calculate_charge(sequence, charge_dict=aa_charge):
"""Calculates the charge of the peptide sequence at pH 7.4
"""
sc_charges = [charge_dict.get(aa, 0) for aa in sequence]
return sum(sc_charges)
def calculate_discrimination(mean_uH, total_charge):
"""Returns a discrimination factor according to Rob Keller (IJMS, 2011)
A sequence with d>0.68 can be considered a potential lipid-binding region.
"""
d = 0.944*mean_uH + 0.33*total_charge
return d
def calculate_composition(sequence):
"""Returns a dictionary with percentages per classes"""
# Residue character table
polar_aa = set(('S', 'T', 'N', 'H', 'Q', 'G'))
speci_aa = set(('P', 'C'))
apolar_aa = set(('A', 'L', 'V', 'I', 'M'))
charged_aa = set(('E', 'D', 'K', 'R'))
aromatic_aa = set(('W', 'Y', 'F'))
n_p, n_s, n_a, n_ar, n_c = 0, 0, 0, 0, 0
for aa in sequence:
if aa in polar_aa:
n_p += 1
elif aa in speci_aa:
n_s += 1
elif aa in apolar_aa:
n_a += 1
elif aa in charged_aa:
n_c += 1
elif aa in aromatic_aa:
n_ar += 1
return {'polar': n_p, 'special': n_s,
'apolar': n_a, 'charged': n_c, 'aromatic': n_ar}
def analyze_sequence(name=None, sequence=None, window=18, verbose=False):
"""Runs all the above on a sequence. Pretty prints the results"""
w = window
outdata = [] # for csv writing
# Processing...
seq_len = len(sequence)
print('[+] Analysing sequence {} ({} aa.)'.format(name, seq_len))
print('[+] Using a window of {} aa.'.format(w))
for seq_range in range(0, seq_len):
seq_w = sequence[seq_range:seq_range+w]
if seq_range and len(seq_w) < w:
break
# Numerical values
z = calculate_charge(seq_w)
seq_h = assign_hydrophobicity(seq_w)
av_h = sum(seq_h)/len(seq_h)
av_uH = calculate_moment(seq_h)
d = calculate_discrimination(av_uH, z)
# AA composition
aa_comp = calculate_composition(seq_w)
n_tot_pol = aa_comp['polar'] + aa_comp['charged']
n_tot_apol = aa_comp['apolar'] + aa_comp['aromatic'] + aa_comp['special'] # noqa: E501
n_charged = aa_comp['charged'] # noqa: E501
n_aromatic = aa_comp['aromatic'] # noqa: E501
_t = [name, sequence, seq_range+1, w, seq_w, z, av_h, av_uH, d,
n_tot_pol, n_tot_apol, n_charged, n_aromatic]
outdata.append(_t)
if verbose:
print(' Window {}: {}-{}-{}'.format(seq_range+1, seq_range,
seq_w, seq_range+w))
print(' z={:<3d} <H>={:4.3f} <uH>={:4.3f} D={:4.3f}'.format(z, av_h, # noqa: E501
av_uH, d)) # noqa: E501
print(' Amino acid composition')
print(' Polar : {:3d} / {:3.2f}%'.format(n_tot_pol, n_tot_pol*100/w)) # noqa: E501
print(' Non-Polar: {:3d} / {:3.2f}%'.format(n_tot_apol, n_tot_apol*100/w)) # noqa: E501
print(' Charged : {:3d} / {:3.2f}%'.format(n_charged, n_charged*100/w)) # noqa: E501
print(' Aromatic : {:3d} / {:3.2f}%'.format(n_aromatic, n_aromatic*100/w)) # noqa: E501
print()
return outdata
def read_fasta_file(afile):
"""Parses a file with FASTA formatted sequences"""
if not os.path.isfile(afile):
raise IOError('File not found/readable: {}'.format(afile))
sequences = []
seq_name, cur_seq = None, None
with open(afile) as handle:
for line in handle:
line = line.strip()
if line.startswith('>'):
if cur_seq:
sequences.append((seq_name, ''.join(cur_seq)))
seq_name = line[1:]
cur_seq = []
elif line:
cur_seq.append(line)
sequences.append((seq_name, ''.join(cur_seq))) # last seq
return sequences
def hydr_moment(seq):
seq = seq.upper()
hdr = assign_hydrophobicity(seq,"Eisenberg")
return calculate_moment(hdr)
if not os.path.exists(folder+"pickles/all_sequences_with_NN_prop-hem.pkl"):
df_all["length"] = df_all.Sequence.map(len)
df_all = df_all.query("length>1")
df_all["D_AA"] = df_all.Sequence.map(d_aa)
aminoacids = ["A","C","D","E","F","G","H","I","L","M","N","P","K","Q","R","S","T","V","W","Y"]
for aa in aminoacids:
df_all[f"{aa}_fract"] = df_all.Sequence.map(lambda x: calc_aa(x, aa))
df_all["positive"] = df_all.Sequence.parallel_map(calc_pos)
df_all["negative"] = df_all.Sequence.parallel_map(calc_neg)
df_all["HAC"] = df_all.SMILES.parallel_map(calc_hac)
df_all["HBA"] = df_all.SMILES.parallel_map(calc_hba)
df_all["HBD"] = df_all.SMILES.parallel_map(calc_hbd)
df_all["hydrophobic"] = df_all.Sequence.parallel_map(calc_hydr)
df_all["hydrophobic_patches"] = df_all.Sequence.parallel_map(hydropatch)
df_all["hydrophobic_patches_num"] = df_all.hydrophobic_patches.map(len)
df_all["hydrophobic_patches_len"] = df_all.hydrophobic_patches.map(mean)
df_all["hydro_res_fract"] = df_all.apply(lambda x: x.hydrophobic / x.length, axis=1)
df_all["pos_res_fract"] = df_all.apply(lambda x: x.positive / x.length, axis=1)
df_all["HydroMoment"] = df_all.Sequence.map(hydr_moment)
df_all["charge"] = df_all["Sequence"].map(lambda x: calculate_charge(x.upper()))
df_all["hydrophobicity"] = df_all["Sequence"].map(lambda x: assign_hydrophobicity(x.upper()))
df_all["av_hydrophobicity"] = df_all["hydrophobicity"].map(lambda x: sum(x)/len(x))
df_all["discrimination"] = df_all.apply(lambda x: calculate_discrimination(x.HydroMoment, x.charge), axis=1)
df_all.to_pickle(folder+"pickles/all_sequences_with_NN_prop-hem.pkl")
else:
df_all = pd.read_pickle(folder+"pickles/all_sequences_with_NN_prop-hem.pkl")
df_training_test_all = pd.read_pickle(folder+"pickles/DAASP_RNN_dataset_with_hemolysis.plk")
df_training_test_all["HydroMoment"] = df_training_test_all.Sequence.map(hydr_moment)
```
# SPIDER helicity prediction
```
def row_to_fasta(row):
seq = row["Sequence"]
ID = row["Sequence"]
fasta = ">{}\n{}".format(ID, seq)
return fasta
def fastafile(row, folder="/data/AIpep/spiderData/"):
fasta = row["fasta"]
fasta = fasta.upper()
ID = str(row["ID"])
name = folder+ID+".seq"
with open(name, "w") as output:
output.write(fasta)
def filename(row, folder="/data/AIpep/spiderData/"):
ID = str(row["ID"])
name = ID+".seq"
return name
def fileloc(row, folder="/data/AIpep/spiderData/"):
ID = str(row["ID"])
name = folder+ID+".seq"
return name
def read_spider(row, folder="/data/AIpep/spider3/"):
ss = []
ID = str(row["ID"])
name = ID+".seq.i2"
with open(folder+name) as infile:
for line in infile:
line = line.strip()
line = line.split(" ")
ss.append(line[2])
return ss[1:]
def count_ss(ss, pred = "H"):
return ss.count(pred)
def fract_ss(ss, pred = "H"):
if len(ss)!=0:
return ss.count(pred)/len(ss)
else:
return 0
df_all["fasta"] = df_all.apply(row_to_fasta, axis = 1)
df_all.apply(fastafile, axis=1)
df_all["SpiderFilename"] = df_all.apply(filename, axis=1)
df_all["SpiderFileloc"] = df_all.apply(fileloc, axis=1)
df_all[["SpiderFilename", "SpiderFileloc"]].to_csv("SPIDER3-Single_np/file_list-hem", header=False, index=False, sep=' ')
df_training_test_all["fasta"] = df_training_test_all.apply(row_to_fasta, axis = 1)
df_training_test_all.apply(fastafile, axis=1)
df_training_test_all["SpiderFilename"] = df_training_test_all.apply(filename, axis=1)
df_training_test_all["SpiderFileloc"] = df_training_test_all.apply(fileloc, axis=1)
df_training_test_all[["SpiderFilename", "SpiderFileloc"]].to_csv("../AIpep/SPIDER3-Single_np/file_list-hem_alltraining", header=False, index=False, sep=' ')
len(df_training_test_all)
df_all.columns
```
### Run SPIDER
```
%%bash
./SPIDER3-Single_np/impute_script_np.sh
```
```
df_all["SS"] = df_all.apply(read_spider, axis=1)
df_all["countH"] = df_all.SS.map(count_ss)
df_all["fraction_PredHelical"] = df_all.SS.map(fract_ss)
df_all["fraction_PredBetaSheet"] = df_all.SS.map(lambda x : fract_ss(x, "E"))
df_all["fraction_PredCoil"] = df_all.SS.map(lambda x : fract_ss(x, "C"))
df_training_test_all["SS"] = df_training_test_all.apply(read_spider, axis=1)
df_training_test_all["countH"] = df_training_test_all.SS.map(count_ss)
df_training_test_all["fraction_PredHelical"] = df_training_test_all.SS.map(fract_ss)
df_training_test_all["fraction_PredBetaSheet"] = df_training_test_all.SS.map(lambda x : fract_ss(x, "E"))
df_training_test_all["fraction_PredCoil"] = df_training_test_all.SS.map(lambda x : fract_ss(x, "C"))
df_all
df_all.to_pickle(folder+"pickles/all_sequences_with_NN_prop_helicity-hem.pkl")
```
# Web Track Overview
```
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
def cc_15_jsonl(f):
prefix = '/mnt/ceph/storage/data-in-progress/kibi9872/sigir2021/data-13-10-2020/cc15-relevance-transfer/'
threshold = 0.82
df = pd.read_json(prefix + f, lines=True)
df['urlMatches'] = df['urlMatches'] > 0
df['urlMatchAndNearDuplicate'] = (df['urlMatches'] > 0) & (df['urlMaxS3Score'] >= threshold)
df['nearDuplicate'] = df['maxNearDuplicateS3Score'] >= threshold
df['nearDuplicateOrUrlDupl'] = (df['maxNearDuplicateS3Score'] >= threshold) | (df['urlMatchAndNearDuplicate'] == True)
df['inCC15'] = (df['nearDuplicateOrUrlDupl'] == True) | (df['maxNearDuplicateS3ScoreDuplicateAware'] >= threshold)
return df
df_2009 = cc_15_jsonl('web-2009.jsonl')
df_2009['corpus'] = '2009'
df_2010 = cc_15_jsonl('web-2010.jsonl')
df_2010['corpus'] = '2010'
df_2011 = cc_15_jsonl('web-2011.jsonl')
df_2011['corpus'] = '2011'
df_2012 = cc_15_jsonl('web-2012.jsonl')
df_2012['corpus'] = '2012'
df_2013 = cc_15_jsonl('web-2013.jsonl')
df_2013['corpus'] = '2013'
df_2014 = cc_15_jsonl('web-2014.jsonl')
df_2014['corpus'] = '2014'
cc_15_df = pd.concat([df_2009, df_2010, df_2011, df_2012, df_2013, df_2014])
cc_15_df = cc_15_df[cc_15_df['duplicate'] == False]
def cw_jsonl(f):
prefix = '/mnt/ceph/storage/data-in-progress/kibi9872/sigir2021/data-15-10-2020/'
df = pd.read_json(prefix + f, lines=True)
threshold = 0.82
df['inCw12'] = df['cw12Matches'] > 0
df['inWayback'] = (df['cw12Matches'] > 0) | (df['matches'] > 0)
df['inCW12AndNearDuplicate'] = (df['cw12Matches'] > 0) & (df['cw12UrlMaxS3Score'] >= threshold)
df['nearDuplicate'] = df['cw12MaxNearDuplicateS3Score'] >= threshold
df['nearDuplicateOrUrlDupl'] = (df['cw12MaxNearDuplicateS3Score'] >= threshold) | (df['inCW12AndNearDuplicate'] == True)
df['inCw12'] = (df['nearDuplicateOrUrlDupl'] == True) | (df['cw12MaxNearDuplicateS3ScoreDuplicateAware'] >= threshold)
df['inWaybackAndNearDuplicate'] = ((df['matches'] > 0) | (df['non200Matches'] > 0)) & ((df['waybackMachineS3Score'] >= threshold) | (df['waybackMachineS3ScoreDuplicateAware'] >= threshold))
df['inCw12Wb12'] = (df['inWaybackAndNearDuplicate'] == True) | (df['inCw12'] == True)
return df
df_2009 = cw_jsonl('web-2009.jsonl')
df_2009['corpus'] = '2009'
df_2010 = cw_jsonl('web-2010.jsonl')
df_2010['corpus'] = '2010'
df_2011 = cw_jsonl('web-2011.jsonl')
df_2011['corpus'] = '2011'
df_2012 = cw_jsonl('web-2012.jsonl')
df_2012['corpus'] = '2012'
df_cw12 = pd.concat([df_2009, df_2010, df_2011, df_2012])
df_cw12 = df_cw12[df_cw12['duplicate'] == False]
cc_15_df
df_treck_overview = cc_15_df.groupby(['corpus', 'topic']).aggregate(
documents=('topic', 'count'),
relevant=('relevant','sum')
).reset_index().groupby('corpus').aggregate(
meanDocuments=('documents', np.mean),
stdDocuments=('documents', np.std),
meanRelevancy=('relevant', np.mean),
stdRelevancy=('relevant', np.std)
).reset_index()
df_treck_overview
cc_15_url_agg = cc_15_df.groupby(['corpus', 'topic']).aggregate(
documents=('topic', 'count'),
urlMatches=('urlMatches','sum')
).reset_index().groupby('corpus').aggregate(
meanUrlMatches=('urlMatches', np.mean),
stdUrlMatches=('urlMatches', np.std)
).reset_index()
cc_15_url_agg
cw12_url_agg = df_cw12.groupby(['corpus', 'topic']).aggregate(
documents=('topic', 'count'),
urlMatches=('inCw12','sum'),
urlMatchesIncludingWayback=('inWayback','sum')
).reset_index().groupby('corpus').aggregate(
meanUrlMatches=('urlMatches', np.mean),
stdUrlMatches=('urlMatches', np.std),
meanUrlMatchesIncludingWayback=('urlMatchesIncludingWayback', np.mean),
stdUrlMatchesIncludingWayback=('urlMatchesIncludingWayback', np.std),
).reset_index()
cw12_url_agg
def cc15(year: str):
i = cc_15_url_agg[cc_15_url_agg['corpus'] == year].iloc[0]
return '{:.1f}'.format(i['meanUrlMatches']) + '$_{(' + '{:.1f}'.format(i['stdUrlMatches']) + ')}$'
def cw12(year: str):
if year in ['2013', '2014']:
return '& -- & -- '
i = cw12_url_agg[cw12_url_agg['corpus'] == year].iloc[0]
return '& ' + '{:.1f}'.format(i['meanUrlMatches']) + '$_{(' + '{:.1f}'.format(i['stdUrlMatches']) + ')}$ & ' +\
'{:.1f}'.format(i['meanUrlMatchesIncludingWayback']) + '$_{(' + '{:.1f}'.format(i['stdUrlMatchesIncludingWayback']) + ')}$'
def judgments(year: str):
i = df_treck_overview[df_treck_overview['corpus'] == year].iloc[0]
return '{:.1f}'.format(i['meanDocuments']) + '$_{(' + '{:.1f}'.format(i['stdDocuments']) + ')}$' + ' & ' + \
'{:.1f}'.format(i['meanRelevancy']) + '$_{(' + '{:.1f}'.format(i['stdRelevancy']) + ')}$'
def row(year: str):
return judgments(year) + cw12(year) + ' & ' + cc15(year)
def create_table_web_track_overview():
return """\\begin{table}
\\centering
\\small
\\setlength{\\tabcolsep}{3pt}%
\\caption{{\\color{red} Add distribution relevant vs. irrelevant.} Overview of relevance assessments of the web tracks and duplicates.}
\\label{table-web-track-overview}
%\\vspace*{-0.3cm}
\\begin{tabular}{cl@{\\hspace{0.15em}}ccccccc}
\\toprule
\\multicolumn{2}{l@{}}{\\bfseries Track} & \\multicolumn{4}{c@{}}{\\bfseries Relevance Assessments} & \\multicolumn{3}{c@{}}{\\bfseries Existing URLs with Judgment} \\\\
\\cmidrule{3-6}
\\cmidrule(l@{1em}){7-9}
& & Topics & \\#Runs & Judgments & \\#Relevant & CW12 & CW12$^{\\dagger}$ & CC15\\\\
\\midrule
\\parbox[t]{4mm}{\\multirow{4}{*}{\\rotatebox[origin=c]{90}{\\tiny CW09 \\kern-0.6em}}}
& 2009 & 50 & 74 & """ + row('2009') + """ \\\\
& 2010 & 48 & 59 & """ + row('2010') + """ \\\\
& 2011 & 50 & 40 & """ + row('2011') + """ \\\\
& 2012 & 50 & 35 & """ + row('2012') + """ \\\\
\\midrule
\\parbox[t]{4mm}{\\multirow{2}{*}{\\rotatebox[origin=c]{90}{\\tiny CW12 \\kern-0.6em}}}
& 2013 & 50 & 37 & """ + row('2013') + """ \\\\
& 2014 & 50 & 33 & """ + row('2014') + """ \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
table_web_track_overview = create_table_web_track_overview()
with open('/sigir21/sigir21-relevance-label-transfer-paper-submitted/table-web-track-overview.tex', 'w+') as f:
f.write(table_web_track_overview)
df_all = []
for _, v in cc_15_df.iterrows():
df_all += [{
'corpus': 'cc15',
'year': v['corpus'],
'topic': v['topic'],
'relevant': v['relevant'],
'inCorpus': v['inCC15'],
'method': 'url+simhash'
}]
df_all += [{
'corpus': 'cc15',
'year': v['corpus'],
'topic': v['topic'],
'relevant': v['relevant'],
'inCorpus': v['urlMatchAndNearDuplicate'],
'method': 'url'
}]
for _, v in df_cw12.iterrows():
df_all += [{
'corpus': 'cw12',
'year': v['corpus'],
'topic': v['topic'],
'relevant': v['relevant'],
'inCorpus': v['inCw12'],
'method': 'url+simhash'
}]
df_all += [{
'corpus': 'cw12wb12',
'year': v['corpus'],
'topic': v['topic'],
'relevant': v['relevant'],
'inCorpus': v['inCw12Wb12'],
'method': 'url+simhash'
}]
df_all += [{
'corpus': 'cw12',
'year': v['corpus'],
'topic': v['topic'],
'relevant': v['relevant'],
'inCorpus': v['inCW12AndNearDuplicate'],
'method': 'url'
}]
df_all += [{
'corpus': 'cw12wb12',
'year': v['corpus'],
'topic': v['topic'],
'relevant': v['relevant'],
'inCorpus': v['inCW12AndNearDuplicate'] or v['inWaybackAndNearDuplicate'],
'method': 'url'
}]
df_all = pd.DataFrame(df_all)
df_all
def config_axis(ax, relevant):
ax.set(ylim=(0, 0.25))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(relevant)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(not relevant)
ax.set_yticks([])
def plot_thing(df_all, relevant):
font = {
'weight': 'normal',
'size': 16
}
plt.rc('font', **font)
fig, ax = plt.subplots(figsize=(6, 5))
config_axis(ax, relevant)
plt_all = sb.barplot(x='year', y='inCorpus', data=df_all[(df_all['method'] == 'url+simhash') & (df_all['relevant'] == relevant)], palette="pastel", errwidth=0, hue='corpus', ax=ax)
ax2 = plt_all.twinx()
config_axis(ax2, relevant)
plt_all = sb.barplot(x='year', y='inCorpus', data=df_all[(df_all['method'] == 'url') & (df_all['relevant'] == relevant)], errwidth=0, hue='corpus', ax=ax2)
ax3 = plt_all.twinx()
config_axis(ax3, relevant)
ret = sb.barplot(x='year', y='inCorpus', alpha=0, data=df_all[(df_all['method'] == 'url+simhash') & (df_all['relevant'] == relevant)], errwidth=1, capsize=.1, palette="pastel", hue='corpus', ax=ax3)
ax.set_title(('Relevant' if relevant else 'Irrelevant') + ' Judgments', x=0.7, y=0.9)
ax.get_legend().remove()
ax2.get_legend().remove()
ax2.set_ylabel('')
ax3.get_legend().remove()
ax3.set_ylabel('')
ax.set_ylabel('Transferred Judgments', fontsize=22)
ax.set_xlabel('Web Track', fontsize=18)
if relevant:
ax.yaxis.set_label_position("right")
ax.set_yticks([i/100 for i in range(0,26,5)])
ax.yaxis.tick_right()
ax.set_ylabel('')
else:
ax.set_yticks([i/100 for i in range(0,26,5)])
fig.tight_layout()
return fig
left_plot = plot_thing(df_all, False)
left_plot.savefig('/sigir21/sigir21-relevance-label-transfer-figures/plot-transferred-labels-left-side.pdf', format='pdf')
right_plot = plot_thing(df_all, True)
right_plot.savefig('/sigir21/sigir21-relevance-label-transfer-figures/plot-transferred-labels-right-side.pdf', format='pdf')
df_all[(df_all['method'] == 'url+simhash') & (df_all['corpus'] == 'cw12wb12')].inCorpus.mean()
df_all[(df_all['method'] == 'url') & (df_all['corpus'] == 'cw12wb12')].inCorpus.mean()
df_all[(df_all['method'] == 'url+simhash') & (df_all['corpus'] == 'cw12')].inCorpus.mean()
```
# Custom statespace models
The true power of the state space model is to allow the creation and estimation of custom models. This notebook shows various statespace models that subclass `sm.tsa.statespace.MLEModel`.
Recall that the general state space model can be written in the following way:
$$
\begin{aligned}
y_t & = Z_t \alpha_{t} + d_t + \varepsilon_t \\
\alpha_{t+1} & = T_t \alpha_{t} + c_t + R_t \eta_{t}
\end{aligned}
$$
You can check the details and the dimensions of the objects [in this link](https://www.statsmodels.org/stable/statespace.html#custom-state-space-models)
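For orientation, the names `statsmodels` uses internally for these objects (the same keys the `update` methods below assign to) map onto the notation as follows:
```python
# Notation above             statsmodels representation name
#   Z_t (design)         ->  'design'
#   d_t (obs intercept)  ->  'obs_intercept'
#   T_t (transition)     ->  'transition'
#   c_t (state interc.)  ->  'state_intercept'
#   R_t (selection)      ->  'selection'
#   Cov(varepsilon_t)    ->  'obs_cov'
#   Cov(eta_t)           ->  'state_cov'
```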
Most models won't include all of these elements. For example, the design matrix $Z_t$ might not depend on time ($\forall t \;Z_t = Z$), or the model won't have an observation intercept $d_t$.
We'll start with something relatively simple and then show how to extend it bit by bit to include more elements.
+ Model 1: time-varying coefficients. One observation equation with two state equations
+ Model 2: time-varying parameters with non identity transition matrix
+ Model 3: multiple observation and multiple state equations
+ Bonus: pymc3 for Bayesian estimation
```
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from collections import OrderedDict
plt.rc("figure", figsize=(16,8))
plt.rc("font", size=15)
```
## Model 1: time-varying coefficients
$$
\begin{aligned}
y_t & = d + x_t \beta_{x,t} + w_t \beta_{w,t} + \varepsilon_t \hspace{4em} \varepsilon_t \sim N(0, \sigma_\varepsilon^2)\\
\begin{bmatrix} \beta_{x,t} \\ \beta_{w,t} \end{bmatrix} & = \begin{bmatrix} \beta_{x,t-1} \\ \beta_{w,t-1} \end{bmatrix} + \begin{bmatrix} \zeta_{x,t} \\ \zeta_{w,t} \end{bmatrix} \hspace{3.7em} \begin{bmatrix} \zeta_{x,t} \\ \zeta_{w,t} \end{bmatrix} \sim N \left ( \begin{bmatrix} 0 \\ 0 \end{bmatrix}, \begin{bmatrix} \sigma_{\beta, x}^2 & 0 \\ 0 & \sigma_{\beta, w}^2 \end{bmatrix} \right )
\end{aligned}
$$
The observed data is $y_t, x_t, w_t$. With $x_t, w_t$ being the exogenous variables. Notice that the design matrix is time-varying, so it will have three dimensions (`k_endog x k_states x nobs`)
The states are $\beta_{x,t}$ and $\beta_{w,t}$. The state equation tells us these states evolve with a random walk. Thus, in this case the transition matrix is a 2 by 2 identity matrix.
We'll first simulate the data, then construct a model, and finally estimate it.
```
def gen_data_for_model1():
nobs = 1000
rs = np.random.RandomState(seed=93572)
d = 5
var_y = 5
var_coeff_x = 0.01
var_coeff_w = 0.5
x_t = rs.uniform(size=nobs)
w_t = rs.uniform(size=nobs)
eps = rs.normal(scale=var_y**0.5, size=nobs)
beta_x = np.cumsum(rs.normal(size=nobs, scale=var_coeff_x**0.5))
beta_w = np.cumsum(rs.normal(size=nobs, scale=var_coeff_w**0.5))
y_t = d + beta_x * x_t + beta_w * w_t + eps
return y_t, x_t, w_t, beta_x, beta_w
y_t, x_t, w_t, beta_x, beta_w = gen_data_for_model1()
_ = plt.plot(y_t)
class TVRegression(sm.tsa.statespace.MLEModel):
def __init__(self, y_t, x_t, w_t):
exog = np.c_[x_t, w_t] # shaped nobs x 2
super(TVRegression, self).__init__(
endog=y_t, exog=exog, k_states=2,
initialization='diffuse')
# Since the design matrix is time-varying, it must be
# shaped k_endog x k_states x nobs
# Notice that exog.T is shaped k_states x nobs, so we
# just need to add a new first axis with shape 1
self.ssm['design'] = exog.T[np.newaxis, :, :] # shaped 1 x 2 x nobs
self.ssm['selection'] = np.eye(self.k_states)
self.ssm['transition'] = np.eye(self.k_states)
#Which parameters need to be positive?
self.positive_parameters = slice(1, 4)
@property
def param_names(self):
return ['intercept', 'var.e', 'var.x.coeff', 'var.w.coeff']
@property
def start_params(self):
"""
Defines the starting values for the parameters
The linear regression gives us reasonable starting values for the constant
d and the variance of the epsilon error
"""
exog = sm.add_constant(self.exog)
res = sm.OLS(self.endog, exog).fit()
params = np.r_[res.params[0], res.scale, 0.001, 0.001]
return params
def transform_params(self, unconstrained):
"""
We constrain the last three parameters
('var.e', 'var.x.coeff', 'var.w.coeff') to be positive,
because they are variances
"""
constrained = unconstrained.copy()
constrained[self.positive_parameters] = constrained[self.positive_parameters]**2
return constrained
def untransform_params(self, constrained):
"""
Need to untransform all the parameters you transformed
in the `transform_params` function
"""
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = unconstrained[self.positive_parameters]**0.5
return unconstrained
def update(self, params, **kwargs):
params = super(TVRegression, self).update(params, **kwargs)
self['obs_intercept', 0, 0] = params[0]
self['obs_cov', 0, 0] = params[1]
self['state_cov'] = np.diag(params[2:4])
```
### And then estimate it with our custom model class
```
mod = TVRegression(y_t, x_t, w_t)
res = mod.fit()
print(res.summary())
```
The values that generated the data were:
+ intercept = 5
+ var.e = 5
+ var.x.coeff = 0.01
+ var.w.coeff = 0.5
As you can see, the estimation recovered the real parameters pretty well.
We can also recover the estimated evolution of the underlying coefficients (or states in Kalman filter talk)
```
fig, axes = plt.subplots(2, figsize=(16, 8))
ss = pd.DataFrame(res.smoothed_state.T, columns=['x', 'w'])
axes[0].plot(beta_x, label='True')
axes[0].plot(ss['x'], label='Smoothed estimate')
axes[0].set(title='Time-varying coefficient on x_t')
axes[0].legend()
axes[1].plot(beta_w, label='True')
axes[1].plot(ss['w'], label='Smoothed estimate')
axes[1].set(title='Time-varying coefficient on w_t')
axes[1].legend()
fig.tight_layout();
```
## Model 2: time-varying parameters with non identity transition matrix
This is a small extension from Model 1. Instead of having an identity transition matrix, we'll have one with two parameters ($\rho_1, \rho_2$) that we need to estimate.
$$
\begin{aligned}
y_t & = d + x_t \beta_{x,t} + w_t \beta_{w,t} + \varepsilon_t \hspace{4em} \varepsilon_t \sim N(0, \sigma_\varepsilon^2)\\
\begin{bmatrix} \beta_{x,t} \\ \beta_{w,t} \end{bmatrix} & = \begin{bmatrix} \rho_1 & 0 \\ 0 & \rho_2 \end{bmatrix} \begin{bmatrix} \beta_{x,t-1} \\ \beta_{w,t-1} \end{bmatrix} + \begin{bmatrix} \zeta_{x,t} \\ \zeta_{w,t} \end{bmatrix} \hspace{3.7em} \begin{bmatrix} \zeta_{x,t} \\ \zeta_{w,t} \end{bmatrix} \sim N \left ( \begin{bmatrix} 0 \\ 0 \end{bmatrix}, \begin{bmatrix} \sigma_{\beta, x}^2 & 0 \\ 0 & \sigma_{\beta, w}^2 \end{bmatrix} \right )
\end{aligned}
$$
What should we modify in our previous class to make things work?
+ Good news: not a lot!
+ Bad news: we need to be careful about a few things
### 1) Change the starting parameters function
We need to add names for the new parameters $\rho_1, \rho_2$ and provide corresponding starting values.
The `param_names` function goes from:
```python
def param_names(self):
return ['intercept', 'var.e', 'var.x.coeff', 'var.w.coeff']
```
to
```python
def param_names(self):
return ['intercept', 'var.e', 'var.x.coeff', 'var.w.coeff',
'rho1', 'rho2']
```
and we change the `start_params` function from
```python
def start_params(self):
exog = sm.add_constant(self.exog)
res = sm.OLS(self.endog, exog).fit()
params = np.r_[res.params[0], res.scale, 0.001, 0.001]
return params
```
to
```python
def start_params(self):
exog = sm.add_constant(self.exog)
res = sm.OLS(self.endog, exog).fit()
params = np.r_[res.params[0], res.scale, 0.001, 0.001, 0.8, 0.8]
return params
```
### 2) Change the `update` function
It goes from
```python
def update(self, params, **kwargs):
params = super(TVRegression, self).update(params, **kwargs)
self['obs_intercept', 0, 0] = params[0]
self['obs_cov', 0, 0] = params[1]
self['state_cov'] = np.diag(params[2:4])
```
to
```python
def update(self, params, **kwargs):
params = super(TVRegression, self).update(params, **kwargs)
self['obs_intercept', 0, 0] = params[0]
self['obs_cov', 0, 0] = params[1]
self['state_cov'] = np.diag(params[2:4])
self['transition', 0, 0] = params[4]
self['transition', 1, 1] = params[5]
```
### 3) (Optional) Change `transform_params` and `untransform_params`
This is not required, but you might want to restrict $\rho_1, \rho_2$ to lie between -1 and 1.
In that case, we first import two utility functions from `statsmodels`.
```python
from statsmodels.tsa.statespace.tools import (
constrain_stationary_univariate, unconstrain_stationary_univariate)
```
`constrain_stationary_univariate` constrains the value to lie within -1 and 1.
`unconstrain_stationary_univariate` provides the inverse function.
The transform and untransform parameters function would look like this
(remember that $\rho_1, \rho_2$ are in the 4 and 5th index):
```python
def transform_params(self, unconstrained):
constrained = unconstrained.copy()
constrained[self.positive_parameters] = constrained[self.positive_parameters]**2
constrained[4] = constrain_stationary_univariate(constrained[4:5])
constrained[5] = constrain_stationary_univariate(constrained[5:6])
return constrained
def untransform_params(self, constrained):
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = unconstrained[self.positive_parameters]**0.5
unconstrained[4] = unconstrain_stationary_univariate(constrained[4:5])
unconstrained[5] = unconstrain_stationary_univariate(constrained[5:6])
return unconstrained
```
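A quick round-trip sanity check of this pair (a sketch; the value 1.5 is arbitrary):
```python
import numpy as np
from statsmodels.tsa.statespace.tools import (
    constrain_stationary_univariate, unconstrain_stationary_univariate)

x = constrain_stationary_univariate(np.array([1.5]))
print(x)                                     # a value inside (-1, 1)
print(unconstrain_stationary_univariate(x))  # recovers ~1.5
```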
I'll write the full class below (without the optional changes I have just discussed)
```
class TVRegressionExtended(sm.tsa.statespace.MLEModel):
def __init__(self, y_t, x_t, w_t):
exog = np.c_[x_t, w_t] # shaped nobs x 2
super(TVRegressionExtended, self).__init__(
endog=y_t, exog=exog, k_states=2,
initialization='diffuse')
# Since the design matrix is time-varying, it must be
# shaped k_endog x k_states x nobs
# Notice that exog.T is shaped k_states x nobs, so we
# just need to add a new first axis with shape 1
self.ssm['design'] = exog.T[np.newaxis, :, :] # shaped 1 x 2 x nobs
self.ssm['selection'] = np.eye(self.k_states)
self.ssm['transition'] = np.eye(self.k_states)
#Which parameters need to be positive?
self.positive_parameters = slice(1, 4)
@property
def param_names(self):
return ['intercept', 'var.e', 'var.x.coeff', 'var.w.coeff',
'rho1', 'rho2']
@property
def start_params(self):
"""
Defines the starting values for the parameters
The linear regression gives us reasonable starting values for the constant
d and the variance of the epsilon error
"""
exog = sm.add_constant(self.exog)
res = sm.OLS(self.endog, exog).fit()
params = np.r_[res.params[0], res.scale, 0.001, 0.001, 0.7, 0.8]
return params
def transform_params(self, unconstrained):
"""
We constrain the last three parameters
('var.e', 'var.x.coeff', 'var.w.coeff') to be positive,
because they are variances
"""
constrained = unconstrained.copy()
constrained[self.positive_parameters] = constrained[self.positive_parameters]**2
return constrained
def untransform_params(self, constrained):
"""
Need to untransform all the parameters you transformed
in the `transform_params` function
"""
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = unconstrained[self.positive_parameters]**0.5
return unconstrained
def update(self, params, **kwargs):
params = super(TVRegressionExtended, self).update(params, **kwargs)
self['obs_intercept', 0, 0] = params[0]
self['obs_cov', 0, 0] = params[1]
self['state_cov'] = np.diag(params[2:4])
self['transition', 0, 0] = params[4]
self['transition', 1, 1] = params[5]
```
To estimate, we'll use the same data as in model 1 and expect the $\rho_1, \rho_2$ to be near 1.
The results look pretty good!
Note that this estimation can be quite sensitive to the starting value of $\rho_1, \rho_2$. If you try lower values, you'll see it fails to converge.
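To see that sensitivity yourself, a small sketch (hypothetical lower starting values passed through `fit`'s `start_params` argument; `mod_check`, `sv`, and `res_check` are illustrative names) could look like this:
```
# Sketch: refit with deliberately low starting values for rho1, rho2
# (parameter indices 4 and 5) to probe convergence sensitivity
mod_check = TVRegressionExtended(y_t, x_t, w_t)
sv = mod_check.start_params.copy()
sv[4:6] = [0.3, 0.3]  # hypothetical low starting values
res_check = mod_check.fit(start_params=sv, maxiter=2000, disp=False)
print(res_check.params)
```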
```
mod = TVRegressionExtended(y_t, x_t, w_t)
res = mod.fit(maxiter=2000) #it doesn't converge with 50 iters
print(res.summary())
```
## Model 3: multiple observation and state equations
We'll keep the time-varying parameters, but this time we'll also have two observation equations.
### Observation equations
$\hat{i_t}, \hat{M_t}, \hat{s_t}$ are observed each period.
The model for the observation equation has two equations:
$$ \hat{i_t} = \alpha_1 * \hat{s_t} + \varepsilon_1 $$
$$ \hat{M_t} = \alpha_2 + \varepsilon_2 $$
Following the [general notation from state space models](https://www.statsmodels.org/stable/statespace.html), the endogenous part of the observation equation is $y_t = (\hat{i_t}, \hat{M_t})$ and we only have one exogenous variable $\hat{s_t}$
### State equations
$$ \alpha_{1, t+1} = \delta_1 \alpha_{1, t} + \delta_2 \alpha_{2, t} + W_1 $$
$$ \alpha_{2, t+1} = \delta_3 \alpha_{2, t} + W_2 $$
### Matrix notation for the state space model
$$
\begin{aligned}
\begin{bmatrix} \hat{i_t} \\ \hat{M_t} \end{bmatrix} &=
\begin{bmatrix} \hat{s_t} & 0 \\ 0 & 1 \end{bmatrix} \begin{bmatrix} \alpha_{1, t} \\ \alpha_{2, t} \end{bmatrix} + \begin{bmatrix} \varepsilon_{1, t} \\ \varepsilon_{2, t} \end{bmatrix} \hspace{6.5em} \varepsilon_t \sim N \left ( \begin{bmatrix} 0 \\ 0 \end{bmatrix}, \begin{bmatrix} \sigma_{\varepsilon_1}^2 & 0 \\ 0 & \sigma_{\varepsilon_2}^2 \end{bmatrix} \right )
\\
\begin{bmatrix} \alpha_{1, t+1} \\ \alpha_{2, t+1} \end{bmatrix} & = \begin{bmatrix} \delta_1 & \delta_2 \\ 0 & \delta_3 \end{bmatrix} \begin{bmatrix} \alpha_{1, t} \\ \alpha_{2, t} \end{bmatrix} + \begin{bmatrix} W_1 \\ W_2 \end{bmatrix} \hspace{3.em} \begin{bmatrix} W_1 \\ W_2 \end{bmatrix} \sim N \left ( \begin{bmatrix} 0 \\ 0 \end{bmatrix}, \begin{bmatrix} \sigma_{W_1}^2 & 0 \\ 0 & \sigma_{W_2}^2 \end{bmatrix} \right )
\end{aligned}
$$
I'll simulate some data, talk about what we need to modify and finally estimate the model to see if we're recovering something reasonable.
```
true_values = {'var_e1': 0.01, 'var_e2': 0.01,
'var_w1': 0.01, 'var_w2': 0.01,
'delta1': 0.8, 'delta2': 0.5, 'delta3': 0.7}
def gen_data_for_model3():
#Starting values
alpha1_0 = 2.1
alpha2_0 = 1.1
t_max = 500
def gen_i(alpha1, s):
return alpha1*s + np.sqrt(true_values['var_e1'])*np.random.randn()
def gen_m_hat(alpha2):
return 1*alpha2 + np.sqrt(true_values['var_e2'])*np.random.randn()
def gen_alpha1(alpha1, alpha2):
w1 = np.sqrt(true_values['var_w1'])*np.random.randn()
return true_values['delta1'] * alpha1 + true_values['delta2'] * alpha2 + w1
def gen_alpha2(alpha2):
w2 = np.sqrt(true_values['var_w2'])*np.random.randn()
return true_values['delta3'] * alpha2 + w2
s_t = 0.3 + np.sqrt(1.4)*np.random.randn(t_max)
i_hat = np.empty(t_max)
m_hat = np.empty(t_max)
current_alpha1 = alpha1_0
current_alpha2 = alpha2_0
for t in range(t_max):
#Obs eqns
i_hat[t] = gen_i(current_alpha1, s_t[t])
m_hat[t] = gen_m_hat(current_alpha2)
#state eqns
new_alpha1 = gen_alpha1(current_alpha1, current_alpha2)
new_alpha2 = gen_alpha2(current_alpha2)
#Update states for next period
current_alpha1 = new_alpha1
current_alpha2 = new_alpha2
return i_hat, m_hat, s_t
i_hat, m_hat, s_t = gen_data_for_model3()
```
### What do we need to modify?
Once again, we don't need to change much, but we need to be careful about the dimensions.
#### 1) The `__init__` function changes from
```python
def __init__(self, y_t, x_t, w_t):
exog = np.c_[x_t, w_t]
super(TVRegressionExtended, self).__init__(
endog=y_t, exog=exog, k_states=2,
initialization='diffuse')
self.ssm['design'] = exog.T[np.newaxis, :, :] # shaped 1 x 2 x nobs
self.ssm['selection'] = np.eye(self.k_states)
self.ssm['transition'] = np.eye(self.k_states)
```
to
```python
def __init__(self, i_t: np.array, s_t: np.array, m_t: np.array):
exog = np.c_[s_t, np.repeat(1, len(s_t))] # exog.shape => (nobs, 2)
super(MultipleYsModel, self).__init__(
endog=np.c_[i_t, m_t], exog=exog, k_states=2,
initialization='diffuse')
self.ssm['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
self.ssm['design', 0, 0, :] = s_t
self.ssm['design', 1, 1, :] = 1
```
Note that we did not have to specify `k_endog` anywhere. The initialization does this for us after checking the dimensions of the `endog` matrix.
#### 2) The `update()` function
changes from
```python
def update(self, params, **kwargs):
params = super(TVRegressionExtended, self).update(params, **kwargs)
self['obs_intercept', 0, 0] = params[0]
self['obs_cov', 0, 0] = params[1]
self['state_cov'] = np.diag(params[2:4])
self['transition', 0, 0] = params[4]
self['transition', 1, 1] = params[5]
```
to
```python
def update(self, params, **kwargs):
params = super(MultipleYsModel, self).update(params, **kwargs)
#The following line is not needed (by default, this matrix is initialized by zeroes),
#But I leave it here so the dimensions are clearer
self['obs_intercept'] = np.repeat([np.array([0, 0])], self.nobs, axis=0).T
self['obs_cov', 0, 0] = params[0]
self['obs_cov', 1, 1] = params[1]
self['state_cov'] = np.diag(params[2:4])
#delta1, delta2, delta3
self['transition', 0, 0] = params[4]
self['transition', 0, 1] = params[5]
self['transition', 1, 1] = params[6]
```
The rest of the methods change in pretty obvious ways (we need to add parameter names, make sure the indexes work, etc.). The full code for the class is right below.
```
starting_values = {'var_e1': 0.2, 'var_e2': 0.1,
'var_w1': 0.15, 'var_w2': 0.18,
'delta1': 0.7, 'delta2': 0.1, 'delta3': 0.85}
class MultipleYsModel(sm.tsa.statespace.MLEModel):
def __init__(self, i_t: np.array, s_t: np.array, m_t: np.array):
exog = np.c_[s_t, np.repeat(1, len(s_t))] # exog.shape => (nobs, 2)
super(MultipleYsModel, self).__init__(
endog=np.c_[i_t, m_t], exog=exog, k_states=2,
initialization='diffuse')
self.ssm['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
self.ssm['design', 0, 0, :] = s_t
self.ssm['design', 1, 1, :] = 1
#These have ok shape. Placeholders since I'm changing them
#in the update() function
self.ssm['selection'] = np.eye(self.k_states)
self.ssm['transition'] = np.eye(self.k_states)
#Dictionary of positions to names
self.position_dict = OrderedDict(var_e1=1, var_e2=2,
var_w1=3, var_w2=4,
delta1=5, delta2=6, delta3=7)
self.initial_values = starting_values
self.positive_parameters = slice(0, 4)
@property
def param_names(self):
return list(self.position_dict.keys())
@property
def start_params(self):
"""
Initial values
"""
#(optional) Use scale for var_e1 and var_e2 starting values
params = np.r_[self.initial_values['var_e1'],
self.initial_values['var_e2'],
self.initial_values['var_w1'],
self.initial_values['var_w2'],
self.initial_values['delta1'],
self.initial_values['delta2'],
self.initial_values['delta3']]
return params
def transform_params(self, unconstrained):
"""
If you need to restrict parameters
For example, variances should be > 0
Parameters maybe have to be within -1 and 1
"""
constrained = unconstrained.copy()
constrained[self.positive_parameters] = constrained[self.positive_parameters]**2
return constrained
def untransform_params(self, constrained):
"""
Need to reverse what you did in transform_params()
"""
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = unconstrained[self.positive_parameters]**0.5
return unconstrained
def update(self, params, **kwargs):
params = super(MultipleYsModel, self).update(params, **kwargs)
#The following line is not needed (by default, this matrix is initialized by zeroes),
#But I leave it here so the dimensions are clearer
self['obs_intercept'] = np.repeat([np.array([0, 0])], self.nobs, axis=0).T
self['obs_cov', 0, 0] = params[0]
self['obs_cov', 1, 1] = params[1]
self['state_cov'] = np.diag(params[2:4])
#delta1, delta2, delta3
self['transition', 0, 0] = params[4]
self['transition', 0, 1] = params[5]
self['transition', 1, 1] = params[6]
mod = MultipleYsModel(i_hat, s_t, m_hat)
res = mod.fit()
print(res.summary())
```
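As a quick sanity check of the earlier note that `k_endog` is inferred from `endog`, we can inspect the model's dimensions (the expected values assume the simulation above with `t_max = 500`):
```
# k_endog was inferred from the two endog columns; k_states was set explicitly
print(mod.k_endog, mod.k_states, mod.nobs)  # expected: 2 2 500
```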
## Bonus: pymc3 for fast Bayesian estimation
In this section I'll show how you can take your custom state space model and easily plug it to `pymc3` and estimate it with Bayesian methods. In particular, this example will show you an estimation with a version of Hamiltonian Monte Carlo called the No-U-Turn Sampler (NUTS).
I'm basically copying the ideas contained [in this notebook](https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_sarimax_pymc3.html), so make sure to check that for more details.
```
#Extra requirements
import theano
import theano.tensor as tt
import pymc3 as pm
```
We need to define some helper functions to connect theano to the likelihood function implied by our model
```
class Loglike(tt.Op):
itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)
def __init__(self, model):
self.model = model
self.score = Score(self.model)
def perform(self, node, inputs, outputs):
theta, = inputs # contains the vector of parameters
llf = self.model.loglike(theta)
outputs[0][0] = np.array(llf) # output the log-likelihood
def grad(self, inputs, g):
# the method that calculates the gradients - it actually returns the
# vector-Jacobian product - g[0] is a vector of parameter values
theta, = inputs # our parameters
out = [g[0] * self.score(theta)]
return out
class Score(tt.Op):
itypes = [tt.dvector]
otypes = [tt.dvector]
def __init__(self, model):
self.model = model
def perform(self, node, inputs, outputs):
theta, = inputs
outputs[0][0] = self.model.score(theta)
```
We'll simulate the data we used for model 1 again.
We'll also `fit` it again and save the results to compare them to the Bayesian posterior we get.
```
y_t, x_t, w_t, beta_x, beta_w = gen_data_for_model1()
plt.plot(y_t)
mod = TVRegression(y_t, x_t, w_t)
res_mle = mod.fit(disp=False)
print(res_mle.summary())
```
### Bayesian estimation
We need to define a prior for each parameter and the number of draws and burn-in points
```
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600    # number of "burn-in points" (which will be discarded)
# Construct an instance of the Theano wrapper defined above, which
# will allow PyMC3 to compute the likelihood and Jacobian in a way
# that it can make use of. Here we are using the same model instance
# created earlier for MLE analysis (we could also create a new model
# instance if we preferred)
loglike = Loglike(mod)
with pm.Model():
# Priors
intercept = pm.Uniform('intercept', 1, 10)
var_e = pm.InverseGamma('var.e', 2.3, 0.5)
var_x_coeff = pm.InverseGamma('var.x.coeff', 2.3, 0.1)
var_w_coeff = pm.InverseGamma('var.w.coeff', 2.3, 0.1)
# convert variables to tensor vectors
theta = tt.as_tensor_variable([intercept, var_e, var_x_coeff, var_w_coeff])
# use a DensityDist (use a lambda function to "call" the Op)
pm.DensityDist('likelihood', lambda v: loglike(v), observed={'v': theta})
# Draw samples
trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True, cores=4)
```
### How does the posterior distribution compare with the MLE estimation?
They clearly peak around the MLE estimate.
```
results_dict = {'intercept': res_mle.params[0], 'var.e': res_mle.params[1],
'var.x.coeff': res_mle.params[2], 'var.w.coeff': res_mle.params[3]}
plt.tight_layout()
_ = pm.traceplot(trace,
lines=[(k, {}, [v]) for k, v in dict(results_dict).items()],
combined=True,
figsize=(12, 12))
```
| github_jupyter |
# VacationPy
----
#### Note
* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
```
### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame
```
output_data_file = "output_data/cities.csv"
cities_df=pd.read_csv(output_data_file)
cities_df.head()
```
### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.
```
cities_df=cities_df.dropna()
coords=cities_df[["Lat","Lon"]]
humidity=cities_df["Humidity"].astype(float)
cities_df["Humidity"]=cities_df["Humidity"].astype(float)
cities_df["Max Temp (F)"]=cities_df["Max Temp (F)"].astype(float)
cities_df["Cloudiness"]=cities_df["Cloudiness"].astype(float)
cities_df["Wind Speed (MPH)"]=cities_df["Wind Speed (MPH)"].astype(float)
gmaps.configure(api_key=g_key)
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(coords, weights=humidity,dissipating=False, max_intensity=max(humidity),point_radius=1.6)
fig.add_layer(heat_layer)
fig
```
### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows with null values.
```
temprange=[70,90]
maxhumidity=60
maxcloudiness=50
maxwind=15
# filter step by step; use the already-filtered frame so the boolean masks stay aligned
cities_df_perfect=cities_df.loc[cities_df["Humidity"] < maxhumidity]
cities_df_perfect=cities_df_perfect.loc[cities_df_perfect["Cloudiness"] < maxcloudiness]
cities_df_perfect=cities_df_perfect.loc[cities_df_perfect["Wind Speed (MPH)"] < maxwind]
cities_df_perfect=cities_df_perfect.loc[cities_df_perfect["Max Temp (F)"] > temprange[0]]
cities_df_perfect=cities_df_perfect.loc[cities_df_perfect["Max Temp (F)"] < temprange[1]]
cities_df_perfect
#Max Temp (F) Humidity Cloudiness Wind Speed (MPH) Country Date
```
### Hotel Map
* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels within 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.
```
hotel_df=cities_df_perfect[["City", "Lat", "Lon", "Max Temp (F)", "Humidity", "Cloudiness", "Wind Speed (MPH)", "Country", "Date"]].copy()  # .copy() so adding a column later doesn't trigger SettingWithCopyWarning
distance="5000"
hotelnames=[]
for index, row in hotel_df.iterrows():
lat=row["Lat"]
lon=row["Lon"]
print(f"Looking for hotels near[{lat},{lon}]")
try:
url=f"https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input=hotel&inputtype=textquery&fields=name&locationbias=circle:{distance}@{lat},{lon}&key={g_key}"
hotel=requests.get(url).json()
hotelname=hotel["candidates"][0]["name"]
except:
print("Hotel not found moving to the next one")
hotelname="Not Available"
hotelnames.append(hotelname)
hotel_df["Hotel Name"]=hotelnames
hotel_df
# The earlier SettingWithCopyWarning came from adding a column to a slice of another DataFrame; the explicit .copy() above avoids it
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lon"]]
# Add marker layer ontop of heat map
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(marker_layer)
# Display figure
fig
```
| github_jupyter |
# Parameter Space
To run a DYNAMITE model, one must specify a number of parameters for the gravitational potential. The aim of this notebook is to demonstrate how to specify these parameters and to highlight features that we have implemented in order to help you explore parameter space.
We'll start, as before, by reading the same configuration file,
```
import dynamite as dyn
# read the config file
fname = 'NGC6278_config.yaml'
c = dyn.config_reader.Configuration(fname, reset_logging=True)
```
When the configuration object is created, a parameter space object is created internally. This ``parspace`` object is a list, and every entry of this list is a parameter in our model. Let's extract it and have a look,
```
# extract the parameter space
parspace = c.parspace
print('type of parspace is', type(parspace))
print('length of parspace is', len(parspace))
print('the parameter names are:')
for par in parspace:
print(' -', par.name)
```
Several properties are specified for each parameter in the configuration file. Let's look at the value,
```
print('Parameter / value :')
for par in c.parspace:
print(f' {par.name} = {par.value}')
```
These are the starting values from which we would like to run a model.
One complication in specifying these values is that, for some parameters, we would like to take logarithmically spaced steps through parameter space, i.e. the ones which are specified as
```
parameters -> XXX -> logarithmic : True
```
Logarithmic spacing can be useful for mass parameters. For other parameters (e.g. length scales) linearly spaced steps may be more appropriate. For other types of parameters (e.g. angles) a different spacing altogether may be preferable.
To handle these possibilities, we introduce the concept of ``raw`` parameter values, distinct from the values themselves. All values associated with a parameter in the configuration file are given in ``raw`` units. When we step through parameter space, we take linear steps in ``raw`` values. The conversion from raw values to the parameter values is handled by the method
```
Parameter.get_par_value_from_raw_value
```
So to convert the above list from raw values, we can do the following,
```
print('Parameter / value :')
for par in c.parspace:
raw_value = par.value
par_value = par.get_par_value_from_raw_value(raw_value)
print(f' {par.name} = {par_value}')
```
Notice how only those parameters which have been specified with ``logarithmic : True`` have been modified.
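To make the ``raw`` concept concrete, here is a minimal sketch of what such a conversion amounts to. This is an assumption based on the output above, not DYNAMITE's actual code: logarithmic parameters appear to store base-10 logarithms of their values,
```
def par_value_from_raw(raw_value, logarithmic):
    # hypothetical stand-in for Parameter.get_par_value_from_raw_value:
    # linear steps in raw space become logarithmically spaced values
    return 10.0 ** raw_value if logarithmic else raw_value

print(par_value_from_raw(-2.0, logarithmic=True))   # 0.01
print(par_value_from_raw(-2.0, logarithmic=False))  # -2.0
```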
Another property that we specify for each parameter is whether or not it is fixed, a boolean value,
```
for par in parspace:
if par.fixed:
fix_string = ' is fixed.'
if not par.fixed:
fix_string = ' is NOT fixed.'
print(f'{par.name}{fix_string}')
```
The only parameters which are not fixed for this example are the dark matter fraction ``f-dh`` and the mass-to-light ratio ``ml``. For these free parameters, additional properties about how to search through parameter space are stored in the ``par_generator_settings`` attribute,
```
for par in parspace:
if not par.fixed:
tmp = par.par_generator_settings
lo, hi, step = tmp['lo'], tmp['hi'], tmp['step']
print(f'{par.name} takes step-size {step} and bounds ({lo}, {hi})')
```
How do we search over these free parameters? Running models (especially calculating the orbit library) is expensive, so we will want to search through parameter space in the most efficient way possible.
In general, an algorithm to search through parameter space will take as input
1. the output of all models which have been run so far (e.g. $\chi^2$ values)
2. settings for the free parameters (e.g. step-size and bounds)
The algorithm will then output a new list of parameters for which we want to run models.
In DYNAMITE, we implement this generic idea in the class
``dyn.parameter_space.ParameterGenerator``.
In the configuration file, you specify *which* parameter generator you would like to use, at the location
```
parameter_space_settings -> generator_type
```
The current choice is
```
c.settings.parameter_space_settings['generator_type']
```
This parameter generator requires an additional setting which is set at
```
parameter_space_settings -> generator_settings -> threshold_del_chi2_abs
```
or
```
parameter_space_settings -> generator_settings -> threshold_del_chi2_as_frac_of_sqrt2nobs
```
(the options are mutually exclusive, set one or the other). Internally, the setting is converted to the appropriate ``threshold_del_chi2`` and is accessed in the following way,
```
threshold_del_chi2_as_frac_of_sqrt2nobs = \
c.settings.parameter_space_settings['generator_settings']['threshold_del_chi2_as_frac_of_sqrt2nobs']
threshold_del_chi2 = c.settings.parameter_space_settings['generator_settings']['threshold_del_chi2']
print(f'threshold_del_chi2_as_frac_of_sqrt2nobs = {threshold_del_chi2_as_frac_of_sqrt2nobs}')
print(f'threshold_del_chi2 = {threshold_del_chi2}')
```
The algorithm implemented to generate parameters in ``LegacyGridSearch`` is the following,
```
iteration = 0
if iteration == 0:
all parameters take `value` specified in the config
else:
1. find the model with the lowest chi-squared
2. find all models with chi-squared within threshold_del_chi2 of the lowest value
3. for all models satisfying that criterion:
- for all free parameters:
- generate a new parameter set +/-1 step-size from the current value
4. Remove any models with parameters outside specified bounds
5. iteration = iteration + 1
stop if no new models are added, or any other stopping criteria are met
```
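As an illustration only, not DYNAMITE's actual implementation, one iteration of a grid search in this spirit could be sketched as follows (the dictionary layout for models and settings is assumed):
```
def propose_new_params(models, free_pars, threshold_del_chi2):
    # models: list of dicts like {'params': {...}, 'chi2': float}
    # free_pars: dict name -> {'lo', 'hi', 'step'}, as in the config above
    best = min(m['chi2'] for m in models)
    seeds = [m for m in models if m['chi2'] <= best + threshold_del_chi2]
    seen = {tuple(sorted(m['params'].items())) for m in models}
    proposals = []
    for m in seeds:
        for name, s in free_pars.items():
            for delta in (-s['step'], s['step']):
                p = dict(m['params'])
                p[name] += delta
                key = tuple(sorted(p.items()))
                # keep only in-bounds, not-yet-run parameter sets
                if s['lo'] <= p[name] <= s['hi'] and key not in seen:
                    seen.add(key)
                    proposals.append(p)
    return proposals  # an empty list means no new models: stop
```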
For those of you who have used the previous version of the triaxial Schwarzschild modelling code (aka ``schwpy``), this is the same algorithm which was implemented there.
The last line of the algorithm mentions stopping criteria. Settings which control the stopping criteria are also specified in the configuration file, under
```
parameter_space_settings -> stopping_criteria
```
The current settings are the following,
```
stopping_criteria = c.settings.parameter_space_settings['stopping_criteria']
for key in stopping_criteria:
    print(f'{key} = {stopping_criteria[key]}')
```
These have the following meaning,
- if no new model improves the chi-squared by at least ``min_delta_chi2``, then stop
- if we have already run ``n_max_mods`` models, then stop
- if we have already run ``n_max_iter`` iterations, then stop
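In the YAML configuration these live in a block along the following lines (the values shown here are illustrative, not the ones from ``NGC6278_config.yaml``):
```
parameter_space_settings:
    stopping_criteria:
        min_delta_chi2: 0.5
        n_max_mods: 300
        n_max_iter: 10
```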
:)
| github_jupyter |
```
%matplotlib inline
```
# Solar Data Processing with Python Part II
Now we have a grasp of the basics of Python, but the whole reason for downloading Python in the first place was to analyze solar data. Let's take a closer look at examples of solar data analysis.
We will be using SunPy to access solar data. SunPy is a python package designed to interface between the powerful tools that exist in other Python Libraries with current repositories of solar data. With SunPy we will show how to: download solar data sets from the VSO, calibrate to industry standards, plot and overlay a time series.
# Fitting A Gaussian to Data.
One of the most common data types in solar data processing is a time series. A time series is a measurement of how one physical parameter changes as a function of time. This example shows how to fit a gaussian to a spectral line. In this example, it will be as "real world" as possible.
First, let's import some useful libraries.
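The original import cell is not preserved in this copy of the notebook; a minimal set that supports the steps below would be something like:
```
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.modeling import models, fitting
```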
Next we need to load in the data set we want to work with:
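The loading cell is also missing; since the inspection below reveals a FITS file with four items, a sketch of this step would be (the file name here is a placeholder, not the original one):
```
hdulist = fits.open('glast_gbm_data.fits')  # hypothetical file name
```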
So what did we get when we opened the file? Let's take a look:
We got 4 items in the list. Let's take a look at the first one:
It looks like this data is from the GLAST telescope measuring gamma rays. Let's take a look at the second item:
Alright, now we are getting somewhere. This has data in units of 'keV' and max/min measurements. Let's take a look at the other elements of the list we got:
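Calls along these lines would reproduce those observations (a sketch; the exact HDU layout is inferred from the text):
```
hdulist.info()            # lists the 4 HDUs
print(hdulist[0].header)  # primary header: telescope metadata (GLAST)
print(hdulist[1].data)    # energy bin table, min/max values in keV
```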
So it looks like we are working with some energy counts data, temporal information, quality measurements, etc.
# Plotting Spectral Data
Let's take a look at some of the data we've got.
There is a large array of counts at 128 different energies. Let's take a look at the lowest energy measurements:
So now we have a plot of counts over some period of time. We can see there is one major spike in the data. Let's filter the data so that we just have the major peak without the spike.
This function, "np.logical_and", is similar to a "where" statement in IDL. We can see that "w" is now an array of true and false values. To take a subsection of our data where our filter is true:
Now, it is good to add some units to data when you can. The header of the file tells us what the units are, but in this case, counts have no units.
# Fitting the data with a Gaussian
Now that we have extracted a detection feature from the whole data set, let's fit it with a gaussian. To do this we will make use of a couple of packages in astropy. We will initialize the gaussian fit with some approximations (max, center, FWHM):
Now let's define a fitting method and produce a fit:
Since this fitting routine expects both X and Y coordinate data, we need to define an X vector:
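Under the same assumptions as above, the initialization and fit with `astropy.modeling` would look like this; `time_sub` plays the role of the X vector:
```
amp0 = counts_sub.max()                  # rough maximum
mean0 = time_sub[np.argmax(counts_sub)]  # rough center
g_init = models.Gaussian1D(amplitude=amp0, mean=mean0, stddev=100.0)  # stddev guessed from an approximate FWHM
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, time_sub, counts_sub)  # the fitter needs both X and Y data
```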
Let's take a look at some of the qualities of our fitted gaussian:
Our guesses weren't too bad, but we overestimated the standard deviation by about a factor of 5. The variable 'g' has the fitted parameters of our gaussian, but it doesn't actually contain an array. To plot it over the data, we need to create an array of values. We will make an array from 0 to 1410 with 2820 points in it.
To find the values of our fit at each location, it is easy:
Now we can plot it:
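A sketch of the evaluation and overlay plot:
```
x = np.linspace(0, 1410, 2820)  # 2820 points from 0 to 1410
plt.plot(time_sub, counts_sub, label='data')
plt.plot(x, g(x), label='gaussian fit')  # evaluating g at each x is easy
plt.legend()
plt.show()
```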
That isn't a very good fit. If we chose a more clever way to filter our data, or possibly fit two gaussians, that could improve things.
# Integrating under the curve.
Let's find the area under the curve we just created. We can numerically integrate it easily:
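For example, with the trapezoidal rule:
```
area = np.trapz(g(x), x)  # area under the fitted gaussian
print(area)
```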
| github_jupyter |
```
import matplotlib.pyplot as plt
%matplotlib notebook
import numpy as np
import pandas as pd
from scipy import interpolate
import pickle
import xmeos
from xmeos import models
from xmeos import datamod
CONSTS = models.CONSTS
analysis_file = 'data/analysis.pkl'
with open(analysis_file, 'rb') as f:
analysis = pickle.load(f)
eos_mod = analysis['eos_mod']
data = analysis['datasets']['Spera2011']
eos_electronic = analysis['eos_electronic']
# data = analysis['datasets']['multi']
param_tex_str = analysis['param_tex_str']
params_init = analysis['params_init']
eos_mod.set_params(params_init)
# display(eos_mod.get_params())
datamodel = datamod.init_datamodel(data, eos_mod)
# datamodel
print('Calc Params')
print('===========')
eos_mod.get_calc_params()
fit_calcs = ['compress','refstate','gamma','bcoef','thermal']
fix_params = ['S0','Cvlimfac','mexp']
# fix_params = ['S0','mexp']
# fix_params = ['S0','Cvlimfac']
datamodel['eos_mod'].set_param_values([3/5,1], param_names=['mexp','Cvlimfac'])
datamod.select_fit_params(datamodel, fit_calcs, fix_params=fix_params)
datamod.fit(datamodel)
datamod.fit(datamodel, apply_bulk_mod_wt=True, wt_vol=.5)
R2fit = datamodel['posterior']['R2fit']
display('R2fit = ', R2fit)
display('R2avg = ', 0.5*R2fit['E']+.25*R2fit['P']+.25*R2fit['E'])
display('Model Residual Error = ', datamodel['posterior']['fit_err'])
display(datamodel['posterior']['param_tbl'])
display('R2fit = ', datamodel['posterior']['R2fit'])
display('Model Residual Error = ', datamodel['posterior']['fit_err'])
display(datamodel['posterior']['param_tbl'])
# scratch: weighted-average R2 values from the fits above
.5*(0.9997647071840597+.5*0.9998527429587041+.5*0.9986048490791933)
.5*(0.999759019439001+.5*0.99985949101627+.5*0.9986848263745852)
.999516-.999497
# Save fitted model
analysis['datamodel'] = datamodel
with open(analysis_file, 'wb') as f:
pickle.dump(analysis, f)
plt.figure()
posterior = datamodel['posterior']
corr = posterior['corr']
if corr is not None:
param_labels = [param_tex_str[name] for name in posterior['param_names']]
cmap = plt.get_cmap('coolwarm')
Nparam = len(param_labels)
corr_plt = np.flipud(np.ma.masked_where(np.eye(Nparam),corr))
plt.pcolormesh(corr_plt,cmap=cmap)
# plt.imshow(corr, cmap=cmap)
plt.clim(-1,1)
plt.colorbar(label=r'Correlation Coefficient')
plt.xticks(.5+np.arange(len(param_labels)),param_labels)
plt.yticks(np.flipud(.5+np.arange(len(param_labels))),param_labels)
for (index,val) in np.ndenumerate(np.flipud(corr)):
if index[1]!=Nparam-1-index[0]:
plt.text(index[1]+.5,index[0]+.5,'%+.2f'%(val),fontsize=9,
horizontalalignment='center', verticalalignment='center')
plt.setp(plt.gca().get_xticklines(),visible=False);
plt.setp(plt.gca().get_yticklines(),visible=False);
#plt.plot((0,11),(5,5),'k-',linewidth=2)
#plt.plot((0,11),(7,7),'k-',linewidth=2)
#plt.plot((4,4),(0,11),'k-',linewidth=2)
#plt.plot((6,6),(0,11),'k-',linewidth=2)
#plt.show()
eos_mod = datamodel['eos_mod']
T0 = eos_mod.get_refstate()['T0']
V0 = eos_mod.get_params()['V0']
tbl = datamodel['data']['table']
Tlbl = data['T_labels']
delT = Tlbl[1]-Tlbl[0]
cmap = plt.get_cmap('coolwarm',len(Tlbl))
clims = [Tlbl[0]-delT/2,Tlbl[-1]+delT/2]
Vmod = V0*np.linspace(.3,1.2,1001)
plt.figure()
for iT in data['T_avg']:
icol = cmap((iT-clims[0])/(clims[1]-clims[0]))
plt.plot(Vmod/V0, eos_mod.press(Vmod,iT), '-', color=icol)
Tbnd = 1773
Tbnd = 1673
Pbnd = eos_mod.press(Vmod,Tbnd)
# indbnd = np.argmin(Pbnd)
indbnd = np.argmin(Pbnd**2)
plt.plot(Vmod[:indbnd]/V0, Pbnd[:indbnd],'-.',color=[.5,.5,.5])
plt.scatter(tbl['V']/V0,tbl['P'],c=tbl['T'], cmap=cmap)
plt.clim(clims)
plt.xlabel(r'$V$ / $V_0$')
plt.ylabel(r'Pressure [GPa]')
cbar = plt.colorbar(label='Temperature [K]')
cbar.set_ticks(Tlbl)
#plt.ylim(-2,15);
plt.plot(Vmod/V0,0*Vmod,'k-')
plt.figure()
for iT in data['T_avg']:
icol = cmap((iT-clims[0])/(clims[1]-clims[0]))
plt.plot(Vmod/V0, eos_mod.internal_energy(Vmod,iT), '-', color=icol)
plt.scatter(tbl['V']/V0,tbl['E'],c=tbl['T'], cmap=cmap)
plt.xlabel(r'$V$ / $V_0$')
plt.ylabel(r'Energy [eV/atom]')
cbar = plt.colorbar()
plt.clim(clims)
cbar.set_ticks(Tlbl)
plt.ylim(-21,-19)
plt.figure()
for iT in data['T_avg']:
icol = cmap((iT-clims[0])/(clims[1]-clims[0]))
plt.plot(eos_mod.press(Vmod,iT), eos_mod.internal_energy(Vmod,iT), '-', color=icol)
plt.scatter(tbl['P'], tbl['E'], c=tbl['T'], cmap=cmap)
plt.clim(clims)
plt.xlabel(r'Pressure [GPa]')
plt.ylabel(r'Energy [eV / atom]')
cbar = plt.colorbar(label='Temperature [K]')
cbar.set_ticks(Tlbl)
plt.xlim(-5, 200)
plt.ylim(-21, -19)
eos_electronic.set_param_values(param_names='V0', param_values=V0)
E_elec = eos_electronic.energy(tbl['V'], tbl['T'])
P_elec = eos_electronic.press(tbl['V'], tbl['T'])
T_avg = data['T_avg']
# T_avg.append(8000)
plt.figure()
for iT in T_avg:
icol = cmap((iT-clims[0])/(clims[1]-clims[0]))
plt.plot(Vmod/V0, eos_mod.internal_energy(Vmod,iT), '-', color=icol)
plt.plot(Vmod/V0, eos_mod.internal_energy(Vmod,iT)+eos_electronic.energy(Vmod, iT), ':', color=icol)
plt.scatter(tbl['V']/V0,tbl['E'],c=tbl['T'], cmap=cmap)
plt.xlabel(r'$V$ / $V_0$')
plt.ylabel(r'Energy [eV/atom]')
cbar = plt.colorbar()
plt.clim(clims)
cbar.set_ticks(Tlbl)
plt.ylim(-21,-19)
plt.figure()
for iT in data['T_avg']:
icol = cmap((iT-clims[0])/(clims[1]-clims[0]))
plt.plot(Vmod/V0, eos_mod.press(Vmod,iT), '-', color=icol)
plt.plot(Vmod/V0, eos_mod.press(Vmod,iT)+eos_electronic.press(Vmod, iT), ':', color=icol)
Tbnd = 1773
Tbnd = 1673
Pbnd = eos_mod.press(Vmod,Tbnd)
# indbnd = np.argmin(Pbnd)
indbnd = np.argmin(Pbnd**2)
plt.plot(Vmod[:indbnd]/V0, Pbnd[:indbnd],'-.',color=[.5,.5,.5])
plt.scatter(tbl['V']/V0,tbl['P'],c=tbl['T'], cmap=cmap)
plt.clim(clims)
plt.xlabel(r'$V$ / $V_0$')
plt.ylabel(r'Pressure [GPa]')
cbar = plt.colorbar(label='Temperature [K]')
cbar.set_ticks(Tlbl)
#plt.ylim(-2,15);
plt.plot(Vmod/V0,0*Vmod,'k-')
.91*12.95
```
| github_jupyter |
# Challenge 4: Convolutional Neural Networks
Create a Convolutional Neural Network (a deep learning architecture) to classify the gear data. The architecture or design should contain a mix of layers such as convolutional and pooling.
Train a model on the training dataset using the decided architecture. You may have to iterate on the architecture. Make sure the best trained model is saved to disk.
```
import numpy as np
np.random.seed(42)
%matplotlib inline
from sklearn import metrics
import seaborn as sn
import pandas as pd
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
def report(y_true, y_pred):
print("Accuracy score: ", metrics.accuracy_score(y_true, y_pred, normalize=True))
print(classification_report(y_true, y_pred))
labels = encoder.inverse_transform(np.unique(y_true))
df_cm = pd.DataFrame(
metrics.confusion_matrix(y_true, y_pred),
index=labels,
columns=labels
)
plt.figure(figsize = (10,7))
sn.heatmap(df_cm, annot=True)
plt.show()
```
## 1. Creating dataset
```
from os import walk, listdir
from os.path import isfile, join
import cv2
from sklearn.utils import shuffle
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
def load_dataset(folder):
X = []
y = []
paths = []
for (dir_path, _, _) in walk(folder):
label = dir_path.replace(folder, '').replace('\\', '').replace('/', '')
files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
for file in files:
img = cv2.imread(join(dir_path, file))
X.append(img)
y.append(label)
paths.append(join(dir_path, file))
return np.array(X), np.array(y), np.array(paths)
X, y, paths = load_dataset("data/gear_images_preprocessed")
# Encode class values as integers
encoder = LabelEncoder()
encoder.fit(y)
y_encoded = encoder.transform(y)
y_dummy = np_utils.to_categorical(y_encoded)
print(encoder.inverse_transform(np.argmax(y_dummy[2020])))
#print(*X[1,1])
#print(*y_dummy[2121])
print("Train dataset shape:", X.shape, y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y_dummy, test_size=0.2, random_state=42, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
print("Train dataset shape:", X_train.shape, y_train.shape)
print("Test dataset shape:", X_test.shape, y_test.shape)
print("Val dataset shape:", X_val.shape, y_val.shape)
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Activation, Flatten, Dropout, Dense
from keras import backend as K
from keras import optimizers
from keras import losses
class BasicNet:
@staticmethod
def build(width, height, depth, classes):
model = Sequential()
input_shape = (height, width, depth)
model.add(Conv2D(32, (2, 2), input_shape=input_shape))
model.add(Conv2D(64, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(classes, activation='softmax'))
return model
epochs = 20
batch_size = 64
image_dims = (128, 128, 3)
num_classes = len(np.unique(y))
model = BasicNet.build(
width=image_dims[1], height=image_dims[0],
depth=image_dims[2], classes=num_classes
)
model.summary()
model.compile(
loss=losses.categorical_crossentropy,
optimizer=optimizers.Adadelta(),
metrics=['categorical_accuracy'],
)
history = model.fit(
X_train / 255, y_train,
batch_size=batch_size,
validation_data=(X_val / 255, y_val),
epochs=epochs,
verbose=1
)
# Calculate score
y_pred = model.predict(X_test / 255)
y_pred_flatten = np.argmax(y_pred, axis=1)
y_test_flatten = np.argmax(y_test, axis=1)
report(y_test_flatten, y_pred_flatten)
#from keras.models import load_model
import pickle
# Saving model
model.save('model_ch4.h5')
# Saving labels
with open("model_labels_ch4.dat", "wb") as f:
pickle.dump(encoder, f, pickle.HIGHEST_PROTOCOL)
from keras.models import load_model
m = load_model('model_ch4.h5')
m.summary()
path = "data/gear_images_preprocessed/pulleys/10308568_zm.jpg"
img = cv2.imread(path)
# scale the input the same way as during training (pixels divided by 255)
y_pred = m.predict(np.expand_dims(img / 255, axis=0))
print(encoder.inverse_transform(np.argmax(y_pred, axis=1)))
```
| github_jupyter |
```
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding, LSTM
from keras.layers import Conv1D, Flatten, MaxPooling1D, GlobalMaxPooling1D
from keras.preprocessing import sequence, text
import numpy as np
import os
import json
```
# Workaround on ValueError exception when loading pickle file
Since the current version of numpy (1.16.4) sets `allow_pickle` to `False` by default, we need to override this parameter to be able to load the dataset into memory.
We should, obviously, reset the default parameters later. See how this is done in the next cell:
```
# set parameters (vocabulary_size must exist before load_data below uses it)
vocabulary_size = 5000
max_len = 1000
batch_size = 32
embedding_dims = 25
filters = 16
kernel_size = 3
hidden_dims = 250
epochs = 10
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocabulary_size)
# restore np.load for future normal usage
np.load = np_load_old
# Transform the dataset
# tokenizer = text.Tokenizer(num_words=vocabulary_size)
# tokenizer.fit_on_texts(X_train)
# X_train = tokenizer.text_to_matrix(X_train)
# X_test = tokenizer.text_to_matrix(X_test)
X_train = sequence.pad_sequences(X_train, maxlen=max_len)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)
# Prepare the model creating our own embedding with keras
model = Sequential()
# layer to map the vocab indices into embedding_dims dimensions
model.add(Embedding(vocabulary_size, embedding_dims, input_length=max_len))
model.add(Dropout(0.3))
# Add a Convolution1D to learn word group filters of size filter_length
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu'))
# we use max pooling:
model.add(MaxPooling1D())
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu'))
model.add(Flatten())
model.add(Dense(hidden_dims, activation='relu'))
model.add(Dropout(0.3))
# The output layer: positive or negative review
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_test, y_test))
# Prepare the model using Glove embedding
def load_glove_embeddings(src_path):
embeddings_index = dict()
filename = os.path.join(src_path, 'glove.6B.100d.txt')
with open(filename) as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype = 'float32')
embeddings_index[word] = coefs
return embeddings_index
embeddings_index = load_glove_embeddings('../../../data/non_versioned')
embeddings_matrix = np.zeros((vocabulary_size, 100))
# imdb.load_data returns integer sequences, so the word index comes from the
# dataset itself (the Tokenizer above is commented out). Note that load_data
# offsets word indices by 3 by default; this sketch ignores that subtlety.
for word, index in imdb.get_word_index().items():
    if index > vocabulary_size - 1:
        continue  # word_index is not sorted by index, so skip rather than break
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embeddings_matrix[index] = embedding_vector
# Create the model
model = Sequential()
# Make this layer use the Glove embedding and do not update during training
model.add(Embedding(vocabulary_size, 100, input_length=max_len, weights=[embeddings_matrix], trainable=False))
model.add(Dropout(0.4))
# Add a Convolution1D to learn word group filters of size filter_length
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu'))
# we use max pooling:
model.add(MaxPooling1D())
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu'))
model.add(Flatten())
model.add(Dense(hidden_dims, activation='relu'))
model.add(Dropout(0.4))
# The output layer: positive or negative review
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_test, y_test))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/predatorx7/borrows/blob/master/pyai/5_A_Water_Jug.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

Step 1: Fill the 5 liter jug from the 8 liter jug and put 3 liters into the 3 liter jug.
Step 2: Put the 3 liters from the 3 liter jug back into the 8 liter jug. Put the remaining 2 liters from the 5 liter jug into the 3 liter jug.
Step 3: Fill the 5 liter jug from the 8 liter jug. Since the 3 liter jug has 1 liter of empty capacity, put 1 liter from the 5 liter jug into the 3 liter jug.
Step 4: Solved! Now put the 3 liters from the 3 liter jug into the 8 liter jug. The 5 liter jug and the 8 liter jug are now holding 4 liters each.
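Tracing those steps as (8L, 5L, 3L) states: (8,0,0) → (3,5,0) → (3,2,3) → (6,2,0) → (6,0,2) → (1,5,2) → (1,4,3) → (4,4,0).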
```
# 3 water jugs capacity -> (x,y,z) where x>y>z
# initial state (12,0,0)
# final state (6,6,0)
capacity = (12,8,5)
# Maximum capacities of 3 jugs -> x,y,z
x = capacity[0]
y = capacity[1]
z = capacity[2]
# to mark visited states
memory = {}
# store solution path
ans = []
def get_all_states(state):
# Let the 3 jugs be called a,b,c
a = state[0]
b = state[1]
c = state[2]
if(a==6 and b==6):
ans.append(state)
return True
# if current state is already visited earlier
if((a,b,c) in memory):
return False
memory[(a,b,c)] = 1
#empty jug a
if(a>0):
#empty a into b
if(a+b<=y):
if( get_all_states((0,a+b,c)) ):
ans.append(state)
return True
else:
if( get_all_states((a-(y-b), y, c)) ):
ans.append(state)
return True
#empty a into c
if(a+c<=z):
if( get_all_states((0,b,a+c)) ):
ans.append(state)
return True
else:
if( get_all_states((a-(z-c), b, z)) ):
ans.append(state)
return True
#empty jug b
if(b>0):
#empty b into a
if(a+b<=x):
if( get_all_states((a+b, 0, c)) ):
ans.append(state)
return True
else:
if( get_all_states((x, b-(x-a), c)) ):
ans.append(state)
return True
#empty b into c
if(b+c<=z):
if( get_all_states((a, 0, b+c)) ):
ans.append(state)
return True
else:
if( get_all_states((a, b-(z-c), z)) ):
ans.append(state)
return True
#empty jug c
if(c>0):
#empty c into a
if(a+c<=x):
if( get_all_states((a+c, b, 0)) ):
ans.append(state)
return True
else:
if( get_all_states((x, b, c-(x-a))) ):
ans.append(state)
return True
#empty c into b
if(b+c<=y):
if( get_all_states((a, b+c, 0)) ):
ans.append(state)
return True
else:
if( get_all_states((a, y, c-(y-b))) ):
ans.append(state)
return True
return False
initial_state = (12,0,0)
print("Starting work...\n")
get_all_states(initial_state)
ans.reverse()
for i in ans:
print(i)
```

Logic:
(5 * 2) - (3 * 2) = 4
This means that if we fill the 5 liter jug twice and empty it into the 3 liter jug twice, we arrive at the result.
| github_jupyter |
<a href="https://colab.research.google.com/github/tjwei/NCTU_DeepLearning/blob/master/tf2_tutorial/02_tf2_Basics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install -U tensorflow-gpu
import tensorflow as tf
tf.__version__
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.],[2.]])
matrix1, matrix2
product = tf.matmul(matrix1, matrix2)
product
matrix1 @ matrix2
product + 3
w = tf.Variable(shape=(1, 2), initial_value=[[2., 1.]])
w
y = w @ [[1], [2]]
y
with tf.GradientTape() as tape:
y = w@[[1], [2]]
loss = (y - 3)**2
gradients = tape.gradient(loss, [w])
gradients
```
## MNIST Again
```
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = (x_train-127.5)/127.5
x_test = (x_test-127.5)/127.5
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(10000).batch(32)
train_ds
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.shuffle(10000).batch(32)
from tensorflow.keras.layers import Dense, Flatten, Reshape
from tensorflow.keras.models import Model
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10, activation='softmax')
def call(self, x):
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
# Create an instance of the model
model = MyModel()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
#@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
#@tf.function
def test_step(images, labels):
predictions = model(images)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset the metrics for the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
```
| github_jupyter |
# Clean-Label Feature Collision Attacks on a PyTorch Classifier
In this notebook, we will learn how to use ART to run a clean-label feature collision poisoning attack on a neural network trained with PyTorch. We will be training our data on a subset of the CIFAR-10 dataset. The methods described are derived from [this paper](https://arxiv.org/abs/1804.00792) by Shafahi, Huang, et al. (2018).
```
import os, sys
from os.path import abspath
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from art import config
from art.utils import load_dataset, get_file
from art.estimators.classification import PyTorchClassifier
from art.attacks.poisoning import FeatureCollisionAttack
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
np.random.seed(301)
(x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('cifar10')
print(x_train.shape)
x_train = np.transpose(x_train, (0, 3, 1, 2)).astype(np.float32)
x_test = np.transpose(x_test, (0, 3, 1, 2)).astype(np.float32)
num_samples_train = 1000
num_samples_test = 1000
x_train = x_train[0:num_samples_train]
y_train = y_train[0:num_samples_train]
x_test = x_test[0:num_samples_test]
y_test = y_test[0:num_samples_test]
class_descr = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
print("shape of x_train",x_train.shape)
print("shape of y_train",y_train.shape)
!wget -c https://www.dropbox.com/s/ljkld6opyruvn5u/resnet18.pt?dl=0
```
## Load Model to be Attacked
In this example, we use a ResNet-18 model pretrained on the CIFAR-10 dataset.
```
# Model Definition and pretrained model pulled from:
# https://github.com/huyvnphan/PyTorch_CIFAR10
import torch
import torch.nn as nn
import os
__all__ = [
"ResNet",
"resnet18",
"resnet34",
"resnet50",
]
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=10,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
# CIFAR10: kernel_size 7 -> 3, stride 2 -> 1, padding 3->1
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False
)
# END
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.reshape(x.size(0), -1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, device, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
# Load the model state_dict downloaded earlier (resnet18.pt?dl=0)
state_dict = torch.load(
'resnet18.pt?dl=0', map_location=device
)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, device="cpu", **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, device, **kwargs
)
import torch.optim as optim
# Pretrained model
classifier_model = resnet18(pretrained=True)
classifier_model.eval() # for evaluation
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier_model.parameters(), lr=0.0001)
classifier = PyTorchClassifier(clip_values=(min_, max_), model=classifier_model,
preprocessing=((0.4914, 0.4822, 0.4465),(0.2471, 0.2435, 0.2616)),nb_classes=10,input_shape=(3,32,32),loss=criterion,
optimizer=optimizer)
```
## Choose Target Image from Test Set
```
target_class = "bird" # one of ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
target_label = np.zeros(len(class_descr))
target_label[class_descr.index(target_class)] = 1
target_instance = np.expand_dims(x_test[np.argmax(y_test, axis=1) == class_descr.index(target_class)][3], axis=0)
img_plot = np.transpose(target_instance[0],(1,2,0))
fig = plt.imshow(img_plot)
print("shape of target_instance",target_instance.shape)
print('true_class: ' + target_class)
print('predicted_class: ' + class_descr[np.argmax(classifier.predict(target_instance), axis=1)[0]])
feature_layer = classifier.layer_names[-2]
print(feature_layer)
```
## Poison Training Images to Misclassify Test
The attacker wants to make it such that whenever a prediction is made on this particular bird, the output will be a frog.
```
base_class = "frog" # one of ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
base_idxs = np.argmax(y_test, axis=1) == class_descr.index(base_class)
base_instances = np.copy(x_test[base_idxs][:10])
base_labels = y_test[base_idxs][:10]
x_test_pred = np.argmax(classifier.predict(base_instances), axis=1)
nb_correct_pred = np.sum(x_test_pred == np.argmax(base_labels, axis=1))
print("New test data to be poisoned (10 images):")
print("Correctly classified: {}".format(nb_correct_pred))
print("Incorrectly classified: {}".format(10-nb_correct_pred))
plt.figure(figsize=(10,10))
for i in range(0, 9):
pred_label, true_label = class_descr[x_test_pred[i]], class_descr[np.argmax(base_labels[i])]
plt.subplot(330 + 1 + i)
fig=plt.imshow(np.transpose(base_instances[i],(1,2,0)))
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
fig.axes.text(0.5, -0.1, pred_label + " (" + true_label + ")", fontsize=12, transform=fig.axes.transAxes,
horizontalalignment='center')
```
The captions on the images can be read: `predicted label (true label)`
## Creating Poison Frogs
```
attack = FeatureCollisionAttack(classifier,
target_instance,
feature_layer,
max_iter=10,
similarity_coeff=256,
watermark=0.3,
learning_rate=1)
poison, poison_labels = attack.poison(base_instances)
poison_pred = np.argmax(classifier.predict(poison), axis=1)
plt.figure(figsize=(10,10))
for i in range(0, 9):
pred_label, true_label = class_descr[poison_pred[i]], class_descr[np.argmax(poison_labels[i])]
plt.subplot(330 + 1 + i)
fig=plt.imshow(np.transpose(poison[i],(1,2,0)))
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
fig.axes.text(0.5, -0.1, pred_label + " (" + true_label + ")", fontsize=12, transform=fig.axes.transAxes,
horizontalalignment='center')
```
Notice how the network classifies most of these poison examples as frogs, and it's not incorrect to do so. The examples look mostly froggy. A slight watermark of the target instance is also added to push the poisons closer to the target class in feature space.
## Training with Poison Images
```
import torch.optim as optim
adv_train = np.vstack([x_train, poison])
adv_labels = np.vstack([y_train, poison_labels])
classifier_model.train()
classifier.fit(adv_train, adv_labels, nb_epochs=20, batch_size=4)
```
## Fooled Network Misclassifies Bird
```
fig = plt.imshow(np.transpose(target_instance[0],(1,2,0)))
print('true_class: ' + target_class)
print('predicted_class: ' + class_descr[np.argmax(classifier.predict(target_instance), axis=1)[0]])
```
These attacks allow adversaries who can poison your dataset the ability to mislabel any particular target instance of their choosing without manipulating labels.
| github_jupyter |
```
import joblib
import pandas as pd
from sklearn.datasets import load_breast_cancer, load_iris, load_boston
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import GridSearchCV
```
# Process Dataset Breast Cancer
```
data_breast_cancer = load_breast_cancer(as_frame=True)
data_breast_cancer = data_breast_cancer.frame
data_breast_cancer.to_csv('data_breast_cancer.csv', index=False)
X_breast_cancer = data_breast_cancer.loc[:, data_breast_cancer.columns != 'target']
y_breast_cancer = data_breast_cancer.target
grid = {'max_depth':[2,5], 'bootstrap': [True], 'oob_score': [True], 'max_samples': [0.8, 0.9]}
classifier = RandomForestClassifier(oob_score=True, random_state=42)
grid_classifier = GridSearchCV(classifier, grid, cv=5)
grid_classifier.fit(X_breast_cancer, y_breast_cancer)
rf = grid_classifier.best_estimator_
print('Parameters of best prediction model:')
print(grid_classifier.best_params_)
print('OOB accuracy of prediction model:')
print(rf.oob_score_)
filename_model = 'random_forest_breast_cancer.joblib'
joblib.dump(rf, open(filename_model, 'wb'))
```
# Process Dataset Iris
```
data_iris = load_iris(as_frame=True)
data_iris = data_iris.frame
data_iris.to_csv('data_iris.csv', index=False)
X_iris = data_iris.loc[:, data_iris.columns != 'target']
y_iris = data_iris.target
grid = {'n_estimators': [500],'max_features': [2],'max_depth': [2], 'bootstrap': [True], 'oob_score': [True]}
classifier = RandomForestClassifier(oob_score=True, random_state=42)
grid_classifier = GridSearchCV(classifier, grid, cv=5)
grid_classifier.fit(X_iris, y_iris)
rf = grid_classifier.best_estimator_
print('Parameters of best prediction model:')
print(grid_classifier.best_params_)
print('OOB accuracy of prediction model:')
print(rf.oob_score_)
filename_model = 'random_forest_iris.joblib'
joblib.dump(rf, open(filename_model, 'wb'))
```
# Process Dataset Boston
```
data = load_boston()
data_boston = pd.DataFrame(columns=data['feature_names'], index=range(data['data'].shape[0]))
data_boston.loc[:,:] = data['data']
data_boston['CHAS'] = data_boston['CHAS'].astype('category')
data_boston['target'] = data['target']
data_boston.to_csv('data_boston.csv', index=False)
X_boston = data_boston.loc[:, data_boston.columns != 'target']
y_boston = data_boston.target
grid = {'n_estimators': [500],'max_features': [2],'max_depth': [2], 'bootstrap': [True], 'oob_score': [True]}
classifier = RandomForestRegressor(oob_score=True, random_state=42)
grid_classifier = GridSearchCV(classifier, grid, cv=5)
grid_classifier.fit(X_boston, y_boston)
rf = grid_classifier.best_estimator_
print('Parameters of best prediction model:')
print(grid_classifier.best_params_)
print('OOB MSE of prediction model:')
print(rf.oob_score_)
filename_model = 'random_forest_boston.joblib'
joblib.dump(rf, open(filename_model, 'wb'))
```
| github_jupyter |
```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# !git pull
import tensorflow as tf
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import fastspeech2
import numpy as np
_pad = 'pad'
_start = 'start'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
MALAYA_SPEECH_SYMBOLS = (
[_pad, _start, _eos] + list(_special) + list(_punctuation) + list(_letters)
)
input_ids = tf.placeholder(tf.int32, [None, None])
lens = tf.placeholder(tf.int32, [None, None])
mel_outputs = tf.placeholder(tf.float32, [None, None, 80])
mel_lengths = tf.placeholder(tf.int32, [None])
energies = tf.placeholder(tf.float32, [None, None])
energies_lengths = tf.placeholder(tf.int32, [None])
f0s = tf.placeholder(tf.float32, [None, None])
f0s_lengths = tf.placeholder(tf.int32, [None])
config = malaya_speech.config.fastspeech2_config
config = fastspeech2.Config(
vocab_size = len(MALAYA_SPEECH_SYMBOLS), **config
)
model = fastspeech2.Model(config)
r_training = model(input_ids, lens, f0s, energies, training = False)
speed_ratios = tf.placeholder(tf.float32, [None], name = 'speed_ratios')
f0_ratios = tf.placeholder(tf.float32, [None], name = 'f0_ratios')
energy_ratios = tf.placeholder(tf.float32, [None], name = 'energy_ratios')
r = model.inference(input_ids, speed_ratios, f0_ratios, energy_ratios)
r
decoder_output = tf.identity(r[0], name = 'decoder_output')
post_mel_outputs = tf.identity(r[1], name = 'post_mel_outputs')
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
path = 'fastspeech2-female'
ckpt_path = tf.train.latest_checkpoint(path)
ckpt_path
saver = tf.train.Saver()
saver.restore(sess, ckpt_path)
import re
from unidecode import unidecode
import malaya
normalizer = malaya.normalize.normalizer(date = False, time = False)
pad_to = 8
def tts_encode(string: str, add_eos: bool = True):
r = [MALAYA_SPEECH_SYMBOLS.index(c) for c in string if c in MALAYA_SPEECH_SYMBOLS]
if add_eos:
r = r + [MALAYA_SPEECH_SYMBOLS.index('eos')]
return r
def put_spacing_num(string):
string = re.sub('[A-Za-z]+', lambda ele: ' ' + ele[0] + ' ', string)
return re.sub(r'[ ]+', ' ', string).strip()
def convert_to_ascii(string):
return unidecode(string)
_whitespace_re = re.compile(r'\s+')  # was missing; collapse_whitespace needs it
def collapse_whitespace(string):
    return re.sub(_whitespace_re, ' ', string)
def cleaning(string, normalize = True, add_eos = False):
sequence = []
string = convert_to_ascii(string)
if string[-1] in '-,':
string = string[:-1]
if string[-1] not in '.,?!':
string = string + '.'
string = string.replace('&', ' dan ')
string = string.replace(':', ',').replace(';', ',')
if normalize:
t = normalizer._tokenizer(string)
for i in range(len(t)):
if t[i] == '-':
t[i] = ','
string = ' '.join(t)
string = normalizer.normalize(string,
check_english = False,
normalize_entity = False,
normalize_text = False,
normalize_url = True,
normalize_email = True,
normalize_year = True)
string = string['normalize']
else:
string = string
string = put_spacing_num(string)
string = re.sub(r'[ ]+', ' ', string).strip()
string = string.lower()
ids = tts_encode(string, add_eos = add_eos)
text_input = np.array(ids)
num_pad = pad_to - ((len(text_input) + 2) % pad_to)
text_input = np.pad(
text_input, ((1, 1)), 'constant', constant_values = ((1, 2))
)
text_input = np.pad(
text_input, ((0, num_pad)), 'constant', constant_values = 0
)
return string, text_input
import matplotlib.pyplot as plt
# https://umno-online.my/2020/12/28/isu-kartel-daging-haram-lagi-pihak-gesa-kerajaan-ambil-tindakan-tegas-drastik/
t, ids = cleaning('Pergerakan Pemuda UMNO Negeri Perak mendesak agar tindakan drastik dan tegas diambil dalam kadar segera bersesuaian dengan enam saranan yang telah dibentangkan oleh Pergerakan Pemuda UMNO sebelum ini berhubung isu kartel daging haram.', add_eos = False)
t, ids
%%time
o = sess.run([decoder_output, post_mel_outputs], feed_dict = {input_ids: [ids],
speed_ratios: [1.0],
f0_ratios: [1.0],
energy_ratios: [1.0]})
o[1].shape
mel_outputs_ = np.reshape(o[1], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
mel_outputs_ = np.reshape(o[0], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
import pickle
with open('a.pkl', 'wb') as fopen:
pickle.dump([np.reshape(o[0], [-1, 80]), np.reshape(o[1], [-1, 80])], fopen)
saver = tf.train.Saver()
saver.save(sess, 'fastspeech2-female-output/model.ckpt')
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'gather' in n.op.lower()
or 'Placeholder' in n.name
or 'ratios' in n.name
or 'post_mel_outputs' in n.name
or 'decoder_output' in n.name
or 'alignment_histories' in n.name)
and 'adam' not in n.name
and 'global_step' not in n.name
and 'Assign' not in n.name
and 'ReadVariableOp' not in n.name
and 'Gather' not in n.name
and 'IsVariableInitialized' not in n.name
]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
'directory: %s' % model_dir
)
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + '/frozen_model.pb'
clear_devices = True
with tf.Session(graph = tf.Graph()) as sess:
saver = tf.train.import_meta_graph(
input_checkpoint + '.meta', clear_devices = clear_devices
)
saver.restore(sess, input_checkpoint)
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
tf.get_default_graph().as_graph_def(),
output_node_names.split(','),
)
with tf.gfile.GFile(output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('fastspeech2-female-output', strings)
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
g = load_graph('fastspeech2-female-output/frozen_model.pb')
test_sess = tf.InteractiveSession(graph = g)
X = g.get_tensor_by_name('import/Placeholder:0')
speed_ratios = g.get_tensor_by_name('import/speed_ratios:0')
f0_ratios = g.get_tensor_by_name('import/f0_ratios:0')
energy_ratios = g.get_tensor_by_name('import/energy_ratios:0')
output_nodes = ['decoder_output', 'post_mel_outputs']
outputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in output_nodes}
%%time
o = test_sess.run(outputs, feed_dict = {X: [ids],
speed_ratios: [1.0],
f0_ratios: [1.0],
energy_ratios: [1.0]})
mel_outputs_ = np.reshape(o['decoder_output'], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
mel_outputs_ = np.reshape(o['post_mel_outputs'], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
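# Shrink the frozen graph with TF's graph-transform tool: fold batch norms,
# quantize weights to 8-bit, and strip nodes that are unused at inference time.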
from tensorflow.tools.graph_transforms import TransformGraph
transforms = ['add_default_attributes',
'remove_nodes(op=Identity, op=CheckNumerics)',
'fold_batch_norms',
'fold_old_batch_norms',
'quantize_weights(fallback_min=-1024, fallback_max=1024)',
'strip_unused_nodes',
'sort_by_execution_order']
pb = 'fastspeech2-female-output/frozen_model.pb'
input_graph_def = tf.GraphDef()
with tf.gfile.FastGFile(pb, 'rb') as f:
input_graph_def.ParseFromString(f.read())
transformed_graph_def = TransformGraph(input_graph_def,
['Placeholder', 'speed_ratios', 'f0_ratios', 'energy_ratios'],
output_nodes, transforms)
with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
g = load_graph('fastspeech2-female-output/frozen_model.pb.quantized')
test_sess = tf.InteractiveSession(graph = g)
X = g.get_tensor_by_name(f'import/Placeholder:0')
speed_ratios = g.get_tensor_by_name('import/speed_ratios:0')
f0_ratios = g.get_tensor_by_name('import/f0_ratios:0')
energy_ratios = g.get_tensor_by_name('import/energy_ratios:0')
outputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in output_nodes}
%%time
o = test_sess.run(outputs, feed_dict = {X: [ids],
speed_ratios: [1.0],
f0_ratios: [1.0],
energy_ratios: [1.0]})
mel_outputs_ = np.reshape(o['decoder_output'], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
mel_outputs_ = np.reshape(o['post_mel_outputs'], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title('Predicted Mel-after-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
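# Upload the frozen and quantized graphs (plus the vocoder stats file) to a
# Backblaze B2 bucket, using credentials read from environment variables.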
b2_application_key_id = os.environ['b2_application_key_id']
b2_application_key = os.environ['b2_application_key']
from b2sdk.v1 import *
info = InMemoryAccountInfo()
b2_api = B2Api(info)
application_key_id = b2_application_key_id
application_key = b2_application_key
b2_api.authorize_account("production", application_key_id, application_key)
file_info = {'how': 'good-file'}
b2_bucket = b2_api.get_bucket_by_name('malaya-speech-model')
file = 'fastspeech2-female-output/frozen_model.pb'
outPutname = 'v2/tts/fastspeech2-female.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'fastspeech2-female-output/frozen_model.pb.quantized'
outPutname = 'v2/tts/fastspeech2-female.pb.quantized'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = '../speech-bahasa/female-stats-v3/stats.npy'
outPutname = 'v2/vocoder-stats/female.npy'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
```
## Dependencies
```
import json, glob
import numpy as np
import pandas as pd
import tensorflow as tf
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts_aux import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
```
# Load data
```
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
input_base_path = '/kaggle/input/189-tweet-train-5fold-roberta-top-public/'
with open(input_base_path + 'config.json') as json_file:
config = json.load(json_file)
config
vocab_path = input_base_path + 'vocab.json'
merges_path = input_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
# vocab_path = base_path + 'roberta-base-vocab.json'
# merges_path = base_path + 'roberta-base-merges.txt'
config['base_model_path'] = base_path + 'roberta-base-tf_model.h5'
config['config_path'] = base_path + 'roberta-base-config.json'
model_path_list = glob.glob(input_base_path + 'model' + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = '\n')
```
# Tokenizer
```
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
lowercase=True, add_prefix_space=True)
```
# Pre process
```
test['text'].fillna('', inplace=True)
test['text'] = test['text'].apply(lambda x: x.lower())
test['text'] = test['text'].apply(lambda x: x.strip())
x_test, x_test_aux, x_test_aux_2 = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name='base_model')
last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
x = layers.Dropout(.1)(last_hidden_state)
x_start = layers.Dropout(0.1)(x)
x_start = layers.Conv1D(768, 2, padding='same')(x_start)
x_start = layers.LeakyReLU()(x_start)
x_start = layers.Conv1D(64, 2, padding='same')(x_start)
x_start = layers.Dense(1)(x_start)
x_start = layers.Flatten()(x_start)
y_start = layers.Activation('softmax', name='y_start')(x_start)
x_end = layers.Dropout(0.1)(x)
x_end = layers.Conv1D(768, 2, padding='same')(x_end)
x_end = layers.LeakyReLU()(x_end)
x_end = layers.Conv1D(64, 2, padding='same')(x_end)
x_end = layers.Dense(1)(x_end)
x_end = layers.Flatten()(x_end)
y_end = layers.Activation('softmax', name='y_end')(x_end)
model = Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end])
return model
```
# Make predictions
```
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
for model_path in model_path_list:
print(model_path)
model = model_fn(config['MAX_LEN'])
model.load_weights(model_path)
test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE']))
test_start_preds += test_preds[0]
test_end_preds += test_preds[1]
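# Fold predictions are summed; the argmax taken later over the sum is
# equivalent to the argmax of the per-fold average.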
```
# Post process
```
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['text_len'] = test['text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
test['end'].clip(0, test['text_len'], inplace=True)
test['start'].clip(0, test['end'], inplace=True)
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
test['selected_text'] = test.apply(lambda x: x['text'] if (x['selected_text'] == '') else x['selected_text'], axis=1)
test['selected_text'].fillna(test['text'], inplace=True)
```
# Visualize predictions
```
display(test.head(10))
```
# Test set predictions
```
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test['selected_text']
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
# Pooled Classification
A common workflow with longitudinal spatial data is to apply the same classification scheme to an attribute over different time periods. More specifically, one would like to keep the class breaks the same over each period and examine how the mass of the distribution changes over these classes in the different periods.
The `Pooled` classifier supports this workflow.
```
import numpy as np
import mapclassify as mc
```
## Sample Data
We construct a synthetic dataset composed of 20 cross-sectional units at three time points. Here the mean of the series is increasing over time.
```
n = 20
data = np.array([np.arange(n)+i*n for i in range(1,4)]).T
data.shape
data
```
## Default: Quintiles
The default is to apply a [vec](https://en.wikipedia.org/wiki/Vectorization_(mathematics)) operator to the data matrix and treat the observations as a single collection. Here the quantiles of the pooled data are obtained.
```
res = mc.Pooled(data)
res
```
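As a quick sanity check of this vec interpretation (a minimal sketch, assuming only the `mapclassify` API already used in this notebook), the pooled global breaks should coincide with quantiles computed on the flattened data:
```
# Pooled breaks vs. quantiles of the vectorized (flattened) data.
flat = data.flatten()
np.allclose(mc.Pooled(data).global_classifier.bins, mc.Quantiles(flat).bins)
```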
Note that the class definitions are constant across the periods.
```
res = mc.Pooled(data, k=4)
res.col_classifiers[0].counts
res.col_classifiers[-1].counts
res.global_classifier.counts
res
```
Extract the pooled classification objects for each column
```
c0, c1, c2 = res.col_classifiers
c0
```
Compare to the unrestricted classifier for the first column
```
mc.Quantiles(c0.y, k=4)
```
and the comparisons for the last column
```
c2
mc.Quantiles(c2.y, k=4)
```
## Non-default classifier: BoxPlot
```
res = mc.Pooled(data, classifier='BoxPlot', hinge=1.5)
res
res.col_classifiers[0].bins
c0, c1, c2 = res.col_classifiers
c0.yb
c00 = mc.BoxPlot(c0.y, hinge=3)
c00.yb
c00
c0
```
## Non-default classifier: FisherJenks
```
res = mc.Pooled(data, classifier='FisherJenks', k=5)
res
c0, c1, c2 = res.col_classifiers
mc.FisherJenks(c0.y, k=5)
```
## Non-default classifier: MaximumBreaks
```
data[1, 0] = 10
data[1, 1] = 10
data[1, 2] = 10
data[9, 2] = 10
data
res = mc.Pooled(data, classifier='MaximumBreaks', k=5)
res
c0, c1, c2 = res.col_classifiers
c0
mc.MaximumBreaks(c0.y, k=5)
res = mc.Pooled(data, classifier='UserDefined', bins=mc.Quantiles(data[:,-1]).bins)
res
mc.Quantiles(data[:,-1])
data[:,-1]
```
## Pinning the pooling
Another option is to use a specific subperiod to define the classes for the pooling.
### Pinning to the last period
As an example, we can use the quintiles from the third period to define the pooled classifier:
```
pinned = mc.Pooled(data, classifier='UserDefined', bins=mc.Quantiles(data[:,-1]).bins)
pinned
pinned.global_classifier
```
### Pinning to the first period
```
pinned = mc.Pooled(data, classifier='UserDefined', bins=mc.Quantiles(data[:,0]).bins)
pinned
```
Note that while the quintiles for the first period, by definition, contain all the values from that period, they do not bound the larger values in subsequent periods. Following the [mapclassify policy](https://github.com/pysal/mapclassify/blob/master/mapclassify/classifiers.py#L569), an additional class is added to contain all values in the pooled series.
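A quick way to see this extra class (a sketch relying on the `pinned` object created above):
```
# The first-period quintiles yield 5 breaks; the pinned pooled classifier
# should carry one extra open-ended class for the larger pooled values.
print(len(mc.Quantiles(data[:, 0]).bins))  # 5
print(len(pinned.global_classifier.bins))  # expected: 6
```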
# Determining the proton content with a quantum computer
Code at: https://github.com/qiboteam/qibo/tree/master/examples/qPDF.
In this tutorial we show how to use the `qPDF` model implemented in Qibo to create a set of Parton Distribution Functions (PDFs), parameterized by a variational quantum circuit. In the context of High Energy Physics, parton distribution functions estimate the momentum fraction of the proton carried by partons, i.e. quarks, antiquarks and gluons. Here we simulate a quantum computer to encode within a circuit the data from these PDFs in such a way that, if we measure the output of the aforementioned quantum circuit, we obtain the corresponding PDF values.
In order to accomplish our goal, we use a Variational Quantum Circuit (VQC):

### Circuit
We consider two different Ansätze. Both depend on tunable parameters and on a variable $x$ that also serves as the independent variable of the PDF $f_i(x, Q)$, where $Q$ is fixed.
The first one is the _Weighted_ Ansatz. Its basic single-qubit gate is
$$
U_w (\alpha, x) = R_z(\alpha_3 \log(x) + \alpha_4) R_y(\alpha_1 x + \alpha_2).
$$
The second is the _Fourier_ Ansatz, whose basic single-qubit gate is
$$
U_f(\alpha, x) = R_y(\alpha_4)R_z(\alpha_3)R_y(-\pi/2 \log x)R_y(\alpha_2)R_z(\alpha_1)R_y(\pi x)
$$
Both Ansätze have a layered structure with entangling gates among different qubits depicted in the following circuit

The Ansatz allocates one qubit per parton. Either a single flavour or all flavours $(s, \bar s, c, u, \bar u, d, \bar d, g)$ can be fitted simultaneously, so only circuits with one or eight qubits are available.
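For concreteness, here is a minimal NumPy sketch (an illustration, not the Qibo implementation) of the single-qubit _Weighted_ gate $U_w$ defined above:
```
import numpy as np

def ry(theta):
    c, s = np.cos(theta / 2), np.sin(theta / 2)
    return np.array([[c, -s], [s, c]])

def rz(theta):
    return np.diag([np.exp(-1j * theta / 2), np.exp(1j * theta / 2)])

def u_weighted(alpha, x):
    # U_w(alpha, x) = Rz(a3 * log(x) + a4) @ Ry(a1 * x + a2)
    a1, a2, a3, a4 = alpha
    return rz(a3 * np.log(x) + a4) @ ry(a1 * x + a2)

print(u_weighted([0.1, 0.2, 0.3, 0.4], x=0.5))
```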
### Cost function
The cost function driving the optimization process of this circuit is defined through several pieces. First, we need a Hamiltonian to measure. We choose a different Hamiltonian for every parton, namely
$$
Z_i = \bigotimes_{j=0}^{n} Z^{\delta_{ij}}.
$$
This family of Hamiltonians allows for the definition of their expected values, depending on $\theta$ and $x$
$$
z_i (\theta, x) = \langle \psi(\theta, x) | Z_i | \psi(\theta, x) \rangle.
$$
The relation between the $z(\theta, x)$ quantities and PDFs is
$$
f_i (x, Q_0) = \frac{1 - z_i(\theta, x)}{1 + z_i (\theta, x)}.
$$
Using this definition, we can just use the usual Pearson's chi-squared quantity
$$
\chi^2 = \frac{1}{N}\sum_{i=1}^N \int_{x\in[0, 1]} dx\, \frac{\left( f_i (x, Q_0) - \frac{1 - z_i(x, \theta)}{1 + z_i (x, \theta)}\right)^2}{\sigma^2}.
$$
This is the loss function for our minimization procedure.
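To make the measured observable concrete, here is a small NumPy sketch (again an illustration, not part of the Qibo model) of the parton Hamiltonian $Z_i$ for a few qubits:
```
import numpy as np
from functools import reduce

I2 = np.eye(2)
Z = np.diag([1.0, -1.0])

def z_hamiltonian(i, n):
    # Z_i = Z on qubit i, identity on every other qubit.
    return reduce(np.kron, [Z if j == i else I2 for j in range(n)])

print(z_hamiltonian(1, 3).diagonal())  # +/-1 pattern over the 8 basis states
```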
## Code
First, we must decide the variables for our problem. The meaning of them are
- `ansatz`: Which one is chosen, *Weighted* or *Fourier*.
- `multi_output`: If *True*, all partons are fitted in the same circuit.
- `parton`: which parton is to be fit. Ignored if `multi_output = True`.
- `mode`: if *full*, data is fitted for $x \in [10^{-4}, 1]$, if *partial* only large $x$ is considered.
- `layers`: number of layers.
### Create a qPDF model
```
# import requirements
import numpy as np
from qibo.models.hep import qPDF
# our setup
ansatz = 'Weighted'
multi_output = True
parton = '8flavours' # or gluon
mode = 'full' # or partial
layers = 3
```
Extract reference data and auxiliary variables. This cell controls the importing of different sets of data.
```
# Read input data
def load_data_and_setup():
if multi_output:
data_file = f'data/{mode}/8flavours.dat'
nqubits = 8
else:
data_file = f'data/{mode}/{parton}.dat'
nqubits = 1
return np.loadtxt(data_file), nqubits
# load data
data, nqubits = load_data_and_setup()
# load qPDF model
mypdf = qPDF(ansatz, layers, nqubits, multi_output=multi_output)
```
Now we define the loss function
$$
\chi^2 = \frac{1}{N}\sum_{i=1}^N\sum_{j} \frac{\left( f_i (x_j, Q_0) - \frac{1 - z_i(x_j, \theta)}{1 + z_i(x_j, \theta)}\right)^2}{\sigma_{ij}^2}.
$$
For multi-flavour fits, the mean over flavours is taken.
```
# Define loss function
def loss(params):
"""Compute loss for a given set of parameters.
Args:
parameters (np.array): the list of parameters for the gates.
Returns:
The loss function.
"""
xtrain = data[:, 0]
if multi_output:
cf = 0
i = 1
for ypred in mypdf.predict(params, xtrain).transpose():
ytrain = data[:, i]
ysigma = data[:, i + 1]
cf += np.mean(np.square(ytrain - ypred) / ysigma ** 2)
i += 2
cf /= 8
else:
ytrain = data[:, 1]
ysigma = data[:, 2]
ypred = mypdf.predict(params, xtrain).flatten()
cf = np.mean(np.square(ytrain - ypred) / ysigma ** 2)
return cf
```
A standard optimization procedure is used to look for the optimal configuration of the $\theta$ parameters. In this case, we rely on the optimizers exposed by Qibo, which include `scipy` methods and evolutionary strategies such as the CMA-ES used below.
```python
# Optimizing
from qibo.optimizers import optimize
np.random.seed(10)
params = np.random.rand(mypdf.nparams)
_, params, _ = optimize(loss, params, method='cma')
```
The optimization may be costly in some cases. In order to save time, we provide some precomputed results that will let you see the performance of this algorithm in several circumstances. Precomputed results include the ones detailed in the corresponding paper.
```
# For taking old results
import pickle
with open(f'results/{mode}/{parton}/{ansatz}_{nqubits}_q_{layers}_l_result.pkl', 'rb') as f:
results = pickle.load(f)
params = results['x']
```
Let us now take a look at the results! These graphs compare the reference data (black) and the current optimized qPDF fit.
```
# Auxiliary plotting function
import matplotlib.pyplot as plt
def plot_PDF(params, chi2):
if multi_output:
fig, axs = plt.subplots(2, 4, figsize=(13, 9), sharex=True, sharey=True)
i = 1
partons = ['sbar', 'ubar', 'dbar', 'gluon', 'd', 'u', 's', 'c']
partons_name = [r'$\bar s$', r'$\bar u$', r'$\bar d$', r'$g$', r'$d$', r'$u$', r'$s$', r'$c$']
xtrain = data[:, 0]
for ax, yprediction in zip(axs.flatten(), mypdf.predict(params, xtrain).transpose()):
ytrain = data[:, i].copy()
ysigma = data[:, i + 1].copy()
if i == 7:
ax.set(title=partons_name[(i - 1) // 2] + ' / 3', xscale='log')
yprediction /= 3
ytrain /= 3
ysigma /= 3
elif i == 15:
ax.set(title=partons_name[(i - 1) // 2] + r' $\times$ 10', xscale='log')
yprediction *= 10
ytrain *= 10
ysigma *= 10
else:
ax.set(title=partons_name[(i - 1) // 2], xscale='log')
if (i - 1) // 2 % 4 == 0:
ax.set(ylabel='PDF')
if (i - 1) // 2 > 3:
ax.set(xlabel='x')
ax.plot(xtrain, ytrain, label='Classical PDF', color='black')
ax.fill_between(xtrain, ytrain + ysigma, ytrain - ysigma, alpha=0.3, color='black')
ax.plot(xtrain, yprediction.flatten(), label=r'qPDF model', color='orange', linewidth=2, zorder=10)
ax.set(ylim=[-0.05, 1])
i += 2
ax.grid(True)
fig.suptitle(f'$\chi^2 = $ {chi2:.4f}')
plt.legend()
else:
fig, ax = plt.subplots(figsize = (8, 6))
ax.set(title=f'$\chi^2 = $ {chi2:.2f}', xlabel='x', ylabel='PDF',
xscale='log')
xtrain = data[:, 0]
ytrain = data[:, 1]
ysigma = data[:, 2]
yprediction = mypdf.predict(params, xtrain).flatten()
ax.plot(xtrain, ytrain, label='Classical '+ parton + ' PDF', color='black')
ax.fill_between(xtrain, ytrain + ysigma, ytrain - ysigma, alpha=0.3, color='black')
ax.plot(xtrain, yprediction.flatten(), label=r'Quantum PDF model', zorder=10)
ax.legend()
```
## Plot results
```
plot_PDF(params, chi2=loss(params))
```
> Copyright 2020 DeepMind Technologies Limited.
>
> Licensed under the Apache License, Version 2.0 (the "License");
> you may not use this file except in compliance with the License.
>
> You may obtain a copy of the License at
> https://www.apache.org/licenses/LICENSE-2.0
>
> Unless required by applicable law or agreed to in writing, software
> distributed under the License is distributed on an "AS IS" BASIS,
> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
> See the License for the specific language governing permissions and
> limitations under the License.
# **Tutorial: Causal Reasoning in Probability Trees**
*By the AGI Safety Analysis Team @ DeepMind.*
**Summary:** This is the companion tutorial for the paper "Algorithms for
Causal Reasoning in Probability Trees" by Genewein T. et al. (2020).
Probability trees are one of the simplest models of causal generative
processes. They possess clean semantics and are strictly more general than
causal Bayesian networks, being able to e.g. represent causal relations that
causal Bayesian networks can't. Even so, they have received little attention
from the AI and ML community.
In this tutorial we present new algorithms for causal reasoning in discrete
probability trees that cover the entire causal hierarchy (association,
intervention, and counterfactuals), operating on arbitrary logical and causal
events.
# Part I: Basics
### Setup
First we install the `graphviz` package:
```
!apt-get install graphviz
!pip install graphviz
```
### Imports and data structures
We import Numpy and Pyplot, and then we define the basic data structures for
this tutorial.
```
#@title Imports
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#@title Data structures
import graphviz
import copy
from random import random
class MinCut:
"""A representation of an event in a probability tree."""
def __init__(self, root, t=frozenset(), f=frozenset()):
self._root = root
self.t = t
self.f = f
def __str__(self):
true_elements = ', '.join([str(id) for id in sorted(self.t)])
false_elements = ', '.join([str(id) for id in sorted(self.f)])
return '{true: {' + true_elements + '}, false: {' + false_elements + '}}'
def __repr__(self):
return self.__str__()
# Proposition
def prop(root, statement):
cond_lst = Node._parse_statements(statement)
# Complain if more than one statement.
if len(cond_lst) != 1:
raise Exception('\'prop\' takes one and only one statement.')
# Remove list envelope.
cond = cond_lst[0]
# Recurse.
return MinCut._prop(root, root, cond)
def _prop(root, node, cond):
# Take var and val of condition.
condvar, condval = cond
# Search for variable.
for var, val in node.assign:
if condvar == var:
if condval == val:
return MinCut(root, frozenset([node.id]), frozenset())
else:
return MinCut(root, frozenset(), frozenset([node.id]))
# If we reach a leaf node and the variable isn't resolved,
# raise an exception.
if not node.children:
msg = 'Node ' + str(node.id) + ': ' \
+ 'min-cut for condition "' + condvar + ' = ' \
+ condval + '" is undefined.'
raise Exception(msg)
# Variable not found, recurse.
t_set = frozenset()
f_set = frozenset()
for child in node.children:
_, subnode = child
subcut = MinCut._prop(root, subnode, cond)
t_set = t_set.union(subcut.t)
f_set = f_set.union(subcut.f)
# Consolidate into node if children are either only true or false nodes.
cut = MinCut(root, t_set, f_set)
if not cut.f:
cut.t = frozenset([node.id])
elif not cut.t:
cut.f = frozenset([node.id])
return cut
# Negation
def neg(self):
return MinCut(self._root, t=self.f, f=self.t)
def __invert__(self):
return self.neg()
# Conjunction
def conj(root, cut1, cut2):
return MinCut._conj(root, root, cut1, cut2, False, False)
def _conj(root, node, cut1, cut2, end1=False, end2=False):
# Base case.
if (node.id in cut1.f) or (node.id in cut2.f):
return MinCut(root, frozenset(), frozenset([node.id]))
if node.id in cut1.t:
end1 = True
if node.id in cut2.t:
end2 = True
if end1 and end2:
return MinCut(root, frozenset([node.id]), frozenset())
# Recurse.
t_set = frozenset()
f_set = frozenset()
for _, subnode in node.children:
subcut = MinCut._conj(root, subnode, cut1, cut2, end1, end2)
t_set = t_set.union(subcut.t)
f_set = f_set.union(subcut.f)
# Consolidate into node if children are either only true or false nodes.
cut = MinCut(root, t_set, f_set)
if not cut.f:
cut.t = frozenset([node.id])
elif not cut.t:
cut.f = frozenset([node.id])
return cut
def __and__(self, operand):
return MinCut.conj(self._root, self, operand)
# Disjunction
def disj(root, cut1, cut2):
return MinCut.neg(MinCut.conj(root, MinCut.neg(cut1), MinCut.neg(cut2)))
def __or__(self, operand):
return MinCut.disj(self._root, self, operand)
# Causal dependence
def precedes(root, cut_c, cut_e):
return MinCut._precedes(root, root, cut_c, cut_e, False)
def _precedes(root, node, cut_c, cut_e, found_c):
# Base case.
if not found_c:
if (node.id in cut_e.t or node.id in cut_e.f or node.id in cut_c.f):
return MinCut(root, frozenset(), frozenset([node.id]))
if (node.id in cut_c.t):
found_c = True
if found_c:
if (node.id in cut_e.t):
return MinCut(root, frozenset([node.id]), frozenset())
if (node.id in cut_e.f):
return MinCut(root, frozenset(), frozenset([node.id]))
# Recursion.
t_set = frozenset()
f_set = frozenset()
for _, subnode in node.children:
subcut = MinCut._precedes(root, subnode, cut_c, cut_e, found_c)
t_set = t_set.union(subcut.t)
f_set = f_set.union(subcut.f)
# Consolidate into node if children are either only true or false nodes.
cut = MinCut(root, t_set, f_set)
if not cut.f:
cut.t = frozenset([node.id])
elif not cut.t:
cut.f = frozenset([node.id])
return cut
def __lt__(self, operand):
return MinCut.precedes(self._root, self, operand)
class Critical:
"""A representation of the critical set associated to an event."""
# Constructor
def __init__(self, s=frozenset()):
self.s = s
def __str__(self):
elements = ', '.join([str(id) for id in sorted(self.s)])
return '{' + elements + '}'
def __repr__(self):
return self.__str__()
def critical(root, cut):
_, crit = Critical._critical(root, cut)
return crit
def _critical(node, cut):
# Base case.
if node.id in cut.t:
return (False, Critical(frozenset()))
if node.id in cut.f:
return (True, Critical(frozenset()))
# Recurse.
s = frozenset()
for _, subnode in node.children:
incut, subcrit = Critical._critical(subnode, cut)
if incut:
s = s.union(frozenset([node.id]))
else:
s = s.union(subcrit.s)
return (False, Critical(s))
class Node:
"""A node in probability tree."""
# Constructor.
def __init__(self, uid, statements, children=None):
# Automatically assigned ID.
self.id = uid
# Assignments.
if isinstance(statements, str):
self.assign = Node._parse_statements(statements)
else:
self.assign = statements
# Children.
if children is None:
self.children = []
else:
self.children = children
# Parse statements.
def _parse_statements(statements):
statement_list = statements.split(',')
pair_list = [x.split('=') for x in statement_list]
assign = [(var.strip(), val.strip()) for var, val in pair_list]
return assign
# Sample.
def sample(self):
return self._sample(dict())
def _sample(self, smp):
# Add new assignments.
newsmp = {var: val for var, val in self.assign}
smp = dict(smp, **newsmp)
# Base case.
if not self.children:
return smp
# Recurse.
rnum = random()
for child in self.children:
subprob, subnode = child
rnum -= subprob
if rnum <= 0:
return subnode._sample(smp)
# Something went wrong: probabilities aren't normalized.
msg = 'Node ' + str(self.id) + ': ' \
+ 'probabilities of transitions do not add up to one.'
raise Exception(msg)
# Insert.
def insert(self, prob, node):
self.children.append((prob, node))
# Compute probability of cut.
def prob(self, cut):
return self._prob(cut, 1.0)
def _prob(self, cut, prob):
# Base case.
if self.id in cut.t:
return prob
if self.id in cut.f:
return 0.0
# Recurse.
probsum = 0.0
for child in self.children:
subprob, subnode = child
resprob = subnode._prob(cut, prob * subprob)
probsum += resprob
return probsum
# Return a dictionary with all the random variables and their values.
def rvs(self):
sts = dict()
return self._rvs(sts)
def _rvs(self, sts):
for var, val in self.assign:
if not (var in sts):
sts[var] = list()
if not (val in sts[var]):
sts[var].append(val)
for _, subnode in self.children:
sts = subnode._rvs(sts)
return sts
# Auxiliary function for computing the list of children.
def _normalize_children(children, probsum, logsum):
newchildren = None
if probsum > 0.0:
newchildren = [
(subprob / probsum, subnode) for _, subprob, subnode in children
]
else:
newchildren = [
(sublog / logsum, subnode) for sublog, _, subnode in children
]
return newchildren
# Conditioning
def see(self, cut):
root = copy.deepcopy(self)
root._see(cut, 1.0)
return root
def _see(self, cut, prob):
# Base case.
if self.id in cut.t:
return (1.0, prob)
if self.id in cut.f:
return (0.0, 0.0)
# Recurse.
newchildren = []
probsum = 0.0
logsum = 0.0
for subprob, subnode in self.children:
reslog, resprob = subnode._see(cut, prob * subprob)
newchildren.append((reslog, resprob, subnode))
logsum += reslog
probsum += resprob
# Normalize.
self.children = Node._normalize_children(newchildren, probsum, logsum)
return (1.0, probsum)
# Causal intervention
def do(self, cut):
root = copy.deepcopy(self)
root._do(cut)
return root
def _do(self, cut):
# Base case.
if self.id in cut.t:
return True
if self.id in cut.f:
return False
# Recurse.
newchildren = []
probsum = 0.0
logsum = 0.0
for subprob, subnode in self.children:
resdo = subnode._do(cut)
if resdo:
newchildren.append((1.0, subprob, subnode))
probsum += subprob
logsum += 1.0
else:
newchildren.append((0.0, 0.0, subnode))
# Normalize.
self.children = Node._normalize_children(newchildren, probsum, logsum)
return (1.0, probsum)
# Counterfactual/subjunctive conditional
def cf(self, root_prem, cut_subj):
root_subj = self.do(cut_subj)
root_subj._cf(root_prem, cut_subj)
return root_subj
def _cf(self, prem, cut):
# Base case.
if self.id in cut.t:
return True
if self.id in cut.f:
return False
# Recurse.
critical = False
for child, child_prem in zip(self.children, prem.children):
(_, subnode) = child
(_, subnode_prem) = child_prem
in_do = subnode._cf(subnode_prem, cut)
if not in_do:
critical = True
continue
# Pick children if node is critical.
if not critical:
self.children = [
(subprob, subnode)
for (_, subnode), (subprob, _) in zip(self.children, prem.children)
]
return True
# Show probability tree.
def show(self, show_id=False, show_prob=False, cut=None, crit=None):
# Initialize Digraph.
graph_attr = {
'bgcolor': 'White',
'rankdir': 'LR',
'nodesep': '0.1',
'ranksep': '0.3',
'sep': '0'
}
node_attr = {
'style': 'rounded',
'shape': 'box',
'height': '0.1',
'width': '0.5',
'fontsize': '10',
'margin': '0.1, 0.02'
}
edge_attr = {'fontsize': '10'}
g = graphviz.Digraph(
'g',
format='svg',
graph_attr=graph_attr,
node_attr=node_attr,
edge_attr=edge_attr)
# Recursion.
return self._show(
g, 1.0, show_id=show_id, show_prob=show_prob, cut=cut, crit=crit)
def _show(self, g, prob, show_id=False, show_prob=False, cut=None, crit=None):
# Create label.
labels = [name + ' = ' + value for name, value in self.assign]
node_label = '\n'.join(labels)
if show_id:
node_label = str(self.id) + '\n' + node_label
if show_prob:
node_label = node_label + '\np = ' + '{0:.3g}'.format(prob)
# Decorate node.
attr = {'style': 'filled, rounded', 'fillcolor': 'WhiteSmoke'}
if not (cut is None):
if self.id in cut.t:
attr = {'style': 'filled, rounded', 'fillcolor': 'AquaMarine'}
elif self.id in cut.f:
attr = {'style': 'filled, rounded', 'fillcolor': 'LightCoral'}
if not (crit is None):
if self.id in crit.s:
attr = {'style': 'filled, rounded', 'fillcolor': 'Plum'}
g.node(str(self.id), label=node_label, **attr)
# Recurse.
for child in self.children:
subprob, subnode = child
subnode._show(
g,
prob * subprob,
show_id=show_id,
show_prob=show_prob,
cut=cut,
crit=crit)
g.edge(str(self.id), str(subnode.id), label='{0:.3g}'.format(subprob))
return g
def find(self, uid):
if self.id == uid:
return self
for child in self.children:
subprob, subnode = child
found_node = subnode.find(uid)
if found_node is not None:
return found_node
return None
class PTree:
"""A probability tree."""
def __init__(self):
"""Create a probability tree."""
self._root = None
self._count = 0
def root(self, statements, children=None):
"""Sets the root node.
Parameters
----------
statements : str
A string containing a comma-separated list of statements of
the form "var = val", such as "X=1, Y=0". These are the
values resolved by the root node.
children : list((float, Node)), (default: None)
A list of (probability, child node) pairs. These are the root
node's children and their transition probabilities.
Returns
-------
Node
the root node of the probability tree.
"""
self._count += 1
self._root = Node(self._count, statements, children)
return self._root
def child(self, prob, statements, children=None):
"""Create a child node and its transition probability.
Parameters
----------
prob : float
The probability of the transition
statements : str
A string containing a comma-separated list of statements of
the form "var = val", such as "X=1, Y=0". These are the
values resolved by the child node.
children : list((float, Node)), (default: None)
A list of (probability, child node) pairs to be set as the
children of the node.
Returns
-------
Node
the created node.
"""
self._count += 1
return (prob, Node(self._count, statements, children))
def get_root(self):
"""Return the root node.
Returns
-------
Node
the root node of the probability tree.
"""
return self._root
def show(self, show_id=False, show_prob=False, cut=None, crit=None):
"""Returns a graph of the probability tree.
Parameters
----------
show_id: Bool (default: False)
If true, display the unique id's.
show_prob : Bool (default: False)
If true, display the node probabilities.
cut : MinCut (default: None)
If a MinCut is given, then display it.
crit : Critical (default: None)
If a Critical set is given, then show it.
Returns
-------
Node
the created node.
"""
return self._root.show(
show_id=show_id, show_prob=show_prob, cut=cut, crit=crit)
def rvs(self):
"""Return a dictionary with all the random variables and their values.
Returns
-------
dict(str: list)
A dictionary with all the random variables pointing at lists
containing their possible values.
"""
return self._root.rvs()
def rv(self, var):
"""Return a probability distribution for a given random variable.
Parameters
----------
var: str
A string containing the name of the random variable.
Returns
-------
list((float, str))
A list with pairs (prob, val), where prob and val are the
probability
and the value of the random variable.
"""
return [(self.prob(self.prop(var + ' = ' + val)), val)
for val in self.rvs()[var]]
def expect(self, var):
"""Return the expected value of a random variable.
Parameters
----------
var: str
A string containing the name of the random variable.
Returns
-------
float
The expected value of the random variable.
"""
e = 0.0
for prob, val in self.rv(var):
e += prob * float(val)
return e
def find(self, uid):
"""Return a node with given unique identifier.
Parameters
----------
uid: int
Identifier of the node to be returned.
Returns
-------
Node or None
Returns the node if found, otherwise None.
"""
return self._root.find(uid)
def prop(self, statement):
"""Returns min-cut of a statement.
Parameters
----------
statement: str
A single statement of the form "var = val", such as "X = 1".
Returns
-------
MinCut
the min-cut of the event corresponding to the statement.
"""
return MinCut.prop(self._root, statement)
def critical(self, cut):
"""Returns critical set of a min-cut.
Parameters
----------
cut: MinCut
A min-cut.
Returns
-------
Critical
the critical set for the min-cut.
"""
return Critical.critical(self._root, cut)
def sample(self):
"""Sample a realization.
Returns
-------
dict((str:str))
A dictionary of bound random variables such as
{ 'X': '1', 'Y': '0' }.
"""
return self._root.sample()
def prob(self, cut):
"""Compute probability of a min-cut.
Parameters
----------
cut: MinCut
A min-cut for an event.
Returns
-------
float
The probability of the event of the min-cut.
"""
return self._root.prob(cut)
def see(self, cut):
"""Return a probability tree conditioned on a cut.
Parameters
----------
cut: MinCut
A min-cut for an event.
Returns
-------
PTree
A new probability tree.
"""
newptree = PTree()
newptree._root = self._root.see(cut)
return newptree
def do(self, cut):
"""Intervene on a cut.
Parameters
----------
cut: MinCut
A min-cut for an event.
Returns
-------
PTree
A new probability tree.
"""
newptree = PTree()
newptree._root = self._root.do(cut)
return newptree
def cf(self, tree_prem, cut_subj):
"""Return a subjunctive conditional tree.
Parameters
----------
tree_prem: PTree
A probability tree representing the premises for the subjunctive
evaluation. This probability tree must have been obtained through
operations on the base probability tree.
cut_subj: MinCut
A min-cut for an event. This min-cut is the subjunctive condition of
the counterfactual.
Returns
-------
PTree
A new probability tree.
"""
newptree = PTree()
newptree._root = self._root.cf(tree_prem._root, cut_subj)
return newptree
def fromFunc(func, root_statement=None):
"""Build a probability tree from a factory function.
Building probability trees can be difficult, especially when we have
to manually specify all its nodes. To simplify this, `fromFunc` allows
building a probability tree using a factory function. A factory
function is a function that:
- receives a dictionary of bound random variables, such as
{ 'X': '1', 'Y': '0' }
- and returns either `None` if a leaf has been reached, or a list
of transitions and their statements, such as
[(0.3, 'Z = 0'), (0.2, 'Z = 1'), (0.5, 'Z = 2')].
Such a factory function contains all the necessary information for
building a probability tree.
The advantage of using a factory function is that we can exploit
symmetries (such as conditional independencies) to code a much
more compact description of the probability tree.
Parameters
----------
func: Function: dict((str: str)) -> list((float, str))
A probability tree factory function.
root_statement: str (default: None)
A string containing the statement (e.g. 'root = 0')
for the root node. If `None`, 'O = 1' is used.
Returns
-------
PTree
A new probability tree.
"""
if not root_statement:
root_statement = 'O = 1'
tree = PTree()
bvars = dict(Node._parse_statements(root_statement))
tree.root(root_statement, tree._fromFunc(func, bvars))
return tree
def _fromFunc(self, func, bvars):
"""Auxiliary method for PTree.fromFunc()."""
transition_list = func(bvars)
if not transition_list:
return None
children = []
for prob, statement in transition_list:
add_vars = dict(Node._parse_statements(statement))
new_bvars = {**bvars, **add_vars}
res = self._fromFunc(func, new_bvars)
children.append(self.child(prob, statement, res))
return children
```
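Before diving in, here is a quick usage sketch of `PTree.fromFunc` (the factory `make_x` below is a hypothetical example following the docstring above):
```
def make_x(bvars):
    if 'X' in bvars:
        return None  # X already bound: leaf reached
    return [(0.5, 'X = 1'), (0.3, 'X = 2'), (0.2, 'X = 3')]

pt_demo = PTree.fromFunc(make_x)
display(pt_demo.show())
```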
## 1. Probability trees
A **probability tree** is a representation of a random experiment or process.
Starting from the **root node**, the process iteratively takes **random
transitions** to **child nodes**, terminating at a **leaf node**. A path from
the root node to a node is a **(partial) realization**, and a path from the root
node to a leaf node is a **total realization**. Every node in the tree has one
or more **statements** associated with it. When a realization reaches a node,
the statements indicate which values are bound to random variables.
Let's create our first probability tree. It represents a random variable $X$ where:
- $X = 1$ with probability $0.5$;
- $X = 2$ with probability $0.3$;
- and $X = 3$ with probability $0.2$.
```
# Create a blank probability tree.
pt = PTree()
# Add a root node and the children.
pt.root(
'O = 1',
[pt.child(0.5, 'X = 1'),
pt.child(0.3, 'X = 2'),
pt.child(0.2, 'X = 3')])
# Display it.
display(pt.show())
```
We'll typically call the root node $O$, standing for "**O**mega" ($\Omega$),
which is a common name for the sample space in the literature.
After creating a probability tree, we can ask it to return:
- the list of random variables and their values using the method `rvs()`;
- the probability distribution for a given random variable using
`rv(varname)`;
- the expected value of a *numerical* random variable with `expect(varname)`;
- and obtain a random sample from the tree with `sample()`.
```
rvs = pt.rvs()
print('Random variables:', rvs)
pdist = pt.rv('X')
print('P(X) =', pdist)
expect = pt.expect('X')
print('E(X) =', expect)
smp = pt.sample()
print('Sample =', smp)
```
### Causal dependencies
In a probability tree, a causal dependency $X \rightarrow Y$ is expressed
through a node $X$ having a descendant node $Y$. For instance, consider the next
probability tree:
```
# Create a blank probability tree.
pt = PTree()
# Add a root node and the children.
pt.root('O = 1', [
pt.child(0.3, 'X = 0', [
pt.child(0.2, 'Y = 0'),
pt.child(0.8, 'Y = 1'),
]),
pt.child(0.7, 'X = 1', [
pt.child(0.8, 'Y = 0'),
pt.child(0.2, 'Y = 1'),
]),
])
# Display it.
display(pt.show())
```
Here $Y$ is a descendant of $X$ and therefore $X \rightarrow Y$. This means that
we can affect the value of $Y$ by choosing $X$ but not vice versa. The exact
semantics of this requires **interventions**, which we'll review later. Notice
how the value of $X$ changes the distribution over $Y$:
- $P(Y=1|X=0) > P(Y=0|X=0)$,
- $P(Y=1|X=1) < P(Y=0|X=1)$.
If we want to express that neither $X \rightarrow Y$ nor $Y \rightarrow X$ are
the case, then we need to combine both random variables into the same nodes as
follows:
```
# Create a blank probability tree.
pt = PTree()
# Add a root node and the children.
pt.root('O = 1', [
pt.child(0.3 * 0.2, 'X = 0, Y = 0'),
pt.child(0.3 * 0.8, 'X = 0, Y = 1'),
pt.child(0.7 * 0.8, 'X = 1, Y = 0'),
pt.child(0.7 * 0.2, 'X = 1, Y = 1')
])
# Display it.
display(pt.show())
```
### Another tree: drug testing
Let's build another example. Here we have a drug testing situation:
- A patient has a probability of being ill ($D = 1$).
- If the patient takes the drug ($T = 1$) when she is ill, she will likely
feel better ($R = 1$), otherwise she will likely feel worse ($R = 0$).
- However, if she takes the drug when she is not ill, the situation is
inverted: the drug might make her feel worse ($R = 0$).

This tree can also be represented as the above causal Bayesian graph. This is
always the case when the causal ordering of the random variables is the same, no
matter which realization path is taken in the tree.
```
med = PTree()
med.root('O = 1', [
med.child(0.4, 'D = 0', [
med.child(0.5, 'T = 0',
[med.child(0.2, 'R = 0'),
med.child(0.8, 'R = 1')]),
med.child(0.5, 'T = 1',
[med.child(0.8, 'R = 0'),
med.child(0.2, 'R = 1')])
]),
med.child(0.6, 'D = 1', [
med.child(0.5, 'T = 0',
[med.child(0.8, 'R = 0'),
med.child(0.2, 'R = 1')]),
med.child(0.5, 'T = 1',
[med.child(0.2, 'R = 0'),
med.child(0.8, 'R = 1')])
])
])
print('Random variables:', med.rvs())
display(med.show())
```
### A tree that cannot be represented as a Bayesian graph: Weather-Barometer Worlds
We can also build a tree where the different realization paths have different
causal dependencies. For instance, imagine we have two possible worlds:
- our world ($A = 0$), where the weather ($W$) influences the barometer reading ($B$);
- an alien world ($A = 1$), where the barometer influences the weather.
Such a situation with multiple causal dependencies cannot be captured in a
single graphical model:

However, we can represent it using a probability tree:

### Exercise 1
Now it's your turn to create a probability tree. Create the "weather-barometer
worlds" probability tree and name it `wb`.
```
```
#### Solution
```
# Create blank tree.
wb = PTree()
# Set the root node and its sub-nodes.
wb.root('O = 1', [
wb.child(0.5, 'A = 0', [
wb.child(0.5, 'W = 0',
[wb.child(0.75, 'B = 0'),
wb.child(0.25, 'B = 1')]),
wb.child(0.5, 'W = 1',
[wb.child(0.25, 'B = 0'),
wb.child(0.75, 'B = 1')])
]),
wb.child(0.5, 'A = 1', [
wb.child(0.5, 'B = 0',
[wb.child(0.75, 'W = 0'),
wb.child(0.25, 'W = 1')]),
wb.child(0.5, 'B = 1',
[wb.child(0.25, 'W = 0'),
wb.child(0.75, 'W = 1')])
])
])
# Display it.
display(wb.show())
```
### Remember:
- A node can contain more than one statement.
- The tree doesn't have to be balanced.
See the next example.
```
pt = PTree()
pt.root('O = 1', [
pt.child(0.2, 'X = 0, Y = 0'),
pt.child(0.8, 'X = 1', [pt.child(0.3, 'Y = 1'),
pt.child(0.7, 'Y = 2')])
])
display(pt.show())
```
### Displaying additional information
We can display additional information about probability trees:
- **Unique identifiers**: Each node has an automatically assigned
unique identifier. Use `show_id = True` to display it.
- **Probability**: Each node has a probability of being realized.
Use `show_prob = True` to display this information.
```
display(med.show(show_prob=True, show_id=True))
```
### Exercise 2
For the probability tree `wb`:
- list all the random variables;
- compute the probability distribution of the barometer ($B$);
- display the probability tree with the unique ids and probabilities
of every node.
```
```
#### Solution
```
print(wb.rvs())
print(wb.rv('B'))
display(wb.show(show_id=True, show_prob=True))
```
## 2. Propositions and min-cuts
We've seen that a probability tree is a simple way of representing all the
possible realizations and their causal dependencies. We now investigate the
possible **events** in a probability tree.
An **event** is a collection of full realizations. We can **describe** events
using propositions about random variables (e.g. $W = 0$, $B = 1$) and the
logical connectives of negation, conjunction (AND), and disjunction (OR). The
connectives allow us to state composite events, such as $\neg(W = 1 \wedge B =
0)$. For instance, the event $B = 0$ is the set of all realizations, i.e. paths
from the root to a leaf, that **pass through a node** with the statement $B=0$.
We can **represent** events using cuts, and in particular, **min-cuts**. A
**min-cut** is a minimal representation of an event in terms of the nodes of a
probability tree. The min-cut of an event collects the smallest number of nodes
in the probability tree that resolves whether an event has occurred or not. In
other words, if a realization hits a node in the min-cut, then we know for sure
whether the event has occurred or not. (In measure theory, a similar notion to
min-cut would be the algebra that renders the event measurable.)
Our implementation of min-cuts furthermore distinguishes the nodes that
render the event true from the nodes that render it false.
Let's start by constructing a min-cut for a setting of a random variable in our
drug-testing example, and verify that the min-cut is correct for that setting.
```
# Build a cut for the proposition 'R = 1'.
cut = med.prop('R=1')
# The result is of type MinCut:
print('Type of a cut:', type(cut))
# Print the min-cut. Note that the elements in the
# true and false sets refer to the ids of the prob tree.
print('Min-cut for "R = 1":', cut)
# Render the probability tree with a cut.
display(med.show(cut=cut, show_id=True))
```
Let's do a min-cut for not taking the treatment ($T = 0$)
```
# Build a cut for the proposition 'T = 0'.
cut = med.prop('T=0')
# Print the min-cut. Note that the elements in the
# true and false sets refer to the ids of the prob tree.
print('Min-cut for "T = 0":', cut)
# Render the probability tree with a cut.
display(med.show(cut=cut, show_id=True))
```
We can build negative events too using the `~` unary operator. As an example,
let's negate the previous event. Compare the two cuts. Notice that a negation
simply inverts the nodes that are true and false.
```
cut = ~med.prop('T = 0')
print('Min-cut for "T = 0":', med.prop('T = 0'))
print('Min-cut for "not T = 0":', ~med.prop('T = 0'))
display(med.show(cut=cut, show_id=True))
```
Now let's build more complex events using conjunctions (`&`) and disjunctions
(`|`). Make sure these min-cuts make sense to you. Notice that the conjunction
of two events picks out the earliest occurrence of false nodes and the latest
occurrence of true nodes, whereas the disjunction does the opposite.
```
# Recovery
cut1 = med.prop('R=1')
print('Cut for "R = 1":')
display(med.show(cut=cut1))
# Taking the treatment
cut2 = med.prop('T=1')
print('Cut for "T=1":')
display(med.show(cut=cut2))
# Conjunction: taking the treatment and recovery
cut_and = cut1 & cut2
print('Cut for "T=1 and R=1":')
display(med.show(cut=cut_and))
# Disjunction: taking the treatment or recovery
cut_or = cut1 | cut2
print('Cut for "T=1 or R=1":')
display(med.show(cut=cut_or))
```
### The precedence relation
In addition to the Boolean operators, we can also use a causal connective which
cannot be stated in logical terms: the **precedence relation** $\prec$. This
relation allows building min-cuts for events where one event $A$ precedes
another event $B$, written $A \prec B$, and thus requires the additional
information provided by the probability tree's structure.
Let's try one example. We want to build the min-cut where having the disease
($D=1$) precedes feeling better ($R=1$), and vice-versa.
```
# Disease and recovery min-cuts.
cut1 = med.prop('D=1') < med.prop('R=1')
cut2 = med.prop('R=1') < med.prop('D=1')
# Display.
print('Cut for D=1 < R=1:')
display(med.show(cut=cut1))
print('Cut for R=1 < D=1:')
display(med.show(cut=cut2))
```
### Requirement: random variables must be measurable
If we try to build a min-cut using a variable that is not measurable, then an
exception is raised. For instance, the random variable $X$ below is not
measurable within the probability tree, because the realization starting at the
root and reaching the leaf $Y = 2$ never sets the value for $X$.
Attempting to build a min-cut for an event involving $X$ will throw an error.
```
pt = PTree()
pt.root('O = 1', [
pt.child(0.1, 'X = 0, Y = 0'),
pt.child(0.2, 'X = 1, Y = 1'),
pt.child(0.7, 'Y = 2')
])
display(pt.show())
```
### Special case: probabilistic truth versus logical truth
Let's have a look at one special case. Our definitions make a distinction
between **logical** and **probabilistic truth**. This is best seen in the
example below.
In this example, we have a probability tree with three outcomes: $X = 1, 2$, and $3$.
- $X = 1$ occurs with probability one.
- Hence, probabilistically, the event $X=1$ is resolved at the level of the root node.
- However, it isn't resolved at the logical level, since $X = 2$ or $X = 3$ can happen logically, although with probability zero.
Distinguishing between logical truth and probabilistic truth is important for
stating counterfactuals. This will become clearer later.
```
# First we add all the nodes.
pt = PTree()
pt.root('O = 1',
[pt.child(1, 'X = 1'),
pt.child(0, 'X = 2'),
pt.child(0, 'X = 3')])
# Show the cut for 'X = 0'
cut = pt.prop('X = 1')
print('While the root node "O=1" does resolve the event "X=1"\n' +
'probabilistically, it does not resolve the event logically.')
display(pt.show(cut=cut))
```
### Exercise 3
For the `wb` probability tree, build the min-cuts for the following events:
- the world is alien ($A = 1$);
- the weather is sunny ($W = 1$);
- the barometer goes down and the weather is sunny ($B = 0 \wedge W = 1$);
- the negation of "barometer does not go down or weather is not sunny",
$\neg(\neg(B = 0) \vee \neg(W = 1))$.
Display every min-cut. In particular, compare the last two. What do you observe?
```
```
#### Solution
```
# Exercise.
# A = 1.
cut = wb.prop('A=1')
print('Cut for "A=1":')
display(wb.show(cut=cut))
# W = 1.
cut = wb.prop('W=1')
print('Cut for "W=1":')
display(wb.show(cut=cut))
# B = 0 and W = 1.
cut = wb.prop('B=0') & wb.prop('W=1')
print('Cut for "B=0 and W=1":')
display(wb.show(cut=cut))
# not( not(B = 0) or not(W = 1) ).
cut = ~(~wb.prop('B=0') | ~wb.prop('W=1'))
print('Cut for "not( not(B=0) or not(W=1) )":')
display(wb.show(cut=cut))
```
### Exercise 4
For the `wb` probability tree, determine the min-cut for whenever the weather
($W$) affects the value of the barometer ($B$). This min-cut should coincide
with the min-cut for the event ($A=0$).
Hint: enumerate all the 4 cases (values for $W$ and $B$) and combine them using
disjunctions.
```
```
#### Solution
```
# Build the min-cut.
cut = (wb.prop('W=0') < wb.prop('B=0')) \
| (wb.prop('W=0') < wb.prop('B=1')) \
| (wb.prop('W=1') < wb.prop('B=0')) \
| (wb.prop('W=1') < wb.prop('B=1'))
# Display.
display(wb.show(cut=cut))
```
## 3. Critical sets
Min-cuts correspond to the smallest set of nodes where it becomes clear whether
an event has occurred or not. Every min-cut has an associated **critical set**:
the set of nodes that **determines** whether an event won't occur. Given an
event, the associated **critical set** is defined as the set of parents of the
event's false set in the min-cut.
Together, a critical set and a min-cut form the set of **mechanisms** that
determine the occurrence of the event.
Let's have a look at a simple example. Here, the critical set is the singleton
containing the root node. Critical sets are computed using the function
`PTree.critical(cut)`, where `cut` is an event's min-cut. We can display the
critical set by providing the optional argument `crit` to the `PTree.show()`
function.
```
# First we add all the nodes.
pt = PTree()
pt.root('O = 1',
[pt.child(1, 'X = 1'),
pt.child(0, 'X = 2'),
pt.child(0, 'X = 3')])
# Get the critical set for a min-cut.
cut = pt.prop('X = 1')
crit = pt.critical(cut)
# Show the critical set.
print('Min-cut for "X=1":', cut)
print('Critical set for "X=1":', crit)
display(pt.show(show_id=True, cut=cut, crit=crit))
```
Let's work out another example. Consider the following probability tree.

Try to predict the min-cut and the critical set of the events $X=1$, $Y=1$, and
$Y=0$.
```
pt = PTree()
pt.root('O = 1', [
pt.child(0.2, 'X = 0, Y = 0'),
pt.child(0.8, 'X = 1', [pt.child(0.3, 'Y = 1'),
pt.child(0.7, 'Y = 0')])
])
# Original tree.
print('Original tree:')
display(pt.show(show_id=True))
# 'X=1'
cut = pt.prop('X=1')
crit = pt.critical(cut)
print('Min-cut and critical set for "X=1":')
display(pt.show(show_id=True, cut=cut, crit=crit))
# 'Y=1'
cut = pt.prop('Y=1')
crit = pt.critical(cut)
print('Min-cut and critical set for "Y=1":')
display(pt.show(show_id=True, cut=cut, crit=crit))
# 'Y=0'
cut = pt.prop('Y=0')
crit = pt.critical(cut)
print('Min-cut and critical set for "Y=0":')
display(pt.show(show_id=True, cut=cut, crit=crit))
```
### Exercise 5
For the `wb` tree, compute and display the mechanisms (i.e. the min-cut and the
critical set) for the following events:
- the world is alien ($A = 1$);
- the barometer goes down ($B = 0$);
- the weather is sunny ($W = 1$);
- the barometer goes down and weather is sunny ($B = 0 \wedge W = 1$).
```
```
#### Solution
```
# Exercise.
# A = 1.
cut = wb.prop('A=1')
crit = wb.critical(cut)
print('Mechanism for "A=1":')
display(wb.show(cut=cut, crit=crit))
# B = 0.
cut = wb.prop('B=0')
crit = wb.critical(cut)
print('Mechanisms for "B=0":')
display(wb.show(cut=cut, crit=crit))
# W = 1.
cut = wb.prop('W=1')
crit = wb.critical(cut)
print('Mechanisms for "W=1":')
display(wb.show(cut=cut, crit=crit))
# B = 0 and W = 1.
cut = wb.prop('B=0') & wb.prop('W=1')
crit = wb.critical(cut)
print('Mechanisms for "B=0 and W=1":')
display(wb.show(cut=cut, crit=crit))
```
We'll return later to critical sets, as they are important for determining the
operations of conditioning and intervening on probability trees.
## 4. Evaluating probabilities
We can also evaluate probabilities of events. For instance, you may ask:
- "$P(R=1)$: What is the probability of recovery?"
- "$P(R=0)$: What is the probability of not recovering?"
- "$P(D=1)$: What is the probability of having the disease?"
- "$P(D=1 \wedge R=0)$: What is the probability of taking the drug and not
recovering?"
- "$P(D=1 \vee R=0)$: What is the probability of taking the drug or not
recovering?"
- "$P(D=1 \prec R=1)$: What is the probability of taking the drug preceding
the recovery?"
To do so, we use the min-cut of the event.
Let's have a look at some of them. Compare to the graph of the probability tree.
```
# Min-cuts for some events
cut1 = med.prop('R=1')
cut2 = med.prop('D=1')
cut1_neg = ~cut1
cut_and = cut2 & cut1_neg
cut_or = cut2 | cut1_neg
cut_prec = cut2 < cut1
print('P(R=1) =', med.prob(cut1))
print('P(R=0) =', med.prob(cut1_neg))
print('P(D=1) =', med.prob(cut2))
print('P(D=1 and R=0) =', med.prob(cut_and))
print('P(D=1 or R=0) =', med.prob(cut_or))
print('P(D=1 precedes R=1) =', med.prob(cut_prec))
display(med.show(show_prob=True))
```
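As a sanity check, we can recover $P(R=1)$ by hand (a quick sketch that just sums the path probabilities through the true set of the min-cut for $R=1$):
```
# Paths reaching R=1: the D=0 and D=1 branches, each via T=0 or T=1.
p_r1 = 0.4 * (0.5 * 0.8 + 0.5 * 0.2) + 0.6 * (0.5 * 0.2 + 0.5 * 0.8)
print(p_r1)                       # 0.5
print(med.prob(med.prop('R=1')))  # agrees with the min-cut evaluation
```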
### Exercise 6
For the `wb` tree, evaluate the probability of the following events:
- the world is ours ($A = 0$) and the barometer goes down ($B = 0$);
- it is not the case that the barometer goes down or the weather
is sunny ($\neg(B = 0 \vee W = 1)$).
Print the probabilities and display the probability trees.
```
```
#### Solution
```
# Exercise.
# A = 0 and B = 0
cut = wb.prop('A=0') & wb.prop('B=0')
print('P(A=0 and B=0) =', wb.prob(cut))
display(wb.show(cut=cut))
# not(B = 0 or W = 1)
cut = ~(wb.prop('B=0') | wb.prop('W=1'))
print('P(not(B=0 or W=1)) =', wb.prob(cut))
display(wb.show(cut=cut))
```
## 5. Conditioning
We have learned how to represent events using min-cuts. Now we can use min-cuts
to **condition** probability trees **on events**. Conditioning allows asking
questions after making **observations**, such as:
- "$P(R=1|T=1)$: What is the probability of recovery given that a patient has
taken the treatment?"
- "$P(D=1|R=1)$: What is the probability of having had the disease given that
a patient has recovered/felt better?"
### How to compute conditions
Conditioning takes a probability tree and produces a new probability tree with
modified transition probabilities. These are obtained by removing all the total
realizations that are **incompatible with the condition**, and then
renormalizing, as illustrated below.
<img src="http://www.adaptiveagents.org/_media/wiki/see.png" alt="Seeing" width="700"/>
In the example, we compute the result of seeing $Y= 1$.
Conditioning on an event proceeds in two steps:
- first, we remove the probability mass of the realizations
passing through the false set of the event’s min-cut
(highlighted in dark, bottom row);
- then we renormalize the probabilities.
We can do this recursively by aggregating the original probabilities
of the true set. The top row shows the result of conditioning a
probability tree on the event $Y= 1$, which also highlights the modified
transition probabilities in red. The bottom row shows the same
operation in a probability mass diagram, which is a representation of a
probability tree that emphasizes the probabilities.
Let's have a look at the drug testing example. We will condition on $R=1$.
Observe how the probabilities change.
```
# Now we condition.
cut = med.prop('R=1')
med_see = med.see(cut)
# Critical set.
crit = med.critical(cut)
# Compare probabilities of events.
print('Before conditioning: P(R=1) =', med.prob(cut))
print('After conditioning: P(R=1 | R=1) =', med_see.prob(cut))
# Display both trees for comparison.
print('\nOriginal tree:')
display(med.show(show_prob=True))
print('Tree after conditioning on "R=1":')
display(med_see.show(cut=cut, crit=crit, show_prob=True))
```
We can condition on composite events too and evaluate the probability of events.
Assume you observe that the treatment was taken and that the patient recovered.
Then it is very likely that the patient had the disease.
```
# Min-cuts.
cut_r = med.prop('R=1')
cut_tr = med.prop('T=1') & med.prop('R=1')
cut_disease = med.prop('D=1')
# Critical set.
crit = med.critical(cut_tr)
# Condition.
med_see_r = med.see(cut_r)
med_see_tr = med.see(cut_tr)
# Now we evaluate the posterior probability of having a disease.
print('P(D = 1) =', med.prob(cut_disease))
print('P(D = 1 | R = 1) =', med_see_r.prob(cut_disease))
print('P(D = 1 | T = 1, R = 1) =', med_see_tr.prob(cut_disease))
# Display prob tree.
print('\nProbability tree after conditioning on "T=1 and R=1":')
display(med_see_tr.show(cut=cut_tr, show_id=True, crit=crit))
```
### Special case: conditioning on trivial events
Let's have a look at a special case: conditioning on **trivial events**, namely
the **sure event** and the **impossible event**.
Observe that conditioning on trivial events does not change the probability
tree.
```
# Create a simple tree.
pt = PTree()
pt.root('O = 1', [
pt.child(0.4, 'X = 0, Y = 0'),
pt.child(0.6, 'X = 1', [pt.child(0.3, 'Y = 0'),
pt.child(0.7, 'Y = 0')]),
])
# Note: every leaf satisfies Y = 0, so "Y = 0" is the sure event.
# Show tree.
print('Original tree:')
display(pt.show())
# Condition on Y = 0.
cut = pt.prop('Y=0')
pt_see_sure = pt.see(cut)
print('Conditioning on "Y = 0":')
display(pt_see_sure.show(cut=cut))
# Condition on not Y = 0.
neg_cut = ~cut
pt_see_impossible = pt.see(neg_cut)
print('Conditioning on "not Y = 0":')
display(pt_see_impossible.show(cut=neg_cut))
```
### Special case: conditioning on an event with probability zero
Let's return to our simple example with three outcomes. Assume we're conditioning
on an event with **probability zero**, which can happen **logically but not
probabilistically**. Using the measure-theoretic definition of conditional
probabilities, we are required to pick a so-called **version** of the
conditional distribution. There are infinitely many choices.
Here, we have settled on the following. If we condition on an event with
probability zero, then we assign uniform probability over all the possible
transitions. This is just one arbitrary way of solving this problem.
See the example below.
```
# Create a simple tree.
pt = PTree()
pt.root(
'O = 1',
[pt.child(1.0, 'X = 1'),
pt.child(0.0, 'X = 2'),
pt.child(0.0, 'X = 3')])
# Let's pick the negative event for our minimal prob tree.
cut = ~pt.prop('X = 1')
display(pt.show(cut=cut))
pt_see = pt.see(cut)
display(pt_see.show(cut=cut))
```
### Exercise 7
For the `wb` tree, print the probability distribution of
- the weather $W$
- and the barometer $B$.
Do this for the following probability trees:
- the original tree
- the probability tree conditioned on it being an alien world ($A = 1$)
- the probability tree conditioned on the weather being sunny ($W = 1$).
What do you observe? Does observing (conditioning) give you any additional
information? If no, why? If yes, why is that?
```
```
#### Solution
```
# Exercise
# No condition.
print('P(W) =', wb.rv('W'))
print('P(B) =', wb.rv('B'))
# Condition on "A = 1"
cut = wb.prop('A=1')
print('P(W | A=1) =', wb.see(cut).rv('W'))
print('P(B | A=1) =', wb.see(cut).rv('B'))
# Condition on "W = 1"
cut = wb.prop('W=1')
print('P(W | W=1) =', wb.see(cut).rv('W'))
print('P(B | W=1) =', wb.see(cut).rv('B'))
```
## 6. Interventions
Interventions are at the heart of causal reasoning.
We have seen how to filter probability trees using observational data through
the use of conditioning. Now we investigate how a probability tree transforms
when it is intervened. An **intervention** is a change to the random process
itself to make something happen, as opposed to a mere filtering of realizations. We can ask
questions like:
- "$P(R=1|T \leftarrow 1)$: What is the probability of recovery given that **I
take the drug**?"
- "$P(D=1|T \leftarrow 1 \wedge R=1)$: What is the probability of having the
disease given **that I take the drug** and that I observe a recovery?"
Here, the notation $T \leftarrow 1$ is a shorthand for the more common notation
$\mathrm{do}(T = 1)$.
### How to compute interventions
Interventions differ from conditioning in the following:
- they change the transition probabilities **minimally**,
so as to make a desired event happen;
- they **do not filter** the total realizations of the probability tree;
- they are **easier to execute** than conditioning, because they only
change the transition probabilities that leave the critical set,
and they do not require the backward induction of probabilities.
See the illustration below.
<img src="http://www.adaptiveagents.org/_media/wiki/do.png" alt="Doing" width="700"/>
Example intervention on $Y \leftarrow 1$. An intervention proceeds in two steps:
- first, it selects the partial realizations starting in a critical node
and ending in a leaf that traverse the false set of the event’s min-cut;
- then it removes their probability mass, renormalizing the probabilities
from the transitions leaving the critical set.
The top row shows the result of intervening a probability tree
on $Y \leftarrow 1$. The bottom row shows the same procedure on
the corresponding probability mass diagram.
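Numerically, the difference boils down to the following hand computation
(again a plain-Python sketch with made-up numbers, independent of `PTree`):
```
# Made-up two-variable chain: X is drawn first, then Y depends on X.
p_x1 = 0.8                       # P(X = 1)
p_y1_given_x = {0: 0.5, 1: 0.7}  # P(Y = 1 | X)
# Conditioning on Y = 1 filters realizations and renormalizes, so
# information flows upstream to X (Bayes' rule).
p_y1 = (1 - p_x1) * p_y1_given_x[0] + p_x1 * p_y1_given_x[1]
print('P(X = 1 | Y = 1)  =', p_x1 * p_y1_given_x[1] / p_y1)  # ~ 0.85
# Intervening Y <- 1 only rewrites the transitions leaving the
# critical set (the Y mechanisms); upstream probabilities stay put.
print('P(X = 1 | Y <- 1) =', p_x1)                           # 0.8
```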
Let's start with a simple comparison to illustrate the difference.
```
pt = PTree()
pt.root('O = 1', [
pt.child(0.2, 'X = 0, Y = 0'),
pt.child(0.8, 'X = 1', [pt.child(0.3, 'Y = 1'),
pt.child(0.7, 'Y = 0')])
])
print('Original:')
display(pt.show(show_prob=True))
# 'Y=1'
cut = pt.prop('Y = 1')
crit = pt.critical(cut)
pt_see = pt.see(cut)
pt_do = pt.do(cut)
print('Condition on "Y=1":')
display(pt_see.show(cut=cut, crit=crit))
print('Intervention on "Y<-1":')
display(pt_do.show(cut=cut, crit=crit))
# 'Y=0'
cut = pt.prop('Y = 0')
crit = pt.critical(cut)
pt_see = pt.see(cut)
pt_do = pt.do(cut)
print('Condition on "Y = 0":')
display(pt_see.show(cut=cut, crit=crit))
print('Intervention on "Y <- 0":')
display(pt_do.show(cut=cut, crit=crit))
```
Notice that the mechanisms for $Y=0$ and $Y=1$ are different. In general, a
single random variable can have **multiple mechanisms** for setting its
individual values.
Let's return to our drug testing example. We investigate the effect of taking
the treatment, that is, by intervening on $T \leftarrow 1$. How do the
probabilities of:
- having the disease ($D = 1$);
- taking the treatment ($T = 1$);
- and recovering ($R = 1$)
change after taking the treatment ($T \leftarrow 1$)?
```
# Min-cuts.
cut_dis = med.prop('D = 1')
cut_rec = med.prop('R = 1')
cut_do = med.prop('T = 1')
# Critical set.
crit_do = med.critical(cut_do)
# Perform intervention.
med_do = med.do(cut_do)
# Display original tree.
print('Original tree:')
print('P(D = 1) =', med.prob(cut_dis))
print('P(T = 1) =', med.prob(cut_do))
print('P(R = 1) =', med.prob(cut_rec))
display(med.show(cut=cut_do, show_prob=True, crit=crit_do))
# Display tree after intervention.
print('Tree after intervening on "T <- 1":')
print('P(D = 1 | T <- 1) =', med_do.prob(cut_dis))
print('P(T = 1 | T <- 1) =', med_do.prob(cut_do))
print('P(R = 1 | T <- 1) =', med_do.prob(cut_rec))
display(med_do.show(cut=cut_do, show_prob=True, crit=crit_do))
```
In other words, for the example above, taking the treatment increases the
chances of recovery. Note also that the base rates (i.e. the probability of
having the disease) are not affected by the decision to take the treatment.
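We can verify this contrast directly with the methods introduced so far (the
printed values depend on the `med` tree defined earlier):
```
# Conditioning on a recovery flows information upstream to the
# disease; enforcing the treatment leaves its base rate untouched.
cut_d = med.prop('D = 1')
print('P(D = 1)          =', med.prob(cut_d))
print('P(D = 1 | R = 1)  =', med.see(med.prop('R = 1')).prob(cut_d))
print('P(D = 1 | T <- 1) =', med.do(med.prop('T = 1')).prob(cut_d))
```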
### Special case: intervening on an event with probability zero
Assume we're intervening on an event with **probability zero**. Recall that this
is possible **logically**, but **not probabilistically**. How do we set the
transition probabilities leaving the critical set? Here again we settle on
assigning uniform probabilities over all the transitions affected by the
intervention.
See the example below.
```
# Create a simple tree.
pt = PTree()
pt.root(
'O = 1',
[pt.child(1.0, 'X = 1'),
pt.child(0.0, 'X = 2'),
pt.child(0.0, 'X = 3')])
# Let's pick the negative event for our minimal prob tree.
cut = ~pt.prop('X = 1')
crit = pt.critical(cut)
# Intervene.
pt_do = pt.do(cut)
# Show results.
print('Before the intervention:')
display(pt.show(cut=cut, crit=crit))
print('After the intervention on "not X <- 1":')
display(pt_do.show(cut=cut, crit=crit))
```
### Exercise 8
For the `wb` tree, print the probability distribution of
- the weather $W$
- and the barometer $B$.
Do this for the following probability trees:
- the original tree
- the probability tree resulting from enforcing it to being
an alien world ($A \leftarrow 1$)
- the probability tree resulting from setting the weather to
being sunny ($W \leftarrow 1$).
What do you observe? Compare these results with your previous exercise, where
you conditioned on the same events. Why are the probabilities different when you
condition and when you intervene? How is this related to the different causal
dependencies in both worlds?
```
```
#### Solution
```
# Exercise
# No intervention.
print('P(W) =', wb.rv('W'))
print('P(B) =', wb.rv('B'))
# Intervention on "A <- 1"
cut = wb.prop('A=1')
print('P(W|A <- 1) =', wb.do(cut).rv('W'))
print('P(B|A <- 1) =', wb.do(cut).rv('B'))
# Condition on "W <- 1"
cut = wb.prop('W=1')
print('P(W|W <- 1) =', wb.do(cut).rv('W'))
print('P(B|W <- 1) =', wb.do(cut).rv('B'))
```
### Exercise 9
Next, evaluate the following probabilities:
- What is the probability of being in our world ($A=0$), given that you
observe a sunny weather ($W=1$) and the barometer going up ($B=1$)?
- What is the probability of being in our world ($A=0$), given that you first
observe a sunny weather ($W=1$) and then **you force** the barometer to go
up ($B\leftarrow 1$)?
- What is the probability of being in our world ($A=0$), given that you first
**force** the barometer to go up ($B\leftarrow 1$) and then observe a sunny
weather ($W=1$)?
Answer the following questions:
- Does conditioning give different results from intervening? If so, why?
- When you mix conditions and interventions, does the order matter? If so,
why?
```
```
#### Solution
```
# Exercise
cutw = wb.prop('W=1')
cutb = wb.prop('B=1')
cuttheta = wb.prop('A=0')
# Question 1
print('P(A = 0 | W = 1 and B = 1) =', wb.see(cutw).see(cutb).prob(cuttheta))
# Question 2
print('P(A = 0 | W = 1 then B <- 1) =', wb.see(cutw).do(cutb).prob(cuttheta))
# Question 3
print('P(A = 0 | B <- 1 then W = 1) =', wb.do(cutb).see(cutw).prob(cuttheta))
display(wb.show())
```
## 7. Counterfactuals
Finally, we have counterfactuals. Counterfactuals are questions about how the
experiment could have gone if something about it were different. For instance:
- "What is the probability of having the disease **had I not recovered**,
given that I have recovered?"
- "Given that I have taken the treatment and recovered, what is the
probability of recovery **had I not taken the treatment**?"
These are tricky questions because they mix two moods:
- **indicative statements** - things that have actually happened;
- **subjunctive statements** - things that could have happened
in an alternate reality/possible world.
Because of this, counterfactuals spawn a new scope of random variables:
<img src="http://www.adaptiveagents.org/_media/wiki/counterfactual.png" alt="Counterfactual" width="400"/>
These two questions above are spelled as follows:
- $P(D^\ast=1|R=1)$, where $D^\ast=D_{R \leftarrow 0}$
- $P(R^\ast=1|T\leftarrow 1; R=1)$, where $R^\ast=R_{T\leftarrow 0}$
Here the random variables with an asterisk $D^\ast, R^\ast$ are copies of the
original random variables $D, R$ that occur in an alternate reality. The
notation $D_{T \leftarrow 0}$ means that the random variable $D$ is in the new
scope spawned by the intervention on $T\leftarrow 0$.
### Computing a counterfactual
The next figure shows how to obtain a counterfactual:
<img src="http://www.adaptiveagents.org/_media/wiki/cf.png" alt="Computing a counterfactual" width="700"/>
The example shows a counterfactual probability tree generated by imposing $Y
\leftarrow 1$, given the factual premise $Z = 1$. Starting from a **reference
probability tree**, we first derive two additional trees: a **factual premise**,
capturing the current state of affairs; and a **counterfactual premise**,
represented as an intervention on the reference tree.
To form the counterfactual we proceed as follows:
- We slice both derived trees along the critical set
of the counterfactual premise.
- Then, we compose the counterfactual tree by
taking the transition probabilities **upstream of the slice**
from the factual premise, and those **downstream of the slice**
from the counterfactual premise.
The events downstream then span a new scope containing copies
of the original random variables (marked with "∗"), ready to
adopt new values.
In particular note that $Z^\ast = 0$ can happen in our alternate
reality, even though we know that $Z = 1$.
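The same splice can be carried out by hand on a made-up disease
$\rightarrow$ treatment $\rightarrow$ recovery chain (a plain-Python sketch,
not the `PTree` implementation):
```
# Abduction + splice by hand (illustrative numbers only).
p_d1 = 0.1                         # P(D = 1)
p_r1 = {(0, 0): 0.9, (0, 1): 0.5,  # P(R = 1 | D, T)
        (1, 0): 0.1, (1, 1): 0.8}
# Factual premise: enforce T <- 1, then observe R = 1.
# Upstream of the slice, update the belief about D under the premise.
joint = {d: (p_d1 if d else 1 - p_d1) * p_r1[(d, 1)] for d in (0, 1)}
norm = sum(joint.values())
p_d_premise = {d: p / norm for d, p in joint.items()}
# Counterfactual premise: the same tree intervened with T <- 0.
# Downstream of the slice, use the R mechanism under T = 0.
p_r1_cf = sum(p_d_premise[d] * p_r1[(d, 0)] for d in (0, 1))
print('P(R* = 1 | T <- 1; R = 1) =', p_r1_cf)  # ~ 0.78
```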
Let's have a look at a minimal example.
```
pt = PTree()
pt.root('O = 1', [
pt.child(0.25, 'X = 0', [
pt.child(0.25, 'Y = 0',
[pt.child(0.1, 'Z = 0'),
pt.child(0.9, 'Z = 1')]),
pt.child(0.75, 'Y = 1',
[pt.child(0.2, 'Z = 0'),
pt.child(0.8, 'Z = 1')]),
]),
pt.child(0.75, 'X = 1',
[pt.child(0.75, 'Y = 0, Z = 0'),
pt.child(0.25, 'Y = 1, Z = 0')])
])
print('Original:')
display(pt.show())
# Condition on 'Y=0', do 'Y=1'
cut_see = pt.prop('Y=0')
cut_do = pt.prop('Y=1')
# Critical set.
crit = pt.critical(cut_do)
# Evaluate conditional, intervention, and counterfactual.
pt_see = pt.see(cut_see)
pt_do = pt.do(cut_do)
pt_cf = pt.cf(pt_see, cut_do)
# Display results.
print('Condition on "Y = 0":')
display(pt_see.show(cut=cut_see, crit=crit))
print('Intervention on "Y <- 1":')
display(pt_do.show(cut=cut_do, crit=crit))
print('Counterfactual with premise "Y = 0" and subjunctive "Y = 1":')
display(pt_cf.show(cut=cut_do, crit=crit))
```
Now we return to our drug testing example. Let's ask the two questions we asked
before. We start with the question: "What is the probability of having the
disease **had I not recovered**, given that I have recovered?", that is
$$P(D^\ast=1|R=1), \qquad D^\ast=D_{R \leftarrow 0}.$$
```
# Cuts.
cut_disease = med.prop('D = 1')
cut_recovery = med.prop('R = 1')
cut_not_recovery = ~cut_recovery
# Critical.
crit = med.critical(cut_not_recovery)
# Compute counterfactual:
# - compute factual premise,
# - use factual premise and subjunctive premise to compute counterfactual.
med_factual_prem = med.see(cut_recovery)
med_cf = med.cf(med_factual_prem, cut_not_recovery)
print('Baseline:')
print('P(D = 1) =', med.prob(cut_disease))
display(med.show())
print('Premise:')
print('P(D = 1 | R = 1) =', med_factual_prem.prob(cut_disease))
display(med_factual_prem.show())
print('Counterfactual:')
print('P(D* = 1 | R = 1) =', med_cf.prob(cut_disease), ', D* = D[R <- 0]')
display(med_cf.show(crit=crit, cut=cut_not_recovery))
```
As we can see, the probability of the disease in the indicative and in the
counterfactual are the same, i.e. $P(D^\ast=1|R=1) = P(D=1|R=1)$. This is
because the disease $D$ lies upstream of the critical set, so the
counterfactual intervention on the recovery cannot change its probability.
Let's have a look at the second question: $$P(R^\ast=1|T\leftarrow 1;
R=1), \qquad R^\ast=R_{T\leftarrow 0}$$
```
# Cuts.
cut_treatment = med.prop('T = 1')
cut_not_treatment = ~cut_treatment
cut_recovery = med.prop('R = 1')
# Critical.
crit = med.critical(cut_not_treatment)
# Compute counterfactual:
# - compute factual premise,
# - use factual premise and counterfactual premise to compute counterfactual.
med_factual_prem = med.do(cut_treatment).see(cut_recovery)
med_cf = med.cf(med_factual_prem, cut_not_treatment)
# Display results.
print('Baseline:')
print('P(R = 1) =', med.prob(cut_recovery))
display(med.show())
print('Premise:')
print('P(R = 1 | T <- 1 and R = 1) =', med_factual_prem.prob(cut_recovery))
display(med_factual_prem.show())
print('Counterfactual:')
print('P(R* = 1 | T <- 1 and R = 1) =', med_cf.prob(cut_recovery),
', R* = R[T <- 0]')
display(med_cf.show(cut=cut_not_treatment, crit=crit))
```
Hence, if I had not taken the treatment, then the probability of recovery would
have been lower. Why is that?
- In our premise, I have taken the treatment and
then observed a recovery.
- This implies that, most likely, I had the disease,
since taking the treatment when I don't have the disease is risky and can lead
to illness.
- Thus, knowing that I probably have the disease, I know that, had I
not taken the treatment, I would most likely not have recovered.
### Exercise 10
Consider the drug testing probability tree `med`.
- Assume you take the drug ($T \leftarrow 1$) and you feel bad afterwards
($R = 0$).
- Given this information, what is the probability of recovery ($R = 1$) had
you not taken the drug ($T = 0$)?
Compute the **regret**, i.e. the difference: $$ \mathbb{E}[ R^\ast | T
\leftarrow 1; R = 0 ] - \mathbb{E}[ R | T \leftarrow 1; R = 0 ], $$ where
$R^\ast = R_{T \leftarrow 0}$.
```
```
#### Solution
```
# Exercise
med_prem = med.do(med.prop('T=1')).see(med.prop('R=0'))
med_cf = med.cf(med_prem, med.prop('T=0'))
print('P(R* = 1 | T <- 1, R = 0) =', med_cf.prob(med.prop('R=1')))
regret = med_cf.expect('R') - med_prem.expect('R')
print('Regret = ', regret)
display(med_prem.show(cut=med.prop('R=0')))
```
### Exercise 11
Take the probability tree `wb`. Evaluate the following counterfactuals:
1. Assume that you set the world to ours ($A \leftarrow 0$) and the weather to
sunny ($W \leftarrow 1$). What is the probability distribution of observing
a high barometer value ($B = 1$) had you set the weather to rainy ($W
\leftarrow 0$)? Does the fact that you set the world and the weather affect
the value of the counterfactual?
2. Assume that you set the barometer to a high value ($B \leftarrow 1$), and
you observe that the weather is sunny ($W=1$). What is the probability of
observing a sunny weather ($W=1$) had you set the barometer to a low value
($B \leftarrow 0$)?
These are highly non-trivial questions. What do you observe? Do the results make
sense to you?
```
```
#### Solution
```
# Question 1.
wb_prem = wb.do(wb.prop('A=0')).do(wb.prop('W=1'))
wb_cf = wb.cf(wb_prem, wb.prop('W=0'))
print('P(B*| A <- 0, W <- 1) =', wb_cf.rv('B'), ' where B* = B[W <- 0]')
display(wb_cf.show(show_prob=True, cut=wb.prop('B=1')))
# Question 2.
wb_prem = wb.do(wb.prop('B=1')).see(wb.prop('W=1'))
wb_cf = wb.cf(wb_prem, wb.prop('B=0'))
print('P(W* | B <- 1 then W = 1) =', wb_cf.rv('W'), ' where W* = W[B <- 0]')
display(wb_cf.show(show_prob=True, cut=wb.prop('W=1')))
```
# Part II: Examples
## Construction of probability trees using factory functions
Building probability trees can be difficult, especially when we have to manually
specify all its nodes.
To simplify this, we could design a function `factory(bvar)` which:
- receives a dictionary `bvar` of bound random variables, such as
`{ 'X': '1', 'Y': '0' }`
- and returns a list of transitions and their statements, such as
`[(0.3, 'Z = 0'), (0.2, 'Z = 1'), (0.5, 'Z = 2')].` If all relevant
events have been defined already, return `None`.
Such a function contains all the necessary information for building a
probability tree. We call this a **probability tree factory**. We can pass
such a factory function to the method `PTree.fromFunc()` to build the
probability tree.
The advantage of using this method is that we can exploit symmetries (e.g.
conditional independencies) to code a much more compact description of the
probability tree. Essentially, it is like specifying a probabilistic program.
Let's experiment with this.
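As a warm-up, here is a minimal factory following this contract (a sketch,
not one of the original examples) describing two independent fair coin flips:
```
# Minimal factory sketch: two independent fair coin flips.
def two_flips(bvar):
    if 'X' not in bvar:
        return [(0.5, 'X=0'), (0.5, 'X=1')]
    if 'Y' not in bvar:
        return [(0.5, 'Y=0'), (0.5, 'Y=1')]
    return None  # all events defined

flips = PTree.fromFunc(two_flips, 'Root = 1')
print(flips.sample())
display(flips.show())
```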
## Burglar, Earthquake, Alarm
Let's start with a classical example: a burglar alarm. The alarm gets
triggered by a burglar breaking into our home. However, the alarm can
also be set off by an earthquake.
Let's define the factory function.
```
def alarm(bvar):
# Define the burglar and earthquake events.
if 'Burglar' not in bvar:
pb = 0.1 # Probability of burglar
pe = 0.001 # Probability of earthquake
return [((1 - pb) * (1 - pe), 'Burglar=0, Earthquake=0'),
((1 - pb) * pe, 'Burglar=0, Earthquake=1'),
(pb * (1 - pe), 'Burglar=1, Earthquake=0'),
(pb * pe, 'Burglar=1, Earthquake=1')]
# Define the alarm event.
if 'Alarm' not in bvar:
if bvar['Burglar'] == '0' and bvar['Earthquake'] == '0':
return [(0.999, 'Alarm=0'), (0.001, 'Alarm=1')]
if bvar['Burglar'] == '0' and bvar['Earthquake'] == '1':
return [(0.01, 'Alarm=0'), (0.99, 'Alarm=1')]
if bvar['Burglar'] == '1' and bvar['Earthquake'] == '0':
return [(0.1, 'Alarm=0'), (0.9, 'Alarm=1')]
else:
return [(0.001, 'Alarm=0'), (0.999, 'Alarm=1')]
# All the events defined.
return None
```
Now, let's create the probability tree.
```
# Create the probability tree.
al = PTree.fromFunc(alarm, 'Root = 1')
# Print all the random variables.
print('Random variables:', al.rvs())
print('\nP(Alarm) =', al.rv('Alarm'))
print('\nOriginal probability tree:')
display(al.show())
print('\nSome samples from the probability tree:')
for k in range(5):
print(al.sample())
```
Assume now you hear the alarm. Which explanation is more likely:
did the earthquake or the burglar trigger the alarm?
```
# Condition on the alarm going off.
cut = al.prop('Alarm=1')
crit = al.critical(cut)
al_see = al.see(cut)
# Compute probability distributions for earthquake and burglar.
print('P(Earthquake = 1 | Alarm = 1) =', al_see.prob(al.prop('Earthquake=1')))
print('P(Burglar = 1 | Alarm = 1) =', al_see.prob(al.prop('Burglar=1')))
# Display the conditional probability tree.
print('\nConditional probability tree:')
display(al_see.show(show_prob=True, cut=cut, crit=crit))
print('\nSome samples from the conditional probability tree:')
for k in range(5):
print(al_see.sample())
```
As we can see, it is far more likely that the burglar set off the alarm.
If we now tamper with the alarm, setting it off, then what is the probability
that there was a burglar or an earthquake?
```
# Intervene on the alarm going off.
cut = al.prop('Alarm=1')
crit = al.critical(cut)
al_do = al.do(cut)
# Compute probability distributions for earthquake and burglar.
print('P(Earthquake = 1 | Alarm <- 1) =', al_do.prob(al.prop('Earthquake=1')))
print('P(Burglar = 1 | Alarm <- 1) =', al_do.prob(al.prop('Burglar=1')))
# Display the intervened probability tree.
print('\nIntervened probability tree:')
display(al_do.show(show_prob=True, cut=cut, crit=crit))
print('\nSome samples from the intervened probability tree:')
for k in range(5):
print(al_do.sample())
```
Now we observe that the probabilities of the burglar and earthquake
events are exactly the base rates - we have severed the
causal dependencies connecting those events with the alarm.
## Coin toss prediction
Let's build another probability tree. This is a discrete approximation to a
process having a continuous random variable: a **Beta-Bernoulli process**.
This problem was first studied by Rev. Thomas Bayes ("An Essay towards
solving a Problem in the Doctrine of Chances", 1763).
The story goes as follows. Someone picks a coin with an unknown bias and then throws it repeatedly. Our goal is to infer the next outcome based only on the observed outcomes (and not on the latent bias). The unknown bias is drawn
uniformly from the interval [0, 1].
Let's start by coding the factory function for the discretized Beta-Bernoulli
process. Here we assume that the prior distribution over the bias is uniform,
and discretized into `divtheta = 41` bins. Then `T = 5` coin tosses follow.
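For reference, with a uniform prior the exact posterior predictive for the
next toss is given by Laplace's **rule of succession**,
$$P(X_{n+1}=1 \mid h \text{ heads in } n \text{ tosses}) = \frac{h+1}{n+2},$$
since the uniform Beta$(1,1)$ prior updates to a Beta$(h+1, n-h+1)$ posterior
with mean $(h+1)/(n+2)$. The discretized tree should approximate this value
closely, which makes it a handy sanity check for the inference below.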
```
#@title Beta-Bernoulli factory function.
def betaBernoulli(bvar, divtheta=41, T=5):
# Root: defined.
# Define biases Bias=0, 1/(divtheta-1), 2/(divtheta-1), ..., 1
if 'Bias' not in bvar:
ptheta = 1.0 / divtheta
biases = [(ptheta, 'Bias=' + str(theta))
for theta in np.linspace(0.0, 1.0, divtheta, endpoint=True)]
return biases
# Biases: defined.
# Now create Bernoulli observations X_1, X_2, ... , X_T,
# where X_t=0 or X_t=1.
t = 1
for var in bvar:
if '_' not in var:
continue
t += 1
if t <= T:
theta = float(bvar['Bias'])
varstr = 'X_' + str(t)
return [(1 - theta, varstr + '=0'), (theta, varstr + '=1')]
# All the events defined.
return None
```
We now build the probability tree. Let's also print the
random variables and get a few samples.
```
# Create tree.
bb = PTree.fromFunc(betaBernoulli)
# Show random variables.
print('Random variables:')
print(bb.rvs())
# Get sample.
print('\nSamples from the process:')
for n in range(10):
print(bb.sample())
```
The tree itself is quite large (over 1000 nodes); such trees quickly become
too large to display as `T` grows. Let's display it anyway.
```
bb.show()
```
### Exercise
Let's do some inference now.
Assume you observe the first four coin tosses. They are
```
observations = ['X_1=1', 'X_2=1', 'X_3=0', 'X_4=1']
```
Answer the following questions:
1. What is the prior distribution over the unknown bias?
2. What is the probability of the next outcome being Heads (`X_5=1`)?
3. Given the observations, what is the distribution over the
latent bias?
4. Rather than observing the four outcomes, assume instead
that you enforce the outcomes. What is the probability of
the next outcome being Heads?
5. What is the distribution over the latent bias if you enforce
the data?
```
```
#### Solution
```
# Prepare the cut for the data.
observations = ['X_1=1', 'X_2=1', 'X_3=0', 'X_4=1']
cut_data = None
for s in observations:
if cut_data is None:
cut_data = bb.prop(s)
else:
cut_data &= bb.prop(s)
# Prepare the cut for the query.
cut_query = bb.prop('X_5=1')
# Question 1
bias = bb.rv('Bias')
print('P(Bias) :\n' + str(bias))
# Question 2
bb_cond = bb.see(cut_data)
print('\nP(X_5 = 1 | Data) = ' + str(bb_cond.prob(cut_query)))
# Question 3
bias_cond = bb_cond.rv('Bias')
print('\nP(Bias | Data) :\n' + str(bias_cond))
# Question 4
bb_int = bb.do(cut_data)
print('\nP(X_5 = 1 | do(Data)) = ' + str(bb_int.prob(cut_query)))
# Question 5
bias_int = bb_int.rv('Bias')
print('\nP(Bias | do(Data)) :\n' + str(bias_int))
# Display distribution over bias.
print('\nDistribution over biases for the three settings:')
fig = plt.figure(figsize=(15, 5))
# Show prior.
plt.subplot(131)
res = bb.rv('Bias')
theta = np.array([theta for _, theta in res], dtype=float)
prob = np.array([prob for prob, _ in res])
plt.fill_between(theta, prob, 0)
plt.title('P(Bias)')
plt.ylim([-0.005, 0.1])
plt.xlabel('Bias')
# Show posterior after conditioning.
plt.subplot(132)
res = bb_cond.rv('Bias')
theta = np.array([theta for _, theta in res], dtype=float)
prob = np.array([prob for prob, _ in res])
plt.fill_between(theta, prob, 0)
plt.title('P(Bias|D)')
plt.ylim([-0.005, 0.1])
plt.xlabel('Bias')
# Show posterior after intervening.
plt.subplot(133)
res = bb_int.rv('Bias')
theta = np.array([theta for _, theta in res], dtype=float)
prob = np.array([prob for prob, _ in res])
plt.fill_between(theta, prob, 0)
plt.title('P(Bias|do(D))')
plt.ylim([-0.005, 0.1])
plt.xlabel('Bias')
plt.show()
```
## Who's in charge?
In this problem we will look at causal induction. Alice and Bob play a game
where both of them shout either 'chicken' or 'egg'.
At the beginning of the game, one of them is chosen to be the leader, and
the other, the follower. The follower will always attempt to match the
leader: so if Alice is the leader and Bob the follower, and Alice
shouts 'chicken', then Bob will attempt to shout 'chicken' too (succeeding
with probability 0.75, as set in the code below).
A typical game would look like this:
- Round 1: Alice shouts 'egg', Bob shouts 'chicken'.
- Round 2: Alice shouts 'chicken', Bob shouts 'chicken'.
- Round 3: Alice shouts 'chicken', Bob shouts 'chicken'.
- Round 4: Alice shouts 'egg', Bob shouts 'egg'.
Note that you hear both of them shouting simultaneously.
Our goal is to discover who's the leader. This is a **causal induction
problem**, because we want to figure out whether:
- hypothesis `Leader = Alice`: Alice $\rightarrow$ Bob;
- or hypothesis `Leader = Bob`: Bob $\rightarrow$ Alice.
Let's start by defining the factory function.
```
#@title Leader factory function.
def leader(bvar, T=2):
p = 0.75 # Probability of match.
# Define leader.
if 'Leader' not in bvar:
return [(0.5, 'Leader=Alice'), (0.5, 'Leader=Bob')]
# Now create the shouts.
# Figure out the leader.
if bvar['Leader'] == 'Alice':
leader = 'Alice'
follower = 'Bob'
else:
leader = 'Bob'
follower = 'Alice'
# Define random variables of shouts.
for t in range(1, T+1):
leader_str = leader + '_' + str(t)
if leader_str not in bvar:
return [(0.5, leader_str + '=chicken'), (0.5, leader_str + '=egg')]
follower_str = follower + '_' + str(t)
if follower_str not in bvar:
if bvar[leader_str] == 'chicken':
return [(p, follower_str + '=chicken'), (1-p, follower_str + '=egg')]
else:
return [(1-p, follower_str + '=chicken'), (p, follower_str + '=egg')]
# We're done.
return None
# Create true environment.
class ChickenEggGame:
def __init__(self, T=2):
self.T = T
self.pt = PTree.fromFunc(lambda bvar: leader(bvar, T=T))
smp = self.pt.sample()
# do() returns a new tree, so assign it back to fix the leader.
self.pt = self.pt.do(self.pt.prop('Leader=' + smp['Leader']))
self.time = 0
def step(self, name, word):
# Check whether parameters are okay.
if name != 'Alice' and name != 'Bob':
raise Exception('"name" has to be either "Alice" or "Bob".')
if word != 'chicken' and word != 'egg':
raise Exception('"word" has to be either "chicken" or "egg".')
if self.time > self.T -1:
raise Exception('The game has only ' + str(self.T) + ' rounds.')
# Enforce instruction.
self.time = self.time + 1
cut_do = self.pt.prop(name + '_' + str(self.time) + '=' + word)
self.pt = self.pt.do(cut_do)
# Produce next sample.
smp = self.pt.sample()
if name == 'Alice':
varname = 'Bob_' + str(self.time)
else:
varname = 'Alice_' + str(self.time)
response = smp[varname]
cut_see = self.pt.prop(varname + '=' + response)
self.pt = self.pt.see(cut_see)
return varname + '=' + response
def reveal(self):
smp = self.pt.sample()
return smp['Leader']
```
The factory function is called `leader()`.
Let's first have a look at what the probability tree
looks like for `T = 2` rounds.
```
ld = PTree.fromFunc(lambda bvar: leader(bvar, T=2), root_statement='Root = 1')
display(ld.show())
```
Notice how the transition probabilities of `Alice_n, Bob_n`,
`n = 1, 2`, are identical within the subtree rooted at
`Leader = Alice`. The same is true for the transition probabilities
within the subtree rooted at `Leader = Bob`.
Now, let's create a new probability tree for a slightly longer game,
namely `T = 5`. **This tree is too big to display** (over 2K nodes)
but we can still sample from it.
```
T = 5
ld = PTree.fromFunc(lambda bvar: leader(bvar, T=T), root_statement='Root = 1')
print('Samples from the probability tree:')
for n in range(T):
print(ld.sample())
```
Let's first figure out the joint distribution over Alice's and Bob's shouts
in the first round (remember, rounds are i.i.d.) when Alice is the leader,
and compare this to the situation when Bob is the leader.
We can do this by setting `Leader` to whoever we want to be the leader,
and then enumerate the joint probabilities over the combinations of
shouts.
```
import itertools
# Define cuts for both leaders.
cut_leader_a = ld.prop('Leader = Alice')
cut_leader_b = ld.prop('Leader = Bob')
# The words they can say.
words = ['chicken', 'egg']
# Print the joint distribution over
# shouts when Alice is the leader.
print('Leader = Alice')
for word_a, word_b in itertools.product(words, words):
cut = ld.prop('Alice_1 = ' + word_a) & ld.prop('Bob_1 = ' + word_b)
prob = ld.do(cut_leader_a).prob(cut)
fmt = 'P( Alice_1 = {}, Bob_1 = {} | Leader <- Alice) = {:.2f}'
print(fmt.format(word_a, word_b, prob))
# Print the joint distribution over
# shouts when Bob is the leader.
print('\nLeader = Bob')
for word_a, word_b in itertools.product(words, words):
cut = ld.prop('Alice_1 = ' + word_a) & ld.prop('Bob_1 = ' + word_b)
prob = ld.do(cut_leader_b).prob(cut)
fmt = 'P( Alice_1 = {}, Bob_1 = {} | Leader <- Bob) = {:.2f}'
print(fmt.format(word_a, word_b, prob))
```
Looking at the joint probabilities, **we realize that they are identical**.
This means that we cannot identify who's the leader by conditioning on
our observations. Let's try this with the following observations:
```
obs = [
'Alice_1=chicken', 'Bob_1=egg',
'Alice_2=egg', 'Bob_2=egg',
'Alice_3=egg', 'Bob_3=egg'
]
```
We now compare the prior and posterior probabilities of Bob being the leader.
```
import functools
obs = [
'Alice_1=chicken', 'Bob_1=egg',
'Alice_2=egg', 'Bob_2=egg',
'Alice_3=egg', 'Bob_3=egg'
]
cuts_data = [ld.prop(data) for data in obs]
cut_data = functools.reduce(lambda x, y: x & y, cuts_data)
cut_query = ld.prop('Leader=Bob')
prob_prior = ld.prob(cut_query)
prob_post = ld.see(cut_data).prob(cut_query)
print('Prior and posterior probabilities:')
print('P( Leader = Bob ) = {:.2f}'.format(prob_prior))
print('P( Leader = Bob | Data ) = {:.2f}'.format(prob_post))
```
As you can see, this doesn't work - we can't disentangle the two hypotheses
just by looking at the data.
Intuitively, we could figure out whether Alice or Bob is the leader by
intervening on the game - for instance, by instructing Bob to say what
we want and observe Alice's reaction:
- if Alice matches Bob many times, then she's probably the follower;
- instead if Alice does not attempt to match Bob, then we can conclude
that Alice is the leader.
Crucially, we need to **interact** in order to collect the data.
It's not enough to passively observe. For this, we'll use
an implementation of the game (`ChickenEggGame`) that allows
us to instruct either Alice or Bob to shout the word we want.
```
T = 5
game = ChickenEggGame(T=T)
# Do T rounds.
for n in range(T):
reply = game.step('Alice', 'chicken')
print(reply)
# Reveal.
print('The true leader is: ' + game.reveal())
```
### Exercise
Using `ChickenEggGame`, play `T=5` rounds, giving an instruction each round.
Use a copy of the probability tree `ld` to record the results,
appropriately distinguishing between conditions and interventions.
Finally, compute the posterior probability of Alice being the
leader and compare with ground truth (using the `reveal` method).
```
```
#### Solution
```
import copy
T = 5
game = ChickenEggGame(T=T)
# Do T rounds.
print('Game:')
ldg = copy.deepcopy(ld)
for t in range(1, T+1):
reply = game.step('Alice', 'chicken')
instruction = 'Alice_' + str(t) + '=chicken'
ldg = ldg.do(ldg.prop(instruction))
ldg = ldg.see(ldg.prop(reply))
print(instruction + ', ' + reply)
# Prediction.
print('\nPrediction:')
cut_query = ldg.prop('Leader=Alice')
prob_post = ldg.prob(cut_query)
print('P(Leader = Alice | Data) = {:.5f}'.format(prob_post))
# Reveal ground truth.
print('\nGround truth:')
print('Leader = ' + game.reveal())
```
# Visualization
## Matplotlib
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="https://matplotlib.org/_static/logo2.png" alt="NumPy Logo" style="height: 150px;"></div>
## Objectives
1. Create a basic line plot.
1. Add labels and grid lines to the plot.
1. Plot multiple series of data.
1. Plot imshow, contour, and filled contour plots.
*This notebook was modified from one developed by Unidata*
## Getting Help with Matplotlib
Here are some important resources for learning more about Matplotlib and getting help.
- [NCAR Hackathons Data Visualization in Python Guide](https://ncar-hackathons.github.io/visualization)
- [Matplotlib documentation](http://matplotlib.org)
- [Matplotlib `plot` documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot)
- [Matplotlib GitHub Issue Tracker](https://github.com/matplotlib/matplotlib/issues)
- [Matplotlib questions on StackOverflow](https://stackoverflow.com/questions/tagged/matplotlib)
## Plotting with Matplotlib
Matplotlib is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.
The first step is to set up our notebook environment so that matplotlib plots appear inline as images:
```
%matplotlib inline
```
Next we import the matplotlib library's `pyplot` interface; this interface is the simplest way to create new Matplotlib figures. To shorten this long name, we import it as `plt` to keep things short but clear.
```
import matplotlib.pyplot as plt
import numpy as np
```
Now we generate some data to use while experimenting with plotting:
```
times = np.array([ 93., 96., 99., 102., 105., 108., 111., 114., 117.,
120., 123., 126., 129., 132., 135., 138., 141., 144.,
147., 150., 153., 156., 159., 162.])
temps = np.array([310.7, 308.0, 296.4, 289.5, 288.5, 287.1, 301.1, 308.3,
311.5, 305.1, 295.6, 292.4, 290.4, 289.1, 299.4, 307.9,
316.6, 293.9, 291.2, 289.8, 287.1, 285.8, 303.3, 310.])
```
Now we come to two quick lines to create a plot. Matplotlib has two core objects: the `Figure` and the `Axes`. The `Axes` is an individual plot with an x-axis, a y-axis, labels, etc.; it has all of the various plotting methods we use. A `Figure` holds one or more `Axes` on which we draw; think of the `Figure` as the level at which things are saved to files (e.g. PNG, SVG).

Below, the first line asks for a `Figure` 10 inches by 6 inches. We then ask for an `Axes` or subplot on the `Figure`. After that, we call `plot`, with `times` as the data along the x-axis (independent values) and `temps` as the data along the y-axis (the dependent values).
```
# Create a figure
fig = plt.figure(figsize=(10, 6))
# Ask, out of a 1x1 grid, the first axes.
ax = fig.add_subplot(1, 1, 1)
# Plot times as x-variable and temperatures as y-variable
ax.plot(times, temps)
```
From there, we can do things like ask the axis to add labels for x and y:
```
# Add some labels to the plot
ax.set_xlabel('Time')
ax.set_ylabel('Temperature')
# Prompt the notebook to re-display the figure after we modify it
fig
```
We can also add a title to the plot:
```
ax.set_title('GFS Temperature Forecast', fontdict={'size':16})
fig
```
Of course, we can do so much more...
```
# Set up more temperature data
temps_1000 = np.array([316.0, 316.3, 308.9, 304.0, 302.0, 300.8, 306.2, 309.8,
313.5, 313.3, 308.3, 304.9, 301.0, 299.2, 302.6, 309.0,
311.8, 304.7, 304.6, 301.8, 300.6, 299.9, 306.3, 311.3])
```
Here we call `plot` more than once to plot multiple series of temperature on the same plot; when plotting we pass `label` to `plot` to facilitate automatic legend creation, which happens with the `legend` call. We also add gridlines to the plot using the `grid()` call.
```
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Plot two series of data
# The label argument is used when generating a legend.
ax.plot(times, temps, label='Temperature (surface)')
ax.plot(times, temps_1000, label='Temperature (1000 mb)')
# Add labels and title
ax.set_xlabel('Time')
ax.set_ylabel('Temperature')
ax.set_title('Temperature Forecast')
# Add gridlines
ax.grid(True)
# Add a legend to the upper left corner of the plot
ax.legend(loc='upper left')
```
We're not restricted to the default look of the plots, but rather we can override style attributes, such as `linestyle` and `color`. `color` can accept a wide array of options for color, such as `red` or `blue` or HTML color codes.
```
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Specify how our lines should look
ax.plot(times, temps, color='red', label='Temperature (surface)')
ax.plot(times, temps_1000, color='red', linestyle='--',
label='Temperature (isobaric level)')
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Temperature')
ax.set_title('Temperature Forecast')
ax.grid(True)
ax.legend(loc='upper left')
```
### Exercise
* Use `add_subplot` to create two different subplots on the figure
* Create one subplot for temperature, and one for dewpoint
* Set the title of each subplot as appropriate
* Use `ax.set_xlim` and `ax.set_ylim` to control the plot boundaries
* **BONUS:** Experiment with passing `sharex` and `sharey` to `add_subplot` to <a href="https://matplotlib.org/gallery/subplots_axes_and_figures/shared_axis_demo.html#sphx-glr-gallery-subplots-axes-and-figures-shared-axis-demo-py">share plot limits</a>
```
# Fake dewpoint data to plot
dewpoint = 0.9 * temps
dewpoint_1000 = 0.9 * temps_1000
# Create the figure
fig = plt.figure(figsize=(10, 6))
# YOUR CODE GOES HERE
```
#### Solution
```
# %load solutions/subplots.py
```
## Scatter Plots
Maybe it doesn't make sense to plot your data as a line plot, but with markers (a scatter plot). We can do this by setting the `linestyle` to none and specifying a marker type, size, color, etc.
```
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Specify no line with circle markers
ax.plot(temps, temps_1000, linestyle='None', marker='o', markersize=5)
ax.set_xlabel('Temperature (surface)')
ax.set_ylabel('Temperature (1000 hPa)')
ax.set_title('Temperature Cross Plot')
ax.grid(True)
```
You can also use the `scatter` method, which is slower but gives you more control, such as being able to color the points individually based upon a third variable.
```
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Specify no line with circle markers
ax.scatter(temps, temps_1000)
ax.set_xlabel('Temperature (surface)')
ax.set_ylabel('Temperature (1000 hPa)')
ax.set_title('Temperature Cross Plot')
ax.grid(True)
```
### Exercise
* Beginning with our code above, add the `c` keyword argument to the `scatter` call and color the points by the difference between the surface and 1000 hPa temperature.
* Add a 1:1 line to the plot (slope of 1, intercept of zero). Use a black dashed line.
* **BONUS:** Change the color map to be something more appropriate for this plot.
* **BONUS:** Try to add a colorbar to the plot (have a look at the matplotlib documentation for help).
```
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# YOUR CODE GOES HERE
ax.set_xlabel('Temperature (surface)')
ax.set_ylabel('Temperature (1000 hPa)')
ax.set_title('Temperature Cross Plot')
ax.grid(True)
```
#### Solution
```
# %load solutions/color_scatter.py
```
## imshow/contour
- `imshow` displays the values in an array as colored pixels, similar to a heat map.
- `contour` creates contours around data.
- `contourf` creates filled contours around data.
First let's create some fake data to work with - let's use a bivariate normal distribution.
```
x = y = np.arange(-3.0, 3.0, 0.025)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
```
Let's start with a simple imshow plot.
```
fig, ax = plt.subplots()
im = ax.imshow(Z, interpolation='bilinear', cmap='RdYlGn',
origin='lower', extent=[-3, 3, -3, 3])
```
We can also create contours around the data.
```
fig, ax = plt.subplots()
ax.contour(X, Y, Z)
fig, ax = plt.subplots()
c = ax.contour(X, Y, Z, levels=np.arange(-2, 2, 0.25))
ax.clabel(c)
fig, ax = plt.subplots()
c = ax.contourf(X, Y, Z)
```
### Exercise
* Create a figure using imshow and contour that is a heatmap in the colormap of your choice. Overlay black contours with a 0.5 contour interval.
```
# YOUR CODE GOES HERE
```
#### Solution
```
# %load solutions/contourf_contour.py
```
## Resources
The goal of this tutorial is to provide an overview of the use of the Matplotlib library. It covers creating simple line plots, but it is by no means comprehensive. For more information, try looking at the:
- [Matplotlib Documentation](http://matplotlib.org)
- [Matplotlib `plot` documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot)
<div class="alert alert-block alert-success">
<p>Previous: <a href="00_intro.ipynb">Introduction</a></p>
<p>Next: <a href="02_cartopy.ipynb">Cartopy</a></p>
</div>
# Load and Process models
This script will load the M models in the collection using cobrapy, and convert them to a normalized format. They will also be exported to the "mat" format used by the COBRA toolbox.
This requires [cobrapy](https://opencobra.github.io/cobrapy) version 0.4.0b1 or later.
```
import os
import warnings
import re
from itertools import chain
import sympy
import scipy
import scipy.io
import cobra
from read_excel import read_excel
```
## Read in Models
```
def open_exchanges(model, amount=10):
for reaction in model.reactions:
if len(reaction.metabolites) == 1:
# Ensure we are not creating any new sinks
if list(reaction.metabolites.values())[0] > 0:
reaction.upper_bound = max(reaction.upper_bound, amount)
else:
reaction.lower_bound = min(reaction.lower_bound, -amount)
def add_exchanges(model, extracellular_suffix="[e]", uptake_amount=10):
for metabolite in model.metabolites:
if str(metabolite).endswith(extracellular_suffix):
if len(metabolite.reactions) == 0:
print "no reactions for " + metabolite.id
continue
if min(len(i.metabolites) for i in metabolite.reactions) > 1:
EX_reaction = cobra.Reaction("EX_" + metabolite.id)
EX_reaction.add_metabolites({metabolite: 1})
model.add_reaction(EX_reaction)
EX_reaction.upper_bound = uptake_amount
EX_reaction.lower_bound = -uptake_amount
```
### SBML models
These models will be read in using [libSBML](http://sbml.org/Software/libSBML) through cobrapy. Some models will need their exchanges opened.
```
legacy_SBML = {"T_Maritima", "iNJ661m", "iSR432", "iTH366"}
open_boundaries = {"iRsp1095", "iWV1314", "iFF708", "iZM363"}
models = cobra.DictList()
for i in sorted(os.listdir("sbml")):
if not i.endswith(".xml"):
continue
model_id = i[:-4]
filepath = os.path.join("sbml", i)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
m = cobra.io.read_legacy_sbml(filepath) if model_id in legacy_SBML \
else cobra.io.read_sbml_model(filepath)
m.id = m.description = model_id.replace(".", "_")
if m.id in open_boundaries:
open_exchanges(m)
models.append(m)
```
### Models available in COBRA Toolbox "mat" format
```
for i in sorted(os.listdir("mat")):
if not i.endswith(".mat"):
continue
m = cobra.io.load_matlab_model(os.path.join("mat", i))
m.id = i[:-4]
if m.id in open_boundaries:
open_exchanges(m)
models.append(m)
```
### Some models are only available as Microsoft Excel files
```
m = read_excel("xls/iJS747.xls",
verbose=False, rxn_sheet_header=7)
models.append(m)
m = read_excel("xls/iRM588.xls",
verbose=False, rxn_sheet_header=5)
models.append(m)
m = read_excel("xls/iSO783.xls", verbose=False, rxn_sheet_header=2)
models.append(m)
m = read_excel("xls/iCR744.xls", rxn_sheet_header=4, verbose=False)
models.append(m)
m = read_excel("xls/iNV213.xls", rxn_str_key="Reaction Formula", verbose=False)
# remove boundary metabolites
for met in list(m.metabolites):
if met.id.endswith("[b]"):
met.remove_from_model()
models.append(m)
m = read_excel("xls/iTL885.xls", verbose=False,
rxn_id_key="Rxn name", rxn_gpr_key="Gene-reaction association", met_sheet_name="ignore")
models.append(m)
m = read_excel("xls/iWZ663.xls", verbose=False,
rxn_id_key="auto", rxn_name_key="Reaction name", rxn_gpr_key="Local gene")
models.append(m)
m = read_excel("xls/iOR363.xls", verbose=False)
models.append(m)
m = read_excel("xls/iMA945.xls", verbose=False)
models.append(m)
m = read_excel("xls/iPP668.xls", verbose=False)
add_exchanges(m)
models.append(m)
m = read_excel("xls/iVM679.xls", verbose=False, met_sheet_name="ignore",
rxn_id_key="Name", rxn_name_key="Description", rxn_str_key="Reaction")
open_exchanges(m)
models.append(m)
m = read_excel("xls/iTY425.xls", rxn_sheet_header=1,
rxn_sheet_name="S8", rxn_id_key="Number", rxn_str_key="Reaction", verbose=False)
add_exchanges(m, "xt")
# Protein production reaction does not produce the "PROTEIN" metabolite
m.reactions.R511.add_metabolites({m.metabolites.PROTEIN: 1})
m.id = m.id + "_fixed"
models.append(m)
m = read_excel("xls/iSS724.xls", rxn_str_key="Reactions",
rxn_sheet_header=1, met_sheet_header=1, rxn_id_key="Name",
verbose=False)
add_exchanges(m, "xt")
models.append(m)
m = read_excel("xls/iCS400.xls", rxn_sheet_name="Complete Rxn List",
rxn_sheet_header=2, rxn_str_key="Reaction",
rxn_id_key="Name", verbose=False)
add_exchanges(m, "xt")
models.append(m)
m = read_excel("xls/iLL672.xls",
rxn_id_key="auto", met_sheet_name="Appendix 3 iLL672 metabolites",\
rxn_str_key="REACTION", rxn_gpr_key="skip", verbose=False,
rxn_sheet_name='Appendix 3 iLL672 reactions')
m.reactions[-1].objective_coefficient = 1
m.metabolites.BM.remove_from_model()
add_exchanges(m, "xt")
models.append(m)
plus_re = re.compile("(?<=\S)\+") # substitute H+ with H, etc.
m = read_excel("xls/iMH551.xls", rxn_sheet_name="GPR Annotation", rxn_sheet_header=4,
rxn_id_key="auto", rxn_str_key="REACTION", rxn_gpr_key="skip",
rxn_name_key="ENZYME", rxn_skip_rows=[625, 782, 787], verbose=False,
rxn_sheet_converters={"REACTION": lambda x: plus_re.sub("", x)})
for met in m.metabolites:
if met.id.endswith("(extracellular)"):
met.id = met.id[:-15] + "_e"
m.repair()
add_exchanges(m, "_e")
models.append(m)
m = read_excel("xls/iCS291.xls", rxn_sheet_name="Sheet1",
rxn_str_key="Reaction",
rxn_sheet_header=5, rxn_id_key="Name",
verbose=False)
add_exchanges(m, "xt")
# BIOMASS is just all model metabolites in the Demands list
m.add_reaction(cobra.Reaction("BIOMASS"))
# taken from Table 1 in publication
biomass_mets = {}
for i in {"ALA", "ARG", "ASN", "ASP", "CYS", "GLU", "GLN", "GLY",
"HIS", "ILE", "LEU", "LYS", "MET", "PHE", "PRO", "SER",
"THR", "TRP", "TYR", "VAL", "PTRC", "SPMD", "ATP", "GTP",
"CTP", "UTP", "DATP", "DGTP", "DCTP", "DTTP", "PS", "PE",
"PG", "PEPTIDO", "LPS", "OPP", "UDPP", "NAD", "NADP", "FAD",
"COA", "ACP", "PTH", "THIAMIN", "MTHF", "MK", "DMK"
}:
biomass_mets[m.metabolites.get_by_id(i)] = -1
dm = cobra.Reaction("DM_" + i)
m.add_reaction(dm)
dm.add_metabolites({m.metabolites.get_by_id(i): -1})
m.reactions.BIOMASS.add_metabolites(biomass_mets)
m.change_objective("BIOMASS")
add_exchanges(m, "xt")
models.append(m)
m = read_excel("xls/iYO844.xls", rxn_sheet_name="Reaction and locus", verbose=False, rxn_gpr_key="Locus name",
rxn_str_key=u'Equation (note [c] and [e] at the beginning refer to the compartment \n'
'the reaction takes place in, cytosolic and extracellular respectively)')
add_exchanges(m)
# create the biomass reaction from supplementary data table
# http://www.jbc.org/content/suppl/2007/06/29/M703759200.DC1/Biomass_composition.doc
r = cobra.Reaction("biomass")
r.objective_coefficient = 1.
m.add_reaction(r)
r.reaction = ("408.3 gly[c] + 266.9 ala-L[c] + 306.7 val-L[c] + 346.4 leu-L[c] + 269.9 ile-L[c] + "
"216.2 ser-L[c] + 186.3 thr-L[c] + 175.9 phe-L[c] + 110.8 tyr-L[c] + 54.3 trp-L[c] + "
"56.7 cys-L[c] + 113.3 met-L[c] + 323.1 lys-L[c] + 193.0 arg-L[c] + 81.7 his-L[c] + "
"148.0 asp-L[c] + 260.4 glu-L[c] + 148.0 asp-L[c] + 260.3 gln-L[c] + 160.6 pro-L[c] + "
"62.7 gtp[c] + 38.9 ctp[c] + 41.5 utp[c] + 23.0 datp[c] + 17.4 dgtp[c] + 17.4 dctp[c] + "
"22.9 dttp[c] + 0.085750 m12dg_BS[c] + 0.110292 d12dg_BS[c] + 0.065833 t12dg_BS[c] + "
"0.004642 cdlp_BS[c] + 0.175859 pgly_BS[c] + 0.022057 lysylpgly_BS[c] + 0.559509 psetha_BS[c] + "
"0.006837 lipo1-24_BS[c] + 0.006123 lipo2-24_BS[c] + 0.018162 lipo3-24_BS[c] + "
"0.014676 lipo4-24_BS[c] + 101.82 peptido_BS[c] + 3.62 gtca1-45_BS[c] + 2.35 gtca2-45_BS[c] + "
"1.82 gtca3-45_BS[c] + 3.11 tcam_BS[c] + 706.3 k[c] + 101.7 mg2[c] + 3.4 fe3[c] + 3.2 ca2[c] + "
"0.9 ppi[c] + 0.3 mql7[c] + 0.4 10fthf[c] + 16.2 nad[c] + 4.7 amp[c] + 2.6 adp[c] + 1.0 cmp[c] + "
"0.9 nadp[c] + 0.5 ctp[c] + 0.5 gmp[c] + 0.4 gtp[c] + 0.3 cdp[c] + 0.2 nadph[c] + 0.2 gdp[c] + "
"105053.5 atp[c] + 105000 h2o[c] --> 104985.6 pi[c] + 104997.4 adp[c] + 105000 h[c]")
# units are in mg for this reaction, so scale to grams
r *= 0.001
models.append(m)
models.sort()
```
## Determine Objective Reactions
Some of these models do not specify an objective (or biomass) reaction. These will be automatically detected if possible, or set from a manually curated list.
```
# regular expression to detect "biomass"
biomass_re = re.compile("biomass", re.IGNORECASE)
# manually identified objective reactions
curated_objectives = {"VvuMBEL943": "R806",
"iAI549": "BIO_CBDB1_DM_855",
"mus_musculus": "BIO028",
"iRsp1095": "RXN1391",
"iLC915": "r1133",
"PpaMBEL1254": "R01288",
"AbyMBEL891": "R761",
"iAbaylyiV4": "GROWTH_DASH_RXN",
"iOG654": "RM00001",
"iOR363": "OF14e_Retli",
"iRM588": "agg_GS13m",
"iJS747": "agg_GS13m_2",
"iTL885": "SS1240",
"iMH551": "R0227"}
for m in models:
if len(m.reactions.query(lambda x: x > 0, "objective_coefficient")):
continue
if m.id in curated_objectives:
m.change_objective(curated_objectives[m.id])
continue
# look for reactions with "biomass" in the id or name
possible_objectives = m.reactions.query(biomass_re)
if len(possible_objectives) == 0:
possible_objectives = m.reactions.query(biomass_re, "name")
# In some cases, a biomass "metabolite" is produced, whose production
# should be the objective function.
possible_biomass_metabolites = m.metabolites.query(biomass_re)
if len(possible_biomass_metabolites) == 0:
possible_biomass_metabolites = m.metabolites.query(biomass_re, "name")
if len(possible_biomass_metabolites) > 0:
biomass_met = possible_biomass_metabolites[0]
r = cobra.Reaction("added_biomass_sink")
r.objective_coefficient = 1
r.add_metabolites({biomass_met: -1})
m.add_reaction(r)
print ("autodetected biomass metabolite '%s' for model '%s'" %
(biomass_met.id, m.id))
elif len(possible_objectives) > 0:
print("autodetected objective reaction '%s' for model '%s'" %
(possible_objectives[0].id, m.id))
m.change_objective(possible_objectives[0])
else:
print("no objective found for " + m.id)
# Ensure the biomass objective flux is unconstrained
for m in models:
for reaction in m.reactions.query(lambda x: x > 0, "objective_coefficient"):
reaction.lower_bound = min(reaction.lower_bound, 0)
reaction.upper_bound = max(reaction.upper_bound, 1000)
```
## Fixes of various encoding bugs
### General
GSMN_TB does not use the convention of extracellular metabolites with exchanges. Although the model still solves with this formulation, we normalize it here anyway. This process does not change the mathematical structure of the model.
```
h_c = models.GSMN_TB.metabolites.H_c
for r in models.GSMN_TB.reactions:
if len(r.metabolites) == 2 and h_c in r.metabolites:
met = [i for i in r.metabolites if i is not h_c][0]
EX_met = cobra.Metabolite(met.id[:-1] + "e")
r.add_metabolites({EX_met: -r.metabolites[met]})
if "EX_" + EX_met.id not in models.GSMN_TB.reactions:
exchange = cobra.Reaction("EX_" + EX_met.id)
exchange.add_metabolites({EX_met: -1})
exchange.lower_bound = -1000000.0
exchange.upper_bound = 1000000.0
models.GSMN_TB.add_reaction(exchange)
```
### Reaction and Metabolite id's
```
# reaction id's with spaces in them
models.iJS747.reactions.get_by_id("HDH [deleted 01/16/2007 12:02:30 PM]").id = "HDH_del"
models.iJS747.reactions.get_by_id("HIBD [deleted 03/21/2007 01:06:12 PM]").id = "HIBD_del"
models.iAC560.reactions.get_by_id("GLUDx [m]").id = "GLUDx[m]"
for r in models.iOR363.reactions:
if " " in r.id:
r.id = r.id.split()[0]
models.textbook.reactions.query("Biomass")[0].id = "Biomass_Ecoli_core"
```
Use the convention underscore + compartment, i.e. `_c` instead of `[c]`, `(c)`, etc.
```
SQBKT_re = re.compile("\[([a-z])\]$")
def fix_brackets(id_str, compiled_re):
result = compiled_re.findall(id_str)
if len(result) > 0:
return compiled_re.sub("_" + result[0], id_str)
else:
return id_str
for r in models.iRS1597.reactions:
r.id = fix_brackets(r.id, re.compile("_LSQBKT_([a-z])_RSQBKT_$"))
for m_id in ["iJS747", "iRM588", "iSO783", "iCR744", "iNV213", "iWZ663", "iOR363", "iMA945", "iPP668",
"iTL885", "iVM679", "iYO844", "iZM363"]:
for met in models.get_by_id(m_id).metabolites:
met.id = fix_brackets(met.id, SQBKT_re)
for met in models.S_coilicolor_fixed.metabolites:
if met.id.endswith("_None_"):
met.id = met.id[:-6]
# Some models only have intra and extracellular metabolites, but don't use _c and _e.
for m_id in ["iCS291", "iCS400", "iTY425_fixed", "iSS724"]:
for metabolite in models.get_by_id(m_id).metabolites:
if metabolite.id.endswith("xt"):
metabolite.id = metabolite.id[:-2] + "_e"
elif len(metabolite.id) < 2 or metabolite.id[-2] != "_":
metabolite.id = metabolite.id + "_c"
# Exchange reactions should have the id of the metabolite after with the same convention
for m_id in ["iAF1260", "iJO1366", "iAF692", "iJN746", "iRC1080", "textbook", "iNV213",
"iIT341", "iJN678", "iJR904", "iND750", "iNJ661", "iPS189_fixed", "iSB619",
"iZM363", "iMH551"]:
for r in models.get_by_id(m_id).reactions:
if len(r.metabolites) != 1:
continue
if r.id.startswith("EX_"):
r.id = "EX_" + list(r.metabolites.keys())[0].id
if r.id.startswith("DM_"):
r.id = "DM_" + list(r.metabolites.keys())[0].id
for m in models:
m.repair()
```
### Metabolite Formulas
```
for model in models:
for metabolite in model.metabolites:
if metabolite.formula is None:
metabolite.formula = ""
continue
if str(metabolite.formula).lower() == "none":
metabolite.formula = ""
continue
# some characters should not be in a formula
if "(" in metabolite.formula or \
")" in metabolite.formula or \
"." in metabolite.formula:
metabolite.formula = ""
```
### Metabolite Compartments
```
compartments = {
'c': 'Cytoplasm',
'e': 'Extracellular',
'p': 'Periplasm',
'm': 'Mitochondria',
'g': 'Golgi',
'n': "Nucleus",
'r': "Endoplasmic reticulum",
'x': "Peroxisome",
'v': "Vacuole",
"h": "Chloroplast",
"x": "Glyoxysome",
"s": "Eyespot",
"default": "No Compartment"}
for model in models:
for metabolite in model.metabolites:
if metabolite.compartment is None or len(metabolite.compartment.strip()) == 0 or metabolite.compartment == "[":
if len(metabolite.id) > 2 and metabolite.id[-2] == "_" and metabolite.id[-1].isalpha():
metabolite.compartment = metabolite.id[-1]
else:
metabolite.compartment = "default"
if metabolite.compartment not in model.compartments:
model.compartments[metabolite.compartment] = compartments.get(metabolite.compartment, metabolite.compartment)
```
### Metabolite and Reaction Names
Names, unlike IDs, don't need leading-underscore escaping when they start with numbers, so those underscores are stripped.
```
for model in models:
for x in chain(model.metabolites, model.reactions):
if x.name is not None and x.name.startswith("_"):
x.name = x.name.lstrip("_")
if x.name is not None:
x.name = x.name.strip()
if x.name is None:
x.name = x.id
```
### MISC fixes
```
models.iMM1415.reactions.EX_lnlc_dup_e.remove_from_model()
models.iMM1415.reactions.EX_retpalm_e.remove_from_model(remove_orphans=True)
# these reaction names are reaction strings
for r in models.iCac802.reactions:
r.name = ""
```
## Fix Genes and GPRs
A lot of genes have characters which won't work in their names.
```
# nonbreaking spaces
models.iCB925.reactions.FDXNRy.gene_reaction_rule = '( Cbei_0661 or Cbei_2182 )'
for r in models.iCB925.reactions:
if "\xa0" in r.gene_reaction_rule:
r.gene_reaction_rule = r.gene_reaction_rule.replace("\xc2", " ").replace("\xa0", " ")
for g in list(models.iCB925.genes):
if len(g.reactions) == 0:
models.iCB925.genes.remove(g)
```
Some GPRs are not valid boolean expressions.
```
multiple_ors = re.compile(r"(\s*or\s+){2,}")
multiple_ands = re.compile(r"(\s*and\s+){2,}")
for model_id in ["iRS1563", "iRS1597", "iMM1415"]:
model = models.get_by_id(model_id)
for reaction in model.reactions:
gpr = reaction.gene_reaction_rule
gpr = multiple_ors.sub(" or ", gpr)
gpr = multiple_ands.sub(" and ", gpr)
if "[" in gpr:
gpr = gpr.replace("[", "(").replace("]", ")")
if gpr.endswith(" or"):
gpr = gpr[:-3]
if gpr.count("(") != gpr.count(")"):
gpr = "" # mismatched parenthesis somewhere
reaction.gene_reaction_rule = gpr
for gene in list(model.genes):
if gene.id.startswith("[") or gene.id.endswith("]"):
if len(gene.reactions) == 0:
model.genes.remove(gene.id)
# Some models are missing spaces between the ands/ors in some of their GPR's
for m_id in ["iJN678", "iTL885"]:
for r in models.get_by_id(m_id).reactions:
r.gene_reaction_rule = r.gene_reaction_rule.replace("and", " and ").replace("or", " or ")
models.iCac802.reactions.R0095.gene_reaction_rule = \
models.iCac802.reactions.R0095.gene_reaction_rule.replace(" AND ", " and ")
# make sbml3 output deterministic by sorting genes
for m in models:
m.genes.sort()
```
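For illustration (the GPR string below is hypothetical), the operator-collapsing regexes reduce a run of repeated `or`/`and` operators to a single operator:
```
# 'gene1 or or or gene2' contains a run of three 'or's
print(multiple_ors.sub(" or ", "gene1 or or or gene2"))  # -> gene1 or gene2
```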
## Ensure all IDs are SBML compliant
```
for m in models:
cobra.manipulation.escape_ID(m)
```
## Export Models
### SBML 3
Export the models using the fbc version 2 (draft RC6) extension to SBML level 3 version 1.
```
for model in models:
cobra.io.write_sbml_model(model, "sbml3/%s.xml" % model.id)
```
### mat
Save all the models into a single mat file. In addition to the usual fields in the "mat" struct, we will also include S_num and S_denom, which are the numerator and denominator of the stoichiometric coefficients encoded as rational numbers.
```
def convert_to_rational(value):
return sympy.Rational("%.15g" % value)
def construct_S_num_denom(model):
"""convert model to two S matrices
they encode the numerator and denominator of stoichiometric
coefficients encoded as rational numbers
"""
# initialize to 0
dimensions = (len(model.metabolites), len(model.reactions))
S_num = scipy.sparse.lil_matrix(dimensions)
S_denom = scipy.sparse.lil_matrix(dimensions)
# populate with stoichiometry
for i, r in enumerate(model.reactions):
for met, value in r._metabolites.items():
rational_value = convert_to_rational(value)
num, denom = (rational_value.p, rational_value.q)
S_num[model.metabolites.index(met), i] = num
S_denom[model.metabolites.index(met), i] = denom
return S_num, S_denom
all_model_dict = {}
for model in models:
model_dict = cobra.io.mat.create_mat_dict(model)
model_dict["S_num"], model_dict["S_denom"] = construct_S_num_denom(model)
all_model_dict[model.id] = model_dict
scipy.io.savemat("all_models.mat", all_model_dict, oned_as="column")
```
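As a small worked example, `convert_to_rational` turns a float into an exact sympy rational whose numerator and denominator are what populate `S_num` and `S_denom`:
```
r = convert_to_rational(0.5)
print(r, r.p, r.q)  # -> 1/2 1 2
```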
# Credential Scan on Azure Log Analytics
__Notebook Version:__ 1.0<br>
__Python Version:__ Python 3.8 - AzureML<br>
__Required Packages:__ No<br>
__Platforms Supported:__ Azure Machine Learning Notebooks<br>
__Data Source Required:__ Log Analytics tables
### Description
This notebook provides step-by-step instructions and sample code to detect credentials leaked into Azure Log Analytics using the Azure SDK for Python and KQL.<br>
***No need to download and install any other Python modules.***<br>
***Please run the cells sequentially to avoid errors. Please do not use "run all cells".***<br>
Need to know more about KQL? [Getting started with Kusto Query Language](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/concepts/).
## Table of Contents
1. Warm-up
2. Azure Authentication
3. Azure Log Analytics Data Queries
## 1. Warm-up
```
# If you need to know what Python modules are available, you may run this:
# help("modules")
# Load Python libraries that will be used in this notebook
from azure.common.client_factory import get_client_from_cli_profile
from azure.common.credentials import get_azure_cli_credentials
from azure.loganalytics.models import QueryBody
from azure.mgmt.loganalytics import LogAnalyticsManagementClient
from azure.loganalytics import LogAnalyticsDataClient
import pandas as pd
import json
import ipywidgets
from IPython.display import display, HTML, Markdown
# Functions will be used in this notebook
def read_config_values(file_path):
"This loads pre-generated parameters for Sentinel Workspace"
with open(file_path) as json_file:
if json_file:
json_config = json.load(json_file)
return (json_config["tenant_id"],
json_config["subscription_id"],
json_config["resource_group"],
json_config["workspace_id"],
json_config["workspace_name"],
json_config["user_alias"],
json_config["user_object_id"])
return None
def has_valid_token():
"Check to see if there is a valid AAD token"
try:
credentials, sub_id = get_azure_cli_credentials()
creds = credentials._get_cred(resource=None)
token = creds._token_retriever()[2]
print("Successfully signed in.")
return True
except Exception as ex:
if "Please run 'az login' to setup account" in str(ex):
print(str(ex))
return False
elif "AADSTS70043: The refresh token has expired" in str(ex):
message = "**The refresh token has expired. <br> Please continue your login process. Then: <br> 1. If you plan to run multiple notebooks on the same compute instance today, you may restart the compute instance by clicking 'Compute' on left menu, then select the instance, clicking 'Restart'; <br> 2. Otherwise, you may just restart the kernel from top menu. <br> Finally, close and re-load the notebook, then re-run cells one by one from the top.**"
display(Markdown(message))
return False
elif "[Errno 2] No such file or directory: '/home/azureuser/.azure/azureProfile.json'" in str(ex):
print("Please sign in.")
return False
else:
print(str(ex))
return False
except:
print("Please restart the kernel, and run 'az login'.")
return False
def get_credscan_kql_where_clause(column_name):
"This function return the KQL where clause for credscan"
where_clause = " | where TimeGenerated > ago({0}) | where {1} "
time_range = "7d"
regex_string = ""
regex_list = [
r"(?i)(ida:password|IssuerSecret|(api|client|app(lication)?)[_\\- ]?(key|secret)[^,a-z]|\\.azuredatabricks\\.net).{0,10}(dapi)?[a-z0-9/+]{22}",
r"(?i)(x-api-(key|token).{0,10}[a-z0-9/+]{40}|v1\\.[a-z0-9/+]{40}[^a-z0-9/+])",
r"(?-i)\\WAIza(?i)[a-z0-9_\\\\\\-]{35}\\W",
r"(?i)(\\Wsig\\W|Secret(Value)?|IssuerSecret|(\\Wsas|primary|secondary|management|Shared(Access(Policy)?)?).?Key|\\.azure\\-devices\\.net|\\.(core|servicebus|redis\\.cache|accesscontrol|mediaservices)\\.(windows\\.net|chinacloudapi\\.cn|cloudapi\\.de|usgovcloudapi\\.net)|New\\-AzureRedisCache).{0,100}([a-z0-9/+]{43}=)",
r"(?i)visualstudio\\.com.{1,100}\\W(?-i)[a-z2-7]{52}\\W",
r"(?i)se=2021.+sig=[a-z0-9%]{43,63}%3d",
r"(?i)(x-functions-key|ApiKey|Code=|\\.azurewebsites\\.net/api/).{0,100}[a-z0-9/\\+]{54}={2}",
r"(?i)code=[a-z0-9%]{54,74}(%3d){2}",
r"(?i)(userpwd|publishingpassword).{0,100}[a-z0-9/\\+]{60}\\W",
r"(?i)[^a-z0-9/\\+][a-z0-9/\\+]{86}==",
r"(?-i)\\-{5}BEGIN( ([DR]SA|EC|OPENSSH|PGP))? PRIVATE KEY( BLOCK)?\\-{5}",
r"(?i)(app(lication)?|client)[_\\- ]?(key(url)?|secret)([\\s=:>]{1,10}|[\\s\"':=|>\\]]{3,15}|[\"'=:\\(]{2})[^\\-]",
r"(?i)refresh[_\\-]?token([\\s=:>]{1,10}|[\\s\"':=|>\\]]{3,15}|[\"'=:\\(]{2})(\"data:text/plain,.+\"|[a-z0-9/+=_.-]{20,200})",
r"(?i)AccessToken(Secret)?([\\s\"':=|>\\]]{3,15}|[\"'=:\\(]{2}|[\\s=:>]{1,10})[a-z0-9/+=_.-]{20,200}",
r"(?i)[a-z0-9]{3,5}://[^%:\\s\"'/][^:\\s\"'/\\$]+[^:\\s\"'/\\$%]:([^%\\s\"'/][^@\\s\"'/]{0,100}[^%\\s\"'/])@[\\$a-z0-9:\\.\\-_%\\?=/]+",
r"(?i)snmp(\\-server)?\\.exe.{0,100}(priv|community)",
r"(?i)(ConvertTo\\-?SecureString\\s*((\\(|\\Wstring)\\s*)?['\"]+)",
r"(?i)(Consumer|api)[_\\- ]?(Secret|Key)([\\s=:>]{1,10}|[\\s\"':=|>,\\]]{3,15}|[\"'=:\\(]{2})[^\\s]{5,}",
r"(?i)authorization[,\\[:= \"']+([dbaohmnsv])",
r"(?i)-u\\s+.{2,100}-p\\s+[^\\-/]",
r"(?i)(amqp|ssh|(ht|f)tps?)://[^%:\\s\"'/][^:\\s\"'/\\$]+[^:\\s\"'/\\$%]:([^%\\s\"'/][^@\\s\"'/]{0,100}[^%\\s\"'/])@[\\$a-z0-9:\\.\\-_%\\?=/]+",
r"(?i)(\\Waws|amazon)?.{0,5}(secret|access.?key).{0,10}\\W[a-z0-9/\\+]{40}",
r"(?-i)(eyJ0eXAiOiJKV1Qi|eyJhbGci)",
r"(?i)@(\\.(on)?)?microsoft\\.com[ -~\\s]{1,100}?(\\w?pass\\w?)",
r"(?i)net(\\.exe)?.{1,5}(user\\s+|share\\s+/user:|user-?secrets? set)\\s+[a-z0-9]",
r"(?i)xox[pbar]\\-[a-z0-9]",
r"(?i)[\":\\s=]((x?corp|extranet(test)?|ntdev)(\\.microsoft\\.com)?|corp|redmond|europe|middleeast|northamerica|southpacific|southamerica|fareast|africa|exchange|extranet(test)?|partners|parttest|ntdev|ntwksta)\\W.{0,100}(password|\\Wpwd|\\Wpass|\\Wpw\\W|userpass)",
r"(?i)(sign_in|SharePointOnlineAuthenticatedContext|(User|Exchange)Credentials?|password)[ -~\\s]{0,100}?@([a-z0-9.]+\\.(on)?)?microsoft\\.com['\"]?",
r"(?i)(\\.database\\.azure\\.com|\\.database(\\.secure)?\\.windows\\.net|\\.cloudapp\\.net|\\.database\\.usgovcloudapi\\.net|\\.database\\.chinacloudapi\\.cn|\\.database.cloudapi.de).{0,100}(DB_PASS|(sql|service)?password|\\Wpwd\\W)",
r"(?i)(secret(.?key)?|password)[\"']?\\s*[:=]\\s*[\"'][^\\s]+?[\"']",
r"(?i)[^a-z\\$](DB_USER|user id|uid|(sql)?user(name)?|service\\s?account)\\s*[^\\w\\s,]([ -~\\s]{2,120}?|[ -~]{2,30}?)([^a-z\\s\\$]|\\s)\\s*(DB_PASS|(sql|service)?password|pwd)",
r"(?i)(password|secret(key)?)[ \\t]*[=:]+[ \\t]*([^:\\s\"';,<]{2,200})",
]
for (i, re_str) in enumerate(regex_list):
if i != 0:
if i == 27:
regex_string += " and "
else:
regex_string += " or "
regex_string += " " + column_name + " matches regex \"" + re_str + "\""
return where_clause.format(time_range, regex_string)
def process_result(result):
"This function processes data returned from Azure LogAnalyticsDataClient, it returns pandas DataFrame."
json_result = result.as_dict()
cols = pd.json_normalize(json_result['tables'][0], 'columns')
final_result = pd.json_normalize(json_result['tables'][0], 'rows')
if final_result.shape[0] != 0:
final_result.columns = cols.name
return final_result
# Calling the above function to populate Sentinel workspace parameters
# The file, config.json, was generated by the system, however, you may modify the values, or manually set the variables
tenant_id, subscription_id, resource_group, workspace_id, workspace_name, user_alias, user_object_id = read_config_values('config.json');
```
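As a quick sanity check before querying a real table, you can preview the generated where clause; the column name below is illustrative only:
```
# Illustrative only: preview the start of the generated KQL where clause
preview = get_credscan_kql_where_clause("tostring(SyslogMessage)")
print(preview[:200], "...")
```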
## 2. Azure Authentication
```
# Azure CLI is used to get a device code to log in to Azure; you need to copy the code and open the DeviceLogin site.
# You may add [--tenant $tenant_id] to the command
if has_valid_token() == False:
!az login --tenant $tenant_id --use-device-code
# Initialize Azure LogAnalyticsDataClient, which is used to access Sentinel log data in Azure Log Analytics.
# You may need to change resource_uri for various cloud environments.
resource_uri = "https://api.loganalytics.io"
la_client = get_client_from_cli_profile(LogAnalyticsManagementClient, subscription_id = subscription_id)
creds, _ = get_azure_cli_credentials(resource=resource_uri)
la_data_client = LogAnalyticsDataClient(creds)
```
## 3. Azure Log Analytics Data Queries
```
# Get all tables available using Kusto query language. If you need to know more about KQL, please check out the link provided at the introductory section.
tables_result = None
table_list = None
all_tables_query = "union withsource = SentinelTableName * | distinct SentinelTableName | sort by SentinelTableName asc"
if la_data_client != None:
tables_result = la_data_client.query(workspace_id, QueryBody(query=all_tables_query))
if tables_result != None:
table_list = process_result(tables_result)
tables = sorted(table_list.SentinelTableName.tolist())
table_dropdown = ipywidgets.Dropdown(options=tables, description='Tables:')
display(table_dropdown)
# Select a Column in the selected table to scan
# However, you may safely ignore this cell if you decide to scan all columns for the selected table!
columns_result = None
column_list = None
all_columns_query = "{0} | getschema | project ColumnName | order by ColumnName asc".format(table_dropdown.value)
if la_data_client != None:
columns_result = la_data_client.query(workspace_id, QueryBody(query=all_columns_query))
if columns_result != None:
column_list = process_result(columns_result)
columns = sorted(column_list.ColumnName.tolist())
column_dropdown = ipywidgets.Dropdown(options=columns, description='Columns:')
display(column_dropdown)
else:
column_list = pd.DataFrame()  # empty DataFrame so the .empty check below still works
# This cell will run Credential Scanner regex
# You may adjust the query based on your needs.
# To look at the query, you may run: print(query)
if table_list.empty == False:
if 'column_list' in vars() and column_list.empty == False and column_dropdown.value != None:
column_name = "tostring({0})".format(column_dropdown.value)
else:
column_name = "*"
table_name = table_dropdown.value
kql_where_clause = get_credscan_kql_where_clause(column_name)
query = "{0} {1}".format(table_name, kql_where_clause)
#print("Query: " + query)
# Run query
result = la_data_client.query(workspace_id, QueryBody(query=query))
# Display Result
final_result = process_result(result)
if final_result.size == 0:
print("No credentials found")
else:
display(final_result)
# Save results to a csv file in the current file system
if final_result is not None and len(final_result) > 0:
final_result.to_csv('credscan_loganalytics.csv')
```
# Mask R-CNN - Train on Shapes Dataset
This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
```
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
```
## Configurations
```
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 3 # background + 3 shapes
# Use small images for faster training. Set the limits of the small side
# and the large side; together they determine the image shape.
IMAGE_MIN_DIM = 128
IMAGE_MAX_DIM = 128
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
config = ShapesConfig()
config.display()
```
## Notebook Preferences
```
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
```
## Dataset
Create a synthetic dataset
Extend the Dataset class and add a method to load the shapes dataset, `load_shapes()`, and override the following methods:
* load_image()
* load_mask()
* image_reference()
```
class ShapesDataset(utils.Dataset):
"""Generates the shapes synthetic dataset. The dataset consists of simple
shapes (triangles, squares, circles) placed randomly on a blank surface.
The images are generated on the fly. No file access required.
"""
def load_shapes(self, count, height, width):
"""Generate the requested number of synthetic images.
count: number of images to generate.
height, width: the size of the generated images.
"""
# Add classes
self.add_class("shapes", 1, "square")
self.add_class("shapes", 2, "circle")
self.add_class("shapes", 3, "triangle")
# Add images
# Generate random specifications of images (i.e. color and
# list of shapes sizes and locations). This is more compact than
# actual images. Images are generated on the fly in load_image().
for i in range(count):
bg_color, shapes = self.random_image(height, width)
self.add_image("shapes", image_id=i, path=None,
width=width, height=height,
bg_color=bg_color, shapes=shapes)
def load_image(self, image_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
info = self.image_info[image_id]
bg_color = np.array(info['bg_color']).reshape([1, 1, 3])
image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)
image = image * bg_color.astype(np.uint8)
for shape, color, dims in info['shapes']:
image = self.draw_shape(image, shape, dims, color)
return image
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
info = self.image_info[image_id]
shapes = info['shapes']
count = len(shapes)
mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
for i, (shape, _, dims) in enumerate(info['shapes']):
mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),
shape, dims, 1)
# Handle occlusions
occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
for i in range(count-2, -1, -1):
mask[:, :, i] = mask[:, :, i] * occlusion
occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
# Map class names to class IDs.
class_ids = np.array([self.class_names.index(s[0]) for s in shapes])
return mask.astype(np.bool), class_ids.astype(np.int32)
def draw_shape(self, image, shape, dims, color):
"""Draws a shape from the given specs."""
# Get the center x, y and the size s
x, y, s = dims
if shape == 'square':
cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)
elif shape == "circle":
cv2.circle(image, (x, y), s, color, -1)
elif shape == "triangle":
points = np.array([[(x, y-s),
(x-s/math.sin(math.radians(60)), y+s),
(x+s/math.sin(math.radians(60)), y+s),
]], dtype=np.int32)
cv2.fillPoly(image, points, color)
return image
def random_shape(self, height, width):
"""Generates specifications of a random shape that lies within
the given height and width boundaries.
Returns a tuple of three values:
* The shape name (square, circle, ...)
* Shape color: a tuple of 3 values, RGB.
* Shape dimensions: A tuple of values that define the shape size
and location. Differs per shape type.
"""
# Shape
shape = random.choice(["square", "circle", "triangle"])
# Color
color = tuple([random.randint(0, 255) for _ in range(3)])
# Center x, y
buffer = 20
y = random.randint(buffer, height - buffer - 1)
x = random.randint(buffer, width - buffer - 1)
# Size
s = random.randint(buffer, height//4)
return shape, color, (x, y, s)
def random_image(self, height, width):
"""Creates random specifications of an image with multiple shapes.
Returns the background color of the image and a list of shape
specifications that can be used to draw the image.
"""
# Pick random background color
bg_color = np.array([random.randint(0, 255) for _ in range(3)])
# Generate a few random shapes and record their
# bounding boxes
shapes = []
boxes = []
N = random.randint(1, 4)
for _ in range(N):
shape, color, dims = self.random_shape(height, width)
shapes.append((shape, color, dims))
x, y, s = dims
boxes.append([y-s, x-s, y+s, x+s])
# Apply non-max suppression with 0.3 threshold to avoid
# shapes covering each other
keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)
shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]
return bg_color, shapes
# Training dataset
dataset_train = ShapesDataset()
dataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = ShapesDataset()
dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
```
## Create Model
```
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last(), by_name=True)
```
## Training
Train in two stages:
1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.
2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers="all"` to train all layers.
```
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1,
layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=2,
layers="all")
# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
# model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
# model.keras_model.save_weights(model_path)
```
## Detection
```
class InferenceConfig(ShapesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset_train.class_names, figsize=(8, 8))
results = model.detect([original_image], verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset_val.class_names, r['scores'], ax=get_ax())
```
## Evaluation
```
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps =\
utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
r["rois"], r["class_ids"], r["scores"], r['masks'])
APs.append(AP)
print("mAP: ", np.mean(APs))
```
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Neural style transfer on video
Using modified code from `pytorch`'s neural style [example](https://pytorch.org/tutorials/advanced/neural_style_tutorial.html), we show how to set up a pipeline for doing style transfer on video. The pipeline has the following steps:
1. Split a video into images
2. Run neural style on each image using one of the provided models (from `pytorch` pretrained models for this example).
3. Stitch the image back into a video.
> **Tip**
If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.
## Prerequisites
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.
## Initialize Workspace
Initialize a workspace object from persisted configuration.
```
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
from azureml.core import Workspace, Experiment
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep = '\n')
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core import Datastore, Dataset
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import CondaDependencies, RunConfiguration
from azureml.core.compute_target import ComputeTargetException
from azureml.data import OutputFileDatasetConfig
```
# Download models
```
import os
# create directory for model
model_dir = 'models'
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
import urllib.request
def download_model(model_name):
# downloaded models from https://pytorch.org/tutorials/advanced/neural_style_tutorial.html are kept here
url = "https://pipelinedata.blob.core.windows.net/styletransfer/saved_models/" + model_name
local_path = os.path.join(model_dir, model_name)
urllib.request.urlretrieve(url, local_path)
```
# Register all Models
```
from azureml.core.model import Model
mosaic_model = None
candy_model = None
models = Model.list(workspace=ws, tags=['scenario'])
for m in models:
print("Name:", m.name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
if m.name == 'mosaic' and mosaic_model is None:
mosaic_model = m
elif m.name == 'candy' and candy_model is None:
candy_model = m
if mosaic_model is None:
print('Mosaic model does not exist, registering it')
download_model('mosaic.pth')
mosaic_model = Model.register(model_path = os.path.join(model_dir, "mosaic.pth"),
model_name = "mosaic",
tags = {'type': "mosaic", 'scenario': "Style transfer using batch inference"},
description = "Style transfer - Mosaic",
workspace = ws)
else:
print('Reusing existing mosaic model')
if candy_model is None:
print('Candy model does not exist, registering it')
download_model('candy.pth')
candy_model = Model.register(model_path = os.path.join(model_dir, "candy.pth"),
model_name = "candy",
tags = {'type': "candy", 'scenario': "Style transfer using batch inference"},
description = "Style transfer - Candy",
workspace = ws)
else:
print('Reusing existing candy model')
```
# Create or use existing compute
```
# AmlCompute
cpu_cluster_name = "cpu-cluster"
try:
cpu_cluster = AmlCompute(ws, cpu_cluster_name)
print("found existing cluster.")
except ComputeTargetException:
print("creating new cluster")
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_v2",
max_nodes = 1)
# create the cluster
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, provisioning_config)
cpu_cluster.wait_for_completion(show_output=True)
# AmlCompute
gpu_cluster_name = "gpu-cluster"
try:
gpu_cluster = AmlCompute(ws, gpu_cluster_name)
print("found existing cluster.")
except ComputeTargetException:
print("creating new cluster")
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_NC6",
max_nodes = 3)
# create the cluster
gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)
gpu_cluster.wait_for_completion(show_output=True)
```
# Python Scripts
We use an edited version of `neural_style_mpi.py` (original is [here](https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/neural_style.py)). Scripts to split and stitch the video are thin wrappers around calls to `ffmpeg`.
We install `ffmpeg` through conda dependencies.
```
scripts_folder = "scripts"
process_video_script_file = "process_video.py"
# peek at contents
with open(os.path.join(scripts_folder, process_video_script_file)) as process_video_file:
print(process_video_file.read())
stitch_video_script_file = "stitch_video.py"
# peek at contents
with open(os.path.join(scripts_folder, stitch_video_script_file)) as stitch_video_file:
print(stitch_video_file.read())
```
The sample video **orangutan.mp4** is stored at a publicly shared datastore, which we register below. If you want to take a look at the original video, click [here](https://pipelinedata.blob.core.windows.net/sample-videos/orangutan.mp4).
```
# datastore for input video
account_name = "pipelinedata"
video_ds = Datastore.register_azure_blob_container(ws, "videos", "sample-videos",
account_name=account_name, overwrite=True)
# the default blob store attached to a workspace
default_datastore = ws.get_default_datastore()
```
# Sample video
```
video_name=os.getenv("STYLE_TRANSFER_VIDEO_NAME", "orangutan.mp4")
orangutan_video = Dataset.File.from_files((video_ds,video_name))
cd = CondaDependencies()
cd.add_channel("conda-forge")
cd.add_conda_package("ffmpeg==4.0.2")
# Runconfig
amlcompute_run_config = RunConfiguration(conda_dependencies=cd)
amlcompute_run_config.environment.docker.base_image = "pytorch/pytorch"
amlcompute_run_config.environment.spark.precache_packages = False
ffmpeg_audio = OutputFileDatasetConfig(name="ffmpeg_audio")
processed_images = OutputFileDatasetConfig(name="processed_images")
output_video = OutputFileDatasetConfig(name="output_video")
ffmpeg_images = OutputFileDatasetConfig(name="ffmpeg_images")
```
# Define tweakable parameters to pipeline
These parameters can be changed when the pipeline is published and rerun from a REST call.
As part of ParallelRunStep, the following two pipeline parameters are created and can be used to override values:
- node_count
- process_count_per_node
```
from azureml.pipeline.core.graph import PipelineParameter
# create a parameter for style (one of "candy", "mosaic") to transfer the images to
style_param = PipelineParameter(name="style", default_value="mosaic")
# create a parameter for the number of nodes to use in step no. 2 (style transfer)
nodecount_param = PipelineParameter(name="nodecount", default_value=2)
split_video_step = PythonScriptStep(
name="split video",
script_name="process_video.py",
arguments=["--input_video", orangutan_video.as_mount(),
"--output_audio", ffmpeg_audio,
"--output_images", ffmpeg_images],
compute_target=cpu_cluster,
runconfig=amlcompute_run_config,
source_directory=scripts_folder
)
stitch_video_step = PythonScriptStep(
name="stitch",
script_name="stitch_video.py",
arguments=["--images_dir", processed_images.as_input(),
"--input_audio", ffmpeg_audio.as_input(),
"--output_dir", output_video],
compute_target=cpu_cluster,
runconfig=amlcompute_run_config,
source_directory=scripts_folder
)
```
# Create environment, parallel step run config and parallel run step
```
from azureml.core import Environment
from azureml.core.runconfig import DEFAULT_GPU_IMAGE
parallel_cd = CondaDependencies()
parallel_cd.add_channel("pytorch")
parallel_cd.add_conda_package("pytorch")
parallel_cd.add_conda_package("torchvision")
parallel_cd.add_conda_package("pillow<7") # needed for torchvision==0.4.0
parallel_cd.add_pip_package("azureml-core")
styleenvironment = Environment(name="styleenvironment")
styleenvironment.python.conda_dependencies=parallel_cd
styleenvironment.docker.base_image = DEFAULT_GPU_IMAGE
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import ParallelRunConfig
parallel_run_config = ParallelRunConfig(
environment=styleenvironment,
entry_script='transform.py',
output_action='summary_only',
mini_batch_size="1",
error_threshold=1,
source_directory=scripts_folder,
compute_target=gpu_cluster,
node_count=nodecount_param,
process_count_per_node=2
)
from azureml.pipeline.steps import ParallelRunStep
from datetime import datetime
parallel_step_name = 'styletransfer-' + datetime.now().strftime('%Y%m%d%H%M')
distributed_style_transfer_step = ParallelRunStep(
name=parallel_step_name,
inputs=[ffmpeg_images], # Input file share/blob container/file dataset
output=processed_images, # Output file share/blob container
arguments=["--style", style_param],
parallel_run_config=parallel_run_config,
allow_reuse=False #[optional - default value True]
)
```
# Run the pipeline
```
# stitch_video_step consumes the outputs of the other steps, so the full step graph is resolved automatically
pipeline = Pipeline(workspace=ws, steps=[stitch_video_step])
pipeline.validate()
# submit the pipeline and provide values for the PipelineParameters used in the pipeline
pipeline_run = Experiment(ws, 'styletransfer_parallel_mosaic').submit(pipeline)
```
# Monitor pipeline run
The pipeline run status can be checked in the Azure Machine Learning portal (https://ml.azure.com). The link to the pipeline run can be retrieved by inspecting the `pipeline_run` object.
```
# This will output information of the pipeline run, including the link to the details page of portal.
pipeline_run
```
### Optional: View detailed logs (streaming)
```
# Wait the run for completion and show output log to console
pipeline_run.wait_for_completion(show_output=True)
```
# Download output video
Downloads the video in `output_video` folder
```
def download_video(run, target_dir=None):
stitch_run = run.find_step_run(stitch_video_step.name)[0]
port_data = stitch_run.get_details()['outputDatasets'][0]['dataset']
port_data.download(target_dir)
pipeline_run.wait_for_completion()
download_video(pipeline_run, "output_video_mosaic")
```
# Publish pipeline
```
pipeline_name = "style-transfer-batch-inference"
print(pipeline_name)
published_pipeline = pipeline.publish(
name=pipeline_name,
description=pipeline_name)
print("Newly published pipeline id: {}".format(published_pipeline.id))
```
# Get published pipeline
This is another way to get the published pipeline.
```
from azureml.pipeline.core import PublishedPipeline
# You could retrieve all pipelines that are published, or
# just get the published pipeline object that you have the ID for.
# Get all published pipeline objects in the workspace
all_pub_pipelines = PublishedPipeline.list(ws)
# We will iterate through the list of published pipelines and
# use the last ID in the list for Schedule operations:
print("Published pipelines found in the workspace:")
for pub_pipeline in all_pub_pipelines:
print("Name:", pub_pipeline.name,"\tDescription:", pub_pipeline.description, "\tId:", pub_pipeline.id, "\tStatus:", pub_pipeline.status)
if(pub_pipeline.name == pipeline_name):
published_pipeline = pub_pipeline
print("Published pipeline id: {}".format(published_pipeline.id))
```
# Run pipeline through REST calls for other styles
# Get AAD token
```
from azureml.core.authentication import InteractiveLoginAuthentication
import requests
auth = InteractiveLoginAuthentication()
aad_token = auth.get_authentication_header()
```
# Get endpoint URL
```
rest_endpoint = published_pipeline.endpoint
print("Pipeline REST endpoing: {}".format(rest_endpoint))
```
# Send request and monitor
```
experiment_name = 'styletransfer_parallel_candy'
response = requests.post(rest_endpoint,
headers=aad_token,
json={"ExperimentName": experiment_name,
"ParameterAssignments": {"style": "candy", "NodeCount": 3}})
run_id = response.json()["Id"]
from azureml.pipeline.core.run import PipelineRun
published_pipeline_run_candy = PipelineRun(ws.experiments[experiment_name], run_id)
# Show detail information of run
published_pipeline_run_candy
```
# Download output from re-run
```
published_pipeline_run_candy.wait_for_completion()
download_video(published_pipeline_run_candy, target_dir="output_video_candy")
```
# Print in python
```
import os
from IPython.core.display import HTML
def load_style(directory = '../', name='customMac.css'):
styles = open(os.path.join(directory, name), 'r').read()
return HTML(styles)
load_style()
```
## Print Statement
The **print** function can be used in the following different ways:
- `print("Hello World")`
- `print("Hello", <variable containing the string>)`
- `print("Hello" + <variable containing the string>)`
- `print("Hello %s" % <variable containing the string>)`
```
print("Hello World")
```
In Python, single, double and triple quotes are used to denote a string.
Most use single quotes when declaring a single character, double quotes when declaring a line, and triple quotes when declaring a paragraph/multiple lines.
```
print('Hey')
print("""My name is Kundan Kumar.
I always play with Python.""")
```
Strings can be assigned to variables, say _string1_ and _string2_, which can be called when using the print statement.
```
string1 = 'World'
print('Hello', string1)
string2 = '!'
print('Hello', string1, string2)
```
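Note that `print` with commas inserts a separator between arguments (a space by default); it can be changed with the `sep` keyword:
```
print('Hello', string1, string2, sep='')  # HelloWorld!
```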
String concatenation is the "addition" of two strings. Observe that while concatenating there will be no space between the strings.
```
print('Hello' + string1 + string2)
```
**%s** is used to refer to a variable which contains a string.
```
print("Hello %s" % string1)
```
Similarly, when using other data types
- %s -> string
- %d -> Integer
- %f -> Float
- %o -> Octal
- %x -> Hexadecimal
- %e -> exponential
This can be used for conversions inside the print statement itself.
```
print("Actual Number = %d" %18)
print("Float of the number = %f" %18)
print("Octal equivalent of the number = %o" %18)
print("Hexadecimal equivalent of the number = %x" %18)
print("Exponential equivalent of the number = %e" %18)
```
When referring to multiple variables, parentheses are used.
```
print("Hello %s %s" %(string1,string2))
```
## Other Examples
The following are other different ways the print statement can be put to use.
```
print("I want %%d to be printed %s" %'here')
print('_A'*10) # repeats 10 times
print("Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug")
# how to print \n as text
print("I want \\n to be printed.")
print("""
Routine:
\t- Eat
\t- Sleep\n\t- Repeat
""")
```
## PrecisionWidth and FieldWidth
Field width is the width of the entire number, and precision is the number of digits to the right of the decimal point. One can alter these widths based on the requirements.
The default precision width is 6.
```
"%f" % 3.121312312312
```
Notice that up to 6 decimal places are returned. To specify the number of decimal places, '%(fieldwidth).(precisionwidth)f' is used.
```
"%.5f" % 3.121312312312
```
If the field width is set larger than necessary, the data right-aligns itself to fit the specified width.
```
"%9.5f" % 3.121312312312
```
Zero padding is done by adding a 0 at the start of fieldwidth.
```
"%020.5f" % 3.121312312312
```
For proper alignment, a space can be left blank in the field width so that proper alignment is maintained when a negative number is used. However, it aligns automatically in Python 3.6.
```
print("% 9f" % 3.121312312312)
print("%9f" % -3.121312312312)
```
A '+' sign can be shown at the beginning of a positive number by adding a + at the start of the field width.
```
print("%+9f" % 3.121312312312)
print("% 9f" % -3.121312312312)
```
As mentioned above, the data right-aligns itself when the specified field width is larger than the actual width of the number. Left alignment can be done by specifying a negative sign in the field width.
```
"%-9.3f" % 3.121312312312
```
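For comparison, a minimal sketch of the same width/precision and alignment control using `str.format` (equivalent output to the % examples above):
```
print("{:9.5f}".format(3.121312312312))    # same as "%9.5f"
print("{:<9.3f}|".format(3.121312312312))  # '<' left-aligns within the field
```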
```
import numpy as np
import pandas as pd
import os
import torch
import torchvision
import torchsample
import psycopg2
import random
import re
import time
import csv
import copy
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.utils.data as data_utils
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import WeightedRandomSampler
from torchvision import models
from torchvision import transforms
from torchsample import transforms as ts_transforms
from matplotlib import pyplot as plt
from PIL import Image
from scipy.ndimage import imread
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn import preprocessing
from sklearn.gaussian_process import GaussianProcessRegressor
from mpl_toolkits.mplot3d import Axes3D
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
```
## Load Data
### Train Data Highway
```
X_train = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_train_images.npy')
train_targets = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_train_targets.npy')
paths_train = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_train_filepaths.npy')
meteo_train = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_train_meteo.npy')
meteo_train = meteo_train.reshape(meteo_train.shape[0], 4)
# NOTE: the KNMI arrays below overwrite the highway arrays loaded above;
# comment out one of the two blocks depending on which dataset you want to train on.
X_train = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/knmi/knmi_train_images.npy')
train_targets = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/knmi/knmi_train_targets.npy')
paths_train = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/knmi/knmi_train_filepaths.npy')
meteo_train = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/knmi/knmi_train_meteo.npy')
meteo_train = meteo_train.reshape(meteo_train.shape[0], 4)
```
### Validation Data
```
X_validation = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_val_images.npy')
validation_targets = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_val_targets.npy')
paths_validation = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_val_filepaths.npy')
meteo_validation = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/meteo/highway/split/highway_val_meteo.npy')
meteo_validation = meteo_validation.reshape(meteo_validation.shape[0], 4)
# Test set
test_features = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/test_images.npy')
test_targets = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/test_targets.npy')
test_filepaths = np.load('/Volumes/TIMPP/UnusedKNMI/numpyfiles/test_filepaths.npy')
X_train = X_train[:3000]
train_targets = train_targets[:3000]
paths_train = paths_train[:3000]
meteo_train = meteo_train[:3000]
```
## CNN Helpers and Data Preparation
### Class Proportions (for Weighted Sampling)
```
# Get the class proportions
class_counts = np.bincount(train_targets.astype(int))
total = len(train_targets)
proportion_0 = class_counts[0] / total
proportion_1 = class_counts[1] / total
proportion_2 = class_counts[2] / total
print('Class percentages:\nNo fog: {:.2f}%\nFog: {:.2f}%\nDense fog: {:.2f}%'.format(proportion_0 * 100,
proportion_1 * 100, proportion_2 * 100))
print(class_counts)
```
### Class Weighting
```
# List containing class probabilities
probabilities = [proportion_0, proportion_1, proportion_2]
reciprocal_weights = []
# Put weight at every index
for i in range(len(X_train)):
reciprocal_weights.append(probabilities[train_targets[i]])
# Inverse of probabilities as weights
weights = (1 / torch.Tensor(reciprocal_weights))
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights.double(), len(X_train))
# Inverse weights for all the datapoints
inverse_weights_class = 1 / torch.Tensor(probabilities)
# Inverse weights per class
inverse_weights = 1/ torch.Tensor(probabilities)
```
### Transformations
```
data_transforms = {
'train': transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop(80),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'validation': transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop(80),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
```
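As a quick check (assuming the image arrays are H x W x 3 uint8, which `ToPILImage` expects), applying the validation transform to a single image should yield a cropped, normalized tensor:
```
# Sanity check: expect torch.Size([3, 80, 80]) for an H x W x 3 uint8 input
sample = data_transforms['validation'](X_validation[0])
print(sample.shape)
```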
### Define Dataset Class
```
class KNMIDataset(Dataset):
def __init__(self, images, targets, filepaths, meteo, transforms=None):
self.transforms = transforms
self.images = images
self.targets = targets
self.filepaths = filepaths
self.meteo = meteo
print(len(self.images))
def __getitem__(self, index):
image = self.images[index]
if self.transforms != None:
image = self.transforms(image)
target = self.targets[index]
filepath = self.filepaths[index]
meteo = self.meteo[index]
return (image, target, index, filepath, meteo)
def __len__(self):
return len(self.targets)
```
### Create Datasets and Dataloaders
```
BATCH_SIZE = 164
# Datasets
train_dataset = KNMIDataset(X_train, train_targets, paths_train, meteo_train, transforms=data_transforms['train'])
validation_dataset = KNMIDataset(X_validation, validation_targets, paths_validation, meteo_validation, transforms=data_transforms['validation'])
# Data loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
validation_loader = DataLoader(dataset=validation_dataset, batch_size=BATCH_SIZE)
loaders = {'train': train_loader, 'validation': validation_loader}
```
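Note that the weighted sampler defined earlier is not wired into `train_loader` above (it uses `shuffle=True`); a minimal sketch of using it instead, since `sampler` and `shuffle` are mutually exclusive in `DataLoader`:
```
# Sketch: draw class-balanced batches with the WeightedRandomSampler
balanced_train_loader = DataLoader(dataset=train_dataset,
                                   batch_size=BATCH_SIZE,
                                   sampler=sampler)
```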
### Check Batch Iteration Size of Trainloader
```
# Iteration for one train/testloader batch
img, labels, idx, paths, meteo = next(iter(validation_loader))
inputs, labels = Variable(img), Variable(labels)
print('Loader image tensor shape: {}\nLoader targets tensor shape: {}'.format(inputs.size(), labels.size()))
```
## Plotting Functions
### Confusion Matrix
```
# Confusion matrix helper
def show_cm(targets, predictions):
cm = confusion_matrix(y_target=targets,
y_predicted=predictions,
binary=False)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.show()
```
### Loss Curve Plotting
```
def plot_loss_curves(training_loss, validation_loss):
"""
Plots loss curves after model training.
:param training_loss: List with training loss for every epoch.
:param validation_loss: List with validation loss for every epoch.
"""
train_plot, = plt.plot(training_loss, label='Training')
val_plot, = plt.plot(validation_loss, label='Validation')
plt.title('Loss curves (training/validation)')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(handles=[train_plot, val_plot])
plt.show()
```
### Certain/Uncertain Images Plotting
```
def plot_images(loss, image_index, filepaths, targets, predictions, phase, amount=5):
"""
Use to plot images that the model is most certain about and which it was most uncertain about.
:param loss: Tensor that has size of batch containing loss
:param filepaths: List with filepaths that point to where batch images are located
:param amount: Amount of images to show. Default: 5
"""
def loop_plot(indices, targets, predictions, filepaths, losses, phase):
fig=plt.figure(figsize=(20, 5))
columns = 5
rows = 1
# Determine phase and get image np array
if phase == 'train':
image_array = X_train
elif phase == 'validation':
image_array = X_validation
else:
image_array = test_features  # the test arrays are loaded as test_features above
print(filepaths)
# Loop over the data and plot 'amount' of images
for i, (index, target, prediction, loss) in enumerate(zip(indices, targets, predictions, losses)):
img = image_array[index]
fig.add_subplot(rows, columns, i + 1)
plt.title('target: {}, prediction: {} loss: {:.2f}'.format(target, prediction, loss))
plt.imshow(img)
plt.show()
def get_k_and_plot(loss, amount, targets, image_index, filepaths, predictions, phase, largest=True):
# Get all relevant data
values, indices = torch.topk(loss, amount, largest=largest)
targets = [targets[i].data[0] for i in list(indices.data.numpy().reshape((1, -1))[0])]
images_idx = [image_index[i] for i in list(indices.data.numpy().reshape((1, -1))[0])]
filepaths = [filepaths[i] for i in list(indices.data.numpy().reshape((1, -1))[0])]
predictions = [predictions[i] for i in list(indices.data.numpy().reshape((1, -1))[0])]
loss = [loss.data[i] for i in list(indices.data.numpy().reshape((1, -1))[0])]
# Show images (uncertain/certain)
loop_plot(images_idx, targets, predictions, filepaths, loss, phase)
print('Top {} most uncertain images'.format(amount))
get_k_and_plot(loss, amount, targets, image_index, filepaths, predictions, phase, largest=True)
print('Top {} most certain images'.format(amount))
get_k_and_plot(loss, amount, targets, image_index, filepaths, predictions, phase, largest=False)
def get_average_accuracy(predictions, targets):
# Lists for holding corrects
no_fog_correct = 0
light_fog_correct = 0
dense_fog_correct = 0
for pred, target in zip(predictions, targets):
if pred == 0 and target == 0:
no_fog_correct += 1
elif pred == 1 and target == 1:
light_fog_correct += 1
elif pred == 2 and target == 2:
dense_fog_correct += 1
# Validation counts
total = np.bincount(validation_targets)
no_fog_total = total[0]
light_fog_total = total[1]
dense_fog_total = total[2]
# Accuracy per class
acc_no_fog = no_fog_correct / no_fog_total
acc_light = light_fog_correct / light_fog_total
acc_dense = dense_fog_correct / dense_fog_total
average_acc = (acc_no_fog + acc_light + acc_dense) / 3 * 100
return average_acc
```
## Model Training
### Main Training Function
```
def train_model(model, criterion, optimizer, num_epochs):
"""
Does the actual training of the models.
:param model: Model object specified in 'run_model'.
:param criterion: Optimization criterion/loss.
:param optimizer: Type of optimizer.
:param num_epochs: Number of epochs to train.
"""
start = time.time()
# For storing loss for curve, best model and best accuracy
train_loss, validation_loss = [],[]
best_model = model
best_accuracy = 0.0
best_f1macro = 0.0
for epoch in range(num_epochs):
# Running loss and correct predictions
running_loss_train = 0.0
running_correct_train = 0.0
running_loss_val = 0.0
running_correct_val = 0.0
epoch_validation_targets = []
epoch_validation_predictions = []
for phase in ['train', 'validation']:
# Change model mode according to phase
if phase == 'train':
model.train()
else:
model.eval()
# Iterate over batches in loader
for i, (image_tensor, label_tensor, image_index, filepaths, meteo) in enumerate(loaders[phase]):
features = Variable(image_tensor)
targets = Variable(label_tensor.view(-1))
meteo_features = Variable(meteo.type(torch.FloatTensor))
# Forward + Backward + Optimize
if phase == 'train':
optimizer.zero_grad()
outputs = model(features, meteo_features)
# Get prediction index and no. correct predictions
_, predictions = torch.max(outputs.data, 1)
# Careful here: the test loader must fetch exactly the number of batches in the validation data
correct = torch.sum(predictions == targets.data)
# Loss and optimization
loss = criterion(outputs, targets)
# Average the loss
total_loss = torch.mean(loss)
# Only do backpropagation if in the training phase
if phase == 'train':
total_loss.backward()
optimizer.step()
# Running loss and number of correct predictions
if phase == 'train':
running_loss_train += total_loss.data[0]
running_correct_train += correct
else:
running_loss_val += total_loss.data[0]
running_correct_val += correct
epoch_validation_targets.extend(list(targets.data))
epoch_validation_predictions.extend(list(predictions))
# Plot images in validation phase
# plot_images(loss, image_index, filepaths, targets, predictions, phase)
# If model is in training phase, show loss every N iterations
if (i+1) % 5 == 0:
if phase == 'train':
print ('Epoch {}/{}, Iteration {}/{} Train Running Loss: {:.4f}'.format(epoch+1, num_epochs, i+1,
len(X_train)//BATCH_SIZE,
running_loss_train / i))
# plot_images(loss, image_index, filepaths, targets, predictions, phase)
# Epoch losses and epoch train accuracies
epoch_train_loss = running_loss_train / (len(X_train)//BATCH_SIZE)
epoch_train_accuracy = (running_correct_train / (len(X_train)//BATCH_SIZE)) / BATCH_SIZE * 100
epoch_val_loss = running_loss_val / (len(X_validation)//BATCH_SIZE)
epoch_val_accuracy = running_correct_val / len(X_validation) * 100
# F1-score
f1_macro = f1_score(epoch_validation_targets, epoch_validation_predictions, average='macro')
f1_micro = f1_score(epoch_validation_targets, epoch_validation_predictions, average='micro')
precision = precision_score(epoch_validation_targets, epoch_validation_predictions, average='macro')
recall = recall_score(epoch_validation_targets, epoch_validation_predictions, average='macro')
# Print the average epoch loss and the average prediction accuracy
print('\nEpoch {}/{}, Train Loss: {:.4f}, Train Accuracy: {:.4f}%\n'
'Validation Loss: {:.4f}, Validation Accuracy: {:.4f}%, f1-score {:.4f}\n'.format(epoch + 1,
num_epochs, epoch_train_loss, epoch_train_accuracy,
epoch_val_loss, epoch_val_accuracy, f1_macro))
# Save best model and best accuracy
if phase == 'validation':
if epoch_val_accuracy > best_accuracy:
best_accuracy = epoch_val_accuracy
best_model = copy.deepcopy(model)
# Show the confusion matrix for validation targets/predictions
show_cm(epoch_validation_targets, epoch_validation_predictions)
# Append losses
train_loss.append(epoch_train_loss)
validation_loss.append(epoch_val_loss)
# Elapsed time and best accuracy
elapsed_time = time.time() - start
print('Training was completed in {:.0f}m {:.0f}s\n'.format(elapsed_time//60, elapsed_time%60))
print('Best validation accuracy: {:4f}%'.format(best_accuracy))
# Plot loss curves
# plot_loss_curves(train_loss, validation_loss)
# Return the best model
return best_model
```
### Run Model Function
```
from sklearn.metrics import f1_score, precision_score, recall_score # metrics used by train_model above
import torch.optim as optim # optimizer constructors used below
from torchvision import models # pretrained architectures (resnet18)
def run_model(model, epochs, learning_rate, train_from_layer=False, last_layer_trained=False, not_self_defined=True):
'''
Configures model object and then calls 'train_model' for model training.
:param train_from_layer: Specify number of layers before fully-connected to also be finetuned. Default is False,
which will just train the fc layers. Give a number to specify number of layers before that.
:param model: This is a (pre-trained) model that will be further finetuned.
:param epochs: Number of epochs to train.
:param learning_rate: Learning rate for the parameters.
:param not_self_defined: True if model architecture is used from torchvision. False if model is self-defined.
'''
if not_self_defined:
# Set all parameter training to false
for parameter in model.parameters():
parameter.requires_grad = False
# Select number of pre-trained layers to finetune
if train_from_layer != False:
ct = 0
for name, child in model.named_children():
ct += 1
if ct > train_from_layer:
for name2, params in child.named_parameters():
params.requires_grad = True
# Get parameters that need finetuning
optim_params = filter(lambda p: p.requires_grad, model.parameters())
# Adjust final layer to number of classes if last layer has not been trained yet
if last_layer_trained == False:
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, TARGET_SIZE)
optim_params = model.fc.parameters()
# Train all parameters if model is not predefined from torchvision
else:
optim_params = filter(lambda p: p.requires_grad, model.parameters())
if torch.cuda.is_available():
inverse_weights = inverse_weights.cuda()
model = model.cuda()
# Optimizers and loss criterions
criterion = nn.CrossEntropyLoss(reduce=False, weight=inverse_weights)
optimizer = optim.Adam(optim_params, lr=learning_rate)
# Train and save
trained_model = train_model(model, criterion, optimizer, epochs)
return trained_model
input_size = 4
hidden_size = 100
output_size = 3
class meteo_NN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, num_layers):
super(meteo_NN, self).__init__()
self.num_layers = num_layers
# Input layer
self.fc_input = nn.Linear(input_size, hidden_size)
# Hidden size dependent on input
if num_layers == 1:
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc_last = nn.Linear(hidden_size, output_size)
elif num_layers == 2:
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, hidden_size)
self.fc_last = nn.Linear(hidden_size, output_size)
elif num_layers == 3:
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, hidden_size)
self.fc4 = nn.Linear(hidden_size, hidden_size)
self.fc_last = nn.Linear(hidden_size, output_size)
else:
raise ValueError('This net only takes a maximum of 3 hidden layers as input.')
self.relu = nn.ReLU()
def forward(self, x):
x = self.fc_input(x)
x = self.relu(x)
if self.num_layers == 1:
x = self.fc2(x)
x = self.relu(x)
x = self.fc_last(x)
elif self.num_layers == 2:
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
x = self.relu(x)
x = self.fc_last(x)
else:
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
x = self.relu(x)
x = self.fc4(x)
x = self.relu(x)
x = self.fc_last(x)
return x
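# Optional sanity check (hypothetical batch: 8 samples with 4 meteo features each);
# confirms the net maps (batch, input_size) -> (batch, output_size)
sanity_net = meteo_NN(input_size=4, hidden_size=100, output_size=3, num_layers=2)
print(sanity_net(torch.randn(8, 4)).shape) # expected: torch.Size([8, 3])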
class resnet18_meteo(nn.Module):
def __init__(self, resnet18, meteo_NN, num_classes):
super(resnet18_meteo, self).__init__()
# Respectively a torchvision resnet-18 and a 1-hidden layer NN
self.resnet_CNN = resnet18
self.meteo_net = meteo_NN
# Sizes of the FC layers of both NN's
self.len_fc_resnet = self.resnet_CNN.fc.in_features
self.len_fc_meteo = self.meteo_net.fc_last.out_features
print(self.len_fc_meteo)
# Remove FC layer from the resnet
self.modules=list(self.resnet_CNN.children())[:-1]
self.resnet18_convblocks= nn.Sequential(*self.modules)
# Fully connected layer is now size resnet FC + meteo FC
self.fc = nn.Linear(self.len_fc_resnet + self.len_fc_meteo, num_classes)
def forward(self, img_x, meteo_x):
# Both should be flattened layers at end of networks
img_x = self.resnet18_convblocks(img_x)
meteo_x = self.meteo_net(meteo_x)
# Flatten convolutional features
img_x_flattened = img_x.view(img_x.size(0), -1)
# Concat the outputs of CNN and meteo-NN in fully connected layer
out = torch.cat([img_x_flattened, meteo_x], dim=1)
out = self.fc(out)
return out
# Define meteo feedforward net
meteo_net = meteo_NN(input_size, hidden_size, output_size, 2)
# Define Resnet and remove FC layer
resnet18 = models.resnet18(pretrained=True)
resnet18.avgpool = nn.AdaptiveAvgPool2d(1)
# Meteo + resnet18
meteo_resnet = resnet18_meteo(resnet18, meteo_net, 3)
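# Optional smoke test (hypothetical inputs: two 224x224 RGB images plus 4 meteo features each)
dummy_img, dummy_meteo = torch.randn(2, 3, 224, 224), torch.randn(2, 4)
print(meteo_resnet(dummy_img, dummy_meteo).shape) # expected: torch.Size([2, 3])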
def train_FFN(model, criterion, optimizer, num_epochs):
"""
Does the actual training of the models.
:param model: Model object specified in 'run_model'.
:param criterion: Optimization criterion/loss.
:param optimizer: Type of optimizer.
:param num_epochs: Number of epochs to train.
"""
start = time.time()
# For storing loss for curve, best model and best accuracy
train_loss, validation_loss = [],[]
best_accuracy = 0.0
best_avg_accuracy = 0.0
best_f1macro = 0.0
best_epoch_avg = 0
best_epoch_f1 = 0
best_model_avg = model
best_model_f1 = model
for epoch in range(num_epochs):
# Running loss and correct predictions
running_loss_train = 0.0
running_correct_train = 0.0
running_loss_val = 0.0
running_correct_val = 0.0
epoch_validation_targets = []
epoch_validation_predictions = []
validation_predictions = []
validation_targets = []
for phase in ['train', 'validation']:
# Change model mode according to phase
if phase == 'train':
model.train()
else:
model.eval()
# Iterate over batches in loader
for i, (image_tensor, label_tensor, image_index, filepaths, meteo) in enumerate(loaders[phase]):
features = Variable(meteo.type(torch.FloatTensor))
targets = Variable(label_tensor.view(-1))
# Forward + Backward + Optimize
if phase == 'train':
optimizer.zero_grad()
outputs = model(features)
# Get prediction index and no. correct predictions
_, predictions = torch.max(outputs.data, 1)
# Note: make sure the test loader fetches exactly the number of batches in the validation data
correct = torch.sum(predictions == targets.data)
if phase == 'validation':
validation_predictions.extend(predictions.cpu().numpy())
validation_targets.extend(targets.data.cpu().numpy())
# Loss and optimization
loss = criterion(outputs, targets)
# Average the loss
total_loss = torch.mean(loss)
# Only do backpropagation if in the training phase
if phase == 'train':
total_loss.backward()
optimizer.step()
# Running loss and number of correct predictions
if phase == 'train':
running_loss_train += total_loss.item()
running_correct_train += correct
else:
running_loss_val += total_loss.item()
running_correct_val += correct
epoch_validation_targets.extend(list(targets.data))
epoch_validation_predictions.extend(list(predictions))
# Plot images in validation phase
# plot_images(loss, image_index, filepaths, targets, predictions, phase)
# If model is in training phase, show loss every N iterations
if (i+1) % 50 == 0:
if phase == 'train':
print ('Epoch {}/{}, Iteration {}/{} Train Running Loss: {:.4f}'.format(epoch+1, num_epochs, i+1,
len(X_train)//BATCH_SIZE,
running_loss_train / (i+1)))
# plot_images(loss, image_index, filepaths, targets, predictions, phase)
# Epoch losses and epoch train accuracies
epoch_train_loss = running_loss_train / (len(X_train)//BATCH_SIZE)
epoch_train_accuracy = (running_correct_train / (len(X_train)//BATCH_SIZE)) / BATCH_SIZE * 100
epoch_val_loss = running_loss_val / (len(X_validation)//BATCH_SIZE)
epoch_val_accuracy = running_correct_val / len(X_validation) * 100
# F1-score
f1_macro = f1_score(epoch_validation_targets, epoch_validation_predictions, average='macro')
f1_micro = f1_score(epoch_validation_targets, epoch_validation_predictions, average='micro')
average_accuracy = get_average_accuracy(validation_predictions, validation_targets)
# Print the average epoch loss and the average prediction accuracy
print('\nEpoch {}/{}, Train Loss: {:.4f}, Train Accuracy: {:.4f}%\n'
'Validation Loss: {:.4f}, Validation Overall Accuracy: {:.4f}%, Validation avg acc: {:.4f}%, f1-score {:.4f}\n'.format(epoch + 1,
num_epochs, epoch_train_loss, epoch_train_accuracy,
epoch_val_loss, epoch_val_accuracy, average_accuracy, f1_macro))
# Save best model and best accuracy
if phase == 'validation':
if epoch_val_accuracy > best_accuracy:
best_accuracy = epoch_val_accuracy
best_model = copy.deepcopy(model)
if f1_macro > best_f1macro:
best_f1macro = f1_macro
best_epoch_f1 = epoch
best_model_f1 = copy.deepcopy(model)
if average_accuracy > best_avg_accuracy:
best_avg_accuracy = average_accuracy
best_epoch_avg = epoch
best_model_avg = copy.deepcopy(model)
# Show the confusion matrix for validation targets/predictions
show_cm(epoch_validation_targets, epoch_validation_predictions)
# Append losses
train_loss.append(epoch_train_loss)
validation_loss.append(epoch_val_loss)
checkpoint = {
'epoch': epoch + 1,
'best_epoch_f1': best_epoch_f1,
'best_epoch_avg': best_epoch_avg,
'state_dict': model.state_dict(),
'best_f1': best_f1macro,
'best_accuracy': best_accuracy,
'optimizer': optimizer.state_dict(),
'best_model_avg': best_model_avg,
'best_model_f1': best_model_f1,
'validation_loss' : validation_loss,
'train_loss' : train_loss,
'best_avg_acc': best_avg_accuracy
}
# Elapsed time and best accuracy
elapsed_time = time.time() - start
print('Training was completed in {:.0f}m {:.0f}s\n'.format(elapsed_time//60, elapsed_time%60))
print('Best validation accuracy: {:.4f}%'.format(best_accuracy))
print('Best validation f1-macro: {:.4f}'.format(best_f1macro))
print('Best average validation accuracy: {:.4f}%'.format(best_avg_accuracy))
# Return the best model
return checkpoint
meteo_input = 4
meteo_epochs = 1
#
accuracy_dict = {}
# Hyperparameters meteorological net tuning
lr_meteo = 0.05
num_nodes = [i for i in range(2, 12, 2)]
num_layers = [1,2,3]
meteo_num_epochs = 20
for layer_size in num_layers:
for node_size in num_nodes:
model = meteo_NN(input_size=meteo_input, hidden_size=node_size, output_size=3, num_layers=layer_size)
optim_params = filter(lambda p: p.requires_grad, model.parameters())
# Optimizers and loss criterions
criterion = nn.CrossEntropyLoss(reduce=False, weight=inverse_weights)
optimizer = optim.Adam(optim_params, lr=lr_meteo)
trained = train_FFN(model, criterion, optimizer, meteo_epochs)
if str(layer_size) in accuracy_dict.keys():
accuracy_dict[str(layer_size)][str(node_size)] = {'f1' : trained['best_f1'],
'avg_acc': trained['best_avg_acc']}
else:
accuracy_dict[str(layer_size)] = {str(node_size) : {'f1' : trained['best_f1'],
'avg_acc': trained['best_avg_acc']}}
print(accuracy_dict)
best_f1, best_avg_acc, nodes_f1, layers_f1, nodes_avg, layers_avg = 0, 0, 0, 0, 0, 0
for layer_size, nodes_dict in accuracy_dict.items():
for node_size, metrics in nodes_dict.items():
if metrics['f1'] > best_f1:
best_f1 = metrics['f1']
nodes_f1 = node_size
layers_f1 = layer_size
if metrics['avg_acc'] > best_avg_acc:
best_avg_acc = metrics['avg_acc']
nodes_avg = node_size
layers_avg = layer_size
print('Best f1 with {} layers and {} nodes. F1-macro: {}'.format(layers_f1, nodes_f1, best_f1))
print('Best average accuracy with {} layers and {} nodes. Avg accuracy: {}'.format(layers_avg, nodes_avg, best_avg_acc))
```
### Resnet 18
```
lr_classifier = 2e-4
lr_tuner = 1e-4
# Train last FC layer of resnet 18
resnet_18 = models.resnet18(pretrained=True)
resnet_18.avgpool = nn.AdaptiveAvgPool2d(1)
resnet_18_trained = run_model(resnet_18, 10, lr_classifier, False)
# Tune also the last convolutional layer
# resnet_18_tuned = run_model(resnet_18_trained, 15, lr_tuner, train_from_layer=7)
torch.save(resnet_18_trained, '../models/trainedModels/owntrained/best-resnet-18.pt')
```
## Pre-trained Model Loader
```
def load_model(filepath):
'''
Loads a trained model.
:param filepath: Path to the trained model.
'''
loaded_model = torch.load(filepath, map_location=lambda storage, loc: storage)
return loaded_model
TRAINED_MODELS_DIR = '/Volumes/TIMPP/TrainedModels'
# current_model = load_model('/Volumes/TIMPP/TrainedModels/knmi-images/resnet-18/Resnet2/best-resnet-18_tuned.pt')
current_model_trained = load_model(TRAINED_MODELS_DIR + '/knmi-highway/resnet-18/classifier/KNMIhighway_classifier.pth.tar')
current_model_tuned = load_model(TRAINED_MODELS_DIR + '/knmi-highway/resnet-18/tuner/bestKNMIHighway_tuner.pth.tar')
current_model_tuned = current_model_tuned['best_model']
# type(current_model_tuned.load_state_dict())
```
## Test Model Function
### Test Dataset and Dataloader
```
test_dataset = KNMIDataset(test_features, test_targets, test_filepaths, transforms=data_transforms['validation'])
test_dataloader = DataLoader(dataset=test_dataset, batch_size=len(test_features))
import torch
current_model_train = load_model('../../../../Downloads/checkpoint123.pth.tar')
current_model_train['best_epoch_avg']
```
### Model Test Function
```
def test_model(model, dataloader):
"""
Tests a specified model on all the manually labeled highway camera
images.
:param model: Trained model to evaluate
:param dataloader: DataLoader yielding the entire test set in a single batch
"""
test_images, test_targets, idx, test_filepaths = next(iter(dataloader))
# Loss criterion
criterion = nn.CrossEntropyLoss(reduce=False)
# Wrap tensors
features = Variable(test_images)
targets = Variable(test_targets)
total = len(targets)
model.eval()
# Feed test features into model
outputs = model(features)
# Loss and optimization
loss = criterion(outputs, targets)
# Get test predictions and number of correct predictions
_, predictions = torch.max(outputs.data, 1)
correct = torch.sum(predictions == targets.data)
corrects = predictions == targets.data
# Show the test images that were incorrectly predicted as class 0
for i, cor in enumerate(corrects):
if predictions[i] == 0 and cor == 0:
print(test_filepaths[i])
print(test_targets[i])
img = test_images[i]
plt.imshow(img)
plt.show()
image_indices = list(range(0, total))
# plot_images(loss, image_indices, test_filepaths, targets, predictions, phase='test')
test_accuracy = correct.item() / total * 100
print('Test accuracy: {:.2f}%'.format(test_accuracy))
show_cm(list(targets.data), list(predictions))
test_model(current_model_tuned, test_dataloader)
```
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df1 = pd.read_csv(r"E:\EYE DATASET\Training_Labels.csv")
df1
df1.columns
# DR = Diabetic Retinopathy
# ARMD = Age-Related Macular Degeneration
import os
import random
import cv2
import matplotlib.pyplot as plt
df = pd.read_csv("full_df.csv")
df
df1
col = df1.columns
len(col)
a = {}
for i in range(1920):
for j in range(1,47):
if(df1['Disease_Risk'][i] == 1):
if(df1[col[j]][i] == 1 and col[j] != 'Disease_Risk'):
a[i] = col[j]
else:
a[i] = 0
a[0]
a
for i in range(len(a)):
if(a[i] != 'DR' and a[i]!='ARMD' and a[i]!='MYA'):
del a[i]
len(a)
a
count=0
DR = []
ARMD = []
MYA = []
NOR = []
for key,value in a.items():
if(value=='MYA'):
MYA.append(key)
if(value == 'DR'):
DR.append(key)
if(value=='ARMD'):
ARMD.append(key)
print(len(DR))
print(len(ARMD))
print(len(MYA))
print(len(NOR))
img_dir1 = r'E:\EYE DATASET\Training Dataset'
df_DR_filenames = pd.DataFrame(DR,columns=['filenames'])
df_DR_filenames['labels'] = "DR"
df_DR_filenames
for i in range(len(df_DR_filenames['filenames'])):
df_DR_filenames['filenames'][i] = df_DR_filenames['filenames'][i]+1
df_DR_filenames
type(str(df_DR_filenames['filenames'][0]))
x = []
for i in range(len(df_DR_filenames['filenames'])):
x.append(str(df_DR_filenames['filenames'][i])+'.png')
df_DR_filenames['filenames'] = x
df_DR_filenames
df_ARMD_filenames = pd.DataFrame(data = ARMD,columns = ['filenames'])
df_ARMD_filenames
for i in range(len(df_ARMD_filenames['filenames'])):
df_ARMD_filenames['filenames'][i] = df_ARMD_filenames['filenames'][i] + 1
df_ARMD_filenames['filenames']
x = []
for i in range(len(df_ARMD_filenames['filenames'])):
x.append(str(df_ARMD_filenames['filenames'][i])+'.png')
df_ARMD_filenames['filenames'] = x
x
df_ARMD_filenames['labels'] = 'ARMD'
df_ARMD_filenames
df_MYA_filenames = pd.DataFrame(data = MYA,columns = ['filenames'])
for i in range(len(df_MYA_filenames['filenames'])):
df_MYA_filenames['filenames'][i] = df_MYA_filenames['filenames'][i] + 1
x = []
for i in range(len(df_MYA_filenames['filenames'])):
x.append(str(df_MYA_filenames['filenames'][i]))
df_MYA_filenames['filenames'] = x
df_MYA_filenames['labels'] = 'Myopia'
df_MYA_filenames
x = []
for i in range(len(df_MYA_filenames['filenames'])):
x.append(df_MYA_filenames['filenames'][i] + '.png')
df_MYA_filenames['filenames'] = x
df_MYA_filenames
df_combined1 = df_DR_filenames.append([df_ARMD_filenames,df_MYA_filenames],ignore_index = True)
df_combined1 = df_combined1.sample(345)
df_combined1 = df_combined1.reset_index(drop=True)
df_combined1
a = np.array(df_combined1.filenames)
paths = []
type(paths)
for i in range(345):
img = a[i]
image = os.path.join(img_dir1, img)
paths.append(image)
paths
y2 = []
for i in df_combined1.labels:
if(i=='ARMD'):
y2.append(1)
elif(i=='Myopia'):
y2.append(2)
elif(i=='DR'):
y2.append(3)
y2 = np.array(y2)
y2
df
img_dir2 = r'E:\EYE DATASET\preprocessed_images'
df = df.iloc[:,1:7]
df.head()
s1 = df['Left-Diagnostic Keywords']
s1.head()
s2 = df['Right-Diagnostic Keywords']
s2.head()
for i in range(6392):
if 'age-related' in s1[i]:
s1[i] = 'ARMD'
if 'cataract' in s1[i]:
s1[i] = 'Cataract'
if 'pathological myopia' in s1[i]:
s1[i] = 'Myopia'
if 'normal' in s1[i]:
s1[i] = 'Normal'
if 'glaucoma' in s1[i]:
s1[i] = 'Glaucoma'
if 'hypertensive' in s1[i]:
s1[i] = 'Hypertension'
if 'diabetic' in s1[i]:
s1[i] = 'Diabetic Retinopathy'
df_left_arm = df[df['Left-Diagnostic Keywords'] == 'ARMD']
df_left_arm.head()
for i in range(6392):
if 'age-related' in s2[i]:
s2[i] = 'ARMD'
if 'cataract' in s2[i]:
s2[i] = 'Cataract'
if 'pathological myopia' in s2[i]:
s2[i] = 'Myopia'
if 'normal' in s2[i]:
s2[i] = 'Normal'
if 'glaucoma' in s2[i]:
s2[i] = 'Glaucoma'
if 'hypertensive' in s2[i]:
s2[i] = 'Hypertension'
if 'diabetic' in s2[i]:
s2[i] = 'Diabetic Retinopathy'
df_rt_arm = df[df['Right-Diagnostic Keywords'] == 'ARMD']
df_rt_arm.head()
df_arm_filenames = df_left_arm['Left-Fundus'].append(df_rt_arm['Right-Fundus'], ignore_index=True)
df_arm_filenames.head()
len(df_arm_filenames)
df_left_nor = df[df['Left-Diagnostic Keywords'] == 'Normal']
df_left_nor.head()
df_rt_nor = df[df['Right-Diagnostic Keywords'] == 'Normal']
df_rt_nor.head()
df_nor_filenames = df_left_nor['Left-Fundus'].append(df_rt_nor['Right-Fundus'],ignore_index=True)
df_nor_filenames
len(df_nor_filenames)
df_left_myo = df[df['Left-Diagnostic Keywords'] == 'Myopia']
df_left_myo.head()
df_rt_myo = df[df['Right-Diagnostic Keywords'] == 'Myopia']
df_rt_myo.head()
df_myo_filenames = df_left_myo['Left-Fundus'].append(df_rt_myo['Right-Fundus'],ignore_index=True)
df_myo_filenames
df_left_drp = df[df['Left-Diagnostic Keywords'] == 'Diabetic Retinopathy']
df_left_drp.head()
df_rt_drp = df[df['Right-Diagnostic Keywords'] == 'Diabetic Retinopathy']
df_rt_drp.head()
df_drp_filenames = df_left_drp['Left-Fundus'].append(df_rt_drp['Right-Fundus'],ignore_index=True)
df_drp_filenames
df_nor_filenames = pd.DataFrame(df_nor_filenames, columns = ["filename"])
df_nor_filenames["label"] = "Normal"
df_nor_filenames.head()
df_myo_filenames = pd.DataFrame(df_myo_filenames, columns = ["filename"])
df_myo_filenames["label"] = "Myopia"
df_myo_filenames.head()
df_drp_filenames = pd.DataFrame(df_drp_filenames, columns = ["filename"])
df_drp_filenames["label"] = "Diabetic_Retinopathy"
df_drp_filenames.head()
df_arm_filenames = pd.DataFrame(df_arm_filenames, columns = ["filename"])
df_arm_filenames["label"] = "ARMD"
df_arm_filenames.head()
len(df_arm_filenames)
df_nor_filenames = df_nor_filenames.sample(600)
len(df_nor_filenames)
len(df_drp_filenames)
len(df_myo_filenames)
print(len(ARMD))
print(len(MYA))
print(len(DR))
print(f"ARMD = {34+551}")
print(f"MYA = {71+457}")
print(f"DR = {240+91}")
print(f"NOR = {600}")
df_combined = df_arm_filenames.append([df_nor_filenames,df_drp_filenames,df_myo_filenames],ignore_index = True)
df_combined
df_combined = df_combined.sample(1699)
df_combined = df_combined.reset_index(drop=True)
df_combined
a = np.array(df_combined.filename)
path1 = []
type(path1)
for i in range(1699):
img = a[i]
image = os.path.join(img_dir2, img)
path1.append(image)
len(path1)
path1
y1 = []
for i in df_combined.label:
if(i=='Normal'):
y1.append(0)
elif(i=='ARMD'):
y1.append(1)
elif(i=='Myopia'):
y1.append(2)
elif(i=='Diabetic_Retinopathy'):
y1.append(3)
y1 = np.array(y1)
y1
len(y1)
paths = np.concatenate((path1,paths))
paths
y = np.concatenate((y1,y2))
data = []
for i in range(2044):
img = paths[i]
image = cv2.imread(img)
image = cv2.resize(image,(224,224))
data.append(image)
data = np.array(data)
data = data/255
x = data
print(len(x))
len(y)
from sklearn.model_selection import train_test_split
x_train,x_val,y_train,y_val = train_test_split(x,y,test_size=0.1)
x_val,x_test,y_val,y_test = train_test_split(x_val,y_val,test_size=0.5)
print(len(x_train))
print(len(x_val))
print(len(x_test))
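# Optional check of the class balance in the training split
# (labels: 0 = Normal, 1 = ARMD, 2 = Myopia, 3 = DR)
import collections
print(collections.Counter(y_train))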
```
### MobileNet V2 Model
```
import numpy as np
import cv2
import PIL.Image as Image
import os
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
feature_extractor_model = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
pretrained_model_without_top_layer = hub.KerasLayer(
feature_extractor_model, input_shape=(224, 224, 3), trainable=False)
num_of_classes = 4
model1 = tf.keras.Sequential([
pretrained_model_without_top_layer,
tf.keras.layers.Dense(num_of_classes)
])
model1.summary()
model1.compile(
optimizer="nadam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model1.fit(x_train, y_train, epochs=15)
model1.evaluate(x_test,y_test)
model1.evaluate(x_val,y_val)
y_pre = model1.predict(x_test)
y_pred = [np.argmax(i) for i in y_pre]
y_pred[:10]
y_test[:10]
len(x_test)
from sklearn.metrics import classification_report
print(classification_report(y_pred,y_test))
```
### Resnet 50
```
num_classes = 4
model2 = tf.keras.Sequential([
hub.KerasLayer("https://tfhub.dev/tensorflow/resnet_50/classification/1",
trainable=False, input_shape=(224, 224, 3)),
tf.keras.layers.Dense(num_classes) # logits; the loss below uses from_logits=True
])
model2.summary()
model2.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model2.fit(x_train, y_train, epochs=15)
model2.evaluate(x_test,y_test)
model2.evaluate(x_val,y_val)
y_pre = model2.predict(x_test)
y_pred = [np.argmax(i) for i in y_pre]
y_pred[:10]
y_test[:10]
from sklearn.metrics import classification_report
print(classification_report(y_pred,y_test))
```
# BSSN Quantities in terms of ADM Quantities
## Author: Zach Etienne
[comment]: <> (Abstract: TODO)
**Notebook Status:** <font color='orange'><b> Self-Validated </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**
### NRPy+ Source Code for this module: [BSSN_in_terms_of_ADM.py](../edit/BSSN/BSSN_in_terms_of_ADM.py)
## Introduction:
This module documents the conversion of ADM variables:
$$\left\{\gamma_{ij}, K_{ij}, \alpha, \beta^i\right\}$$
into BSSN variables
$$\left\{\bar{\gamma}_{i j},\bar{A}_{i j},\phi, K, \bar{\Lambda}^{i}, \alpha, \beta^i, B^i\right\},$$
in the desired curvilinear basis (given by `reference_metric::CoordSystem`). Then it rescales the resulting BSSNCurvilinear variables (as defined in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb)) into the form needed for solving Einstein's equations with the BSSN formulation:
$$\left\{h_{i j},a_{i j},\phi, K, \lambda^{i}, \alpha, \mathcal{V}^i, \mathcal{B}^i\right\}.$$
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules; set the desired output BSSN Curvilinear coordinate system to Spherical
1. [Step 2](#adm2bssn): Perform the ADM-to-BSSN conversion for 3-metric, extrinsic curvature, and gauge quantities
1. [Step 2.a](#adm2bssn_gamma): Convert ADM $\gamma_{ij}$ to BSSN $\bar{\gamma}_{ij}$; rescale to get $h_{ij}$
1. [Step 2.b](#admexcurv_convert): Convert the ADM extrinsic curvature $K_{ij}$ to BSSN $\bar{A}_{ij}$ and $K$; rescale to get $a_{ij}$, $K$.
1. [Step 2.c](#lambda): Define $\bar{\Lambda}^i$
1. [Step 2.d](#conformal): Define the conformal factor variable `cf`
1. [Step 3](#code_validation): Code Validation against `BSSN.BSSN_in_terms_of_ADM` NRPy+ module
1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
```
# Step 1: Import needed core NRPy+ modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import sys # Standard Python modules for multiplatform OS-level functions
import BSSN.BSSN_quantities as Bq # NRPy+: This module depends on the parameter EvolvedConformalFactor_cf,
# which is defined in BSSN.BSSN_quantities
# Step 1.a: Set DIM=3, as we're using a 3+1 decomposition of Einstein's equations
DIM=3
```
<a id='adm2bssn'></a>
# Step 2: Perform the ADM-to-BSSN conversion for 3-metric, extrinsic curvature, and gauge quantities \[Back to [top](#toc)\]
$$\label{adm2bssn}$$
Here we convert ADM quantities to their BSSN Curvilinear counterparts.
<a id='adm2bssn_gamma'></a>
## Step 2.a: Convert ADM $\gamma_{ij}$ to BSSN $\bar{\gamma}_{ij}$; rescale to get $h_{ij}$ \[Back to [top](#toc)\]
$$\label{adm2bssn_gamma}$$
We have (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
$$
\bar{\gamma}_{i j} = \left(\frac{\bar{\gamma}}{\gamma}\right)^{1/3} \gamma_{ij},
$$
where we always make the choice $\bar{\gamma} = \hat{\gamma}$.
After constructing $\bar{\gamma}_{ij}$, we rescale to get $h_{ij}$ according to the prescription described in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
$$
h_{ij} = (\bar{\gamma}_{ij} - \hat{\gamma}_{ij})/\text{ReDD[i][j]}.
$$
```
# Step 2: All ADM quantities were input into this function in the Spherical or Cartesian
# basis, as functions of r,th,ph or x,y,z, respectively. In Steps 1 and 2 above,
# we converted them to the xx0,xx1,xx2 basis, and as functions of xx0,xx1,xx2.
# Here we convert ADM quantities to their BSSN Curvilinear counterparts:
# Step 2.a: Convert ADM $\gamma_{ij}$ to BSSN $\bar{gamma}_{ij}$:
# We have (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
def gammabarDD_hDD(gammaDD):
global gammabarDD,hDD
if rfm.have_already_called_reference_metric_function == False:
print("BSSN.BSSN_in_terms_of_ADM.hDD_given_ADM(): Must call reference_metric() first!")
sys.exit(1)
# \bar{gamma}_{ij} = (\frac{\bar{gamma}}{gamma})^{1/3}*gamma_{ij}.
gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
gammabarDD = ixp.zerorank2()
hDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
gammabarDD[i][j] = (rfm.detgammahat/gammaDET)**(sp.Rational(1,3))*gammaDD[i][j]
hDD[i][j] = (gammabarDD[i][j] - rfm.ghatDD[i][j]) / rfm.ReDD[i][j]
```
<a id='admexcurv_convert'></a>
## Step 2.b: Convert the ADM extrinsic curvature $K_{ij}$ to BSSN quantities $\bar{A}_{ij}$ and $K={\rm tr}(K_{ij})$; rescale $\bar{A}_{ij}$ to get $a_{ij}$ \[Back to [top](#toc)\]
$$\label{admexcurv_convert}$$
Convert the ADM extrinsic curvature $K_{ij}$ to the trace-free extrinsic curvature $\bar{A}_{ij}$, plus the trace of the extrinsic curvature $K$, where (Eq. 3 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
\begin{align}
K &= \gamma^{ij} K_{ij} \\
\bar{A}_{ij} &= \left(\frac{\bar{\gamma}}{\gamma}\right)^{1/3} \left(K_{ij} - \frac{1}{3} \gamma_{ij} K \right)
\end{align}
After constructing $\bar{A}_{ij}$, we rescale to get $a_{ij}$ according to the prescription described in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
$$
a_{ij} = \bar{A}_{ij}/\text{ReDD[i][j]}.
$$
```
# Step 2.b: Convert the extrinsic curvature K_{ij} to the trace-free extrinsic
# curvature \bar{A}_{ij}, plus the trace of the extrinsic curvature K,
# where (Eq. 3 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
def trK_AbarDD_aDD(gammaDD,KDD):
global trK,AbarDD,aDD
if rfm.have_already_called_reference_metric_function == False:
print("BSSN.BSSN_in_terms_of_ADM.trK_AbarDD(): Must call reference_metric() first!")
sys.exit(1)
# \bar{gamma}_{ij} = (\frac{\bar{gamma}}{gamma})^{1/3}*gamma_{ij}.
gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
# K = gamma^{ij} K_{ij}, and
# \bar{A}_{ij} &= (\frac{\bar{gamma}}{gamma})^{1/3}*(K_{ij} - \frac{1}{3}*gamma_{ij}*K)
trK = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
trK += gammaUU[i][j]*KDD[i][j]
AbarDD = ixp.zerorank2()
aDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
AbarDD[i][j] = (rfm.detgammahat/gammaDET)**(sp.Rational(1,3))*(KDD[i][j] - sp.Rational(1,3)*gammaDD[i][j]*trK)
aDD[i][j] = AbarDD[i][j] / rfm.ReDD[i][j]
```
<a id='lambda'></a>
## Step 2.c: Assuming the ADM 3-metric $\gamma_{ij}$ is given as an explicit function of `(xx0,xx1,xx2)`, convert to BSSN $\bar{\Lambda}^i$; rescale to compute $\lambda^i$ \[Back to [top](#toc)\]
$$\label{lambda}$$
To define $\bar{\Lambda}^i$ we implement Eqs. 4 and 5 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf):
$$
\bar{\Lambda}^i = \bar{\gamma}^{jk}\left(\bar{\Gamma}^i_{jk} - \hat{\Gamma}^i_{jk}\right).
$$
The [reference_metric.py](../edit/reference_metric.py) module provides us with exact, closed-form expressions for $\hat{\Gamma}^i_{jk}$, so here we need only compute exact expressions for $\bar{\Gamma}^i_{jk}$, based on $\gamma_{ij}$ given as an explicit function of `(xx0,xx1,xx2)`. This is particularly useful when setting up initial data.
After constructing $\bar{\Lambda}^i$, we rescale to get $\lambda^i$ according to the prescription described in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
$$
\lambda^i = \bar{\Lambda}^i/\text{ReU[i]}.
$$
```
# Step 2.c: Define \bar{Lambda}^i (Eqs. 4 and 5 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
def LambdabarU_lambdaU__exact_gammaDD(gammaDD):
global LambdabarU,lambdaU
# \bar{Lambda}^i = \bar{gamma}^{jk}(\bar{Gamma}^i_{jk} - \hat{Gamma}^i_{jk}).
gammabarDD_hDD(gammaDD)
gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)
# First compute Christoffel symbols \bar{Gamma}^i_{jk}, with respect to barred metric:
GammabarUDD = ixp.zerorank3()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
for l in range(DIM):
GammabarUDD[i][j][k] += sp.Rational(1,2)*gammabarUU[i][l]*( sp.diff(gammabarDD[l][j],rfm.xx[k]) +
sp.diff(gammabarDD[l][k],rfm.xx[j]) -
sp.diff(gammabarDD[j][k],rfm.xx[l]) )
# Next evaluate \bar{Lambda}^i, based on GammabarUDD above and GammahatUDD
# (from the reference metric):
LambdabarU = ixp.zerorank1()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
LambdabarU[i] += gammabarUU[j][k] * (GammabarUDD[i][j][k] - rfm.GammahatUDD[i][j][k])
for i in range(DIM):
# We evaluate LambdabarU[i] here to ensure proper cancellations. If these cancellations
# are not applied, certain expressions (e.g., lambdaU[0] in StaticTrumpet) will
# cause SymPy's (v1.5+) CSE algorithm to hang
LambdabarU[i] = LambdabarU[i].doit()
lambdaU = ixp.zerorank1()
for i in range(DIM):
lambdaU[i] = LambdabarU[i] / rfm.ReU[i]
```
<a id='conformal'></a>
## Step 2.d: Define the conformal factor variable `cf` \[Back to [top](#toc)\]
$$\label{conformal}$$
We define the conformal factor variable `cf` based on the setting of the `"BSSN_quantities::EvolvedConformalFactor_cf"` parameter.
For example if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"phi"`, we can use Eq. 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf), which in arbitrary coordinates is written:
$$
\phi = \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right).
$$
Alternatively if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"chi"`, then
$$
\chi = e^{-4 \phi} = \exp\left(-4 \cdot \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right)
= \exp\left(-\frac{1}{3} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right) = \left(\frac{\gamma}{\bar{\gamma}}\right)^{-1/3}.
$$
Finally if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"W"`, then
$$
W = e^{-2 \phi} = \exp\left(-2 \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right) =
\exp\left(-\frac{1}{6} \log\left(\frac{\gamma}{\bar{\gamma}}\right)\right) =
\left(\frac{\gamma}{\bar{\gamma}}\right)^{-1/6}.
$$
```
# Step 2.d: Set the conformal factor variable cf, which is set
# by the "BSSN_quantities::EvolvedConformalFactor_cf" parameter. For example if
# "EvolvedConformalFactor_cf" is set to "phi", we can use Eq. 3 of
# [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf),
# which in arbitrary coordinates is written:
def cf_from_gammaDD(gammaDD):
global cf
# Compute the conformal factor cf from the determinants of gammaDD and gammabarDD.
gammabarDD_hDD(gammaDD)
gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)
gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
cf = sp.sympify(0)
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
# phi = \frac{1}{12} log(\frac{gamma}{\bar{gamma}}).
cf = sp.Rational(1,12)*sp.log(gammaDET/gammabarDET)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
# chi = exp(-4*phi) = exp(-4*\frac{1}{12}*(\frac{gamma}{\bar{gamma}}))
# = exp(-\frac{1}{3}*log(\frac{gamma}{\bar{gamma}})) = (\frac{gamma}{\bar{gamma}})^{-1/3}.
#
cf = (gammaDET/gammabarDET)**(-sp.Rational(1,3))
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
# W = exp(-2*phi) = exp(-2*\frac{1}{12}*log(\frac{gamma}{\bar{gamma}}))
# = exp(-\frac{1}{6}*log(\frac{gamma}{\bar{gamma}})) = (\frac{gamma}{bar{gamma}})^{-1/6}.
cf = (gammaDET/gammabarDET)**(-sp.Rational(1,6))
else:
print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.")
sys.exit(1)
```
<a id='betvet'></a>
## Step 2.e: Rescale $\beta^i$ and $B^i$ to compute $\mathcal{V}^i={\rm vet}^i$ and $\mathcal{B}^i={\rm bet}^i$, respectively \[Back to [top](#toc)\]
$$\label{betvet}$$
We rescale $\beta^i$ and $B^i$ according to the prescription described in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
\begin{align}
\mathcal{V}^i &= \beta^i/\text{ReU[i]}\\
\mathcal{B}^i &= B^i/\text{ReU[i]}.
\end{align}
```
# Step 2.e: Rescale beta^i and B^i according to the prescription described in
# the [BSSN in curvilinear coordinates tutorial notebook](Tutorial-BSSNCurvilinear.ipynb)
# (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
#
# \mathcal{V}^i &= beta^i/(ReU[i])
# \mathcal{B}^i &= B^i/(ReU[i])
def betU_vetU(betaU,BU):
global vetU,betU
if rfm.have_already_called_reference_metric_function == False:
print("BSSN.BSSN_in_terms_of_ADM.bet_vet(): Must call reference_metric() first!")
sys.exit(1)
vetU = ixp.zerorank1()
betU = ixp.zerorank1()
for i in range(DIM):
vetU[i] = betaU[i] / rfm.ReU[i]
betU[i] = BU[i] / rfm.ReU[i]
```
<a id='code_validation'></a>
# Step 3: Code Validation against `BSSN.BSSN_in_terms_of_ADM` module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the SymPy expressions for [UIUC initial data](Tutorial-ADM_Initial_Data-UIUC_BlackHole.ipynb) between
1. this tutorial and
2. the NRPy+ [BSSN.BSSN_in_terms_of_ADM](../edit/BSSN/BSSN_in_terms_of_ADM.py) module.
As no basis transformation is performed, we analyze these expressions in their native, Spherical coordinates.
```
# Step 3.a: Set the desired *output* coordinate system to Spherical:
par.set_parval_from_str("reference_metric::CoordSystem","Spherical")
rfm.reference_metric()
# Step 3.b: Set up initial data; assume UIUC spinning black hole initial data
import BSSN.UIUCBlackHole as uibh
uibh.UIUCBlackHole(ComputeADMGlobalsOnly=True)
# Step 3.c: Call above functions to convert ADM to BSSN curvilinear
gammabarDD_hDD( uibh.gammaSphDD)
trK_AbarDD_aDD( uibh.gammaSphDD,uibh.KSphDD)
LambdabarU_lambdaU__exact_gammaDD(uibh.gammaSphDD)
cf_from_gammaDD( uibh.gammaSphDD)
betU_vetU( uibh.betaSphU,uibh.BSphU)
# Step 3.d: Now load the BSSN_in_terms_of_ADM module and perform the same conversion
import BSSN.BSSN_in_terms_of_ADM as BitoA
BitoA.gammabarDD_hDD( uibh.gammaSphDD)
BitoA.trK_AbarDD_aDD( uibh.gammaSphDD,uibh.KSphDD)
BitoA.LambdabarU_lambdaU__exact_gammaDD(uibh.gammaSphDD)
BitoA.cf_from_gammaDD( uibh.gammaSphDD)
BitoA.betU_vetU( uibh.betaSphU,uibh.BSphU)
# Step 3.e: Perform the consistency check
print("Consistency check between this tutorial notebook and BSSN.BSSN_in_terms_of_ADM NRPy+ module: ALL SHOULD BE ZERO.")
print("cf - BitoA.cf = " + str(cf - BitoA.cf))
print("trK - BitoA.trK = " + str(trK - BitoA.trK))
# alpha is the only variable that remains unchanged:
# print("alpha - BitoA.alpha = " + str(alpha - BitoA.alpha))
for i in range(DIM):
print("vetU["+str(i)+"] - BitoA.vetU["+str(i)+"] = " + str(vetU[i] - BitoA.vetU[i]))
print("betU["+str(i)+"] - BitoA.betU["+str(i)+"] = " + str(betU[i] - BitoA.betU[i]))
print("lambdaU["+str(i)+"] - BitoA.lambdaU["+str(i)+"] = " + str(lambdaU[i] - BitoA.lambdaU[i]))
for j in range(DIM):
print("hDD["+str(i)+"]["+str(j)+"] - BitoA.hDD["+str(i)+"]["+str(j)+"] = "
+ str(hDD[i][j] - BitoA.hDD[i][j]))
print("aDD["+str(i)+"]["+str(j)+"] - BitoA.aDD["+str(i)+"]["+str(j)+"] = "
+ str(aDD[i][j] - BitoA.aDD[i][j]))
```
<a id='latex_pdf_output'></a>
# Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-BSSN_in_terms_of_ADM.pdf](Tutorial-BSSN_in_terms_of_ADM.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-BSSN_in_terms_of_ADM")
```
# MNIST Example
This demo is an adaptation of our [first `MNIST` themed demo](mnist_example.ipynb), which computes saliency maps for the model's actual prediction.
Here, we only analyze one input sample, but compute saliency maps for all of the model's output neurons, one at a time.
# Imports
```
import warnings
warnings.simplefilter('ignore')
%matplotlib inline
import numpy as np
import imp
import time
import keras
import keras.backend
import keras.models
import innvestigate
import innvestigate.applications
import innvestigate.applications.mnist
import innvestigate.utils as iutils
import innvestigate.utils.visualizations as ivis
eutils = imp.load_source("utils", "../utils.py")
mnistutils = imp.load_source("utils_mnist", "../utils_mnist.py")
```
# Data
Then, the MNIST data is loaded in its entirety, formatted according to the specifications of the keras backend.
```
# Load data
channels_first = keras.backend.image_data_format() == "channels_first"
data = mnistutils.fetch_data(channels_first) #returns x_train, y_train, x_test, y_test as numpy.ndarray
num_classes = len(np.unique(data[1]))
# Test samples for illustrations
images = [(data[2][i].copy(), data[3][i]) for i in range(num_classes)]
label_to_class_name = [str(i) for i in range(num_classes)]
```
# Model
We have prepared a simple model configuration to play around with.
```
# The line below currently configures an already pretrained network, which saves some time.
modelname = 'pretrained_plos_long_relu'
activation_type = 'relu'
input_range = [-1, 1]
epochs = 0
batch_size = None
create_model_kwargs = {}
```
Now, preprocess the data with respect to the model's requirements, build the model, and optionally train it for `epochs` epochs.
```
# Preprocess data
data_preprocessed = (mnistutils.preprocess(data[0], input_range), data[1],
mnistutils.preprocess(data[2], input_range), data[3])
# Create & (optionally) train model
model, modelp = mnistutils.create_model(channels_first, modelname, **create_model_kwargs)
mnistutils.train_model(modelp, data_preprocessed, batch_size=batch_size, epochs=epochs)
model.set_weights(modelp.get_weights())
```
# Analysis
Here, we will restrict the selection of methods to those analyzers where output neuron selection is a supported feature.
```
# Determine analysis methods and properties
methods = [
# NAME OPT.PARAMS POSTPROC FXN TITLE
# Show input
("input", {}, mnistutils.image, "Input"),
# Function
("gradient", {}, mnistutils.graymap, "Gradient"),
# Signal
("deconvnet", {}, mnistutils.bk_proj, "Deconvnet"),
("guided_backprop", {}, mnistutils.bk_proj, "Guided Backprop",),
("pattern.net", {}, mnistutils.bk_proj, "PatternNet"),
# Interaction
("lrp.z", {}, mnistutils.heatmap, "LRP-Z"),
("lrp.epsilon", {"epsilon": 1}, mnistutils.heatmap, "LRP-Epsilon"),
]
```
The main loop below will now instantiate the analyzer objects based on the loaded/trained model and the analyzers' parameterizations above and compute the analyses.
First, we pick an input image which will stay fixed for all methods. Then, we iterate over the model's output neurons instead of input images.
```
# Prepare input image
input_image_idx = 1
image, y = images[input_image_idx]
image = image[None, :, :, :]
# Create analyzers.
analyzers = []
print('Creating analyzer instances. ')
for method in methods:
analyzer = innvestigate.create_analyzer(method[0], # analysis method identifier
model, # model without softmax output
neuron_selection_mode="index", #THIS LINE ENABLES OUTPUT NEURON SELECTION
**method[1]) # optional analysis parameters
# Some analyzers require additional training on data; for the rest this call has no effect.
analyzer.fit(data_preprocessed[0],
pattern_type=activation_type,
batch_size=256, verbose=1)
analyzers.append(analyzer)
print('Running analyses.')
# Apply analyzers to trained model.
analysis = np.zeros([10, len(analyzers), 28, 28, 3])
text = []
for i in range(10):
print('Output Neuron {}: '.format(i), end='')
t_start = time.time()
# Predict label.
x = mnistutils.preprocess(image, input_range)
presm = model.predict_on_batch(x)[0] #forward pass without softmax
prob = modelp.predict_on_batch(x)[0] #forward pass with softmax
y_hat = prob.argmax()
# Save prediction info:
text.append(("%s" %label_to_class_name[y], # ground truth label
"%.2f" %presm[i], # pre-softmax logits
"%.2f" %prob[i], # probabilistic softmax output
"%s" %label_to_class_name[i] # predicted label
))
for aidx, analyzer in enumerate(analyzers):
is_input_analyzer = methods[aidx][0] == "input"
# Analyze.
a = analyzer.analyze(image if is_input_analyzer else x, i)
# Postprocess.
if not is_input_analyzer:
a = mnistutils.postprocess(a)
a = methods[aidx][2](a)
analysis[i, aidx] = a[0]
t_elapsed = time.time() - t_start
print('{:.4f}s'.format(t_elapsed))
```
Next, we visualize the analysis results.
```
# Plot the analysis.
grid = [[analysis[i, j] for j in range(analysis.shape[1])]
for i in range(analysis.shape[0])]
label, presm, prob, pred = zip(*text)
row_labels_left = [('label: {}'.format(label[i]),'neuron: {}'.format(pred[i])) for i in range(len(label))]
row_labels_right = [('logit: {}'.format(presm[i]),'prob: {}'.format(prob[i])) for i in range(len(label))]
col_labels = [''.join(method[3]) for method in methods]
eutils.plot_image_grid(grid, row_labels_left, row_labels_right, col_labels)
```
Each column shows the visualized results for a different analyzer and each row shows the analyses with respect to the same input sample. To the left of each row, the ground truth label `label` and the selected output `neuron` are shown. To the right, the model's probabilistic (softmax) output for that neuron is shown as `prob`, and the logit output just before the terminating softmax layer as `logit`. Note that all analyses have been performed based on the logit output (layer).
# Classical Computation on a Quantum Computer
## Contents
1. [Introduction](#intro)
2. [Consulting an Oracle](#oracle)
3. [Taking Out the Garbage](#garbage)
## 1. Introduction<a id="intro"></a>
One consequence of having a universal set of quantum gates is the ability to reproduce any classical computation. We simply need to compile the classical computation down to the Boolean logic gates we saw in *The Atoms of Computation*, and then reproduce these on a quantum computer.
This demonstrates an important fact about quantum computers: they can do anything a classical computer can do, and they can do so with at least the same computational complexity. Although it is not the intention to use quantum computers for tasks at which classical computers already excel, this is nevertheless a good demonstration that quantum computers can solve a general range of problems.
Moreover, problems that require a quantum solution often involve components that can be tackled with classical algorithms. In some cases these classical parts can be done on classical hardware. In many cases, however, the classical algorithm must be run on inputs that exist in a superposition state. This requires the classical algorithm to be run on quantum hardware. In this section we introduce some of the ideas used when doing this, starting with the compilation step sketched below.
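For instance, a classical AND gate can be reproduced reversibly with a Toffoli (`ccx`) gate; here is a minimal sketch of that compilation step:
```
from qiskit import QuantumCircuit

# AND via Toffoli: with qubit 2 starting in |0>, it ends in the state |q0 AND q1>
qc_and = QuantumCircuit(3)
qc_and.ccx(0, 1, 2)
qc_and.draw()
```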
## 2. Consulting an Oracle<a id="oracle"></a>
Many quantum algorithms are based around the analysis of some function $f(x)$. Often these algorithms simply assume the existence of some 'black box' implementation of this function, which we can give an input $x$ and receive the corresponding output $f(x)$. This is referred to as an *oracle*.
The advantage of thinking about oracles in this abstract way is that it lets us concentrate on the quantum techniques we use to analyze the function, rather than on the function itself.
To understand how an oracle works within a quantum algorithm, we need to be specific about how it is defined. One of the main forms an oracle takes is that of a *Boolean oracle*. These are described by the following unitary evolution,
$$ U_f \left|x , \bar 0 \right\rangle = \left|x, f(x)\right\rangle. $$
Here $\left|x , \bar 0 \right\rangle = \left|x \right\rangle \otimes \left|\bar 0 \right\rangle$ is used to represent a multi-qubit state consisting of two registers. The first register is in state $\left|x\right\rangle$, where $x$ is a binary representation of the input to our function. The number of qubits in this register is the number of bits required to represent the input.
The job of the second register is to similarly encode the output. Specifically, after $U_f$ is applied, the state of this register will be $\left|f(x)\right\rangle$, a binary representation of the output, and the register will have as many qubits as are required for this. The initial state $\left|\bar 0 \right\rangle$ for this register represents the state for which all qubits are $\left|0 \right\rangle$. For other initial states, applying $U_f$ will give different results. The specific results that arise will depend on how we define the unitary $U_f$.
Another form of oracle is the *phase oracle*, which is defined as follows,
$$ P_f \left|x \right\rangle = (-1)^{f(x)} \left|x \right\rangle, $$
where the output $f(x)$ is typically a simple bit value of $0$ or $1$.
Though this seems quite different in form from the Boolean oracle, it is another expression of the same basic idea. In fact, it can be realized using the same 'phase kickback' mechanism described in the previous section.
To see this, consider the Boolean oracle $U_f$ corresponding to the same function. This can be implemented as something that is essentially a generalized form of the controlled-NOT. It is controlled on the input register, such that it leaves the output bit in state $\left|0 \right\rangle$ for $f(x)=0$, and applies an $X$ to flip it to $\left|1 \right\rangle$ if $f(x)=1$. If the initial state of the output register is $\left|- \right\rangle$ instead of $\left|0 \right\rangle$, the effect of $U_f$ is to induce exactly the phase of $(-1)^{f(x)}$ that we need,
$$ U_f \left( \left|x \right\rangle \otimes \left| - \right\rangle \right) = (P_f \otimes I) \left( \left|x \right\rangle \otimes \left| - \right\rangle \right) $$
Since the $\left|- \right\rangle$ state of the output qubit is left unchanged by the whole process, it can safely be ignored. The net effect is that the phase oracle is simply implemented by the corresponding Boolean oracle, as in the sketch below.
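A minimal sketch of this kickback, assuming the single-bit function $f(x) = x$ so that $U_f$ is a single `cx` gate:
```
from qiskit import QuantumCircuit

qc_phase = QuantumCircuit(2)
# prepare the output qubit in the |-> state
qc_phase.x(1)
qc_phase.h(1)
# Boolean oracle for f(x) = x: a single cx from input to output
qc_phase.cx(0, 1)
# the phase (-1)^x is kicked back onto the input register; |-> itself is unchanged
qc_phase.draw()
```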
## 3. Taking Out the Garbage<a id="garbage"></a>
The functions evaluated by an oracle are typically ones that can be evaluated efficiently on a classical computer. However, the need to implement them in one of the forms shown above means that they must be implemented with quantum gates. This is not as simple as just taking the Boolean gates used by the classical algorithm and replacing them with their quantum counterparts.
One issue we must take care of is reversibility. A unitary of the form $U = \sum_x \left| f(x) \right\rangle \left\langle x \right|$ is only possible if each unique input $x$ results in a unique output $f(x)$, which is not true in general. However, we can force it to be true by including a copy of the input in the output. This is what leads us to the form of the Boolean oracle we saw earlier, $$ U_f \left|x,\bar 0 \right\rangle = \left| x,f(x) \right\rangle. $$
With the computation written as a unitary, we are able to consider the effect of applying it to a superposition state. For example, let us take the superposition over all possible inputs $x$ (unnormalized for simplicity). This results in a superposition of all possible input/output pairs,
$$ U_f \sum_x \left|x,0\right\rangle = \sum_x \left|x,f(x)\right\rangle. $$
When adapting classical algorithms, we must also take care that these superpositions behave as we need them to. Classical algorithms typically compute more than just the desired output; they also produce additional information along the way. Such leftovers of a computation do not pose a significant problem classically, and the memory they occupy can easily be recovered by deleting them. From a quantum perspective, however, things are not so simple.
For example, consider a classical algorithm that performs the following process, $$ V_f \left|x,\bar 0, \bar 0 \right\rangle = \left| x,f(x), g(x) \right\rangle. $$ Here we see a third register, used as a 'scratchpad' by the classical algorithm. We will refer to the information left in this register at the end of the computation as 'garbage', $g(x)$. Let us use $V_f$ to denote a unitary that implements the above.
Quantum algorithms are typically built on interference effects. The simplest such effect is to create a superposition using some unitary, and then remove it using the inverse of that unitary. The total effect of this is, of course, trivial. However, we must make sure that our quantum computer is at least capable of such trivial tasks.
For example, suppose some process within our quantum computation has given us the superposition state $\sum_x \left|x,f(x)\right\rangle$, and we need to return it to the state $\sum_x \left|x,0\right\rangle$. For this we can simply apply $U_f^\dagger$. The ability to apply this follows directly from knowing a circuit that applies $U_f$, since we need only replace each gate in the circuit with its inverse and reverse their order.
However, suppose we do not know how to implement $U_f$, but instead know how to implement $V_f$. This means we cannot apply $U_f^\dagger$ here, but we can use $V_f^\dagger$. Unfortunately, the presence of the garbage means this will not have the same effect.
For a clear example of this we can take a very simple case. We will restrict $x$, $f(x)$ and $g(x)$ to each consist of just one bit. We will also use $f(x) = x$ and $g(x) = x$, each of which can be achieved with a single `cx` gate controlled on the input register.
Specifically, the circuit implementing $U_f$ is the following single `cx` between the single bits of the input and output registers.
```
from qiskit import QuantumCircuit, QuantumRegister
input_bit = QuantumRegister(1, 'input')
output_bit = QuantumRegister(1, 'output')
garbage_bit = QuantumRegister(1, 'garbage')
Uf = QuantumCircuit(input_bit, output_bit, garbage_bit)
Uf.cx(input_bit[0], output_bit[0])
Uf.draw()
```
For $V_f$, where we also need to make a copy of the input as the garbage, we can use the following two `cx` gates.
```
Vf = QuantumCircuit(input_bit, output_bit, garbage_bit)
Vf.cx(input_bit[0], garbage_bit[0])
Vf.cx(input_bit[0], output_bit[0])
Vf.draw()
```
Now we can see the effect of first applying $U_f$ and then applying $V_f^{\dagger}$. The net effect is the following circuit.
```
qc = Uf + Vf.inverse()
qc.draw()
```
This circuit begins with two identical `cx` gates, whose effects cancel each other out. What remains is the final `cx` between the input and garbage registers. Mathematically, this means
$$ V_f^\dagger U_f \left| x,0,0 \right\rangle = V_f^\dagger \left| x,f(x),0 \right\rangle = \left| x , 0 ,g(x) \right\rangle. $$
Here we see that the action of $V_f^\dagger$ does not simply return us to the initial state, but instead leaves the first qubit entangled with the unwanted garbage. Subsequent steps of an algorithm will therefore not proceed as expected, since the state is not the one we need.
For this reason we need a way to remove the classical garbage from our quantum algorithms. This can be done with a method known as 'uncomputation'. We simply need to take another blank variable and apply $V_f$,
$$ \left| x, 0, 0, 0 \right\rangle \rightarrow \left| x,f(x),g(x),0 \right\rangle. $$
Then we apply a set of controlled-NOT gates, each controlled on one of the qubits used to encode the output, and targeted on the corresponding qubit of the extra blank variable.
Here is the circuit that does this for our example using single-qubit registers.
```
final_output_bit = QuantumRegister(1, 'final-output')
copy = QuantumCircuit(output_bit, final_output_bit)
copy.cx(output_bit, final_output_bit)
copy.draw()
```
The effect of this is to copy the information over (if you have heard of the no-cloning theorem, note that this is not the same process). Specifically, it transforms the state in the following way,
$$ \left| x,f(x),g(x),0 \right\rangle \rightarrow \left| x,f(x),g(x),f(x) \right\rangle. $$
Finally, we apply $V_f^\dagger$, which undoes the original computation,
$$ \left| x,f(x),g(x),f(x) \right\rangle \rightarrow \left| x,0,0,f(x) \right\rangle. $$
The copied output nevertheless remains. The net effect is to perform the computation without garbage, and hence to achieve our desired $U_f$.
For our example with single-qubit registers and $f(x) = x$, the whole process corresponds to the following circuit.
```
(Vf + copy + Vf.inverse()).draw()
```
Using what you know so far about how `cx` gates work, you should be able to see that the two applied to the garbage register will cancel each other out. We have therefore successfully removed the garbage. A numerical check of the whole process is sketched below.
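As a quick check (a sketch assuming Qiskit's `Statevector` from `qiskit.quantum_info`), we can rebuild the whole process on one circuit and confirm that the output and garbage registers return to $|0\rangle$ while the final output register holds $f(x)$:
```
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Statevector

regs = [QuantumRegister(1, name) for name in ('input', 'output', 'garbage', 'final-output')]
check = QuantumCircuit(*regs)
check.h(0) # superpose the input to test x=0 and x=1 at once
check.cx(0, 2) # V_f: copy input to garbage...
check.cx(0, 1) # ...and to output
check.cx(1, 3) # copy the output to final-output
check.cx(0, 1) # V_f^dagger: same gates in reverse order
check.cx(0, 2)
print(Statevector(check)) # only |0000> and |1001> survive: output and garbage are reset
```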
### Quick Exercises
1. Show that the output is correctly written to the 'final output' register (and only to this register) when the 'output' register is initialized as $|0\rangle$.
2. Determine what happens when the 'output' register is instead initialized as $|1\rangle$.
With this method, and all the others covered in this chapter, we now have all the tools we need to construct quantum algorithms. Now we can move on to seeing those algorithms in action.
```
import qiskit.tools.jupyter
%qiskit_version_table
```
# Padding Oracle
- When a decrypted CBC ciphertext ends in an invalid pad the web server returns a 403 error code (forbidden request). When the CBC padding is valid, but the message is malformed, the web server returns a 404 error code (URL not found).
```
http://crypto-class.appspot.com/po?er="your ciphertext here"
```
- The first ciphertext block is a random IV; the decrypted plaintext is ASCII encoded.
- The ciphertext following the `"po?er="` is a hex-encoded AES CBC encryption, with a random IV, of some secret data about Alice's session.
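Why a padding oracle leaks plaintext: in CBC decryption each plaintext block satisfies `m[i] = D(c[i]) XOR c[i-1]`, so XOR-ing a pad byte and a guess into the previous ciphertext block turns the server's padding check into an equality test on a single plaintext byte. Below is a minimal offline sketch of that algebra (all values are made up; no server is involved):
```
# Stand-ins: D plays the role of the block-cipher decryption D_k(c[i]),
# which is unknown to the attacker.
D = bytes(16)
c_prev = bytes(range(16))                     # previous ciphertext block
m = bytes(a ^ b for a, b in zip(D, c_prev))   # true plaintext block: m = D XOR c_prev

guess, pad = m[-1], 0x01                      # attack the last byte with pad byte \x01
forged = bytearray(c_prev)
forged[-1] ^= pad ^ guess                     # c'[-1] = c_prev[-1] XOR pad XOR guess
decrypted_last = D[-1] ^ forged[-1]           # the byte the server sees in that position
assert decrypted_last == pad                  # valid padding exactly when guess == m[-1]
```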
```
import urllib3 as ul
BLOCKSIZE = 16
AZ = [i for i in range(ord('A'), ord('Z') + 1)]
space = [ord(' ')]
az = [i for i in range(ord('a'),ord('z') +1)]
paddings = [i for i in range(1, 17)]
misc1 = [i for i in range(17, 32)] + [i for i in range(33, 65)]
misc2 = [i for i in range(91, 97)] + [i for i in range(123, 128)]
ALL = paddings + space + az + AZ + misc1 + misc2
def xor(x, y, z):
assert len(x) == len(y) == len(z)
a = int.from_bytes(x, "big")
b = int.from_bytes(y, "big")
c = int.from_bytes(z, "big")
r = a ^ b ^ c
return r.to_bytes(len(x), "big")
# Target: "http://domain.com/po?er="
class PaddingOracle:
def __init__(self, target):
self.target = target
self.http = ul.PoolManager()
# ct: string with the hex-encoded ciphertext,
# 4 * 16 * 2 == 128 characters in length
# (4 blocks in total: 1 IV block + 3 ciphertext blocks)
def decrypt4blocks(self, ct, debug=True):
assert len(ct) == 128
assert self.status_query(ct) == 200
iv, c0, c1, c2 = ct[:32], ct[32:64], ct[64:96], ct[96:]
print("Decrypting...")
m0 = self.decrypt_block(c0, iv)
print(" > ", m0)
m1 = self.decrypt_block(c1, c0)
print(" > ", m1)
m2 = self.decrypt_block(c2, c1)
print(" > ", m2)
return m0 + m1 + m2
def decrypt_block(self, c, c0_hex):
m = bytearray(BLOCKSIZE)
c0 = bytes.fromhex(c0_hex)
for i in range(1, BLOCKSIZE + 1):
self.overwrite_and_send_byte(m, c, i, c0)
return m
# Overwrites one byte in message m for each iteration
def overwrite_and_send_byte(self, m, c, i, c0):
n = bytes([i for _ in range(BLOCKSIZE)])
CURRENT = BLOCKSIZE - i
for g in ALL:
m[CURRENT] = g
q = xor(n, m, c0).hex() + c
if self.is_valid(q) is True:
print(chr(g), end="_")
return
raise ValueError("Unable to find byte")
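# 403 signals invalid padding; any other status (404 or 200) means the padding was accepted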
def is_valid(self, q):
r = self.http.request('GET', self.target + q, retries=False)
return r.status != 403
def status_query(self, q):
return self.http.request('GET', self.target + q, retries=False).status
TARGET = 'http://crypto-class.appspot.com/po?er='
CIPHERTEXT = "f20bdba6ff29eed7b046d1df9fb7000058b1ffb4210a580f748b4ac714c001bd4a61044426fb515dad3f21f18aa577c0bdf302936266926ff37dbf7035d5eeb4"
po = PaddingOracle(TARGET)
message = po.decrypt4blocks(CIPHERTEXT)
print(message)
ct1 = "4ca00ff4c898d61e1edbf1800618fb2828a226d160dad07883d04e008a7897ee2e4b7465d5290d0c0e6c6822236e1daafb94ffe0c5da05d9476be028ad7c1d81"
ct2 = "5b68629feb8606f9a6667670b75b38a5b4832d0f26e1ab7da33249de7d4afc48e713ac646ace36e872ad5fb8a512428a6e21364b0c374df45503473c5242a253"
pt1 = "Basic CBC mode encryption needs padding."
pt2 = "Our implementation uses rand. IV"
TARGET = "http://localhost:9000/po?er="
po = PaddingOracle(TARGET)
message1 = po.decrypt4blocks(ct1)
print(message1)
message2 = po.decrypt4blocks(ct2)
print(message2)
```
# Widget Events
## Special events
```
from __future__ import print_function
```
The `Button` is not used to represent a data type. Instead the button widget is used to handle mouse clicks. The `on_click` method of the `Button` can be used to register a function to be called when the button is clicked. The doc string of `on_click` can be seen below.
```
import ipywidgets as widgets
print(widgets.Button.on_click.__doc__)
```
### Example
Since button clicks are stateless, they are transmitted from the front-end to the back-end using custom messages. By using the `on_click` method, a button that prints a message when it has been clicked is shown below. To capture `print`s (or any other kind of output) and ensure it is displayed, be sure to send it to an `Output` widget (or put the information you want to display into an `HTML` widget).
```
from IPython.display import display
button = widgets.Button(description="Click Me!")
output = widgets.Output()
display(button, output)
def on_button_clicked(b):
with output:
print("Button clicked.")
button.on_click(on_button_clicked)
```
## Traitlet events
Widget properties are IPython traitlets and traitlets are eventful. To handle changes, the `observe` method of the widget can be used to register a callback. The doc string for `observe` can be seen below.
```
print(widgets.Widget.observe.__doc__)
```
### Signatures
As mentioned in the doc string, the registered callback must have the signature `handler(change)`, where `change` is a dictionary holding the information about the change.
Using this method, an example of how to output an `IntSlider`'s value as it is changed can be seen below.
```
int_range = widgets.IntSlider()
output2 = widgets.Output()
display(int_range, output2)
def on_value_change(change):
with output2:
print(change['new'])
int_range.observe(on_value_change, names='value')
```
## Linking Widgets
Often, you may want to simply link widget attributes together. Synchronization of attributes can be done in a simpler way than by using bare traitlets events.
### Linking traitlets attributes in the kernel
The first method is to use the `link` and `dlink` functions from the `traitlets` module (these two functions are re-exported by the `ipywidgets` module for convenience). This only works if we are interacting with a live kernel.
```
caption = widgets.Label(value='The values of slider1 and slider2 are synchronized')
slider1, slider2 = widgets.IntSlider(description='Slider 1'),\
                   widgets.IntSlider(description='Slider 2')
l = widgets.link((slider1, 'value'), (slider2, 'value'))
display(caption, slider1, slider2)
caption = widgets.Label(value='Changes in source values are reflected in target1')
source, target1 = widgets.IntSlider(description='Source'),\
widgets.IntSlider(description='Target 1')
dl = widgets.dlink((source, 'value'), (target1, 'value'))
display(caption, source, target1)
```
The `link` and `dlink` functions return objects with an `unlink` method, which can be called to break the link.
```
l.unlink()
dl.unlink()
```
### Registering callbacks to trait changes in the kernel
Since attributes of widgets on the Python side are traitlets, you can register handlers to the change events whenever the model gets updates from the front-end.
The handler passed to observe will be called with one change argument. The change object holds at least a `type` key and a `name` key, corresponding respectively to the type of notification and the name of the attribute that triggered the notification.
Other keys may be passed depending on the value of `type`. In the case where `type` is `change`, we also have the following keys (a small sketch using them follows the list):
- `owner` : the HasTraits instance
- `old` : the old value of the modified trait attribute
- `new` : the new value of the modified trait attribute
- `name` : the name of the modified trait attribute.
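A minimal sketch (added for illustration, not part of the original notebook) of a handler that reads all four keys:
```
demo_slider = widgets.IntSlider(description='Demo')
demo_out = widgets.Output()
display(demo_slider, demo_out)

def report_change(change):
    # 'change' behaves like a dictionary (attribute access also works)
    with demo_out:
        print(change['name'], 'on', change['owner'].description,
              ':', change['old'], '->', change['new'])

demo_slider.observe(report_change, names='value')
```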
```
caption = widgets.Label(value='The values of range1 and range2 are synchronized')
slider = widgets.IntSlider(min=-5, max=5, value=1, description='Slider')
def handle_slider_change(change):
caption.value = 'The slider value is ' + (
'negative' if change.new < 0 else 'nonnegative'
)
slider.observe(handle_slider_change, names='value')
display(caption, slider)
```
### Linking widgets attributes from the client side
When synchronizing traitlets attributes, you may experience a lag because of the latency due to the roundtrip to the server side. You can also directly link widget attributes in the browser using the link widgets, in either a unidirectional or a bidirectional fashion.
Javascript links persist when embedding widgets in html web pages without a kernel.
```
caption = widgets.Label(value='The values of range1 and range2 are synchronized')
range1, range2 = widgets.IntSlider(description='Range 1'),\
widgets.IntSlider(description='Range 2')
l = widgets.jslink((range1, 'value'), (range2, 'value'))
display(caption, range1, range2)
caption = widgets.Label(value='Changes in source_range values are reflected in target_range1')
source_range, target_range1 = widgets.IntSlider(description='Source range'),\
widgets.IntSlider(description='Target range 1')
dl = widgets.jsdlink((source_range, 'value'), (target_range1, 'value'))
display(caption, source_range, target_range1)
```
The `widgets.jslink` function returns a `Link` widget. The link can be broken by calling the `unlink` method.
```
# l.unlink()
# dl.unlink()
```
### The difference between linking in the kernel and linking in the client
Linking in the kernel means linking via python. If two sliders are linked in the kernel, when one slider is changed the browser sends a message to the kernel (python in this case) updating the changed slider, the link widget in the kernel then propagates the change to the other slider object in the kernel, and then the other slider's kernel object sends a message to the browser to update the other slider's views in the browser. If the kernel is not running (as in a static web page), then the controls will not be linked.
Linking using jslink (i.e., on the browser side) means constructing the link in Javascript. When one slider is changed, Javascript running in the browser changes the value of the other slider in the browser, without needing to communicate with the kernel at all. If the sliders are attached to kernel objects, each slider updates its kernel-side object independently.
To see the difference between the two, go to the [static version of this page in the ipywidgets documentation](http://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Events.html) and try out the sliders near the bottom. The ones linked in the kernel with `link` and `dlink` are no longer linked, but the ones linked in the browser with `jslink` and `jsdlink` are still linked.
## Continuous updates
Some widgets offer a choice with their `continuous_update` attribute between continually updating values or only updating values when a user submits the value (for example, by pressing Enter or navigating away from the control). In the next example, we see the "Delayed" controls only transmit their value after the user finishes dragging the slider or submitting the textbox. The "Continuous" controls continually transmit their values as they are changed. Try typing a two-digit number into each of the text boxes, or dragging each of the sliders, to see the difference.
```
a = widgets.IntSlider(description="Delayed", continuous_update=False)
b = widgets.IntText(description="Delayed", continuous_update=False)
c = widgets.IntSlider(description="Continuous", continuous_update=True)
d = widgets.IntText(description="Continuous", continuous_update=True)
widgets.link((a, 'value'), (b, 'value'))
widgets.link((a, 'value'), (c, 'value'))
widgets.link((a, 'value'), (d, 'value'))
widgets.VBox([a,b,c,d])
```
Sliders, `Text`, and `Textarea` controls default to `continuous_update=True`. `IntText` and other text boxes for entering integer or float numbers default to `continuous_update=False` (since often you'll want to type an entire number before submitting the value by pressing enter or navigating out of the box).
# Hi, Are you in Google Colab?
In Google Colab you can easily run Optimus. If you are not in Colab, you may want to go here:
https://colab.research.google.com/github/ironmussa/Optimus/blob/master/examples/10_min_from_spark_to_pandas_with_optimus.ipynb
Install Optimus and all its dependencies.
```
import sys
if 'google.colab' in sys.modules:
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
!wget -q https://archive.apache.org/dist/spark/spark-2.4.1/spark-2.4.1-bin-hadoop2.7.tgz
!tar xf spark-2.4.1-bin-hadoop2.7.tgz
!pip install optimuspyspark
```
## Restart Runtime
Before you continue, please go to the 'Runtime' menu above and select 'Restart Runtime (Ctrl + M + .)'.
```
if 'google.colab' in sys.modules:
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.4.1-bin-hadoop2.7"
```
## You are done. Enjoy Optimus!
# Hacking Optimus!
To hack on Optimus we recommend cloning the repo and setting ```repo_path``` relative to this notebook.
```
repo_path=".."
# This will reload the change you make to Optimus in real time
%load_ext autoreload
%autoreload 2
import sys
sys.path.append(repo_path)
```
## Install Optimus
from command line:
`pip install optimuspyspark`
from a notebook you can use:
`!pip install optimuspyspark`
## Import Optimus and start it
```
from optimus import Optimus
op = Optimus(master="local")
```
## Dataframe creation
Create a dataframe by passing a list of column names and a list of row values. Unlike pandas you need to specify the column names.
```
df = op.create.df(
[
"names",
"height(ft)",
"function",
"rank",
"weight(t)",
"japanese name",
"last position",
"attributes"
],
[
("Optim'us", 28.0, "Leader", 10, 4.3, ["Inochi", "Convoy"], "19.442735,-99.201111", [8.5344, 4300.0]),
("bumbl#ebéé ", 17.5, "Espionage", 7, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", [5.334, 2000.0]),
("ironhide&", 26.0, "Security", 7, 4.0, ["Roadbuster"], "37.789563,-122.400356", [7.9248, 4000.0]),
("Jazz", 13.0, "First Lieutenant", 8, 1.8, ["Meister"], "33.670666,-117.841553", [3.9624, 1800.0]),
("Megatron", None, "None", None, 5.7, ["Megatron"], None, [None, 5700.0]),
("Metroplex_)^$", 300.0, "Battle Station", 8, None, ["Metroflex"], None, [91.44, None]),
]).h_repartition(1)
df.table()
```
Creating a dataframe by passing a list of tuples specifying the column data types. You can specify the data type as a string or as a Spark DataType. https://spark.apache.org/docs/2.3.1/api/java/org/apache/spark/sql/types/package-summary.html
Also you can use some Optimus predefined types:
* "str" = StringType()
* "int" = IntegerType()
* "float" = FloatType()
* "bool" = BooleanType()
```
df = op.create.df(
[
("names", "str"),
("height", "float"),
("function", "str"),
("rank", "int"),
],
[
("bumbl#ebéé ", 17.5, "Espionage", 7),
("Optim'us", 28.0, "Leader", 10),
("ironhide&", 26.0, "Security", 7),
("Jazz", 13.0, "First Lieutenant", 8),
("Megatron", None, "None", None),
])
df.table()
```
Creating a dataframe and specifying whether each column accepts null values
```
df = op.create.df(
[
("names", "str", True),
("height", "float", True),
("function", "str", True),
("rank", "int", True),
],
[
("bumbl#ebéé ", 17.5, "Espionage", 7),
("Optim'us", 28.0, "Leader", 10),
("ironhide&", 26.0, "Security", 7),
("Jazz", 13.0, "First Lieutenant", 8),
("Megatron", None, "None", None),
])
df.table()
```
Creating a dataframe from a pandas dataframe
```
import pandas as pd
data = [("bumbl#ebéé ", 17.5, "Espionage", 7),
("Optim'us", 28.0, "Leader", 10),
("ironhide&", 26.0, "Security", 7)]
labels = ["names", "height", "function", "rank"]
# Create pandas dataframe
pdf = pd.DataFrame.from_records(data, columns=labels)
df = op.create.df(pdf=pdf)
df.table()
```
## Viewing data
Here is how to view the first 10 elements of a dataframe.
```
df.table(10)
```
## About Spark
Spark and Optimus work differently than pandas or R. If you are not familiar with Spark, we recommend taking the time to take a look at the links below.
### Partitions
Partitions are the way Spark divides the data across your local computer or cluster to optimize how it will be processed. Partitioning can greatly impact Spark performance.
Take 5 minutes to read this article:
https://www.dezyre.com/article/how-data-partitioning-in-spark-helps-achieve-more-parallelism/297
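As a quick illustration (a sketch using the `df` created above; the exact numbers are environment-dependent), you can inspect and change the partitioning directly:
```
print(df.rdd.getNumPartitions())    # current number of partitions
df8 = df.repartition(8)             # shuffles the data into 8 partitions
print(df8.rdd.getNumPartitions())   # 8
```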
### Lazy operations
Lazy evaluation in Spark means that the execution will not start until an action is triggered.
https://stackoverflow.com/questions/38027877/spark-transformation-why-its-lazy-and-what-is-the-advantage
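For example (a sketch using the `df` from above), a transformation such as `filter` only builds up the execution plan; nothing runs until an action such as `count` is called:
```
lazy_df = df.filter(df["rank"] > 7)   # transformation: no Spark job is launched here
print(lazy_df.count())                # action: this triggers the actual computation
```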
### Immutability
Immutability rules out a big set of potential problems due to updates from multiple threads at once. Immutable data is definitely safe to share across processes.
https://www.quora.com/Why-is-RDD-immutable-in-Spark
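A small sketch of what immutability means in practice: `withColumn` returns a new dataframe and leaves the original untouched.
```
df2 = df.withColumn("double_rank", df["rank"] * 2)
print("double_rank" in df.columns)    # False - the original dataframe is unchanged
print("double_rank" in df2.columns)   # True  - the change lives only in the new dataframe
```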
### Spark Architecture
https://jaceklaskowski.gitbooks.io/mastering-apache-spark/spark-architecture.html
## Columns and Rows
Optimus organizes operations in columns and rows. This is a little different from how pandas works, in which all operations are centered around the pandas class. We think this approach can better help you access and transform data. For a deep dive into the design decisions please read:
https://towardsdatascience.com/announcing-optimus-v2-agile-data-science-workflows-made-easy-c127a12d9e13
Sort by column names
```
df.cols.sort().table()
```
Sort rows by their rank value
```
df.rows.sort("rank").table()
df.describe().table()
```
## Selection
Unlike pandas, Spark DataFrames don't support random row access, so methods like `loc` in pandas are not available.
Also, Spark doesn't handle indexes, so methods like `iloc` are not available.
Select and show a specific column
```
df.cols.select("names").table()
```
Select rows from a dataframe where a condition is met
```
df.rows.select(df["rank"] > 7).table()
```
Select rows by specific values in a column
```
df.rows.is_in("rank", [7, 10]).table()
```
Create a unique id for every row.
```
df.rows.create_id().table()
```
Create new columns
```
df.cols.append("Affiliation", "Autobot").table()
```
## Missing Data
```
df.rows.drop_na("*", how='any').table()
```
Filling missing data.
```
df.cols.fill_na("*", "N//A").table()
```
To get the boolean mask where values are nan.
```
df.cols.is_na("*").table()
```
# Operations
## Stats
```
df.cols.mean("height")
df.cols.mean("*")
```
### Apply
```
def func(value, args):
return value + 1
df.cols.apply("height", func, "float").table()
```
### Histogramming
```
df.cols.count_uniques("*")
```
### String Methods
```
df \
.cols.lower("names") \
.cols.upper("function").table()
```
## Merge
### Concat
Optimus provides an intuitive way to concat dataframes by columns or rows.
```
df_new = op.create.df(
[
"class"
],
[
("Autobot"),
("Autobot"),
("Autobot"),
("Autobot"),
("Decepticons"),
]).h_repartition(1)
op.append([df, df_new], "columns").table()
df_new = op.create.df(
[
"names",
"height",
"function",
"rank",
],
[
("Grimlock", 22.9, "Dinobot Commander", 9),
]).h_repartition(1)
op.append([df, df_new], "rows").table()
# Operations like `join` and `group` are handled using Spark directly
df_melt = df.melt(id_vars=["names"], value_vars=["height", "function", "rank"])
df.table()
df_melt.pivot("names", "variable", "value").table()
```
## Plotting
```
df.plot.hist("height", 10)
df.plot.frequency("*", 10)
```
## Getting Data In/Out
```
df.cols.names()
df.to_json()
df.schema
df.table()
op.profiler.run(df, "height", infer=True)
df_csv = op.load.csv("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.csv").limit(5)
df_csv.table()
df_json = op.load.json("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.json").limit(5)
df_json.table()
df_csv.save.csv("test.csv")
df.table()
```
## Enrichment
```
df = op.load.json("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.json")
df.table()
import requests
def func_request(params):
# You can use here whatever header or auth info you need to send.
# For more information see the requests library
url= "https://jsonplaceholder.typicode.com/todos/" + str(params["id"])
return requests.get(url)
def func_response(response):
# Here you can parse the response
return response["title"]
e = op.enrich(host="localhost", port=27017, db_name="jazz")
e.flush()
df_result = e.run(df, func_request, func_response, calls= 60, period = 60, max_tries = 8)
df_result.table()
```
### Outlier Detection using autoencoders-First version
### Using the whole data
#### Edgar Acuna
#### April 2021
```
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import keras
from keras.models import Model, load_model
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import regularizers
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
url= "https://academic.uprm.edu/eacuna/diabetes.dat"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = pd.read_table(url, names=names)
yd=data['class']
Xd=data.iloc[:,0:8]
from sklearn.preprocessing import StandardScaler
cols_to_norm = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age']
scaler = MinMaxScaler()
data[cols_to_norm] = scaler.fit_transform(data[cols_to_norm])
data.shape
train_x = data.drop(['class'], axis=1) #drop the class column
train_x.info()
train_x = train_x.values #transform to ndarray
train_x
# No of Neurons in each Layer
nb_epoch = 20
batch_size = 50
input_dim = train_x.shape[1] #num of columns, 8
encoding_dim = 4
hidden_dim = int(encoding_dim / 2) # i.e. 2
learning_rate = 1e-7
input_layer = Input(shape=(input_dim, ))
encoder = Dense(encoding_dim, activation="tanh", activity_regularizer=regularizers.l1(learning_rate))(input_layer)
encoder = Dense(hidden_dim, activation="relu")(encoder)
decoder = Dense(hidden_dim, activation='tanh')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
autoencoder.summary()
import datetime
autoencoder.compile(optimizer='adam', loss='mse' )
t_ini = datetime.datetime.now()
history = autoencoder.fit(train_x, train_x,
epochs=nb_epoch,
batch_size=batch_size,
shuffle=True,
validation_split=0.1,
verbose=0
)
t_fin = datetime.datetime.now()
print('Time to run the model: {} Sec.'.format((t_fin -
t_ini).total_seconds()))
df_history = pd.DataFrame(history.history)
predictions = autoencoder.predict(train_x)
print(predictions)
train_x.shape
mse = np.mean(np.power(train_x- predictions, 2), axis=1)
df_error = pd.DataFrame({'reconstruction_error': mse, 'Label': yd}, index=yd.index)
df_error.describe()
dfOutliers = df_error.index[df_error.reconstruction_error > .15].tolist()
len(dfOutliers)
print(dfOutliers)
y=df_error['reconstruction_error'].tolist()
x = df_error.index.tolist()
thresh=0.15
plt.plot(x, y, 'ro')
plt.ylabel('reconstruction_error')
plt.xlabel('Index')
plt.title(' Threshold = ' +str(thresh))
plt.plot([0, len(x)], [thresh, thresh], "g--")
#cleaning the data from outliers
data3=data.drop(dfOutliers,axis=0)
```
### Outlier effect on the LDA Classifier
```
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
ldadis = LinearDiscriminantAnalysis().fit(Xd,yd)
scores = cross_val_score(ldadis, Xd, yd, cv=10)
print("Accuracy using LDA: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
y=data3['class']
X=data3.iloc[:,0:8]
# Fitting the discriminant analysis and computing the accuracy
ldadis = LinearDiscriminantAnalysis().fit(X,y)
scores = cross_val_score(ldadis, X, y, cv=10)
scores
print("Accuracy using LDA after outlier removal: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
#### Outlier effect on the KNN classifier
```
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
neigh = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(neigh, Xd, yd, cv=10)
scores
print("Accuracy using k=5 neighbors: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
y=data3['class']
X=data3.iloc[:,0:8]
y1=y.to_numpy()
X1=X.to_numpy()
neigh = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(neigh, X1, y1, cv=10)
scores
print("Accuracy using k=5 neighbors after outlier removal: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
# Intrusion detection on NSL-KDD
This is my try with [NSL-KDD](http://www.unb.ca/research/iscx/dataset/iscx-NSL-KDD-dataset.html) dataset, which is an improved version of well-known [KDD'99](http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html) dataset. I've used Python, Scikit-learn and PySpark via [ready-to-run Jupyter applications in Docker](https://github.com/jupyter/docker-stacks).
I've tried a variety of approaches to deal with this dataset. Here are presented some of them.
To be able to run this notebook, use the `make nsl-kdd-pyspark` command. It'll download the latest jupyter/pyspark-notebook docker image and start a container with Jupyter available on port `8889`.
## Contents
1. [Task description summary](#1.-Task-description-summary)
2. [Data loading](#2.-Data-loading)
3. [Exploratory Data Analysis](#3.-Exploratory-Data-Analysis)
4. [One Hot Encoding for categorical variables](#4.-One-Hot-Encoding-for-categorical-variables)
5. [Feature Selection using Attribute Ratio](#5.-Feature-Selection-using-Attribute-Ratio)
6. [Data preparation](#6.-Data-preparation)
7. [Visualization via PCA](#7.-Visualization-via-PCA)
8. [KMeans clustering with Random Forest Classifiers](#8.-KMeans-clustering-with-Random-Forest-Classifiers)
9. [Gaussian Mixture clustering with Random Forest Classifiers](#9.-Gaussian-Mixture-clustering-with-Random-Forest-Classifiers)
10. [Supervised approach for detecting each type of attack separately](#10.-Supervised-approach-for-detecting-each-type-of-attack-separately)
11. [Ensembling experiments](#11.-Ensembling-experiments)
12. [Results summary](#12.-Results-summary)
## 1. Task description summary
Software to detect network intrusions protects a computer network from unauthorized users, including perhaps insiders. The intrusion detector learning task is to build a predictive model (i.e. a classifier) capable of distinguishing between bad connections, called intrusions or attacks, and good normal connections.
A connection is a sequence of TCP packets starting and ending at some well defined times, between which data flows to and from a source IP address to a target IP address under some well defined protocol. Each connection is labeled as either normal, or as an attack, with exactly one specific attack type. Each connection record consists of about 100 bytes.
Attacks fall into four main categories:
- DOS: denial-of-service, e.g. syn flood;
- R2L: unauthorized access from a remote machine, e.g. guessing password;
- U2R: unauthorized access to local superuser (root) privileges, e.g., various ''buffer overflow'' attacks;
- probing: surveillance and other probing, e.g., port scanning.
It is important to note that the test data is not from the same probability distribution as the training data, and it includes specific attack types not in the training data. This makes the task more realistic. Some intrusion experts believe that most novel attacks are variants of known attacks and the "signature" of known attacks can be sufficient to catch novel variants. The datasets contain a total of 24 training attack types, with an additional 14 types in the test data only.
The complete task description could be found [here](http://kdd.ics.uci.edu/databases/kddcup99/task.html).
### NSL-KDD dataset description
[NSL-KDD](http://www.unb.ca/research/iscx/dataset/iscx-NSL-KDD-dataset.html) is a data set suggested to solve some of the inherent problems of the [KDD'99](http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html) data set.
The NSL-KDD data set has the following advantages over the original KDD data set:
- It does not include redundant records in the train set, so the classifiers will not be biased towards more frequent records.
- There is no duplicate records in the proposed test sets; therefore, the performance of the learners are not biased by the methods which have better detection rates on the frequent records.
- The number of selected records from each difficulty-level group is inversely proportional to the percentage of records in the original KDD data set. As a result, the classification rates of distinct machine learning methods vary in a wider range, which makes it more efficient to have an accurate evaluation of different learning techniques.
- The number of records in the train and test sets are reasonable, which makes it affordable to run the experiments on the complete set without the need to randomly select a small portion. Consequently, evaluation results of different research works will be consistent and comparable.
## 2. Data loading
```
# Here are some imports that are used throughout this notebook
import os
import math
import itertools
import multiprocessing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
from collections import OrderedDict
%matplotlib inline
gt0 = time()
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext, Row
conf = SparkConf()\
.setMaster(f"local[{multiprocessing.cpu_count()}]")\
.setAppName("PySpark NSL-KDD")\
.setAll([("spark.driver.memory", "8g"), ("spark.default.parallelism", f"{multiprocessing.cpu_count()}")])
# Creating local SparkContext with specified SparkConf and creating SQLContext based on it
sc = SparkContext.getOrCreate(conf=conf)
sc.setLogLevel('INFO')
sqlContext = SQLContext(sc)
from pyspark.sql.types import *
from pyspark.sql.functions import udf, split, col
import pyspark.sql.functions as sql
train20_nsl_kdd_dataset_path = os.path.join("NSL_KDD_Dataset", "KDDTrain+_20Percent.txt")
train_nsl_kdd_dataset_path = os.path.join("NSL_KDD_Dataset", "KDDTrain+.txt")
test_nsl_kdd_dataset_path = os.path.join("NSL_KDD_Dataset", "KDDTest+.txt")
col_names = np.array(["duration","protocol_type","service","flag","src_bytes",
"dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
"logged_in","num_compromised","root_shell","su_attempted","num_root",
"num_file_creations","num_shells","num_access_files","num_outbound_cmds",
"is_host_login","is_guest_login","count","srv_count","serror_rate",
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
"dst_host_rerror_rate","dst_host_srv_rerror_rate","labels"])
nominal_inx = [1, 2, 3]
binary_inx = [6, 11, 13, 14, 20, 21]
numeric_inx = list(set(range(41)).difference(nominal_inx).difference(binary_inx))
nominal_cols = col_names[nominal_inx].tolist()
binary_cols = col_names[binary_inx].tolist()
numeric_cols = col_names[numeric_inx].tolist()
# Function to load dataset and divide it into 8 partitions
def load_dataset(path):
dataset_rdd = sc.textFile(path, 8).map(lambda line: line.split(','))
dataset_df = (dataset_rdd.toDF(col_names.tolist()).select(
col('duration').cast(DoubleType()),
col('protocol_type').cast(StringType()),
col('service').cast(StringType()),
col('flag').cast(StringType()),
col('src_bytes').cast(DoubleType()),
col('dst_bytes').cast(DoubleType()),
col('land').cast(DoubleType()),
col('wrong_fragment').cast(DoubleType()),
col('urgent').cast(DoubleType()),
col('hot').cast(DoubleType()),
col('num_failed_logins').cast(DoubleType()),
col('logged_in').cast(DoubleType()),
col('num_compromised').cast(DoubleType()),
col('root_shell').cast(DoubleType()),
col('su_attempted').cast(DoubleType()),
col('num_root').cast(DoubleType()),
col('num_file_creations').cast(DoubleType()),
col('num_shells').cast(DoubleType()),
col('num_access_files').cast(DoubleType()),
col('num_outbound_cmds').cast(DoubleType()),
col('is_host_login').cast(DoubleType()),
col('is_guest_login').cast(DoubleType()),
col('count').cast(DoubleType()),
col('srv_count').cast(DoubleType()),
col('serror_rate').cast(DoubleType()),
col('srv_serror_rate').cast(DoubleType()),
col('rerror_rate').cast(DoubleType()),
col('srv_rerror_rate').cast(DoubleType()),
col('same_srv_rate').cast(DoubleType()),
col('diff_srv_rate').cast(DoubleType()),
col('srv_diff_host_rate').cast(DoubleType()),
col('dst_host_count').cast(DoubleType()),
col('dst_host_srv_count').cast(DoubleType()),
col('dst_host_same_srv_rate').cast(DoubleType()),
col('dst_host_diff_srv_rate').cast(DoubleType()),
col('dst_host_same_src_port_rate').cast(DoubleType()),
col('dst_host_srv_diff_host_rate').cast(DoubleType()),
col('dst_host_serror_rate').cast(DoubleType()),
col('dst_host_srv_serror_rate').cast(DoubleType()),
col('dst_host_rerror_rate').cast(DoubleType()),
col('dst_host_srv_rerror_rate').cast(DoubleType()),
col('labels').cast(StringType())))
return dataset_df
```
The first part of data preparation is dividing connections into normal and attack classes based on the 'labels' column. Then attacks are split into four main categories: DoS, Probe, R2L and U2R. After this, all of those categories are indexed. Also, an ID column is added to simplify work with clustered data.
```
from pyspark.ml import Pipeline, Transformer
from pyspark.ml.feature import StringIndexer
from pyspark import keyword_only
from pyspark.ml.param.shared import HasInputCol, HasOutputCol, Param
# Dictionary that contains mapping of various attacks to the four main categories
attack_dict = {
'normal': 'normal',
'back': 'DoS',
'land': 'DoS',
'neptune': 'DoS',
'pod': 'DoS',
'smurf': 'DoS',
'teardrop': 'DoS',
'mailbomb': 'DoS',
'apache2': 'DoS',
'processtable': 'DoS',
'udpstorm': 'DoS',
'ipsweep': 'Probe',
'nmap': 'Probe',
'portsweep': 'Probe',
'satan': 'Probe',
'mscan': 'Probe',
'saint': 'Probe',
'ftp_write': 'R2L',
'guess_passwd': 'R2L',
'imap': 'R2L',
'multihop': 'R2L',
'phf': 'R2L',
'spy': 'R2L',
'warezclient': 'R2L',
'warezmaster': 'R2L',
'sendmail': 'R2L',
'named': 'R2L',
'snmpgetattack': 'R2L',
'snmpguess': 'R2L',
'xlock': 'R2L',
'xsnoop': 'R2L',
'worm': 'R2L',
'buffer_overflow': 'U2R',
'loadmodule': 'U2R',
'perl': 'U2R',
'rootkit': 'U2R',
'httptunnel': 'U2R',
'ps': 'U2R',
'sqlattack': 'U2R',
'xterm': 'U2R'
}
attack_mapping_udf = udf(lambda v: attack_dict[v])
class Labels2Converter(Transformer):
@keyword_only
def __init__(self):
super(Labels2Converter, self).__init__()
def _transform(self, dataset):
return dataset.withColumn('labels2', sql.regexp_replace(col('labels'), '^(?!normal).*$', 'attack'))
class Labels5Converter(Transformer):
@keyword_only
def __init__(self):
super(Labels5Converter, self).__init__()
def _transform(self, dataset):
return dataset.withColumn('labels5', attack_mapping_udf(col('labels')))
labels2_indexer = StringIndexer(inputCol="labels2", outputCol="labels2_index")
labels5_indexer = StringIndexer(inputCol="labels5", outputCol="labels5_index")
labels_mapping_pipeline = Pipeline(stages=[Labels2Converter(), Labels5Converter(), labels2_indexer, labels5_indexer])
labels2 = ['normal', 'attack']
labels5 = ['normal', 'DoS', 'Probe', 'R2L', 'U2R']
labels_col = 'labels2_index'
# Loading train data
t0 = time()
train_df = load_dataset(train_nsl_kdd_dataset_path)
# Fitting preparation pipeline
labels_mapping_model = labels_mapping_pipeline.fit(train_df)
# Transforming labels column and adding id column
train_df = labels_mapping_model.transform(train_df).withColumn('id', sql.monotonically_increasing_id())
train_df = train_df.cache()
print(f"Number of examples in train set: {train_df.count()}")
print(f"Time: {time() - t0:.2f}s")
# Loading test data
t0 = time()
test_df = load_dataset(test_nsl_kdd_dataset_path)
# Transforming labels column and adding id column
test_df = labels_mapping_model.transform(test_df).withColumn('id', sql.monotonically_increasing_id())
test_df = test_df.cache()
print(f"Number of examples in test set: {test_df.count()}")
print(f"Time: {time() - t0:.2f}s")
```
## 3. Exploratory Data Analysis
Here are some descriptive statistics of available features.
```
# Labels columns
(train_df.groupby('labels2').count().show())
(train_df.groupby('labels5').count().sort(sql.desc('count')).show())
(test_df.groupby('labels2').count().show())
(test_df.groupby('labels5').count().sort(sql.desc('count')).show())
# 'protocol_type' nominal column
(train_df.crosstab(nominal_cols[0], 'labels2').sort(sql.asc(nominal_cols[0] + '_labels2')).show())
(train_df.crosstab(nominal_cols[0], 'labels5').sort(sql.asc(nominal_cols[0] + '_labels5')).show())
# 'service' nominal column
print(train_df.select(nominal_cols[1]).distinct().count())
(train_df.crosstab(nominal_cols[1], 'labels2').sort(sql.asc(nominal_cols[1] + '_labels2')).show(n=70))
(train_df.crosstab(nominal_cols[1], 'labels5').sort(sql.asc(nominal_cols[1] + '_labels5')).show(n=70))
# 'flag' nominal column
print(train_df.select(nominal_cols[2]).distinct().count())
(train_df.crosstab(nominal_cols[2], 'labels2').sort(sql.asc(nominal_cols[2] + '_labels2')).show())
(train_df.crosstab(nominal_cols[2], 'labels5').sort(sql.asc(nominal_cols[2] + '_labels5')).show())
# Binary columns
(train_df.select(binary_cols).describe().toPandas().transpose())
# 'su_attempted' should be a binary feature, but has 3 values
(train_df.crosstab('su_attempted', 'labels2').show())
# '2.0' value is replaced to '0.0' for both train and test datasets
train_df = train_df.replace(2.0, 0.0, 'su_attempted')
test_df = test_df.replace(2.0, 0.0, 'su_attempted')
# Numeric columns
print(len(numeric_cols))
(train_df.select(numeric_cols).describe().toPandas().transpose())
(train_df.crosstab('num_outbound_cmds', 'labels2').show())
```
As the 'num_outbound_cmds' feature takes only 0.0 values, it is dropped as redundant.
```
train_df = train_df.drop('num_outbound_cmds')
test_df = test_df.drop('num_outbound_cmds')
numeric_cols.remove('num_outbound_cmds')
```
The commented code below is related to removing highly correlated features. However, it hasn't been tested a lot yet.
```
# from pyspark.mllib.stat import Statistics
# from pyspark.mllib.linalg import Vectors
# from pyspark.ml.feature import VectorAssembler
# t0 = time()
# stat_assembler = VectorAssembler(
# inputCols=numeric_cols,
# outputCol='features')
# stat_rdd = stat_assembler.transform(train_df).rdd.map(lambda row: row['features'].toArray())
# pearson_corr = Statistics.corr(stat_rdd, method='pearson')
# spearman_corr = Statistics.corr(stat_rdd, method='spearman')
# print(time() - t0)
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 6))
# ax1.set_title("Pearson")
# ax2.set_title("Spearman")
# sns.heatmap(pearson_corr, ax=ax1)
# sns.heatmap(spearman_corr, ax=ax2)
# inx_correlated_to_delete = [8, 15, 28, 17, 29]
# for inx in inx_correlated_to_delete:
# train_df = train_df.drop(numeric_cols[inx])
# test_df = test_df.drop(numeric_cols[inx])
# numeric_cols = [col for inx, col in enumerate(numeric_cols) if inx not in inx_correlated_to_delete]
# train_df = train_df.cache()
# test_df = test_df.cache()
# print(train_df.count())
# print(test_df.count())
# print(len(numeric_cols))
```
## 4. One Hot Encoding for categorical variables
One Hot Encoding (OHE) is used for treating categorical variables. A custom function is created for demonstration purposes; however, it could easily be replaced by the PySpark OneHotEncoder.
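For reference, a minimal sketch of that built-in alternative (shown only for comparison: Spark's encoder first needs a `StringIndexer`, and by default it drops the last category, unlike the custom function below):
```
from pyspark.ml.feature import StringIndexer, OneHotEncoder

# Hypothetical pipeline for the 'protocol_type' column only
pt_indexer = StringIndexer(inputCol='protocol_type', outputCol='protocol_type_index')
pt_encoder = OneHotEncoder(inputCol='protocol_type_index', outputCol='protocol_type_vec')
ohe_demo_df = Pipeline(stages=[pt_indexer, pt_encoder]).fit(train_df).transform(train_df)
```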
```
def ohe_vec(cat_dict, row):
vec = np.zeros(len(cat_dict))
vec[cat_dict[row]] = float(1.0)
return vec.tolist()
def ohe(df, nominal_col):
categories = (df.select(nominal_col)
.distinct()
.rdd.map(lambda row: row[0])
.collect())
cat_dict = dict(zip(categories, range(len(categories))))
udf_ohe_vec = udf(lambda row: ohe_vec(cat_dict, row),
StructType([StructField(cat, DoubleType(), False) for cat in categories]))
df = df.withColumn(nominal_col + '_ohe', udf_ohe_vec(col(nominal_col))).cache()
nested_cols = [nominal_col + '_ohe.' + cat for cat in categories]
ohe_cols = [nominal_col + '_' + cat for cat in categories]
for new, old in zip(ohe_cols, nested_cols):
df = df.withColumn(new, col(old))
df = df.drop(nominal_col + '_ohe')
return df, ohe_cols
t0 = time()
train_ohe_cols = []
train_df, train_ohe_col0 = ohe(train_df, nominal_cols[0])
train_ohe_cols += train_ohe_col0
train_df, train_ohe_col1 = ohe(train_df, nominal_cols[1])
train_ohe_cols += train_ohe_col1
train_df, train_ohe_col2 = ohe(train_df, nominal_cols[2])
train_ohe_cols += train_ohe_col2
binary_cols += train_ohe_cols
train_df = train_df.cache()
print(f"Number of examples in train set: {train_df.count()}")
print(f"Time: {time() - t0:.2f}s")
```
A custom list of test binary columns is used, as the test dataset could contain additional categories for the 'service' and 'flag' features. However, those additional categories aren't used below.
```
t0 = time()
test_ohe_cols = []
test_df, test_ohe_col0_names = ohe(test_df, nominal_cols[0])
test_ohe_cols += test_ohe_col0_names
test_df, test_ohe_col1_names = ohe(test_df, nominal_cols[1])
test_ohe_cols += test_ohe_col1_names
test_df, test_ohe_col2_names = ohe(test_df, nominal_cols[2])
test_ohe_cols += test_ohe_col2_names
test_binary_cols = col_names[binary_inx].tolist() + test_ohe_cols
test_df = test_df.cache()
print(f"Number of examples in test set: {test_df.count()}")
print(f"Time: {time() - t0:.2f}s")
```
## 5. Feature Selection using Attribute Ratio
The Attribute Ratio approach is used for feature selection purposes. This approach was described by Hee-su Chae and Sang Hyun Choi in [Feature Selection for efficient Intrusion Detection using Attribute Ratio](http://www.naun.org/main/UPress/cc/2014/a102019-106.pdf) and [Feature Selection for Intrusion Detection using NSL-KDD](http://www.wseas.us/e-library/conferences/2013/Nanjing/ACCIS/ACCIS-30.pdf)
This approach is also applied to the nominal variables, as they were encoded as binary variables above.
As it is possible to get 'null' values when a binary feature has Frequency(0) = 0, those 'null' values are replaced with 1000.0 (a magic number). For the NSL-KDD dataset this applies only to the 'protocol_type_tcp' OHE variable.
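Concretely, as implemented in `getAttributeRatio` below, the ratio for a feature $f$ is the maximum over the classes $c$ of a class-vs-overall comparison:
$$ AR(f) = \max_{c} \frac{\operatorname{mean}_c(f)}{\operatorname{mean}(f)} \;\text{(numeric)}, \qquad AR(f) = \max_{c} \frac{\#\{f=1 \text{ in } c\}}{\#\{f=0 \text{ in } c\}} \;\text{(binary)} $$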
```
def getAttributeRatio(df, numericCols, binaryCols, labelCol):
ratio_dict = {}
if numericCols:
avg_dict = (df
.select(list(map(lambda c: sql.avg(c).alias(c), numericCols)))
.first()
.asDict())
ratio_dict.update(df
.groupBy(labelCol)
.avg(*numericCols)
.select(list(map(lambda c: sql.max(col('avg(' + c + ')')/avg_dict[c]).alias(c), numericCols)))
.fillna(0.0)
.first()
.asDict())
if binaryCols:
ratio_dict.update((df
.groupBy(labelCol)
.agg(*list(map(lambda c: (sql.sum(col(c))/(sql.count(col(c)) - sql.sum(col(c)))).alias(c), binaryCols)))
.fillna(1000.0)
.select(*list(map(lambda c: sql.max(col(c)).alias(c), binaryCols)))
.first()
.asDict()))
return OrderedDict(sorted(ratio_dict.items(), key=lambda v: -v[1]))
def selectFeaturesByAR(ar_dict, min_ar):
return [f for f in ar_dict.keys() if ar_dict[f] >= min_ar]
t0 = time()
ar_dict = getAttributeRatio(train_df, numeric_cols, binary_cols, 'labels5')
print(f"Number of features in Attribute Ration dict: {len(ar_dict)}")
print(f"Time: {time() - t0:.2f}s")
ar_dict
```
## 6. Data preparation
Standardization is necessary, as a lot of distance-based algorithms are used below. A custom standardization is created for demonstration purposes; it could easily be replaced by the PySpark StandardScaler. Note that the data is sparse, so it is reasonable not to subtract the mean, to avoid destroying sparsity.
```
t0 = time()
avg_dict = (train_df.select(list(map(lambda c: sql.avg(c).alias(c), numeric_cols))).first().asDict())
std_dict = (train_df.select(list(map(lambda c: sql.stddev(c).alias(c), numeric_cols))).first().asDict())
def standardizer(column):
return ((col(column) - avg_dict[column])/std_dict[column]).alias(column)
# Standardizer without mean
# def standardizer(column):
# return (col(column)/std_dict[column]).alias(column)
train_scaler = [*binary_cols, *list(map(standardizer, numeric_cols)), *['id', 'labels2_index', 'labels2', 'labels5_index', 'labels5']]
test_scaler = [*test_binary_cols, *list(map(standardizer, numeric_cols)), *['id', 'labels2_index', 'labels2', 'labels5_index', 'labels5']]
scaled_train_df = (train_df.select(train_scaler).cache())
scaled_test_df = (test_df.select(test_scaler).cache())
print(scaled_train_df.count())
print(scaled_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
VectorAssembler is used for combining a given list of columns into a single vector column. Then VectorIndexer is used for indexing categorical (binary) features. Indexing categorical features allows algorithms to treat them appropriately, improving performance.
```
from pyspark.ml.feature import VectorIndexer, VectorAssembler
assembler = VectorAssembler(inputCols=selectFeaturesByAR(ar_dict, 0.01), outputCol='raw_features')
indexer = VectorIndexer(inputCol='raw_features', outputCol='indexed_features', maxCategories=2)
prep_pipeline = Pipeline(stages=[assembler, indexer])
prep_model = prep_pipeline.fit(scaled_train_df)
t0 = time()
scaled_train_df = (prep_model
.transform(scaled_train_df)
.select('id', 'indexed_features', 'labels2_index', 'labels2', 'labels5_index', 'labels5')
.cache())
scaled_test_df = (prep_model
.transform(scaled_test_df)
.select('id', 'indexed_features','labels2_index', 'labels2', 'labels5_index', 'labels5')
.cache())
print(scaled_train_df.count())
print(scaled_test_df.count())
print(f"Time: {time() - t0:.2f}s")
# Setting seed for reproducibility
seed = 4667979835606274383
print(seed)
```
The train dataset is split into an 80% train and a 20% cross-validation set.
```
split = (scaled_train_df.randomSplit([0.8, 0.2], seed=seed))
scaled_train_df = split[0].cache()
scaled_cv_df = split[1].cache()
print(scaled_train_df.count())
print(scaled_cv_df.count())
```
Additional "result" dataframes are used to collect probabilities and predictions from different approaches.
```
res_cv_df = scaled_cv_df.select(col('id'), col('labels2_index'), col('labels2'), col('labels5')).cache()
res_test_df = scaled_test_df.select(col('id'), col('labels2_index'), col('labels2'), col('labels5')).cache()
prob_cols = []
pred_cols = []
print(res_cv_df.count())
print(res_test_df.count())
```
Different metrics from sklearn are used for evaluating results. The most important of them for this task are the False Alarm Rate (false positive rate), the Detection Rate and the F1 score.
As evaluating via sklearn requires collecting the predicted and label columns to the driver, it will be replaced with PySpark metrics later.
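In terms of the binary confusion matrix printed by `printReport` below (with 'normal' as the negative class and 'attack' as the positive class):
$$ \text{False Alarm Rate} = \frac{FP}{TN + FP}, \qquad \text{Detection Rate} = \frac{TP}{TP + FN} $$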
```
import sklearn.metrics as metrics
def printCM(cm, labels):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels])
# Print header
print(" " * columnwidth, end="\t")
for label in labels:
print("%{0}s".format(columnwidth) % label, end="\t")
print()
# Print rows
for i, label1 in enumerate(labels):
print("%{0}s".format(columnwidth) % label1, end="\t")
for j in range(len(labels)):
print("%{0}d".format(columnwidth) % cm[i, j], end="\t")
print()
def getPrediction(e):
return udf(lambda row: 1.0 if row >= e else 0.0, DoubleType())
def printReport(resDF, probCol, labelCol='labels2_index', e=None, labels=['normal', 'attack']):
if (e):
predictionAndLabels = list(zip(*resDF.rdd
.map(lambda row: (1.0 if row[probCol] >= e else 0.0, row[labelCol]))
.collect()))
else:
predictionAndLabels = list(zip(*resDF.rdd
.map(lambda row: (row[probCol], row[labelCol]))
.collect()))
cm = metrics.confusion_matrix(predictionAndLabels[1], predictionAndLabels[0])
printCM(cm, labels)
print(" ")
print("Accuracy = %g" % (metrics.accuracy_score(predictionAndLabels[1], predictionAndLabels[0])))
print("AUC = %g" % (metrics.roc_auc_score(predictionAndLabels[1], predictionAndLabels[0])))
print(" ")
print("False Alarm Rate = %g" % (cm[0][1]/(cm[0][0] + cm[0][1])))
print("Detection Rate = %g" % (cm[1][1]/(cm[1][1] + cm[1][0])))
print("F1 score = %g" % (metrics.f1_score(predictionAndLabels[1], predictionAndLabels[0], labels)))
print(" ")
print(metrics.classification_report(predictionAndLabels[1], predictionAndLabels[0]))
print(" ")
```
## 7. Visualization via PCA
PCA algorithm is used for visualization purposes. It's also used later as preprocessing for Gaussian Mixture clustering.
The first graph shows 'attack' vs 'normal' labels; the second graph shows the 4 different types of attacks vs normal connections.
```
from pyspark.ml.feature import VectorSlicer
from pyspark.ml.feature import PCA
t0 = time()
pca_slicer = VectorSlicer(inputCol="indexed_features", outputCol="features", names=selectFeaturesByAR(ar_dict, 0.05))
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pca_pipeline = Pipeline(stages=[pca_slicer, pca])
pca_train_df = pca_pipeline.fit(scaled_train_df).transform(scaled_train_df)
print(f"Time: {time() - t0:.2f}s")
t0 = time()
viz_train_data = np.array(pca_train_df.rdd.map(lambda row: [*row['pca_features'], row['labels2_index'], row['labels5_index']]).collect())
plt.figure()
plt.scatter(x=viz_train_data[:,0], y=viz_train_data[:,1], c=viz_train_data[:,2], cmap="Set1")
plt.figure()
plt.scatter(x=viz_train_data[:,0], y=viz_train_data[:,1], c=viz_train_data[:,3], cmap="Set1")
plt.show()
print(time() - t0)
```
## 8. KMeans clustering with Random Forest Classifiers
The idea of the first approach is to clusterize the data and then train a separate Random Forest classifier for each of the clusters. As Random Forest returns probabilities, it is possible to improve the detection rate for new types of attacks by adjusting the threshold.
As KMeans cannot truly handle binary/categorical features, only numeric features are used for clustering.
```
kmeans_prob_col = 'kmeans_rf_prob'
kmeans_pred_col = 'kmeans_rf_pred'
prob_cols.append(kmeans_prob_col)
pred_cols.append(kmeans_pred_col)
# KMeans clustrering
from pyspark.ml.clustering import KMeans
t0 = time()
kmeans_slicer = VectorSlicer(inputCol="indexed_features", outputCol="features",
names=list(set(selectFeaturesByAR(ar_dict, 0.1)).intersection(numeric_cols)))
kmeans = KMeans(k=8, initSteps=25, maxIter=100, featuresCol="features", predictionCol="cluster", seed=seed)
kmeans_pipeline = Pipeline(stages=[kmeans_slicer, kmeans])
kmeans_model = kmeans_pipeline.fit(scaled_train_df)
kmeans_train_df = kmeans_model.transform(scaled_train_df).cache()
kmeans_cv_df = kmeans_model.transform(scaled_cv_df).cache()
kmeans_test_df = kmeans_model.transform(scaled_test_df).cache()
print(f"Time: {time() - t0:.2f}s")
# Function for describing the contents of the clusters
def getClusterCrosstab(df, clusterCol='cluster'):
return (df.crosstab(clusterCol, 'labels2')
.withColumn('count', col('attack') + col('normal'))
.withColumn(clusterCol + '_labels2', col(clusterCol + '_labels2').cast('int'))
.sort(col(clusterCol +'_labels2').asc()))
kmeans_crosstab = getClusterCrosstab(kmeans_train_df).cache()
kmeans_crosstab.show(n=30)
```
Clusters are split into two categories. The first category contains clusters that have both 'attack' and 'normal' connections and more than 25 connections; Random Forest classifiers are applied to these. The second category contains all other clusters, each of which is mapped to 'attack' or 'normal' based on its majority class. All clusters that contain 25 or fewer connections are treated as outliers and mapped to the 'attack' type.
```
# Function for splitting clusters
def splitClusters(crosstab):
exp = ((col('count') > 25) & (col('attack') > 0) & (col('normal') > 0))
cluster_rf = (crosstab
.filter(exp).rdd
.map(lambda row: (int(row['cluster_labels2']), [row['count'], row['attack']/row['count']]))
.collectAsMap())
cluster_mapping = (crosstab
.filter(~exp).rdd
.map(lambda row: (int(row['cluster_labels2']), 1.0 if (row['count'] <= 25) | (row['normal'] == 0) else 0.0))
.collectAsMap())
return cluster_rf, cluster_mapping
kmeans_cluster_rf, kmeans_cluster_mapping = splitClusters(kmeans_crosstab)
print(len(kmeans_cluster_rf), len(kmeans_cluster_mapping))
print(kmeans_cluster_mapping)
kmeans_cluster_rf
from pyspark.ml.classification import RandomForestClassifier
# This function returns Random Forest models for provided clusters
def getClusterModels(df, cluster_rf):
cluster_models = {}
labels_col = 'labels2_cl_index'
labels2_indexer.setOutputCol(labels_col)
rf_slicer = VectorSlicer(inputCol="indexed_features", outputCol="rf_features",
names=selectFeaturesByAR(ar_dict, 0.05))
for cluster in cluster_rf.keys():
t1 = time()
rf_classifier = RandomForestClassifier(labelCol=labels_col, featuresCol='rf_features', seed=seed,
numTrees=500, maxDepth=20, featureSubsetStrategy="sqrt")
rf_pipeline = Pipeline(stages=[labels2_indexer, rf_slicer, rf_classifier])
cluster_models[cluster] = rf_pipeline.fit(df.filter(col('cluster') == cluster))
print("Finished %g cluster in %g s" % (cluster, time() - t1))
return cluster_models
# This utility function helps to get predictions/probabilities for the new data and return them into one dataframe
def getProbabilities(df, probCol, cluster_mapping, cluster_models):
pred_df = (sqlContext.createDataFrame([], StructType([
StructField('id', LongType(), False),
StructField(probCol, DoubleType(), False)])))
udf_map = udf(lambda cluster: cluster_mapping[cluster], DoubleType())
pred_df = pred_df.union(df.filter(col('cluster').isin(list(cluster_mapping.keys())))
.withColumn(probCol, udf_map(col('cluster')))
.select('id', probCol))
for k in cluster_models.keys():
maj_label = cluster_models[k].stages[0].labels[0]
udf_remap_prob = udf(lambda row: float(row[0]) if (maj_label == 'attack') else float(row[1]), DoubleType())
pred_df = pred_df.union(cluster_models[k]
.transform(df.filter(col('cluster') == k))
.withColumn(probCol, udf_remap_prob(col('probability')))
.select('id', probCol))
return pred_df
# Training Random Forest classifiers for each of the clusters
t0 = time()
kmeans_cluster_models = getClusterModels(kmeans_train_df, kmeans_cluster_rf)
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for CV data
t0 = time()
res_cv_df = (res_cv_df.drop(kmeans_prob_col)
.join(getProbabilities(kmeans_cv_df, kmeans_prob_col, kmeans_cluster_mapping, kmeans_cluster_models), 'id')
.cache())
print(res_cv_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for Test data
t0 = time()
res_test_df = (res_test_df.drop(kmeans_prob_col)
.join(getProbabilities(kmeans_test_df, kmeans_prob_col, kmeans_cluster_mapping, kmeans_cluster_models), 'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
As the CV data is from the same distribution as the train data, there is no need to adjust the threshold.
```
printReport(res_cv_df, kmeans_prob_col, e=0.5, labels=labels2)
```
Because the test data is from a different distribution and is expected to contain unseen attack types, it makes sense to adjust the probability threshold to something like 0.01 for attack connections (0.99 for normal connections). For this approach it gives around 98-99% Detection Rate with around 14-15% False Alarm Rate.
```
printReport(res_test_df, kmeans_prob_col, e=0.01, labels=labels2)
# Adding prediction columns based on chosen thresholds into result dataframes
t0 = time()
res_cv_df = res_cv_df.withColumn(kmeans_pred_col, getPrediction(0.5)(col(kmeans_prob_col))).cache()
res_test_df = res_test_df.withColumn(kmeans_pred_col, getPrediction(0.01)(col(kmeans_prob_col))).cache()
print(res_cv_df.count())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
## 9. Gaussian Mixture clustering with Random Forest Classifiers
The idea of this approach is to clusterize the data via Gaussian Mixture and then train a separate Random Forest classifier for each of the clusters. Gaussian Mixture produces a different clustering than KMeans, so results from both approaches can be combined to improve performance. As Gaussian Mixture clustering doesn't work well on high-dimensional data, the PCA algorithm is used for preprocessing.
```
gm_prob_col = 'gm_rf_prob'
gm_pred_col = 'gm_rf_pred'
prob_cols.append(gm_prob_col)
pred_cols.append(gm_pred_col)
# Gaussian Mixture clustering
from pyspark.ml.clustering import GaussianMixture
t0 = time()
gm = GaussianMixture(k=8, maxIter=150, seed=seed, featuresCol="pca_features",
predictionCol="cluster", probabilityCol="gm_prob")
gm_pipeline = Pipeline(stages=[pca_slicer, pca, gm])
gm_model = gm_pipeline.fit(scaled_train_df)
gm_train_df = gm_model.transform(scaled_train_df).cache()
gm_cv_df = gm_model.transform(scaled_cv_df).cache()
gm_test_df = gm_model.transform(scaled_test_df).cache()
gm_params = (gm_model.stages[2].gaussiansDF.rdd
.map(lambda row: [row['mean'].toArray(), row['cov'].toArray()])
.collect())
gm_weights = gm_model.stages[2].weights
print(gm_train_df.count())
print(gm_cv_df.count())
print(gm_test_df.count())
print(f"Time: {time() - t0:.2f}s")
# Description of the contents of the clusters
gm_crosstab = getClusterCrosstab(gm_train_df).cache()
gm_crosstab.show(n=30)
# Splitting clusters
gm_cluster_rf, gm_cluster_mapping = splitClusters(gm_crosstab)
print(len(gm_cluster_rf), len(gm_cluster_mapping))
print(gm_cluster_mapping)
gm_cluster_rf
# Training Random Forest classifiers for each of the clusters
t0 = time()
gm_cluster_models = getClusterModels(gm_train_df, gm_cluster_rf)
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for CV data
t0 = time()
res_cv_df = (res_cv_df.drop(gm_prob_col)
.join(getProbabilities(gm_cv_df, gm_prob_col, gm_cluster_mapping, gm_cluster_models), 'id')
.cache())
print(res_cv_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for Test data
t0 = time()
res_test_df = (res_test_df.drop(gm_prob_col)
.join(getProbabilities(gm_test_df, gm_prob_col, gm_cluster_mapping, gm_cluster_models), 'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
printReport(res_cv_df, gm_prob_col, e=0.5, labels=labels2)
printReport(res_test_df, gm_prob_col, e=0.01, labels=labels2)
# Adding prediction columns based on chosen thresholds into result dataframes
t0 = time()
res_cv_df = res_cv_df.withColumn(gm_pred_col, getPrediction(0.5)(col(gm_prob_col))).cache()
res_test_df = res_test_df.withColumn(gm_pred_col, getPrediction(0.01)(col(gm_prob_col))).cache()
print(res_cv_df.count())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
## 10. Supervised approach for detecting each type of attack separately
The idea of the following approach is to train a Random Forest Classifier for each of the four major attack categories separately.
### 10.1 DoS and normal
```
dos_prob_col = 'dos_prob'
dos_pred_col = 'dos_pred'
prob_cols.append(dos_prob_col)
pred_cols.append(dos_pred_col)
dos_exp = (col('labels5') == 'DoS') | (col('labels5') == 'normal')
dos_train_df = (scaled_train_df.filter(dos_exp).cache())
print(dos_train_df.count())
(dos_train_df
.groupby('labels5')
.count()
.sort(sql.desc('count'))
.show())
```
The commented code below undersamples 'normal' connections. It could give better results; however, it hasn't been tested extensively yet.
```
# dos_train_df = dos_train_df.sampleBy('labels5', fractions={'normal': 45927./67343, 'DoS': 1.0}).cache()
# print(dos_train_df.count())
# (dos_train_df
# .groupby('labels5')
# .count()
# .sort(sql.desc('count'))
# .show())
```
A different AR feature selection is used since only normal and DoS connections are treated. Note that the train dataframe without standardization is used for building the Attribute Ratio dictionary.
```
t0 = time()
dos_ar_dict = getAttributeRatio(train_df.filter(dos_exp), numeric_cols, binary_cols, 'labels5')
print(f"Time: {time() - t0:.2f}s")
dos_ar_dict
t0 = time()
dos_slicer = VectorSlicer(inputCol="indexed_features", outputCol="features",
names=selectFeaturesByAR(dos_ar_dict, 0.05))
dos_rf = RandomForestClassifier(labelCol=labels_col, featuresCol='features', featureSubsetStrategy='sqrt',
numTrees=500, maxDepth=20, seed=seed)
dos_rf_pipeline = Pipeline(stages=[dos_slicer, dos_rf])
dos_rf_model = dos_rf_pipeline.fit(dos_train_df)
dos_cv_df = dos_rf_model.transform(scaled_cv_df).cache()
dos_test_df = dos_rf_model.transform(scaled_test_df).cache()
print(dos_cv_df.count())
print(dos_test_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for CV data
t0 = time()
res_cv_df = (res_cv_df.drop(dos_prob_col)
.join(dos_cv_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', dos_prob_col]),
'id')
.cache())
print(res_cv_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for Test data
t0 = time()
res_test_df = (res_test_df.drop(dos_prob_col)
.join(dos_test_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', dos_prob_col]),
'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
The first report shows classification performance for the 'normal' and 'DoS' labels; the second report shows performance on the whole data with an adjusted threshold.
```
printReport(res_cv_df.filter(dos_exp), probCol=dos_prob_col, e=0.5, labels=['normal', 'DoS'])
printReport(res_cv_df, probCol=dos_prob_col, e=0.05)
printReport(res_test_df.filter(dos_exp), probCol=dos_prob_col, e=0.5, labels=['normal', 'DoS'])
printReport(res_test_df, probCol=dos_prob_col, e=0.01)
# Adding prediction columns based on chosen thresholds into result dataframes
t0 = time()
res_cv_df = res_cv_df.withColumn(dos_pred_col, getPrediction(0.05)(col(dos_prob_col))).cache()
res_test_df = res_test_df.withColumn(dos_pred_col, getPrediction(0.01)(col(dos_prob_col))).cache()
print(res_cv_df.count())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
### 10.2 Probe and normal
```
probe_prob_col = 'probe_prob'
probe_pred_col = 'probe_pred'
prob_cols.append(probe_prob_col)
pred_cols.append(probe_pred_col)
probe_exp = (col('labels5') == 'Probe') | (col('labels5') == 'normal')
probe_train_df = (scaled_train_df.filter(probe_exp).cache())
print(probe_train_df.count())
(probe_train_df
.groupby('labels5')
.count()
.sort(sql.desc('count'))
.show())
```
The commented code below undersamples 'normal' connections. It could give better results; however, it hasn't been tested extensively yet.
```
# probe_train_df = probe_train_df.sampleBy('labels5', fractions={'normal': 9274./53789, 'Probe': 1.0}).cache()
# print(probe_train_df.count())
# (probe_train_df
# .groupby('labels5')
# .count()
# .sort(sql.desc('count'))
# .show())
```
A different AR feature selection is used since only normal and Probe connections are treated. Note that the train dataframe without standardization is used for building the Attribute Ratio dictionary.
```
t0 = time()
probe_ar_dict = getAttributeRatio(train_df.filter(probe_exp), numeric_cols, binary_cols, 'labels5')
print(f"Time: {time() - t0:.2f}s")
probe_ar_dict
t0 = time()
probe_slicer = VectorSlicer(inputCol="indexed_features", outputCol="features",
names=selectFeaturesByAR(probe_ar_dict, 0.05))
probe_rf = RandomForestClassifier(labelCol=labels_col, featuresCol='features', featureSubsetStrategy='sqrt',
numTrees=500, maxDepth=20, seed=seed)
probe_rf_pipeline = Pipeline(stages=[probe_slicer, probe_rf])
probe_rf_model = probe_rf_pipeline.fit(probe_train_df)
probe_cv_df = probe_rf_model.transform(scaled_cv_df).cache()
probe_test_df = probe_rf_model.transform(scaled_test_df).cache()
print(probe_cv_df.count())
print(probe_test_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for CV data
t0 = time()
res_cv_df = (res_cv_df.drop(probe_prob_col)
.join(probe_cv_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', probe_prob_col]), 'id')
.cache())
print(res_cv_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for Test data
t0 = time()
res_test_df = (res_test_df.drop(probe_prob_col)
.join(probe_test_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', probe_prob_col]), 'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
The first report shows classification performance for the 'normal' and 'Probe' labels; the second report shows performance on the whole data with an adjusted threshold.
```
printReport(res_cv_df.filter(probe_exp), probCol=probe_prob_col, e=0.5, labels=['normal', 'Probe'])
printReport(res_cv_df, probCol=probe_prob_col, e=0.05)
printReport(res_test_df.filter(probe_exp), probCol=probe_prob_col, e=0.5, labels=['normal', 'Probe'])
printReport(res_test_df, probCol=probe_prob_col, e=0.01)
# Adding prediction columns based on chosen thresholds into result dataframes
t0 = time()
res_cv_df = res_cv_df.withColumn(probe_pred_col, getPrediction(0.05)(col(probe_prob_col))).cache()
res_test_df = res_test_df.withColumn(probe_pred_col, getPrediction(0.01)(col(probe_prob_col))).cache()
print(res_cv_df.count())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
### 10.3 R2L, U2R and normal types
As there are only a few examples of both the R2L and U2R attack types and they exhibit similar behaviour, they are combined into one group.
```
r2l_u2r_prob_col = 'r2l_u2r_prob'
r2l_u2r_pred_col = 'r2l_u2r_pred'
prob_cols.append(r2l_u2r_prob_col)
pred_cols.append(r2l_u2r_pred_col)
r2l_u2r_exp = (col('labels5') == 'R2L') | (col('labels5') == 'U2R') | (col('labels5') == 'normal')
r2l_u2r_train_df = (scaled_train_df.filter(r2l_u2r_exp).cache())
print(r2l_u2r_train_df.count())
(r2l_u2r_train_df
.groupby('labels5')
.count()
.sort(sql.desc('count'))
.show())
```
A different AR feature selection is used since only normal, R2L and U2R connections are treated. Note that the train dataframe without standardization is used for building the Attribute Ratio dictionary.
```
t0 = time()
r2l_u2r_ar_dict = getAttributeRatio(train_df.filter(r2l_u2r_exp), numeric_cols, binary_cols, 'labels5')
print(f"Time: {time() - t0:.2f}s")
r2l_u2r_ar_dict
t0 = time()
r2l_u2r_slicer = VectorSlicer(inputCol="indexed_features", outputCol="features",
names=selectFeaturesByAR(r2l_u2r_ar_dict, 0.05))
r2l_u2r_rf = RandomForestClassifier(labelCol=labels_col, featuresCol='features', featureSubsetStrategy='sqrt',
numTrees=500, maxDepth=20, seed=seed)
r2l_u2r_rf_pipeline = Pipeline(stages=[r2l_u2r_slicer, r2l_u2r_rf])
r2l_u2r_rf_model = r2l_u2r_rf_pipeline.fit(r2l_u2r_train_df)
r2l_u2r_cv_df = r2l_u2r_rf_model.transform(scaled_cv_df).cache()
r2l_u2r_test_df = r2l_u2r_rf_model.transform(scaled_test_df).cache()
print(r2l_u2r_cv_df.count())
print(r2l_u2r_test_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for CV data
t0 = time()
res_cv_df = (res_cv_df.drop(r2l_u2r_prob_col)
.join(r2l_u2r_cv_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', r2l_u2r_prob_col]), 'id')
.cache())
print(res_cv_df.count())
print(f"Time: {time() - t0:.2f}s")
# Getting probabilities for Test data
t0 = time()
res_test_df = (res_test_df.drop(r2l_u2r_prob_col)
.join(r2l_u2r_test_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', r2l_u2r_prob_col]), 'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
The first report shows classification performance for the 'normal' and 'R2L&U2R' labels; the second report shows performance on the whole data with an adjusted threshold.
```
printReport(res_cv_df.filter(r2l_u2r_exp), probCol=r2l_u2r_prob_col, e=0.5, labels=['normal', 'R2L&U2R'])
printReport(res_cv_df, probCol=r2l_u2r_prob_col, e=0.05, labels=labels2)
printReport(res_test_df.filter(r2l_u2r_exp), probCol=r2l_u2r_prob_col, e=0.5, labels=['normal', 'R2L&U2R'])
printReport(res_test_df, probCol=r2l_u2r_prob_col, e=0.01, labels=labels2)
# Adding prediction columns based on chosen thresholds into result dataframes
t0 = time()
res_cv_df = res_cv_df.withColumn(r2l_u2r_pred_col, getPrediction(0.05)(col(r2l_u2r_prob_col))).cache()
res_test_df = res_test_df.withColumn(r2l_u2r_pred_col, getPrediction(0.01)(col(r2l_u2r_prob_col))).cache()
print(res_cv_df.count())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
```
### 10.4 Combining results
```
sup_prob_col = 'sup_prob'
sup_pred_col = 'sup_pred'
prob_cols.append(sup_prob_col)
pred_cols.append(sup_pred_col)
res_cv_df = res_cv_df.withColumn(sup_prob_col,
(col(dos_prob_col) + col(probe_prob_col) + col(r2l_u2r_prob_col))/3).cache()
printReport(res_cv_df, sup_prob_col, e=0.05, labels=labels2)
res_cv_df = res_cv_df.withColumn(sup_pred_col, col(dos_pred_col).cast('int')
.bitwiseOR(col(probe_pred_col).cast('int'))
.bitwiseOR(col(r2l_u2r_pred_col).cast('int'))).cache()
printReport(res_cv_df, sup_pred_col, labels=labels2)
res_test_df = res_test_df.withColumn(sup_prob_col,
(col(dos_prob_col) + col(probe_prob_col) + col(r2l_u2r_prob_col))/3).cache()
printReport(res_test_df, sup_prob_col, e=0.005, labels=labels2)
res_test_df = res_test_df.withColumn(sup_pred_col, col(dos_pred_col).cast('int')
.bitwiseOR(col(probe_pred_col).cast('int'))
.bitwiseOR(col(r2l_u2r_pred_col).cast('int'))).cache()
printReport(res_test_df, sup_pred_col, labels=labels2)
```
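A note on the combination above: averaging the three probabilities yields a soft ensemble score, while OR-ing the binary predictions flags a connection as an attack whenever any of the three specialist classifiers does, which raises the detection rate at the cost of additional false alarms.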
## 11. Ensembling experiments
Here are some experiments with ensembling and stacking results from different approaches.
### 11.1 Linear combination of all models
```
# Printing report of the best single model for comparison
printReport(res_test_df, kmeans_pred_col)
# Linear combination of all models
printReport(res_test_df
.select('labels2_index', ((3 * col(kmeans_prob_col) \
+ col(gm_prob_col) \
+ col(dos_prob_col) \
+ col(probe_prob_col) \
+ col(r2l_u2r_prob_col))/7)
.alias('voting')),
'voting', e=0.005, labels=labels2)
printReport(res_test_df
.select('labels2_index', ((2 * col(kmeans_prob_col) \
+ col(gm_prob_col) \
+ col(sup_prob_col))/4)
.alias('voting')),
'voting', e=0.005, labels=labels2)
printReport(res_test_df
.select('labels2_index', (col(kmeans_pred_col).cast('int')
.bitwiseOR(col(gm_pred_col).cast('int'))
.bitwiseOR(col(sup_pred_col).cast('int')))
.alias('voting')),
'voting', labels=labels2)
```
### 11.2 Logistic Regression and Random Forest Classifier
```
from pyspark.ml.classification import LogisticRegression
t0 = time()
lr_assembler = VectorAssembler(inputCols=[
kmeans_prob_col,
gm_prob_col,
dos_prob_col,
probe_prob_col,
r2l_u2r_prob_col
],
outputCol="features")
lr = LogisticRegression(maxIter=100, labelCol="labels2_index", standardization=False, weightCol='weights')
lr_pipeline = Pipeline(stages=[lr_assembler, lr])
weights_dict = {
'normal': 1.0,
'DoS': 100.0,
'Probe': 100.0,
'R2L': 100.0,
'U2R': 100.0
}
udf_weight = udf(lambda row: weights_dict[row], DoubleType())
lr_model = lr_pipeline.fit(res_cv_df.withColumn('weights', udf_weight('labels5')))
lr_test_df = lr_model.transform(res_test_df).cache()
res_test_df = (res_test_df.drop('lr_prob')
.join(lr_test_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', 'lr_prob']), 'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
printReport(res_test_df, 'lr_prob', e=0.01, labels=labels2)
t0 = time()
rf_assembler = VectorAssembler(inputCols=[
kmeans_pred_col,
gm_pred_col,
dos_pred_col,
probe_pred_col,
r2l_u2r_pred_col
],
outputCol='features')
rf_indexer = VectorIndexer(inputCol='features', outputCol='indexed_features', maxCategories=2)
rf = RandomForestClassifier(labelCol='labels2_index', featuresCol='features', seed=seed,
numTrees=250, maxDepth=5, featureSubsetStrategy='auto')
rf_pipeline = Pipeline(stages=[rf_assembler,
rf_indexer,
rf])
rf_model = rf_pipeline.fit(res_cv_df)
rf_test_df = rf_model.transform(res_test_df).cache()
res_test_df = (res_test_df.drop('rf_prob')
.join(rf_test_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', 'rf_prob']), 'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
printReport(res_test_df, 'rf_prob', e=0.01, labels=labels2)
# Adding prediction columns based on chosen thresholds into result dataframes
t0 = time()
res_test_df = res_test_df.withColumn('lr_pred', getPrediction(0.01)(col('lr_prob'))).cache()
res_test_df = res_test_df.withColumn('rf_pred', getPrediction(0.01)(col('rf_prob'))).cache()
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
printReport(res_test_df
.select('labels2_index', ((col('lr_prob') + col('rf_prob'))/2)
.alias('voting')),
'voting', e=0.01, labels=labels2)
printReport(res_test_df
.select('labels2_index', (col('lr_pred').cast('int').bitwiseOR(col('rf_pred').cast('int')))
.alias('voting')),
'voting', labels=labels2)
```
### 11.3 Stacking with Random Forest Classifier
```
stack_cv_df = scaled_cv_df.join(res_cv_df.select('id', *[
kmeans_pred_col,
gm_pred_col,
dos_pred_col,
probe_pred_col,
r2l_u2r_pred_col,
sup_pred_col
]), 'id').cache()
stack_test_df = scaled_test_df.join(res_test_df.select('id', *[
kmeans_pred_col,
gm_pred_col,
dos_pred_col,
probe_pred_col,
r2l_u2r_pred_col,
sup_pred_col
]), 'id').cache()
print(stack_cv_df.count())
print(stack_test_df.count())
t0 = time()
pred_assembler = VectorAssembler(inputCols=[
kmeans_pred_col,
gm_pred_col,
dos_pred_col,
probe_pred_col,
r2l_u2r_pred_col,
sup_pred_col
], outputCol='pred_features')
pred_indexer = VectorIndexer(inputCol='pred_features', outputCol='indexed_pred_features', maxCategories=2)
rf_stack_slicer = VectorSlicer(inputCol='indexed_features', outputCol='selected_features',
names=selectFeaturesByAR(ar_dict, 1.5))
rf_stack_assembler = VectorAssembler(inputCols=['selected_features', 'indexed_pred_features'], outputCol='rf_features')
rf_stack_classifier = RandomForestClassifier(labelCol=labels_col, featuresCol='rf_features', seed=seed,
numTrees=500, maxDepth=20, featureSubsetStrategy="auto")
stack_pipeline = Pipeline(stages=[pred_assembler,
pred_indexer,
rf_stack_slicer,
rf_stack_assembler,
rf_stack_classifier
])
stack_model = stack_pipeline.fit(stack_cv_df)
pred_stack_cv_df = stack_model.transform(stack_cv_df).cache()
pred_stack_test_df = stack_model.transform(stack_test_df).cache()
print(pred_stack_cv_df.count())
print(pred_stack_test_df.count())
print(f"Time: {time() - t0:.2f}s")
t0 = time()
res_cv_df = res_cv_df.drop('prob_stack_rf')
res_cv_df = (res_cv_df.join(pred_stack_cv_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', 'prob_stack_rf']),
'id')
.cache())
print(res_cv_df.count())
print(f"Time: {time() - t0:.2f}s")
t0 = time()
res_test_df = res_test_df.drop('prob_stack_rf')
res_test_df = (res_test_df.join(pred_stack_test_df.rdd
.map(lambda row: (row['id'], float(row['probability'][1])))
.toDF(['id', 'prob_stack_rf']),
'id')
.cache())
print(res_test_df.count())
print(f"Time: {time() - t0:.2f}s")
printReport(res_test_df, 'prob_stack_rf', e=0.01, labels=labels2)
print(f"Time: {time() - gt0:.2f}s")
```
## 12. Results summary
The best result from a single approach was achieved by KMeans clustering with Random Forest classifiers. It gives
around ~98-99% detection rate with a reasonable ~14-15% false alarm rate. The F1 score is 0.94 and the weighted F1 score is 0.93.
To improve the detection rate, ensembling approaches are used. The best of them gives a ~99.5-99.6% detection rate with a ~16.1-16.6% false alarm rate, so only about 40-90 attack connections out of 12833 (including previously unseen attack types) go unrecognized.
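For reference, these metrics follow the standard definitions: the detection rate is DR = TP / (TP + FN), the fraction of attack connections correctly flagged, and the false alarm rate is FAR = FP / (FP + TN), the fraction of normal connections incorrectly flagged as attacks.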
# Lecture 2b: Introduction to Qiskit
**By Adam Fattal**
Welcome to the first practical lecture! In this lecture, we will introduce Qiskit, a package developed by IBM Quantum that allows one to simulate and run quantum circuits and much more! This lecture covers only the surface of Qiskit's functionality; for more, check out Qiskit's [documentation](https://qiskit.org/documentation/).
## Importing Qiskit
```
from qiskit import *
import numpy as np
```
## Part 1: Building Circuits
Let's try to build these quantum circuits:
<img src='assets/1.png'>
```
circ1 = QuantumCircuit(1,1)
circ1.h(0)
circ1.draw('mpl')
```
<img src='assets/2.png'>
```
circ2 = QuantumCircuit(2,2)
circ2.h(0)
circ2.z(1)
circ2.draw('mpl')
```
<img src='assets/3.png'>
```
circ3 = QuantumCircuit(2,2)
circ3.h(0)
circ3.cx(0,1)
circ3.measure([0,1],[0,1])
circ3.draw('mpl')
```
## Part 2: Using Quantum Circuits
### Statevectors
```
simulator = Aer.get_backend('statevector_simulator')
result = execute(circ2,backend = simulator).result()
psi = result.get_statevector()
psi
np.linalg.norm(psi)
```
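The norm printed by the last line should be 1.0 (up to floating-point error), since valid statevectors are always normalized.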
### Getting the Unitary
```
simulator = Aer.get_backend('unitary_simulator')
result = execute(circ3,backend = simulator).result()
U = result.get_unitary()
U@np.array([1,0,0,0])
```
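As a quick sanity check (an aside, not part of the original lecture; it assumes `get_unitary()` returns a plain NumPy array, as it does in the Qiskit version used here), you can verify that the returned matrix is indeed unitary:
```
U_mat = np.asarray(U)
# a matrix is unitary iff U U† = I (up to numerical precision)
print(np.allclose(U_mat @ U_mat.conj().T, np.eye(U_mat.shape[0])))  # expected: True
```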
### Getting the Bloch Spheres
```
from qiskit.tools.visualization import plot_bloch_multivector
plot_bloch_multivector(psi)
```
### Simulating Results
```
from qiskit.tools.visualization import plot_histogram
backend = Aer.get_backend('qasm_simulator')
result = execute(circ3,backend, shots = 420).result()
output = result.get_counts()
plot_histogram(output)
```
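Since these counts come from 420 shots, they can be normalized into estimated outcome probabilities (a small aside, not in the original lecture):
```
shots = 420
probs = {state: count / shots for state, count in output.items()}
print(probs)  # for the Bell circuit circ3, roughly {'00': 0.5, '11': 0.5}
```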
### Full Example
```
qc = QuantumCircuit(4,4)
qc.h(0)
qc.rx(np.pi/3, 1)
qc.x(1)
qc.y(2)
qc.z(3)
qc.cnot(0,2)
qc.measure([i for i in range(3)], [i for i in range(3)])
qc.draw('mpl')
backend = Aer.get_backend('qasm_simulator')
result = execute(qc,backend, shots = 420).result()
output = result.get_counts()
plot_histogram(output)
```
## Part 3: Running circuits on a real quantum computer
```
#Defining a quantum circuit with 2 qubits and 2 classical bits
phiPlus = QuantumCircuit(2,2)
#Preparing a |Φ+> state
phiPlus.h(0)
phiPlus.cnot(0,1)
phiPlus.measure([0,1],[0,1])
#This is what you type to run on real IBMQ hardware
IBMQ.load_account() #This is how you load your account
provider = IBMQ.get_provider('ibm-q') #This is how you get the ibm-q provider
qcomp = provider.get_backend('ibmq_16_melbourne') #This is how you select the device you want to use
job = execute(phiPlus, backend=qcomp, shots=1024) #This is how you tell the device which circuit to run
from qiskit.tools.monitor import job_monitor
job_monitor(job) #Monitor the job
result = job.result() #Get Results
result
plot_histogram(result.get_counts(phiPlus))
```
## Part 4: Grover's Algorithm Demonstration
```
PI = np.pi
def groverCircuit(target):
target_list = [int(x) for x in str(target)] #Converts the target into a list (e.g '1001' => [1,0,0,1])
n = len(target_list) #Length of target list (i.e nbr of qubits)
counter = [i for i in range(n)] #List containing integers from 0 to num_qubits - 1
#Defining a CnP gate. Note that CnP(PI) = CNZ
def mcp(self, lam, control_qubits, target_qubit):
from qiskit.circuit.library import MCPhaseGate
num_ctrl_qubits = len(control_qubits)
return self.append(MCPhaseGate(lam, num_ctrl_qubits), control_qubits[:] + [target_qubit],
[])
#Sub-circuit 1: Hadamard on all qubits
def hadamards(target):
hadCirc = QuantumCircuit(n,n)
hadCirc.h(counter)
hadCirc.barrier()
return hadCirc
#Sub-circuit 2: Oracle
def oracle(target):
filtered = [counter[i] for i in range(n) if target_list[i]==0] #Filtering the counter list to only the indices where target==0
oracleCirc = QuantumCircuit(n,n)
if filtered != []:
oracleCirc.x(filtered) #In other words, if target only has 1s, do nothing
mcp(oracleCirc, np.pi, [i for i in range(n-1)],n-1)
if filtered != []:
oracleCirc.x(filtered) #Applying X gates to the qubits which represent 0
oracleCirc.barrier()
return oracleCirc
#Sub-circuit 3: Amplifier
def amplification(target):
ampCirc = QuantumCircuit(n,n)
ampCirc.h(counter)
ampCirc.x(counter)
mcp(ampCirc, np.pi, [i for i in range(n-1)],n-1)
ampCirc.x(counter)
ampCirc.h(counter)
ampCirc.barrier()
return ampCirc
k = round(PI/4 * np.sqrt(2**n) - 0.5) #Ideal number of iterations: k = π/4 * √N - 1/2, with N = 2^n search states
circuit = hadamards(target)
for i in range(k): #Iterating the oracle and amplification
circuit+=oracle(target)
circuit+= amplification(target)
circuit.measure(counter, counter)
return circuit
from qiskit.tools.visualization import plot_histogram
circuit = groverCircuit('1001')
backend = Aer.get_backend('qasm_simulator')
result = execute(circuit,backend, shots = 420).result()
output = result.get_counts()
circuit.draw('mpl')
plot_histogram(output)
```
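For the target `'1001'` we have n = 4 qubits, so N = 2⁴ = 16 basis states, and the ideal iteration count evaluates to round(π/4 · √16 − 1/2) = round(2.64) = 3 Grover iterations.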
## Further Reading:
[1] <a href='https://www.youtube.com/watch?v=a1NZC5rqQD8&list=PLOFEBzvs-Vvp2xg9-POLJhQwtVktlYGbY'>Qiskit Tutorial by Qiskit</a>
[2] <a href='https://qiskit.org/documentation/tutorials/circuits/3_summary_of_quantum_operations.html'>Qiskit Summary of Operations </a>
[3] <a href='https://qiskit.org/textbook/preface.html'>Qiskit Textbook</a>
[4] <a href='https://www.youtube.com/watch?v=yprDIC-9D0k'>Getting Started with Qiskit Demo </a>
```
import numpy as np
import matplotlib.pyplot as plt
Jac_type = {1:'Sacado ', 0:'Analytic ', 2:'Numerical '}
format_line={'names': ('computation type', 'total time', 'time per sample'), 'formats': ('S30', 'f16', 'f16')}
vector= [16, 16, 16, 32, 32, 32, 32 ]
team=[2, 4, 8, 1, 2, 4, 8 ]
sacado_team_vector = {0:'2x16', 1:'4x16', 2:'8x16', 3:'1x32', 4:'2x32', 5:'4x32', 6:'8x32'} # team size x vector size
output_times_2="kokkos-dev-2/DeviceJacSacado"
Nsp = [1, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000, 200000, 300000]
data = np.zeros([len(Nsp), len(team)])
for m, N in enumerate(Nsp):
    for i in range(len(team)):
        last_name = "/Times_nBacth"+str(N)+"_V"+str(vector[i])+"T"+str(team[i])+".dat"
        output_2 = output_times_2 + last_name
        try:
            temp_data2 = np.genfromtxt(output_2, dtype=format_line, delimiter=",", comments="#")
            data[m,i] = temp_data2[1][2]
        except:
            print('File is empty or missing: ' + output_2)
            data[m,i] = np.nan
vector= [32,32,32,32]
team=[1,2,4,8]
analytic_team_vector = {0:'1x32', 1:'2x32', 2:'4x32', 3:'8x32'} # team size x vector size
data2 = np.zeros([len(Nsp), len(team)])
output_times_1="kokkos-dev-2/DeviceJacAnalytic"
for m, N in enumerate(Nsp):
    for i in range(len(team)):
        last_name = "/Times_nBacth"+str(N)+"_V"+str(vector[i])+"T"+str(team[i])+".dat"
        output_2 = output_times_1 + last_name
        try:
            temp_data2 = np.genfromtxt(output_2, dtype=format_line, delimiter=",", comments="#")
            data2[m,i] = temp_data2[1][2]
        except:
            print('File is empty or missing: ' + output_2)
            data2[m,i] = np.nan
vector= [32,32,32,32]
team=[1,2,4,8]
numerical_team_vector = {0:'1x32', 1:'2x32', 2:'4x32', 3:'8x32'} # team size x vector size
data3 = np.zeros([len(Nsp), len(team)])
output_times_1="kokkos-dev-2/DeviceJacNumFwd"
for m, N in enumerate(Nsp):
    for i in range(len(team)):
        last_name = "/Times_nBacth"+str(N)+"_V"+str(vector[i])+"T"+str(team[i])+".dat"
        output_2 = output_times_1 + last_name
        try:
            temp_data2 = np.genfromtxt(output_2, dtype=format_line, delimiter=",", comments="#")
            data3[m,i] = temp_data2[1][2]
        except:
            print('File is empty or missing: ' + output_2)
            data3[m,i] = np.nan
inx_Jac_type = 1
plt.figure()
plt.title('Jacobian type:'+ Jac_type[inx_Jac_type])
for i in range(7):
plt.plot(Nsp,data[:,i],'o--',label= sacado_team_vector[i])
plt.yscale('log')
plt.xscale('log')
plt.ylabel('Time per sample [s]')
plt.xlabel('Number of samples')
plt.legend(loc='best')
inx_Jac_type = 0
plt.figure()
plt.title('Jacobian type:'+ Jac_type[inx_Jac_type])
for i in range(4):
plt.plot(Nsp,data2[:,i],'o--',label= analytic_team_vector[i])
plt.yscale('log')
plt.xscale('log')
plt.ylabel('Time per sample [s]')
plt.xlabel('Number of samples')
plt.legend(loc='best')
inx_Jac_type = 2
plt.figure()
plt.title('Jacobian type:'+ Jac_type[inx_Jac_type])
for i in range(4):
plt.plot(Nsp,data3[:,i],'o--',label=numerical_team_vector[i])
plt.yscale('log')
plt.xscale('log')
plt.ylabel('Time per sample [s]')
plt.xlabel('Number of samples')
plt.legend(loc='best')
```
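The three loading loops above are nearly identical; a small helper like the sketch below (a suggested refactor under the same assumptions: identical directory layout and file naming, including the `nBacth` spelling of the actual file names) would avoid the repetition:
```
def load_times(base_dir, Nsp, vector, team, fmt=format_line):
    """Load time-per-sample for every (batch size, vector x team) combination."""
    data = np.full([len(Nsp), len(team)], np.nan)
    for m, N in enumerate(Nsp):
        for i in range(len(team)):
            fname = base_dir + "/Times_nBacth" + str(N) + "_V" + str(vector[i]) + "T" + str(team[i]) + ".dat"
            try:
                rows = np.genfromtxt(fname, dtype=fmt, delimiter=",", comments="#")
                data[m, i] = rows[1][2]
            except (OSError, IndexError):
                print("Could not read " + fname)
    return data

# e.g. data = load_times("kokkos-dev-2/DeviceJacSacado", Nsp, vector, team)
```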
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D2_ModelingPractice/W1D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week1, Day 2, Tutorial 2
# Tutorial objectives
We are investigating a simple phenomenon, working through the 10 steps of modeling ([Blohm et al., 2019](https://doi.org/10.1523/ENEURO.0352-19.2019)) in two notebooks:
**Framing the question**
1. finding a phenomenon and a question to ask about it
2. understanding the state of the art
3. determining the basic ingredients
4. formulating specific, mathematically defined hypotheses
**Implementing the model**
5. selecting the toolkit
6. planning the model
7. implementing the model
**Model testing**
8. completing the model
9. testing and evaluating the model
**Publishing**
10. publishing models
We did steps 1-5 in Tutorial 1 and will cover steps 6-10 in Tutorial 2 (this notebook).
# Utilities Setup and Convenience Functions
Please run the following **3** chunks to have functions and data available.
```
#@title Utilities and setup
# set up the environment for this tutorial
import time # import time
import numpy as np # import numpy
import scipy as sp # import scipy
from scipy.stats import gamma # import gamma distribution
import math # import basic math functions
import random # import basic random number generator functions
import matplotlib.pyplot as plt # import matplotlib
from IPython import display
fig_w, fig_h = (12, 8)
plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})
plt.style.use('ggplot')
%matplotlib inline
#%config InlineBackend.figure_format = 'retina'
from scipy.signal import medfilt
#@title Convenience functions: Plotting and Filtering
# define some convenience functions to be used later
def my_moving_window(x, window=3, FUN=np.mean):
'''
Calculates a moving estimate for a signal
Args:
x (numpy.ndarray): a vector array of size N
window (int): size of the window, must be a positive integer
FUN (function): the function to apply to the samples in the window
Returns:
(numpy.ndarray): a vector array of size N, containing the moving average
of x, calculated with a window of size window
There are smarter and faster solutions (e.g. using convolution) but this
function shows what the output really means. This function skips NaNs, and
should not be susceptible to edge effects: it will simply use
all the available samples, which means that close to the edges of the
signal or close to NaNs, the output will just be based on fewer samples. By
default, this function will apply a mean to the samples in the window, but
this can be changed to be a max/min/median or other function that returns a
single numeric value based on a sequence of values.
'''
# if data is a matrix, apply filter to each row:
if len(x.shape) == 2:
output = np.zeros(x.shape)
for rown in range(x.shape[0]):
output[rown,:] = my_moving_window(x[rown,:],window=window,FUN=FUN)
return output
# make output array of the same size as x:
output = np.zeros(x.size)
# loop through the signal in x
for samp_i in range(x.size):
values = []
# loop through the window:
for wind_i in range(int(-window), 1):
if ((samp_i+wind_i) < 0) or (samp_i+wind_i) > (x.size - 1):
# out of range
continue
# sample is in range and not nan, use it:
if not(np.isnan(x[samp_i+wind_i])):
values += [x[samp_i+wind_i]]
# calculate the mean in the window for this point in the output:
output[samp_i] = FUN(values)
return output
def my_plot_percepts(datasets=None, plotconditions=False):
if isinstance(datasets,dict):
# try to plot the datasets
# they should be named...
# 'expectations', 'judgments', 'predictions'
fig = plt.figure(figsize=(8, 8)) # set aspect ratio = 1? not really
plt.ylabel('perceived self motion [m/s]')
plt.xlabel('perceived world motion [m/s]')
plt.title('perceived velocities')
# loop through the entries in datasets
# plot them in the appropriate way
for k in datasets.keys():
if k == 'expectations':
expect = datasets[k]
plt.scatter(expect['world'],expect['self'],marker='*',color='xkcd:green',label='my expectations')
elif k == 'judgments':
judgments = datasets[k]
for condition in np.unique(judgments[:,0]):
c_idx = np.where(judgments[:,0] == condition)[0]
cond_self_motion = judgments[c_idx[0],1]
cond_world_motion = judgments[c_idx[0],2]
if cond_world_motion == -1 and cond_self_motion == 0:
c_label = 'world-motion condition judgments'
elif cond_world_motion == 0 and cond_self_motion == 1:
c_label = 'self-motion condition judgments'
else:
c_label = 'condition [%d] judgments'%condition
plt.scatter(judgments[c_idx,3],judgments[c_idx,4], label=c_label, alpha=0.2)
elif k == 'predictions':
predictions = datasets[k]
for condition in np.unique(predictions[:,0]):
c_idx = np.where(predictions[:,0] == condition)[0]
cond_self_motion = predictions[c_idx[0],1]
cond_world_motion = predictions[c_idx[0],2]
if cond_world_motion == -1 and cond_self_motion == 0:
c_label = 'predicted world-motion condition'
elif cond_world_motion == 0 and cond_self_motion == 1:
c_label = 'predicted self-motion condition'
else:
c_label = 'condition [%d] prediction'%condition
plt.scatter(predictions[c_idx,4],predictions[c_idx,3], marker='x', label=c_label)
else:
print("datasets keys should be 'hypothesis', 'judgments' and 'predictions'")
if plotconditions:
# this code is simplified but only works for the dataset we have:
plt.scatter([1],[0],marker='<',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='world-motion stimulus',s=80)
plt.scatter([0],[1],marker='>',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='self-motion stimulus',s=80)
plt.legend(facecolor='xkcd:white')
plt.show()
else:
if datasets is not None:
print('datasets argument should be a dict')
raise TypeError
def my_plot_motion_signals():
dt = 1/10
a = gamma.pdf( np.arange(0,10,dt), 2.5, 0 )
t = np.arange(0,10,dt)
v = np.cumsum(a*dt)
fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row', figsize=(14,6))
fig.suptitle('Sensory ground truth')
ax1.set_title('world-motion condition')
ax1.plot(t,-v,label='visual [$m/s$]')
ax1.plot(t,np.zeros(a.size),label='vestibular [$m/s^2$]')
ax1.set_xlabel('time [s]')
ax1.set_ylabel('motion')
ax1.legend(facecolor='xkcd:white')
ax2.set_title('self-motion condition')
ax2.plot(t,-v,label='visual [$m/s$]')
ax2.plot(t,a,label='vestibular [$m/s^2$]')
ax2.set_xlabel('time [s]')
ax2.set_ylabel('motion')
ax2.legend(facecolor='xkcd:white')
plt.show()
def my_plot_sensorysignals(judgments, opticflow, vestibular, returnaxes=False, addaverages=False):
wm_idx = np.where(judgments[:,0] == 0)
sm_idx = np.where(judgments[:,0] == 1)
opticflow = opticflow.transpose()
wm_opticflow = np.squeeze(opticflow[:,wm_idx])
sm_opticflow = np.squeeze(opticflow[:,sm_idx])
vestibular = vestibular.transpose()
wm_vestibular = np.squeeze(vestibular[:,wm_idx])
sm_vestibular = np.squeeze(vestibular[:,sm_idx])
X = np.arange(0,10,.1)
fig, my_axes = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(15,10))
fig.suptitle('Sensory signals')
my_axes[0][0].plot(X,wm_opticflow, color='xkcd:light red', alpha=0.1)
my_axes[0][0].plot([0,10], [0,0], ':', color='xkcd:black')
if addaverages:
my_axes[0][0].plot(X,np.average(wm_opticflow, axis=1), color='xkcd:red', alpha=1)
my_axes[0][0].set_title('world-motion optic flow')
my_axes[0][0].set_ylabel('[motion]')
my_axes[0][1].plot(X,sm_opticflow, color='xkcd:azure', alpha=0.1)
my_axes[0][1].plot([0,10], [0,0], ':', color='xkcd:black')
if addaverages:
my_axes[0][1].plot(X,np.average(sm_opticflow, axis=1), color='xkcd:blue', alpha=1)
my_axes[0][1].set_title('self-motion optic flow')
my_axes[1][0].plot(X,wm_vestibular, color='xkcd:light red', alpha=0.1)
my_axes[1][0].plot([0,10], [0,0], ':', color='xkcd:black')
if addaverages:
my_axes[1][0].plot(X,np.average(wm_vestibular, axis=1), color='xkcd:red', alpha=1)
my_axes[1][0].set_title('world-motion vestibular signal')
my_axes[1][0].set_xlabel('time [s]')
my_axes[1][0].set_ylabel('[motion]')
my_axes[1][1].plot(X,sm_vestibular, color='xkcd:azure', alpha=0.1)
my_axes[1][1].plot([0,10], [0,0], ':', color='xkcd:black')
if addaverages:
my_axes[1][1].plot(X,np.average(sm_vestibular, axis=1), color='xkcd:blue', alpha=1)
my_axes[1][1].set_title('self-motion vestibular signal')
my_axes[1][1].set_xlabel('time [s]')
if returnaxes:
return my_axes
else:
plt.show()
def my_plot_thresholds(thresholds, world_prop, self_prop, prop_correct):
plt.figure(figsize=(12,8))
plt.title('threshold effects')
plt.plot([min(thresholds),max(thresholds)],[0,0],':',color='xkcd:black')
plt.plot([min(thresholds),max(thresholds)],[0.5,0.5],':',color='xkcd:black')
plt.plot([min(thresholds),max(thresholds)],[1,1],':',color='xkcd:black')
plt.plot(thresholds, world_prop, label='world motion')
plt.plot(thresholds, self_prop, label='self motion')
plt.plot(thresholds, prop_correct, color='xkcd:purple', label='correct classification')
plt.xlabel('threshold')
plt.ylabel('proportion correct or classified as self motion')
plt.legend(facecolor='xkcd:white')
plt.show()
def my_plot_predictions_data(judgments, predictions):
conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2])))
veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4]))
velpredict = np.concatenate((predictions[:,3],predictions[:,4]))
# self:
conditions_self = np.abs(judgments[:,1])
veljudgmnt_self = judgments[:,3]
velpredict_self = predictions[:,3]
# world:
conditions_world = np.abs(judgments[:,2])
veljudgmnt_world = judgments[:,4]
velpredict_world = predictions[:,4]
fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharey='row', figsize=(12,5))
ax1.scatter(veljudgmnt_self,velpredict_self, alpha=0.2)
ax1.plot([0,1],[0,1],':',color='xkcd:black')
ax1.set_title('self-motion judgments')
ax1.set_xlabel('observed')
ax1.set_ylabel('predicted')
ax2.scatter(veljudgmnt_world,velpredict_world, alpha=0.2)
ax2.plot([0,1],[0,1],':',color='xkcd:black')
ax2.set_title('world-motion judgments')
ax2.set_xlabel('observed')
ax2.set_ylabel('predicted')
plt.show()
#@title Data generation code (needs to go on OSF and be deleted here)
def my_simulate_data(repetitions=100, conditions=[(0,-1),(+1,0)] ):
"""
Generate simulated data for this tutorial. You do not need to run this
yourself.
Args:
    repetitions: (int) number of repetitions of each condition (default: 100)
    conditions: list of 2-tuples of floats, indicating the self velocity and
                world velocity in each condition (default: [(0,-1),(+1,0)],
                which returns data that is good for exploration, but this can
                be flexibly extended)
The total number of trials used (ntrials) is equal to:
repetitions * len(conditions)
Returns:
dict with three entries:
'judgments': ntrials * 5 matrix
'opticflow': ntrials * 100 matrix
'vestibular': ntrials * 100 matrix
  The default settings result in data where the first 100 trials reflect a
  situation where the world (other train) moves in one direction, supposedly
  at 1 m/s (perhaps to the left: -1) while the participant does not move at
  all (0), and 100 trials from a second condition, where the world does not
  move, while the participant moves at 1 m/s in the opposite direction from
  where the world is moving in the first condition (0,+1). The optic flow
  should be the same, but the vestibular input is not.
"""
# reproducible output
np.random.seed(1937)
# set up some variables:
ntrials = repetitions * len(conditions)
# the following arrays will contain the simulated data:
judgments = np.empty(shape=(ntrials,5))
opticflow = np.empty(shape=(ntrials,100))
vestibular = np.empty(shape=(ntrials,100))
# acceleration:
a = gamma.pdf(np.arange(0,10,.1), 2.5, 0 )
# divide by 10 so that velocity scales from 0 to 1 (m/s)
# max acceleration ~ .308 m/s^2
# not realistic! should be about 1/10 of that
# velocity:
v = np.cumsum(a*.1)
# position: (not necessary)
#x = np.cumsum(v)
#################################
# REMOVE ARBITRARY SCALING & CORRECT NOISE PARAMETERS
vest_amp = 1
optf_amp = 1
# we start at the first trial:
trialN = 0
# we start with only a single velocity, but it should be possible to extend this
for conditionno in range(len(conditions)):
condition = conditions[conditionno]
for repetition in range(repetitions):
#
# generate optic flow signal
OF = v * np.diff(condition) # optic flow: difference between self & world motion
OF = (OF * optf_amp) # fairly large spike range
OF = OF + (np.random.randn(len(OF)) * .1) # adding noise
# generate vestibular signal
VS = a * condition[0] # vestibular signal: only self motion
VS = (VS * vest_amp) # less range
VS = VS + (np.random.randn(len(VS)) * 1.) # acceleration is a smaller signal, what is a good noise level?
# store in matrices, corrected for sign
#opticflow[trialN,:] = OF * -1 if (np.sign(np.diff(condition)) < 0) else OF
#vestibular[trialN,:] = VS * -1 if (np.sign(condition[1]) < 0) else VS
opticflow[trialN,:], vestibular[trialN,:] = OF, VS
#########################################################
# store conditions in judgments matrix:
judgments[trialN,0:3] = [ conditionno, condition[0], condition[1] ]
# vestibular SD: 1.0916052957046194 and 0.9112684509277528
# visual SD: 0.10228834313079663 and 0.10975472557444346
# generate judgments:
if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,5)*.1)[70:90])) < 1):
###########################
# NO self motion detected
###########################
selfmotion_weights = np.array([.01,.01]) # there should be low/no self motion
worldmotion_weights = np.array([.01,.99]) # world motion is dictated by optic flow
else:
########################
# self motion DETECTED
########################
#if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,15)*.1)[70:90]) - np.average(medfilt(OF,15)[70:90])) < 5):
if True:
####################
# explain all self motion by optic flow
selfmotion_weights = np.array([.01,.99]) # there should be lots of self motion, but determined by optic flow
worldmotion_weights = np.array([.01,.01]) # very low world motion?
else:
# we use both optic flow and vestibular info to explain both
selfmotion_weights = np.array([ 1, 0]) # motion, but determined by vestibular signal
worldmotion_weights = np.array([ 1, 1]) # very low world motion?
#
integrated_signals = np.array([
np.average( np.cumsum(medfilt(VS/vest_amp,15))[90:100]*.1 ),
np.average((medfilt(OF/optf_amp,15))[90:100])
])
selfmotion = np.sum(integrated_signals * selfmotion_weights)
worldmotion = np.sum(integrated_signals * worldmotion_weights)
#print(worldmotion,selfmotion)
judgments[trialN,3] = abs(selfmotion)
judgments[trialN,4] = abs(worldmotion)
# this ends the trial loop, so we increment the counter:
trialN += 1
return {'judgments':judgments,
'opticflow':opticflow,
'vestibular':vestibular}
simulated_data = my_simulate_data()
judgments = simulated_data['judgments']
opticflow = simulated_data['opticflow']
vestibular = simulated_data['vestibular']
```
# Micro-tutorial 6 - planning the model
```
#@title Video: Planning the model
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='daEtkVporBE', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
### **Goal:** Identify the key components of the model and how they work together.
Our goal all along has been to model our perceptual estimates of sensory data.
Now that we have some idea of what we want to do, we need to line up the components of the model: what are the inputs and outputs? Which computations are done, and in what order?
The figure below shows a generic model we will use to guide our code construction.

Our model will have:
* **inputs**: the values the system has available - for this tutorial the sensory information in a trial. We want to gather these together and plan how to process them.
* **parameters**: unless we are lucky, our functions will have unknown parameters - we want to identify these and plan for them.
* **outputs**: these are the predictions our model will make - for this tutorial these are the perceptual judgments on each trial. Ideally these are directly comparable to our data.
* **Model functions**: A set of functions that perform the hypothesized computations.
>Using Python (with NumPy and SciPy) we will define a set of functions that take our data and some parameters as input, run our model, and output a prediction for the judgment data.
# Recap of what we've accomplished so far:
To model perceptual estimates from our sensory data, we need to
1. _integrate_ to ensure sensory information is in appropriate units
2. _reduce noise and set timescale_ by filtering
3. _threshold_ to model detection
Remember the kind of operations we identified:
* integration: `np.cumsum()`
* filtering: `my_moving_window()`
* threshold: `if` with a comparison (`>` or `<`) and `else`
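As a quick toy illustration (a synthetic example, not part of the tutorial data; it assumes the setup cells above have been run so that `gamma` and `my_moving_window` are available), here is how the three operations chain together:
```
dt = 0.1
acc = gamma.pdf(np.arange(0, 10, dt), 2.5, 0)  # synthetic acceleration profile
vel = np.cumsum(acc * dt)                      # 1. integrate acceleration into velocity
smooth = my_moving_window(vel, window=10)      # 2. filter to reduce noise / set timescale
estimate = smooth[-1]                          # take the final value as the estimate
detected = estimate if estimate > 0.3 else 0   # 3. threshold (0.3 is an arbitrary choice)
print(estimate, detected)
```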
We will collect all the components we've developed and design the code by:
1. **identifying the key functions** we need
2. **sketching the operations** needed in each.
**_Planning our model:_**
We know what we want the model to do, but we need to plan and organize the model into functions and operations.
We're providing a draft of the first function.
For each of the two other code chunks, write mostly comments and help text first. This should put into words what role each of the functions plays in the overall model, implementing one of the steps decided above.
_______
Below is the main function with a detailed explanation of what the function is supposed to do: what input is expected, and what output will generated.
The code is not complete, and only returns nans for now. However, this outlines how most model code works: it gets some measured data (the sensory signals) and a set of parameters as input, and as output returns a prediction on other measured data (the velocity judgments).
The goal of this function is to define the top level of a simulation model which:
* receives all input
* loops through the cases
* calls functions that computes predicted values for each case
* outputs the predictions
### **TD 6.1**: Complete main model function
The function `my_train_illusion_model()` below should call one other function: `my_perceived_motion()`. What input do you think this function should get?
**Complete main model function**
```
def my_train_illusion_model(sensorydata, params):
'''
Generate output predictions of perceived self-motion and perceived world-motion velocity
based on input visual and vestibular signals.
Args (Input variables passed into function):
sensorydata: (dict) dictionary with two named entries:
opticflow: (numpy.ndarray of float) NxM array with N trials on rows
and M visual signal samples in columns
vestibular: (numpy.ndarray of float) NxM array with N trials on rows
and M vestibular signal samples in columns
params: (dict) dictionary with named entries:
threshold: (float) vestibular threshold for credit assignment
filterwindow: (list of int) determines the strength of filtering for
the visual and vestibular signals, respectively
integrate (bool): whether to integrate the vestibular signals, will
be set to True if absent
FUN (function): function used in the filter, will be set to
np.mean if absent
samplingrate (float): the number of samples per second in the
sensory data, will be set to 10 if absent
Returns:
dict with two entries:
selfmotion: (numpy.ndarray) vector array of length N, with predictions
of perceived self motion
worldmotion: (numpy.ndarray) vector array of length N, with predictions
of perceived world motion
'''
# sanitize input a little
if not('FUN' in params.keys()):
params['FUN'] = np.mean
if not('integrate' in params.keys()):
params['integrate'] = True
if not('samplingrate' in params.keys()):
params['samplingrate'] = 10
# number of trials:
ntrials = sensorydata['opticflow'].shape[0]
# set up variables to collect output
selfmotion = np.empty(ntrials)
worldmotion = np.empty(ntrials)
# loop through trials?
for trialN in range(ntrials):
#these are our sensory variables (inputs)
vis = sensorydata['opticflow'][trialN,:]
ves = sensorydata['vestibular'][trialN,:]
########################################################
# generate output predicted perception:
########################################################
#our inputs are vis, ves, and params
selfmotion[trialN], worldmotion[trialN] = [np.nan, np.nan]
########################################################
# replace above with
# selfmotion[trialN], worldmotion[trialN] = my_perceived_motion( ???, ???, params=params)
# and fill in question marks
########################################################
# comment this out when you've filled in the code above
raise NotImplementedError("Student exercise: generate predictions")
return {'selfmotion':selfmotion, 'worldmotion':worldmotion}
# uncomment the following lines to run the main model function:
## here is a mock version of my_perceived motion.
## so you can test my_train_illusion_model()
#def my_perceived_motion(*args, **kwargs):
#return np.random.rand(2)
##let's look at the predictions we generated for two sample trials (0,100)
##we should get a 1x2 vector of self-motion prediction and another for world-motion
#sensorydata={'opticflow':opticflow[[0,100],:], 'vestibular':vestibular[[0,100],:]}
#params={'threshold':0.33, 'filterwindow':[100,50]}
#my_train_illusion_model(sensorydata=sensorydata, params=params)
# to_remove solution
def my_train_illusion_model(sensorydata, params):
'''
Generate predictions of perceived self motion and perceived world motion
based on the visual and vestibular signals.
Args:
sensorydata: (dict) dictionary with two named entries:
opticflow: (numpy.ndarray of float) NxM array with N trials on rows
and M visual signal samples in columns
vestibular: (numpy.ndarray of float) NxM array with N trials on rows and
M vestibular signal samples in columns
params: (dict) dictionary with named entries:
threshold: (float) vestibular threshold for credit assignment
filterwindow: (list of int) determines the strength of filtering for
the visual and vestibular signals, respectively
integrate (bool): whether to integrate the vestibular signals, will
be set to True if absent
FUN (function): function used in the filter, will be set to
np.mean if absent
samplingrate (float): the number of samples per second in the
sensory data, will be set to 10 if absent
Returns:
dict with two entries:
selfmotion: (numpy.ndarray) vector array of length N, with predictions
of perceived self motion
worldmotion: (numpy.ndarray) vector array of length N, with predictions
of perceived world motion
'''
# sanitize input a little
if not('FUN' in params.keys()):
params['FUN'] = np.mean
if not('integrate' in params.keys()):
params['integrate'] = True
if not('samplingrate' in params.keys()):
params['samplingrate'] = 10
# number of trials:
ntrials = sensorydata['opticflow'].shape[0]
# set up variables to collect output
selfmotion = np.empty(ntrials)
worldmotion = np.empty(ntrials)
# loop through trials
for trialN in range(ntrials):
vis = sensorydata['opticflow'][trialN,:]
ves = sensorydata['vestibular'][trialN,:]
########################################################
# get predicted perception in respective output vectors:
########################################################
selfmotion[trialN], worldmotion[trialN] = my_perceived_motion( vis=vis, ves=ves, params=params)
return {'selfmotion':selfmotion, 'worldmotion':worldmotion}
# here is a mock version of my_perceived motion
# now you can test my_train_illusion_model()
def my_perceived_motion(*args, **kwargs):
return np.random.rand(2)
##let's look at the predictions we generated for n=2 sample trials (0,100)
##we should get a 1x2 vector of self-motion prediction and another for world-motion
sensorydata={'opticflow':opticflow[[0,100],:], 'vestibular':vestibular[[0,100],:]}
params={'threshold':0.33, 'filterwindow':[100,50]}
my_train_illusion_model(sensorydata=sensorydata, params=params)
```
### **TD 6.2**: Draft perceived motion functions
Now we draft a set of functions, the first of which is used in the main model function (see above) and serves to generate perceived velocities. The other two are used in the first one. Only write help text and/or comments, you don't have to write the whole function. Each time ask yourself these questions:
* what sensory data is necessary?
* what other input does the function need, if any?
* which operations are performed on the input?
* what is the output?
(the number of arguments is correct)
**Template perceived motion**
```
# fill in the input arguments the function should have:
# write the help text for the function:
def my_perceived_motion(arg1, arg2, arg3):
'''
Short description of the function
Args:
argument 1: explain the format and content of the first argument
argument 2: explain the format and content of the second argument
argument 3: explain the format and content of the third argument
Returns:
what output does the function generate?
Any further description?
'''
# structure your code into two functions: "my_selfmotion" and "my_worldmotion"
# write comments outlining the operations to be performed on the inputs by each of these functions
# use the elements from micro-tutorials 3, 4, and 5 (found in W1D2 Tutorial Part 1)
#
#
#
# what kind of output should this function produce?
return output
```
We've completed the `my_perceived_motion()` function for you below. Follow this example to complete the template for `my_selfmotion()` and `my_worldmotion()`. Write out the inputs and outputs, and the steps required to calculate the outputs from the inputs.
**Perceived motion function**
```
#Full perceived motion function
def my_perceived_motion(vis, ves, params):
'''
Takes sensory data and parameters and returns predicted percepts
Args:
vis (numpy.ndarray): 1xM array of optic flow velocity data
ves (numpy.ndarray): 1xM array of vestibular acceleration data
params: (dict) dictionary with named entries:
see my_train_illusion_model() for details
Returns:
[list of floats]: prediction for perceived self-motion based on
vestibular data, and prediction for perceived world-motion based on
perceived self-motion and visual data
'''
# estimate self motion based on only the vestibular data
# pass on the parameters
selfmotion = my_selfmotion(ves=ves,
params=params)
# estimate the world motion, based on the selfmotion and visual data
# pass on the parameters as well
worldmotion = my_worldmotion(vis=vis,
selfmotion=selfmotion,
params=params)
return [selfmotion, worldmotion]
```
**Template calculate self motion**
Put notes in the function below that describe the inputs, the outputs, and the steps that transform the input into the output, using elements from micro-tutorials 3, 4, and 5.
```
def my_selfmotion(arg1, arg2):
'''
Short description of the function
Args:
argument 1: explain the format and content of the first argument
argument 2: explain the format and content of the second argument
Returns:
what output does the function generate?
Any further description?
'''
# what operations do we perform on the input?
# use the elements from micro-tutorials 3, 4, and 5
# 1.
# 2.
# 3.
# 4.
# what output should this function produce?
return output
# to_remove solution
def my_selfmotion(ves, params):
'''
Estimates self motion for one vestibular signal
Args:
ves (numpy.ndarray): 1xM array with a vestibular signal
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of self motion in m/s
'''
# 1. integrate vestibular signal
# 2. running window function
# 3. take final value
# 4. compare to threshold
# if higher than threshold: return value
# if lower than threshold: return 0
return output
```
**Template calculate world motion**
Put notes in the function below that describe the inputs, the outputs, and the steps that transform the input into the output, using elements from micro-tutorials 3, 4, and 5.
```
def my_worldmotion(arg1, arg2, arg3):
'''
Short description of the function
Args:
argument 1: explain the format and content of the first argument
argument 2: explain the format and content of the second argument
argument 3: explain the format and content of the third argument
Returns:
what output does the function generate?
Any further description?
'''
# what operations do we perform on the input?
# use the elements from micro-tutorials 3, 4, and 5
# 1.
# 2.
# 3.
# what output should this function produce?
return output
# to_remove solution
def my_worldmotion(vis, selfmotion, params):
'''
Estimates world motion based on the visual signal and the estimate of self motion
Args:
vis (numpy.ndarray): 1xM array with the optic flow signal
selfmotion (float): estimate of self motion
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of world motion in m/s
'''
# 1. running window function
# 2. take final value
# 3. combine with the self-motion estimate (the optic flow is relative motion)
# return final value
return output
```
# Micro-tutorial 7 - implementing the model
```
#@title Video: implement the model
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='gtSOekY8jkw', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
**Goal:** We write the components of the model in actual code.
For the operations we picked, there are functions ready to use:
* integration: `np.cumsum(data, axis=1)` (axis=1: per trial and over samples)
* filtering: `my_moving_window(data, window)` (window: int, default 3)
* average: `np.mean(data)`
* threshold: `if (value > thr): <operation 1> else: <operation 2>`
### **TD 7.1:** Write code to estimate self motion
Use the operations to finish writing the function that will calculate an estimate of self motion. Fill in the descriptive list of items with actual operations. Use the function for estimating world motion below, which we've filled in for you!
**Template finish self motion function**
```
def my_selfmotion(ves, params):
'''
Estimates self motion for one vestibular signal
Args:
ves (numpy.ndarray): 1xM array with a vestibular signal
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of self motion in m/s
'''
###uncomment the code below and fill in with your code
## 1. integrate vestibular signal
#ves = np.cumsum(ves*(1/params['samplingrate']))
## 2. running window function to accumulate evidence:
#selfmotion = YOUR CODE HERE
## 3. take final value of self-motion vector as our estimate
#selfmotion =
## 4. compare to threshold. Hint: the threshold is stored in params['threshold']
## if selfmotion is higher than threshold: return value
## if it's lower than threshold: return 0
#if YOUR CODE HERE
#selfmotion = YOUR CODE HERE
# comment this out when you've filled in the code above
raise NotImplementedError("Student exercise: estimate my_selfmotion")
return output
# to_remove solution
def my_selfmotion(ves, params):
'''
Estimates self motion for one vestibular signal
Args:
ves (numpy.ndarray): 1xM array with a vestibular signal
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of self motion in m/s
'''
# integrate signal:
ves = np.cumsum(ves*(1/params['samplingrate']))
# use running window to accumulate evidence:
selfmotion = my_moving_window(ves,
window=params['filterwindows'][0],
FUN=params['FUN'])
# take the final value as our estimate:
selfmotion = selfmotion[-1]
# compare to threshold, set to 0 if lower
if selfmotion < params['threshold']:
selfmotion = 0
return selfmotion
```
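To sanity-check the completed function on a single trial (a quick test, not part of the tutorial; it assumes the simulated `vestibular` data from the setup cells and the parameter names used above):
```
test_params = {'threshold': 0.33, 'filterwindows': [100, 50], 'FUN': np.mean, 'samplingrate': 10}
# trial 0 is a world-motion trial, so the self-motion estimate should typically be 0
print(my_selfmotion(vestibular[0, :], test_params))
```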
### Estimate world motion
We have completed the `my_worldmotion()` function for you.
**World motion function**
```
# World motion function
def my_worldmotion(vis, selfmotion, params):
'''
Estimates world motion based on the visual signal, the estimate of self motion, and the model parameters
Args:
vis (numpy.ndarray): 1xM array with the optic flow signal
selfmotion (float): estimate of self motion
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of world motion in m/s
'''
# running average to smooth/accumulate sensory evidence
visualmotion = my_moving_window(vis,
window=params['filterwindows'][1],
FUN=np.mean)
# take final value
visualmotion = visualmotion[-1]
# remove the self motion component (with this optic-flow sign convention, that is an addition)
worldmotion = visualmotion + selfmotion
# return final value
return worldmotion
```
#Micro-tutorial 8 - completing the model
```
#@title Video: completing the model
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='-NiHSv4xCDs', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
**Goal:** Make sure the model can speak to the hypothesis. Eliminate all the parameters that do not speak to the hypothesis.
Now that we have a working model, we can keep improving it, but at some point we need to decide that it is finished. Once we have a model that displays the properties of a system we are interested in, it should be possible to say something about our hypothesis and question. Keeping the model simple makes it easier to understand the phenomenon and answer the research question. Here that means that our model should have illusory perception, and perhaps make similar judgments to those of the participants, but not much more.
To test this, we will run the model, store the output and plot the models' perceived self motion over perceived world motion, like we did with the actual perceptual judgments (it even uses the same plotting function).
### **TD 8.1:** See if the model produces illusions
```
#@title Run to plot model predictions of motion estimates
# prepare to run the model again:
data = {'opticflow':opticflow, 'vestibular':vestibular}
params = {'threshold':0.6, 'filterwindows':[100,50], 'FUN':np.mean}
modelpredictions = my_train_illusion_model(sensorydata=data, params=params)
# process the data to allow plotting...
predictions = np.zeros(judgments.shape)
predictions[:,0:3] = judgments[:,0:3]
predictions[:,3] = modelpredictions['selfmotion']
predictions[:,4] = modelpredictions['worldmotion'] *-1
my_plot_percepts(datasets={'predictions':predictions}, plotconditions=True)
```
**Questions:**
* Why is the data distributed this way? How does it compare to the plot in TD 1.2?
* Did you expect to see this?
* Where do the model's predicted judgments for each of the two conditions fall?
* How does this compare to the behavioral data?
However, the main observation should be that **there are illusions**: the blue and red data points are mixed in each of the two sets of data. Does this mean the model can help us understand the phenomenon?
#Micro-tutorial 9 - testing and evaluating the model
```
#@title Video: Background
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='5vnDOxN3M_k', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
**Goal:** Once we have finished the model, we need a description of how good it is. The question and goals we set in micro-tutorials 1 and 4 help here. There are multiple ways to evaluate a model. Aside from the obvious fact that we want to get insight into the phenomenon that is not directly accessible without the model, we always want to quantify how well the model agrees with the data.
### Quantify model quality with $R^2$
Let's look at how well our model matches the actual judgment data.
```
#@title Run to plot predictions over data
my_plot_predictions_data(judgments, predictions)
```
When model predictions are correct, the red points in the figure above should lie along the identity line (a dotted black line here). Points off the identity line represent model prediction errors. While in each plot we see two clusters of dots that are fairly close to the identity line, there are also two clusters that are not. For the trials that those points represent, the model has an illusion while the participants don't or vice versa.
We will use a straightforward, quantitative measure of how good the model is: $R^2$ (pronounced: "R-squared"), which can take values between 0 and 1, and expresses how much variance is explained by the relationship between two variables (here the model's predictions and the actual judgments). It is also called [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), and is calculated here as the square of the correlation coefficient (r or $\rho$). Just run the chunk below:
```
#@title Run to calculate R^2
conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2])))
veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4]))
velpredict = np.concatenate((predictions[:,3],predictions[:,4]))
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt)
print('conditions -> judgments R^2: %0.3f'%( r_value**2 ))
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict)
print('predictions -> judgments R^2: %0.3f'%( r_value**2 ))
```
These $R^2$s express how well the experimental conditions explain the participants' judgments, and how well the model's predicted judgments explain the participants' judgments.
You will learn much more about model fitting, quantitative model evaluation and model comparison tomorrow!
Perhaps the $R^2$ values don't seem very impressive, but the judgments produced by the participants are explained by the model's predictions better than by the actual conditions. In other words: the model tends to have the same illusions as the participants.
### **TD 9.1** Varying the threshold parameter to improve the model
In the code below, see if you can find a better value for the threshold parameter to reduce the errors in the model's predictions.
**Testing thresholds**
```
# Testing thresholds
def test_threshold(threshold=0.33):
# prepare to run model
data = {'opticflow':opticflow, 'vestibular':vestibular}
params = {'threshold':threshold, 'filterwindows':[100,50], 'FUN':np.mean}
modelpredictions = my_train_illusion_model(sensorydata=data, params=params)
# get predictions in matrix
predictions = np.zeros(judgments.shape)
predictions[:,0:3] = judgments[:,0:3]
predictions[:,3] = modelpredictions['selfmotion']
predictions[:,4] = modelpredictions['worldmotion'] *-1
# get percepts from participants and model
conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2])))
veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4]))
velpredict = np.concatenate((predictions[:,3],predictions[:,4]))
# calculate R2
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict)
print('predictions -> judgments R2: %0.3f'%( r_value**2 ))
test_threshold(threshold=0.5)
```
### **TD 9.2:** Credit assignment of self motion
When we look at the figure in **TD 8.1**, we can see a cluster that does seem very close to (1,0), just like in the actual data. The cluster of points at (1,0) comes from the case where we conclude there is no self motion, and then set the self motion to 0. That value of 0 removes a lot of noise from the world-motion estimates, and all noise from the self-motion estimate. In the other case, where there is self motion, we still have a lot of noise (see also micro-tutorial 4).
Let's change our `my_selfmotion()` function to return a self motion of 1 when the vestibular signal indicates we are above threshold, and 0 when we are below threshold. Edit the function here.
**Template function for credit assignment of self motion**
```
# Template binary self-motion estimates
def my_selfmotion(ves, params):
'''
Estimates self motion for one vestibular signal
Args:
ves (numpy.ndarray): 1xM array with a vestibular signal
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of self motion in m/s
'''
# integrate signal:
ves = np.cumsum(ves*(1/params['samplingrate']))
# use running window to accumulate evidence:
selfmotion = my_moving_window(ves,
window=params['filterwindows'][0],
FUN=params['FUN'])
## take the final value as our estimate:
selfmotion = selfmotion[-1]
##########################################
# this last part will have to be changed
# compare to threshold, set to 0 if lower and else...
if selfmotion < params['threshold']:
selfmotion = 0
#uncomment the lines below and fill in with your code
#else:
#YOUR CODE HERE
# comment this out when you've filled in your code
raise NotImplementedError("Student exercise: modify with credit assignment")
return selfmotion
# to_remove solution
def my_selfmotion(ves, params):
'''
Estimates self motion for one vestibular signal
Args:
ves (numpy.ndarray): 1xM array with a vestibular signal
params (dict): dictionary with named entries:
see my_train_illusion_model() for details
Returns:
(float): an estimate of self motion in m/s
'''
# integrate signal:
ves = np.cumsum(ves*(1/params['samplingrate']))
# use running window to accumulate evidence:
selfmotion = my_moving_window(ves,
window=params['filterwindows'][0],
FUN=params['FUN'])
# final value:
selfmotion = selfmotion[-1]
# compare to threshold, set to 0 if lower
if selfmotion < params['threshold']:
selfmotion = 0
else:
selfmotion = 1
return selfmotion
```
The function you just wrote will be used when we run the model again below.
```
#@title Run model credit assignment of self motion
# prepare to run the model again:
data = {'opticflow':opticflow, 'vestibular':vestibular}
params = {'threshold':0.33, 'filterwindows':[100,50], 'FUN':np.mean}
modelpredictions = my_train_illusion_model(sensorydata=data, params=params)
# now process the data to allow plotting...
predictions = np.zeros(judgments.shape)
predictions[:,0:3] = judgments[:,0:3]
predictions[:,3] = modelpredictions['selfmotion']
predictions[:,4] = modelpredictions['worldmotion'] *-1
my_plot_percepts(datasets={'predictions':predictions}, plotconditions=False)
```
That looks much better, and closer to the actual data. Let's see if the $R^2$ values have improved:
```
#@title Run to calculate R^2 for model with self motion credit assignment
conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2])))
veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4]))
velpredict = np.concatenate((predictions[:,3],predictions[:,4]))
my_plot_predictions_data(judgments, predictions)
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt)
print('conditions -> judgments R2: %0.3f'%( r_value**2 ))
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(velpredict,veljudgmnt)
print('predictions -> judgments R2: %0.3f'%( r_value**2 ))
```
While the model still predicts velocity judgments better than the conditions (i.e. the model predicts illusions in somewhat similar cases), the $R^2$ values are actually worse than those of the simpler model. What's really going on is that the same set of points that were model prediction errors in the previous model are also errors here. All we have done is reduce the spread.
### Interpret the model's meaning
Here's what you should have learned:
1. A noisy vestibular acceleration signal can give rise to illusory motion.
2. However, disambiguating the optic flow by adding the vestibular signal simply adds a lot of noise. This is not a plausible thing for the brain to do.
3. Our other hypothesis - credit assignment - is more qualitatively correct, but our simulations were not able to match the frequency of the illusion on a trial-by-trial basis.
_It's always possible to refine our models to improve the fits._
There are many ways to try to do this. A few examples: we could implement a full sensory cue integration model, perhaps with Kalman filters (Week 2, Day 3), or we could add prior knowledge (at what time do the trains depart?). However, we decided that for now we have learned enough, so it's time to write it up.
# Micro-tutorial 10 - publishing the model
```
#@title Video: Background
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='kf4aauCr5vA', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
**Goal:** In order for our model to impact the field, it needs to be accepted by our peers, and in order for that to happen it matters how the model is published.
### **TD 10.1:** Write a summary of the project
Here we will write up our model, by answering the following questions:
* **What is the phenomenon?** Summarize the part of the phenomenon that your model addresses.
* **What is the key scientific question?** Clearly articulate the question that your model tries to answer.
* **What was our hypothesis?** Explain the key relationships that we relied on to simulate the phenomenon.
* **How did your model work?** Give an overview of the model, its main components, and how the model works. "Here we ..."
* **What did we find? Did the model work?** Explain the key outcomes of your model evaluation.
* **What can we conclude?** Conclude as much as you can _with reference to the hypothesis_, within the limits of the model.
* **What did you learn? What is left to be learned?** Briefly argue the plausibility of the approach and what you think is _essential_ that may have been left out.
### Guidance for the future
There are good guidelines for structuring and writing an effective paper (e.g. [Mensh & Kording, 2017](https://doi.org/10.1371/journal.pcbi.1005619)), all of which apply to papers about models. There are some extra considerations when publishing a model. In general, you should explain each of the steps in the paper:
**Introduction:** Steps 1 & 2 (maybe 3)
**Methods:** Steps 3-7, 9
**Results:** Steps 8 & 9, going back to 1, 2 & 4
In addition, you should provide a visualization of the model, and upload the code implementing the model and the data it was trained and tested on to a repository (e.g. GitHub and OSF).
The audience for all of this should be experimentalists, as they are the ones who can test predictions made by your model and collect new data. This way your models can impact future experiments, and that future data can then be modeled (see the modeling process schematic below). Remember your audience - it is _always_ hard to clearly convey the main points of your work to others, especially if your audience doesn't necessarily create computational models themselves.

### Suggestion
For every modeling project, a very good exercise is to _**first**_ write a short, 100-word abstract of the project plan and expected impact, like the summary you wrote. This forces you to focus on the main points: describing the relevance, question, model, answer and what it all means very succinctly. It also lets you decide whether to do the project **before you commit time to writing code for no good purpose**. Notice that this is really what we've walked you through carefully in this tutorial! :)
# Post-script
Note that the model we built here was extremely simple and used artificial data on purpose. It allowed us to go through all the steps of building a model, and hopefully you noticed that it is not always a linear process: you will go back to different steps if you hit a roadblock somewhere.
However, if you're interested in how to actually approach modeling a similar phenomenon in a probabilistic way, we encourage you to read the paper by [Dokka et al., 2019](https://doi.org/10.1073/pnas.1820373116), where the authors model how judgments of heading direction are influenced by objects that are also moving.
# Reading
Blohm G, Kording KP, Schrater PR (2020). _A How-to-Model Guide for Neuroscience_ eNeuro, 7(1) ENEURO.0352-19.2019. https://doi.org/10.1523/ENEURO.0352-19.2019
Dokka K, Park H, Jansen M, DeAngelis GC, Angelaki DE (2019). _Causal inference accounts for heading perception in the presence of object motion._ PNAS, 116(18):9060-9065. https://doi.org/10.1073/pnas.1820373116
Drugowitsch J, DeAngelis GC, Klier EM, Angelaki DE, Pouget A (2014). _Optimal Multisensory Decision-Making in a Reaction-Time Task._ eLife, 3:e03005. https://doi.org/10.7554/eLife.03005
Hartmann, M, Haller K, Moser I, Hossner E-J, Mast FW (2014). _Direction detection thresholds of passive self-motion in artistic gymnasts._ Exp Brain Res, 232:1249–1258. https://doi.org/10.1007/s00221-014-3841-0
Mensh B, Kording K (2017). _Ten simple rules for structuring papers._ PLoS Comput Biol 13(9): e1005619. https://doi.org/10.1371/journal.pcbi.1005619
Seno T, Fukuda H (2012). _Stimulus Meanings Alter Illusory Self-Motion (Vection) - Experimental Examination of the Train Illusion._ Seeing Perceiving, 25(6):631-45. https://doi.org/10.1163/18784763-00002394

# 1. Quickstart Tutorial on Spark NLP - 1 hr
This is the 1 hr workshop version of the entire training notebooks: https://github.com/JohnSnowLabs/spark-nlp-workshop/tree/master/tutorials/Certification_Trainings/Public
An intro article for Spark NLP:
https://towardsdatascience.com/introduction-to-spark-nlp-foundations-and-basic-components-part-i-c83b7629ed59
How to start Spark NLP in 2 weeks:
https://towardsdatascience.com/how-to-get-started-with-sparknlp-in-2-weeks-cb47b2ba994d
https://towardsdatascience.com/how-to-wrap-your-head-around-spark-nlp-a6f6a968b7e8
Articles on NER and text classification in Spark NLP:
https://towardsdatascience.com/named-entity-recognition-ner-with-bert-in-spark-nlp-874df20d1d77
https://medium.com/spark-nlp/named-entity-recognition-for-healthcare-with-sparknlp-nerdl-and-nercrf-a7751b6ad571
https://towardsdatascience.com/text-classification-in-spark-nlp-with-bert-and-universal-sentence-encoders-e644d618ca32
A webinar showing how to train an NER model from scratch (90 min):
https://www.youtube.com/watch?v=djWX0MR2Ooo
Workshop repo where you can start playing with Spark NLP in Colab (you will also find a Databricks notebook under each folder):
https://github.com/JohnSnowLabs/spark-nlp-workshop/tree/master/tutorials/Certification_Trainings
## Coding ...
```
import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
print("Spark NLP version", sparknlp.version())
spark
```
## Using Pretrained Pipelines
for a more detailed notebook, see https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/1.SparkNLP_Basics.ipynb
```
from sparknlp.pretrained import PretrainedPipeline
pipeline_dl = PretrainedPipeline('explain_document_dl', lang='en')
```
**Stages**
- DocumentAssembler
- SentenceDetector
- Tokenizer
- NER (NER with GloVe 100D embeddings, CoNLL2003 dataset)
- Lemmatizer
- Stemmer
- Part of Speech
- SpellChecker (Norvig)
```
testDoc = '''
Peter Parker is a very good persn.
My life in Russia is very intersting.
John and Peter are brthers. However they don't support each other that much.
Mercedes Benz is also working on a driverless car.
Europe is very culture rich. There are huge churches! and big houses!
'''
result = pipeline_dl.annotate(testDoc)
result.keys()
result['entities']
import pandas as pd
df = pd.DataFrame({'token':result['token'], 'ner_label':result['ner'],
'spell_corrected':result['checked'], 'POS':result['pos'],
'lemmas':result['lemma'], 'stems':result['stem']})
df
```
### Using fullAnnotate to get more details
```
detailed_result = pipeline_dl.fullAnnotate(testDoc)
detailed_result[0]['entities']
chunks=[]
entities=[]
for n in detailed_result[0]['entities']:
chunks.append(n.result)
entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'entities':entities})
df
tuples = []
for x,y,z in zip(detailed_result[0]["token"], detailed_result[0]["pos"], detailed_result[0]["ner"]):
tuples.append((int(x.metadata['sentence']), x.result, x.begin, x.end, y.result, z.result))
df = pd.DataFrame(tuples, columns=['sent_id','token','start','end','pos', 'ner'])
df
```
### Sentiment Analysis
```
sentiment = PretrainedPipeline('analyze_sentiment', lang='en')
result = sentiment.annotate("The movie I watched today was not a good one")
result['sentiment']
# DL version (using Universal sentence encoder - USE)
# 930 MB as it downloads the USE as well
sentiment_twitter = PretrainedPipeline('analyze_sentimentdl_use_twitter', lang='en')
```
## Using the modules in a pipeline for custom tasks
for a more detailed notebook, see https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
```
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/annotation/english/spark-nlp-basics/sample-sentences-en.txt
dbutils.fs.cp("file:/databricks/driver/sample-sentences-en.txt", "dbfs:/")
with open('sample-sentences-en.txt') as f:
print (f.read())
spark_df = spark.read.text('/sample-sentences-en.txt').toDF('text')
spark_df.show(truncate=False)
textFiles = spark.sparkContext.wholeTextFiles("/sample-sentences-en.txt",4) # or a glob like "/*.txt"
spark_df_folder = textFiles.toDF(schema=['path','text'])
spark_df_folder.show(truncate=30)
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
sentenceDetector = SentenceDetector()\
.setInputCols(['document'])\
.setOutputCol('sentences')
tokenizer = Tokenizer() \
.setInputCols(["sentences"]) \
.setOutputCol("token")
nlpPipeline = Pipeline(stages=[
documentAssembler,
sentenceDetector,
tokenizer
])
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(spark_df)
result.show(truncate=20)
result.printSchema()
result.select('sentences.result').take(3)
```
### StopWords Cleaner
```
stopwords_cleaner = StopWordsCleaner()\
.setInputCols("token")\
.setOutputCol("cleanTokens")\
.setCaseSensitive(False)
stopwords_cleaner.getStopWords()[:10]
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
nlpPipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
stopwords_cleaner
])
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(spark_df)
result.show()
result.select('cleanTokens.result').take(1)
```
### Text Matcher
```
! wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/data/news_category_train.csv
dbutils.fs.cp("file:/databricks/driver/news_category_train.csv", "dbfs:/")
news_df = spark.read \
.option("header", True) \
.csv("/news_category_train.csv")
news_df.show(5, truncate=50)
entities = ['Wall Street', 'USD', 'stock', 'NYSE']
with open ('financial_entities.txt', 'w') as f:
for i in entities:
f.write(i+'\n')
entities = ['soccer', 'world cup', 'Messi', 'FC Barcelona']
with open ('sport_entities.txt', 'w') as f:
for i in entities:
f.write(i+'\n')
dbutils.fs.cp("file:/databricks/driver/financial_entities.txt", "dbfs:/")
dbutils.fs.cp("file:/databricks/driver/sport_entities.txt", "dbfs:/")
documentAssembler = DocumentAssembler()\
.setInputCol("description")\
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
financial_entity_extractor = TextMatcher() \
.setInputCols(["document",'token'])\
.setOutputCol("financial_entities")\
.setEntities("file:/databricks/driver/financial_entities.txt")\
.setCaseSensitive(False)\
.setEntityValue('financial_entity')
sport_entity_extractor = TextMatcher() \
.setInputCols(["document",'token'])\
.setOutputCol("sport_entities")\
.setEntities("file:/databricks/driver/sport_entities.txt")\
.setCaseSensitive(False)\
.setEntityValue('sport_entity')
nlpPipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
financial_entity_extractor,
sport_entity_extractor
])
empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(news_df)
result.select('financial_entities.result','sport_entities.result').take(2)
```
This means there are no financial or sport entities in the first two lines.
```
from pyspark.sql import functions as F
result.select('description','financial_entities.result','sport_entities.result')\
.toDF('text','financial_matches','sport_matches').filter((F.size('financial_matches')>1) | (F.size('sport_matches')>1))\
.show(truncate=70)
```
### Using the pipeline in a LightPipeline
```
light_model = LightPipeline(pipelineModel)
light_result = light_model.fullAnnotate("Google, Inc. significantly cut the expected share price for its stock at Wall Street")
light_result[0]['financial_entities']
```
## Pretrained Models
Spark NLP offers pre-trained models in around **40 languages**. All you need to do is load a pre-trained model by specifying its name, then configure the model parameters as per your use case and dataset. You will not need to train a new model from scratch, and can apply state-of-the-art pre-trained models directly to your own data with transform().
In the official documentation, you can find detailed information on the algorithms and datasets used to train each of these models.
https://github.com/JohnSnowLabs/spark-nlp-models
for a more detailed notebook, see https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/3.SparkNLP_Pretrained_Models.ipynb
### LemmatizerModel and ContextSpellCheckerModel
```
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_dl')\
.setInputCols("token")\
.setOutputCol("checked")
lemmatizer = LemmatizerModel.pretrained('lemma_antbnc', 'en') \
.setInputCols(["checked"]) \
.setOutputCol("lemma")
pipeline = Pipeline(stages = [
documentAssembler,
tokenizer,
spellModel,
lemmatizer
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
sc_model = pipeline.fit(empty_ds)
lp = LightPipeline(sc_model)
result = lp.annotate("Plaese alliow me tao introdduce myhelf, I am a man of waelth und tiaste and he just knows that")
list(zip(result['token'],result['checked'],result['lemma']))
```
### Word and Sentence Embeddings
#### Word Embeddings
```
glove_embeddings = WordEmbeddingsModel.pretrained('glove_100d')\
.setInputCols(["document", "token"])\
.setOutputCol("embeddings")
documentAssembler = DocumentAssembler()\
.setInputCol("description")\
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
nlpPipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
glove_embeddings
])
empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(news_df.limit(1))
output = result.select('token.result','embeddings.embeddings').limit(1).rdd.flatMap(lambda x: x).collect()
pd.DataFrame({'token':output[0],'embeddings':output[1]})
result = pipelineModel.transform(news_df.limit(10))
result_df = result.select(F.explode(F.arrays_zip('token.result', 'embeddings.embeddings')).alias("cols")) \
.select(F.expr("cols['0']").alias("token"),
F.expr("cols['1']").alias("embeddings"))
result_df.show(10, truncate=100)
```
#### Bert Embeddings
```
bert_embeddings = BertEmbeddings.pretrained('bert_base_cased')\
.setInputCols(["document", "token"])\
.setOutputCol("embeddings")
documentAssembler = DocumentAssembler()\
.setInputCol("description")\
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
nlpPipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
bert_embeddings
])
empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(news_df.limit(10))
result_df = result.select(F.explode(F.arrays_zip('token.result', 'embeddings.embeddings')).alias("cols")) \
.select(F.expr("cols['0']").alias("token"),
F.expr("cols['1']").alias("bert_embeddings"))
result_df.show(truncate=100)
```
#### Bert Sentence Embeddings
```
bert_sentence_embeddings = BertSentenceEmbeddings.pretrained('sent_small_bert_L6_128')\
.setInputCols(["document"])\
.setOutputCol("bert_sent_embeddings")
nlpPipeline = Pipeline(stages=[
documentAssembler,
bert_sentence_embeddings
])
empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(news_df.limit(10))
result_df = result.select(F.explode(F.arrays_zip('document.result', 'bert_sent_embeddings.embeddings')).alias("cols")) \
.select(F.expr("cols['0']").alias("document"),
F.expr("cols['1']").alias("bert_sent_embeddings"))
result_df.show(truncate=100)
```
#### Universal Sentence Encoder
```
# no need for token columns
use_embeddings = UniversalSentenceEncoder.pretrained('tfhub_use')\
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
nlpPipeline = Pipeline(stages=[
documentAssembler,
use_embeddings
])
empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(news_df.limit(10))
result_df = result.select(F.explode(F.arrays_zip('document.result', 'sentence_embeddings.embeddings')).alias("cols")) \
.select(F.expr("cols['0']").alias("document"),
F.expr("cols['1']").alias("USE_embeddings"))
result_df.show(truncate=100)
```
### Named Entity Recognition (NER) Models
for a detailed notebook, see https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/4.NERDL_Training.ipynb
```
glove_embeddings = WordEmbeddingsModel.pretrained('glove_100d')\
.setInputCols(["document", "token"])\
.setOutputCol("embeddings")
onto_ner = NerDLModel.pretrained("onto_100", 'en') \
.setInputCols(["document", "token", "embeddings"]) \
.setOutputCol("ner")
ner_converter = NerConverter() \
.setInputCols(["document", "token", "ner"]) \
.setOutputCol("ner_chunk")
nlpPipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
glove_embeddings,
onto_ner,
ner_converter
])
empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(news_df.limit(10))
result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias("cols")) \
.select(F.expr("cols['0']").alias("chunk"),
F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False)
light_model = LightPipeline(pipelineModel)
light_result = light_model.fullAnnotate('Peter Parker is a nice persn and lives in New York. Bruce Wayne is also a nice guy and lives in Gotham City.')
chunks = []
entities = []
for n in light_result[0]['ner_chunk']:
chunks.append(n.result)
entities.append(n.metadata['entity'])
import pandas as pd
df = pd.DataFrame({'chunks':chunks, 'entities':entities})
df
```
#### Train a NER model
**To train a new NER from scratch, check out**
https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/4.NERDL_Training.ipynb
```
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp/master/src/test/resources/conll2003/eng.train
#dbutils.fs.cp("file:/databricks/driver/sample-sentences-en.txt", "dbfs:/")
from sparknlp.training import CoNLL
training_data = CoNLL().readDataset(spark, 'file:/databricks/driver/eng.train')
training_data.show(3)
training_data.select(F.explode(F.arrays_zip('token.result','label.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("token"),
F.expr("cols['1']").alias("ground_truth")).groupBy('ground_truth').count().orderBy('count', ascending=False).show(100,truncate=False)
# You can use any word embeddings you want (Glove, Elmo, Bert, custom etc.)
glove_embeddings = WordEmbeddingsModel.pretrained('glove_100d')\
.setInputCols(["document", "token"])\
.setOutputCol("embeddings")
nerTagger = NerDLApproach()\
.setInputCols(["sentence", "token", "embeddings"])\
.setLabelColumn("label")\
.setOutputCol("ner")\
.setMaxEpochs(1)\
.setLr(0.003)\
.setPo(0.05)\
.setBatchSize(32)\
.setRandomSeed(0)\
.setVerbose(1)\
.setValidationSplit(0.2)\
.setEvaluationLogExtended(True) \
.setEnableOutputLogs(True)\
.setIncludeConfidence(True)\
.setOutputLogsPath('ner_logs') # if not set, logs will be written to ~/annotator_logs
#.setGraphFolder('graphs') >> put your graph file (pb) under this folder if you are using a custom graph generated thru NerDL-Graph
ner_pipeline = Pipeline(stages=[
glove_embeddings,
nerTagger
])
ner_model = ner_pipeline.fit(training_data)
# 1 epoch takes around 2.5 min with batch size=32
# if you get an error for incompatible TF graph, use NERDL Graph script to generate the necessary TF graph at the end of this notebook
#%sh cd ~/annotator_logs && ls -lt
%sh cd ner_logs && ls -lt
%sh head -n 45 ner_logs/NerDLApproach_86ff127a6f55.log
%sh ls -la
%sh mkdir models
ner_model.stages[1].write().overwrite().save('/databricks/driver/models/NER_glove_e1_b32')
# load back and use in any pipeline
loaded_ner_model = NerDLModel.load("/databricks/driver/models/NER_glove_e1_b32")\
.setInputCols(["sentence", "token", "embeddings"])\
.setOutputCol("ner")
```
### Text Classification
for a detailed notebook, see https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/5.Text_Classification_with_ClassifierDL.ipynb
```
! wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/data/news_category_test.csv
dbutils.fs.cp("file:/databricks/driver/news_category_test.csv", "dbfs:/")
from pyspark.sql.functions import col
trainDataset = spark.read \
.option("header", True) \
.csv("/news_category_train.csv")
trainDataset.groupBy("category") \
.count() \
.orderBy(col("count").desc()) \
.show()
testDataset = spark.read \
.option("header", True) \
.csv("/news_category_test.csv")
testDataset.groupBy("category") \
.count() \
.orderBy(col("count").desc()) \
.show()
# actual content is inside description column
document = DocumentAssembler()\
.setInputCol("description")\
.setOutputCol("document")
# we can also use a sentence detector here if we want to train on and get predictions for each sentence
use_embeddings = UniversalSentenceEncoder.pretrained('tfhub_use')\
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
# the classes/labels/categories are in category column
classifierdl = ClassifierDLApproach()\
.setInputCols(["sentence_embeddings"])\
.setOutputCol("class")\
.setLabelColumn("category")\
.setMaxEpochs(5)\
.setEnableOutputLogs(True)
use_clf_pipeline = Pipeline(
stages = [
document,
use_embeddings,
classifierdl
])
use_pipelineModel = use_clf_pipeline.fit(trainDataset)
# 5 epochs takes around 3 min
%sh cd ~/annotator_logs && ls -lt
%sh cat ~/annotator_logs/ClassifierDLApproach_ac9199b197d9.log
from sparknlp.base import LightPipeline
light_model = LightPipeline(use_pipelineModel)
text='''
Fearing the fate of Italy, the centre-right government has threatened to be merciless with those who flout tough restrictions.
As of Wednesday it will also include all shops being closed across Greece, with the exception of supermarkets. Banks, pharmacies, pet-stores, mobile phone stores, opticians, bakers, mini-markets, couriers and food delivery outlets are among the few that will also be allowed to remain open.
'''
result = light_model.annotate(text)
result['class']
light_model.annotate('the soccer games will be postponed.')
```
# NerDL Graph
```
!pip -q install tensorflow==1.15.0
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/training/english/dl-ner/nerdl-graph/create_graph.py
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/training/english/dl-ner/nerdl-graph/dataset_encoder.py
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/training/english/dl-ner/nerdl-graph/ner_model.py
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/training/english/dl-ner/nerdl-graph/ner_model_saver.py
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/training/english/dl-ner/nerdl-graph/sentence_grouper.py
import sys
sys.path.append('/databricks/driver/')
sys.path.append('/databricks/driver/create_graph.py')
import create_graph
ntags = 12 # number of labels
embeddings_dim = 90
nchars = 60
create_graph.create_graph(ntags, embeddings_dim, nchars)
%sh ls -la
```
End of Notebook
# Week 11 - Regression and Classification
In previous weeks we have looked at the steps needed in preparing different types of data for use by machine learning algorithms.
```
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
from sklearn import datasets
diabetes = datasets.load_diabetes()
# Description at http://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
X = diabetes.data
y = diabetes.target
print(X.shape, y.shape)
from sklearn import linear_model
clf = linear_model.LinearRegression()
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
```
All the different models in scikit-learn follow a consistent structure.
* The class is passed any parameters needed at initialization. In this case none are needed.
* The fit method takes the features and the target as the parameters X and y.
* The predict method takes an array of features and returns the predicted values.
These are the basic components with additional methods added when needed. For example, classifiers also have
* A predict_proba method that gives the probability that a sample belongs to each of the classes.
* A predict_log_proba method that gives the log of the probability that a sample belongs to each of the classes.
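As a quick sketch of this shared interface (using the iris dataset and logistic regression, which are not otherwise part of this notebook):
```
from sklearn import datasets, linear_model

iris = datasets.load_iris()
X, y = iris.data, iris.target

# the same pattern applies to every estimator: initialize -> fit -> predict
clf = linear_model.LogisticRegression(max_iter=1000)
clf.fit(X, y)

print(clf.predict(X[:3]))           # predicted class labels
print(clf.predict_proba(X[:3]))     # probability of each class
print(clf.predict_log_proba(X[:3])) # log of those probabilities
```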
## Evaluating models
Before we consider whether we have a good model, or which model to choose, we must first decide on how we will evaluate our models.
### Metrics
As part of our evaluation having a single number with which to compare models can be very useful. Choosing a metric that is as close a representation of our goal as possible enables many models to be automatically compared. This can be important when choosing model parameters or comparing different types of algorithm.
Even if we have a metric we feel is reasonable it can be worthwhile considering in detail the predictions made by any model. Some questions to ask:
* Is the model sufficiently sensitive for our use case?
* Is the model sufficiently specific for our use case?
* Is there any systemic bias?
* Does the model perform equally well over the distribution of features?
* How does the model perform outside the range of the training data?
* Is the model overly dependent on one or two samples in the training dataset?
The metric we decide to use will depend on the type of problem we have (regression or classification) and what aspects of the prediction are most important to us. For example, a decision we might have to make is between:
* A model with intermediate errors for all samples
* A model with low errors for the majority of samples but with a small number of samples that have large errors.
For these two situations in a regression task we might choose mean_squared_error and mean_absolute_error.
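As a small sketch of the difference, the two error patterns below have the same mean absolute error, but the pattern with one large error is penalized far more heavily by the squared metric (the numbers are made up):
```
import numpy as np
from sklearn import metrics

y_true = np.zeros(10)

even_errors = np.full(10, 1.0)              # intermediate errors everywhere
spiky_errors = np.array([0.1] * 9 + [9.1])  # low errors plus one large error

for name, y_pred in [('even', even_errors), ('spiky', spiky_errors)]:
    print(name,
          'MAE = %.2f' % metrics.mean_absolute_error(y_true, y_pred),
          'MSE = %.2f' % metrics.mean_squared_error(y_true, y_pred))
```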
There are lists for [regression metrics](http://scikit-learn.org/stable/modules/model_evaluation.html#regression-metrics) and [classification metrics](http://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics).
We can apply the mean_squared_error metric to the linear regression model on the diabetes dataset:
```
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = linear_model.LinearRegression()
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
from sklearn import metrics
metrics.mean_squared_error(y, clf.predict(X))
```
Although this single number might seem unimpressive, metrics are a key component for model evaluation. As a simple example, we can perform a permutation test to determine whether we might see this performance by chance.
```
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = linear_model.LinearRegression()
clf.fit(X, y)
error = metrics.mean_squared_error(y, clf.predict(X))
rounds = 1000
np.random.seed(0)
errors = []
for i in range(rounds):
y_shuffle = y.copy()
np.random.shuffle(y_shuffle)
clf_shuffle = linear_model.LinearRegression()
clf_shuffle.fit(X, y_shuffle)
errors.append(metrics.mean_squared_error(y_shuffle, clf_shuffle.predict(X)))
better_models_by_chance = len([i for i in errors if i <= error])
if better_models_by_chance > 0:
print('Probability of observing a mean_squared_error of {0} by chance is {1}'.format(error,
better_models_by_chance / rounds))
else:
print('Probability of observing a mean_squared_error of {0} by chance is <{1}'.format(error,
1 / rounds))
```
### Training, validation, and test datasets
When evaluating different models the approach taken above is not going to work. Particularly for models with high variance that overfit the training data, we will get very good performance on the training data but perform no better than chance on new data.
```
from sklearn import tree
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = tree.DecisionTreeRegressor()
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
metrics.mean_squared_error(y, clf.predict(X))
from sklearn import neighbors
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = neighbors.KNeighborsRegressor(n_neighbors=1)
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
metrics.mean_squared_error(y, clf.predict(X))
```
Both these models appear to give perfect solutions but all they do is map our test samples back to the training samples and return the associated value.
To understand how our model truly performs we need to evaluate the performance on previously unseen samples. The general approach is to divide a dataset into training, validation and test datasets. Each model is trained on the training dataset. Multiple models can then be compared by evaluating the model against the validation dataset. There is still the potential of choosing a model that performs well on the validation dataset by chance so a final check is made against a test dataset.
This unfortunately means that part of our, often expensively gathered, data can't be used to train our model. Although it is important to leave out a test dataset, an alternative approach can be used for the validation dataset. Rather than just building one model we can build multiple models, each time leaving out a different validation dataset. Our validation score is then the average across each of the models. This is known as cross-validation.
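A sketch of cross-validation using scikit-learn's built-in helper (in current versions it lives in `sklearn.model_selection`):
```
from sklearn import datasets, neighbors
from sklearn.model_selection import cross_val_score

diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

clf = neighbors.KNeighborsRegressor(n_neighbors=1)
# fit and score the model on each of 5 train/validation splits
scores = cross_val_score(clf, X, y, cv=5, scoring='neg_mean_squared_error')
print(-scores)         # per-fold mean squared errors
print(-scores.mean())  # cross-validated estimate
```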
Scikit-learn provides classes to support cross-validation but a simple solution can also be implemented directly. Below we will separate out a test dataset to evaluate the nearest neighbor model.
```
from sklearn import neighbors
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, X_test.shape)
clf = neighbors.KNeighborsRegressor(1)
clf.fit(X_train, y_train)
plt.plot(y_test, clf.predict(X_test), 'k.')
plt.show()
metrics.mean_squared_error(y_test, clf.predict(X_test))
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, X_test.shape)
clf = linear_model.LinearRegression()
clf.fit(X_train, y_train)
plt.plot(y_test, clf.predict(X_test), 'k.')
plt.show()
metrics.mean_squared_error(y_test, clf.predict(X_test))
```
## Model types
Scikit-learn includes a variety of [different models](http://scikit-learn.org/stable/supervised_learning.html). The most commonly used algorithms probably include the following:
* Regression
* Support Vector Machines
* Nearest neighbors
* Decision trees
* Ensembles & boosting
### Regression
We have already seen several examples of regression. The basic form is:
$$f(X) = \beta_{0} + \sum_{j=1}^p X_j\beta_j$$
Each feature is multiplied by a coefficient and then the sum is returned. For classification this value is then transformed to limit it to the range 0 to 1.
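A minimal sketch of this computation with made-up coefficients, including the logistic transform used for classification:
```
import numpy as np

beta0 = 0.5                   # intercept
beta = np.array([1.2, -0.7])  # one coefficient per feature

X = np.array([[0.3, 1.0],
              [2.0, 0.1]])

# f(X) = beta_0 + sum_j X_j beta_j
f = beta0 + X @ beta
print(f)

# for classification, squash f(X) into the range 0 to 1
print(1 / (1 + np.exp(-f)))
```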
### Support Vector Machines
Support vector machines attempt to project samples into a higher dimensional space such that they can be divided by a hyperplane. A good explanation can be found in [this article](http://noble.gs.washington.edu/papers/noble_what.html).
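Support vector machines follow the same fit/predict interface as the models above; a sketch on the diabetes data (default parameters, purely for illustration):
```
from sklearn import datasets, metrics, svm

diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# the RBF kernel implicitly maps samples into a higher-dimensional space
clf = svm.SVR(kernel='rbf')
clf.fit(X, y)
print(metrics.mean_squared_error(y, clf.predict(X)))
```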
### Nearest neighbors
Nearest neighbor methods identify a number of samples from the training set that are close to the new sample and then return the average or most common value depending on the task.
### Decision trees
Decision trees attempt to predict the value of a new sample by learning simple rules from the training samples.
### Ensembles & boosting
Ensembles are combinations of other models. Combining different models together can improve performance by boosting generalizability. An average or most common value from the models is returned.
Boosting builds one model and then attempts to reduce the errors with the next model. At each stage the bias in the model is reduced. In this way many weak predictors can be combined into one much more powerful predictor.
I often begin with an ensemble or boosting approach as they typically give very good performance without needing to be carefully optimized. Many of the other algorithms are sensitive to their parameters.
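A sketch of both flavors on the diabetes data, with default parameters (training-set error only, so optimistic for the reasons discussed above):
```
from sklearn import datasets, ensemble, metrics

diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

for clf in (ensemble.RandomForestRegressor(),       # ensemble of trees
            ensemble.GradientBoostingRegressor()):  # sequential boosting
    clf.fit(X, y)
    print(type(clf).__name__, metrics.mean_squared_error(y, clf.predict(X)))
```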
## Parameter selection
Many of the models require several different parameters to be specified. Their performance is typically heavily influenced by these parameters and choosing the best values is vital in developing the best model.
Some models have alternative implementations that handle parameter selection in an efficient way.
```
from sklearn import datasets
diabetes = datasets.load_diabetes()
# Description at http://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
X = diabetes.data
y = diabetes.target
print(X.shape, y.shape)
from sklearn import linear_model
clf = linear_model.LassoCV(cv=20)
clf.fit(X, y)
print('Alpha chosen was ', clf.alpha_)
plt.plot(y, clf.predict(X), 'k.')
```
There is an expanded example in [the documentation](http://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_model_selection.html#example-linear-model-plot-lasso-model-selection-py).
There are also general classes to handle parameter selection for situations when dedicated classes are not available. As we will often have parameters in preprocessing steps these general classes will be used much more often.
```
from sklearn.model_selection import GridSearchCV  # in older scikit-learn: from sklearn import grid_search
from sklearn import neighbors
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, X_test.shape)
knn = neighbors.KNeighborsRegressor()
parameters = {'n_neighbors':[1,2,3,4,5,6,7,8,9,10]}
clf = GridSearchCV(knn, parameters)
clf.fit(X_train, y_train)
plt.plot(y_test, clf.predict(X_test), 'k.')
plt.show()
print(metrics.mean_squared_error(y_test, clf.predict(X_test)))
clf.get_params()
```
## Exercises
1. Load the handwritten digits dataset and choose an appropriate metric.
2. Divide the data into a training and test dataset.
3. Build a RandomForestClassifier on the training dataset, using cross-validation to evaluate performance.
4. Choose another classification algorithm and apply it to the digits dataset.
5. Use grid search to find the optimal parameters for the chosen algorithm.
6. Comparing the true values with the predictions from the best model, identify the numbers that are most commonly confused.
```
import numpy as nmp
import pandas as pnd
import matplotlib.pyplot as plt
import pymc3 as pmc
import clonosGP as cln
%load_ext autoreload
%autoreload 2
%matplotlib inline
DATA = pnd.read_csv('data/cll_Rincon_2019_patient1.csv')
METRICS = pnd.read_csv('results/cll_Rincon_2019_patient1.csv')
nmp.random.seed(42)
pmc.tt_rng(42);
RES1 = cln.infer(DATA,
model_args={'K': 20, 'prior': 'Flat', 'cov': 'Mat32', 'lik': 'Bin', 'threshold': 0.015},
pymc3_args={'niters': 40000, 'method': 'advi', 'flow': 'scale-loc', 'learning_rate': 1e-2, 'random_seed': 42})
nmp.random.seed(42)
pmc.tt_rng(42);
RES2 = cln.infer(DATA,
model_args={'K': 20, 'prior': 'GP0', 'cov': 'Mat32', 'lik': 'Bin', 'threshold': 0.015},
pymc3_args={'niters': 40000, 'method': 'advi', 'flow': 'scale-loc', 'learning_rate': 1e-2, 'random_seed': 42})
data1, centres1 = RES1['data'], RES1['centres']
data2, centres2, centres_gp = RES2['data'], RES2['centres'], RES2['centres_gp']
%load_ext rpy2.ipython
%R library(tidyverse)
%R library(patchwork)
%%R -i data1,data2,centres1,centres2,centres_gp,METRICS -w 10 -h 10 --units in
df2 = data2 %>% filter(CLUSTERID != 'uncertain')
cids2 = df2 %>% pull(CLUSTERID) %>% unique()
colors = colorRampPalette(RColorBrewer::brewer.pal(8, 'Set2'))(length(cids2))
gg1 =
df2 %>%
ggplot() +
geom_line(aes(x = TIME2, y = VAF, group = MUTID, color = CLUSTERID)) +
scale_x_continuous(breaks = unique(df2$TIME2), labels = unique(df2$SAMPLEID)) +
scale_color_manual(values = colors) +
labs(x = NULL, y = 'variant allele fraction') +
theme_bw() +
theme(legend.position = 'none',
axis.text.x = element_blank())
ctrs2 = centres2 %>% filter(CLUSTERID %in% cids2) %>% mutate(CLUSTERID = as.character(CLUSTERID))
ctrs_gp = centres_gp %>% filter(CLUSTERID %in% cids2) %>% mutate(CLUSTERID = as.character(CLUSTERID))
gg2 =
ggplot() +
geom_ribbon(aes(x = TIME, ymin = PHI_LO, ymax = PHI_HI, fill = CLUSTERID), data = ctrs_gp, alpha = 0.5) +
geom_line(aes(x = TIME, y = PHI, color = CLUSTERID), data = ctrs_gp) +
geom_point(aes(x = TIME2, y = PHI, color = CLUSTERID), data = ctrs2) +
scale_x_continuous(breaks = unique(df2$TIME2), labels = unique(df2$SAMPLEID)) +
scale_color_manual(values = colors) +
scale_fill_manual(values = colors) +
labs(x = NULL, y = 'cancer cell fraction') +
theme_bw() +
theme(legend.position = 'none',
axis.text.x = element_blank())
df1 = data1 %>% filter(CLUSTERID != 'uncertain')
cids1 = df1 %>% pull(CLUSTERID) %>% unique()
ctrs1 = centres1 %>% mutate(CLUSTERID = as.character(CLUSTERID)) %>% filter(CLUSTERID %in% cids1)
gg3 =
ctrs1 %>%
ggplot() +
geom_line(aes(x = TIME2, y = PHI, group = CLUSTERID), linetype = 'dashed') +
geom_linerange(aes(x = TIME2, ymin = PHI_LO, ymax=PHI_HI, group = CLUSTERID)) +
geom_point(aes(x = TIME2, y = PHI, group = CLUSTERID)) +
scale_x_continuous(breaks = unique(df1$TIME2), labels = unique(df1$SAMPLEID)) +
labs(x = 'sample', y = 'cancer cell fraction') +
theme_bw() +
theme(legend.position = 'none',
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
metrics =
METRICS %>%
filter(LIK == 'Bin', METRIC == 'LOSS') %>%
mutate(LABEL = if_else(PRIOR == 'Flat', 'Flat', str_c(PRIOR, COV, sep='-'))) %>%
mutate(LABEL = factor(LABEL, levels = .$LABEL))
med = metrics %>% filter(LABEL == 'Flat') %>% pull(MEDIAN)
gg4 =
metrics %>%
ggplot() +
geom_hline(yintercept = -med, linetype = 'dashed') +
geom_linerange(aes(x = LABEL, ymin = -HIGH, ymax=-LOW)) +
geom_point(aes(x = LABEL, y = -MEDIAN)) +
labs(x = NULL, y = 'evidence lower bound') +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
gg1 / gg2 / gg3 / gg4 +
plot_annotation(tag_levels = 'A')
# ggsave('tmp.pdf')
data1.CLUSTERID.unique(), data2.CLUSTERID.unique()
data2.query('CLUSTERID == "uncertain"').groupby('CLUSTERID').count()
```
# Leverage
### Stupidity or genius?
Updated 2020-August-28.
* This notebook looks at what the last 92 years of daily S&P 500 data have to say about the now well-known daily-rebalanced (2x) leverage.
* Automatic reinvestment of dividends is assumed.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
### Constants
```
# Number of trading days in a year
N_YEAR = 252
# 2x dividends as a fraction of S&P 500 dividends,
# assumed from the current ratio of SSO to SPY dividends
DIV2 = 0.18
# Explored leverage space (0% leverage to 100% leverage)
lev = np.linspace(0, 1, 41)
```
### Load Data
```
# S&P 500 daily - from Yahoo Finance
df = pd.read_csv('../data/^GSPC.csv', index_col=0, parse_dates=[0])
# S&P 500 annual average dividend - from Quandl
dfi = pd.read_csv('../data/MULTPL-SP500_DIV_YIELD_YEAR-annual.csv',
index_col=0, parse_dates=[0])
dividend_year = dict(zip(dfi.index.year, dfi.Value.to_numpy()))
df['DividendYear'] = df.index.map(lambda x: dividend_year[x.year]) / 100
df
```
### Create Daily Leverage
```
dl = (df.Close / df.Close.shift()).iloc[1:]
div = df.DividendYear.iloc[1:]
```
Each entry of `dl` is the end-of-day multiple of the previous trading day's closing price, such that 1.05 would indicate a 5% increase.
```
# How many trading days in a year, i.e., how long to rebalance?
# We will settle on the standard 252 trading days
dl.groupby(dl.index.year).count().value_counts()
# Long-term accuracy is good, as expected
assert np.round(np.prod(dl), 5) == np.round(df.Close[-1] / df.Close[0], 5)
dl2 = 2*(dl-1) + 1
```
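As a two-day sketch of what `dl2 = 2*(dl-1) + 1` implies (made-up returns): a +10% day followed by a -10% day leaves the unleveraged index at 0.99 of its start, while the daily 2x version ends at 0.96 rather than 0.98, the familiar volatility-decay effect of daily rebalancing:
```
import numpy as np

dl_example = np.array([1.10, 0.90])   # +10% then -10%
dl2_example = 2*(dl_example - 1) + 1  # +20% then -20%

print(np.prod(dl_example))   # 0.99
print(np.prod(dl2_example))  # 0.96
```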
## All n-Year periods since 1927
We assume n = 10 and annual (252 trading days) rebalancing of leverage percentages.
#### Constants
```
num_years = 10
n_period = num_years * N_YEAR
len_chunk = n_period
len_sep = 1
n_split = N_YEAR
rebalance_idxs = np.arange(n_split, n_period, n_split)
```
#### Get the index architecture
```
assert dl.size == dl2.size
%%time
# Build (n_arrays x len_chunk) index matrices: row i of `columns` selects the
# overlapping window of len_chunk trading days starting at day i*len_sep.
n_arrays = int(np.ceil((dl.size - len_chunk + 1) / len_sep))
rows = np.array((np.arange(n_arrays).reshape(n_arrays, -1) + np.tile(
np.zeros(len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
columns = np.array(((len_sep*np.arange(0, n_arrays)).reshape(n_arrays, -1) + np.tile(
np.arange(0, len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
n_arrays
```
#### Get the start dates
```
start_dates = dl.index[:n_arrays:len_sep]
```
#### Get the periods
```
def get_periods(array):
return np.tile(array, n_arrays).reshape(n_arrays, -1)[rows, columns]
%%time
dlm = get_periods(dl.to_numpy())
%%time
dlm2 = get_periods(dl2.to_numpy())
```
#### Combine with Dividend Data
```
%%time
divm = get_periods(div.to_numpy())
print(dlm.shape)
assert dlm.shape == dlm2.shape == divm.shape
assert dlm.shape[0] == n_arrays
divmsplit = np.array(np.hsplit(divm, rebalance_idxs)).T
divmsplit = np.average(divmsplit, axis=0)
divmsplit2 = divmsplit * DIV2
```
#### Get returns from each year
```
%%time
dlmsplit = np.array(np.hsplit(dlm, rebalance_idxs)).T
dlmsplit = np.prod(dlmsplit, axis=0)
dlmsplit += divmsplit
%%time
dlmsplit2 = np.array(np.hsplit(dlm2, rebalance_idxs)).T
dlmsplit2 = np.prod(dlmsplit2, axis=0)
dlmsplit2 += divmsplit2
```
#### Aggregate the results over the n-years with varying leverage rates
```
agg2 = (1-lev).reshape(-1, 1, 1)*dlmsplit + lev.reshape(-1, 1, 1)*dlmsplit2
results2 = np.prod(agg2.T, axis=0)
print(results2.shape)
```
#### Get results relative to baseline (S&P 500)
```
relative2 = results2 / results2[:,0].reshape(n_arrays, -1)
```
#### Plot many leverage curves
```
%%time
plt.figure(figsize=(12, 8))
for i in range(0, n_arrays, 5):
plt.plot(lev, results2[i], alpha=0.005, color='#1f77b4')
plt.yscale('log')
plt.xticks(lev[::4], ['{:.0%}'.format(p) for p in lev[::4]])
plt.xlim(0, 1)
plt.xlabel('Percent Leveraged')
plt.title('Return on Investment for 20% of all {}-Year Periods from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, and Assumed Reinvestment of Dividends.'.format(num_years))
plt.tight_layout();
plt.savefig('plots/leverage-2x-10yr-many_lev_curves.png', dpi=300);
```
### Plotting leverage curves by percentile
```
quantiles = np.linspace(0, 1, 101, endpoint=True)
results2q = np.quantile(results2, quantiles, axis=0)
scheme = sns.color_palette('viridis', quantiles.size)
plt.figure(figsize=(12, 8))
for i, quant in enumerate(quantiles):
color = scheme[i]
label = None
if quant == 0.5:
color = 'r'
label = 'Median'
plt.plot(lev, results2q[i],
color=color, label=label, linewidth=2)
plt.yscale('log')
plt.xticks(lev[::4], ['{:.0%}'.format(p) for p in lev[::4]])
plt.axhline(y=1, color='k')
plt.xlim(0, 0.8)
plt.ylim(.08, 20)
plt.xlabel('Percent Leveraged')
plt.grid(alpha=0.6, which='both')
plt.title('Return on Investment for all {}-Year Periods from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, and Assumed Reinvestment of Dividends.\n\
Each line represents a percentile (0%, 1%,..., 99%, 100%). Median is in Red.'.format(num_years))
plt.tight_layout();
plt.savefig('plots/leverage-2x-10yr-percentiles.png', dpi=300);
relative2q = np.quantile(relative2, quantiles, axis=0)
plt.figure(figsize=(12, 8))
for i, quant in enumerate(quantiles):
color = scheme[i]
label = None
if quant == 0.5:
color = 'r'
label = 'Median'
plt.plot(lev, relative2q[i],
color=color, label=label, linewidth=2)
plt.yscale('log')
plt.xticks(lev[::4], ['{:.0%}'.format(p) for p in lev[::4]])
plt.axhline(y=1, color='k')
plt.xlim(0, 0.8)
plt.ylim(.1, 5)
plt.xlabel('Percent Leveraged')
plt.grid(alpha=0.6, which='both')
plt.title('Relative Return on Investment for all {}-Year Periods from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, and Assumed Reinvestment of Dividends.\n\
Each line represents a percentile (0%, 1%,..., 99%, 100%). Median is in Red.'.format(num_years))
plt.tight_layout();
plt.savefig('plots/leverage-2x-10yr-relative-percentiles.png', dpi=300);
```
#### Limited quantiles
```
quantiles2 = np.array([0.05, 0.15, 0.25, 0.4, 0.6, 0.75, 0.85, 0.95])[::-1]
scheme2 = sns.color_palette('viridis', quantiles2.size)[::-1]
fig, ax = plt.subplots(4, 2, figsize=(10, 9))
for i, quant in enumerate(quantiles2):
cur_ax = ax.ravel()[i]
q_array = np.quantile(results2, quant, axis=0)
color = scheme2[i]
cur_ax.plot(lev, q_array,
color=color, label='{:.2%}'.format(quant), linewidth=2)
cur_ax.set_xticks(lev[::4])
cur_ax.set_xticklabels(['{:.0%}'.format(p) for p in lev[::4]])
cur_ax.set_xlim(0, 1)
cur_ax.grid(alpha=0.4)
cur_ax.set_xlabel('Percent Leveraged')
cur_ax.legend()
fig.suptitle('Return on Investment for all {}-Year Periods from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, and Assumed Reinvestment of Dividends.'.format(num_years))
plt.savefig('plots/leverage-2x-10yr-limited_percentiles.png', dpi=300);
fig, ax = plt.subplots(4, 2, figsize=(10, 9))
for i, quant in enumerate(quantiles2):
cur_ax = ax.ravel()[i]
q_array = np.quantile(relative2, quant, axis=0)
color = scheme2[i]
cur_ax.plot(lev, q_array,
color=color, label='{:.2%}'.format(quant), linewidth=2)
cur_ax.set_xticks(lev[::4])
cur_ax.set_xticklabels(['{:.0%}'.format(p) for p in lev[::4]])
cur_ax.set_xlim(0, 1)
cur_ax.grid(alpha=0.4)
cur_ax.set_xlabel('Percent Leveraged')
cur_ax.legend()
fig.suptitle('Relative Return on Investment for all {}-Year Periods from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, and Assumed Reinvestment of Dividends.'.format(num_years))
plt.savefig('plots/leverage-2x-10yr-relative-limited_percentiles.png', dpi=300);
plt.figure(figsize=(6.4, 4.8))
q = 0.5
q_array = np.quantile(results2, q, axis=0)
plt.plot(lev, q_array, color='r', linewidth=2)
plt.xticks(lev[::4], ['{:.0%}'.format(p) for p in lev[::4]])
plt.xlim(0, 1)
plt.xlabel('Percent Leveraged')
plt.grid(alpha=0.4)
plt.title('Median Return on Investment for all {}-Year Periods\n\
from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, \n\
and Assumed Reinvestment of Dividends.'.format(num_years));
plt.tight_layout();
plt.savefig('plots/leverage-2x-10yr-median.png', dpi=300);
plt.figure(figsize=(6.4, 4.8))
q = 0.5
q_array = np.quantile(relative2, q, axis=0)
plt.plot(lev, q_array, color='r', linewidth=2)
plt.xticks(lev[::4], ['{:.0%}'.format(p) for p in lev[::4]])
plt.xlim(0, 1)
plt.xlabel('Percent Leveraged')
plt.grid(alpha=0.4)
plt.title('Median Relative Return on Investment for all {}-Year Periods\n\
from Jan 1928 to Aug 2020, with Annual\n\
Rebalancing of Leverage Rates, \n\
and Assumed Reinvestment of Dividends.'.format(num_years));
plt.tight_layout();
plt.savefig('plots/leverage-2x-relative-10yr-median.png', dpi=300);
plt.figure(figsize=(6.4, 4.8))
plt.scatter(quantiles, lev[np.argmax(results2q, axis=1)])
plt.yticks(lev[::4], ['{:.0%}'.format(p) for p in lev[::4]])
plt.ylabel('Percent Leveraged')
plt.xlim(0.2, 0.55)
plt.xlabel('Percentile')
plt.grid(alpha=.4)
plt.ylim(0, 1)
plt.title('Optimal Leverage Rate as a Function of Percentile for all {}-Year Periods\n\
from Jan 1928 to Aug 2020, with Annual Rebalancing of Leverage Rates, \n\
and Assumed Reinvestment of Dividends.'.format(num_years))
plt.tight_layout();
plt.savefig('plots/leverage-2x-10yr-optimal_leverage.png', dpi=300);
```
### Compare histograms of 0% and 50% leveraged.
```
idx50 = 20
lev[idx50]
print(np.quantile(results2[:,0], 0.5), np.quantile(results2[:,idx50], 0.5))
plt.hist(results2[:,0], bins=40, alpha=0.2, density=True)
plt.hist(results2[:,idx50],
bins=40, alpha=0.2, density=True)
plt.xlim(0, 20);
```
### What were some of the craziest n-year returns, and when were they?
#### Maximum returns
```
maximums = np.unique(np.argmax(results2, axis=0))
# 0% leverage, 100% leverage
results2[maximums][:,0], results2[maximums][:,-1]
start_dates[maximums]
```
#### Minimum returns
```
minimums = np.unique(np.argmin(results2, axis=0))
# 0% leverage, 100% leverage
results2[minimums][:,0], results2[minimums][:,-1]
start_dates[minimums]
```
## Exercise 5.03: Visually comparing different tile providers
Geoplotlib offers the possibility to switch between several providers of map tiles.
This means we can try out different map tile styles that fit our visualization.
In this exercise we'll take a look at how easily tile providers can be swapped.
#### Loading our dataset
**Note:**
Since geoplotlib is layer-based, we can also look at the map without any layers applied.
In this exercise we'll focus on the tiles themselves without plotting any data points.
```
# importing the necessary dependencies
import geoplotlib
```
---
#### Changing the tile provider
Geoplotlib uses **CartoDB Positron** map tiles by default.
However, you can use any OpenStreetMap tile server via the `tiles_provider` method.
For now, display only the map tiles, without adding any plotting layer on top.
```
# displaying the map with the default tile provider
geoplotlib.show()
```
Other examples of popular free tile providers are:
- [Stamen Watercolor](http://maps.stamen.com/watercolor/#12/37.7706/-122.3782) => `watercolor`
- [Stamen Toner](http://maps.stamen.com/toner/#12/37.7706/-122.3782) => `toner`
- [Stamen Toner Lite](http://maps.stamen.com/toner-lite/#12/37.7706/-122.3782) => `toner-lite`
- [DarkMatter](https://carto.com/location-data-services/basemaps/) => `darkmatter`
Geoplotlib already provides the mapping for some of the most common tile providers.
By passing the provider's name to the `tiles_provider` method, we can quickly switch between different styles.
Use the `tiles_provider` method with the `darkmatter` tiles.
```
# using map tiles from the dark matter tile provider
geoplotlib.tiles_provider('darkmatter')
geoplotlib.show()
```
In some cases the built-in tiles are not the right fit, or they draw too much of the user's attention away from the data being displayed.
In that case we can instead pass an object containing the tile provider information.
More free tile providers for OpenStreetMap can be found here:
https://wiki.openstreetmap.org/wiki/Tile_servers
Use the `attribution` element of the object passed to `tiles_provider` to provide a custom attribution.
```
# using custom object to set up tile provider
geoplotlib.tiles_provider({
'url': lambda zoom, xtile, ytile: 'http://a.tile.openstreetmap.fr/hot/%d/%d/%d.png' % (zoom, xtile, ytile),
'tiles_dir': 'custom_tiles',
'attribution': 'Custom Tiles Provider - Humanitarian map style'
})
geoplotlib.show()
```
**Note:**
Choosing tiles that complement your visualization can take it to the next level and add an artistic element.
Just make sure the data stays in focus and the map itself does not claim too much of the user's attention.
```
import keras
keras.__version__
```
# Training a Binary Classifier on IMDB Reviews
Binary (two-class) classification is probably the most widely applied kind of problem in machine learning; it fits whenever the task at hand has only two possible outcomes. In this example, we will classify IMDB movie reviews as "positive" or "negative" based on their text content.
## About the IMDB Dataset
The IMDB dataset consists of 50,000 review texts from the Internet Movie Database, split into 25,000 training samples and 25,000 test samples, each containing 50% negative and 50% positive reviews.
We can load the prepared dataset directly from the Keras Datasets library. The data has already been preprocessed: each review is encoded as a sequence of integers in word order, where each integer stands for a specific word in a dictionary. As follows:
```
from keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
```
num_words=10000 means we keep only the 10,000 most frequent words. In the labels, 0 indicates a negative review and 1 a positive review.
```
max([max(sequence) for sequence in train_data])
```
We can also use the word index to reconstruct a review back into its original text:
```
# word_index is a dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# We reverse it, mapping integer indices to words
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# We decode the review; note that our indices were offset by 3
# because 0, 1 and 2 are reserved indices for "padding", "start of sequence", and "unknown".
decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
# show
decoded_review
```
## Preparing the Data
We cannot feed the integer word indices directly into the network for training, so the data has to be converted first. Since we keep only the 10,000 most common words, each input can be one-hot encoded into a 10,000-dimensional vector; for example, [3, 18] becomes an all-zero array with ones only at indices 3 and 18. These tensors are what we train on. The conversion looks like this:
```
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
# Create an all-zero matrix of shape (len(sequences), dimension)
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1. # set specific indices of results[i] to 1s
return results
# Our vectorized training data
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)
```
Convert the labels to float32 arrays as well:
```
# Our vectorized labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
```
## Building the Network
Here we use a three-layer network: two fully connected layers of 16 units each with relu activation, followed by a single output unit (indicating a positive or negative review) with a sigmoid activation. From bottom to top, the architecture looks like this:

```
from keras import models
from keras import layers
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
```
Compile the network with the rmsprop optimizer and binary_crossentropy as the loss function:
```
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
```
## Training the Model
From the 25,000 training samples, we set aside 10,000 as a validation set, so that we can monitor the accuracy during training. As follows:
```
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
```
Start training the model:
```
history = model.fit(partial_x_train,
partial_y_train,
epochs=100,
batch_size=512,
validation_data=(x_val, y_val))
```
The training run stores its metrics in `history`; analyzing this information afterwards helps us tune the parameters.
```
history_dict = history.history
history_dict.keys()
```
The call above shows the information contained in the training History. Next, we plot it as charts:
```
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
```
The plots show that, with the current architecture, the best result is already reached around the 3rd epoch; training beyond that point leads to overfitting. In this case, setting the number of epochs to 3 or 4 is the way to obtain the best model.
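Given that, a minimal follow-up sketch (not part of the original notebook): retrain a fresh model of the same architecture for only 4 epochs on all 25,000 training samples, then evaluate it on the test set.
```
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Stop after 4 epochs, before overfitting sets in
model.fit(x_train, y_train, epochs=4, batch_size=512)
# evaluate() returns [test loss, test accuracy]
results = model.evaluate(x_test, y_test)
print(results)
```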
```
%matplotlib inline
```
# Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
This is an example of applying :class:`sklearn.decomposition.NMF` and
:class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus
of documents to extract additive models of the topic structure of the
corpus. The output is a list of topics, each represented as a list of
terms (weights are not shown).
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_components) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF; in LDA, the time complexity is
proportional to (n_samples * iterations).
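For reference, the two NMF objective functions mentioned above can be written as follows (a standard formulation, with $X$ the document-term matrix and $W, H \ge 0$ the factors; stated here for orientation, not quoted from the scikit-learn docs):
$$\min_{W,H \ge 0} \; \|X - WH\|_F^2 = \sum_{ij} \bigl(X_{ij} - (WH)_{ij}\bigr)^2 \quad \text{(Frobenius norm)}$$
$$\min_{W,H \ge 0} \; \sum_{ij} \Bigl( X_{ij} \log \frac{X_{ij}}{(WH)_{ij}} - X_{ij} + (WH)_{ij} \Bigr) \quad \text{(generalized Kullback-Leibler divergence)}$$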
```
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_components = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
message = "Topic #%d: " % topic_idx
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
# Fit the NMF model
print("Fitting the NMF model (generalized Kullback-Leibler divergence) with "
"tf-idf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
beta_loss='kullback-leibler', solver='mu', max_iter=1000, alpha=.1,
l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (generalized Kullback-Leibler divergence):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_components=n_components, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
```
# Launching an MNIST Training Job with Model Parallelism Using SageMaker Distributed Model Parallel on Amazon SageMaker
SageMaker Distributed Model Parallel (SMP) is a model-parallelism library for training large deep learning models that were previously hard to train because of GPU memory limits. SMP automatically and efficiently partitions a model across multiple GPUs and instances and coordinates training, so you can build larger models with more parameters and thus higher prediction accuracy.
In this notebook, we configure SageMaker Distributed Model Parallel to train a model using the example PyTorch training script `utils/pt_mnist.py` and the [Amazon SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/overview.html#train-a-model-with-the-sagemaker-python-sdk).
### Additional Resources
If you are new to Amazon SageMaker, the following resources may be helpful when training PyTorch models with SMP on SageMaker:
* For details on the SageMaker model parallelism library, see [Model Parallel Distributed Training with SageMaker Distributed](http://docs.aws.amazon.com/sagemaker/latest/dg/model-parallel.html).
* For details on using the SageMaker Python SDK with PyTorch, see [Using PyTorch with the SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html).
* For details on launching a training job on Amazon SageMaker with your own training image, see [Use Your Own Training Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html).
## Initialize Amazon SageMaker
Run the following cells to initialize the notebook instance and to get the SageMaker execution role used to run this notebook.
```
pip install sagemaker-experiments
pip install sagemaker --upgrade
%%time
import sagemaker
from sagemaker import get_execution_role
from sagemaker.pytorch import PyTorch
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
import boto3
from time import gmtime, strftime
role = get_execution_role() # provide a pre-existing role ARN as an alternative to creating a new role
print(f'SageMaker Execution Role:{role}')
session = boto3.session.Session()
```
## Prepare the Training Script
To view the example training script used in this demo, run the next cell. It is a PyTorch 1.6 training script that uses the MNIST dataset.
Note that the script contains SMP-specific operations and decorators that configure model-parallel training. For details on the SMP functions and types used, see the comments in the training script.
```
%%writefile utils/pt_mnist.py
# Future
from __future__ import print_function
# Standard Library
import os, time
import argparse
import math
import random
# Third Party
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.cuda.amp import autocast
from torch.optim.lr_scheduler import StepLR
from torchnet.dataset import SplitDataset
from torchvision import datasets, transforms
# First Party
import smdistributed.modelparallel.torch as smp
# SM Distributed: import scaler from smdistributed.modelparallel.torch.amp, instead of torch.cuda.amp
# Make cudnn deterministic in order to get the same losses across runs.
# The following two lines can be removed if they cause a performance impact.
# For more details, see:
# https://pytorch.org/docs/stable/notes/randomness.html#cudnn
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def aws_s3_sync(source, destination):
"""aws s3 sync in quiet mode and time profile"""
import time, subprocess
cmd = ["aws", "s3", "sync", "--quiet", source, destination]
print(f"Syncing files from {source} to {destination}")
start_time = time.time()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
end_time = time.time()
print("Time Taken to Sync: ", (end_time-start_time))
return
def sync_local_checkpoints_to_s3(local_path="/opt/ml/checkpoints", s3_path=os.path.dirname(os.path.dirname(os.getenv('SM_MODULE_DIR', '')))+'/checkpoints'):
""" sample function to sync checkpoints from local path to s3 """
import boto3, botocore
#check if local path exists
if not os.path.exists(local_path):
raise RuntimeError("Provided local path {local_path} does not exist. Please check")
#check if s3 bucket exists
s3 = boto3.resource('s3')
if 's3://' not in s3_path:
raise ValueError("Provided s3 path {s3_path} is not valid. Please check")
s3_bucket = s3_path.replace('s3://','').split('/')[0]
print(f"S3 Bucket: {s3_bucket}")
try:
s3.meta.client.head_bucket(Bucket=s3_bucket)
except botocore.exceptions.ClientError as e:
error_code = e.response['Error']['Code']
if error_code == '404':
raise RuntimeError('S3 bucket does not exist. Please check')
aws_s3_sync(local_path, s3_path)
return
def sync_s3_checkpoints_to_local(local_path="/opt/ml/checkpoints", s3_path=os.path.dirname(os.path.dirname(os.getenv('SM_MODULE_DIR', '')))+'/checkpoints'):
""" sample function to sync checkpoints from s3 to local path """
import boto3, botocore
#create the local path if it does not exist
if not os.path.exists(local_path):
print(f"Provided local path {local_path} does not exist. Creating...")
try:
os.makedirs(local_path)
except Exception as e:
raise RuntimeError(f"failed to create {local_path}")
#check if s3 bucket exists
s3 = boto3.resource('s3')
if 's3://' not in s3_path:
raise ValueError("Provided s3 path {s3_path} is not valid. Please check")
s3_bucket = s3_path.replace('s3://','').split('/')[0]
print(f"S3 Bucket: {s3_bucket}")
try:
s3.meta.client.head_bucket(Bucket=s3_bucket)
except botocore.exceptions.ClientError as e:
error_code = e.response['Error']['Code']
if error_code == '404':
raise RuntimeError('S3 bucket does not exist. Please check')
aws_s3_sync(s3_path, local_path)
return
class Net1(nn.Module):
def __init__(self):
super(Net1, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
return x
class Net2(nn.Module):
def __init__(self):
super(Net2, self).__init__()
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, 1)
return output
class GroupedNet(nn.Module):
def __init__(self):
super(GroupedNet, self).__init__()
self.net1 = Net1()
self.net2 = Net2()
def forward(self, x):
x = self.net1(x)
x = self.net2(x)
return x
# SM Distributed: Define smp.step. Return any tensors needed outside.
@smp.step
def train_step(model, scaler, data, target):
with autocast(True):
output = model(data)
loss = F.nll_loss(output, target, reduction="mean")
scaled_loss = loss
model.backward(scaled_loss)
return output, loss
def train(model, scaler, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
# SM Distributed: Move input tensors to the GPU ID used by the current process,
# based on the set_device call.
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
# Return value, loss_mb is a StepOutput object
_, loss_mb = train_step(model, scaler, data, target)
# SM Distributed: Average the loss across microbatches.
loss = loss_mb.reduce_mean()
optimizer.step()
if smp.rank() == 0 and batch_idx % 10 == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
# SM Distributed: Define smp.step for evaluation.
@smp.step
def test_step(model, data, target):
output = model(data)
loss = F.nll_loss(output, target, reduction="sum").item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).sum().item()
return loss, correct
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
# SM Distributed: Moves input tensors to the GPU ID used by the current process
# based on the set_device call.
data, target = data.to(device), target.to(device)
# Since test_step returns scalars instead of tensors,
# test_step decorated with smp.step will return lists instead of StepOutput objects.
loss_batch, correct_batch = test_step(model, data, target)
test_loss += sum(loss_batch)
correct += sum(correct_batch)
test_loss /= len(test_loader.dataset)
if smp.mp_rank() == 0:
print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
return test_loss
def main():
if not torch.cuda.is_available():
raise ValueError("The script requires CUDA support, but CUDA not available")
use_ddp = True
use_horovod = False
# Fix seeds in order to get the same losses across runs
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
smp.init()
# SM Distributed: Set the device to the GPU ID used by the current process.
# Input tensors should be transferred to this device.
torch.cuda.set_device(smp.local_rank())
device = torch.device("cuda")
kwargs = {"batch_size": 64}
kwargs.update({"num_workers": 1, "pin_memory": True, "shuffle": False})
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
# SM Distributed: Download only on a single process per instance.
# When this is not present, the file is corrupted by multiple processes trying
# to download and extract at the same time
if smp.local_rank() == 0:
dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
smp.barrier()
dataset1 = datasets.MNIST("../data", train=True, download=False, transform=transform)
if (use_ddp or use_horovod) and smp.dp_size() > 1:
partitions_dict = {f"{i}": 1 / smp.dp_size() for i in range(smp.dp_size())}
dataset1 = SplitDataset(dataset1, partitions=partitions_dict)
dataset1.select(f"{smp.dp_rank()}")
# Download and create dataloaders for train and test dataset
dataset2 = datasets.MNIST("../data", train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)
model = GroupedNet()
# SMP handles the transfer of parameters to the right device
# and the user doesn't need to call 'model.to' explicitly.
# model.to(device)
optimizer = optim.Adadelta(model.parameters(), lr=4.0)
# SM Distributed: Use the DistributedModel container to provide the model
# to be partitioned across different ranks. For the rest of the script,
# the returned DistributedModel object should be used in place of
# the model provided for DistributedModel class instantiation.
model = smp.DistributedModel(model)
scaler = smp.amp.GradScaler()
optimizer = smp.DistributedOptimizer(optimizer)
scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
for epoch in range(1, 2):
train(model, scaler, device, train_loader, optimizer, epoch)
test_loss = test(model, device, test_loader)
scheduler.step()
if smp.rank() == 0:
if os.path.exists('/opt/ml/local_checkpoints'):
print("-INFO- PATH DO EXIST")
else:
os.makedirs('/opt/ml/local_checkpoints')
print("-INFO- PATH DO NOT EXIST")
# Wait for the checkpoint save to finish before running another allgather_object
smp.barrier()
if smp.dp_rank() == 0:
model_dict = model.local_state_dict()
opt_dict = optimizer.local_state_dict()
smp.save(
{"model_state_dict": model_dict, "optimizer_state_dict": opt_dict},
f"/opt/ml/local_checkpoints/pt_mnist_checkpoint.pt",
partial=True,
)
smp.barrier()
if smp.local_rank() == 0:
print("Start syncing")
base_s3_path = os.path.dirname(os.path.dirname(os.getenv('SM_MODULE_DIR', '')))
curr_host = os.getenv('SM_CURRENT_HOST')
full_s3_path = f'{base_s3_path}/checkpoints/{curr_host}/'
sync_local_checkpoints_to_s3(local_path='/opt/ml/local_checkpoints', s3_path=full_s3_path)
print("Finished syncing")
if __name__ == "__main__":
main()
```
## Define the SageMaker Training Job
Next, we define the SageMaker training job using the SageMaker Estimator API. With an [`Estimator`](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html) we define the number and type of EC2 instances Amazon SageMaker uses for training, as well as the size of the volume attached to those instances.
You can update the following:
* `processes_per_host`
* `entry_point`
* `instance_count`
* `instance_type`
* `base_job_name`
You can also provide and modify the configuration parameters for the SageMaker Distributed Model Parallel library. These parameters are passed through the `distribution` argument, as shown below.
### Update the Type and Number of EC2 Instances Used
Specify `processes_per_host`. In general it should be a multiple of the number of partitions (e.g. 2, 4, ...).
The values you specify in `instance_type` and `instance_count` determine the number of GPUs Amazon SageMaker uses during training: `instance_type` determines the number of GPUs on a single instance, and that number is multiplied by `instance_count`. For example, an ml.p3.16xlarge instance has 8 GPUs, so `instance_count=1` yields 8 GPUs in total.
You must choose `instance_type` and `instance_count` so that the total number of GPUs available for training matches `partitions` in the `config` of `smp.init` in the training script.
To look up instance types, see [Amazon EC2 Instance Types](https://aws.amazon.com/sagemaker/pricing/).
### Uploading Checkpoints During Training or Resuming from Previous Checkpoints
We also provide a custom way to upload checkpoints during training or to resume from the checkpoints of a previous run. See the `aws_s3_sync`, `sync_local_checkpoints_to_s3`, and `sync_s3_checkpoints_to_local` functions for details.
You can see them in the example script `pt_mnist.py`; in this example, only `sync_local_checkpoints_to_s3` is used, to upload checkpoints during training.
After you have updated `entry_point`, `instance_count`, `instance_type` and `base_job_name`, run the following to create an estimator.
```
sagemaker_session = sagemaker.session.Session(boto_session=session)
mpioptions = "-verbose -x orte_base_help_aggregate=0 "
mpioptions += "--mca btl_vader_single_copy_mechanism none "
all_experiment_names = [exp.experiment_name for exp in Experiment.list()]
#choose an experiment name (only need to create it once)
experiment_name = "SM-MP-DEMO"
# Load the experiment if it exists, otherwise create
if experiment_name not in all_experiment_names:
customer_churn_experiment = Experiment.create(
experiment_name=experiment_name, sagemaker_boto_client=boto3.client("sagemaker")
)
else:
customer_churn_experiment = Experiment.load(
experiment_name=experiment_name, sagemaker_boto_client=boto3.client("sagemaker")
)
# Create a trial for the current run
trial = Trial.create(
trial_name="SMD-MP-demo-{}".format(strftime("%Y-%m-%d-%H-%M-%S", gmtime())),
experiment_name=customer_churn_experiment.experiment_name,
sagemaker_boto_client=boto3.client("sagemaker"),
)
smd_mp_estimator = PyTorch(
entry_point="pt_mnist.py", # Pick your train script
source_dir='utils',
role=role,
instance_type='ml.p3.16xlarge',
sagemaker_session=sagemaker_session,
framework_version='1.6.0',
py_version='py36',
instance_count=1,
distribution={
"smdistributed": {
"modelparallel": {
"enabled":True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 2,
"ddp": True,
}
}
},
"mpi": {
"enabled": True,
"processes_per_host": 2, # Pick your processes_per_host
"custom_mpi_options": mpioptions
},
},
base_job_name="SMD-MP-demo",
)
```
Finally, use the estimator to launch the SageMaker training job.
```
%%time
smd_mp_estimator.fit(
experiment_config={
"ExperimentName": customer_churn_experiment.experiment_name,
"TrialName": trial.trial_name,
"TrialComponentDisplayName": "Training",
})
```
# The Fuzzing Book
## Sitemap
While the chapters of this book can be read one after the other, there are many possible paths through the book. In this graph, an arrow _A_ → _B_ means that chapter _A_ is a prerequisite for chapter _B_. You can pick arbitrary paths in this graph to get to the topics that interest you most:
```
# ignore
from IPython.display import SVG
# ignore
SVG(filename='PICS/Sitemap.svg')
```
## [Table of Contents](index.ipynb)
### <a href="01_Intro.ipynb" title="Part I: Whetting Your Appetite (01_Intro) In this part, we introduce the topics of the book.">Part I: Whetting Your Appetite</a>
* <a href="Tours.ipynb" title="Tours through the Book (Tours) This book is massive. With 17,000 lines of code and 125,000 words of text, a printed version would cover more than 1,000 pages of text. Obviously, we do not assume that everybody wants to read everything.">Tours through the Book</a>
* <a href="Intro_Testing.ipynb" title="Introduction to Software Testing (Intro_Testing) Before we get to the central parts of the book, let us introduce essential concepts of software testing. Why is it necessary to test software at all? How does one test software? How can one tell whether a test has been successful? How does one know if one has tested enough? In this chapter, let us recall the most important concepts, and at the same time get acquainted with Python and interactive notebooks.">Introduction to Software Testing</a>
### <a href="02_Lexical_Fuzzing.ipynb" title="Part II: Lexical Fuzzing (02_Lexical_Fuzzing) This part introduces test generation at the lexical level, that is, composing sequences of characters.">Part II: Lexical Fuzzing</a>
* <a href="Fuzzer.ipynb" title="Fuzzing: Breaking Things with Random Inputs (Fuzzer) In this chapter, we'll start with one of the simplest test generation techniques. The key idea of random text generation, also known as fuzzing, is to feed a string of random characters into a program in the hope to uncover failures.">Fuzzing: Breaking Things with Random Inputs</a>
* <a href="Coverage.ipynb" title="Code Coverage (Coverage) In the previous chapter, we introduced basic fuzzing – that is, generating random inputs to test programs. How do we measure the effectiveness of these tests? One way would be to check the number (and seriousness) of bugs found; but if bugs are scarce, we need a proxy for the likelihood of a test to uncover a bug. In this chapter, we introduce the concept of code coverage, measuring which parts of a program are actually executed during a test run. Measuring such coverage is also crucial for test generators that attempt to cover as much code as possible.">Code Coverage</a>
* <a href="MutationFuzzer.ipynb" title="Mutation-Based Fuzzing (MutationFuzzer) Most randomly generated inputs are syntactically invalid and thus are quickly rejected by the processing program. To exercise functionality beyond input processing, we must increase chances to obtain valid inputs. One such way is so-called mutational fuzzing – that is, introducing small changes to existing inputs that may still keep the input valid, yet exercise new behavior. We show how to create such mutations, and how to guide them towards yet uncovered code, applying central concepts from the popular AFL fuzzer.">Mutation-Based Fuzzing</a>
* <a href="GreyboxFuzzer.ipynb" title="Greybox Fuzzing (GreyboxFuzzer) In the previous chapter, we have introduced mutation-based fuzzing, a technique that generates fuzz inputs by applying small mutations to given inputs. In this chapter, we show how to guide these mutations towards specific goals such as coverage. The algorithms in this book stem from the popular American Fuzzy Lop (AFL) fuzzer, in particular from its AFLFast and AFLGo flavors. We will explore the greybox fuzzing algorithm behind AFL and how we can exploit it to solve various problems for automated vulnerability detection.">Greybox Fuzzing</a>
* <a href="SearchBasedFuzzer.ipynb" title="Search-Based Fuzzing (SearchBasedFuzzer) Sometimes we are not only interested in fuzzing as many as possible diverse program inputs, but in deriving specific test inputs that achieve some objective, such as reaching specific statements in a program. When we have an idea of what we are looking for, then we can search for it. Search algorithms are at the core of computer science, but applying classic search algorithms like breadth or depth first search to search for tests is unrealistic, because these algorithms potentially require us to look at all possible inputs. However, domain-knowledge can be used to overcome this problem. For example, if we can estimate which of several program inputs is closer to the one we are looking for, then this information can guide us to reach the target quicker – this information is known as a heuristic. The way heuristics are applied systematically is captured in meta-heuristic search algorithms. The "meta" denotes that these algorithms are generic and can be instantiated differently to different problems. Meta-heuristics often take inspiration from processes observed in nature. For example, there are algorithms mimicking evolutionary processes, swarm intelligence, or chemical reactions. In general they are much more efficient than exhaustive search approaches such that they can be applied to vast search spaces – search spaces as vast as the domain of program inputs are no problem for them.">Search-Based Fuzzing</a>
* <a href="MutationAnalysis.ipynb" title="Mutation Analysis (MutationAnalysis) In the chapter on coverage, we showed how one can identify which parts of the program are executed by a program, and hence get a sense of the effectiveness of a set of test cases in covering the program structure. However, coverage alone may not be the best measure for the effectiveness of a test, as one can have great coverage without ever checking a result for correctness. In this chapter, we introduce another means for assessing the effectiveness of a test suite: After injecting mutations – artificial faults – into the code, we check whether a test suite can detect these artificial faults. The idea is that if it fails to detect such mutations, it will also miss real bugs.">Mutation Analysis</a>
### <a href="03_Syntactical_Fuzzing.ipynb" title="Part III: Syntactical Fuzzing (03_Syntactical_Fuzzing) This part introduces test generation at the syntactical level, that is, composing inputs from language structures.">Part III: Syntactical Fuzzing</a>
* <a href="Grammars.ipynb" title="Fuzzing with Grammars (Grammars) In the chapter on "Mutation-Based Fuzzing", we have seen how to use extra hints – such as sample input files – to speed up test generation. In this chapter, we take this idea one step further, by providing a specification of the legal inputs to a program. Specifying inputs via a grammar allows for very systematic and efficient test generation, in particular for complex input formats. Grammars also serve as the base for configuration fuzzing, API fuzzing, GUI fuzzing, and many more.">Fuzzing with Grammars</a>
* <a href="GrammarFuzzer.ipynb" title="Efficient Grammar Fuzzing (GrammarFuzzer) In the chapter on grammars, we have seen how to use grammars for very effective and efficient testing. In this chapter, we refine the previous string-based algorithm into a tree-based algorithm, which is much faster and allows for much more control over the production of fuzz inputs.">Efficient Grammar Fuzzing</a>
* <a href="GrammarCoverageFuzzer.ipynb" title="Grammar Coverage (GrammarCoverageFuzzer) Producing inputs from grammars gives all possible expansions of a rule the same likelihood. For producing a comprehensive test suite, however, it makes more sense to maximize variety – for instance, by not repeating the same expansions over and over again. In this chapter, we explore how to systematically cover elements of a grammar such that we maximize variety and do not miss out individual elements.">Grammar Coverage</a>
* <a href="Parser.ipynb" title="Parsing Inputs (Parser) In the chapter on Grammars, we discussed how grammars can be used to represent various languages. We also saw how grammars can be used to generate strings of the corresponding language. Grammars can also perform the reverse. That is, given a string, one can decompose the string into its constituent parts that correspond to the parts of grammar used to generate it – the derivation tree of that string. These parts (and parts from other similar strings) can later be recombined using the same grammar to produce new strings.">Parsing Inputs</a>
* <a href="ProbabilisticGrammarFuzzer.ipynb" title="Probabilistic Grammar Fuzzing (ProbabilisticGrammarFuzzer) Let us give grammars even more power by assigning probabilities to individual expansions. This allows us to control how many of each element should be produced, and thus allows us to target our generated tests towards specific functionality. We also show how to learn such probabilities from given sample inputs, and specifically direct our tests towards input features that are uncommon in these samples.">Probabilistic Grammar Fuzzing</a>
* <a href="GeneratorGrammarFuzzer.ipynb" title="Fuzzing with Generators (GeneratorGrammarFuzzer) In this chapter, we show how to extend grammars with functions – pieces of code that get executed during grammar expansion, and that can generate, check, or change elements produced. Adding functions to a grammar allows for very versatile test generation, bringing together the best of grammar generation and programming.">Fuzzing with Generators</a>
* <a href="GreyboxGrammarFuzzer.ipynb" title="Greybox Fuzzing with Grammars (GreyboxGrammarFuzzer) <!-- Previously, we have learned about mutational fuzzing, which generates new inputs by mutating seed inputs. Most mutational fuzzers represent inputs as a sequence of bytes and apply byte-level mutations to this byte sequence. Such byte-level mutations work great for compact file formats with a small number of structural constraints. However, most file formats impose a high-level structure on these byte sequences.">Greybox Fuzzing with Grammars</a>
* <a href="Reducer.ipynb" title="Reducing Failure-Inducing Inputs (Reducer) By construction, fuzzers create inputs that may be hard to read. This causes issues during debugging, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that automatically reduce and simplify failure-inducing inputs to a minimum in order to ease debugging.">Reducing Failure-Inducing Inputs</a>
### <a href="04_Semantical_Fuzzing.ipynb" title="Part IV: Semantical Fuzzing (04_Semantical_Fuzzing) This part introduces test generation techniques that take the semantics of the input into account, notably the behavior of the program that processes the input.">Part IV: Semantical Fuzzing</a>
* <a href="GrammarMiner.ipynb" title="Mining Input Grammars (GrammarMiner) So far, the grammars we have seen have been mostly specified manually – that is, you (or the person knowing the input format) had to design and write a grammar in the first place. While the grammars we have seen so far have been rather simple, creating a grammar for complex inputs can involve quite some effort. In this chapter, we therefore introduce techniques that automatically mine grammars from programs – by executing the programs and observing how they process which parts of the input. In conjunction with a grammar fuzzer, this allows us to 1. take a program, 2. extract its input grammar, and 3. fuzz it with high efficiency and effectiveness, using the concepts in this book.">Mining Input Grammars</a>
* <a href="InformationFlow.ipynb" title="Tracking Information Flow (InformationFlow) We have explored how one could generate better inputs that can penetrate deeper into the program in question. While doing so, we have relied on program crashes to tell us that we have succeeded in finding problems in the program. However, that is rather simplistic. What if the behavior of the program is simply incorrect, but does not lead to a crash? Can one do better?">Tracking Information Flow</a>
* <a href="ConcolicFuzzer.ipynb" title="Concolic Fuzzing (ConcolicFuzzer) We have previously seen how one can use dynamic taints to produce more intelligent test cases than simply looking for program crashes. We have also seen how one can use the taints to update the grammar, and hence focus more on the dangerous methods.">Concolic Fuzzing</a>
* <a href="SymbolicFuzzer.ipynb" title="Symbolic Fuzzing (SymbolicFuzzer) One of the problems with traditional methods of fuzzing is that they fail to exercise all the possible behaviors that a system can have, especially when the input space is large. Quite often the execution of a specific branch of execution may happen only with very specific inputs, which could represent an extremely small fraction of the input space. The traditional fuzzing methods relies on chance to produce inputs they need. However, relying on randomness to generate values that we want is a bad idea when the space to be explored is huge. For example, a function that accepts a string, even if one only considers the first $10$ characters, already has $2^{80}$ possible inputs. If one is looking for a specific string, random generation of values will take a few thousand years even in one of the super computers.">Symbolic Fuzzing</a>
* <a href="DynamicInvariants.ipynb" title="Mining Function Specifications (DynamicInvariants) When testing a program, one not only needs to cover its several behaviors; one also needs to check whether the result is as expected. In this chapter, we introduce a technique that allows us to mine function specifications from a set of given executions, resulting in abstract and formal descriptions of what the function expects and what it delivers.">Mining Function Specifications</a>
### <a href="05_Domain-Specific_Fuzzing.ipynb" title="Part V: Domain-Specific Fuzzing (05_Domain-Specific_Fuzzing) This part discusses test generation for a number of specific domains. For all these domains, we introduce fuzzers that generate inputs as well as miners that analyze the input structure.">Part V: Domain-Specific Fuzzing</a>
* <a href="ConfigurationFuzzer.ipynb" title="Testing Configurations (ConfigurationFuzzer) The behavior of a program is not only governed by its data. The configuration of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically test and cover software configurations. By automatically inferring configuration options, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover combinations of configuration options, quickly detecting unwanted interferences.">Testing Configurations</a>
* <a href="APIFuzzer.ipynb" title="Fuzzing APIs (APIFuzzer) So far, we have always generated system input, i.e. data that the program as a whole obtains via its input channels. However, we can also generate inputs that go directly into individual functions, gaining flexibility and speed in the process. In this chapter, we explore the use of grammars to synthesize code for function calls, which allows you to generate program code that very efficiently invokes functions directly.">Fuzzing APIs</a>
* <a href="Carver.ipynb" title="Carving Unit Tests (Carver) So far, we have always generated system input, i.e. data that the program as a whole obtains via its input channels. If we are interested in testing only a small set of functions, having to go through the system can be very inefficient. This chapter introduces a technique known as carving, which, given a system test, automatically extracts a set of unit tests that replicate the calls seen during the unit test. The key idea is to record such calls such that we can replay them later – as a whole or selectively. On top, we also explore how to synthesize API grammars from carved unit tests; this means that we can synthesize API tests without having to write a grammar at all.">Carving Unit Tests</a>
* <a href="WebFuzzer.ipynb" title="Testing Web Applications (WebFuzzer) In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), notably on Web interfaces. We set up a (vulnerable) Web server and demonstrate how to systematically explore its behavior – first with hand-written grammars, then with grammars automatically inferred from the user interface. We also show how to conduct systematic attacks on these servers, notably with code and SQL injection.">Testing Web Applications</a>
* <a href="GUIFuzzer.ipynb" title="Testing Graphical User Interfaces (GUIFuzzer) In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), abstracting from our previous examples on Web testing. Building on general means to extract user interface elements and to activate them, our techniques generalize to arbitrary graphical user interfaces, from rich Web applications to mobile apps, and systematically explore user interfaces through forms and navigation elements.">Testing Graphical User Interfaces</a>
### <a href="06_Managing_Fuzzing.ipynb" title="Part VI: Managing Fuzzing (06_Managing_Fuzzing) This part discusses how to manage fuzzing in the large.">Part VI: Managing Fuzzing</a>
* <a href="FuzzingInTheLarge.ipynb" title="Fuzzing in the Large (FuzzingInTheLarge) In the past chapters, we have always looked at fuzzing taking place on one machine for a few seconds only. In the real world, however, fuzzers are run on dozens or even thousands of machines; for hours, days and weeks; for one program or dozens of programs. In such contexts, one needs an infrastructure to collect failure data from the individual fuzzer runs, and to aggregate such data in a central repository. In this chapter, we will examine such an infrastructure, the FuzzManager framework from Mozilla.">Fuzzing in the Large</a>
* <a href="WhenToStopFuzzing.ipynb" title="When To Stop Fuzzing (WhenToStopFuzzing) In the past chapters, we have discussed several fuzzing techniques. Knowing what to do is important, but it is also important to know when to stop doing things. In this chapter, we will learn when to stop fuzzing – and use a prominent example for this purpose: The Enigma machine that was used in the second world war by the navy of Nazi Germany to encrypt communications, and how Alan Turing and I.J. Good used fuzzing techniques to crack ciphers for the Naval Enigma machine.">When To Stop Fuzzing</a>
### <a href="99_Appendices.ipynb" title="Appendices (99_Appendices) This part holds notebooks and modules that support other notebooks.">Appendices</a>
* <a href="PrototypingWithPython.ipynb" title="Prototyping with Python (PrototypingWithPython) This is the manuscript of Andreas Zeller's keynote "Coding Effective Testing Tools Within Minutes" at the TAIC PART 2020 conference.">Prototyping with Python</a>
* <a href="ExpectError.ipynb" title="Error Handling (ExpectError) The code in this notebook helps with handling errors. Normally, an error in notebook code causes the execution of the code to stop; while an infinite loop in notebook code causes the notebook to run without end. This notebook provides two classes to help address these concerns.">Error Handling</a>
* <a href="Timer.ipynb" title="Timer (Timer) The code in this notebook helps with measuring time.">Timer</a>
* <a href="ControlFlow.ipynb" title="Control Flow Graph (ControlFlow) The code in this notebook helps with obtaining the control flow graph of python functions.">Control Flow Graph</a>
* <a href="RailroadDiagrams.ipynb" title="Railroad Diagrams (RailroadDiagrams) The code in this notebook helps with drawing syntax-diagrams. It is a (slightly customized) copy of the excellent library from Tab Atkins jr., which unfortunately is not available as a Python package.">Railroad Diagrams</a>
# Random Forest Classifier (RFC)
```
#Importing necessary libraries
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
#Taking the EDA data
with open('../EDA/EDA.pickle', 'rb') as data:
df = pickle.load(data)
#First rows of the dataset
df.head()
#Shape of dataset
df.shape
#Label mapping
label_mapping = {'NEGATIVE': 0, 'NEUTRAL': 1, 'POSITIVE': 2}
#Function for creating train test split
def preprocess_inputs(df):
df = df.copy()
df['label'] = df['label'].replace(label_mapping)
y = df['label'].copy()
X = df.drop('label', axis=1).copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=123)
return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = preprocess_inputs(df)
print('Shape of Training Dataset:',X_train.shape)
print('Shape of Testing Dataset:',X_test.shape)
#Creating a pipeline for the Random Forest Classifier algorithm
pipeline_rfc=make_pipeline(RandomForestClassifier())
%%time
#Fitting the model
best_model=pipeline_rfc.fit(X_train, y_train)
#Prediction
rfc_pred = best_model.predict(X_test)
# Training accuracy (accuracy_score returns a fraction between 0 and 1)
print("The training set accuracy is: {}".format(accuracy_score(y_train, best_model.predict(X_train))))
# Test accuracy
print("The test set accuracy is: {}".format(accuracy_score(y_test, best_model.predict(X_test))))
# Classification report
print("Classification report")
print(classification_report(y_test,rfc_pred))
#Plotting the confusion matrix
conf_matrix = confusion_matrix(y_test, rfc_pred)
plt.figure(figsize=(12.8,6))
sns.heatmap(conf_matrix,
annot=True,
cmap="Blues")
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.title('Confusion matrix')
plt.savefig("../Images/ConfusionMatrix_RFC.png")
#Creating dictionary for storing the accuracy details
d = {
'Model': 'Random Forest Classifier',
'Training Set Accuracy': accuracy_score(y_train, best_model.predict(X_train)),
'Test Set Accuracy': accuracy_score(y_test, best_model.predict(X_test))
}
#Creating Data Frame
df_models_rfc = pd.DataFrame(d, index=[0])
df_models_rfc
#Creating pickle files for further use
with open('../Models/best_rfc.pickle', 'wb') as output:
pickle.dump(best_model, output)
with open('../Models/df_models_rfc.pickle', 'wb') as output:
pickle.dump(df_models_rfc, output)
```
# Introduction to climlab and 1D grey radiation models
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
import climlab
```
# Validate climlab against analytical solution for 2-layer atmosphere
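For reference, a sketch of the analytical result being validated (a standard derivation for the 2-layer grey atmosphere with longwave absorptivity $\epsilon$ in each layer, stated here for orientation rather than taken from climlab itself): the outgoing longwave radiation is
$$\mathrm{OLR} = (1-\epsilon)^2\,\sigma T_s^4 + \epsilon(1-\epsilon)\,\sigma T_0^4 + \epsilon\,\sigma T_1^4$$
where $T_s$ is the surface temperature and $T_0$, $T_1$ are the lower- and upper-layer temperatures. With $T_s = 288$ K, $T_0 = 275$ K, $T_1 = 230$ K and $\epsilon = 0.58377$ (the values used below), this gives $\mathrm{OLR} \approx 239$ W/m2.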
```
# Test in a 2-layer atmosphere
col = climlab.GreyRadiationModel(num_lev=2)
print(col)
col.subprocess
col.state
col.Ts
col.Ts[:] = 288.
col.Tatm[:] = np.array([275., 230.])
col.state
LW = col.subprocess['LW']
print (LW)
LW.absorptivity
LW.absorptivity = 0.58377
LW.absorptivity
col.diagnostics
col.compute_diagnostics()
col.diagnostics
col.diagnostics['OLR']
col.state
col.step_forward()
col.state
# integrate out to radiative equilibrium
col.integrate_years(2.)
col.diagnostics['ASR'] - col.diagnostics['OLR']
# Compare these temperatures against our analytical solutions for radiative equilibrium
col.state
```
# Get observed annual, global mean temperature profile
```
ncep_url = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.derived/"
ncep_air = nc.Dataset( ncep_url + "pressure/air.mon.1981-2010.ltm.nc" )
level = ncep_air.variables['level'][:]
lat = ncep_air.variables['lat'][:]
zstar = np.log(level/1000)
Tzon = np.mean(ncep_air.variables['air'][:],axis=(0,3))
Tglobal = np.average( Tzon , weights=np.cos(np.deg2rad(lat)), axis=1) + climlab.constants.tempCtoK
fig = plt.figure( figsize=(8,6) )
ax = fig.add_subplot(111)
ax.plot( Tglobal , level )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_title('Global, annual mean sounding from NCEP Reanalysis', fontsize = 24)
ax.grid()
```
# Create 30-layer model with observed temperatures
```
# initialize a grey radiation model with 30 levels
col = climlab.GreyRadiationModel()
print (col)
# interpolate to 30 evenly spaced pressure levels
lev = col.lev
Tinterp = np.flipud(np.interp(np.flipud(lev), np.flipud(level), np.flipud(Tglobal)))
Tinterp
# Initialize model with observed temperatures
col.Ts[:] = Tglobal[0]
col.Tatm[:] = Tinterp
def plot_sounding(collist):
color_cycle=['r', 'g', 'b', 'y']
# col is either a column model object or a list of column model objects
if isinstance(collist, climlab.Process):
# make a list with a single item
collist = [collist]
fig = plt.figure()
ax = fig.add_subplot(111)
for i, col in enumerate(collist):
ax.plot(col.Tatm, col.lev, color=color_cycle[i])
ax.plot(col.Ts, climlab.constants.ps, 'o', markersize=12, color=color_cycle[i])
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)')
ax.set_ylabel('Pressure (hPa)')
ax.grid()
return ax
plot_sounding(col)
```
# Tune absorptivity to get observed OLR
```
col.compute_diagnostics()
col.diagnostics['OLR']
# Need to tune absorptivity to get OLR = 239
epsarray = np.linspace(0.01, 0.1, 100)
OLRarray = np.zeros_like(epsarray)
for i in range(epsarray.size):
col.subprocess['LW'].absorptivity = epsarray[i]
col.compute_diagnostics()
OLRarray[i] = col.diagnostics['OLR']
plt.plot(epsarray, OLRarray)
plt.grid()
def OLRanom(eps):
col.subprocess['LW'].absorptivity = eps
col.compute_diagnostics()
return col.diagnostics['OLR'] - 239.
OLRanom(0.02)
# Use numerical root-finding to get the equilibria
from scipy.optimize import brentq
# brentq is a root-finding function
# Need to give it a function and two end-points
# It will look for a zero of the function between those end-points
eps = brentq(OLRanom, 0.01, 0.1)
print (eps)
col.subprocess['LW'].absorptivity = eps
col.subprocess['LW'].absorptivity
col.compute_diagnostics()
col.diagnostics['OLR']
```
# Compute radiative forcing for a 2% increase in absorptivity
```
col2 = climlab.process_like(col)
print (col2)
col2.subprocess['LW'].absorptivity *= 1.02
col2.subprocess['LW'].absorptivity
col2.compute_diagnostics()
col2.diagnostics['OLR']
col2.Ts - col.Ts
col2.diagnostics['OLR'] - col.diagnostics['OLR']
RF = -(col2.diagnostics['OLR'] - col.diagnostics['OLR'])
print ('The radiative forcing is %f W/m2.' %RF)
```
# Radiative equilibrium in the 30-layer model
```
re = climlab.process_like(col)
re.integrate_years(2.)
# Check for energy balance
re.diagnostics['ASR'] - re.diagnostics['OLR']
plot_sounding([col, re])
```
# Radiative-Convective equilibrium in the 30-layer model
```
rce = climlab.RadiativeConvectiveModel(adj_lapse_rate=6.)
print (rce)
rce.subprocess['LW'].absorptivity = eps
rce.integrate_years(2.)
# Check for energy balance
rce.diagnostics['ASR'] - rce.diagnostics['OLR']
plot_sounding([col, rce])
```
# Greenhouse warming in RCE model
```
# Another 2% increase in absorptivity (matching the factor of 1.02 below)
rce2 = climlab.process_like(rce)
rce2.subprocess['LW'].absorptivity *= 1.02
rce2.compute_diagnostics()
RF = -(rce2.diagnostics['OLR'] - rce.diagnostics['OLR'])
print ('The radiative forcing is %f W/m2.' %RF)
# Timestep forward, and then check for energy balance
rce2.integrate_years(2.)
rce2.diagnostics['ASR'] - rce2.diagnostics['OLR']
plot_sounding([col, rce, rce2])
ECS = rce2.Ts - rce.Ts
print ('Equilibrium climate sensitivity is %f K.' %ECS)
# Calculate the net climate feedback
# This is the change in TOA flux per degree warming that was necessary to get back to equilibrium.
feedback = -RF/ECS
print ('The net feedback is %f W/m2/K' %feedback )
# could calculate a Planck feedback explicitly...
# What would the TOA flux change be if the warming were perfectly uniform?
rce3 = climlab.process_like(rce)
rce3.subprocess['LW'].absorptivity *= 1.02
rce3.Ts += ECS
rce3.Tatm += ECS
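# (Assumed continuation, not in the original notebook: compute the implied
#  TOA flux change from this uniform warming to answer the question above.)
rce3.compute_diagnostics()
print ('TOA flux change for uniform warming: %f W/m2.' % (rce3.diagnostics['ASR'] - rce3.diagnostics['OLR']))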
```
| github_jupyter |
```
import math
import time
import util
import torch
import logging
import numpy as np
from torch import nn
import torch.optim as optim
from util import DataLoaderS
from model import *
from model_time_shift import A2GCN
logging.basicConfig(level=logging.INFO,  # log level printed to the console
                    filename='logging_ablation.txt',
                    filemode='a',  # 'w' rewrites the log file on every run, overwriting previous logs;
                                   # 'a' (append, the default if omitted) keeps them
                    format='%(asctime)s : %(message)s',
                    )
logging.info('\n\n\n*********************************start*************************\n\n\n')
device = torch.device('cuda')
num_nodes = 8
sparse = 1
model = A2GCN(num_nodes, in_T = 12, in_dim = 1,out_T=1,out_dim=1,
predefined_G=None, \
channel = 32, sparse = int(num_nodes*sparse),gnn_layers=2,dropout=0.3,)
base_params = list(map(id,model.latent_graph.parameters()))
other = filter(lambda x:id(x) not in base_params, model.parameters())
optimizer = optim.Adam([
{'params':other, 'lr':1e-3, 'weight_decay':1e-4},
{'params':model.latent_graph.parameters(),'lr':1e-2},
])
base_params
# Currently running the experiment with weight decay removed
##solar 137
##traffic 862
##electricity 321
###exchange-rate 8
Data = DataLoaderS('./multivariate-time-series-data/exchange_rate/exchange_rate.txt', 0.6, 0.2, device, horizon=6,window=24*7,normalize = 2 )
num_nodes = 8
sparse = 1
layers = 2       # assumed to match gnn_layers=2 from the model defined earlier
dropout = 0.3    # assumed to match the earlier definition
supports = None  # no predefined graph
model = A2GCN(num_nodes, in_T=12, in_dim=1, out_T=1, out_dim=1,
              predefined_G=supports,
              channel=32, sparse=int(num_nodes*sparse), gnn_layers=layers,
              dropout=dropout, device=device,)
# Leftover call from a time-shift experiment; its result was never assigned,
# so it is commented out here:
# t_shift_net(device, num_nodes=num_nodes, T=24*7, delta_T=24*7, dropout=0.3, supports=None,
#             in_dim=1, out_dim=1, residual_channels=16,
#             skip_channels=256, end_channels=512, layers=1, sparse=sparse,)
model = model.to(device)
optimizer = optim.Adam([
{'params':filter(lambda x:id(x) not in [id(model.new_supports),], model.parameters()), 'lr':1e-3, 'weight_decay':1e-4},
# {'params':model.nodevec1,'lr':1e-2},
# {'params':model.nodevec2,'lr':1e-2},
{'params':model.new_supports,'lr':1e-2},
])
evaluateL2 = nn.MSELoss(reduction='sum').to(device)  # size_average=False is deprecated; 'sum' is equivalent
evaluateL1 = nn.L1Loss(reduction='sum').to(device)
logging.info('\n\n\n*********************************start*************************\n\n\n')
def evaluate(data, X, Y, model, evaluateL2, evaluateL1, batch_size):
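# Evaluates three standard multivariate-forecasting metrics on de-normalized
# (rescaled) outputs: relative squared error (RSE), relative absolute error
# (RAE), and the mean per-variable correlation between predictions and targets.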
model.eval()
total_loss = 0
total_loss_l1 = 0
n_samples = 0
predict = None
test = None
for X, Y in data.get_batches(X, Y, batch_size, False):
X = X.unsqueeze(dim = 1).permute(0,1,3,2)
with torch.no_grad():
output = model(X)
output = torch.squeeze(output)
if len(output.shape)==1:
output = output.unsqueeze(dim=0)
if predict is None:
predict = output
test = Y
else:
predict = torch.cat((predict, output))
test = torch.cat((test, Y))
scale = data.scale.expand(output.size(0), data.m)
total_loss += evaluateL2(output * scale, Y * scale).item()
total_loss_l1 += evaluateL1(output * scale, Y * scale).item()
n_samples += (output.size(0) * data.m)
rse = math.sqrt(total_loss / n_samples) / data.rse
rae = (total_loss_l1 / n_samples) / data.rae
predict = predict.data.cpu().numpy()
Ytest = test.data.cpu().numpy()
sigma_p = (predict).std(axis=0)
sigma_g = (Ytest).std(axis=0)
mean_p = predict.mean(axis=0)
mean_g = Ytest.mean(axis=0)
index = (sigma_g != 0)
correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)
correlation = (correlation[index]).mean()
return rse, rae, correlation
for ep in range(60):
print('*******{}*******'.format(ep))
logging.info('*******{}*******'.format(ep))
losses = []
model.train()
start = time.time()
for x,y in Data.get_batches(Data.train[0], Data.train[1],4,True):
optimizer.zero_grad()
x = x.unsqueeze(dim = 1).permute(0,1,3,2)
out = model(x)
out = out.squeeze()
scale = Data.scale.unsqueeze(dim = 0)
loss = evaluateL1(out*scale,y*scale)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
losses.append(loss.item())
if len(losses)%50 == 0:
print(np.mean(losses))
now = time.time()
print('train epoch time: {:.2f} s'.format(now - start))
logging.info('train epoch time: {:.2f} s'.format(now - start))
r1 = evaluate(Data,Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1,64)
r2 = evaluate(Data,Data.test[0], Data.test[1], model, evaluateL2, evaluateL1,64)
print('inference time: {:.2f} s'.format(0.5*(time.time() - now)))
logging.info(' '.join([str(i) for i in r1+r2]))
print(r1,r2)
```
| github_jupyter |
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
- Runs on CPU or GPU (if available)
# Model Zoo -- Softmax Regression
Implementation of softmax regression (multinomial logistic regression).
## Imports
```
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
```
## Settings and Dataset
```
##########################
### SETTINGS
##########################
# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Hyperparameters
random_seed = 123
learning_rate = 0.1
num_epochs = 10
batch_size = 256
# Architecture
num_features = 784
num_classes = 10
##########################
### MNIST DATASET
##########################
train_dataset = datasets.MNIST(root='data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = datasets.MNIST(root='data',
train=False,
transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Checking the dataset
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
break
##########################
### MODEL
##########################
class SoftmaxRegression(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(SoftmaxRegression, self).__init__()
self.linear = torch.nn.Linear(num_features, num_classes)
self.linear.weight.detach().zero_()
self.linear.bias.detach().zero_()
def forward(self, x):
logits = self.linear(x)
probas = F.softmax(logits, dim=1)
return logits, probas
model = SoftmaxRegression(num_features=num_features,
num_classes=num_classes)
model.to(device)
##########################
### COST AND OPTIMIZER
##########################
# note that the PyTorch implementation of
# CrossEntropyLoss works with logits, not
# probabilities
cost_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Manual seed for deterministic data loader
torch.manual_seed(random_seed)
def compute_accuracy(model, data_loader):
correct_pred, num_examples = 0, 0
for features, targets in data_loader:
features = features.view(-1, 28*28).to(device)
targets = targets.to(device)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float() / num_examples * 100
for epoch in range(num_epochs):
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.view(-1, 28*28).to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = cost_fn(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(train_dataset)//batch_size, cost))
print('Epoch: %03d/%03d training accuracy: %.2f%%' % (
epoch+1, num_epochs,
compute_accuracy(model, train_loader)))
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
```
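One detail worth underlining from the cost-function comment above: `CrossEntropyLoss` expects raw logits and applies the log-softmax internally. A quick, self-contained sanity check (not part of the original notebook; the tensors are made up for illustration):
```
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)           # hypothetical batch of raw scores
targets = torch.tensor([1, 0, 3, 9])  # hypothetical class labels

ce = F.cross_entropy(logits, targets)
nll = F.nll_loss(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))  # True: cross-entropy = log-softmax + NLL
```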
| github_jupyter |
# Title Generation using Recurrent Neural Networks
I never know what I should title most things I have written. I hope that by using a corpus of titles, recurrent neural networks (RNNs) can write my titles for me.
I thought a fitting title to generate would be something within Machine Learning, so I used [Publish or Perish](https://harzing.com/resources/publish-or-perish) to fetch any title from Google Scholar associated with *Machine Learning*. It retrieved 950 titles, which you can view [here](https://gist.github.com/AngusTheMack/defadcbc503e2d625720661e9893ff0a).
If you want to use this to generate your own titles (or any text whatsoever), just change the `url` to download the data from, or the `save_location` to where your data is stored.
## Titles Generated
While playing around with the implementations below I was able to generate some very cool sounding titles:
* Function Classification Using Machine Learning Techniques
* Bayesian Approximation of Effective Machine Learning
* Data Classification With Machine Learning
* Computer Multi-agent Boltzmann Machine Learning
* Machine Learning Approaches for Visual Classification
* New Machine Learning for Astrophysics
* Neural Machine Learning for Medical Imaging
* Deep Similarity Learning Filters
## Implementations
I wanted to compare results between somewhat vanilla RNN implementations and a Long Short Term Memory (LSTM) model. To that end I used a character level RNN, a word level RNN and an LSTM. This was done mainly to try and better understand the underlying concepts in RNNs, and what differentiates them from LSTMs.
I used [Andrej Karpathy's blog](https://karpathy.github.io/) post [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) as my starting point - and utilised his amazing [112 line char level RNN](https://gist.github.com/karpathy/d4dee566867f8291f086) implemented in vanilla python.
After that I used [Denny Britz's](https://github.com/dennybritz) [word level RNN](https://github.com/dennybritz/rnn-tutorial-rnnlm/blob/master/RNNLM.ipynb) from his series of [blog posts](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-2-implementing-a-language-model-rnn-with-python-numpy-and-theano/) on the topic.
Finally, I used [Shivam Bansal's](https://www.kaggle.com/shivamb) [Beginners Guide to Text Generation using LSTMs](https://www.kaggle.com/shivamb/beginners-guide-to-text-generation-using-lstms/notebook) for the LSTM implementation.
```
import numpy as np
import matplotlib.pyplot as plt
import string
import urllib.request
import pickle
%matplotlib inline
def download_data(url, save_location):
"""
Download data to be used as corpus
"""
print('Beginning file download...')
urllib.request.urlretrieve(url,save_location)
print("Downloaded file, saving to:",save_location)
def load_data(save_location):
"""
Load data from Textfile
"""
file = open(save_location,"r")
data = file.read()
return data
def avg_char_per_title(data):
"""
Calculate the average number of chars in a title for the sequence length
"""
lines = data.split("\n")
line_lengths = np.zeros(len(lines))
for i,line in enumerate(lines):
line_lengths[i] = len(line)
return np.average(line_lengths)
def save_object(obj, filename):
"""
Save an object - used to save models
"""
with open(filename, 'wb') as output:
pickle.dump(obj, output, -1)
# Change the URL to whatever text you want to train with
url = "https://gist.githubusercontent.com/AngusTheMack/defadcbc503e2d625720661e9893ff0a/raw/bb978a5ef025ff104009ab8139da4a0b7367992f/Titles.txt"
# Save Location will be used to load the data in
save_location = "Titles.txt" # either the name of the file downloaded with the URL above, or the location of your own file to load in
# Downloads the data, and loads it in
download_data(url,save_location)
data = load_data(save_location)
# Print first 100 characters of the data
print(data[:100])
def clean_text(data):
"""
Removes non essential characters in corpus of text
"""
data = "".join(v for v in data if v not in string.punctuation).lower()
data = data.encode("utf8").decode("ascii",'ignore')
return data
# You don't need to clean, but it can make things simpler
cleaned = clean_text(data)
print(cleaned[:100])
def unique_chars(data):
"""
Get all unique Characters in the Dataset
"""
return list(set(data))
# Some info about the data
chars = unique_chars(cleaned)
data_size, input_size = len(cleaned), len(chars)
print("Data has %d characters, %d of them are unique" % (data_size, input_size))
def tokenize_chars(chars):
"""
Create dictionaries to make it easy to convert from tokens to chars
"""
char_to_idx = {ch:i for i,ch in enumerate(chars)}
idx_to_char = {i:ch for i,ch in enumerate(chars)}
return char_to_idx, idx_to_char
# Create dictionaries, and display example using 11 chars
char_to_idx, idx_to_char = tokenize_chars(chars)
first_title = cleaned[:11]
print("{0:<2}|{1:<2}".format('Character', 'Index'))
print("________________")
for i in range(len(first_title)):
char_index = char_to_idx[first_title[i]]
print("{0:<9}|{a:d}".format(idx_to_char[char_index], a=char_to_idx[first_title[i]]))
```
# Char Level RNN
Created by Andrej Karpathy, available here: [here](https://gist.github.com/karpathy/d4dee566867f8291f086)
```
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
Ever so slightly modified to be used with the above code
"""
data = cleaned
chars = unique_chars(cleaned)
data_size, vocab_size = len(cleaned), len(chars)
# hyperparameters
hidden_size = 100 # size of hidden layer of neurons
seq_length = 25 # number of steps to unroll the RNN for
learning_rate = 1e-1
# model parameters
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
"""
inputs,targets are both list of integers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(hprev)
loss = 0
# forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(Whh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
y = np.dot(Why, h) + by
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
n, p = 0, 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
while True:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p+seq_length+1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size,1)) # reset RNN memory
p = 0 # go from start of data
inputs = [char_to_idx[ch] for ch in data[p:p+seq_length]]
targets = [char_to_idx[ch] for ch in data[p+1:p+seq_length+1]]
# sample from the model now and then
if n % 100 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(idx_to_char[ix] for ix in sample_ix)
print('----\n %s \n----' % (txt, ))
# forward seq_length characters through the net and fetch gradient
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0: print('iter %d, loss: %f' % (n, smooth_loss)) # print progress
# perform parameter update with Adagrad
for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
p += seq_length # move data pointer
n += 1 # iteration counter
```
I stopped the above cell early, as it takes quite a while to generate meaningful text - and sometimes it doesn't seem to converge at all. Here is the output from an implementation I had running for a day or two, which got its loss down to about 16.
```
Oxprensur Machine Learning Based Comparison Imagepredalyic Problem A Machine Learning Shidenticing With Stomement Machine
Genetional Translingl Data O
Tby Of Panadigunoous Of Machine Learning Approach
Machine Learning Approaches And Ancerxards
Applications
Ortamenopforcher Image Of And Comparison Hen
Bytesca For Dete
Semapt Recognition
Neural Ontropicaty Stvediction
Thance Resules Of Machinelearning Based And Machine Learning
Ma
Rward Algorithms
Thek Support Vector Machine Learning Toces
Survey
Subperai Scalistose Machine Learning
Classer Ald Optimization
Spatsimentar Scanisys
Twarites In Machine Learning For Algorithms
Realtime S Forildetion For Support Vector Machine Learning Techniques For The Laond Machine Learning For S
Syppbys
Mumporaty Researchon Using Of Temporing
Entruasian Designs Spevied Alghid Machine Learning
Clesit A Dizen Interaninergopers
Machine Learning
D
Operpne Mencal Work2Bated Athito Mativing Thootimic Optoraty For Machine Learning Methodent Methods In Dete Detection Of The Ancherch Of Contratecompu
Hacingar Proborion
Machine Learning In Metric Learning Transif Trassing An Learning
Machine Learning Audomement Machine Learning Of Machine Learning T
Ttymane Learning Coneftrand An Application For Mmfes On Undersec Auport Text Machine Learning A Machine Learning With Stalsaby Data Misuse Contronimic
Rsenticing Machineleseratigg
Machinelearning Of Vector
Machine Learning
Hungersing On Machine Learning And Activity
Approach To Trugbal Machine Learni
Rcemative Learning
Machine Learning And Compilianc User Introppshibution Of Brain Berial Distoneer Machine Learning
Discovery Descnessow Of Ant Seqmen
Oventicing Using Recognstimessing Practical Frainetation
Mesticabily For Parxam Experimaphitist Besk Coxican
Machine Learning Bos Automated Machine Le
Fxamentle Image Of Machine Learning Gave Trapean Schemass Of Machine Learning Of Methods Inty On Combinion Gane Technical Deabficimation Classaletrati
Esintiafforcemental Nerkase Deterabe Optimization Agversitoraling
A For Decision Techniques And Optimization For Usey In Machine Learning Corsed Machi
Onedential Machine Learning
Detection
Drepoutivelearning Machine Learning
Computtess Design Re6Aition To By Intempregressir Tomation
Suportiva Contere
Raph Incrotelaxics Ylame Tring Code
Anemoriomative Reperimity In Paraller
Munt Langouupmi Plediction Of Machine Learning
Predicting Prowibley Increman
Ecosting Machine Learning
Predict Learning And Smanced
Machine Learning
Data With Machine Learning Toateraby Ougcing Word Feature Ussifbees
Jachi Elar
Dations
Analysis Of Liagn Twictite Classification
Patferetistic Prospe Identificies Clamngenoun
Progmaris
Machine Learning For Anpreaching Methoduntac
Ocion Ad Applisition Reclasy Envinids
Quantsys A Otsum Mazining A Machine Learning
Machine Learning
Machine Learning
Extraction
Machine Learning Appro
Iches Using Machine Learning Pprssmase To Machine Learning Approach To Filteral Progrom Om Feremble Identifica Optiman Enviroptimization Of The Use In
```
As you can see, they are generally quite nonsensical. Although, the simple RNN does latch onto a few words that it has learned based on character sequences alone, which is really cool! It has basically learned a tiny and very focused bit of the English language.
# Word Level RNN
The second implementation uses [Denny Britz's word level model](https://github.com/dennybritz/rnn-tutorial-rnnlm)
```
import csv
import itertools
import operator
import nltk
import sys
from datetime import datetime
# Chops the stream of titles into an array of titles based on new line characters
titles = cleaned.split("\n")
titles[0]
unknown_token = "UNKNOWN_TOKEN"
title_start_token = "SENTENCE_START"
title_end_token = "SENTENCE_END"
# Add the start and end token to the title
titles = ["%s %s %s" % (title_start_token, x, title_end_token) for x in titles]
# Ensure that nltk has the punkt package
nltk.download('punkt')
tokenized_titles = [nltk.word_tokenize(t) for t in titles]
word_freq = nltk.FreqDist(itertools.chain(*tokenized_titles))
print("Found %d unique words tokens." % len(word_freq.items()))
vocabulary_size = 2000  # len(word_freq.items()) would use the full vocabulary
vocab = word_freq.most_common(vocabulary_size-1)
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_titles):
tokenized_titles[i] = [w if w in word_to_index else unknown_token for w in sent]
print("\nExample sentence: '%s'" % titles[0])
print("\nExample sentence after Pre-processing: '%s'" % tokenized_titles[0])
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_titles])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_titles])
# Print training data example
x_example, y_example = X_train[17], y_train[17]
print("x:\n%s\n%s" % (" ".join([index_to_word[x] for x in x_example]), x_example))
print("\ny:\n%s\n%s" % (" ".join([index_to_word[x] for x in y_example]), y_example))
def softmax(x):
xt = np.exp(x - np.max(x))
return xt / np.sum(xt)
class RNNNumpy:
def __init__(self, word_dim, hidden_dim=100, bptt_truncate=4):
# Assign instance variables
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.bptt_truncate = bptt_truncate
# Randomly initialize the network parameters
self.U = np.random.uniform(-np.sqrt(1./word_dim), np.sqrt(1./word_dim), (hidden_dim, word_dim))
self.V = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))
self.W = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (hidden_dim, hidden_dim))
def forward_propagation(self, x):
# The total number of time steps
T = len(x)
# During forward propagation we save all hidden states in s because need them later.
# We add one additional element for the initial hidden, which we set to 0
s = np.zeros((T + 1, self.hidden_dim))
s[-1] = np.zeros(self.hidden_dim)
# The outputs at each time step. Again, we save them for later.
o = np.zeros((T, self.word_dim))
# For each time step...
for t in np.arange(T):
# Note that we are indxing U by x[t]. This is the same as multiplying U with a one-hot vector.
s[t] = np.tanh(self.U[:,x[t]] + self.W.dot(s[t-1]))
o[t] = softmax(self.V.dot(s[t]))
return [o, s]
RNNNumpy.forward_propagation = forward_propagation
def predict(self, x):
# Perform forward propagation and return index of the highest score
o, s = self.forward_propagation(x)
return np.argmax(o, axis=1)
RNNNumpy.predict = predict
np.random.seed(10)
model = RNNNumpy(vocabulary_size)
o, s = model.forward_propagation(X_train[10])
print(o.shape)
print(o)
predictions = model.predict(X_train[10])
print(predictions.shape)
print(predictions)
def calculate_total_loss(self, x, y):
L = 0
# For each sentence...
for i in np.arange(len(y)):
o, s = self.forward_propagation(x[i])
# We only care about our prediction of the "correct" words
correct_word_predictions = o[np.arange(len(y[i])), y[i]]
# Add to the loss based on how off we were
L += -1 * np.sum(np.log(correct_word_predictions))
return L
def calculate_loss(self, x, y):
# Divide the total loss by the number of training examples
    N = np.sum([len(y_i) for y_i in y])  # list comprehension avoids the deprecated generator form
return self.calculate_total_loss(x,y)/N
RNNNumpy.calculate_total_loss = calculate_total_loss
RNNNumpy.calculate_loss = calculate_loss
# Limit to 1000 examples to save time
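# Sanity check (added note): with random weights, each of the C = vocabulary_size
# words is predicted with probability roughly 1/C, so the expected per-word
# cross-entropy is log(C) = log(2000) ~ 7.6. The actual loss should be close.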
print("Expected Loss for random predictions: %f" % np.log(vocabulary_size))
print("Actual loss: %f" % model.calculate_loss(X_train[:1000], y_train[:1000]))
def bptt(self, x, y):
T = len(y)
# Perform forward propagation
o, s = self.forward_propagation(x)
# We accumulate the gradients in these variables
dLdU = np.zeros(self.U.shape)
dLdV = np.zeros(self.V.shape)
dLdW = np.zeros(self.W.shape)
delta_o = o
delta_o[np.arange(len(y)), y] -= 1.
# For each output backwards...
for t in np.arange(T)[::-1]:
dLdV += np.outer(delta_o[t], s[t].T)
# Initial delta calculation
delta_t = self.V.T.dot(delta_o[t]) * (1 - (s[t] ** 2))
# Backpropagation through time (for at most self.bptt_truncate steps)
for bptt_step in np.arange(max(0, t-self.bptt_truncate), t+1)[::-1]:
# print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
dLdW += np.outer(delta_t, s[bptt_step-1])
dLdU[:,x[bptt_step]] += delta_t
# Update delta for next step
delta_t = self.W.T.dot(delta_t) * (1 - s[bptt_step-1] ** 2)
return [dLdU, dLdV, dLdW]
RNNNumpy.bptt = bptt
def gradient_check(self, x, y, h=0.001, error_threshold=0.01):
# Calculate the gradients using backpropagation. We want to check if these are correct.
bptt_gradients = self.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['U', 'V', 'W']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the mode, e.g. model.W
parameter = operator.attrgetter(pname)(self)
print("Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape)))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
gradplus = self.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
gradminus = self.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
# Reset parameter to original value
parameter[ix] = original_value
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print("Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix))
print("+h Loss: %f" % gradplus)
print("-h Loss: %f" % gradminus)
print("Estimated_gradient: %f" % estimated_gradient)
print("Backpropagation gradient: %f" % backprop_gradient)
print("Relative Error: %f" % relative_error)
return
it.iternext()
print("Gradient check for parameter %s passed." % (pname))
RNNNumpy.gradient_check = gradient_check
# To avoid performing millions of expensive calculations we use a smaller vocabulary size for checking.
grad_check_vocab_size = 100
np.random.seed(10)
word_model = RNNNumpy(grad_check_vocab_size, 10, bptt_truncate=1000)
word_model.gradient_check([0,1,2,3], [1,2,3,4])
# Performs one step of SGD.
def numpy_sdg_step(self, x, y, learning_rate):
# Calculate the gradients
dLdU, dLdV, dLdW = self.bptt(x, y)
# Change parameters according to gradients and learning rate
self.U -= learning_rate * dLdU
self.V -= learning_rate * dLdV
self.W -= learning_rate * dLdW
RNNNumpy.sgd_step = numpy_sdg_step
# Outer SGD Loop
# - model: The RNN model instance
# - X_train: The training data set
# - y_train: The training data labels
# - learning_rate: Initial learning rate for SGD
# - nepoch: Number of times to iterate through the complete dataset
# - evaluate_loss_after: Evaluate the loss after this many epochs
def train_with_sgd(model, X_train, y_train, learning_rate=0.005, nepoch=100, evaluate_loss_after=5):
# We keep track of the losses so we can plot them later
losses = []
num_examples_seen = 0
for epoch in range(nepoch):
# Optionally evaluate the loss
if (epoch % evaluate_loss_after == 0):
loss = model.calculate_loss(X_train, y_train)
losses.append((num_examples_seen, loss))
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s: Loss after num_examples_seen=%d epoch=%d: %f" % (time, num_examples_seen, epoch, loss))
# Adjust the learning rate if loss increases
if (len(losses) > 1 and losses[-1][1] > losses[-2][1]):
learning_rate = learning_rate * 0.5
print("Setting learning rate to %f" % learning_rate)
sys.stdout.flush()
# For each training example...
for i in range(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate)
num_examples_seen += 1
np.random.seed(10)
word_model = RNNNumpy(vocabulary_size)
%timeit word_model.sgd_step(X_train[10], y_train[10], 0.005)
np.random.seed(10)
model = RNNNumpy(vocabulary_size)
losses = train_with_sgd(model, X_train[:1000], y_train[:1000], nepoch=100, evaluate_loss_after=1)
def generate_sentence(model):
# We start the sentence with the start token
new_sentence = [word_to_index[title_start_token]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[title_end_token]:
next_word_probs = model.forward_propagation(new_sentence)
#print(next_word_probs[0][-1])
#print(max(next_word_probs[0][-1]))
sampled_word = word_to_index[unknown_token]
# We don't want to sample unknown words
while sampled_word == word_to_index[unknown_token]:
samples = np.random.multinomial(1, next_word_probs[0][-1])
sampled_word = np.argmax(samples)
new_sentence.append(sampled_word)
sentence_str = [index_to_word[x] for x in new_sentence[1:-1]]
return sentence_str
num_sentences = 15
senten_min_length = 5
for i in range(num_sentences):
sent = []
# We want long sentences, not sentences with one or two words
while len(sent) < senten_min_length:
sent = generate_sentence(model)
print(" ".join(sent).title())
```
# LSTM
This is the [Beginners guide to text generation with LSTM](https://www.kaggle.com/shivamb/beginners-guide-to-text-generation-using-lstms) implementation
```
import tensorflow as tf
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.preprocessing.text import Tokenizer
from keras.callbacks import EarlyStopping
from keras.models import Sequential
import keras.utils as ku
from tensorflow import set_random_seed
from numpy.random import seed
set_random_seed(2)
seed(1)
import os
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter(action='ignore', category=FutureWarning)
corpus = cleaned.split("\n")
print(corpus[:10])
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
## tokenization
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
## convert data to sequence of tokens
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
input_sequences.append(n_gram_sequence)
return input_sequences, total_words
inp_sequences, total_words = get_sequence_of_tokens(corpus)
print(total_words)
inp_sequences[:10]
def generate_padded_sequences(input_sequences):
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
predictors, label = input_sequences[:,:-1],input_sequences[:,-1]
label = ku.to_categorical(label, num_classes=total_words)
return predictors, label, max_sequence_len
predictors, label, max_sequence_len = generate_padded_sequences(inp_sequences)
print(max_sequence_len)
def create_model(max_sequence_len, total_words):
input_len = max_sequence_len - 1
model = Sequential()
# Add Input Embedding Layer
model.add(Embedding(total_words, 10, input_length=input_len))
# Add Hidden Layer 1 - LSTM Layer
model.add(LSTM(100))
model.add(Dropout(0.1))
# Add Output Layer
model.add(Dense(total_words, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
lstm_model = create_model(max_sequence_len, total_words)
lstm_model.summary()
lstm_model.fit(predictors, label, epochs=100, verbose=5)
def generate_text(seed_text, next_words, model, max_sequence_len):
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
predicted = model.predict_classes(token_list, verbose=0)
output_word = ""
for word,index in tokenizer.word_index.items():
if index == predicted:
output_word = word
break
seed_text += " "+output_word
return seed_text.title()
print (generate_text("", 5, lstm_model, max_sequence_len))
print (generate_text("euclidean", 4, lstm_model, max_sequence_len))
print (generate_text("generative", 5, lstm_model, max_sequence_len))
print (generate_text("ground breaking", 5, lstm_model, max_sequence_len))
print (generate_text("new", 4, lstm_model, max_sequence_len))
print (generate_text("understanding", 5, lstm_model, max_sequence_len))
print (generate_text("long short term memory", 6, lstm_model, max_sequence_len))
print (generate_text("LSTM", 6, lstm_model, max_sequence_len))
print (generate_text("a", 5, lstm_model, max_sequence_len))
print (generate_text("anomaly", 5, lstm_model, max_sequence_len))
print (generate_text("data", 7, lstm_model, max_sequence_len))
print (generate_text("designing", 7, lstm_model, max_sequence_len))
print (generate_text("reinforcement", 7, lstm_model, max_sequence_len))
```
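A note on the design: `predict_classes` always takes the most likely next token, so for a fixed seed the LSTM generates the same title every time. A small variant (a sketch, not from the original kernel; it reuses `tokenizer`, `pad_sequences` and `np` from the cells above) that samples from the softmax with a temperature parameter would trade some coherence for diversity:
```
def generate_text_sampled(seed_text, next_words, model, max_sequence_len, temperature=1.0):
    """Like generate_text, but samples the next word instead of taking the argmax."""
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
        probs = model.predict(token_list, verbose=0)[0].astype('float64')
        probs = np.exp(np.log(probs + 1e-8) / temperature)  # temperature-rescale
        probs = probs / probs.sum()                         # renormalize
        predicted = np.random.choice(len(probs), p=probs)
        for word, index in tokenizer.word_index.items():
            if index == predicted:
                seed_text += " " + word
                break
    return seed_text.title()

print(generate_text_sampled("machine", 5, lstm_model, max_sequence_len, temperature=0.8))
```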
# Results
When analysing each method I used two metrics: the number of generated titles that made sense from start to finish, and the number that contained a sub-string which made sense. I named these **Coherent Titles** and **Coherent Sub-strings**.
I then generated 15 titles with each model and calculated the following results:
|Model|Coherent Titles|Coherent Sub-strings|
|------|-----|-----|
|Char RNN|6.67%|6.67%|
|Word RNN|40%|53%|
|LSTM|60%|100%|
It's apparent that the LSTM outperforms the RNNs, but that was to be expected. (Percentages are out of the 15 generated titles, so 6.67% corresponds to a single title.) I think the word level RNN is actually quite good, and the char level one can definitely be improved upon. Also, the dataset is quite small; with a larger corpus the results would likely improve.
However, a more formalised method for comparing the models is definitely necessary for further research.
# Going Forward
I think a possible method of comparing the different models would be to use a language model that can indicate whether a sentence makes sense to some degree. That could then be applied to the generated titles in order to extract more meaningful and reproducible results. I was advised by my lecturer that one way of doing this is to use something like [Google Ngram](https://books.google.com/ngrams/info), and check whether a title or a substring of a title has been previously used to a certain degree. If it has, then it likely makes some sense.
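Short of calling an external service, a crude stand-in for that idea (a sketch, not something I ran for the results above) is to score each generated title by how many of its bigrams appear in a reference corpus - here just the training titles via the `cleaned` variable from earlier, though a larger corpus would give a less biased estimate:
```
from nltk import word_tokenize, bigrams

def bigram_coverage(title, reference_bigrams):
    """Fraction of the title's bigrams that occur in the reference corpus."""
    title_bigrams = list(bigrams(word_tokenize(title.lower())))
    if not title_bigrams:
        return 0.0
    return sum(b in reference_bigrams for b in title_bigrams) / len(title_bigrams)

reference_bigrams = set(bigrams(word_tokenize(cleaned)))
print(bigram_coverage("machine learning for medical imaging", reference_bigrams))
```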
The parameters for the different implementations can definitely also be experimented with in order to better understand the impact on the final titles.
I was also advised that an interesting area of research would be to generate a title for your paper (or writings) based on the abstract (or some subsection of your writings). This would very likely lead to titles that are more related to the actual content.
This was a very fun and interesting experience, and was inspired by the following:
* [Harry Potter and the Portrait of what Looked like a Large Pile of Ash by Botnik ](https://botnik.org/content/harry-potter.html)
* [King James Programming](https://kingjamesprogramming.tumblr.com/)
* [Alice in Elsinore](https://www.eblong.com/zarf/markov/alice_in_elsinore.txt) from [Fun with Markov Chains](https://www.eblong.com/zarf/markov/)
* [Stack Exchange Simulator](https://se-simulator.lw1.at/)
* [Pun Generation with Surprise](https://github.com/hhexiy/pungen)
| github_jupyter |
# DeepDreaming with TensorFlow
>[Loading and displaying the model graph](#loading)
>[Naive feature visualization](#naive)
>[Multiscale image generation](#multiscale)
>[Laplacian Pyramid Gradient Normalization](#laplacian)
>[Playing with feature visualizations](#playing)
>[DeepDream](#deepdream)
This notebook demonstrates a number of Convolutional Neural Network image generation techniques implemented with TensorFlow for fun and science:
- visualize individual feature channels and their combinations to explore the space of patterns learned by the neural network (see [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) galleries)
- embed TensorBoard graph visualizations into Jupyter notebooks
- produce high-resolution images with tiled computation ([example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg))
- use Laplacian Pyramid Gradient Normalization to produce smooth and colorful visuals at low cost
- generate DeepDream-like images with TensorFlow (DogSlugs included)
The network under examination is the [GoogLeNet architecture](http://arxiv.org/abs/1409.4842), trained to classify images into one of 1000 categories of the [ImageNet](http://image-net.org/) dataset. It consists of a set of layers that apply a sequence of transformations to the input image. The parameters of these transformations were determined during the training process by a variant of gradient descent algorithm. The internal image representations may seem obscure, but it is possible to visualize and interpret them. In this notebook we are going to present a few tricks that allow to make these visualizations both efficient to generate and even beautiful. Impatient readers can start with exploring the full galleries of images generated by the method described here for [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) architectures.
```
# boilerplate code
from __future__ import print_function
import os
from io import BytesIO
import numpy as np
from functools import partial
import PIL.Image
from IPython.display import clear_output, Image, display, HTML
import tensorflow as tf
```
<a id='loading'></a>
## Loading and displaying the model graph
The pretrained network can be downloaded [here](https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip). Unpack the `tensorflow_inception_graph.pb` file from the archive and set its path to `model_fn` variable. Alternatively you can uncomment and run the following cell to download the network:
```
!wget -nc https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip && unzip -n inception5h.zip
model_fn = 'tensorflow_inception_graph.pb'
# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
```
To take a glimpse into the kinds of patterns that the network learned to recognize, we will try to generate images that maximize the sum of activations of particular channel of a particular convolutional layer of the neural network. The network we explore contains many convolutional layers, each of which outputs tens to hundreds of feature channels, so we have plenty of patterns to explore.
```
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Helper functions for TF Graph visualization
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>"%size)
return strip_def
def rename_nodes(graph_def, rename_func):
res_def = tf.GraphDef()
for n0 in graph_def.node:
n = res_def.node.add()
n.MergeFrom(n0)
n.name = rename_func(n.name)
for i, s in enumerate(n.input):
n.input[i] = rename_func(s) if s[0]!='^' else '^'+rename_func(s[1:])
return res_def
def show_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
display(HTML(iframe))
# Visualizing the network graph. Be sure to expand the "mixed" nodes to see their
# internal structure. We are going to visualize "Conv2D" nodes.
tmp_def = rename_nodes(graph_def, lambda s:"/".join(s.split('_',1)))
show_graph(tmp_def)
```
<a id='naive'></a>
## Naive feature visualization
Let's start with a naive way of visualizing these. Image-space gradient ascent!
```
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139 # picking some feature channel to visualize
# start with a gray image with a little noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 1)*255)
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
def visstd(a, s=0.1):
'''Normalize the image range for visualization'''
return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
def T(layer):
'''Helper for getting layer output tensor'''
return graph.get_tensor_by_name("import/%s:0"%layer)
def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for i in range(iter_n):
g, score = sess.run([t_grad, t_score], {t_input:img})
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
print(score, end = ' ')
clear_output()
showarray(visstd(img))
render_naive(T(layer)[:,:,:,channel])
```
<a id="multiscale"></a>
## Multiscale image generation
Looks like the network wants to show us something interesting! Let's help it. We are going to apply gradient ascent on multiple scales. Details formed on smaller scale will be upscaled and augmented with additional details on the next scale.
With multiscale image generation it may be tempting to set the number of octaves to some high value to produce wallpaper-sized images. Storing network activations and backprop values will quickly run out of GPU memory in this case. There is a simple trick to avoid this: split the image into smaller tiles and compute each tile gradient independently. Applying random shifts to the image before every iteration helps avoid tile seams and improves the overall image quality.
```
def tffunc(*argtypes):
'''Helper that transforms TF-graph generating function into a regular one.
See "resize" function below.
'''
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
# Helper function that uses TF to resize an image
def resize(img, size):
img = tf.expand_dims(img, 0)
return tf.image.resize_bilinear(img, size)[0,:,:,:]
resize = tffunc(np.float32, np.int32)(resize)
def calc_grad_tiled(img, t_grad, tile_size=512):
'''Compute the value of tensor t_grad over the image in a tiled way.
Random shifts are applied to the image to blur tile boundaries over
multiple iterations.'''
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
grad = np.zeros_like(img)
for y in range(0, max(h-sz//2, sz),sz):
for x in range(0, max(w-sz//2, sz),sz):
sub = img_shift[y:y+sz,x:x+sz]
g = sess.run(t_grad, {t_input:sub})
grad[y:y+sz,x:x+sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
def render_multiscale(t_obj, img0=img_noise, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
print('.', end = ' ')
clear_output()
showarray(visstd(img))
render_multiscale(T(layer)[:,:,:,channel])
```
<a id="laplacian"></a>
## Laplacian Pyramid Gradient Normalization
This looks better, but the resulting images mostly contain high frequencies. Can we improve it? One way is to add a smoothness prior into the optimization objective. This will effectively blur the image a little every iteration, suppressing the higher frequencies, so that the lower frequencies can catch up. This will require more iterations to produce a nice image. Why don't we just boost lower frequencies of the gradient instead? One way to achieve this is through the [Laplacian pyramid](https://en.wikipedia.org/wiki/Pyramid_%28image_processing%29#Laplacian_pyramid) decomposition. We call the resulting technique _Laplacian Pyramid Gradient Normalization_.
```
k = np.float32([1,4,6,4,1])
k = np.outer(k, k)
k5x5 = k[:,:,None,None]/k.sum()*np.eye(3, dtype=np.float32)
def lap_split(img):
'''Split the image into lo and hi frequency components'''
with tf.name_scope('split'):
lo = tf.nn.conv2d(img, k5x5, [1,2,2,1], 'SAME')
lo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1,2,2,1])
hi = img-lo2
return lo, hi
def lap_split_n(img, n):
'''Build Laplacian pyramid with n splits'''
levels = []
for i in range(n):
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def lap_merge(levels):
'''Merge Laplacian pyramid'''
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5*4, tf.shape(hi), [1,2,2,1]) + hi
return img
def normalize_std(img, eps=1e-10):
'''Normalize image by making its standard deviation = 1.0'''
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img/tf.maximum(std, eps)
def lap_normalize(img, scale_n=4):
'''Perform the Laplacian pyramid normalization.'''
img = tf.expand_dims(img,0)
tlevels = lap_split_n(img, scale_n)
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0,:,:,:]
# Showing the lap_normalize graph with TensorBoard
lap_graph = tf.Graph()
with lap_graph.as_default():
lap_in = tf.placeholder(np.float32, name='lap_in')
lap_out = lap_normalize(lap_in)
show_graph(lap_graph)
def render_lapnorm(t_obj, img0=img_noise, visfunc=visstd,
iter_n=10, step=1.0, octave_n=3, octave_scale=1.4, lap_n=4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# build the laplacian normalization graph
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
g = lap_norm_func(g)
img += g*step
print('.', end = ' ')
clear_output()
showarray(visfunc(img))
render_lapnorm(T(layer)[:,:,:,channel])
```
<a id="playing"></a>
## Playing with feature visualizations
We got a nice smooth image using only 10 iterations per octave. When running on a GPU this takes just a few seconds. Let's try to visualize another channel from the same layer. The network can generate a wide diversity of patterns.
```
render_lapnorm(T(layer)[:,:,:,65])
```
Lower layers produce features of lower complexity.
```
render_lapnorm(T('mixed3b_1x1_pre_relu')[:,:,:,101])
```
There are many interesting things one may try. For example, optimizing a linear combination of features often gives a "mixture" pattern.
```
render_lapnorm(T(layer)[:,:,:,65]+T(layer)[:,:,:,139], octave_n=4)
```
<a id="deepdream"></a>
## DeepDream
Now let's reproduce the [DeepDream algorithm](https://github.com/google/deepdream/blob/master/dream.ipynb) with TensorFlow.
```
def render_deepdream(t_obj, img0=img_noise,
iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# split the image into a number of octaves
img = img0
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw)/octave_scale))
hi = img-resize(lo, hw)
img = lo
octaves.append(hi)
# generate details octave by octave
for octave in range(octave_n):
if octave>0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2])+hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g*(step / (np.abs(g).mean()+1e-7))
print('.',end = ' ')
clear_output()
showarray(img/255.0)
```
Let's load an image and populate it with DogSlugs (in case you've missed them).
```
img0 = PIL.Image.open('pilatus800.jpg')
img0 = np.float32(img0)
showarray(img0/255.0)
render_deepdream(tf.square(T('mixed4c')), img0)
```
Note that results can differ from the [Caffe](https://github.com/BVLC/caffe)'s implementation, as we are using an independently trained network. Still, the network seems to like dogs and animal-like features due to the nature of the ImageNet dataset.
Using an arbitrary optimization objective still works:
```
render_deepdream(T(layer)[:,:,:,139], img0)
```
Don't hesitate to use higher resolution inputs (also increase the number of octaves)! Here is an [example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg) of running the flower dream over the bigger image.
We hope that the visualization tricks described here may be helpful for analyzing representations learned by neural networks or find their use in various artistic applications.
| github_jupyter |
```
import os
import sys
import glob
import itertools
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib.colors import ListedColormap
import numpy as np
import pandas as pd
np.random.seed(1234)
%matplotlib inline
```
# Load AML data
```
# load AML data and table
##### X: np.array, flow cytometry data, arcsin transformed
##### T: table of expert knowledge
np.random.seed(1234)
#PATH = '/Users/disiji/Dropbox/current/flow_cytometry/acdc/data/'
PATH = '/home/disij/projects/acdc/data/'
### LOAD DATA ###
path = PATH + 'AML_benchmark/'
df = pd.read_csv( path + 'AML_benchmark.csv.gz', sep=',', header = 0, compression = 'gzip', engine='python')
table = pd.read_csv(path + 'AML_table.csv', sep=',', header=0, index_col=0)
### PROCESS: discard ungated events ###
df = df[df.cell_type != 'NotGated']
df = df.drop(['Time', 'Cell_length','file_number', 'event_number', 'DNA1(Ir191)Di',
'DNA2(Ir193)Di', 'Viability(Pt195)Di', 'subject'], axis = 1)
channels = [item[:item.find('(')] for item in df.columns[:-1]]
df.columns = channels + ['cell_type']
df = df.loc[df['cell_type'] != 'NotDebrisSinglets']
table = table.fillna(0)
X = df[channels].values
table_headers = list(table)
### transform data
data = np.arcsinh((X-1.)/5.)
print table
print X.shape
```
Let's explore the properties of arcsinh a little...
```
t1 = np.arange(-10.0, 10.0, 0.1)
t2 = np.arcsinh(t1)
plt.plot(t1,t2)
print np.arcsinh(10000000000000000000)
```
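What the plot shows: $\operatorname{arcsinh}(x)=\ln\left(x+\sqrt{x^2+1}\right)$, so the transform is approximately linear near zero ($\operatorname{arcsinh}(x)\approx x$) and logarithmic for large arguments ($\approx \ln(2x)$). That is why it is applied above with a cofactor of 5: it compresses the heavy right tail of the cytometry counts without a singularity at zero.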
# Draw cuts with prior information
Data is now in a D dimensional cube; the goal is to find one optimal cut in each dimension
```
def draw_informed_cuts(theta_space, table):
    """
    INPUT: table: a K*D DataFrame of +-1 knowledge
    OUTPUT: np.array of length D, cuts on each dimension
    """
    # INFORMATIVE PRIORS: Beta(a, b) parameters. Beta(5, 1) pushes the cut
    # toward the upper end of the range, Beta(1, 5) toward the lower end,
    # Beta(5, 5) toward the middle, and Beta(1, 1) is uniform (neutral).
    upper_cut = (5., 1.)
    lower_cut = (1., 5.)
    middle_cut = (5., 5.)
    neutral_cut = (1., 1.)
    priors_dict = {'-1': lower_cut, '0': neutral_cut, '1': upper_cut,
                   '-1 0': lower_cut, '-1 1': middle_cut, '0 1': upper_cut,
                   '-1 0 1': middle_cut}
    headers = list(table)
    matching_prior_info = [np.unique(table[_]) for _ in headers]
    prior_type_str = [' '.join([str(int(x)) for x in _]) for _ in matching_prior_info]
    prior_params = [priors_dict[_] for _ in prior_type_str]
    # draw each cut from its Beta prior, rescaled to the dimension's range
    cuts = [(theta_space[d, 1] - theta_space[d, 0]) * np.random.beta(prior_params[d][0], prior_params[d][1]) + theta_space[d, 0]
            for d in range(len(headers))]
    return cuts
def comp_log_p_sample(theta_space, data, cuts):
    # log-likelihood of the data under piecewise-uniform densities on each
    # side of the cut, recursing over the remaining dimensions
    D = len(cuts)
    if D == 0:
        return 0
    idx_left = data[:, 0] < cuts[0]
    idx_right = data[:, 0] >= cuts[0]
    log_len_left = np.log(cuts[0] - theta_space[0, 0])
    log_len_right = np.log(theta_space[0, 1] - cuts[0])
    return - idx_left.sum() * log_len_left - idx_right.sum() * log_len_right + \
        comp_log_p_sample(theta_space[1:], data[:, 1:], cuts[1:])
```
# Fit model to 2 dimensional data and visualize
```
data_2d = np.hstack([data[:,5][np.newaxis].T, data[:,6][np.newaxis].T])
np.random.shuffle(data_2d)
data_2d = data_2d[:1000,:]
table_2d = table[['CD4','CD8']]
print(data_2d.shape)
x_min, y_min, x_max, y_max = data_2d[:,0].min(), data_2d[:,1].min(), data_2d[:,0].max(), data_2d[:,1].max()
theta_space = np.array([[x_min, x_max], [y_min, y_max]])
print(theta_space)
n_mcmc_chain = 2
n_mcmc_sample = 30000
mcmc_gaussin_std = np.array([(_[1]-_[0])*0.01 for _ in theta_space]) # tune step size s.t. acceptance rate ~50%
accepts = [[] for _ in range(n_mcmc_chain)]
rejects = [[] for _ in range(n_mcmc_chain)]
for chain in range(n_mcmc_chain):
    print("Drawing Chain %d ..." % chain)
    sample = draw_informed_cuts(theta_space, table_2d)
    log_p_sample = comp_log_p_sample(theta_space, data_2d, sample)
    accepts[chain].append(sample)
    for idx in range(n_mcmc_sample):
        # propose a new sample from a Gaussian random walk, rejecting
        # proposals that fall outside the bounding box
        good_sample = False
        while not good_sample:
            new_sample = np.array([np.random.normal(0, mcmc_gaussin_std[d], 1)[0] for d in range(2)]) + sample
            if (new_sample > theta_space[:, 0]).all() and (new_sample < theta_space[:, 1]).all():
                good_sample = True
        # perform the Metropolis accept-reject step
        new_log_p_sample = comp_log_p_sample(theta_space, data_2d, new_sample)
        if new_log_p_sample < log_p_sample and \
           np.log(np.random.uniform(low=0, high=1.)) > new_log_p_sample - log_p_sample:
            rejects[chain].append(new_sample)
        else:
            sample = new_sample
            log_p_sample = new_log_p_sample
            accepts[chain].append(sample)
        if (idx + 1) % 1000 == 0:
            print("Iteration %d, Samples %d" % (idx + 1, len(accepts[chain])))
    print("Number of samples collected: %d" % len(accepts[chain]))
# get an average model from the second half of each chain (burn-in discarded)
burnt_accepts = np.array([_ for chain in accepts for _ in chain[len(chain) // 2:]])
avg_cuts = np.mean(burnt_accepts, axis=0)
```
To do: proper burn-in and thinning, sketched below.
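As a minimal sketch of that to-do (the `burn_in` and `thin` values here are illustrative assumptions, not tuned):
```
burn_in = len(accepts[0]) // 2   # discard the first half of each chain
thin = 10                        # keep every 10th sample to reduce autocorrelation
thinned = np.array([s for chain in accepts for s in chain[burn_in::thin]])
avg_cuts_thinned = np.mean(thinned, axis=0)
```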
```
### VISUALIZE 2D POSTERIOR WITH DATA###
def print_posterior(theta_space, data, list_of_cuts , trans_level=.05, color='k'):
plt.figure()
plt.scatter(data[:,0], data[:,1], c='k', edgecolors='k', s=10, alpha=.5)
for cuts in list_of_cuts:
plt.plot([cuts[0], cuts[0]], [theta_space[1,0], theta_space[1,1]], color+'-', linewidth=5, alpha=trans_level)
plt.plot([theta_space[0,0], theta_space[0,1]], [cuts[1], cuts[1]], color+'-', linewidth=5, alpha=trans_level)
np.random.shuffle(burnt_accepts)
print_posterior(theta_space, data_2d, burnt_accepts[:100] , trans_level=.05, color='b')
plt.xlabel("CD4")
plt.ylabel("CD8")
plt.title("Mondrian Process Posterior Samples")
plt.xlim([x_min, x_max])
plt.ylim([y_min, y_max])
plt.show()
```
# Fit model to full AML data
```
print(data.shape)
N,D = data.shape
# input data: data, table
theta_space = np.array([[data[:,d].min(), data[:,d].max()] for d in range(data.shape[1])])
%%time
n_mcmc_chain = 4
n_mcmc_sample = 10000
mcmc_gaussin_std = np.array([(_[1]-_[0])*0.01 for _ in theta_space]) # tune step size s.t. acceptance rate ~50%
batch_size = 5000
accepts = [[] for _ in range(n_mcmc_chain)]
rejects = [[] for _ in range(n_mcmc_chain)]
for chain in range(n_mcmc_chain):
    print("Drawing Chain %d ..." % chain)
    sample = draw_informed_cuts(theta_space, table)
    log_p_sample = comp_log_p_sample(theta_space, data[np.random.choice(data.shape[0], size=batch_size, replace=False), :], sample)
    accepts[chain].append(sample)
    for idx in range(n_mcmc_sample):
        # propose a new sample from a Gaussian random walk, rejecting
        # proposals that fall outside the bounding box
        good_sample = False
        while not good_sample:
            new_sample = np.array([np.random.normal(0, mcmc_gaussin_std[d], 1)[0] for d in range(D)]) + sample
            if (new_sample > theta_space[:, 0]).all() and (new_sample < theta_space[:, 1]).all():
                good_sample = True
        # perform the Metropolis accept-reject step on a fresh mini-batch
        new_log_p_sample = comp_log_p_sample(theta_space, data[np.random.randint(data.shape[0], size=batch_size), :], new_sample)
        if new_log_p_sample < log_p_sample and \
           np.log(np.random.uniform(low=0, high=1.)) > new_log_p_sample - log_p_sample:
            rejects[chain].append(new_sample)
        else:
            sample = new_sample
            log_p_sample = new_log_p_sample
            accepts[chain].append(sample)
        if (idx + 1) % 1000 == 0:
            print("Iteration %d, cumulative accepted sample size is %d" % (idx + 1, len(accepts[chain])))
print("Total number of accepted samples: %d" % sum([len(accepts[chain]) for chain in range(n_mcmc_chain)]))
# get an average model, keeping only the last ~1/11 of each chain
burnt_accepts = np.array([_ for chain in accepts for _ in chain[len(chain) * 10 // 11:]])
avg_cuts = np.mean(burnt_accepts, axis=0)
# plot trace of burnt_accepts
# D//4 x 4 grid of axes, returned as a 2-d array
f, axarr = plt.subplots(D // 4, 4, figsize=(20, 20))
for _ in range(D):
    axarr[_ // 4, _ % 4].plot(burnt_accepts[:, _])
    axarr[_ // 4, _ % 4].plot([0, len(burnt_accepts)], [theta_space[_, 0], theta_space[_, 0]])
    axarr[_ // 4, _ % 4].plot([0, len(burnt_accepts)], [theta_space[_, 1], theta_space[_, 1]])
print(table.iloc[[8, 10, 12]])
from math import exp, log, pow

def entropy_curve(x):
    return -(1. - pow(x, 3)/3.) * log(x) - pow(x, 3)/3 * log(1 - x)

x_lim = [i * 1.0/10000 for i in range(1, 10000)]
y_vals = [exp(entropy_curve(x)) for x in x_lim]
plt.plot(x_lim, y_vals, linewidth=5)
```
| github_jupyter |
# BERT based NER experiment
> Tutorial author: Xu Xin (徐欣, <xxucs@zju.edu.cn>)
In this demo, we use `BERT` to recognize named entities. We hope it helps you understand the process of named entity recognition.
This demo uses `Python3`.
## NER
**Named-entity recognition** (also known as named entity identification, entity chunking, and entity extraction) is a subtask of information extraction that seeks to locate and classify named entities mentioned in unstructured text into pre-defined categories such as person names, organizations, locations, medical codes, time expressions, quantities, monetary values, percentages, etc.
## Dataset
In this demo, we use the [**People's Daily(人民日报) dataset**](https://github.com/OYE93/Chinese-NLP-Corpus/tree/master/NER/People's%20Daily). It is a dataset for NER, concentrating on three types of named entities: persons (PER), locations (LOC), and organizations (ORG).
| Word | Named entity tag |
| :--: | :--------------: |
| 早 | O |
| 在 | O |
| 1 | O |
| 9 | O |
| 7 | O |
| 5 | O |
| 年 | O |
| , | O |
| 张 | B-PER |
| 鸿 | I-PER |
| 飞 | I-PER |
| 就 | O |
| 有 | O |
| 《 | O |
| 草 | O |
| 原 | O |
| 新 | O |
| 医 | O |
| 》 | O |
| 赴 | O |
| 法 | B-LOC |
| 展 | O |
| 览 | O |
| , | O |
| 为 | O |
| 我 | O |
| 国 | O |
| 驻 | B-ORG |
| 法 | I-ORG |
| 使 | I-ORG |
| 馆 | I-ORG |
| 收 | O |
| 藏 | O |
| 。 | O |
- train.txt: It contains 20,864 sentences, including 979,180 named entity tags.
- valid.txt: It contains 2,318 sentences, including 109,870 named entity tags.
- test.txt: It contains 4,636 sentences, including 219,197 named entity tags.
## BERT
[**Bidirectional Encoder Representations from Transformers (BERT)**](https://github.com/google-research/bert)

## Prepare the runtime environment
```
!pip install deepke
!wget 120.27.214.45/Data/ner/standard/data.tar.gz
!tar -xzvf data.tar.gz
```
## Import modules
```
from __future__ import absolute_import, division, print_function
import csv
import json
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.nn.functional as F
from pytorch_transformers import (WEIGHTS_NAME, AdamW, BertConfig, BertForTokenClassification, BertTokenizer, WarmupLinearSchedule)
from torch import nn
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from seqeval.metrics import classification_report
import hydra
from hydra import utils
from deepke.name_entity_re.standard import *
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
```
## Configure model parameters
```
class Config(object):
data_dir = "data/" # The input data dir
bert_model = "bert-base-chinese"
task_name = "ner"
output_dir = "checkpoints"
max_seq_length = 128
do_train = True # Fine-tune or not
do_eval = True # Evaluate or not
eval_on = "dev"
do_lower_case = True
train_batch_size = 32
eval_batch_size = 8
learning_rate = 5e-5
num_train_epochs = 3 # The number of training epochs
warmup_proportion = 0.1
weight_decay = 0.01
adam_epsilon = 1e-8
max_grad_norm = 1.0
use_gpu = True # Use gpu or not
gpu_id = 1 # Which gpu to be used
local_rank = -1
seed = 42
gradient_accumulation_steps = 1
fp16 = False
fp16_opt_level = "O1"  # apex AMP optimization level (letter O, not zero)
loss_scale = 0.0
text = "秦始皇兵马俑位于陕西省西安市,1961年被国务院公布为第一批全国重点文物保护单位,是世界八大奇迹之一。"
cfg = Config()
```
## Prepare the model
```
class TrainNer(BertForTokenClassification):
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None, attention_mask_label=None):
        sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
        batch_size, max_len, feat_dim = sequence_output.shape
        # allocate on the same device as the inputs (GPU when cfg.gpu_id is used, otherwise CPU)
        valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device=input_ids.device)
        # keep only the hidden states of valid (first-subword) positions
        for i in range(batch_size):
            jj = -1
            for j in range(max_len):
                if valid_ids[i][j].item() == 1:
                    jj += 1
                    valid_output[i][jj] = sequence_output[i][j]
        sequence_output = self.dropout(valid_output)
        logits = self.classifier(sequence_output)
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss(ignore_index=0)
            if attention_mask_label is not None:
                active_loss = attention_mask_label.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits
# Use gpu or not
if cfg.use_gpu and torch.cuda.is_available():
device = torch.device('cuda', cfg.gpu_id)
else:
device = torch.device('cpu')
if cfg.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(cfg.gradient_accumulation_steps))
cfg.train_batch_size = cfg.train_batch_size // cfg.gradient_accumulation_steps
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if not cfg.do_train and not cfg.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
# Checkpoints
if os.path.exists(cfg.output_dir) and os.listdir(cfg.output_dir) and cfg.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(cfg.output_dir))
if not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
# Preprocess the input dataset
processor = NerProcessor()
label_list = processor.get_labels()
num_labels = len(label_list) + 1
# Prepare the model
tokenizer = BertTokenizer.from_pretrained(cfg.bert_model, do_lower_case=cfg.do_lower_case)
train_examples = None
num_train_optimization_steps = 0
if cfg.do_train:
train_examples = processor.get_train_examples(cfg.data_dir)
num_train_optimization_steps = int(len(train_examples) / cfg.train_batch_size / cfg.gradient_accumulation_steps) * cfg.num_train_epochs
config = BertConfig.from_pretrained(cfg.bert_model, num_labels=num_labels, finetuning_task=cfg.task_name)
model = TrainNer.from_pretrained(cfg.bert_model,from_tf = False,config = config)
model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias','LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': cfg.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = int(cfg.warmup_proportion * num_train_optimization_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=cfg.learning_rate, eps=cfg.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=num_train_optimization_steps)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
label_map = {i : label for i, label in enumerate(label_list,1)}
```
## Train
```
if cfg.do_train:
train_features = convert_examples_to_features(train_examples, label_list, cfg.max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_valid_ids,all_lmask_ids)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=cfg.train_batch_size)
model.train()
for _ in trange(int(cfg.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, valid_ids,l_mask = batch
loss = model(input_ids, segment_ids, input_mask, label_ids,valid_ids,l_mask)
if cfg.gradient_accumulation_steps > 1:
loss = loss / cfg.gradient_accumulation_steps
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % cfg.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
model_to_save.save_pretrained(cfg.output_dir)
tokenizer.save_pretrained(cfg.output_dir)
label_map = {i : label for i, label in enumerate(label_list,1)}
model_config = {"bert_model":cfg.bert_model,"do_lower":cfg.do_lower_case, "max_seq_length":cfg.max_seq_length,"num_labels":len(label_list)+1,"label_map":label_map}
json.dump(model_config,open(os.path.join(cfg.output_dir,"model_config.json"),"w"))
# Load a trained model and config that you have fine-tuned
else:
# Load a trained model and vocabulary that you have fine-tuned
model = TrainNer.from_pretrained(cfg.output_dir)
tokenizer = BertTokenizer.from_pretrained(cfg.output_dir, do_lower_case=cfg.do_lower_case)
model.to(device)
```
## Evaluate
```
if cfg.do_eval:
if cfg.eval_on == "dev":
eval_examples = processor.get_dev_examples(cfg.data_dir)
elif cfg.eval_on == "test":
eval_examples = processor.get_test_examples(cfg.data_dir)
else:
raise ValueError("eval on dev or test set only")
eval_features = convert_examples_to_features(eval_examples, label_list, cfg.max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in eval_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_valid_ids,all_lmask_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=cfg.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
y_true = []
y_pred = []
label_map = {i : label for i, label in enumerate(label_list,1)}
for input_ids, input_mask, segment_ids, label_ids,valid_ids,l_mask in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
valid_ids = valid_ids.to(device)
label_ids = label_ids.to(device)
l_mask = l_mask.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask,valid_ids=valid_ids,attention_mask_label=l_mask)
logits = torch.argmax(F.log_softmax(logits,dim=2),dim=2)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
input_mask = input_mask.to('cpu').numpy()
for i, label in enumerate(label_ids):
temp_1 = []
temp_2 = []
for j,m in enumerate(label):
if j == 0:
continue
elif label_ids[i][j] == len(label_map):
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(label_map[label_ids[i][j]])
temp_2.append(label_map[logits[i][j]])
report = classification_report(y_true, y_pred,digits=4)
logger.info("\n%s", report)
output_eval_file = os.path.join(cfg.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info("\n%s", report)
writer.write(report)
```
## Predict
```
model = InferNer("checkpoints/")
text = cfg.text
print("NER句子:")
print(text)
print('NER结果:')
result = model.predict(text)
for k,v in result.items():
if v:
print(v,end=': ')
if k=='PER':
print('Person')
elif k=='LOC':
print('Location')
elif k=='ORG':
print('Organization')
```
This demo does not include hyperparameter tuning. If you are interested in that, you can visit the [deepke](http://openkg.cn/tool/deepke) repository to download and try more models. :)
| github_jupyter |
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import random
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, BatchNormalization, Flatten, GlobalAveragePooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
import os
```
# **DATA LOADING AND PREPROCESSING**
```
train_images = []
train_labels = []
val_images = []
val_labels = []
train_path = "../input/hindi-letter-dataset/Dataset/train"
val_path = "../input/hindi-letter-dataset/Dataset/valid"
print("Train data")
margin = 0
removed_labels = [18, 23, 24]  # class folders to skip; `margin` shifts later labels down to stay contiguous
for label, i in enumerate(sorted(os.listdir(train_path))):
if label in removed_labels:
margin+=1
continue
print(len(os.listdir(os.path.join(train_path, i))))
for j in os.listdir(os.path.join(train_path, i)):
img = cv2.imread(os.path.join(train_path, i, j))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).reshape(64,64,1).astype("float32")/255
train_images.append(img)
train_labels.append(label-margin)
margin = 0
print('Validation data')
for label, i in enumerate(sorted(os.listdir(val_path))):
if label in removed_labels:
margin+=1
continue
print(len(os.listdir(os.path.join(val_path, i))))
for j in os.listdir(os.path.join(val_path, i)):
img = cv2.imread(os.path.join(val_path, i, j))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).reshape(64,64,1).astype("float32")/255
val_images.append(img)
val_labels.append(label-margin)
margin = 0
val_path = "../input/test-data-ps1/test_labels_updated"
print('Further data')
for label, i in enumerate(sorted(os.listdir(val_path))):
if label in removed_labels:
margin+=1
continue
print(len(os.listdir(os.path.join(val_path, i))))
for j in os.listdir(os.path.join(val_path, i)):
img = cv2.imread(os.path.join(val_path, i, j))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).reshape(64,64,1).astype("float32")/255
val_images.append(img)
val_labels.append(label-margin)
np.unique(val_labels)
train_images = np.asarray(train_images)
val_images = np.asarray(val_images)
train_labels = tf.keras.utils.to_categorical(train_labels, num_classes = 32)
val_labels = tf.keras.utils.to_categorical(val_labels, num_classes = 32)
val_labels.shape
train_datagen = ImageDataGenerator(
width_shift_range = 0.09,
height_shift_range = 0.09,
shear_range = 0.3,
fill_mode = 'nearest')
test_datagen = ImageDataGenerator()
traingen = train_datagen.flow(train_images, train_labels, batch_size=16, shuffle = True)
valgen = test_datagen.flow(val_images, val_labels, batch_size=16)
```
# **MODEL BUILDING**
```
model_vc = Sequential()
model_vc.add(Conv2D(128, (6, 6), strides = 1, activation='relu', input_shape=(64, 64, 1)))
model_vc.add(BatchNormalization())
model_vc.add(Conv2D(128, (6, 6), strides = 1, activation='relu'))
model_vc.add(BatchNormalization())
model_vc.add(Dropout(0.4))
model_vc.add(Conv2D(128, (4, 4), strides = 2, activation='relu'))
model_vc.add(BatchNormalization())
model_vc.add(Conv2D(128, (4, 4), strides = 2, activation='relu'))
model_vc.add(BatchNormalization())
model_vc.add(Dropout(0.4))
model_vc.add(Conv2D(128, (3, 3), strides = 2, activation='relu'))
model_vc.add(BatchNormalization())
model_vc.add(Conv2D(128, (3, 3), strides = 2, activation='relu'))
model_vc.add(BatchNormalization())
model_vc.add(Dropout(0.4))
model_vc.add(Flatten())
model_vc.add(Dense(64, activation = "relu"))
model_vc.add(BatchNormalization())
model_vc.add(Dropout(0.2))
model_vc.add(Dense(32, activation = "softmax"))
model_vc.summary()
model_vc.compile(loss='categorical_crossentropy', optimizer = tf.keras.optimizers.Adam(learning_rate = 1e-3), metrics = ['accuracy'])
callbacks = [
ModelCheckpoint(
filepath='model_vc.h5',
save_freq='epoch',
save_weights_only=True,
mode = "max",
monitor = "val_accuracy",
verbose=1,
save_best_only = True
),
EarlyStopping(monitor="val_loss", mode="min", patience=25),
LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
#keras.callbacks.TensorBoard(log_dir='./Graph_vowels', histogram_freq=0, write_graph=True, write_images=True)
]
history = model_vc.fit(traingen, steps_per_epoch = len(train_images)//16, epochs = 120,
validation_data= valgen, validation_steps = len(val_images)//16, callbacks = callbacks, verbose = 1)
model_vc.load_weights("model_vc.h5")
model_vc.save("98.2-96.5.h5")
model_vc.evaluate(valgen)
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
import itertools
def plot_confusion_matrix(cm, classes=['0','1'],
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
predict = model_vc.predict(val_images)
pred_labs =np.argmax(predict, axis =1)
confusion = confusion_matrix(np.argmax(val_labels, axis=1), pred_labs)
report = classification_report(np.argmax(val_labels, axis=1), pred_labs)
print(report)
plt.figure(figsize=(10,10))
plot_confusion_matrix(confusion, classes=list(range(0,32)),
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues)
plt.savefig('confuse')
plt.imshow(val_images[np.argmax(val_labels, axis=1)==23][0][:,:,0])
```
| github_jupyter |
```
import time
from termcolor import colored
import torch
import torch.autograd.profiler as profiler
from modules.Swc2d import Swc2d
from modules.Dcls2dFull import Dcls2dFull
assert torch.cuda.is_available()
cuda_device = torch.device("cuda") # device object representing GPU
in_channels = 1
out_channels = 1
kernel_size = (2,2)
dilation = (2,2)
stride = (1,1)
padding = (0,0)
groups = 1
bias = False
m = torch.nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dilation=dilation,
stride=stride,
padding=padding,
groups=groups,
bias=bias).to(cuda_device)
n = Swc2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dilation=dilation,
stride=stride,
padding=padding,
groups=groups,
bias=bias).to(cuda_device)
X1 = torch.nn.Parameter(
torch.tensor([[[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.]]]],device=cuda_device),
requires_grad = True)
X2 = torch.nn.Parameter(
torch.tensor([[[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.]]]],device=cuda_device),
requires_grad = True)
m.weight = torch.nn.Parameter(
torch.tensor([[[[20., 40.],
[60., 80.]]]],device=cuda_device),
requires_grad = True)
n.weight = torch.nn.Parameter(
torch.tensor([[[[20., 40.],
[60., 80.]]]],device=cuda_device),
requires_grad = True)
back_truth = torch.nn.Parameter(
torch.tensor([[[[1., 2.],
[4., 5.]]]],device=cuda_device),
requires_grad = True)
with torch.autograd.profiler.profile(use_cuda=True, profile_memory=True) as prof:
var2 = (n(X2) - back_truth).norm()
var1 = (m(X1) - back_truth).norm()
var1.backward()
var2.backward()
print(X1.size())
print(m.weight.size())
print(n.weight.size())
print(m(X1).size())
print(m(X1))
print(n(X2).size())
print(n(X2))
print(m.weight.grad)
print(n.weight.grad)
print(X1.grad)
print(X2.grad)
n.weight.nonzero().size(0)*100/n.weight.numel()
batch = 16
in_channels = 2**9
out_channels = 2**10
kernel_size = (3,3)
dilation = (8,8)
stride = (1,1)
padding = (0,0)
groups = 1
bias = False
h = 200
w = 200
h_o = int((h + 2 * padding[0] - (dilation[0] * (kernel_size[0] - 1) + 1)) / stride[0] + 1)
w_o = int((w + 2 * padding[1] - (dilation[1] * (kernel_size[1] - 1) + 1)) / stride[1] + 1)
n = Swc2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dilation=dilation,
stride=stride,
padding=padding,
groups=groups,
bias=bias).to(cuda_device)
X2 = torch.nn.Parameter(torch.rand(batch,in_channels,h,w,device=cuda_device), requires_grad = True)
back_truth = torch.nn.Parameter(torch.rand(batch,out_channels,h_o,w_o,device=cuda_device), requires_grad = True)
with torch.autograd.profiler.profile(use_cuda=True, profile_memory=True) as prof:
var2 = (n(X2) - back_truth).norm()
var2.backward()
print(torch.cuda.memory_summary(device=cuda_device, abbreviated=True))
print(prof.key_averages().table( row_limit=1000))
#prof.export_chrome_trace("trace.json")
----------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg Self CUDA Self CUDA % CUDA total CUDA time avg CPU Mem Self CPU Mem CUDA Mem Self CUDA Mem # of Calls
----------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
swc2d 1.81% 35.057us 2.47% 47.811us 47.811us 51.328us 0.12% 51.328us 51.328us 0 b 0 b 2.07 Gb 0 b 1
aten::view 2.61% 50.557us 2.61% 50.557us 10.111us 0.000us 0.00% 0.000us 0.000us 0 b 0 b 0 b 0 b 5
aten::empty 5.65% 109.522us 5.65% 109.522us 10.952us 0.000us 0.00% 0.000us 0.000us 0 b 0 b 8.27 Gb 8.27 Gb 10
aten::sub 1.60% 31.078us 1.85% 35.862us 35.862us 11.582ms 28.00% 11.582ms 11.582ms 0 b 0 b 2.07 Gb 0 b 1
aten::frobenius_norm 0.67% 13.056us 5.76% 111.609us 111.609us 5.504us 0.01% 3.684ms 3.684ms 0 b 0 b 1.00 Kb 0 b 1
aten::norm 3.24% 62.703us 3.54% 68.657us 68.657us 3.672ms 8.88% 3.672ms 3.672ms 0 b 0 b 512 b 0 b 1
aten::as_strided 0.08% 1.471us 0.08% 1.471us 1.471us 0.000us 0.00% 0.000us 0.000us 0 b 0 b 0 b 0 b 1
aten::resize_ 2.46% 47.604us 2.46% 47.604us 15.868us 0.000us 0.00% 0.000us 0.000us 0 b 0 b 2.07 Gb 2.07 Gb 3
aten::copy_ 1.15% 22.211us 1.15% 22.211us 22.211us 6.112us 0.01% 6.112us 6.112us 0 b 0 b 0 b 0 b 1
aten::ones_like 0.41% 7.996us 1.31% 25.292us 25.292us 3.456us 0.01% 7.552us 7.552us 0 b 0 b 512 b 0 b 1
aten::empty_like 1.28% 24.808us 4.54% 87.984us 21.996us 0.000us 0.00% 0.000us 0.000us 0 b 0 b 1.24 Gb 0 b 4
aten::empty_strided 3.26% 63.176us 3.26% 63.176us 15.794us 0.000us 0.00% 0.000us 0.000us 0 b 0 b 1.24 Gb 1.24 Gb 4
aten::fill_ 6.85% 132.681us 6.85% 132.681us 26.536us 2.249ms 5.44% 2.249ms 449.728us 0 b 0 b 0 b 0 b 5
torch::autograd::GraphRoot 0.16% 3.055us 0.16% 3.055us 3.055us 2.048us 0.00% 2.048us 2.048us 0 b 0 b 0 b 0 b 1
torch::autograd::CopyBackwards 2.56% 49.679us 3.32% 64.323us 64.323us 2.111us 0.01% 4.160us 4.160us 0 b 0 b 0 b 0 b 1
aten::to 0.76% 14.644us 0.76% 14.644us 14.644us 2.049us 0.00% 2.049us 2.049us 0 b 0 b 0 b 0 b 1
NormBackward1 4.69% 90.810us 26.74% 517.888us 517.888us 10.143us 0.02% 7.956ms 7.956ms 0 b 0 b 2.07 Gb -1.00 Kb 1
aten::div 4.40% 85.285us 5.73% 111.043us 111.043us 8.064us 0.02% 8.064us 8.064us 0 b 0 b 512 b 0 b 1
aten::eq 6.62% 128.152us 11.40% 220.838us 110.419us 12.960us 0.03% 17.536us 8.768us 0 b 0 b 1.00 Kb 0 b 2
aten::masked_fill_ 3.03% 58.597us 3.03% 58.597us 58.597us 6.305us 0.02% 6.305us 6.305us 0 b 0 b 0 b 0 b 1
aten::mul 9.68% 187.394us 11.50% 222.707us 111.354us 15.806ms 38.20% 15.806ms 7.903ms 0 b 0 b 4.13 Gb 0 b 2
SubBackward0 2.12% 41.081us 18.10% 350.638us 350.638us 6.051us 0.01% 15.776ms 15.776ms 0 b 0 b 2.07 Gb -2.07 Gb 1
aten::neg 8.47% 164.000us 13.74% 266.156us 133.078us 7.883ms 19.05% 15.760ms 7.880ms 0 b 0 b 4.13 Gb 0 b 2
torch::autograd::AccumulateGrad 4.47% 86.639us 10.05% 194.741us 48.685us 13.738us 0.03% 30.340us 7.585us 0 b 0 b 0 b 0 b 4
aten::detach 3.49% 67.622us 5.58% 108.102us 27.026us 13.176us 0.03% 16.602us 4.150us 0 b 0 b 0 b 0 b 4
detach 2.09% 40.480us 2.09% 40.480us 10.120us 3.426us 0.01% 3.426us 0.856us 0 b 0 b 0 b 0 b 4
swc2dBackward 8.58% 166.152us 30.24% 585.618us 585.618us 7.844us 0.02% 2.277ms 2.277ms 0 b 0 b 1.24 Gb -132.50 Kb 1
aten::zeros_like 3.46% 66.969us 14.73% 285.204us 95.068us 13.887us 0.03% 2.259ms 753.108us 0 b 0 b 1.24 Gb 0 b 3
aten::zero_ 3.01% 58.339us 7.15% 138.447us 46.149us 7.039us 0.02% 2.245ms 748.479us 0 b 0 b 0 b 0 b 3
aten::ones 1.34% 26.019us 4.63% 89.663us 89.663us 4.098us 0.01% 10.242us 10.242us 0 b 0 b 132.50 Kb 0 b 1
----------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 1.937ms
CUDA time total: 41.371ms
```
| github_jupyter |
# Setup
## Instructions
1. Work on a copy of this notebook: _File_ > _Save a copy in Drive_ (you will need a Google account).
2. (Optional) If you would like to do the deep learning component of this tutorial, turn on the GPU with Edit->Notebook settings->Hardware accelerator->GPU
3. Execute the following cell (click on it and press Ctrl+Enter) to install Julia, IJulia and other packages (if needed, update `JULIA_VERSION` and the other parameters). This takes a couple of minutes.
4. Continue to the next section.
_Notes_:
* If your Colab Runtime gets reset (e.g., due to inactivity), repeat steps 3, 4.
* After installation, if you want to change the Julia version or activate/deactivate the GPU, you will need to reset the Runtime: _Runtime_ > _Delete and disconnect runtime_ and repeat steps 2-4.
**Run the following code to install Julia**
```
%%shell
set -e
#---------------------------------------------------#
JULIA_VERSION="1.7.2"
JULIA_PACKAGES="PyCall SymbolicRegression"
JULIA_NUM_THREADS=4
#---------------------------------------------------#
if [ -z `which julia` ]; then
# Install Julia
JULIA_VER=`cut -d '.' -f -2 <<< "$JULIA_VERSION"`
echo "Installing Julia $JULIA_VERSION on the current Colab Runtime..."
BASE_URL="https://julialang-s3.julialang.org/bin/linux/x64"
URL="$BASE_URL/$JULIA_VER/julia-$JULIA_VERSION-linux-x86_64.tar.gz"
wget -nv $URL -O /tmp/julia.tar.gz # -nv means "not verbose"
tar -x -f /tmp/julia.tar.gz -C /usr/local --strip-components 1
rm /tmp/julia.tar.gz
for PKG in `echo $JULIA_PACKAGES`; do
echo "Installing Julia package $PKG..."
julia -e 'using Pkg; pkg"add '$PKG'; precompile;"'
done
julia -e 'println("Success")'
fi
```
Install PySR and PyTorch-Lightning:
```
%pip install -Uq pysr pytorch_lightning
```
The following step is not normally required, but Colab's printing is non-standard, so we need to set up PyJulia manually:
```
from julia import Julia
julia = Julia(compiled_modules=False)
from julia import Main
from julia.tools import redirect_output_streams
redirect_output_streams()
```
Let's install the backend of PySR, and all required libraries. We will also precompile them so they are faster at startup.
**(This may take some time)**
```
import pysr
pysr.install()
import sympy
import numpy as np
from matplotlib import pyplot as plt
from pysr import PySRRegressor
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl
from sklearn.model_selection import train_test_split
```
# Simple PySR example:
First, let's learn a simple function
$$2.5382 \cos(x_3) + x_0^2 - 2$$
```
# Dataset
np.random.seed(0)
X = 2 * np.random.randn(100, 5)
y = 2.5382 * np.cos(X[:, 3]) + X[:, 0] ** 2 - 2
```
By default, we will set up 30 populations of expressions (which evolve independently except for migrations), use 4 threads, and use `"best"` for our model selection strategy:
```
default_pysr_params = dict(
populations=30,
procs=4,
model_selection="best",
)
```
PySR can run for arbitrarily long, and continue to find more and more accurate expressions. You can set the total number of cycles of evolution with `niterations`, although there are also a [few more ways](https://github.com/MilesCranmer/PySR/pull/134) to stop execution.
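For instance, a sketch of a bounded run (`timeout_in_seconds` is an assumption here and may vary across PySR versions):
```
# Stop after 40 iterations or 60 wall-clock seconds, whichever comes first.
bounded_model = PySRRegressor(
    niterations=40,
    timeout_in_seconds=60,  # assumed parameter; check your PySR version
    binary_operators=["plus", "mult"],
)
```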
**This first execution will take a bit longer to startup, as the library is JIT-compiled. The next execution will be much faster.**
```
# Learn equations
model = PySRRegressor(
niterations=30,
binary_operators=["plus", "mult"],
unary_operators=["cos", "exp", "sin"],
**default_pysr_params
)
model.fit(X, y)
```
We can print the model, which will print out all the discovered expressions:
```
model
```
We can also view the SymPy format of the best expression:
```
model.sympy()
```
We can also view the SymPy of any other expression in the list, using the index of it in `model.equations_`.
```
model.sympy(2)
```
## Output
`model.equations_` is a Pandas DataFrame. We can export the results in various ways:
```
model.latex()
```
There are also `model.sympy()`, `model.jax()`, and `model.pytorch()`. All of these can take an index as input, to get the result for an arbitrary equation in the list.
We can also use `model.predict` for arbitrary equations, with the default equation being the one chosen by `model_selection`:
```
ypredict = model.predict(X)
ypredict_simpler = model.predict(X, 2)
print("Default selection MSE:", np.power(ypredict - y, 2).mean())
print("Manual selection MSE for index 2:", np.power(ypredict_simpler - y, 2).mean())
```
# Custom operators
A full list of operators is given here: https://astroautomata.com/PySR/#/operators,
but we can also use any binary or unary operator in `julia`, or define our own as arbitrary functions.
Say that we want a command to do quartic powers:
$$ y = x_0^4 - 2 $$
```
y = X[:, 0] ** 4 - 2
```
We can do this by passing a string in Julia syntax.
We also define the operator in sympy, with `extra_sympy_mappings`, to enable its use in `predict`, and other export functions.
```
model = PySRRegressor(
niterations=5,
populations=40,
binary_operators=["plus", "mult"],
unary_operators=["cos", "exp", "sin", "quart(x) = x^4"],
extra_sympy_mappings={"quart": lambda x: x**4},
)
model.fit(X, y)
model.sympy()
```
Since `quart` is arguably more complex than the other operators, you can also give it a different complexity, using, e.g., `complexity_of_operators={"quart": 2}` to give it a complexity of 2 (instead of the default 1). You can also define custom complexities for variables and constants (`complexity_of_variables` and `complexity_of_constants`, respectively - both take a single number).
One can also add a binary operator, with, e.g., `"myoperator(x, y) = x^2 * y"`. All Julia operators that work on scalar 32-bit floating point values are available.
Make sure that any operator you add is valid over the real line. So, e.g., you will need to define `"mysqrt(x) = sqrt(abs(x))"` to enable it for negative numbers,
or, simply have it return a very large number for bad inputs (to prevent negative input in a soft way):
`"mysqrt(x::T) where {T} = (x >= 0) ? x : T(-1e9)"` (Julia syntax for a template function of input type `T`), which will make `mysqrt(x)` return -10^9 for negative x–hurting the loss of the equation.
## Scoring
Using `model_selection="best"` selects the equation with the max score and prints it. But in practice it is best to look through all the equations manually, pick a maximum acceptable loss, and then use the score to select among the equations below that loss threshold.
Here, "score" is defined by:
$$ \text{score} = -\frac{\log(\text{loss}_i/\text{loss}_{i-1})}{\text{complexity}_i - \text{complexity}_{i-1}} $$
This scoring is motivated by the common strategy of looking for drops in the loss-complexity curve.
From Schmidt & Lipson (2009) -

# Noise example
Here is an example with noise. Known Gaussian noise with $\sigma$ between 0.1 and 5.0. We record samples of $y$:
$$ \sigma \sim U(0.1, 5.0) $$
$$ \epsilon \sim \mathcal{N}(0, \sigma^2)$$
$$ y = 5\;\cos(3.5 x_0) - 1.3 + \epsilon.$$
We have 5 features, say. The weights change the loss function to be:
$$MSE = \sum [(y - f(x))^2*w],$$
so in this example, we can set:
$$w = 1/\sigma^2.$$
```
np.random.seed(0)
N = 3000
upper_sigma = 5
X = 2 * np.random.rand(N, 5)
sigma = np.random.rand(N) * (upper_sigma - 0.1) + 0.1
eps = sigma * np.random.randn(N)
y = 5 * np.cos(3.5 * X[:, 0]) - 1.3 + eps
```
Let's look at this dataset:
```
plt.scatter(X[:, 0], y, alpha=0.2)
plt.xlabel("$x_0$")
plt.ylabel("$y$")
```
Define some weights to use:
```
weights = 1 / sigma[:, None] ** 2
weights[:5, 0]
```
Let's run PySR again:
```
model = PySRRegressor(
loss="myloss(x, y, w) = w * abs(x - y)", # Custom loss function with weights.
niterations=20,
populations=20, # Use more populations
binary_operators=["plus", "mult"],
unary_operators=["cos"],
)
model.fit(X, y, weights=weights)
```
Let's see if we get similar results to the true equation
```
model
```
We can also keep only the equations whose loss is within 2x of the lowest loss, then select the best score from that list:
```
best_idx = model.equations_.query(
f"loss < {2 * model.equations_.loss.min()}"
).score.idxmax()
model.sympy(best_idx)
```
We can also use `denoise=True`, which will run the input through a Gaussian process to denoise the dataset, before fitting on it.
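For example, a minimal sketch of the same fit with denoising enabled:
```
denoised_model = PySRRegressor(
    niterations=20,
    populations=20,
    binary_operators=["plus", "mult"],
    unary_operators=["cos"],
    denoise=True,  # denoise the dataset with a Gaussian process before fitting
)
denoised_model.fit(X, y)
```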
Let's look at the fit:
```
plt.scatter(X[:, 0], y, alpha=0.1)
y_prediction = model.predict(X, index=best_idx)
plt.scatter(X[:, 0], y_prediction)
```
# High-dimensional input: Neural Nets + Symbolic Regression
In this example, let's learn a high-dimensional problem. **This will use the method proposed in our NeurIPS paper: https://arxiv.org/abs/2006.11287.**
Let's consider a time series problem:
$$ z = y^2,\quad y = \frac{1}{100} \sum(y_i),\quad y_i = x_{i0}^2 + 6 \cos(2 x_{i2})$$
Imagine our time series is 100 timesteps. That is very hard for symbolic regression, even if we impose the inductive bias of $$z=f(\sum g(x_i))$$ - it is the square of the number of possible equations!
But, as in our paper, **we can break this problem down into parts with a neural network. Then approximate the neural network with the symbolic regression!**
Then, instead of, say, $(10^9)^2=10^{18}$ equations, we only have to consider $2\times 10^9$ equations.
```
np.random.seed(0)
N = 100000
Nt = 100
X = 6 * np.random.rand(N, Nt, 5) - 3
y_i = X[..., 0] ** 2 + 6 * np.cos(2 * X[..., 2])
y = np.sum(y_i, axis=1) / y_i.shape[1]
z = y**2
X.shape, y.shape
```
## Neural Network definition
So, as described above, let's first use a neural network with the sum inductive bias to solve this problem.
Essentially, we will learn two neural networks:
- `f`
- `g`
each defined as a multi-layer perceptron. We will sum over `g` the same way as in our equation, but we won't define the summed part beforehand.
Then, we will fit `g` and `f` **separately** using symbolic regression.
```
hidden = 128
total_steps = 50000
def mlp(size_in, size_out, act=nn.ReLU):
return nn.Sequential(
nn.Linear(size_in, hidden),
act(),
nn.Linear(hidden, hidden),
act(),
nn.Linear(hidden, hidden),
act(),
nn.Linear(hidden, size_out),
)
class SumNet(pl.LightningModule):
def __init__(self):
super().__init__()
########################################################
# The same inductive bias as above!
self.g = mlp(5, 1)
self.f = mlp(1, 1)
def forward(self, x):
y_i = self.g(x)[:, :, 0]
y = torch.sum(y_i, dim=1, keepdim=True) / y_i.shape[1]
z = self.f(y)
return z[:, 0]
########################################################
# PyTorch Lightning bookkeeping:
def training_step(self, batch, batch_idx):
x, z = batch
predicted_z = self(x)
loss = F.mse_loss(predicted_z, z)
return loss
def validation_step(self, batch, batch_idx):
return self.training_step(batch, batch_idx)
def configure_optimizers(self):
self.trainer.reset_train_dataloader()
optimizer = torch.optim.Adam(self.parameters(), lr=self.max_lr)
scheduler = {
"scheduler": torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.max_lr,
total_steps=self.total_steps,
final_div_factor=1e4,
),
"interval": "step",
}
return [optimizer], [scheduler]
```
## Data bookkeeping
Put everything into PyTorch and do a train/test split:
```
Xt = torch.tensor(X).float()
zt = torch.tensor(z).float()
X_train, X_test, z_train, z_test = train_test_split(Xt, zt, random_state=0)
train_set = TensorDataset(X_train, z_train)
train = DataLoader(train_set, batch_size=128, num_workers=2)
test_set = TensorDataset(X_test, z_test)
test = DataLoader(test_set, batch_size=256, num_workers=2)
```
## Train the model with PyTorch Lightning on GPUs:
Start the model:
```
pl.seed_everything(0)
model = SumNet()
model.total_steps = total_steps
model.max_lr = 1e-2
```
PyTorch Lightning trainer object:
```
trainer = pl.Trainer(max_steps=total_steps, gpus=1, benchmark=True)
```
Here, we fit the neural network:
```
trainer.fit(model, train_dataloaders=train, val_dataloaders=test)
```
## Latent vectors of network
Let's get the input and output of the learned `g` function from the network over some random data:
```
np.random.seed(0)
idx = np.random.randint(0, 10000, size=1000)
X_for_pysr = Xt[idx]
y_i_for_pysr = model.g(X_for_pysr)[:, :, 0]
y_for_pysr = torch.sum(y_i_for_pysr, dim=1) / y_i_for_pysr.shape[1]
z_for_pysr = zt[idx] # Use true values.
X_for_pysr.shape, y_i_for_pysr.shape
```
## Learning over the network:
Now, let's fit `g` using PySR:
```
np.random.seed(1)
tmpX = X_for_pysr.detach().numpy().reshape(-1, 5)
tmpy = y_i_for_pysr.detach().numpy().reshape(-1)
idx2 = np.random.randint(0, tmpy.shape[0], size=3000)
model = PySRRegressor(
niterations=20,
binary_operators=["plus", "sub", "mult"],
unary_operators=["cos", "square", "neg"],
)
model.fit(X=tmpX[idx2], y=tmpy[idx2])
```
## Validation
Recall we are searching for $y_i$ above:
$$ z = y^2,\quad y = \frac{1}{100} \sum(y_i),\quad y_i = x_{i0}^2 + 6 \cos(2 x_{i2})$$
```
model
```
A neural network can easily undo a linear transform, so this is fine: the network for $f$ will learn to undo the linear transform.
Then, we can learn another analytic equation for $z$.
**Now, we can compose these together to get the time series model!**
Think about what we just did: we found an analytical equation for $z$ in terms of $500$ datapoints, under the assumption that $z$ is a function of a sum of another function over an axis:
$$ z = f(\sum_i g(x_i)) $$
And we pulled out analytical copies for $g$ using symbolic regression.
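As a sketch (not part of the original run), `f` can be fit the same way, over the network's summed latent `y` and the true `z`:
```
# Fit f symbolically, mirroring the procedure used above for g.
tmp_y = y_for_pysr.detach().numpy()[:, None]   # shape (n_samples, 1)
tmp_z = z_for_pysr.detach().numpy()
f_model = PySRRegressor(
    niterations=20,
    binary_operators=["plus", "sub", "mult"],
    unary_operators=["square"],
)
f_model.fit(X=tmp_y, y=tmp_z)
```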
# Other PySR Options
The full list of PySR parameters can be found here: https://astroautomata.com/PySR/#/api
| github_jupyter |
```
"""
I've never used SQL before, so this is just trial and error for loading things right now.
This is just for helping me think and plan the steps.
"""
print('')
import pandas as pd
# pd.set_option('display.max_columns', 30)
# pd.set_option('display.width', 10000)
# pd.set_option('display.expand_frame_repr', False)
import sqlite3
connection = sqlite3.connect("data/bam-crowd-only.sqlite")
c = connection.cursor()
# c.execute(
# "CREATE TABLE modules (mid int primary key, project_id int, src text, mature_content boolean, license text)"
# )
# cats = c.execute(
# 'select "<img src=""" || src || """ height=200>" from modules, crowd_labels where modules.id = crowd_labels.mid and attribute = "content_cat" and label="positive" limit 100'
# )
# cursor = connection.execute('select * from scores')
# names = list(map(lambda x: x[0], cursor.description))
# names  # NOTE: would raise NameError, since the cursor code defining it above is commented out
df = pd.read_sql("select * from crowd_labels",
connection,
index_col="mid")
df['attribute']
# pd.read_sql(
# r'select "<img src=""" || src || """ height=200>" from modules, crowd_labels where modules.id = crowd_labels.mid and attribute = "content_cat" and label="positive" limit 100;',
# connection
# )
urls = pd.read_sql(
"select * from modules limit 100",
connection
)
urls.head()#['src']#[1]
scores = pd.read_sql("select * from scores",
connection,
index_col="mid")
urls['src'][1]
urls['src'][3]
scores.head()
scores2 = pd.read_sql("select * from crowd_raw_captions",
connection,
index_col="mid")
#scores2
import urllib.request
urllib.request.urlretrieve("http://www.gunnerkrigg.com//comics/00000001.jpg", "images/00000001.jpg")
import urllib
import cv2
import numpy as np
import matplotlib.pyplot as plt
url = "http://s0.geograph.org.uk/photos/40/57/405725_b17937da.jpg"
url_response = urllib.request.urlopen(url)
img_array = np.array(bytearray(url_response.read()), dtype=np.uint8)
urllib.request.urlretrieve(url, "images/00000002.jpg")
url2 = "https://mir-s3-cdn-cf.behance.net/project_modules/disp/a9e0f21065.55f7265cdb7b3.jpg"
url_response2 = urllib.request.urlopen(url2)
img_array2 = np.array(bytearray(url_response2.read()), dtype=np.uint8)
from sys import getsizeof  # needed before its first use here (also imported again below)
getsizeof(img_array2)
url_dead = "https://mir-cdn.behance.net/v1/rendition/project_modules/disp/8f69a81004.55f726597e324.jpg"
url_dead_response = urllib.request.urlopen(url_dead)
urllib.request.urlretrieve(url_dead, "images/00000002.jpg")
img = cv2.imdecode(img_array, -1)
img2 = cv2.imdecode(img_array2, -1)
resized_img = cv2.resize(img, (128,128))
resized_img2 = cv2.resize(img2, (128,128))
# cv2.imshow('URL Image', img)
# plt.show()
plt.imshow(resized_img2)
plt.show()
# reshaped_img = resized_img.reshape(resized_img.shape + (1,))
# reshaped_img2 = resized_img2.reshape(resized_img2.shape + (1,))
# reshaped_img = np.reshape(resized_img, + (1,))
reshaped_img = resized_img.reshape((1, 128,128, 3))
reshaped_img2 = resized_img2.reshape((1, 128,128, 3))
resized_img.shape
final_shape = np.concatenate((reshaped_img, reshaped_img2), axis=0)
final_shape.shape
final_shape[0, :, :, :].shape
plt.imshow(final_shape[0])
plt.show()
#all_img = np.dstack((resized_img, resized_img2))
#all_img = np.concatenate((resized_img, resized_img2), axis=-1)
all_img = np.vstack((resized_img, resized_img2))
all_img.shape
print (img.shape)
print (img2.shape)
from sys import getsizeof
getsizeof(img)
getsizeof(img_array)
getsizeof(url_response)
img_array.shape
img.shape
480 * 640 * 3
```
| github_jupyter |
# Tutorial
## [How to do Novelty Detection in Keras with Generative Adversarial Network](https://www.dlology.com/blog/how-to-do-novelty-detection-in-keras-with-generative-adversarial-network-part-2/) | DLology
This notebook is for test phase Novelty Detection. To Train the model, run this first.
```bash
python models.py
```
It is recommended to understand how the model works in general before continuing the implementation.
→ [How to do Novelty Detection in Keras with Generative Adversarial Network (Part 1)](https://www.dlology.com/blog/how-to-do-novelty-detection-in-keras-with-generative-adversarial-network/)
```
from utils import *
from kh_tools import *
import models
import imp
imp.reload(models)
from models import ALOCC_Model
from keras.datasets import mnist
from keras.losses import binary_crossentropy
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
self = ALOCC_Model(dataset_name='mnist', input_height=28, input_width=28)
```
## Choose a stopping criterion
The training procedure is stopped when R successfully maps noisy images to clean images carrying the concept of the target class, i.e., when R can reconstruct its input with minimal error. In the following case, we pick epoch 3.
```
# This image was generated at the end of the models.py training procedure to help pick a ending epoch to load.
from IPython.display import Image
Image(filename='plot_g_recon_losses.png')
# Load the epoch #3 saved weights.
self.adversarial_model.load_weights('./checkpoint/ALOCC_Model_3.h5')
(X_train, y_train), (_, _) = mnist.load_data()
X_train = X_train / 255
```
## Test the reconstruction loss and Discriminator output
The `abnormal` image has a **`larger` reconstruction loss** and **`smaller` discriminator output value**.
```
def test_reconstruction(label, data_index=11):
    specific_idx = np.where(y_train == label)[0]
    if data_index >= len(X_train):
        data_index = 0
    data = X_train[specific_idx].reshape(-1, 28, 28, 1)[data_index:data_index+1]
    model_predicts = self.adversarial_model.predict(data)
    fig = plt.figure(figsize=(8, 8))
    columns = 1
    rows = 2
    fig.add_subplot(rows, columns, 1)
    input_image = data.reshape((28, 28))
    reconstructed_image = model_predicts[0].reshape((28, 28))
    plt.title('Input')
    plt.imshow(input_image, label='Input')
    fig.add_subplot(rows, columns, 2)
    plt.title('Reconstruction')
    plt.imshow(reconstructed_image, label='Reconstructed')
    plt.show()
    # Compute the mean binary_crossentropy loss of the reconstructed image
    # (the input is the ground truth, the reconstruction is the prediction).
    y_true = K.variable(input_image)
    y_pred = K.variable(reconstructed_image)
    error = K.eval(binary_crossentropy(y_true, y_pred)).mean()
    print('Reconstruction loss:', error)
    print('Discriminator Output:', model_predicts[1][0][0])
```
### Normal case
The network was trained with label == 1.
```
test_reconstruction(1)
```
## Abnormal cases
The network was not trained on these labels, so the Generator/R network finds it hard to reconstruct the input images, which is reflected in higher reconstruction loss values.
The discriminator also outputs lower values compared to the normal case.
```
test_reconstruction(3)
test_reconstruction(5)
test_reconstruction(7)
```
| github_jupyter |
# Lab 07: Stack Applications
## Overview
For this assignment you will build on the stack data structure created in class to develop two distinct stack-driven applications.
Below is the completed stack implementation from class. While you needn't modify it for this assignment — indeed, all tests run on our end will *not* make use of any changes you introduce to the `Stack` class — we urge you to read through the code and make sure you understand how it works.
```
class Stack:
class Node:
def __init__(self, val, next=None):
self.val = val
self.next = next
def __init__(self):
self.top = None
def push(self, val):
self.top = Stack.Node(val, self.top)
def pop(self):
assert self.top, 'Stack is empty'
val = self.top.val
self.top = self.top.next
return val
def peek(self):
return self.top.val if self.top else None
def empty(self):
return self.top == None
def __bool__(self):
return not self.empty()
def __repr__(self):
if not self.top:
return ''
return '--> ' + ', '.join(str(x) for x in self)
def __iter__(self):
n = self.top
while n:
yield n.val
n = n.next
```
### 1. Paired delimiter matching
In class we wrote a function that uses a stack to help determine whether all paired delimiters (e.g., parentheses) in a given string are correctly matched — you can review the code at http://moss.cs.iit.edu/cs331/notebooks/stacks-and-queues.html (look for `check_parens`).
For this first exercise you will extend our implementation to check all the following paired delimiters: `{}, (), [], <>`. We've defined two strings — `delim_openers` and `delim_closers` — that might come in handy in your implementation (hint: look into using the `index` sequence method).
```
delim_openers = '{([<'
delim_closers = '})]>'
def check_delimiters(expr):
    """Returns True if and only if `expr` contains only correctly matched delimiters, else returns False."""
    stack = Stack()
    for ch in expr:
        if ch in delim_openers:
            # remember which opener we saw by its position in delim_openers
            stack.push(delim_openers.index(ch))
        elif ch in delim_closers:
            # a closer must match the most recent unmatched opener
            if stack.empty() or stack.pop() != delim_closers.index(ch):
                return False
    return stack.empty()
# (1 point)
from unittest import TestCase
tc = TestCase()
tc.assertTrue(check_delimiters('()'))
tc.assertTrue(check_delimiters('[]'))
tc.assertTrue(check_delimiters('{}'))
tc.assertTrue(check_delimiters('<>'))
# (1 point)
from unittest import TestCase
tc = TestCase()
tc.assertTrue(check_delimiters('([])'))
tc.assertTrue(check_delimiters('[{}]'))
tc.assertTrue(check_delimiters('{<()>}'))
tc.assertTrue(check_delimiters('<({[]})>'))
# (2 points)
from unittest import TestCase
tc = TestCase()
tc.assertTrue(check_delimiters('([] () <> [])'))
tc.assertTrue(check_delimiters('[{()} [] (<> <>) {}]'))
tc.assertTrue(check_delimiters('{} <> () []'))
tc.assertTrue(check_delimiters('<> ([] <()>) <[] [] <> <>>'))
# (1 point)
from unittest import TestCase
tc = TestCase()
tc.assertFalse(check_delimiters('('))
tc.assertFalse(check_delimiters('['))
tc.assertFalse(check_delimiters('{'))
tc.assertFalse(check_delimiters('<'))
tc.assertFalse(check_delimiters(')'))
tc.assertFalse(check_delimiters(']'))
tc.assertFalse(check_delimiters('}'))
tc.assertFalse(check_delimiters('>'))
# (1 point)
from unittest import TestCase
tc = TestCase()
tc.assertFalse(check_delimiters('( ]'))
tc.assertFalse(check_delimiters('[ )'))
tc.assertFalse(check_delimiters('{ >'))
tc.assertFalse(check_delimiters('< )'))
# (2 points)
from unittest import TestCase
tc = TestCase()
tc.assertFalse(check_delimiters('[ ( ] )'))
tc.assertFalse(check_delimiters('((((((( ))))))'))
tc.assertFalse(check_delimiters('< < > > >'))
tc.assertFalse(check_delimiters('( [] < {} )'))
```
### 2. Infix → Postfix conversion
Another function we looked at was one that used a stack to evaluate a postfix arithmetic expression — you can review the code at http://moss.cs.iit.edu/cs331/notebooks/stacks-and-queues.html (look for `eval_postfix`). Because most of us are more accustomed to infix-form arithmetic expressions (e.g., `2 * (3 + 4)`), however, the function seems to be of limited use. The good news: we can use a stack to convert an infix expression to postfix form!
To do so, we will use the following algorithm:
1. Start with an empty list and an empty stack. At the end of the algorithm, the list will contain the correctly ordered tokens of the postfix expression.
2. Next, for each token in the expression (split on whitespace):
- if the token is a digit (the string `isdigit` method can be used to determine this), simply append it to the list; else, the token must be either an operator or an opening or closing parenthesis, in which case apply one of the following options:
- if the stack is empty or contains a left parenthesis on top, push the token onto the stack.
- if the token is a left parenthesis, push it on the stack.
- if the token is a right parenthesis, pop the stack and append all operators to the list until a left parenthesis is popped. Discard the pair of parentheses.
- if the token has higher precedence than the top of the stack, push it on the stack. For our purposes, the only operators are +, -, *, /, where the latter two have higher precedecence than the first two.
- if the token has equal precedence with the top of the stack, pop and append the top of the stack to the list and then push the incoming operator.
- if the incoming symbol has lower precedence than the symbol on the top of the stack, pop the stack and append it to the list. Then repeat the above tests against the new top of stack.
3. After arriving at the end of the expression, pop and append all operators on the stack to the list.
A writeup containing a detailed explanation of the steps above (though it prints the tokens immediately rather than adding them to a list) can be found at http://csis.pace.edu/~wolf/CS122/infix-postfix.htm
```
# you may find the following precedence dictionary useful
prec = {'*': 2, '/': 2,
'+': 1, '-': 1}
def infix_to_postfix(expr):
    """Returns the postfix form of the infix expression found in `expr`"""
    ops = Stack()
    postfix = []
    toks = expr.split()
    for tok in toks:
        if tok == '(':
            ops.push(tok)
        elif tok == ')':
            # pop operators to the list until the matching '(' is found, then discard the pair
            while ops.peek() != '(':
                postfix.append(ops.pop())
            ops.pop()
        elif tok in prec:
            # pop operators of equal or higher precedence, repeating the test against each
            # new top of stack (per the algorithm above), before pushing the incoming one
            while not ops.empty() and ops.peek() != '(' and prec[ops.peek()] >= prec[tok]:
                postfix.append(ops.pop())
            ops.push(tok)
        else:
            # operand (digit) -- append directly to the output list
            postfix.append(tok)
    while not ops.empty():
        postfix.append(ops.pop())
    return ' '.join(postfix)
# (3 points)
from unittest import TestCase
tc = TestCase()
tc.assertEqual(infix_to_postfix('1'), '1')
tc.assertEqual(infix_to_postfix('1 + 2'), '1 2 +')
tc.assertEqual(infix_to_postfix('( 1 + 2 )'), '1 2 +')
tc.assertEqual(infix_to_postfix('1 + 2 - 3'), '1 2 + 3 -')
tc.assertEqual(infix_to_postfix('1 + ( 2 - 3 )'), '1 2 3 - +')
# (3 points)
from unittest import TestCase
tc = TestCase()
tc.assertEqual(infix_to_postfix('1 + 2 * 3'), '1 2 3 * +')
tc.assertEqual(infix_to_postfix('1 / 2 + 3 * 4'), '1 2 / 3 4 * +')
tc.assertEqual(infix_to_postfix('1 * 2 * 3 + 4'), '1 2 * 3 * 4 +')
tc.assertEqual(infix_to_postfix('1 + 2 * 3 * 4'), '1 2 3 * 4 * +')
# (3 points)
from unittest import TestCase
tc = TestCase()
tc.assertEqual(infix_to_postfix('1 * ( 2 + 3 ) * 4'), '1 2 3 + * 4 *')
tc.assertEqual(infix_to_postfix('1 * ( 2 + 3 * 4 ) + 5'), '1 2 3 4 * + * 5 +')
tc.assertEqual(infix_to_postfix('1 * ( ( 2 + 3 ) * 4 ) * ( 5 - 6 )'), '1 2 3 + 4 * * 5 6 - *')
```
### Exercise 1: Create a Numpy array (from a list)
```
import numpy as np
lst1=[1,2,3]
array1 = np.array(lst1)
type(array1)
type(lst1)
```
### Exercise 2: Add two Numpy arrays
```
lst2 = lst1 + lst1
print(lst2)
array2 = array1 + array1
print(array2)
```
### Exercise 3: Mathematical operations on Numpy arrays
```
print("array1 multiplied by array1: ",array1*array1)
print("array1 divided by array1: ",array1/array1)
print("array1 raised to the power of array1: ",array1**array1)
```
### Exercise 4: More advanced mathematical operations on Numpy arrays
```
lst_5=[i for i in range(1,6)]
print(lst_5)
array_5=np.array(lst_5)
# sine function
print("Sine: ",np.sin(array_5))
# logarithm
print("Natural logarithm: ",np.log(array_5))
print("Base-10 logarithm: ",np.log10(array_5))
print("Base-2 logarithm: ",np.log2(array_5))
# Exponential
print("Exponential: ",np.exp(array_5))
```
### Exercise 5: How to generate arrays easily? `arange` and `linspace`
```
print("A series of numbers:",np.arange(5,16))
print("Numbers spaced apart by 2:",np.arange(0,11,2))
print("Numbers spaced apart by float:",np.arange(0,11,2.5))
print("Every 5th number from 30 in reverse order: ",np.arange(30,-1,-5))
print("11 linearly spaced numbers between 1 and 5: ",np.linspace(1,5,11))
```
### Exercise 6: Creating multi-dimensional array
```
my_mat = [[1,2,3],[4,5,6],[7,8,9]]
mat = np.array(my_mat)
print("Type/Class of this object:",type(mat))
print("Here is the matrix\n----------\n",mat,"\n----------")
my_tuple = np.array([(1.5,2,3), (4,5,6)])
mat_tuple = np.array(my_tuple)
print (mat_tuple)
```
### Exercise 7: Dimension, shape, size, and data type of the 2D array
```
print("Dimension of this matrix: ",mat.ndim,sep='')
print("Size of this matrix: ", mat.size,sep='')
print("Shape of this matrix: ", mat.shape,sep='')
print("Data type of this matrix: ", mat.dtype,sep='')
```
### Exercise 8: Zeros, Ones, Random, and Identity Matrices and Vectors
```
print("Vector of zeros: ",np.zeros(5))
print("Matrix of zeros: ",np.zeros((3,4)))
print("Vector of ones: ",np.ones(4))
print("Matrix of ones: ",np.ones((4,2)))
print("Matrix of 5’s: ",5*np.ones((3,3)))
print("Identity matrix of dimension 2:",np.eye(2))
print("Identity matrix of dimension 4:",np.eye(4))
print("Random matrix of shape (4,3):\n",np.random.randint(low=1,high=10,size=(4,3)))
```
### Exercise 9: Reshaping, Ravel, Min, Max, Sorting
```
a = np.random.randint(1,100,30)
b = a.reshape(2,3,5)
c = a.reshape(6,5)
print ("Shape of a:", a.shape)
print ("Shape of b:", b.shape)
print ("Shape of c:", c.shape)
print("\na looks like\n",a)
print("\nb looks like\n",b)
print("\nc looks like\n",c)
b_flat = b.ravel()
print(b_flat)
```
### Exercise 10: Indexing and slicing
```
arr = np.arange(0,11)
print("Array:",arr)
print("Element at 7th index is:", arr[7])
print("Elements from 3rd to 5th index are:", arr[3:6])
print("Elements up to 4th index are:", arr[:4])
print("Elements from last backwards are:", arr[-1::-1])
print("3 Elements from last backwards are:", arr[-1:-6:-2])
arr2 = np.arange(0,21,2)
print("New array:",arr2)
print("Elements at 2nd, 4th, and 9th index are:", arr2[[2,4,9]]) # Pass a list as a index to subset
mat = np.random.randint(10,100,15).reshape(3,5)
print("Matrix of random 2-digit numbers\n",mat)
print("\nDouble bracket indexing\n")
print("Element in row index 1 and column index 2:", mat[1][2])
print("\nSingle bracket with comma indexing\n")
print("Element in row index 1 and column index 2:", mat[1,2])
print("\nRow or column extract\n")
print("Entire row at index 2:", mat[2])
print("Entire column at index 3:", mat[:,3])
print("\nSubsetting sub-matrices\n")
print("Matrix with row indices 1 and 2 and column indices 3 and 4\n", mat[1:3,3:5])
print("Matrix with row indices 0 and 1 and column indices 1 and 3\n", mat[0:2,[1,3]])
```
### Exercise 11: Conditional subsetting
```
mat = np.random.randint(10,100,15).reshape(3,5)
print("Matrix of random 2-digit numbers\n",mat)
print ("\nElements greater than 50\n", mat[mat>50])
```
### Exercise 12: Array operations (array-array, array-scalar, universal functions)
```
mat1 = np.random.randint(1,10,9).reshape(3,3)
mat2 = np.random.randint(1,10,9).reshape(3,3)
print("\n1st Matrix of random single-digit numbers\n",mat1)
print("\n2nd Matrix of random single-digit numbers\n",mat2)
print("\nAddition\n", mat1+mat2)
print("\nMultiplication\n", mat1*mat2)
print("\nDivision\n", mat1/mat2)
print("\nLineaer combination: 3*A - 2*B\n", 3*mat1-2*mat2)
print("\nAddition of a scalar (100)\n", 100+mat1)
print("\nExponentiation, matrix cubed here\n", mat1**3)
print("\nExponentiation, sq-root using pow function\n",pow(mat1,0.5))
```
### Exercise 13: Stacking arrays
```
a = np.array([[1,2],[3,4]])
b = np.array([[5,6],[7,8]])
print("Matrix a\n",a)
print("Matrix b\n",b)
print("Vertical stacking\n",np.vstack((a,b)))
print("Horizontal stacking\n",np.hstack((a,b)))
```
### Exercise 14: Create some array and dictionary to create Pandas series
```
import pandas as pd
labels = ['a','b','c']
my_data = [10,20,30]
arr = np.array(my_data)
d = {'a':10,'b':20,'c':30}
print ("Labels:", labels)
print("My data:", my_data)
print("Dictionary:", d)
```
### Exercise 15: Creating a Pandas Series
```
s1=pd.Series(data=my_data)
print(s1)
s2=pd.Series(data=my_data, index=labels)
print(s2)
s3=pd.Series(arr, labels)
print(s3)
s4=pd.Series(d)
print(s4)
```
### Exercise 16: Pandas series can hold many types of data
```
print ("\nHolding numerical data\n",'-'*25, sep='')
print(pd.Series(arr))
print ("\nHolding text labels\n",'-'*20, sep='')
print(pd.Series(labels))
print ("\nHolding functions\n",'-'*20, sep='')
print(pd.Series(data=[sum,print,len]))
print ("\nHolding objects from a dictionary\n",'-'*40, sep='')
print(pd.Series(data=[d.keys, d.items, d.values]))
```
### Exercise 17: Creating a Pandas DataFrame
```
matrix_data = np.random.randint(1,10,size=20).reshape(5,4)
row_labels = ['A','B','C','D','E']
column_headings = ['W','X','Y','Z']
df = pd.DataFrame(data=matrix_data, index=row_labels, columns=column_headings)
print("\nThe data frame looks like\n",'-'*45, sep='')
print(df)
d={'a':[10,20],'b':[30,40],'c':[50,60]}
df2=pd.DataFrame(data=d,index=['X','Y'])
print(df2)
```
### Exercise 18: Indexing and slicing (columns)
```
print("\nThe 'X' column\n",'-'*25, sep='')
print(df['X'])
print("\nType of the column: ", type(df['X']), sep='')
print("\nThe 'X' and 'Z' columns indexed by passing a list\n",'-'*55, sep='')
print(df[['X','Z']])
print("\nType of the pair of columns: ", type(df[['X','Z']]), sep='')
```
### Exercise 19: Indexing and slicing (rows)
```
print("\nLabel-based 'loc' method can be used for selecting row(s)\n",'-'*60, sep='')
print("\nSingle row\n")
print(df.loc['C'])
print("\nMultiple rows\n")
print(df.loc[['B','C']])
print("\nIndex position based 'iloc' method can be used for selecting row(s)\n",'-'*70, sep='')
print("\nSingle row\n")
print(df.iloc[2])
print("\nMultiple rows\n")
print(df.iloc[[1,2]])
```
### Exercise 20: Creating and deleting a (new) column (or row)
```
print("\nA column is created by assigning it in relation to an existing column\n",'-'*75, sep='')
df['New'] = df['X']+df['Z']
df['New (Sum of X and Z)'] = df['X']+df['Z']
print(df)
print("\nA column is dropped by using df.drop() method\n",'-'*55, sep='')
df = df.drop('New', axis=1) # Notice the axis=1 option, axis = 0 is default, so one has to change it to 1
print(df)
df1=df.drop('A')
print("\nA row (index) is dropped by using df.drop() method and axis=0\n",'-'*65, sep='')
print(df1)
print("\nAn in-place change can be done by making inplace=True in the drop method\n",'-'*75, sep='')
df.drop('New (Sum of X and Z)', axis=1, inplace=True)
print(df)
```
### Exercise 21: Intro to Matplotlib through a simple scatter plot
```
people = ['Ann','Brandon','Chen','David','Emily','Farook',
'Gagan','Hamish','Imran','Joseph','Katherine','Lily']
age = [21,12,32,45,37,18,28,52,5,40,48,15]
weight = [55,35,77,68,70,60,72,69,18,65,82,48]
height = [160,135,170,165,173,168,175,159,105,171,155,158]
import matplotlib.pyplot as plt
plt.scatter(age,weight)
plt.show()
plt.figure(figsize=(8,6))
plt.title("Plot of Age vs. Weight (in kgs)",fontsize=20)
plt.xlabel("Age (years)",fontsize=16)
plt.ylabel("Weight (kgs)",fontsize=16)
plt.grid (True)
plt.ylim(0,100)
plt.xticks([i*5 for i in range(12)],fontsize=15)
plt.yticks(fontsize=15)
plt.scatter(x=age,y=weight,c='orange',s=150,edgecolors='k')
plt.text(x=20,y=85,s="Weights are more or less similar \nafter 18-20 years of age",fontsize=15)
plt.vlines(x=20,ymin=0,ymax=80,linestyles='dashed',color='blue',lw=3)
plt.legend(['Weight in kgs'],loc=2,fontsize=12)
plt.show()
```
### Exercise 22: Generating random numbers from a Uniform distribution
```
x = np.random.randint(1,10)
print(x)
x = np.random.randint(1,10,size=1)
print(x)
x = np.random.randint(1,6,size=10)
print(x)
x = 50+50*np.random.random(size=15)
x= x.round(decimals=2)
print(x)
x = np.random.rand(3,3)
print(x)
```
### Exercise 23: Generating random numbers from a Binomial distribution
```
x = np.random.binomial(10,0.6,size=8)
print(x)
plt.figure(figsize=(7,4))
plt.title("Number of successes in coin toss",fontsize=16)
plt.bar(np.arange(1,9), height=x)
plt.xlabel("Experiment number",fontsize=15)
plt.ylabel("Number of successes",fontsize=15)
plt.show()
```
### Exercise 24: Generating random numbers from Normal distribution
```
x = np.random.normal()
print(x)
heights = np.random.normal(loc=155,scale=10,size=100)
plt.figure(figsize=(7,5))
plt.hist(heights,color='orange',edgecolor='k')
plt.title("Histogram of teen aged students's height",fontsize=18)
plt.xlabel("Height in cm",fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
```
### Exercise 25: Calculation of descriptive statistics from a DataFrame
```
people_dict={'People':people,'Age':age,'Weight':weight,'Height':height}
people_df=pd.DataFrame(data=people_dict)
people_df
print(people_df['Age'].mean())
print(people_df['Height'].max())
print(people_df['Weight'].std())
np.percentile(people_df['Age'],25)
pcnt_75 = np.percentile(people_df['Age'],75)
pcnt_25 = np.percentile(people_df['Age'],25)
print("Inter-quartile range: ",pcnt_75-pcnt_25)
print(people_df.describe())
```
### Exercise 26: DataFrame even has built-in plotting utilities
```
people_df['Weight'].hist()
plt.show()
people_df.plot.scatter('Weight','Height',s=150,c='orange',edgecolor='k')
plt.grid(True)
plt.title("Weight vs. Height scatter plot",fontsize=18)
plt.xlabel("Weight (in kg)",fontsize=15)
plt.ylabel("Height (in cm)",fontsize=15)
plt.show()
```
# Convert OpenSN data to name,host,type,x,y,z,t,lum
Data downloaded from The Open Supernova Catalog https://sne.space on Aug. 20, 2019
```
import pandas as pd
import numpy as np
from astropy import units
from astropy.coordinates import SkyCoord, Distance
from astropy.cosmology import WMAP9
import datetime
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('OpenSNCat.csv')
#select the ones that have all the data we need
#In the end, I want z, but since there are multiple z values for some sources,
# I think I will just use the luminosity distance and convert below
df = df.loc[(df['R.A.'].notnull()) & \
(df['Dec.'].notnull()) & \
(df['dL (Mpc)'].notnull()) & \
(df['Disc. Date'].notnull()) & \
(df['Mmax'].notnull())]
df
```
I will have to iterate through the rows, since some coords have multiple entries, and some dates are bad
```
x = []
y = []
z = []
t = []
log10lum = []
name = []
host = []
tpe = []
#for datetime
fmt = '%Y/%m/%d'
N = 1e10
for index, row in df.iterrows():
bad = False
#there are still some dates that cause errors (e.g., 185/12/07/)
date = str(row['Disc. Date'])
pos = date.find(',')
fmt0 = fmt
if (pos != -1):
date = row['Disc. Date'][0:pos]
pos1 = date.find('/')
pos2 = date.rfind('/')
if (pos1 == -1):
fmt0 = '%Y'
if (pos1 != -1 and pos2 == pos1):
fmt0 = '%Y/%m/'
if (fmt0 == fmt):
val1 = int(date[0:pos1])
if (val1 <= 12):
fmt0 = '%m/%d/%Y'
if (val1 > 12 and val1 < 1800):
bad = True
if (not bad):
dt = datetime.datetime.strptime(date, fmt0)
t.append(dt.year + dt.month/12. + dt.day/365.24)
ra = row['R.A.']
pos = str(ra).find(',')
if (pos != -1):
ra = row['R.A.'][0:pos]
dec = row['Dec.']
pos = str(dec).find(',')
if (pos != -1):
dec = row['Dec.'][0:pos]
d = row['dL (Mpc)']*units.Mpc
#convert to comoving distance
cosmoz = Distance(d).z
c1 = SkyCoord(ra, dec, unit=(units.hourangle, units.deg), distance=WMAP9.comoving_distance(cosmoz)).galactic.cartesian
x.append(c1.x.to(units.Mpc).value)
y.append(c1.y.to(units.Mpc).value)
z.append(c1.z.to(units.Mpc).value)
log10lum.append(0.4*(4.74 - row['Mmax']))
name.append(row['Name'])
host.append(row['Host Name'])
tpe.append(row['Type'])
if (index > N):
break
print(min(t), max(t))
f, (ax1, ax2) = plt.subplots(1,2, figsize=(10, 5))
_ = ax1.hist(t,bins=100)
_ = ax2.hist(log10lum,bins=100)
```
### Write this to a new csv file
```
print(len(name), len(host), len(tpe), len(x), len(y), len(z), len(t))
data = {'name':np.array(name),
'host':np.array(host),
'type':np.array(tpe),
'x':np.array(x),
'y':np.array(y),
'z':np.array(z),
't':np.array(t),
'log10lum':np.array(log10lum)}
pd.DataFrame(data).to_csv('OpenSNCatConverted.csv', index=False)
def unique(list1):
# initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
# print list
for x in unique_list:
print(x)
unique(tpe)
```
```
import random
import numpy as np
import pandas as pd
length = 1000
cols = ["Q", "X", "Y", "Z"]
mu = 0
sigma = 5
import pingouin
lst_dct = {col:[] for col in cols }
for i in range(length):
lst_dct["Q"].append(50 + np.random.normal(mu, sigma))
lst_dct["X"].append(5 * lst_dct["Q"][-1] + 10 + np.random.normal(mu, sigma ))
lst_dct["Y"].append(lst_dct["Q"][-1] * -3 + 20 + np.random.normal(mu, sigma))
lst_dct["Z"].append(5 * lst_dct["X"][-1] + 10 * lst_dct["Y"][-1] + np.random.normal(mu, 3 * sigma))
df = pd.DataFrame(lst_dct)
keys = ["X",
"Y",
"Z",
"Q"]
dag_keys = keys
df
import pingouin
undirected_graph = {key:[] for key in df.keys()}
for x in undirected_graph:
remaining_vars = [y for y in df.keys() if y != x]
for y in remaining_vars:
undirected_graph[x].append(y)
undirected_graph
import copy
import pingouin
p_val = .01
def build_skeleton(df, undirected_graph):
def check_remaining_controls(control_vars, undirected_graph, x, y, controls_used) :
c_used = copy.copy(controls_used)
for c_var in control_vars:
if y not in undirected_graph[x]:
break
c_used.append(c_var)
test = df.partial_corr(x = x, y = y, covar=c_used,
method = "pearson")
if test["p-val"].values[0] > p_val:
undirected_graph[x].remove(y)
#breakout of the for
break
else:
remaining_controls = copy.copy(control_vars)
remaining_controls.remove(c_var)
check_remaining_controls(remaining_controls, undirected_graph, x, y, c_used)
d_sep = {}
for x in df.keys():
ys = undirected_graph[x]
for y in df.keys():
d_sep[(x,y)] = []
if x != y:
# first check for correlation with no controls
test = df.partial_corr(x = x, y = y, covar = None,method = "pearson")
if test["p-val"].values[0] > p_val:
undirected_graph[x].remove(y)
# if correlated, check for d-separation by controlling for other variables
else:
# recursively test conditioning sets using check_remaining_controls
control_vars = [z for z in df.keys() if z != y and z != x]
check_remaining_controls(control_vars, undirected_graph, x, y, [])
return undirected_graph
undirected_graph = build_skeleton(df, undirected_graph)
undirected_graph
def check_colliders(df, undirected_graph):
directed_graph = copy.deepcopy(undirected_graph)  # deep copy: edge lists are mutated below and must not alias the skeleton
for x in directed_graph.keys():
for y in directed_graph.keys():
if x != y:
ux = directed_graph[x]
uy = directed_graph[y]
if y not in ux and x not in uy:
neighbors = [node for node in directed_graph if node in ux and node in uy]
for neighbor in neighbors:
#test =
try:
directed_graph[neighbor].remove(x)
directed_graph[neighbor].remove(y)
except:
continue
return directed_graph
d_graph = check_colliders(df, undirected_graph)
print(d_graph)
import matplotlib.pyplot as plt
import networkx as nx
def graph_DAG(undirected_graph, df, title = "DAG Structure"):
# generate partial correlation matrix to draw values from
# for graph edges
pcorr_matrix = df.pcorr()
graph = nx.Graph()
edges = []
edge_labels = {}
for key in undirected_graph:
for key2 in undirected_graph[key]:
if (key2, key) not in edges:
edge = (key.replace(" ","\n"), key2.replace(" ","\n"))
edges.append(edge)
# edge label is partial correlation between
# key and key2
edge_labels[edge] = str(round(pcorr_matrix.loc[key][key2],2))
# edge format: ("i", "j") --> from node i to node j
graph.add_edges_from(edges)
color_map = ["C0" for g in graph]
fig, ax = plt.subplots(figsize = (20,12))
graph.nodes()
plt.tight_layout()
pos = nx.spring_layout(graph)#, k = 5/(len(sig_corr.keys())**.5))
plt.title(title, fontsize = 30)
nx.draw_networkx(graph, pos, node_color=color_map,
node_size = 1000,
with_labels=True, arrows=False,
font_size = 20, alpha = 1,
font_color = "white",
ax = ax)
nx.draw_networkx_edge_labels(graph,pos,
edge_labels=edge_labels,
font_color='green',
font_size=20)
plt.axis("off")
plt.savefig("g1.png", format="PNG")
# tell matplotlib you're done with the plot: https://stackoverflow.com/questions/741877/how-do-i-tell-matplotlib-that-i-am-done-with-a-plot
plt.show()
graph_DAG(undirected_graph, df, title = "DAG Structure")
from pgmpy.estimators import PC
c = PC(df)
max_cond_vars = len(keys) - 2
model = c.estimate(return_type = "dag",variant= "parallel",
significance_level = 0.05,
max_cond_vars = max_cond_vars, ci_test = "pearsonr")
edges = model.edges()
pcorr = df.pcorr()
weights = {}
for edge in edges:
print(edge, ":",pcorr[edge[0]].loc[edge[1]])
edges
from matplotlib.patches import ArrowStyle
def graph_DAG(edges, df, title = ""):
pcorr = df.pcorr()
graph = nx.DiGraph()
edge_labels = {}
for edge in edges:
edge_labels[edge] = str(round(pcorr[edge[0]].loc[edge[1]],2))
graph.add_edges_from(edges)
color_map = ["C0" for g in graph]
fig, ax = plt.subplots(figsize = (20,12))
graph.nodes()
plt.tight_layout()
pos = nx.spring_layout(graph)#, k = 5/(len(sig_corr.keys())**.5))
plt.title(title, fontsize = 30)
nx.draw_networkx(graph, pos, node_color=color_map, node_size = 1200,
with_labels=True, arrows=True,
font_color = "white",
font_size = 26, alpha = 1,
width = 1, edge_color = "C1",
arrowstyle=ArrowStyle("Fancy, head_length=3, head_width=1.5, tail_width=.1"), ax = ax)
#nx.draw_networkx_edge_labels(graph,pos,
# edge_labels=edge_labels,
# font_color='green',
# font_size=20)
graph_DAG(edges, df)
def graph_stats(df, edges):
statistics = {}
for (node1, node2) in edges:
covar = [node for node in df.keys() if node not in [node1, node2]]
statistics[(node1, node2)] = df.partial_corr(x = node1, y = node2, covar=covar,
method = "pearson")
statistics[(node2, node1)] = statistics[(node1, node2)]  # also store the reversed pair
print(node1, node2, statistics[(node1, node2)], sep = "\n")
graph_stats(df, edges)
```
```
%matplotlib notebook
# test imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
print(f"The version of numpy is: {np.__version__}")
print(f"The version of pandas is: {pd.__version__}")
print(f"The version of scikit-learn is: {sklearn.__version__}")
```
You should see the versions of the libraries installed in your environment. If you are using the local virtual environment set up by `pipenv`, you should see the following:
```
The version of numpy is: 1.19.5
The version of pandas is: 1.1.5
The version of scikit-learn is: 0.22.2.post1
```
If you are running this notebook on Google Colab, your versions might be different.
## Variables
```
number_1 = 1
number_2 = 2.0
print(number_1)
print(type(number_1))
print(number_2)
print(type(number_2))
string_1 = "hello"
string_2 = "hello world"
print(string_1)
print(type(string_1))
print(string_2)
print(type(string_2))
list_1 = [1, 2, 3]
print(list_1)
print(len(list_1))
list_2 = ["hello", "world", "1", 1]
print(list_2)
print(list_2[2])
dict_1 = {
"class_number": "MECE2020",
"class_capacity": 150,
}
print(type(dict_1))
print(dict_1["class_capacity"])
```
## Operators
```
number_1 + number_2
# this will fail
list_1 / number_1
number_1 >= number_2
len(list_2[0]) == len(list_2[1])
(len(list_2[0]) == len(list_2[1])) and False
```
## Control Structures
```
wether_today = "raining"
if wether_today == "raining":
print("Bring an umbrella!")
elif wether_today == "sunny":
print("Enjoy the sun!")
else:
print("What is the wether today?")
for i in range(10):
print("The number is:", i)
i = 0
while i < 10:
print("The number is:", i)
i += 1
```
## List comprehension
```
list_3 = []
for i in range(10):
list_3.append(i**2)
list_3
list_4 = [i**2 for i in range(10)]
print(list_4)
list_5 = [1.25, -9.45, 10.22, 3.78, -5.92, 1.16]
list_6 = [x if x > 0 else 0 for x in list_5]
list_6
```
## Function
```
def add(number_1: float, number_2: float) -> float:
"""Add two numbers."""
return number_1 + number_2
add(1, 2)
def square_root(x: float) -> float:
"""Calcuate the square root of the input using Newton's method.
Args:
x (float): The input number, must be greater or equal to zero.
Returns:
(float): Square root of the input.
Raises:
ValueError: If the input number is negative.
"""
if x < 0:
raise ValueError("The input number can not be negative.")
def get_next_guess(current_guess: float) -> float:
"""Get next guess using Newton's method."""
return 0.5 * (current_guess + x / current_guess)
epsilon = 1e-5
current_guess = x
next_guess = get_next_guess(current_guess)
while abs(current_guess - next_guess) > epsilon:
current_guess = next_guess
next_guess = get_next_guess(current_guess)
return next_guess
square_root(3)
```
## Class
```
class Person:
"""A simple class."""
def __init__(self, name: str):
self.name = name
def say(self, words: str):
"""Say something."""
print(f"{self.name} says: {words}")
def pat(self, person: "Person"):  # string annotation: the Person name is not yet bound inside its own class body
"""Pat another person."""
print(f"{self.name} pats {person.name}'s shoulder.")
person_1 = Person("John Doe")
person_1.say("Hello!")
person_2 = Person("Jane Doe")
person_2.say("Hello too!")
person_1.pat(person_2)
```
## Using `pandas`
```
data = pd.read_csv("https://raw.githubusercontent.com/changyaochen/MECE4520/master/lectures/lecture_1/iris.csv")
data.head()
data.shape
# some simple data aggregation
data["Species"].value_counts()
data.groupby("Species")["SepalLengthCm"].mean()
# Some simple visualization
plt.scatter(x=data["SepalLengthCm"], y=data["SepalWidthCm"])
plt.xlabel("SepalLengthCm")
plt.ylabel("SepalWidthCm")
plt.tight_layout()
plt.show()
```
# Goal
* Create phylogeny for all genome reps used for the Struo2 database
* merging & filtering GTDB MLSA phylogenies
# Var
```
work_dir = '/ebio/abt3_projects/databases_no-backup/GTDB/release202/Struo2/phylogeny/'
# species-rep genomes selected
genomes_file = file.path(dirname(work_dir),'metadata_1per-GTDB-Spec_gte50comp-lt5cont_wtaxID_wPath.tsv')
# trees from gtdb
arc_tree_file = '/ebio/abt3_projects/databases_no-backup/GTDB/release202/phylogeny/ar122_r202.tree'
bac_tree_file = '/ebio/abt3_projects/databases_no-backup/GTDB/release202/phylogeny/bac120_r202.tree'
# full gtdb metadata
gtdb_meta_dir = '/ebio/abt3_projects/databases_no-backup/GTDB/release202/metadata/'
gtdb_meta_arc_file = file.path(gtdb_meta_dir, 'ar122_metadata_r202.tsv')
gtdb_meta_bac_file = file.path(gtdb_meta_dir, 'bac120_metadata_r202.tsv')
```
# Init
```
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(tidytable)
library(ape)
library(LeyLabRMisc)
df.dims()
```
# Load
```
tax_levs = c('Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species')
# genomes used for struo
genomes = Fread(genomes_file) %>%
select.(ncbi_organism_name, accession, gtdb_taxonomy) %>%
separate.(gtdb_taxonomy, tax_levs, sep = ';') %>%
mutate.(Species = gsub(' ', '_', Species))
genomes %>% unique_n('genomes', ncbi_organism_name)
genomes
# arc tree
arc_tree = read.tree(arc_tree_file)
arc_tree
# bac tree
bac_tree = read.tree(bac_tree_file)
bac_tree
# metadata: archaea
gtdb_meta_arc = Fread(gtdb_meta_arc_file) %>%
select.(accession, gtdb_taxonomy) %>%
filter.(accession %in% arc_tree$tip.label) %>%
separate.(gtdb_taxonomy, tax_levs, sep = ';')
gtdb_meta_arc
# metadata: bacteria
gtdb_meta_bac = Fread(gtdb_meta_bac_file) %>%
select.(accession, gtdb_taxonomy) %>%
filter.(accession %in% bac_tree$tip.label) %>%
separate.(gtdb_taxonomy, tax_levs, sep = ';')
gtdb_meta_bac
# combined
gtdb_meta = rbind(gtdb_meta_arc, gtdb_meta_bac)
gtdb_meta_arc = gtdb_meta_bac = NULL
gtdb_meta
```
## Checks
```
summary_x(arc_tree$edge.length)
summary_x(bac_tree$edge.length)
```
# Merging & pruning tree
```
# binding trees at root
tree = ape::bind.tree(arc_tree, bac_tree)
tree
# renaming as species
idx = gtdb_meta %>%
filter.(accession %in% tree$tip.label) %>%
select.(accession, Species) %>%
mutate.(Species = gsub(' ', '_', Species)) %>%
as.data.frame
rownames(idx) = idx$accession
tree$tip.label = idx[tree$tip.label,'Species']
tree
# checking overlap
overlap(genomes$Species, tree$tip.label)
# pruning
to_rm = setdiff(tree$tip.label, genomes$Species)
to_rm %>% length
tree_f = ape::drop.tip(tree, to_rm)
tree_f
```
## Checks
```
# checking overlap
overlap(genomes$Species, tree_f$tip.label)
# branch lengths
summary_x(tree_f$edge.length)
```
# Writing tree
```
F = file.path(work_dir, 'ar122-bac120_r202_1per-GTDB-Spec_gte50comp-lt5cont.nwk')
write.tree(tree_f, F)
cat('File written:', F, '\n')
```
# sessionInfo
```
sessionInfo()
```
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

## Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier
This example shows you how to use MLflow together with Azure Machine Learning services for tracking the metrics and artifacts while training a Keras model to classify MNIST digit images and deploy the model as a web service. You'll learn how to:
1. Set up MLflow tracking URI so as to use Azure ML
2. Create experiment
3. Instrument your model with MLflow tracking
4. Train a Keras model locally with MLflow auto logging
5. Train a model on GPU compute on Azure with MLflow auto logging
6. View your experiment within your Azure ML Workspace in Azure Portal
7. Deploy the model as a web service on Azure Container Instance
8. Call the model to make predictions
### Pre-requisites
If you are using a Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met.
Install TensorFlow and Keras; this notebook has been tested with TensorFlow version 2.1.0 and Keras version 2.3.1.
Also, install azureml-mlflow package using ```pip install azureml-mlflow```. Note that azureml-mlflow installs mlflow package itself as a dependency if you haven't done so previously.
### Set-up
Import packages and check versions of Azure ML SDK and MLflow installed on your computer. Then connect to your Workspace.
```
import sys, os
import mlflow
import mlflow.azureml
import azureml.core
from azureml.core import Workspace
print("SDK version:", azureml.core.VERSION)
print("MLflow version:", mlflow.version.VERSION)
ws = Workspace.from_config()
ws.get_details()
```
### Set tracking URI
Set the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
```
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
```
### Create Experiment
In both MLflow and Azure ML, training runs are grouped into experiments. Let's create one for our experimentation.
```
experiment_name = "keras-with-mlflow"
mlflow.set_experiment(experiment_name)
```
### Train model locally while logging metrics and artifacts
The ```scripts/train.py``` program contains the code to load the image dataset, train and test the model. Within this program, the train.driver function wraps the end-to-end workflow.
Within the driver, the ```mlflow.start_run``` starts MLflow tracking. Then, MLflow's automatic logging is used to log metrics, parameters and model for the Keras run.
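The training script itself is not reproduced in this notebook, but a minimal sketch of the auto-logging pattern described above might look like the following (the model architecture and hyperparameters here are illustrative placeholders, not the actual ```scripts/train.py```):
```
import mlflow
import mlflow.keras
from tensorflow import keras

def driver():
    # enable automatic logging of Keras parameters, per-epoch metrics, and the model
    mlflow.keras.autolog()
    (x_train, y_train), _ = keras.datasets.mnist.load_data()
    x_train = x_train.reshape(len(x_train), -1).astype("float32") / 255
    y_train = keras.utils.to_categorical(y_train, 10)
    model = keras.Sequential([
        keras.layers.Dense(128, activation="relu", input_shape=(784,)),
        keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    with mlflow.start_run() as run:  # everything inside the run is tracked under the experiment
        model.fit(x_train, y_train, epochs=2, batch_size=128)
    return run
```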
Let's add the program to search path, import it as a module and invoke the driver function. Note that the training can take few minutes.
```
lib_path = os.path.abspath("scripts")
sys.path.append(lib_path)
import train
run = train.driver()
```
### Train model on GPU compute on Azure
Next, let's run the same script on GPU-enabled compute for faster training. If you've completed the [Configuration](../../../configuration.ipynb) notebook, you should have a GPU cluster named "gpu-cluster" available in your workspace. Otherwise, follow the instructions in the notebook to create one. For simplicity, this example uses a single process on a single VM to train the model.
Clone an environment object from the Tensorflow 2.1 Azure ML curated environment. Azure ML curated environments are pre-configured environments to simplify ML setup, reference [this doc](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-environments#use-a-curated-environment) for more information. To enable MLflow tracking, add ```azureml-mlflow``` as a pip package.
```
from azureml.core import Environment
env = Environment.get(workspace=ws, name="AzureML-TensorFlow-2.1-GPU").clone("mlflow-env")
env.python.conda_dependencies.add_pip_package("azureml-mlflow")
env.python.conda_dependencies.add_pip_package("keras==2.3.1")
env.python.conda_dependencies.add_pip_package("numpy")
```
Create a ScriptRunConfig to specify the training configuration: script, compute as well as environment.
```
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory="./scripts", script="train.py")
src.run_config.environment = env
src.run_config.target = "gpu-cluster"
```
Get a reference to the experiment you created previously, but this time, as an Azure Machine Learning experiment object.
Then, use the ```Experiment.submit``` method to start the remote training run. Note that the first training run often takes longer as Azure Machine Learning service builds the Docker image for executing the script. Subsequent runs will be faster as the cached image is used.
```
from azureml.core import Experiment
exp = Experiment(ws, experiment_name)
run = exp.submit(src)
```
You can monitor the run and its metrics on Azure Portal.
```
run
```
Also, you can wait for run to complete.
```
run.wait_for_completion(show_output=True)
```
### Deploy model as web service
The ```mlflow.azureml.deploy``` function registers the logged Keras+Tensorflow model and deploys the model in a framework-aware manner. It automatically creates the Tensorflow-specific inferencing wrapper code and specifies package dependencies for you. See [this doc](https://mlflow.org/docs/latest/models.html#id34) for more information on deploying models on Azure ML using MLflow.
In this example, we deploy the Docker image to Azure Container Instance: a serverless compute capable of running a single container. You can tag and add descriptions to help keep track of your web service.
[Other inferencing compute choices](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) include Azure Kubernetes Service which provides scalable endpoint suitable for production use.
Note that the service deployment can take several minutes.
```
from azureml.core.webservice import AciWebservice, Webservice
model_path = "model"
aci_config = AciWebservice.deploy_configuration(cpu_cores=2,
memory_gb=5,
tags={"data": "MNIST", "method" : "keras"},
description="Predict using webservice")
webservice, azure_model = mlflow.azureml.deploy(model_uri='runs:/{}/{}'.format(run.id, model_path),
workspace=ws,
deployment_config=aci_config,
service_name="keras-mnist-1",
model_name="keras_mnist")
```
Once the deployment has completed you can check the scoring URI of the web service.
```
print("Scoring URI is: {}".format(webservice.scoring_uri))
```
In case of a service creation issue, you can use ```webservice.get_logs()``` to get logs to debug.
### Make predictions using a web service
To test the web service, create a test data set as a normalized NumPy array.
Then, let's define a utility function that takes a random image and converts it into a format and shape suitable for input to the Keras inferencing end-point (a sketch of such a helper follows the list below). The conversion is done by:
1. Select a random (image, label) tuple
2. Take the image and convert it to a NumPy array
3. Reshape array into 1 x 1 x N array
* 1 image in batch, 1 color channel, N = 784 pixels for MNIST images
* Note also ```x = x.view(-1, 1, 28, 28)``` in net definition in ```train.py``` program to shape incoming scoring requests.
4. Convert the NumPy array to list to make it into a built-in type.
5. Create a dictionary {"data": <list>} that can be converted to a JSON string for web service requests.
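A minimal sketch of such a helper (a hypothetical function equivalent to the inline conversion in the next cell, which sends a batch of one flattened image):
```
import json
import numpy as np

def sample_request(x_test):
    """Pick a random test image and wrap it as a JSON payload for the web service."""
    i = np.random.randint(0, len(x_test) - 1)       # 1. random image from the test set
    pixels = np.asarray(x_test[i]).reshape(-1)      # 2-3. ndarray, flattened to N = 784 pixels
    return json.dumps({"data": [pixels.tolist()]})  # 4-5. built-in lists inside a {"data": ...} dict
```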
```
import keras
import random
import numpy as np
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_test = x_test.astype("float32") / 255
x_test = x_test.reshape(len(x_test), -1)
# convert class vectors to binary class matrices
y_test = keras.utils.to_categorical(y_test, 10)
%matplotlib inline
import json
import matplotlib.pyplot as plt
# send a random row from the test set to score
random_index = np.random.randint(0, len(x_test)-1)
input_data = "{\"data\": [" + str(list(x_test[random_index])) + "]}"
response = webservice.run(input_data)
response = sorted(response[0].items(), key = lambda x: x[1], reverse = True)
print("Predicted label:", response[0][0])
plt.imshow(x_test[random_index].reshape(28,28), cmap = "gray")
```
You can also call the web service using a raw POST method against the web service
```
import requests
response = requests.post(url=webservice.scoring_uri, data=input_data,headers={"Content-type": "application/json"})
print(response.text)
```
## Clean up
You can delete the ACI deployment with a delete API call.
```
webservice.delete()
```
<a href="https://colab.research.google.com/github/gyyang/neurogym/blob/master/examples/demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Exploring NeuroGym tasks
NeuroGym is a comprehensive toolkit that allows training any network model on many established neuroscience tasks using Reinforcement Learning techniques. It includes working memory tasks, value-based decision tasks and context-dependent perceptual categorization tasks.
In this notebook we first show how to install the relevant toolbox.
We then show how to access the available tasks and their relevant information.
Finally we train an LSTM network on the Random Dots Motion task using the A2C algorithm [Mnih et al. 2016](https://arxiv.org/abs/1602.01783) implemented in the [stable-baselines](https://github.com/hill-a/stable-baselines) toolbox, and plot the results.
You can easily change the code to train a network on any other available task or using a different algorithm (e.g. ACER, PPO2).
### Installation on google colab
```
%tensorflow_version 1.x
# Install gym
! pip install gym
# Install neurogym
! git clone https://github.com/gyyang/neurogym.git
%cd neurogym/
! pip install -e .
# Install stable-baselines
! pip install --upgrade stable-baselines
```
### Explore tasks
```
import warnings
import gym
import neurogym as ngym
from neurogym.utils import info, plotting
warnings.filterwarnings('ignore')
info.all_tasks()
```
### Visualize a single task
```
task = 'PerceptualDecisionMaking-v0'
env = gym.make(task);
print(env)
plotting.plot_env(env, num_steps=300, def_act=0, ob_traces=['Fixation cue', 'Stim1', 'Stim2'], fig_kwargs={'figsize': (12, 12)});
```
### Explore wrappers
```
info.all_wrappers()
info.info_wrapper('TrialHistory-v0', show_code=True);
```
### Train a network
```
import warnings
import numpy as np
from neurogym.wrappers import trial_hist, monitor
from stable_baselines.common.policies import LstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import A2C # ACER, PPO2
warnings.filterwarnings('default')
# task parameters
task = 'PerceptualDecisionMaking-v0'
timing = {'fixation': ('constant', 300),
'stimulus': ('constant', 700),
'decision': ('constant', 300)}
kwargs = {'dt': 100, 'timing': timing, 'stim_scale': 2}
# wrapper parameters
n_ch = 2
p = 0.8
num_blocks = 2
block_1 = np.array([[p, 1-p], [1-p, p]]) # repeating block
block_2 = np.array([[1-p, p], [p, 1-p]]) # alternating block
probs = np.empty((num_blocks, n_ch, n_ch))
probs[0, :, :] = block_1
probs[1, :, :] = block_2
block_dur = 50
# build task
env = gym.make(task, **kwargs)
# Apply the wrapper
env = trial_hist.TrialHistory(env, probs=probs, block_dur=block_dur)
env = monitor.Monitor(env, folder='content/tests/', sv_per=10000, verbose=1, sv_fig=True, num_stps_sv_fig=100)
# the env is now wrapped automatically when passing it to the constructor
env = DummyVecEnv([lambda: env])
model = A2C(LstmPolicy, env, verbose=1, policy_kwargs={'feature_extraction':"mlp"})
model.learn(total_timesteps=500000, log_interval=100000)
env.close()
```
### Visualize results
```
import numpy as np
import matplotlib.pyplot as plt
# Create task
env = gym.make(task, **kwargs)
# Apply the wrapper
env = trial_hist.TrialHistory(env, probs=probs, block_dur=block_dur)
env = DummyVecEnv([lambda: env])
plotting.plot_env(env, num_steps=50, def_act=0, ob_traces=['Fixation cue', 'Stim1', 'Stim2'], fig_kwargs={'figsize': (12, 12)}, model=model);
```
```
from netCDF4 import Dataset
import netCDF4 as netcdf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib as mpl
import cmocean as cmo
#mapping
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import shapereader
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from scipy.interpolate import griddata
import xarray as xr
import xarray.ufuncs as xu
#import geoviews as gv
#import holoviews as hv
#import geoviews.feature as gf
#gv.extension('bokeh', 'matplotlib')
def subset(lonw,lone,latn,lats,data,date1,date2):
mask_lon = (data.lon >= lonw) & (data.lon <= lone)
mask_lat = (data.lat >= lats) & (data.lat <= latn)
%time new_data = data.sel(time=slice(date1,date2)).where(mask_lon & mask_lat, drop=True)
return new_data
# import HFR dataset
aggr_url = "https://hfrnet-tds.ucsd.edu/thredds/dodsC/HFR/USEGC/6km/hourly/RTV/HFRADAR_US_East_and_Gulf_Coast_6km_Resolution_Hourly_RTV_best.ncd"
data_h = xr.open_dataset(aggr_url)
# import CMEMS outputs
aggr_url = "./data/CMEMS-global-analysis-forecast-phy-001-024-hourly-u-v.nc"
data_m = xr.open_dataset(aggr_url)
# subset the dataset to the area and time period of interest
lonw, lone = -76, -73 # westernmost and easternmost longitude
lats, latn = 36.5, 39.5 # southernmost and northernmost latitude
date1 = "2020-05-01" # initial date
date2 = "2020-06-01" # final date
# create mask
mask_lon = (data_h.lon >= lonw) & (data_h.lon <= lone)
mask_lat = (data_h.lat >= lats) & (data_h.lat <= latn)
# subset
%time data_sub_h = data_h.sel(time=slice(date1, date2)).where(mask_lon & mask_lat, drop=True)
mask_lon = (data_m.longitude >= lonw) & (data_m.longitude <= lone)
mask_lat = (data_m.latitude >= lats) & (data_m.latitude <= latn)
%time data_sub_m = data_m.sel(time=slice(date1, date2)).where(mask_lon & mask_lat, drop=True)
#data.groupby("time.month").mean(dim='time')
#data_sub # 'subseted' dataset
data_sub_m #
# select a specific date and time
date = '2020-05-24T23:00:00'
datah = data_sub_h.sel(time=date,method='nearest')
datam = data_sub_m.sel(time=date,method='nearest') # mimics the model output - to be merged with Teresa and Maurício's code
# need to import the satellite data as well
# calculate the magnitude of surface currents
magh = np.sqrt(datah.u.values**2+datah.v.values**2)
magm = np.sqrt(datam.uo.values**2+datam.vo.values**2)
# create the mesh
xh, yh = np.meshgrid(datah.lon.values,datah.lat.values)
xm, ym = np.meshgrid(datam.longitude.values,datam.latitude.values)
# mask data
magh = np.ma.masked_invalid(magh)
magm = np.ma.masked_invalid(magm).squeeze()
uh = np.ma.masked_invalid(datah.u.values)
vh = np.ma.masked_invalid(datah.v.values)
um = np.ma.masked_invalid(datam.uo.values).squeeze()
vm = np.ma.masked_invalid(datam.vo.values).squeeze()
uh = griddata((xh.ravel(),yh.ravel()),uh.ravel(),(xm,ym))
vh = griddata((xh.ravel(),yh.ravel()),vh.ravel(),(xm,ym))
magh = griddata((xh.ravel(),yh.ravel()),magh.ravel(),(xm,ym))
proj = ccrs.PlateCarree()
stph, stpm = 2, 2 #subsample the vectorfield
# compute angles of the vector
angleh = np.pi/2 - np.arctan2(uh/magh, vh/magh)
anglem = np.pi/2 - np.arctan2(um/magm, vm/magm)
# holoviews objects
#hfr = gv.VectorField((xm[::stph,::stph], ym[::stph,::stph], # HFR vectorfield
# angleh[::stph,::stph], magh[::stph,::stph]),
# crs=proj)
#mdl = gv.VectorField((xm[::stpm,::stpm], ym[::stpm,::stpm], # 'model' vectorfield
# anglem[::stpm,::stpm], magm[::stpm,::stpm]),
# crs=proj)
#tiles = gv.tile_sources.CartoLight # map object - I really didn't like any of them
# plot interactive map
#hfr.opts(magnitude='Magnitude',color='red',
# padding=.1, colorbar=True, line_width=2,
# height=350, width=350) * \
#mdl.opts(magnitude='Magnitude',color='black',
# padding=.1, colorbar=True, line_width=2,
# height=350, width=350) * tiles.opts(alpha=1,fontsize='0pt')
# not sure which colors to choose.... bur colormaps for each dataset does not look good
# satellite track file 228!!
#hv.help(hv.Tiles) # holoviews (hv) is not imported above (its import is commented out), so this call is disabled
# time holograph test
#kdims = ['time', 'lon', 'lat']
#vdims = ['u','v']
#data_gv = gv.Dataset(data_sub, kdims=kdims, vdims=vdims)
#hv.Dimension.type_formatters[np.datetime64] = '%Y-%m-%d'
# conventional map (cartopy) - Panagiotis code
fig = plt.figure(figsize=(8,12))
proj = ccrs.PlateCarree()
ax=fig.add_subplot(1,1,1,projection=proj)
ax.set_extent([lonw, lone, lats, latn])
#mpl.rcParams['font.family'] = 'Arial'
ax.xaxis.set_major_formatter(LongitudeFormatter())
ax.yaxis.set_major_formatter(LatitudeFormatter())
stp = 1
lon_ticks = np.arange(int(lonw),int(lone)+stp) #[-75, -74, -73, -72, -71, -70, -69]
lat_ticks = np.arange(int(lats),int(latn)+stp)
ax.set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax.set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax.xaxis.set_tick_params(which='major', size=2., width=1,
direction='in', top='on', pad=7)
ax.yaxis.set_tick_params(which='major', size=2., width=1,
direction='in', right='on',pad=7)
ax.tick_params(labelleft=True,labelbottom=True,labeltop=True,labelright=True)
ax.add_feature(cfeature.LAND.with_scale('10m'), color='black', alpha=0.4)
ax.coastlines(resolution='10m',linewidth=0.3)
vmax = np.nanmean(magm)+2*np.nanstd(magm)
bounds = np.linspace(0,vmax,5)
norm = mpl.colors.Normalize(vmin=0., vmax=vmax)
# plot current vectors
cf=ax.quiver(xm,ym,um,vm,magm,cmap=cmo.cm.speed,
scale=10,width=.0015,norm=norm)
# add the scale for the currents magnitude
qk = ax.quiverkey(cf, 0.8, 0.58, 0.5, label='0.5 m s$^{-1}$', labelpos='E',
coordinates='figure')
# add colorbar
cax,kw = mpl.colorbar.make_axes(ax,location='bottom',pad=0.05,aspect=50)
out=fig.colorbar(cf,cax=cax,ticks=bounds,extend='max',format='%.2f',**kw)
out.set_label('Surface Velocity (m s$^{-1}$)',size=10)
out.ax.tick_params(direction='in',size=2., width=1)
#plt.savefig('hf_radar.svg', format='svg', transparent=False, dpi=300, bbox_inches='tight')
#plt.savefig('hf_radar.png', format='png', transparent=False, dpi=300, bbox_inches='tight')
```
# Lecture 1.1: Introduction to NumPy & pandas
This lecture, we are getting to know the two python libraries at the heart of data analysis: [NumPy](https://numpy.org/) and [pandas](https://pandas.pydata.org/).
**Learning goals:**
- Explain the difference between NumPy ndarrays, pandas Series, and pandas DataFrames
- Construct ndarrays, Series, and DataFrames
- Carry out basic operations on a ndarrays, Series, and DataFrames
- Show the need for efficient maths & data librairies
---
## 1. NumPy
ℹ️ When importing NumPy, the convention is to use the name `np`. You don't technically have to follow this convention, but please do, as it will make your code more readable and shareable.
💪 Appreciate running your first numpy import... this will be the first of many! 🎊
```
import numpy as np
```
### 1.1 Array creation
In the world of NumPy, the main class is the `ndarray`. According to the [documentation](https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html):
> An ndarray is a multidimensional container of items of the same type and size
In other words, they are efficient data structures for vectors and matrices, like these ones:
$$
\begin{equation*}
a =
\begin{bmatrix}
1 \\
2 \\
3 \\
4
\end{bmatrix}
; \,
B =
\begin{pmatrix}
1 & 2 & 3 \\
4 & 5 & 6
\end{pmatrix}
\end{equation*}
$$
Vectors, like $a$ above, can be created by passing a python list as argument to `np.array()`:
```
a = np.array([1, 2, 3, 4])
a
```
The output looks a lot like a regular list... 🧐 Fear not, arrays can also be _multi-dimensional_ 👻. Matrices like $B$ can be created by passing one python list per row to `np.array()`:
```
B = np.array([[1, 2, 3], [4, 5, 6]])
B
```
NumPy is _intended_ for maths. It's in the name! So it comes with lots of neat inbuilt constructor methods for `ndarray`. Here are some examples:
```
np.zeros([2, 3])
np.ones([2, 3])
np.arange(-3, 10)
```
💪 Now your turn, can you use `.linspace()` to return a 1D array of 8 equally spaced numbers between -3 & 10?
Pro-tip: always check the documentation if this is your first time using a method. You can do that with the `?` prefix in this notebook, or by looking for it online.
```
# INSERT YOUR CODE HERE
```
🧠 What's the main difference between `.arange()` and `.linspace()`?
### 1.2 Basic Operations
One of the most convenient things about NumPy is that it's very integrated with python. It overloads a lot of operators. For example, accessing a number in an `ndarray` is exactly like selecting an element in a list:
```
a = np.array([0, 1, 1, 2, 3, 5, 8, 13])
a[2]
```
The [list slicing notation](https://stackoverflow.com/questions/509211/understanding-slice-notation) works as well 🔪:
```
a[2:-1]
```
Neat! This makes working with `ndarray` very "natural". In fact, sometimes it's easy to forget if you are dealing with an `ndarray` or a simple list, so try to keep track of your data types. You can always check them using the [`type()`](https://www.geeksforgeeks.org/python-type-function/) built-in function.
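For instance, a quick sanity check (a minimal sketch, reusing the `np` import from above):
```
arr = np.array([1, 2, 3])
lst = [1, 2, 3]
print(type(arr))       # <class 'numpy.ndarray'>
print(type(lst))       # <class 'list'>
print(type(arr[0:2]))  # slicing an ndarray returns another ndarray
```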
NumPy also overloads arithmetic operators:
```
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
a + b
a * b
```
As you might have noticed, element-wise operations are the default with python arithmetic operators.
```
a @ b
```
🧠 Can you guess what the `@` operator does with `ndarray`? Check your answer by looking up the official documentation (or searching online).
### 1.3 NumPy Data Types
NumPy tries to make the _api_ very integrated with native python, but we know that in the backend... it runs C code! So it's not super clear what classes are running under the hood. Let's investigate what data types are used:
```
arr = np.array([1, 2, 3])
type(arr)
```
As we already know, NumPy arrays are not python lists... but what's inside the `ndarray`? We fed a list of `int`s in the constructor in the cell just above, so maybe the `ndarray` holds `int`s?
```
type(arr[1])
```
🙀 Wait a minute, that's not an `int`!
```
type(np.random.random(10)[3])
```
And that's not a normal `float`! NumPy uses its own [data types](https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html) so it can be lightning fast ⚡️. These are called `dtypes`. In fact, there are many more `dtypes` than there are primitive data types in python! Here's a complete [list](https://numpy.org/devdocs/user/basics.types.html). No need to learn them all, NumPy will take care of the casting for you most of the time. But it's good to be aware that they exist should you be debugging your code, or worried about performance.
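A quick illustration (a minimal sketch: `.dtype` inspects the element type, and `.astype()` casts to another one):
```
arr = np.array([1, 2, 3])
print(arr.dtype)                 # e.g. int64 (the default integer dtype is platform dependent)
floats = arr.astype(np.float32)  # explicit cast to 32-bit floats
print(floats.dtype)              # float32
```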
### 1.4 NumPy Performance
Let's see if NumPy is really as fast as it claims!
First, let's make a 1D `ndarray` with of length 10000. We'll fill it with random numbers $\in [0,1]$.
ℹ️ It's a good idea to explicitly set the seeds where you can, as it keeps results reproducible. A data scientist's nightmare would be to achieve a new state of the art result and be unable to repeat it afterwards!
```
np.random.seed(0)
arr = np.random.random(10000)
```
Then, let's use NumPy's `.tolist()` method to convert the array to a native python list. This will also cast the elements from `np.float64` to python `float`.
```
lst = arr.tolist()
```
We can check that the two contain the same data:
```
print(f'The array is of size: {len(arr)}')
print(f'The list is of size: {len(lst)}')
print('The first three elements of the array are:')
print(arr[:3])
print('The first three elements of the list are:')
print(lst[:3])
```
Now, let's imagine we want to filter elements that are smaller than a threshold. We can make one function for our NumPy `arr`, and one for our python `lst`.
```
def filter_list(lst, threshold):
new_lst = []
for e in lst:
if e > threshold:
new_lst.append(e)
return new_lst
def filter_array(arr, threshold):
return arr[arr > threshold]
```
Notice how the notation for the array filter is more terse? This uses a _boolean mask_. They are very useful and we'll cover them in more detail next lecture.
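As a tiny preview of how such a mask works (a minimal sketch):
```
arr = np.array([0.2, 0.9, 0.4, 0.7])
mask = arr > 0.5   # element-wise comparison yields a boolean array
print(mask)        # [False  True False  True]
print(arr[mask])   # [0.9 0.7] -- indexing with the mask keeps only the True positions
```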
💪 Can you write another function called `filter_list_comprehension` where you use list comprehensions instead of a for loop to filter the values of `lst`? It should fit in one line! Add your code to the cell below. The unit test should not fail when the cell is run.
```
def filter_list_comprehension(lst, threshold):
# INSERT YOUR CODE HERE
def test_filter_list_comprehension():
assert len(filter_list_comprehension(lst, 0.5)) == 4936
print('Success! 🎉')
test_filter_list_comprehension()
```
List comprehensions are terse, fast, and awesome. You should use them when you can, but be aware that sticking complex chains of operations in one line can also quickly become illegible!
Now let's use the `timeit` [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html) to time the execution of our functions. `timeit` will run the expressions in loops to measure the average running time, so this may take a few seconds. We'll use an arbitrary threshold value of 0.7.
```
%timeit filter_list(lst, 0.7)
%timeit filter_list_comprehension(lst, 0.7)
%timeit filter_array(arr, 0.7)
```
Wow, that's fast! 🏎 The list comprehension is faster than the for loop, but numpy is almost ten times as fast!
Gaining $300\,\mu s$ might not sound like much, but remember this is a test with (only) 10000 values. When we deal with multi-dimensional arrays of millions of values, this speed up can make a huge difference!
🧠 What's the difference between `timeit` and `time`?
## 2. Pandas
ℹ️ The convention is to use the name `pd` when importing pandas. Just like `np` & NumPy, you don't technically have to follow this convention, but please always do!
💪 This will also be your first time of many to import pandas! 🎊
```
import pandas as pd
```
### 2.1 Series
There are two main classes in the world of pandas. The first one is the `Series`. According to the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html):
> A Series is a one-dimensional ndarray with axis labels.
Notice how pandas leverages NumPy `ndarray`s as data structures. Also observe that a `Series` object wraps the array with "axis labels", also called an _index_. But before we dig into indices, let's see how `Series` are created. Just like the 1D `ndarray`, a Series constructor can take a python list as argument:
```
s = pd.Series([1, 2, 3, 4])
s
```
The output statement details the NumPy `dtype` used in the inner `ndarray` of the Series.
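To peek under the hood (a minimal sketch: `.to_numpy()` returns the inner `ndarray`, and `.dtype` its element type):
```
s = pd.Series([1, 2, 3, 4])
print(s.to_numpy())  # the underlying ndarray: [1 2 3 4]
print(s.dtype)       # dtype of the inner ndarray, e.g. int64
```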
Overloading of list slicing syntax and arithmetic operators is also similar to NumPy:
```
s[2]
s[2::-1]
s + s
```
OK, so far, a Series looks and acts pretty much the same as a 1D `ndarray`. One exception is the "axis labels" we can see on the left hand side of the output statement: `0 1 2 3`.
These form the `index` of the Series. `ndarray`s also have an index, since we can access them with the `arr[i]` syntax, but in pandas Series, the index is _explicit_. This means that we can select it and manipulate it as a first class object!
```
s.index
s.index[2]
s.index[2:3]
```
Indexes don't have to be incremental integers. For example, let's sort a Series and see what happens to it.
💪 Sort the rows of the following series by _value_. No need to write a function here, just modify `s` so that the unit test passes. Pro-tip: look up the [official documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html).
```
s = pd.Series([1, 3, 5, 2, 4, 0])
print(f'This is the original series:\n{s}')
# INSERT CODE HERE
print(f'This is the sorted series:\n{s}')
def test_sorted(s):
assert s.is_monotonic_increasing
print('Success! 🎉')
test_sorted(s)
```
Notice how the indices "stuck" with their value in the sorted series, and are now disordered? We can also use other data types as index labels:
```
print('This series has a string index:\n')
s = pd.Series({'seven': 7, 'ate':8, 'nine':9})
print(s)
print('\nThis series has a date index:\n')
s = pd.Series(data=np.linspace(666, 1337, 3), index=pd.date_range('20121221', periods=3))
print(s)
```
Series are cool, but `ndarray`s could be _multi-dimensional_ and we learned that was useful in machine learning. This leads us to the second main class of pandas: the `DataFrame`.
### 2.2 DataFrame
According to the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html):
> A DataFrame is a two-dimensional, size-mutable, potentially heterogeneous tabular data.
The important term here is _tabular_. `DataFrame`s are the Excel sheets of python! In other words, a `DataFrame` is a 2D indexed array. It wraps an inner 2D `ndarray`, the same row index as the `Series` class, as well as a list of column names. In fact, each column in a DataFrame is just a named `Series`.
Just like 2D `ndarray`s, we can create `DataFrame`s with one list per row:
```
df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
df
```
Notice the index `0 1` on the LHS, and the column names `0 1 2 3` on top of the data array. Since columns are first class citizens in pandas (just like indices), we can manipulate and modify them directly! These default column names are a bit boring. Let's change them to something else:
```
df.columns = ['👌', '👽', 'ayy', 'lmao']
df
```
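Since each column is just a named `Series`, selecting one with the bracket syntax gives us back a `Series` object. A quick check on the `df` we just created:
```
col = df['ayy']
type(col)
```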
Just like `ndarray`s and `Series`, `DataFrames` are closely integrated with python and overload common operators. These operations are also element-wise by default.
```
df + df
df * df
```
Since we've only dealt with toy data in this notebook, here's a little teaser of what one can do in a few lines of python with the pandas library. 😏 Let's create some fake financial data by adding a noise term to two straight lines. We then load this into a date-indexed `DataFrame`:
```
np.random.seed(1337)
data = {}
data['stonks'] = np.linspace(99, 101, 100) + np.random.random(100)
data['not stonks'] = np.linspace(101, 99, 100) + np.random.random(100)
stacks = pd.DataFrame(data=data, index=pd.date_range('20191223', periods=100))
```
The 100 rows of our `DataFrame` might be a bit too much to fit on our screen, but sometimes it's still helpful to keep an eye on the data. We can use `.head()`, which only returns the first 5 rows of our `DataFrame`.
```
stacks.head()
```
pandas integrates [matplotlib](https://matplotlib.org/) in its API, so it's _super_ easy to plot `DataFrame` data:
```
stacks.plot.line()
```
Data visualization is an entire field of data science, and we'll go over it in more detail during another lecture. But remember that the `DataFrame.plot` api is handy when exploring data. 👨🎨
🧠🧠 Can you explain how the lines defining `data['stonks']` and `data['not stonks']` translate to these plotted lines? Pro-tip: When dealing with `ndarray`s, it helps to keep track of the dimensions of your objects between operations.
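If you want to check your intuition, here is a small sketch that tracks the shapes of the two terms involved (the variable names are ours, not part of the exercise):
```
trend = np.linspace(99, 101, 100)  # shape (100,): a straight line from 99 up to 101
noise = np.random.random(100)      # shape (100,): uniform jitter in [0, 1)
(trend + noise).shape              # element-wise sum: one noisy value per date
```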
## 3. Summary
Today, we have learned about **NumPy** and **pandas**, and how they are key tools for manipulating data in python. We understood that the **`ndarray`** is an efficient data structure for **matrices**, that the **`Series`** adds an **index** to these arrays, and that the **`DataFrame`** extends them in 2D to represent **tabular** data. We have learned how to **construct** and do **basic operations** with these three objects, by looking up **official documentation**. We've also measured their **performance** compared to python data structures. We even got a little taster of what financial analysis can look like with pandas. 💸
---
# Resources
## Core Resources
- [**Slides**](https://docs.google.com/presentation/d/1B66WgZfitY8FTMUu4I1Q5AihxTHJZwsqb3ZP4FgSsCg/edit?usp=sharing)
- [ndarray official documentation](https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html)
- [ndarray creation documentation](https://docs.scipy.org/doc/numpy/user/quickstart.html#array-creation)
- [Python Data Science handbook on numpy](https://jakevdp.github.io/PythonDataScienceHandbook/02.00-introduction-to-numpy.html)
- [Series official documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html)
- [DataFrame official documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
- [Pandas getting started documentation](https://pandas.pydata.org/docs/getting_started/intro_tutorials/index.html)
## Additional Resources
- [List slicing explained](https://stackoverflow.com/questions/509211/understanding-slice-notation)
Stackoverflow post succinctly explaining the python list slicing notation
- [Learn NumPy in 5 minutes](https://youtu.be/xECXZ3tyONo)
Terse video summary of basic numpy operations
- [3 numPy image transformations on baby yoda](https://towardsdatascience.com/3-numpy-image-transformations-on-baby-yoda-c27c1409b411)
Short blog post with examples of ndarray manipulation
- [The ultimate beginner's guide to NumPy](https://towardsdatascience.com/the-ultimate-beginners-guide-to-numpy-f5a2f99aef54)
- [Numpy example list](http://scipy.github.io/old-wiki/pages/Numpy_Example_List)
Examples of all numpy methods
| github_jupyter |
# MeshCat Animations
MeshCat also provides an animation interface, built on top of the [three.js animation system](https://threejs.org/docs/#manual/introduction/Animation-system). While it is possible to construct animation clips and tracks manually, just as you would in Three.js, it's generally easier to use the MeshCat `Animation` type.
Let's show off building a simple animation. We first have to create our scene:
```
import meshcat
from meshcat.geometry import Box
vis = meshcat.Visualizer()
## To open the visualizer in a new browser tab, do:
# vis.open()
## To open the visualizer inside this jupyter notebook, do:
# vis.jupyter_cell()
vis["box1"].set_object(Box([0.1, 0.2, 0.3]))
```
### Building an Animation
We construct an animation by first creating a blank `Animation()` object. We can then use the `at_frame` method to set properties or transforms of the animation at specific frames of the animation. Three.js will automatically interpolate between whatever values we provide.
For example, let's animate moving the box from [0, 0, 0] to [0, 1, 0]:
```
from meshcat.animation import Animation
import meshcat.transformations as tf
anim = Animation()
with anim.at_frame(vis, 0) as frame:
# `frame` behaves like a Visualizer, in that we can
# call `set_transform` and `set_property` on it, but
# it just stores information inside the animation
# rather than changing the current visualization
frame["box1"].set_transform(tf.translation_matrix([0, 0, 0]))
with anim.at_frame(vis, 30) as frame:
frame["box1"].set_transform(tf.translation_matrix([0, 1, 0]))
# `set_animation` actually sends the animation to the
# viewer. By default, the viewer will play the animation
# right away. To avoid that, you can also pass `play=False`.
vis.set_animation(anim)
```
You should see the box slide 1 meter to the right in the viewer. If you missed the animation, you can run it again from the viewer. Click "Open Controls", find the "Animations" section, and click "play".
### Animating the Camera
The camera is just another object in the MeshCat scene. To set its transform, we just need to index into the visualizer with the right path (note the leading `/`):
```
vis["/Cameras/default"].set_transform(tf.translation_matrix([0, 0, 1]))
```
To animate the camera, we just have to make the same kind of `set_transform` calls on individual frames in an animation:
```
anim = Animation()
with anim.at_frame(vis, 0) as frame:
frame["/Cameras/default"].set_transform(tf.translation_matrix([0, 0, 0]))
with anim.at_frame(vis, 30) as frame:
frame["/Cameras/default"].set_transform(tf.translation_matrix([0, 0, 1]))
# we can repeat the animation playback with the
# repetitions argument:
vis.set_animation(anim, repetitions=2)
```
We can also animate object properties. For example, let's animate the camera's `zoom` property to smoothly zoom out and then back in. Note that to do this, we have to access a deeper path in the visualizer to get to the actual camera object. For more information, see: https://github.com/rdeits/meshcat#camera-control
```
anim = Animation()
camera_path = "/Cameras/default/rotated/<object>"
with anim.at_frame(vis, 0) as frame:
frame[camera_path].set_property("zoom", "number", 1)
with anim.at_frame(vis, 30) as frame:
frame[camera_path].set_property("zoom", "number", 0.5)
with anim.at_frame(vis, 60) as frame:
frame[camera_path].set_property("zoom", "number", 1)
# While we're animating the camera zoom, we can also animate any other
# properties we want. Let's simultaneously translate the box during
# the same animation:
with anim.at_frame(vis, 0) as frame:
frame["box1"].set_transform(tf.translation_matrix([0, -1, 0]))
with anim.at_frame(vis, 60) as frame:
frame["box1"].set_transform(tf.translation_matrix([0, 1, 0]))
vis.set_animation(anim)
```
### Recording an Animation
To record an animation at a smooth, fixed frame rate, click on "Open Controls" in the viewer, and then go to "Animations" -> "default" -> "Recording" -> "record". This will play the entire animation, recording every frame, and will then let you download the resulting frames to your computer.
To record activity in the MeshCat window that isn't a MeshCat animation, we suggest using a screen-capture tool like Quicktime for macOS or RecordMyDesktop for Linux.
### Converting the Animation into a Video
Currently, meshcat can only save an animation as a `.tar` file consisting of a list of `.png` images, one for each frame. To convert that into a video, you will need to install the `ffmpeg` program, and then you can run:
```
from meshcat.animation import convert_frames_to_video
convert_frames_to_video("/home/rdeits/Downloads/meshcat_1528401494656.tar", overwrite=True)
```
| github_jupyter |
## Summary
----
## Imports
```
import concurrent.futures
import gzip
import os
import shutil
import subprocess
from collections import Counter
from pathlib import Path
import logomaker
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import proteinsolver
import pyarrow as pa
import pyarrow.parquet as pq
from IPython.display import set_matplotlib_formats
from kmbio import PDB
from tqdm.notebook import tqdm
pd.set_option("max_columns", 1000)
pd.set_option("max_rows", 1000)
set_matplotlib_formats("png")
```
## Parameters
```
NOTEBOOK_DIR = Path("12_profile_recovery").resolve()
NOTEBOOK_DIR.mkdir(exist_ok=True)
NOTEBOOK_DIR
UNIQUE_ID = "191f05de"
SEQUENCE_GENERATION_METHOD = "expectimax"
DESIGNS_PATH = NOTEBOOK_DIR.parent.joinpath("generate_protein_sequences")
assert DESIGNS_PATH.is_dir()
DESIGNS_PATH
# 1n5uA03 4z8jA00 4unuA00 4beuA02
STRUCTURE_ID = "4beuA02"
from papermill import execute_notebook
def worker(params):
input_path = NOTEBOOK_DIR.with_suffix(".ipynb").as_posix()
output_path = NOTEBOOK_DIR.parent.joinpath(
NOTEBOOK_DIR.name + f"-{params['STRUCTURE_ID']}.ipynb"
).as_posix()
out = execute_notebook(input_path, output_path, params)
return out
print(os.getpid())
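# NOTE: os.getpid() never returns None, so the papermill sweep below is
# effectively disabled here; change the condition to actually launch it.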
if os.getpid() is None:
params_list = [
{"STRUCTURE_ID": structure_id}
for structure_id in ["1n5uA03", "4z8jA00", "4unuA00", "4beuA02"]
]
with concurrent.futures.ThreadPoolExecutor(4) as pool:
futures = pool.map(worker, params_list)
results = list(futures)
proteinsolver_stat_dict = {
"1n5uA03": {
"map_sequence_identity": 0.41304347826086957,
"map_proba": 0.42337074875831604,
"map_logproba": -1.015546441078186,
"ref_sequence_identity": 1.0,
"ref_proba": 0.28906363248825073,
"ref_logproba": -1.9106740951538086,
"sequence": "KFGERAFKAWAVARLSQRFPKAEFAEVSKLVTDLTKVHTECCHGDLLECADDRADLAKYICENQDSISSKLKECCEKPLLEKSHCIAEVEND",
},
"4z8jA00": {
"map_sequence_identity": 0.4166666666666667,
"map_proba": 0.48954343795776367,
"map_logproba": -0.7965440154075623,
"ref_sequence_identity": 1.0,
"ref_proba": 0.325325071811676,
"ref_logproba": -1.840335726737976,
"sequence": "SPRVVRIVKSESGYGFNVRGQVSEGGQLRSINGELYAPLQHVSAVLPGGAADRAGVRKGDRILEVNGVNVEGATHKQVVDLIRAGEKELILTVLSV",
},
"4unuA00": {
"map_sequence_identity": 0.43119266055045874,
"map_proba": 0.46164458990097046,
"map_logproba": -0.8745880126953125,
"ref_sequence_identity": 1.0,
"ref_proba": 0.3054293692111969,
"ref_logproba": -1.7174205780029297,
"sequence": "SALTQPPSASGSLGQSVTISCTGTSSDVGGYNYVSWYQQHAGKAPKVIIYEVNKRPSGVPDRFSGSKSGNTASLTVSGLQAEDEADYYCSSYEGSDNFVFGTGTKVTVL",
},
"4beuA02": {
"map_sequence_identity": 0.41013824884792627,
"map_proba": 0.5304911136627197,
"map_logproba": -0.6995142102241516,
"ref_sequence_identity": 1.0,
"ref_proba": 0.3164460062980652,
"ref_logproba": -1.7113533020019531,
"sequence": "LGQFQSNIEQFKSHMNANTKICAIMKADAYGNGIRGLMPTIIAQGIPCVGVASNAEARAVRESGFKGELIRVRSASLSEMSSALDLNIEELIGTHQQALDLAELAKQSGKTLKVHIALNDGGMGRNGIDMTTEAGKKEAVSIATQPSLSVVGIMTHFPNYNADEVRAKLAQFKESSTWLMQQANLKREEITLHVANSYTALNVPEAQLDMVRPGGVL",
},
}
proteinsolver_stats = proteinsolver_stat_dict[STRUCTURE_ID]
proteinsolver_stats
```
## Common functions
```
columns = list("GVALICMFWPDESTYQNKRH")
assert len(columns) == 20
assert len(set(columns)) == 20
def get_sequence_logo_score(sequence, aa_proba_adj):
return np.mean(
np.log(aa_proba_adj.values[np.arange(len(sequence)), [columns.index(aa) for aa in sequence]])
)
def get_sequence_identity(seq1, seq2):
assert len(seq1) == len(seq2)
return sum((aa1 == aa2) for aa1, aa2 in zip(seq1, seq2)) / len(seq1)
```
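As a quick sanity check, here is how these helpers behave on toy inputs. The uniform probability table below is a made-up stand-in for the real LOGO tables loaded in the next section:
```
toy_proba = pd.DataFrame(np.full((3, 20), 0.05), columns=columns)  # uniform over the 20 amino acids
print(get_sequence_identity("GVA", "GVL"))        # 2 of 3 positions match -> 0.667
print(get_sequence_logo_score("GVA", toy_proba))  # mean of log(0.05) across positions, about -3.0
```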
## Load reference LOGOs
```
AA_PROBA_ADJ = {}
for rl in [100, 95, 60, 35]:
AA_PROBA_ADJ[rl] = pq.read_table(
NOTEBOOK_DIR.parents[2].joinpath(
"proteinsolver",
"notebooks",
"11_profile_recovery",
f"{STRUCTURE_ID}-rl{rl}-aa-proba-adj.parquet",
)
).to_pandas()
```
## Load reference structure
```
STRUCTURE_FILE = Path(proteinsolver.__path__[0]).joinpath("data", "inputs", f"{STRUCTURE_ID}.pdb")
assert STRUCTURE_FILE.is_file()
STRUCTURE_FILE
structure = PDB.load(STRUCTURE_FILE)
structure_df = structure.to_dataframe()
```
## Ingraham
```
ingraham_data_dir = NOTEBOOK_DIR.parents[2].joinpath(
"neurips19-graph-protein-design", "notebooks", "03_generate_designs", STRUCTURE_ID
)
!ls {ingraham_data_dir}
```
### Load data
```
def parse_fasta_file(fin):
results = []
row = None
for line in fin:
if line.startswith(">"):
if row is not None:
results.append(row)
row = None
header = [l.strip("> ") for l in line.strip().split(",")]
if header[0] == "Native":
row = {
"T": None,
"sample": -1,
"score": float(header[1].replace("score=", "")),
"sequence": "",
}
else:
row = {
"T": header[0].replace("T=", ""),
"sample": header[1].replace("sample=", ""),
"score": float(header[2].replace("score=", "")),
"sequence": "",
}
else:
row["sequence"] += line.strip()
if row is not None:
results.append(row)
return results
result_dfs = {}
for model_features in ["full", "dist", "coarse", "hbonds"]:
with gzip.open(
ingraham_data_dir.joinpath(model_features, "alignments", f"{STRUCTURE_ID}.fa.gz"), "rt"
) as fin:
results = parse_fasta_file(fin)
results_df = pd.DataFrame(results)
result_dfs[model_features] = results_df
result_dfs["full"].head()
```
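To make the parsing logic concrete, here is what `parse_fasta_file` produces on a tiny in-memory example (the header format is inferred from the code above, so treat this as an illustration):
```
demo_lines = [
    "> Native, score=-1.23\n",
    "ACDE\n",
    "> T=0.1, sample=0, score=-0.98\n",
    "ACDF\n",
]
parse_fasta_file(demo_lines)
# -> [{'T': None, 'sample': -1, 'score': -1.23, 'sequence': 'ACDE'},
#     {'T': '0.1', 'sample': '0', 'score': -0.98, 'sequence': 'ACDF'}]
```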
### Calculate scores
```
# for feature_set in result_dfs:
# result_dfs[feature_set]["seq_identity"] = list(
# tqdm(
# (
# get_sequence_identity(sequence, proteinsolver_stats["sequence"])
# for sequence in result_dfs[feature_set]["sequence"].values
# ),
# total=len(result_dfs[feature_set]),
# )
# )
# for feature_set in result_dfs:
# for rl in AA_PROBA_ADJ:
# result_dfs[feature_set][f"avg_logprob_{rl}"] = list(
# tqdm(
# (
# get_sequence_logo_score(sequence, AA_PROBA_ADJ[rl])
# for sequence in result_dfs[feature_set]["sequence"].values
# ),
# total=len(result_dfs[feature_set]),
# )
# )
```
### Accuracy plot
```
summary_dfs = {}
for model_features in ["full", "dist", "coarse", "hbonds"]:
df = pd.read_csv(ingraham_data_dir.joinpath(model_features, "results.csv"))
df["model_features"] = model_features
summary_dfs[model_features] = df
fg, ax = plt.subplots()
for model_features, df in summary_dfs.items():
x = np.arange(len(df))
ax.plot(x, df["similarity"], "o-", label=model_features)
ax.set_xticks(x)
ax.set_xticklabels(df["T"])
ax.grid(True, linestyle="--")
ax.set_ylim(0.1, 0.65)
ax.hlines(proteinsolver_stats["map_sequence_identity"], *ax.get_xlim(), linestyle='--')
ax.legend()
ax.set_title(STRUCTURE_ID)
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-ingraham-seq-identity.svg"))
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-ingraham-seq-identity.pdf"))
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-ingraham-seq-identity.png"), dpi=300)
```
## Proteinsolver
### Load data
```
design_files = list(DESIGNS_PATH.glob(f"designs-{UNIQUE_ID}-{SEQUENCE_GENERATION_METHOD}-{STRUCTURE_FILE.stem}-*.parquet"))
design_files[-5:]
dfs = []
for design_file in tqdm(design_files):
df = pq.read_table(design_file).to_pandas(integer_object_nulls=True)
dfs.append(df)
proteinsolver_df = pd.concat(dfs, sort=False)
print(len(proteinsolver_df))
proteinsolver_df = proteinsolver_df.drop_duplicates(subset=["sequence"])
print(len(proteinsolver_df))
```
### Calculate scores
```
proteinsolver_df["seq_identity"] = list(
tqdm(
(
get_sequence_identity(sequence, proteinsolver_stats["sequence"])
for sequence in proteinsolver_df["sequence"].values
),
total=len(proteinsolver_df),
)
)
for rl in AA_PROBA_ADJ:
proteinsolver_df[f"avg_logprob_{rl}"] = list(
tqdm(
(
get_sequence_logo_score(sequence, AA_PROBA_ADJ[rl])
for sequence in proteinsolver_df["sequence"].values
),
total=len(proteinsolver_df),
)
)
```
## Make plots
```
result_dfs["proteinsolver"] = proteinsolver_df
fg, axs = plt.subplots(1, 5, figsize=(9, 4))
ylim = [None, None]
ps_median = None
for i, (featureset, df) in enumerate(result_dfs.items()):
data = {}
if featureset in ["proteinsolver"]:
data["all\ndesigns"] = df["seq_identity"]
else:
for temp, gp in df.groupby("T"):
data[temp] = gp["seq_identity"].values
ax = axs[i]
box_values = list(data.values())
positions = np.linspace(-0.5, 0.5, len(data) + 2)
width = 0.1
_ = ax.boxplot(box_values, positions=positions[1:-1], sym=".", widths=width)
if featureset in ["proteinsolver"]:
ax.set_xticklabels([f" T=1.00" for key in data.keys()], rotation="vertical")
ax.set_xlabel(f"ProteinSolver", fontsize="large")
else:
ax.set_xticklabels([f" T={float(key):.2f}" for key in data.keys()], rotation="vertical")
ax.set_xlabel(f"ST {featureset}", fontsize="large")
ax.set_xlim(positions[0], positions[-1])
y_min_ = min([v.min() for v in box_values])
if ylim[0] is None or ylim[0] > y_min_:
ylim[0] = y_min_
y_max_ = max([v.max() for v in box_values])
if ylim[1] is None or ylim[1] < y_max_:
ylim[1] = y_max_
if featureset in ["proteinsolver"]:
ps_median = np.median(box_values[0])
if i == 0:
ax.set_ylabel("Average sequence identity")
if i > 0:
ax.set_yticklabels("")
for tic in ax.yaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.grid(axis="y", linestyle=":")
ylim = [ylim[0] * 10 // 1 / 10, np.ceil(ylim[1] * 10) / 10]
for ax in axs:
ax.set_ylim(ylim)
ax.hlines(
proteinsolver_stats["map_sequence_identity"],
*ax.get_xlim(),
linestyle="--",
linewidth=1,
color=plt.get_cmap("Set2")(2),
)
ax.hlines(
ps_median, *ax.get_xlim(), linestyle="--", linewidth=1, color=plt.get_cmap("Set2")(3),
)
fg.subplots_adjust(left=0.08, bottom=0.23, top=0.97, right=0.99)
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-seq-identity.svg"))
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-seq-identity.pdf"))
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-seq-identity.png"), dpi=300)
for rl in AA_PROBA_ADJ:
fg, axs = plt.subplots(1, 5, figsize=(9, 4), constrained_layout=False)
ylim = [None, None]
ps_median = None
for i, (featureset, df) in enumerate(result_dfs.items()):
data = {}
if featureset in ["proteinsolver"]:
data["all\ndesigns"] = np.exp(df[f"avg_logprob_{rl}"].values)
else:
for temp, gp in df.groupby("T"):
data[temp] = np.exp(gp[f"avg_logprob_{rl}"].values)
ax = axs[i]
box_values = list(data.values())
positions = np.linspace(-0.5, 0.5, len(data) + 2)
width = 0.1
_ = ax.boxplot(box_values, positions=positions[1:-1], sym=".", widths=width)
if featureset in ["proteinsolver"]:
ax.set_xticklabels([f" T=1.00" for key in data.keys()], rotation="vertical")
ax.set_xlabel(f"ProteinSolver", fontsize="large")
else:
ax.set_xticklabels([f" T={float(key):.2f}" for key in data.keys()], rotation="vertical")
ax.set_xlabel(f"ST {featureset}", fontsize="large")
ax.set_xlim(positions[0], positions[-1])
y_min_ = min([v.min() for v in box_values])
if ylim[0] is None or ylim[0] > y_min_:
ylim[0] = y_min_
y_max_ = max([v.max() for v in box_values])
if ylim[1] is None or ylim[1] < y_max_:
ylim[1] = y_max_
if featureset in ["proteinsolver"]:
ps_median = np.median(box_values[0])
if i == 0:
ax.set_ylabel("Average residue probability")
if i > 0:
ax.set_yticklabels("")
for tic in ax.yaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.grid(axis="y", linestyle=":")
ylim = [ylim[0] * 100 // 1 / 100, np.ceil(ylim[1] * 100) / 100]
for ax in axs:
ax.set_ylim(ylim)
ax.hlines(
np.exp(get_sequence_logo_score(proteinsolver_stats["sequence"], AA_PROBA_ADJ[rl])),
*ax.get_xlim(),
linestyle="--",
linewidth=1,
color=plt.get_cmap("Set2")(2),
)
ax.hlines(
ps_median, *ax.get_xlim(), linestyle="--", linewidth=1, color=plt.get_cmap("Set2")(3),
)
fg.subplots_adjust(left=0.08, bottom=0.23, top=0.97, right=0.99)
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-avg-logprog-rl{rl}.svg"))
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-avg-logprog-rl{rl}.pdf"))
fg.savefig(NOTEBOOK_DIR.joinpath(f"{STRUCTURE_ID}-avg-logprog-rl{rl}.png"), dpi=300)
```
| github_jupyter |
##### Copyright 2021 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/federated/tutorials/federated_select"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/federated_select.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/federated_select.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/federated_select.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
# Sending Different Data To Particular Clients With tff.federated_select
This tutorial demonstrates how to implement custom federated algorithms in TFF that require sending different data to different clients. You may already be familiar with `tff.federated_broadcast` which sends a single server-placed value to all clients. This tutorial focuses on cases where different parts of a server-based value are sent to different clients. This may be useful for dividing up parts of a model across different clients in order to avoid sending the whole model to any single client.
Let's get started by importing both `tensorflow` and `tensorflow_federated`.
```
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated-nightly
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import tensorflow as tf
import tensorflow_federated as tff
```
## Sending Different Values Based On Client Data
Consider the case where we have some server-placed list from which we want to send a few elements to each client based on some client-placed data. For example, a list of strings on the server, and on the clients, a comma-separated list of indices to download. We can implement that as follows:
```
list_of_strings_type = tff.TensorType(tf.string, [None])
# We only ever send exactly two values to each client. The number of keys per
# client must be a fixed number across all clients.
number_of_keys_per_client = 2
keys_type = tff.TensorType(tf.int32, [number_of_keys_per_client])
get_size = tff.tf_computation(lambda x: tf.size(x))
select_fn = tff.tf_computation(lambda val, index: tf.gather(val, index))
client_data_type = tf.string
# A function from our client data to the indices of the values we'd like to
# select from the server.
@tff.tf_computation(client_data_type)
@tff.check_returns_type(keys_type)
def keys_for_client(client_string):
# We assume our client data is a single string consisting of exactly two
# comma-separated integers indicating which values to grab from the server.
split = tf.strings.split([client_string], sep=',')[0]
return tf.strings.to_number([split[0], split[1]], tf.int32)
@tff.tf_computation(tff.SequenceType(tf.string))
@tff.check_returns_type(tf.string)
def concatenate(values):
def reduce_fn(acc, item):
return tf.cond(tf.math.equal(acc, ''),
lambda: item,
lambda: tf.strings.join([acc, item], ','))
return values.reduce('', reduce_fn)
@tff.federated_computation(tff.type_at_server(list_of_strings_type), tff.type_at_clients(client_data_type))
def broadcast_based_on_client_data(list_of_strings_at_server, client_data):
keys_at_clients = tff.federated_map(keys_for_client, client_data)
max_key = tff.federated_map(get_size, list_of_strings_at_server)
values_at_clients = tff.federated_select(keys_at_clients, max_key, list_of_strings_at_server, select_fn)
value_at_clients = tff.federated_map(concatenate, values_at_clients)
return value_at_clients
```
Then we can simulate our computation by providing the server-placed list of strings as well as string data for each client. Each client receives the concatenation of the strings at its requested indices (with the inputs below: `'a,b'`, `'b,c'`, and `'c,a'`, respectively):
```
client_data = ['0,1', '1,2', '2,0']
broadcast_based_on_client_data(['a', 'b', 'c'], client_data)
```
## Sending A Randomized Element To Each Client
Alternatively, it may be useful to send a random portion of the server data to each client. We can implement that by first generating a random key on each client and then following a similar selection process to the one used above:
```
@tff.tf_computation(tf.int32)
@tff.check_returns_type(tff.TensorType(tf.int32, [1]))
def get_random_key(max_key):
return tf.random.uniform(shape=[1], minval=0, maxval=max_key, dtype=tf.int32)
list_of_strings_type = tff.TensorType(tf.string, [None])
get_size = tff.tf_computation(lambda x: tf.size(x))
select_fn = tff.tf_computation(lambda val, index: tf.gather(val, index))
@tff.tf_computation(tff.SequenceType(tf.string))
@tff.check_returns_type(tf.string)
def get_last_element(sequence):
return sequence.reduce('', lambda _initial_state, val: val)
@tff.federated_computation(tff.type_at_server(list_of_strings_type))
def broadcast_random_element(list_of_strings_at_server):
max_key_at_server = tff.federated_map(get_size, list_of_strings_at_server)
max_key_at_clients = tff.federated_broadcast(max_key_at_server)
key_at_clients = tff.federated_map(get_random_key, max_key_at_clients)
random_string_sequence_at_clients = tff.federated_select(
key_at_clients, max_key_at_server, list_of_strings_at_server, select_fn)
# Even though we only passed in a single key, `federated_select` returns a
# sequence for each client. We only care about the last (and only) element.
random_string_at_clients = tff.federated_map(get_last_element, random_string_sequence_at_clients)
return random_string_at_clients
```
Since our `broadcast_random_element` function doesn't take in any client-placed data, we have to configure the TFF Simulation Runtime with a default number of clients to use:
```
tff.backends.native.set_local_execution_context(default_num_clients=3)
```
Then we can simulate the selection. We can change `default_num_clients` above and the list of strings below to generate different results, or simply re-run the computation to generate different random outputs.
```
broadcast_random_element(tf.convert_to_tensor(['foo', 'bar', 'baz']))
```
| github_jupyter |
**This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/functions-and-getting-help).**
---
Functions are powerful. Try writing some yourself.
As before, don't forget to run the setup code below before jumping into question 1.
```
# SETUP. You don't need to worry for now about what this code does or how it works.
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex2 import *
print('Setup complete.')
```
# 1.
Complete the body of the following function according to its docstring.
HINT: Python has a built-in function `round`.
```
def round_to_two_places(num):
"""Return the given number rounded to two decimal places.
>>> round_to_two_places(3.14159)
3.14
"""
# Replace this body with your own code.
# ("pass" is a keyword that does literally nothing. We used it as a placeholder
# because after we begin a code block, Python requires at least one line of code)
# pass
return round(num, 2)
# Check your answer
q1.check()
# Uncomment the following for a hint
#q1.hint()
# Or uncomment the following to peek at the solution
#q1.solution()
```
# 2.
The help for `round` says that `ndigits` (the second argument) may be negative.
What do you think will happen when it is? Try some examples in the following cell.
```
# Put your test code here
round(104256, -4)
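# ndigits=-4 rounds to the nearest 10**4, so 104256 becomes 100000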
```
Can you think of a case where this would be useful? Once you're ready, run the code cell below to see the answer and to receive credit for completing the problem.
```
# Check your answer (Run this code cell to receive credit!)
q2.solution()
```
# 3.
In the previous exercise, the candy-sharing friends Alice, Bob and Carol tried to split candies evenly. For the sake of their friendship, any candies left over would be smashed. For example, if they collectively bring home 91 candies, they'll take 30 each and smash 1.
Below is a simple function that will calculate the number of candies to smash for *any* number of total candies.
Modify it so that it optionally takes a second argument representing the number of friends the candies are being split between. If no second argument is provided, it should assume 3 friends, as before.
Update the docstring to reflect this new behaviour.
```
def to_smash(total_candies, nfriends=3):
"""Return the number of leftover candies that must be smashed after distributing
the given number of candies evenly between `nfriends` friends (3 by default).
>>> to_smash(91)
1
"""
return total_candies % nfriends
# Check your answer
q3.check()
#q3.hint()
#q3.solution()
```
# 4. (Optional)
It may not be fun, but reading and understanding error messages will be an important part of your Python career.
Each code cell below contains some commented buggy code. For each cell...
1. Read the code and predict what you think will happen when it's run.
2. Then uncomment the code and run it to see what happens. (**Tip**: In the kernel editor, you can highlight several lines and press `ctrl`+`/` to toggle commenting.)
3. Fix the code (so that it accomplishes its intended purpose without throwing an exception)
<!-- TODO: should this be autochecked? Delta is probably pretty small. -->
```
round_to_two_places(9.9999)
x = -10
y = 5
# Which of the two variables above has the smallest absolute value?
smallest_abs = min(abs(x), abs(y))  # abs() takes one argument, so apply it to each value
def f(x):
    y = abs(x)
    return y
print(f(5))
```
# Keep Going
Nice job with the code. Next up, you'll learn about *conditionals*, which you'll need to **[write interesting programs](https://www.kaggle.com/colinmorris/booleans-and-conditionals)**.
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161283) to chat with other Learners.*
| github_jupyter |
# Variational Multi-modal Recurrent Graph AutoEncoder
In this tutorial, we will go through how to run a Variational Multi-modal Recurrent Graph AutoEncoder (VMR-GAE) model for origin-destination (OD) matrix completion. In particular, we will demonstrate how to train the model and evaluate the completion results.
## Part I: Training
In this part, we will show how to train a VMR-GAE model for OD matrix completion on the NYC taxi dataset. In particular, we adopt several training techniques from previous works, including data normalization and Kullback-Leibler (KL) loss delay.
Visit `paddlespatial/networks/vmrgae/train.py` for more details.
```
import argparse
import os
import numpy as np
import paddle
import pgl
from model import VmrGAE
import utils as utils
from utils import MinMaxScaler
```
The VmrGAE class is built upon PaddlePaddle, which is a deep learning framework.
```
def prep_env(flag='train'):
# type: (str) -> dict
"""
Desc:
Prepare the environment
Args:
flag: specify the environment, 'train' or 'evaluate'
Returns:
A dict indicating the environment variables
"""
parser = \
argparse.ArgumentParser(description='{} [VMR-GAE] on the task of OD Matrix Completion'
.format("Training" if flag == "train" else "Evaluating"))
parser.add_argument('--num_nodes', type=int, default=263, help='The number of nodes in the graph')
parser.add_argument('--timelen', type=int, default=3, help='The length of input sequence')
parser.add_argument('--hidden_dim', type=int, default=32, help='The dimensionality of the hidden state')
parser.add_argument('--rnn_layer', type=int, default=2, help='The number of RNN layers')
parser.add_argument('--delay', type=int, default=0, help='delay to apply kld_loss')
parser.add_argument('--clip_max_value', type=int, default=1, help='clip the max value')
parser.add_argument('--align', type=bool, default=True,
help='Whether or not align the distributions of two modals')
parser.add_argument('--x_feature', type=bool, default=False,
help='X is a feature matrix (if True) or an identity matrix (otherwise)')
parser.add_argument('--data_path', type=str, default='./data/NYC-taxi', help='Data path')
parser.add_argument('--checkpoints', type=str, default='./nyc/checkpoints', help='Checkpoints path')
parser.add_argument('--device', type=str, default='cpu', help='cpu or gpu')
if flag == "train":
parser.add_argument('--iter_num', type=int, default=10, help='The number of iterations')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='The learning rate')
parser.add_argument('--result_path', type=str, default='./nyc/results', help='result path')
else:
parser.add_argument('--sample_time', type=int, default=10, help='The sample time for point estimation')
args = parser.parse_known_args()[0]
if flag == "train":
if not os.path.exists(args.checkpoints):
os.makedirs(args.checkpoints)
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
else:
if not os.path.exists(args.checkpoints):
print('Checkpoint does not exist.')
exit()
primary_flow = np.load('%s/train_data.npy' % args.data_path, allow_pickle=True)
supp_flow = np.load('%s/green_data.npy' % args.data_path, allow_pickle=True)
train_data = np.load('%s/train_data.npy' % args.data_path, allow_pickle=True)[-1]
val_data = np.load('%s/val_data.npy' % args.data_path, allow_pickle=True)
test_data = np.load('%s/test_data.npy' % args.data_path, allow_pickle=True)
# scaling data
ground_truths = []
for i in range(len(primary_flow)):
primary_flow[i][0] = np.array(primary_flow[i][0]).astype("int")
primary_flow[i][1] = np.array(primary_flow[i][1]).astype("float32")
ground_truths.append(utils.index_to_adj_np(primary_flow[i][0], primary_flow[i][1], args.num_nodes))
ground_truths = np.stack(ground_truths, axis=0)
if args.clip_max_value == 1:
max_value = 50
else:
print(np.concatenate(primary_flow[:, 1]).max())
max_value = np.concatenate(primary_flow[:, 1]).max()
primary_scale = MinMaxScaler(0, max_value)
for i in range(args.timelen):
primary_flow[i][1] = primary_scale.transform(primary_flow[i][1])
for i in range(len(supp_flow)):
supp_flow[i][0] = np.array(supp_flow[i][0]).astype("int")
supp_flow[i][1] = np.array(supp_flow[i][1]).astype("float32")
supp_scale = MinMaxScaler(0, np.concatenate(supp_flow[:, 1]).max())
for i in range(args.timelen):
supp_flow[i][1] = supp_scale.transform(supp_flow[i][1])
# load into paddle
mask = np.zeros((args.num_nodes, args.num_nodes))
for i in range(args.timelen):
mask[np.where(ground_truths[i] > (2 / max_value))] = 1.0
target_graph = []
for i in range(len(primary_flow)):
target_graph.append(pgl.Graph(edges=primary_flow[i][0],
num_nodes=args.num_nodes,
edge_feat={'efeat': paddle.to_tensor(primary_flow[i][1])}))
supp_graph = []
for i in range(len(primary_flow)):
supp_graph.append(pgl.Graph(edges=supp_flow[i][0],
num_nodes=args.num_nodes,
edge_feat={'efeat': paddle.to_tensor(supp_flow[i][1])}))
mask = paddle.to_tensor(mask)
xs = paddle.to_tensor([np.eye(args.num_nodes) for i in range(args.timelen)])
x = paddle.to_tensor([np.eye(args.num_nodes) for i in range(args.timelen)])
ground_truths = paddle.to_tensor(ground_truths, dtype='float32')
res = {
"args": args,
"primary_flow": primary_flow, "primary_scale": primary_scale, "target_graph": target_graph, "x": x,
"mask": mask,
# "supp_flow": supp_flow, "supp_scale": supp_scale,
"supp_graph": supp_graph, "xs": xs,
"ground_truths": ground_truths,
"train_data": train_data, "val_data": val_data, "test_data": test_data
}
return res
```
### Environment Preparation
Here we use the `argparse` module to set the arguments, including model parameters, file paths, and training options. Then we load the data from the given path and scale it with a normalization step. Since we use `PaddlePaddle` as the backend, we also transform the data into `paddle.Tensor` form. Note that the iteration number is set to 10 only for demonstration; real training needs a much larger number (e.g., 10e5).
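For reference, here is a minimal sketch of what the `MinMaxScaler` used above is assumed to do; the real implementation lives in `utils.py`, and only its `transform` and `inverse_transform` methods are exercised in this tutorial:
```
import numpy as np

class MinMaxScalerSketch:
    """A stand-in that linearly maps [min_value, max_value] to [0, 1] and back."""

    def __init__(self, min_value, max_value):
        self.min_value = min_value
        self.max_value = max_value

    def transform(self, x):
        return (np.asarray(x) - self.min_value) / (self.max_value - self.min_value)

    def inverse_transform(self, x):
        return np.asarray(x) * (self.max_value - self.min_value) + self.min_value
```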
```
if __name__ == '__main__':
env = prep_env()
if env['args'].device == 'gpu':
paddle.set_device('gpu')
```
### Load the model and settings
The class is defined in the `paddlespatial/networks/vmrgae/model.py`.
Check it for more details.
```
model = VmrGAE(x_dim=env["x"].shape[-1], d_dim=env["xs"].shape[-1], h_dim=env["args"].hidden_dim,
num_nodes=env["args"].num_nodes, n_layers=env["args"].rnn_layer,
eps=1e-10, same_structure=True)
```
Before training, read the checkpoints if available
```
if not os.path.isfile('%s/model.pdparams' % env["args"].checkpoints):
print("Start new train (model).")
min_loss = np.Inf
epoch = 0
else:
print("Found the model file. continue to train ... ")
model.set_state_dict(paddle.load('%s/model.pdparams' % env["args"].checkpoints))
min_loss = paddle.load('%s/minloss.pdtensor' % env["args"].checkpoints)
epoch = np.load('%s/logged_epoch.npy' % env["args"].checkpoints)
optimizer = paddle.optimizer.Adam(learning_rate=env["args"].learning_rate, parameters=model.parameters())
if os.path.isfile('%s/opt_state.pdopt' % env["args"].checkpoints):
opt_state = paddle.load('%s/opt_state.pdopt' % env["args"].checkpoints)
optimizer.set_state_dict(opt_state)
patience = np.Inf
best_val_mape = np.Inf
max_iter = 0
```
### Start train
We now initialize the Adam optimizer and start the training procedure. The learning rate is set to 0.0001. Here we can activate the early-stopping mechanism if needed. Then we train the model for 10 epochs for demonstration purposes. In each epoch, we receive the losses, the critical intermediate variables, and the completed OD matrix. If the loss goes down, we save a checkpoint.
```
for k in range(epoch, env["args"].iter_num):
kld_loss_tvge, kld_loss_avde, pis_loss, all_h, all_enc_mean, all_prior_mean, all_enc_d_mean, all_dec_t, \
all_z_in, all_z_out \
= model(env["x"], env["xs"], env["target_graph"], env["supp_graph"], env["mask"],
env["primary_scale"], env["ground_truths"])
pred = env["primary_scale"].inverse_transform(all_dec_t[-1].numpy())
val_MAE, val_RMSE, val_MAPE = utils.validate(pred, env["val_data"][0],
env["val_data"][1], flag='val')
test_MAE, test_RMSE, test_MAPE = utils.validate(pred, env["test_data"][0],
env["test_data"][1], flag='test')
if val_MAPE < best_val_mape:
best_val_mape = val_MAPE
max_iter = 0
else:
max_iter += 1
if max_iter >= patience:
print('Early Stop!')
break
if k >= env["args"].delay:
loss = kld_loss_tvge + kld_loss_avde + pis_loss
else:
loss = pis_loss
loss.backward()
optimizer.step()
optimizer.clear_grad()
if k % 10 == 0:
print('epoch: ', k)
print('loss =', loss.mean().item())
print('kld_loss_tvge =', kld_loss_tvge.mean().item())
print('kld_loss_avde =', kld_loss_avde.mean().item())
print('pis_loss =', pis_loss.mean().item())
print('val', "MAE:", val_MAE, 'RMSE:', val_RMSE, 'MAPE:', val_MAPE)
print('test', "MAE:", test_MAE, 'RMSE:', test_RMSE, 'MAPE:', test_MAPE)
if (loss.mean() < min_loss).item() | (k == env["args"].delay):
print('epoch: %d, Loss goes down, save the model. pis_loss = %f' % (k, pis_loss.mean().item()))
print('val', "MAE:", val_MAE, 'RMSE:', val_RMSE, 'MAPE:', val_MAPE)
print('test', "MAE:", test_MAE, 'RMSE:', test_RMSE, 'MAPE:', test_MAPE)
min_loss = loss.mean().item()
paddle.save(all_enc_mean, '%s/all_enc_mean.pdtensor' % env["args"].result_path)
paddle.save(all_prior_mean, '%s/all_prior_mean.pdtensor' % env["args"].result_path)
paddle.save(all_enc_d_mean, '%s/all_enc_d_mean.pdtensor' % env["args"].result_path)
paddle.save(all_dec_t, '%s/all_dec_t.pdtensor' % env["args"].result_path)
paddle.save(all_z_in, '%s/all_z_in.pdtensor' % env["args"].result_path)
paddle.save(all_z_out, '%s/all_z_out.pdtensor' % env["args"].result_path)
paddle.save(model.state_dict(), '%s/model.pdparams' % env["args"].checkpoints)
paddle.save(loss.mean(), '%s/minloss.pdtensor' % env["args"].checkpoints)
paddle.save(optimizer.state_dict(), '%s/opt_state.pdopt' % env["args"].checkpoints)
np.save('%s/logged_epoch.npy' % env["args"].checkpoints, k)
```
That covers the training steps; you can adjust them as needed.
## Part II: Result Evalution
Below we will introduce how to use the trained model for OD matrix completion and evaluate the results.
Visit `paddlespatial/networks/vmrgae/eval.py` for more details.
```
from train import prep_env
if __name__ == '__main__':
env = prep_env(flag='eval')
if env['args'].device == 'gpu':
paddle.set_device('gpu')
# load VMR-GAE and run
model = VmrGAE(x_dim=env["x"].shape[-1], d_dim=env["xs"].shape[-1], h_dim=env["args"].hidden_dim,
num_nodes=env["args"].num_nodes, n_layers=env["args"].rnn_layer,
eps=1e-10, same_structure=True)
if not os.path.isfile('%s/model.pdparams' % env["args"].checkpoints):
print('Checkpoint does not exist.')
exit()
else:
model.set_state_dict(paddle.load('%s/model.pdparams' % env["args"].checkpoints))
min_loss = paddle.load('%s/minloss.pdtensor' % env["args"].checkpoints)
epoch = np.load('%s/logged_epoch.npy' % env["args"].checkpoints)
```
Here we reuse the preparation function from the training process with the `eval` flag to hold the model configuration. Then, we load the model and the available checkpoint.
### Start Evaluation
We run the trained model `sample_time` times and report the mean values as the completion results, as well as the standard deviations.
```
pred = []
for i in range(env["args"].sample_time):
_, _, _, _, _, _, _, all_dec_t, _, _ \
= model(env["x"], env["xs"], env["target_graph"], env["supp_graph"], env["mask"],
env["primary_scale"], env["ground_truths"])
pred.append(env["primary_scale"].inverse_transform(all_dec_t[-1].numpy()))
pred = np.stack(pred, axis=0)
pe, std = pred.mean(axis=0), pred.std(axis=0)
pe[np.where(pe < 0.5)] = 0
print(pe)
```
| github_jupyter |
```
!pip install -q transformers datasets sentencepiece coral_pytorch
import torch
import torch.nn as nn
from torch.functional import F
from datasets import Dataset
import transformers as ts
from transformers import AutoTokenizer , AutoModelForSequenceClassification
from transformers import TrainingArguments, Trainer
from transformers import DataCollatorWithPadding
from transformers import create_optimizer
from transformers.file_utils import ModelOutput
from transformers.modeling_outputs import SequenceClassifierOutput
from coral_pytorch.layers import CoralLayer
from coral_pytorch.losses import coral_loss
from coral_pytorch.dataset import levels_from_labelbatch
from coral_pytorch.dataset import proba_to_label
from dataclasses import dataclass
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from scipy import stats
import sys
from data_loader import (
retrieve_instances_from_dataset,
retrieve_labels_from_dataset_for_classification,
retrieve_labels_from_dataset_for_ranking,
write_predictions_to_file,
)
PRE_TRAINED_MODEL = "microsoft/deberta-v3-base"
DIM_KEY = "hidden_size"
```
# Preparing Data
```
def loadDataset(dataPath , labelPath=None , scoresPath=None):
dataset = pd.read_csv(dataPath, sep="\t", quoting=3)
ids , sentences , fillers = retrieve_instances_from_dataset(dataset)
#Creating dictionaries to convert datas to Huggingface Dataset
datasetDict = {
"id": ids,
"sentence": sentences,
"filler": fillers,
}
labels = None
if labelPath != None:
labels = pd.read_csv(labelPath, sep="\t", header=None, names=["Id", "Label"])
labels = retrieve_labels_from_dataset_for_classification(labels)
datasetDict["labels"] = labels
scores = None
if scoresPath != None:
scores = pd.read_csv(scoresPath, sep="\t", header=None, names=["Id", "Label"])
scores = retrieve_labels_from_dataset_for_ranking(scores)
datasetDict["scores"] = scores
# Removing periods if fillers appear at the end of the sentence (otherwise the
# period would be treated as the last word piece of the filler). Note that
# str.replace returns a new string, so the result must be assigned back.
for index, filler in enumerate(fillers):
fillers[index] = filler.replace(".", "")
#Creating Huggingface Datasets from Dictionaries
dataset = Dataset.from_dict(datasetDict)
return dataset
trainDataset = loadDataset("Data/Train_Dataset.tsv",
"Data/Train_Labels.tsv",
"Data/Train_Scores.tsv")
valDataset = loadDataset("Data/Val_Dataset.tsv",
"Data/Val_Labels.tsv",
"Data/Val_Scores.tsv")
# Assuming the test split follows the same naming scheme as the train split above
testDataset = loadDataset("Data/Test_Dataset.tsv")
print(trainDataset)
print(valDataset)
print(testDataset)
print(trainDataset["sentence"][0])
```
# Preprocessing
```
tokenizer = AutoTokenizer.from_pretrained(PRE_TRAINED_MODEL)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer , return_tensors="pt")
def preprocessDataset(dataset , tokenizer):
def addToDict(dict_1 , dict_2 , columns_1=[] , columns_2=["input_ids" , "attention_mask"]):
for item_1 , item_2 in zip(columns_1 , columns_2):
dict_1[item_1] = dict_2.pop(item_2)
def mappingFunction(dataset):
outputDict = {}
cleanedSentence = dataset["sentence"].replace("\n" , " ").replace("(...)" , "").strip()
sentenceWithFiller = cleanedSentence.replace("[MASK]" , dataset["filler"].strip()).strip()
tokenized_sentence = tokenizer(sentenceWithFiller)
addToDict(outputDict , tokenized_sentence , ["input_ids" , "attention_mask"])
#Getting the index of the last word piece of the filler
if "cls_token" in tokenizer.special_tokens_map.keys():
filler_indecies = len(tokenizer(tokenizer.special_tokens_map["cls_token"] + " " + cleanedSentence.split("[MASK]")[0].strip() + " " + dataset["filler"].strip() , add_special_tokens=False)["input_ids"]) - 1
elif "bos_token" in tokenizer.special_tokens_map.keys():
filler_indecies = len(tokenizer(tokenizer.special_tokens_map["bos_token"] + " " + cleanedSentence.split("[MASK]")[0].strip() + " " + dataset["filler"].strip() , add_special_tokens=False)["input_ids"]) - 1
else:
filler_indecies = len(tokenizer(cleanedSentence.split("[MASK]")[0].strip() + " " + dataset["filler"].strip() , add_special_tokens=False)["input_ids"]) - 1
outputDict["filler_indecies"] = filler_indecies
return outputDict
return dataset.map(mappingFunction , batched=False)
tokenizedTrainDataset = preprocessDataset(trainDataset , tokenizer)
tokenizedValDataset = preprocessDataset(valDataset , tokenizer)
tokenizedTestDataset = preprocessDataset(testDataset , tokenizer)
print(tokenizedTrainDataset)
print(tokenizedValDataset)
print(tokenizedTestDataset)
```
# Model Definition
```
@dataclass
class CustomOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
classificationOutput: torch.FloatTensor = None
regressionOutput: torch.FloatTensor = None
class SequenceClassificationModel(nn.Module):
def __init__(self,
encoder,
dim,
use_coral=False,
use_cls=True,
supportPooledRepresentation=False,
mode="both",
num_labels=3,
num_ranks=5,
lambda_c=0.5,
lambda_r=0.5,
dropout_rate=0.2):
super().__init__()
#mode can be one of these: ["both" , "classification" , "regression"]
self.encoder = encoder
self.dim = dim
self.use_coral = use_coral
self.use_cls = use_cls
self.supportPooledRepresentation = supportPooledRepresentation
self.mode = mode
self.num_labels = num_labels
self.num_ranks = num_ranks
self.lambda_c = lambda_c
self.lambda_r = lambda_r
self.dropout_rate = dropout_rate
if self.use_cls:
self.pre_classifier = nn.Linear(self.dim*2 , self.dim , bias=True)
else:
self.pre_classifier = nn.Linear(self.dim , self.dim , bias=True)
self.dropout = nn.Dropout(p=self.dropout_rate , inplace=False)
self.regressionHead = CoralLayer(self.dim , self.num_ranks)
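# CoralLayer(dim, K) produces K-1 ordinal logits (shared weights, per-threshold
# biases), which is why forward() below reshapes outputs with .view(-1, K - 1)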
if use_coral:
self.classificationHead = CoralLayer(self.dim , self.num_labels)
else:
self.classificationHead = nn.Linear(self.dim , self.num_labels , bias=True)
def forward(
self,
input_ids,
attention_mask,
filler_indecies,
labels=None,
scores=None,
**args):
device = self.encoder.device
# Getting fillers representation from pre-trained transformer (encoder)
sentence_embedding = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
)
#Getting Fillers Representation
filler_tokens = sentence_embedding[0][filler_indecies[0] , filler_indecies[1]]
fillers = filler_tokens[: , 0 , :]
#Concatenating [CLS] output with Filler output if the model supports [CLS]
pooled_output = None
if self.use_cls:
if self.supportPooledRepresentation:
pooled_output = torch.concat((sentence_embedding[1] , fillers) , dim=-1)
else:
pooled_output = torch.concat((sentence_embedding[0][: , 0 , :] , fillers) , dim=-1)
else:
pooled_output = fillers
#Passing Pooled Output to another dense layer followed by activation function and dropout
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.GELU()(pooled_output)
pooled_output = self.dropout(pooled_output)
#Passing the final output to the classificationHead and RegressionHead
classificationOutput = self.classificationHead(pooled_output)
regressionOutput = self.regressionHead(pooled_output)
totalLoss = None
classification_loss = None
regression_loss = None
#Computing classification loss
if labels is not None and (self.mode.lower() == "both" or self.mode.lower() == "classification"):
if self.use_coral:
levels = levels_from_labelbatch(labels.view(-1) , self.num_labels).to(device)
classification_loss = coral_loss(classificationOutput.view(-1 , self.num_labels - 1) , levels.view(-1 , self.num_labels - 1))
else:
loss_fct = nn.CrossEntropyLoss()
classification_loss = loss_fct(classificationOutput.view(-1 , self.num_labels) , labels.view(-1))
#Computing regression loss
if scores is not None and (self.mode.lower() == "both" or self.mode.lower() == "regression"):
levels = levels_from_labelbatch(scores.view(-1) , self.num_ranks).to(device)
regression_loss = coral_loss(regressionOutput.view(-1 , self.num_ranks - 1) , levels.view(-1 , self.num_ranks - 1))
if self.mode.lower() == "both" and (labels != None and scores != None):
totalLoss = (self.lambda_c * classification_loss) + (self.lambda_r * regression_loss)
elif self.mode.lower() == "classification" and labels != None:
totalLoss = classification_loss
elif self.mode.lower() == "regression" and scores != None:
totalLoss = regression_loss
outputs = torch.concat((classificationOutput , regressionOutput) , dim=-1)
finalClassificationOutput = torch.sigmoid(classificationOutput)
finalRegressionOutput = torch.sigmoid(regressionOutput)
finalClassificationOutput = proba_to_label(finalClassificationOutput.cpu().detach()).numpy()
finalRegressionOutput = torch.sum(finalRegressionOutput.cpu().detach() , dim=-1).numpy() + 1
return CustomOutput(
loss=totalLoss,
logits=outputs,
classificationOutput=finalClassificationOutput,
regressionOutput=finalRegressionOutput,
)
def model_init(encoderPath=None,
dimKey=None,
customEncoder=None,
customDim=None,
mode="both",
use_coral=True,
use_cls=True,
supportPooledRepresentation=False,
freezeEmbedding=True,
num_labels=3,
num_ranks=5,
lambda_c=0.5,
lambda_r=0.5,
dropout_rate=0.2,):
encoder = ts.AutoModel.from_pretrained(encoderPath) if encoderPath != None else customEncoder
dim = encoder.config.to_dict()[dimKey] if dimKey != None else customDim
model = SequenceClassificationModel(
encoder,
dim,
use_coral=use_coral,
use_cls=use_cls,
supportPooledRepresentation=supportPooledRepresentation,
mode=mode,
num_labels=num_labels,
num_ranks=num_ranks,
lambda_c=lambda_c,
lambda_r=lambda_r,
dropout_rate=dropout_rate,
)
try:
if freezeEmbedding:
for param in model.encoder.embeddings.parameters():
param.requires_grad = False
except:
print("The embedding layer name is different in this model, try to find the name of the emebdding layer and freeze it manually")
return model
def makeTrainer(model,
trainDataset,
data_collator,
tokenizer,
outputsPath,
learning_rate=1.90323e-05,
scheduler="cosine",
save_steps=5000,
batch_size=8,
num_epochs=5,
weight_decay=0.00123974,
roundingType="F"):
def data_collator_fn(items , columns=[]):
data_collator_input = {
"input_ids": items[columns[0]],
"attention_mask": items[columns[1]]
}
result = data_collator(data_collator_input)
items[columns[0]] = result["input_ids"]
items[columns[1]] = result["attention_mask"]
def collate_function(items):
outputDict = {
key: [] for key in items[0].keys()
}
for item in items:
for key in item.keys():
outputDict[key].append(item[key])
data_collator_fn(outputDict , ["input_ids" , "attention_mask"])
#Removing unnecessary Items from outputDict
columns = ["sentence" , "filler" , "id"]
for item in columns:
try:
outputDict.pop(item)
except:
pass
#Adding New Columns
if "labels" in outputDict.keys():
outputDict["labels"] = torch.tensor(outputDict.pop("labels"))
if "scores" in outputDict.keys():
if roundingType == "F":
outputDict["scores"] = torch.tensor(outputDict.pop("scores") , dtype=torch.int32) - 1
elif roundingType == "R":
outputDict["scores"] = torch.tensor([round(score) for score in outputDict.pop("scores")] , dtype=torch.int32) - 1
filler_indecies = torch.tensor(outputDict.pop("filler_indecies")).view(-1 , 1)
outputDict["filler_indecies"] = (torch.arange(filler_indecies.shape[0]).view(-1 , 1) , filler_indecies)
return outputDict
training_args = TrainingArguments(
outputsPath,
learning_rate= learning_rate,
lr_scheduler_type=scheduler,
save_steps=save_steps,
per_device_train_batch_size=batch_size,
num_train_epochs=num_epochs,
weight_decay=weight_decay,
remove_unused_columns=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=trainDataset,
tokenizer=tokenizer,
data_collator=collate_function,
)
return trainer , collate_function
model = model_init(encoderPath=PRE_TRAINED_MODEL,
dimKey=DIM_KEY,
mode="both",
use_coral=True,
use_cls=True,
supportPooledRepresentation=False,
freezeEmbedding=True)
trainer , collate_function = makeTrainer(model,
tokenizedTrainDataset,
data_collator,
tokenizer,
"outputs/")
trainer.train()
```
# Evaluating on Val Dataset
```
def evaluateModel(
model,
dataset,
collate_function,
):
model.eval()
#Passing the inputs through model
labels = []
scores = []
for item in dataset:
sample_input = collate_function([item])
outputs = model(input_ids=sample_input["input_ids"].to(model.encoder.device),
attention_mask=sample_input["attention_mask"].to(model.encoder.device),
filler_indecies=sample_input["filler_indecies"],
scores=None)
labels.append(outputs["classificationOutput"][0])
scores.append(outputs["regressionOutput"][0])
#Computing Accuracy
count = 0
correctCount = 0
for prediction , target in zip(labels , dataset["labels"]):
count += 1
correctCount += 1 if prediction == target else 0
accuracy = (correctCount / count)
#Computing Spearman
scores = np.array(scores , dtype=np.float32)
valScores = np.array(dataset["scores"] , dtype=np.float32)
spearman = stats.spearmanr(scores.reshape(-1 , 1) , valScores.reshape(-1 , 1))
return (labels , scores) , accuracy , spearman
(labels , scores) , accuracy , spearman = evaluateModel(model, tokenizedValDataset, collate_function)
print(f"Accuracy is: {accuracy}")
print(f"Spearman is: {spearman}")
```
# Making Predictions on Test Dataset
```
def predictOnTestDataset(
model,
dataset,
collate_function,
labelsPath=None,
scoresPath=None,
):
model.eval()
ids = []
classification_predictions = []
ranking_predictions = []
for item in dataset:
sample_input = collate_function([item])
outputs = model(input_ids=sample_input["input_ids"].to(model.encoder.device),
attention_mask=sample_input["attention_mask"].to(model.encoder.device),
filler_indecies=sample_input["filler_indecies"],
scores=None,
labels=None)
ids.append(item["id"])
classification_predictions.append(outputs["classificationOutput"][0])
ranking_predictions.append(outputs["regressionOutput"][0])
if labelsPath != None:
open(labelsPath , mode="wb")
write_predictions_to_file(labelsPath , ids , classification_predictions , "classification")
if scoresPath != None:
open(scoresPath , mode="wb")
write_predictions_to_file(scoresPath , ids , ranking_predictions , "ranking")
return ids , classification_predictions , ranking_predictions
ids , labels , scores = predictOnTestDataset(model , tokenizedTestDataset , collate_function , "classification_answers.tsv" , "ranking_answers.tsv" )
```
# Inference
```
def inference(
model,
sentences,
fillers,
tokenizer,
collate_function
):
model.eval()
datasetDict = {
"sentence": sentences,
"filler": fillers,
}
dataset = Dataset.from_dict(datasetDict)
tokenizedDataset = preprocessDataset(dataset , tokenizer)
finalInput = collate_function(tokenizedDataset)
outputs = model(
input_ids=finalInput["input_ids"].to(model.encoder.device),
attention_mask=finalInput["attention_mask"].to(model.encoder.device),
filler_indecies=finalInput["filler_indecies"],
)
finalLabels = []
for item in outputs["classificationOutput"].reshape(-1):
if item == 0:
finalLabels.append("Implausible")
elif item == 1:
finalLabels.append("Neutral")
elif item == 2:
finalLabels.append("Plausible")
finalLabels = np.array(finalLabels)
return {
"labels": finalLabels,
"scores": outputs["regressionOutput"],
}
inference(model,
sentences=["This is a [MASK] to see how model works" , "This is a [MASK] to see how model works"],
fillers=["test" , "fork"],
tokenizer=tokenizer,
collate_function=collate_function,)
```
# HW3: Variational Autoencoders
```
import torch
import torch.optim as optim
import torch.nn as nn
from torch.distributions import Normal
from itertools import chain
from torchlib.generative_model.autoencoder.vae import VAE
from torchlib.dataset.utils import create_data_loader
from torchlib.utils.distributions import IndependentNormal
from sklearn.model_selection import train_test_split
from torchlib.common import FloatTensor, move_tensor_to_gpu
import matplotlib.pyplot as plt
%load_ext autoreload
%autoreload 2
%qtconsole
```
## VAEs in 2D
### Part A
```
import numpy as np
def sample_data_1():
count = 100000
rand = np.random.RandomState(0)
return [[1.0, 2.0]] + rand.randn(count, 2) * [[5.0, 1.0]]
def sample_data_2():
count = 100000
rand = np.random.RandomState(0)
return [[1.0, 2.0]] + (rand.randn(count, 2) * [[5.0, 1.0]]).dot(
[[np.sqrt(2) / 2, np.sqrt(2) / 2], [-np.sqrt(2) / 2, np.sqrt(2) / 2]])
data_1 = sample_data_1().astype(np.float32)
data_1_loader = create_data_loader((data_1,), batch_size=1024)
data_2 = sample_data_2().astype(np.float32)
data_2_loader = create_data_loader((data_2,), batch_size=1024)
# visualize data distribution
plt.scatter(data_1[:, 0], data_1[:, 1])
# visualize data distribution
plt.scatter(data_2[:, 0], data_2[:, 1])
```
### Define prior
```
prior = Normal(loc=torch.zeros(2).type(FloatTensor), scale=torch.ones(2).type(FloatTensor))
```
### Define encoder
```
class Encoder(nn.Module):
def __init__(self, code_size=2, nn_size=32):
super(Encoder, self).__init__()
self.model = nn.Sequential(
nn.Linear(2, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
nn.Linear(nn_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
nn.Linear(nn_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
)
self.mu = nn.Linear(nn_size, code_size)
self.logvar = nn.Linear(nn_size, code_size)
def forward(self, x):
x = self.model(x)
mu = self.mu(x)
logvar = self.logvar(x)
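# Note: Normal(loc, scale) takes the standard deviation as its second argument,
# so exp(logvar) treats this head as predicting log-std; a log-variance head
# would use torch.exp(0.5 * logvar) instead.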
return Normal(mu, torch.exp(logvar))
```
### Define decoder
```
class Decoder_1(nn.Module):
def __init__(self, code_size=2, nn_size=32):
super(Decoder_1, self).__init__()
self.model = nn.Sequential(
nn.Linear(code_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
nn.Linear(nn_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
nn.Linear(nn_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
)
self.mu = nn.Linear(nn_size, code_size)
self.logvar = nn.Linear(nn_size, code_size)
def forward(self, x):
x = self.model(x)
mu = self.mu(x)
logvar = self.logvar(x)
return IndependentNormal(mu, torch.exp(logvar))
class Decoder_2(nn.Module):
def __init__(self, code_size=2, nn_size=32):
super(Decoder_2, self).__init__()
self.model = nn.Sequential(
nn.Linear(code_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
nn.Linear(nn_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
nn.Linear(nn_size, nn_size),
nn.BatchNorm1d(nn_size),
nn.ReLU(),
)
self.mu = nn.Linear(nn_size, code_size)
self.logvar = nn.Linear(nn_size, 1)
def forward(self, x):
x = self.model(x)
mu = self.mu(x)
logvar = self.logvar(x)
return IndependentNormal(mu, torch.exp(logvar))
```
### Fit on dataset 1 using diag normal decoder
```
encoder = Encoder()
decoder = Decoder_1()
optimizer = optim.Adam(chain(encoder.parameters(),
decoder.parameters()), lr=1e-3)
model = VAE(encoder, decoder, prior, optimizer)
model.train(num_epoch=20, train_data_loader=data_1_loader, verbose=False)
full_path_samples = model.sample(1000, full_path=True).cpu().numpy()
plt.figure()
plt.scatter(full_path_samples[:, 0], full_path_samples[:, 1])
no_decoder_noise_samples = model.sample(1000, full_path=False).cpu().numpy()
plt.figure()
plt.scatter(no_decoder_noise_samples[:, 0], no_decoder_noise_samples[:, 1])
```
### Fit on dataset 2 using diag normal decoder
```
encoder = Encoder()
decoder = Decoder_1()
optimizer = optim.Adam(chain(encoder.parameters(),
decoder.parameters()), lr=1e-3)
model = VAE(encoder, decoder, prior, optimizer)
model.train(num_epoch=20, train_data_loader=data_2_loader, verbose=False)
full_path_samples = model.sample(1000, full_path=True).cpu().numpy()
plt.figure()
plt.scatter(full_path_samples[:, 0], full_path_samples[:, 1])
no_decoder_noise_samples = model.sample(1000, full_path=False).cpu().numpy()
plt.figure()
plt.scatter(no_decoder_noise_samples[:, 0], no_decoder_noise_samples[:, 1])
```
### Fit on dataset 1 using single sigma decoder
```
encoder = Encoder()
decoder = Decoder_2()
optimizer = optim.Adam(chain(encoder.parameters(),
decoder.parameters()), lr=1e-3)
model = VAE(encoder, decoder, prior, optimizer)
model.train(num_epoch=20, train_data_loader=data_1_loader, verbose=False)
full_path_samples = model.sample(1000, full_path=True).cpu().numpy()
plt.figure()
plt.scatter(full_path_samples[:, 0], full_path_samples[:, 1])
no_decoder_noise_samples = model.sample(1000, full_path=False).cpu().numpy()
plt.figure()
plt.scatter(no_decoder_noise_samples[:, 0], no_decoder_noise_samples[:, 1])
```
### Fit on dataset 2 using single sigma decoder
```
encoder = Encoder()
decoder = Decoder_2()
optimizer = optim.Adam(chain(encoder.parameters(),
decoder.parameters()), lr=1e-3)
model = VAE(encoder, decoder, prior, optimizer)
model.train(num_epoch=20, train_data_loader=data_2_loader, verbose=False)
full_path_samples = model.sample(1000, full_path=True).cpu().numpy()
plt.figure()
plt.scatter(full_path_samples[:, 0], full_path_samples[:, 1])
no_decoder_noise_samples = model.sample(1000, full_path=False).cpu().numpy()
plt.figure()
plt.scatter(no_decoder_noise_samples[:, 0], no_decoder_noise_samples[:, 1])
```
### Part B
```
def sample_data_3():
count = 100000
rand = np.random.RandomState(0)
a = [[-1.5, 2.5]] + rand.randn(count // 3, 2) * 0.2
b = [[1.5, 2.5]] + rand.randn(count // 3, 2) * 0.2
c = np.c_[2 * np.cos(np.linspace(0, np.pi, count // 3)),
-np.sin(np.linspace(0, np.pi, count // 3))]
c += rand.randn(*c.shape) * 0.2
data_x = np.concatenate([a, b, c], axis=0)
data_y = np.array([0] * len(a) + [1] * len(b) + [2] * len(c))
perm = rand.permutation(len(data_x))
return data_x[perm], data_y[perm]
data_3, data_3_label = sample_data_3()
data_3 = data_3.astype(np.float32)
data_3_train, data_3_test, data_3_train_label, data_3_test_label = train_test_split(
data_3, data_3_label, test_size=0.2)
data_3_train_loader = create_data_loader((data_3_train, data_3_train_label), batch_size=1024)
data_3_test_loader = create_data_loader((data_3_test, data_3_test_label), batch_size=1024,
shuffle=False, drop_last=False)
plt.scatter(data_3[:, 0], data_3[:, 1], c=data_3_label)
encoder = Encoder(nn_size=512)
decoder = Decoder_1(nn_size=512)
optimizer = optim.Adam(chain(encoder.parameters(),
decoder.parameters()), lr=1e-3)
model = VAE(encoder, decoder, prior, optimizer)
model.train(num_epoch=100, train_data_loader=data_3_train_loader, verbose=False)
full_path_samples = model.sample(10000, full_path=True).cpu().numpy()
plt.figure()
plt.scatter(full_path_samples[:, 0], full_path_samples[:, 1])
no_decoder_noise_samples = model.sample(10000, full_path=False).cpu().numpy()
plt.figure()
plt.scatter(no_decoder_noise_samples[:, 0], no_decoder_noise_samples[:, 1])
```
### Visualize latents of the test data
```
with torch.no_grad():
latent = []
for data in data_3_test_loader:
data = move_tensor_to_gpu(data[0])
latent.append(model.encode_reparm(data))
latent = torch.cat(latent, dim=0).cpu().numpy()
plt.scatter(latent[:, 0], latent[:, 1], c=data_3_test_label)
```
# Analyzing Real vs. Fake News Article Headlines 📰
Author:<br>[Navraj Narula](http://navierula.github.io)<br><br>
Data Source: <br>[Randomly-Collected Fake News Dataset](https://github.com/BenjaminDHorne/fakenewsdata1)<br><br>
Resources Consulted: <br>[Text Mining with R](http://tidytextmining.com)<br>[R: Text Classification using a K Nearest Neighbour Model](http://garonfolo.dk/herbert/2015/05/r-text-classification-using-a-k-nearest-neighbour-model/)
```
# turn off warnings
options(warn=-1)
# import libraries
library(dplyr)
library(e1071)
library(ggplot2)
library(tidytext)
library(stringr)
library(RColorBrewer)
library(tm)
library(class)
library(SnowballC)
# load in dataset
mydata = read.csv("cleaned_data/headlines.txt",sep="\t",stringsAsFactors = FALSE,col.names=c("text", "status"),fill=TRUE)
# remove rows with empty values
mydata = mydata[!apply(mydata, 1, function(x) any(x=="")),]
# preview the first five rows
# (mostly fake articles at the top)
head(mydata)
# preview the last five rows
# (mostly real articles at the bottom)
tail(mydata)
# calculate term/word frequency for words present in articles
news_words <- mydata %>%
unnest_tokens(word, text) %>%
count(status, word, sort = TRUE) %>%
ungroup()
total_words <- news_words %>%
group_by(status) %>%
summarize(total = sum(n))
news_words <- left_join(news_words, total_words)
news_words
```
From the table above, we can see that the word "trump" is not only the most commonly used word in real news article headlines, but also the most commonly used word overall. This makes sense given the past election cycle. Out of 633 total words that appeared in real news article headlines, the word "trump" appeared 28 times, or about 4.4% overall.<br><br>
In fake news article headlines, the most commonly used word was "obama," followed once again by "trump." These words appeared 20 and 19 times out of 842 total words, respectively: about 2.4% and 2.3%.
```
# visualize word counts in buckets
ggplot(news_words, aes(n/total, fill = status)) +
geom_histogram(show.legend = TRUE,binwidth = 30,color="black") +
facet_wrap(~status, ncol = 4)
```
The visualization above simply counts the number of words present in each type of headline. For fake news headlines, the total number of words is 842. The total number of words for real news headlines is 633. Considering that this particular dataset contains fewer real news headlines than fake ones, the counts make sense.
```
sprintf("The number of real news headlines in my dataset is: %d", str_count(mydata, "real")[2])
sprintf("The number of fake news headlines in my dataset is: %d", str_count(mydata, "fake")[2])
# calculate frequency by rank, using Zipf's law
freq_by_rank <- news_words %>%
group_by(status) %>%
mutate(rank = row_number(),
`term frequency` = n/total)
freq_by_rank
```
The rank column gives each word's position in the frequency table. Term frequency against rank is plotted below; on log-log axes the curve is close to a straight line with a constant negative slope, as Zipf's law predicts.
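In symbols, Zipf's law says a term's frequency is roughly inversely proportional to its rank:
$$f(r) \propto \frac{1}{r^{s}} \quad\Longrightarrow\quad \log f(r) = -s \log r + c,$$
so on the log-log axes below we should expect an approximately straight line with negative slope (classically $s \approx 1$); since Zipf's law is only an empirical regularity, approximately straight is all we can hope for.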
```
myColors <- c("gold4", "mediumorchid4")
# plot Zipf's law
freq_by_rank %>%
ggplot(aes(rank, `term frequency`, col=status)) +
geom_line(size = 3, alpha = 0.8) +
scale_x_log10() +
scale_y_log10() +
scale_color_manual(values=myColors)
```
From the above graph, we can see that words associated with real news headlines have a higher rank - which is not surprising.
I will now use TF-IDF (Term Frequency–Inverse Document Frequency) to find the most relevant word for each article headline. According to [tidytextmining](http://tidytextmining.com/tfidf.html#term-frequency-in-jane-austens-novels), "the idea of tf-idf is to find the important words for the content of each document by decreasing the weight for commonly used words and increasing the weight for words that are not used very much in a collection or corpus of documents."
TF-IDF may be a good method for understanding the contents of a document (or headline, in our case) because it finds words that are common, but not too common. This helps discard words that are unnecessary or irrelevant.
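For reference, a common form of the statistic (roughly what tidytext's `bind_tf_idf` computes) is
$$\text{tf-idf}(t, d) = \frac{n_{t,d}}{\sum_{t'} n_{t',d}} \times \ln\frac{N}{n_t},$$
where $N$ is the number of documents and $n_t$ is the number of documents containing term $t$. Here there are only two "documents" (the fake and real headline collections), so a word's idf is either $\ln 2$ (it appears in one class) or $\ln 1 = 0$ (it appears in both), which is why very common words get a tf-idf of exactly zero below.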
```
news_words <- news_words %>%
bind_tf_idf(word, status, n)
news_words
```
We can see that tf-idf scores are ZERO for words that are very common: they appear in both types of news headlines. The idf is low for such words and higher for words that appear in fewer headlines.
```
# order terms by highest tf-idf score
news_words %>%
select(-total) %>%
arrange(desc(tf_idf))
myColors <- c("rosybrown3", "darkseagreen4")
# plot top 30 words by tf-idf
plot_ <- news_words %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word))))
plot_ %>%
top_n(30) %>%
ggplot(aes(word, tf_idf, fill = status)) +
geom_bar(stat="identity") +
scale_fill_manual(values=myColors) + #scale_fill instead of scale_col to fill color manually
labs(x = "words", y = "tf-idf") +
coord_flip()
myColors <- c("lightpink1", "cornflowerblue")
# plot by grouping for top 25 words
plot_ %>%
group_by(status) %>%
top_n(25) %>%
ungroup %>%
ggplot(aes(word, tf_idf, fill = status)) +
geom_col(show.legend = FALSE) +
labs(x = "word", y = "tf-idf") +
facet_wrap(~status, ncol = 2, scales = "free") +
scale_fill_manual(values=myColors) +
coord_flip()
```
### News Classifier Using K-Nearest Neighbors Algorithm
```
# turn off warnings
options(warn=-1)
#install.packages("RTextTools") #try installing this as a package
# set seed value
set.seed(100)
# generate headlines corpus
headlines <- Corpus(VectorSource(mydata$text))
# clean headlines
headlines <- tm_map(headlines, content_transformer(tolower))
headlines <- tm_map(headlines, removeNumbers)
headlines <- tm_map(headlines, removeWords, stopwords("english"))
headlines <- tm_map(headlines, removePunctuation)
headlines <- tm_map(headlines, stripWhitespace)
headlines <- tm_map(headlines, stemDocument, language = "english")
# create document-term matrix
dtm <- DocumentTermMatrix(headlines)
# transforms document-term matrix to dataframe
mat.mydata <- as.data.frame(data.matrix(dtm), stringsAsfactors = FALSE)
# column bind on status
mat.mydata <- cbind(mat.mydata, mydata$status)
# Change name of new column to "status"
colnames(mat.mydata)[ncol(mat.mydata)] <- "status"
all <- 0
max = -Inf
for (i in 1:1000)
{
# split data into train and test sets
train <- sample(nrow(mat.mydata), ceiling(nrow(mat.mydata) * .50))
test <- (1:nrow(mat.mydata))[- train]
# assign classifier
classifier <- mat.mydata[, "status"]
modeldata <- mat.mydata[,!colnames(mat.mydata) %in% "status"]
# make predictions using knn algo
knn_predictions <- knn(modeldata[train, ], modeldata[test, ], classifier[train])
# create confusion matrix
confusion_matrix <- table("Predictions" = knn_predictions, Actual = classifier[test])
accuracy <- sum(diag(confusion_matrix))/length(test) * 100
all = all + accuracy
if (accuracy > max) {
max <- accuracy # find max accuracy
print(max)
print(confusion_matrix)
}
}
all/1000
```
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Custom training: basics
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/tr/r1/tutorials/eager/custom_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/tr/r1/tutorials/eager/custom_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block of machine learning.
In this tutorial we will use the TensorFlow primitives introduced in the earlier tutorials to do some simple machine learning.
TensorFlow also includes `tf.keras`, a higher-level neural network API that provides useful abstractions; we strongly recommend these higher-level APIs to people working with neural networks. However, in this short tutorial we cover neural network training from first principles to build a solid foundation.
## Setup
```
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.enable_eager_execution()
```
## Variables
Tensors in TensorFlow are immutable, stateless objects. Machine learning models, however, need changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state, which changes over the course of your computation, you can rely on the fact that Python is a stateful programming language:
```
# Using Python state
x = tf.zeros([10, 10])
x += 2 # This is equivalent to x = x + 2, which does not mutate the original
# value of x
print(x)
```
TensorFlow, however, has stateful operations built in, and these are often easier and more efficient to use than low-level Python representations of state. To represent weights in a model, for example, it is convenient and efficient to use TensorFlow variables.
A Variable is an object that stores a value and, when used in a TensorFlow computation, implicitly reads from this stored value. There are operations such as `tf.assign_sub` and `tf.scatter_update` that manipulate the value stored in a TensorFlow variable.
```
v = tf.Variable(1.0)
assert v.numpy() == 1.0
# Re-assign the value
v.assign(3.0)
assert v.numpy() == 3.0
# Use `v` in a TensorFlow operation like tf.square() and re-assign
v.assign(tf.square(v))
assert v.numpy() == 9.0
```
Computations using Variables are automatically traced when computing gradients. For Variables that represent embeddings, TensorFlow will do sparse updates by default, which are more computation- and memory-efficient.
Using Variables is also a way to quickly signal to a reader of your code that this piece of state is mutable.
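For example, gradients flow through Variables automatically. A minimal sketch (using the eager setup from the cells above; `tf.GradientTape` is used more fully later in this tutorial):
```
w = tf.Variable(2.0)
with tf.GradientTape() as tape:
  y = w * w  # operations on a Variable are traced automatically
print(tape.gradient(y, w))  # dy/dw = 2w = 4.0
```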
## Example: Fitting a linear model
Let's now put the concepts we covered so far (`Tensor`, `GradientTape`, and `Variable`) together to build and train a simple model. This typically involves a few steps:
1. Define the model.
2. Define a loss function.
3. Obtain training data.
4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.
In this tutorial, we'll walk through a trivial example: the linear model `f(x) = x * W + b`. It has two variables, `W` and `b`. Furthermore, we'll synthesize data such that a well-trained model would have `W = 3.0` and `b = 2.0`.
### Define the model
Let's define a simple class to encapsulate the variables and the computation.
```
class Model(object):
def __init__(self):
# Initialize the variables to (5.0, 0.0)
# In practice, these should be initialized to random values.
self.W = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def __call__(self, x):
return self.W * x + self.b
model = Model()
assert model(3.0).numpy() == 15.0
```
### Define the loss function
A loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss here.
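Written out, the mean squared (L2) loss implemented in the next cell is
$$L(\hat{y}, y) = \frac{1}{N}\sum_{i=1}^{N}\left(\hat{y}_i - y_i\right)^2.$$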
```
def loss(predicted_y, desired_y):
return tf.reduce_mean(tf.square(predicted_y - desired_y))
```
### Obtain training data
Let's synthesize the training data with some noise.
```
TRUE_W = 3.0
TRUE_b = 2.0
NUM_EXAMPLES = 1000
inputs = tf.random_normal(shape=[NUM_EXAMPLES])
noise = tf.random_normal(shape=[NUM_EXAMPLES])
outputs = inputs * TRUE_W + TRUE_b + noise
```
Before training the model, let's visualize where the model currently stands. We'll plot the model's predictions in red and the training data in blue.
```
import matplotlib.pyplot as plt
plt.scatter(inputs, outputs, c='b')
plt.scatter(inputs, model(inputs), c='r')
plt.show()
print('Current loss: '),
print(loss(model(inputs), outputs).numpy())
```
### Define a training loop
We now have our network and our training data. Let's train the model: use the training data to update the model's variables (`W` and `b`) so that the loss goes down via [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of gradient descent captured in `tf.train.Optimizer` implementations, and we highly recommend using those. But in the spirit of building from first principles, here we'll implement the basic math ourselves.
```
def train(model, inputs, outputs, learning_rate):
with tf.GradientTape() as t:
current_loss = loss(model(inputs), outputs)
dW, db = t.gradient(current_loss, [model.W, model.b])
model.W.assign_sub(learning_rate * dW)
model.b.assign_sub(learning_rate * db)
```
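As noted above, `tf.train.Optimizer` implementations capture many variants of this update. For illustration only (not part of the exercise), the equivalent step with a built-in optimizer could look like:
```
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

def train_with_optimizer(model, inputs, outputs):
  with tf.GradientTape() as t:
    current_loss = loss(model(inputs), outputs)
  grads = t.gradient(current_loss, [model.W, model.b])
  # apply_gradients takes (gradient, variable) pairs
  optimizer.apply_gradients(zip(grads, [model.W, model.b]))
```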
Finally, let's repeatedly run through the training data and see how `W` and `b` evolve.
```
model = Model()
# Collect the history of W and b values to plot later
Ws, bs = [], []
epochs = range(10)
for epoch in epochs:
Ws.append(model.W.numpy())
bs.append(model.b.numpy())
current_loss = loss(model(inputs), outputs)
train(model, inputs, outputs, learning_rate=0.1)
print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %
(epoch, Ws[-1], bs[-1], current_loss))
# Now let's plot it all
plt.plot(epochs, Ws, 'r',
epochs, bs, 'b')
plt.plot([TRUE_W] * len(epochs), 'r--',
[TRUE_b] * len(epochs), 'b--')
plt.legend(['W', 'b', 'true W', 'true_b'])
plt.show()
```
## Next steps
In this tutorial we covered `Variable`s and built and trained a simple linear model using the TensorFlow primitives discussed so far.
In theory, this is pretty much all you need to use TensorFlow for your machine learning research.
In practice, particularly for neural networks, higher-level APIs like `tf.keras` will be much more convenient, since they provide higher-level building blocks (called "layers"), utilities to save and restore state, a suite of loss functions, a suite of optimization strategies, and more.
# Questions [4 marks each, 40 marks total]
* Q1 Who spent the most money on renting?
* Q2 Which room generates the most income?
* Q3 How many times did Jack Jones rent a room?
* Q4 How many times did Claire Taylor rent each room?
* Q5 What is the total income of ALL rooms in June? Between 1st June 2018 (inclusive) and 30th June 2018 (inclusive); use the return date.
* Q6 What are the total incomes of EACH room in June? Between 1st June 2018 (inclusive) and 30th June 2018 (inclusive); use the return date.
* Q7 Find members whose total rent time is exactly 8 hours in June.
* Q8 Find members whose total rent time is greater than or equal to 9 hours in June.
* Q9 Find the total rent duration of EACH room in June.
* Q10 Find the total income of EACH room in June.
```
from django.db.models import Sum, Count  # aggregation helpers used throughout this notebook
q=Rent.objects.get(pk=1000)
q.id
q.room.hour_price
q.member.name
vars(q)
q=Rent.objects.filter(cost__gte=1700)
q
%%timeit -n1
q=Rent.objects.filter(cost__gte=1700)
for i in q:
print(i.member.name)
%%timeit -n1
q=Member.objects.filter(rent__cost__gte=1700)
for i in q:
print(i.name)
print(Rent.objects.filter(cost__gte=1700).query)
print(Member.objects.filter(rent__cost__gte=1700).query)
q=Rent.objects.all().order_by('cost')
for i in q:
print(i.id, i.cost)
q=Rent.objects.filter(member__name='Oscar Smith').aggregate(Sum('cost'))
q
q=Member.objects.annotate(Sum('rent__cost'))
print(q.query)
vars(q[0])
# Q1: Who spent the most money on renting?
q=Member.objects.annotate(total_cost=Sum('rent__cost')).order_by('-total_cost')
print(q.query)
print(f'{q[0].name} paid most at {q[0].total_cost}')
# Q2: Which room generates the most income?
q=Room.objects.annotate(total_cost=Sum('rent__cost')).order_by('-total_cost')
print(q.query)
print(f'{q[0].number} made most income at {q[0].total_cost}')
for i in q:
print(i.id, i.name, i.total_cost)
vars(i)
# Q3: How many times did Jack Jones rent a room?
q=Rent.objects.filter(member__name='Jack Jones')
print(q.count())
for i in q:
print(i.id, i.member.name)
# Q4: How many times did Claire Taylor rent each room?
q=Room.objects.filter(rent__member__name='Claire Taylor').annotate(Count('rent'))
print(q.query)
for i in q:
print(i.number, i.rent__count)
# Q4 (alternative): list the room for each of Claire Taylor's rentals
q=Rent.objects.filter(member__name='Claire Taylor')
for i in q:
print(i.room.number)
from datetime import datetime
init = datetime.strptime('2018-06-01 00:00:00', '%Y-%m-%d %H:%M:%S')
final = datetime.strptime('2018-06-30 23:59:59', '%Y-%m-%d %H:%M:%S')
q=Rent.objects.filter(stop__gte=init, stop__lte=final)
for i in q:
print(i.id, i.stop)
q=Rent.objects.filter(stop__month=6)
for i in q:
print(i.id, i.stop)
# Q5: What is the total income of ALL rooms in June? Between 1st June 2018 (inclusive) and 30th June 2018 (inclusive); use the return date.
Rent.objects.filter(stop__gte=init, stop__lte=final).aggregate(Sum('cost'))
print(Rent.objects.filter(stop__gte=init, stop__lte=final).query)
```
# Week07_2
```
%%timeit -n1
q=Rent.objects.filter(stop__gte=init, stop__lte=final)
sum=0
for i in q:
sum += i.cost
print(sum)
%%timeit -n1
Rent.objects.filter(stop__gte=init, stop__lte=final).aggregate(Sum('cost'))
```
# Q6
What are the total incomes of EACH room in June?
Between 1st June 2018 (inclusive) and 30th June 2018 (inclusive); use the return date.
```
#%%timeit -n1
q=Rent.objects.filter(stop__gte=init, stop__lte=final)
d={}
for i in q:
d[i.room.number]=d.get(i.room.number,0)+i.cost
for k,v in d.items():
print(k, v)
#%%timeit -n1
q=Room.objects.filter(rent__stop__month=6)\
.annotate(Sum('rent__cost'))
for i in q:
print(i.number, i.rent__cost__sum)
```
# Q7 Find members whose total rent time is exactly 8 hours in June
```
from django.db.models import F
import pandas as pd
df_rent = pd.DataFrame(list(Rent.objects.filter(stop__month=6).values()))
df_rent.head()
df_member = pd.DataFrame(list(Member.objects.all().values()))
df_member.head()
df=df_rent\
.merge(df_member, left_on='member_id', right_on='id')
df.head()
df['hour_diff']=df['stop']-df['start']
df.head()
from datetime import timedelta
df[ df.hour_diff==timedelta(hours=8) ]
df[['room_id','cost']]
```
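For completeness, Q9 and Q10 can also be answered with a single ORM aggregation. A minimal sketch, assuming the `Rent` fields used above (`room`, `start`, `stop`, `cost`) and a database backend that can sum durations (e.g. PostgreSQL):
```
from django.db.models import F, Sum, ExpressionWrapper, DurationField

q = (Rent.objects
     .filter(stop__month=6)
     .values('room__number')  # group by room
     .annotate(total_duration=Sum(ExpressionWrapper(F('stop') - F('start'),
                                                    output_field=DurationField())),
               total_income=Sum('cost')))
for row in q:
    print(row['room__number'], row['total_duration'], row['total_income'])
```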
We can use embedding comparison to measure the difference between the representations that neural network models learn. In this notebook, we compare the final-layer embeddings of ImageNet-trained VGG16, VGG19, and InceptionV3 models.
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import keras
from keras import backend
import subprocess
import logging
from scipy.misc import imread, imresize
from urllib import urlretrieve
from repcomp.comparison import CCAComparison, UnitMatchComparison, NeighborsComparison
tf.logging.set_verbosity(tf.logging.ERROR)
logging.getLogger().setLevel("ERROR")
```
# Load the Data
```
data_path = "../../../data"
clear_command = "rm -rf {}/caltech.tar.gz; rm -rf {}/101_ObjectCategories".format(data_path, data_path)
os.system(clear_command)
urlretrieve("http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz",
"{}/caltech.tar.gz".format(data_path))
unzip_command = "tar xvzf {}/caltech.tar.gz -C {}".format(data_path, data_path)
subprocess.check_output(unzip_command, shell=True)
categories = os.listdir("{}/101_ObjectCategories".format(data_path))
def load_image(path):
im = imresize(imread(path), (224,224, 3))
return np.dstack([im, im, im]) if len(im.shape) == 2 else im
images = []
image_categories = []
for c in tqdm(categories):
dirpath = "{}/101_ObjectCategories/{}".format(data_path, c)
category_images = [load_image(os.path.join(dirpath, name)) for name in os.listdir(dirpath) if len(name)]
images += category_images
# label each image with its own category (not the running total of all images)
image_categories += [c] * len(category_images)
imageset = np.vstack([im[None,...] for im in images])
```
# Load the trained CNN models
```
from keras.applications import vgg16, vgg19, inception_v3
batch_size = 100
embeddings = {}
for name, Model, preprocess_func in [
("vgg16", vgg16.VGG16, vgg16.preprocess_input),
("vgg19", vgg19.VGG19, vgg19.preprocess_input),
("inception", inception_v3.InceptionV3, inception_v3.preprocess_input)]:
backend.clear_session()
model = Model(weights='imagenet', include_top=False)
img_data = preprocess_func(imageset)
embeddings[name] = np.vstack([model.predict(img_data[i:i + batch_size])[:,0,0,:]
for i in tqdm(range(0, imageset.shape[0], batch_size))])
```
# Compare the embeddings
```
from repcomp.comparison import CCAComparison, UnitMatchComparison, NeighborsComparison
for similarity_kind, comparator in [
("Neighbors", NeighborsComparison()),
("SVCCA", CCAComparison(pca_components=100))
]:
print("Inception to VGG16 {} Similarity: {}".format(similarity_kind,
comparator.run_comparison(embeddings["inception"], embeddings["vgg16"])['similarity']))
print("Inception to VGG19 {} Similarity: {}".format(similarity_kind,
comparator.run_comparison(embeddings["vgg19"], embeddings["vgg16"])['similarity']))
print("VGG16 to VGG19 {} Similarity: {}".format(similarity_kind,
comparator.run_comparison(embeddings["vgg19"], embeddings["inception"])['similarity']))
print
```
# Gradient Checking
Welcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking.
You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker.
But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".
Let's do it!
```
# Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
```
## 1) How does gradient checking work?
Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.
Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$.
Let's look back at the definition of a derivative (or gradient):
$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."
We know the following:
- $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly.
- You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct.
Let's use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct!
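As a quick sanity check of equation (1) on a toy function whose derivative we know (not part of the graded assignment): take $J(\theta) = \theta^2$ at $\theta = 3$, so $\frac{\partial J}{\partial \theta} = 2\theta = 6$.
```
J = lambda theta: theta ** 2
theta, eps = 3.0, 1e-4
gradapprox = (J(theta + eps) - J(theta - eps)) / (2 * eps)
print(gradapprox)  # ~6.0, matching the analytic derivative
```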
## 2) 1-dimensional gradient checking
Consider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.
You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct.
<img src="images/1Dgrad_kiank.png" style="width:600px;height:250px;">
<caption><center> <u> **Figure 1** </u>: **1D linear model**<br> </center></caption>
The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation").
**Exercise**: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
```
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
"""
Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
J -- the value of function J, computed using the formula J(theta) = theta * x
"""
### START CODE HERE ### (approx. 1 line)
J = theta * x
### END CODE HERE ###
return J
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
```
**Expected Output**:
<table>
<tr>
<td> ** J ** </td>
<td> 8</td>
</tr>
</table>
**Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
```
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
"""
Computes the derivative of J with respect to theta (see Figure 1).
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
dtheta -- the gradient of the cost with respect to theta
"""
### START CODE HERE ### (approx. 1 line)
dtheta = x
### END CODE HERE ###
return dtheta
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
```
**Expected Output**:
<table>
<tr>
<td> ** dtheta ** </td>
<td> 2 </td>
</tr>
</table>
**Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.
**Instructions**:
- First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow:
1. $\theta^{+} = \theta + \varepsilon$
2. $\theta^{-} = \theta - \varepsilon$
3. $J^{+} = J(\theta^{+})$
4. $J^{-} = J(\theta^{-})$
5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$
- Then compute the gradient using backward propagation, and store the result in a variable "grad"
- Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:
$$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$
You will need 3 Steps to compute this formula:
- 1'. compute the numerator using np.linalg.norm(...)
- 2'. compute the denominator. You will need to call np.linalg.norm(...) twice.
- 3'. divide them.
- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
```
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon = 1e-7):
"""
Implement the backward propagation presented in Figure 1.
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.
### START CODE HERE ### (approx. 5 lines)
thetaplus = theta + epsilon # Step 1
thetaminus = theta - epsilon # Step 2
J_plus = forward_propagation(x, thetaplus) # Step 3
J_minus = forward_propagation(x, thetaminus) # Step 4
gradapprox = (J_plus - J_minus)/(2*epsilon) # Step 5
### END CODE HERE ###
# Check if gradapprox is close enough to the output of backward_propagation()
### START CODE HERE ### (approx. 1 line)
grad = backward_propagation(x, theta)
### END CODE HERE ###
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(gradapprox - grad) # Step 1'
denominator = np.linalg.norm(gradapprox) + np.linalg.norm(grad) # Step 2'
difference = numerator/denominator # Step 3'
### END CODE HERE ###
if difference < 1e-7:
print ("The gradient is correct!")
else:
print ("The gradient is wrong!")
return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
```
**Expected Output**:
The gradient is correct!
<table>
<tr>
<td> ** difference ** </td>
<td> 2.9193358103083e-10 </td>
</tr>
</table>
Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`.
Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it!
## 3) N-dimensional gradient checking
The following figure describes the forward and backward propagation of your fraud detection model.
<img src="images/NDgrad_kiank.png" style="width:600px;height:400px;">
<caption><center> <u> **Figure 2** </u>: **deep neural network**<br>*LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*</center></caption>
Let's look at your implementations for forward propagation and backward propagation.
```
def forward_propagation_n(X, Y, parameters):
"""
Implements the forward propagation (and computes the cost) presented in Figure 3.
Arguments:
X -- training set for m examples
Y -- labels for m examples
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (5, 4)
b1 -- bias vector of shape (5, 1)
W2 -- weight matrix of shape (3, 5)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
Returns:
cost -- the cost function (logistic cost for one example)
"""
# retrieve parameters
m = X.shape[1]
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
# Cost
logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)
cost = 1./m * np.sum(logprobs)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return cost, cache
```
Now, run backward propagation.
```
def backward_propagation_n(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input datapoint, of shape (input size, 1)
Y -- true "label"
cache -- cache output from forward_propagation_n()
Returns:
gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./m * np.dot(dZ2, A1.T)
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1./m * np.dot(dZ1, X.T)
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
```
You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.
**How does gradient checking work?**.
As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:
$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.
The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary.
<img src="images/dictionary_to_vector.png" style="width:600px;height:400px;">
<caption><center> <u> **Figure 2** </u>: **dictionary_to_vector() and vector_to_dictionary()**<br> You will need these functions in gradient_check_n()</center></caption>
We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.
**Exercise**: Implement gradient_check_n().
**Instructions**: Here is pseudo-code that will help you implement the gradient check.
For each i in num_parameters:
- To compute `J_plus[i]`:
1. Set $\theta^{+}$ to `np.copy(parameters_values)`
2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$
3. Calculate $J^{+}_i$ using `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`.
- To compute `J_minus[i]`: do the same thing with $\theta^{-}$
- Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$
Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute:
$$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
```
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
"""
Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
x -- input datapoint, of shape (input size, 1)
y -- true "label"
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Set-up variables
parameters_values, _ = dictionary_to_vector(parameters)
grad = gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
# Compute gradapprox
for i in range(num_parameters):
# Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
# "_" is used because the function you have to outputs two parameters but we only care about the first one
### START CODE HERE ### (approx. 3 lines)
thetaplus = parameters_values.copy() # Step 1
thetaplus[i][0] = thetaplus[i][0]+epsilon # Step 2
J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3
### END CODE HERE ###
# Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
### START CODE HERE ### (approx. 3 lines)
thetaminus = parameters_values.copy() # Step 1
thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2
J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3
### END CODE HERE ###
# Compute gradapprox[i]
### START CODE HERE ### (approx. 1 line)
gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
### END CODE HERE ###
# Compare gradapprox to backward propagation gradients by computing difference.
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(gradapprox - grad) # Step 1'
denominator = np.linalg.norm(gradapprox) + np.linalg.norm(grad) # Step 2'
difference = numerator/denominator # Step 3'
### END CODE HERE ###
if difference > 2e-7:
print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
else:
print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
```
**Expected output**:
<table>
<tr>
<td> ** There is a mistake in the backward propagation!** </td>
<td> difference = 0.285093156781 </td>
</tr>
</table>
It seems that there were errors in the `backward_propagation_n` code we gave you! Good that you've implemented the gradient check. Go back to `backward_propagation` and try to find/correct the errors *(Hint: check dW2 and db1)*. Rerun the gradient check when you think you've fixed it. Remember you'll need to re-execute the cell defining `backward_propagation_n()` if you modify the code.
Can you get gradient check to declare your derivative computation correct? Even though this part of the assignment isn't graded, we strongly urge you to try to find the bug and re-run gradient check until you're convinced backprop is now correctly implemented.
**Note**
- Gradient Checking is slow! Approximating the gradient with $\frac{\partial J}{\partial \theta} \approx \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon}$ is computationally costly. For this reason, we don't run gradient checking at every iteration during training; we run it just a few times to check that the gradient is correct.
- Gradient Checking, at least as we've presented it, doesn't work with dropout. You would usually run the gradient check algorithm without dropout to make sure your backprop is correct, then add dropout.
Congrats, you can be confident that your deep learning model for fraud detection is working correctly! You can even use this to convince your CEO. :)
<font color='blue'>
**What you should remember from this notebook**:
- Gradient checking verifies closeness between the gradients from backpropagation and the numerical approximation of the gradient (computed using forward propagation).
- Gradient checking is slow, so we don't run it in every iteration of training. You would usually run it only to make sure your code is correct, then turn it off and use backprop for the actual learning process.
```
import numpy as np
import subprocess as sub
import SWAT_ReadOut as read
from SWAT_Manipulate import rteManipulator
from SWAT_Manipulate import bsnManipulator
from SWAT_Manipulate import gwManipulator
from SWAT_Manipulate import solManipulator
from SWAT_Manipulate import mgtManipulator
from SWAT_Manipulate import hruManipulator
import os, shutil
from mpi4py import MPI
import spotpy
class spot_setup(object):
def __init__(self,para):
self.parameter_fname = 'input_wwr_hp.txt'
self.observeddata_fname = 'observed'+os.sep+'discharge2.txt'
self.observeddata = np.loadtxt(self.observeddata_fname)
self.nr_of_observations = len(self.observeddata)
self.para = para
self.parf = np.genfromtxt(self.parameter_fname, delimiter=',', dtype=None, encoding='utf-8')
self.params = []
for i in range(len(self.parf)):
self.params.append(
spotpy.parameter.Uniform(
self.parf[i][0], self.parf[i][1], np.mean( [self.parf[i][1],self.parf[i][2]] )))
def parameters(self):
return spotpy.parameter.generate(self.params)
def onerror(self, func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def evaluation(self):
# Load Observation data here and return them as lists [[],[],[]...]
observationdatalists = []
observationdatalists.append(self.observeddata)
return observationdatalists
def objectivefunction(self,simulation,evaluation):
indexs=[]
for obs in evaluation:
index=[]
for i in range(len(obs)):
if not obs[i] == -9999: #used for missing observation data
index.append(i)
indexs.append(index)
sub1 = np.array(simulation[0])
sub = np.array(sub1)[indexs[0]]
sub_nse1 = spotpy.objectivefunctions.lognashsutcliffe(evaluation[0][indexs[0]],sub, epsilon=0.001)
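# logNSE equals 1 for a perfect fit; shifting by -1 makes a perfect fit score 0
# and worse fits increasingly negative.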
sub_nse = sub_nse1 - 1
return sub_nse
# import datetime as DT
# def make_date(datestr):
# return DT.datetime.strptime(datestr, '%Y%m%d')
def simulation(self,vector):
# if self.para == 'mpi':
try:
# +1 to prevent zero if necessary...
core_nr = str(int(os.environ['OMPI_COMM_WORLD_RANK']))
except KeyError:
# if you run on windows
core_nr = str(1)
#str(int(np.random.uniform(0,1000)))
pathdir = 'calib_parallel'+ os.sep +'parallel_'+core_nr
if os.path.exists(pathdir+ os.sep):
print('Deleting folder ' + pathdir)
shutil.rmtree(pathdir, onerror=self.onerror)
print('Copying folder ' + pathdir)
shutil.copytree('TxtInOut', pathdir)
else:
print('Copying folder ' + pathdir)
shutil.copytree('TxtInOut', pathdir)
files=os.listdir(pathdir)
# else:
# files=os.listdir('TxtInOut')
# files=os.listdir('calib_parallel'+os.sep+'parallel_'+core_nr)
# for i in range(len(self.parnames)):
# rtefiles = [i for i in files if i.endswith(".rte")]
# for d in rte:
# if name in d.parInfo:
# d.setChangePar(self.parnames[i],vector[i],self.parroutine[i]w)
# for d in gw:
# if name in d.parInfo:
# d.setChangePar(self.parnames[i],vector[i],self.parroutine[i]w)
manipulators = {}
### here all parameters from the bsn-file are assigned in the dictionary for calibration
bsnfiles = [i for i in files if i.endswith(".bsn")]
bsn = []
for i in bsnfiles:
bsn.append(bsnManipulator(i, ["SURLAG","SFTMP","SMTMP","TIMP","ESCO","EPCO"], core_nr))
manipulators["bsn"] = bsn
### here all parameters from the gw-file are assigned in the dictionary for calibration
gwfiles = [i for i in files if i.endswith(".gw")]
gw = []
for i in gwfiles:
gw.append(gwManipulator(i, ["GW_DELAY","ALPHA_BF","GW_REVAP","GWQMN","RCHRG_DP","REVAPMN"], core_nr))
manipulators["gw"] = gw
### here all parameters from the sol-file are assigned in the dictionary for calibration
solfiles = [i for i in files if i.endswith(".sol")]
sol = []
for i in solfiles:
if solManipulator(i,[],core_nr).landuse != "URBN":
sol.append(solManipulator(i, ["SOL_K","ROCK","SOL_AWC"], core_nr))
manipulators["sol"] = sol
## here all parameters from the hru-file are assigned in the dictionary for calibration
hrufiles= [i for i in files if i.endswith(".hru")]
hru=[]
for i in hrufiles:
if i[0].isdigit():
hru.append(hruManipulator(i, ["HRU_FR","ESCO","EPCO","OV_N","CANMX"], core_nr))
manipulators["hru"] = hru
### here all parameters from the rte-file are assigned in the dictionary for calibration
rtefiles = [i for i in files if i.endswith(".rte")]
rte = []
for i in rtefiles:
rte.append(rteManipulator(i, ["CH_N2", "CH_K2"], core_nr))
manipulators["rte"] = rte
### here all parameters from the mgt-file are assigned in the dictionary for calibration
mgtfiles = [i for i in files if i.endswith(".mgt")]
mgt = []
for i in mgtfiles:
mgt.append(mgtManipulator(i, ["CN2"], core_nr))
manipulators["mgt"] = mgt
### lists with subbasins which should receive different groundwater and routing parameters
#gw1=["1"]
#gw2=["10","11"]
### here the parameter vector proposed by spotpy for this iteration is unpacked
data=vector
sftmp = data[0]
ksat = data[1]
ch_n2 = data[2]
delay = data[3]
CN2 = data[4]
ESCO = data[5]
## the parameters in the bsn files are changed
for d in bsn:
d.setChangePar("SFTMP",sftmp,"s")
d.finishChangePar(core_nr)
## the parameters in the sol files are changed
for d in sol:
d.setChangePar("SOL_K",ksat,"*")
d.finishChangePar(core_nr)
## the parameters in the rte files are changed -> the algorithm checks the number of the subbasin and assigns the parameter according
## to the list specified above
for d in rte:
d.setChangePar("CH_N2",ch_n2,"s")
d.finishChangePar(core_nr)
## the parameters in the gw files are changed -> the algorithm checks the number of the subbasin and assigns the parameter according
## to the list specified above
for d in gw:
# here different groundwater parameters are applied for different subbasins
# groundwater region I
# if d.subbasin in gw1:
d.setChangePar("GW_DELAY",delay,"s")
# groundwater region II
#else:
d.finishChangePar(core_nr)
## the parameters in the mgt files are changed
for d in mgt:
d.setChangePar("CN2",CN2,"*")
d.finishChangePar(core_nr)
## the parameters in the hru files are changed
for d in hru:
d.setChangePar("ESCO",ESCO,"s")
d.finishChangePar(core_nr)
try:
# os.system("calib_parallel"+os.sep+"parallel_"+core_nr+os.sep+"swat_64rel.exe")
# swat is finally executed for the current iteration
# sub.check_call("'calib_parallel'+os.sep+'parallel_'+core_nr+os.sep+'swat_64rel.exe'")
curdir = os.getcwd()
os.chdir('calib_parallel'+os.sep+'parallel_'+core_nr)
sub.call(['swat_64rel.exe'])
os.chdir(curdir)
# swat is finally executed for the current iteration
# sub.check_call("swat_64rel.exe",cwd='calib_parallel'+os.sep+'parallel_'+core_nr+os.sep)
# subbasin number where output should be extracted
subbasins = [1]
# results = (read.rchOutputManipulator(["FLOW_OUT"],subbasins,"indi",False,1,core_nr),
# read.subOutputManipulator(["SURQ","GW_Q","LAT_Q","PRECIP"],subbasins,"indi",False,1,core_nr))
# results = (read.rchOutputManipulator(["FLOW_OUT"],subbasins,"indi",False,1,core_nr))
results = []
for subb in subbasins:
if subb<10:
with open('calib_parallel'+os.sep+'parallel_'+core_nr+os.sep+'SWATsensitivity_FLOW_OUT_000'+str(subb)+'.rch', 'r') as fh:
for line in fh:
pass
last = line
else:
with open('calib_parallel'+os.sep+'parallel_'+core_nr+os.sep+'SWATsensitivity_FLOW_OUT_00'+str(subb)+'.rch', 'r') as fh:
for line in fh:
pass
last = line
vals = last.split(' ')
oneresult=[]
for val in vals:
oneresult.append(float(val))
results.append(oneresult)
except Exception:
# raise  # uncomment to surface the error while debugging
print("SWAT produced an error, returning nans")
# Number of simulations that SWAT creates (without warm-up period)
results = [[np.nan]*self.nr_of_observations]
return results
parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
display(size)
display(rank)
print(parallel)
# parallel = 'umpc'
# Initiate class
starter = spot_setup(parallel)
sampler = spotpy.algorithms.lhs(starter, dbname='calib_parallel'+os.sep+'SWAT-discharge_lhs',dbformat='csv', parallel=parallel)
sampler.sample(repetitions=5)
from spotpy import analyser
results = sampler.getdata()
analyser.plot_parameterInteraction(results)
# posterior = analyser.get_posterior(results, percentage=10)
# analyser.plot_parameterInteraction(posterior)
analyser.get_best_parameterset(results)
spotpy.parameter.Uniform('cmax',low=1.0 , high=500, optguess=412.33)
parameter_fname = 'input_wwr_hp.txt'
observeddata_fname = 'observed'+os.sep+'discharge2.txt'
observeddata = np.loadtxt(observeddata_fname)
nr_of_observations = len(observeddata)
para = 'seq'
parf = np.genfromtxt(parameter_fname, delimiter=',', dtype=None, encoding='utf-8')
params = []
parf
for i in range(len(parf)):
print(f"{parf[i][0]}: low={parf[i][1]}, optguess={np.mean([parf[i][1],parf[i][2]])}")
print(spotpy.parameter.Uniform(parf[i][0], parf[i][1], np.mean( [parf[i][1],parf[i][2]] )))
params.append(spotpy.parameter.Uniform(parf[i][0], parf[i][1], np.mean( [parf[i][1],parf[i][2]] )))
spotpy.parameter.generate(params)
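# For reference (values made up for illustration only), rows of
# 'input_wwr_hp.txt' are expected to look like "name,low,high" given the
# comma-delimited parsing above, e.g.:
#   SFTMP,-5.0,5.0
#   SOL_K,0.5,2.0
#   CH_N2,0.01,0.3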
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
display(size)
display(rank)
import os
'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'
# only tmp, delete after use
import geopandas as gpd
gdf = gpd.read_file(r'C:\dev\05_geodata\qgis_map_projects\kik_project\grid_tiles\DEM_1m_grid_cells_wzone_overlap_multi.shp', encoding='utf-8')
gdf.sample(5)
zone_ids = gdf['zone_id'].unique()
zone_ids_o = gdf['zoverlap_i'].unique()
sorted(zone_ids) == sorted(zone_ids_o)
display(len(gdf['grid_id'].unique()))
display(len(gdf['zoverlap_i']))
p = r'C:\dev\05_geodata\qgis_map_projects\kik_project\grid_tiles'
for i in sorted(zone_ids):
fs = gdf[gdf['zoverlap_i'] == i]['filename']
with open(os.path.join(p, f"zone_{int(i)}_dem.csv"), 'w', encoding='utf-8') as fh:
for elem in fs.unique().tolist():
fh.write(f"{elem}\n")
fh.flush()
import subprocess
s = r'Z:\Evelyn\est_topographic_metrics\1m\DEM_tiff'
cur_keep = os.path.abspath(os.curdir)
os.chdir(s)
for i in sorted(zone_ids):
# gdalbuildvrt -input_file_list zone_xxx_dem.csv -a_srs EPSG:3301 zone_xxx_dem.vrt
o = subprocess.run(["gdalbuildvrt", "-input_file_list", f"zone_{int(i)}_dem.csv", "-a_srs", "EPSG:3301", "-overwrite", os.path.join(p, f"zone_{int(i)}_dem.vrt")], capture_output=True)
# o = ["gdalbuildvrt", "-input_file_list", os.path.join(p, f"zone_{int(i)}_dem.csv"), "-a_srs", "EPSG:3301", os.path.join(p, f"zone_{int(i)}_dem.vrt")]
display(o.stdout)
display(' '.join(o.args))
display(o.returncode)
os.chdir(cur_keep)
import shutil
pathdir = 'TxtInOut'
outdir = 'temp'
if os.path.exists(outdir):
print('Deleting tmp folder ' + outdir)
shutil.rmtree(outdir)
print('Copying src folder to ' + outdir)
shutil.copytree(pathdir, outdir)
else:
print('Copying src folder as ' + outdir)
shutil.copytree('TxtInOut', outdir)
files=os.listdir(outdir)
core_nr = 1
manipulators = {}
### here all parameters from the bsn-file are assigned in the dictionary for calibration
bsnfiles = [i for i in files if i.endswith(".bsn")]
bsn = []
for i in bsnfiles:
bsn.append(bsnManipulator(i, ["SURLAG","SFTMP","SMTMP","TIMP","ESCO","EPCO"], core_nr))
manipulators["bsn"] = bsn
### here all parameters from the gw-file are assigned in the dictionary for calibration
gwfiles = [i for i in files if i.endswith(".gw")]
gw = []
for i in gwfiles:
gw.append(gwManipulator(i, ["GW_DELAY","ALPHA_BF","GW_REVAP","GWQMN","RCHRG_DP","REVAPMN"], core_nr))
manipulators["gw"] = gw
### here all parameters from the sol-file are assigned in the dictionary for calibration
solfiles = [i for i in files if i.endswith(".sol")]
sol = []
for i in solfiles:
if solManipulator(i,[],core_nr).landuse != "URBN":
sol.append(solManipulator(i, ["SOL_K","ROCK","SOL_AWC"], core_nr))
manipulators["sol"] = sol
## here all parameters from the hru-file are assigned in the dictionary for calibration
hrufiles= [i for i in files if i.endswith(".hru")]
hru=[]
for i in hrufiles:
if i[0].isdigit():
hru.append(hruManipulator(i, ["HRU_FR","ESCO","EPCO","OV_N","CANMX"], core_nr))
manipulators["hru"] = hru
### here all parameters from the rte-file are assigned in the dictionary for calibration
rtefiles = [i for i in files if i.endswith(".rte")]
rte = []
for i in rtefiles:
rte.append(rteManipulator(i, ["CH_N2", "CH_K2"], core_nr))
manipulators["rte"] = rte
### here all parameters from the mgt-file are assigned in the dictionary for calibration
mgtfiles = [i for i in files if i.endswith(".mgt")]
mgt = []
for i in mgtfiles:
mgt.append(mgtManipulator(i, ["CN2"], core_nr))
manipulators["mgt"] = mgt
manipulators
import fastavro
fastavro
```
| github_jupyter |
```
import numpy as np
%matplotlib tk
import matplotlib.pyplot as plt
import pickle
from sklearn import cluster
from sklearn import metrics
from sympy.solvers import solve
import sympy as sym
from scipy import optimize
class VelocityPlotter():
def __init__(self):
personNames = ['person1','person2','person3', 'person4','person5','person6']
colors = ['red', 'green', 'orange', 'cyan', 'magenta', 'black']
picklesToLoad = ['person1.pickle', 'person2.pickle','person3.pickle', 'person4.pickle', 'person5.pickle', 'person6.pickle']
startTimes = [[26.8, 382, 483.3], #person1
[3.6, 352, 446.75], # person2
[9.5, 378, 481.25], # person3
[35.1, 436, 543], #person4
[10.8, 387, 500], # person5
[12.1, 364, 458.25], #person6
]
self.x, self.y = sym.symbols('x y')
self.k1, self.k2 = sym.symbols('k1 k2')
self.eq_y = None
self.eq_x = None
dataSets = []
for pickleToLoad in picklesToLoad:
data = pickle.load(open(pickleToLoad, 'rb'))
dataSets.append(data)
newDataset = [[[] for _ in range(4)] for _ in range(len(dataSets))]
for i, dataSet in enumerate(dataSets):
x_log, distance_to_spot_log, time_log_angle, time_log_filter, spot_v_log, time_log_spot = dataSet
for data in zip(x_log, distance_to_spot_log, time_log_angle, time_log_filter):
if startTimes[i][1] > data[3] > startTimes[i][0]:
newDataset[i][0].append(data[0])
newDataset[i][1].append(data[1])
newDataset[i][2].append(data[2])
newDataset[i][3].append(data[3])
self.all_distance = []
self.all_vel = []
self.all_angles = []
fig2, ax2 = plt.subplots(1)
for j, dataSet in enumerate(newDataset):
x_log, distance_to_spot_log, time_log_angle, time_log_filter = dataSet
#fig, ax = plt.subplots(1)
for i in range(len(time_log_angle)):
time_log_angle[i] = (time_log_angle[i]-np.pi) * (180/np.pi)
person_velocity = []
for i in range(len(x_log)):
person_velocity.append( np.linalg.norm( np.array(x_log[i])[3:5] ) )
bool_array = np.array(person_velocity) > 0.25
person_velocity = list(np.array(person_velocity)[bool_array == True])
distance_to_spot_log = list(np.array(distance_to_spot_log)[bool_array == True])
time_log_angle = list(np.array(time_log_angle)[bool_array == True])
time_log_filter = list(np.array(time_log_filter)[bool_array == True])
#print(len(distance_to_spot_log))
#print(len(time_log_angle))
#print(len(time_log_filter))
self.all_distance = self.all_distance + distance_to_spot_log
self.all_vel = self.all_vel + person_velocity
self.all_angles = self.all_angles + time_log_angle
filter = np.array(distance_to_spot_log) < 3.5
coefs = np.polyfit(np.array(person_velocity)[filter == True], np.array(distance_to_spot_log)[filter == True], 1)
# X = np.arange(0.1, 1.4, 0.1)
# ax.plot(X, np.polyval(coefs, X), color="black")
# ax.plot(np.array(person_velocity)[filter == True], np.array(distance_to_spot_log)[filter == True], 'o', label=personNames[j], color=colors[j])
# ax.set_ylabel('Distance to Spot [m]')
# ax.set_xlabel('Speed [m/s]')
# ax.legend()
# ax.set_ylim([0.5, 3.5])
ax2.plot(np.array(person_velocity)[filter == True], np.array(distance_to_spot_log)[filter == True], 'o', markersize=1, label=personNames[j], color=colors[j])
ax2.set_ylabel('Distance to Spot [m]')
ax2.set_xlabel('Speed [m/s]')
ax2.legend()
ax2.set_ylim([0.5, 3.5])
ax2.set_xlim([0.1, 1.5])
#print("dist:" ,personNames[j], np.median(distance_to_spot_log))
#print("angle:" ,personNames[j], np.median(time_log_angle))
#fig, ax3 = plt.subplots(1)
self.X = np.arange(-2, 5, 0.5)
self.Y = np.arange(0, 10, 0.5)
filter = np.array(self.all_distance) < 3.5
self.coefs = np.polyfit(np.array(self.all_vel)[filter == True], np.array(self.all_distance)[filter == True], 1)
#self.coefs = np.array([1, 0])
self.filtered = (np.array(self.all_vel)[filter == True], np.array(self.all_distance)[filter == True])
ax2.plot(self.X, np.polyval(self.coefs, self.X), color="black")
fig2, ax5 = plt.subplots(1)
def point_on_curve(x0, y0):
x_min = sym.Symbol('x')
mean = np.polyval(self.coefs, x_min)
the_diff = sym.diff( sym.sqrt( (x_min - x0)**2 + (mean - y0)**2 ), x_min )
return_var = solve(the_diff, x_min)
return float(sym.re(return_var[0])), float(sym.re(np.polyval(self.coefs, return_var[0])))
def equation(x):
x_min, y_min = point_on_curve(x[0], x[1])
distance = sym.sqrt( (x_min - x[0])**2 + (y_min - x[1])**2 )#
x1 = 1.4
y1 = np.polyval(self.coefs, x1)
arc_length = sym.sqrt((x1 - x_min)**2 + (y1 - y_min)**2)
return float(sym.re(distance + arc_length))
def func3d(x0, y0):
x2, y2 = point_on_curve(x0, y0)
dist = sym.sqrt( (x2 - x0)**2 + (y2 - y0)**2 )
x1 = 1.4
y1 = np.polyval(self.coefs, x1)
arc_length = np.linalg.norm([x1 - x2,
y1 - y2])
if float(sym.re(dist + arc_length)) < 0.2:
return 0.2
return float(sym.re(dist + arc_length))
def point_on_line(x0, y0):
a,b = self.coefs[0], self.coefs[1]
y = y0 - b
vector = np.array([1, a])
the_length = np.dot(np.array([x0, y]), vector) / np.dot(vector, vector)
point = (the_length * vector)
return point[0], point[1] + b
def func3dLinear(x0, y0):
x1, y1 = point_on_line(x0, y0)
# use the shared symbols so quiver() can substitute self.k1 and self.k2 below
k1 = self.k1
k2 = self.k2
#return sym.sqrt((x0 - x1)**2 + (y0 - y1)**2)
return k1*sym.sqrt( (1.4 - x1)**2 + (np.polyval(self.coefs, 1.4) - y1)**2 )**2 + k2*sym.sqrt((x0 - x1)**2 + (y0 - y1)**2)**2
def quiver(x0, y0):
if self.eq_x == None:
eq = func3dLinear(self.x, self.y)
self.eq_x = eq.diff(self.x)
self.eq_y = eq.diff(self.y)
return (float(self.eq_x.subs(self.x, x0).subs(self.y, y0).subs(self.k1, 1).subs(self.k2, 1)),
float(self.eq_y.subs(self.x, x0).subs(self.y, y0).subs(self.k1, 1).subs(self.k2, 1)))
self.grads = np.zeros((len(self.X), len(self.Y), 2))
for ind_x, x in enumerate(self.X):
for ind_y, y in enumerate(self.Y):
values = quiver(x, y)
self.grads[ind_x, ind_y, :] = np.asfarray(values)
ax5.quiver(x, y, -values[0], -values[1])
# evaluate the cost surface numerically (with k1 = k2 = 1, matching quiver),
# so that the contour, grid-descent and surface cells below can run
self.Z = np.zeros((len(self.Y),len(self.X)))
for x_ind, x_val in enumerate(self.X):
for y_ind, y_val in enumerate(self.Y):
self.Z[y_ind, x_ind] = float(func3dLinear(x_val, y_val).subs({self.k1: 1, self.k2: 1}))
letsgoo = VelocityPlotter()
fig, ax3 = plt.subplots(1)
cm = plt.cm.get_cmap('viridis')
#ax3.contourf(letsgoo.X, letsgoo.Y, letsgoo.Z, 50)
#ax3.plot(letsgoo.X, np.polyval(letsgoo.coefs, letsgoo.X), 'r-')
#ax3.set_xlim([0,5])
#ax3.set_ylim([0,5])
#letsgoo.coefs
print([sym.simplify(letsgoo.eq_x), sym.simplify(letsgoo.eq_y)])
fig, ax3 = plt.subplots(1)
cm = plt.cm.get_cmap('viridis')
ax3.plot(letsgoo.X, np.polyval(letsgoo.coefs, letsgoo.X), 'r-')
x1, y1 = 1, 2
ax3.plot(x1, y1, 'ro')
def point_on_line(x0, y0):
a,b = letsgoo.coefs[0], letsgoo.coefs[1]
y = y0 - b
vector = np.array([1, a])
the_length = np.dot(np.array([x0, y]), np.transpose(vector)) / np.dot(np.transpose(vector), vector)
point = (the_length * vector) + np.array([0, b])
return point
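# point_on_line computes the orthogonal projection of (x0, y0) onto the fitted
# line y = a*x + b: shift by the intercept, project onto the direction (1, a),
# then shift back.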
ax3.plot(point_on_line(x1,y1)[0], point_on_line(x1,y1)[1], 'ro')
print(sym.sqrt( (1.4 - point_on_line(x1,y1)[0])**2 + (np.polyval(letsgoo.coefs, 1.4) - point_on_line(x1,y1)[1])**2 ))
fig, ax3 = plt.subplots(1)
cm = plt.cm.get_cmap('viridis')
ax3.contourf(letsgoo.X, letsgoo.Y, letsgoo.Z)
x_i = 1
y_i = 1
Last_cost = letsgoo.Z[y_i, x_i]
iteration = 0
while True: #not (1.4 + 0.01 > letsgoo.X[x_i] > 1.4 - 0.01):
iteration += 1
if iteration > 50:
break
new_x = x_i
new_y = y_i
for x in range(-1, 2):
for y in range(-1, 2):
if letsgoo.Z[y_i + y, x_i + x] < Last_cost:
new_x = x_i + x
new_y = y_i + y
Last_cost = letsgoo.Z[y_i + y, x_i + x]
x_i = new_x
y_i = new_y
ax3.plot(letsgoo.X[x_i], letsgoo.Y[y_i], 'o')
vale = []
for i in range(1, 15):
polynomial = np.polyfit(letsgoo.filtered[0], letsgoo.filtered[1], i, full=True)
vale.append(polynomial[1] / len(letsgoo.filtered[0]))
# r-squared
p = np.poly1d(polynomial[0])
# fit values, and mean
yhat = p(letsgoo.filtered[0]) # or [p(z) for z in x]
ybar = np.sum(letsgoo.filtered[1])/len(letsgoo.filtered[1]) # or sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
sstot = np.sum((letsgoo.filtered[1] - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])
print("rSquared: ", ssreg/sstot)
vale
plt.plot(range(1, len(vale) + 1),vale)
from mpl_toolkits.mplot3d import Axes3D # registers the '3d' projection on older matplotlib
fig = plt.figure()
ax3 = fig.gca(projection='3d')
X, Y = np.meshgrid(letsgoo.X, letsgoo.Y)
Z = letsgoo.Z
ax3.plot_surface(X, Y, Z, cmap='jet')
ax3.set_ylabel('Distance to spot [m]')
ax3.set_xlabel('Speed [m/s]')
ax3.set_zlabel('Cost')
import pickle
pickle.dump((letsgoo.X, letsgoo.Y, letsgoo.grads), open("gradients.pickle", "wb"))
fig2, ax5 = plt.subplots(1)
X, Y, grads = pickle.load(open("gradients.pickle", 'rb'))
for ind_x, x in enumerate(X):
for ind_y, y in enumerate(Y):
ax5.quiver(x, y, -grads[ind_x, ind_y, 0], -grads[ind_x, ind_y, 1])
```
| github_jupyter |
## I. Comparison-Based Sorting
### A. Exchange Sorts
#### a. Bubble Sort
```
def bubble_sort(List):
n = len(List)
for i in range(n):
for j in range(0, n-i-1):
if List[j] > List[j+1]:
List[j], List[j+1] = List[j+1], List[j]
return List
arr = [1, 6, 9, 8, 2, 6, 7, 4, 3]
print(bubble_sort(arr))
```
#### b. Quick Sort
```
def partition(List, start, end):
i = start
pivot = List[end]
for j in range(start, end):
if List[j] <= pivot:
List[i], List[j] = List[j], List[i]
i += 1
List[i], List[end] = List[end], List[i]
return i
def quick_sort(List, start, end):
if start < end:
middle = partition(List, start, end)
quick_sort(List, start, middle-1)
quick_sort(List, middle+1, end)
return List
arr = [300, 500, 650, 700, 200, 275, 330]
n = len(arr)
print(quick_sort(arr, 0, n-1))
print(quick_sort(arr, 0, n-1)[(n+1)//2 - 1])
```
### B. Insertion Sorts
#### a. Insertion Sort
```
def insertSort(arr):
for i in range(len(arr)):
preIndex = i-1
current = arr[i]
while preIndex >= 0 and arr[preIndex] > current:
arr[preIndex+1] = arr[preIndex]
preIndex -= 1
arr[preIndex+1] = current
return arr
arr = [1,5,6,8,4,3,7]
print(insertSort(arr))
```
#### b. Shell Sort
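Shell sort is an insertion sort over progressively shrinking gaps. A minimal sketch, using the simple n/2, n/4, ..., 1 gap sequence (other gap sequences are possible):
```
def shell_sort(arr):
    gap = len(arr) // 2
    while gap > 0:
        # gapped insertion sort: elements gap apart form sorted subsequences
        for i in range(gap, len(arr)):
            current = arr[i]
            j = i
            while j >= gap and arr[j-gap] > current:
                arr[j] = arr[j-gap]
                j -= gap
            arr[j] = current
        gap //= 2
    return arr

arr = [1,5,6,8,4,3,7]
print(shell_sort(arr))
```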
### C. Selection Sorts
#### a. Selection Sort
```
def selectSort(arr):
for i in range(len(arr)-1):
tmp = i
for j in range(i,len(arr)):
if arr[j] < arr[tmp]:
tmp = j
arr[i],arr[tmp] = arr[tmp],arr[i]
return arr
arr = [4,2,5,3]
print(selectSort(arr))
```
#### b. Heap Sort
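Heap sort builds a max-heap, then repeatedly swaps the root (the maximum) to the end of the array and sifts the new root down. A minimal sketch:
```
def heapify(arr, n, i):
    # sift the element at index i down within a max-heap of size n
    largest = i
    left, right = 2*i + 1, 2*i + 2
    if left < n and arr[left] > arr[largest]:
        largest = left
    if right < n and arr[right] > arr[largest]:
        largest = right
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        heapify(arr, n, largest)

def heap_sort(arr):
    n = len(arr)
    # build the max-heap, then repeatedly move the max to the end
    for i in range(n//2 - 1, -1, -1):
        heapify(arr, n, i)
    for i in range(n-1, 0, -1):
        arr[0], arr[i] = arr[i], arr[0]
        heapify(arr, i, 0)
    return arr

arr = [4,2,5,3]
print(heap_sort(arr))
```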
### D. Merge Sort
```
def merge(arr_left,arr_right):
i = 0
j = 0
res = []
while i < len(arr_left) and j < len(arr_right):
if arr_left[i] < arr_right[j]:
res.append(arr_left[i])
i += 1
else:
res.append(arr_right[j])
j += 1
res += arr_left[i:]
res += arr_right[j:]
return res
def mergeSort(arr):
if len(arr) <= 1:
return arr
else:
middle = len(arr) // 2
left = mergeSort(arr[:middle])
right = mergeSort(arr[middle:])
return merge(left,right)
arr = [1,5,6,8,4,3,7]
print(mergeSort(arr))
```
## II. Non-Comparison Sorting
### a. Counting Sort
```
def countingSort(arr):
count_arr = [0] * (max(arr)+1)
for i in arr:
count_arr[i] += 1
k = 0
for j in range(len(count_arr)):
while count_arr[j] != 0:
arr[k] = j
k += 1
count_arr[j] -= 1
return arr
arr = [1,5,6,8,4,3,7]
print(countingSort(arr))
```
### b. Bucket Sort
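Bucket sort distributes values into evenly spaced buckets, sorts each bucket, and concatenates the results. A minimal sketch (`sorted()` stands in for the classic per-bucket insertion sort):
```
def bucket_sort(arr, bucket_count=5):
    lo, hi = min(arr), max(arr)
    width = (hi - lo) / bucket_count if hi > lo else 1
    buckets = [[] for _ in range(bucket_count)]
    for x in arr:
        # clamp so the maximum value falls into the last bucket
        idx = min(int((x - lo) / width), bucket_count - 1)
        buckets[idx].append(x)
    res = []
    for bucket in buckets:
        res += sorted(bucket)
    return res

arr = [1,5,6,8,4,3,7]
print(bucket_sort(arr))
```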
### c. Radix Sort
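LSD radix sort buckets the values by one decimal digit per pass, from least to most significant. A minimal sketch, assuming non-negative integers:
```
def radix_sort(arr):
    exp = 1
    while max(arr) // exp > 0:
        # stable bucketing by the current decimal digit
        buckets = [[] for _ in range(10)]
        for x in arr:
            buckets[(x // exp) % 10].append(x)
        arr = [x for bucket in buckets for x in bucket]
        exp *= 10
    return arr

arr = [300, 500, 650, 700, 200, 275, 330]
print(radix_sort(arr))
```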
| github_jupyter |
```
import numpy as np
import sys
from scipy.special import expit as sigmoid
training_data_path = sys.argv[1]
testing_data_path = sys.argv[2]
output_path = sys.argv[3]
# training_data_path = "../data/devnagri_train.csv"
# testing_data_path = "../data/devnagri_test_public.csv"
# output_path = "../data/nn/b/cs1160328"
batch_size = 128
n0 = 5
activation = 'sigmoid'
hidden_layers_sizes = [100,50]
def relu(x):
return (x>0) * x
def tanh(x):
return np.tanh(x)
def reluPrime(x):
return (x>0)+0
def tanhPrime(x):
return 1 - np.power(x,2)
def sigmoidPrime(x):
return x * (1 - x)
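# Note: the *Prime helpers take the layer's activated output rather than the
# raw pre-activation input, hence sigmoidPrime(x) = x*(1-x) and
# tanhPrime(x) = 1 - x**2.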
def exp_normalize(x):
b = np.amax(x,axis=1,keepdims = True)
y = np.exp(x - b)
return y / y.sum(axis=1,keepdims=True)
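# exp_normalize is a numerically stable softmax: subtracting the row-wise max
# before exponentiating prevents overflow without changing the result.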
class NeuralNetwork:
def __init__(self,input_size,output_size,hidden_layers_sizes, activation):
self.weights = []
self.biases = []
if(activation == 'relu'):
self.activation = relu
self.activationPrime = reluPrime
elif(activation == 'tanh'):
self.activation = tanh
self.activationPrime = tanhPrime
else:
self.activation = sigmoid
self.activationPrime = sigmoidPrime
self.input_size = input_size
self.output_size = output_size
self.hidden_layers_sizes = hidden_layers_sizes
prev_layer_count = input_size
for i in range(len(hidden_layers_sizes) + 1):
if i==len(hidden_layers_sizes):
self.weights.append(np.random.rand(prev_layer_count, output_size)/100)
self.biases.append(np.random.rand(1, output_size)/100)
else:
hidden_layer_count = hidden_layers_sizes[i]
self.weights.append(np.random.rand(prev_layer_count, hidden_layer_count)/100)
self.biases.append(np.random.rand(1, hidden_layer_count)/100)
prev_layer_count = hidden_layer_count
def train(self,inpX,inpY,batch_size,n0,max_iterations):
max_examples = inpX.shape[0]
max_possible_iterations = int(0.5 + max_examples / batch_size)
num_hidden_layers = len(self.weights) - 1
count = 0
lr = n0
totLoss = 0
prevAvgLoss = sys.float_info.max
epoch = 0
for n in range(max_iterations):
# Forming Mini Batches
i_eff = n%max_possible_iterations
# Updating Learning Rate
if (i_eff == 0 and n!=0):
avgLoss = totLoss/max_possible_iterations
if(avgLoss >= prevAvgLoss):
count += 1
lr = n0 / np.sqrt(count+1)
# print("Epoch = ",epoch," Average Loss = ",avgLoss," New Learning Rate = ",lr)
epoch += 1
prevAvgLoss = avgLoss
totLoss = 0
outputs = []
if i_eff != max_possible_iterations - 1:
X = inpX[i_eff*batch_size: (i_eff+1)*batch_size]
Y = inpY[i_eff*batch_size: (i_eff+1)*batch_size]
else:
X = inpX[i_eff*batch_size:]
Y = inpY[i_eff*batch_size:]
# # Neural Network Forward Propagation (Cross Entropy)
# outputs.append(X)
# prev_layer_output = X
# for i in range(num_hidden_layers + 1):
# weight = self.weights[i]
# bias = self.biases[i]
# if i == num_hidden_layers:
# prev_layer_output = exp_normalize(prev_layer_output.dot(weight) + bias)
# else:
# prev_layer_output = self.activation(prev_layer_output.dot(weight) + bias)
# outputs.append(prev_layer_output)
# # Backpropagation
# dWs = []
# dbs = []
# for i in range(num_hidden_layers + 1,0,-1):
# if i == num_hidden_layers + 1:
# delta = outputs[i].copy()
# delta[range(Y.shape[0]),Y] -= 1
# else:
# delta = delta.dot(self.weights[i].T) * self.activationPrime(outputs[i])
# dW = (outputs[i-1].T).dot(delta)
# dWs.append(dW)
# dbs.append(np.sum(delta,axis=0,keepdims=True))
# if (n%100 == 0):
# loss_ = np.sum(-1*np.log(outputs[-1][range(Y.shape[0]),Y] + 0.001)) / Y.shape[0]
# labels_ = np.argmax(outputs[-1],axis = 1)
# accuracy_ = 100 * np.sum(labels_ == Y)/Y.shape[0]
# print("Iteration ",n,"\tLoss = ",loss_,"\tAccuracy = ",accuracy_,"%")
# dWs.reverse()
# dbs.reverse()
# # Gradient Descent Parameter Update
# for i in range(len(dWs)):
# self.weights[i] += dWs[i].dot(-1 * lr)
# self.biases[i] += dbs[i].dot(-1 * lr)
# loss = np.sum(-1*np.log(outputs[-1][range(Y.shape[0]),Y] + 0.001)) / Y.shape[0]
# totLoss += loss
# Neural Network Forward Propagation (MSE)
outputs.append(X)
prev_layer_output = X
for i in range(num_hidden_layers + 1):
weight = self.weights[i]
bias = self.biases[i]
if i == num_hidden_layers:
prev_layer_output = sigmoid(prev_layer_output.dot(weight) + bias)
else:
prev_layer_output = self.activation(prev_layer_output.dot(weight) + bias)
outputs.append(prev_layer_output)
# Backpropagation
dWs = []
dbs = []
y_onehot = np.zeros((Y.shape[0],self.output_size))
y_onehot[range(Y.shape[0]),Y] = 1
for i in range(num_hidden_layers + 1,0,-1):
if i == num_hidden_layers + 1:
delta = (outputs[i] - y_onehot).dot(2/Y.shape[0]) * sigmoidPrime(outputs[i])
else:
delta = delta.dot(self.weights[i].T) * self.activationPrime(outputs[i])
dW = (outputs[i-1].T).dot(delta)
dWs.append(dW)
dbs.append(np.sum(delta,axis=0,keepdims=True))
# if (n%100 == 0):
# loss = np.sum(np.power(outputs[-1] - y_onehot,2) )/Y.shape[0]
# labels = np.argmax(outputs[-1],axis = 1)
# accuracy = 100 * np.sum(labels == Y)/Y.shape[0]
# print("Iteration ",n,"\tLoss = ",loss,"\tAccuracy = ",accuracy,"%")
dWs.reverse()
dbs.reverse()
# Gradient Descent Parameter Update
for i in range(len(dWs)):
self.weights[i] += dWs[i].dot(-1 * lr)
self.biases[i] += dbs[i].dot(-1 * lr)
loss = np.sum(np.power(outputs[-1] - y_onehot,2) )/Y.shape[0]
totLoss += loss
def predict(self,X):
return self.forward_run(X)
def forward_run(self,X):
prev_layer_output = X
num_hidden_layers = len(self.weights) - 1
for i in range(num_hidden_layers + 1):
weight = self.weights[i]
bias = self.biases[i]
if i == num_hidden_layers:
probabilities = exp_normalize(prev_layer_output.dot(weight) + bias)
labels = np.argmax(probabilities,axis = 1)
return labels
else:
prev_layer_output = self.activation(prev_layer_output.dot(weight) + bias)
def load_data(path,avg,std):
if avg is None:
input_data = np.loadtxt(open(path, "rb"), delimiter=",")
Y = input_data[:,0].copy()
X = input_data[:,1:].copy()
avg = np.average(X,axis=0)
X = X - avg
std = np.std(X,axis=0)
std[(std == 0)] = 1
X = X / std
return X,Y,avg,std
else:
input_data = np.loadtxt(open(path, "rb"), delimiter=",")
X = input_data[:,1:].copy()
X = (X - avg)/std
return X
inpX,Y,avg,std = load_data(training_data_path,None,None)
X = inpX.copy()
input_size = X.shape[1]
output_size = int(np.amax(Y))+1
num_examples = X.shape[0]
max_iterations = int(40*(num_examples/batch_size))
network = NeuralNetwork(input_size,output_size,hidden_layers_sizes,activation)
network.train(X,Y.astype(int),batch_size,n0,max_iterations)
# predictions = network.predict(X.copy())
# print(100 * np.sum(predictions == Y)/Y.shape[0])
# print(np.average(predictions))
testX = load_data(testing_data_path,avg,std)
predictions = network.predict(testX)
np.savetxt(output_path,predictions,fmt="%i")
```
| github_jupyter |
This notebook can be executed in a notebook server hosted on KubeFlow.
You can find instructions on how to deploy a KubeFlow cluster and how to access the KubeFlow UI and the hosted notebooks here: https://www.kubeflow.org/docs/pipelines/pipelines-quickstart/
Please install the KubeFlow Pipelines SDK using the following command:
```
!pip3 install 'https://storage.googleapis.com/ml-pipeline/release/0.1.9/kfp.tar.gz'
```
# Energy Price Forecasting Pipeline
This notebook generates a KubeFlow pipeline that runs the solution end to end.
For more information on KubeFlow pipelines and how to run them in GCP please visit https://github.com/kubeflow/pipelines
```
import kfp
from kfp import compiler
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.notebook
#Please modify the following values to match your GCP bucket, project, and docker image name.
OUTPUT_DIR = 'gs://pipelinestest/out'
PROJECT_NAME = 'energy-forecasting'
EF_IMAGE='gcr.io/%s/energy:dev' % PROJECT_NAME
```
### Create base image
This image takes the `tensorflow/tensorflow:1.10.0-py3` as a starting point and installs python libraries and applications that are required by some components in the pipeline.
```
%%docker {EF_IMAGE} {OUTPUT_DIR}
FROM tensorflow/tensorflow:1.10.0-py3
RUN apt-get update
RUN apt-get install -y git
RUN pip3 install --upgrade google-api-python-client
RUN pip3 install --upgrade pyarrow
RUN pip3 install --upgrade google-cloud-bigquery
RUN pip3 install --upgrade google-cloud-storage
RUN pip3 install --upgrade gitpython
```
### Create Components
Each cell defines the logic of different components that will be used in the pipeline and produces a `.yaml` file for it.
```
def copy_table(
dataset: str) -> str:
"""Retrieves raw data from competition website.
Retrieves raw data from the competition site and saves it in BigQuery.
Args:
dataset: String specifying the dataset in BigQuery to save the data in.
Returns:
String specifying if the component finished successfully.
"""
from google.cloud import bigquery
import requests
import pandas as pd
from io import StringIO
from io import BytesIO
import zipfile
bq_client = bigquery.Client()
price_data = pd.read_csv(
StringIO(requests.get(
'http://complatt.smartwatt.net/assets/files/historicalRealData/RealMarketPriceDataPT.csv').text),
sep=';'
)
price_data.columns = ['date_utc', 'price']
bq_client.load_table_from_dataframe(
price_data,
bq_client.dataset(dataset).table(
'MarketPricePT')).result()
weather_zip = zipfile.ZipFile(
BytesIO(requests.get(
'http://complatt.smartwatt.net/assets/files/weatherHistoricalData/WeatherHistoricalData.zip').content))
weather_data = pd.read_csv(
weather_zip.open(
'WeatherHistoricalData/historical_weather.csv'))
bq_client.load_table_from_dataframe(
weather_data,
bq_client.dataset(dataset).table(
'historical_weather')).result()
return('success')
compiler.build_python_component(
component_func = copy_table,
staging_gcs_path = OUTPUT_DIR,
base_image=EF_IMAGE,
target_component_file='copy-table.component.yaml',
target_image = 'gcr.io/' + PROJECT_NAME + '/component-copy-table:latest')
def export_table(
inp: str,
table: str,
file: str) -> str:
"""Exports table to csv.
Exports BigQuery table into CSV file.
Args:
inp: String containing the output from previous component.
table: String specifying the origin BigQuery table.
file: String specifying the path and name for the csv file.
Returns:
String specifying if the component finished successfully.
"""
from google.cloud import bigquery
bq_client = bigquery.Client()
bq_client.extract_table(
table,
file).result()
return('success')
compiler.build_python_component(
component_func = export_table,
staging_gcs_path = OUTPUT_DIR,
base_image=EF_IMAGE,
target_component_file='export-table.component.yaml',
target_image = 'gcr.io/' + PROJECT_NAME + '/component-export-table:latest')
def run_git_python_script(
inp: str,
code_repo: str,
code_folder: str,
script: str,
script_args: str) -> str:
"""Runs Python script from git repository.
Args:
inp: String containing the output from previous component.
code_repo: String specifying the url to the git repository.
code_folder: String specifying the folder for the script.
script: String specifying the name of the script.
script_args: String specifying the arguments for the script.
Returns:
String specifying if the component finished successfully.
"""
import os
import git
git.Git('').clone(code_repo)
os.chdir(code_folder)
output = os.system(' '.join([
'python -m',
script,
script_args]))
if output == 0:
return('success')
raise Exception('Script failed. The exit status was: {}'.format(output))
compiler.build_python_component(
component_func = run_git_python_script,
staging_gcs_path = OUTPUT_DIR,
base_image=EF_IMAGE,
target_component_file='run-git-python-script.component.yaml',
target_image = 'gcr.io/' + PROJECT_NAME + '/component-run-git-python-script:latest')
def train_git_cmle_model(
tr_inp: str,
va_inp: str,
code_repo: str,
code_folder: str,
project: str,
bucket: str,
package_folder: str,
cmle_folder: str,
scale_tier: str,
python_module: str,
region: str,
runtime_version: str,
cmle_args: str) -> str:
"""Executes CMLE training job.
Retrieves python file from git repo and launches training job in CMLE.
Args:
tr_inp: String containing the source for the training data.
va_inp: String containing the source for the validation data.
code_repo: String specifying the url to the git repository.
code_folder: String specifying the folder for the job code.
project: String specifying the GCP project where job will run.
bucket: String specifying the GCS bucket where to save the job's outputs.
package_folder: String specifying the python package to run for the job.
cmle_folder: String specifying the folder in GCS where to save outputs.
scale_tier: String specifying compute resources to use for training job.
python_module: String specifying the python module to run for the job.
region: String specifying the GCP region in which to run the job.
runtime_version: String specifying the CMLE version to use for the job.
cmle_args: String specifying the arguments for the CMLE job.
Returns:
String containing output from running the training job in CMLE.
"""
import os
import git
import tarfile
import datetime
from google.cloud import storage
from googleapiclient import discovery
jobId = 'train' + datetime.datetime.today().strftime('%Y%m%d%H%M%S')
git.Git('').clone(code_repo)
with tarfile.open('code.tar.gz', 'w:gz') as tar:
tar.add(
code_folder,
arcname=os.path.basename(code_folder))
gcs_client = storage.Client()
gcs_bucket = gcs_client.get_bucket(bucket)
blob = gcs_bucket.blob(package_folder + jobId + '.tar.gz')
blob.upload_from_filename('code.tar.gz')
training_inputs = {
'scaleTier': scale_tier,
'pythonModule': python_module,
'args': cmle_args.split(' '),
'region': region,
'packageUris': [
'gs://'+ bucket + '/' + package_folder + jobId + '.tar.gz'],
'jobDir': 'gs://'+ bucket + '/' + cmle_folder + jobId,
'runtimeVersion': runtime_version}
job_spec = {
'jobId': jobId,
'trainingInput': training_inputs}
cloudml = discovery.build('ml', 'v1')
project_id = 'projects/{}'.format(project)
request = cloudml.projects().jobs().create(
body=job_spec,
parent=project_id)
return(str(request.execute()))
compiler.build_python_component(
component_func = train_git_cmle_model,
staging_gcs_path = OUTPUT_DIR,
base_image=EF_IMAGE,
target_component_file='train-git-cmle-model.component.yaml',
target_image = 'gcr.io/' + PROJECT_NAME + '/component-train-git-cmle-model:latest')
```
### Create pipeline
The following code loads all components needed for the pipeline. Specifies dependencies between components. Defines arguments and defaults for the pipeline and saves the pipeline into a `.tar.gz` file that can be loaded into KubeFlow pipelines.
```
@dsl.pipeline(
name='Energy Price Forecasting',
description='Energy Price Forecasting')
def basic_bq_pipeline(
project = dsl.PipelineParam(
'project',
value='energy-forecasting'),
dataset = dsl.PipelineParam(
'dataset',
value='Energy'),
bucket = dsl.PipelineParam(
'bucket',
value='energyforecast'),
code_repo = dsl.PipelineParam(
'code-repo',
value='https://github.com/GoogleCloudPlatform/professional-services.git'),
code_folder = dsl.PipelineParam(
'code-folder',
value='professional-services/examples/cloudml-energy-price-forecasting'),
data_prep_script = dsl.PipelineParam(
'data-prep-script',
value='data_preparation.data_prep'),
data_prep_args = dsl.PipelineParam(
'data-prep-args',
value=' '.join([
'--dataset=Energy',
'--train_table=MLDataTrain',
'--valid_table=MLDataValid',
'--test_table=MLDataTest',
'--prepare_data_file=data_preparation/prepare_data.sql',
'--weather_mean_std_file=data_preparation/weather_mean_std.sql',
'--train_from_date="2015-01-05 00:00:00"',
'--train_to_date="2015-10-04 23:01:00"',
'--valid_from_date="2015-10-05 00:00:00"',
'--valid_to_date="2015-10-11 23:01:00"',
'--test_from_date="2015-10-12 00:00:00"',
'--test_to_date="2015-10-18 23:01:00"',
'--price_scaling=0.01',
'--mean_path=gs://energyforecast/data/pickle/mean.pkl',
'--std_path=gs://energyforecast/data/pickle/std.pkl'])),
package_folder = dsl.PipelineParam(
'package-folder',
value='package/'),
cmle_folder = dsl.PipelineParam(
'cmle-folder',
value='cmle/'),
cmle_args = dsl.PipelineParam(
'cmle-args',
value=' '.join([
'--training_path', 'gs://energyforecast/data/csv/MLDataTrain.csv',
'--validation_path', 'gs://energyforecast/data/csv/MLDataValid.csv',
'--mean_path', 'gs://energyforecast/data/pickle/mean.pkl',
'--std_path', 'gs://energyforecast/data/pickle/std.pkl',
'--dropout' , '0.2',
'--hour_embedding', '20',
'--day_embedding', '10',
'--first_layer_size', '100',
'--number_layers', '3',
'--layer_reduction_fraction', '0.5',
'--learning_rate', '0.01',
'--batch_size', '64',
'--eval_batch_size', '168',
'--max_steps', '5000'])),
scale_tier = dsl.PipelineParam(
'scale-tier',
value='BASIC'),
python_module = dsl.PipelineParam(
'python-module',
value='trainer.task'),
region = dsl.PipelineParam(
'region',
value='us-central1'),
runtime_version = dsl.PipelineParam(
'runtime-version',
value='1.10'),
train_table = dsl.PipelineParam(
'train-table',
value='Energy.MLDataTrain'),
valid_table = dsl.PipelineParam(
'valid-table',
value='Energy.MLDataValid'),
test_table = dsl.PipelineParam(
'test-table',
value='Energy.MLDataTest'),
train_file = dsl.PipelineParam(
'train-file',
value='gs://energyforecast/data/csv/MLDataTrain.csv'),
valid_file = dsl.PipelineParam(
'valid-file',
value='gs://energyforecast/data/csv/MLDataValid.csv'),
test_file = dsl.PipelineParam(
'test-file',
value='gs://energyforecast/data/csv/MLDataTest.csv')):
CopTableOp = kfp.components.load_component('copy-table.component.yaml')
ExpTableOp = kfp.components.load_component('export-table.component.yaml')
DataPrepOp = kfp.components.load_component('run-git-python-script.component.yaml')
TrainModelOp = kfp.components.load_component('train-git-cmle-model.component.yaml')
ct_op = CopTableOp(
dataset).apply(gcp.use_gcp_secret('user-gcp-sa'))
dp_op = DataPrepOp(
ct_op.output,
code_repo,
code_folder,
data_prep_script,
data_prep_args).apply(gcp.use_gcp_secret('user-gcp-sa'))
tr_et_op = ExpTableOp(
dp_op.output,
train_table,
train_file).apply(gcp.use_gcp_secret('user-gcp-sa'))
va_et_op = ExpTableOp(
dp_op.output,
valid_table,
valid_file).apply(gcp.use_gcp_secret('user-gcp-sa'))
te_et_op = ExpTableOp(
dp_op.output,
test_table,
test_file).apply(gcp.use_gcp_secret('user-gcp-sa'))
tm_op = TrainModelOp(
tr_et_op.output,
va_et_op.output,
code_repo,
code_folder,
project,
bucket,
package_folder,
cmle_folder,
scale_tier,
python_module,
region,
runtime_version,
cmle_args).apply(gcp.use_gcp_secret('user-gcp-sa'))
compiler.Compiler().compile(basic_bq_pipeline, 'energy-forecasting.tar.gz')
```
| github_jupyter |
Exercise 9 - Advanced Neural Networks
==========
There are many factors that influence how well a neural network might perform. AI practitioners tend to play around with the structure of the hidden layers, the activation functions used, and the optimisation function.
In this exercise we will look at how changing these parameters impacts the accuracy performance of our network.
Step 1
------
In this exercise we will use the same dog dataset as in exercise 8, building on what we learnt before and trying different parameters for a network to try and improve performance.
Let's start by opening up our data set and setting up our train and test sets.
#### __Run the code__ below.
```
# Run this!
# Here we set a randomisation seed for replicatability.
import os
os.environ['PYTHONHASHSEED'] = '0'
seed = 6
import random as rn
rn.seed(seed)
import numpy as np
np.random.seed(seed)
import warnings
warnings.filterwarnings("ignore")
from keras import backend as K
import keras
print('keras using %s backend'%keras.backend.backend())
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
# Sets up the graphing configuration
import matplotlib.pyplot as graph
%matplotlib inline
graph.rcParams['figure.figsize'] = (15,5)
graph.rcParams["font.family"] = 'DejaVu Sans'
graph.rcParams["font.size"] = '12'
graph.rcParams['image.cmap'] = 'rainbow'
# Run this too!
# This gets our data ready
# Load the data
dataset = pd.read_csv('Data/dog_data.csv')
# Separate out the features
features = dataset.drop(['breed'], axis = 1)
# Sets the target one-hot vectors
target = OneHotEncoder(sparse = False).fit_transform(np.transpose([dataset['breed']]))
# Take the first 4/5 of the data and assign it to training
train_X = features.values[:160]
train_Y = target[:160]
# Take the last 1/5 of the data and assign it to testing
test_X = features.values[160:]
test_Y = target[160:]
```
Step 2
------
The box below contains methods to help us quickly change the structure. Don't edit them - just run the box.
The __train_network__ method allows us to change:
* the number of layers
* the activation functions the layers use
* the optimizer of the model
* the number of training cycles for the model (__epochs__)
The plot_acc and bar_acc just plot our models so we can easily see how well they do.
Don't worry about the code - it is simply to make the next steps easier.
#### __Run the code__ below.
```
# Run this!
# Below are a few helper methods. Do not edit these.
def train_network(structure, activation, optimizer, epochs):
os.environ['PYTHONHASHSEED'] = '0'
rn.seed(seed)
np.random.seed(seed)
# This initialises the model
model = keras.models.Sequential()
# This is our input + the first hidden layer 1
model.add(keras.layers.Dense(units = structure[1], input_dim = structure[0], activation = activation))
# Hidden layer 2, if not ignored (of size 0)
if structure[2] > 0:
model.add(keras.layers.Dense(units = structure[2], activation = activation))
# Output layer
model.add(keras.layers.Dense(units=structure[-1], activation = "softmax"))
# Compiles the model with parameters
model.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
# This tells us training has started, so we know that it's actually running
print('training... ', end = '')
# This trains the network
training_stats = model.fit(train_X, train_Y, batch_size = 1, epochs = epochs, verbose = 0, shuffle = False)
# Results!
print('train_acc: %0.3f, test_acc: %0.3f' %(training_stats.history['accuracy'][-1],
model.evaluate(test_X, test_Y, verbose = 0)[1]))
# This returns the results and the model for use outside the function
return training_stats, model
# Plots our evaluations in a line graph to see how they compare
def plot_acc(train_acc, test_acc, title):
# Plots the training and testing accuracy lines
training_accuracy, = graph.plot(train_acc, label = 'Training Accuracy')
testing_accuracy, = graph.plot(test_acc, label = 'Testing Accuracy')
graph.legend(handles = [training_accuracy, testing_accuracy])
# Plots guide lines along y = 0 and y = 1 to help visualise
xp = np.linspace(0, train_acc.shape[0] - 1, 10 * train_acc.shape[0])
graph.plot(xp, np.full(xp.shape, 1), c = 'k', linestyle = ':', alpha = 0.5)
graph.plot(xp, np.full(xp.shape, 0), c = 'k', linestyle = ':', alpha = 0.5)
graph.xticks(range(0, train_acc.shape[0]), range(1, train_acc.shape[0] + 1))
graph.ylim(0,1)
graph.title(title)
graph.show()
# Plots our evaluations in a bar chart to see how they compare
def bar_acc(train_acc, test_acc, title, xticks):
index = range(1, train_acc.shape[0] + 1)
# Plots the training and testing accuracy bars
training_accuracy = graph.bar(index, train_acc, 0.4, align = 'center')
testing_accuracy = graph.bar(index, test_acc, 0.4, align = 'edge')
graph.legend((training_accuracy[0], testing_accuracy[0]), ('Training Accuracy', 'Testing Accuracy'))
graph.xticks(index, xticks)
graph.title(title)
graph.show()
```
Step 3
------
Let's first look at how different layer sizes impact performance.
Let's look at a network with just one hidden layer. We'll see how it performs with 1 to 10 nodes.
### In the cell below replace:
#### 1. `<addHidden1>` with `hidden1`
#### 2. `<addTrainAcc>` with `train_acc`
#### 3. `<addTestAcc>` with `test_acc`
#### and then __run the code__.
```
# Initialises empty arrays into which to append new values.
train_acc = np.empty((0))
test_acc = np.empty((0))
for hidden1 in range (1,11):
print('Evaluating model with %i hidden neurons... ' %hidden1, end = '')
###
# REPLACE <addHidden1> BELOW WITH hidden1
###
training_stats, model = train_network(structure = [3, <addHidden1>, <addHidden1>, 3],
activation = 'relu', optimizer = 'RMSprop', epochs = 12)
###
train_acc = np.append(train_acc, training_stats.history['accuracy'][-1])
test_acc = np.append(test_acc, model.evaluate(test_X, test_Y, verbose = 0)[1])
###
# REPLACE <addTrainAcc> WITH train_acc AND <addTestAcc> WITH test_acc
###
plot_acc(<addTrainAcc>, <addTestAcc>, 'hidden layer size performance comparison')
###
```
So, experimenting with different sizes of hidden layers can dramatically improve your results.
Step 4
------
Now we'll look at how different activation functions impact the performance.
There's a lot we'll try; just remember it is common to try both `relu` and `tanh` first.
### In the cell below replace:
#### 1. `<addActivation>` with `activation`
#### 2. `<addActivationFunctions>` with `activation_functions`
#### and then __run the code__.
```
train_acc = np.empty((0))
test_acc = np.empty((0))
# Makes a list of the activation functions we wish to compare
activation_functions = ['elu', 'selu', 'relu', 'tanh', 'sigmoid',
'hard_sigmoid', 'softplus', 'softsign', 'linear']
for activation in activation_functions:
print('Evaluating model with %s hidden layer activation function... ' %activation, end = '')
###
# REPLACE <addActivation> WITH activation
###
training_stats, model = train_network(structure = [3, 4, 2, 3],
activation = <addActivation>, optimizer = 'RMSprop', epochs = 12)
###
train_acc = np.append(train_acc, training_stats.history['accuracy'][-1])
test_acc = np.append(test_acc, model.evaluate(test_X, test_Y, verbose=0)[1])
###
# REPLACE THE <addActivationFunctions> BELOW WITH activation_functions
###
bar_acc(train_acc, test_acc, 'activation function performance comparison using (4,2) hidden layer', <addActivationFunctions>)
###
```
There's quite a lot of variance there. It's always good to quickly test different activation functions first.
Next, let's try changing the shape of the hidden layers.
#### Replace `<updateHere>`'s with `3` and run the code.
```
train_acc = np.empty((0))
test_acc = np.empty((0))
activation_functions = ['elu', 'selu', 'relu', 'tanh', 'sigmoid',
'hard_sigmoid', 'softplus', 'softsign', 'linear']
for activation in activation_functions:
print('Evaluating model with %s hidden layer activation function... ' %activation, end='')
# The value you choose for <updateHere> below will change the size of the hidden layers. Lets try changing them both to 3 for now
# (but you can have a play around with different numbers if you want)
###
# REPLACE THE <updateHere>'s BELOW WITH 3
###
training_stats, model = train_network(structure = [3, <updateHere>, <updateHere>, 3],
activation = activation, optimizer = 'RMSprop', epochs = 12)
###
train_acc = np.append(train_acc, training_stats.history['accuracy'][-1])
test_acc = np.append(test_acc, model.evaluate(test_X, test_Y, verbose=0)[1])
bar_acc(train_acc, test_acc, 'activation function performance comparison using (3,3) hidden layer', activation_functions)
```
Step 5
-----
The __optimisation function__ is the last major parameter of the network architecture. It changes how the network is trained - so it can have a __very large impact on training time and end performance__.
Note: this step won't always provide the same results every time it is run. Optimizers such as SGD will give different results.
#### Replace `<addOptimizer>` with `optimizer` and run the code.
```
train_acc = np.empty((0))
test_acc = np.empty((0))
# This is a list of the optimisation functions for us to compare
optimization_functions = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta',
'Adam', 'Adamax', 'Nadam']
for optimizer in optimization_functions:
print('Evaluating model with %s optimizer... ' %optimizer, end='')
# The <addOptimizer> below is where we specify the optimizer in the code
###
# REPLACE THE <addOptimizer> BELOW WITH optimizer
###
training_stats, model = train_network(structure = [3, 4, 2, 3],
activation = 'relu', optimizer = <addOptimizer>, epochs = 12)
###
# This is recording our data for the plot
train_acc = np.append(train_acc, training_stats.history['accuracy'][-1])
test_acc = np.append(test_acc, model.evaluate(test_X, test_Y, verbose=0)[1])
# And now, the plot!
bar_acc(train_acc, test_acc, 'optimizer performance comparison using (4,2) hidden layer', optimization_functions)
```
Step 6
-------
Let's try to combine what we've seen above and try to create a neural network that performs better than what we made in exercise 7, where we used the structure `[3,4,2,3]`, the activation function `relu`, and the optimiser `SGD` (Stochastic Gradient Descent).
### In the cell below replace:
#### 1. `<layerSize>`'s with numbers of your choice (how many nodes the hidden layers will have)
#### 2. `<activationFunction>` with one of the following: `'relu'`, `'softsign'`, `'tanh'`, `'elu'`, `'selu'`, `'softplus'`, `'linear'`
#### 3. `<optimiser>` with one of the following: `'SGD'`, `'adam'`, `'RMSprop'`, `'Adagrad'`, `'Adadelta'`, `'Adamax'`, `'Nadam'`
#### and then __run the code__.
```
###
# REPLACE THE <layerSize>'s' BELOW WITH PARAMETERS TO TEST A NEW NEURAL NETWORK e.g. 4 and 2
###
structure = [3, <layerSize>, <layerSize>, 3]
###
###
# REPLACE <activationFunction> WITH ONE OF THE FOLLOWING: 'relu', 'softsign', 'tanh', 'elu', 'selu', 'softplus', 'linear'
###
activation = <activationFunction>
###
###
# REPLACE <optimiser> WITH ONE OF THE FOLLOWING: 'SGD', 'adam', 'RMSprop', 'Adagrad', 'Adadelta', 'Adamax', 'Nadam'
###
optimizer = <optimiser>
###
training_stats, model = train_network(structure, activation, optimizer, epochs = 24)
# We can plot our training statistics to see how it developed over time
accuracy, = graph.plot(training_stats.history['accuracy'], label = 'Accuracy')
training_loss, = graph.plot(training_stats.history['loss'], label = 'Training Loss')
graph.legend(handles = [accuracy, training_loss])
loss = np.array(training_stats.history['loss'])
xp = np.linspace(0, loss.shape[0], 10 * loss.shape[0])
graph.plot(xp, np.full(xp.shape, 1), c = 'k', linestyle = ':', alpha = 0.5)
graph.plot(xp, np.full(xp.shape, 0), c = 'k', linestyle = ':', alpha = 0.5)
graph.show()
```
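For reference, one possible way to fill in the placeholders above (these values are only an illustration, not a tuned configuration):
```
structure = [3, 8, 4, 3]
activation = 'tanh'
optimizer = 'Nadam'
training_stats, model = train_network(structure, activation, optimizer, epochs = 24)
```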
How does it look? Were we able to beat the other network? Try out a number of different configurations to see how they perform!
Conclusion
-------
We've compared how different neural network architecture parameters influence accuracy performance, and we've tried to combine them in such a way that we maximise this performance.
| github_jupyter |
## Convolutional Neural Networks
---
In this notebook, we train an MLP to classify images from the MNIST database.
### 1. Load MNIST Database
```
from keras.datasets import mnist
# use Keras to import pre-shuffled MNIST database
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("The MNIST database has a training set of %d examples." % len(X_train))
print("The MNIST database has a test set of %d examples." % len(X_test))
```
### 2. Visualize the First Six Training Images
```
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.cm as cm
import numpy as np
# plot first six training images
fig = plt.figure(figsize=(20,20))
for i in range(6):
ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[])
ax.imshow(X_train[i], cmap='gray')
ax.set_title(str(y_train[i]))
```
### 3. View an Image in More Detail
```
def visualize_input(img, ax):
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
ax.annotate(str(round(img[x][y],2)), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
visualize_input(X_train[0], ax)
```
### 4. Rescale the Images by Dividing Every Pixel in Every Image by 255
```
# rescale [0,255] --> [0,1]
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
```
### 5. Encode Categorical Integer Labels Using a One-Hot Scheme
```
from keras.utils import np_utils
# print first ten (integer-valued) training labels
print('Integer-valued labels:')
print(y_train[:10])
# one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# print first ten (one-hot) training labels
print('One-hot labels:')
print(y_train[:10])
```
### 6. Define the Model Architecture
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# define the model
model = Sequential()
model.add(Flatten(input_shape=X_train.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
# summarize the model
model.summary()
```
### 7. Compile the Model
```
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
```
### 8. Calculate the Classification Accuracy on the Test Set (Before Training)
```
# evaluate test accuracy
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]
# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
```
### 9. Train the Model
```
from keras.callbacks import ModelCheckpoint
# train the model
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5',
verbose=1, save_best_only=True)
hist = model.fit(X_train, y_train, batch_size=128, epochs=10,
validation_split=0.2, callbacks=[checkpointer],
verbose=1, shuffle=True)
```
### 10. Load the Model with the Best Classification Accuracy on the Validation Set
```
# load the weights that yielded the best validation accuracy
model.load_weights('mnist.model.best.hdf5')
```
### 11. Calculate the Classification Accuracy on the Test Set
```
# evaluate test accuracy
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]
# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
```
| github_jupyter |
## EEML2019: ConvNets and Computer Vision Tutorial (PART I)
### Supervised classification, overfitting and inductive biases in convnets, and how to improve models through self-supervision
### by Viorica Patraucean (vpatrauc@gmail.com)
* Exercise 1: Implement and train a Resnet-50 classifier using supervised learning; enable/disable batch norm updates to see the effect.
* Exercise 2: Inductive biases in convnets; comparison with MLP.
* Exercise 3: Overfitting and regularization using weight decay.
* Exercise 4: Enable self-supervised learning using data augmentation.
**Questions**:
1. What happens with resnet's performance when batch norm statistics are not updated? How about MLP? Why is one affected less than the other?
*A: If the batch statistics are not updated, resnet's performance is similar to a random classifier. The MLP performs considerably better than chance, due to its shallow depth.*
2. What is resnet's train loss on permuted cifar? How about the test accuracy? How is the MLP affected by the permutation?
*A: Resnet fits perfectly the training set (100% train accuracy), but generalises very poorly (around 45%). It manages to memorise the training set due to its high capacity and small dataset, but cannot generalise. Note that this would not happen on Imagenet, i.e. resnet would not be able to memorize the dataset. The performance of a shallow 2-layer MLP is much better than resnet on permuted cifar (65%). Due to fully connectedness, the MLP is not affected by the permutation. The same would happen for a Transformer style model. This shows the strong effect that the inductive biases (here mainly locality of the data) have on the generalisation power of convnets.*
3. What other types of regularization could you use to avoid overfitting?
*A: dropout (see the minimal sketch after these questions).*
4. In what applications do you expect the auxiliary self-supervised task to help more?
*A: In applications where it might be desirable for the latent representation to encode information about object pose, e.g. in robotics for grasping.*
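As a follow-up to question 3, a minimal sketch of dropout in the TF 1.x style used in this notebook (the layer sizes here are arbitrary):
```
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
is_training = tf.placeholder(tf.bool, [])
h = tf.layers.dense(x, 64, activation=tf.nn.relu)
# randomly zeroes a fraction of activations, but only while is_training is True
h = tf.layers.dropout(h, rate=0.5, training=is_training)
logits = tf.layers.dense(h, 10)
```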
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import tensorflow as tf
# Don't forget to select GPU runtime environment in Runtime -> Change runtime type
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
import numpy as np
# Plotting library.
from matplotlib import pyplot as plt
import pylab as pl
from IPython import display
import collections
import enum
import warnings
warnings.filterwarnings('ignore')
tf.logging.set_verbosity(tf.logging.ERROR)
# Reset graph
tf.reset_default_graph()
```
## Download dataset to be used for training and testing
* Cifar-10 equivalent of MNIST for natural RGB images
* 60000 32x32 colour images in 10 classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck
* train: 50000; test: 10000
```
cifar10 = tf.keras.datasets.cifar10
# (down)load dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Check sizes of tensors
print ('Size of training images')
print (train_images.shape)
print ('Size of training labels')
print (train_labels.shape)
print ('Size of test images')
print (test_images.shape)
print ('Size of test labels')
print (test_labels.shape)
assert train_images.shape[0] == train_labels.shape[0]
```
## Display the images
The gallery function below shows sample images from the data, together with their labels.
```
MAX_IMAGES = 10
def gallery(images, label, title='Input images'):
class_dict = [u'airplane', u'automobile', u'bird', u'cat', u'deer', u'dog', u'frog', u'horse', u'ship', u'truck']
num_frames, h, w, num_channels = images.shape
num_frames = min(num_frames, MAX_IMAGES)
ff, axes = plt.subplots(1, num_frames,
figsize=(num_frames, 1),
subplot_kw={'xticks': [], 'yticks': []})
for i in range(0, num_frames):
if num_channels == 3:
axes[i].imshow(np.squeeze(images[i]))
else:
axes[i].imshow(np.squeeze(images[i]), cmap='gray')
axes[i].set_title(class_dict[label[i][0]])
plt.setp(axes[i].get_xticklabels(), visible=False)
plt.setp(axes[i].get_yticklabels(), visible=False)
ff.subplots_adjust(wspace=0.1)
plt.show()
gallery(train_images, train_labels)
```
## Prepare the data for training and testing
* for training, we use stochastic optimizers (e.g. SGD, Adam), so we need to sample at random mini-batches from the training dataset
* for testing, we iterate sequentially through the test set
```
# define dimension of the batches to sample from the datasets
BATCH_SIZE_TRAIN = 100 #@param
BATCH_SIZE_TEST = 100 #@param
# create Dataset objects using the data previously downloaded
dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
# we shuffle the data and repeatedly sample batches for training
batched_dataset_train = dataset_train.shuffle(100000).repeat().batch(BATCH_SIZE_TRAIN)
# create iterator to retrieve batches
iterator_train = batched_dataset_train.make_one_shot_iterator()
# get a training batch of images and labels
(batch_train_images, batch_train_labels) = iterator_train.get_next()
# check that the shape of the training batches is the expected one
print ('Shape of training images')
print (batch_train_images)
print ('Shape of training labels')
print (batch_train_labels)
# we do the same for test dataset
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
batched_dataset_test = dataset_test.repeat().batch(BATCH_SIZE_TEST)
iterator_test = batched_dataset_test.make_one_shot_iterator()
(batch_test_images, batch_test_labels) = iterator_test.get_next()
print ('Shape of test images')
print (batch_test_images)
print ('Shape of test labels')
print (batch_test_labels)
# Squeeze labels and convert from uint8 to int32 - required below by the loss op
batch_test_labels = tf.cast(tf.squeeze(batch_test_labels), tf.int32)
batch_train_labels = tf.cast(tf.squeeze(batch_train_labels), tf.int32)
```
## General setting; use the options below to switch between exercises.
```
model = "resnet_v2" #@param['resnet_v2','mlp']
flag_batch_norm = 'OFF' #@param['ON', 'OFF']
flag_permute = False #@param['True', 'False'] {type:"raw"}
flag_regularize = False #@param['True', 'False'] {type:"raw"}
flag_selfsup = True #@param['True', 'False'] {type:"raw"}
```
## Preprocess input for training and testing
```
#@title 32x32 permutation list (for Exercise 3)
def get_permutation_cifar10():
p = tf.constant([ 273, 746, 984, 197, 597, 519, 757, 113, 1009, 470, 321,
552, 585, 246, 229, 569, 773, 6, 955, 379, 847, 548,
148, 503, 27, 132, 1014, 82, 101, 260, 923, 53, 842,
635, 203, 912, 439, 487, 162, 832, 395, 311, 593, 33,
988, 856, 183, 215, 264, 699, 826, 692, 560, 124, 126,
948, 708, 41, 368, 484, 467, 267, 731, 17, 73, 14,
521, 240, 296, 846, 43, 779, 629, 640, 874, 268, 150,
586, 56, 756, 769, 808, 382, 354, 95, 283, 900, 970,
775, 432, 178, 998, 271, 118, 563, 445, 946, 261, 518,
723, 725, 449, 595, 617, 935, 607, 400, 697, 96, 786,
656, 138, 343, 653, 175, 993, 433, 681, 654, 574, 322,
918, 831, 754, 381, 12, 797, 338, 182, 695, 829, 927,
556, 1008, 491, 512, 717, 224, 303, 496, 1002, 693, 140,
599, 332, 758, 465, 80, 501, 690, 1022, 422, 211, 331,
926, 849, 313, 583, 128, 776, 168, 463, 201, 1018, 334,
228, 643, 514, 934, 106, 799, 713, 507, 543, 703, 299,
74, 263, 710, 622, 486, 344, 210, 687, 537, 362, 858,
898, 688, 320, 91, 403, 778, 534, 674, 783, 284, 968,
807, 724, 549, 184, 605, 15, 8, 535, 85, 495, 774,
701, 848, 416, 642, 553, 897, 989, 370, 942, 571, 489,
891, 388, 171, 761, 475, 844, 397, 227, 753, 278, 855,
938, 794, 155, 748, 1017, 941, 745, 437, 414, 181, 759,
234, 143, 554, 762, 46, 476, 417, 911, 1007, 882, 716,
336, 117, 47, 977, 602, 837, 525, 880, 718, 660, 760,
451, 142, 609, 405, 455, 315, 394, 987, 36, 389, 719,
715, 386, 393, 446, 109, 658, 612, 685, 577, 1015, 967,
641, 770, 510, 704, 793, 892, 275, 904, 335, 893, 259,
307, 903, 985, 435, 712, 326, 232, 1021, 237, 827, 1005,
172, 1000, 675, 84, 670, 963, 434, 485, 68, 677, 415,
492, 947, 859, 732, 810, 366, 557, 22, 824, 765, 722,
902, 950, 579, 288, 308, 48, 198, 1003, 481, 604, 139,
1001, 647, 115, 618, 243, 466, 107, 795, 440, 152, 885,
200, 230, 83, 821, 755, 10, 144, 749, 528, 494, 199,
546, 282, 921, 223, 828, 962, 346, 925, 352, 421, 1023,
763, 424, 894, 328, 290, 13, 62, 129, 156, 820, 436,
871, 252, 359, 538, 35, 459, 226, 657, 401, 191, 483,
187, 242, 680, 646, 473, 802, 4, 581, 130, 666, 709,
889, 7, 864, 236, 991, 450, 532, 667, 70, 1011, 410,
907, 266, 914, 189, 943, 796, 649, 990, 257, 937, 700,
500, 188, 813, 809, 634, 789, 25, 517, 573, 104, 387,
673, 966, 638, 845, 540, 1006, 910, 249, 610, 110, 480,
663, 9, 225, 339, 398, 976, 131, 372, 628, 875, 174,
488, 908, 79, 766, 310, 468, 691, 425, 289, 616, 309,
915, 570, 636, 768, 591, 956, 464, 412, 120, 958, 939,
782, 652, 541, 971, 100, 280, 721, 423, 430, 442, 506,
160, 502, 333, 615, 399, 57, 250, 384, 959, 1012, 71,
103, 429, 411, 59, 862, 887, 980, 529, 630, 444, 785,
625, 916, 883, 901, 983, 852, 179, 747, 801, 218, 627,
408, 443, 830, 305, 733, 509, 274, 682, 884, 584, 358,
536, 739, 369, 933, 221, 247, 676, 982, 206, 1, 438,
265, 954, 866, 672, 287, 26, 39, 606, 479, 102, 291,
88, 205, 61, 931, 127, 351, 1004, 477, 655, 865, 355,
67, 37, 735, 458, 454, 737, 873, 909, 173, 231, 158,
555, 825, 945, 930, 337, 644, 505, 233, 730, 431, 1020,
530, 580, 312, 720, 441, 550, 952, 367, 513, 50, 371,
34, 45, 705, 153, 122, 209, 51, 870, 216, 185, 611,
327, 815, 899, 603, 428, 1010, 42, 669, 1019, 601, 788,
620, 771, 886, 116, 293, 986, 363, 834, 881, 81, 90,
474, 94, 302, 31, 863, 317, 619, 471, 86, 869, 64,
994, 683, 20, 330, 1013, 472, 650, 714, 380, 812, 853,
196, 272, 736, 349, 75, 169, 28, 340, 163, 151, 979,
798, 582, 559, 376, 24, 539, 818, 176, 207, 992, 600,
975, 767, 867, 592, 978, 726, 277, 511, 98, 850, 498,
668, 298, 292, 792, 523, 598, 742, 623, 426, 841, 361,
121, 157, 964, 146, 490, 791, 780, 360, 679, 38, 222,
419, 192, 587, 30, 77, 702, 235, 953, 997, 318, 751,
2, 396, 542, 661, 499, 29, 69, 180, 621, 217, 588,
972, 58, 60, 164, 840, 772, 545, 452, 170, 951, 752,
281, 478, 711, 648, 575, 787, 213, 345, 19, 803, 190,
527, 508, 149, 323, 624, 404, 817, 895, 420, 256, 413,
626, 134, 390, 614, 342, 565, 238, 949, 241, 781, 590,
533, 659, 365, 561, 112, 248, 357, 566, 407, 253, 913,
461, 957, 932, 594, 255, 406, 784, 750, 3, 356, 141,
97, 92, 919, 522, 734, 325, 54, 877, 738, 456, 133,
917, 374, 66, 729, 835, 114, 833, 214, 504, 383, 631,
347, 686, 905, 578, 613, 239, 806, 645, 790, 764, 427,
651, 568, 87, 119, 63, 65, 202, 890, 940, 928, 286,
409, 662, 551, 49, 251, 572, 632, 5, 524, 515, 888,
608, 208, 329, 18, 516, 350, 295, 448, 385, 678, 936,
896, 258, 204, 276, 177, 854, 72, 341, 16, 974, 836,
851, 497, 316, 805, 262, 544, 981, 838, 843, 526, 707,
348, 254, 447, 520, 453, 270, 304, 558, 462, 418, 279,
99, 353, 314, 306, 564, 219, 167, 186, 297, 706, 0,
804, 89, 878, 11, 816, 402, 868, 531, 78, 728, 373,
562, 684, 944, 860, 876, 194, 195, 906, 973, 294, 960,
567, 698, 378, 589, 40, 220, 493, 460, 929, 861, 823,
76, 105, 1016, 839, 639, 324, 166, 740, 23, 52, 161,
319, 996, 392, 135, 111, 391, 547, 145, 961, 999, 123,
744, 364, 147, 469, 811, 125, 159, 664, 965, 93, 727,
245, 814, 696, 377, 21, 665, 694, 920, 857, 55, 879,
269, 285, 671, 165, 924, 193, 244, 969, 800, 457, 922,
741, 375, 995, 482, 576, 108, 743, 689, 300, 44, 32,
136, 872, 596, 637, 137, 819, 154, 633, 777, 301, 212,
822])
return p
# Data augmentation
# - scale image to [-1 , 1]
# - during training: apply horizontal flip randomly
# - random crop after padding
# - apply optional data augmentation (permutation, rotation)
def train_image_preprocess(h, w, num_transf=None):
def fn(image):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image * 2 - 1
image = tf.image.random_flip_left_right(image)
# Data augmentation: pad images and randomly sample a (h, w) patch.
image = tf.pad(image, [[0, 0], [4, 4], [4, 4], [0, 0]], mode='REFLECT')
image = tf.random_crop(image, size=(BATCH_SIZE_TRAIN, h, w, 3))
# Exercise 2: permuted Cifar10; uses fixed permutation
if flag_permute:
sh = image.get_shape().as_list()
image = tf.reshape(image, [BATCH_SIZE_TRAIN, -1, 3])
p = get_permutation_cifar10()
image = tf.gather(image, p, axis=1)
image = tf.reshape(image, [BATCH_SIZE_TRAIN, sh[1], sh[2], sh[3]])
# # Exercise 4: data augmentation as self-supervision signal
label_transf = []
if flag_selfsup and num_transf:
list_img = []
    for i in range(BATCH_SIZE_TRAIN):
label_ = tf.random.uniform([1], minval=0, maxval=num_transf,
dtype=tf.int32)[0]
img = tf.image.rot90(image[i], k=label_)
label_transf.append(label_)
list_img.append(img)
image = tf.stack(list_img, axis=0)
label_transf = tf.stack(label_transf, axis=0)
return image, label_transf
return fn
def test_image_preprocess():
def fn(image):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image * 2 - 1
if flag_permute:
sh = image.get_shape()
image = tf.reshape(image, [BATCH_SIZE_TEST, -1, 3])
p = get_permutation_cifar10()
image = tf.gather(image, p, axis=1)
image = tf.reshape(image, [BATCH_SIZE_TEST, sh[1], sh[2], sh[3]])
else:
sh = image.get_shape()
image = tf.reshape(image, [BATCH_SIZE_TEST, sh[1], sh[2], sh[3]])
return image
return fn
```
## Define the model
```
# define parameters of resnet blocks for resnet-50 model
ResNetBlockParams = collections.namedtuple(
"ResNetBlockParams", ["output_channels", "bottleneck_channels", "stride"])
BLOCKS_50 = (
(ResNetBlockParams(256, 64, 1),) * 2 + (ResNetBlockParams(256, 64, 2),),
(ResNetBlockParams(512, 128, 1),) * 3 + (ResNetBlockParams(512, 128, 2),),
(ResNetBlockParams(1024, 256, 1),) * 5 + (ResNetBlockParams(1024, 256, 2),),
(ResNetBlockParams(2048, 512, 1),) * 3)
#@title Utils
def _fixed_padding(inputs, kernel_size):
"""Pads the input along the spatial dimensions."""
pad_total = kernel_size - 1
pad_begin = pad_total // 2
pad_end = pad_total - pad_begin
padded_inputs = tf.pad(inputs, [[0, 0], [pad_begin, pad_end],
[pad_begin, pad_end], [0, 0]])
return padded_inputs
def _max_pool2d_same(inputs, kernel_size, stride, padding):
"""Strided 2-D max-pooling with fixed padding.
When padding='SAME' and stride > 1, we do fixed zero-padding followed by
max_pool2d with 'VALID' padding."""
if padding == "SAME" and stride > 1:
padding = "VALID"
inputs = _fixed_padding(inputs, kernel_size)
return tf.layers.MaxPooling2D(kernel_size, strides=stride, padding=padding)(inputs)
def _conv2d_same(inputs, num_outputs, kernel_size, stride, use_bias=False,
name="conv_2d_same"):
"""Strided 2-D convolution with 'SAME' padding. If stride > 1, we do fixed
zero-padding, followed by conv2d with 'VALID' padding."""
if stride == 1:
padding = "SAME"
else:
padding = "VALID"
inputs = _fixed_padding(inputs, kernel_size)
return tf.layers.Conv2D(num_outputs, kernel_size, strides=stride,
padding=padding, use_bias=use_bias, name=name)(inputs)
```
### [Resnet Block V2](https://arxiv.org/pdf/1603.05027.pdf)

```
# define resnet block v2
def resnet_block(inputs, output_channels, bottleneck_channels, stride,
training=None, name="resnet_block"):
"""Create a resnet block."""
num_input_channels = inputs.get_shape()[-1]
batch_norm_args = {
"training": training
}
# ResNet V2 uses pre-activation, where the batch norm and relu are before
# convolutions, rather than after as in ResNet V1.
preact = tf.layers.BatchNormalization(name=name+"/bn_preact")(inputs,
**batch_norm_args)
preact = tf.nn.relu(preact)
if output_channels == num_input_channels:
# Use subsampling to match output size.
# Note we always use `inputs` in this case, not `preact`.
if stride == 1:
shortcut = inputs
else:
shortcut = _max_pool2d_same(inputs, 1, stride=stride, padding="SAME")
else:
# Use 1x1 convolution shortcut to increase channels to `output_channels`.
shortcut = tf.layers.Conv2D(output_channels, 1, stride,
use_bias=False,
name=name+"/conv_shortcut")(preact)
residual = tf.layers.Conv2D(bottleneck_channels, 1, strides=1,
use_bias=False, name=name+"/conv_r1")(preact)
residual = tf.layers.BatchNormalization(name=name+"/bn_r1")(residual,
**batch_norm_args)
residual = tf.nn.relu(residual)
residual = _conv2d_same(residual, bottleneck_channels, 3, stride,
name=name+"/conv_r2")
residual = tf.layers.BatchNormalization(name=name+"/bn_r2")(residual,
**batch_norm_args)
residual = tf.nn.relu(residual)
residual = tf.layers.Conv2D(output_channels, 1, strides=1,
use_bias=False, name=name+"/conv_r3")(residual)
output = shortcut + residual
return output
# stack resnet blocks
def _build_resnet_blocks(inputs, blocks, batch_norm_args):
"""Connects the resnet block into the graph."""
outputs = []
for num, subblocks in enumerate(blocks):
with tf.variable_scope("block_{}".format(num)):
for i, block in enumerate(subblocks):
args = {
"name": "resnet_block_{}".format(i)
}
args.update(block._asdict())
args.update(batch_norm_args)
inputs = resnet_block(inputs, **args)
outputs += [inputs]
return outputs
# define full architecture: input convs, resnet blocks, output classifier
def resnet_v2(inputs, blocks, is_training=True,
num_classes=10, num_transf=None, use_global_pool=True,
name="resnet_v2"):
"""ResNet V2."""
blocks = tuple(blocks)
batch_norm_args = {
"training": is_training
}
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# Add initial non-resnet conv layer and max_pool
inputs = _conv2d_same(inputs, 64, 7, stride=2, name="root")
inputs = _max_pool2d_same(inputs, 3, stride=2, padding="SAME")
# Stack resnet blocks
resnet_outputs = _build_resnet_blocks(inputs, blocks, batch_norm_args)
# Take the activations of the last resnet block.
inputs = resnet_outputs[-1]
inputs = tf.layers.BatchNormalization(name="bn_postnorm")(inputs,
**batch_norm_args)
inputs = tf.nn.relu(inputs)
if use_global_pool:
inputs = tf.reduce_mean(inputs, [1, 2], name="use_global_pool",
keepdims=True)
# Add output classifier
logits = tf.layers.Conv2D(num_classes, 1, name="logits")(inputs)
logits = tf.squeeze(logits, axis=[1, 2])
# Add second head for transformation prediction
logits_transf = None
if num_transf:
logits_transf = tf.layers.Conv2D(num_transf, 1, name="logits_transf")(inputs)
logits_transf = tf.squeeze(logits_transf, axis=[1, 2])
return (logits, logits_transf)
```
## Define simple MLP baseline
```
def mlp(inputs, num_classes=10, num_transf=None, is_training=True, name="mlp"):
batch_norm_args = {
"training": is_training
}
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
bs = inputs.get_shape().as_list()[0]
inputs = tf.reshape(inputs, [bs, -1])
net = tf.layers.dense(inputs, 1024)
net = tf.nn.relu(net)
    net = tf.layers.BatchNormalization(name="bn_postnorm1")(net, **batch_norm_args)
net = tf.layers.dense(net, 1024)
net = tf.nn.relu(net)
net = tf.layers.BatchNormalization(name="bn_postnorm2")(net, **batch_norm_args)
logits = tf.layers.dense(net, num_classes, name="logits")
logits_transf = None
if num_transf:
logits_transf = tf.layers.dense(net, num_transf, name="logits_transf")
return logits, logits_transf
```
## Set up training pipeline
```
# First define the preprocessing ops for the train/test data
crop_height = 32 #@param
crop_width = 32 #@param
# NUM_TRANSF can be None or 4 corresponding to 4 rotations (0, 90, 180, 270)
NUM_TRANSF = 4 #@param
preprocess_fn_train = train_image_preprocess(crop_height, crop_width, NUM_TRANSF)
preprocess_fn_test = test_image_preprocess()
NUM_CLASSES = 10 #@param
```
### Get predictions from either MLP baseline or convnet
```
blocks = BLOCKS_50
inp_train, labels_selfsup = preprocess_fn_train(batch_train_images)
inp_test = preprocess_fn_test(batch_test_images)
if model == 'mlp':
train_predictions, logits_selfsup = mlp(inp_train, num_classes=NUM_CLASSES,
num_transf=NUM_TRANSF, is_training=True)
test_predictions, _ = mlp(inp_test, num_classes=NUM_CLASSES,
num_transf=NUM_TRANSF, is_training=False)
else: # model is resnet_v2
train_predictions, logits_selfsup = resnet_v2(inp_train, blocks,
num_classes=NUM_CLASSES,
num_transf=NUM_TRANSF,
is_training=True)
test_predictions, _ = resnet_v2(inp_test, blocks,
num_classes=NUM_CLASSES,
num_transf=NUM_TRANSF, is_training=False)
print (train_predictions)
print(logits_selfsup)
print (test_predictions)
# Get number of parameters in a scope by iterating through the trainable variables
def get_num_params(scope):
total_parameters = 0
for variable in tf.trainable_variables(scope):
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
return total_parameters
# Get number of parameters in the model.
print ("Total number of parameters of models")
print (get_num_params("resnet_v2"))
print (get_num_params("mlp"))
# classification loss using cross entropy
def classification_loss(logits=None, labels=None):
# We reduce over batch dimension, to ensure the loss is a scalar.
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
# l2 regularization on the weights
def regularization_loss(l2_regularization=1e-4):
"""Provides regularization loss if it is enabled."""
if tf.trainable_variables() and (l2_regularization > 0):
l2_reg = tf.contrib.layers.l2_regularizer(l2_regularization)
    reg_losses = list(map(l2_reg, tf.trainable_variables()))
return tf.add_n(reg_losses, name='regularization_loss')
else:
return tf.constant(0.)
# Define train and test loss functions
train_loss = classification_loss(labels=batch_train_labels, logits=train_predictions)
test_loss = classification_loss(labels=batch_test_labels, logits=test_predictions)
# Exercise 3 - Add regularization
if flag_regularize:
reg_loss = regularization_loss()
train_loss += reg_loss
# Exercise 4: Add auxiliary loss for self-supervised learning; you can use the same classification_loss fn defined above
if flag_selfsup:
print (labels_selfsup)
print (logits_selfsup)
aux_loss = classification_loss(labels=labels_selfsup, logits=logits_selfsup)
train_loss += aux_loss
# For evaluation, we look at top_k_accuracy since it's easier to interpret; normally k=1 or k=5
def top_k_accuracy(k, labels, logits):
in_top_k = tf.nn.in_top_k(predictions=tf.squeeze(logits), targets=labels, k=k)
return tf.reduce_mean(tf.cast(in_top_k, tf.float32))
def get_optimizer(step):
"""Get the optimizer used for training."""
lr_init = 0.01 # initial value for the learning rate
lr_schedule = (90e3, 100e3, 110e3) # after how many iterations to reduce the learning rate
lr_schedule = tf.cast(lr_schedule, tf.int64)
lr_factor = 0.1 # reduce learning rate by this factor
  num_reductions = tf.reduce_sum(tf.cast(step >= lr_schedule, tf.float32))
  lr = lr_init * lr_factor**num_reductions
return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
# Create a global step that is incremented during training; useful for e.g. learning rate annealing
global_step = tf.train.get_or_create_global_step()
# instantiate the optimizer
optimizer = get_optimizer(global_step)
# Get training ops
training_op = optimizer.minimize(train_loss, global_step)
if flag_batch_norm == 'ON':
# Retrieve the update ops, which contain the moving average ops
update_ops = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
# Manually add the update ops to the dependency path executed at each training iteration
training_op = tf.group(training_op, update_ops)
# Get test ops
test_acc_op = top_k_accuracy(1, batch_test_labels, test_predictions)
train_acc_op = top_k_accuracy(1, batch_train_labels, train_predictions)
# Function that takes a list of losses and plots them.
def plot_losses(loss_list, steps):
display.clear_output(wait=True)
display.display(pl.gcf())
pl.plot(steps, loss_list, c='b')
time.sleep(1.0)
```
### Define training parameters
```
# Define number of training iterations and reporting intervals
TRAIN_ITERS = 100e3 #@param
REPORT_TRAIN_EVERY = 100 #@param
PLOT_EVERY = 500 #@param
REPORT_TEST_EVERY = 1000 #@param
TEST_ITERS = 100 #@param
```
### Training the model
```
# Create the session and initialize variables
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Question: What is the accuracy of the model at iteration 0, i.e. before training starts?
train_iter = 0
losses = []
steps = []
for train_iter in range(int(TRAIN_ITERS)):
_, train_loss_np, inp_img, tr_lbl = sess.run([training_op, train_loss, inp_train, batch_train_labels])
if (train_iter % REPORT_TRAIN_EVERY) == 0:
losses.append(train_loss_np)
steps.append(train_iter)
if (train_iter % PLOT_EVERY) == 0:
pass
# plot_losses(losses, steps)
if (train_iter % REPORT_TEST_EVERY) == 0:
avg_acc = 0.0
train_avg_acc = 0.0
for test_iter in range(TEST_ITERS):
acc, acc_train = sess.run([test_acc_op, train_acc_op])
avg_acc += acc
train_avg_acc += acc_train
avg_acc /= (TEST_ITERS)
train_avg_acc /= (TEST_ITERS)
    print('Iter {0:5d}/{1:5d}: test acc {2:.2f}%, train acc {3:.2f}%'.format(
        int(train_iter), int(TRAIN_ITERS), avg_acc*100.0, train_avg_acc*100.0))
```
```
cd /content/drive/My\ Drive/lane_follower
%tensorflow_version 1.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
print(tf.__version__)
import cv2
import time
import os
import pandas as pd
import numpy as np
from numpy import array
from tqdm import tqdm
from collections import deque
from random import shuffle
import tflearn
from google.colab.patches import cv2_imshow
from collections import Counter
HEIGHT = 100
WIDTH = 150
lr = 1e-3
td = []
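# Load the recorded camera frames (image arrays) and steering commands (value arrays)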
image_1 = np.load('training_data/data_image.npy')
value_1 = np.load('training_data/data_value.npy')
image_2 = np.load('training_data/data_image_2.npy')
value_2 = np.load('training_data/data_value_2.npy')
image = np.concatenate((image_1, image_2), axis=0)
value = np.concatenate((value_1, value_2), axis=0)
print(image.shape)
def encode_choice(value):
    # Map the recorded steering commands to one-hot labels:
    # [2,0] -> front, [0,2] -> left, [0,-2] -> right, [0,0] -> no action
    choice = []
    for v in value:
        if (v == [2, 0]).all():
            choice.append([1, 0, 0, 0])
        elif (v == [0, 2]).all():
            choice.append([0, 1, 0, 0])
        elif (v == [0, -2]).all():
            choice.append([0, 0, 1, 0])
        elif (v == [0, 0]).all():
            choice.append([0, 0, 0, 1])
        else:
            print("error")
    return choice

choice = encode_choice(value)
for i in range(len(image)):
td.append([image[i],choice[i]])
df = pd.DataFrame(td)
print(Counter(df[1].apply(str)))
Left = []
Right = []
Front = []
for data in td:
image = data[0]
value = data[1]
#print(value)
if (value == [1,0,0,0]):
Front.append([image,value])
elif (value == [0,0,1,0]):
Right.append([image,value])
elif (value == [0,1,0,0]):
Left.append([image,value])
else:
pass
#print(len(Front))
Front = Front[:5000]
final_data = np.array(Front+Left+Right)
shuffle(final_data)
print(final_data.shape)
train = final_data[:-500,:]
test=final_data[-500:,:]
print(train.shape)
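# reshape the image arrays to (N, WIDTH, HEIGHT, 1), the grayscale input shape for the conv net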
X = np.array([i[0] for i in train]).reshape(-1,WIDTH,HEIGHT,1)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1,WIDTH,HEIGHT,1)
test_y = [i[1] for i in test]
print(X[1].shape)
from models import alexnet2
model = alexnet2(WIDTH,HEIGHT,lr,output=4)
model.fit({'input': X}, {'targets': Y}, n_epoch=1, validation_set=({'input': test_x}, {'targets': test_y}),
snapshot_step=2500, show_metric=True)
images = np.load('training_data/data_image_2.npy')
value = np.load('training_data/data_value_2.npy')
choice = encode_choice(value)
correct = 0
Left = []
Right = []
Front = []
for i in range(len(choice)):
if (choice[i] == [1,0,0,0]):
Front.append([images[i],choice[i]])
elif (choice[i] == [0,0,1,0]):
Right.append([images[i],choice[i]])
elif (choice[i] == [0,1,0,0]):
Left.append([images[i],choice[i]])
else:
pass
required = Left + Right
X = np.array([i[0] for i in required])
for i in range(len(required)):
    im = X[i].reshape(150,100,1)
    prediction = np.array(model.predict([im]))
    prediction = (prediction > 0.5)*1
    #print(prediction[0])
    # compare the prediction with the label stored alongside each image in `required`
    if (prediction[0] == required[i][1]).all():
        print("correct")
        correct+=1
    else:
        print("incorrect")
print(correct)
print(X.shape)
print(len(required))
model.save("final_2.model")
```
```
from datetime import date
import pandas as pd
import numpy as np
from datetime import datetime, timedelta, date
import geopandas as gpd
from pathlib import Path
import re
pd.options.display.max_columns = 100
# data from GitHub JHU; import the latest data from the time series
df_Counties_confirmed = pd.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv")
df_Counties_deaths = pd.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv")
# Root file variables
covid19Root = Path("~/Documents/GitHub/COVID-19").expanduser()
covid19Data = covid19Root / "data_tables/Data_for_UScounty_map/"
covid19Export = covid19Root / "data_tables/JHU_USCountymap_TEST/"
# the latest date; check both tables to make sure they are updated at the same pace
yesterday = '{d.month}/{d.day}/{d.year}'.format(
    d=datetime.now() - timedelta(days=1))
tdst = df_Counties_confirmed.columns[-1]
tdst1 = df_Counties_deaths.columns[-1]
expected = yesterday[:-4] + yesterday[-2:]  # e.g. '5/6/2020' -> '5/6/20'
if tdst == expected and tdst1 == expected:
print("The Confirmed and Death tables are up to date")
else:
print("Error: The Confirmed and Death tables are not up to date")
# Change date on yesterday's csv and zipped shapefile in TEST folder
oldZip = covid19Export / 'USCounties_JHUmap.zip'
oldCsv = covid19Export / 'df_Counties2020.csv'
zipDate = 'USCounties_JHUmap_' + tdst.replace("/", "-") + '.zip'
csvDate = 'df_Counties2020_' + tdst.replace("/", "-") + '.csv'
oldZip.rename(covid19Export / zipDate)
oldCsv.rename(covid19Export / csvDate)
# Exclude military and extra data added to the end
df_Counties_confirmed=df_Counties_confirmed.iloc[:3251]
df_Counties_deaths=df_Counties_deaths.iloc[:3251]
#df_Counties_confirmed.iloc[-2:]
Day14Series=[]
for i in range(1,15):
#print (i)
day=df_Counties_confirmed.columns[(i-16)]
Day14Series.append(day)
len(Day14Series)
Day14Series.extend([tdst,'FIPS', 'Admin2','Province_State','Combined_Key'])
df_Counties_confirmed=df_Counties_confirmed[Day14Series]
# Replace data with NY data
df_NY_confirmed=pd.read_csv( covid19Data / 'NY_Boroughs_Confirmed.csv')
df_NY_deaths=pd.read_csv( covid19Data / 'NY_Boroughs_Deaths.csv')
#skip the first column
df_NY_confirmed=df_NY_confirmed.iloc[:,1:]
df_NY_deaths=df_NY_deaths.iloc[:,1:]
#import NY data https://github.com/nychealth/coronavirus-data/blob/master/by-boro.csv
df_NY_new=pd.read_csv("https://raw.githubusercontent.com/nychealth/coronavirus-data/master/by-boro.csv",skipfooter=1,engine='python')
if tdst == df_NY_confirmed.columns[-1]:
print ('Error: The New York data is not updated or has already been updated')
# new_date='{dt.month}/{dt.day}/{dt:%y}'.format(dt = datetime.now())
# new_date='{dt.month}/{dt.day}/{dt:%y}'.format(dt = datetime.now()-timedelta(1)
#df_NY_confirmed[new_date]=df_NY_new['CASE_COUNT']
#df_NY_deaths[new_date]=df_NY_new['DEATH_COUNT']
else:
print ('The New York data is updated')
df_NY_confirmed[tdst]=df_NY_new['CASE_COUNT']
df_NY_deaths[tdst]=df_NY_new['DEATH_COUNT']
df_Counties_confirmed.columns
#15-day time series: overwrite the five NY borough rows with the NYC health department data
for days in df_Counties_confirmed.columns[:15]:
    for k in range(5):
        df_Counties_confirmed.loc[df_Counties_confirmed.FIPS==df_NY_confirmed.iloc[k,0],days]=df_NY_confirmed.loc[k][days]
#calculate daily new cases for the past 14 days
for i in range(1, 14):
    df_Counties_confirmed['NewCaseDay{:02d}'.format(i)] = (
        df_Counties_confirmed.iloc[:, i] - df_Counties_confirmed.iloc[:, i - 1])
df_Counties_confirmed['NewCases'] = df_Counties_confirmed.iloc[:, 14] - df_Counties_confirmed.iloc[:, 13]
#clean confirmed file
#df_Counties_confirmed.columns[-19:]
df_Counties_confirmed=df_Counties_confirmed[df_Counties_confirmed.columns[-19:]]
#calculate new deaths
pre_day=df_Counties_deaths.columns[-2]
df_Counties_deaths=df_Counties_deaths[[pre_day,tdst,'FIPS']]
for days in df_Counties_deaths.columns[:2]:
    # overwrite the five NY borough rows with the NYC health department data
    for k in range(5):
        df_Counties_deaths.loc[df_Counties_deaths.FIPS==df_NY_deaths.iloc[k,0],days]=df_NY_deaths.loc[k][days]
df_Counties_deaths['NewDeaths']=df_Counties_deaths.iloc[:,1]-df_Counties_deaths.iloc[:,0]
df_Counties_deaths.drop(columns={pre_day},inplace=True)
#Remove Nan value in FIPS
df_Counties_deaths=df_Counties_deaths[df_Counties_deaths['FIPS'].notna()]
df_Counties_confirmed=df_Counties_confirmed[df_Counties_confirmed['FIPS'].notna()]
#tdst=df_Counties_confirmed.columns[-1]
df_Counties=pd.merge(df_Counties_confirmed,df_Counties_deaths,how='left',on='FIPS',suffixes=('_confirmed','_deaths'))
#df_Counties.head(2)
df_Counties.rename(columns={tdst+'_confirmed':'Confirmed',tdst+'_deaths':'Deaths'},inplace=True)
#census and health data from ESRI
df_ACS1=pd.read_excel( covid19Data / 'ACS_2014-2018_Fields.xlsx')
df_ACSState=pd.read_excel( covid19Data / 'ACS_State_Final_ToExcel_noMOE.xlsx')
df_ACSCounty=pd.read_excel( covid19Data / 'ACS_County_Final_ToExcel_noMOE.xlsx')
df_ACSCounty1=df_ACSCounty[['FIPS', 'NAME', 'State']]
df_ACSCounty1['Age_85']=df_ACSCounty['B01001_049E']+df_ACSCounty['B01001_025E']
df_ACSCounty1['Age_80_84']=df_ACSCounty['B01001_048E']+df_ACSCounty['B01001_024E']
df_ACSCounty1['Age_75_79']=df_ACSCounty['B01001_047E']+df_ACSCounty['B01001_023E']
df_ACSCounty1['Age_70_74']=df_ACSCounty['B01001_046E']+df_ACSCounty['B01001_022E']
df_ACSCounty1['Age_65_69']=df_ACSCounty['B01001_045E']+df_ACSCounty['B01001_021E']+df_ACSCounty['B01001_044E']+df_ACSCounty['B01001_020E']
df_ACSCounty1['AgedPop']=df_ACSCounty1['Age_85']+df_ACSCounty1['Age_80_84']+df_ACSCounty1['Age_75_79']+df_ACSCounty1['Age_70_74']+df_ACSCounty1['Age_65_69']
# df_ACSCounty1.head(2)
df_Healthcare=pd.read_excel( covid19Data / 'Definitive_Healthcare_Hospital_Beds_By_County_and_Demographics.xlsx',
sheet_name= 'Sheet2',skiprows=2)
df_Healthcare1=df_Healthcare[['Row Labels', 'Sum of NUM_LICENSED_BEDS', 'Sum of NUM_STAFFED_BEDS',
'Sum of NUM_ICU_BEDS','Sum of AVG_VENTILATOR_USAGE']]
# 'ID','Sum # of Licensed Beds', 'Sum # of Staffed Beds', 'Sum # of ICU Beds','Average Average Ventilator Usage']]
df_Healthcare1.rename(columns={'Row Labels':'ID','Sum of NUM_LICENSED_BEDS':'Beds_Licensed','Sum of NUM_STAFFED_BEDS':'Beds_Staffed','Sum of NUM_ICU_BEDS':'Beds_ICU',
'Sum of AVG_VENTILATOR_USAGE':'Ventilators_Average' },inplace=True)
df_CountyHealth=pd.merge(df_ACSCounty1,df_Healthcare1,how='inner',left_on='FIPS',right_on='ID')
# df_CountyHealth.shape
#Merge JHU data with Esri County data, left join to keep all the confirmed cases
df_Counties1=pd.merge(df_Counties,df_CountyHealth,how='left',left_on='FIPS',right_on='FIPS')
#import demographic info https://data.census.gov/cedsci/ data source: https://www.ers.usda.gov/data-products/county-level-data-sets/download-data/
df_pop=pd.read_excel(covid19Data / 'PopulationEstimates.xls',skiprows=2)
df_poverty=pd.read_excel(covid19Data / 'PovertyEstimates.xls',skiprows=4)
#df_edu=pd.read_excel(covid19Data / 'Education.xls',skiprows=4)
df_eco=pd.read_excel(covid19Data / 'employment.xls',skiprows=4)
df_eco=df_eco.drop(df_eco.index[0])
df_poverty=df_poverty.drop(df_poverty.index[0])
df_pop=df_pop.drop(df_pop.index[0])
#df_eco.head(2)
#Select columns from demo data
df_pop1=df_pop[['FIPS','POP_ESTIMATE_2018']]
df_poverty1=df_poverty[['FIPStxt','POVALL_2018','PCTPOVALL_2018']]
#df_edu1=df_edu[[]]
df_eco1=df_eco[['FIPS','Unemployed_2018','Unemployment_rate_2018','Median_Household_Income_2018','Med_HH_Income_Percent_of_State_Total_2018']]
#merge demo data
demo1=pd.merge(df_pop1,df_poverty1,how='left',left_on='FIPS',right_on='FIPStxt')
demo2=pd.merge(demo1,df_eco1,how='right',left_on='FIPS',right_on='FIPS')
#merge JHU,Esri, Demo data
USCounties1=pd.merge(df_Counties1,demo2,how='left',left_on='FIPS',right_on='FIPS')
#import the most recent daily data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports
td=datetime.strftime(datetime.now()-timedelta(1), '%m-%d-%Y')
#td=datetime.strftime(datetime.now(), '%m-%d-%Y')
# print (str(td))
#df_new=pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/"+str(td)+".csv")
df_new=pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/"+str(td)+".csv")
df_USnew=df_new[df_new['Country_Region']=='US']
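# aggregate the county-level daily report into state-level totals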
dfStates=pd.pivot_table(df_USnew,values=['Confirmed', 'Deaths', 'Recovered',
'Active'],index=['Province_State'],aggfunc=np.sum)
dfStates.rename(columns={'Confirmed':'State_Confirmed','Deaths':'State_Deaths','Recovered':'State_Recovered'},inplace=True)
#dfStates['State_Testing']=999
dfStates.reset_index(inplace=True)
# dfStates.State_Recovered.sum()
#dfStates
#merge UScounties1 with state data
USCounties2=pd.merge(USCounties1,dfStates,how='outer',left_on='State',right_on='Province_State')
#import Red Cross
dfRC=pd.read_csv( covid19Data / 'Red_Cross_Centroids_for_COVID19_Public_Health_Emergency_Status_by_County_0.csv')
dfRC1=dfRC[['FIPS','Notes','Last Update', 'Local Public Emergency']]
dfRC1.loc[dfRC1['Local Public Emergency']=='Red','Local Public Emergency']= 'Govt Ordered Community Quarantine'
dfRC1.loc[dfRC1['Local Public Emergency']=='Orange','Local Public Emergency']= 'Govt Directed Social Distancing'
dfRC1.loc[dfRC1['Local Public Emergency']=='Yellow','Local Public Emergency']= 'Declared Public Health Emergency'
# dfRC1['Local Public Emergency'].unique()
dfRC1.rename(columns={'Notes':'EM_notes','Last Update':'EM_date','Local Public Emergency':'EM_type'},inplace=True)
#merge UScounties2 with RC data
USCounties3=pd.merge(USCounties2, dfRC1,how='left',left_on='FIPS',right_on='FIPS')
#import US Counties, State shp file downloaded from Esri
US_Counties=gpd.read_file( covid19Data / "JHUCounties.shp")
US_Counties.rename(columns={'NAME':'Countyname','COUNTYFP':'CountyFP'},inplace=True)
US_Counties['GEOID']=US_Counties['GEOID'].astype(float)
#import state testing data from https://covidtracking.com/api/states
dfStatesTesting=pd.read_json('https://covidtracking.com/api/v1/states/current.json')
dfStatesTesting=dfStatesTesting[['state','total','dateChecked']]
dfStatesTesting.rename(columns={'total':'State_Testing','dateChecked':'DateChecked'},inplace=True)
#Merge with state testing data
USCounties=pd.merge(US_Counties,dfStatesTesting,how='left',left_on='ST_Abbr',right_on='state')
#Import Race data
#USCounties_pre=gpd.read_file(r"C:\Work_GovEx\COVID-19\Daily Data\USCounties_JHUmap_05_05\USCounties_JHUmap.shp")
Counties_race=pd.read_csv( covid19Data / 'County_ShpHeaders.csv')
Race_Age=pd.read_csv( covid19Data / 'Race_Age.csv')
Race_Agemerge=pd.merge(Counties_race,Race_Age,how='left',left_on='GEOIDFIPS',right_on='FIPS')
USCountiesm1=pd.merge(USCounties,Race_Agemerge,how='left',left_on='GEOID',right_on='GEOIDFIPS')
USCountiesm1=USCountiesm1[['Countyname', 'GEOID', 'ST_Abbr', 'ST_ID', 'ST_Name', 'geometry',
'state', 'State_Testing', 'DateChecked', 'ObjectID', 'CountyGNIS',
'GEOIDFIPS', 'TotalPop',
'NonHispWhPop', 'BlackPop',
'AmIndop', 'AsianPop',
'PacIslPop', 'OtherPop', 'TwoMorPop',
'HispPop', 'PCPopNWh',
'PCPopBk', 'PCPopAI', 'PCPopAs',
'PPCPopPI', 'PCPopOr',
'PCPopTm', 'PCPopHL',
'racePop_total', 'White alone',
'Black or African American alone',
'American Indian and Alaska Native alone', 'Asian alone',
'Native Hawaiian and Other Pacific Islander alone',
'Some other race alone', 'Two or more races',
'Not Hispanic or Latino origin', 'Hispanic or Latino Origin',
'Age_under15', 'Age_15_24', 'Age_25_34', 'Age_35_64', 'Age_65_74',
'Age_over75', 'Agetotal' ]]
#join the Countyshp and State testing file with USCounties3
USCounties4=pd.merge(USCountiesm1,USCounties3,how='right',left_on='GEOID',right_on='FIPS')
#USCounties1=pd.merge(US_Counties,df_USnew,how='left',left_on='forjoin',right_on='Combined_Key1')
#Add calculations
USCounties4['FatalityRate']=USCounties4['Deaths']/USCounties4['Confirmed']*100
USCounties4['ConfirmedbyPop']=USCounties4['Confirmed']/USCounties4['POP_ESTIMATE_2018']*100000
USCounties4['DeathsbyPop']=USCounties4['Deaths']/USCounties4['POP_ESTIMATE_2018']*100000
USCounties4['State_FatalityRate']=USCounties4['State_Deaths']/USCounties4['State_Confirmed']*100
USCounties4['Recovered']=0 # place holder
USCounties4['Active']=0 #place holder
USCounties4['url']='infUrl' #place holder
USCounties4['Thumbnail']='placeholder' #place holder
USCounties4['ConfirmedbyPop']=USCounties4['ConfirmedbyPop'].round(2)
#select columns
USCounties4=USCounties4[['Admin2', 'Province_State_x','ST_Abbr', 'ST_ID', 'geometry',
'FIPS', 'FatalityRate', 'ConfirmedbyPop','DeathsbyPop', 'PCTPOVALL_2018',
'Unemployment_rate_2018', 'Med_HH_Income_Percent_of_State_Total_2018',
'State_FatalityRate', 'DateChecked',
'EM_type', 'EM_date','EM_notes','url', 'Thumbnail', 'Confirmed', 'Deaths',
'Age_85', 'Age_80_84', 'Age_75_79', 'Age_70_74', 'Age_65_69',
'Beds_Licensed', 'Beds_Staffed','Beds_ICU', 'Ventilators_Average',
'POP_ESTIMATE_2018','POVALL_2018', 'Unemployed_2018','Median_Household_Income_2018',
'Recovered', 'Active', 'State_Confirmed', 'State_Deaths', 'State_Recovered',
'State_Testing', 'AgedPop','NewCases','NewDeaths','TotalPop', 'NonHispWhPop', 'BlackPop', 'AmIndop',
'AsianPop', 'PacIslPop', 'OtherPop', 'TwoMorPop', 'HispPop', 'PCPopNWh',
'PCPopBk', 'PCPopAI', 'PCPopAs', 'PPCPopPI', 'PCPopOr', 'PCPopTm',
'PCPopHL','racePop_total', 'White alone',
'Black or African American alone',
'American Indian and Alaska Native alone', 'Asian alone',
'Native Hawaiian and Other Pacific Islander alone',
'Some other race alone', 'Two or more races',
'Not Hispanic or Latino origin', 'Hispanic or Latino Origin',
'Age_under15', 'Age_15_24', 'Age_25_34', 'Age_35_64', 'Age_65_74',
'Age_over75', 'Agetotal']]
USCounties4.rename(columns={'Admin2':'Countyname'},inplace=True)
USCounties4.rename(columns={'Province_State_x':'ST_Name'},inplace=True)
USCounties4['FIPS']=USCounties4['FIPS'].fillna(0).astype(int)
USCounties4['FIPS']=USCounties4['FIPS'].apply(str).str.pad(width=5, side='left', fillchar='0')
USCounties4['Age_85']=USCounties4['Age_85'].fillna(0).astype(int)
USCounties4['Age_80_84']=USCounties4['Age_80_84'].fillna(0).astype(int)
USCounties4['Age_75_79']=USCounties4['Age_75_79'].fillna(0).astype(int)
USCounties4['Age_70_74']=USCounties4['Age_70_74'].fillna(0).astype(int)
USCounties4['Age_65_69']=USCounties4['Age_65_69'].fillna(0).astype(int)
USCounties4['AgedPop']=USCounties4['AgedPop'].fillna(0).astype(int)
USCounties4['Beds_Licensed']=USCounties4['Beds_Licensed'].fillna(0).astype(int)
USCounties4['Beds_ICU']=USCounties4['Beds_ICU'].fillna(0).astype(int)
USCounties4['Beds_Staffed']=USCounties4['Beds_Staffed'].fillna(0).astype(int)
# USCounties4['NewCases']=USCounties4['NewCases'].fillna(0).astype(int)
# USCounties4['NewDeaths']=USCounties4['NewDeaths'].fillna(0).astype(int)
USCounties4[['Confirmed', 'Deaths',
'Ventilators_Average',
'POP_ESTIMATE_2018', 'POVALL_2018', 'Unemployed_2018',
'Median_Household_Income_2018', 'Recovered', 'Active',
'State_Confirmed', 'State_Deaths', 'State_Recovered',
'State_Testing']]=USCounties4[['Confirmed', 'Deaths',
'Ventilators_Average',
'POP_ESTIMATE_2018', 'POVALL_2018', 'Unemployed_2018',
'Median_Household_Income_2018', 'Recovered', 'Active',
'State_Confirmed', 'State_Deaths', 'State_Recovered',
'State_Testing']].fillna(0).astype(int)
#Add url and Thumbnail columns
fiplist=USCounties4['FIPS'].tolist()
urllist=list()
for i in fiplist:
url0='https://bao.arcgis.com/covid-19/jhu/county/'+i+'.html'
urllist.append(url0)
USCounties4['url']=urllist
USCounties4['Thumbnail']="https://coronavirus.jhu.edu/static/media/dashboard_infographic_thumbnail.png"
tdtime=datetime.strftime(datetime.now(), '%m/%d/%Y %H:%M:%S')
# print (tdtime)
USCounties4['DateChecked']=tdtime
# USCounties4['DateChecked']
#reorganize the field and export
USCounties4=USCounties4[['Countyname', 'ST_Name','ST_Abbr', 'ST_ID','geometry',
'FIPS', 'FatalityRate', 'ConfirmedbyPop','DeathsbyPop', 'PCTPOVALL_2018',
'Unemployment_rate_2018', 'Med_HH_Income_Percent_of_State_Total_2018',
'State_FatalityRate', 'DateChecked',
'EM_type', 'EM_date','EM_notes','url', 'Thumbnail', 'Confirmed', 'Deaths',
'Age_85', 'Age_80_84', 'Age_75_79', 'Age_70_74', 'Age_65_69',
'Beds_Licensed', 'Beds_Staffed','Beds_ICU', 'Ventilators_Average',
'POP_ESTIMATE_2018','POVALL_2018', 'Unemployed_2018','Median_Household_Income_2018',
'Recovered', 'Active', 'State_Confirmed', 'State_Deaths', 'State_Recovered',
'State_Testing', 'AgedPop','NewCases', 'NewDeaths','TotalPop',
'NonHispWhPop', 'BlackPop', 'AmIndop', 'AsianPop', 'PacIslPop',
'OtherPop', 'TwoMorPop', 'HispPop', 'PCPopNWh', 'PCPopBk', 'PCPopAI',
'PCPopAs', 'PPCPopPI', 'PCPopOr', 'PCPopTm', 'PCPopHL','racePop_total',
'White alone', 'Black or African American alone',
'American Indian and Alaska Native alone', 'Asian alone',
'Native Hawaiian and Other Pacific Islander alone',
'Some other race alone', 'Two or more races',
'Not Hispanic or Latino origin', 'Hispanic or Latino Origin',
'Age_under15', 'Age_15_24', 'Age_25_34', 'Age_35_64', 'Age_65_74',
'Age_over75', 'Agetotal']]
USCounties4.rename(columns={'FatalityRate':'FatalityRa','ConfirmedbyPop':'Confirmedb','DeathsbyPop':'DeathsbyPo',
'PCTPOVALL_2018':'PCTPOVALL_','Unemployment_rate_2018':'Unemployme',
'Med_HH_Income_Percent_of_State_Total_2018':'Med_HH_Inc',
'State_FatalityRate':'State_Fata', 'DateChecked':'DateChecke','Beds_Licensed':'Beds_Licen',
'Ventilators_Average':'Ventilator', 'POP_ESTIMATE_2018':'POP_ESTIMA',
'POVALL_2018':'POVALL_201', 'Unemployed_2018':'Unemployed',
'Median_Household_Income_2018':'Median_Hou',
'State_Confirmed':'State_Conf','State_Deaths':'State_Deat',
'State_Recovered':'State_Reco','State_Testing':'State_Test',
'White alone':'Wh_Alone', 'Black or African American alone':'Bk_Alone',
'American Indian and Alaska Native alone':'AI_Alone', 'Asian alone':'As_Alone',
'Native Hawaiian and Other Pacific Islander alone':'NH_Alone','Some other race alone':'SO_Alone',
'Two or more races':'Two_More','Not Hispanic or Latino origin':'Not_Hisp',
'Hispanic or Latino Origin':'NonHisp',
'Age_under15':'Age_Less15', 'Age_over75':'Age_Over75',
},inplace=True)
USCounties4.to_file( covid19Data / 'USCounties_JHUmap.shp')
#USCounties4.to_file(r'C:\Work_GovEx\COVID-19\Daily Data\USCounties_JHUmap_Race_Age_NewCases.shp')
# USCounties4.to_file(r'C:\Work_GovEx\COVID-19\Daily Data\USCounties_JHUmap.shp')
# testcheck=gpd.read_file(r'C:\Work_GovEx\COVID-19\Daily Data\USCounties_JHUmap.shp')
# testcheck[ testcheck['Countyname']=='Washington'][['ST_Name','Confirmed','Countyname','NewCases']]
```
# Update CSV
```
#data from GitHub JHU; import the latest data from the time series
df_Counties_confirmed=pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv")
df_Counties_deaths=pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv")
# Exclude military and extra data added to the end
df_Counties_confirmed=df_Counties_confirmed.iloc[:3253]
df_Counties_deaths=df_Counties_deaths.iloc[:3253]
# check the latest date
# tdst=df_Counties_confirmed.columns[-1]
# print (tdst)
#replace with NY boroughs files
dates_list=df_NY_confirmed.columns[4:]
# print (dates_list)
#df_NY_deaths=pd.read_csv(r'C:\Work_GovEx\COVID-19\Daily Data\JHU US Map_NY_KC_Duke_Nantucket Counties - Deaths.csv')
for days in dates_list:
    #print (days)
    # overwrite the five NY borough rows with the NYC health department data
    for k in range(5):
        df_Counties_confirmed.loc[df_Counties_confirmed.FIPS==df_NY_confirmed.iloc[k,0],days]=df_NY_confirmed.loc[k][days]
        df_Counties_deaths.loc[df_Counties_deaths.FIPS==df_NY_deaths.iloc[k,0],days]=df_NY_deaths.loc[k][days]
#deaths data has an extra column
df_Counties_deaths.drop(columns=['iso2'],inplace=True)
#convert table
def converttable(df):
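    # Melt the wide table (one column per date) into long format: one row per (location, date)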
cols=df.columns.tolist()
pd_list=[]
for i in range(11,df.shape[1]):
temp_cols=cols[:11]
temp_cols.append(cols[i])
# print(temp_cols)
temp_pd=df[temp_cols].copy()
temp_pd['dt']=cols[i]
temp_pd.rename(columns={cols[i]:'value'},inplace=True)
pd_list.append(temp_pd)
df_new=pd.concat(pd_list,axis=0,ignore_index=True)
return df_new
df_confirmed_new = converttable(df_Counties_confirmed)
df_deaths_new = converttable(df_Counties_deaths)
#merge confirmed and deaths data
df_confirmed_new['dt']=pd.to_datetime(df_confirmed_new['dt'])
df_deaths_new['dt']=pd.to_datetime(df_deaths_new['dt'])
# Rename cols
df_confirmed_new.rename(columns = {'value': 'confirmed'}, inplace = True)
df_deaths_new.rename(columns = {'value': 'deaths'}, inplace = True)
# Merge two tables
df_merged = df_confirmed_new.merge(df_deaths_new[['Admin2','Province_State', 'Country_Region', 'dt', 'deaths','Population']],
on = ['Admin2','Province_State', 'dt'],
how = 'inner')
df_merged = df_merged[['Admin2','Province_State', 'FIPS', 'dt', 'confirmed', 'deaths','Population']]
df_merged['FIPS']=df_merged['FIPS'].fillna(0).astype(int)
df_merged['FIPS'] = df_merged['FIPS'].apply(lambda x: '{0:0>5}'.format(x))
#merge with shpfiles for geometry info
US_Counties=gpd.read_file( covid19Data / "JHUCounties.shp")
df_merged1=pd.merge(df_merged,US_Counties,how='left',left_on='FIPS',right_on='GEOID')
#select columns and rename
df_merged1 = df_merged1[['Admin2','Province_State', 'FIPS','ST_ID','dt', 'confirmed', 'deaths','Population']]
df_merged1.rename(columns={'Admin2':'Countyname','Province_State':'ST_Name','ST_ID':'ST_ID','confirmed':'Confirmed','deaths':'Deaths'},inplace=True)
#Format FIPS
df_merged1['FIPS']=df_merged1['FIPS'].fillna(0).astype(int)
df_merged1['FIPS'] = df_merged1['FIPS'].apply(lambda x: '{0:0>5}'.format(x))
#calculate IncidenceRate
df_merged1['IncidenceRate']=df_merged1['Confirmed']/df_merged1['Population']*100000
df_merged1['IncidenceRate']=df_merged1['IncidenceRate'].round(2)
#calculate new cases
# Sort values
df_merged1.sort_values(by = ['ST_Name','FIPS', 'dt'], ascending = True, inplace = True)
# Differences
df_merged1['NewCases'] = df_merged1.groupby(by = ['FIPS']).Confirmed.diff()
df_merged1['dt']=pd.to_datetime(df_merged1['dt']).dt.date
from datetime import date
df_merged1[df_merged1['dt']==date(2020,1,24)].head()
# df_merged1['ST_ID']=df_merged1['ST_ID'].fillna(0).astype(int)
# df_merged1['ST_ID']=df_merged1['ST_ID'].apply(str).str.pad(width=2, side='left', fillchar='0')
# df_merged1['FIPS']=df_merged1['FIPS'].fillna(0).astype(int)
# df_merged1['FIPS']=df_merged1['FIPS'].apply(str).str.pad(width=5, side='left', fillchar='0')
df_merged1.to_csv(covid19Export / 'df_Counties2020.csv')
#update the new files
df_NY_confirmed.to_csv( covid19Data / 'NY_Boroughs_Confirmed.csv')
df_NY_deaths.to_csv( covid19Data / 'NY_Boroughs_Deaths.csv')
```
```
%matplotlib inline
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_pacf, plot_acf
sns.set_style('darkgrid')
df = pd.read_csv('../data/raw/arquivo_geral.csv', sep=';', parse_dates=['data'])
df.info()
new_cases = df.groupby(['data']).agg({
'casosNovos': 'sum'
})
```
## Exploratory analysis
In this section we create some visualizations of the time series and test some possible transformations of the data.
```
def plot_ts(series):
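    # 3x2 diagnostic grid: series with CI band, ACF, PACF, histogram, rolling std and rolling mean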
fig, ax = plt.subplots(3, 2, figsize=(15, 17));
_df = pd.DataFrame(index=series.index)
_df['casosNovos'] = series
_df['x'] = series.index
sns.lineplot(x='x', y='casosNovos', data=_df, ci=90, err_style='band', ax=ax[0, 0]);
ax[0, 0].set_title('time series')
plot_acf(series.dropna(), ax=ax[0, 1], lags=int(len(series)/2))
ax[0, 1].set_title('acf')
plot_pacf(series.dropna(), ax=ax[1, 1], lags=int(len(series)/2))
ax[1, 1].set_title('pacf')
series.plot.hist(bins=20, ax=ax[1, 0]);
ax[1, 0].set_title('distribution')
series.rolling('15D').std().plot(ax=ax[2, 0]);
ax[2, 0].set_title('rolling std')
series.rolling('15D').mean().plot(ax=ax[2, 1]);
ax[2, 1].set_title('rolling mean');
def plot_decomposition(series):
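    # Additive decomposition into trend / residual / seasonal components (15-day period)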
fig, (ax1,ax2,ax3) = plt.subplots(3,1, figsize=(17,8), sharex=True)
    res = seasonal_decompose(series, model='additive', freq=15)
res.trend.plot(ax=ax1)
res.resid.plot(ax=ax2)
res.seasonal.plot(ax=ax3)
ax1.set_title('Trend');
ax2.set_title('Residual');
ax3.set_title('Seasonality');
return res
# raw
ts1 = new_cases.copy()
# first difference
ts2 = new_cases.diff()
# ignore the data before the first 100 cases
first_100 = np.where(ts1.cumsum() >= 100)[0][0]
ts3 = ts1[first_100:]
# first difference of the series after the first 100 cases
ts4 = ts3.diff()
```
### Raw time series
```
plot_ts(ts1)
ts1_res = plot_decomposition(ts1);
```
## First difference of the time series
```
plot_ts(ts2)
ts2_res = plot_decomposition(ts2);
```
## Removing the initial part of the series with no cases
#### After the first 100 cases
```
plot_ts(ts3)
ts3_res = plot_decomposition(ts3);
```
## First difference of the series from the 100th case onwards
```
plot_ts(ts4)
ts4_res = plot_decomposition(ts4);
```
## Modeling
#### Stationarity check
```
series = {
'raw': ts1,
'diff1': ts2,
'after_100': ts3,
'after_100_diff1': ts4
}
for name, s in series.items():
res = sm.tsa.adfuller(s.dropna(), regression='ct')
print('%22s | %s' % (name, 'non-stationary' if res[0] > res[4]['5%'] else 'stationary'))
ts = series['after_100'].dropna()
ts.index
```
#### Train/test split
```
tr_start,tr_end = '2020-03-15', '2020-04-14'
te_start,te_end = '2020-04-13', '2020-04-20'
pred_end = '2020-04-27'
tra = ts[tr_start:tr_end].dropna()
tes = ts[te_start:te_end].dropna()
arima = sm.tsa.statespace.SARIMAX(tra,order=(0,1,2), freq='D', seasonal_order=(0,1,[],15), enforce_stationarity=False, enforce_invertibility=False,).fit() # Marco, rmse + aic
# arima = sm.tsa.statespace.SARIMAX(tra, order=(0,1,0), freq='D', seasonal_order=(0, 1,0, 17), enforce_stationarity=False, enforce_invertibility=False).fit() # vittor, rmse do teste
arima.summary()
#SARIMAX(0, 1, 0)x(0, 1, 0, 17)
from sklearn.metrics import mean_squared_error
pred_train = arima.predict(tr_start,tr_end)[1:]
print('IN TRAIN: ARIMA model MSE:{}'.format(mean_squared_error(tra[1:], pred_train)))
pred_test = arima.predict(te_start,te_end)[1:]
print('IN TEST: ARIMA model MSE:{}'.format(mean_squared_error(tes[1:], pred_test)))
pred = arima.predict(te_start, pred_end)[1:]
_, ax = plt.subplots(figsize=(12, 8), dpi=100)
pred_train.name = 'Predicted on train'
pred.name = 'Predicted out of train'
ts.columns = ['New cases']
ts.shift(1).plot(ax=ax, color='k', marker='o')
pred_train.plot(ax=ax, marker='o', color=sns.xkcd_rgb["windows blue"]);
pred.plot(ax=ax, ls='--', marker='o', color=sns.xkcd_rgb["amber"])
plt.legend();
_, ax = plt.subplots(figsize=(12, 8), dpi=100)
pred_train.name = 'Predicted on train'
pred.name = 'Predicted out of train'
total_cases = ts.copy()
total_cases.columns = ['Total cases']
total_cases.shift(1).cumsum().plot(ax=ax, color='k', marker='o')
cum_test = pred.copy()
cum_test.loc[pd.Timestamp(tr_end)] = pred_train.cumsum().values[-1]
cum_test.sort_index(inplace=True)
ts.shift(1).cumsum().values[-1]
pred_train.cumsum().plot(ax=ax, marker='o', color=sns.xkcd_rgb["windows blue"]);
cum_test.cumsum().plot(ax=ax, ls='--', marker='o', color=sns.xkcd_rgb["amber"])
plt.legend();
pred
pred = arima.predict(te_start,te_end)
pred.plot( marker='o', color=sns.xkcd_rgb["amber"])
ts['New cases'].loc[pd.Timestamp(te_start): pd.Timestamp(te_end)].plot( marker='o', color=sns.xkcd_rgb["windows blue"]);
resid = (ts['New cases'].loc[pd.Timestamp(te_start): pd.Timestamp(te_end)] - pred)
resid.plot.hist(bins=4);
sm.qqplot(resid, line ='45')
```
## Remarks
The residuals do not follow an approximately normal distribution, but this can be explained by the small number of samples in the *test set*.
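As a quick sanity check (an illustrative sketch, not part of the original analysis), SciPy's Shapiro-Wilk test can be applied to the residuals; with so few samples the test has little power, so the p-value is only a rough signal.
```
from scipy import stats

# Shapiro-Wilk normality test on the out-of-sample residuals.
# With only a handful of points the test has little power,
# so treat the p-value as a rough signal rather than a verdict.
stat, p_value = stats.shapiro(resid.dropna())
print('Shapiro-Wilk statistic = {:.3f}, p-value = {:.3f}'.format(stat, p_value))
```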
# CMFGEN
Database from John Hillier’s CMFGEN, a radiative transfer code designed to solve the radiative transfer and statistical equilibrium equations in spherical geometry.
<div class="alert alert-info">
**Note:**
In this example, the data was downloaded from the [CMFGEN website](http://kookaburra.phyast.pitt.edu/hillier/web/CMFGEN.htm) and extracted to the `/tmp/atomic` folder.
</div>
## Parsers
The CMFGEN parsers read data from text files and retrieve the information as DataFrames. Currently `osc`, `col` and `pho` files are supported.
### Levels, Lines and Collisions
Energy levels and spectral lines are stored in the `osc` files, while collision strengths are kept in the `col` files.
```
from carsus.io.cmfgen import CMFGENEnergyLevelsParser, CMFGENOscillatorStrengthsParser, CMFGENCollisionalStrengthsParser
si2_lvl = CMFGENEnergyLevelsParser('/tmp/atomic/SIL/II/16sep15/si2_osc_kurucz')
si2_osc = CMFGENOscillatorStrengthsParser('/tmp/atomic/SIL/II/16sep15/si2_osc_kurucz')
si2_col = CMFGENCollisionalStrengthsParser('/tmp/atomic/SIL/II/16sep15/si2_col')
```
The header information is stored in the `meta` attribute, for example:
```
si2_col.meta
```
And the table is stored in the `base` attribute:
```
si2_lvl.base
```
Finally, dump the data with the `to_hdf` method.
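For example, a minimal sketch (assuming `to_hdf` accepts a destination path; check the Carsus documentation for the exact signature):
```
# Minimal sketch: write the parsed table(s) and metadata to an HDF5 file.
# The output filename is an assumption for illustration.
si2_lvl.to_hdf('si2_osc_kurucz.h5')
```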
### Photoionization Cross-sections
Photoionization cross-sections are stored in the `pho` files.
```
from carsus.io.cmfgen import CMFGENPhotoionizationCrossSectionParser
si2_cross_sections = CMFGENPhotoionizationCrossSectionParser('/tmp/atomic/SIL/II/16sep15/phot_nahar_A')
si2_cross_sections.meta
```
In this case, the `base` attribute is a list containing many DataFrames.
```
type(si2_cross_sections.base)
len(si2_cross_sections.base)
```
There are three different types of photoionization cross-section tables: points, fit coefficients and Verner analytic fits for the ground state shells (not shown in this example).
```
si2_cross_sections.base[0]
si2_cross_sections.base[92]
```
### Batch Convert Files to HDF5
To convert multiple CMFGEN files to the HDF5 format, import the `hdf_dump` function.
```
from carsus.io.cmfgen import hdf_dump
hdf_dump('/tmp/atomic/', ['osc', 'OSC', 'Osc'], CMFGENEnergyLevelsParser(), chunk_size=10, ignore_patterns=['ERROR_CHK'])
```
Required parameters are `cmfgen_dir`, `patterns` and `parser`, while `chunk_size` and `ignore_patterns` are optional.
## CMFGENReader
The `CMFGENReader` mimics the structure of `GFALLReader` and provides `levels` and `lines` tables for the selected ions.
```
from carsus.io.cmfgen import CMFGENReader
```
<div class="alert alert-info">
**Note:**
Remember in Carsus `Si 0` is Si I, `Si 1` is Si II, etc.
See [Notation in Carsus](../development/notation.rst).
</div>
```
cmfgen_data = {'Si 1': {'levels': si2_lvl, 'lines': si2_osc}, }
cmfgen_reader = CMFGENReader(cmfgen_data, priority=20)
cmfgen_reader.levels
cmfgen_reader.lines
```
# Exploring different Symbol options in Magics
This notebook will help you discover lots of possibilities for plotting symbols on your maps in Magics.
Symbol plotting in Magics is the plotting of different types of symbols at selected locations. A symbol in this context is a number (the value at the location), a text string (given by the user) or a Magics marker.
The Magics marker set contains classic markers such as dot, cross and square, but also weather and cloud symbols.
You can find a list of all **msymbol** parameters [in the Magics documentation](https://confluence.ecmwf.int/display/MAGP/Symbol "Symbol parameters")
Magics has a built-in collection of meteorological symbols; all you have to do is give it the WMO code (a hedged example is sketched after the table below). A full list of WMO codes and descriptions for present/past weather, sky cover, pressure tendency and clouds can be found [on Wikipedia](https://commons.wikimedia.org/wiki/Weather_map "Weather map symbols").
Here are Magics names for meteorological symbols:
|marker type | names |
|---------|--------|
| present weather | ww_00,.., ww_99 |
| past weather | W_0,.., W_9 |
| low clouds | CL_1,.., CL_9 |
| medium clouds | CM_1,.., CM_9 |
| high clouds | CH_1,.., CH_9 |
| type of cloud| C_0,.., C_9|
| total amount of all clouds | N_0,.., N_9 |
| atmospheric pressure tendency | a_0,.., a_9 |
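For instance, a hedged sketch (it assumes the `symbol_marker_mode` and `symbol_marker_name` parameters select a marker by one of the names above; see the Magics documentation for the authoritative parameter list):
```
import Magics.macro as magics

# Illustrative sketch: request the present-weather symbol for a
# thunderstorm (WMO code 95) by name; parameter names are assumptions here.
storm_symbol = magics.msymb(
    symbol_type = "marker",
    symbol_marker_mode = "name",
    symbol_marker_name = "ww_95",
    symbol_colour = "red",
    symbol_height = 0.8)
```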
Here is the list of built-in symbol indices:

### Install Magics
If you don't have Magics installed, run the next cell to install Magics using conda.
```
# Install Magics in the current Jupyter kernel
import sys
!conda install --yes --prefix {sys.prefix} Magics
```
### Import Magics and define non-symbol parameters
To start, let's import Magics and define some **non-symbol** parameters. We will try not to change these much in the rest of the notebook.
```
import Magics.macro as magics
import numpy as np
central_europe = magics.mmap(
subpage_map_library_area = "on",
subpage_map_area_name = "central_europe",
page_id_line = "off"
)
coast = magics.mcoast()
legend = magics.mlegend(legend_display_type = "continuous")
# Different meteorological parameters we will plot in this notebook
tcc = magics.mgeo(geo_input_file_name = "../../data/tcc.gpt")
```
### Symbols on a geographical map
The simplest thing we can do is to plot the same symbol for all points in our dataset.
```
symbol = magics.msymb(
legend = "off",
symbol_type = "marker",
symbol_colour = "evergreen",
symbol_height = 1.,
symbol_marker_index = 20)
magics.plot(central_europe, tcc, symbol, coast)
```
### Symbols on a Cartesian projection
```
#Setting the cartesian view
cartesian_projection = magics.mmap(
subpage_y_position = 2.,
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'regular',
subpage_y_axis_type = 'regular',
subpage_x_min = 10.,
subpage_x_max = 40.,
subpage_y_min = 20.,
subpage_y_max = 100.,
page_id_line = "off")
#Vertical axis
vertical = magics.maxis(
axis_orientation = "vertical",
axis_type = "regular",
axis_tick_label_height = 0.4,
axis_tick_label_colour = 'navy',
axis_grid = "on",
axis_grid_colour = "grey",
axis_grid_thickness = 1,
axis_grid_line_style = "dot")
#Horizontal axis
horizontal = magics.maxis(
axis_orientation = "horizontal",
axis_type = "regular",
axis_tick_label_height = 0.4,
axis_tick_label_colour = 'navy',
axis_grid = "on",
axis_grid_colour = "grey",
axis_grid_thickness = 1,
axis_grid_line_style = "dot")
#define the data
x = np.array([15.,25.,35.])
y = np.array([30.,60.,45.])
input = magics.minput(
input_x_values = x,
input_y_values = y)
#Define the graph
symbol = magics.msymb(
symbol_type = "marker",
symbol_colour = "burgundy",
symbol_height = 1.,
symbol_marker_index = 15
)
magics.plot(cartesian_projection, vertical, horizontal, input, symbol)
```
### Attach text to a symbol
We can attach text to a symbol on its left or right side, at its top or bottom, or over the symbol itself.
```
bottom = np.array([30.,30.,30.])
left = np.array([45.,45.,45.])
right = np.array([60.,60.,60.])
top = np.array([75.,75.,75.])
middle = np.array([90.,90.,90.])
topinput = magics.minput(
input_x_values = x,
input_y_values = top)
#Define the graph
toptext = magics.msymb(
symbol_type = "marker",
symbol_colour = "navy",
symbol_text_list = ["top", "top", "top"],
symbol_height = 1.,
symbol_text_font_size = 0.8,
symbol_text_font_colour = "black",
symbol_text_position = "top",
symbol_marker_index = 15
)
leftinput = magics.minput(
input_x_values = x,
input_y_values = left)
#Define the graph
lefttext = magics.msymb(
symbol_type = "marker",
symbol_colour = "ochre",
symbol_text_list = ["left", "left", "left"],
symbol_height = 1.,
symbol_text_font_size = 0.8,
symbol_text_font_colour = "black",
symbol_text_position = "left",
symbol_marker_index = 15
)
rightinput = magics.minput(
input_x_values = x,
input_y_values = right)
#Define the graph
righttext = magics.msymb(
symbol_type = "marker",
symbol_colour = "chestnut",
symbol_text_list = ["one","two","five"],
symbol_height = 1.,
symbol_text_font_size = 0.8,
symbol_text_font_colour = "black",
symbol_text_position = "right",
symbol_marker_index = 15
)
bottominput = magics.minput(
input_x_values = x,
input_y_values = bottom)
#Define the graph
bottomtext = magics.msymb(
symbol_type = "marker",
symbol_colour = "rose",
symbol_text_list = ["bottom"],
symbol_height = 1.,
symbol_text_font_size = 0.8,
symbol_text_font_colour = "black",
symbol_text_position = "bottom",
symbol_marker_index = 15
)
centreinput = magics.minput(
input_x_values = x,
input_y_values = middle)
#Define the graph
centretext = magics.msymb(
symbol_type = "marker",
symbol_colour = "sky",
symbol_text_list = ["a", "b", "centre"],
symbol_height = 1.2,
symbol_text_font_size = 0.8,
symbol_text_font_colour = "black",
symbol_text_position = "centre",
symbol_marker_index = 15
)
magics.plot(cartesian_projection, vertical, horizontal, topinput, toptext,
leftinput, lefttext, bottominput, bottomtext,
rightinput, righttext, centreinput, centretext)
```
### Using pictograms
We can also use external images as symbols. If we don't specify `symbol_image_format`, the file extension will be used to determine the file type. Here's a simple example.
```
#Setting the cartesian view
cartesian = magics.mmap(
subpage_y_position = 2.,
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'date',
subpage_y_axis_type = 'regular',
subpage_x_date_min = "2012-03-01 12:00:00",
subpage_x_date_max = "2012-03-07 12:00:00",
subpage_y_min = 25.,
subpage_y_max = 75.,
page_id_line = "off")
#Vertical axis
vertical = magics.maxis(
axis_orientation = "vertical",
axis_type = "regular",
axis_tick_label_height = 0.4,
axis_tick_label_colour = 'navy',
axis_grid = "on",
axis_grid_colour = "grey",
axis_grid_thickness = 1,
axis_grid_line_style = "dot")
#Horizontal axis
horizontal = magics.maxis(
axis_orientation = "horizontal",
axis_type = "date",
axis_days_label_height = 0.40,
axis_months_label_height = 0.40,
axis_years_label_height = 0.50,
axis_minor_tick = "on",
axis_grid = "on",
axis_grid_colour = "grey",
axis_grid_thickness = 1,
axis_grid_line_style = "dot")
#define the data
x = ["2012-03-02 00:00:00","2012-03-03 12:00:00","2012-03-05 00:00:00"]
y = np.array([35.,45.,55.])
input = magics.minput(
input_x_type = 'date',
input_date_x_values = x,
input_y_values = y)
#Define the graph
symbols = magics.msymb(
legend = 'on',
symbol_type = "marker",
symbol_marker_mode = "image",
symbol_image_path = 'D96.png',
symbol_image_format = 'png',
symbol_colour = "red"
)
legend = magics.mlegend(
legend_user_text = "<font colour='navy' size='0.7'> Pictograms </font>",
legend_box_mode = "positional",
legend_box_y_position = 16.5,
legend_box_x_position = 20.00,
legend_box_x_length = 5.00,
legend_box_y_length = 2.00)
#To the plot
magics.plot(cartesian, vertical, horizontal, input, symbols, legend)
```
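Since the `.png` extension already identifies the file type, the note above suggests the format argument can be dropped; a shortened sketch of the same symbol definition:
```
# Sketch: omit symbol_image_format and let the .png extension determine the file type
symbols = magics.msymb(
    legend = 'on',
    symbol_type = "marker",
    symbol_marker_mode = "image",
    symbol_image_path = 'D96.png')
magics.plot(cartesian, vertical, horizontal, input, symbols, legend)
```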
| github_jupyter |
# Unity ML Agents
## Environment Basics
This notebook contains a walkthrough of the basic functions of the Python API for Unity ML Agents. For instructions on building a Unity environment, see [here](https://github.com/Unity-Technologies/ml-agents/wiki/Getting-Started-with-Balance-Ball).
### 1. Load dependencies
```
import matplotlib.pyplot as plt
import numpy as np
from unityagents import UnityEnvironment
%matplotlib inline
```
### 2. Set environment parameters
Be sure to set `env_name` to the name of the Unity environment file you want to launch.
```
env_name = "unity_environment_11_14" # Name of the Unity environment binary to launch
train_mode = True # Whether to run the environment in training or inference mode
```
### 3. Start the environment
`UnityEnvironment` launches and begins communication with the environment when instantiated.
Environments contain _brains_ which are responsible for deciding the actions of their associated _agents_. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
```
env = UnityEnvironment(file_name=env_name)
# Examine environment parameters
print(str(env))
# Set the default brain to work with
default_brain = env.brain_names[0]
brain = env.brains[default_brain]
```
### 4. Examine the observation and state spaces
We can reset the environment to be provided with an initial set of observations and states for all the agents within the environment. In ML-Agents, _states_ refer to a vector of variables corresponding to relevant aspects of the environment for an agent. Likewise, _observations_ refer to a set of relevant pixel-wise visuals for an agent.
```
# Reset the environment
env_info = env.reset(train_mode=train_mode)[default_brain]
# Examine the state space for the default brain
print("Agent state looks like: \n{}".format(env_info.states[0]))
# Examine the observation space for the default brain
for observation in env_info.observations:
    print("Agent observations look like:")
    if observation.shape[3] == 3:
        plt.imshow(observation[0,:,:,:])
    else:
        plt.imshow(observation[0,:,:,0])
    plt.show()  # render each observation; otherwise only the last imshow is displayed
```
### 5. Take random actions in the environment
Once we reset an environment, we can step it forward and provide actions to all of the agents within it. Here we simply choose random actions based on the `action_space_type` of the default brain.
```
for episode in range(10):
    env_info = env.reset(train_mode=train_mode)[default_brain]
    done = False
    episode_rewards = 0
    while not done:
        if brain.action_space_type == 'continuous':
            env_info = env.step(np.random.randn(len(env_info.agents),
                                                brain.action_space_size))[default_brain]
        else:
            env_info = env.step(np.random.randint(0, brain.action_space_size,
                                                  size=(len(env_info.agents))))[default_brain]
        episode_rewards += env_info.rewards[0]
        done = env_info.local_done[0]
    print("Total reward this episode: {}".format(episode_rewards))
```
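To get a feel for how the random policy performs over time, you can collect the per-episode totals and plot them with the matplotlib import from step 1 (a sketch; `episode_totals` is our own name, not part of the ML-Agents API):
```
# Sketch: rerun the random policy, recording the total reward of each episode
episode_totals = []
for episode in range(10):
    env_info = env.reset(train_mode=train_mode)[default_brain]
    done, total = False, 0
    while not done:
        if brain.action_space_type == 'continuous':
            action = np.random.randn(len(env_info.agents), brain.action_space_size)
        else:
            action = np.random.randint(0, brain.action_space_size, size=len(env_info.agents))
        env_info = env.step(action)[default_brain]
        total += env_info.rewards[0]
        done = env_info.local_done[0]
    episode_totals.append(total)
plt.plot(episode_totals)
plt.xlabel("Episode")
plt.ylabel("Total reward")
plt.show()
```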
### 6. Close the environment when finished
When we are finished using an environment, we can close it with the function below.
```
env.close()
```
| github_jupyter |