text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# import mpld3
# mpld3.enable_notebook()
import warnings
warnings.filterwarnings('ignore')
def aggregate_precision(data, window=100):
    """Bucket `data` into consecutive spans of `window` steps and average column 1.

    Each element of `data` is an indexable row whose first entry is a
    0-based step counter and whose second entry is the value to average.
    Returns a list of [bucket_end_step, mean_value] pairs; a trailing
    partial bucket (if its running sum is positive) is averaged over the
    number of steps it actually covers.
    """
    aggregates = []
    bucket = 1          # 1-based index of the bucket currently being filled
    running = 0.        # sum of column-1 values inside the current bucket
    final_row = [-1, -1]
    for row in data:
        final_row = row
        running += row[1]
        # A bucket closes when the (1-based) step count hits a window boundary.
        if row[0] + 1 == bucket * window:
            aggregates.append([bucket * window, running / window])
            running = 0.
            bucket += 1
    if running > 0.:
        # Partial trailing bucket: divide by the number of steps it spans.
        span = (final_row[0] + 1) - ((bucket - 1) * window)
        aggregates.append([bucket * window, running / span])
    return aggregates
```
# TRAIN
### Random-v2
```
# Load per-evaluation returns for the Random-v2 agent on the TRAIN split.
# Each eval_train_return.csv row is "<return> <step>" separated by a space.
X_random_tr = []  # evaluation step numbers
y_random_tr = []  # average returns at those steps
with open('../data/rs/random-v2/eval_train_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_random_tr.append(float(line[0]))
        X_random_tr.append(float(line[1]))
X_random_tr = np.array(X_random_tr)
y_random_tr = np.array(y_random_tr)
print X_random_tr.shape
print y_random_tr.shape
print "Mean average return: {}".format(np.mean(y_random_tr))
# Sample every 25th evaluation for a quick textual overview.
for i in range(0, y_random_tr.size, 25):
    print "{}: {}".format(i * 100, y_random_tr[i])
# precision_train.csv rows: "<step> <session_length> <precision>" (presumably;
# column order inferred from the int/float casts below -- confirm).
precision_random_tr = []
session_length_random_tr = []
with open('../data/rs/random-v2/precision_train.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2])]    # [step, precision]
        row_ = [int(line[0]), int(line[1])]     # [step, session length]
        precision_random_tr.append(row)
        session_length_random_tr.append(row_)
precision_random_tr = np.array(aggregate_precision(precision_random_tr))
session_length_random_tr = np.array(aggregate_precision(session_length_random_tr))
```
### ExpCB
```
# Same loading procedure as for Random-v2 above, for the ExpCB agent.
X_cb_tr = []
y_cb_tr = []
with open('../data/rs/expCB/eval_train_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_cb_tr.append(float(line[0]))
        X_cb_tr.append(float(line[1]))
X_cb_tr = np.array(X_cb_tr)
y_cb_tr = np.array(y_cb_tr)
print X_cb_tr.shape
print y_cb_tr.shape
print "Mean average return: {}".format(np.mean(y_cb_tr))
for i in range(0, y_cb_tr.size, 25):
    print "{}: {}".format(i * 100, y_cb_tr[i])
precision_cb_tr = []
session_length_cb_tr = []
with open('../data/rs/expCB/precision_train.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2])]    # [step, precision]
        row_= [int(line[0]), int(line[1])]      # [step, session length]
        precision_cb_tr.append(row)
        session_length_cb_tr.append(row_)
precision_cb_tr = np.array(aggregate_precision(precision_cb_tr))
session_length_cb_tr = np.array(aggregate_precision(session_length_cb_tr))
```
### ExpFM
```
# Same loading procedure as above, for the ExpFM agent.
X_fm_tr = []
y_fm_tr = []
with open('../data/rs/expFM/eval_train_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_fm_tr.append(float(line[0]))
        X_fm_tr.append(float(line[1]))
X_fm_tr = np.array(X_fm_tr)
y_fm_tr = np.array(y_fm_tr)
print X_fm_tr.shape
print y_fm_tr.shape
print "Mean average return: {}".format(np.mean(y_fm_tr))
for i in range(0, y_fm_tr.size, 25):
    print "{}: {}".format(i * 100, y_fm_tr[i])
precision_fm_tr = []
session_length_fm_tr = []
with open('../data/rs/expFM/precision_train.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2])]    # [step, precision]
        row_= [int(line[0]), int(line[1])]      # [step, session length]
        precision_fm_tr.append(row)
        session_length_fm_tr.append(row_)
precision_fm_tr = np.array(aggregate_precision(precision_fm_tr))
session_length_fm_tr = np.array(aggregate_precision(session_length_fm_tr))
```
### ExpCF
```
# Same loading procedure as above, for the ExpCF agent.  Note: no session
# length array is built here, and the precision rows keep a third column
# (session length) that aggregate_precision ignores.
X_cf_tr = []
y_cf_tr = []
with open('../data/rs/expCF/eval_train_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_cf_tr.append(float(line[0]))
        X_cf_tr.append(float(line[1]))
X_cf_tr = np.array(X_cf_tr)
y_cf_tr = np.array(y_cf_tr)
print X_cf_tr.shape
print y_cf_tr.shape
print "Mean average return: {}".format(np.mean(y_cf_tr))
for i in range(0, y_cf_tr.size, 25):
    print "{}: {}".format(i * 100, y_cf_tr[i])
precision_cf_tr = []
with open('../data/rs/expCF/precision_train.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2]), int(line[1])]
        precision_cf_tr.append(row)
precision_cf_tr = np.array(aggregate_precision(precision_cf_tr))
```
# Graph
```
# Colour palette shared by all figures below.
# cb_color="#C68C00"
cb_color="#3E2900"
cb_random_color="red"
fm_color="#117E4F"
cf_color="#596FC8"
import matplotlib.patheffects as path_effects
# Figure 1: training return curves for all agents.
fig = plt.figure(figsize=(24,6))
axes = fig.add_subplot(1, 2, 1)
plt.subplot(1,2,1)
axes.plot(X_random_tr, y_random_tr, label='Random agent', color=cb_random_color, linestyle='solid', linewidth=1.5)
axes.plot(X_cb_tr, y_cb_tr, label='DRL-KNN-CB', color=cb_color, linestyle='solid', linewidth=1.5)
axes.plot(X_cf_tr, y_cf_tr, label='DRL-KNN-CF', color=cf_color, linestyle='solid', linewidth=1.5)
axes.plot(X_fm_tr, y_fm_tr, label='DRL-FM', color=fm_color, linestyle='solid', linewidth=1.5)
# axes.plot(X_t, y_t, label='test CB', color="black", linestyle='--', linewidth=3.0)
# axes.plot(X_m, y_m, label='CF-knn 5%', color="red", linestyle='solid', linewidth=1.0)
# Dashed horizontal reference line at return = 70 for the first 8000 steps.
axes.plot([a for a in np.arange(8000)], [70 for a in np.arange(8000)], color="black", linestyle='--', linewidth=0.5)
plt.xlabel('Steps')
plt.ylabel('Return')
plt.xlim([0, 20000])
plt.legend(loc=1)
# Figure 2: window-averaged training precision for all agents.
fig_p = plt.figure(figsize=(24,6))
axes_p = fig_p.add_subplot(1, 2, 1)
plt.subplot(1,2,1)
axes_p.plot(precision_random_tr[:, 0], precision_random_tr[:, 1], label='Random agent', color=cb_random_color, linestyle='solid', linewidth=1.5)
axes_p.plot(precision_cb_tr[:, 0], precision_cb_tr[:, 1], label='DRL-KNN-CB', color=cb_color, linestyle='solid', linewidth=1.5)
axes_p.plot(precision_cf_tr[:, 0], precision_cf_tr[:, 1], label='DRL-KNN-CF', color=cf_color, linestyle='solid', linewidth=1.5)
axes_p.plot(precision_fm_tr[:, 0], precision_fm_tr[:, 1], label='DRL-FM', color=fm_color, linestyle='solid', linewidth=1.5)
plt.xlabel('Steps')
plt.ylabel('Average Precision')
plt.xlim([0, 20000])
plt.legend(loc=4)
```
### Precision@10
```
# Parse precision@10 from log files.  Sessions are delimited by marker
# lines starting with "<<==TRAIN" / "<<==TEST"; lines in between carry a
# per-item hit flag in column 1.  Precision@10 = (sum of hits) / 10.
# NOTE(review): when a marker is reached, the accumulated session is
# attributed to the *new* marker's type, and the final session in the
# file is never flushed -- confirm both are intended.
train = "<<==TRAIN"
test = "<<==TEST"
precision_at_tr = []
precision_at_t = []
session = []
is_train = False
is_test = False
with open('../data/rs/expFM/precision_logs.log', mode='r') as f:
    for row in f:
        is_train = row.startswith(train)
        is_test = row.startswith(test)
        if is_train or is_test:
            if is_train and len(session) > 0:
                precision_at_tr.append(np.sum(session) / 10.)
            if is_test and len(session) > 0:
                precision_at_t.append(np.sum(session) / 10.)
            session = []
            continue
        else:
            line = row.split(" ")
            # print line[1]
            session.append(int(line[1]))
# np.savetxt("vector", precision_at_tr)
np.max(precision_at_tr)
precision_at_tr = np.array(precision_at_tr)
print precision_at_tr.shape
# Pair each session's precision with a 1-based step index, then window-average.
steps = np.arange(1, len(precision_at_tr) + 1)
print steps.shape
data_p_at_tr = np.stack((steps, precision_at_tr), axis=1)
aggregate_p_at_tr = aggregate_precision(data_p_at_tr, window=100)
aggregate_p_at_tr = np.array(aggregate_p_at_tr)
aggregate_p_at_tr.shape
# Same parsing for the ExpCB logs ("c" prefix = CB).
cprecision_at_tr = []
cprecision_at_t = []
session = []
is_train = False
is_test = False
with open('../data/rs/expCB/precision_logs.log', mode='r') as f:
    for row in f:
        is_train = row.startswith(train)
        is_test = row.startswith(test)
        if is_train or is_test:
            if is_train and len(session) > 0:
                cprecision_at_tr.append(np.sum(session) / 10.)
            if is_test and len(session) > 0:
                cprecision_at_t.append(np.sum(session) / 10.)
            session = []
            continue
        else:
            line = row.split(" ")
            # print line[1]
            session.append(int(line[1]))
# np.savetxt("vector", precision_at_tr)
np.max(cprecision_at_tr)
cprecision_at_tr = np.array(cprecision_at_tr)
print cprecision_at_tr.shape
steps = np.arange(1, len(cprecision_at_tr) + 1)
print steps.shape
data_p_at_tr = np.stack((steps, cprecision_at_tr), axis=1)
caggregate_p_at_tr = aggregate_precision(data_p_at_tr, window=100)
caggregate_p_at_tr = np.array(caggregate_p_at_tr)
caggregate_p_at_tr.shape
# Plot windowed precision@10 for the CB and FM agents.
fig_p_at = plt.figure(figsize=(24,6))
axes_p_at = fig_p_at.add_subplot(1, 2, 1)
plt.subplot(1,2,1)
axes_p_at.plot(caggregate_p_at_tr[:, 0], caggregate_p_at_tr[:, 1], label='DRL-KNN-CB', color=cb_color, linestyle='solid', linewidth=1.5)
axes_p_at.plot(aggregate_p_at_tr[:, 0], aggregate_p_at_tr[:, 1], label='DRL-FM', color=fm_color, linestyle='solid', linewidth=1.5)
plt.xlabel('Steps')
plt.ylabel('Precision@10')
plt.xlim([0, 20000])
plt.legend(loc=4)
```
### Session Length
```
# Filled-area plot of window-averaged session lengths per agent.
fig_s = plt.figure(figsize=(24,6))
axes_s = fig_s.add_subplot(1, 2, 1)
plt.subplot(1,2,1)
axes_s.fill(session_length_random_tr[:, 0], session_length_random_tr[:, 1], label='Random agent', color='black', linestyle='solid', linewidth=1.5, alpha=0.1)
axes_s.fill(session_length_cb_tr[:, 0], session_length_cb_tr[:, 1], label='DRL-KNN-CB', color=cb_color, linestyle='solid', linewidth=1.5, alpha=0.4)
axes_s.fill(session_length_fm_tr[:, 0], session_length_fm_tr[:, 1], label='DRL-FM', color=fm_color, linestyle='solid', linewidth=1.5, alpha=0.6)
plt.xlabel('Steps')
plt.ylabel('Average number of items per user session')
plt.xlim([0, 20000])
plt.legend(loc=3)
```
# TEST
### Random-v2
```
Xt_random_tr = []
yt_random_tr = []
with open('../data/rs/random-v2/eval_test_return.csv', mode='r') as f:
for row in f:
line = row.split(" ")
yt_random_tr.append(line[0])
Xt_random_tr.append(line[1])
Xt_random_tr = np.array(Xt_random_tr)
yt_random_tr = np.array(yt_random_tr)
print Xt_random_tr.shape
print yt_random_tr.shape
for i in range(0, yt_random_tr.size, 25):
print "{}: {}".format(i * 100, yt_random_tr[i])
precision_random_t = []
with open('../data/rs/random-v2/precision_test.csv', mode='r') as f:
for row in f:
line = row.split(" ")
row = [int(line[0]), float(line[2]), int(line[1])]
precision_random_t.append(row)
precision_random_t = np.array(aggregate_precision(precision_random_t))
```
### ExpCB
```
# ExpCB agent, TEST split (same format as the train loaders above).
X_cb_t = []
y_cb_t = []
with open('../data/rs/expCB/eval_test_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_cb_t.append(float(line[0]))
        X_cb_t.append(float(line[1]))
X_cb_t = np.array(X_cb_t)
y_cb_t = np.array(y_cb_t)
print X_cb_t.shape
print y_cb_t.shape
print "Mean average return: {}".format(np.mean(y_cb_t))
for i in range(0, y_cb_t.size, 25):
    print "{}: {}".format(i * 100, y_cb_t[i])
precision_cb_t = []
with open('../data/rs/expCB/precision_test.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2]), int(line[1])]
        precision_cb_t.append(row)
precision_cb_t = np.array(aggregate_precision(precision_cb_t))
# Spot-check one aggregated bucket.
precision_cb_t.shape
precision_cb_t[223]
```
### ExpFM
```
# ExpFM agent, TEST split (same format as the train loaders above).
X_fm_t = []
y_fm_t = []
with open('../data/rs/expFM/eval_test_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_fm_t.append(float(line[0]))
        X_fm_t.append(float(line[1]))
X_fm_t = np.array(X_fm_t)
y_fm_t = np.array(y_fm_t)
print X_fm_t.shape
print y_fm_t.shape
print "Mean average return: {}".format(np.mean(y_fm_t))
for i in range(0, y_fm_t.size, 25):
    print "{}: {}".format(i * 100, y_fm_t[i])
precision_fm_t = []
with open('../data/rs/expFM/precision_test.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2]), int(line[1])]
        precision_fm_t.append(row)
precision_fm_t = np.array(aggregate_precision(precision_fm_t))
```
### ExpCF
```
# ExpCF agent, TEST split (same format as the train loaders above).
X_cf_t = []
y_cf_t = []
with open('../data/rs/expCF/eval_test_return.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        y_cf_t.append(float(line[0]))
        X_cf_t.append(float(line[1]))
X_cf_t = np.array(X_cf_t)
y_cf_t = np.array(y_cf_t)
print X_cf_t.shape
print y_cf_t.shape
print "Mean average return: {}".format(np.mean(y_cf_t))
for i in range(0, y_cf_t.size, 25):
    print "{}: {}".format(i * 100, y_cf_t[i])
precision_cf_t = []
with open('../data/rs/expCF/precision_test.csv', mode='r') as f:
    for row in f:
        line = row.split(" ")
        row = [int(line[0]), float(line[2]), int(line[1])]
        precision_cf_t.append(row)
precision_cf_t = np.array(aggregate_precision(precision_cf_t))
precision_cf_t.shape
```
# Graph
```
# Test-split counterparts of the training figures: returns and precision.
figt = plt.figure(figsize=(24,6))
axest = figt.add_subplot(1, 2, 1)
plt.subplot(1,2,1)
axest.plot(Xt_random_tr, yt_random_tr, label='CB-random agent', color=cb_random_color, linestyle='solid', linewidth=1.5)
axest.plot(X_cb_t, y_cb_t, label='DRL-KNN-CB', color=cb_color, linestyle='solid', linewidth=1.5)
axest.plot(X_cf_t, y_cf_t, label='DRL-KNN-CF', color=cf_color, linestyle='solid', linewidth=1.5)
axest.plot(X_fm_t, y_fm_t, label='DRL-FM', color=fm_color, linestyle='solid', linewidth=1.5)
# axes.plot(X_t, y_t, label='test CB', color="black", linestyle='--', linewidth=3.0)
# axes.plot(X_m, y_m, label='CF-knn 5%', color="red", linestyle='solid', linewidth=1.0)
# Dashed horizontal reference line at return = 70 for the first 8000 steps.
axest.plot([a for a in np.arange(8000)], [70 for a in np.arange(8000)], color="black", linestyle='--', linewidth=0.5)
plt.xlabel('Steps')
plt.ylabel('Return')
plt.xlim([0, 20000])
plt.legend(loc=1)
fig_pt = plt.figure(figsize=(24,6))
axes_pt = fig_pt.add_subplot(1, 2, 1)
plt.subplot(1,2,1)
axes_pt.plot(precision_random_t[:, 0], precision_random_t[:, 1], label='Random agent', color=cb_random_color, linestyle='solid', linewidth=1.5)
axes_pt.plot(precision_cb_t[:, 0], precision_cb_t[:, 1], label='DRL-KNN-CB', color=cb_color, linestyle='solid', linewidth=1.5)
axes_pt.plot(precision_cf_t[:, 0], precision_cf_t[:, 1], label='DRL-KNN-CF', color=cf_color, linestyle='solid', linewidth=1.)
axes_pt.plot(precision_fm_t[:, 0], precision_fm_t[:, 1], label='DRL-FM', color=fm_color, linestyle='solid', linewidth=1.5)
plt.xlabel('Steps')
plt.ylabel('Average Precision')
plt.xlim([0, 20000])
plt.legend(loc=4)
```
| github_jupyter |
```
import torch
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
from matplotlib import animation
from IPython.display import HTML
import math
from scipy import ndimage
from model_lcrn import LRCN
from model_seq_cnn import SequentialCNN
from model_trainer import train_model
from data_loader import VideoDataset
import data_utils
from pprint import pprint
import data_def
import metrics
# np array with shape (frames, height, width, channels)
def show_video(video, start=0, end=25):
    """Display frames [start, end) of a channel-first video as one tiled image.

    `video` is an np array shaped (frames, channels, height, width); frames
    are laid out row-major on a near-square grid.  3-channel frames have
    their first and third channels swapped for display (BGR -> RGB);
    single-channel frames are broadcast to gray.
    """
    side = math.ceil(math.sqrt(end - start))
    plt.figure(figsize=(20, 10))
    _, frame_h, frame_w = video[0].shape
    canvas = np.zeros((frame_h * side, frame_w * side, 3))
    for pos in range(start, end):
        tile_r, tile_c = divmod(pos - start, side)
        # channels-first -> channels-last for imshow
        frame = np.moveaxis(video[pos], 0, -1).copy()
        if frame.shape[2] == 3:  # if not grayscale, switch R and B
            frame = frame[:, :, ::-1].copy()
        canvas[tile_r * frame_h : (tile_r + 1) * frame_h,
               tile_c * frame_w : (tile_c + 1) * frame_w] = frame
    plt.imshow(canvas.astype('uint8'))
```
### Preparation
We build a custom dataloader. It will return a new sample clip drawn uniformly from all possible clips of L frames from all the videos combined.
```
# Dataset of 30-frame clips sampled from a single demo video; labels are
# converted to the expanded-array representation.
video_dataset = VideoDataset(
    '../data/video_small',
    '../data/label_aligned',
    clip_length=30,
    verbose_init=True,
    label_conversion_func=data_def.sparse_to_expanded_array,
    video_filenames=['kevin_random_moves_quick.mp4']
)
```
Now let's display the frames:
```
# Display the first clip's frames, then check CUDA availability.
show_video(video_dataset[0][0])
torch.cuda.is_available()
```
## Baseline Model
Now it's time to build our model based on the paper "A Fast Statistical Approach for Human Activity Recognition". We take the delta frames and calculate the scalar features. Then we'll pass these features to a simple RNN. We will include the preprocess step by extending the dataloader, and we will use a LSTM with FC layer for the model.
```
def preprocess(video):
    """Reduce a clip to 5 scalar motion features per (diff-frame, channel).

    Differences consecutive frames of a (frames, channels, H, W) array and,
    for each absolute-difference frame/channel, computes the x/y centre of
    mass of the motion energy, two x/y deviation terms, and the mean
    intensity.  Returns an array of shape (frames - 1, channels, 5).
    """
    #video = dataset[0]
    #print(video[0].shape)
    diff_video = np.abs(np.diff(video, axis=0))
    F, C, H, W = diff_video.shape
    # Transposed copy so the same reduction pattern works along the y axis.
    diff_video_t = diff_video.transpose((0, 1, 3, 2))
    X_center_of_mass = np.sum(diff_video * np.arange(W), axis=(2, 3)) \
        / np.sum(diff_video, axis=(2, 3))
    Y_center_of_mass = np.sum(diff_video_t * np.arange(H), axis=(2, 3)) \
        / np.sum(diff_video_t, axis=(2, 3))
    # NOTE(review): these "mean abs dev" terms take no absolute value and
    # subtract the centre of mass from pixel *intensities* rather than from
    # coordinates -- confirm against the paper's feature definition.
    X_mean_abs_dev = np.sum( (diff_video - X_center_of_mass.reshape(F, C, 1, 1)) * np.arange(W), axis=(2, 3)) \
        / np.sum(diff_video, axis=(2, 3))
    Y_mean_abs_dev = np.sum( (diff_video_t - Y_center_of_mass.reshape(F, C, 1, 1)) * np.arange(H), axis=(2, 3)) \
        / np.sum(diff_video_t, axis=(2, 3))
    intensity = np.mean(diff_video, axis=(2, 3))
    return np.stack((X_center_of_mass,
                     Y_center_of_mass,
                     X_mean_abs_dev,
                     Y_mean_abs_dev,
                     intensity)
                    ,axis=-1)
#print([ ndimage.measurements.center_of_mass(diff_video[i][0]) for i in range(20)])
#diff_video = diff_video.reshape(F, 1, H, W)
#show_video(diff_video)
#print("total number of samples", dataset.len)
class BaselineVideoDataset(VideoDataset):
    """VideoDataset variant that yields the 5 scalar motion features
    computed by `preprocess` instead of raw frames."""
    def __getitem__(self, idx):
        x, y, l = super().__getitem__(idx)
        # Frame differencing drops one timestep, so labels and length are
        # shifted/shortened by one to stay aligned with the features.
        # TODO: maybe trim y[0] so that x and y dimension aligns ?
        return preprocess(x), y[1:], l - 1
# B = batch size, L = clip length (30 + 1 so differencing leaves 30 steps),
# C = number of image channels.
B, L, C = 5, 30 + 1, data_utils.NUM_CHANNEL
# Train/dev split is done by source video to avoid clip leakage.
train_files = [
    'kevin_random_moves_quick.mp4',
    'kevin_random_moves_quick_2.mp4',
    'kevin_random_moves_quick_3.mp4',
    'kevin_random_moves_quick_4.mp4',
    'kevin_random_moves_slow.mp4',
    'kevin_rotate_1.mp4',
    'kevin_simple_shuffle_1.mp4',
    'kevin_single_moves_2.mp4',
    'kevin_single_solve_1.mp4',
    'kevin_solve_play_1.mp4',
    'kevin_solve_play_10.mp4',
    'kevin_solve_play_11.mp4',
    'kevin_solve_play_12.mp4',
    'kevin_solve_play_13.mp4',
    'kevin_solve_play_2.mp4',
    'kevin_solve_play_3.mp4',
    'kevin_solve_play_6.mp4',
    'kevin_solve_play_7.mp4',
    'kevin_solve_play_8.mp4',
    'kevin_solve_play_9.mp4',
    'zhouheng_cfop_solve.mp4',
    'zhouheng_oll_algorithm.mp4',
    'zhouheng_pll_algorithm_fast.mp4',
    'zhouheng_rotation.mp4',
    'zhouheng_scramble_01.mp4',
    'zhouheng_scramble_03.mp4',
    'zhouheng_weird_turns.mp4',
]
dev_files = [
    'kevin_single_moves_1.mp4',
    'kevin_solve_play_5.mp4',
    'zhouheng_scramble_02.mp4',
]
train_dataset = BaselineVideoDataset(
    '../data/video_small',
    '../data/label_aligned',
    clip_length=L,
    verbose_init=True,
    label_conversion_func=data_def.sparse_to_expanded_array,
    video_filenames=train_files
)
dev_dataset = BaselineVideoDataset(
    '../data/video_small',
    '../data/label_aligned',
    clip_length=L,
    verbose_init=True,
    label_conversion_func=data_def.sparse_to_expanded_array,
    video_filenames=dev_files
)
H, W = train_dataset.frame_size()
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=B, num_workers=1,
)
dev_loader = torch.utils.data.DataLoader(
    dev_dataset, batch_size=B, num_workers=1,
)
```
Again, let's display some frames of the diff data:
```
class BaselinePredictor(torch.nn.Module):
    """LSTM baseline over per-frame scalar motion features.

    Each timestep is flattened to a 15-dim vector (5 features x 3
    channels, see `preprocess`), run through a stacked LSTM, and mapped
    to per-class scores by a single linear layer.
    """
    def __init__(self, lstm_hidden_size, lstm_num_layers, num_classes):
        super(BaselinePredictor, self).__init__()
        # 5 scalar features per channel x 3 channels.
        self.input_dim = input_dim = 5 * 3
        self.lstm_hidden_size = lstm_hidden_size
        self.num_classes = num_classes
        self.lstm = torch.nn.LSTM(
            input_size=input_dim,
            hidden_size=lstm_hidden_size,
            num_layers=lstm_num_layers,
            bias=True,
            batch_first=True,  # so that input is (B, L, input_dim)
            dropout=0,  # dont use dropout between lstm layers,
            bidirectional=False
        )
        self.fc = torch.nn.Linear(
            in_features=lstm_hidden_size,
            out_features=num_classes,
            bias=True
        )
        # Last (h, c) LSTM state; reusable across calls for streaming input.
        self.prev_lstm_state = None
    def forward(self, x, use_prev_lstm_state=False):
        """Return per-timestep class scores of shape (batch, steps, num_classes).

        :param x: tensor shaped (batch, steps, ...) whose trailing
            dimensions flatten to input_dim (e.g. (B, L-1, 3, 5))
        :param use_prev_lstm_state: continue from the state left by the
            previous call instead of a fresh zero state
        """
        lstm_state = self.prev_lstm_state if use_prev_lstm_state else None
        # Bug fix: derive batch/sequence sizes from the input itself; the
        # original hard-coded the notebook globals B and L, which broke for
        # any other batch size (e.g. the smaller final batch of an epoch).
        batch, steps = x.shape[0], x.shape[1]
        x = x.reshape(batch, steps, self.input_dim)
        x, self.prev_lstm_state = self.lstm(x, lstm_state)
        x = x.reshape(batch * steps, self.lstm_hidden_size)
        # x=(batch * steps, lstm_hidden_size)
        x = self.fc(x)
        x = x.reshape(batch, steps, self.num_classes)
        return x
import os
# Work around duplicate OpenMP runtime loads (common with PyTorch setups).
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Sanity-check the feature shape and the label distribution.
print(train_dataset[0][0].shape)
pprint(train_dataset.get_label_counts())
model = BaselinePredictor(
    lstm_hidden_size=20,
    lstm_num_layers=2,
    num_classes=train_dataset.num_class(),
)
print(model)
optimizer = torch.optim.Adam(model.parameters())
def on_batch(i, x, y, scores):
    """Training callback: every 10 batches, print the argmaxed predictions
    for the first sample, its ground truth, and the mean edit distance.

    :param i: batch index
    :param x: input batch (unused here)
    :param y: label batch
    :param scores: model output scores, argmaxed over the class dimension
    """
    if i % 10 == 0:
        # print(y.shape)
        # print(scores.shape)
        yhat = torch.argmax(scores[0], dim=-1).cpu().numpy()
        # yhat_collapsed = data_def.to_collapsed_string_list(yhat)
        # y_collapsed = data_def.to_collapsed_string_list(y[0].cpu().numpy())
        # print(yhat_collapsed)
        # print(y_collapsed)
        print()
        # data_utils.array_to_video_view(
        #     x[0].cpu().numpy(), y[0].cpu().numpy())
        print(yhat)
        print(y[0].cpu().numpy())
        print('mean edit dist')
        print(metrics.sum_edit_distance(scores, y) / len(y))
# Train the baseline for one epoch, tracking edit distance as an extra metric.
results = train_model(
    model,
    train_dataloader=train_loader,
    optimizer=optimizer,
    criterion_name='cross_entropy',
    #dev_dataloader=dev_loader,
    # test_dataloader=dev_loader,
    num_epoch=1,
    on_batch=on_batch,
    additional_metrics={
        'edit_distance': metrics.sum_edit_distance
    }
)
print(results)
# Bug fix: the cell originally ended with a call to `train_lcrn()`, which
# is not defined or imported anywhere in this notebook and raised a
# NameError after training completed.
```
| github_jupyter |
```
from utils import *
from preprocessing import *
from cnn_model import *
SIZE = 256  # input MR image side length in pixels
'''
# loading data and pre-process
'''
# original image, binary mask, contour(self-defined class)
img_train, mask_train, contour_train = prepareDataset(TRAIN_CONTOUR_PATH, TRAIN_IMG_PATH)
img_test, mask_test, contour_test = prepareDataset(TEST_CONTOUR_PATH, TEST_IMG_PATH)
img_val, mask_val, contour_val = prepareDataset(VAL_CONTOUR_PATH, VAL_IMG_PATH)
#Region of interest, aka bounding box computed from the contour
ROI_train = get_ROI(contour_train)
ROI_test = get_ROI(contour_test)
ROI_val = get_ROI(contour_val)
#Re-organize the data for CNN
# Train and validation splits are pooled for CNN training.
img_tv = np.concatenate((img_train, img_val), axis = 0)
roi_tv = np.concatenate((ROI_train, ROI_val), axis = 0)
print(img_tv.shape)
print(roi_tv.shape)
X, Y = reformDataXY(img_tv, roi_tv)
X_test, Y_test = reformDataXY(img_test, ROI_test)
#Get the result from the first step (CNN):
cnn_data = {'X_train': X, 'X_test': X_test, 'Y_train': Y, 'Y_test': Y_test}
y_pred = run_cnn(cnn_data)
# Crop each test image around the predicted ROI centre.
cropped = get_cropped(img_test, y_pred, win_size = 100)
print(y_pred.shape)
#Get result from Stacked AE
#Get result from Stacked AE
def get_mask_pred(img,y_pred):
    """Crop each ground-truth mask to the bounding box predicted by the CNN.

    :param img: array of masks, one per test image
    :param y_pred: CNN predictions; channel 0 is the ROI heatmap
    :return: (n, 80, 80, 1) array of cropped masks
    """
    n = img.shape[0]
    mask_contour = np.zeros((n, 80, 80, 1))
    for i in range(y_pred.shape[0]):
        pred = y_pred[i, 0, :,:]
        [x_min, x_max, y_min, y_max] = get_bbox_single(pred)
        # NOTE(review): this assumes the predicted box is exactly 80x80;
        # any other crop size would fail to broadcast into mask_contour --
        # confirm get_bbox_single returns a fixed-size box.
        mask_contour[i] = img[i][x_min:x_max, y_min:y_max]
    return mask_contour
mask_contour =get_mask_pred(mask_test,y_pred)
print(mask_contour.shape)
def open_data_AE(y_pred):
    """
    Open dataset from the output of the CNN and
    unroll it as 64*64 = vector of 4096 elements
    :param y_pred: CNN output
    :return: input AE, output
    """
    input_AE = []
    contour_experts = []
    for j in range(y_pred.shape[0]):
        in_AE = cv2.resize(cropped[j],(64 , 64))
        # Bug fix: the original passed cv2.INTERSECT_NONE (a rectangle-
        # intersection flag) as the interpolation argument.  Both constants
        # equal 0, so runtime behaviour is unchanged, but the intended
        # interpolation flag is cv2.INTER_NEAREST.
        contour = cv2.resize(mask_contour[j], (64,64), interpolation = cv2.INTER_NEAREST)
        input_AE.append(in_AE)
        contour_experts.append(contour)
    return np.array(input_AE).reshape((-1, 64*64)), np.array(contour_experts).reshape((-1, 64*64))
# Build the AE training pair from the CNN crops and expert contours.
X_train, Y_train = open_data_AE(y_pred)
X_train.shape, Y_train.shape
from StackedAeModel import SAE
#apply stacked auto encoder to preprocessed data
h, model = SAE(X_train,Y_train)
def prediction_plot(X, model, idx):
    """
    Compute the Inferred shape binary mask using the trained stacked AE model
    :param X: dataset to predict
    :param model: trained AE model
    :param idx: index of the particular picture to return
    :return: inferred shape binary mask, infered shape on the MR image
    """
    contours = model.predict(X)
    contour = contours[idx].reshape((64,64))
    # thresholding
    # Bug fix: the original passed cv2.INTERSECT_NONE as the threshold
    # type.  Both constants equal 0, so runtime behaviour is unchanged,
    # but the intended flag is cv2.THRESH_BINARY.
    binary = cv2.threshold(contour, 0, 1, cv2.THRESH_BINARY)
    return binary[1], binary[1]*X[idx].reshape(64,64)
#n is number of test images
n = img_test.shape[0]
pred_mask,pred_img=[],[]
for i in range(n):
bin_mask,img = prediction_plot(X_train,model,i)
pred_mask.append(bin_mask)
pred_img.append(img)
pred_mask = np.array(pred_mask)
pred_img = np.array(pred_img)
print (pred_mask.shape)
idx = 66
X_input = img_test[idx].reshape((256,256))
X_roi = X_train[idx].reshape((64,64))
bin_pred = pred_mask[idx]
from skimage import measure
contours_pred = measure.find_contours(bin_pred, 0.5)
contour_pred = contours_pred[np.argmax([k.shape[0] for k in contours_pred])]
f, ax = plt.subplots(ncols=4, figsize=(20,4))
from skimage.segmentation import active_contour
img = X_train[idx].reshape((64,64))
from skimage.filters import gaussian
ac_contour = active_contour(gaussian(img,3), contour_pred, alpha=0.01, beta=0.1, w_edge=0,gamma=0.01,bc='periodic',convergence=0.1)
ax[0].imshow(X_input, cmap='gray')
ax[1].imshow(X_roi, cmap='gray')
ax[2].imshow(img, cmap='gray')
ax[2].plot(contour_pred[:, 1], contour_pred[:, 0], linewidth=2, color='red',label='Prediction')
ax[3].imshow(img, cmap='gray')
ax[3].plot(ac_contour[:, 1], ac_contour[:, 0], linewidth=2, color='orange',label='Prediction')
for i in range(4):
ax[i].axis('off')
```
| github_jupyter |
## Airbnb Data
```
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from urllib3.exceptions import ProtocolError
import pprint
# NOTE(review): pandas.io.json.json_normalize is deprecated/removed in
# recent pandas; switch to pandas.json_normalize if this import breaks.
from pandas.io.json import json_normalize
import pandas as pd
from urllib3.exceptions import ProtocolError
# Base explore_tabs query for Cologne, Germany.
api_base = 'https://www.airbnb.com/api/v2/explore_tabs?version=1.3.8&_format=for_explore_search_web&experiences_per_grid=20&items_per_grid=18&guidebooks_per_grid=20&auto_ib=true&fetch_filters=true&has_zero_guest_treatment=false&is_guided_search=true&is_new_cards_experiment=true&luxury_pre_launch=false&query_understanding_enabled=true&show_groupings=true&supports_for_you_v3=true&timezone_offset=-360&client_session_id=82674acc-b274-41dc-8b00-499d5c2fea44&metadata_only=false&is_standard_search=true&refinement_paths%5B%5D=%2Fhomes&selected_tab_id=home_tab&place_id=ChIJ5S-raZElv0cR8HcqSvxgJwQ&allow_override%5B%5D=&s_tag=kke9hwU1&screen_size=medium&query=Cologne%2C+Germany&_intents=p1'
# Bug fix: the query-string fragments below had been mangled by HTML
# entity decoding ("&curren" -> '¤', "&sect" -> '§'); restore the
# intended "&currency" and "&section_offset" parameters.
api_key = '&key=d306zoyjsyarp7ifhu67rjxn52tv0t20&currency=USD&locale=en'
api_section_offset = '&section_offset=4'
import requests
import json
retry_lvl = [100, 0.1]  # [max retry attempts, backoff step in seconds]
```
## Key
```
from bs4 import BeautifulSoup
# Scrape the public API key out of the Airbnb home page's bootstrap metadata.
airbnb_home = requests.get("http://airbnb.com")
soup = BeautifulSoup(airbnb_home.content, "html.parser")
metatags = soup.find_all('meta', id="_bootstrap-layout-init")
metacontents = [metatag["content"] for metatag in metatags]
metajson = json.loads(metacontents[0])
metajson
api_key = metajson['api_config']['key']
api_key = '&key='+api_key  # pre-format as a query-string fragment
print(api_key)
```
## Explore API
```
# Probe the explore_tabs response structure interactively.
rsp = requests.get(api_base+api_key)
rsp_json = rsp.json()
len(rsp_json)
rsp_json.keys()
# Reference: shape of the pagination metadata returned by the API.
# (Bug fix: the original pasted this JSON fragment as bare code, which
# is a syntax error in Python; it is kept here as a comment.)
# "pagination_metadata": {
#     "has_next_page": true,
#     "items_offset": 18,
#     "section_offset": 4,
#     "search_session_id": "eee6b55b-100d-4314-800a-393bec147f25"
# }
rsp_json['explore_tabs'][0]['pagination_metadata']['has_next_page']
rsp_json['explore_tabs'][0]['home_tab_metadata'].keys()
len(rsp_json['explore_tabs'][0]['sections'])
rsp_json['explore_tabs'][0]['sections'][3].keys()
len(rsp_json['explore_tabs'][0]['sections'][3]['listings'])
rsp_json['explore_tabs'][0]['sections'][3]['listings'][0];
flatten_json(rsp_json['explore_tabs'][0]['sections'][3]['listings'][0]);
```
## Functions
```
# fields
# Listing attributes to keep from each flattened API record.
listing_fields = [
    'bathrooms',
    'bedrooms',
    'beds',
    'person_capacity',
    # 'host_languages',
    'id',
    'is_business_travel_ready',
    'is_fully_refundable',
    'is_host_highly_rated',
    'is_rebookable',
    'is_superhost',
    'lat',
    'lng',
    'picture_count',
    'preview_amenities',
    'reviews_count',
    'star_rating',
    'tier_id'
]
pricing_quote_rate_with_service_fee_fields = [
    'amount',
    'currency'
]
# Fully-qualified column names as produced by flatten_json's '_' joining.
fields_default = ['listing_'+ field for field in listing_fields] + ['pricing_quote_rate_with_service_fee_'+field for field in pricing_quote_rate_with_service_fee_fields] + ['pricing_quote_rate_type']
fields_default
def listing_parser(listing, fields = None):
    """Build a DataFrame from flattened listing records, keeping `fields`.

    :param listing: list of flattened listing dicts (see flatten_json)
    :param fields: column names to keep; defaults to the module-level
        `fields_default` selection
    """
    if fields is None:
        fields = fields_default
    # Bug fix: the original always indexed with the global `fields_default`,
    # silently ignoring an explicitly passed `fields` argument.
    return pd.DataFrame(listing)[fields]
def next_indicator(response_json):
    """Return True when the first explore tab reports another page of results."""
    first_tab = response_json['explore_tabs'][0]
    return first_tab['pagination_metadata']['has_next_page']
def flatten_json(y):
    """Flatten nested dicts/lists into a flat dict with '_'-joined key paths.

    List elements contribute their position as a path component, e.g.
    {'a': [{'b': 1}]} -> {'a_0_b': 1}.
    """
    out = {}

    def walk(node, path=''):
        # `type(...) is` (not isinstance) preserved from the original:
        # dict/list subclasses are treated as leaves.
        if type(node) is dict:
            for key in node:
                walk(node[key], path + key + '_')
        elif type(node) is list:
            for index, item in enumerate(node):
                walk(item, path + str(index) + '_')
        else:
            out[path[:-1]] = node

    walk(y)
    return out
```
## Demo Scraping
```
from time import sleep  # bug fix: sleep() was called below without being imported

# First page: fetch section offset 0 and seed the accumulator frame.
next_sec = True
sec_off = 0
# Bug fix: '&section_offset' had been mojibake-decoded to '§ion_offset'
# ("&sect" -> '§'), here and in the loop below.
rsp = requests.get(api_base + api_key + '&section_offset=' + str(sec_off))
rsp_json = rsp.json()
listing_array = [flatten_json(item) for item in rsp_json['explore_tabs'][0]['sections'][3]['listings'] ]
# Bug fix: the original concatenated with an undefined `df` on the first
# page (NameError); the first page simply seeds df_total.
df_total = listing_parser(listing_array)
next_sec = next_indicator(rsp_json)
sec_off = sec_off + 1
# Follow pagination until the API reports no further pages, retrying
# transient request failures with linear backoff.
while next_sec == True:
    try_flag = 0
    while True and (try_flag < retry_lvl[0]):
        try:
            with requests.Session() as s:
                rsp = s.get(api_base + api_key + '&section_offset=' + str(sec_off))
        except requests.exceptions.RequestException as err:
            print('request error', err)
            try_flag = try_flag + 1
            sleep(try_flag * retry_lvl[1])
            pass
        else:
            break
    rsp_json = rsp.json()
    # NOTE(review): the first page reads sections[3] but the loop reads
    # sections[0] -- confirm which section actually holds the listings.
    listing_array = [flatten_json(item) for item in rsp_json['explore_tabs'][0]['sections'][0]['listings'] ]
    df = listing_parser(listing_array)
    df_total = pd.concat([df, df_total], join='inner')
    sec_off = sec_off + 1
    next_sec = next_indicator(rsp_json)
df_total
df_total.to_csv('data/cologne.csv')
```
| github_jupyter |
# Connecting to UniBit Api
Importing Libraries
```
import requests
import numpy as np
import pandas as pd
import json
from matplotlib import pyplot as plt
import statistics

# Bug fix: `API_KEY = ## get your free API key` was a syntax error
# (nothing after the `=`).  Paste your free UniBit API key between the
# quotes before running the cells below.
API_KEY = ''
```
Function definitions
```
def getIntraDayByTicker(Ticker):
    """Fetch the latest intraday price data for `Ticker` from the UniBit API.

    :param Ticker: ticker symbol, e.g. "AAPL"
    :return: parsed JSON object with the realtime price data
    """
    import requests
    import json
    url = 'https://api.unibit.ai/realtimestock/' + Ticker + '?AccessKey=' + API_KEY
    raw_body = requests.get(url).text
    return json.loads(raw_body)
def getStockNewsByTicker(Ticker):
    """Fetch the latest news items for `Ticker` from the UniBit news API.

    :param Ticker: ticker symbol, e.g. "AAPL"
    :return: parsed JSON array of news records
    """
    import requests
    import json
    url = 'https://api.unibit.ai/news/latest/' + Ticker + '?AccessKey=' + API_KEY
    raw_body = requests.get(url).text
    return json.loads(raw_body)
def getHistoricalPrice(Ticker,rng,interval):
    """Fetch historical stock prices for `Ticker` from the UniBit API.

    :param Ticker: ticker symbol, e.g. "AAPL"
    :param rng: history range -- one of 1m, 3m, 1y, 3y, 5y, 10y, 20y
    :param interval: positive n; every nth data point is returned
    :return: parsed JSON object with the historical price data
    """
    import requests
    import json
    url = ('https://api.unibit.ai/historicalstockprice/' + Ticker
           + '?range=' + rng + '&interval=' + str(interval)
           + '&AccessKey=' + API_KEY)
    raw_body = requests.get(url).text
    return json.loads(raw_body)
```
Creating the dataframe of intraday price
```
# Fetch AAPL intraday prices and load them into a DataFrame.
AAPL_intra = pd.DataFrame(data = getIntraDayByTicker("AAPL"))
AAPL_intra.head(2)
```
Adding a time index (the combination of date and time) and setting it as the index
```
# Combine the 'date' and 'minute' columns into one datetime index.
AAPL_intra["time_index"] = pd.to_datetime((AAPL_intra['date'] + AAPL_intra["minute"]).values, format='%Y%m%d%H:%M')
AAPL_intra.set_index("time_index",inplace=True)
```
## Visualizing intraday stock price
We see that the data isn't consistent between 16 February and 01 March
```
# Plot the most recent 4000 intraday price points.
AAPL_intra.tail(4000).price.plot()
```
Creating dataframe of historical stock price
```
# Three years of daily AAPL prices (interval=1 keeps every observation).
AAPL_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='AAPL',rng="3y",interval=1)["Stock price"])
```
Setting the date as an index
```
# Parse the date column and use it as the index.
AAPL_hist.date = pd.to_datetime(AAPL_hist.date)
AAPL_hist.set_index("date",inplace=True)
```
## Visualizing historical stock close price
The plot seems acceptable compared to the intraday plot
```
# Daily close price over the full 3-year range.
AAPL_hist.close.plot(figsize=[10,8])
```
We will continue with the historical data to do some predictions using multiple models
# EDA
```
# Summary statistics of the historical data.
AAPL_hist.describe()
```
AAPL stock price and volume evolution
```
# Stacked subplots: close price (top) and trading volume (bottom).
plt.figure(figsize=[10,8])
ax1 = plt.subplot(211)
plt.plot(AAPL_hist.close)
plt.setp(ax1.get_xticklabels(), fontsize=6)
ax2 = plt.subplot(212)
plt.plot(AAPL_hist.volume)
plt.setp(ax2.get_xticklabels(), fontsize=6)
```
### In this part we will look for anomalies in both volume and price
##### Support Vector Machine-Based Anomaly Detection
```
from sklearn.preprocessing import StandardScaler
from sklearn.svm import OneClassSVM
# Standardise (close, volume) so both features weigh equally in the SVM.
data = AAPL_hist[['close', 'volume']]
scaler = StandardScaler()
np_scaled = scaler.fit_transform(data)
data = pd.DataFrame(np_scaled)
# train oneclassSVM
outliers_fraction =0.09  # assumed share of anomalous days (the nu parameter)
model = OneClassSVM(nu=outliers_fraction, kernel="rbf", gamma=0.01)
model.fit(data)
# predict() labels inliers 1 and anomalies -1.
AAPL_hist['anomaly'] = pd.Series(model.predict(data),index=AAPL_hist.index)
fig, ax = plt.subplots(figsize=(12,8))
a = AAPL_hist.loc[AAPL_hist['anomaly'] == -1, ['close','volume']] #anomaly
# Close (top) and volume (bottom) with detected anomalies marked in red.
ax = plt.subplot(212)
ax.plot( AAPL_hist['volume'], color='blue')
ax.scatter(a.index,a['volume'], color='red')
ax1 = plt.subplot(211)
ax1.plot( AAPL_hist['close'], color='blue')
ax1.scatter(a.index,a['close'], color='red')
plt.show();
```
# Feature engineering
#### First type of Features
We create a window moving average
```
def pastNdayCloseMean(variable,N):
    """Return a list holding an N-row moving average of `variable`.

    variable: pandas Series (or single column) to average, e.g. AAPL_hist.close
    N: window length in rows (days)

    NOTE(review): head(i + N).tail(N) spans rows [i, i + N - 1] (clamped to
    the end of the series), so each entry averages the current row and the
    N-1 rows *after* it — confirm this forward-looking window is intended.
    """
    return [variable.head(i + N).tail(N).mean() for i in range(variable.count())]
```
We will create a weekly window and a monthly window for the close price, meaning 7 days and 30 days
```
AAPL_hist["weekely_close_window_mean"] = pd.Series(pastNdayCloseMean(AAPL_hist.close,7),index=AAPL_hist.index)
AAPL_hist["monthly_close_window_mean"] = pd.Series(pastNdayCloseMean(AAPL_hist.close,30),index=AAPL_hist.index)
```
We will create a weekly window and a monthly window for the volume, meaning 7 days and 30 days
```
AAPL_hist["weekely_volume_window_mean"] = pd.Series(pastNdayCloseMean(AAPL_hist.volume,7),index=AAPL_hist.index)
AAPL_hist["monthly_volume_window_mean"] = pd.Series(pastNdayCloseMean(AAPL_hist.volume,30),index=AAPL_hist.index)
```
#### Second Feature
We create a feature indicating the duration between the current value and the last detected anomaly
```
def lastAnomaly(variable):
    """Return an array giving, for each row, the number of rows elapsed
    since the most recent anomaly (rows where variable.anomaly == -1).

    variable: DataFrame carrying an 'anomaly' column, e.g. AAPL_hist.
    Rows *before* the first anomaly are back-filled with a linear ramp
    ending at (max_gap - first_anomaly_index).
    """
    flags = variable.anomaly.copy().values
    # position of the first anomaly; raises IndexError if none is present
    first_idx = np.where(flags == -1)[0][0]
    since = 0
    # overwrite each entry in place with the running distance from the last -1
    for i, flag in enumerate(flags):
        if flag == -1:
            since = 0
        else:
            since += 1
        flags[i] = since
    # largest observed gap after the first anomaly
    peak = flags[first_idx:].max()
    # back-fill the prefix (before any anomaly was seen) with a ramp
    for i in range(first_idx):
        flags[i] = peak - first_idx + i
    return flags
AAPL_hist["lastAnomaly_AAPL"] = pd.Series(lastAnomaly(AAPL_hist),index=AAPL_hist.index)
```
We can see the ups and downs of the time elapsed since the last anomaly
```
AAPL_hist.lastAnomaly_AAPL.plot(figsize=(10,8))
```
### Related data
We can add other features which related domain companies. In AAPL case, we can use [ AMZN, FB, GOOG, MSFT, TSLA ] and aggregate their data to be added to our dataset
We will focus only on Close and volume
```
AMZN_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='AMZN',rng="3y",interval=1)["Stock price"])
FB_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='FB',rng="3y",interval=1)["Stock price"])
GOOG_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='GOOG',rng="3y",interval=1)["Stock price"])
#MSFT_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='MSFT',rng="3y",interval=1)["Stock price"])
#TSLA_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='TSLA',rng="3y",interval=1)["Stock price"])
#Internal server error on microsoft and tesla data
```
Setting the time index
```
AMZN_hist.date = pd.to_datetime(AMZN_hist.date)
AMZN_hist.set_index("date",inplace=True)
FB_hist.date = pd.to_datetime(FB_hist.date)
FB_hist.set_index("date",inplace=True)
GOOG_hist.date = pd.to_datetime(GOOG_hist.date)
GOOG_hist.set_index("date",inplace=True)
```
##### After a little search I found top 10 Major Companies Tied to the Apple Supply Chain like "Analog Devices, Inc (ADI)", "Glu Mobile, Inc. (GLUU)", "Jabil Circuit Inc. (JBL)", "STMicroelectronics (STM)" ...
We can add some of those companies stock to our model
```
ADI_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='ADI',rng="3y",interval=1)["Stock price"])
GLUU_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='GLUU',rng="3y",interval=1)["Stock price"])
JBL_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='JBL',rng="3y",interval=1)["Stock price"])
#STM_hist = pd.DataFrame(data = getHistoricalPrice(Ticker='STM',rng="3y",interval=1)["Stock price"])
```
Setting Time Index
```
ADI_hist.date = pd.to_datetime(ADI_hist.date)
ADI_hist.set_index("date",inplace=True)
GLUU_hist.date = pd.to_datetime(GLUU_hist.date)
GLUU_hist.set_index("date",inplace=True)
JBL_hist.date = pd.to_datetime(JBL_hist.date)
JBL_hist.set_index("date",inplace=True)
```
We will create two more features, One for related field companies and the Other for the tied to the apple supply chain
##### Related field dataframe
```
Related_field = pd.merge(AMZN_hist[['close','volume']],FB_hist[['close','volume']],how='left',on='date',suffixes=('_AMZN','_FB'))
Related_field = pd.merge(Related_field,GOOG_hist[['close','volume']],how='left',on='date',suffixes=('','_GOOG'))
```
##### Tied to supply chain
```
Supply_chain = pd.merge(ADI_hist[['close','volume']],GLUU_hist[['close','volume']],how='left',on='date',suffixes=('_ADI','_GLUU'))
Supply_chain = pd.merge(Supply_chain,JBL_hist[['close','volume']],how='left',on='date',suffixes=('','_JBL'))
```
Scaling Values for volume and close
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(Related_field.fillna(0))
Related_field_fit = scaler.transform(Related_field.fillna(0))
scaler.fit(Supply_chain.fillna(0))
Supply_chain_fit = scaler.transform(Supply_chain.fillna(0))
Related_field = pd.DataFrame(Related_field_fit,index=Related_field.index,columns=Related_field.columns)
Supply_chain = pd.DataFrame(Supply_chain_fit,index=Supply_chain.index,columns=Supply_chain.columns)
```
Now extract the mean of each
```
Related_field["Related_field_close_mean"] = Related_field[["close_AMZN","close_FB","close"]].mean(axis=1)
Related_field["Related_field_volume_mean"] = Related_field[["volume_AMZN","volume_FB","volume"]].mean(axis=1)
Supply_chain["Supply_chain_close_mean"] = Supply_chain[["close_ADI","close_GLUU","close"]].mean(axis=1)
Supply_chain["Supply_chain_volume_mean"] = Supply_chain[["volume_ADI","volume_GLUU","volume"]].mean(axis=1)
```
#### Now we merge all data to AAPL_hist dataframe
```
AAPL_hist["Related_field_close_mean"] = Related_field["Related_field_close_mean"]
AAPL_hist["Related_field_volume_mean"] =Related_field["Related_field_volume_mean"]
AAPL_hist["Supply_chain_close_mean"] = Supply_chain["Supply_chain_close_mean"]
AAPL_hist["Supply_chain_volume_mean"] = Supply_chain["Supply_chain_volume_mean"]
```
Standardizing AAPL features
```
scaler = StandardScaler()
scaler.fit(AAPL_hist[['adj_close', 'close', 'high', 'low', 'open', 'volume','weekely_close_window_mean','monthly_close_window_mean','weekely_volume_window_mean','monthly_volume_window_mean']])
AAPL_hist_fit = scaler.transform(AAPL_hist[['adj_close', 'close', 'high', 'low', 'open', 'volume','weekely_close_window_mean','monthly_close_window_mean','weekely_volume_window_mean','monthly_volume_window_mean']])
AAPL_hist[['adj_close', 'close', 'high', 'low', 'open', 'volume','weekely_close_window_mean','monthly_close_window_mean','weekely_volume_window_mean','monthly_volume_window_mean']] = AAPL_hist_fit
```
Filling missing values
```
AAPL_hist.isnull().sum(axis = 0)
AAPL_hist.fillna(0,inplace=True)
AAPL_hist.drop(['anomaly'],axis=1,inplace=True)
AAPL_hist.to_csv("clean_Stock_data.csv",index=True)
AAPL_hist.to_csv("clean_Stock_data_no_index.csv",index=False)
```
# Model building
Rearranging columns
```
cols = AAPL_hist.columns.tolist()
type(cols)
cols.remove('close')
cols.append('close')
AAPL_hist = AAPL_hist[cols]
```
## We will use a RNNs with basic, LSTM, GRU cells
### First method
```
import matplotlib.pyplot as plt
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import numpy
# load the dataset
dataframe = pd.read_csv('clean_Stock_data_no_index.csv', engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
print(len(train), len(test))
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    """Slice a 2-D array into (X, y) pairs for supervised learning.

    X holds `look_back` consecutive rows of every column except the last;
    y holds the last column's value at the row that follows each window.
    Returns (X, y) as numpy arrays.
    """
    n_cols = dataset.shape[1]
    samples, targets = [], []
    for start in range(len(dataset) - look_back - 1):
        samples.append(dataset[start:(start + look_back), :n_cols - 1])
        targets.append(dataset[start + look_back, n_cols - 1])
    return numpy.array(samples), numpy.array(targets)
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], dataset.shape[1]-1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], dataset.shape[1]-1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(dataset.shape[1]-1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY, testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
plt.plot(trainY)
plt.plot(trainPredict)
```
### Second Method
```
import tensorflow as tf
# split data in 80%/10%/10% train/validation/test sets
valid_set_size_percentage = 10
test_set_size_percentage = 10
def load_data(stock, seq_len, valid_pct=None, test_pct=None):
    """Split `stock` into overlapping sequences and train/valid/test sets.

    stock: DataFrame (or array) of shape (rows, features)
    seq_len: length of each sliding-window sequence
    valid_pct / test_pct: percentage of sequences reserved for validation /
        test; default to the module-level globals (original behavior)

    Each sequence's first seq_len-1 rows become x and its last row becomes y.
    Returns [x_train, y_train, x_valid, y_valid, x_test, y_test].
    """
    if valid_pct is None:
        valid_pct = valid_set_size_percentage
    if test_pct is None:
        test_pct = test_set_size_percentage
    # DataFrame.as_matrix() was removed in pandas 1.0; np.asarray accepts
    # both DataFrames and plain arrays.
    data_raw = np.asarray(stock)
    # create all possible sequences of length seq_len
    data = np.array([data_raw[i: i + seq_len]
                     for i in range(len(data_raw) - seq_len)])
    valid_set_size = int(np.round(valid_pct / 100 * data.shape[0]))
    test_set_size = int(np.round(test_pct / 100 * data.shape[0]))
    train_set_size = data.shape[0] - (valid_set_size + test_set_size)
    x_train = data[:train_set_size, :-1, :]
    y_train = data[:train_set_size, -1, :]
    x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]
    y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]
    x_test = data[train_set_size + valid_set_size:, :-1, :]
    y_test = data[train_set_size + valid_set_size:, -1, :]
    return [x_train, y_train, x_valid, y_valid, x_test, y_test]
# create train, test data
seq_len = 20 # choose sequence length
x_train, y_train, x_valid, y_valid, x_test, y_test = load_data(AAPL_hist, seq_len)
print('x_train.shape = ',x_train.shape)
print('y_train.shape = ', y_train.shape)
print('x_valid.shape = ',x_valid.shape)
print('y_valid.shape = ', y_valid.shape)
print('x_test.shape = ', x_test.shape)
print('y_test.shape = ',y_test.shape)
## Basic Cell RNN in tensorflow
index_in_epoch = 0;
perm_array = np.arange(x_train.shape[0])
np.random.shuffle(perm_array)
# function to get the next batch
def get_next_batch(batch_size):
    """Return the next (x, y) training mini-batch.

    Walks through x_train/y_train via the shuffled index array perm_array;
    when an epoch is exhausted, reshuffles and restarts from the beginning.
    Mutates the module-level cursor index_in_epoch.
    """
    global index_in_epoch, x_train, perm_array
    batch_start = index_in_epoch
    index_in_epoch += batch_size
    if index_in_epoch > x_train.shape[0]:
        # epoch exhausted: reshuffle the permutation and start a new epoch
        np.random.shuffle(perm_array)
        batch_start = 0
        index_in_epoch = batch_size
    chosen = perm_array[batch_start:index_in_epoch]
    return x_train[chosen], y_train[chosen]
# parameters
n_steps = seq_len-1
n_inputs = len(AAPL_hist.columns)
n_neurons = 200
n_outputs = len(AAPL_hist.columns)
n_layers = 2
learning_rate = 0.001
batch_size = 50
n_epochs = 100
train_set_size = x_train.shape[0]
test_set_size = x_test.shape[0]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_outputs])
# use Basic RNN Cell
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)
for layer in range(n_layers)]
# use Basic LSTM Cell
#layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu)
# for layer in range(n_layers)]
# use LSTM Cell with peephole connections
#layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,
# activation=tf.nn.leaky_relu, use_peepholes = True)
# for layer in range(n_layers)]
# use GRU cell
#layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu)
# for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:,n_steps-1,:] # keep only last output of sequence
loss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
# run graph
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for iteration in range(int(n_epochs*train_set_size/batch_size)):
x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5*train_set_size/batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f'%(
iteration*batch_size/train_set_size, mse_train, mse_valid))
y_train_pred = sess.run(outputs, feed_dict={X: x_train})
y_valid_pred = sess.run(outputs, feed_dict={X: x_valid})
y_test_pred = sess.run(outputs, feed_dict={X: x_test})
ft = 4 # 0 to 14 including all variables
## show predictions
plt.figure(figsize=(15, 5));
plt.subplot(1,2,1);
plt.plot(np.arange(y_train.shape[0]), y_train[:,ft], color='blue', label='train target')
plt.plot(np.arange(y_train.shape[0], y_train.shape[0]+y_valid.shape[0]), y_valid[:,ft],
color='gray', label='valid target')
plt.plot(np.arange(y_train.shape[0]+y_valid.shape[0],
y_train.shape[0]+y_test.shape[0]+y_test.shape[0]),
y_test[:,ft], color='black', label='test target')
plt.plot(np.arange(y_train_pred.shape[0]),y_train_pred[:,ft], color='red',
label='train prediction')
plt.plot(np.arange(y_train_pred.shape[0], y_train_pred.shape[0]+y_valid_pred.shape[0]),
y_valid_pred[:,ft], color='orange', label='valid prediction')
plt.plot(np.arange(y_train_pred.shape[0]+y_valid_pred.shape[0],
y_train_pred.shape[0]+y_valid_pred.shape[0]+y_test_pred.shape[0]),
y_test_pred[:,ft], color='green', label='test prediction')
plt.title('past and future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best');
plt.subplot(1,2,2);
plt.plot(np.arange(y_train.shape[0], y_train.shape[0]+y_test.shape[0]),
y_test[:,ft], color='black', label='test target')
plt.plot(np.arange(y_train_pred.shape[0], y_train_pred.shape[0]+y_test_pred.shape[0]),
y_test_pred[:,ft], color='green', label='test prediction')
plt.title('future stock prices')
plt.xlabel('time [days]')
plt.ylabel('normalized price')
plt.legend(loc='best');
corr_price_development_train = np.sum(np.equal(np.sign(y_train[:,1]-y_train[:,0]),
np.sign(y_train_pred[:,1]-y_train_pred[:,0])).astype(int)) / y_train.shape[0]
corr_price_development_valid = np.sum(np.equal(np.sign(y_valid[:,1]-y_valid[:,0]),
np.sign(y_valid_pred[:,1]-y_valid_pred[:,0])).astype(int)) / y_valid.shape[0]
corr_price_development_test = np.sum(np.equal(np.sign(y_test[:,1]-y_test[:,0]),
np.sign(y_test_pred[:,1]-y_test_pred[:,0])).astype(int)) / y_test.shape[0]
print('correct sign prediction for close - open price for train/valid/test: %.2f/%.2f/%.2f'%(
corr_price_development_train, corr_price_development_valid, corr_price_development_test))
```
| github_jupyter |
In this tutorial we will show how to access and navigate the Iteration/Expression Tree (IET) rooted in an `Operator`.
# Part I - Top Down
Let's start with a fairly trivial example. First of all, we disable all performance-related optimizations, to maximize the simplicity of the created IET as well as the readability of the generated code.
```
from devito import configuration
configuration['opt'] = 'noop'
configuration['language'] = 'C'
```
Then, we create a `TimeFunction` with 3 points in each of the space `Dimension`s _x_ and _y_.
```
from devito import Grid, TimeFunction
grid = Grid(shape=(3, 3))
u = TimeFunction(name='u', grid=grid)
```
We now create an `Operator` that increments by 1 all points in the computational domain.
```
from devito import Eq, Operator
eq = Eq(u.forward, u+1)
op = Operator(eq)
```
An `Operator` is an IET node that can generate, JIT-compile, and run low-level code (e.g., C). Just like all other types of IET nodes, it's got a number of metadata attached. For example, we can query an `Operator` to retrieve the input/output `Function`s.
```
op.input
op.output
```
If we print `op`, we can see how the generated code looks like.
```
print(op)
```
An `Operator` is the root of an IET that typically consists of several nested `Iteration`s and `Expression`s – two other fundamental IET node types. The user-provided SymPy equations are wrapped within `Expressions`. Loop nest embedding such expressions are constructed by suitably nesting `Iterations`.
The Devito compiler constructs the IET from a collection of `Cluster`s, which represent a higher-level intermediate representation (not covered in this tutorial).
The Devito compiler also attaches to the IET key computational properties, such as _sequential_, _parallel_, and _affine_, which are derived through data dependence analysis.
We can print the IET structure of an `Operator`, as well as the attached computational properties, using the utility function `pprint`.
```
from devito.tools import pprint
pprint(op)
```
In this example, `op` is represented as a `<Callable Kernel>`. Attached to it are metadata, such as `_headers` and `_includes`, as well as the `body`, which includes the children IET nodes. Here, the body is the concatenation of an `PointerCast` and a `List` object.
```
op._headers
op._includes
op.body
```
We can explicitly traverse the `body` until we locate the user-provided `SymPy` equations.
```
print(op.body.casts[0]) # Printing the PointerCast
print(op.body.body[0]) # Printing the actual body
```
Below we access the `Iteration` representing the time loop.
```
t_iter = op.body.body[0].body[0]
t_iter
```
We can for example inspect the `Iteration` to discover what its iteration bounds are.
```
t_iter.limits
```
And as we keep going down through the IET, we can eventually reach the `Expression` wrapping the user-provided SymPy equation.
```
expr = t_iter.nodes[0].body[0].body[0].nodes[0].nodes[0].body[0]
expr.view
```
Of course, there are mechanisms in place to, for example, find all `Expression`s in a given IET. The Devito compiler has a number of IET visitors, among which `FindNodes`, usable to retrieve all nodes of a particular type. So we easily
can get all `Expression`s within `op` as follows
```
from devito.ir.iet import Expression, FindNodes
exprs = FindNodes(Expression).visit(op)
exprs[0].view
```
| github_jupyter |
<div>
<img src="https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_" width="350"/>
</div>
#**Artificial Intelligence - MSc**
This notebook is designed specially for the module
ET5003 - MACHINE LEARNING APPLICATIONS
Instructor: Enrique Naredo
###ET5003_NNwithFixedLayers
© All rights reserved to the author, do not share outside this module.
## Introduction
[Artificial neural networks](https://en.wikipedia.org/wiki/Artificial_neural_network) (ANNs), usually simply called neural networks ( [NNs](https://www.investopedia.com/terms/n/neuralnetwork.asp)), are computing systems inspired by the biological neural networks that constitute animal brains.
* An ANN is based on a collection of connected units or nodes called artificial neurons, which loosely model the neurons in a biological brain.
* Each connection, like the synapses in a biological brain, can transmit a signal to other neurons.
* An artificial neuron receives a signal then processes it and can signal neurons connected to it.
* The "signal" at a connection is a real number, and the output of each neuron is computed by some non-linear function of the sum of its inputs.
* The connections are called edges. Neurons and edges typically have a weight that adjusts as learning proceeds.
* The weight increases or decreases the strength of the signal at a connection.
* Neurons may have a threshold such that a signal is sent only if the aggregate signal crosses that threshold.
* Typically, neurons are aggregated into layers. Different layers may perform different transformations on their inputs.
* Signals travel from the first layer (the input layer), to the last layer (the output layer), possibly after traversing the layers multiple times.
**Acknowledgement**
This notebook is refurbished taking source code from Alessio Benavoli's webpage and from the libraries numpy, GPy, pylab, and pymc3.
## Libraries
```
# Suppressing Warnings:
import warnings
warnings.filterwarnings("ignore")
# https://pypi.org/project/GPy/
!pip install gpy
import GPy as GPy
import numpy as np
import pylab as pb
import pymc3 as pm
%matplotlib inline
```
## Data generation
Generate data from a nonlinear function and use a Gaussian Process to sample it.
```
# seed the legacy random number generator
# to replicate experiments
seed = None
#seed = 7
np.random.seed(seed)
# training set
# points evenly spaced over [0,1]
samples_train = 500
X_train = np.linspace(-1,1,samples_train)
# min-max training
X_train.min(), X_train.max()
# test set
# points evenly spaced over [0,1]
samples_test = 200
X_test = np.linspace(-1,1,samples_test)
# min-max training
X_test.min(), X_test.max()
# Gather all samples to get a model from them
# Stack arrays in sequence horizontally (column wise)
X_all = np.hstack([X_train,X_test]).reshape(-1,1)
# mean μ=0: distribution centered at zero
μ = np.zeros(samples_train + samples_test)
print(μ.shape)
# Gaussian Processes
# https://gpy.readthedocs.io/en/deploy/GPy.kern.html
# Radial Basis Functions
# https://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
# kernel is a function that specifies the degree of similarity
# between variables given their relative positions in parameter space
kernel = GPy.kern.RBF(input_dim=1,lengthscale=0.15,variance=0.2)
print(kernel)
# covariance matrix
C = kernel.K(X_all,X_all)
print(C.shape)
# Generate samples using μ & C parameters
# random generator using normal distribution
# mean=μ, cov=C, size=1
# true function
TF = np.random.multivariate_normal(μ,C,1)[0,:]
# noise factor
noise = 0.1
# training noisy data -> 0.1 (10%)
y_train = TF[0:samples_train] + np.random.randn(samples_train)*noise
# test noisy data -> 0.1 (10%)
y_test = TF[samples_train:] + np.random.randn(samples_test)*noise
# TF-> true function
TF = TF[samples_train:]
# Plotting using pylab (pb)
# 1) First figure
pb.figure()
# the default drawing order for axes is patches, lines, text
# this order is determined by the zorder attribute
pb.plot(X_test,TF,zorder=100,c='b',label='True Function')
# train: scatter plot
pb.scatter(X_train,y_train,c='g',label='Train Samples',alpha=0.5)
pb.legend()
pb.xlabel("x",fontsize=16)
pb.ylabel("y",fontsize=16,rotation=0)
# save temporarily to drive
pb.savefig("Gaussian_train_1.pdf")
# 2) Second figure
pb.figure()
pb.plot(X_test,TF,zorder=100,c='b',label='True Function')
# test: scatter plot
pb.scatter(X_test,y_test,c='r',label='Test Samples',alpha=0.5)
pb.legend()
pb.xlabel("x",fontsize=16)
pb.ylabel("y",fontsize=16,rotation=0)
# save temporarily to drive
pb.savefig("Gaussian_test_1.pdf")
```
## Neural Network
[Mulilayer Perceptron](https://en.wikipedia.org/wiki/Multilayer_perceptron) (MLP)
**Neural Network with fixed weights in the inner layer**
* We do not need to fit the weights of the inner layer.
* We can consider the inner layer as a set of basis functions.
* Address the fitting problem as in the Least Squares (LS) algorithm.
```
def computeL(X,W):
    """Build the design matrix L for a single-layer NN, as in linear regression.

    X: iterable of scalar samples
    W: weight matrix of shape (hidden_units, 2) — column 0 holds biases,
       column 1 holds input weights
    Returns L of shape (len(X), hidden_units + 1): a leading bias column of
    ones followed by tanh(W[:,0] + x * W[:,1]) for each sample x.
    """
    # tanh activation of every hidden unit for one scalar input
    rows = [np.tanh(W[:, 0] + sample * W[:, 1]) for sample in X]
    L = np.array(rows)
    # prepend the bias column of ones
    bias_col = np.ones((L.shape[0], 1))
    return np.hstack([bias_col, L])
def leastsquares(X,y):
    """Solve the normal equations for ordinary Least Squares (LS).

    https://en.wikipedia.org/wiki/Least_squares
    X: design matrix, y: target vector.
    Returns theta = (X^T X + eps*I)^-1 X^T y, where eps is machine epsilon
    (https://en.wikipedia.org/wiki/Machine_epsilon) added to keep the
    Gram matrix invertible in degenerate cases.
    """
    eps = np.finfo(float).eps
    # regularized Gram matrix: X^T X + eps * I
    gram = np.matmul(X.T, X) + eps * np.identity(X.shape[1])
    # theta = gram^-1 * X^T * y
    return np.matmul(np.linalg.inv(gram), np.matmul(X.T, y))
# hu-> hidden units
hu = 10
# generate random weights for 2 inner layers
W = np.random.randn(hu,2)
print(W)
# train: compute H matrix as in linear regression
MLP_train = computeL(X_train,W)
print(MLP_train)
# array shapes
X_train.shape, W.shape, MLP_train.shape
# estimate weights θ for the outer layer (linear layer)
θ_ol = leastsquares(MLP_train,y_train)
print(θ_ol)
# test: compute H matrix as in linear regression
MLP_test = computeL(X_test,W)
# predictions
y_pred = np.matmul(MLP_test,θ_ol)
print(y_pred)
# plot test data with TF -> true function
pb.plot(X_test,TF,c='blue',label='True Function')
pb.legend()
# plot test data with predictions
pb.plot(X_test,y_pred,c='brown',label='MLP')
pb.legend()
# plot all together
pb.plot(X_test,TF,c='b',label='True Function')
pb.plot(X_test,y_pred,c='brown',label='MLP')
pb.scatter(X_train,y_train,c='g',label='Train Samples',alpha=0.5)
pb.xlabel("x",fontsize=16)
pb.ylabel("y",fontsize=16,rotation=0)
pb.legend()
# save temporarily to drive
pb.savefig("MLP_pred_1.pdf")
```
### Plot of the basis functions
```
# Basis functions
# https://www.psych.mcgill.ca/misc/fda/ex-basis-a1.html
pb.plot(X_train,MLP_train)
pb.xlabel("x",fontsize=16)
pb.ylabel("y",fontsize=16,rotation=0)
pb.savefig("Basis_functions_1.pdf")
```
## Fitting the inner layer too
**The above approach works.**
But, if we also optimize the weights of the inner layer, then we can have a better estimate by using less hidden units.
* This is what [Keras](https://keras.io/) does.
* We have the usual over-fitting problem for general-recipe ML algorithms.
* Increasing the hidden units and therefore the network parameters, the NN starts overfitting!
```
from keras.models import Sequential
from keras.layers import Dense
```
An [epoch](https://radiopaedia.org/articles/epoch-machine-learning) is a term used in machine learning and indicates the number of passes of the entire training dataset the machine learning algorithm has completed.
* Datasets are usually grouped into batches (especially when the amount of data is very large).
* Some people use the term iteration loosely and refer to putting one batch through the model as an iteration.
```
# start with 250 epochs, then increase them to 25000 or more?
num_epochs = 250
```
[Batch size](https://radiopaedia.org/articles/batch-size-machine-learning) is a term used in machine learning and refers to the number of training examples utilized in one iteration.
The batch size can be one of three options:
* batch mode: where the batch size is equal to the total dataset thus making the iteration and epoch values equivalent
* mini-batch mode: where the batch size is greater than one but less than the total dataset size. Usually, a number that can be divided into the total dataset size.
* stochastic mode: where the batch size is equal to one. Therefore the gradient and the neural network parameters are updated after each sample.
```
# batch size
batch_size = 200
# fix random seed for reproducibility
seed = None
#seed = 7
np.random.seed(seed)
# figure with subplots (9)
fig, axs = pb.subplots(3,3, figsize=(16, 16), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace=0.2, wspace=0.2)
# ravel-> return a contiguous flattened array
# https://numpy.org/doc/stable/reference/generated/numpy.ravel.html
axs = axs.ravel()
# first index
ax_index = 0
# loop in the hidden units
for i in range(1,10,1):
# Sequential-> groups a linear stack of layers into a tf.keras.Model
model = Sequential()
# inner layer
# Dense implements the operation: output = activation(dot(input, kernel) + bias)
model.add(Dense(i, input_dim=1, kernel_initializer='normal', activation='tanh', name='input_layer'))
# outer linear layer
model.add(Dense(1, kernel_initializer='normal', activation='linear', name='output_layer'))
# compile model
# configure the model for training
# mse = tf.keras.losses.MeanSquaredError()
# https://keras.io/api/losses/regression_losses/#mean_squared_error-function
# adam-> optimizer, https://keras.io/api/optimizers/
model.compile(loss='mse', optimizer='adam')
# fit model: train the model for a fixed number of epochs
model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=0)
# predictions
y_pred2 = model.predict(X_test)
# MSE-> Mean Squared Error in test set
MSE_test = np.round( np.mean( (y_pred2 - y_test)**2 ), 3)
# plots
axs[ax_index].plot(X_test,TF,zorder=100,label='True Function')
axs[ax_index].plot(X_test,y_pred2,c='green',label='NN Model')
axs[ax_index].scatter(X_train,y_train,c='r',label='Train Samples',alpha=0.3)
axs[ax_index].set_title('$\ell$='+str(i)+', MSE_test='+str(MSE_test) )
axs[ax_index].legend()
# index counter
ax_index = ax_index+1
# save temporarily to drive
pb.savefig("NN_overfitting.pdf", bbox_inches='tight')
```
Here you have some examples of how to compute MSE; following these ideas, you could build your own methods or write your code in a different way.
```
# examples to compute MSE
y_1 = [3, -0.5, 2, 7]
y_1 = np.array(y_1)
y_2 = [2.5, 0.0, 2, 8]
y_2 = np.array(y_2)
# 1) using sklearn
from sklearn.metrics import mean_squared_error
MSE_sklearn = mean_squared_error(y_1, y_2)
print(MSE_sklearn)
# 2) numpy custom-1
MSE_custom1 = np.round( np.mean( (y_1 - y_2)**2 ), 3)
print(MSE_custom1)
# 3) numpy custom-2
MSE_custom2 = np.square(np.subtract(y_1, y_2)).mean()
print(MSE_custom2)
```
## New data
```
# number of samples
num_samples_train = 250
num_samples_test = 200
# intervals to sample
a, b, c = 0.2, 0.6, 0.8
# points evenly spaced over [0,1]
interval_1 = np.random.rand(int(num_samples_train/2))*b - c
interval_2 = np.random.rand(int(num_samples_train/2))*b + c
X_new_train = np.sort(np.hstack([interval_1,interval_2]))
X_new_test = np.linspace(-1,1,num_samples_test)
X_new_all = np.hstack([X_new_train,X_new_test]).reshape(-1,1)
# vector of the means
μ_new = np.zeros((len(X_new_all)))
# covariance matrix
C_new = kernel.K(X_new_all,X_new_all)
# noise factor
noise_new = 0.1
# generate samples path with mean μ and covariance C
TF_new = np.random.multivariate_normal(μ_new,C_new,1)[0,:]
y_new_train = TF_new[0:len(X_new_train)] + np.random.randn(len(X_new_train))*noise_new
y_new_test = TF_new[len(X_new_train):] + np.random.randn(len(X_new_test))*noise_new
TF_new = TF_new[len(X_new_train):]
```
In this example, first generate a nonlinear functions and then generate noisy training data from that function.
The constrains are:
* Training samples $x$ belong to either interval $[-0.8,-0.2]$ or $[0.2,0.8]$.
* There are no training samples in the interval $[-0.2,0.2]$.
* The goal is to evaluate the extrapolation error in the interval $[-0.2,0.2]$.
```
# plot the true function and the noisy training samples
pb.figure()
pb.plot(X_new_test,TF_new,c='b',label='True Function',zorder=100)
# training data
pb.scatter(X_new_train,y_new_train,c='g',label='Train Samples',alpha=0.5)
pb.xlabel("x",fontsize=16);
pb.ylabel("y",fontsize=16,rotation=0)
pb.legend()
pb.savefig("New_data.pdf")
# Small feed-forward regressor: 1 input -> tanh hidden layer -> 1 linear output.
model_new = Sequential()
# NOTE(review): despite its name, `epochs_new` is used as the hidden-layer
# width below (and as $\ell$ in a later plot title), not as an epoch count.
epochs_new = 20
# inner layer
model_new.add(Dense(epochs_new, input_dim=1, kernel_initializer='normal', activation='tanh', name='input_layer'))
# outer linear layer
model_new.add(Dense(1, kernel_initializer='normal', activation='linear', name='output_layer'))
# Create a model
model_new.compile(loss='mse', optimizer='adam')
# fit model (250 epochs, mini-batches of 20)
model_new.fit(X_new_train, y_new_train, epochs=250, batch_size=20, verbose=0)
#model_new.fit(X_new_train, y_new_train, epochs=25000, batch_size=200, verbose=0)
```
Note that **general-recipe ML** does not tell anything about the uncertainty.
* For instance, the estimation
in $[-0.2,0.2]$ should be less reliable than that in $[-0.8,-0.2]$ because there is no data in $[-0.2,0.2]$.
* However, Keras only returns the prediction, there is no uncertainty.
* This together with the overfitting problems shows once again that general-recipe ML is not a sound approach.
```
# Plot the newly trained model's predictions over the gap dataset.
# Fix: the original called `model.predict` and plotted `TF` -- the model and
# true function from the EARLIER section -- instead of `model_new`/`TF_new`.
# ravel(): Keras predict returns shape (n, 1); flatten to (n,) so the MSE
# below subtracts elementwise instead of broadcasting to an (n, n) matrix.
y_new_pred = model_new.predict(X_new_test).ravel()
pb.plot(X_new_test,TF_new,zorder=100,c='b',label='True Function')
pb.scatter(X_new_train, y_new_train,c='green',label='Train Samples',alpha=0.3)
pb.plot(X_new_test,y_new_pred,c='orange',label='NN')
pb.title('$\ell$='+str(epochs_new)+' MSE_test_set='+str(np.round(np.mean((y_new_pred - y_new_test)**2),3)))
pb.legend()
pb.savefig("NN_gap.pdf", bbox_inches='tight')
```
| github_jupyter |
# AWS DeepComposer: Train it Again Maestro
## Episode 6 - Head-to-Head Battle
---
This notebook is for episode 6 of the <b> AWS DeepComposer: Train it Again Maestro </b> web series on the <b>A Cloud Guru</b> platform.
This covers building a custom GAN architecture and training a custom music genre model using Amazon SageMaker. In this example, we will train a hip hop model and use it to generate a brand new song.
---
```
# Create the environment (TF 1.x is pinned: the code below relies on
# tf.logging and tf.enable_eager_execution, both removed in TF 2.x).
!conda update --all --y
!pip install tensorflow-gpu==1.14.0
!pip install numpy==1.16.4
!pip install pretty_midi
!pip install pypianoroll
!pip install music21
!pip install seaborn
!pip install --ignore-installed moviepy
# IMPORTS
import os
import numpy as np
from numpy import asarray
from numpy import save
from PIL import Image
import logging
import pypianoroll
from pypianoroll import Multitrack, Track
import scipy.stats
import pickle
import music21
from IPython import display
import matplotlib.pyplot as plt
# Configure Tensorflow
import tensorflow as tf
print(tf.__version__)
tf.logging.set_verbosity(tf.logging.ERROR)
tf.enable_eager_execution()
# Use this command to make a subset of GPUS visible to the jupyter notebook.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# Utils library for plotting, loading and saving midi among other functions
from utils import display_utils, metrics_utils, path_utils, inference_utils, midi_utils
LOGGER = logging.getLogger("gan.train")
%matplotlib inline
from collections import Counter
root_dir = './Experiments'
# Location of the original MIDI files used for training; place your MIDI files here
hiphop_midi_location = './hiphop_midi/'
# Root directory for this experiment's outputs
model_dir = os.path.join(root_dir,'2HipHop')
# Directory to save pianorolls during training
train_dir = os.path.join(model_dir, 'train')
# Directory to save checkpoints generated during training
check_dir = os.path.join(model_dir, 'preload')
# Directory to save midi during training
sample_dir = os.path.join(model_dir, 'sample')
# Directory to save samples generated during inference
eval_dir = os.path.join(model_dir, 'eval')
# Directory to save eval data
dataset_eval_dir = './dataset/'
# Create output directories (parents included via exist_ok paths).
# NOTE(review): check_dir and dataset_eval_dir are NOT created here --
# tf.train.CheckpointManager creates check_dir itself, but the eval-data
# save later expects './dataset/' to already exist; confirm.
os.makedirs(train_dir, exist_ok=True)
os.makedirs(eval_dir, exist_ok=True)
os.makedirs(sample_dir, exist_ok=True)
```
# MIDI File Review
---
This section of code provides additional details on the MIDI files used to train the hip hop genre model. This is necessary because the GAN only supports 4 instruments (no more, no less). Through experimentation, you will need to determine which 4 instruments are the most important to your genre.
---
<img src="images/multi_track_object.png" alt="multitrack object" width="600">
<img src="images/track_object.png" alt="track object" width="600">
```
# Survey the distinct instruments present across the MIDI corpus.
instrument_list = [] #holds the list of instrument (track) names seen so far
program_number = []  # parallel list of program numbers (collected but not used later)
for filename in os.listdir(hiphop_midi_location):
    if filename.endswith(".mid"):
        try:
            """
            music_tracks is a Multitrack object containing Track objects
            each Track object has a Pianoroll, which is a numpy array shaped as (time steps, pitch range)
            Multitrack ---> https://salu133445.github.io/pypianoroll/multitrack.html
            Track ---> https://salu133445.github.io/pypianoroll/track.html
            """
            music_tracks = pypianoroll.Multitrack(beat_resolution=4)
            music_tracks.parse_midi(hiphop_midi_location + filename)
            # Record each track name (and its program) the first time it appears.
            for index, track in enumerate(music_tracks.tracks):
                if track.name not in instrument_list:
                    print(track.name, " ", track.program)
                    instrument_list.append(track.name)
                    program_number.append(track.program)
        except Exception as e:
            # A single malformed MIDI file should not abort the whole survey.
            print("**********ERROR**************")
            print(e)
print("The amount of instruments across tracks: ", len(instrument_list))
print(instrument_list)
```
# Prepare Training Data (MIDI files -----> .npy)
---
This section of code demonstrates the process of converting MIDI files to the needed format for training, which is a .npy file. The final shape on the .npy file should be (x, 32, 128, 4), which represents (number of samples, number of time steps per sample, pitch range, instruments).
---
<img src="images/training-image.png" alt="training image" width="600">
```
# Helper that routes a track into `collection`, keyed by instrument family.
def store_track(track, collection):
    """
    Sort a track into one of the 4 supported instrument families based on
    its program number (the unique identifier for the instrument, i.e.
    track.program -- https://en.wikipedia.org/wiki/General_MIDI).

    If `collection` is a dict, the track is appended under its family key
    ('Piano'/'Organ'/'Bass'/'Guitar'); if it is a list, the track is simply
    appended when it belongs to any of the 4 families. Unmatched tracks are
    skipped with a console message. Returns `collection`.

    NOTE(review): pypianoroll Track.program is 0-based (0-127); these
    1-based GM ranges may be off by one at the boundaries -- confirm
    against the dataset.
    """
    # Family label -> GM program-number range for the four kept instruments.
    family_programs = (
        ('Piano', range(1, 9)),
        ('Organ', range(41, 49)),   # Ensemble range in GM, stored under 'Organ'
        ('Bass', range(33, 41)),
        ('Guitar', range(25, 33)),
    )
    family = None
    for label, programs in family_programs:
        if track.program in programs:
            family = label
            break
    if family is None:
        print("Skipping this instrument------------------->", track.name)
    elif isinstance (collection, dict):
        collection[family].append(track)
    else: #collection is a plain list of chosen tracks
        collection.append(track)
    return collection
#helper function that returns the pianorolls merged to 4 tracks for 4 chosen instruments
def get_merged(music_tracks, filename):
    """Merge a Multitrack's tracks into a single (n, 32, 128, 4) array.

    One channel per chosen instrument family (Piano/Organ/Bass/Guitar).
    `filename` is accepted but not used inside this function.
    """
    chosen_tracks = []
    #choose the tracks from the Multitrack object
    for index, track in enumerate(music_tracks.tracks):
        chosen_tracks = store_track(track, chosen_tracks)
    #dictionary to hold reshaped pianorolls for 4 chosen instruments
    reshaped_piano_roll_dict = {'Piano': [], 'Organ': [], 'Bass': [], 'Guitar': []}
    #loop thru chosen tracks
    for index, track in enumerate(chosen_tracks):
        # fig, ax = track.plot()
        # plt.show()
        try:
            #reshape pianoroll to 2 bar (i.e. 32 time step) chunks
            track.pianoroll = track.pianoroll.reshape( -1, 32, 128)
            #store reshaped pianoroll per instrument
            reshaped_piano_roll_dict = store_track(track, reshaped_piano_roll_dict)
        except Exception as e:
            # Tracks whose length is not a multiple of 32 cannot be reshaped.
            print("ERROR!!!!!----> Skipping track # ", index, " with error ", e)
    #will hold all merged instrument tracks
    merge_piano_roll_list = []
    for instrument in reshaped_piano_roll_dict:
        try:
            merged_pianorolls = np.empty(shape=(0,32,128))
            #concatenate/stack all tracks for a single instrument
            if len(reshaped_piano_roll_dict[instrument]) > 0:
                if reshaped_piano_roll_dict[instrument]:
                    merged_pianorolls = np.stack([track.pianoroll for track in reshaped_piano_roll_dict[instrument]], -1)
                    # NOTE(review): the [..., 0] slice keeps only the FIRST
                    # stacked track per instrument, and np.any over axis 0 then
                    # collapses ALL of its 2-bar chunks into a single boolean
                    # (32, 128) roll -- confirm this aggressive reduction is
                    # the intended "merge".
                    merged_pianorolls = merged_pianorolls[:, :, :, 0]
                    merged_piano_rolls = np.any(merged_pianorolls, axis=0)
                    merge_piano_roll_list.append(merged_piano_rolls)
        except Exception as e:
            print("ERROR!!!!!----> Cannot concatenate/merge track for instrument", instrument, " with error ", e)
            continue;
    # Stack the 4 instrument rolls along a trailing channel axis.
    merge_piano_roll_list = np.stack([track for track in merge_piano_roll_list], -1)
    return merge_piano_roll_list.reshape(-1,32,128,4)
#holds final reshaped tracks that will be saved to training .npy file
track_list = np.empty(shape=(0,32,128,4))
#init with beat resolution of 4
music_tracks = pypianoroll.Multitrack(beat_resolution=4)
#loop through all the .mid files
for filename in os.listdir(hiphop_midi_location):
    print("Starting to process filename---->", hiphop_midi_location + filename)
    if filename.endswith(".mid"):
        try:
            #Load MIDI file using parse_midi
            #returns Multi-Track object containing Track objects
            music_tracks.parse_midi(hiphop_midi_location + filename)
            #add padding to avoid reshape errors
            #pad the pianorolls with zeros making the length a multiple of 32
            music_tracks.pad_to_multiple(32)
            music_tracks.pad_to_same()
            #merge pianoroll objects by instrument
            merged_tracks_to_add_to_training_file = get_merged(music_tracks, filename)
            #concatenate merged pianoroll objects to final training data track list
            track_list = np.concatenate((merged_tracks_to_add_to_training_file, track_list))
            print("Successfully processed filename---->", hiphop_midi_location + filename)
        except Exception as e:
            print("**********ERROR**************It's possible that not all 4 instruments exist in this track; at least one is 0")
            print("Skipping file---->", filename, e)
            print(e)
# Binarize to {-1, 1}: zeros become -1 FIRST, then every remaining
# non-negative (i.e. originally positive) value becomes 1 -- order matters.
track_list[track_list == 0] = -1
track_list[track_list >= 0] = 1
#split the data into training and evaluation datasets (two equal halves)
training_data, eval_data = np.split(track_list, 2)
#save training data
save(train_dir + '/hiphop-train.npy', np.array(training_data))
#save evaluation data
# NOTE(review): dataset_eval_dir ('./dataset/') is never created by the
# os.makedirs calls above -- np.save will fail if it does not exist; confirm.
save(dataset_eval_dir + '/eval.npy', np.array(eval_data))
```
# Review Training Data
```
#double check the shape on training data, should be (x, 32, 128, 4), where x represents the amount of records
training_data = np.load(train_dir + '/hiphop-train.npy')
print("Testing the training shape: ", training_data.shape)
#view a sample of data that will be fed to the model, four graphs == four tracks
display_utils.show_pianoroll(training_data)
```
# Prepare the Dataset and Build Model Architecture
> The code below is based on the Lab 2 code (with only minor modifications) provided by AWS in the AWS DeepComposer samples found here: https://github.com/aws-samples/aws-deepcomposer-samples/blob/master/Lab%202/GAN.ipynb. Please review for more detailed explanations and copyright info.
We now create a Tensorflow dataset object from our numpy array to feed into our model. The dataset object helps us feed batches of data into our model. A batch is a subset of the data that is passed through the deep learning network before the weights are updated. Batching data is necessary in most training scenarios as our training environment might not be able to load the entire dataset into memory at once.
```
#Number of input data samples in a batch
BATCH_SIZE = 5
#Shuffle buffer size for shuffling data
SHUFFLE_BUFFER_SIZE = 10
#Preloads PREFETCH_SIZE batches so that there is no idle time between batches
PREFETCH_SIZE = 2
def prepare_dataset(filename):
    """Build an infinite, shuffled, batched tf.data pipeline from a .npy file.

    Args:
        filename: path to the training pianoroll array saved by the data-prep
            step, shaped (num_samples, 32, 128, 4) with values in {-1, 1}.

    Returns:
        A tf.data.Dataset yielding float32 batches of shape
        (BATCH_SIZE, 32, 128, 4); incomplete final batches are dropped.
    """
    # Fix: the original ignored `filename` and always re-loaded the
    # hard-coded train_dir path; honour the argument (the caller below
    # already passes exactly that path, so behavior is unchanged for it).
    data = np.load(filename)
    data = np.asarray(data, dtype=np.float32)  # {-1, 1}
    print('data shape = {}'.format(data.shape))
    dataset = tf.data.Dataset.from_tensor_slices(data)
    dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat()
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    dataset = dataset.prefetch(PREFETCH_SIZE)
    return dataset
dataset = prepare_dataset(train_dir + '/hiphop-train.npy')
```
## Model architecture
In this section, we will walk through the architecture of the proposed GAN.
The model consists of two networks, a generator and a critic. These two networks work in a tight loop as following:
* Generator:
1. The generator takes in a batch of single-track piano rolls (melody) as the input and generates a batch of multi-track piano rolls as the output by adding accompaniments to each of the input music tracks.
2. The critic then takes these generated music tracks and predicts how far it deviates from the real data present in your training dataset.
3. This feedback from the critic is used by the generator to update its weights.
* Critic: As the generator gets better at creating better music accompaniments using the feedback from the critic, the critic needs to be retrained as well.
1. Train the critic with the music tracks just generated by the generator as fake inputs and an equivalent number of songs from the original dataset as the real input.
* Alternate between training these two networks until the model converges and produces realistic music, beginning with the critic on the first iteration.
We use a special type of GAN called the **Wasserstein GAN with Gradient Penalty** (or **WGAN-GP**) to generate music. While the underlying architecture of a WGAN-GP is very similar to vanilla variants of GAN, WGAN-GPs help overcome some of the commonly seen defects in GANs such as the vanishing gradient problem and mode collapse (see appendix for more details).
Note our "critic" network is more generally called a "discriminator" network in the more general context of vanilla GANs.
### Generator
The generator is adapted from the U-Net architecture (a popular CNN that is used extensively in the computer vision domain), consisting of an “encoder” that maps the single track music data (represented as piano roll images) to a relatively lower dimensional “latent space“ and a ”decoder“ that maps the latent space back to multi-track music data.
Here are the inputs provided to the generator:
**Single-track piano roll input**: A single melody track of size (32, 128, 1) => (TimeStep, NumPitches, NumTracks) is provided as the input to the generator.
**Latent noise vector**: A latent noise vector z of dimension (2, 8, 512) is also passed in as input and this is responsible for ensuring that there is a distinctive flavor to each output generated by the generator, even when the same input is provided.
Notice from the figure below that the encoding layers of the generator on the left side and decoder layer on on the right side are connected to create a U-shape, thereby giving the name U-Net to this architecture.
<img src="images/dgen.png" alt="Generator architecture" width="800">
In this implementation, we build the generator following a simple four-level Unet architecture by combining `_conv2d`s and `_deconv2d`, where `_conv2d` compose the contracting path and `_deconv2d` forms the expansive path.
```
def _conv2d(layer_input, filters, f_size=4, bn=True):
    """Generator downsampling block: stride-2 conv -> LeakyReLU -> optional BN.

    Halves the spatial resolution of `layer_input`.
    """
    out = tf.keras.layers.Conv2D(
        filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
    out = tf.keras.layers.LeakyReLU(alpha=0.2)(out)
    if not bn:
        return out
    return tf.keras.layers.BatchNormalization(momentum=0.8)(out)
def _deconv2d(layer_input, pre_input, filters, f_size=4, dropout_rate=0):
    """Generator upsampling block.

    Doubles the spatial resolution, then concatenates the matching encoder
    activation `pre_input` as a U-Net skip connection.
    """
    up = tf.keras.layers.UpSampling2D(size=2)(layer_input)
    up = tf.keras.layers.Conv2D(
        filters, kernel_size=f_size, strides=1, padding='same')(up)
    up = tf.keras.layers.BatchNormalization(momentum=0.8)(up)
    up = tf.keras.layers.ReLU()(up)
    if dropout_rate:
        up = tf.keras.layers.Dropout(dropout_rate)(up)
    return tf.keras.layers.Concatenate()([up, pre_input])
def build_generator(condition_input_shape=(32, 128, 1), filters=64,
                    instruments=4, latent_shape=(2, 8, 512)):
    """Build the U-Net generator.

    Args:
        condition_input_shape: shape of the single-track (melody) pianoroll.
        filters: base number of conv filters, doubled at each encoder level.
        instruments: number of output tracks (channels).
        latent_shape: shape of the noise tensor concatenated at the bottleneck.

    Returns:
        A tf.keras Model mapping [condition, noise] -> (32, 128, instruments)
        pianoroll with tanh outputs in [-1, 1].
    """
    c_input = tf.keras.layers.Input(shape=condition_input_shape)
    z_input = tf.keras.layers.Input(shape=latent_shape)
    # Encoder: four stride-2 conv blocks, 32x128 -> 2x8 spatially.
    d1 = _conv2d(c_input, filters, bn=False)
    d2 = _conv2d(d1, filters * 2)
    d3 = _conv2d(d2, filters * 4)
    d4 = _conv2d(d3, filters * 8)
    # Inject the latent noise at the bottleneck.
    d4 = tf.keras.layers.Concatenate(axis=-1)([d4, z_input])
    # Decoder with skip connections to the matching encoder levels.
    u4 = _deconv2d(d4, d3, filters * 4)
    u5 = _deconv2d(u4, d2, filters * 2)
    u6 = _deconv2d(u5, d1, filters)
    u7 = tf.keras.layers.UpSampling2D(size=2)(u6)
    output = tf.keras.layers.Conv2D(instruments, kernel_size=4, strides=1,
                                    padding='same', activation='tanh')(u7) # 32, 128, 4
    generator = tf.keras.models.Model([c_input, z_input], output, name='Generator')
    return generator
```
Let us now dive into each layer of the generator to see the inputs/outputs at each layer.
```
# Models
# Instantiate the generator and print its layer-by-layer summary.
generator = build_generator()
generator.summary()
```
### Critic (Discriminator)
The goal of the critic is to provide feedback to the generator about how realistic the generated piano rolls are, so that the generator can learn to produce more realistic data. The critic provides this feedback by outputting a scalar that represents how “real” or “fake” a piano roll is.
Since the critic tries to classify data as “real” or “fake”, it is not very different from commonly used binary classifiers. We use a simple architecture for the critic, composed of four convolutional layers and a dense layer at the end.
<img src="images/ddis.png" alt="Discriminator architecture" width="800">
```
def _build_critic_layer(layer_input, filters, f_size=4):
    """Critic downsampling block.

    This layer decreases the spatial resolution by 2:
    input: [batch_size, in_channels, H, W]
    output: [batch_size, out_channels, H/2, W/2]
    """
    conv = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2,
                                  padding='same')(layer_input)
    # The critic intentionally omits batch-norm (standard for WGAN-GP critics).
    return tf.keras.layers.LeakyReLU(alpha=0.2)(conv)
def build_critic(pianoroll_shape=(32, 128, 4), filters=64):
    """WGAN critic.

    Scores a 4-track pianoroll conditioned on its melody track: four
    stride-2 conv blocks followed by a single-unit dense layer with no
    activation (WGAN critics output an unbounded scalar, not a probability).
    """
    condition_input_shape = (32,128,1)
    groundtruth_pianoroll = tf.keras.layers.Input(shape=pianoroll_shape)
    condition_input = tf.keras.layers.Input(shape=condition_input_shape)
    # Condition by channel-concatenating the melody onto the full pianoroll.
    combined_imgs = tf.keras.layers.Concatenate(axis=-1)([groundtruth_pianoroll, condition_input])
    d1 = _build_critic_layer(combined_imgs, filters)
    d2 = _build_critic_layer(d1, filters * 2)
    d3 = _build_critic_layer(d2, filters * 4)
    d4 = _build_critic_layer(d3, filters * 8)
    x = tf.keras.layers.Flatten()(d4)
    logit = tf.keras.layers.Dense(1)(x)
    critic = tf.keras.models.Model([groundtruth_pianoroll,condition_input], logit,
                                   name='Critic')
    return critic
# Create the Discriminator (critic) and print its architecture.
critic = build_critic()
critic.summary() # View discriminator architecture.
```
## Training
We train our models by searching for model parameters which optimize an objective function. For our WGAN-GP, we have special loss functions that we minimize as we alternate between training our generator and critic networks:
*Generator Loss:*
* We use the Wasserstein (Generator) loss function which is negative of the Critic Loss function. The generator is trained to bring the generated pianoroll as close to the real pianoroll as possible.
* $\frac{1}{m} \sum_{i=1}^{m} -D_w(G(z^{i}|c^{i})|c^{i})$
*Critic Loss:*
* We begin with the Wasserstein (Critic) loss function designed to maximize the distance between the real piano roll distribution and generated (fake) piano roll distribution.
* $\frac{1}{m} \sum_{i=1}^{m} [D_w(G(z^{i}|c^{i})|c^{i}) - D_w(x^{i}|c^{i})]$
* We add a gradient penalty loss function term designed to control how the gradient of the critic with respect to its input behaves. This makes optimization of the generator easier.
* $\frac{1}{m} \sum_{i=1}^{m}(\lVert \nabla_{\hat{x}^i}D_w(\hat{x}^i|c^{i}) \rVert_2 - 1)^2 $
```
# Define the different loss functions
def generator_loss(critic_fake_output):
    """ Wasserstein GAN loss
    (Generator) -D(G(z|c))
    """
    mean_fake_score = tf.reduce_mean(critic_fake_output)
    return -mean_fake_score
def wasserstein_loss(critic_real_output, critic_fake_output):
    """ Wasserstein GAN loss
    (Critic) D(G(z|c)) - D(x|c)
    """
    mean_fake_score = tf.reduce_mean(critic_fake_output)
    mean_real_score = tf.reduce_mean(critic_real_output)
    return mean_fake_score - mean_real_score
def compute_gradient_penalty(critic, x, fake_x):
    """WGAN-GP gradient penalty: E[(||grad D(x_interp | c)||_2 - 1)^2].

    Args:
        critic: the critic model.
        x: batch of real pianorolls; channel 0 is used as the condition track.
        fake_x: batch of generated pianorolls.

    Returns:
        Scalar penalty averaged over the batch.
    """
    # Condition track is channel 0 of the real batch.
    c = tf.expand_dims(x[..., 0], -1)
    batch_size = x.get_shape().as_list()[0]
    # One uniform mixing coefficient per sample, broadcast over all other dims.
    eps_x = tf.random.uniform(
        [batch_size] + [1] * (len(x.get_shape()) - 1)) # B, 1, 1, 1, 1
    # Random point on the straight line between each real and fake sample.
    inter = eps_x * x + (1.0 - eps_x) * fake_x
    with tf.GradientTape() as g:
        g.watch(inter)  # `inter` is not a variable, so watch it explicitly
        disc_inter_output = critic((inter,c), training=True)
    grads = g.gradient(disc_inter_output, inter)
    # Per-sample L2 norm of the gradient; 1e-8 guards sqrt(0).
    # NOTE(review): `reduction_indices` is the deprecated TF1 alias for
    # `axis` -- fine on TF 1.14 (pinned above) but removed in TF 2.x.
    slopes = tf.sqrt(1e-8 + tf.reduce_sum(
        tf.square(grads),
        reduction_indices=tf.range(1, grads.get_shape().ndims)))
    gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))
    return gradient_penalty
```
With our loss functions defined, we associate them with Tensorflow optimizers to define how our model will search for a good set of model parameters. We use the *Adam* algorithm, a commonly used general-purpose optimizer. We also set up checkpoints to save our progress as we train.
```
# Setup Adam optimizers for both G and D
# (beta_1=0.5, beta_2=0.9 -- the hyperparameters commonly used for WGAN-GP)
generator_optimizer = tf.keras.optimizers.Adam(1e-3, beta_1=0.5, beta_2=0.9)
critic_optimizer = tf.keras.optimizers.Adam(1e-3, beta_1=0.5, beta_2=0.9)
# We define our checkpoint directory and where to save trained checkpoints:
# both networks and both optimizer states are captured, keeping the 5 newest.
ckpt = tf.train.Checkpoint(generator=generator,
                           generator_optimizer=generator_optimizer,
                           critic=critic,
                           critic_optimizer=critic_optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, check_dir, max_to_keep=5)
```
Now we define the `generator_train_step` and `critic_train_step` functions, each of which performs a single forward pass on a batch and returns the corresponding loss.
```
@tf.function
def generator_train_step(x, condition_track_idx=0):
    """One generator update on a real batch.

    Args:
        x: real batch of shape (BATCH_SIZE, 32, 128, 4).
        condition_track_idx: which instrument channel is the melody condition.

    Returns:
        The scalar generator loss for this batch.
    """
    ############################################
    #(1) Update G network: maximize D(G(z|c))
    ############################################
    # Extract condition track to make real batches pianoroll
    c = tf.expand_dims(x[..., condition_track_idx], -1)
    # Generate batch of latent vectors
    z = tf.random.truncated_normal([BATCH_SIZE, 2, 8, 512])
    with tf.GradientTape() as tape:
        fake_x = generator((c, z), training=True)
        # Critic runs in inference mode; its weights are not updated here
        # (gradients below are taken only w.r.t. generator variables).
        fake_output = critic((fake_x,c), training=False)
        # Calculate Generator's loss based on this generated output
        gen_loss = generator_loss(fake_output)
    # Calculate gradients for Generator
    gradients_of_generator = tape.gradient(gen_loss,
                                           generator.trainable_variables)
    # Update Generator
    generator_optimizer.apply_gradients(
        zip(gradients_of_generator, generator.trainable_variables))
    return gen_loss
@tf.function
def critic_train_step(x, condition_track_idx=0):
    """One critic update: Wasserstein loss plus weighted gradient penalty.

    Args:
        x: real batch of shape (BATCH_SIZE, 32, 128, 4).
        condition_track_idx: which instrument channel is the melody condition.

    Returns:
        The combined scalar critic loss (Wasserstein + penalty) for this batch.
    """
    ############################################################################
    #(2) Update D network: maximize (D(x|c)) + (1 - D(G(z|c))|c) + GradientPenality()
    ############################################################################
    # Extract condition track to make real batches pianoroll
    c = tf.expand_dims(x[..., condition_track_idx], -1)
    # Generate batch of latent vectors
    z = tf.random.truncated_normal([BATCH_SIZE, 2, 8, 512])
    # Generated fake pianoroll (generator in inference mode; not updated here)
    fake_x = generator((c, z), training=False)
    # Update critic parameters
    with tf.GradientTape() as tape:
        real_output = critic((x,c), training=True)
        fake_output = critic((fake_x,c), training=True)
        critic_loss = wasserstein_loss(real_output, fake_output)
    # Calculate the gradients from the real and fake batches
    grads_of_critic = tape.gradient(critic_loss,
                                    critic.trainable_variables)
    with tf.GradientTape() as tape:
        gp_loss = compute_gradient_penalty(critic, x, fake_x)
        gp_loss *= 10.0  # gradient-penalty weight (lambda = 10)
    # Calculate the gradient-penalty gradients
    grads_gp = tape.gradient(gp_loss, critic.trainable_variables)
    # NOTE(review): the `if ggp is not None` filter can shorten this list,
    # which would misalign the zip with critic.trainable_variables in
    # apply_gradients below -- confirm every variable receives a GP gradient.
    gradients_of_critic = [g + ggp for g, ggp in
                           zip(grads_of_critic, grads_gp)
                           if ggp is not None]
    # Update Critic
    critic_optimizer.apply_gradients(
        zip(gradients_of_critic, critic.trainable_variables))
    return critic_loss + gp_loss
```
Before we begin training, let's define some training configuration parameters and prepare to monitor important quantities. Here we log the losses and metrics which we can use to determine when to stop training. Consider coming back here to tweak these parameters and explore how your model responds.
```
# Load a fixed input sample (sample_x) and latent vector (sample_z) used to
# monitor generator progress during training (n_sample=1 sample is requested).
sample_x, sample_z = inference_utils.load_melody_samples(n_sample=1)
# Number of iterations to train for
iterations = 500
# Update critic n times per generator update (standard WGAN practice)
n_dis_updates_per_gen_update = 5
# Determine input track in sample_x that we condition on
condition_track_idx = 0
# Fixed condition extracted once so progress snapshots are comparable.
sample_c = tf.expand_dims(sample_x[..., condition_track_idx], -1)
```
Let us now train our model!
```
# Clear out any old metrics we've collected
metrics_utils.metrics_manager.initialize()
# Keep a running list of various quantities:
c_losses = []
g_losses = []
# Data iterator to iterate over our dataset
it = iter(dataset)
for iteration in range(iterations):
    # Train critic n times per generator update
    for _ in range(n_dis_updates_per_gen_update):
        c_loss = critic_train_step(next(it))
    # Train generator
    g_loss = generator_train_step(next(it))
    # Save Losses for plotting later
    c_losses.append(c_loss)
    g_losses.append(g_loss)
    # Live loss plot: redraw the full loss history each iteration.
    display.clear_output(wait=True)
    fig = plt.figure(figsize=(15, 5))
    line1, = plt.plot(range(iteration+1), c_losses, 'r')
    line2, = plt.plot(range(iteration+1), g_losses, 'k')
    plt.xlabel('Iterations')
    plt.ylabel('Losses')
    plt.legend((line1, line2), ('C-loss', 'G-loss'))
    display.display(fig)
    plt.close(fig)
    # Output training stats
    print('Iteration {}, c_loss={:.2f}, g_loss={:.2f}'.format(iteration, c_loss, g_loss))
    # Save checkpoints, music metrics, generated output
    # (metrics every iteration for the first 100, then every 50th)
    if iteration < 100 or iteration % 50 == 0 :
        # Check how the generator is doing by saving G's samples on fixed_noise
        fake_sample_x = generator((sample_c, sample_z), training=False)
        metrics_utils.metrics_manager.append_metrics_for_iteration(fake_sample_x.numpy(), iteration)
    if iteration % 50 == 0:
        # Save the checkpoint to disk.
        ckpt_manager.save(checkpoint_number=iteration)
        fake_sample_x = fake_sample_x.numpy()
        # plot the pianoroll
        #display_utils.plot_pianoroll(iteration, sample_x[:4], fake_sample_x[:4], save_dir=train_dir)
        # generate the midi
        destination_path = path_utils.generated_midi_path_for_iteration(iteration, saveto_dir=sample_dir)
        #use the programs to provide the program numbers for the instruments I care about
        # 35 = Electric Bass (pick), 49 = String Ensemble 1, 1 = Acoustic Grand Piano, 25 = Acoustic Guitar (nylon)
        # TODO: CHANGE THIS BASED ON YOUR SPECIFIC DATASET
        midi_utils.save_pianoroll_as_midi(fake_sample_x[:4], programs=[35, 49, 1, 25], destination_path=destination_path)
```
### We have started training!
When using the Wasserstein loss function, we should train the critic to converge to ensure that the gradients for the generator update are accurate. This is in contrast to a standard GAN, where it is important not to let the critic get too strong, to avoid vanishing gradients.
Therefore, using the Wasserstein loss removes one of the key difficulties of training GANs—how to balance the training of the discriminator and generator. With WGANs, we can simply train the critic several times between generator updates, to ensure it is close to convergence. A typical ratio used is five critic updates to one generator update.
### "Babysitting" the learning process
Given that training these models can be an investment in time and resources, we must continuously monitor training in order to catch and address anomalies if/when they occur. Here are some things to look out for:
**What should the losses look like?**
The adversarial learning process is highly dynamic and high-frequency oscillations are quite common. However if either loss (critic or generator) skyrockets to huge values, plunges to 0, or get stuck on a single value, there is likely an issue somewhere.
**Is my model learning?**
- Monitor the critic loss and other music quality metrics (if applicable). Are they following the expected trajectories?
- Monitor the generated samples (piano rolls). Are they improving over time? Do you see evidence of mode collapse? Have you tried listening to your samples?
**How do I know when to stop?**
- If the samples meet your expectations
- Critic loss no longer improving
- The expected value of the musical quality metrics converge to the corresponding expected value of the same metric on the training data
### How to measure sample quality during training
Typically, when training any sort of neural networks, it is standard practice to monitor the value of the loss function throughout the duration of the training. The critic loss in WGANs has been found to correlate well with sample quality.
While standard mechanisms exist for evaluating the accuracy of more traditional models like classifiers or regressors, evaluating generative models is an active area of research. Within the domain of music generation, this hard problem is even less well-understood.
To address this, we take high-level measurements of our data and show how well our model produces music that aligns with those measurements. If our model produces music which is close to the mean value of these measurements for our training dataset, our music should match on general “shape”.
We’ll look at three such measurements:
- **Empty bar rate:** The ratio of empty bars to total number of bars.
- **Pitch histogram distance:** A metric that captures the distribution and position of pitches.
- **In Scale Ratio:** Ratio of the number of notes that are in C major key, which is a common key found in music, to the total number of notes.
## Evaluate results
Now that we have finished training, let's find out how we did. We will analyze our model in several ways:
1. Examine how the generator and critic losses changed while training
2. Understand how certain musical metrics changed while training
3. Visualize generated piano roll output for a fixed input at every iteration and create a video
Let us first restore our last saved checkpoint. If you did not complete training but still want to continue with a pre-trained version, set `TRAIN = False`.
```
# Restore the most recent training checkpoint (generator weights only).
ckpt = tf.train.Checkpoint(generator=generator)
ckpt_manager = tf.train.CheckpointManager(ckpt, check_dir, max_to_keep=5)
# expect_partial(): the checkpoint also holds critic/optimizer state we skip.
ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()
print('Latest checkpoint {} restored.'.format(ckpt_manager.latest_checkpoint))
```
### Plot losses
```
display_utils.plot_loss_logs(g_losses, c_losses, figsize=(15, 5), smoothing=0.01)
```
Observe how the critic loss (C_loss in the graph) decays to zero as we train. In WGAN-GPs, the critic loss decreases (almost) monotonically as you train.
### Plot metrics
```
# Compare music-quality metrics of generated samples against the training set.
metrics_utils.metrics_manager.set_reference_metrics(training_data)
metrics_utils.metrics_manager.plot_metrics()
```
Each row here corresponds to a different music quality metric and each column denotes an instrument track.
Observe how the expected value of the different metrics (blue scatter) approach the corresponding training set expected values (red) as the number of iterations increase. You might expect to see diminishing returns as the model converges.
### Generated samples during training
The function below helps you probe intermediate samples generated in the training process. Remember that the conditioned input here is sampled from our training data. Let's start by listening to and observing a sample at iteration 0 and then iteration 100. Notice the difference!
```
# Enter an iteration number (divisible by 50, since MIDI is saved every 50
# iterations in the training loop) and listen to the midi at that iteration
iteration = 150
midi_file = os.path.join(sample_dir, 'iteration-{}.mid'.format(iteration))
display_utils.playmidi(midi_file)
# Enter an iteration number (divisible by 50) and look at the generated pianorolls at that iteration
iteration = 150
# NOTE(review): these PNGs come from display_utils.plot_pianoroll, which is
# commented out in the training loop above -- re-enable it for this cell to work.
pianoroll_png = os.path.join(train_dir, 'sample_iteration_%05d.png' % iteration)
display.Image(filename=pianoroll_png)
```
Let's see how the generated piano rolls change with the number of iterations.
```
from IPython.display import Video
display_utils.make_training_video(train_dir)
video_path = "movie.mp4"
Video(video_path)
```
## Inference
### Generating accompaniment for custom input
Congratulations! You have trained your very own WGAN-GP to generate music. Let us see how our generator performs on a custom input.
The function below generates a new song based on "Still Not a Playa - Big Pun".
```
conditioned_track = midi_utils.get_conditioned_track(midi='./notaplaya.midi')
generated_pianoroll = inference_utils.generate_pianoroll(generator, conditioned_track)
destination_path = path_utils.new_temp_midi_path(saveto_dir=eval_dir)
# 35 = Electric Bass (pick), 49 = String Ensemble 1, 1 = Acoustic Grand Piano, 25 = Acoustic Guitar (nylon)
# TODO: CHANGE THIS BASED ON YOUR SPECIFIC DATASET
midi_utils.save_pianoroll_as_midi(generated_pianoroll.numpy(), destination_path=destination_path, programs=[35, 49, 1, 25],)
latest_midi = destination_path
display_utils.playmidi(latest_midi)
```
We can also take a look at the generated piano rolls for a certain sample, to see how diverse they are!
```
inference_utils.show_generated_pianorolls(generator, eval_dir, input_midi_file='./notaplaya.midi')
```
| github_jupyter |
<small>
Copyright (c) 2017 Andrew Glassner
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
</small>
# Deep Learning From Basics to Practice
## by Andrew Glassner, https://dlbasics.com, http://glassner.com
------
## Chapter 16: Feed-forward Networks
This notebook is provided as a “behind-the-scenes” look at code used to make some of the figures in this chapter. It is still in the hacked-together form used to develop the figures, and is only lightly commented.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
import seaborn as sns ; sns.set()
# Make a File_Helper for saving and loading files.
save_files = True
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir
from DLBasics_Utilities import File_Helper
file_helper = File_Helper(save_files)
# Height of a Gaussian (mu, sigma-squared) at x
def gaussian(x, mu, sigma2):
    """Height at x of a Gaussian pdf with mean mu and variance sigma2."""
    two_var = 2 * sigma2
    norm_const = 1 / math.sqrt(math.pi * two_var)
    exponent = ((x - mu) ** 2) / two_var
    return norm_const * np.exp(-exponent)
def make_uniform_figure():
    """Draw a schematic uniform density over [-0.05, 0.05] and save the figure."""
    extent = .1
    plt_blue = '#4d74ae'  # matplotlib default blue
    # Zero-density tails on either side of the interval
    plt.plot([-extent, -0.05], [0, 0], c=plt_blue)
    plt.plot([0.05, extent], [0, 0], c=plt_blue)
    # Constant density of 10 across the interval
    plt.plot([-.05, .05], [10, 10], c=plt_blue)
    # Open circles at height 0 (excluded), filled circles at height 10 (included)
    for endpoint in (-.05, .05):
        plt.scatter([endpoint], [0], s=130, facecolors='white',
                    edgecolors=plt_blue, lw=2, zorder=10)
        plt.scatter([endpoint], [10], s=130, c=plt_blue)
    plt.xlim(-extent, extent)
    plt.ylim(-.5, 10.5)
    file_helper.save_figure('uniform-init')
    plt.show()
make_uniform_figure()
def make_normal_figure():
    """Plot a narrow Gaussian bump (normal weight-initialization sketch) and save it."""
    extent = .1
    plt_blue = '#4d74ae'  # matplotlib default blue
    mu = 0
    sigma = 0.0003  # really sigma^2
    sample_xs = np.linspace(-extent, extent, 200)
    sample_ys = [gaussian(x, mu, sigma) for x in sample_xs]
    plt.plot(sample_xs, sample_ys, lw=2, c=plt_blue)
    plt.xlim(-extent, extent)
    plt.ylim(-.15, 25)
    file_helper.save_figure('normal-init')
    plt.show()
make_normal_figure()
```
| github_jupyter |
## VQE and Quantum Graph Neural Networks
```
import numpy
import math
import random
import numpy as np
import scipy
from matplotlib import pyplot as plt
from tqdm import tqdm
from scipy.optimize import minimize
import networkx as nx
import cirq
```
In order to begin thinking about the quantum graph neural network, we must prepare some training data. We perform VQE in order to find the ground state of a given Ising model Hamiltonian:
```
# Initialize the necessary qubits: row 0 holds the data register, row 1 the network register
qubit_number = 4
data_register = [cirq.GridQubit(0, i) for i in range(0, qubit_number)]
network_register = [cirq.GridQubit(1, i) for i in range(0, qubit_number)]
# Creates the graph structure of the quantum system (a 4-cycle of spins)
ising_graph = nx.Graph()
ising_graph.add_nodes_from(range(0, qubit_number))
ising_graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0)])
nx.draw(ising_graph)
plt.show()
# Creates random target parameters: row 0 = per-edge ZZ couplings, row 1 = per-qubit X fields
matrix_params = [[random.randint(10, 100)/10 for i in range(0, 4)] for j in range(0, 2)]
print(matrix_params)
# Exponentiated ZZ coupling: RZ on the target conjugated by CNOTs
def rz(control, target, param):
    """Yield exp(-i*param/2 * Z_c Z_t) implemented as CX - rz - CX."""
    entangler = cirq.CX.on(control, target)
    yield entangler
    yield cirq.rz(param).on(target)
    yield entangler
# Prepares the uniform superposition on a register
def even_superposition(qubits):
    """Yield a Hadamard on every qubit of the register."""
    return (cirq.H.on(q) for q in qubits)
# One QAOA layer over the interaction graph
def qaoa_layer(param1, param2, qubits, ising_graph):
    """ZZ couplings (param1, one per edge) followed by RX mixers (param2, one per qubit)."""
    # Coupling gates, one per graph edge
    for idx, edge in enumerate(ising_graph.edges):
        yield rz(qubits[edge[0]], qubits[edge[1]], 2 * param1[idx])
    # Single-qubit mixer rotations
    for pos, qubit in enumerate(qubits):
        yield cirq.rx(2 * param2[pos]).on(qubit)
# Method that prepares the decoupled (single-qubit) layers
def decoupled_layer(param1, param2, qubits):
    """Independent single-qubit rotations: Z^param1[i] then X^param2[i] on qubit i.

    BUG FIX: the original applied ``param1[i]`` to BOTH the Z and the X gate,
    leaving ``param2`` entirely unused; the X rotation now uses ``param2[i]``
    as the two-parameter signature implies.
    """
    for i in range(0, len(qubits)):
        yield cirq.ZPowGate(exponent=param1[i]).on(qubits[i])
        yield cirq.XPowGate(exponent=param2[i]).on(qubits[i])
# Method that prepares the VQE circuit that will be used to find the ground state
def vqe_circuit(parameters, qubits, ising_graph):
    # Two stacked layers of independent single-qubit Z/X rotations.
    # NOTE(review): `ising_graph` is accepted but never used in this body —
    # confirm whether an entangling layer between the two was intended.
    yield decoupled_layer(parameters[0], parameters[1], qubits)
    yield decoupled_layer(parameters[2], parameters[3], qubits)
test_circuit = cirq.Circuit()
test_circuit.append(vqe_circuit([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], data_register, ising_graph))
print(test_circuit)
# Creates the Hamiltonian that we are attempting to learn
def create_hamiltonian_matrix(n, graph, params=None):
    """Build the 2^n x 2^n transverse-field Ising Hamiltonian for `graph`.

    H = sum_{(a,b) in edges} J_ab Z_a Z_b + sum_i h_i X_i, with couplings J
    taken from params[0] (one per edge) and fields h from params[1] (one per
    qubit).

    Parameters
    ----------
    n : int
        Number of qubits (spins).
    graph : networkx.Graph-like
        Object exposing `.edges`, each edge a pair of qubit indices.
    params : sequence of two sequences, optional
        [couplings, fields]. Defaults to the module-level `matrix_params`,
        preserving the original global-variable behaviour.
    """
    if params is None:
        params = matrix_params  # backward-compatible default (module global)
    # Pauli matrices (the unused pauli_y of the original has been removed)
    pauli_x = np.array([[0, 1], [1, 0]])
    pauli_z = np.array([[1, 0], [0, -1]])
    identity = np.array([[1, 0], [0, 1]])
    matrix = np.zeros((2**n, 2**n))
    # Interaction component: Z on the two edge sites, identity elsewhere
    for count, edge in enumerate(graph.edges):
        m = 1
        for j in range(0, n):
            if j in (edge[0], edge[1]):
                m = np.kron(m, pauli_z)
            else:
                m = np.kron(m, identity)
        matrix = np.add(matrix, params[0][count] * m)
    # "Bias" (transverse-field) component: X on site i, identity elsewhere
    for i in range(0, n):
        m = 1
        for j in range(0, n):
            if j == i:
                m = np.kron(m, pauli_x)
            else:
                m = np.kron(m, identity)
        matrix = np.add(matrix, params[1][i] * m)
    return matrix
print(create_hamiltonian_matrix(qubit_number, ising_graph))
def create_density_matrix(arr):
    """Render a (density) matrix as a colour map with a colour bar."""
    matrix_view = np.array(arr)
    plt.matshow(matrix_view)
    plt.colorbar()
    plt.show()
# Finds the eigenvector corresponding to the lowest energy state
val, vec = np.linalg.eig(create_hamiltonian_matrix(qubit_number, ising_graph))
m = []
min_ind = list(val).index(min(val))
print(val[min_ind])
for i in range(0, 2**qubit_number):
m.append(vec[i][min_ind])
'''
def apply(n):
return float(n*np.conj(n))
func_vec = np.vectorize(apply)
new = func_vec(np.outer(m, m))
'''
create_density_matrix(np.real(np.outer(m, np.conj(m))))
# Creates the VQE method that we will optimize
def create_circuit(parameters, qubits):
    """Run the VQE ansatz on `qubits` and return the final state vector."""
    # Prepares the circuit: uniform superposition followed by the ansatz
    circuit = cirq.Circuit()
    circuit.append(even_superposition(qubits))
    circuit.append(vqe_circuit(parameters, qubits, ising_graph))  # uses module-level ising_graph
    # Creates the (exact statevector) simulation
    simulator = cirq.Simulator()
    results = simulator.simulate(circuit)
    # NOTE(review): `final_state` was renamed `final_state_vector` in newer
    # cirq releases — confirm the pinned cirq version exposes this attribute.
    state_vector = results.final_state
    return state_vector
# Creates the cost function
iterations = 0  # global evaluation counter shared with cost_function below
def cost_function(parameters, qubits):
    """Energy expectation <psi|H|psi> of the ansatz state (quantity to minimize)."""
    global iterations
    hamiltonian = create_hamiltonian_matrix(qubit_number, ising_graph)
    vector = create_circuit(parameters, qubits)
    # <psi|H|psi>: matrix-vector product, then inner product with the conjugate state
    first = np.matmul(hamiltonian, vector)
    cost = np.inner(np.conj(vector), first)
    # Log progress every 50 optimizer evaluations
    if (iterations%50 == 0):
        print("Cost at Step "+str(iterations)+"= "+str(np.real(cost)))
    iterations += 1
    return np.real(cost)
# Creates the optimizer for our variational circuit
qubit_register = network_register
def optimizer_cost(params):
    """Adapt the flat 16-element optimizer vector into 4 rows of 4 for cost_function."""
    rows = [params[start:start + 4] for start in range(0, 16, 4)]
    return cost_function(rows, qubit_register)
# Creates the optimizer
init = [random.randint(0, 20)/10 for i in range(0, 16)]
out = minimize(optimizer_cost, x0=init, method="COBYLA", options={'maxiter':1000, 'tol':1e-10})
g = out['x']
print(out)
# Prepares the optimal state and visualizes it
optimal_params = [
g[0:4],
g[4:8],
g[8:12],
g[12:16]
]
optimal = create_circuit(optimal_params, qubit_register)
result = np.real(np.outer(optimal, np.conj(optimal)))
create_density_matrix(result)
v = [ 3.21629331, 0.54890376, 2.02976445, 0.7818173 , 1.3213677 ,
1.48080682, 1.67054856, 1.44101918, 1.20196752, 0.56441884,
-0.31570509, 0.15785939, 1.69543663, 0.72541886, 0.02910459,
-0.52821689]
# Attempts to evolve the prepared ground state forward in time, with the time evolution circuit
def le_state_evolve(depth, time, qubits, ising_graph, params):
    # Ground-state preparation (superposition + optimized VQE ansatz) ...
    yield even_superposition(qubits)
    yield vqe_circuit(params, qubits, ising_graph)
    # ... followed by the time-evolution circuit.
    # NOTE(review): `time_evolution` is not defined anywhere in this notebook
    # chunk — confirm it exists in an earlier cell, otherwise this raises NameError.
    yield time_evolution(depth, time, qubits, ising_graph)
# Builds and simulates the prepare-then-evolve circuit
def create_time_circuit(depth, time, qubits, ising_graph, params):
    """Simulate ground-state preparation followed by time evolution; return the state vector."""
    evolve_circuit = cirq.Circuit(
        le_state_evolve(depth, time, qubits, ising_graph, params))
    sim_result = cirq.Simulator().simulate(evolve_circuit)
    return sim_result.final_state
vector = create_time_circuit(800, 10, data_register, ising_graph, optimal_params)
create_density_matrix(np.real(np.outer(vector, np.conj(vector))))
# Creates the numerical simulation, to test our time-evolution circuit
def time_evolution_test(time, vec, hbar=1.0):
    """Exactly evolve `vec` under the Ising Hamiltonian for duration `time`.

    FIX: `hbar` was an undefined global in the original (NameError at call
    time); it is now a keyword parameter defaulting to 1 (natural units),
    which is consistent with the unit-less circuit evolution it is compared to.
    """
    hamiltonian = create_hamiltonian_matrix(qubit_number, ising_graph)
    propagator = scipy.linalg.expm(complex(0, -1) * hamiltonian * time / hbar)
    return np.matmul(propagator, vec)
vec = time_evolution_test(10, optimal)
create_density_matrix(np.real(np.outer(vec, np.conj(vec))))
print("Fidelity: "+str(np.inner(np.conj(vec), vector)*np.inner(np.conj(vector), vec)))
# Creates the initial "guess" graph of interactions, and assigns parameters to each of the edges
initial_graph = nx.complete_graph(qubit_number)
# Creates the SWAP test between two registers of qubits
def swap_test(control, index1, index2):
    """Standard SWAP test: H, pairwise controlled-SWAPs, H on the control qubit."""
    yield cirq.H.on(control)
    for qubit_a, qubit_b in zip(index1, index2):
        yield cirq.CSWAP(control, qubit_a, qubit_b)
    yield cirq.H.on(control)
# Creates the QGRNN ansatz
def qgrnn_ansatz(initial_graph, parameters, opt_params, depth, qubits, time):
    """Prepare the learned ground state, then apply `depth` Trotterized QAOA layers."""
    yield even_superposition(qubits)
    yield vqe_circuit(opt_params, qubits, ising_graph)
    for _ in range(depth):
        # Each Trotter step scales the layer parameters by time/depth
        # (renamed the comprehension variable that shadowed the loop index)
        yield qaoa_layer([p * time / depth for p in parameters[0]],
                         [p * time / depth for p in parameters[1]],
                         qubits, initial_graph)
def find_infidelity_time(depth, time, index1, index2, control, params, opt_params, ising_graph, initial_graph):
    """Estimate state mismatch at one time point via a sampled SWAP test.

    Returns the count of |1> outcomes on the control qubit over 100 shots
    (larger count = lower fidelity between the two registers).
    """
    circuit = cirq.Circuit()
    # True time-evolved state on register 1, QGRNN ansatz state on register 2
    circuit.append(le_state_evolve(depth, time, index1, ising_graph, opt_params))
    circuit.append(qgrnn_ansatz(initial_graph, params, opt_params, depth, index2, time))
    circuit.append(swap_test(control, index1, index2))
    circuit.append(cirq.measure(control, key="q"))
    simulator = cirq.Simulator()
    results = simulator.run(circuit, repetitions=100)
    # NOTE(review): parsing `str(results)` assumes the repr is "q=<bits>";
    # prefer results.measurements["q"] if the cirq version allows — confirm.
    new_res = list(str(results)[2:])
    return sum([int(i) for i in new_res])
control = cirq.GridQubit(2, 0)
# Now, we define the cost function that is used in the optimization method
time_range = range(0, 10)  # time points at which the two states are compared
iterations = 0  # reset the global evaluation counter for this optimization
def cost_function(params):
    """Average SWAP-test infidelity over all time points (quantity to minimize).

    NOTE(review): this redefines the earlier VQE `cost_function`. The flat
    `params` is split as [0:6] (couplings) + [6:10] (fields), i.e. 10 values
    are expected — but the hard-coded `init` below has only 8. Confirm.
    """
    global iterations
    params = [params[0:6], params[6:10]]
    total_cost = 0
    # Sum the sampled infidelity across every time point
    for i in time_range:
        res = find_infidelity_time(1, i, data_register, network_register, control, params, optimal_params, ising_graph, initial_graph)
        total_cost += res
    print("Cost at Step "+str(iterations)+": "+str(total_cost / len(time_range)))
    iterations += 1
    return total_cost / len(time_range)
init = [random.randint(10, 100)/10 for i in range(0, 10)]
# NOTE(review): the random initialization above is immediately overwritten by
# the hard-coded guess below, which has only 8 entries even though
# cost_function slices params[0:6] and params[6:10] (10 values) — confirm.
init = [7.9, 2.7, 7.1, 3.9, 3.7, 9.9, 4.5, 6.4]
out = minimize(cost_function, x0=init, method="COBYLA", options={'maxiter':500, 'tol':1e-10})
g = out['x']  # optimized parameter vector
print(out)
```
| github_jupyter |
Navier-Stokes
```
import numpy as np
import torch
from torch.nn import Parameter
from torch.optim import Adam
from gpytorch.optim import NGD
from gpytorch.constraints import Interval
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from os import path
from lafomo.utilities.torch import get_image
from lafomo.datasets import DrosophilaSpatialTranscriptomics
from lafomo.models import MultiOutputGP, PartialLFM
from lafomo.models.pdes import ReactionDiffusion
from lafomo.datasets import ToySpatialTranscriptomics, P53Data
from lafomo.configuration import VariationalConfiguration
from lafomo.plot import Plotter, plot_spatiotemporal_data
from lafomo.trainers import PDETrainer
from lafomo.utilities.torch import discretise, softplus
from lafomo.utilities.fenics import interval_mesh
from fenics import *
from dolfin import *
from mshr import *
# Parameters
T = 5.0/20 # final time
num_steps = 5000//20 # number of time steps
dt = T / num_steps # time step size
mu = 0.001 # dynamic viscosity
rho = 1 # density
# Generate Mesh
channel = Rectangle(Point(0, 0), Point(2.2, 0.41))
cylinder = Circle(Point(0.2, 0.2), 0.05)
domain = channel - cylinder
mesh = generate_mesh(domain, 32)
plot(mesh)
# Define function spaces
V = VectorFunctionSpace(mesh, 'P', 2)
Q = FunctionSpace(mesh, 'P', 1)
# Define boundaries
inflow = 'near(x[0], 0)'
outflow = 'near(x[0], 2.2)'
walls = 'near(x[1], 0) || near(x[1], 0.41)'
cylinder = 'on_boundary && x[0]>0.1 && x[0]<0.3 && x[1]>0.1 && x[1]<0.3'
inflow_profile = ('4.0*1.5*x[1]*(0.41 - x[1]) / pow(0.41, 2)', '0')
# Define boundary conditions
bcu_inflow = DirichletBC(V, Expression(inflow_profile, degree=2), inflow)
bcu_walls = DirichletBC(V, Constant((0, 0)), walls)
bcu_cylinder = DirichletBC(V, Constant((0, 0)), cylinder)
bcp_outflow = DirichletBC(Q, Constant(0), outflow)
bcu = [bcu_inflow, bcu_walls, bcu_cylinder]
bcp = [bcp_outflow]
# Generate dataset
# Define trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
p = TrialFunction(Q)
q = TestFunction(Q)
# Define functions for solutions at previous and current time steps
u_n = Function(V)
u_ = Function(V)
p_n = Function(Q)
p_ = Function(Q)
# Define expressions used in variational forms
U = 0.5*(u_n + u)
n = FacetNormal(mesh)
f = Constant((0, 0))
k = Constant(dt)
mu = Constant(mu)
rho = Constant(rho)
# Define symmetric gradient
def epsilon(u):
    """Strain-rate tensor: symmetric part of the velocity gradient."""
    velocity_gradient = nabla_grad(u)
    return sym(velocity_gradient)
# Define stress tensor
def sigma(u, p):
    """Cauchy stress for a Newtonian fluid: 2*mu*epsilon(u) - p*I."""
    viscous_part = 2*mu*epsilon(u)
    pressure_part = p*Identity(len(u))
    return viscous_part - pressure_part
# Define variational forms:
# Define variational problem for step 1
F1 = rho*dot((u - u_n) / k, v)*dx \
+ rho*dot(dot(u_n, nabla_grad(u_n)), v)*dx \
+ inner(sigma(U, p_n), epsilon(v))*dx \
+ dot(p_n*n, v)*ds - dot(mu*nabla_grad(U)*n, v)*ds \
- dot(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Define variational problem for step 2
a2 = dot(nabla_grad(p), nabla_grad(q))*dx
L2 = dot(nabla_grad(p_n), nabla_grad(q))*dx - (1/k)*div(u_)*q*dx
# Define variational problem for step 3
a3 = dot(u, v)*dx
L3 = dot(u_, v)*dx - k*dot(nabla_grad(p_ - p_n), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Apply boundary conditions to matrices
[bc.apply(A1) for bc in bcu]
[bc.apply(A2) for bc in bcp]
from tqdm import tqdm
# Create progress bar
progress = Progress('Time-stepping')
xdmffile_u = XDMFFile('navier_stokes_cylinder/velocity.xdmf')
xdmffile_p = XDMFFile('navier_stokes_cylinder/pressure.xdmf')
xdmffile_u.parameters['flush_output'] = True
xdmffile_p.parameters['flush_output'] = True
# Time-stepping (Chorin/IPCS splitting: tentative velocity, pressure
# correction, velocity correction)
t = 0
# NOTE(review): the loop variable `n` shadows the FacetNormal `n` defined
# above; harmless here because the variational forms were already built,
# but confusing — consider renaming the loop index.
for n in tqdm(range(num_steps)):
    # Update current time
    t += dt
    # Step 1: Tentative velocity step (solve A1 u_ = b1 with velocity BCs)
    b1 = assemble(L1)
    [bc.apply(b1) for bc in bcu]
    solve(A1, u_.vector(), b1, 'bicgstab', 'hypre_amg')
    # Step 2: Pressure correction step
    b2 = assemble(L2)
    [bc.apply(b2) for bc in bcp]
    solve(A2, p_.vector(), b2, 'bicgstab', 'hypre_amg')
    # Step 3: Velocity correction step
    b3 = assemble(L3)
    solve(A3, u_.vector(), b3, 'cg', 'sor')
    # Plot solution
    plot(u_, title='Velocity')
    plot(p_, title='Pressure')
    # Save solution to file (XDMF/HDF5)
    xdmffile_u.write(u_, t)
    xdmffile_p.write(p_, t)
    # Save nodal values to file
    # timeseries_u.store(u_.vector(), t)
    # timeseries_p.store(p_.vector(), t)
    # Update previous solution
    u_n.assign(u_)
    p_n.assign(p_)
    # Update progress bar
    # print('u max:', u_.vector().max())
print(xdmffile_u)
print(T)
drosophila = True  # toggle between the Drosophila dataset and the toy dataset
if drosophila:
    filepath = path.join('../../../experiments', 'dros-kr', 'partial', 'savedmodel')
    dataset = DrosophilaSpatialTranscriptomics(gene='kr', data_dir='../../../data')
    data = next(iter(dataset))
    tx, y_target = data  # tx: coordinate tensor; y_target: observed values
    lengthscale = 10
    # NOTE(review): kr_dataset, kni_dataset and gt_dataset are not defined in
    # this notebook chunk — this comprehension raises NameError unless they
    # are created in an earlier cell. Confirm.
    images = [get_image(dataset.orig_data, i)
              for i in range(2, 4)
              for dataset in [kr_dataset, kni_dataset, gt_dataset]
              ]
else:
    filepath = path.join('../../../experiments', 'toy-spatial', 'partial', 'savedmodel')
    dataset = ToySpatialTranscriptomics(data_dir='../../../data/')
    data = next(iter(dataset))
    tx, y_target = data
    lengthscale = 0.2
# Use 5/6 of the observed locations as inducing points
num_inducing = int(tx.shape[1] * 5/6)
# Sorted unique time and space coordinates, plus the plotting extent
ts = tx[0, :].unique().sort()[0].numpy()
xs = tx[1, :].unique().sort()[0].numpy()
t_diff = ts[-1]-ts[0]
x_diff = xs[-1]-xs[0]
extent = [ts[0], ts[-1], xs[0], xs[-1]]
if drosophila: plot_spatiotemporal_data(images, extent, nrows=2, ncols=3)
```
Set up GP model
```
inducing_points = torch.stack([
tx[0, torch.randperm(tx.shape[1])[:int(0.3 * tx.shape[1])]],
tx[1, torch.randperm(tx.shape[1])[:int(0.3 * tx.shape[1])]]
], dim=1).unsqueeze(0)
gp_kwargs = dict(use_ard=True,
use_scale=False,
# lengthscale_constraint=Interval(0.1, 0.3),
learn_inducing_locations=False,
initial_lengthscale=lengthscale)
gp_model = MultiOutputGP(inducing_points, 1, **gp_kwargs)
gp_model.double();
print(inducing_points.shape)
plt.scatter(inducing_points[0,:,0], inducing_points[0, :, 1])
```
Set up PDE
```
t_range = (ts[0], ts[-1])
print(t_range)
time_steps = dataset.num_discretised
print(time_steps)
fenics_model = ReactionDiffusion(t_range, time_steps, mesh)
config = VariationalConfiguration(
initial_conditions=False,
num_samples=25
)
sensitivity = Parameter(torch.ones((1, 1), dtype=torch.float64), requires_grad=False)
decay = Parameter(0.1*torch.ones((1, 1), dtype=torch.float64), requires_grad=False)
diffusion = Parameter(0.01*torch.ones((1, 1), dtype=torch.float64), requires_grad=False)
fenics_params = [sensitivity, decay, diffusion]
lfm = PartialLFM(1, gp_model, fenics_model, fenics_params, config)
train_mask = torch.zeros_like(tx[0,:])
train_mask[torch.randperm(tx.shape[1])[:int(0.3 * tx.shape[1])]] = 1
num_training = tx.shape[1]
variational_optimizer = NGD(lfm.variational_parameters(), num_data=num_training, lr=0.1)
parameter_optimizer = Adam(lfm.nonvariational_parameters(), lr=0.07)
optimizers = [variational_optimizer, parameter_optimizer]
trainer = PDETrainer(lfm,
optimizers,
dataset,
track_parameters=list(lfm.fenics_named_parameters.keys()),
train_mask=train_mask.bool(),
warm_variational=1)
```
Now let's see some samples from the GP and corresponding LFM output
```
num_t = trainer.tx[0, :].unique().shape[0]
num_x = trainer.tx[1, :].unique().shape[0]
# gp_model.covar_module.lengthscale = 0.3*0.3 * 2
out = gp_model(trainer.tx.transpose(0, 1))
sample = out.sample(torch.Size([lfm.config.num_samples])).permute(0, 2, 1)
real = torch.tensor(dataset.orig_data[trainer.t_sorted, 2]).unsqueeze(0)
plot_spatiotemporal_data(
[sample.mean(0)[0].detach().view(num_t, num_x).transpose(0, 1),
real.squeeze().view(num_t, num_x).transpose(0, 1)],
extent,
titles=['Prediction', 'Ground truth']
)
sample = sample.view(lfm.config.num_samples, 1, num_t, num_x)
real = real.repeat(lfm.config.num_samples, 1, 1)
real = real.view(lfm.config.num_samples, 1, num_t, num_x)
out = lfm.solve_pde(sample)
real_out = lfm.solve_pde(real)
plot_spatiotemporal_data(
[out.mean(0).detach().transpose(0, 1),
real_out[0].detach().transpose(0, 1)],
extent,
titles=['Prediction', 'Ground truth']
)
#print(hihi)
trainer.train(2)
print(sample.shape)
plt.imshow(sample.mean(0)[0].transpose(0, 1))
plt.colorbar()
plt.figure()
plt.imshow(out.mean(0).detach())
# lfm.save(filepath)
lfm = PartialLFM.load(filepath,
gp_cls=MultiOutputGP,
gp_args=[inducing_points, 1],
gp_kwargs=gp_kwargs,
lfm_args=[1, fenics_model, fenics_params, config])
# lfm = PartialLFM(gp_model, fenics_model, fenics_params, config)
gp_model = lfm.gp_model
optimizer = torch.optim.Adam(lfm.parameters(), lr=0.07)
trainer = PDETrainer(lfm, optimizer, dataset, track_parameters=list(lfm.fenics_named_parameters.keys()))
from lafomo.utilities.torch import smse, cia, q2
tx = trainer.tx
num_t = tx[0, :].unique().shape[0]
num_x = tx[1, :].unique().shape[0]
# f_mean = lfm(tx).mean.detach()
# f_var = lfm(tx).variance.detach()
y_target = trainer.y_target[0]
ts = tx[0, :].unique().sort()[0].numpy()
xs = tx[1, :].unique().sort()[0].numpy()
t_diff = ts[-1] - ts[0]
x_diff = xs[-1] - xs[0]
extent = [ts[0], ts[-1], xs[0], xs[-1]]
print(y_target.shape, f_mean.squeeze().shape)
f_mean_test = f_mean.squeeze()
f_var_test = f_var.squeeze()
print(q2(y_target, f_mean.squeeze()))
print(cia(y_target, f_mean_test, f_var_test).item())
print(smse(y_target, f_mean_test).mean().item())
plotter = Plotter(lfm, np.arange(1))
labels = ['Sensitivity', 'Decay', 'Diffusion']
kinetics = list()
for key in lfm.fenics_named_parameters.keys():
kinetics.append(softplus(trainer.parameter_trace[key][-1]).squeeze().numpy())
plotter.plot_double_bar(kinetics, labels)
# plotter.plot_latents()
```
| github_jupyter |
This lab on Model Validation using Validation and Cross-Validation is a Python adaptation of p. 248-251 of "Introduction to Statistical Learning with Applications in R" by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani. Adapted by R. Jordan Crouser at Smith College for SDS293: Machine Learning (Spring 2016).
```
%matplotlib inline
import pandas as pd
import numpy as np
import itertools
import statsmodels.api as sm
import matplotlib.pyplot as plt
```
# Model selection using the Validation Set Approach
In Lab 8, we saw that it is possible to choose among a set of models of different
sizes using $C_p$, BIC, and adjusted $R^2$. We will now consider how to do this
using the validation set and cross-validation approaches.
As in Lab 8, we'll be working with the ${\tt Hitters}$ dataset from ${\tt ISLR}$. Since we're trying to predict ${\tt Salary}$ and we know from last time that some are missing, let's first drop all the rows with missing values and do a little cleanup:
```
df = pd.read_csv('Hitters.csv')
# Drop any rows that contain missing values, along with the player names
df = df.dropna().drop('Player', axis=1)
# Get dummy variables for the categorical columns
dummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']])
# Extract the response variable (Salary) — the original comment mislabelled
# it as the "independent" variable
y = pd.DataFrame(df.Salary)
# Drop the response column (Salary), and columns for which we created dummy variables
X_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64')
# Define the feature set X.
X = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1)
```
In order for the validation set approach to yield accurate estimates of the test
error, we must use *only the training observations* to perform all aspects of
model-fitting — including variable selection. Therefore, the determination of
which model of a given size is best must be made using *only the training
observations*. This point is subtle but important. If the full data set is used
to perform the best subset selection step, the validation set errors and
cross-validation errors that we obtain will not be accurate estimates of the
test error.
In order to use the validation set approach, we begin by splitting the
observations into a training set and a test set. We do this by creating
a random vector, train, of elements equal to TRUE if the corresponding
observation is in the training set, and FALSE otherwise. The vector test has
a TRUE if the observation is in the test set, and a FALSE otherwise. Note the
${\tt np.invert()}$ in the command to create test causes TRUEs to be switched to FALSEs and
vice versa. We also set a random seed so that the user will obtain the same
training set/test set split.
```
np.random.seed(seed=12)
train = np.random.choice([True, False], size = len(y), replace = True)
test = np.invert(train)
```
We'll define our helper function that outputs the best set of variables for each model size like we did in Lab 8. Note that we'll need to modify this to take in both test and training sets, because we want the returned error to be the **test** error:
```
def processSubset(feature_set, X_train, y_train, X_test, y_test):
    """Fit OLS on `feature_set` with the training data; score RSS on the test data."""
    cols = list(feature_set)
    fitted = sm.OLS(y_train, X_train[cols]).fit()
    residuals = fitted.predict(X_test[cols]) - y_test
    return {"model": fitted, "RSS": (residuals ** 2).sum()}
```
And another function to perform forward selection:
```
def forward(predictors, X_train, y_train, X_test, y_test):
    """One step of forward selection: try adding each remaining predictor and
    return the candidate model with the lowest test RSS."""
    # Pull out predictors we still need to process
    remaining_predictors = [p for p in X_train.columns if p not in predictors]
    results = []
    for p in remaining_predictors:
        results.append(processSubset(predictors+[p], X_train, y_train, X_test, y_test))
    # Wrap everything up in a nice dataframe
    models = pd.DataFrame(results)
    # Choose the model with the LOWEST RSS (argmin) — the original comment
    # said "highest", contradicting the code.
    # NOTE(review): Series.argmin returns a positional index in modern pandas;
    # combined with .loc this relies on the default RangeIndex — confirm.
    best_model = models.loc[models['RSS'].argmin()]
    # Return the best model, along with some other useful information about the model
    return best_model
```
Now, we'll apply our ${\tt forward()}$ function to the training set in order to perform forward selection for all model sizes:
```
models_train = pd.DataFrame(columns=["RSS", "model"])
predictors = []
for i in range(1,len(X.columns)+1):
models_train.loc[i] = forward(predictors, X[train], y[train]["Salary"], X[test], y[test]["Salary"])
predictors = models_train.loc[i]["model"].model.exog_names
```
Now let's plot the errors, and find the model that minimizes it:
```
plt.plot(models_train["RSS"])
plt.xlabel('# Predictors')
plt.ylabel('RSS')
plt.plot(models_train["RSS"].argmin(), models_train["RSS"].min(), "or")
```
Voila! We find that the best model (according to the validation set approach) is the one that contains 10 predictors.
Now that we know what we're looking for, let's perform forward selection on the full dataset and select the best 10-predictor model. It is important that we make use of the *full
data set* in order to obtain more accurate coefficient estimates. Note that
we perform best subset selection on the full data set and select the best 10-predictor
model, rather than simply using the predictors that we obtained
from the training set, because the best 10-predictor model on the full data
set may differ from the corresponding model on the training set.
```
models_full = pd.DataFrame(columns=["RSS", "model"])
predictors = []
for i in range(1,20):
models_full.loc[i] = forward(predictors, X, y["Salary"], X, y["Salary"])
predictors = models_full.loc[i]["model"].model.exog_names
```
In fact, we see that the best ten-variable model on the full data set has a
**different set of predictors** than the best ten-variable model on the training
set:
```
print(models_train.loc[10, "model"].model.exog_names)
print(models_full.loc[10, "model"].model.exog_names)
```
# Model selection using Cross-Validation
Now let's try to choose among the models of different sizes using cross-validation.
This approach is somewhat involved, as we must perform forward selection within each of the $k$ training sets. Despite this, we see that
with its clever subsetting syntax, ${\tt python}$ makes this job quite easy. First, we
create a vector that assigns each observation to one of $k = 10$ folds, and
we create a DataFrame in which we will store the results:
```
k=10 # number of folds
np.random.seed(seed=1)
folds = np.random.choice(k, size = len(y), replace = True)
# Create a DataFrame to store the results of our upcoming calculations
cv_errors = pd.DataFrame(columns=range(1,k+1), index=range(1,20))
cv_errors = cv_errors.fillna(0)
cv_errors
```
Now let's write a for loop that performs cross-validation. In the $j^{th}$ fold, the
elements of folds that equal $j$ are in the test set, and the remainder are in
the training set. We make our predictions for each model size, compute the test errors on the appropriate subset,
and store them in the appropriate slot in the matrix ${\tt cv.errors}$.
```
models_cv = pd.DataFrame(columns=["RSS", "model"])
# Outer loop iterates over all folds
for j in range(1,k+1):
# Reset predictors
predictors = []
# Inner loop iterates over each size i
for i in range(1,len(X.columns)+1):
# The perform forward selection on the full dataset minus the jth fold, test on jth fold
models_cv.loc[i] = forward(predictors, X[folds != (j-1)], y[folds != (j-1)]["Salary"], X[folds == (j-1)], y[folds == (j-1)]["Salary"])
# Save the cross-validated error for this fold
cv_errors[j][i] = models_cv.loc[i]["RSS"]
# Extract the predictors
predictors = models_cv.loc[i]["model"].model.exog_names
cv_errors
```
This has filled up the ${\tt cv\_errors}$ DataFrame such that the $(i,j)^{th}$ element corresponds
to the test MSE for the $i^{th}$ cross-validation fold for the best $j$-variable
model. We can then use the ${\tt apply()}$ function to take the ${\tt mean}$ over the columns of this
matrix. This will give us a vector for which the $j^{th}$ element is the cross-validation
error for the $j$-variable model.
```
cv_mean = cv_errors.apply(np.mean, axis=1)
plt.plot(cv_mean)
plt.xlabel('# Predictors')
plt.ylabel('CV Error')
plt.plot(cv_mean.argmin(), cv_mean.min(), "or")
```
We see that cross-validation selects a 9-predictor model. Now let's go back to our results on the full data set in order to obtain the 9-predictor model.
```
print(models_full.loc[9, "model"].summary())
```
For comparison, let's also take a look at the statistics from last lab:
```
plt.figure(figsize=(20,10))
plt.rcParams.update({'font.size': 18, 'lines.markersize': 10})
# Set up a 2x2 grid so we can look at 4 plots at once
plt.subplot(2, 2, 1)
# We will now plot a red dot to indicate the model with the largest adjusted R^2 statistic.
# The argmax() function can be used to identify the location of the maximum point of a vector
plt.plot(models_full["RSS"])
plt.xlabel('# Predictors')
plt.ylabel('RSS')
# We will now plot a red dot to indicate the model with the largest adjusted R^2 statistic.
# The argmax() function can be used to identify the location of the maximum point of a vector
rsquared_adj = models_full.apply(lambda row: row[1].rsquared_adj, axis=1)
plt.subplot(2, 2, 2)
plt.plot(rsquared_adj)
plt.plot(rsquared_adj.argmax(), rsquared_adj.max(), "or")
plt.xlabel('# Predictors')
plt.ylabel('adjusted rsquared')
# We'll do the same for AIC and BIC, this time looking for the models with the SMALLEST statistic
aic = models_full.apply(lambda row: row[1].aic, axis=1)
plt.subplot(2, 2, 3)
plt.plot(aic)
plt.plot(aic.argmin(), aic.min(), "or")
plt.xlabel('# Predictors')
plt.ylabel('AIC')
bic = models_full.apply(lambda row: row[1].bic, axis=1)
plt.subplot(2, 2, 4)
plt.plot(bic)
plt.plot(bic.argmin(), bic.min(), "or")
plt.xlabel('# Predictors')
plt.ylabel('BIC')
```
Notice how some of the indicators are similar the cross-validated model, and others are very different?
# Your turn!
Now it's time to test out these approaches (best / forward / backward selection) and evaluation methods (adjusted training error, validation set, cross validation) on other datasets. You may want to work with a team on this portion of the lab.
You may use any of the datasets included in ${\tt ISLR}$, or choose one from the UCI machine learning repository (http://archive.ics.uci.edu/ml/datasets.html). Download a dataset, and try to determine the optimal set of parameters to use to model it!
```
# Your code here
```
To get credit for this lab, please post your answers to the following questions:
- What dataset did you choose?
- Which selection techniques did you try?
- Which evaluation techniques did you try?
- What did you determine was the best set of parameters to model this data?
- How well did this model perform?
to Piazza: https://piazza.com/class/igwiv4w3ctb6rg?cid=35
| github_jupyter |
## The Zoning Problem
The aim of this notebook is to generate the optimal zoning by formulating a linear assignment problem and using Google
OR-Tools for solving the optimization problem.
The agent behaviours are taken from the previous study. The occupation and env lattices are random for the test problem.
### The Test Problem
There are five agents :[Blue,Green,Yellow,Red,Violet]
The number of voxels which each agent occupies are [B1,G1,Y1,R1,V1]
Total number of voxels in a lattice = x
Value lattice for the agents are =[Bv],[Gv],[Yv],[Rv],[Vv]
#### Aim
: To find the best combination of Zones to achieve the maximum occupancy value
#### Steps
1. Generate the lattices , Agent behaviours, agents,
2. Find the possible origin locations for the agents
3. Simulate occupancy behaviour and retrieve the cost of occupancy for each agent at each position
4. Generate the Cost matrix
5. Use the MIP Solver to optimise the combination to get the permutation matrix
## Initilization
```
import os
import itertools
import sys
from math import factorial as fac
sys.path.append("D:/TU_Delft/Msc_Building_Technology/Semester_3/Graduation/Aditya_Graduation_Project_BT/06_Libraries")
import topogenesis as tg
import pyvista as pv
import trimesh as tm
import numpy as np
np.random.seed(0)
np.set_printoptions(threshold=sys.maxsize)
import networkx as nx
import pickle
```
## Base Lattices
```
# loading the lattice from csv
lattice_path = os.path.relpath('Base_lattice_2.csv')
avail_lattice_base = tg.lattice_from_csv(lattice_path)
# working copy of the base lattice (multiplying by 1 copies the values
# while keeping the topogenesis lattice type)
avail_lattice = avail_lattice_base*1
# snapshot of the initial availability state, kept as a topogenesis lattice
init_avail_lattice = tg.to_lattice(np.copy(avail_lattice*1), avail_lattice)
```
## Env Lattices
```
# Random environment-value lattices, one per agent colour.
# The value ranges differ per agent (blue/green: 1-19, yellow/red: 1-8,
# violet: 1-98); np.random.seed(0) at the top keeps the draws reproducible.
lattice_shape = np.shape(avail_lattice)
Bv = np.random.randint(1, 20, lattice_shape, dtype='l')
Gv = np.random.randint(1, 20, lattice_shape, dtype='l')
Yv = np.random.randint(1, 9, lattice_shape, dtype='l')
Rv = np.random.randint(1, 9, lattice_shape, dtype='l')
Vv = np.random.randint(1, 99, lattice_shape, dtype='l')
Vv
```
## Stencils
```
# creating neighborhood definition: 3d von Neumann stencil, centre removed
stencil_von_neumann = tg.create_stencil("von_neumann", 1, 1)
stencil_von_neumann.set_index([0,0,0], 0)
#print(stencil_von_neumann)
# creating neighborhood definition: moore stencil flattened to a single floor
stencil_squareness = tg.create_stencil("moore", 1, 1)
# Reshaping the moore neighbourhood: zero the layers below and above the
# centre so only the 8-cell ring on the centre plane remains
stencil_squareness[0,:,:] = 0
stencil_squareness[2,:,:] = 0
stencil_squareness.set_index([0,0,0], 0)
# NOTE(review): the transpose presumably reorients the ring into the
# intended lattice plane -- confirm against the topogenesis axis order
stencil_squareness_t = np.transpose(stencil_squareness)
#print(stencil_squareness_t)
# creating neighborhood definition: von Neumann stencil flattened to one floor
stencil_squareness_von = tg.create_stencil("von_neumann", 1, 1)
# Reshaping the von Neumann neighbourhood (this stencil is von_neumann,
# not moore as the original comment said)
stencil_squareness_von[0,:,:] = 0
stencil_squareness_von[2,:,:] = 0
stencil_squareness_von.set_index([0,0,0], 0)
stencil_squareness_von_t = np.transpose(stencil_squareness_von)
#print(stencil_squareness_von)
# full 3d moore neighbourhood without the centre cell
stencil_cuboid = tg.create_stencil("moore", 1, 1)
stencil_cuboid.set_index([0,0,0], 0)
#print(stencil_cuboid)
```
## Deriving all possible agent Center points
```
## The number of voxels which each agent has to occupy. Five agents: [Blue, Green, Yellow, Red, Violet]
## occupying [B1, G1, Y1, R1, V1] voxels respectively.
mass_size = np.count_nonzero((avail_lattice == 1))
# blue and green each take a fifth of the mass, yellow and red a 7.5th,
# and violet takes whatever remains
B1 = int(mass_size / 5)
G1 = int(mass_size / 5)
Y1 = int(mass_size / 7.5)
R1 = int(mass_size / 7.5)
V1 = int(mass_size - (B1 + G1 + Y1 + R1))
Num_of_voxels_lst = np.array([B1, G1, Y1, R1, V1])
# approximate each zone by a cube to estimate a per-agent radius
Cube_root_list = Num_of_voxels_lst ** (1 / 3)
Radius_list = Cube_root_list.astype(int)
Maximum_radius = int(Radius_list.max())
Num_of_voxels_lst
# Divide the available lattice into a grid to generate candidate origin cells
avail_lattice_copy = np.copy(avail_lattice)
avail_lattice_copy[2].shape
# candidate (x, y) positions on slice index 2
x_cordinate = np.arange(3, 21, 7, dtype=int)
y_cordinate = np.array([2, 8], dtype=int)
indexes = [(item, num) for item in x_cordinate for num in y_cordinate]
indexes
# mark every candidate cell with the sentinel value 2
selected_slice = avail_lattice_copy[2]
for num in indexes:
    selected_slice[num[0]][num[1]] = 2
# flat indices of all marked cells become the candidate origins
index_lattice_flat = avail_lattice_copy.flatten()
indexing = list(np.where(index_lattice_flat == 2))
All_coordinates = [element for tupl in indexing for element in tupl]
All_coordinates
# All possible locations: isolate the 2d array and find the points
locations = [194, 190, 257, 253, 307, 311]
```
## Permutation Combinations for further simulation verifications
```
# every ordered assignment of the five agents to the candidate origin cells
all_permutations = list(itertools.permutations(All_coordinates, 5))
all_permutations[618]
# all_permutations.index((574, 503, 651, 497, 580))
all_permutations.index((657, 574, 580, 651, 497))
```
## Agent Class
```
# agent class
class agent():
    """A voxel-occupying agent that walks over a topogenesis lattice.

    The agent remembers its current voxel (``origin``), its previous voxel
    (``old_origin``), its neighbourhood ``stencil`` and an ``id``.  Each
    ``*_occupy_*`` method implements one growth behaviour: it selects the
    next voxel (randomly or by environment value) and moves the agent there.
    Several behaviours fall back to a wider, module-level stencil
    (``stencil_full_floor`` / ``stencil_full_lattice`` / ``stencil_cuboid``)
    when the agent's own stencil offers no available neighbour, so the agent
    never gets stuck.
    """

    def __init__(self, origin, stencil, id):
        # define the origin attribute of the agent, making sure it is integer
        self.origin = np.array(origin).astype(int)
        # previous origin; starts out equal to the initial origin
        self.old_origin = self.origin
        # neighbourhood definition (stencil) of the agent
        self.stencil = stencil
        # agent id (also stamped into the availability lattice on occupation)
        self.id = id

    # ------------------------------------------------------------------ #
    # internal helpers                                                   #
    # ------------------------------------------------------------------ #
    def _free_neighbours(self, env, fallback_stencil):
        """Return ``(all_neighbours, free_neighbours)`` for the current origin.

        ``free_neighbours`` are the flat indices of neighbours whose
        availability value is 1.  When the agent's own stencil yields none,
        the (wider) ``fallback_stencil`` is queried instead.
        """
        avail_flat = env.availibility.flatten()
        neighs = env.availibility.find_neighbours_masked(self.stencil, loc=self.origin)
        free_neighs = neighs[avail_flat[neighs] == 1]
        if len(free_neighs) == 0:
            fallback_neighs = env.availibility.find_neighbours_masked(fallback_stencil, loc=self.origin)
            free_neighs = fallback_neighs[avail_flat[fallback_neighs] == 1]
        return neighs, free_neighs

    def _relocate(self, env, selected_neigh):
        """Move the agent to ``selected_neigh`` (a flat lattice index)."""
        # keep the current origin as the old origin
        self.old_origin = self.origin
        # update the current origin with the newly selected neighbour
        self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten()

    # ------------------------------------------------------------------ #
    # behaviours                                                         #
    # ------------------------------------------------------------------ #
    def random_occupy_squareness(self, env):
        """Randomly occupy one free neighbour on the 2d squarish stencil."""
        _, free_neighs = self._free_neighbours(env, stencil_full_floor)
        selected_neigh = np.random.choice(free_neighs, 1)
        self._relocate(env, selected_neigh)

    def random_occupy_cubish(self, env):
        """Occupy the highest-value free neighbour on the 3d cubish stencil."""
        _, free_neighs = self._free_neighbours(env, stencil_full_lattice)
        free_neighs_value = env.value.flatten()[free_neighs]
        selected_neigh = free_neighs[np.argmax(free_neighs_value)]
        self._relocate(env, selected_neigh)

    def random_occupy_cubish_von_neumann(self, env):
        """Randomly occupy one free neighbour; falls back to the cuboid stencil."""
        _, free_neighs = self._free_neighbours(env, stencil_cuboid)
        selected_neigh = np.random.choice(free_neighs, 1)
        self._relocate(env, selected_neigh)

    def argmax_occupy_von_neumann(self, env):
        """Occupy the free neighbour with the maximum environment value."""
        _, free_neighs = self._free_neighbours(env, stencil_full_lattice)
        free_neighs_value = env.value.flatten()[free_neighs]
        selected_neigh = free_neighs[np.argmax(free_neighs_value)]
        self._relocate(env, selected_neigh)

    def one_neighbour_occupy_squareness_moore(self, env):
        """2d occupation that bumps neighbour values by ring adjacency.

        Each of the 8 ring neighbours gets a bonus (``one_neighbour_factor``
        or ``two_neighbour_factor``, module-level constants) depending on the
        availability of its two ring-adjacent cells; the free neighbour with
        the highest bumped value is then occupied.
        """
        neighs = env.availibility.find_neighbours_masked(self.stencil, loc=self.origin)
        neighs_full_floor = env.availibility.find_neighbours_masked(stencil_full_floor, loc=self.origin)
        avail_flat = env.availibility.flatten()
        neighs_availibility = avail_flat[neighs]
        neighs_availibility_full_floor = avail_flat[neighs_full_floor]
        # base environment values of the 8 ring neighbours
        all_neighs_value_mod = np.copy(env.value.flatten()[neighs])
        # availability sums of the two ring-adjacent cells of each neighbour
        adjacency = [
            neighs_availibility[1] + neighs_availibility[2],
            neighs_availibility[0] + neighs_availibility[2],
            neighs_availibility[1] + neighs_availibility[4],
            neighs_availibility[0] + neighs_availibility[6],
            neighs_availibility[2] + neighs_availibility[7],
            neighs_availibility[3] + neighs_availibility[6],
            neighs_availibility[5] + neighs_availibility[7],
            neighs_availibility[6] + neighs_availibility[4],
        ]
        # NOTE(review): thresholds 3 and 4 only trigger when the availability
        # lattice carries values other than 0/1 (occupied cells hold the
        # agent id) -- confirm the intended encoding.
        # bugfix: iterate over all 8 entries (the original used
        # range(len(...) - 1), so the last neighbour never got its bonus)
        for detail in range(len(adjacency)):
            if adjacency[detail] == 3:
                all_neighs_value_mod[detail] = all_neighs_value_mod[detail] + one_neighbour_factor
            elif adjacency[detail] == 4:
                all_neighs_value_mod[detail] = all_neighs_value_mod[detail] + two_neighbour_factor
        neighs_value_flattened = env.value.flatten()
        # bugfix: write each bumped value back to its own neighbour (the
        # original nested loop overwrote every neighbour with the last value)
        neighs_value_flattened[neighs] = all_neighs_value_mod
        # separate available neighbours, falling back to the full floor
        free_neighs = neighs[neighs_availibility == 1]
        if len(free_neighs) == 0:
            free_neighs = neighs_full_floor[neighs_availibility_full_floor == 1]
        selected_neigh = free_neighs[np.argmax(neighs_value_flattened[free_neighs])]
        self._relocate(env, selected_neigh)

    def one_neighbour_occupy_squareness_von_neumann(self, env):
        """2d occupation: random free neighbour with full-lattice fallback."""
        _, free_neighs = self._free_neighbours(env, stencil_full_lattice)
        selected_neigh = np.random.choice(free_neighs, 1)
        self._relocate(env, selected_neigh)

    def one_neighbour_occupy_squareness_behaviour(self, env):
        """Squareness behaviour: favour cells adjacent to many past neighbours.

        Every neighbourhood seen so far is accumulated in
        ``env.neigh_squareness``; a cell that appears ``n`` times scores
        ``sqr_factor**(n-1) * value + sqr_shift``, which strongly rewards
        compact (squarish) growth.
        """
        value_lattice_flat = env.value.flatten()
        sqr_factor = 10.1
        sqr_shift = 10.0
        avail_lattice_flat = env.availibility.flatten()
        # fallback: free cells reachable through the full-lattice stencil
        neighs_full_lattice = env.availibility.find_neighbours_masked(stencil_full_lattice, loc=self.origin)
        free_neighs_full_lattice = neighs_full_lattice[avail_lattice_flat[neighs_full_lattice] == 1]
        # accumulate this step's neighbourhood into the environment history
        neighs = env.availibility.find_neighbours_masked(self.stencil, loc=self.origin)
        env.neigh_squareness.append(neighs)
        neighs_flatten = np.array(env.neigh_squareness).flatten()
        # keep the available ones only
        avail_neighs = neighs_flatten[avail_lattice_flat[neighs_flatten] == 1]
        if len(avail_neighs) == 0:
            avail_neighs = free_neighs_full_lattice
        # score unique cells by their multiplicity and environment value
        avail_unq_neighs, avail_unq_neighs_count = np.unique(avail_neighs, return_counts=True)
        neighs_unq_base_value = value_lattice_flat[avail_unq_neighs]
        neigh_sqr_evaluation = np.power(sqr_factor, (avail_unq_neighs_count - 1)) * neighs_unq_base_value + sqr_shift
        selected_neigh_1D_id = avail_unq_neighs[np.argmax(neigh_sqr_evaluation)]
        self._relocate(env, selected_neigh_1D_id)

    def one_neighbour_occupy_cubish_behaviour(self, env):
        """Cubish behaviour: linear multiplicity bonus instead of exponential.

        Like the squareness behaviour, but the history lives in
        ``env.neigh_cubish`` and a cell seen ``n`` times scores
        ``value + sqr_shift * (n - 1)``.
        """
        value_lattice_flat = env.value.flatten()
        sqr_shift = 10.0
        avail_lattice_flat = env.availibility.flatten()
        # bugfix: the fallback must be the *indices* of free cells; the
        # original filtered the availability values themselves, yielding an
        # array of ones that was then misused as neighbour indices
        free_neighs_full_lattice = np.where(avail_lattice_flat == 1)[0]
        neighs = env.availibility.find_neighbours_masked(self.stencil, loc=self.origin)
        env.neigh_cubish.append(neighs)
        neighs_flatten = np.array(env.neigh_cubish).flatten()
        # keep the available ones only
        avail_neighs = neighs_flatten[avail_lattice_flat[neighs_flatten] == 1]
        if len(avail_neighs) == 0:
            avail_neighs = free_neighs_full_lattice
        avail_unq_neighs, avail_unq_neighs_count = np.unique(avail_neighs, return_counts=True)
        neighs_unq_base_value = value_lattice_flat[avail_unq_neighs]
        neigh_sqr_evaluation = neighs_unq_base_value + sqr_shift * (avail_unq_neighs_count - 1)
        selected_neigh_1D_id = avail_unq_neighs[np.argmax(neigh_sqr_evaluation)]
        self._relocate(env, selected_neigh_1D_id)
```
## Initialize Agents
```
# Agent initialisation helpers
def initialize_agents_random_origin(stencil, avail_lattice):
    """Create one agent at a random available voxel of ``avail_lattice``.

    The randomly chosen index into the list of available voxels doubles as
    the agent id.  (Unused locals ``occ_lattice`` and ``avail_flat`` from
    the original were removed; they had no effect.)
    """
    agn_num = 1
    # coordinates of all available voxels, one row per voxel
    avail_index = np.array(np.where(avail_lattice == 1)).T
    # pick one of them at random
    select_id = np.random.choice(len(avail_index), agn_num)
    agn_origins = tuple(avail_index[select_id].flatten())
    # Defining the agent
    return agent(agn_origins, stencil, select_id)
def initialize_agents_fixed_origin(stencil, avail_lattice, origin):
    """Create one agent at the voxel given by flat index ``origin``.

    ``origin`` is also used as the agent id, so occupied cells in the
    availability lattice later carry the origin index.  (Unused locals
    ``occ_lattice``, ``avail_flat`` and ``avail_index`` from the original
    were removed; they had no effect.)
    """
    # convert the flat index to a 3d lattice coordinate
    agn_origins = np.unravel_index(origin, avail_lattice.shape)
    # Defining the agent; the origin index doubles as the agent id
    return agent(agn_origins, stencil, origin)
```
## Environment Class
```
# environment class
class environment():
    """Bundles the availability/value lattices with one agent.

    ``method_name`` names the behaviour the simulation loop should invoke
    and ``number_of_iterations`` bounds how many steps this environment runs.
    """

    def __init__(self, lattices, agents, number_of_iterations, method_name):
        self.availibility = lattices["availibility"]
        self.value = lattices["enviornment"]
        # the agent-origin lattice aliases the availability lattice
        self.agent_origin = self.availibility
        self.agents = agents
        self.update_agents()
        self.number_of_iterations = number_of_iterations
        self.method_name = method_name
        # per-behaviour neighbourhood histories
        self.neigh_cubish = []
        self.neigh_squareness = []

    def update_agents(self):
        """Record the agent's latest move in the lattices."""
        old_pos = tuple(self.agents.old_origin)
        new_pos = tuple(self.agents.origin)
        # NOTE(review): multiplying by 1 leaves the old cell unchanged --
        # presumably freeing it was intended; confirm before changing.
        self.agent_origin[old_pos] *= 0 + 1
        # stamp the agent id onto the newly occupied cell
        self.availibility[new_pos] = self.agents.id
        self.agent_origin[new_pos] = self.agents.id

    def _advance(self, behaviour):
        # perform one behaviour step, then sync the lattices
        behaviour(self)
        self.update_agents()

    def random_occupy_squareness_agents(self):
        self._advance(self.agents.random_occupy_squareness)

    def random_occupy_cubish_agents(self):
        self._advance(self.agents.random_occupy_cubish)

    def random_occupy_cubish_von_neumann_agents(self):
        self._advance(self.agents.random_occupy_cubish_von_neumann)

    def argmax_occupy_von_neumann(self):
        self._advance(self.agents.argmax_occupy_von_neumann)

    def one_neighbour_occupy_squareness_moore(self):
        self._advance(self.agents.one_neighbour_occupy_squareness_moore)

    def one_neighbour_occupy_squareness_von_neumann(self):
        self._advance(self.agents.one_neighbour_occupy_squareness_von_neumann)

    def one_neighbour_occupy_cubish_behaviour(self):
        self._advance(self.agents.one_neighbour_occupy_cubish_behaviour)

    def one_neighbour_occupy_squareness_behaviour(self):
        self._advance(self.agents.one_neighbour_occupy_squareness_behaviour)
```
# Blue function max simulation
```
# One simulation per candidate origin: drop a blue agent on each candidate
# voxel, let it occupy B1 cells following the cubish behaviour, and record
# the resulting occupancy lattice plus its total environment value (score).
env_availability_viz_blue = []
env_availability_score_blue = []
for item in All_coordinates:
    Agent_one = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item)
    # fresh copy of the availability lattice for this run
    occ_lattice_sim = tg.to_lattice(np.copy(avail_lattice * 1), avail_lattice)
    env_B = {"availibility": occ_lattice_sim, "enviornment": Bv}
    # initiate the environment (swap env_B for another env dict to score a
    # different agent on the same candidate locations)
    env_1 = environment(env_B, Agent_one, B1, "one_neighbour_occupy_cubish_behaviour")
    env_list = [env_1]
    number_steps = max(e.number_of_iterations for e in env_list)
    for a in range(number_steps):
        for e in env_list:
            if a < e.number_of_iterations:
                # dispatch the configured behaviour by its name; only names
                # that exist on the environment are executed
                behaviour_step = getattr(e, e.method_name, None)
                if behaviour_step is not None:
                    behaviour_step()
    # score = sum of environment values over the cells this agent occupied
    # (occupied cells carry the agent id, which equals the origin index)
    env_availability_viz_blue.append(e.availibility - 1)
    env_availability_score_blue.append(np.sum(e.value[e.availibility == item]))
# rank the candidate origins by score and keep the best 15
top_five_blue = sorted([(x, i) for (i, x) in enumerate(env_availability_score_blue)], reverse=True)[:15]
top_five_blue_origins = []
for item in top_five_blue:
    top_five_blue_origins.append(All_coordinates[item[1]])
top_five_blue_origins
```
# Green function max simulation
```
# One simulation per candidate origin for the green agent (G1 voxels,
# cubish behaviour), recording occupancy lattices and scores.
env_availability_viz_green = []
env_availability_score_green = []
for item in All_coordinates:
    Agent_one = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item)
    # fresh copy of the availability lattice for this run
    occ_lattice_sim = tg.to_lattice(np.copy(avail_lattice * 1), avail_lattice)
    env_G = {"availibility": occ_lattice_sim, "enviornment": Gv}
    # initiate the environment for the green value lattice
    env_1 = environment(env_G, Agent_one, G1, "one_neighbour_occupy_cubish_behaviour")
    env_list = [env_1]
    number_steps = max(e.number_of_iterations for e in env_list)
    for a in range(number_steps):
        for e in env_list:
            if a < e.number_of_iterations:
                # dispatch the configured behaviour by its name
                behaviour_step = getattr(e, e.method_name, None)
                if behaviour_step is not None:
                    behaviour_step()
    # bugfix: the original appended the visualisation lattice to
    # env_availability_viz_blue (copy-paste error); it belongs to green
    env_availability_viz_green.append(e.availibility - 1)
    # score = sum of environment values over the occupied cells
    env_availability_score_green.append(np.sum(e.value[e.availibility == item]))
# rank the candidate origins by score and keep the best 15
top_five_green = sorted([(x, i) for (i, x) in enumerate(env_availability_score_green)], reverse=True)[:15]
top_five_green_origins = []
for item in top_five_green:
    top_five_green_origins.append(All_coordinates[item[1]])
top_five_green_origins
```
# Yellow function max simulation
```
# One simulation per candidate origin for the yellow agent (Y1 voxels,
# cubish behaviour), recording occupancy lattices and scores.
env_availability_viz_yellow = []
env_availability_score_yellow = []
for item in All_coordinates:
    Agent_one = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item)
    # fresh copy of the availability lattice for this run
    occ_lattice_sim = tg.to_lattice(np.copy(avail_lattice * 1), avail_lattice)
    env_Y = {"availibility": occ_lattice_sim, "enviornment": Yv}
    # initiate the environment for the yellow value lattice
    env_1 = environment(env_Y, Agent_one, Y1, "one_neighbour_occupy_cubish_behaviour")
    env_list = [env_1]
    number_steps = max(e.number_of_iterations for e in env_list)
    for a in range(number_steps):
        for e in env_list:
            if a < e.number_of_iterations:
                # dispatch the configured behaviour by its name
                behaviour_step = getattr(e, e.method_name, None)
                if behaviour_step is not None:
                    behaviour_step()
    env_availability_viz_yellow.append(e.availibility - 1)
    # score = sum of environment values over the occupied cells
    env_availability_score_yellow.append(np.sum(e.value[e.availibility == item]))
# rank the candidate origins by score and keep the best 15
top_five_yellow = sorted([(x, i) for (i, x) in enumerate(env_availability_score_yellow)], reverse=True)[:15]
top_five_yellow_origins = []
for item in top_five_yellow:
    top_five_yellow_origins.append(All_coordinates[item[1]])
top_five_yellow_origins
```
# Red function max simulation
```
# One simulation per candidate origin for the red agent (R1 voxels,
# cubish behaviour), recording occupancy lattices and scores.
env_availability_viz_red = []
env_availability_score_red = []
for item in All_coordinates:
    Agent_one = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item)
    # fresh copy of the availability lattice for this run
    occ_lattice_sim = tg.to_lattice(np.copy(avail_lattice * 1), avail_lattice)
    env_R = {"availibility": occ_lattice_sim, "enviornment": Rv}
    # initiate the environment for the red value lattice
    env_1 = environment(env_R, Agent_one, R1, "one_neighbour_occupy_cubish_behaviour")
    env_list = [env_1]
    number_steps = max(e.number_of_iterations for e in env_list)
    for a in range(number_steps):
        for e in env_list:
            if a < e.number_of_iterations:
                # dispatch the configured behaviour by its name
                behaviour_step = getattr(e, e.method_name, None)
                if behaviour_step is not None:
                    behaviour_step()
    env_availability_viz_red.append(e.availibility - 1)
    # score = sum of environment values over the occupied cells
    env_availability_score_red.append(np.sum(e.value[e.availibility == item]))
# rank the candidate origins by score and keep the best 15
top_five_red = sorted([(x, i) for (i, x) in enumerate(env_availability_score_red)], reverse=True)[:15]
top_five_red_origins = []
for item in top_five_red:
    top_five_red_origins.append(All_coordinates[item[1]])
top_five_red_origins
```
# Violet function max simulation
```
# One simulation per candidate origin for the violet agent (V1 voxels,
# cubish behaviour), recording occupancy lattices and scores.
env_availability_viz_violet = []
env_availability_score_violet = []
for item in All_coordinates:
    Agent_one = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item)
    # fresh copy of the availability lattice for this run
    occ_lattice_sim = tg.to_lattice(np.copy(avail_lattice * 1), avail_lattice)
    env_V = {"availibility": occ_lattice_sim, "enviornment": Vv}
    # initiate the environment for the violet value lattice
    env_1 = environment(env_V, Agent_one, V1, "one_neighbour_occupy_cubish_behaviour")
    env_list = [env_1]
    number_steps = max(e.number_of_iterations for e in env_list)
    for a in range(number_steps):
        for e in env_list:
            if a < e.number_of_iterations:
                # dispatch the configured behaviour by its name
                behaviour_step = getattr(e, e.method_name, None)
                if behaviour_step is not None:
                    behaviour_step()
    env_availability_viz_violet.append(e.availibility - 1)
    # score = sum of environment values over the occupied cells
    env_availability_score_violet.append(np.sum(e.value[e.availibility == item]))
# rank the candidate origins by score and keep the best 15
top_five_violet = sorted([(x, i) for (i, x) in enumerate(env_availability_score_violet)], reverse=True)[:15]
top_five_violet_origins = []
for item in top_five_violet:
    top_five_violet_origins.append(All_coordinates[item[1]])
top_five_violet
```
# All_simulations
```
# Combined run: for every permutation of candidate origins, place the five
# agents on ONE shared availability lattice so their zones compete for cells.
env_availability_viz_all_options = []
for item in all_permutations:
    Agent_one = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item[0])
    Agent_two = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item[1])
    Agent_three = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item[2])
    Agent_four = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item[3])
    Agent_five = initialize_agents_fixed_origin(stencil_von_neumann, avail_lattice, item[4])
    # single shared occupancy lattice: every env dict aliases the same copy
    occ_lattice_sim = tg.to_lattice(np.copy(avail_lattice), avail_lattice)
    env_B = {"availibility": occ_lattice_sim, "enviornment": Bv}
    env_G = {"availibility": occ_lattice_sim, "enviornment": Gv}
    env_R = {"availibility": occ_lattice_sim, "enviornment": Rv}
    env_Y = {"availibility": occ_lattice_sim, "enviornment": Yv}
    env_V = {"availibility": occ_lattice_sim, "enviornment": Vv}
    # one environment per agent, all running the cubish behaviour
    env_1 = environment(env_B, Agent_one, B1, "one_neighbour_occupy_cubish_behaviour")
    env_2 = environment(env_G, Agent_two, G1, "one_neighbour_occupy_cubish_behaviour")
    env_3 = environment(env_Y, Agent_three, Y1, "one_neighbour_occupy_cubish_behaviour")
    env_4 = environment(env_R, Agent_four, R1, "one_neighbour_occupy_cubish_behaviour")
    env_5 = environment(env_V, Agent_five, V1, "one_neighbour_occupy_cubish_behaviour")
    # NOTE(review): env_1 (blue) is created but left out of the stepping
    # list -- confirm whether blue was meant to advance as well
    env_list = [env_2, env_3, env_4, env_5]
    number_steps = max(e.number_of_iterations for e in env_list)
    for a in range(number_steps):
        for e in env_list:
            if a < e.number_of_iterations:
                # dispatch the configured behaviour by its name
                behaviour_step = getattr(e, e.method_name, None)
                if behaviour_step is not None:
                    behaviour_step()
    env_availability_viz_all_options.append(e.availibility)
```
## Pickle Objects for reference
Change the pickle file name for each simulation run, or wrap the dump/load calls in a for loop.
```
# Persist the simulation results and reload them as a lattice stack.
# NOTE(review): pickle.dump returns None, so `all_options` is always None —
# the useful handle is `pickle_all_options` loaded back below.
all_options = pickle.dump( env_availability_viz_all_options, open( "all_options.p", "wb" ) )
pickle_all_options = pickle.load( open( "all_options.p", "rb" ) )
pickle_all_options_array = np.array(pickle_all_options)
# NOTE(review): elsewhere tg.to_lattice is called with a reference lattice as
# the second argument (see the simulation cell); here a shape tuple is passed —
# confirm the signature accepts both forms.
pickle_all_options_tglattice=tg.to_lattice(np.copy(pickle_all_options_array), pickle_all_options_array.shape)
pickle_all_options_tglattice.shape
```
## Visualize the simulation
```
# Build the pyvista scene: a uniform grid aligned with the voxel lattice.
p = pv.Plotter(notebook=True)
base_lattice = pickle_all_options_tglattice[0]
# Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data
grid = pv.UniformGrid()
grid.dimensions = np.array(base_lattice.shape) + 1
# The bottom left corner of the data set
grid.origin = base_lattice.minbound - base_lattice.unit * 0.5
# These are the cell sizes along each axis
grid.spacing = base_lattice.unit
# adding the boundingbox wireframe
p.add_mesh(grid.outline(), color="grey", label="Domain")
# adding the availability lattice (disabled)
#base_env_availability_viz_top_500[0].fast_vis(p)
# adding axes
p.add_axes()
p.show_bounds(grid="back", location="back", color="#aaaaaa")
def create_mesh(value):
    """Slider callback: render the simulation frame selected by `value`.

    Reads the frame from the module-level `pickle_all_options_tglattice`,
    writes it into `grid`'s cell data, and redraws the thresholded voxels
    on the module-level plotter `p`.
    """
    frame_index = int(value)
    frame = pickle_all_options_tglattice[frame_index]
    # Inject the frame into the grid's cell data (Fortran order to match the lattice layout).
    grid.cell_arrays["Agents"] = frame.flatten(order="F").astype(int)
    # Keep only occupied voxels (values from 1 up to the lattice size).
    occupied = grid.threshold([1.0, avail_lattice.size])
    # Re-using the same mesh name replaces the previous frame instead of stacking meshes.
    p.add_mesh(occupied, name='sphere', show_edges=True, opacity=1.0, show_scalar_bar=False)
# One slider position per simulated permutation; moving the slider re-renders.
number_steps_2 = len(all_permutations)
# event_type="always" re-invokes create_mesh continuously while dragging.
p.add_slider_widget(create_mesh, [0, number_steps_2], title='Time', value=0, event_type="always", style="classic")
p.show(use_ipyvtk=True)
```
# Calculate the score for each simulation
```
# Score every simulation: for each run, sum each colour's environment values
# over the voxels occupied by that colour (agents are identified in the
# lattice by their origin index from the permutation).
all_values=[]
for lattice,index in zip (pickle_all_options,all_permutations):
    lattice_flat = lattice.flatten()
    B= np.sum(Bv.flatten()[lattice_flat == index[0]])
    G= np.sum(Gv.flatten()[lattice_flat == index[1]])
    Y= np.sum(Yv.flatten()[lattice_flat == index[2]])
    R= np.sum(Rv.flatten()[lattice_flat == index[3]])
    V= np.sum(Vv.flatten()[lattice_flat == index[4]])
    # Total score of this permutation is the sum over all five colours.
    all_values.append(B+G+Y+R+V)
sorted_values= np.sort(np.array(all_values))
# Exploratory probes below. NOTE(review): 17131 and 16546 are magic numbers
# from one particular run of the data — they will not match other datasets.
np.argwhere(sorted_values==17131)
sorted_values
sorted_values[-1]
np.argwhere(sorted_values==16546)
# Best-scoring permutation and its position in the original (unsorted) list.
max_value = max(all_values)
max_index = all_values.index(max_value)
print(all_values[max_index])
print(all_values[664])
sorted_val = sorted(all_values)
max_index
```
# Save to Csv
```
# Export every simulation's final lattice as a zero-padded CSV
# (csv/abm_f_000.csv, csv/abm_f_001.csv, ...).
for frame_number, frame in enumerate(env_availability_viz_all_options):
    frame.to_csv(os.path.relpath(f'csv/abm_f_{frame_number:03}.csv'))
```
## Linear Optimization to derive a solution
```
from ortools.linear_solver import pywraplp
def main():
    """Assign each colour (worker) to one origin (task) so the total
    environment score is maximised, via a 0-1 integer program (OR-Tools)."""
    # Data from all the colours on all the origins
    # NOTE(review): the sixth all-zero "dummy" row has length 6; the indexing
    # below assumes every row has num_tasks = len(costs[0]) entries — confirm
    # the score lists also have 6 entries each.
    costs = [
        env_availability_score_blue,
        env_availability_score_green,
        env_availability_score_yellow,
        env_availability_score_red,
        env_availability_score_violet,
        [0,0,0,0,0,0]
    ]
    num_workers = 6
    num_tasks = len(costs[0])
    #print(num_tasks)
    # Solver
    # Create the mip solver with the SCIP backend.
    # NOTE(review): CreateSolver returns None when the SCIP backend is not
    # available; the calls below would then raise AttributeError.
    solver = pywraplp.Solver.CreateSolver('SCIP')
    # Variables
    # x[i, j] is an array of 0-1 variables, which will be 1
    # if worker i is assigned to task j.
    x = {}
    for i in range(num_workers):
        for j in range(num_tasks):
            x[i, j] = solver.IntVar(0, 1, '')
    # Constraints
    # Each worker is assigned to at most 1 task.
    for i in range(num_workers):
        solver.Add(solver.Sum([x[i, j] for j in range(num_tasks)]) <= 1)
    # Each task is assigned to exactly one worker.
    for j in range(num_tasks):
        solver.Add(solver.Sum([x[i, j] for i in range(num_workers)]) == 1)
    # Objective: maximise the total assignment score (despite the "cost" naming).
    objective_terms = []
    for i in range(num_workers):
        for j in range(num_tasks):
            objective_terms.append(costs[i][j] * x[i, j])
    solver.Maximize(solver.Sum(objective_terms))
    # Solve
    status = solver.Solve()
    # Print solution.
    if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
        print('Total cost = ', solver.Objective().Value(), '\n')
        for i in range(num_workers):
            for j in range(num_tasks):
                # Test if x[i,j] is 1 (with tolerance for floating point arithmetic).
                if x[i, j].solution_value() > 0.5:
                    print('Worker %d assigned to task %d. Cost = %d' %
                          (i, j, costs[i][j]))
if __name__ == '__main__':
    main()
```
## Total cost of permutation matrix
```
# Score one particular assignment by masking the final lattice on each agent's
# occupation id. NOTE(review): the ids 193..310 appear to be hard-coded agent
# origin indices from one specific run — confirm against the simulation output.
cost_for_location_filter_1 = [env_availability_viz[-1] == 193]
cost_for_location_filter_2 = [env_availability_viz[-1] == 189]
cost_for_location_filter_3 = [env_availability_viz[-1] == 256]
cost_for_location_filter_4 = [env_availability_viz[-1] == 252]
cost_for_location_filter_5 = [env_availability_viz[-1] == 306]
cost_for_location_filter_6 = [env_availability_viz[-1] == 310]
# Total score = sum of each colour's environment values over that colour's mask.
# NOTE(review): filter_1 is defined but never used in the sum — confirm whether
# a term for id 193 is missing.
cost = [(np.sum(Rv[cost_for_location_filter_2]))+(np.sum(Yv[cost_for_location_filter_3]))+(np.sum(Vv[cost_for_location_filter_4]))+(np.sum(Bv[cost_for_location_filter_5]))+(np.sum(Gv[cost_for_location_filter_6]))]
cost
avail_lattice
# Export each recorded lattice as a zero-padded CSV.
for i, lattice in enumerate(env_availability_viz):
    csv_path = os.path.relpath('csv/abm_f_'+ f'{i:03}' + '.csv')
    lattice.to_csv(csv_path)
```
| github_jupyter |
### Mooring Timeseries Resampler
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.dates import YearLocator, WeekdayLocator, MonthLocator, DayLocator, HourLocator, DateFormatter
import matplotlib.ticker as ticker
import datetime
### specify primary bulk figure parameters
fontsize = 10
labelsize = 10
#plotstyle = 'seaborn'
max_xticks = 10
plt.style.use('seaborn-ticks')
# Keep text editable in vector output: no path conversion for SVG, and
# TrueType (Type 42) instead of Type 3 fonts for PS/PDF.
mpl.rcParams['svg.fonttype'] = 'none'
mpl.rcParams['ps.fonttype'] = 42 #truetype/type2 fonts instead of type3
mpl.rcParams['pdf.fonttype'] = 42 #truetype/type2 fonts instead of type3
mpl.rcParams['axes.grid'] = False
mpl.rcParams['axes.edgecolor'] = 'black'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.labelcolor'] = 'black'
# Fix: 'grid.linestyle' was assigned twice with the same value; keep one.
mpl.rcParams['grid.linestyle'] = '--'
# Tick geometry: long/thick major ticks, short/thin minor ticks, pointing out.
mpl.rcParams['xtick.major.size'] = 4
mpl.rcParams['xtick.minor.size'] = 2
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['xtick.minor.width'] = 0.5
mpl.rcParams['ytick.major.size'] = 4
mpl.rcParams['ytick.minor.size'] = 2
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['ytick.minor.width'] = 0.5
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.color'] = 'black'
mpl.rcParams['xtick.color'] = 'black'
# Load the mooring time series (indexed by timestamp) and compute daily means.
data = pd.read_csv('/Users/bell/Desktop/allsc.csv',parse_dates=True,index_col='time')
data.info()
data_daily = data.resample('D').mean()
# Mask implausibly low salinity readings (< 30.4) as missing.
# Fix: use .loc instead of chained indexing, which triggers pandas'
# SettingWithCopyWarning and may silently fail to write back.
data_daily.loc[data_daily[' S_41'] < 30.4, ' S_41'] = np.nan
# Overlay daily-mean salinity by day-of-year for 2010-2019: historical years
# in grey (one shared legend entry), the three most recent years in colour.
fig = plt.figure(1,figsize=(9,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for yy in range(2010,2020,1):
    if (yy < 2016):
        # Unlabelled so the legend is not flooded; the label is added at yy == 2016.
        plt.plot(data_daily[str(yy)].index.dayofyear,data_daily[str(yy)][' S_41'],c='gray',linewidth=1.25,label='')
    elif yy == 2016:
        plt.plot(data_daily[str(yy)].index.dayofyear,data_daily[str(yy)][' S_41'],c='gray',linewidth=1.25,label='2010-2016')
    elif yy == 2017:
        plt.plot(data_daily[str(yy)].index.dayofyear,data_daily[str(yy)][' S_41'],c='red',linewidth=1.5,label='2017')
    elif yy == 2018:
        plt.plot(data_daily[str(yy)].index.dayofyear,data_daily[str(yy)][' S_41'],c='blue',linewidth=1.5,label='2018')
    elif yy == 2019:
        plt.plot(data_daily[str(yy)].index.dayofyear,data_daily[str(yy)][' S_41'],c='green',linewidth=1.5,label='2019')
plt.legend(loc='best')
# Tick positions at approximate month-start day-of-year values.
plt.xticks((0,31,61,92,122,153,183,214,245,275,306,336),)
ax1.set_xlim([1,+365])
ax1.tick_params(axis='y',which='both',bottom='on')
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(2.5))
# Label trick: blank major labels on the 1st and month names on the minor
# ticks at the 15th, so month labels sit centred between major ticks.
ax1.xaxis.set_major_locator(DayLocator(bymonthday=1))
ax1.xaxis.set_minor_locator(DayLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter('%b'))
# NOTE(review): this overrides the NullFormatter set two lines above — one of
# the two major-formatter calls is redundant.
ax1.xaxis.set_major_formatter(DateFormatter(''))
ax1.xaxis.set_tick_params(which='major', pad=15)
ax1.xaxis.set_tick_params(which='minor', pad=5)
fig.savefig('images/C2_AnnualSal.png',dpi=300)
fig.savefig('images/C2_AnnualSal.svg',dpi=300)
# One salinity file per mooring deployment ("YYckp2a", nominal ~39 m depth).
# NOTE(review): `path` is not used below — the read_csv call re-hardcodes the
# same prefix; consider using path + filename instead.
path = '/Users/bell/Desktop/'
files = ['10ckp2a_sc_0039m.csv',
         '11ckp2a_sc_0038m.csv',
         '12ckp2a_sc_0040m.csv',
         '13ckp2a_sc_0039m.csv',
         '14ckp2a_sc_0039m.csv',
         '15ckp2a_sc_0038m.csv',
         '16ckp2a_sc_0040m.csv',
         '17ckp2a_sc_0039m.csv',
         '18ckp2a_sc_0039m.csv']
# Deployment-aligned salinity overlay: each file spans its deployment year and
# the following year, plotted with a +365-day x offset for the second year.
# Deployments 16/17/18 are highlighted in colour; 2010-2015 are grey.
fig = plt.figure(1,figsize=(9,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for t,yy in enumerate(files):
    if (t>=0):
        # Always true; kept as an easy toggle for subsetting deployments.
        data = pd.read_csv('/Users/bell/Desktop/'+yy,parse_dates=True,index_col='time')
        data_daily = data.resample('D').mean()
        data_daily['doy']=data_daily.index.dayofyear
        # Mask implausibly low salinity readings as missing.
        # NOTE(review): chained assignment — prefer
        # data_daily.loc[mask, ' S_41'] = np.nan to avoid SettingWithCopy issues.
        data_daily[' S_41'][data_daily[' S_41'] < 30.4] = np.nan
        # Dispatch on the 2-digit deployment-year prefix of the file name.
        # NOTE(review): substring matching ('16' in yy) would also match the
        # digits elsewhere in a name — fine for this file list, but fragile.
        if '16' in yy:
            plt.plot(data_daily['2016'].index.dayofyear,data_daily['2016'][' S_41'],c='red',linewidth=1.25,label='16ckp2a')
            plt.plot(data_daily['2017'].index.dayofyear+365,data_daily['2017'][' S_41'],c='red',linewidth=1.25,label='')
        elif '17' in yy:
            plt.plot(data_daily['2017'].index.dayofyear,data_daily['2017'][' S_41'],c='blue',linewidth=1.5,label='17ckp2a')
            plt.plot(data_daily['2018'].index.dayofyear+365,data_daily['2018'][' S_41'],c='blue',linewidth=1.5,label='')
        elif '18' in yy:
            plt.plot(data_daily['2018'].index.dayofyear,data_daily['2018'][' S_41'],c='green',linewidth=1.5,label='18ckp2a')
            plt.plot(data_daily['2019'].index.dayofyear+365,data_daily['2019'][' S_41'],c='green',linewidth=1.5,label='')
        elif '10' in yy:
            # First grey deployment carries the shared '2010-2015' legend entry.
            plt.plot(data_daily['2010'].index.dayofyear,data_daily['2010'][' S_41'],c='gray',linewidth=1.25,label='2010-2015')
            plt.plot(data_daily['2011'].index.dayofyear+365,data_daily['2011'][' S_41'],c='gray',linewidth=1.25,label='')
        elif '11' in yy:
            plt.plot(data_daily['2011'].index.dayofyear,data_daily['2011'][' S_41'],c='gray',linewidth=1.25,label='')
            plt.plot(data_daily['2012'].index.dayofyear+365,data_daily['2012'][' S_41'],c='gray',linewidth=1.25,label='')
        elif '12' in yy:
            plt.plot(data_daily['2012'].index.dayofyear,data_daily['2012'][' S_41'],c='gray',linewidth=1.25,label='')
            plt.plot(data_daily['2013'].index.dayofyear+365,data_daily['2013'][' S_41'],c='gray',linewidth=1.25,label='')
        elif '13' in yy:
            plt.plot(data_daily['2013'].index.dayofyear,data_daily['2013'][' S_41'],c='gray',linewidth=1.25,label='')
            plt.plot(data_daily['2014'].index.dayofyear+365,data_daily['2014'][' S_41'],c='gray',linewidth=1.25,label='')
        elif '14' in yy:
            plt.plot(data_daily['2014'].index.dayofyear,data_daily['2014'][' S_41'],c='gray',linewidth=1.25,label='')
            plt.plot(data_daily['2015'].index.dayofyear+365,data_daily['2015'][' S_41'],c='gray',linewidth=1.25,label='')
        elif '15' in yy:
            plt.plot(data_daily['2015'].index.dayofyear,data_daily['2015'][' S_41'],c='gray',linewidth=1.25,label='')
            plt.plot(data_daily['2016'].index.dayofyear+365,data_daily['2016'][' S_41'],c='gray',linewidth=1.25,label='')
plt.legend(loc='best')
# Month-start day-of-year tick positions for both the first and second year.
plt.xticks((0,31,61,92,122,153,183,214,245,275,306,336,
            0+365,31+365,61+365,92+365,122+365,153+365,
            183+365,214+365,245+365,275+365,306+365,336+365),)
# Window from Aug of the deployment year to Oct of the following year.
ax1.set_xlim([213,276+365])
#plt.yticks(np.arange(0,1.2,.4))
ax1.tick_params(axis='y',which='both',bottom='on')
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(2.5))
# Month labels are drawn on minor ticks (15th), centred between blank majors.
ax1.xaxis.set_major_locator(DayLocator(bymonthday=1))
ax1.xaxis.set_minor_locator(DayLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter('%b'))
# NOTE(review): overrides the NullFormatter above — one call is redundant.
ax1.xaxis.set_major_formatter(DateFormatter(''))
ax1.xaxis.set_tick_params(which='major', pad=15)
ax1.xaxis.set_tick_params(which='minor', pad=5)
fig.savefig('images/C2_DepSal.png',dpi=300)
fig.savefig('images/C2_DepSal.svg',dpi=300)
```
| github_jupyter |
<div style="width: 100%; overflow: hidden;">
<div style="width: 150px; float: left;"> <img src="https://raw.githubusercontent.com/DataForScience/Networks/master/data/D4Sci_logo_ball.png" alt="Data For Science, Inc" align="left" border="0" width=150px> </div>
<div style="float: left; margin-left: 10px;"> <h1>Transforming Excel Analysis into pandas Data Models</h1>
<h1>Excel Pitfalls</h1>
<p>Bruno Gonçalves<br/>
<a href="http://www.data4sci.com/">www.data4sci.com</a><br/>
@bgoncalves, @data4sci</p></div>
</div>
```
from collections import Counter
from pprint import pprint
import pandas as pd
import numpy as np
import numpy_financial as npf
import matplotlib
import matplotlib.pyplot as plt
import watermark
%load_ext watermark
%matplotlib inline
```
We start by printing out the versions of the libraries we're using for future reference
```
%watermark -n -v -m -g -iv
```
Load default figure style
```
plt.style.use('./d4sci.mplstyle')
```
## Large file
Data Frames are limited only by available memory, and have no fixed limit on the number of rows or columns
```
# Load one month of NYC green-taxi trips, parsing both timestamp columns.
# NOTE(review): the mixed capitalisation ('lpep_...' vs 'Lpep_...') mirrors
# the raw column headers in this vintage of the TLC data — do not "fix" it.
taxis = pd.read_csv('data/green_tripdata_2014-04.csv.gz',
                    parse_dates=['lpep_pickup_datetime', 'Lpep_dropoff_datetime']
                   )
```
Even relatively small files can have a large number of rows
```
taxis.shape  # (n_rows, n_columns)
```
And we can be sure that each column has a unique format without any unexpected changes in formatting
```
taxis.info()  # per-column dtype, non-null count, and memory usage
```
Computations are column based and written in a compact form
```
# Column-wise operations: distinct trip types and the average fare.
taxis['Trip_type'].unique()
taxis['Fare_amount'].mean()
```
Easily index any part of the full DataFrame
```
taxis.iloc[1000:1020]  # positional slice: 20 rows from the middle of the frame
```
## Mortgage Calculator
The logic underlying any computation is always clear, as each cell displays the code instead of just the computed values
```
# Loan terms: $1M purchase with 20% down, 30-year fixed at 3% APR
# compounded monthly.
price = 1_000_000
down = .2 * price
loan_principal = price - down
years = 30
months = 12 * years       # number of monthly payments
rate = .03 / 12           # periodic (monthly) interest rate
# npf.pmt returns the fixed monthly payment; the present value is passed
# negative because it is cash received by the borrower.
payment = npf.pmt(rate, months, -loan_principal)
# Month-end dates shifted by one day so payments fall on the 1st of each month.
dates = pd.date_range('08/01/2020', periods=months, freq='M') + pd.Timedelta('1D')
dates
```
From these few lines of code we can easily audit how the computation is performing and find any existing bugs
```
# Build the amortization schedule month by month, with a fixed $100 extra
# principal payment each month; stop once the balance is paid off.
rows = []
balance = price - down
extra_payment = 100
for month in dates:
    row = [month]
    row.append(balance)                  # beginning balance
    row.append(payment)                  # scheduled payment
    row.append(extra_payment)            # extra payment
    row.append(payment+extra_payment)    # total payment
    # Split the scheduled payment into interest (on the current balance)
    # and principal (the remainder).
    interest = balance*rate
    principal = payment-interest
    row.append(principal+extra_payment)  # total principal paid this month
    row.append(interest)
    balance -= principal+extra_payment
    row.append(balance)                  # ending balance
    rows.append(row)
    # Loan fully repaid (the last balance may dip slightly below zero).
    if balance <= 0:
        break
```
And convert the data into a compact DataFrame that can be used to perform further computations
```
# Assemble the schedule into a DataFrame, one row per month.
mortgage = pd.DataFrame(rows, columns=['Date', 'Beginning Balance', 'Scheduled Payment', 'Extra Payment',
                                       'Total Payment', 'Principal', 'Interest', 'Ending Balance'])
# Running total of interest paid to date.
# NOTE(review): 'Cummulative' is misspelled, but the column name is part of
# the frame's schema — renaming it would break downstream lookups.
mortgage['Cummulative Interest'] = mortgage.Interest.cumsum()
mortgage
```
## Non-standardized data
```
# Load the movie catalogue from Excel (first sheet by default).
movies = pd.read_excel('data/movies.xlsx')
movies.head()
```
Unexpected spaces in the data
```
movies['Title'].iloc[10]  # example title showing stray surrounding whitespace
```
That can easily be cleaned
```
# Strip stray leading/trailing whitespace from every title.
# Improvement: the vectorised .str accessor is the idiomatic pandas form and,
# unlike transform(lambda x: x.strip()), propagates missing values (NaN)
# instead of raising AttributeError on non-string entries.
movies['Title'] = movies['Title'].str.strip()
movies['Title'].iloc[0]
```
<div style="width: 100%; overflow: hidden;">
<img src="data/D4Sci_logo_full.png" alt="Data For Science, Inc" align="center" border="0" width=300px>
</div>
| github_jupyter |
## NLP datasets
```
from fastai.gen_doc.nbdoc import *
from fastai.text import *
from fastai.gen_doc.nbdoc import *
```
This module contains the [`TextDataset`](/text.data.html#TextDataset) class, which is the main dataset you should use for your NLP tasks. It automatically does the preprocessing steps described in [`text.transform`](/text.transform.html#text.transform). It also contains all the functions to quickly get a [`TextDataBunch`](/text.data.html#TextDataBunch) ready.
## Quickly assemble your data
You should get your data in one of the following formats to make the most of the fastai library and use one of the factory methods of one of the [`TextDataBunch`](/text.data.html#TextDataBunch) classes:
- raw text files in folders train, valid, test in an ImageNet style,
- a csv where some column(s) gives the label(s) and the following one the associated text,
- a dataframe structured the same way,
- tokens and labels arrays,
- ids, vocabulary (correspondence id to word) and labels.
If you are assembling the data for a language model, you should define your labels as always 0 to respect those formats. The first time you create a [`DataBunch`](/basic_data.html#DataBunch) with one of those functions, your data will be preprocessed automatically. You can save it, so that the next time you call it is almost instantaneous.
Below are the classes that help assembling the raw data in a [`DataBunch`](/basic_data.html#DataBunch) suitable for NLP.
```
show_doc(TextLMDataBunch, title_level=3)
```
All the texts in the [`datasets`](/datasets.html#datasets) are concatenated and the labels are ignored. Instead, the target is the next word in the sentence.
```
# Render API docs for the LM DataBunch factory and the classifier DataBunch.
show_doc(TextLMDataBunch.create)
show_doc(TextClasDataBunch, title_level=3)
show_doc(TextClasDataBunch.create)
```
All the texts are grouped by length (with a bit of randomness for the training set) then padded so that the samples have the same length to get in a batch.
```
show_doc(TextDataBunch, title_level=3)
# Inline warning rendered into the generated docs page.
jekyll_warn("This class can only work directly if all the texts have the same length.")
```
### Factory methods (TextDataBunch)
All those classes have the following factory methods.
```
show_doc(TextDataBunch.from_folder)
```
The folders are scanned in `path` for <code>train</code>, `valid` and maybe `test` folders. Text files in the <code>train</code> and `valid` folders should be placed in subdirectories according to their classes (not applicable for a language model). `tokenizer` will be used to parse those texts into tokens.
You can pass a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned for instance). kwargs will be split between the [`TextDataset`](/text.data.html#TextDataset) function and to the class initialization, you can precise there parameters such as `max_vocab`, `chunksize`, `min_freq`, `n_labels` (see the [`TextDataset`](/text.data.html#TextDataset) documentation) or `bs`, `bptt` and `pad_idx` (see the sections LM data and classifier data).
```
show_doc(TextDataBunch.from_csv)
```
This method will look for `csv_name`, and optionally a `test` csv file, in `path`. These will be opened with [`header`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html#pandas-read-csv), using `delimiter`. You can specify which are the `text_cols` and `label_cols`; by default a single label column is assumed to come before a single text column. If your csv has no header, you must specify these as indices. If you're training a language model and don't have labels, you must specify the `text_cols`. If there are several `text_cols`, the texts will be concatenated together with an optional field token. If there are several `label_cols`, the labels will be assumed to be one-hot encoded and `classes` will default to `label_cols` (you can ignore that argument for a language model). `label_delim` can be used to specify the separator between multiple labels in a column.
You can pass a `tokenizer` to be used to parse the texts into tokens and/or a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned for instance). Otherwise you can specify parameters such as `max_vocab`, `min_freq`, `chunksize` for the Tokenizer and Numericalizer (processors). Other parameters (e.g. `bs`, `val_bs` and `num_workers`, etc.) will be passed to [`LabelLists.databunch()`](/data_block.html#LabelLists.databunch) documentation) (see the LM data and classifier data sections for more info).
```
show_doc(TextDataBunch.from_df)
```
This method will use `train_df`, `valid_df` and optionally `test_df` to build the [`TextDataBunch`](/text.data.html#TextDataBunch) in `path`. You can specify `text_cols` and `label_cols`; by default a single label column comes before a single text column. If you're training a language model and don't have labels, you must specify the `text_cols`. If there are several `text_cols`, the texts will be concatenated together with an optional field token. If there are several `label_cols`, the labels will be assumed to be one-hot encoded and `classes` will default to `label_cols` (you can ignore that argument for a language model).
You can pass a `tokenizer` to be used to parse the texts into tokens and/or a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned for instance). Otherwise you can specify parameters such as `max_vocab`, `min_freq`, `chunksize` for the default Tokenizer and Numericalizer (processors). Other parameters (e.g. `bs`, `val_bs` and `num_workers`, etc.) will be passed to [`LabelLists.databunch()`](/data_block.html#LabelLists.databunch) documentation) (see the LM data and classifier data sections for more info).
```
show_doc(TextDataBunch.from_tokens)
```
This function will create a [`DataBunch`](/basic_data.html#DataBunch) from `trn_tok`, `trn_lbls`, `val_tok`, `val_lbls` and maybe `tst_tok`.
You can pass a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned for instance). kwargs will be split between the [`TextDataset`](/text.data.html#TextDataset) function and to the class initialization, you can precise there parameters such as `max_vocab`, `chunksize`, `min_freq`, `n_labels`, `tok_suff` and `lbl_suff` (see the [`TextDataset`](/text.data.html#TextDataset) documentation) or `bs`, `bptt` and `pad_idx` (see the sections LM data and classifier data).
```
show_doc(TextDataBunch.from_ids)
```
Texts are already preprocessed into `train_ids`, `train_lbls`, `valid_ids`, `valid_lbls` and maybe `test_ids`. You can specify the corresponding `classes` if applicable. You must specify a `path` and the `vocab` so that the [`RNNLearner`](/text.learner.html#RNNLearner) class can later infer the corresponding sizes in the model it will create. kwargs will be passed to the class initialization.
### Load and save
To avoid losing time preprocessing the text data more than once, you should save and load your [`TextDataBunch`](/text.data.html#TextDataBunch) using [`DataBunch.save`](/basic_data.html#DataBunch.save) and [`load_data`](/basic_data.html#load_data).
```
show_doc(TextDataBunch.load)
# load() is kept only for backward compatibility with pre-1.0.44 saves.
jekyll_warn("This method should only be used to load back `TextDataBunch` saved in v1.0.43 or before, it is now deprecated.")
```
### Example
Untar the IMDB sample dataset if not already done:
```
# Download (if needed) and extract the IMDB sample dataset; returns its path.
path = untar_data(URLs.IMDB_SAMPLE)
path
```
Since it comes in the form of csv files, we will use the corresponding `text_data` method. Here is an overview of what your file should look like:
```
pd.read_csv(path/'texts.csv').head()  # peek at the label/text columns
```
And here is a simple way of creating your [`DataBunch`](/basic_data.html#DataBunch) for language modelling or classification.
```
# Build a language-model DataBunch and a classifier DataBunch from the same csv.
data_lm = TextLMDataBunch.from_csv(Path(path), 'texts.csv')
data_clas = TextClasDataBunch.from_csv(Path(path), 'texts.csv')
```
## The TextList input classes
Behind the scenes, the previous functions will create a training, validation and maybe test [`TextList`](/text.data.html#TextList) that will be tokenized and numericalized (if needed) using [`PreProcessor`](/data_block.html#PreProcessor).
```
# Render API docs for the Text item type and the TextList item list.
show_doc(Text, title_level=3)
show_doc(TextList, title_level=3)
```
`vocab` contains the correspondence between ids and tokens, `pad_idx` is the id used for padding. You can pass a custom `processor` in the `kwargs` to change the defaults for tokenization or numericalization. It should have the following form:
```
# Example custom processor pipeline: tokenize with spaCy (English), then
# numericalize with the vocabulary capped at 30k tokens.
tokenizer = Tokenizer(SpacyTokenizer, 'en')
processor = [TokenizeProcessor(tokenizer=tokenizer), NumericalizeProcessor(max_vocab=30000)]
```
See below for all the arguments those tokenizers can take.
```
# Render API docs for the TextList methods and their processors.
show_doc(TextList.label_for_lm)
show_doc(TextList.from_folder)
show_doc(TextList.show_xys)
show_doc(TextList.show_xyzs)
show_doc(OpenFileProcessor, title_level=3)
show_doc(open_text)
show_doc(TokenizeProcessor, title_level=3)
```
`tokenizer` is used on bits of `chunksize`. If `mark_fields=True`, add field tokens between each parts of the texts (given when the texts are read in several columns of a dataframe). See more about tokenizers in the [transform documentation](/text.transform.html).
```
show_doc(NumericalizeProcessor, title_level=3)
```
Uses `vocab` for this (if not None), otherwise create one with `max_vocab` and `min_freq` from tokens.
## Language Model data
A language model is trained to guess what the next word is inside a flow of words. We don't feed it the different texts separately but concatenate them all together in a big array. To create the batches, we split this array into `bs` chunks of continuous texts. Note that in all NLP tasks, we don't use the usual convention of sequence length being the first dimension so batch size is the first dimension and sequence length is the second. Here you can read the chunks of texts in lines.
```
# Peek at one language-model batch: batch dimension first, sequence second.
path = untar_data(URLs.IMDB_SAMPLE)
data = TextLMDataBunch.from_csv(path, 'texts.csv')
x,y = next(iter(data.train_dl))
example = x[:15,:15].cpu()
# Map token ids back to text for a 15x15 corner of the batch.
texts = pd.DataFrame([data.train_ds.vocab.textify(l).split(' ') for l in example])
texts
jekyll_warn("If you are used to another convention, beware! fastai always uses batch as a first dimension, even in NLP.")
```
This is all done internally when we use [`TextLMDataBunch`](/text.data.html#TextLMDataBunch), by wrapping the dataset in the following pre-loader before calling a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).
```
show_doc(LanguageModelPreLoader)
```
LanguageModelPreLoader is an internal class uses for training a language model. It takes the sentences passed as a jagged array of numericalised sentences in `dataset` and returns contiguous batches to the pytorch dataloader with batch size `bs` and a sequence length `bptt`.
- `lengths` can be provided for the jagged training data else lengths is calculated internally
- `backwards=True` will reverses the sentences.
- `shuffle=True`, will shuffle the order of the sentences, at the start of each epoch - except the first
The following description is useful for understanding the implementation of [`LanguageModelPreLoader`](/text.data.html#LanguageModelPreLoader):
- idx: instance of CircularIndex that indexes items while taking the following into account 1) shuffle, 2) direction of indexing, 3) wraps around to head (reading forward) or tail (reading backwards) of the ragged array as needed in order to fill the last batch(s)
- ro: index of the first rag of each row in the batch to be extract. Returns as index to the next rag to be extracted
- ri: Reading forward: index to the first token to be extracted in the current rag (ro). Reading backwards: one position after the last token to be extracted in the rag
- overlap: overlap between batches is 1, because we only predict the next token
## Classifier data
When preparing the data for a classifier, we keep the different texts separate, which poses another challenge for the creation of batches: since they don't all have the same length, we can't easily collate them together in batches. To help with this we use two different techniques:
- padding: each text is padded with the `PAD` token to get all the ones we picked to the same size
- sorting the texts (ish): to avoid having together a very long text with a very short one (which would then have a lot of `PAD` tokens), we regroup the texts by order of length. For the training set, we still add some randomness to avoid showing the same batches at every step of the training.
Here is an example of batch with padding (the padding index is 1, and the padding is applied before the sentences start).
```
# Peek at one classifier batch: texts grouped by length and padded (pad id 1).
path = untar_data(URLs.IMDB_SAMPLE)
data = TextClasDataBunch.from_csv(path, 'texts.csv')
iter_dl = iter(data.train_dl)
_ = next(iter_dl)   # skip the first batch
x,y = next(iter_dl)
x[-10:,:20]
```
This is all done internally when we use [`TextClasDataBunch`](/text.data.html#TextClasDataBunch), by using the following classes:
```
show_doc(SortSampler)
```
This pytorch [`Sampler`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Sampler) is used for the validation and (if applicable) the test set.
```
show_doc(SortishSampler)
```
This pytorch [`Sampler`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Sampler) is generally used for the training set.
```
show_doc(pad_collate)
```
This will collate the `samples` in batches while adding padding with `pad_idx`. If `pad_first=True`, padding is applied at the beginning (before the sentence starts) otherwise it's applied at the end.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
# Undocumented/internal methods, rendered for completeness.
show_doc(TextList.new)
show_doc(TextList.get)
show_doc(TokenizeProcessor.process_one)
show_doc(TokenizeProcessor.process)
show_doc(OpenFileProcessor.process_one)
show_doc(NumericalizeProcessor.process)
show_doc(NumericalizeProcessor.process_one)
show_doc(TextList.reconstruct)
show_doc(LanguageModelPreLoader.on_epoch_begin)
show_doc(LanguageModelPreLoader.on_epoch_end)
```
## New Methods - Please document or move to the undocumented section
```
# Newly added methods awaiting proper documentation.
show_doc(LMLabelList)
show_doc(LanguageModelPreLoader.allocate_buffers)
show_doc(LanguageModelPreLoader.CircularIndex.shuffle)
show_doc(LanguageModelPreLoader.fill_row)
```
| github_jupyter |
# KNN(K Nearest Neighbours) for classification of glass types
We will make use of KNN algorithms to classify the type of glass.
### What is covered?
- About KNN algorithm
- Exploring dataset using visualization - scatterplot,pairplot, heatmap (correlation matrix).
- Feature scaling
- Applying KNN to classify
- Optimization
- Distance metrics
- Finding the best K value
### About KNN-
- It is an instance-based algorithm.
- As opposed to model-based algorithms which pre trains on the data, and discards the data. Instance-based algorithms retain the data to classify when a new data point is given.
- The distance metric is used to calculate its nearest neighbors (Euclidean, manhattan)
- Can solve classification(by determining the majority class of nearest neighbors) and regression problems (by determining the means of nearest neighbors).
- If the majority of the nearest neighbors of the new data point belong to a certain class, the model classifies the new data point to that class.

For example, in the above plot, Assuming k=5,
the black point
(new data) can be classified as class 1 (Blue), because 3 out of 5 of its nearest neighbors belong to class 1.
### Dataset
[Glass classification dataset](https://www.kaggle.com/uciml/glass) . Download to follow along.
**Description** -
This is a Glass Identification Data Set from UCI. It contains 10 attributes including id. The response is glass type(discrete 7 values)
- Id number: 1 to 214 (removed from CSV file)
- RI: refractive index
- Na: Sodium (unit measurement: weight percent in corresponding oxide, as are attributes 4-10)
- Mg: Magnesium
- Al: Aluminum
- Si: Silicon
- K: Potassium
- Ca: Calcium
- Ba: Barium
- Fe: Iron
- Type of glass: (class attribute)
- 1 buildingwindowsfloatprocessed
- 2 buildingwindowsnonfloatprocessed
- 3 vehiclewindowsfloatprocessed
- 4 vehiclewindowsnonfloatprocessed (none in this database)
- 5 containers
- 6 tableware
- 7 headlamps
About Type 2,4 -> **Float processed glass** means they are made on a floating molten glass on a bed of molten metal, this gives the sheet uniform thickness and flat surfaces.
## Load dependencies and data
```
#import dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import seaborn as sns
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import cross_val_score
#load data
# Glass identification dataset: 9 numeric features plus the integer
# class label in the 'Type' column.
df = pd.read_csv('./data/glass.csv')
df.head()
# value count for glass types (class distribution of the target)
df.Type.value_counts()
```
## Data exploration and visualization
#### correlation matrix -
```
# Pairwise correlations between all numeric columns, shown as a heatmap
# to spot redundant features before modeling.
cor = df.corr()
sns.heatmap(cor)
```
We can notice that Ca and K values don't affect Type that much.
Also Ca and RI are highly correlated, this means using only RI is enough.
So we can go ahead and drop Ca, and also K.(performed later)
## Scatter plot of two features
```
# Scatter plot of two raw features colored by glass type.
# Bug fix: the original referenced `df_feat`, which is only created in a
# later cell (StandardScaler section), so a top-to-bottom run raised
# NameError here. Plot from `df` instead; keyword args keep this working
# on seaborn versions that removed positional x/y.
sns.scatterplot(x=df['RI'], y=df['Na'], hue=df['Type'])
```
Suppose we consider only RI, and Na values for classification for glass type.
- From the above plot, We first calculate the nearest neighbors from the new data point to be calculated.
- If the majority of nearest neighbors belong to a particular class, say type 4, then we classify the data point as type 4.
But there are a lot more than two features based on which we can classify.
So let us take a look at pairwise plot to capture all the features.
```
#pairwise plot of all the features
# One scatter panel per feature pair, colored by glass type, to judge
# class separability visually.
sns.pairplot(df,hue='Type')
plt.show()
```
The pairplot shows that the data is not linear and KNN can be applied to get nearest neighbors and classify the glass types
## Feature Scaling
Scaling is necessary for distance-based algorithms such as KNN.
This is to avoid higher weightage being assigned to data with a higher magnitude.
Using standard scaler we can scale down to unit variance.
**Formula:**
z = (x - u) / s
where x -> value, u -> mean, s -> standard deviation
```
# Standardize features to zero mean / unit variance: z = (x - u) / s.
scaler = StandardScaler()
# Fit on the feature columns only — 'Type' is the target.
scaler.fit(df.drop('Type',axis=1))
#perform transformation
scaled_features = scaler.transform(df.drop('Type',axis=1))
scaled_features
# Rebuild a DataFrame with the feature names.
# NOTE(review): df.columns[:-1] assumes 'Type' is the last column — confirm.
df_feat = pd.DataFrame(scaled_features,columns=df.columns[:-1])
df_feat.head()
```
## Applying KNN
- Drop features that are not required
- Use random state while splitting the data to ensure reproducibility and consistency
- Experiment with distance metrics - Euclidean, manhattan
```
dff = df_feat.drop(['Ca','K'],axis=1) #Removing features - Ca and K (redundant per the correlation analysis above)
# 70/30 split; fixed random_state keeps the split identical every time so results are comparable.
X_train,X_test,y_train,y_test = train_test_split(dff,df['Type'],test_size=0.3,random_state=45)
# k=4 with Manhattan distance — the best configuration found below.
knn = KNeighborsClassifier(n_neighbors=4,metric='manhattan')
knn.fit(X_train,y_train)
y_pred = knn.predict(X_test)
print(classification_report(y_test,y_pred))
accuracy_score(y_test,y_pred)
```
With this setup, we found the accuracy to be 73.84%.
### Finding the best K value
We can do this either -
- by plotting Accuracy
- or by plotting the error rate
Note that plotting both is not required; both are plotted here as an example.
```
# Sweep k from 1 to 24, recording cross-validated accuracy and
# held-out error rate for each value.
k_range = range(1,25)
k_scores = []
error_rate =[]
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    #kscores - accuracy
    # 5-fold cross-validated accuracy over the whole (reduced) feature set.
    scores = cross_val_score(knn,dff,df['Type'],cv=5,scoring='accuracy')
    k_scores.append(scores.mean())
    #error rate
    # Misclassification rate on the fixed train/test split from the previous cell.
    knn.fit(X_train,y_train)
    y_pred = knn.predict(X_test)
    error_rate.append(np.mean(y_pred!=y_test))
#plot k vs accuracy
plt.plot(k_range,k_scores)
plt.xlabel('value of k - knn algorithm')
plt.ylabel('Cross validated accuracy score')
plt.show()
#plot k vs error rate
plt.plot(k_range,error_rate)
plt.xlabel('value of k - knn algorithm')
plt.ylabel('Error rate')
plt.show()
```
we can see that k=4 produces the most accurate results
## Findings -
- Manhattan distance produced better results (improved accuracy - more than 5%)
- Applying feature scaling improved accuracy by almost 5%.
- The best k value was found to be 4.
- Dropping Ca improved the results slightly; dropping the K (potassium) feature did not affect the results in any way.
- Also, we noticed that RI and Ca are highly correlated,
this makes sense, as the refractive index of glass has been found to increase with increasing CaO content. (https://link.springer.com/article/10.1134/S1087659614030249)
## Further improvements -
We can see that the model can be improved further so we get better accuracy. Some suggestions -
- Using KFold Cross-validation
- Try different algorithms to find the best one for this problem - (SVM, Random forest, etc)
## Other Useful resources -
- [K Nearest Neighbour Easily Explained with Implementation by Krish Naik - video](https://www.youtube.com/watch?v=wTF6vzS9fy4)
- [KNN by sentdex -video](https://www.youtube.com/watch?v=1i0zu9jHN6U)
- [KNN sklearn - docs ](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
- [Complete guide to K nearest neighbours - python and R - blog](https://kevinzakka.github.io/2016/07/13/k-nearest-neighbor/)
- [Why scaling is required in KNN and K-Means - blog](https://medium.com/analytics-vidhya/why-is-scaling-required-in-knn-and-k-means-8129e4d88ed7)
| github_jupyter |
```
import os
import json
import pandas as pd
import gc
import multiprocessing as mp
%%capture
# LdaMallet
# Dictionary (gensim)
# build_docs
# transform_dt
# get_tw
# get_top_words
%run ./LDAModule.ipynb
# DocsManager
# build_docs
%run ../../DocsManager.ipynb
## Jupyter.notebook.save_checkpoint()
# get_corpus_path
# get_txt_clean_path
%run ../../path_manager.ipynb
# CorpusCleaner
%run ../../DataCleanerModule.ipynb
from gensim.models.wrappers.ldamallet import malletmodel2ldamodel
get_corpus_path('IMF')
# Trained models go under the POS-filtered models directory.
MODELS_PATH = get_models_path('LDA').replace('MODELS', 'MODELS.pos')
MALLET_BINARY_PATH = "../Mallet/bin/mallet"
NUM_WORKERS = get_workers()
NUM_ITERS = 196
# Documents with fewer tokens than this are dropped by the DocsManager.
MIN_TOKEN_COUNT = 50
NGRAM_FILE = '../../whitelists/whitelist_ngrams_cleaned.csv' # '../../whitelists/whitelist_ngrams_truncated_cleaned.csv'
# 2x the cores left after reserving 4 for the rest of the machine.
DOC_PROCESSING_WORKERS = 2 * max(1, os.cpu_count() - 4)
MODELS_PATH
if not os.path.isdir(MODELS_PATH):
    os.makedirs(MODELS_PATH)
import logging
import gc
TRAINING_MODEL_ID = 'LDA'
# Root log file for the whole training run.
logging.basicConfig(filename=f'./{TRAINING_MODEL_ID.lower()}-iters_{NUM_ITERS}.log', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(f'{TRAINING_MODEL_ID.lower()}-logger')
# Train one Mallet LDA model per (corpus partition, topic count) pair and
# export dfr-browser artifacts (tw.json, dt.json, info.json, CSV dumps)
# plus the pickled mallet and gensim models.
for CORPUS_ID in ['WB']:
    if CORPUS_ID == 'WB':
        # Optional partitions; the commented lists were used in earlier runs.
        region_partitions = []  # ['AFR', 'EAP', 'ECA', 'LAC', 'MENA', 'RoW', 'SAR', 'WLD']
        doctype_partitions = ['PD', 'PR']  # ['BD', 'CF', 'ESW', 'PD', 'PR']
        doctype_partitions = doctype_partitions[::-1]  # process 'PR' before 'PD'
        corpus_partitions = ['ALL'] + doctype_partitions + region_partitions
    else:
        corpus_partitions = ['ALL']
    num_topics = [25, 50, 100, 200]  # one model per topic count
    for CORPUS_PART in corpus_partitions:
        # Load the cleaned, POS-filtered documents for this partition.
        docs = build_docs(
            metadata_filename=os.path.join(get_corpus_path(CORPUS_ID), f'{CORPUS_ID.lower()}_pos_metadata_complete.csv'),
            cleaned_files_dir=get_txt_clean_pos_path(CORPUS_ID),
            model_output_dir=MODELS_PATH  # Use flat directory as discussed...
        )
        logger.info(f'Creating partitioned docs and loading files for {CORPUS_ID}-{CORPUS_PART}...')
        docs.set_ngram_mapper(NGRAM_FILE, cleaner=None)
        docs.set_min_token_count(MIN_TOKEN_COUNT)
        docs_filtered, meta = docs.filter_doclist(CORPUS_PART, corpus_id=CORPUS_ID, save=True, return_meta=True, pool_workers=DOC_PROCESSING_WORKERS)
        print(docs_filtered.shape)
        logger.info(f'Building model for {docs_filtered.shape[0]} documents...')
        if docs_filtered.empty:
            continue
        logger.info('Building dictionary...')
        g_dict = Dictionary(docs_filtered.text.str.split())
        # Drop words in fewer than 10 docs or more than 99% of docs; cap vocab at 200k.
        g_dict.filter_extremes(no_below=10, no_above=0.99, keep_n=200000, keep_tokens=None)
        g_dict.id2token = {token_id: token for token, token_id in g_dict.token2id.items()}
        logger.info('Performing doc2bow...')
        # Vectorize documents in parallel; the context manager closes/joins the pool.
        with mp.Pool(NUM_WORKERS) as pool:
            logger.info('Performing parallel doc2bow...')
            corpus = pool.map(g_dict.doc2bow, docs_filtered.text.str.split())
            logger.info('Completed parallel doc2bow...')
        for NUM_TOPICS in num_topics:
            MODEL_ID = f"{CORPUS_PART}_{NUM_TOPICS}"
            logger.info(f'Starting process for {MODEL_ID}...')
            MODEL_FOLDER = os.path.join(MODELS_PATH, f'{CORPUS_ID}-{MODEL_ID}')
            MODEL_DATA_FOLDER = os.path.join(MODEL_FOLDER, 'data')
            MODEL_MALLET_FOLDER = os.path.join(MODEL_FOLDER, 'mallet')
            if not os.path.isdir(MODEL_DATA_FOLDER):
                os.makedirs(MODEL_DATA_FOLDER)
            if not os.path.isdir(MODEL_MALLET_FOLDER):
                os.makedirs(MODEL_MALLET_FOLDER)
            # Per-model log file handler (removed again at the end of this iteration).
            lfh = logging.FileHandler(f'./{CORPUS_ID.lower()}-iters_{NUM_ITERS}-{MODEL_ID}.log')
            lfh.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
            lfh.setFormatter(formatter)
            logger.addHandler(lfh)
            logger.info('Training mallet LDA model...')
            model = LdaMallet(
                MALLET_BINARY_PATH, corpus=corpus, num_topics=NUM_TOPICS, prefix=f'{MODEL_MALLET_FOLDER}/{CORPUS_ID}-{MODEL_ID}_',
                id2word=g_dict.id2token, workers=NUM_WORKERS, iterations=NUM_ITERS,
                random_seed=1029
            )
            logger.info('Completed training mallet LDA model...')
            # Mallet's doc-topics file: first two columns are doc index and
            # name; the remaining columns are the per-topic weights.
            dt = pd.read_csv(
                model.fdoctopics(), delimiter='\t', header=None,
                names=[i for i in range(model.num_topics)], index_col=None,
                usecols=[i + 2 for i in range(model.num_topics)],
            )
            dt.index = docs_filtered['id']
            # NOTE(review): rescales each row by its smallest weight and shifts
            # to zero, presumably to produce integer pseudo-counts for
            # dfr-browser — confirm against transform_dt's expectations.
            dt = dt.divide(dt.min(axis=1), axis=0).astype(int) - 1
            logger.info('Generating dfr-browser data...')
            # Fix: DataFrame.as_matrix() was removed in pandas 1.0; .values is equivalent.
            ddt = transform_dt(dt.values.T)
            ttw = get_tw(model)
            with open(os.path.join(MODEL_DATA_FOLDER, 'tw.json'), 'w') as fl:
                json.dump(ttw, fl)
            with open(os.path.join(MODEL_DATA_FOLDER, 'dt.json'), 'w') as fl:
                json.dump(ddt, fl)
            info_json = {
                # Fix: the original "<\/em>" is not a valid Python escape and
                # left a literal backslash in the emitted HTML title.
                "title": f"Topics in <em>{CORPUS_ID} {MODEL_ID}</em>",
                "meta_info": "This site is the working demo for <a href=\"/\">dfr-browser</a>, a browsing interface for topic models of journal articles or other text.",
                "VIS": {
                    "condition": {
                        "type": "time",
                        "spec": {
                            "unit": "year",
                            "n": 1
                        }
                    },
                    "bib_sort": {
                        "major": "year",
                        "minor": "alpha"
                    },
                    "model_view": {
                        "plot": {
                            "words": 6,
                            "size_range": [6, 14]
                        }
                    }
                }
            }
            with open(os.path.join(MODEL_DATA_FOLDER, 'info.json'), 'w') as fl:
                json.dump(info_json, fl)
            # Generation of key LDA files
            # doc_topics
            logger.info('Storing doc_topics...')
            dt.to_csv(
                os.path.join(MODEL_DATA_FOLDER, f'doc_topics_{MODEL_ID}.csv'),
                header=False,  # Change to True if topic id should be present as the header
                index=False  # Change to True if the uid should be present as the index
            )
            dt.to_csv(
                os.path.join(MODEL_DATA_FOLDER, f'doc_topics_{MODEL_ID}_with_details.csv'),
                header=True,
                index=True
            )
            # topic_words: rows are topics (1-based), columns are vocabulary words.
            word_topics = pd.DataFrame(model.word_topics, columns=range(model.word_topics.shape[1]), index=range(1, model.word_topics.shape[0] + 1))
            word_topics = word_topics.rename(columns=model.id2word)
            logger.info('Storing word_topics...')
            word_topics.astype(int).to_csv(
                os.path.join(MODEL_DATA_FOLDER, f'topic_words_{MODEL_ID}.csv'),
                header=False,  # Change to True if the actual word should be present as the header
                index=False  # Sorted order by topic id
            )
            word_topics.astype(int).to_csv(
                os.path.join(MODEL_DATA_FOLDER, f'topic_words_{MODEL_ID}_with_details.csv'),
                header=True,
                index=False
            )
            logger.info('Storing top_words...')
            top_words = get_top_words(word_topics, topic=None, topn=NUM_TOPICS)
            top_words.to_csv(
                os.path.join(MODEL_DATA_FOLDER, f'top_words_{MODEL_ID}.csv'),
                index=False
            )
            logger.info('Saving mallet lda model...')
            model.save(os.path.join(MODEL_DATA_FOLDER, f'{CORPUS_ID}_lda_model_{MODEL_ID}.mallet.lda'))
            logger.info('Converting mallet lda to gensim lda model...')
            gensim_lda = malletmodel2ldamodel(model, gamma_threshold=0.000001, iterations=1000)
            gensim_lda.minimum_probability = 0.000001
            logger.info('Saving mallet.gensim lda model...')
            gensim_lda.save(os.path.join(MODEL_DATA_FOLDER, f'{CORPUS_ID}_lda_model_{MODEL_ID}.mallet.gensim.lda'))
            logger.info(f'lda model for {CORPUS_ID} completed: {MODEL_ID}...')
            logger.removeHandler(lfh)
            del model
            gc.collect()
        # Free the per-partition objects before the next iteration.
        del docs_filtered
        del docs.doclist
        del docs
        del meta
        del g_dict
        del corpus
        gc.collect()
# Bare expression: display the last partition processed (cell output).
CORPUS_PART
# !../Mallet/bin/mallet import-file --preserve-case --keep-sequence --remove-stopwords --token-regex "\S+" --input /R/NLP/MODELS/LDA/IMF-ALL_20/mallet/IMF-ALL_20_corpus.txt --output /R/NLP/MODELS/LDA/IMF-ALL_20/mallet/IMF-ALL_20_corpus.mallet
# http://microdatahub.com/topicsmodeling/dfr/topic_browser/browser.php?model=data50_SAR&type=SAR&topic_count=50#/doc/9622
```
| github_jupyter |
<h1 style='font-size:2rem;color:blue;'>Project on identifying animals and flowers :</h1>
Padmanabha Bose,
I-PhD- Physics,
Computer code : PBA117
```
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
```
<p><b> All the classifications :</b></p>
```
data = "C:\Data_science"
Categories = ["bougainvilla","Elephant","jasmine","lion","lotus","red rose","shark","spider","tulip","zebra"]
listdata=[]
sX = 100
sY = 100
for category in Categories:
path=os.path.join(data, category)#joined all the paths
for img in os.listdir(path):
img_array=cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
img_array=cv2.resize(img_array,(sX, sY))
listdata.append([img_array])
listdata=np.array(listdata)
print(np.shape(listdata[:,0,0,0]))#I tried to give all the 10 datasets, but somehow jupyter accepted only 5. I have also included a snippet of the folder
```

```
print(np.shape(listdata))
IMG_SIZE=100
# Display the last image read by the loading loop above (quick sanity check).
new_array= cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))
plt.imshow(new_array, cmap="gray")
plt.show()
# Class labels: 10 consecutive images per class, 5 classes loaded.
N=[]
for i in range(5):
    for j in range(10):
        N.append(i)
def print_natural(images, target, top_n):
    """Display the first `top_n` grayscale images in one figure.

    Each tile is annotated with its class label (from `target`) and its
    index. Uses the module-level grid dimensions sX and sY.
    """
    fig = plt.figure(figsize=(50, 50))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for idx in range(top_n):
        axes = fig.add_subplot(sX, sY, idx + 1, xticks=[], yticks=[])  # each image is a 20x20 matrix
        axes.imshow(images[idx, 0, :, :], cmap=plt.cm.bone)
        axes.text(0, 18, str(target[idx]))
        axes.text(0, 60, str(idx))
print_natural(listdata,N,50 )#data containing bougainvilla, elephant, jasmine, red rose and zebra only
# Flatten each (1, 100, 100) image into a single feature row for the SVM.
y=listdata.reshape(len(listdata), -1)
print(np.shape(y))
from sklearn.svm import SVC
svc_1 = SVC(kernel ='poly')
#svc_1 = SVC(kernel ='linear')
#scv_1 = SVC(kernel = 'rbf')
from sklearn.model_selection import train_test_split
# Sweep the held-out fraction and record test (t1) / train (t2) accuracy.
test_size =[0.05,0.2,0.3,0.4,0.5, 0.6, .7, .8, .9, .95]
t1=[]
t2=[]
for i in test_size:
    X_train, X_test, y_train, y_test = train_test_split(y, N, test_size=i, random_state=5)
    svc_1.fit(X_train, y_train)
    train_score=svc_1.score(X_train, y_train)
    print('Score for training set',train_score)
    test_score=svc_1.score(X_test, y_test)
    t1.append(test_score)
    t2.append(train_score)
    print('Score for test set ratio ' + str(i) + " = " + str(test_score) )
    print('--')
print(t1)
from scipy import interpolate
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Smooth the score-vs-test-size curves by linear interpolation.
# NOTE(review): the interpolators map score -> test_size (arguments are
# swapped relative to the plot); interp1d needs unique x values, so
# duplicate scores in t1/t2 would raise — confirm intended.
f1=interpolate.interp1d(t1, test_size, kind='linear') # or simply, f=interpolate.interp1d(x_data, y_data)
f2=interpolate.interp1d(t2, test_size, kind='linear')
xgrids=np.linspace(np.min(t1), np.max(t1), 51)
xgrids1=np.linspace(np.min(t2), np.max(t2), 51)
ygrids_1=f1(xgrids)
ygrids1=f2(xgrids1)
plt.plot(ygrids_1,xgrids,'-',color='g',linewidth=1,label="test")
plt.plot(ygrids1,xgrids1,'-',color='r',linewidth=1,label="train")
plt.legend()
plt.xlabel("test_size")
plt.ylabel("test_score")
plt.title('The variation of test score with the test size')
plt.show()
##animal vs. flower
#N = np.zeros(50)
#N[0:10] = 0
#N[10:20] = 1
#N[20:30] = 0
#N[30:40] = 0
#N[40:50] = 1
##wild or domestic
# NOTE(review): these "wild or domestic" labels are identical to the
# commented-out "animal vs. flower" labels above — verify the intended
# per-class (10 images per block) grouping.
N = np.zeros(50)
N[0:10] = 0
N[10:20] = 1
N[20:30] = 0
N[30:40] = 0
N[40:50] = 1
from sklearn.svm import SVC
svc_1 = SVC(kernel ='poly')
#svc_1 = SVC(kernel ='linear')
#scv_1 = SVC(kernel = 'rbf')
from sklearn.model_selection import train_test_split
test_score1=[]
train_score1=[]
# Same test-size sweep as above, now on the binary labels in N.
test_size =[0.05,0.2,0.3,0.4,0.5, 0.6, .7, .8, .9, .95]
for i in test_size:
    X_train, X_test, y_train, y_test = train_test_split(y, N, test_size=i, random_state=5)
    svc_1.fit(X_train, y_train)
    train_score=svc_1.score(X_train, y_train)
    print('Score for training set',train_score)
    test_score=svc_1.score(X_test, y_test)
    test_score1.append(test_score)
    train_score1.append(train_score)
    print('Score for test set ratio ' + str(i) + " = " + str(test_score) )
    print('--')
print(test_score1)
print(train_score1)
# Fix: `metrics` was never imported in this notebook flow (it is only
# imported much later, in the scratch section), so the original line
# raised NameError on a linear run.
from sklearn.metrics import classification_report

# Per-class precision/recall/F1 on the last train/test split of the sweep.
y_pred_test=svc_1.predict(X_test)
print(classification_report(y_test,y_pred_test))
import matplotlib.pyplot as plt
# Train vs test accuracy across the swept test-set fractions.
# NOTE(review): reassigning `y` here clobbers the flattened image matrix
# defined earlier; any later cell reusing `y` as features will break.
x=test_size
y=test_score1
y1=train_score1
plt.plot(x,y,'k-*', label= 'Test score')
plt.plot(x,y1,'b-o', label= 'Train score')
plt.xlabel('Test Ratio ->')
plt.ylabel('Scores ->')
plt.title('Score Plot')
plt.legend()
plt.grid()
#plt.savefig("A_40_linear_",dpi=200)
plt.show()
```
<p><b> All code that I tried are attached below :</b></p>
```
import sklearn as sk
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from sklearn.model_selection import train_test_split
data = "/content/drive/MyDrive/project_vehicle"
Categories = ["airplane","auto","bike","bus","car","heli","horse_cart","ship","tank","train"]
listdata=[]
sX = 100
sY = 100
for category in Categories:
path=os.path.join(data, category)
for img in os.listdir(path):
img_array=cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
img_array=cv2.resize(img_array,(sX, sY))
listdata.append([img_array])
listdata=np.array(listdata)
training_data=[]

def create_training_data(datadir=data, img_size=100):
    """Fill the module-level `training_data` list with [image, class_index] pairs.

    Fix: the original body referenced `Datadir` and `IMG_SIZE`, neither of
    which is defined anywhere in this notebook (NameError). They are now
    parameters defaulting to the `data` directory and the 100px size used
    throughout the notebook; calling with no arguments is backward compatible.
    """
    for category in Categories:
        path = os.path.join(datadir, category)
        class_num = Categories.index(category)  # numeric label = position in Categories
        for img in os.listdir(path):
            img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(img_array, (img_size, img_size))
            training_data.append([new_array, class_num])

create_training_data()
print(training_data[5])
import random
# Shuffle so classes are interleaved before any split.
random.shuffle(training_data)
for sample in training_data:
    print (sample[1])
from PIL import Image
import matplotlib.pyplot as plt
# Eyeball one raw source image.
my_img=Image.open(r'C:\Data_science\bougainvilla\1.jpg')
plt.imshow(my_img)
plt.show()
print(training_data)
import splitfolders
# Split with a ratio.
# To only split into training and validation set, set a tuple to `ratio`, i.e, `(.8, .2)`.
# 80/10/10 train/val/test split on disk; move=False copies files instead of moving.
splitfolders.ratio("C:\Data_science", output="C:\Data_science_training_set",
    seed=1337, ratio=(.8, .1, .1), group_prefix=None, move=False) # default values
from sklearn.svm import SVC # Support Vector Classifier
svc_1 = SVC(kernel='linear')
print(np.shape(listdata[:,0]))
from sklearn.model_selection import train_test_split
# NOTE(review): this passes the list of [image, label] pairs as both X and y;
# SVC.fit cannot consume that shape, so this experimental cell fails as written.
X_train, X_test, y_train, y_test = train_test_split(training_data,training_data , test_size=0.2, random_state=0)
from sklearn import metrics
svc_1.fit(X_train, y_train)
train_score=svc_1.score(X_train, y_train)
print('Score for training set',train_score)
test_score=svc_1.score(X_test, y_test)
print('Score for test set',test_score)
path = 'C:\Data_science'
list_of_files = os.listdir(path)
# Fixes: np.empty(0) is an ndarray and has no .append() (AttributeError),
# and os.path.join(path + i) concatenated directory and filename with no
# separator. Collect images in a plain list and join path components properly.
data = []
for i in list_of_files:
    x = cv2.imread(os.path.join(path, i))
    data.append(x)
import joblib
from skimage.io import imread
from skimage.transform import resize
def resize_all(src, pklname, include, width=150, height=None):
    """
    load images from path, resize them and write them as arrays to a dictionary,
    together with labels and metadata. The dictionary is written to a pickle file
    named '{pklname}_{width}x{height}px.pkl'.
    Parameter
    ---------
    src: str
        path to data
    pklname: str
        path to output file
    width: int
        target width of the image in pixels
    include: set[str]
        set containing str
    """
    if height is None:
        height = width
    # Accumulate everything in one dict so a single pickle holds data + metadata.
    data = {
        'description': 'resized ({0}x{1})animal images in rgb'.format(int(width), int(height)),
        'label': [],
        'filename': [],
        'data': [],
    }
    pklname = f"{pklname}_{width}x{height}px.pkl"
    # read all images in PATH, resize and write to DESTINATION_PATH
    for subdir in os.listdir(src):
        if subdir not in include:
            continue
        print(subdir)
        current_path = os.path.join(src, subdir)
        for file in os.listdir(current_path):
            if file[-3:] not in {'jpg', 'png'}:
                continue
            im = imread(os.path.join(current_path, file))
            im = resize(im, (width, height))  # [:,:,::-1]
            data['label'].append(subdir[:-4])
            data['filename'].append(file)
            data['data'].append(im)
    joblib.dump(data, pklname)
data_path = 'C:\Data_science'
os.listdir(data_path)
base_name = 'identification'
width = 80
include = {'bougainvilla','Elephant','jasmine','lion','lotus','red rose','shark','spider','tulip','zebra'}
# Build the pickle of resized images for all ten classes.
resize_all(src=data_path, pklname=base_name, width=width, include=include)
from collections import Counter
# Reload the pickle and sanity-check its contents.
data = joblib.load(f'{base_name}_{width}x{width}px.pkl')
print('number of samples: ', len(data['data']))
print('keys: ', list(data.keys()))
print('description: ', data['description'])
print('image shape: ', data['data'][0].shape)
print('labels:', np.unique(data['label']))
Counter(data['label'])
from sklearn.datasets import fetch_olivetti_faces
# Unrelated scratch: sample faces dataset from sklearn.
faces=fetch_olivetti_faces()
print(faces.data)
print(np.shape(listdata[:,0]))
```
| github_jupyter |
# Package loading
```
# Imports
import os
import json
import argparse
import time
import ipdb
import spacy
import torch
import optuna
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
from tqdm import tqdm
from collections import deque
import torch.optim as optim
from torchtext import data
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from torch.utils.data import Dataset, TensorDataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from transformers import RobertaTokenizer, BertModel, TransfoXLTokenizer, TransfoXLModel, AdamW
from transformers import BigBirdTokenizer, BigBirdForSequenceClassification
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import LongformerTokenizer, LongformerForSequenceClassification
from transformers import XLNetTokenizer, XLNetForSequenceClassification
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
```
# Data Exploration
```
# Earlier experiment paths kept for reference:
# train_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/paperwithcode/5Neg10unk/twofoldwithunk/fold1/train.tsv"
# valid_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/paperwithcode/5Neg10unk/twofoldwithunk/fold1/dev.tsv"
# train_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/paperwithcode/new/60Neg800unk/twofoldwithunk/fold1/train.tsv"
# valid_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/paperwithcode/new/60Neg800unk/twofoldwithunk/fold1/dev.tsv"
train_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/ibm/exp/few-shot-setup/NLP-TDMS/paperVersion/train.tsv"
valid_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/ibm/exp/few-shot-setup/NLP-TDMS/paperVersion/test.tsv"
output_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/ibm/exp/few-shot-setup/NLP-TDMS/paperVersion/torch/SciBert/"
model_pt_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/ibm/exp/few-shot-setup/NLP-TDMS/paperVersion/torch/SciBert/Model_SciBert_avg_metric_0.9001.pt"
N_EPOCHS = 3
model_name = "SciBert"
# Fallback sequence length used when the tokenizer publishes no model max size.
max_input_len = 512
# output_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/paperwithcode/new/60Neg800unk/twofoldwithunk/fold1/"
bs = 16
# Registry: model key -> [tokenizer class, sequence-classification class, HF checkpoint name].
processors = {
    "Bert": [BertTokenizer, BertForSequenceClassification, "bert-base-uncased"],
    "SciBert": [BertTokenizer, BertForSequenceClassification, "allenai/scibert_scivocab_uncased"],
    "XLNet": [XLNetTokenizer, XLNetForSequenceClassification, "xlnet-base-cased"],
    "BigBird": [BigBirdTokenizer, BigBirdForSequenceClassification, "google/bigbird-roberta-base"],
    "Longformer": [LongformerTokenizer, LongformerForSequenceClassification, "allenai/longformer-base-4096"],
}
# Tab-separated files with columns: label, title, TDM (task-dataset-metric
# string, per the repo naming), and the context passage.
train_df = pd.read_csv(train_path,
    sep="\t", names=["label", "title", "TDM", "Context"])
valid_df = pd.read_csv(valid_path,
    sep="\t", names=["label", "title", "TDM", "Context"])
train_df.head()
valid_df.head()
```
# Model
```
# Resolve the processor triple for the selected model, load its tokenizer,
# and determine the maximum input length.
if model_name in processors.keys():
    selected_processor = processors[model_name]
else:
    print(f"Model not available check selected model only {list(processors.keys())} as supported")
    quit()
# Fix: the original loaded the bert-base-uncased tokenizer for SciBERT, but
# the SciBERT checkpoint ships its own vocabulary (scivocab); mixing
# vocabularies silently degrades the model. Always load the tokenizer from
# the same checkpoint as the model.
tokenizer = selected_processor[0].from_pretrained(selected_processor[2])
init_token = tokenizer.cls_token
eos_token = tokenizer.sep_token
pad_token = tokenizer.pad_token
unk_token = tokenizer.unk_token
print(init_token, eos_token, pad_token, unk_token)
init_token_idx = tokenizer.convert_tokens_to_ids(init_token)
eos_token_idx = tokenizer.convert_tokens_to_ids(eos_token)
pad_token_idx = tokenizer.convert_tokens_to_ids(pad_token)
unk_token_idx = tokenizer.convert_tokens_to_ids(unk_token)
print(init_token_idx, eos_token_idx, pad_token_idx, unk_token_idx)
# max_model_input_sizes has no entry for the SciBERT checkpoint name, so fall
# back to BERT-base's published limit (SciBERT is a BERT-base model), then to
# the configured default. .get avoids the original's possible KeyError.
max_input_length = tokenizer.max_model_input_sizes.get(selected_processor[2])
if not max_input_length and model_name == "SciBert":
    max_input_length = tokenizer.max_model_input_sizes.get("bert-base-uncased")
if not max_input_length:
    max_input_length = max_input_len
print(f"Maximun sequence lenght {max_input_length}")
class TransformersNLI(Dataset):
    """Tokenise (TDM, Context) sentence pairs and wrap them in DataLoaders.

    Each dataframe row is encoded as [CLS] TDM [SEP] Context [SEP] with
    BERT-style segment ids (0 for the first segment, 1 for the second) and an
    attention mask; sequences are padded per batch with pad_sequence.
    """

    def __init__(self, tokenizer, max_input_length):
        # Binary task labels; a default NLI setup would be
        # {'entailment': 0, 'contradiction': 1, 'neutral': 2}.
        self.label_dict = {'True': 0, 'False': 1}
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length.

        Simple heuristic: trim one token at a time from the longer sequence.
        This keeps more of the shorter sequence, whose tokens each tend to
        carry more information than those of a longer one.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def load_data(self, df):
        """Encode a dataframe with 'TDM', 'Context' and 'label' columns into a TensorDataset."""
        MAX_LEN = self.max_input_length
        token_ids = []
        mask_ids = []
        seg_ids = []
        y = []
        premise_list = df['TDM'].to_list()
        hypothesis_list = df['Context'].to_list()
        label_list = df['label'].to_list()
        for (premise, hypothesis, label) in tqdm(zip(premise_list, hypothesis_list, label_list), total=len(label_list)):
            premise_id = self.tokenizer.encode(premise, add_special_tokens = False)
            hypothesis_id = self.tokenizer.encode(hypothesis, add_special_tokens = False)
            # Tokenizer long-sequence warnings can be ignored: truncation is
            # handled here. -3 accounts for the [CLS] and two [SEP] tokens.
            self._truncate_seq_pair(premise_id, hypothesis_id, MAX_LEN - 3)
            pair_token_ids = [self.tokenizer.cls_token_id] + premise_id \
                + [self.tokenizer.sep_token_id] + hypothesis_id \
                + [self.tokenizer.sep_token_id]
            premise_len = len(premise_id)
            hypothesis_len = len(hypothesis_id)
            segment_ids = torch.tensor([0] * (premise_len + 2) + [1] * (hypothesis_len + 1))  # sentence 0 vs sentence 1
            attention_mask_ids = torch.tensor([1] * (premise_len + hypothesis_len + 3))  # real tokens only; padding stays masked
            token_ids.append(torch.tensor(pair_token_ids))
            seg_ids.append(segment_ids)
            mask_ids.append(attention_mask_ids)
            # str(label) so boolean labels map onto the 'True'/'False' keys.
            y.append(self.label_dict[str(label)])
        token_ids = pad_sequence(token_ids, batch_first=True)
        mask_ids = pad_sequence(mask_ids, batch_first=True)
        seg_ids = pad_sequence(seg_ids, batch_first=True)
        y = torch.tensor(y)
        dataset = TensorDataset(token_ids, mask_ids, seg_ids, y)
        print(len(dataset))
        return dataset

    def _make_loader(self, df, batch_size, shuffle):
        # Shared DataLoader construction for the three public helpers below
        # (the original duplicated this block three times).
        return DataLoader(self.load_data(df), shuffle=shuffle, batch_size=batch_size)

    def get_train_data(self, train_df, batch_size=32, shuffle=True):
        return self._make_loader(train_df, batch_size, shuffle)

    def get_valid_data(self, valid_df, batch_size=32, shuffle=True):
        return self._make_loader(valid_df, batch_size, shuffle)

    def get_inference_data(self, test_df, batch_size=32, shuffle=False):
        return self._make_loader(test_df, batch_size, shuffle)
TDM_dataset = TransformersNLI(tokenizer, max_input_length)

# Tokenising the full corpus is expensive, so cache the DataLoaders on disk
# and reuse them when present.
train_cache = f'{output_path}train_loader_{bs}_seq_{max_input_length}.pth'
valid_cache = f'{output_path}valid_loader_{bs}_seq_{max_input_length}.pth'

if os.path.exists(train_cache):
    train_loader = torch.load(train_cache)
else:
    train_loader = TDM_dataset.get_train_data(train_df, batch_size=bs, shuffle=True)
    # Save dataloader
    torch.save(train_loader, train_cache)

# Fix: the original *deleted* an existing cached validation loader instead of
# loading it (leftover debugging around a block of commented-out code).
# Restore the same load-or-build-and-save pattern as for the training loader.
# shuffle=False keeps evaluation order deterministic.
if os.path.exists(valid_cache):
    valid_loader = torch.load(valid_cache)
else:
    valid_loader = TDM_dataset.get_valid_data(valid_df, batch_size=bs, shuffle=False)
    # Save dataloader
    torch.save(valid_loader, valid_cache)
```
## Build the Model
```
# Instantiate the sequence-classification head with 2 labels (True/False).
model = selected_processor[1].from_pretrained(
    selected_processor[2], num_labels=2)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Wrap in DataParallel when several GPUs are visible.
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
else:
    print(f"Device: {device}")
model = model.to(device)
# Standard BERT fine-tuning setup: apply weight decay to all parameters
# except those whose names contain 'bias', 'gamma' or 'beta'.
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, correct_bias=False)
def count_parameters(model):
    """Return a (trainable, frozen) tuple of parameter counts for *model*."""
    trainable = 0
    frozen = 0
    for param in model.parameters():
        if param.requires_grad:
            trainable += param.numel()
        else:
            frozen += param.numel()
    return (trainable, frozen)
# Report trainable vs frozen parameter counts for the loaded model.
print(f'The model has {count_parameters(model)[0]:,} trainable parameters')
print(f'The model has {count_parameters(model)[1]:,} non-trainable parameters')
class AverageMeter(object):
    """Tracks the most recent value and a running (optionally weighted) mean.

    Exposes `val` (last value), `sum`, `count` and `avg`; used by the
    training loop to accumulate per-batch metrics.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def train(model, iterator, optimizer, epoch):
    """Train `model` for one epoch over `iterator`.

    Uses the module-level `device`. All metrics are running averages of
    per-batch sklearn scores (an approximation of epoch-level scores, not
    identical to them). Progress is printed every 1000 batches.

    Returns (loss, acc, macro P, macro R, macro F1, micro P, micro R, micro F1).
    """
    model.train()
    train_loss = AverageMeter()
    train_acc = AverageMeter()
    train_macro_p = AverageMeter()
    train_macro_r = AverageMeter()
    train_macro_f1 = AverageMeter()
    train_micro_p = AverageMeter()
    train_micro_r = AverageMeter()
    train_micro_f1 = AverageMeter()
    for batch_idx, (pair_token_ids, mask_ids, seg_ids, y) in tqdm(enumerate(iterator), total=len(iterator)):
        optimizer.zero_grad()
        pair_token_ids = pair_token_ids.to(device)
        mask_ids = mask_ids.to(device)
        seg_ids = seg_ids.to(device)
        labels = y.to(device)
        # ipdb.set_trace()
        # loss, prediction = model(pair_token_ids,
        #                          token_type_ids=seg_ids,
        #                          attention_mask=mask_ids,
        #                          labels=labels).values()
        # Passing labels makes the model return its loss alongside the logits.
        outputs = model(pair_token_ids,
                        token_type_ids=seg_ids,
                        attention_mask=mask_ids,
                        labels=labels)
        loss = outputs.loss
        prediction = outputs.logits
        loss.backward()
        optimizer.step()
        # Predicted class = argmax over per-class log-probabilities.
        prediction = torch.log_softmax(prediction, dim=1).argmax(dim=1)
        train_acc.update(prediction.eq(labels.view_as(prediction)).sum().item()/len(labels)) # accuracy_score(labels.cpu(), prediction.cpu())
        train_loss.update(loss.item())
        train_macro_p.update(precision_score(labels.cpu(), prediction.cpu(), average ='macro'))
        train_macro_r.update(recall_score(labels.cpu(), prediction.cpu(), average ='macro'))
        train_macro_f1.update(f1_score(labels.cpu(), prediction.cpu(), average ='macro'))
        train_micro_p.update(precision_score(labels.cpu(), prediction.cpu(), average ='micro'))
        train_micro_r.update(recall_score(labels.cpu(), prediction.cpu(), average ='micro'))
        train_micro_f1.update(f1_score(labels.cpu(), prediction.cpu(), average ='micro'))
        if (batch_idx + 1) % 1000 == 0:
            print(f"[epoch {epoch+1}] [iter {(batch_idx + 1)}/{len(iterator)}]")
            print('------------------------------------------------------------')
            print(f"Train Accuracy Score: {train_acc.avg}; Train loss : {train_loss.avg}")
            print(f"Macro Precision: {train_macro_p.avg}; Macro Recall : {train_macro_r.avg}; Macro F1 : {train_macro_f1.avg}")
            print(f"Micro Precision: {train_micro_p.avg}; Micro Recall : {train_micro_r.avg}; Micro F1 : {train_micro_f1.avg}")
            print('------------------------------------------------------------')
    return train_loss.avg, train_acc.avg, train_macro_p.avg, train_macro_r.avg, train_macro_f1.avg, train_micro_p.avg, train_micro_r.avg, train_micro_f1.avg
def evaluate(model, iterator, optimizer):
    """Evaluate `model` over `iterator` without gradient tracking.

    `optimizer` is accepted only to mirror `train`'s signature; it is never used.
    Metrics are running averages of per-batch sklearn scores.

    Returns (loss, acc, macro P, macro R, macro F1, micro P, micro R, micro F1).
    """
    model.eval()
    val_loss = AverageMeter()
    val_acc = AverageMeter()
    val_macro_p = AverageMeter()
    val_macro_r = AverageMeter()
    val_macro_f1 = AverageMeter()
    val_micro_p = AverageMeter()
    val_micro_r = AverageMeter()
    val_micro_f1 = AverageMeter()
    with torch.no_grad():
        for batch_idx, (pair_token_ids, mask_ids, seg_ids, y) in tqdm(enumerate(iterator), total=len(iterator)):
            pair_token_ids = pair_token_ids.to(device)
            mask_ids = mask_ids.to(device)
            seg_ids = seg_ids.to(device)
            labels = y.to(device)
            outputs = model(pair_token_ids,
                            token_type_ids=seg_ids,
                            attention_mask=mask_ids,
                            labels=labels)
            loss = outputs.loss
            prediction = outputs.logits
            prediction = torch.log_softmax(prediction, dim=1).argmax(dim=1)
            # BUGFIX: removed a leftover `ipdb.set_trace()` here -- it suspended
            # every evaluation run at the first batch waiting for a debugger.
            val_acc.update(prediction.eq(labels.view_as(prediction)).sum().item()/len(labels)) # accuracy_score(labels.cpu(), prediction.cpu())
            val_macro_p.update(precision_score(labels.cpu(), prediction.cpu(), average ='macro'))
            val_macro_r.update(recall_score(labels.cpu(), prediction.cpu(), average ='macro'))
            val_macro_f1.update(f1_score(labels.cpu(), prediction.cpu(), average ='macro'))
            val_micro_p.update(precision_score(labels.cpu(), prediction.cpu(), average ='micro'))
            val_micro_r.update(recall_score(labels.cpu(), prediction.cpu(), average ='micro'))
            val_micro_f1.update(f1_score(labels.cpu(), prediction.cpu(), average ='micro'))
            val_loss.update(loss.item())
    val_macro_avg_p, val_macro_avg_r, val_macro_avg_f1 = val_macro_p.avg, val_macro_r.avg, val_macro_f1.avg
    val_micro_avg_p, val_micro_avg_r, val_micro_avg_f1 = val_micro_p.avg, val_micro_r.avg, val_micro_f1.avg
    print('------------------------------------------------------------')
    # typo "Vadidation" fixed in the message below
    print(f"Validation Accuracy Score : {val_acc.avg}; Validation loss : {val_loss.avg}")
    print(f"Macro Precision : {val_macro_avg_p}; Macro Recall : {val_macro_avg_r}; Macro F1 : {val_macro_avg_f1}")
    print(f"Micro Precision : {val_micro_avg_p}; Micro Recall : {val_micro_avg_r}; Micro F1 : {val_micro_avg_f1}")
    print('------------------------------------------------------------')
    return val_loss.avg, val_acc.avg, val_macro_avg_p, val_macro_avg_r, val_macro_avg_f1, val_micro_avg_p, val_micro_avg_r, val_micro_avg_f1
def predict_TDM_from_pdf(model, tokenizer, iterator, output_path):
    """Score every batch in `iterator` and append the per-example sigmoid
    scores as "true<TAB>false" rows to `{output_path}test_results.tsv`."""
    model.eval()
    with torch.no_grad():
        for batch_idx, (pair_token_ids, mask_ids, seg_ids, y) in tqdm(enumerate(iterator), total=len(iterator)):
            pair_token_ids = pair_token_ids.to(device)
            mask_ids = mask_ids.to(device)
            seg_ids = seg_ids.to(device)
            labels = y.to(device)
            outputs = model(pair_token_ids,
                            token_type_ids=seg_ids,
                            attention_mask=mask_ids,
                            labels=labels)
            # Independent per-logit sigmoid scores for the (true, false) pair.
            scaled_scores = torch.sigmoid(outputs.logits)
            with open(f"{output_path}test_results.tsv", "a+", encoding="utf-8") as text_file:
                for true, false in scaled_scores.cpu():
                    text_file.write(str(true.item())+"\t"+str(false.item())+"\n")
def get_top_n_prediction_label(path_to_test_file, path_to_prediction_file, output_path, n = 5):
    """Return the `n` labels predicted "true" with the highest probability.

    `path_to_test_file` is a TSV whose third column holds the candidate label;
    `path_to_prediction_file` holds one "true_prob<TAB>false_prob" row per line,
    aligned 1:1 with the test file. Rows where true_prob > false_prob are kept,
    sorted ascending by probability; a deque with maxlen=n then drops the low
    end, so only the `n` most probable labels survive (still in ascending order).

    The result is also written to `{output_path}test_top_{n}_tdm.tsv`.
    Returns the deque of (label, true_prob) pairs.
    """
    candidates = []
    with open(f"{path_to_test_file}") as f:
        txt_test_files = f.read().splitlines()
    with open(f"{path_to_prediction_file}") as f:
        txt_prediction_files = f.read().splitlines()
    for example, prediction in zip(txt_test_files, txt_prediction_files):
        true_prob, false_prob = prediction.split("\t")
        true_prob, false_prob = float(true_prob), float(false_prob)
        if true_prob > false_prob:
            # Third TSV column is the candidate TDM label.
            label = example.split("\t")[2]
            candidates.append((label, true_prob))
    # Simplified sort key: the old `x[1] if x else x` was a no-op conditional
    # (the tuples are always truthy).
    results = deque(sorted(candidates, key=lambda pair: pair[1]), n)
    with open(f"{output_path}test_top_{n}_tdm.tsv", "w+", encoding="utf-8") as text_file:
        for tdm in results:
            text_file.write(f"{tdm[0]}\t{tdm[1]}\n")
    return results
def write_evaluation_result(val_macro_avg_p, val_macro_avg_r, val_macro_avg_f1, val_micro_avg_p, val_micro_avg_r, val_micro_avg_f1, output_path):
    """Persist final macro/micro P, R and F1 scores as a two-line TSV
    named `evaluation_tdm_results.tsv` under `output_path`."""
    header = "Macro P\tMacro R\t Macro F1\t Micro P\t Micro R\t Micro F1\n"
    values = f"{val_macro_avg_p}\t{val_macro_avg_r}\t{val_macro_avg_f1}\t{val_micro_avg_p}\t{val_micro_avg_r}\t{val_micro_avg_f1}\n"
    with open(f"{output_path}evaluation_tdm_results.tsv", "w+", encoding="utf-8") as sink:
        sink.write(header)
        sink.write(values)
import time
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole (minutes, seconds)."""
    minutes, seconds = divmod(end_time - start_time, 60)
    return int(minutes), int(seconds)
# best_valid_loss = 0.30 #float('inf')
# best_valid_f1 = 0.5
# for epoch in range(N_EPOCHS):
# start_time = time.time()
# # train_loss, train_acc, train_macro_avg_p, train_macro_avg_r, train_macro_avg_f1, train_micro_avg_p, train_micro_avg_r, train_micro_avg_f1 = train(model, train_loader, optimizer, epoch)
# valid_loss, valid_acc, val_macro_avg_p, val_macro_avg_r, val_macro_avg_f1, val_micro_avg_p, val_micro_avg_r, val_micro_avg_f1 = evaluate(model, valid_loader, optimizer)
# end_time = time.time()
# epoch_mins, epoch_secs = epoch_time(start_time, end_time)
# print(f'Epoch: {epoch+1:02} Final | Epoch Time: {epoch_mins}m {epoch_secs}s')
# print('------------------------------------------------------------')
# print(f"Train Accuracy Score: {train_acc}; Train loss : {train_loss}")
# print(f"Macro Precision: {train_macro_avg_p}; Macro Recall : {train_macro_avg_r}; Macro F1 : {train_macro_avg_f1}")
# print(f"Micro Precision: {train_micro_avg_p}; Micro Recall : {train_micro_avg_r}; Micro F1 : {train_micro_avg_f1}")
# print('------------------------------------------------------------')
# valid_metric_avg = (val_macro_avg_p + val_macro_avg_r + val_macro_avg_f1+val_micro_avg_p + val_micro_avg_r + val_micro_avg_f1)/6
# if valid_metric_avg > best_valid_metric_avg : #and abs(valid_loss - best_valid_loss) < 1e-1
# best_valid_metric_avg = valid_metric_avg
# print('Saving Model ...')
# torch.save(model.state_dict(), f'{output_path}Model_{model_name}_avg_metric_{str(best_valid_metric_avg)[:4]}.pt')
# print('****************************************************************************')
# print('best record: [epoch %d], [val loss %.5f], [val acc %.5f], [val avg. metric %.5f]' % (epoch, valid_loss, valid_acc, valid_metric_avg))
# print(f"Macro Precision : {val_macro_avg_p}; Macro Recall : {val_macro_avg_r}; Macro F1 : {val_macro_avg_f1}")
# print(f"Micro Precision : {val_micro_avg_p}; Micro Recall : {val_micro_avg_r}; Micro F1 : {val_micro_avg_f1}")
# print('****************************************************************************')
```
## Inference
We'll then use the model to classify some example sequences. We tokenize the input sequence, trim it down to the maximum length, add the special tokens to either side, convert it to a tensor, add a fake batch dimension and then pass it through our model.
```
# Best checkpoint from training -- machine-specific absolute path; TODO parameterize.
model_pt_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/ibm/exp/few-shot-setup/NLP-TDMS/paperVersion/torch/SciBert/Model_SciBert_avg_metric_0.9001.pt"
# model_pt_path = "/nfs/home/kabenamualus/Research/task-dataset-metric-extraction/data/paperwithcode/new/60Neg800unk/twofoldwithunk/fold1/Model_SciBert_avg_metric_0.95.pt"
# Reload the best model
# model.load_state_dict(torch.load('Model_f1_0.93.pt'))
model.load_state_dict(torch.load(model_pt_path))
valid_loss, valid_acc, val_macro_avg_p, val_macro_avg_r, val_macro_avg_f1, val_micro_avg_p, val_micro_avg_r, val_micro_avg_f1 = evaluate(model, valid_loader, optimizer)
# test_df = pd.read_csv("../data/paperwithcode/new/60Neg800unk/twofoldwithunk/fold1/test_results.tsv",
#                       sep="\t", names=["true", "false"])
# test_df = pd.read_csv("../data/paperwithcode/new/jar/10Neg20unk/testOutput.tsv",
#                       sep="\t", names=["label", "title", "TDM", "Context"])
# Inference below reuses the validation split as the test set.
test_df = valid_df
test_df.head()
# test_loader = TDM_dataset.get_inference_data(test_df, batch_size=16, shuffle=False) # this shuffle should be false to preserve the order
with open(valid_path) as f:
    list_prediction_inputs = f.read().splitlines()
len(list_prediction_inputs)
review_text = list_prediction_inputs[0]
review_text[:100]
# Smoke test: encode a single example padded/truncated to max_input_length.
encoded_review = tokenizer.encode_plus(
    list_prediction_inputs[-200],
    max_length=max_input_length,
    add_special_tokens=True,
    return_token_type_ids=False,
    pad_to_max_length=True,
    return_attention_mask=True,
    return_tensors='pt',
)
input_ids = encoded_review['input_ids'].to(device)
attention_mask = encoded_review['attention_mask'].to(device)
# NOTE(review): attention_mask is passed positionally; confirm the second
# positional argument of this model class really is attention_mask.
outputs = model(input_ids, attention_mask)
prediction_scalled = torch.sigmoid(outputs.logits)
# _, prediction = torch.max(output, dim=1)
# print(f'Review text: {review_text}')
print(f'Output : {prediction_scalled}')
print(f'Outputs logits : {outputs.logits}')
# sample = iter(test_loader)
# sample.next()
# def predict_TDM_from_pdf(model, tokenizer, sentence):
# model.eval()
# tokens = tokenizer.tokenize(sentence)
# tokens = tokens[:max_input_length-2]
# indexed = [init_token_idx] + tokenizer.convert_tokens_to_ids(tokens) + [eos_token_idx]
# tensor = torch.LongTensor(indexed).to(device)
# tensor = tensor.unsqueeze(0)
# prediction = torch.sigmoid(model(tensor))
# return prediction.item()
def predict_TDM_from_pdf(model, tokenizer, iterator):
    """Score every batch in `iterator` and append "true<TAB>false" sigmoid
    scores to ./test_results.tsv (same behavior as the earlier variant,
    minus the configurable output path)."""
    model.eval()
    with torch.no_grad():
        for batch_idx, (pair_token_ids, mask_ids, seg_ids, y) in tqdm(enumerate(iterator), total=len(iterator)):
            pair_token_ids = pair_token_ids.to(device)
            mask_ids = mask_ids.to(device)
            seg_ids = seg_ids.to(device)
            labels = y.to(device)
            # Access loss/logits by attribute instead of positionally unpacking
            # `.values()`: that silently breaks if the output's key order ever
            # changes, and matches how the rest of this notebook reads outputs.
            outputs = model(pair_token_ids,
                            token_type_ids=seg_ids,
                            attention_mask=mask_ids,
                            labels=labels)
            prediction_scalled = torch.sigmoid(outputs.logits)
            with open("test_results.tsv", "a+", encoding="utf-8") as text_file:
                for true, false in prediction_scalled.cpu():
                    text_file.write(str(true.item())+"\t"+str(false.item())+"\n")
# Score the test loader with the predictor above (appends to ./test_results.tsv).
predict_TDM_from_pdf(model, tokenizer, test_loader)
from collections import deque
def get_top_n_prediction_label(path_to_test_file, path_to_prediction_file, n = 5):
    """Return a deque of the `n` most probable "true" labels.

    Reads aligned test/prediction TSVs (label in the third test column,
    "true_prob<TAB>false_prob" per prediction row), keeps rows where
    true_prob > false_prob, sorts ascending by probability, and lets a
    deque with maxlen=n drop the low end -- so the `n` highest-probability
    labels remain, in ascending order.
    """
    candidates = []
    with open(f"{path_to_test_file}") as f:
        txt_test_files = f.read().splitlines()
    with open(f"{path_to_prediction_file}") as f:
        txt_prediction_files = f.read().splitlines()
    for example, prediction in zip(txt_test_files, txt_prediction_files):
        true_prob, false_prob = prediction.split("\t")
        true_prob, false_prob = float(true_prob), float(false_prob)
        if true_prob > false_prob:
            label = example.split("\t")[2]
            candidates.append((label, true_prob))
    # Removed the dead `highest` local and the no-op `x[1] if x else x` sort key.
    return deque(sorted(candidates, key=lambda pair: pair[1]), n)
# Report only the single most probable TDM label for the jar test output.
get_top_n_prediction_label(
    path_to_test_file="../data/paperwithcode/new/jar/10Neg20unk/testOutput.tsv",
    path_to_prediction_file="test_results.tsv",
    n = 1)
```
| github_jupyter |
# Callbacks From Transformers To Driver
Transformers are the only Fugue extensions to execute on remote worker nodes. For some scenarios, the transformers need to communicate with the driver while it is running. For example, a transformer is training a Keras model, and at the end of each epoch, it needs to report the metrics to the driver and the driver can respond with a decision whether to stop the training.
In Fugue, the callback model is also abstract, you only need to define the callback functions and specify the server to handle remote calls.
## The simplest example
The simplest way to have a call back, is to define a callback parameter in the interfaceless way. You only need to annotate the parameter with `callable`, `Callable` or `Callable` with arguments, for example `Callable[[str],str]`. And this parameter must be after all dataframe parameters and before all other parameters.
```
import pandas as pd
from fugue import FugueWorkflow
# schema: *
def print_describe_and_return(df:pd.DataFrame, cb:callable) -> pd.DataFrame:
    """Send this partition's describe() summary to the driver callback,
    then pass the partition through unchanged. (`# schema: *` tells Fugue
    the output schema equals the input schema.)"""
    summary = str(df.describe())
    cb(summary)
    return df
# Three distinct values of "a" -> three partitions -> the callback fires three times.
dag = FugueWorkflow()
df = dag.df([[0,0],[1,1],[0,1],[2,2]],"a:int,b:int")
df.partition(by=["a"]).transform(print_describe_and_return, callback = lambda x:print(x)).show()
dag.run()
```
In the above example, it's a typical interfaceless transformer example with two additions: `cb:callable` in the transformer, and `callback = lambda x:print(x)` in the transform function. `cb:callable` is to tell Fugue that we want to use a callback inside the transformation. `callback = lambda x:print(x)` is to define the function that will execute on the driver.
As you can see, since there are 3 partitions of `a`, there are 3 descriptions printed, and in the end, the output dataframe is also printed.
You can make the callback optional
```
from typing import Optional, Callable, Any
# schema: *
def print_describe_and_return(df:pd.DataFrame, cb:Optional[Callable[[Any],None]]) -> pd.DataFrame:
    """Report this partition's describe() to the driver when a callback is
    provided; with cb=None the partition passes through silently."""
    if cb is None:
        return df
    cb(str(df.describe()))
    return df
# Same pipeline run twice: once with a callback handler, once without.
dag = FugueWorkflow()
df = dag.df([[0,0],[1,1],[0,1],[2,2]],"a:int,b:int")
df.partition(by=["a"]).transform(print_describe_and_return, callback = lambda x:print(x)).show()
dag.run()
dag2 = FugueWorkflow()
df = dag2.df([[0,0],[1,1],[0,1],[2,2]],"a:int,b:int")
# No callback handler -> the transformer receives None for `cb`.
df.partition(by=["a"]).transform(print_describe_and_return).show()
dag2.run()
```
In the above example we use `Optional` to tell Fugue that this transformer can work with or without the callback. The transformer code is responsible to check null on the callback parameter. And if you don't provide a callback handler when you invoke the transformer, the transformer side will get None on the callback parameter.
This is a more flexible way where your transformer can be used in different situations.
## Callbacks on distributed execution engines
The above code runs on the current process using `NativeExecutionEngine`. It's the minimal code to test whether your callback logic will work. To run it using a distributed engine, you need to set up the callback server to handle network calls.
```
from fugue_spark import SparkExecutionEngine
conf = {
    # Built-in Flask RPC server so remote workers can reach driver-side callbacks.
    "fugue.rpc.server": "fugue.rpc.flask.FlaskRPCServer",
    "fugue.rpc.flask_server.host": "0.0.0.0",
    "fugue.rpc.flask_server.port": "1234",
    "fugue.rpc.flask_server.timeout": "2 sec",
}
dag.run(SparkExecutionEngine(conf=conf))
```
The above code uses the built-in flask server to handle network calls from workers. To use `fugue.rpc.flask.FlaskRPCServer`, you must set `fugue.rpc.flask_server.host` and `fugue.rpc.flask_server.port`, and it's suggested to also set `fugue.rpc.flask_server.timeout` to a reasonable timeout for your own case.
You can also create your custom server by implementing [RPCServer](https://fugue.readthedocs.io/en/latest/api/fugue.rpc.html#fugue.rpc.base.RPCServer) and [RPCClient](https://fugue.readthedocs.io/en/latest/api/fugue.rpc.html#fugue.rpc.base.RPCClient). For example you may create a pair of server and client to communicate with [MLFlow](https://mlflow.org/) to update metrics in real time.
## Stateful callbacks
Commonly, callbacks need to be stateful. In Fugue, it's totally fine to set the callback to be a method of an instance (in order to be stateful), or to use a global method/variable. You only need to make the function thread safe because it could be invoked in parallel.
```
from threading import RLock
class Callback(object):
    """Thread-safe call counter: `should_skip()` becomes True from the third call on."""

    def __init__(self):
        self.n = 0
        self._update_lock = RLock()

    def should_skip(self):
        """Increment the counter under the lock and report whether it reached 3."""
        with self._update_lock:
            self.n = self.n + 1
            return self.n >= 3
callback = Callback()  # single shared, thread-safe instance used by all partitions
# schema: *
def take(df:pd.DataFrame, skip:callable) -> pd.DataFrame:
    """Pass the partition through until the driver-side `skip()` starts
    returning True; afterwards emit nothing (implicit None)."""
    if skip():
        return None
    return df
# Only the first two partitions survive: the stateful counter skips from call 3 on.
dag = FugueWorkflow()
df = dag.df([[0,0],[1,1],[0,1],[2,2]],"a:int,b:int")
df.partition(by=["a"]).transform(take, callback = callback.should_skip).show()
dag.run()
```
In the above example, we only take two partitions of the entire dataframe, so the `Callback` implemented a thread safe counter, and return true or false based on the counter.
**The only requirement** for a callback function that Fugue can use is that its input parameters and output are picklable (Nones are fine). The function itself does not need to be picklable. In the above case, `should_skip` uses an `RLock`, which is not picklable, but that doesn't matter.
## Implementing `RPCHandler` instead
In most cases the above native approaches are sufficient. However, if you want to have more control on the callback side, you can directly implement [RPCHandler](https://fugue.readthedocs.io/en/latest/api/fugue.rpc.html#fugue.rpc.base.RPCHandler). For example, you may want to start a thread to process the incoming calls and stop the thread when the execution finishes.
```
from threading import RLock
from fugue.rpc import RPCHandler
from uuid import uuid4
class Callback(RPCHandler):
    """Call-counting RPC handler demonstrating the RPCHandler lifecycle hooks."""

    def __init__(self):
        super().__init__()  # RPCHandler requires its own initialization
        self.n = 0
        self._update_lock = RLock()

    def __uuid__(self) -> str:
        """A fresh UUID per call deliberately makes the workflow non-deterministic."""
        return str(uuid4())

    def start_handler(self) -> None:
        """Lifecycle hook: runs once when the handler starts."""
        print("counter started")

    def stop_handler(self) -> None:
        """Lifecycle hook: runs once when the handler stops."""
        print("counter stopped")

    def __call__(self):
        """Thread-safely count invocations; True once called three or more times."""
        with self._update_lock:
            self.n = self.n + 1
            return self.n >= 3
callback = Callback()  # RPCHandler instance; start/stop hooks run around the workflow
# schema: *
def take(df:pd.DataFrame, skip:callable) -> pd.DataFrame:
    """Emit the partition unchanged unless the driver-side `skip()` says to drop it."""
    if skip():
        return None
    return df
dag = FugueWorkflow()
df = dag.df([[0,0],[1,1],[0,1],[2,2]],"a:int,b:int")
df.partition(by=["a"]).transform(take, callback = callback).show()
# every time, the id will be different because the Callback is not deterministic
print(dag.spec_uuid())
dag.run()
```
## Using callbacks in Transformer class
If you must implement a `Transformer`, `OutputTransformer`, `CoTransformer` and `OutputCoTransformer`, then you can use `callback` property as the callback.
```
from fugue import FugueWorkflow, Transformer
class PrintAndReturn(Transformer):
    """Transformer that reports each partition's describe() summary through
    the inherited `self.callback` and returns the partition unchanged."""

    def get_output_schema(self, df):
        """The output schema is identical to the input schema."""
        return df.schema

    def transform(self, df):
        summary = str(df.as_pandas().describe())
        self.callback(summary)
        return df
# Same callback wiring works with a Transformer class instead of a function.
dag = FugueWorkflow()
df = dag.df([[0,0],[1,1],[0,1],[2,2]],"a:int,b:int")
df.partition(by=["a"]).transform(PrintAndReturn, callback = lambda x:print(x)).show()
dag.run()
```
## A real example: ploting mins in real time
```
from fugue import FugueWorkflow
from fugue.rpc import RPCHandler
import pandas as pd
import random
from IPython.display import clear_output
from threading import RLock, Thread
from time import sleep
import matplotlib.pyplot as plt
class PlotMinNow(RPCHandler):
    """RPC handler that records a strictly decreasing series of minima and
    re-plots it from a background thread roughly once per second.

    Producers (remote workers) call the instance; the consumer thread started
    in `start_handler` does the expensive plotting outside the lock.
    """
    def __init__(self):
        super().__init__()
        self._update_lock = RLock()   # guards _values/_updated across caller threads
        self._values = []             # decreasing minima recorded so far
        self._updated = False         # True when _values changed since the last plot
        self._shutdown = False        # signals the plotting thread to exit
        self._thread = None           # background thread, created in start_handler
    def __call__(self, value):
        # Worker-facing entry point: deliberately cheap -- only records a new minimum.
        with self._update_lock:
            if len(self._values)==0 or value<self._values[-1]:
                self._values.append(value)
                self._updated=True
    def start_handler(self):
        # Start the consumer thread that periodically redraws the chart.
        def thread():
            def _plot():
                # Snapshot under the lock; draw outside it to minimize contention.
                with self._update_lock:
                    data = list(self._values) if self._updated else []
                if len(data)>0:
                    clear_output()
                    pd.Series(data).plot()
                    plt.show()
                    self._updated=False
            while not self._shutdown:
                _plot()
                sleep(1)
            _plot()  # final flush after shutdown is signalled
        self._thread = Thread(target=thread)
        self._thread.start()
    def stop_handler(self):
        # Ask the plotting thread to stop and wait for it to finish.
        self._shutdown=True
        self._thread.join()
# .start() apparently returns a context manager wrapping the handler -- the
# three calls record 10, 9.5, 8 (each smaller than the last).
with PlotMinNow().start() as p:
    p(10)
    p(9.5)
    p(8)
def create() -> pd.DataFrame:
    """Produce a single-column frame "a" holding 100 down to 1."""
    values = [100 - x for x in range(100)]
    return pd.DataFrame({"a": values})
def plot(df:pd.DataFrame, p:callable) -> None:
    """Stream seeded pseudo-random values (scaled by column "a"/100) to the
    callback `p`, pausing 0.2s between values to simulate slow production."""
    random.seed(0)
    scaled = df["a"]/100.0
    for v in scaled:
        p(random.random()*v)
        sleep(0.2)
# `plot` runs on workers, streaming values back to the driver-side PlotMinNow.
with FugueWorkflow() as dag:
    dag.create(create).out_transform(plot, callback=PlotMinNow())
```
## Use with caution
Callbacks may be convenient to use, but you should use them with caution. For example, it may not be a good idea to direct worker-side logs to the driver using this approach because the amount of data can be unexpectedly large.
Also, when you implement the driver side logic for the callbacks, you should be careful about the contention and latency. Take a look at the `_plot` in `PlotMinNow`, it's a consumer competing with the data producers (remote callers), so it minimizes the logic inside the locked part to reduce such contention.
The CPU usage is also a concern: when multiple workers are calling back, it could overload the system. So you should consider decoupling producers and consumers and moving the expensive operations to the consumer side so that you have better control of the load. See `__call__` in `PlotMinNow` — it performs only very cheap operations; if we re-drew the chart inside `__call__`, it could be a bad idea.
## Callback settings and RPCServer combination (for interfaceless)
For interfaceless settings, you have 3 options on the transformer function: without callable, with callable, with optional callable. And you have the option whether to add the callback handler when you invoke the transformer. And you have the option to set the RPCServer.
The following table is a full list of possible combinations and indicated scenarios
| Callback in transformers | Provide callback handler when using transformers | Customize RPCServer | Scenario
| :---: | :---: | :---: | :---
| No callback | No | No | **Most common**, the callback feature is not used at all
| No callback | No | Yes | *Meaningless*, plus you may introduce overhead to start and stop the server
| No callback | Yes | No | *Meaningless*, plus you may introduce overhead to start and stop the callback handler
| No callback | Yes | Yes | *Meaningless*, plus you may introduce overhead to start and stop the callback handler and the server
| Required | No | No | *Invalid*, Fugue compile time exception will be thrown
| Required | No | Yes | *Invalid*, Fugue compile time exception will be thrown
| Required | Yes | No | **Local only**, you can use it to test your callbacks using `NativeExecutionEngine` only. If you use a distributed engine, serialization exception will be thrown
| Required | Yes | Yes | **Common**, a typical way to use callbacks both locally and distributedly
| Optional | No | No | **Flexible**, the callback on the transformer side will be None, you need to check. For a transformer to run with and without a callback, this is a solution
| Optional | No | Yes | *Meaningless*, plus you may introduce overhead to start and stop the server
| Optional | Yes | No | **Local only**, you can use it to test your callbacks using `NativeExecutionEngine` only. If you use a distributed engine, serialization exception will be thrown
| Optional | Yes | Yes | **Common**, a typical way to use callbacks both locally and distributedly
| github_jupyter |
# Unpaired data loading
> Loading of the dataset into the fastai `DataLoaders` class.
```
#default_exp data.unpaired
#export
from fastai.vision.all import *
from fastai.basics import *
from typing import List
from fastai.vision.gan import *
#hide
from nbdev.showdoc import *
```
## Example Dataset - Horse to Zebra conversion
Here, we are going to use the `horse2zebra` dataset provided by UC Berkeley. Let's download it with the fastai `untar_data` function. Additionally, we can view the directory with `Path.ls()` (added by fastai).
```
# Download and extract the horse2zebra dataset (untar_data caches the archive).
horse2zebra = untar_data('https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/horse2zebra.zip')
folders = horse2zebra.ls().sorted()
print(folders)
```
We can see that we have four directories, a train and test directory for both domains.
## Create DataLoaders object:
We can treat the image in Domain A as the input and the image in Domain B as the target. We want to be able to index the dataset for a fixed image in domain A but a random image in domain B, in order to avoid fixed pairs.
A brief summary of how fastai Datasets works:
> "A Datasets creates a tuple from items (typically input,target) by applying to them each list of Transform (or Pipeline) in tfms."
(from [docs](http://dev.fast.ai/data.core#Datasets))
So for transforms we will have a list of list of transforms. Each list of transforms are used to obtain, process, and return the inputs (in this case Domain A) and the targets (Domain B) as a tuple.
Let's first get our image paths:
```
# Index mapping assumes sorted order [testA, testB, trainA, trainB] -- TODO confirm.
trainA_path = folders[2]
trainB_path = folders[3]
testA_path = folders[0]
testB_path = folders[1]
```
We can use `get_image_files` to get the image files from the directories:
```
# Collect the image file paths for each training domain.
filesA = get_image_files(trainA_path)
filesB = get_image_files(trainB_path)
filesA
```
Now, we can have a Transform that randomly selects an image in domain B for the current pair:
```
#export
class RandPair(Transform):
    """Pair each domain-A item with a randomly drawn domain-B item, so no
    fixed A/B pairs exist across epochs."""

    def __init__(self, itemsB):
        self.itemsB = itemsB

    def encodes(self, i):
        # The incoming index is intentionally ignored: any B item may pair with any A item.
        return random.choice(self.itemsB)
show_doc(RandPair, default_cls_level=3)
# Two independent draws should (almost surely) differ, and each draw is a Path.
test_ne(RandPair(filesB)(0),RandPair(filesB)(0))
test_eq(type(RandPair(filesB)(0)),type(Path('.')))
```
Now let's make our `Datasets` (assume no split for now). We load as a `PILImage`, convert to a `Tensor`, and resize:
```
size=128
# Two transform pipelines: [0] loads the indexed A image, [1] swaps in a random B image.
dsets = Datasets(filesA, tfms=[[PILImage.create, ToTensor, Resize(size)],
                               [RandPair(filesB),PILImage.create, ToTensor, Resize(size)]],splits=None)
```
Now we can create a `DataLoader`. Note that fastai allows for batch-level transforms that can be performed on an accelerator like a GPU. Let's normalize the dataset:
```
# Batch-level (GPU-capable) transforms: scale to float then normalize to [-1, 1].
batch_tfms = [IntToFloatTensor, Normalize.from_stats(mean=0.5, std=0.5)]
dls = dsets.dataloaders(bs=4, num_workers=2, after_batch=batch_tfms)
```
We can also show the batch:
```
dls.show_batch()
xb,yb = dls.one_batch()
xb.shape
# Decode the normalization before displaying the first image of the batch.
plt.imshow(dls.after_batch.decode(xb)[0].cpu().permute(1,2,0).numpy())
```
Let's add model processing to our library. Note that we don't have a validation set (not necessary for CycleGAN training). Also note that we load the images with size `load_size` and take a random crop of the image with size `crop_size` (default of 256x256) to load into the model.
```
#export
def get_dls(pathA, pathB, load_size=512, crop_size=256, bs=4, num_workers=2):
    """
    Build an unpaired-image `DataLoaders` from two image folders (`pathA`, `pathB`).
    Images are resized to `load_size` then randomly cropped to `crop_size`
    (defaults 512 and 256); `bs` sets the batch size (default=4).
    No validation split is created (`splits=None`).
    """
    itemsA = get_image_files(pathA)
    itemsB = get_image_files(pathB)
    tfms_A = [PILImage.create, ToTensor, Resize(load_size), RandomCrop(crop_size)]
    tfms_B = [RandPair(itemsB), PILImage.create, ToTensor, Resize(load_size), RandomCrop(crop_size)]
    dsets = Datasets(itemsA, tfms=[tfms_A, tfms_B], splits=None)
    # Normalize to [-1, 1] and add random horizontal flips at the batch level.
    batch_tfms = [IntToFloatTensor, Normalize.from_stats(mean=0.5, std=0.5), FlipItem(p=0.5)]
    return dsets.dataloaders(bs=bs, num_workers=num_workers, after_batch=batch_tfms)
```
### Quick tests:
```
load_size=512
crop_size=256
bs=4
dls = get_dls(trainA_path, trainB_path,load_size=load_size,crop_size=crop_size,bs=bs)
# Sanity checks: loader type, batch count, empty validation split, batch shapes.
test_eq(type(dls[0]),TfmdDL)
test_eq(len(dls[0]),int(len(trainA_path.ls())/bs))
test_eq(len(dls[1]),0)
xb,yb = next(iter(dls[0]))
test_eq(xb.shape,yb.shape)
test_eq(xb.shape,torch.Size([bs, 3, crop_size, crop_size]))
dls.show_batch()
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
# Render all figure text with LaTeX; amsmath/amssymb supply \mathcal and \mathbb.
plt.rcParams.update({'text.usetex':True})
# NOTE(review): the comma between the two \usepackage commands looks accidental
# (it is typeset literally by LaTeX) -- verify the preamble renders as intended.
params= {'text.latex.preamble' : r'\usepackage{amsmath},\usepackage{amssymb}'}
plt.rcParams.update(params)
def Fem(p, n=2, eps=1, delta=1):
    """Curve plotted as M_EM: -log(p) * (1 - 1/(1+(n-1)p)), scaled by 2*delta/eps.

    Accepts scalar or numpy-array `p`.
    """
    base = -np.log(p) * (1 - 1 / (1 + (n - 1) * p))
    return base * 2 * delta / eps
def Fpf(p, n=2, eps=1, delta=1):
    """Curve plotted as M_PF: log(p) * ((1-(1-p)^n)/(n*p) - 1), scaled by 2*delta/eps.

    Accepts scalar or numpy-array `p`.
    """
    inner = (1 - (1 - p) ** n) / (n * p) - 1
    return np.log(p) * inner * 2 * delta / eps
def Flb(p, n=2, eps=1, delta=1):
    """Constant lower-bound curve (delta/(2*eps)) * log(n), broadcast to p's shape."""
    scale = 0.5 * delta / eps
    return scale * np.log(n) * np.ones_like(p)
def Fub(p, n=2, eps=1, delta=1):
    """Constant upper-bound curve (2*delta/eps) * log(n), broadcast to p's shape."""
    scale = 2 * delta / eps
    return scale * np.log(n) * np.ones_like(p)
# Plot both expected-error curves over p in (0, 1) for n=3; endpoints are
# offset by 1e-6 because log(p) diverges at 0.
p = np.linspace(1e-6,1-1e-6,100)
plt.plot(p, Fem(p, n=3), linewidth=3, label='$\mathcal{M}_{EM}$')
plt.plot(p, Fpf(p, n=3), linewidth=3, label='$\mathcal{M}_{PF}$')
#plt.plot(p, Fub(p, n=3), '--', linewidth=3, label='$\\frac{2 \Delta}{\epsilon} \log{(n)}$')
plt.legend(fontsize='xx-large')
plt.xlabel('$p$', fontsize='xx-large')
plt.xticks(fontsize='x-large')
plt.yticks([0,0.2,0.4,0.6,0.8,1.0],fontsize='x-large')
plt.ylabel('$\mathbb{E}[\\mathcal{E}(\mathcal{M}, \\vec q)]$', fontsize='xx-large')
plt.tight_layout()
plt.savefig('em_vs_pf_1.pdf')
# Plot the pointwise ratio of the two curves for several candidate-set sizes n.
p = np.linspace(1e-6,1-1e-6,100)
plt.plot(p, Fem(p, n=2)/Fpf(p, n=2), linewidth=3, label='$n=2$')
plt.plot(p, Fem(p, n=3)/Fpf(p, n=3), linewidth=3, label='$n=3$')
plt.plot(p, Fem(p, n=4)/Fpf(p, n=4), linewidth=3, label='$n=4$')
plt.plot(p, Fem(p, n=10)/Fpf(p, n=10), linewidth=3, label='$n=10$')
#plt.plot(p, Fem(p, n=3), linewidth=3)
#plt.plot(p, Fpf(p, n=3), linewidth=3)
plt.legend(fontsize='xx-large')
plt.xlabel('$p$', fontsize='xx-large')
plt.xticks(fontsize='x-large')
plt.yticks(fontsize='x-large')
plt.ylabel('$\\frac{\mathbb{E}[\mathcal{E}(\mathcal{M}_{EM}, \\vec q)]}{\mathbb{E}[\mathcal{E}(\mathcal{M}_{PF}, \\vec q)]}$', fontsize='xx-large')
plt.tight_layout()
plt.savefig('em_vs_pf_2.pdf')
from scipy.optimize import minimize_scalar
def foo(n):
    """Ratio of the maxima over p of Fem and Fpf for a given n.

    Each maximum is found by bounded scalar minimization of the negated
    curve; `.fun` holds the negated maximum, and the negations cancel in
    the ratio.
    """
    peak_em = minimize_scalar(lambda p: -Fem(p, n), bounds=(0, 1), method='bounded').fun
    peak_pf = minimize_scalar(lambda p: -Fpf(p, n), bounds=(0, 1), method='bounded').fun
    return peak_em / peak_pf
# Sweep n and plot the worst-case ratio between the two mechanisms' maxima.
ns = np.arange(2, 1000)
y = [foo(n) for n in ns]
plt.plot(ns, y, linewidth=3)
plt.ylim(1, 2)
plt.xscale('log')
plt.xticks([1,10,100,1000],fontsize='x-large')
plt.yticks(fontsize='x-large')
plt.xlabel('n', fontsize='xx-large')
# BUGFIX: the denominator label used "\vecq" (an undefined LaTeX control
# sequence); with text.usetex enabled, rendering/savefig would fail.
# It now matches the numerator's "\vec q".
plt.ylabel('$\\frac{\max_{\\vec q} \quad \mathcal{F}(\mathcal{M}_{EM}, \\vec q)}{\max_{\\vec q} \quad \mathcal{F}(\mathcal{M}_{PF}, \\vec q)}$', fontsize='xx-large')
plt.tight_layout()
plt.savefig('em_vs_pf_3.pdf')
from scipy.optimize import minimize_scalar
def bar(n, F=Fem):
    """Maximum of curve F over p in (0, 1) at a given n (bounded scalar optimization)."""
    result = minimize_scalar(lambda p: -F(p, n), bounds=(0, 1), method='bounded')
    # .fun is the minimized negative, so negate to recover the maximum.
    return -result.fun
eps, delta = 1.0, 1.0
ns = np.arange(2, 10000)
# Theoretical bounding curves: (delta/(2 eps)) log n and (2 delta/eps) log n.
lb = 0.5*delta/eps*np.log(ns)
ub = 2.0*delta/eps*np.log(ns)
mid = 1.0*delta/eps*np.log(ns)
# Empirical worst-case expected error of each mechanism over p, per n.
y = [bar(n, Fem) for n in ns]
z = [bar(n, Fpf) for n in ns]
plt.plot(ns, y, linewidth=3, label='$\mathcal{M}_{EM}$')
plt.plot(ns, z, linewidth=3, label='$\mathcal{M}_{PF}$')
plt.plot(ns, lb, '--', linewidth=3, label='$\\frac{\Delta}{2 \epsilon} \log{(n)}$')
plt.plot(ns, ub, '--', linewidth=3, label='$\\frac{2 \Delta}{\epsilon} \log{(n)}$')
#plt.plot(ns, mid, 'k--', linewidth=3, label='$\\frac{\Delta}{\epsilon} \log{(n)}$')
#plt.xscale('log')
plt.xlim(1,10000)
plt.xticks([1,10,100,1000,10000],fontsize='x-large')
plt.yticks(fontsize='x-large')
plt.xlabel('n', fontsize='xx-large')
plt.loglog()
#plt.xscale('log')
plt.legend(fontsize='x-large', loc='lower right')
plt.ylabel('Worst-case Expected Error', fontsize='xx-large')
#plt.ylabel('$\\frac{\max_{\\vec q} \quad \mathcal{F}(\mathcal{M}_{EM}, \\vec q)}{\max_{\\vec q} \quad \mathcal{F}(\mathcal{M}_{PF}, \\vec q)}$', fontsize='xx-large')
plt.tight_layout()
plt.savefig('em_vs_pf_4.pdf')
# Stray scratch cell: evaluates e^1.
np.exp(1)
```
| github_jupyter |
# General E(2)-Equivariant Steerable CNNs - A concrete example
```
import torch
from e2cnn import gspaces
from e2cnn import nn
```
Finally, we build a **Steerable CNN** and try it on MNIST.
Let's also use a group a bit larger: we now build a model equivariant to $8$ rotations.
We indicate the group of $N$ discrete rotations as $C_N$, i.e. the **cyclic group** of order $N$.
In this case, we will use $C_8$.
Because the inputs are still gray-scale images, the input type of the model is again a *scalar field*.
However, internally we use *regular fields*: this is equivalent to a *group-equivariant convolutional neural network*.
Finally, we build *invariant* features for the final classification task by pooling over the group using *Group Pooling*.
The final classification is performed by two fully connected layers.
# The model
Here is the definition of our model:
```
class C8SteerableCNN(torch.nn.Module):
    """MNIST classifier equivariant to the cyclic group C8 (rotations by multiples of 45 degrees).

    Input: a 1x29x29 gray-scale image modelled as a scalar field.
    Output: *n_classes* logits, made rotation-invariant by group pooling before the MLP head.
    """
    def __init__(self, n_classes=10):
        super(C8SteerableCNN, self).__init__()
        # the model is equivariant under rotations by 45 degrees, modelled by C8
        self.r2_act = gspaces.Rot2dOnR2(N=8)
        # the input image is a scalar field, corresponding to the trivial representation
        in_type = nn.FieldType(self.r2_act, [self.r2_act.trivial_repr])
        # we store the input type for wrapping the images into a geometric tensor during the forward pass
        self.input_type = in_type
        # convolution 1
        # first specify the output type of the convolutional layer
        # we choose 24 feature fields, each transforming under the regular representation of C8
        out_type = nn.FieldType(self.r2_act, 24*[self.r2_act.regular_repr])
        self.block1 = nn.SequentialModule(
            # mask the corners of the 29x29 input so rotations don't move content in/out of view
            nn.MaskModule(in_type, 29, margin=1),
            nn.R2Conv(in_type, out_type, kernel_size=7, padding=1, bias=False),
            nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)
        )
        # convolution 2
        # the old output type is the input type to the next layer
        in_type = self.block1.out_type
        # the output type of the second convolution layer are 48 regular feature fields of C8
        out_type = nn.FieldType(self.r2_act, 48*[self.r2_act.regular_repr])
        self.block2 = nn.SequentialModule(
            nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
            nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)
        )
        # anti-aliased average pooling preserves equivariance better than plain stride
        self.pool1 = nn.SequentialModule(
            nn.PointwiseAvgPoolAntialiased(out_type, sigma=0.66, stride=2)
        )
        # convolution 3
        # the old output type is the input type to the next layer
        in_type = self.block2.out_type
        # the output type of the third convolution layer are 48 regular feature fields of C8
        out_type = nn.FieldType(self.r2_act, 48*[self.r2_act.regular_repr])
        self.block3 = nn.SequentialModule(
            nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
            nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)
        )
        # convolution 4
        # the old output type is the input type to the next layer
        in_type = self.block3.out_type
        # the output type of the fourth convolution layer are 96 regular feature fields of C8
        out_type = nn.FieldType(self.r2_act, 96*[self.r2_act.regular_repr])
        self.block4 = nn.SequentialModule(
            nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
            nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)
        )
        self.pool2 = nn.SequentialModule(
            nn.PointwiseAvgPoolAntialiased(out_type, sigma=0.66, stride=2)
        )
        # convolution 5
        # the old output type is the input type to the next layer
        in_type = self.block4.out_type
        # the output type of the fifth convolution layer are 96 regular feature fields of C8
        out_type = nn.FieldType(self.r2_act, 96*[self.r2_act.regular_repr])
        self.block5 = nn.SequentialModule(
            nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
            nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)
        )
        # convolution 6
        # the old output type is the input type to the next layer
        in_type = self.block5.out_type
        # the output type of the sixth convolution layer are 64 regular feature fields of C8
        out_type = nn.FieldType(self.r2_act, 64*[self.r2_act.regular_repr])
        self.block6 = nn.SequentialModule(
            nn.R2Conv(in_type, out_type, kernel_size=5, padding=1, bias=False),
            nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)
        )
        self.pool3 = nn.PointwiseAvgPoolAntialiased(out_type, sigma=0.66, stride=1, padding=0)
        # group pooling makes the features invariant to C8 rotations
        self.gpool = nn.GroupPooling(out_type)
        # number of output channels
        c = self.gpool.out_type.size
        # Fully Connected classification head
        self.fully_net = torch.nn.Sequential(
            torch.nn.Linear(c, 64),
            torch.nn.BatchNorm1d(64),
            torch.nn.ELU(inplace=True),
            torch.nn.Linear(64, n_classes),
        )

    def forward(self, input: torch.Tensor):
        """Return class logits for a batch of 1x29x29 images."""
        # wrap the input tensor in a GeometricTensor
        # (associate it with the input type)
        x = nn.GeometricTensor(input, self.input_type)
        # apply each equivariant block
        # Each layer has an input and an output type
        # A layer takes a GeometricTensor in input.
        # This tensor needs to be associated with the same representation of the layer's input type
        #
        # The Layer outputs a new GeometricTensor, associated with the layer's output type.
        # As a result, consecutive layers need to have matching input/output types
        x = self.block1(x)
        x = self.block2(x)
        x = self.pool1(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.pool2(x)
        x = self.block5(x)
        x = self.block6(x)
        # pool over the spatial dimensions
        x = self.pool3(x)
        # pool over the group
        x = self.gpool(x)
        # unwrap the output GeometricTensor
        # (take the Pytorch tensor and discard the associated representation)
        x = x.tensor
        # classify with the final fully connected layers
        x = self.fully_net(x.reshape(x.shape[0], -1))
        return x
```
Let's try the model on *rotated* MNIST
```
# download the dataset
!wget -nc http://www.iro.umontreal.ca/~lisa/icml2007data/mnist_rotation_new.zip
# uncompress the zip file
!unzip -n mnist_rotation_new.zip -d mnist_rotation_new
from torch.utils.data import Dataset
from torchvision.transforms import RandomRotation
from torchvision.transforms import Pad
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Compose
import numpy as np
from PIL import Image
# prefer the GPU when available; all tensors and models below are moved to this device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
```
Build the dataset
```
class MnistRotDataset(Dataset):
    """Rotated-MNIST dataset loaded from the .amat text files."""

    def __init__(self, mode, transform=None):
        assert mode in ['train', 'test']
        basename = ("mnist_all_rotation_normalized_float_train_valid.amat"
                    if mode == "train"
                    else "mnist_all_rotation_normalized_float_test.amat")
        file = "mnist_rotation_new/" + basename
        self.transform = transform
        # each row holds 784 pixel values followed by the class label
        data = np.loadtxt(file, delimiter=' ')
        self.images = data[:, :-1].reshape(-1, 28, 28).astype(np.float32)
        self.labels = data[:, -1].astype(np.int64)
        self.num_samples = len(self.labels)

    def __getitem__(self, index):
        image = Image.fromarray(self.images[index])
        label = self.labels[index]
        if self.transform is not None:
            image = self.transform(image)
        return image, label

    def __len__(self):
        return len(self.labels)
# images are padded to have shape 29x29.
# this allows to use odd-size filters with stride 2 when downsampling a feature map in the model
pad = Pad((0, 0, 1, 1), fill=0)
# to reduce interpolation artifacts (e.g. when testing the model on rotated images),
# we upsample an image by a factor of 3, rotate it and finally downsample it again
resize1 = Resize(87)   # 29 * 3
resize2 = Resize(29)
totensor = ToTensor()
```
Let's build the model
```
# instantiate the equivariant model and move it to the selected device
model = C8SteerableCNN().to(device)
```
The model is now randomly initialized.
Therefore, we do not expect it to produce the right class probabilities.
However, the model should still produce the same output for rotated versions of the same image.
This is true for rotations by multiples of $\frac{\pi}{2}$, but is only approximate for rotations by $\frac{\pi}{4}$.
Let's test it on a random test image:
we feed eight rotated versions of the first image in the test set and print the output logits of the model for each of them.
```
def test_model(model: torch.nn.Module, x: Image):
    """Print the model's logits for 8 rotated copies (multiples of 45 degrees) of image *x*."""
    model.eval()
    # one throwaway forward pass to initialize any lazily-built internals
    _ = model(torch.randn(1, 1, 29, 29).to(device))
    del _
    x = resize1(pad(x))
    print()
    print('##########################################################################################')
    header = 'angle | ' + ' '.join(["{:6d}".format(d) for d in range(10)])
    print(header)
    with torch.no_grad():
        for step in range(8):
            angle = step * 45
            # rotate at 3x resolution, then downsample back to 29x29
            rotated = totensor(resize2(x.rotate(step*45., Image.BILINEAR))).reshape(1, 1, 29, 29)
            logits = model(rotated.to(device))
            logits = logits.to('cpu').numpy().squeeze()
            print("{:5d} : {}".format(angle, logits))
    print('##########################################################################################')
    print()
# build the test set (no transforms: test_model needs raw PIL images it can rotate itself)
raw_mnist_test = MnistRotDataset(mode='test')
# retrieve the first image from the test set
x, y = next(iter(raw_mnist_test))
# evaluate the (still untrained) model: outputs should already be nearly rotation-invariant
test_model(model, x)
```
The output of the model is already almost invariant.
However, we still observe small fluctuations in the outputs.
This is because the model contains some operations which might break equivariance.
For instance, every convolution includes a padding of $2$ pixels per side. This adds information about the actual orientation of the grid where the image/feature map is sampled because the padding is not rotated with the image.
During training, the model will observe rotated patterns and will learn to ignore the noise coming from the padding.
So, let's train the model now.
The model is exactly the same used to train a normal *PyTorch* architecture:
```
# training augmentation: pad to 29x29, upsample 3x, random rotation, downsample, to tensor
train_transform = Compose([
    pad,
    resize1,
    RandomRotation(180, resample=Image.BILINEAR, expand=False),
    resize2,
    totensor,
])
mnist_train = MnistRotDataset(mode='train', transform=train_transform)
train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=64)
# test set: no augmentation, only padding to 29x29
test_transform = Compose([
    pad,
    totensor,
])
mnist_test = MnistRotDataset(mode='test', transform=test_transform)
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=64)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=5e-5, weight_decay=1e-5)
for epoch in range(31):
    model.train()
    for i, (x, t) in enumerate(train_loader):
        optimizer.zero_grad()
        x = x.to(device)
        t = t.to(device)
        y = model(x)
        loss = loss_function(y, t)
        loss.backward()
        optimizer.step()
    # evaluate on the test set every 10 epochs
    if epoch % 10 == 0:
        total = 0
        correct = 0
        with torch.no_grad():
            model.eval()
            for i, (x, t) in enumerate(test_loader):
                x = x.to(device)
                t = t.to(device)
                y = model(x)
                _, prediction = torch.max(y.data, 1)
                total += t.shape[0]
                correct += (prediction == t).sum().item()
            print(f"epoch {epoch} | test accuracy: {correct/total*100.}")
# retrieve the first image from the test set
x, y = next(iter(raw_mnist_test))
# evaluate the trained model: outputs should now be much closer to invariant
test_model(model, x)
```
| github_jupyter |
```
%matplotlib inline
#The line above is necessary to show Matplotlib's plots inside a Jupyter Notebook
import os
import numpy as np
import matplotlib.pyplot as plt
import imageio
import random
import cv2
from PIL import Image, ImageDraw, ImageFont
from pathlib import Path
import string
# Generate a letter mask image: black is foreground, white is background
def WriteChar2Img(letter, width, height, font, fontSize):
    """Render *letter* roughly centered on a white canvas and save it as images/<letter>.png."""
    img = Image.new('RGB', (width, height), 'white')
    d = ImageDraw.Draw(img)
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10 -- confirm the pinned Pillow version
    letterWidth, letterHeight = d.textsize(letter, font)
    # center horizontally, place at one third of the height vertically
    d.text(((width-letterWidth)//2, (height-letterHeight)//3), letter, fill=(0, 0, 0), font=font)
    img.save(f'images/{letter}.png')
def PrepareAlphabet(letters):
    """Render every letter in *letters* to images/<letter>.png unless the file already exists."""
    # font size scaled to the module-level canvas dimensions (width/height globals)
    fontSize = (width + height) * 40 // 100
    font = ImageFont.truetype("arial.ttf", fontSize)
    for letter in letters:
        if not Path(f'images/{letter}.png').is_file():
            WriteChar2Img(letter, width, height, font, fontSize)
# Convert one's own photo to a standard-size image
def PhotoToMask(photoName, width, height):
    """Resize images/<photoName> to (width, height) and save it as images/_<photoName>.

    Returns the path of the resized copy.
    """
    src = cv2.imread(f'images/{photoName}')
    # Note: WeChat only accepts sticker images, and under 1 MB
    resized = cv2.resize(src, (width, height), interpolation=cv2.INTER_AREA)
    standardName = f'images/_{photoName}'
    cv2.imwrite(standardName, resized)
    return standardName
# Convert a letter's shape into a set of random points
def get_masked_data(letter, intensity=2):
    """Return (x, y) lists of random points falling inside the black region of images/<letter>.png.

    *intensity* controls how many rounds of candidate points are drawn before masking.
    """
    # fill the canvas with random candidate points (fixed seed for reproducible layouts)
    random.seed(520)
    x = []
    y = []
    global width, height  # read-only use of the module-level canvas size
    for i in range(intensity):
        x = x + random.sample(range(0, width), width)
        y = y + random.sample(range(0, height), height)
    # a space has no mask: keep every candidate point
    if letter == ' ':
        return x, y
    # load the letter mask as grayscale and flip vertically (image y-axis vs plot y-axis)
    mask = cv2.imread(f'images/{letter}.png', 0)
    mask = cv2.flip(mask, 0)
    # keep only the points that land on black (foreground) pixels
    result_x = []
    result_y = []
    for i in range(len(x)):
        if (mask[y[i]][x[i]]) == 0:
            result_x.append(x[i])
            result_y.append(y[i])
    # return the masked x, y coordinates
    return result_x, result_y
# Split the text into individual letters and convert each one to point data
def text_to_data(txt, repeat=True, intensity=2):
    """Convert each character of *txt* to masked point coordinates via get_masked_data."""
    print('将文本转换为数据\n')
    letters = [get_masked_data(ch, intensity=intensity) for ch in txt]
    # when repeat is set, append the first letter again so the animation cycles back
    if repeat:
        letters.append(get_masked_data(txt[0], intensity=intensity))
    return letters
def build_gif(coordinates_lists, gif_name='movie', n_frames=10, bg_color='#95A4AD',
              marker_color='#283F4E', marker_size=25):
    """Interpolate between consecutive point clouds and assemble the frames into a GIF.

    coordinates_lists: list of (x, y) point lists, one per letter.
    n_frames: number of interpolation frames between consecutive letters.
    Side effects: writes then deletes frame PNGs under images/, writes <gif_name>.gif,
    and finally removes the global `myLoverPhoto` file appended at the end.
    """
    print('生成图表\n')
    filenames = []
    for index in np.arange(0, len(coordinates_lists) - 1):
        # x/y coordinates of the current letter and the next one
        x = coordinates_lists[index][0]
        y = coordinates_lists[index][1]
        x1 = coordinates_lists[index + 1][0]
        y1 = coordinates_lists[index + 1][1]
        # pad the shorter point list (by repeating its own points) so both have equal length
        while len(x) < len(x1):
            diff = len(x1) - len(x)
            x = x + x[:diff]
            y = y + y[:diff]
        while len(x1) < len(x):
            diff = len(x) - len(x1)
            x1 = x1 + x1[:diff]
            y1 = y1 + y1[:diff]
        # per-point displacement from this letter to the next
        x_path = np.array(x1) - np.array(x)
        y_path = np.array(y1) - np.array(y)
        for i in np.arange(0, n_frames):
            # interpolated point positions for frame i
            x_temp = (x + (x_path / n_frames) * i)
            y_temp = (y + (y_path / n_frames) * i)
            # draw the frame (figsize relates to output pixel width at the chosen dpi)
            fig, ax = plt.subplots(figsize=(4, 4), subplot_kw=dict(aspect="equal"))
            ax.set_facecolor(bg_color)
            plt.xticks([])  # hide the x axis ticks
            plt.yticks([])  # hide the y axis ticks
            plt.axis('off')  # hide the axes entirely
            plt.scatter(x_temp, y_temp, c=marker_color, s=marker_size)
            plt.xlim(0, width)
            plt.ylim(0, height)
            # remove the frame box
            ax.spines['right'].set_visible(False)
            ax.spines['top'].set_visible(False)
            # grid lines
            ax.set_axisbelow(True)
            ax.yaxis.grid(color='gray', linestyle='dashed', alpha=0.7)
            ax.xaxis.grid(color='gray', linestyle='dashed', alpha=0.7)
            # register the frame; the first frame of each letter is repeated to pause on it
            filename = f'images/frame_{index}_{i}.png'
            if i == 0:
                for j in range(5):
                    filenames.append(filename)
            filenames.append(filename)
            # save the frame to disk
            plt.savefig(filename, dpi=96, facecolor=bg_color)
            plt.close()
    print('保存图表\n')
    # assemble the GIF from the saved frames
    print('生成GIF\n')
    with imageio.get_writer(f'{gif_name}.gif', mode='I') as writer:
        for filename in filenames:
            image = imageio.imread(filename)
            writer.append_data(image)
        # append the photo a few times so it lingers at the end of the GIF
        global myLoverPhoto
        image = imageio.imread(myLoverPhoto)
        for i in range(3):
            writer.append_data(image)
    print('保存GIF\n')
    print('删除图片\n')
    # clean up the intermediate frame files
    for filename in set(filenames):
        os.remove(filename)
    os.remove(myLoverPhoto)
    print('完成')
#### Main()
# figsize -> pixels at dpi 96: 3=288, 4=384, 5=480, 6=576
width=384  # note: WeChat only accepts sticker images, and under 1 MB
height=384
myLoverName = input('请输入女票的姓名字母:')
# render one mask PNG per letter of the name
PrepareAlphabet(myLoverName)
coordinates_lists = text_to_data(myLoverName, repeat=False, intensity=50)
# resized photo shown at the end of the GIF (consumed and deleted by build_gif)
myLoverPhoto = PhotoToMask('me.png', width, height)
build_gif(coordinates_lists,
          gif_name=myLoverName,
          n_frames=7,
          bg_color='#FFC0CB',
          # marker_color='#FFED33',
          marker_color='#FF0000',
          marker_size=1.0)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/vlad-danaila/ml-cancer-detection/blob/master/Cancer_Detection_Ensable_V1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**Download dependencies**
```
!pip3 install torch==1.3.1+cu92 torchvision==0.4.2+cu92 -f https://download.pytorch.org/whl/torch_stable.html
!pip3 install sklearn matplotlib GPUtil pytorch-gradcam
```
**Download data**
Mount my google drive, where I stored the dataset.
In order to acquire the dataset please navigate to:
https://ieee-dataport.org/documents/cervigram-image-dataset
```
# Mount Google Drive when running on Colab; print and continue elsewhere
try:
    from google.colab import drive
    drive.mount('/content/drive')
except Exception as e:
    print(e)
```
Unzip the dataset into the folder "dataset". For your environment, please adjust the paths accordingly.
```
!rm -vrf "dataset"
!mkdir "dataset"
!cp -r "/content/drive/My Drive/Studiu doctorat leziuni cervicale/cervigram-image-dataset-v2.zip" "dataset/cervigram-image-dataset-v2.zip"
# !cp -r "cervigram-image-dataset-v2.zip" "dataset/cervigram-image-dataset-v2.zip"
!unzip "dataset/cervigram-image-dataset-v2.zip" -d "dataset"
```
**Constants**
For your environment, please modify the paths accordingly.
```
# Dataset locations (Colab paths; use the commented alternatives for local runs)
TRAIN_PATH = '/content/dataset/data/train/'
TEST_PATH = '/content/dataset/data/test/'
# TRAIN_PATH = 'dataset/data/train/'
# TEST_PATH = 'dataset/data/test/'
CROP_SIZE = 260    # center-crop size (the crop transforms below are currently commented out)
IMAGE_SIZE = 224   # input resolution expected by the ImageNet-pretrained backbones
BATCH_SIZE = 100
# Checkpoints of the three single-modality models and of the ensemble
prefix = '/content/drive/My Drive/Studiu doctorat leziuni cervicale/V2/Chekpoints & Notebooks/'
CHECKPOINT_NATURAL_IMG_MODEL = prefix + 'Cancer Detection MobileNetV2 All Natural Images Full Conv32-0.7 6 Dec.tar'
CHECKPOINT_GREEN_LENS_IMG_MODEL = prefix + 'Cancer_Detection_MobileNetV2_Green_Lens_2_Dec Full Conv 64 0.7.tar'
CHECKPOINT_IODINE_SOLUTION_IMG_MODEL = prefix + 'Cancer_Detection_MobileNetV2_Iodine_1_Dec Full Conv32.tar'
CHECKPOINT_ENSAMBLE = prefix + 'Cancer Detection - Ensable Conv 7 Dec.tar'
```
**Imports**
```
import torch as t
import torchvision as tv
import numpy as np
import PIL as pil
import matplotlib.pyplot as plt
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from torch.nn import Linear, BCEWithLogitsLoss
import sklearn as sk
import sklearn.metrics
from os import listdir
import time
import random
import PIL
from torchvision.utils import make_grid, save_image
import os
from gradcam.utils import visualize_cam, Normalize
```
**Deterministic measurements**
These statements help make the experiments reproducible by fixing the random seeds. Despite fixing the random seeds, experiments are usually not reproducible using different PyTorch releases, commits, platforms or between CPU and GPU executions. Please find more details in the PyTorch documentation:
https://pytorch.org/docs/stable/notes/randomness.html
```
# Fix all random seeds (PyTorch CPU/GPU, NumPy, stdlib) for reproducibility
SEED = 0
t.manual_seed(SEED)
t.cuda.manual_seed(SEED)
t.cuda.manual_seed_all(SEED)
# make cuDNN deterministic, at the cost of disabling kernel autotuning
t.backends.cudnn.deterministic = True
t.backends.cudnn.benchmark = False
np.random.seed(SEED)
random.seed(SEED)
```
**Memory stats**
```
import GPUtil
def memory_stats():
    """Print free/used/total memory and utilization for every visible GPU."""
    for gpu in GPUtil.getGPUs():
        print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
memory_stats()
```
**Loading data**
The dataset is structured in multiple small folders of 7 images each. This generator iterates through the folders and returns the category and 7 paths: one for each image in the folder. The paths are ordered; the order is important since each folder contains 3 types of images, first 5 are with acetic acid solution and the last two are through a green lens and having iodine solution(a solution of a dark red color).
```
def sortByLastDigits(elem):
    """Sort key: the integer formed by concatenating every digit in *elem* (0 if there are none)."""
    digits = ''.join(filter(str.isdigit, elem))
    return int(digits) if digits else 0
def getImagesPaths(root_path):
    """Yield (category, sorted_image_paths) for every case folder under *root_path*.

    The class label is taken from the last character of the class folder name.
    Image paths are sorted by their trailing digits so the 7-image order is stable
    (5 acetic-acid images first, then the green-lens and iodine images).
    """
    for class_folder in [root_path + f for f in listdir(root_path)]:
        category = int(class_folder[-1])
        for case_folder in listdir(class_folder):
            case_folder_path = class_folder + '/' + case_folder + '/'
            img_files = [case_folder_path + file_name for file_name in listdir(case_folder_path)]
            yield category, sorted(img_files, key = sortByLastDigits)
```
We define datasets which load 3 kinds of images: natural images, images taken through a green lens and images where the doctor applied iodine solution (which gives a dark red color). Each dataset has dynamic and static transformations which could be applied to the data. The static transformations are applied on the initialization of the dataset, while the dynamic ones are applied when loading each batch of data.
```
class SimpleImagesDataset(t.utils.data.Dataset):
    """Dataset over the five acetic-acid (natural) images of every case folder.

    Static transforms run once at load time; dynamic transforms run per __getitem__.
    """

    def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None):
        self.dataset = []
        self.transforms_x = transforms_x_dynamic
        self.transforms_y = transforms_y_dynamic
        for category, img_files in getImagesPaths(root_path):
            # the first five files of each case are the natural images
            for path in img_files[:5]:
                img = pil.Image.open(path)
                if transforms_x_static is not None:
                    img = transforms_x_static(img)
                if transforms_y_static is not None:
                    category = transforms_y_static(category)
                self.dataset.append((img, category))

    def __getitem__(self, i):
        img, label = self.dataset[i]
        if self.transforms_x is not None:
            img = self.transforms_x(img)
        if self.transforms_y is not None:
            label = self.transforms_y(label)
        return img, label

    def __len__(self):
        return len(self.dataset)
class GreenLensImagesDataset(SimpleImagesDataset):
    """Dataset of the single green-lens image per case (second-to-last file in sorted order)."""
    def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None):
        self.dataset = []
        self.transforms_x = transforms_x_dynamic
        self.transforms_y = transforms_y_dynamic
        for category, img_files in getImagesPaths(root_path):
            # Only the green lens image
            img = pil.Image.open(img_files[-2])
            if transforms_x_static != None:
                img = transforms_x_static(img)
            if transforms_y_static != None:
                category = transforms_y_static(category)
            self.dataset.append((img, category))
class RedImagesDataset(SimpleImagesDataset):
    """Dataset of the single iodine-solution (dark red) image per case (last file in sorted order)."""
    def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None):
        self.dataset = []
        self.transforms_x = transforms_x_dynamic
        self.transforms_y = transforms_y_dynamic
        for category, img_files in getImagesPaths(root_path):
            # Only the iodine-solution image (the original comment wrongly said "green lens")
            img = pil.Image.open(img_files[-1])
            if transforms_x_static != None:
                img = transforms_x_static(img)
            if transforms_y_static != None:
                category = transforms_y_static(category)
            self.dataset.append((img, category))
class TransformsRand:
    """One frozen draw of augmentation randomness, shareable across the 7 images of a case.

    Draw order is fixed (angle, scale, shear, hflip) so seeded runs stay reproducible.
    """
    def __init__(self):
        self.angle, self.scale, self.shear, self.hflip = (
            random.random() for _ in range(4)
        )
class AllImagesDataset(t.utils.data.Dataset):
    """Dataset yielding all 7 images of a case (5 natural, 1 green lens, 1 iodine).

    During training, a single TransformsRand draw is shared by all 7 images of a
    case so they receive identical augmentation parameters.
    """
    def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None, is_train = True):
        self.is_train = is_train
        self.dataset = []
        self.transforms_x = transforms_x_dynamic
        self.transforms_y = transforms_y_dynamic
        for category, img_files in getImagesPaths(root_path):
            imgs = []
            for i in range(7):
                img = pil.Image.open(img_files[i])
                if transforms_x_static != None:
                    img = transforms_x_static(img)
                imgs.append(img)
            if transforms_y_static != None:
                category = transforms_y_static(category)
            self.dataset.append((imgs, category))

    def __getitem__(self, i):
        x, y = self.dataset[i]
        if self.transforms_x != None:
            if self.is_train:
                # one shared random draw so all 7 images get the same augmentation
                rand = TransformsRand()
                x = [self.transforms_x(_x, rand = rand) for _x in x]
            else:
                x = [self.transforms_x(_x) for _x in x]
        if self.transforms_y != None:
            y = self.transforms_y(y)
        return x, y

    def __len__(self):
        return len(self.dataset)
```
**Preprocess data**
Convert PyTorch tensor to Numpy array.
```
def to_numpy(x):
    """Detach *x* from the autograd graph, move it to host memory, and return a NumPy array."""
    return x.detach().cpu().numpy()
```
Data transformations for the test and training sets.
```
# ImageNet normalization statistics (all backbones are ImageNet-pretrained)
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
def custom_transforms(x, angle = 45, scale = (1., 2.), shear = 30, rand = None):
    """Affine-augment, resize, maybe flip, then normalize a PIL image into a CUDA tensor.

    *rand* (TransformsRand) supplies the random draw so several images of the same
    case can share identical augmentation; a fresh draw is made when None.
    """
    if rand == None:
        rand = TransformsRand()
    angle = angle * rand.angle
    scale_value = scale[0] + ((scale[1] - scale[0]) * rand.scale)
    shear = shear * rand.shear
    x = tv.transforms.functional.affine(x, angle = angle, scale = scale_value, shear = shear, translate = [0, 0])
    x = tv.transforms.functional.resize(x, IMAGE_SIZE)
    # horizontal flip with probability 0.5
    if rand.hflip > .5:
        x = tv.transforms.functional.hflip(x)
    x = tv.transforms.functional.to_tensor(x).cuda()
    x = tv.transforms.functional.normalize(x, mean=norm_mean, std=norm_std)
    return x
# Training pipeline: random affine + flip, resize, tensor on GPU, ImageNet-normalize
transforms_train = tv.transforms.Compose([
    tv.transforms.RandomAffine(degrees = 45, translate = None, scale = (1., 2.), shear = 30),
    # tv.transforms.CenterCrop(CROP_SIZE),
    tv.transforms.Resize(IMAGE_SIZE),
    tv.transforms.RandomHorizontalFlip(),
    tv.transforms.ToTensor(),
    tv.transforms.Lambda(lambda t: t.cuda()),
    tv.transforms.Normalize(mean=norm_mean, std=norm_std)
])
# Test pipeline: deterministic resize + normalize only
transforms_test = tv.transforms.Compose([
    # tv.transforms.CenterCrop(CROP_SIZE),
    tv.transforms.Resize(IMAGE_SIZE),
    tv.transforms.ToTensor(),
    tv.transforms.Normalize(mean=norm_mean, std=norm_std)
])
# Labels become long tensors on the GPU
y_transform = tv.transforms.Lambda(lambda y: t.tensor(y, dtype=t.long, device = 'cuda:0'))
```
Initialize PyTorch datasets and loaders for training and test.
```
def create_loaders():
    """Build the train/test AllImagesDataset loaders; returns (loader_train, loader_test, len_train, len_test)."""
    dataset_train = AllImagesDataset(TRAIN_PATH, transforms_x_dynamic = custom_transforms, transforms_y_dynamic = y_transform)
    # test images are pre-resized/normalized statically; only the .cuda() move is dynamic
    dataset_test = AllImagesDataset(TEST_PATH, transforms_x_static = transforms_test,
        transforms_x_dynamic = tv.transforms.Lambda(lambda t: t.cuda()), transforms_y_dynamic = y_transform, is_train = False)
    loader_train = DataLoader(dataset_train, BATCH_SIZE, shuffle = True, num_workers = 0)
    loader_test = DataLoader(dataset_test, BATCH_SIZE, shuffle = False, num_workers = 0)
    return loader_train, loader_test, len(dataset_train), len(dataset_test)
loader_train_simple_img, loader_test_simple_img, len_train, len_test = create_loaders()
```
**Visualize data**
Load a few images so that we can see the efects of the data augmentation on the training set.
```
def plot_one_prediction(x, label, pred):
    """Show one image with its label as title: green title if pred == label, red otherwise."""
    x, label, pred = to_numpy(x), to_numpy(label), to_numpy(pred)
    # CHW -> HWC for matplotlib
    x = np.transpose(x, [1, 2, 0])
    if x.shape[-1] == 1:
        x = x.squeeze()
    # undo ImageNet normalization for display
    x = x * np.array(norm_std) + np.array(norm_mean)
    plt.title(label, color = 'green' if label == pred else 'red')
    plt.imshow(x)
def plot_one(x, norm = True):
    """Show a single image tensor; set norm=False when the tensor is not ImageNet-normalized."""
    x = to_numpy(x)
    # CHW -> HWC for matplotlib
    x = np.transpose(x, [1, 2, 0])
    if x.shape[-1] == 1:
        x = x.squeeze()
    if norm:
        # undo ImageNet normalization for display
        x = x * np.array(norm_std) + np.array(norm_mean)
    plt.imshow(x)
def plot_predictions(imgs, labels, preds):
    """Plot the first 20 images in a 2x10 grid, titles colored by prediction correctness."""
    fig = plt.figure(figsize = (20, 5))
    for i in range(20):
        fig.add_subplot(2, 10, i + 1, xticks = [], yticks = [])
        plot_one_prediction(imgs[i], labels[i], preds[i])
# x, y = next(iter(loader_train_simple_img))
# for i in range(7):
#     plot_predictions(x[i], y, y)
```
**Model**
Define a few models to experiment with.
```
def get_mobilenet_v2():
    """ImageNet-pretrained MobileNetV2 with a 4-class head and extra dropout, moved to GPU."""
    model = t.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True)
    model.classifier[0] = t.nn.Dropout(p=0.9, inplace=False)
    model.classifier[1] = Linear(in_features=1280, out_features=4, bias=True)
    # spatial dropout in the last feature blocks to fight overfitting on the small dataset
    model.features[18].add_module('cnn_drop_18', t.nn.Dropout2d(p = .3))
    model.features[17]._modules['conv'][1].add_module('cnn_drop_17', t.nn.Dropout2d(p = .2))
    model.features[16]._modules['conv'][1].add_module('cnn_drop_16', t.nn.Dropout2d(p = .1))
    model = model.cuda()
    return model
def get_vgg_19():
    """ImageNet-pretrained VGG-19 adapted to 4 output classes, moved to GPU."""
    model = tv.models.vgg19(pretrained = True)
    model = model.cuda()
    model.classifier[2].p = .9
    # BUG FIX: assigning `model.classifier[6].out_features = 4` only changes the
    # attribute -- the weight matrix keeps its 1000-class shape and the layer
    # still outputs 1000 logits. Replace the head with a fresh 4-class Linear.
    in_features = model.classifier[6].in_features
    model.classifier[6] = t.nn.Linear(in_features, 4).cuda()
    return model
def get_res_next_101():
    """WSL-pretrained ResNeXt-101 32x8d with a dropout + 4-class linear head, on GPU."""
    model = t.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
    model.fc = t.nn.Sequential(
        t.nn.Dropout(p = .9),
        t.nn.Linear(in_features=2048, out_features=4)
    )
    model = model.cuda()
    return model
def get_resnet_18():
    """ImageNet-pretrained ResNet-18 with a dropout + 4-class linear head, on GPU."""
    model = tv.models.resnet18(pretrained = True)
    model.fc = t.nn.Sequential(
        t.nn.Dropout(p = .9),
        t.nn.Linear(in_features=512, out_features=4)
    )
    model = model.cuda()
    return model
def get_dense_net():
    """ImageNet-pretrained DenseNet-121 with a dropout + 4-class linear head, on GPU."""
    model = tv.models.densenet121(pretrained = True)
    model.classifier = t.nn.Sequential(
        t.nn.Dropout(p = .9),
        t.nn.Linear(in_features = 1024, out_features = 4)
    )
    model = model.cuda()
    return model
```
Define ensemble
```
class WrappedModel(t.nn.Module):
    """Transparent wrapper adding a 'module.' attribute level.

    Used so checkpoints saved from a DataParallel model (whose keys carry the
    'module.' prefix) can be loaded into a plain model -- see cnn_from_data_parallel.
    """
    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, x):
        return self.module(x)
class MobileNetV2_FullConv(t.nn.Module):
    """MobileNetV2 whose last feature block is replaced by a 1x1 conv to *end_channels*,
    followed by global average pooling and a 4-class linear head."""
    def __init__(self, end_channels):
        super().__init__()
        self.cnn = get_mobilenet_v2().features
        # replace the final 320->1280 block with a 1x1 conv to the requested width
        self.cnn[18] = t.nn.Sequential(
            tv.models.mobilenet.ConvBNReLU(320, end_channels, kernel_size=1)
        )
        self.fc = t.nn.Linear(end_channels, 4)

    def forward(self, x):
        x = self.cnn(x)
        # global average pool over the spatial dimensions
        x = x.mean([2, 3])
        x = self.fc(x);
        return x
class Ensamble(t.nn.Module):
    """Ensemble of three MobileNetV2 feature extractors (natural / green-lens / iodine).

    The 5 natural images share one backbone; the feature maps of all 7 images are
    concatenated channel-wise and classified by an InvertedResidual head + linear layer.
    """
    def __init__(self):
        super().__init__()
        self.model_simple = MobileNetV2_FullConv(32).cnn
        self.model_green = MobileNetV2_FullConv(64).cnn
        self.model_red = MobileNetV2_FullConv(32).cnn
        # 5 natural maps x 32 channels + 64 green + 32 iodine
        channels = 32 * 5 + 64 + 32
        fc_size = 32
        self.classifier = tv.models.mobilenet.InvertedResidual(inp = channels, oup = fc_size, stride = 1, expand_ratio = 5)
        self.classifier._modules['conv'][0].add_module('classifier_drop_1', t.nn.Dropout2d(p = .2))
        self.classifier._modules['conv'][1].add_module('classifier_drop_1', t.nn.Dropout2d(p = .2))
        self.fc = t.nn.Sequential(
            # t.nn.Dropout(p = .2),
            t.nn.Linear(fc_size, 4)
        )

    def forward(self, x):
        # x: sequence of 7 image batches -- 5 natural, green lens at index 5, iodine at index 6
        x_list = []
        for i in range(5):
            x_list.append(self.model_simple(x[i]))
        x_list.append(self.model_green(x[5]))
        x_list.append(self.model_red(x[6]))
        x_concat = t.cat(x_list, 1)
        x_concat = self.classifier(x_concat)
        # global average pool over the spatial dimensions
        x_concat = x_concat.mean([2, 3])
        x_concat = self.fc(x_concat)
        return x_concat
def cnn(checkpoint_path):
    """Load a frozen 4-class MobileNetV2 from *checkpoint_path*."""
    net = t.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=False)
    net.classifier[0] = t.nn.Dropout(p=0, inplace=False)
    net.classifier[1] = Linear(in_features=1280, out_features=4, bias=True)
    net.load_state_dict(t.load(checkpoint_path)['model'])
    # freeze all weights: the loaded model is used as a fixed feature extractor
    for weight in net.parameters():
        weight.requires_grad = False
    return net
def cnn_from_data_parallel(checkpoint_path):
    """Load a frozen 4-class MobileNetV2 whose checkpoint was saved from DataParallel.

    WrappedModel adds the 'module.' attribute level that DataParallel state-dict keys carry.
    """
    net = t.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=False)
    net.classifier[0] = t.nn.Dropout(p=0, inplace=False)
    net.classifier[1] = Linear(in_features=1280, out_features=4, bias=True)
    net = WrappedModel(net)
    net.load_state_dict(t.load(checkpoint_path)['model'])
    # freeze all weights
    for weight in net.parameters():
        weight.requires_grad = False
    return net
def cnn_full_conv(checkpoint_path, end_channels_nb):
    """Load a frozen MobileNetV2_FullConv(end_channels_nb) from *checkpoint_path*."""
    net = MobileNetV2_FullConv(end_channels_nb)
    net.load_state_dict(t.load(checkpoint_path)['model'])
    # freeze all weights
    for weight in net.parameters():
        weight.requires_grad = False
    return net
def cnn_full_conv_new(end_channels_nb, checkpoint_path=None):
    """Build a frozen MobileNetV2_FullConv; optionally restore weights from *checkpoint_path*.

    BUG FIX: the original referenced `checkpoint_path` without it being a parameter
    or a defined global, so every call raised NameError. It is now an optional
    argument; when None, the freshly-built (randomly initialized) model is returned.
    """
    cnn = MobileNetV2_FullConv(end_channels_nb)
    if checkpoint_path is not None:
        checkpoint = t.load(checkpoint_path)
        cnn.load_state_dict(checkpoint['model'])
    # freeze all weights
    for param in cnn.parameters():
        param.requires_grad = False
    return cnn
```
**Grad CAM ++ Analysis**
**All code below is an adaptation of the sourcecode of pytorch-gradcam (MIT license).**
**The original can be found at:** https://pypi.org/project/pytorch-gradcam/
```
def get_images(index):
    """Return the 7 normalized image tensors (each with a batch dim of 1) and the label of test sample *index*."""
    x, y = next(iter(loader_test_simple_img))
    normed_torch_imgs = []
    for i in range(7):
        # NOTE(review): unsqueeze_ mutates the batch tensor in place -- confirm this is intended
        normed_torch_img = x[i][index].unsqueeze_(0)
        normed_torch_imgs.append(normed_torch_img)
    return normed_torch_imgs, y[index].item()
def compute_mask(gradients, activations, score):
    """Grad-CAM++ saliency map from one layer's gradients/activations and the class score.

    gradients, activations: (b, k, u, v) tensors captured by the layer hooks.
    score: scalar class score (pre-softmax logit).
    Returns a (b, 1, 224, 298) saliency map min-max normalized to [0, 1].
    """
    b, k, u, v = gradients.size()
    # Grad-CAM++ alpha weights
    alpha_num = gradients.pow(2)
    alpha_denom = gradients.pow(2).mul(2) + \
                  activations.mul(gradients.pow(3)).view(b, k, u*v).sum(-1, keepdim=True).view(b, k, 1, 1)
    # guard against division by zero
    alpha_denom = t.where(alpha_denom != 0.0, alpha_denom, t.ones_like(alpha_denom))
    alpha = alpha_num.div(alpha_denom+1e-7)
    positive_gradients = t.nn.functional.relu(score.exp()*gradients) # ReLU(dY/dA) == ReLU(exp(S)*dS/dA))
    weights = (alpha*positive_gradients).view(b, k, u*v).sum(-1).view(b, k, 1, 1)
    saliency_map = (weights*activations).sum(1, keepdim=True)
    saliency_map = t.nn.functional.relu(saliency_map)
    # F.upsample is deprecated; F.interpolate is the documented drop-in replacement
    saliency_map = t.nn.functional.interpolate(saliency_map, size=(224, 298), mode='bilinear', align_corners=False)
    # min-max normalize into [0, 1]
    saliency_map_min, saliency_map_max = saliency_map.min(), saliency_map.max()
    saliency_map = (saliency_map-saliency_map_min).div(saliency_map_max-saliency_map_min).data
    return saliency_map
def extract_gradients_and_compute_mask(normed_torch_imgs):
    """Run the ensemble on the given images and compute Grad-CAM++ masks.

    Forward/backward hooks on the last convolutional layer of each sub-model
    capture activations and gradients of the winning class score; one
    saliency mask is then computed per captured pair.

    Returns:
        (masks, predicted_class_index): seven masks (five from the natural
        branch, one green, one iodine) and the arg-max class of the output.
    """
    gradients_natural, gradients_green, gradients_iodine = [], [], []
    activations_natural, activations_green, activations_iodine = [], [], []
    # One forward/backward hook pair per branch; each appends into the
    # closure lists above every time the hooked layer runs.
    def backward_hook_natural(module, grad_input, grad_output):
        gradients_natural.append(grad_output[0])
        return None
    def forward_hook_natural(module, input, output):
        activations_natural.append(output)
        return None
    def backward_hook_green(module, grad_input, grad_output):
        gradients_green.append(grad_output[0])
        return None
    def forward_hook_green(module, input, output):
        activations_green.append(output)
        return None
    def backward_hook_iodine(module, grad_input, grad_output):
        gradients_iodine.append(grad_output[0])
        return None
    def forward_hook_iodine(module, input, output):
        activations_iodine.append(output)
        return None
    # A fresh ensemble is loaded from the checkpoint on every call, so the
    # hooks registered below never accumulate across calls.
    model = t.nn.DataParallel(Ensamble().cuda())
    checkpoint = t.load(CHECKPOINT_ENSAMBLE)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    # NOTE(review): register_backward_hook is deprecated in newer PyTorch in
    # favour of register_full_backward_hook -- confirm the installed version.
    target_layer_simple = model.module.model_simple[-1][0][0]
    target_layer_simple.register_forward_hook(forward_hook_natural)
    target_layer_simple.register_backward_hook(backward_hook_natural)
    target_layer_green = model.module.model_green[-1][0][0]
    target_layer_green.register_forward_hook(forward_hook_green)
    target_layer_green.register_backward_hook(backward_hook_green)
    target_layer_iodine = model.module.model_red[-1][0][0]
    target_layer_iodine.register_forward_hook(forward_hook_iodine)
    target_layer_iodine.register_backward_hook(backward_hook_iodine)
    b, c, h, w = normed_torch_imgs[0].size()  # shape unpack kept from the original Grad-CAM code; unused below
    logit = model.forward(normed_torch_imgs)
    # Score of the arg-max class (the commented line below targets class 2 instead).
    score = logit[:, logit.max(1)[-1]].squeeze()
    # score = logit[:, 2].squeeze()
    model.zero_grad()
    score.backward(retain_graph = False)
    # Five masks from the natural branch -- presumably model_simple processed
    # five inputs so its hooks fired five times; TODO confirm against Ensamble.
    masks = [compute_mask(gradients_natural[i], activations_natural[i], score) for i in range(5)]
    masks.append(compute_mask(gradients_green[0], activations_green[0], score))
    masks.append(compute_mask(gradients_iodine[0], activations_iodine[0], score))
    return masks, logit.max(1).indices.item()
def visualize_masked_img(mask, img_original):
    """Overlay a saliency mask on one image, save the result as
    outputs/out.JPEG and display it with matplotlib.

    Args:
        mask:         (1, 1, H', W') saliency map.
        img_original: (C, H, W) image tensor the mask is resized to.
    """
    images = []
    # F.upsample is deprecated; interpolate is the drop-in replacement.
    mask = t.nn.functional.interpolate(mask, size=img_original.shape[1:], mode='bilinear', align_corners=False)
    heatmap, result = visualize_cam(mask.cpu(), img_original.cpu())
    images.append(t.stack([img_original.cpu(), result], 0))
    images = make_grid(t.cat(images, 0), nrow=2)
    output_dir = 'outputs'
    os.makedirs(output_dir, exist_ok=True)
    output_name = 'out.JPEG'
    output_path = os.path.join(output_dir, output_name)
    save_image(images, output_path)
    plt.imshow(PIL.Image.open(output_path))
    plt.show()
def visualize_masked_imgs(masks, imgs_original, file_name):
    """Overlay each of the seven masks on its original image, show the
    resulting two-column grid and save it as outputs/<file_name>.JPEG.

    Args:
        masks:         list of seven saliency maps, (1, 1, H', W') each.
        imgs_original: seven (C, H, W) image tensors.
        file_name:     output file name without extension.
    """
    images = []
    for i in range(7):
        # F.upsample is deprecated; interpolate is the drop-in replacement.
        mask = t.nn.functional.interpolate(masks[i], size=imgs_original[i].shape[1:], mode='bilinear', align_corners=False)
        heatmap, result = visualize_cam(mask.cpu(), imgs_original[i].cpu())
        images.append(imgs_original[i].cpu())
        images.append(result)
    grid = make_grid(images, nrow = 2)
    # (debug print of grid.shape removed)
    plot_one(grid, norm = False)
    plt.show()
    output_dir = 'outputs'
    os.makedirs(output_dir, exist_ok=True)
    output_name = file_name + '.JPEG'
    output_path = os.path.join(output_dir, output_name)
    save_image(grid, output_path)
# Raw test set used only to fetch display images for the overlays.
dataset_test_raw = AllImagesDataset(TEST_PATH, transforms_x_static = tv.transforms.ToTensor(),
    transforms_x_dynamic = tv.transforms.Lambda(lambda t: t.cuda()), transforms_y_dynamic = y_transform, is_train = False)
# IPython shell magic: clear previously generated overlay images.
!rm -vrf "outputs"
def grad_cam_plus_plus_visualize(index):
    """Run the full Grad-CAM++ pipeline for one test sample and save/show the
    resulting overlay grid; the file name records whether the prediction was
    correct."""
    imgs, true_label = get_images(index)
    masks, predicted_label = extract_gradients_and_compute_mask(imgs)
    out_name = 'out_{}_{}'.format(index, true_label == predicted_label)
    visualize_masked_imgs(masks, dataset_test_raw[index][0], out_name)
# Visualise Grad-CAM++ overlays for the first three test samples.
for i in range(3):
    grad_cam_plus_plus_visualize(i)
```
| github_jupyter |
# Training analysis for DeepRacer
This notebook has been built based on the `DeepRacer Log Analysis.ipynb` provided by the AWS DeepRacer Team. It has been reorganised and expanded to provide new views on the training data without the helper code which was moved into utility `.py` files.
## Usage
I have expanded this notebook to present how I'm using this information. It contains descriptions that you may find unnecessary after initial reading. Since this file can change in the future, I recommend that you make a copy of it and reorganize it to your liking. This way you will not lose your changes and you'll be able to add things as you please.
**This notebook isn't complete.** What I find interesting in the logs may not be what you will find interesting and useful. I recommend you get familiar with the tools and try hacking around to get the insights that suit your needs.
## Contributions
As usual, your ideas are very welcome and encouraged so if you have any suggestions either bring them to [the AWS DeepRacer Community](http://join.deepracing.io) or share as code contributions.
## Training environments
Depending on whether you're running your training through the console or using the local setup, and on which setup for local training you're using, your experience will vary. As much as I would like everything to be tailored to your configuration, there may be some problems that you may face. If so, please get in touch through [the AWS DeepRacer Community](http://join.deepracing.io).
## Requirements
Before you start using the notebook, you will need to install some dependencies. If you haven't yet done so, have a look at [The README.md file](/edit/README.md#running-the-notebooks) to find what you need to install.
Apart from the install, you also have to configure your programmatic access to AWS. Have a look at the guides below, AWS resources will lead you by the hand:
AWS CLI: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html
Boto Configuration: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
## Credits
I would like to thank [the AWS DeepRacer Community](http://join.deepracing.io) for all the feedback about the notebooks. If you'd like, follow [my blog](https://codelikeamother.uk) where I tend to write about my experiences with AWS DeepRacer.
# Log Analysis
Let's get to it.
## Imports
Run the imports block below:
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import glob
import os.path
import json
import re
%matplotlib inline
#Shapely Library
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
#Plotly Library
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
from python import track_utils as tu
from python import cw_utils as cw
from python import log_analysis as la
from python import plotly_graph_utils as pg
import math
from pathlib import Path
# Make sure your boto version is >= '1.9.133'
cw.boto3.__version__
```
This block below has been prepared in case you would like to make some changes to the utility code that comes with this notebook. It will reload track_utlis.py, log_analysis.py and cw_utils.py without the need to reload the notebook. In normal usage of the notebook you will not need it.
```
# reload log_analysis and the rest of stuff here if needed
# (handy for code updates in utils, doesn't break anything if no changes)
import importlib
importlib.reload(la)  # log_analysis
importlib.reload(cw)  # cw_utils (CloudWatch access)
importlib.reload(tu)  # track_utils
importlib.reload(pg)  # plotly_graph_utils
```
## Load waypoints for the track you want to run analysis on
The track waypoint files usually show up as new races start. Be sure to check for them in repository updates. You only need to load them in the block below.
These files represent the coordinates of characteristic points of the track - the center line, inside border and outside border. Their main purpose is to visualise the track in images below. One thing that you may want to remember is that at the moment not all functions below work with all values of the coordinates. Especially some look awkward with bigger tracks or with negative coordinates. Usually there is an explanation on what to do to fix the view.
The naming of the tracks is not super consistent. I'm also not sure all of them are available in the console or locally. You may want to know that:
* London_Loop and Virtual_May19_Train_track - are the AWS DeepRacer Virtual League London Loop tracks
* Tokyo - is the AWS DeepRacer Virtual League Kumo Torakku track
* New_York - are the AWS DeepRacer Virtual League Empire City training and evaluation tracks
* China - are the AWS Deepracer Virtual League Shanghai Sudu training and evaluation tracks
* reinvent_base - is the re:Invent 2019 racing track
There are also other tracks that you may want to explore. Each of them has its own properties that you might find useful for your model.
Remember that evaluation npy files are a community effort to visualise the tracks in the trainings; they aren't 100% accurate.
Tracks Available:
```
# Conveniently list available tracks to analyze
available_track_files = glob.glob("../Tracks/**.npy")
available_track_names = list(map(lambda x: os.path.basename(x).split('.npy')[0], available_track_files))
available_track_names  # bare expression: rendered by Jupyter as cell output
# Configuration
TRACK_NAME = "reinvent_base"
log_dir = "demo-sac"
#Mode can be console | drfc | cw
MODE="console"
#Profile for downloading logs from CloudWatch
PROFILE = "adfs"
#DRFC number of workers for console training set this to 1
workers = 1
# Widen pandas display limits so the tables later in the notebook show fully.
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 300)
l_center_line, l_inner_border, l_outer_border, road_poly = tu.load_track(TRACK_NAME, "../")
road_poly  # bare expression: rendered by Jupyter as cell output
# Plotly plot configuration
plotly_config = {}
plotly_config["height"]=600 #Track scaling height
plotly_config["width"]=900 #Track scaling width
plotly_config["track_name"]=TRACK_NAME
plotly_config["object_avoidance"] = False
```
## Get the logs
Depending on which way you are training your model, you will need a different way to load the data.
**AWS DeepRacer Console**
Download the logs from the Deepracer Console to the Logs folder. Give the name of the directory for log which you want to parse in the variable log_dir.
**DRFC With Multiple Workers**
Specify the number of workers and append the log streams from cloudwatch if you are streaming the logs to cloud watch.
If you are using the logs that are stored inside the `logs` of the EC2 DRFC, use the second block i.e `Inside the EC2 of DRFC`.
**DeepRacer for Dummies/ARCC local training**
Those two setups come with a container that runs Jupyter Notebook (as you noticed if you're using one of them and reading this text). Logs are stored in `/logs/` and you just need to point at the latest file to see the current training. The logs are split for long running training if they exceed 500 MB. The log loading method has been extended to support that.
**Chris Rhodes' repo**
Chris repo doesn't come with logs storage out of the box. I would normally run `docker logs dr > /path/to/logfile` and then load the file.
Below I have prepared a section for each case. In each case you can analyse the logs as the training is being run, just in case of the Console you may need to force downloading of the logs as the `cw.download_log` method has a protection against needless downloads.
Select your preferred way to get the logs below and you can get rid of the rest.
```
def download_logs(robomaker_stream_name):
    """Download every RoboMaker CloudWatch stream in ``robomaker_stream_name``
    into ``log_path`` as robomaker-<n>.log and return the local file names.

    Relies on the module-level ``log_path``, ``LOG_GROUP`` and ``PROFILE``
    set in the configuration cells above.
    """
    robomakers = []
    # enumerate() instead of range(len(...)): same order, clearer intent.
    for i, stream_name in enumerate(robomaker_stream_name):
        robomaker_fname = log_path + "robomaker-" + str(i + 1) + ".log"
        robomakers.append(robomaker_fname)
        cw.download_log(fname=robomaker_fname, stream_name=stream_name,
                        log_group=LOG_GROUP, force=True, profile=PROFILE)
    return robomakers
# Locate (or download) the SageMaker and RoboMaker logs for the chosen MODE.
if MODE == "drfc":
    # # Inside the EC2 of Deepracer for Cloud
    # Logs are expected to be synced locally under ../Logs/<log_dir>/.
    sagemaker_fname = glob.glob("../Logs/"+log_dir+"/*coach*.log")[0]
    robomakers = glob.glob("../Logs/"+log_dir+"/*robomaker*.log")
    robomaker_fname = robomakers[0]
    print(robomakers, sagemaker_fname, robomaker_fname, sep="\n")
elif MODE == "console":
    # AWS DeepRacer Console
    # Expects the console log bundle extracted under ../Logs/<log_dir>/logs/training/.
    sagemaker_fname = glob.glob("../Logs/"+log_dir+"/logs/training/**sagemaker.log")[0]
    robomaker_fname = glob.glob("../Logs/"+log_dir+"/logs/training/**robomaker.log")[0]
    robomakers = [robomaker_fname]
    print(sagemaker_fname,robomaker_fname, sep="\n")
elif MODE == "cw":
    # Deepracer for Cloud using cloud watch
    LOG_GROUP = "/deepracer-for-cloud"
    log_path = "../Logs/"+log_dir+"/logs/training/"
    Path(log_path).mkdir(parents=True, exist_ok=True)
    sagemaker_fname = log_dir + "-drfc-sagemaker.log"
    sagemaker_fname = log_path + sagemaker_fname
    robomaker_stream_name=[]
    #Append depending on number of workers.
    # NOTE(review): these look like placeholder stream names -- replace with
    # the actual CloudWatch stream names of your training run.
    robomaker_stream_name.append("stream-name1")
    robomaker_stream_name.append("stream-name2")
    robomaker_stream_name.append("stream-name3")
    sagemaker_stream_name = "deepracer-0_rl_coach.1.1jovv09fgns6asky9ivn0lhn2"
    robomakers = download_logs(robomaker_stream_name)
    robomaker_fname = robomakers[0]
    cw.download_log(fname=sagemaker_fname, stream_name=sagemaker_stream_name, log_group=LOG_GROUP, force=True, profile=PROFILE)
    print(robomakers,sagemaker_fname, sep="\n")
```
## Load the trace training log
Now that the data is downloaded, we need to load it into memory. We will first read it from file and then convert to data frames in Pandas. [Pandas](https://pandas.pydata.org/) is a Python library for handling and analysing large amounts of data series. Remember this name, you may want to learn more about how to use it to get more information that you would like to get from the logs. Examples below are hardly scratching the surface.
One important information to enter is the setting of your Episodes per iteration hyperparameter. This is used to group the episodes into iterations. This information is valuable when later looking at graphs showing how the training progresses per iteration. You can use it to detect which iteration gave you better outcomes and, if in local training, you could move to that iteration's outcome for submissions in the AWS DeepRacer League or for continuing the training.
The log files you have just gathered above have lines like this one:
```
SIM_TRACE_LOG:799,111,1.7594,4.4353,3.0875,-0.26,2.50,2,1.0000,False,True,71.5802,49,17.67,1555554451.1110387
```
This is all that matters for us. The first two are some tests I believe and when loading they get skipped, then each next line has the following fields:
* episode number
* step number
* x coordinate
* y coordinate
* yaw of the car (where the car is heading)
* decision about turning (turn value from your action space)
* decision about throttle (speed value from your action space)
* decision index (value from your action space)
* reward value
* is the car going backwards
* are all wheels on track?
* progress in the lap
* closest waypoint
* track length
* timestamp
`la.load_data` and then `la.convert_to_pandas` read it and prepare for your usage. Sorting the values may not be needed, but I have experienced under some circumstances that the log lines were not ordered properly.
```
# Detect training algorithm and action-space type from the RoboMaker log
# (falls back to clipped_ppo / discrete when the log does not state them).
agent_info = la.agent_and_network(robomaker_fname)
algo = agent_info["training_algorithm"] if "training_algorithm" in agent_info else "clipped_ppo"
action_space_type = agent_info["action_space_type"] if "action_space_type" in agent_info else "discrete"
agent_info  # bare expression: rendered by Jupyter as cell output
hyperparams = la.hyperparameters(robomaker_fname)
EPISODES_PER_ITERATION = hyperparams["num_episodes_between_training"]
NUM_EPOCHS = hyperparams["num_epochs"] if "num_epochs" in hyperparams else 1
print ('EPISODES_PER_ITERATION: %s' % EPISODES_PER_ITERATION)
# With multiple DRFC workers each robomaker only runs its share of an iteration.
EPISODES_PER_ITERATION = int(EPISODES_PER_ITERATION/(workers))
print ('EPISODES_PER_ITERATION per robomaker: %s' % EPISODES_PER_ITERATION)
print ('NUM_EPOCHS: %s' % NUM_EPOCHS)
hyperparams  # bare expression: rendered by Jupyter as cell output
# Offset used to renumber each worker's episodes into one global sequence.
cst = (workers-1)*EPISODES_PER_ITERATION
for w in range(workers):
    data = la.load_data(robomakers[w])
    wdf = la.convert_to_pandas(data, episodes_per_iteration=EPISODES_PER_ITERATION, algo=algo,
                               action_space_type=action_space_type)
    wdf = wdf.sort_values(['episode', 'steps'])
    # Shift this worker's episode numbers so workers interleave without collisions.
    wdf['episode'] += (wdf['iteration']-1)*cst+w*EPISODES_PER_ITERATION
    if w == 0:
        df = wdf.copy()
    else :
        df = pd.concat([df, wdf], ignore_index=True)
df = df.sort_values(['episode', 'steps'])
# personally I think normalizing can mask too high rewards so I am commenting it out,
# but you might want it.
# la.normalize_rewards(df)
#Uncomment the line of code below to evaluate a different reward function
# la.new_reward(df, l_center_line, 'reward.new_reward') #, verbose=True)
```
### Code to cut down the training till the specified checkpoint episode
Ex : If Best Checkpoint is at episode = 3250, cut down training data until that episode
```
# checkpoint_ep = 3250
# ep = df[df['episode']==checkpoint_ep]
# step_no = int(ep.iloc[[-1]].index.tolist()[0])
# df = df.iloc[0:step_no]
```
## New reward
Note the last line above: it takes a reward class from log-analysis/rewards, imports it, instantiates and recalculates reward values based on the data from the log. This lets you do some testing before you start training and rule out some obvious things.
*If you find this confusing, don't worry, because it is confusing. You can safely ignore it for now and come back to it later.*
This operation is possible because the logs contain all information needed to recreate the params for a given step. That said some could be implemented better and some were ignored for now and should be implemented.
The sample reward mentioned in that line is located in `log-analysis/rewards/reward_sample.py` and looks like this:
```
from time import time
class Reward:
    """Sample reward: full reward unless the car is steering sharply.

    Lap state (last step count and lap start time) lives on the instance so
    the function can tell when a new lap begins.
    """
    def __init__(self, verbose=False):
        self.previous_steps = None
        self.initial_time = None
        self.verbose = verbose

    @staticmethod
    def get_time(params):
        # During log analysis the supporting code injects a 'timestamp'
        # param; in live training we fall back to wall-clock time.  The first
        # value only arrives once step one completes, so the measured total
        # lap time is short by roughly 0.2s.
        return params.get('timestamp', None) or time()

    def reward_function(self, params):
        lap_started = (self.previous_steps is None
                       or self.previous_steps > params['steps'])
        if lap_started:
            # new lap! remember when it began
            self.initial_time = self.get_time(params)
        # Penalise sharp steering; otherwise grant the full reward.
        steering_factor = 0.7 if abs(params['steering_angle']) > 14 else 1.0
        reward = float(steering_factor)
        self.previous_steps = params['steps']
        if self.verbose:
            print(params)
        return reward
# Module-level singleton: keeps lap state across reward_function calls.
reward_object = Reward()
# Plain function wrapper -- the shape the DeepRacer service imports.
def reward_function(params):
    return reward_object.reward_function(params)
```
After some imports a class is declared, it's called `Reward`, then the class is instantiated and a function `reward_function` is declared. This somewhat bloated structure has a couple benefits:
* It works in console/local training for actual training
* It lets you reload the definition for class Reward and retry the reward function multiple times after changes without much effort
* If you want to rely on state carried over between the steps, it's all contained in a reward object
The reward class hides two or three tricks for you:
* `get_time` lets you abstract from machine time in log analysis - the supporting code adds one extra param, `timestamp`. That lets you get the right time value in new_reward function
* the first condition allows detecting the beginning of an episode or even start of training you can use it for some extra operations between the episodes
* `verbose` can be used to provide some noisier prints in the reward function - you can switch them on when loading the reward function above.
Just remember: not all params are provided, you are free to implement them and raise a Pull Request for log_analysis.df_to_params method.
If you just wrap your reward function like in the above example, you can use it in both log analysis notebook and the training.
Final warning: there is a loss of precision in the logs (rounded numbers) and also potentially potential bugs. If you find any, please fix, please report.
## Graphs
The original notebook has provided some great ideas on what could be visualised in the graphs. Below examples are a slightly extended version. Let's have a look at what they are presenting and what this may mean to your training.
### Training progress
As you have possibly noticed by now, training episodes are grouped into iterations and this notebook also reflects it. What also marks it are checkpoints in the training. After each iteration a set of ckpt files is generated - they contain outcomes of the training, then a model.pb file is built based on that and the car begins a new iteration. Looking at the data grouped by iterations may lead you to a conclusion, that some earlier checkpoint would be a better start for a new training. While this is limited in the AWS DeepRacer Console, with enough disk space you can keep all the checkpoints along the way and use one of them as a start for new training (or even as a submission to a race).
While the episodes in a given iteration are a mixture of decision process and random guesses, mean results per iteration may show a specific trend. Mean values are accompanied by standard deviation to show the concentration of values around the mean.
#### Rewards per Iteration
You can see these values as lines or dots per episode in the AWS DeepRacer console. When the reward goes up, this suggests that a car is learning and improving with regards to a given reward function. **This does not have to be a good thing.** If your reward function rewards something that harms performance, your car will learn to drive in a way that will make results worse.
At first the rewards just grow if the progress achieved grows. Interesting things may happen slightly later in the training:
* The reward may go flat at some level - it might mean that the car can't get any better. If you think you could still squeeze something better out of it, review the car's progress and consider updating the reward function, the action space, maybe hyperparameters, or perhaps starting over (either from scratch or from some previous checkpoint)
* The reward may become wobbly - here you will see it as a mesh of dots zig-zagging. It can be a gradually growing zig-zag or a roughly stagnated one. This usually means the learning rate hyperparameter is too high and the car started doing actions that oscilate around some local extreme. You can lower the learning rate and hope to step closer to the extreme. Or run away from it if you don't like it
* The reward plunges to near zero and stays roughly flat - I only had that when I messed up the hyperparameters or the reward function. Review recent changes and start training over or consider starting from scratch
The Standard deviation says how close from each other the reward values per episode in a given iteration are. If your model becomes reasonably stable and worst performances become better, at some point the standard deviation may flat out or even decrease. That said, higher speeds usually mean there will be areas on track with higher risk of failure. This may bring the value of standard deviation to a higher value and regardless of whether you like it or not, you need to accept it as a part of fighting for significantly better times.
#### Time per iteration
I'm not sure how useful this graph is. I would worry if it looked very similar to the reward graph - this could suggest that slower laps will be getting higher rewards. But there is a better graph for spotting that below.
#### Progress per Iteration
This graph usually starts low and grows and at some point it will get flatter. The maximum value for progress is 100% so it cannot grow without limits. It usually shows similar initial behaviours to reward and time graphs. I usually look at it when I alter an action in training. In such cases this graph usually dips a bit and then returns or goes higher.
#### Total reward per episode
This graph has been taken from the original notebook and can show progress on certain groups of behaviours. It usually forms something like a triangle, sometimes you can see a clear line of progress that shows some new way has been first taught and then perfected.
#### Mean completed lap times per iteration
Once we have a model that completes laps reasonably often, we might want to know how fast the car gets around the track. This graph will show you that. I use it quite often when looking for a model to shave a couple more miliseconds. That said it has to go in pair with the last one:
#### Completion rate per iteration
It represents how big a part of all episodes in an iteration are full laps. The value is in the range [0, 1] and is the result of dividing the number of full laps in an iteration by the number of all episodes in that iteration. I say it has to go in a pair with the previous one because you not only need a fast lapper, you also want a race completer.
The higher the value, the more stable the model is on a given track.
### Stats for all laps
Previous graphs were mainly focused on the state of training with regards to training progress. This however will not give you a lot of information about how well your reward function is doing overall.
In such case `scatter_aggregates` may come handy. It comes with three types of graphs:
* progress/steps/reward depending on the time of an episode - of this I find reward/time and new_reward/time especially useful to see that I am rewarding good behaviours - I expect the reward to time scatter to look roughly triangular
* histograms of time and progress - for all episodes the progress one is usually quite handy to get an idea of model's stability
* progress/time_if_complete/reward to closest waypoint at start - these are really useful during training as they show potentially problematic spots on track. It can turn out that a car gets best reward (and performance) starting at a point that just cannot be reached if the car starts elsewhere, or that there is a section of a track that the car struggles to get past and perhaps it's caused by an aggressive action space or undesirable behaviour prior to that place
Side note: `time_if_complete` is not very accurate and will almost always look better for episodes closer to 100% progress than in case of those 50% and below.
```
# Aggregate the step-level log into per-episode stats, then draw the
# training-progress and all-laps scatter graphs.
simulation_agg = la.simulation_agg(df)
pg.plot_progress_reward_distribution(simulation_agg)
la.analyze_training_progress(simulation_agg, title='Training progress')
la.scatter_aggregates(simulation_agg, 'Stats for all laps')
```
### Stats for complete laps
The graphs here are same as above, but now I am interested in other type of information:
* does the reward scatter show higher rewards for lower completion times? If I give higher reward for a slower lap it might suggest that I am training the car to go slow
* what does the time histogram look like? With enough samples available the histogram takes a normal distribution graph shape. The lower the mean value, the better the chance to complete a fast lap consistently. The longer the tails, the greater the chance of getting lucky in submissions
* is the car completing laps around the place where the race lap starts? Or does it only succeed if it starts in a place different to the racing one?
```
# Episodes that reached 100% progress (full laps).
complete_ones = simulation_agg[simulation_agg['progress']==100]
print(len(complete_ones))
if complete_ones.shape[0] > 0:
    la.scatter_aggregates(complete_ones, 'Stats for complete laps')
else:
    print('No complete laps yet.')
pg.plot_time_hist(complete_ones)
pg.plot_complete_lap_analysis(complete_ones)
pg.plot_distribution(complete_ones,column="steps",percent=25)
pg.plot_distribution(complete_ones,column="time",percent=25)
pg.plot_reward_distribution(df,percent=25)
pg.plot_reward_hist(df)
# Retrieve policy training data from the SageMaker Log file
trn_data = la.parse_sagemaker_logs(sagemaker_fname)
pg.plot_training_metrics(trn_data)
pg.plot_training_metrics(trn_data, "surrogate_loss")
```
### Categories analysis
We're going back to comparing training results based on the training time, but in a different way. Instead of just scattering things in relation to iteration or episode number, this time we're grouping episodes based on a certain piece of information. For this we use the function:
```
analyze_categories(panda, category='quintile', groupcount=5, title=None)
```
The idea is pretty simple - determine a way to cluster the data and provide that as the `category` parameter (alongside the count of groups available). In the default case we take advantage of the aggregated information to which quintile an episode belongs and thus build buckets each containing 20% of episodes which happened around the same time during the training. If your training lasted for five hours, this would show results grouped per each hour.
A side note: if you run the function with `category='start_at'` and `groupcount=20` you will get results based on the waypoint closest to the starting point of an episode. If you need to, you can introduce other types of categories and reuse the function.
The graphs are similar to what we've seen above. I especially like the progress one, which shows where the model tends to struggle and whether its successful-lap rate is improving or beginning to decrease. Interestingly, I also had cases where I saw the completion drop on the progress rate only to improve in a later quintile, but with a better time graph.
A second side note: if you run this function for `complete_ones` instead of `simulation_agg`, suddenly the time histogram becomes more interesting as you can see whether completion times improve.
```
la.analyze_categories(simulation_agg, title='Quintiles')
#Percentage completion in each quintile
# NOTE(review): Series.unique() returns a NumPy array or Categorical;
# `.to_list()` is not available on all of these -- if this raises
# AttributeError, `.tolist()` is the safe spelling.  Confirm against the
# installed pandas version.
quintiles = simulation_agg["quintile"].unique().to_list();
q_list = []
for q in quintiles:
    total_num_ep = simulation_agg[simulation_agg["quintile"]==q].count()["quintile"]
    completed_num_ep = complete_ones[complete_ones["quintile"]==q].count()["quintile"]
    # Completion rate = full laps / all episodes, per quintile.
    q_list.append({"quintile": q, "completed_ep": completed_num_ep, "total_ep": total_num_ep, "percent": round(completed_num_ep/total_num_ep * 100, 2)})
q_data = pd.DataFrame(q_list)
q_data  # bare expression: rendered by Jupyter as cell output
```
## Data in tables
While a lot can be seen in graphs that cannot be seen in the raw numbers, the numbers let us get into more detail. Below you will find a couple examples. If your model is behaving the way you would like it to, below tables may provide little added value, but if you struggle to improve your car's performance, they may come handy. In such cases I look for examples where high reward is giving to below-expected episode and when good episodes are given low reward.
You can then take the episode number and scatter it below, and also look at reward given per step - this can in turn draw your attention to some rewarding anomalies and help you detect some unexpected outcomes in your reward function.
There is a number of ways to select the data for display:
* `nlargest`/`nsmallest` lets you display information based on a specific value being highest or lowest
* filtering based on a field value, for instance `df[df['episode']==10]` will display only those steps in `df` which belong to episode 10
* `head()` lets you peek into a dataframe
There isn't a right set of tables to display here and the ones below may not suit your needs. Get to know Pandas more and have fun with them. It's almost as addictive as DeepRacer itself.
The examples have a short comment next to them explaining what they are showing.
```
# View the 100 best rewarded episodes in the training
simulation_agg.nlargest(100, 'new_reward')
# View the 50 fastest complete laps
complete_ones.nsmallest(50, 'time')
# View the 50 best rewarded completed laps
complete_ones.nlargest(50, 'reward')
# View the 50 best rewarded completed laps (according to new_reward if you are using it)
complete_ones.nlargest(50, 'new_reward')
# Completed laps that started at waypoint 0
complete_ones[complete_ones['start_at']==0]
# View all steps data for episode 10
df[df['episode']==10]
```
## Individual episode/s plot
Get the list of episodes that you want to analyse , probably from different quintiles to see the change in path that the episode takes and analyse
```
# Episodes to inspect -- pick from different quintiles to compare paths.
episodes = [10,20]
for episode_no in episodes:
    episode_data = df[df['episode']==episode_no]
    pg.plot_episode(episode_data,plotly_config, color='throttle')
```
## Plot Multiple Laps
Use the below function to plot laps between specified times. This is useful to analyse the laps which are incomplete between specified start and end time and also to see the distribution for complete laps. <br>
hover data : [reward , speed] , color = reward
```
# Laps whose total time fell between time_start and time_end seconds.
# pg.plot_multiple_laps(df,complete_ones,plotly_config,time_start = 7.8,time_end = 8.0,is_complete = True)
pg.plot_multiple_laps(df,simulation_agg,plotly_config,time_start = 7.8,time_end = 8.0,is_complete = False)
# Plot every episode of the listed iterations over the chosen waypoint span.
iterations = [1]
starting_waypoint = 0
ending_waypoint = len(l_outer_border)
pg.plot_iterations(df,iterations,plotly_config,EPISODES_PER_ITERATION,starting_waypoint,ending_waypoint,is_complete=False)
```
### Path taken for top reward iterations
NOTE: at some point in a single episode the car could go around multiple laps, the episode was terminated when car completed 1000 steps. Currently one episode has at most one lap. This explains why you can see multiple laps in an episode plotted below.
Being able to plot the car's route in an episode can help you detect certain patterns in its behaviours and either promote them more or train away from them. While being able to watch the car go in the training gives some information, being able to reproduce it after the training is much more practical.
Graphs below give you a chance to look deeper into your car's behaviour on track.
```
# Path taken by the top 5 fastest complete laps.
sorted_episodes = list(complete_ones.nsmallest(5, 'time')['episode'])
# Alternative selections -- uncomment one to explore:
# sorted_episodes = list(complete_ones.nlargest(3,'reward')['episode'])
# sorted_episodes = list(complete_ones[complete_ones['start_at']==0].nsmallest(5,'time')['episode'])
# sorted_episodes = list(simulation_agg.nlargest(5,'progress')['episode'])
for ep_id in sorted_episodes:
    ep_frame = df[df['episode'] == ep_id]
    pg.plot_episode(ep_frame, plotly_config, color='throttle')
```
### Plot a heatmap of rewards for current training.
The brighter the colour, the higher the reward granted in given coordinates.
If instead of a similar view as in the example below you get a dark image with hardly any
dots, it might be that your rewards are highly disproportionate and possibly sparse.
Disproportion means you may have one reward of 10.000 and the rest in range 0.01-1.
In such cases the vast majority of dots will simply be very dark and the only bright dot
might be in a place difficult to spot. I recommend you go back to the tables and show highest
and average rewards per step to confirm if this is the case. Such disproportions may
not affect your training very negatively, but they will make the data less readable in this notebook.
Sparse data means that the car gets a high reward for the best behaviour and very low reward
for anything else, and worse even, reward is pretty much discrete (return 10 for narrow perfect,
else return 0.1). The car relies on reward varying between behaviours to find gradients that can
lead to improvement. If that is missing, the model will struggle to improve.
```
track = la.plot_track(df, l_center_line, l_inner_border, l_outer_border)
plt.title("Reward distribution for all actions ")
im = plt.imshow(track, cmap='hot', interpolation='bilinear', origin="lower")
```
### Plot a particular iteration
This is same as the heatmap above, but just for a single iteration.
```
iteration_id = 10
track = la.plot_track(df[df['iteration'] == iteration_id], l_center_line, l_inner_border, l_outer_border)
plt.title("Reward distribution for all actions ")
im = plt.imshow(track, cmap='hot', interpolation='bilinear', origin="lower")
```
# Action breakdown per iteration and histogram of the action distribution for each of the turns - reinvent track
This plot is useful to understand the actions that the model takes for any given iteration. Unfortunately at this time it is not fit for purpose as it assumes six actions in the action space and has other issues. It will require some work to get it to done but the information it returns will be very valuable.
This is a bit of an attempt to abstract away from the brilliant function in the original notebook towards a more general graph that we could use. It should be treated as a work in progress. The track_breakdown could be used as a starting point for a general track information object to handle all the customisations needed in methods of this notebook.
Track breakdown data needs to be available for it. If you cannot find it for the desired track, you will need to create it yourself.
Currently supported tracks:
```
la.track_breakdown.keys()
```
The second parameter is either a single index or a list of indices for df iterations that you would like to view. You can for instance use `sorted_idx` list which is a sorted list of iterations from the highest to lowest reward.
Bear in mind that you will have to provide a proper action naming in parameter `action_names`, this function assumes only six actions by default. I think they need to match numbering of actions in your model's metadata json file.
```
# df[df['progress']<=100]['action'].value_counts().plot(kind='bar',figsize=(20,10))
la.action_breakdown(df, 4,df['iteration'], la.track_breakdown['reinvent2018'], l_center_line, l_inner_border, l_outer_border, ['0', '1', '2', '3',
'4', '5', '6', '7',
'8', '9', '10', '11',
'12', '13', '14', '15',
'16', '17', '18', '19', '20'])
```
| github_jupyter |
850-hPa Geopotential Heights, Temperature, Frontogenesis, and Winds
===================================================================
Frontogenesis at 850-hPa with Geopotential Heights, Temperature, and
Winds
This example uses example data from the GFS analysis for 12 UTC 26
October 2010 and uses xarray as the main read source with using MetPy to
calculate frontogenesis and wind speed with geographic plotting using
Cartopy for a CONUS view.
Import the needed modules.
```
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
from metpy.units import units
import numpy as np
import xarray as xr
```
Use Xarray to access GFS data from THREDDS resource and uses
metpy accessor to parse file to make it easy to pull data using
common coordinate names (e.g., vertical) and attach units.
```
ds = xr.open_dataset('https://thredds.ucar.edu/thredds/dodsC/casestudies/'
'python-gallery/GFS_20101026_1200.nc').metpy.parse_cf()
```
Subset data based on latitude and longitude values, calculate potential
temperature for frontogenesis calculation.
```
# Geographic subset (GFS 0-360 longitude convention) to limit the download.
lon_slice = slice(200, 350)
# NOTE(review): the latitude slice runs high->low -- presumably GFS stores
# latitudes north-to-south; confirm against the dataset coordinates.
lat_slice = slice(85, 10)

# 1-D coordinate vectors for the subset region (GFS grids are regular lat/lon).
lats = ds.lat.sel(lat=lat_slice).values
lons = ds.lon.sel(lon=lon_slice).values

level = 850 * units.hPa

def _field_850(da):
    """Subset one data variable to 850 hPa over the region as a plain unit array."""
    return da.metpy.sel(vertical=level, lat=lat_slice,
                        lon=lon_slice).metpy.unit_array.squeeze()

hght_850 = _field_850(ds.Geopotential_height_isobaric)
tmpk_850 = _field_850(ds.Temperature_isobaric)
uwnd_850 = _field_850(ds['u-component_of_wind_isobaric'])
vwnd_850 = _field_850(ds['v-component_of_wind_isobaric'])

# Convert temperatures to degree Celsius for plotting purposes
tmpc_850 = tmpk_850.to('degC')

# Potential temperature feeds the frontogenesis calculation below.
thta_850 = mpcalc.potential_temperature(level, tmpk_850)

# Get a sensible datetime format for plot titles.
vtime = ds.time.data[0].astype('datetime64[ms]').astype('O')
```
Calculate frontogenesis
-----------------------
Frontogenesis calculation in MetPy requires temperature, wind
components, and grid spacings. First compute the grid deltas using MetPy
functionality, then put it all together in the frontogenesis function.
Note: MetPy will give the output with SI units, but typically
frontogenesis (read: GEMPAK) output this variable with units of K per
100 km per 3 h; a conversion factor is included here to use at plot time
to reflect those units.
```
# Grid spacing in meters derived from the 1-D lon/lat vectors (required by
# MetPy's kinematic calculations).
dx, dy = mpcalc.lat_lon_grid_deltas(lons, lats)
# 2-D frontogenesis of 850-hPa potential temperature; MetPy returns SI units
# (K/m/s). dim_order='yx' tells MetPy the arrays are ordered (lat, lon).
fronto_850 = mpcalc.frontogenesis(thta_850, uwnd_850, vwnd_850, dx, dy, dim_order='yx')
# Conversion factor from SI to the conventional K per 100 km per 3 h:
# 100 km = 1000*100 m and 3 h = 3600*3 s.
convert_to_per_100km_3h = 1000*100*3600*3
```
Plotting Frontogenesis
----------------------
Using a Lambert Conformal projection from Cartopy to plot 850-hPa
variables including frontogenesis.
```
# ---- Map set-up --------------------------------------------------------
# Display projection: Lambert Conformal centred on CONUS.
mapcrs = ccrs.LambertConformal(central_longitude=-100, central_latitude=35,
                               standard_parallels=(30, 60))
# Projection the data are defined in (GFS is a regular lat/lon grid).
datacrs = ccrs.PlateCarree()

fig = plt.figure(1, figsize=(14, 12))
ax = plt.subplot(111, projection=mapcrs)
ax.set_extent([-130, -72, 20, 55], ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'))
ax.add_feature(cfeature.STATES.with_scale('50m'))

# ---- Filled contours: 850-hPa frontogenesis ----------------------------
clevs_tmpc = np.arange(-40, 41, 2)  # temperature contour levels (degC)
cf = ax.contourf(lons, lats, fronto_850*convert_to_per_100km_3h,
                 np.arange(-8, 8.5, 0.5),
                 cmap=plt.cm.bwr, extend='both', transform=datacrs)
cb = plt.colorbar(cf, orientation='horizontal', pad=0, aspect=50, extendrect=True)
cb.set_label('Frontogenesis K / 100 km / 3 h')

# ---- Line contours: temperature (dashed grey) and heights (black) ------
csf = ax.contour(lons, lats, tmpc_850, clevs_tmpc, colors='grey',
                 linestyles='dashed', transform=datacrs)
plt.clabel(csf, fmt='%d')

clevs_850_hght = np.arange(0, 8000, 30)
cs = ax.contour(lons, lats, hght_850, clevs_850_hght, colors='black',
                transform=datacrs)
plt.clabel(cs, fmt='%d')

# ---- Wind barbs: every fifth grid point, converted to knots ------------
wind_slice = (slice(None, None, 5), slice(None, None, 5))
ax.barbs(lons[wind_slice[0]], lats[wind_slice[1]],
         uwnd_850[wind_slice].to('kt').m, vwnd_850[wind_slice].to('kt').m,
         color='black', transform=datacrs)

# ---- Titles -------------------------------------------------------------
plt.title('GFS 850-hPa Geopotential Heights (m), Temp (C), and Winds', loc='left')
plt.title('Valid Time: {}'.format(vtime), loc='right')
```
| github_jupyter |
# Evaluation of the GBM GridSearchCV results
This file illustrates how to evaluate the [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) results for sklearn's [GradientBoostingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) obtained after first running `sklearn_gbm_tuning.py` in this directory to test various hyperparameter combinations and store the result.
## Imports & Settings
```
%matplotlib inline
import warnings
from pathlib import Path
import os
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import graphviz
from statsmodels.api import OLS, add_constant
from sklearn.tree import DecisionTreeRegressor, export_graphviz
from sklearn.metrics import roc_auc_score
from sklearn.externals import joblib
warnings.filterwarnings('ignore')
sns.set_style("whitegrid")
np.random.seed(42)
pd.options.display.float_format = '{:,.4f}'.format
with pd.HDFStore('model_tuning.h5') as store:
test_feature_data = store['holdout/features']
test_features = test_feature_data.columns
test_target = store['holdout/target']
```
## GBM GridsearchCV with sklearn
Need OneStepTimeSeriesSplit because stored GridSearchCV result expects it
```
class OneStepTimeSeriesSplit:
    """Generate tuples of (train_idx, test_idx) positional index pairs.

    Assumes the index contains a level labeled 'date'. Mimics the
    scikit-learn CV-splitter interface (``split`` / ``get_n_splits``) so it
    can be passed to GridSearchCV. Each of the ``n_splits`` folds tests on
    the next-most-recent block of ``test_period_length`` unique dates and
    trains only on strictly earlier dates (one-step-ahead; no look-ahead
    leakage).
    """

    def __init__(self, n_splits=3, test_period_length=1, shuffle=False):
        self.n_splits = n_splits
        self.test_period_length = test_period_length
        self.shuffle = shuffle
        # total number of most-recent unique dates reserved for testing
        self.test_end = n_splits * test_period_length

    @staticmethod
    def chunks(l, n):
        """Yield successive n-sized chunks from sequence l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def split(self, X, y=None, groups=None):
        """Yield (train_idx, test_idx) pairs, most recent test period first."""
        unique_dates = (X.index
                        .get_level_values('date')
                        .unique()
                        .sort_values(ascending=False)
                        [:self.test_end])
        dates = X.reset_index()[['date']]
        for test_date in self.chunks(unique_dates, self.test_period_length):
            train_idx = dates[dates.date < min(test_date)].index
            test_idx = dates[dates.date.isin(test_date)].index
            if self.shuffle:
                # BUG FIX: the previous code did
                # np.random.shuffle(list(train_idx)), which shuffles a
                # throwaway copy and discards it -- shuffle=True had no
                # effect. Shuffle a materialized list and yield it instead.
                train_idx = list(train_idx)
                np.random.shuffle(train_idx)
            yield train_idx, test_idx

    def get_n_splits(self, X, y, groups=None):
        """Number of splitting iterations (required by the sklearn CV API)."""
        return self.n_splits
```
### Load Result
Need to first run `sklearn_gbm_tuning.py` to perform gridsearch and store result (not included due to file size).
```
gridsearch_result = joblib.load('gbm_gridsearch.joblib')
```
The GridSearchCV object has several additional attributes after completion that we can access after loading the pickled result to learn which hyperparameter combination performed best and its average cross-validation AUC score, which results in a modest improvement over the default values. This is shown in the following code:
### Best Parameters & AUC Score
```
pd.Series(gridsearch_result.best_params_)
f'{gridsearch_result.best_score_:.4f}'
```
### Evaluate best model
#### Test on hold-out set
```
best_model = gridsearch_result.best_estimator_
preds= best_model.predict(test_feature_data)
roc_auc_score(y_true=test_target, y_score=preds)
```
#### Inspect global feature importance
```
pd.Series(best_model.feature_importances_, index=test_features).plot.barh(figsize=(8,15));
```
### CV Train-Test Scores
```
results = pd.DataFrame(gridsearch_result.cv_results_).drop('params', axis=1)
results.info()
results.head()
```
### Get parameter values & mean test scores
```
test_scores = results.filter(like='param').join(results[['mean_test_score']])
test_scores = test_scores.rename(columns={c: '_'.join(c.split('_')[1:]) for c in test_scores.columns})
test_scores.info()
params = test_scores.columns[:-1].tolist()
test_scores = test_scores.set_index('test_score').stack().reset_index()
test_scores.columns= ['test_score', 'parameter', 'value']
test_scores.head()
test_scores.info()
def get_test_scores(df):
    """Select parameter values and mean test scores from CV results.

    Keeps the ``param_*`` columns of a GridSearchCV ``cv_results_`` frame
    plus ``mean_test_score``, and strips the first prefix token from each
    column name (e.g. 'param_max_depth' -> 'max_depth',
    'mean_test_score' -> 'test_score').
    """
    # BUG FIX: previously joined the module-level `results` frame instead of
    # the `df` argument, so the parameter only worked by accident.
    data = df.filter(like='param').join(df[['mean_test_score']])
    return data.rename(columns={c: '_'.join(c.split('_')[1:]) for c in data.columns})
```
### Plot Test Scores vs Parameter Settings
The GridSearchCV result stores the average cross-validation scores so that we can analyze how different hyperparameter settings affect the outcome.
The six seaborn swarm plots below show the distribution of AUC test scores for all parameter values. In this case, the highest AUC test scores required a low learning_rate and a large value for max_features. Some parameter settings, such as a low learning_rate, produce a wide range of outcomes that depend on the complementary settings of other parameters. Other parameters are compatible with high scores for all settings used in the experiment:
```
plot_data = get_test_scores(results).drop('min_impurity_decrease', axis=1)
plot_params = plot_data.columns[:-1].tolist()
plot_data.info()
fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(14, 6))
axes = axes.flatten()
for i, param in enumerate(plot_params):
sns.swarmplot(x=param, y='test_score', data=plot_data, ax=axes[i])
axes[i].set_ylim(.63, .69)
fig.suptitle('Mean Test Score Distribution by Hyper Parameter', fontsize=14)
fig.tight_layout()
fig.subplots_adjust(top=.9)
fig.savefig('mean_test_scores_by_param', dpi=300);
```
### Dummy-encode parameters
```
data = get_test_scores(results)
params = data.columns[:-1].tolist()
data = pd.get_dummies(data,columns=params, drop_first=False)
data.info()
```
### Build Regression Tree
We will now explore how hyperparameter settings jointly affect the mean cross-validation score. To gain insight into how parameter settings interact, we can train a DecisionTreeRegressor with the mean test score as the outcome and the parameter settings, encoded as categorical variables in one-hot or dummy format.
The tree structure highlights that using all features (max_features_1), a low learning_rate, and a max_depth over three led to the best results, as shown in the following diagram:
```
# Shallow regression tree: mean CV test score as the target, one-hot encoded
# hyperparameter settings as features. The depth cap keeps the tree readable
# so the top splits reveal which setting combinations drive the score.
reg_tree = DecisionTreeRegressor(
    criterion='mse', splitter='best',
    max_depth=4,
    min_samples_split=5, min_samples_leaf=10,
    min_weight_fraction_leaf=0.0,
    max_features=None, max_leaf_nodes=None,
    min_impurity_decrease=0.0, min_impurity_split=None,
    random_state=42, presort=False)

gbm_features = data.drop('test_score', axis=1).columns
reg_tree.fit(X=data[gbm_features], y=data.test_score)
```
#### Visualize Tree
```
out_file = 'results/gbm_sklearn_tree.dot'
dot_data = export_graphviz(reg_tree,
out_file=out_file,
feature_names=gbm_features,
max_depth=4,
filled=True,
rounded=True,
special_characters=True)
if out_file is not None:
dot_data = Path(out_file).read_text()
graphviz.Source(dot_data)
```
#### Compute Feature Importance
Overfit regression tree to learn detailed rules that classify all samples
```
# Fully-grown regression tree (no depth limit): deliberately over-fit so the
# impurity-based feature importances reflect every split the data supports.
reg_tree = DecisionTreeRegressor(
    criterion='mse', splitter='best',
    min_samples_split=2, min_samples_leaf=1,
    min_weight_fraction_leaf=0.0,
    max_features=None, max_leaf_nodes=None,
    min_impurity_decrease=0.0, min_impurity_split=None,
    random_state=42, presort=False)

gbm_features = data.drop('test_score', axis=1).columns
reg_tree.fit(X=data[gbm_features], y=data.test_score)
```
The bar chart below displays the influence of the hyperparameter settings in producing different outcomes, measured by their feature importance for a decision tree that is grown to its maximum depth. Naturally, the features that appear near the top of the tree also accumulate the highest importance scores.
```
gbm_fi = pd.Series(reg_tree.feature_importances_, index=gbm_features).sort_values(ascending=False)
gbm_fi = gbm_fi[gbm_fi > 0]
idx = [p.split('_') for p in gbm_fi.index]
gbm_fi.index = ['_'.join(p[:-1]) + '=' + p[-1] for p in idx]
gbm_fi.sort_values().plot.barh(figsize=(5,5))
plt.title('Hyperparameter Importance')
plt.tight_layout()
plt.savefig('param_importance', dpi=300);
```
### Run linear regression
Alternatively, we can use a linear regression to gain insights into the statistical significance of the linear relationship between hyperparameters and test scores.
```
data = get_test_scores(results)
params = data.columns[:-1].tolist()
data = pd.get_dummies(data,columns=params, drop_first=True)
model = OLS(endog=data.test_score, exog=add_constant(data.drop('test_score', axis=1))).fit(cov_type='HC3')
print(model.summary())
```
| github_jupyter |
# Machine learning for genetic data
## Introduction
The goal of this practical session is to manipulate high-dimensional, low sample-size data that is typical of many genetic applications.
Here we will work with GWAS data from _Arabidopsis thaliana_, which is a plant model organism (https://upload.wikimedia.org/wikipedia/commons/6/6f/Arabidopsis_thaliana.jpg).
The genotypes are hence described by **Single Nucleotide Polymorphisms, or SNPs**. Our goal will be to use this data to identify regions of the genome that can be linked with various growth and flowering traits (**phenotypes**).
```
%pip install --upgrade --force-reinstall pillow
%pylab inline
# imports matplotlib as plt and numpy as np
plt.rc('font', **{'size': 16}) # font size for text on plots
```
## Data description
* `data/athaliana_small.X.txt` is the design matrix. As many rows as samples, as many columns as SNPs
* the SNPs are given (in order) in `data/athaliana_small.snps.txt`.
* the samples are given (in order) in `data/athaliana.samples.txt`.
* the transformed phenotypes are given in `data/athaliana.4W.pheno` and `data/athaliana.2W.pheno`. The first column is the sample's ID, and the second the phenotype.
* `data/athaliana.candidates.txt` contains a list of _A. thaliana_ genes known or strongly suspected to be associated with flowering times.
* the feature network is in `data/athaliana_small.W.txt`. It has been saved as 3 arrays, corresponding to the row, col, and data attributes of a [scipy.sparse coo_matrix](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html).
## Loading the data
We will start by working without the feature network, on the 2W phenotype.
```
# Load the SNP names
with open('data/athaliana_small.snps.txt') as f:
snp_names = f.readline().split()
f.close()
print(len(snp_names))
!wget -O data/athaliana_small.X.txt https://plmbox.math.cnrs.fr/f/e5ca01bf036a4fc5aa60/?dl=1
# Run this cell to download athaliana_small.X.txt
# Load the design matrix -- this can take time!
X = np.loadtxt('data/athaliana_small.X.txt', # file names
dtype = 'int') # values are integers
```
__Q: How many samples are there in the data? How many SNPs are there?__
```
# Answer
X.shape
```
We have 1307 samples and 9 419 SNPs.
```
p = X.shape[1]
```
#### Load the sample names
```
samples = list(np.loadtxt('data/athaliana.samples.txt', # file names
dtype = int)) # values are integers
print(len(samples))
```
#### Load the 2W phenotype data
```
import pandas as pd
df = pd.read_csv('data/athaliana.2W.pheno', # file name
header = None, # columns have no header
delim_whitespace = True) # columns are separated by white space
```
The 2W phenotype is not available for all samples.
```
# Create vector of sample IDs
samples_with_phenotype = list(df[0])
print(len(samples_with_phenotype), "samples have a phenotype")
# Create vector of phenotypes
y_2W = np.array(df[1])
```
We need to restrict X to the samples with a 2W phenotype, in correct order
```
X_2W = X[np.array([samples.index(sample_id) \
for sample_id in samples_with_phenotype]), :]
```
__Q: How many samples do we have now? And how many SNPs? Does this make the task of biomarker detection simpler or harder?__
```
print(X_2W.shape)
```
__A:__ There are the same number of features but (way) fewer samples: the statistical power has decreased! And so the task of detecting explanatory SNPs is harder.
```
# You can delete X now if you want, to free space
del X
```
#### Load the list of candidate genes
```
# Candidate flowering-time genes: one whitespace-separated line of gene IDs.
with open('data/athaliana.candidates.txt') as f:
    candidate_genes = f.readline().split()
# (the redundant f.close() was removed: `with` closes the file automatically)
```
#### Load the snps to gene mapping
```
# Map each SNP to the gene(s) in/near which it lies.
genes_by_snp = {}  # key: SNP id, value: list of gene ids in/near this SNP
with open('data/athaliana.snps_by_gene.txt') as f:
    for line in f:
        ls = line.split()
        if not ls:
            continue  # skip blank lines defensively
        gene_id = ls[0]            # first token is the gene id
        for snp_id in ls[1:]:      # remaining tokens are the gene's SNPs
            # setdefault replaces the explicit membership check + append
            genes_by_snp.setdefault(snp_id, []).append(gene_id)
```
### Split the data in a train and test set
In machine learning, we always split the data into a *train* set, which serves to fit the model, and a *test* set, which serves to measure the model's performance.
__Q: Do you remember why? What happens if we do both the training and testing on the same data?__
__A:__ Evaluating a model on the same data used to fit the model favors model that overfit the data at hand, and these models have poor generalization performance.
We will set aside a test set, containing 20% of our samples, on which to evaluate the quality of our predictive models.
__Q: What problem occurs if we set a test set that is too large in proportion? What problem occurs when it is set too small?__
__A:__ If the proportion of samples in the test set is too large, the training set becomes too small to be able to fit the model correctly. If it is too small, the measure of performance of the model will be prone to too much variability.
```
from sklearn import model_selection
X_2W_tr, X_2W_te, y_2W_tr, y_2W_te = \
model_selection.train_test_split(X_2W, y_2W, test_size = 0.2, random_state = 17)
print(X_2W_tr.shape, X_2W_te.shape)
```
## Visualize the phenotype
```
h = plt.hist(y_2W_tr, bins = 30)
```
Visualize the genotype's correlation structure
```
import seaborn as sn
sigma = pd.DataFrame(X_2W_tr).corr()
fig, ax = plt.subplots(1, 2)
ax[0].imshow(sigma.iloc[0:1000, 0:1000])
ax[1].imshow(sigma.iloc[72:120, 72:120])
plt.show()
```
__Q: What observation can you make about the phenotype and genotype?__
__A:__ The phenotype has an unimodal distribution (only one peak), but with a few outlying values.
The genotype has a correlation structure which displays "blocks" of high correlation, called *Linkage Disequilibrium* (LD) blocks.
## T-test
Let us start by running a statistical test for association of each SNP feature with the phenotype.
```
import statsmodels.api as sm
```
### T-test on a single SNP
We will perform a linear regression on a single SNP and test whether this SNP has an effect on the phenotype.
```
est = sm.regression.linear_model.OLS(y_2W_tr, sm.add_constant(X_2W_tr[:, 0])).fit()
print(est.summary())
```
__Q: In the previous table, where is the p-value of the T-test? What can you conclude about the effect of the first SNP on the phenotype?__
__A:__ The p-value is 0.310: we cannot reject $\mathcal{H}_0$, and so the first SNP is estimated to have no effect on the phenotype.
We can also visualize the linear regression:
```
sn.regplot(x = 'x', y = 'y', data = pd.DataFrame({'x': X_2W_tr[:, 0], 'y': y_2W_tr})).set(xlim = (-0.1, 1.1))
```
### T-test on all SNPs
```
# Univariate association test per SNP: regress the phenotype on each SNP
# column (with an intercept) and keep the slope coefficient's p-value.
pvalues = []
for j in range(p):
    design = sm.add_constant(X_2W_tr[:, j])  # [intercept, SNP j]
    fitted = sm.regression.linear_model.OLS(y_2W_tr, design).fit()
    pvalues.append(fitted.pvalues[1])        # index 1 = the SNP coefficient
pvalues = np.array(pvalues)
```
### Manhattan plot
The common way to visualize such results is by using a Manhattan plot: we will plot all SNPs on the x-axis, and on the y-axis we'll have the opposite of the log base 10 of the p-value. The lower the p-value, the higher the corresponding marker.
We will also add a horizontal line that corresponds to the _threshold for significance_. Because we are testing multiple hypotheses, we need to lower our threshold accordingly. We will use __Bonferroni correction__ and divide the significance threshold (say, alpha=0.05) by the number of tests, that is, the number of SNPs p.
```
plt.scatter(range(p), # x = SNP position
-np.log10(pvalues)) # y = -log10 p-value
# significance threshold according to Bonferroni correction
t = -np.log10(0.05 / p)
plt.plot([0, p], [t, t])
# plot labels
plt.xlabel("feature")
plt.ylabel("-log10 p-value")
plt.xlim([0, p])
```
__Q: What do you observe? Are any SNPs significantly associated with the phenotype?
Use data/athaliana.snps_by_gene.txt and data/athaliana.candidates.txt to check whether this matches a priori information.__
```
# Answer
thresh = 0.05 / p # significance threshold set using the Bonferroni correction
for snp_idx in np.where(pvalues < thresh)[0]:
print(("%.2e" % pvalues[snp_idx]), snp_names[snp_idx])
for gene_id in genes_by_snp[snp_names[snp_idx]]:
if gene_id in candidate_genes:
print("\t in/near candidate gene %s" % gene_id)
```
__A:__ Chr5_3185806_C is just at significance threshold with a p-value of 2x10^-6. It is in or near gene [AT5G10140](https://www.arabidopsis.org/servlets/TairObject?accession=locus:2184118), which is known to play a role in flowering.
## Linear regression
```
from sklearn import linear_model
model_lr = linear_model.LinearRegression(fit_intercept = True)
model_lr.fit(X_2W_tr, y_2W_tr)
plt.figure(figsize = (12, 5))
plt.scatter(range(p), # x = SNP position
model_lr.coef_, # y = regression weights
s = 10) # point size
plt.xlabel("SNP")
plt.ylabel("regression weight")
plt.xlim([0, p])
```
__Q: What do you observe? How can you interpret these results? Do any of the SNPs strike you as having a strong influence on the phenotype?__
__A:__ The following SNPs are the ones with the ten highest weights (in absolute value). They are all near candidate genes.
```
# Report the ten SNPs with the largest regression weights in absolute value,
# flagging those in/near known candidate genes.
highest_weights = np.abs(model_lr.coef_)
highest_weights.sort()
highest_weights = highest_weights[-10:]
for w in highest_weights:
    # BUG FIX: w is an *absolute* weight, so it must be compared against the
    # absolute coefficients -- the old test `model_lr.coef_ == w` silently
    # skipped every top-10 SNP whose coefficient was negative.
    for snp_idx in np.where(np.abs(model_lr.coef_) == w)[0]:
        print(w, snp_names[snp_idx])
        for gene_id in genes_by_snp[snp_names[snp_idx]]:
            if gene_id in candidate_genes:
                print("\t in/near candidate gene %s" % gene_id)
```
### Model predictive power
In this section, we measure the performance of our model on the test dataset.
We will now look at the predictive power of the lasso estimated model.
__Q: What is the definition of the variance explained? You may use the [scikit learn documentation](https://sklearn.org/modules/classes.html#sklearn-metrics-metrics). What values can this metric take? and to what cases do the extreme values correspond to?__
__A:__ The explained variance is $1 - \frac{\text{variance of the residuals}}{\text{variance of the output}}$. It is equal to one in the case of a perfect fit (the residuals are all equal to zero, which can only happen with overfitting, and is therefore not desirable). A higher value means the genotype explains a large portion of the variability of the phenotype, which is good. It can be negative (in case of arbitrarily poor fits).
```
from sklearn import metrics
y_2W_lr_pred = model_lr.predict(X_2W_te)
print("Percentage of variance explained (using all SNPs): %.2f" % \
metrics.explained_variance_score(y_2W_te, y_2W_lr_pred))
plt.figure(figsize = (5, 5))
plt.scatter(y_2W_te, y_2W_lr_pred)
plt.xlabel("true phenotype")
plt.ylabel("prediction")
plt.xlim([np.min(y_2W_te) - 5, np.max(y_2W_te) + 5])
plt.ylim([np.min(y_2W_te) - 5, np.max(y_2W_te) + 5])
plt.axline(xy1 = [0, 0], slope = 1, c = "black")
```
## Lasso
Define lasso model
```
lasso = linear_model.Lasso(fit_intercept = True, max_iter = 6000)
```
Define cross-validation grid search and learn lasso with cross-validation.
```
alphas = np.logspace(-4., 1., num = 20)
model_l1 = model_selection.GridSearchCV(lasso, param_grid = {'alpha': alphas},
scoring = 'explained_variance')
model_l1.fit(X_2W_tr, y_2W_tr)
plt.figure(figsize = (6, 4))
plt.scatter(range(p), # x = SNP position
model_l1.best_estimator_.coef_) # y = regression weights
plt.xlabel("SNP")
plt.ylabel("lasso regression weight")
plt.xlim([0, p])
```
__Q: How can you interpret these results? How many SNPs contribute to explaining the phenotype?__
```
print("%d SNPs selected" % \
np.nonzero(model_l1.best_estimator_.coef_)[0].shape)
```
__A:__ Only 167 SNPs have been estimated to have non-zero weights. This means that our model selects 167 explanatory SNPs (out of 9419!)
__Q: How many of these SNPs belong to the list of *candidate SNPs*? Complete the two missing lines to compute the number of candidate genes hit and the number of selected SNPs that fall in or near those genes.__
```
# Answer:
candidate_genes_hit = set([])
num_snps_in_candidate_genes = 0
for snp_idx in np.nonzero(model_l1.best_estimator_.coef_)[0]:
for gene_id in genes_by_snp[snp_names[snp_idx]]:
if gene_id in candidate_genes:
candidate_genes_hit.add(gene_id)
num_snps_in_candidate_genes += 1
print("Of which %d are in %d candidate genes" % (num_snps_in_candidate_genes,
len(candidate_genes_hit)))
```
__A:__ Out of the 167 selected SNPs, 176 SNPs (counted with multiplicity) are "close" to candidate genes.
### Predictive power
```
y_2W_l1_pred = model_l1.best_estimator_.predict(X_2W_te)
print("Percentage of variance explained (using %d SNPs): %.2f" % \
(np.nonzero(model_l1.best_estimator_.coef_)[0].shape[0],
metrics.explained_variance_score(y_2W_te, y_2W_l1_pred)))
```
__Q: How does the lasso compare with the OLS (linear regression) in terms of variance explained? What is the advantage of the lasso model for generating biological hypotheses?__
__A:__ Compared to unpenalized linear regression (also called OLS), the lasso has a similar variance explained, while using way fewer SNPs. So it is a better model: we need fewer mutations to explain the phenotype.
Comparing true and predicted phenotypes
```
plt.figure(figsize = (5, 5))
plt.scatter(y_2W_te, y_2W_l1_pred)
plt.xlabel("true phenotype")
plt.ylabel("prediction")
plt.xlim([np.min(y_2W_te) - 0.05, np.max(y_2W_te) + 0.05])
plt.ylim([np.min(y_2W_te) - 0.05, np.max(y_2W_te) + 0.05])
plt.axline(xy1 = [0, 0], slope = 1, c = "black")
```
| github_jupyter |
## Predict Reporter Sequences (Data-Validation)
I will use the gbm to predict the reporter sequences.
```
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.pipeline import Pipeline
# my module imports
from optimalcodon.projects.rnastability.dataprocessing import get_data, general_preprocesing_pipeline
reporters = (
pd.read_csv("../../19-03-13-PredictReportersWithModel/reporters.csv")
.rename(columns={'sequence': 'coding'})
.assign(
gene_id = lambda x: x.reporter_id + '|' + x.optimality,
utrlenlog = np.nan,
cdslenlog = lambda x: np.log(x.coding.str.len()),
key = 'k' # tmp var
)
.drop(['reporter_id', 'optimality', 'description'], axis=1)
)
reporters.head()
(train_x, train_y), (test_x, test_y) = get_data("../19-04-30-EDA/results_data/")
dtypefeaturs = (
test_x[['specie', 'cell_type', 'datatype']]
.drop_duplicates()
.reset_index()
.drop('gene_id', axis=1)
.assign(key = 'k')
)
reporters = (
pd.merge(reporters, dtypefeaturs, on='key')
.drop('key', axis=1)
.set_index('gene_id')
)
reporters.head()
```
## Data Pre-processing
```
print("{} points for training and {} for testing with {} features".format(
train_x.shape[0], test_x.shape[0], test_x.shape[1]))
# pre-processing
preprocessing = general_preprocesing_pipeline(train_x)
preprocessing.fit(train_x)
train_x_transformed = preprocessing.transform(train_x)
train_x_transformed.shape
```
## Fit Algorithm
```
## fit gbm parameters found in grid search
# Hyper-parameter values come from the earlier grid-search run.
gbm = GradientBoostingRegressor(
    loss='huber', alpha=0.9, learning_rate=0.01, n_estimators=2000,
    criterion='friedman_mse', init=None,
    max_depth=10, max_features='log2', max_leaf_nodes=None,
    min_impurity_decrease=0.0, min_impurity_split=None,
    min_samples_leaf=3, min_samples_split=8,
    min_weight_fraction_leaf=0.0,
    n_iter_no_change=None, presort='auto', random_state=None,
    subsample=1.0, tol=0.0001, validation_fraction=0.1,
    verbose=0, warm_start=False)
gbm.fit(train_x_transformed, train_y)

# Predict stability for the reporter sequences and for the held-out test set,
# running both through the same fitted preprocessing pipeline.
reporters['predicted'] = gbm.predict(preprocessing.transform(reporters))
test_x['predicted'] = gbm.predict(preprocessing.transform(test_x))

# Reporters have no measured value; the test set keeps its observed targets.
reporters['observed'] = np.nan
test_x['observed'] = test_y
reporters  # notebook display

# Export the combined predictions.
# NOTE(review): the output filename has no extension -- presumably meant to
# be a .csv; confirm against downstream readers.
(
    reporters
    .append(test_x)
    [['cell_type', 'datatype', 'specie', 'observed', 'predicted']]
    .to_csv('results_data/reporters_pr')
)
```
| github_jupyter |
```
import numpy as np
from astropy.io import fits
import os.path
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
%matplotlib inline
# Build and plot a light curve (counts per time bin) from an RXTE event
# list.  Python 2 script.
# NOTE(review): despite the FITS-style reading below, `eventlist` points at
# a .lc file — confirm it is FITS-formatted.
# eventlist = "/Users/abigailstevens/Reduced_data/GX339-BQPO/95409-01-17-06/eventlist_1.fits"
eventlist = "/Users/abigailstevens/Dropbox/Academic/Conferences\ \&\ Talks/DC_talks/cygx1_counts.lc"
time_binning = .02 # seconds
lc_length = 2 # seconds
show_fits_info = False
pcu = 2 # -1 = all
channel = -1 # -1 = all
detchans = 64  # number of detector energy channels
if not os.path.isfile(eventlist):
    raise Exception("ERROR: Event list does not exist: %s" % eventlist)
assert pcu <= 4, "PCU must be between 0 and 4 inclusive, or -1 for all PCUs."
try:
    fits_hdu = fits.open(eventlist)
except IOError:
    # NOTE(review): execution continues after this print and will fail on the
    # next line because fits_hdu is undefined — consider re-raising.
    print "Issue opening fits file event list: %s" % eventlist
header = fits_hdu[0].header
data = fits_hdu[1].data
fits_hdu.close()
if show_fits_info:
    # NOTE(review): header.keys is a bound method in astropy; printing it
    # shows the method object — header.keys() was probably intended.
    print header.keys
    print "\n", data.columns.names
if pcu != -1:
    # NOTE(review): this mask KEEPS events whose PCUID is NOT equal to `pcu`;
    # selecting events from PCU `pcu` would need `== pcu`.  Confirm intent.
    PCU2_mask = data.field('PCUID') != pcu
    data = data[PCU2_mask]
# if channel != -1:
#     channel_mask = data.field('CHANNEL') == channel
#     data = data[channel_mask]
# print len(data)
# Shift times so the first event is at t = 0.
absolute_start_time = data.field('TIME')[0]
time = np.asarray(data.field('TIME')) - absolute_start_time
chan = np.asarray(data.field('CHANNEL'))
# start_time = time[0]
start_time = 10
end_time = time[-1]
if lc_length > end_time:
    print "Requested lightcurve length is longer than data. Making light curve of whole data set."
    lc_length = end_time
# 2-D histogram: photon counts per (time bin, energy channel).
time_edges = np.arange(start_time, start_time+lc_length+time_binning, time_binning)
chan_edges = np.arange(0,detchans+1, 1)
lightcurve, t_edges, c_edges = np.histogram2d(time, chan, bins=[time_edges, chan_edges])
t_bin_centers = 0.5 * (t_edges[1:]+t_edges[:-1])
mean_count_rate = np.sum(lightcurve, axis=0) / lc_length
if channel == -1:
    # Sum absolute channels 2-25 into a single "reference band" curve.
    lc = np.sum(lightcurve[:,2:26], axis=1)
    title = "Reference band"
    # NOTE(review): the empty title below overwrites "Reference band" —
    # remove one of the two assignments once the intended title is decided.
    title = ""
    mean_rate = np.sum(mean_count_rate[2:26])
else:
    lc = lightcurve[:,channel]
    title = "Channel %d" % channel
    mean_rate = mean_count_rate[channel]
print "Mean count rate:", mean_rate
# Plot the light curve and save it as EPS (interactive display suppressed).
font_prop = font_manager.FontProperties(size=18)
fig, ax = plt.subplots(1, 1, figsize=(16, 6), dpi=300)
ax.plot(t_bin_centers-start_time, lc, lw=3, color='purple')
ax.set_title(title)
ax.set_xlabel("Arbitrary time (s)", fontproperties=font_prop)
ax.set_ylabel("Photon counts", fontproperties=font_prop)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.set_ylim(5, 23)
# plt.show()
plt.savefig("/Users/abigailstevens/Dropbox/Academic/95409-01-17-06_ref_lc.eps")
plt.close()
```
| github_jupyter |
### 6.a. Solving for ridge regression
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Grid-evaluate the one-dimensional ridge objective
# f(beta) = (y - beta)^2 + lambda * beta^2 over beta in [-100, 100]
# and locate its minimum numerically.
y = 10.0
lamb = 24.5
FX = []
BETA = []
steps = np.linspace(-100, 100, 20000)
for beta in steps:
    fx = (y - beta)**2 + (lamb*(beta**2))
    FX.append(fx)
    BETA.append(beta)
plt.xkcd()
plt.figure(figsize=(11, 5))
plt.plot(BETA, FX, color = 'green')
df = pd.DataFrame([BETA, FX]).T
df.columns = ['BETA', 'FX']
df.head()
mindf = min(df['FX'])
mindf
# Row(s) where the objective attains its grid minimum.
df.loc[df['FX'] == mindf]
```
*So, the corresponding BETA value for the least value (=96.120047) for FX is 0.351759. I will now input the values for
$y_j$ and $\lambda$ to find the estimate and see if they are equivalent.*
```
# Closed-form ridge minimizer for a single coefficient: beta = y / (1 + lambda).
beta_est = y / (1 + lamb)
beta_est
```
**Therefore, we can see that the estimated $\beta$ is a good approximation for the beta that gives the least value of fx.**
### 6.b. Solving for lasso regression
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
```
#### Case 1: $y_j$ > $\lambda$/2
```
# Grid-evaluate the lasso objective f(beta) = (y - beta)^2 + lambda*|beta|
# for the case y_j > lambda/2 and locate its minimum numerically.
y = 10.0
lamb = 5.5
FX = []
BETA = []
steps = np.linspace(-100, 100, 20000)
for beta in steps:
    fx = (y - beta)**2 + (lamb*np.abs(beta))
    FX.append(fx)
    BETA.append(beta)
plt.xkcd()
plt.figure(figsize=(11, 5))
plt.plot(BETA, FX, color = 'green')
df = pd.DataFrame([BETA, FX]).T
df.columns = ['BETA', 'FX']
df.head()
mindf = min(df['FX'])
mindf
# Row(s) where the objective attains its grid minimum.
df.loc[df['FX'] == mindf]
```
*So, the corresponding BETA value for the least value (=47.437522) for FX is 7.245362. I will now input the values for
$y_j$ and $\lambda$ to find the estimate and see if they are equivalent.*
```
# Soft-thresholding solution for y_j > lambda/2: beta = y - lambda/2.
beta_est = y - (lamb / 2)
beta_est
```
**Therefore, we can see that the estimated $\beta$ is a good approximation for the beta that gives the least value of fx.**
#### Case 2: $y_j$ < -$\lambda$/2
```
# Same lasso objective, case y_j < -lambda/2.
y = -100.0
lamb = 25
FX = []
BETA = []
steps = np.linspace(-100, 100, 20000)
for beta in steps:
    fx = (y - beta)**2 + (lamb*np.abs(beta))
    FX.append(fx)
    BETA.append(beta)
plt.xkcd()
plt.figure(figsize=(11, 5))
plt.plot(BETA, FX, color = 'green')
df = pd.DataFrame([BETA, FX]).T
df.columns = ['BETA', 'FX']
df.head()
mindf = min(df['FX'])
mindf
# Row(s) where the objective attains its grid minimum.
df.loc[df['FX'] == mindf]
```
*So, the corresponding BETA value for the least value (=2343.75) for FX is -87.499375. I will now input the values for
$y_j$ and $\lambda$ to find the estimate and see if they are equivalent.*
```
# Soft-thresholding solution for y_j < -lambda/2: beta = y + lambda/2.
beta_est = y + (lamb / 2)
beta_est
```
**Therefore, we can see that the estimated $\beta$ is a good approximation for the beta that gives the least value of fx.**
#### Case 3: |$y_j$| $\leq$ $\lambda$/2
```
# Same lasso objective, case |y_j| <= lambda/2 — the minimizer should be 0.
y = -10.0
lamb = 25
FX = []
BETA = []
steps = np.linspace(-100, 100, 20000)
for beta in steps:
    fx = (y - beta)**2 + (lamb*np.abs(beta))
    FX.append(fx)
    BETA.append(beta)
plt.xkcd()
plt.figure(figsize=(11, 5))
plt.plot(BETA, FX, color = 'green')
df = pd.DataFrame([BETA, FX]).T
df.columns = ['BETA', 'FX']
df.head()
mindf = min(df['FX'])
mindf
# Row(s) where the objective attains its grid minimum (expect beta ~ 0).
df.loc[df['FX'] == mindf]
```
**So, the corresponding BETA value for the lowest fx(=100.025026) is $\approx$0. Therefore, 0 is a good approximation of the BETA that gives the least value of fx.**
| github_jupyter |
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/HubSpot_Logo.svg/220px-HubSpot_Logo.svg.png" alt="drawing" width="200" align='left'/>
# Hubspot - Update linkedinbio from google
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Hubspot/Hubspot_update_linkedinbio_from_google.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/></a>
#hubspot #crm #sales #google
## Input
### Import library
```
from naas_drivers import hubspot
import naas
import pandas as pd
from googlesearch import search
import time
import re
```
### Enter Hubspot api key
```
# NOTE(review): never commit a live API key — load it from an environment
# variable or a secret store instead of hard-coding it here.
auth_token = "YOUR_HUBSPOT_API_KEY"
```
### Connect to hubspot
```
# Create an authenticated Hubspot client used by the cells below.
hs = hubspot.connect(auth_token)
```
### Schedule your notebook everyday
```
# Re-run this notebook automatically every day at 06:00.
naas.scheduler.add(cron="0 6 * * *")
```
### Get all contacts in Hubspot
```
# Contact properties to retrieve; missing values are normalised to the
# sentinel string "Not Defined" so they can be filtered below.
properties_list = [
    "hs_object_id",
    "firstname",
    "lastname",
    "linkedinbio",
]
hubspot_contacts = hs.contacts.get_all(properties_list).fillna("Not Defined")
hubspot_contacts
```
# Model
### Filter to get linkedinbio "Not Defined" and "firstname" and "lastname" defined
```
df_to_update = hubspot_contacts.copy()
# Filter on "Not defined"
# Keep contacts that have both names but no LinkedIn bio yet.
df_to_update = df_to_update[(df_to_update.firstname != "Not Defined") &
                            (df_to_update.lastname != "Not Defined") &
                            (df_to_update.linkedinbio == "Not Defined")].reset_index(drop=True)
df_to_update
```
### Search bio in Google with firstname and lastname
```
def get_bio(firstname, lastname):
    """Return the first LinkedIn profile URL found via a Google search.

    Queries Google for "<firstname>+<lastname>+Linkedin" and scans the top
    10 results for a linkedin.com/in/ profile link.

    Returns:
        The matched URL with stray spaces removed, or None if no result
        matched.
    """
    # Init linkedinbio
    linkedinbio = None
    # Create query
    query = f"{firstname}+{lastname}+Linkedin"
    print("Google query: ", query)
    # BUG FIX: raw string with escaped dots.  The previous pattern used
    # unescaped "." (matching ANY character, e.g. "linkedinXcom") and "\/"
    # escapes that raise invalid-escape warnings in a non-raw string.
    pattern = r"https://.+\.linkedin\.com/in/.([^?])+"
    # Search in Google
    for i in search(query, tld="com", num=10, stop=10, pause=2):
        result = re.search(pattern, i)
        # Return value if result is not None
        if result is not None:
            return result.group(0).replace(" ", "")
        # Otherwise wait politely before inspecting the next result.
        time.sleep(2)
    return linkedinbio
# Look up a LinkedIn URL for every contact that needs one.  `_` is the
# dataframe index label, reused below to write the result back in place.
for _, row in df_to_update.iterrows():
    firstname = row.firstname
    lastname = row.lastname
    # Get linkedinbio
    linkedinbio = get_bio(firstname, lastname)
    df_to_update.loc[_, "linkedinbio"] = linkedinbio
df_to_update
```
# Output
### Update linkedinbio in Hubspot
```
# Push every freshly-found LinkedIn URL back to Hubspot.
for _, row in df_to_update.iterrows():
    # Get data
    hs_object_id = row.hs_object_id
    linkedinbio = row.linkedinbio
    # Update LK Bio — only contacts for which a URL was actually found.
    # (idiom fix: identity comparison with None; the dead `data = {}`
    # initialisation was removed.)
    if linkedinbio is not None:
        data = {"properties": {"linkedinbio": linkedinbio}}
        hs.contacts.patch(hs_object_id, data)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_08_3_keras_hyperparameters.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 8: Kaggle Data Sets**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 8 Material
* Part 8.1: Introduction to Kaggle [[Video]](https://www.youtube.com/watch?v=v4lJBhdCuCU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_1_kaggle_intro.ipynb)
* Part 8.2: Building Ensembles with Scikit-Learn and Keras [[Video]](https://www.youtube.com/watch?v=LQ-9ZRBLasw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_2_keras_ensembles.ipynb)
* **Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters** [[Video]](https://www.youtube.com/watch?v=1q9klwSoUQw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_3_keras_hyperparameters.ipynb)
* Part 8.4: Bayesian Hyperparameter Optimization for Keras [[Video]](https://www.youtube.com/watch?v=sXdxyUCCm8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb)
* Part 8.5: Current Semester's Kaggle [[Video]](https://www.youtube.com/watch?v=PHQt0aUasRg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_5_kaggle_project.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
# Startup CoLab
# Detect Google CoLab: the %tensorflow_version magic exists only there, so
# any failure means we are running locally.  The bare except is deliberate —
# the magic can fail in several different ways outside CoLab.
try:
    %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Render an elapsed time in seconds as "H:MM:SS.ss"."""
    whole_hours, after_hours = divmod(sec_elapsed, 60 * 60)
    whole_minutes = int(after_hours // 60)
    leftover_seconds = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(
        int(whole_hours), whole_minutes, leftover_seconds)
```
# Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters
You have probably noticed several hyperparameters introduced previously in this course that you need to choose for your neural network. The number of layers, neuron counts per layers, layer types, and activation functions are all choices you must make to optimize your neural network. Some of the categories of hyperparameters for you to choose from come from the following list:
* Number of Hidden Layers and Neuron Counts
* Activation Functions
* Advanced Activation Functions
* Regularization: L1, L2, Dropout
* Batch Normalization
* Training Parameters
The following sections will introduce each of these categories for Keras. While I will provide you with some general guidelines for hyperparameter selection; no two tasks are the same. You will benefit from experimentation with these values to determine what works best for your neural network. In the next part, we will see how machine learning can select some of these values on its own.
### Number of Hidden Layers and Neuron Counts
The structure of Keras layers is perhaps the hyperparameters that most become aware of first. How many layers should you have? How many neurons on each layer? What activation function and layer type should you use? These are all questions that come up when designing a neural network. There are many different [types of layer](https://keras.io/layers/core/) in Keras, listed here:
* **Activation** - You can also add activation functions as layers. Making use of the activation layer is the same as specifying the activation function as part of a Dense (or other) layer type.
* **ActivityRegularization** Used to add L1/L2 regularization outside of a layer. You can specify L1 and L2 as part of a Dense (or other) layer type.
* **Dense** - The original neural network layer type. In this layer type, every neuron connects to the next layer. The input vector is one-dimensional, and placing specific inputs next to each other does not affect.
* **Dropout** - Dropout consists of randomly setting a fraction rate of input units to 0 at each update during training time, which helps prevent overfitting. Dropout only occurs during training.
* **Flatten** - Flattens the input to 1D and does not affect the batch size.
* **Input** - A Keras tensor is a tensor object from the underlying back end (Theano, TensorFlow, or CNTK), which we augment with specific attributes to build a Keras model just by knowing the inputs and outputs of the model.
* **Lambda** - Wraps arbitrary expression as a Layer object.
* **Masking** - Masks a sequence by using a mask value to skip timesteps.
* **Permute** - Permutes the dimensions of the input according to a given pattern. Useful for tasks such as connecting RNNs and convolutional networks.
* **RepeatVector** - Repeats the input n times.
* **Reshape** - Similar to Numpy reshapes.
* **SpatialDropout1D** - This version performs the same function as Dropout; however, it drops entire 1D feature maps instead of individual elements.
* **SpatialDropout2D** - This version performs the same function as Dropout; however, it drops entire 2D feature maps instead of individual elements
* **SpatialDropout3D** - This version performs the same function as Dropout; however, it drops entire 3D feature maps instead of individual elements.
There is always trial and error for choosing a good number of neurons and hidden layers. Generally, the number of neurons on each layer will be larger closer to the hidden layer and smaller towards the output layer. This configuration gives the neural network a somewhat triangular or trapezoid appearance.
### Activation Functions
Activation functions are a choice that you must make for each layer. Generally, you can follow this guideline:
* Hidden Layers - RELU
* Output Layer - Softmax for classification, linear for regression.
Some of the common activation functions in Keras are listed here:
* **softmax** - Used for multi-class classification. Ensures all output neurons behave as probabilities and sum to 1.0.
* **elu** - Exponential linear unit. Exponential Linear Unit or its widely known name ELU is a function that tend to converge cost to zero faster and produce more accurate results. Can produce negative outputs.
* **selu** - Scaled Exponential Linear Unit (SELU), essentially **elu** multiplied by a scaling constant.
* **softplus** - Softplus activation function. $log(exp(x) + 1)$ [Introduced](https://papers.nips.cc/paper/1920-incorporating-second-order-functional-knowledge-for-better-option-pricing.pdf) in 2001.
* **softsign** Softsign activation function. $x / (abs(x) + 1)$ Similar to tanh, but not widely used.
* **relu** - Very popular neural network activation function. Used for hidden layers, cannot output negative values. No trainable parameters.
* **tanh** Classic neural network activation function, though often replaced by relu family on modern networks.
* **sigmoid** - Classic neural network activation. Often used on output layer of a binary classifier.
* **hard_sigmoid** - Less computationally expensive variant of sigmoid.
* **exponential** - Exponential (base e) activation function.
* **linear** - Pass through activation function. Usually used on the output layer of a regression neural network.
For more information about Keras activation functions refer to the following:
* [Keras Activation Functions](https://keras.io/activations/)
* [Activation Function Cheat Sheets](https://ml-cheatsheet.readthedocs.io/en/latest/activation_functions.html)
### Advanced Activation Functions
Hyperparameters are not changed when the neural network trains. You, the network designer, must define the hyperparameters. The neural network learns regular parameters during neural network training. Neural network weights are the most common type of regular parameter. The "[advanced activation functions](https://keras.io/layers/advanced-activations/)," as Keras call them, also contain parameters that the network will learn during training. These activation functions may give you better performance than RELU.
* **LeakyReLU** - Leaky version of a Rectified Linear Unit. It allows a small gradient when the unit is not active, controlled by alpha hyperparameter.
* **PReLU** - Parametric Rectified Linear Unit, learns the alpha hyperparameter.
### Regularization: L1, L2, Dropout
* [Keras Regularization](https://keras.io/regularizers/)
* [Keras Dropout](https://keras.io/layers/core/)
### Batch Normalization
* [Keras Batch Normalization](https://keras.io/layers/normalization/)
* Ioffe, S., & Szegedy, C. (2015). [Batch normalization: Accelerating deep network training by reducing internal covariate shift](https://arxiv.org/abs/1502.03167). *arXiv preprint arXiv:1502.03167*.
Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. Can allow learning rate to be larger.
### Training Parameters
* [Keras Optimizers](https://keras.io/optimizers/)
* **Batch Size** - Usually small, such as 32 or so.
* **Learning Rate** - Usually small, 1e-3 or so.
### Experimenting with Hyperparameters
```
# Load Jeff Heaton's synthetic dataset and build the module-level feature
# matrix `x` and one-hot target `y` consumed by evaluate_network() below.
import pandas as pd
from scipy.stats import zscore
# Read the data set
df = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv",
    na_values=['NA','?'])
# Generate dummies for job
df = pd.concat([df,pd.get_dummies(df['job'],prefix="job")],axis=1)
df.drop('job', axis=1, inplace=True)
# Generate dummies for area
df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1)
df.drop('area', axis=1, inplace=True)
# Missing values for income: impute with the median.
med = df['income'].median()
df['income'] = df['income'].fillna(med)
# Standardize ranges
df['income'] = zscore(df['income'])
df['aspect'] = zscore(df['aspect'])
df['save_rate'] = zscore(df['save_rate'])
df['age'] = zscore(df['age'])
df['subscriptions'] = zscore(df['subscriptions'])
# Convert to numpy - Classification
x_columns = df.columns.drop('product').drop('id')
x = df[x_columns].values
dummies = pd.get_dummies(df['product']) # Classification
products = dummies.columns
y = dummies.values
import pandas as pd
import os
import numpy as np
import time
import tensorflow.keras.initializers
import statistics
import tensorflow.keras
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import StratifiedShuffleSplit
from tensorflow.keras.layers import LeakyReLU,PReLU
from tensorflow.keras.optimizers import Adam
def evaluate_network(dropout,lr,neuronPct,neuronShrink):
    """Train and score one network configuration; return -mean(log loss).

    Reads the module-level globals ``x``, ``y`` and ``df`` built above.
    Hidden layers start at ``neuronPct * 5000`` units and shrink by the
    factor ``neuronShrink`` until fewer than 25 units remain, capped at
    10 layers.

    Args:
        dropout: dropout rate applied after each hidden layer.
        lr: Adam learning rate.
        neuronPct: fraction of 5000 giving the first hidden layer's size.
        neuronShrink: per-layer multiplicative shrink factor.

    Returns:
        Negative mean validation log loss across the bootstrap splits
        (negated so "larger is better" for maximising optimizers).
    """
    SPLITS = 2
    # Bootstrap: stratified random train/validation splits.
    boot = StratifiedShuffleSplit(n_splits=SPLITS, test_size=0.1)
    # Track progress
    mean_benchmark = []
    epochs_needed = []
    num = 0
    neuronCount = int(neuronPct * 5000)
    # Loop through samples
    for train, test in boot.split(x,df['product']):
        start_time = time.time()
        num += 1
        # Split train and test
        x_train = x[train]
        y_train = y[train]
        x_test = x[test]
        y_test = y[test]
        # Construct neural network
        model = Sequential()
        layer = 0
        while neuronCount > 25 and layer < 10:
            if layer == 0:
                # Only the first layer declares the input dimension.
                model.add(Dense(int(neuronCount),
                                input_dim=x.shape[1],
                                activation=PReLU()))
            else:
                model.add(Dense(int(neuronCount), activation=PReLU()))
            # BUG FIX: `layer` was never incremented, so the 10-layer cap
            # never applied and every layer took the `layer == 0` branch.
            layer += 1
            model.add(Dropout(dropout))
            # Shrink the width for the next layer; kept as a float so the
            # repeated shrink is smooth, truncated to int at the Dense call.
            neuronCount = neuronCount * neuronShrink
        model.add(Dense(y.shape[1],activation='softmax')) # Output
        model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=lr))
        monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
                                patience=100, verbose=0, mode='auto',
                                restore_best_weights=True)
        # Train on the bootstrap sample
        model.fit(x_train,y_train,validation_data=(x_test,y_test),
                  callbacks=[monitor],verbose=0,epochs=1000)
        epochs = monitor.stopped_epoch
        epochs_needed.append(epochs)
        # Predict on the out of boot (validation)
        pred = model.predict(x_test)
        # Measure this bootstrap's log loss
        y_compare = np.argmax(y_test,axis=1) # For log loss calculation
        score = metrics.log_loss(y_compare, pred)
        mean_benchmark.append(score)
        m1 = statistics.mean(mean_benchmark)
        m2 = statistics.mean(epochs_needed)
        mdev = statistics.pstdev(mean_benchmark)
        # Record this iteration
        time_took = time.time() - start_time
    # Free graph memory between hyperparameter evaluations.
    tensorflow.keras.backend.clear_session()
    return (-m1)
# Sanity-check a single hyperparameter combination.
print(evaluate_network(
    dropout=0.2,
    lr=1e-3,
    neuronPct=0.2,
    neuronShrink=0.2))
```
| github_jupyter |
```
%matplotlib inline
```
# Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
```
# Authors: Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Chris Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
from mne.viz import centers_to_edges
```
## Simulate data
We'll simulate data with a known spectro-temporal structure.
```
# Simulate 40 epochs of two-gradiometer noise with a Hanning-ramped 50 Hz
# burst between 0.45 s and 0.55 s, wrapped in an mne.EpochsArray.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)  # peak-to-peak rejection threshold per channel type
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
    events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
                     reject=reject)
epochs.average().plot()
```
## Calculate a time-frequency representation (TFR)
Below we'll demonstrate the output of several TFR functions in MNE:
* `mne.time_frequency.tfr_multitaper`
* `mne.time_frequency.tfr_stockwell`
* `mne.time_frequency.tfr_morlet`
### Multitaper transform
First we'll use the multitaper method for calculating the TFR.
This creates several orthogonal tapering windows in the TFR estimation,
which reduces variance. We'll also show some of the parameters that can be
tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
properties, and thus a different TFR. You can trade time resolution or
frequency resolution or both in order to get a reduction in variance.
```
# Frequencies of interest (5-97 Hz in 3 Hz steps), shared by all TFR plots.
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3.  # Define our color limits.
```
**(1) Least smoothing (most variance/background fluctuations)**
```
# (1) Short windows, minimum bandwidth: a single taper, so the estimate has
# the most variance.
n_cycles = freqs / 2.
time_bandwidth = 2.0  # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
           title='Sim: Least smoothing, most variance')
```
**(2) Less frequency smoothing, more time smoothing.**
```
# (2) Double the window length; doubling time_bandwidth too keeps the
# frequency smoothing equal to case (1) while adding time smoothing.
n_cycles = freqs  # Increase time-window length to 1 second.
time_bandwidth = 4.0  # Same frequency-smoothing as (1), 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
           title='Sim: Less frequency smoothing, more time smoothing')
```
**(3) Less time smoothing, more frequency smoothing.**
```
# (3) Keep the short windows of (1) but raise time_bandwidth: more tapers,
# i.e. more frequency smoothing for the same time smoothing.
n_cycles = freqs / 2.
time_bandwidth = 8.0  # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
           title='Sim: Less time smoothing, more frequency smoothing')
```
### Stockwell (S) transform
Stockwell uses a Gaussian window to balance temporal and spectral resolution.
Importantly, frequency bands are phase-normalized, hence strictly comparable
with regard to timing, and the input signal can be recovered from the
transform in a lossless way if we disregard numerical errors. In this case,
we control the spectral / temporal resolution by specifying different widths
of the gaussian window using the ``width`` parameter.
```
# Compare three Gaussian window widths for the Stockwell (S) transform.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
    power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
    power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
               colorbar=False)
    ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
```
### Morlet Wavelets
Finally, show the TFR using morlet wavelets, which are a sinusoidal wave
with a gaussian envelope. We can control the balance between spectral and
temporal resolution with the ``n_cycles`` parameter, which defines the
number of cycles to include in the window.
```
# Morlet TFRs for three settings of n_cycles (fixed 1, fixed 3, scaled by
# frequency) to show the time/frequency resolution trade-off.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
    power = tfr_morlet(epochs, freqs=freqs,
                       n_cycles=n_cycles, return_itc=False)
    power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
               axes=ax, show=False, colorbar=False)
    # Reuse the loop variable purely for a readable title.
    n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
    ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
```
## Calculating a TFR without averaging over epochs
It is also possible to calculate a TFR without averaging across trials.
We can do this by using ``average=False``. In this case, an instance of
:class:`mne.time_frequency.EpochsTFR` is returned.
```
# average=False returns an EpochsTFR (one TFR per epoch); average afterwards.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
                   n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
              title='Using Morlet wavelets and EpochsTFR', show=False)
```
## Operating on arrays
MNE also has versions of the functions above which operate on numpy arrays
instead of MNE objects. They expect inputs of the shape
``(n_epochs, n_channels, n_times)``. They will also return a numpy array
of shape ``(n_epochs, n_channels, n_freqs, n_times)``.
```
# Array API: operate directly on the (n_epochs, n_channels, n_times) array
# and plot the resulting (n_channels, n_freqs, n_times) average power.
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
                         freqs=freqs, n_cycles=n_cycles,
                         output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
x, y = centers_to_edges(epochs.times * 1000, freqs)
mesh = ax.pcolormesh(x, y, power[0], cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
```
| github_jupyter |
```
!python --version
!pip install statsmodels
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import statsmodels
import statsmodels.api as sm
print("Pandas==",pd.__version__, sep='')
print("Numpy==", np.__version__, sep='')
print("matplotlib==", matplotlib.__version__, sep='')
print("sklearn==", np.__version__, sep='')
print("statsmodels==", np.__version__, sep='')
```
## Read Data
Da es noch ein paar Schwierigkeiten mit dem Import der preprocessor files gab, werden die jeweiligen Funktionen in
diesem Notebook definiert und ausgeführt.
```
# read data power plants
# Production data for three plants (A, B, C) plus 2019 weather for Aargau.
data_power_plant_a = pd.read_csv("../data/data_power_plants/A.csv")
data_power_plant_b = pd.read_csv("../data/data_power_plants/B.csv")
data_power_plant_c = pd.read_csv("../data/data_power_plants/C.csv")
data_weather = pd.read_csv("../data/data_weather/weather_aargau_2019.csv")
def _format_columns(df):
columns_lower = df.columns.str.lower()
columns_clean = columns_lower.str.replace("-", "")
df.columns = columns_clean
return df
def _set_datetime_index(df):
""" create datetime index based on local_time,
and resampled mean per hour"""
df["timestamp"] = pd.to_datetime(df["timestamp"])
df.set_index(df["timestamp"], inplace=True)
df = df.resample("h").mean()
return df
# Apply the cleaning helpers to every power plant data set: normalised
# column names, then an hourly datetime index.
data_power_plant_a = _format_columns(data_power_plant_a)
data_power_plant_b = _format_columns(data_power_plant_b)
data_power_plant_c = _format_columns(data_power_plant_c)
data_power_plant_a = _set_datetime_index(data_power_plant_a)
data_power_plant_b = _set_datetime_index(data_power_plant_b)
data_power_plant_c = _set_datetime_index(data_power_plant_c)
```
## Data Visualisation
Eine einfache Visualisierung der Daten für einen ersten Überblick.
--> Erkenntnisse:
- Power Plant C hat weniger Daten (Spalten) als die anderen zwei
- Alle Power Plans unterscheiden sich stark, was ihre Energieproduktion angeht
```
# Quick visual overview: raw time series per plant, then histograms of the
# non-zero grid feed-in values.
data_power_plant_a
data_power_plant_b
data_power_plant_c
plt.plot(data_power_plant_a)
plt.title("Power Plant A")
plt.legend(data_power_plant_a.columns)
plt.show()
plt.plot(data_power_plant_b)
plt.title("Power Plant B")
plt.legend(data_power_plant_b.columns)
plt.show()
plt.plot(data_power_plant_c)
plt.title("Power Plant C")
plt.legend(data_power_plant_c.columns)
plt.show()
# range starts just above 0 to exclude the many zero-production hours.
plt.hist(data_power_plant_a["grid_feedin_kw"]
         , bins = 30
         , range = (0.0001, max(data_power_plant_a["grid_feedin_kw"]))
         )
plt.title("Power Plant A (>0)")
plt.show()
plt.hist(data_power_plant_b["grid_feedin_kw"]
         , bins = 30
         , range = (0.0001, max(data_power_plant_b["grid_feedin_kw"]))
         )
plt.title("Power Plant B (>0)")
plt.show()
plt.hist(data_power_plant_c["grid_feedin_kw"]
         , bins = 30
         , range = (0.0001, max(data_power_plant_c["grid_feedin_kw"]))
         )
plt.title("Power Plant C (>0)")
plt.show()
```
## Pre Process Weather Data + Visualization
Findings:
- Wetterdaten fehlen für eine Stunde --> interpoliert
```
def weather_format_columns(df):
    """Normalize the column names in place: lowercase and strip "-"."""
    cleaned = [col.lower().replace("-", "") for col in df.columns]
    df.columns = cleaned
    return df
def weather_set_datetime_index(df):
    """Index the weather frame by the parsed ``local_time`` column
    (stored as ``timestamp``) and resample to hourly means."""
    stamps = pd.to_datetime(df["local_time"]).rename("timestamp")
    df["timestamp"] = stamps
    df.set_index(stamps, inplace=True)
    return df.resample("h").mean()
# Apply the same normalisation/indexing pipeline to the weather data.
data_weather = weather_format_columns(data_weather)
data_weather = weather_set_datetime_index(data_weather)
data_weather
```
Find Rows with missing Data
```
# Show all rows that contain at least one missing value
data_weather[data_weather.isnull().any(axis=1)]
```
Look at the data of this day (Raw output) --> Only temperature has a value, which would not be correct, if we simply
take the average of the time before and after the missing values
```
# Inspect the day around the gap (likely DST switch) in the raw data
data_weather.loc['2019-03-31']
```
Interpolate missing data
```
# Fill the missing hour by linear interpolation
data_weather_clean = data_weather.interpolate()
# Plot every weather variable; the loop replaces eight copy-pasted
# plot/title/show blocks while producing identical figures.
for column in ["temperature", "precipitation", "snowfall", "snow_mass",
               "air_density", "radiation_surface", "radiation_toa",
               "cloud_cover"]:
    plt.plot(data_weather_clean[column])
    plt.title(column)
    plt.show()
# Add calendar features derived from the datetime index for the regression
data_weather_enhanced = data_weather_clean.copy()
data_weather_enhanced['month'] = data_weather_enhanced.index.month
data_weather_enhanced['day'] = data_weather_enhanced.index.day
data_weather_enhanced['hour'] = data_weather_enhanced.index.hour
```
## Prepare Data for a simple Linear Regression
To Do:
- Daten der Power Plants mit den Wetterdaten verbinden
- Daten pro Power Plant in Test und Validation Set splitten
- Simples Regressionsmodell erstellen
- Regressionsmodell testen
- Sobald das Regressionsmodell genügt -> Output predicten und neues DF generieren (actual output, Works properly)
#### Merge Power Plant and Weather Data
```
# Inner Join weil es keine Wetterdaten für 1.1.2019 00:00 Uhr gibt
df_a = data_power_plant_a.copy().join(data_weather_enhanced, how='inner')
df_b = data_power_plant_b.copy().join(data_weather_enhanced, how='inner')
df_c = data_power_plant_c.copy().join(data_weather_enhanced, how='inner')
```
#### split data
```
# 80/20 train/test split per plant; fixed seed for reproducibility
df_a_train, df_a_test = train_test_split(df_a, test_size=0.2, random_state=123)
df_b_train, df_b_test = train_test_split(df_b, test_size=0.2, random_state=123)
df_c_train, df_c_test = train_test_split(df_c, test_size=0.2, random_state=123)
# Sanity check: total vs. split sizes per plant
print(len(df_a))
print(len(df_a_train))
print(len(df_a_test))
print('-'*20)
print(len(df_b))
print(len(df_b_train))
print(len(df_b_test))
print('-'*20)
print(len(df_c))
print(len(df_c_train))
print(len(df_c_test))
```
#### Create Linear Regression Model
Create a simple Regression Model with all possible input data
```
# Feature/target split for power plant A; the feature list is shared
# between train and test instead of being duplicated.
feature_columns = ["temperature", "precipitation", "snowfall", "snow_mass",
                   "air_density", "radiation_surface", "radiation_toa",
                   "cloud_cover", "month", "day", "hour"]
X_train = df_a_train[feature_columns]
y_train = df_a_train["grid_feedin_kw"]
X_test = df_a_test[feature_columns]
y_test = df_a_test["grid_feedin_kw"]
# Ordinary least squares (no intercept: exog is passed as-is)
regressor_OLS = sm.OLS(endog=y_train, exog=X_train).fit()
regressor_OLS.summary()
print("The model degree of freedom: ", regressor_OLS.df_model)
# BUG FIX: the original printed the mean residual under this label;
# df_resid is the actual residual degree of freedom.
print("The residual degree of freedom: ", regressor_OLS.df_resid)
print("-"*20, "\n")
y_predict_ols = regressor_OLS.predict(X_test)
print("Average predicted grid_feedin_kw: ", sum(y_predict_ols)/ len(y_predict_ols))
print("Average actual grid_feedin_kw: ", sum(y_test)/ len(y_test))
print("Difference in prediction: ", (sum(y_predict_ols)/ len(y_predict_ols)) / (sum(y_test)/ len(y_test)))
```
Make a prediction for the whole dataset and create a new pandas dataframe with only the actual value and the predicted value
```
# Frame holding actual production next to the OLS prediction for plant A
df_predict_actual = pd.DataFrame(df_a.copy()["grid_feedin_kw"])
df_predict_actual["prediction"] = regressor_OLS.predict(df_a[["temperature", "precipitation", "snowfall", "snow_mass", "air_density", "radiation_surface", "radiation_toa", "cloud_cover", "month", "day", "hour"]])
```
Make all prediction below 0 to 0 (as there can't be any value below 0)
```
# Clamp negative predictions to zero (negative energy production is
# impossible); Series.clip is the vectorised equivalent of the old
# per-element lambda.
df_predict_actual["prediction"] = df_predict_actual["prediction"].clip(lower=0)
```
Plot the data
```
# Actual vs. predicted time series, separately and overlaid
plt.plot(df_predict_actual["grid_feedin_kw"])
plt.title("Actual produced energy")
plt.show()
plt.plot(df_predict_actual["prediction"])
plt.title("Prediction energy production")
plt.show()
plt.plot(df_predict_actual)
plt.legend(df_predict_actual.columns)
plt.title("Actual vs. Produced")
plt.show()
# Absolute error (actual - prediction), as series and histogram
plt.plot( df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"])
plt.title("Absolute distance)")
plt.show()
plt.hist(df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"])
plt.title("Absolute distance)")
plt.show()
# relative difference (+1 via lambda to avoid division by zero)
plt.plot(
    df_predict_actual["prediction"].apply(lambda x: x+1)
    / df_predict_actual["grid_feedin_kw"].apply(lambda x: x+1)
)
plt.title("Relative distance")
plt.show()
plt.hist(
    df_predict_actual["prediction"].apply(lambda x: x+1)
    / df_predict_actual["grid_feedin_kw"].apply(lambda x: x+1)
)
plt.title("Relative distance")
plt.show()
plt.hist(
    (df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"]).abs()
    / df_predict_actual["grid_feedin_kw"].apply(lambda x: x+1)
    , bins = 27
)
plt.title("Absolute Difference relativly to the actual produced power")
plt.show()
plt.hist(
    (df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"])
    / df_predict_actual["prediction"].apply(lambda x: x+1)
)
plt.title("Relative distance (relative - actual)")
plt.show()
# Disagreement cases: predicted output while actual is zero, and vice versa
plt.hist(
    df_predict_actual["prediction"].loc[df_predict_actual["grid_feedin_kw"] == 0]
)
plt.title("How much energy should be produced, when it actualy produces none")
plt.show()
plt.hist(
    df_predict_actual["grid_feedin_kw"].loc[df_predict_actual["prediction"] == 0]
)
plt.title("How much energy is produced, when it actualy is predicted to be none")
plt.show()
```
Bei den obigen Grafiken ist ersichtlich, dass es schwierig ist, ein Verhältnis zu finden, bei dem der produzierte Wert stark vom prognostizierten abweicht
und so ein Fehler vorliegen muss. Aber es zeigt sich, dass es vorkommt, dass die PV nichts produziert, obwohl sie laut Prediction etwas produzieren sollte.
Umgekehrt wird nichts produziert, wenn die Prognose auch sagt, dass nichts produziert werden sollte.
-> Daher werden die Werte als "not working" gelabelt, bei denen nichts produziert wird, obwohl das Modell sagt, dass etwas produziert werden sollte.
## Labeling non-working hours
```
#df_predict_actual.drop('label_machine_working', axis=1, inplace=True)
# Label an hour "not working" (False) when nothing was produced although the
# model predicted more than 1 kW; all other rows default to True via fillna.
df_predict_actual['label_machine_working'] = df_predict_actual["prediction"].loc[df_predict_actual["grid_feedin_kw"] == 0].apply(lambda x: x <= 1)
df_predict_actual['label_machine_working'].fillna(value=True, inplace=True)
df_predict_actual
print("True values: ", len(df_predict_actual.loc[df_predict_actual['label_machine_working'] == True]))
print("False values: ", len(df_predict_actual.loc[df_predict_actual['label_machine_working'] == False]))
print("Ratio: ", len(df_predict_actual.loc[df_predict_actual['label_machine_working'] == False]) / len(df_predict_actual))
# NOTE(review): matplotlib's `label` kwarg expects a string; passing a Series
# here probably does not colour the points by label as intended -- verify.
plt.plot(df_predict_actual["grid_feedin_kw"]
         , label=df_predict_actual["label_machine_working"]
         )
```
| github_jupyter |
```
import torch
import catboost
from torch import nn
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from catboost import CatBoostClassifier
import gc
import numpy as np
from tqdm import tqdm
from torch.utils.data import TensorDataset,DataLoader
def preprocessing_Y(file_path):
    """Load the label CSV, drop the known-bad sample ``train_01046`` and
    one-hot encode the ``Label`` column.

    Returns a tuple of (one-hot label tensor, filtered label DataFrame).
    """
    labels_df = pd.read_csv(file_path)
    labels_df = labels_df[labels_df.Filename != 'train_01046']
    encoder = OneHotEncoder().fit(labels_df[['Label']])
    one_hot = torch.FloatTensor(encoder.transform(labels_df[['Label']]).toarray())
    print('Y_ont_hot shape', one_hot.shape)
    print('Y_df shape', labels_df.shape)
    return one_hot, labels_df
def load_pretrain_senet(model_path):
    """Fetch the se_resnet20 architecture from torch.hub, adapt it to
    single-channel input, wrap it in DataParallel (4 GPUs) and load the
    pretrained weights from *model_path*. Returns the model in eval mode."""
    model = torch.hub.load(
        'moskomule/senet.pytorch',
        'se_resnet20',
        num_classes=6)
    # Replace the stem conv: 1 input channel instead of the default 3 (RGB)
    model.conv1 = nn.Conv2d(1, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    # NOTE(review): wrapping in DataParallel before load_state_dict suggests
    # the checkpoints were saved with "module."-prefixed keys -- confirm.
    model = torch.nn.DataParallel(model, device_ids=[0, 1, 2 ,3])
    model.load_state_dict(torch.load(model_path))
    model.eval()
    return model
def get_senet_output(senet,data):
    """Forward *data* through a single SENet model and return its raw output."""
    return senet(data)
def get_all_senet_output(data):
    """Run *data* through all six SENet ensemble members (module-level
    globals ``senet1``..``senet6``) and return their outputs concatenated
    along the last dimension as a numpy array."""
    ensemble = (senet1, senet2, senet3, senet4, senet5, senet6)
    outputs = [net(data) for net in ensemble]
    stacked = torch.cat(outputs, dim=-1)
    return stacked.detach().cpu().numpy()
def get_X_numpy(X):
    """Batch the tensor *X* through the SENet ensemble and return the
    concatenated feature matrix as a numpy array.

    FIX: collect per-batch outputs in a list and stack once at the end;
    the original grew the array with repeated np.vstack calls, which
    copies everything on each iteration (O(n^2)).
    """
    data_iter = DataLoader(TensorDataset(torch.FloatTensor(X.to(torch.float32))), batch_size=256)
    outputs = []
    for bx in tqdm(data_iter):
        bx = bx[0]
        bx = bx.to('cuda:0')
        outputs.append(get_all_senet_output(bx))
    if not outputs:
        # keep the original empty-input sentinel shape
        return np.array([[]])
    return np.vstack(outputs)
```
# 路徑
```
# Paths: training label CSV and the six pretrained SENet checkpoints
Y_train_path = 'train/meta_train.csv'
senet1 = 'senet20_2021_0604_0750.pt'
senet2 = 'senet20_2021_0604_1312_randomseed168.pt'
senet3 = 'senet20_2021_0604_1345_randomseed210.pt'
senet4 = 'senet20_wu_add_data_seed84.pt'
senet5 = 'senet20_wu_add_data_seed126.pt'
senet6 = 'senet20_wu_add_data_seed210.pt'
```
# load k 個 senet
```
# Rebind each path variable to its loaded model, moved to the first GPU
senet1 = load_pretrain_senet(senet1).to('cuda:0')
senet2 = load_pretrain_senet(senet2).to('cuda:0')
senet3 = load_pretrain_senet(senet3).to('cuda:0')
senet4 = load_pretrain_senet(senet4).to('cuda:0')
senet5 = load_pretrain_senet(senet5).to('cuda:0')
senet6 = load_pretrain_senet(senet6).to('cuda:0')
```
# 訓練 和 測試資料
```
# Precomputed input tensors plus one-hot labels and the label DataFrame
X_train_all = torch.load('X_train_吳啟聖教的方法.pt')
X_test_all = torch.load('X_test_吳啟聖教的方法.pt')
Y_train_all,Y_train_df = preprocessing_Y(Y_train_path)
```
# train k個 catboost model
```
from sklearn.model_selection import train_test_split
i = 0
catboost_model = {}
# Train one CatBoost model per seed; each sees a different stratified
# 80/20 train/validation split of the SENet ensemble features.
for seed in tqdm([42,42*2,42*3,42*4,42*5,42*6]):
    X_train, X_valid, y_train, y_valid = train_test_split(X_train_all, Y_train_all,
                                                          test_size=0.2,
                                                          random_state=seed,
                                                          stratify=Y_train_all)
    # SENet features as plain numpy; one-hot labels back to class indices
    X_train_np = get_X_numpy(X_train)
    y_train_np = y_train.detach().numpy().argmax(axis=1)
    X_valid_np = get_X_numpy(X_valid)
    y_valid_np = y_valid.detach().numpy().argmax(axis=1)
    model = CatBoostClassifier(iterations=250,verbose=False)
    model.fit(X_train_np,y_train_np,use_best_model=True,eval_set=(X_valid_np,y_valid_np))
    # copy() detaches the stored model from the loop variable
    catboost_model[seed] = model.copy()
    print(i,model.score(X_valid_np,y_valid_np))
    gc.collect()
    i +=1
# Extract SENet features for the test set
X_test_np = get_X_numpy(X_test_all)
X_test_np.shape
# Average the predicted class probabilities over the six seed-specific
# models; the loop replaces six copy-pasted predict_proba lines.
seeds = [42 * k for k in range(1, 7)]
probs = [catboost_model[seed].predict_proba(X_test_np) for seed in seeds]
final_prob = sum(probs) / len(probs)
final_prob
# Sanity check: each row of probabilities should sum to ~1
final_prob.sum(axis=1)
sample_submit = pd.read_csv('sample_submission.csv')
sample_submit.iloc[:10000,1:] = final_prob
sample_submit
# save
sample_submit.to_csv('stacking_model_submit.csv',index=False)
print('done')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/vukhanhlinh/atom-assignments/blob/main/python-for-data/Ex04%20-%20Lists.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Exercises 04 - Lists
## 1. Second Element
Complete the function below according to its docstring.
*HINT*: Python starts counting at 0.
```
List = [1,2,3,4,5,6,7]
def select_second(L):
    """Return the second element of the given list.
    If the list has no second element, return None.
    """
    # BUG FIX: the original printed the value instead of returning it,
    # contradicting its own docstring.
    if len(L) > 1:
        return L[1]
    return None
select_second(List)
```
## 2. Captain of the Worst Team
You are analyzing sports teams. Members of each team are stored in a list. The **Coach** is the first name in the list, the **Captain** is the second name in the list, and other players are listed after that.
These lists are stored in another list, which starts with the best team and proceeds through the list to the worst team last. Complete the function below to select the **captain** of the worst team.
```
def losing_team_captain(teams):
    """Return the captain (second listed name) of the worst team.

    Teams are ordered best-to-worst, so the worst team is the last entry;
    its captain is the second name (index 1) after the coach.
    """
    worst_team = teams[-1]
    return worst_team[1]
teamList = [['Q','W','E'],['A','S','D']]
losing_team_captain(teamList)
```
## 3. Purple Shell item
The next iteration of Mario Kart will feature an extra-infuriating new item, the ***Purple Shell***. When used, it warps the last place racer into first place and the first place racer into last place. Complete the function below to implement the Purple Shell's effect.
```
"""Given a list of racers, set the first place racer (at the front of the list) to last
place and vice versa.
>>> r = ["Mario", "Bowser", "Luigi","a"]
>>> purple_shell(r)
>>> r
["Luigi", "Bowser", "Mario"]
"""
def purple_shell(racers):
racers[0] , racers[2] = racers[2] , racers[1]
print(racers)
r = ["Mario", "Bowser", "Luigi",]
purple_shell(r)
```
## 4. Guess the Length!
What are the lengths of the following lists? Fill in the variable `lengths` with your predictions. (Try to make a prediction for each list *without* just calling `len()` on it.)
```
a = [1, 2, 3]        # three elements
b = [1, [2, 3]]      # two elements: 1 and the nested list (counted as one)
c = []               # empty list
d = [1, 2, 3][1:]    # slice from index 1 -> [2, 3], two elements
# Put your predictions in the list below. Lengths should contain 4 numbers, the
# first being the length of a, the second being the length of b and so on.
lengths = [3, 2, 0, 2]
```
## 5. Fashionably Late <span title="A bit spicy" style="color: darkgreen ">🌶️</span>
We're using lists to record people who attended our party and what order they arrived in. For example, the following list represents a party with 7 guests, in which Adela showed up first and Ford was the last to arrive:
party_attendees = ['Adela', 'Fleda', 'Owen', 'May', 'Mona', 'Gilbert', 'Ford']
A guest is considered **'fashionably late'** if they arrived after at least half of the party's guests. However, they must not be the very last guest (that's taking it too far). In the above example, Mona and Gilbert are the only guests who were fashionably late.
Complete the function below which takes a list of party attendees as well as a person, and tells us whether that person is fashionably late.
```
"""Given an ordered list of arrivals to the party and a name, return whether the guest with that
name was fashionably late.
"""
def fashionably_late(arrivals, name):
num = int(round((len(arrivals) / 2),1))+1
lateList = []
for i in range (num,len(arrivals)-1):
arrivals[i] = 'Late';
# lateList.append(arrivals[i])
return name not in arrivals
party_attendees =['Adela', 'Fleda', 'Owen', 'May', 'Mona', 'Gilbert', 'Ford']
print(fashionably_late(party_attendees, 'Mona'))
```
# Keep Going 💪
| github_jupyter |
## 1. 决策树学习
DecisionTreeClassifier 特征选择参数 <br>
criterion 可以使用"gini"或者"entropy",前者代表基尼系数,后者代表信息增益。一般说使用默认的基尼系数"gini"就可以了,即CART算法。除非你更喜欢类似ID3, C4.5的最优特征选择方法。<br>
参考:https://www.leiphone.com/news/201707/WYsS1Qebkc80axky.html
```
from sklearn.datasets import load_iris
from sklearn import tree
# Fit a CART decision tree on the iris dataset (default criterion: gini)
iris = load_iris()
clf = tree.DecisionTreeClassifier()
clf = clf.fit(iris.data, iris.target)
# Predicted class and class probabilities for the first sample
print(clf.predict(iris.data[:1, :]))
print(clf.predict_proba(iris.data[:1, :]))
from IPython.display import Image
# NOTE(review): sklearn.externals.six was removed in scikit-learn 0.23;
# modern code imports StringIO from the `io` module instead.
from sklearn.externals.six import StringIO
import pydotplus
# Render the fitted tree as a PNG via graphviz/pydotplus
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data,
                     feature_names=iris.feature_names,
                     class_names=iris.target_names,
                     filled=True, rounded=True,
                     special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
```
## 2. sklearn_pandas 练习
```
import random
import sklearn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn_pandas import DataFrameMapper
from sklearn.pipeline import make_pipeline
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
# transformers for category variables
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# transformers for numerical variables
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
# transformers for combined variables
from sklearn.decomposition import PCA
from sklearn.preprocessing import PolynomialFeatures
# user-defined transformers
from sklearn.preprocessing import FunctionTransformer
# classification models
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
# evaluation
from sklearn.metrics import scorer
# Toy frame mixing categorical and numerical columns
testdata = pd.DataFrame({'pet': ['cat', 'dog', 'dog', 'fish','cat', 'dog', 'cat', 'fish'],
                         'age': [4., 6, 3, 3, 2, 3, 5, 4],
                         'gender': ['male', 'female', 'female', 'male','male', 'female', 'male', 'female'],
                         'salary': [90,24,44,27,32,59,36,27]})
# Per-column transformers: label-encode pet, scale age to [0, 1],
# binarise gender, pass salary through unchanged
mapper = DataFrameMapper([ ('pet', LabelEncoder()),
                           ('age', MinMaxScaler()),
                           ('gender', LabelBinarizer()),
                           ('salary',None),
                         ])
testdata = mapper.fit_transform(testdata)
# BUG FIX: Python 2 `print testdata` statement replaced with a print()
# call so the cell also runs under Python 3.
print(testdata)
```
## 3. Python 自定义Package 路径
```
import sys
# NOTE(review): hard-coded, user-specific site-packages path -- this only
# works on the original author's machine; prefer a virtualenv instead.
sys.path.append('/Users/cisco/.pyenv/versions/3.6.5/lib/python3.6/site-packages')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the voice dataset, peek at 5 random rows, plot feature correlations
data = pd.read_csv('./data/voice.csv')
print(data.sample(5))
sns.heatmap(data.corr())
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Load Data using `TabularDataset` for Remote Execution (AmlCompute)**_
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Data](#Data)
1. [Train](#Train)
1. [Results](#Results)
1. [Test](#Test)
## Introduction
In this example we showcase how you can use AzureML Dataset to load data for AutoML.
Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.
In this notebook you will learn how to:
1. Create a `TabularDataset` pointing to the training data.
2. Pass the `TabularDataset` to AutoML for a remote run.
## Setup
As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
import pandas as pd
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig
# Connect to the workspace described by the local config.json
ws = Workspace.from_config()
# choose a name for experiment
experiment_name = 'automl-dataset-remote-bai'
# project folder
project_folder = './sample_projects/automl-dataprep-remote-bai'
experiment = Experiment(ws, experiment_name)
# Summarise the SDK/workspace configuration as a one-column DataFrame
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace Name'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
# NOTE(review): -1 is deprecated for max_colwidth; newer pandas expects None
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```
## Data
```
# The data referenced here was a 1MB simple random sample of the Chicago Crime data into a local temporary directory.
example_data = 'https://dprepdata.blob.core.windows.net/demo/crime0-random.csv'
dataset = Dataset.Tabular.from_delimited_files(example_data)
# Lazily evaluate only the first 5 records for a quick preview
dataset.take(5).to_pandas_dataframe()
```
### Review the data
You can peek the result of a `TabularDataset` at any range using `skip(i)` and `take(j).to_pandas_dataframe()`. Doing so evaluates only `j` records, which makes it fast even against large datasets.
`TabularDataset` objects are immutable and are composed of a list of subsetting transformations (optional).
```
# Features: everything except the target and the leaky 'FBI Code' column;
# target: 'Primary Type' (validate=True fails fast if the column is missing)
X = dataset.drop_columns(columns=['Primary Type', 'FBI Code'])
y = dataset.keep_columns(columns=['Primary Type'], validate=True)
```
## Train
This creates a general AutoML settings object applicable for both local and remote runs.
```
# Shared AutoML settings: short run (2 iterations, 10 min cap each),
# weighted AUC as the optimisation metric, automatic preprocessing
automl_settings = {
    "iteration_timeout_minutes" : 10,
    "iterations" : 2,
    "primary_metric" : 'AUC_weighted',
    "preprocess" : True,
    "verbosity" : logging.INFO
}
```
### Create or Attach an AmlCompute cluster
```
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
# Choose a name for your cluster.
amlcompute_cluster_name = "automlc2"
found = False
# Check if this compute target already exists in the workspace.
cts = ws.compute_targets
if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':
    found = True
    print('Found existing compute target.')
    compute_target = cts[amlcompute_cluster_name]
if not found:
    print('Creating a new compute target...')
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2", # for GPU, use "STANDARD_NC6"
                                                                #vm_priority = 'lowpriority', # optional
                                                                max_nodes = 6)
    # Create the cluster.
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)
    print('Checking cluster status...')
    # Can poll for a minimum number of nodes and for a specific timeout.
    # If no min_node_count is provided, it will use the scale settings for the cluster.
    compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
    # For a more detailed view of current AmlCompute status, use get_status().
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
import pkg_resources
# create a new RunConfig object for the remote run (docker-based)
conda_run_config = RunConfiguration(framework="python")
# Set compute target to AmlCompute
conda_run_config.target = compute_target
conda_run_config.environment.docker.enabled = True
# Conda packages required on the remote image
cd = CondaDependencies.create(conda_packages=['numpy','py-xgboost<=0.80'])
conda_run_config.environment.python.conda_dependencies = cd
```
### Pass Data with `TabularDataset` Objects
The `TabularDataset` objects captured above can also be passed to the `submit` method for a remote run. AutoML will serialize the `TabularDataset` object and send it to the remote compute target. The `TabularDataset` will not be evaluated locally.
```
# Classification config; X/y are TabularDatasets that will be serialized
# and evaluated on the remote compute, not locally.
automl_config = AutoMLConfig(task = 'classification',
                             debug_log = 'automl_errors.log',
                             path = project_folder,
                             run_configuration=conda_run_config,
                             X = X,
                             y = y,
                             **automl_settings)
# Submit and stream output until the remote run completes
remote_run = experiment.submit(automl_config, show_output = True)
remote_run
```
### Pre-process cache cleanup
The preprocessed data gets cached at the user's default file store. When the run is completed, the cache can be cleaned by running the cell below.
```
remote_run.clean_preprocessor_cache()
```
### Cancelling Runs
You can cancel ongoing remote runs using the `cancel` and `cancel_iteration` functions.
```
# Cancel the ongoing experiment and stop scheduling new iterations.
# remote_run.cancel()
# Cancel iteration 1 and move onto iteration 2.
# remote_run.cancel_iteration(1)
```
## Results
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
```
from azureml.widgets import RunDetails
# Live-updating widget that tracks the remote run's child iterations
RunDetails(remote_run).show()
```
#### Retrieve All Child Runs
You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
```
# Collect the float-valued metrics of every child run, keyed by iteration
children = list(remote_run.get_children())
metricslist = {}
for run in children:
    properties = run.get_properties()
    metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
    metricslist[int(properties['iteration'])] = metrics
# FIX: positional axis argument to sort_index was deprecated and removed
# in pandas 2.0 -- pass the axis keyword explicitly.
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
```
### Retrieve the Best Model
Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
```
# Best run and fitted model across all iterations by the primary metric
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)
```
#### Best Model Based on Any Other Metric
Show the run and the model that has the smallest `log_loss` value:
```
lookup_metric = "log_loss"
best_run, fitted_model = remote_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)
```
#### Model from a Specific Iteration
Show the run and the model from the first iteration:
```
# Run/model from one particular iteration (here: the first)
iteration = 0
best_run, fitted_model = remote_run.get_output(iteration = iteration)
print(best_run)
print(fitted_model)
```
## Test
#### Load Test Data
The test data should go through the same preparation steps as the training data; otherwise the run might fail at the preprocessing step.
```
# Load the held-out test CSV and prepare it like the training data
dataset_test = Dataset.Tabular.from_delimited_files(path='https://dprepdata.blob.core.windows.net/demo/crime0-test.csv')
df_test = dataset_test.to_pandas_dataframe()
# Drop rows without a label before splitting features/target
df_test = df_test[pd.notnull(df_test['Primary Type'])]
y_test = df_test[['Primary Type']]
X_test = df_test.drop(['Primary Type', 'FBI Code'], axis=1)
```
#### Testing Our Best Fitted Model
We will use confusion matrix to see how our model works.
```
# NOTE(review): pandas_ml is unmaintained and incompatible with recent
# pandas releases; sklearn.metrics.confusion_matrix is the modern choice.
from pandas_ml import ConfusionMatrix
ypred = fitted_model.predict(X_test)
cm = ConfusionMatrix(y_test['Primary Type'], ypred)
print(cm)
cm.plot()
```
| github_jupyter |
# End-to-End example: Computing Niño 3.4 Index
In this notebook, we are going to combine all concepts/topics we've covered so far to compute [Niño 3.4 Index](https://climatedataguide.ucar.edu/climate-data/nino-sst-indices-nino-12-3-34-4-oni-and-tni) for the CESM2 submission for [CMIP6 project](https://esgf-node.llnl.gov/projects/cmip6/).
> Niño 3.4 (5N-5S, 170W-120W): The Niño 3.4 anomalies may be thought of as representing the average equatorial SSTs across the Pacific from about the dateline to the South American coast. The Niño 3.4 index typically uses a 5-month running mean, and El Niño or La Niña events are defined when the Niño 3.4 SSTs exceed +/- 0.4C for a period of six months or more.
> Nino X Index computation: (a) Compute area averaged total SST from Niño X region; (b) Compute monthly climatology (e.g., 1950-1979) for area averaged total SST from Niño X region, and subtract climatology from area averaged total SST time series to obtain anomalies; (c) Smooth the anomalies with a 5-month running mean; (d) Normalize the smoothed values by its standard deviation over the climatological period.

At the end of this notebook, you should be able to produce a plot that looks similar [to this one](https://climatedataguide.ucar.edu/sites/default/files/styles/node_lightbox_display/public/key_figures/climate_data_set/indices_oni_2_2_lg.png?itok=61jS7Jz7)

---
## Learning Objectives
- Load data
- Masking data using `.where()` method
- Compute climatologies and anomalies using `.groupby()`
- Use `.rolling()` to compute moving average
- Normalize computed Niño 3.4 Index
- Visualize the computed Niño 3.4 Index
## Prerequisites
| Concepts | Importance | Notes |
| --- | --- | --- |
| [Understanding of xarray core data structures](./01-xarray-fundamentals.ipynb) | Necessary | |
| [Familiarity with xarray indexing and subsetting](./02-indexing-and-subsetting.ipynb) | Necessary | |
| [Familiarity with xarray's plotting functionality](./03-data-visualization.ipynb) | Necessary | |
| [Familiarity with xarray's computation routins](./05-computation.ipynb) | Necessary | |
- **Time to learn**: *20 minutes*
---
## Task 1: Import packages
```
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
```
## Task 2: Open the sea surface temperature dataset and the areacello
```
# Sea-surface temperature (tos) and grid-cell area (areacello) from CESM2
data = xr.open_dataset(
    "./data/tos_Omon_CESM2_historical_r11i1p1f1_gr_200001-201412.nc", engine="netcdf4"
)
areacello = xr.open_dataset(
    "./data/areacello_Ofx_CESM2_historical_r11i1p1f1_gr.nc", engine="netcdf4"
)
# Merge the two datasets in a single dataset
ds = xr.merge([data, areacello])
ds
```
## Task 3: Visualize the first time slice to make sure the data looks okay
```
# Global map of the first time step as a visual sanity check
fig = plt.figure(figsize=(12, 6))
ax = plt.axes(projection=ccrs.Robinson(central_longitude=180))
ax.coastlines()
ax.gridlines()
ds.tos.isel(time=0).plot(
    robust=True, ax=ax, transform=ccrs.PlateCarree(), cbar_kwargs={'shrink': 0.5}
)
ax.set_global()
```
## Task 4: Select the Niño 3.4 region
There are a few ways for selecting the Niño 3.4 region
1. Use `sel()` or `isel()`
2. Use `where()` and select all values within the bounds of interest
```
# Niño 3.4 box: 5S-5N, 170W-120W (expressed as 190E-240E)
tos_nino34 = ds.sel(lat=slice(-5, 5), lon=slice(190, 240))
tos_nino34
```
Another option for selecting our region of interest is to use:
```
# tos_nino34 = ds.where((ds.lat<5) & (ds.lat>-5) & (ds.lon>190) & (ds.lon<240), drop=True)
```
Let's plot the selected region to make sure we are doing the right thing:
```
# Plot the selected box to verify the region bounds visually
fig = plt.figure(figsize=(12, 6))
ax = plt.axes(projection=ccrs.Robinson(central_longitude=180))
ax.coastlines()
ax.gridlines()
tos_nino34.tos.isel(time=0).plot(ax=ax, transform=ccrs.PlateCarree(), cbar_kwargs={'shrink': 0.5})
ax.set_extent((120, 300, 10, -10))
```
## Task 5: Compute monthly climatology for area averaged total SST from Niño 3.4 region, and subtract climatology from area averaged total SST time series to obtain anomalies;
```
# Monthly climatology via groupby; subtracting it leaves SST anomalies
gb = tos_nino34.tos.groupby('time.month')
tos_nino34_anom = gb - gb.mean(dim='time')
# Area-weighted spatial mean over the box gives the raw index time series
index_nino34 = tos_nino34_anom.weighted(tos_nino34.areacello).mean(dim=['lat', 'lon'])
```
## Task 6: Smooth the anomalies with a 5-month running mean
```
# 5-month centered-window smoothing, then compare raw vs. smoothed
index_nino34_rolling_mean = index_nino34.rolling(time=5).mean()
index_nino34.plot(size=8)
index_nino34_rolling_mean.plot()
plt.legend(['anomaly', '5-month running mean anomaly']);
```
## Task 7: Normalize the smoothed values by its standard deviation over the climatological period
```
# Normalise the smoothed anomalies by the SST standard deviation
std_dev = tos_nino34.tos.std()
std_dev
normalized_index_nino34_rolling_mean = index_nino34_rolling_mean / std_dev
```
## Visualize the computed Niño 3.4
```
fig = plt.figure(figsize=(12, 6))
# Shade El Niño periods (index above +0.4) in red ...
plt.fill_between(
    normalized_index_nino34_rolling_mean.time.data,
    normalized_index_nino34_rolling_mean.where(normalized_index_nino34_rolling_mean >= 0.4).data,
    0.4,
    color='red',
    alpha=0.9,
)
# ... and La Niña periods (index below -0.4) in blue
plt.fill_between(
    normalized_index_nino34_rolling_mean.time.data,
    normalized_index_nino34_rolling_mean.where(normalized_index_nino34_rolling_mean <= -0.4).data,
    -0.4,
    color='blue',
    alpha=0.9,
)
# Index line plus zero line and the +/-0.4 threshold guides
normalized_index_nino34_rolling_mean.plot(color='black')
plt.axhline(0, color='black', lw=0.5)
plt.axhline(0.4, color='black', linewidth=0.5, linestyle='dotted')
plt.axhline(-0.4, color='black', linewidth=0.5, linestyle='dotted');
```
---
```
%load_ext watermark
%watermark --time --python --updated --iversion
```
## Resources and References
- [Project Pythia Resource Gallery](https://projectpythia.org/gallery.html)
- [Pangeo Gallery](http://gallery.pangeo.io/)
<div class="admonition alert alert-success">
<p class="title" style="font-weight:bold">Previous: <a href="./05-masking.ipynb">Masking Data</a></p>
</div>
| github_jupyter |
# El perceptrón multicapa
Este notebook, donde se introduce el perceptrón multicapa (Multi-Layer Perceptron o MLP), la red neuronal más básica que podemos construir. Vamos a tratar detalladamente cómo crear, entrenar y evaluar una red neuronal usando `pytorch`.
Esta parte se compone de:
1. Introducción al perceptrón
- El perceptrón multicapa
2. Realización guiada
- Preparación y carga de librerías
- Descarga y preparación de los datos
- Creación del modelo
- Entrenar el modelo
- Evaluar el modelo
En resumen, en este pequeño tutorial aprenderemos a usar PyTorch para crear, entrenar y predecir con nuestra primera red neuronal. Una red neuronal consta fundamentalmente de:
* Una **arquitectura**, que especifica cómo se organizan las neuronas y qué funciones aplican a las diferentes entradas para llegar a la salida.
* Una **función de *loss*** o pérdida, que es la que cuantifica cómo de malas son las predicciones de nuestra red neuronal.
* Un **algoritmo de entrenamiento**. Que especifica como, a partir del *loss* se van a actualizar los pesos de la red neuronal para aumentar su precisión.
En esta práctica vamos a comenzar con una red muy sencilla: el **perceptrón multicapa**.
## El perceptrón multicapa
El **perceptrón multicapa** ([Multi-layer perceptron](https://en.wikipedia.org/wiki/Multilayer_perceptron)) es quizá la red neuronal más básica, aunque a pesar de su sencillez es bastante efectiva. Se considera un aproximador universal (ver charla en `00 Práctica Deep Learning - Introducción` donde se explican sus matemáticas), y tiene una estructura básica que consta de:
* Capa de entrada (input layer)
* Capa(s) oculta(s) (hidden layer)
* Capa de salida (output layer)
Cada una de estas capas está compuesta de $i$ neuronas, cada una conectada con todas las neuronas de la capa siguiente. Recordemos la ecuación de una neurona básica:
$$ y_i^n = f(\mathbf{w_i^n}*\mathbf{y^{n-1}}+b_i^n)$$
donde $n$ es el número de capa (la capa de entrada es la 0), $i$ el número de neurona dentro de cada capa y $f()$ es una **función de activación**. De este modo, esa ecuación describe la multiplicación matricial de un vector de pesos $\mathbf{w_i^n}$ de la neurona $i$ de la capa $n$ por las activaciones de la capa anterior $\mathbf{y^{n-1}}$, lo que da un valor más un sesgo o *bias* $b_i^n$, todo pasando por la función de activación.
Este tipo de redes en las que las conexiones van siempre en la misma dirección y no hay conexiones dentro de una capa o que se salten capas se conocen como *feedforward* o *fully connected*. A continuación se detalla la estructura de un perceptrón de una sola capa oculta:

## ¿Y como escribo yo esto en python?
En el notebook anterior ya hemos hecho una introducción al trabajo con tensores de pytorch, aplicación de funciones y al cálculo automático de gradientes. En este documento vamos a construir nuestra primera red neuronal, pero seguiremos utilizando los mismos conceptos básicos que manejábamos en ese tutorial.
### Preparación
Vamos a ir paso a paso. Lo primero que tenemos que hacer es importar pytorch y algunos módulos de la librería:
* `torch.nn`: La librería de redes neuronales que utilizaremos para crear nuestro modelo.
* `torch.autograd`: En concreto el módulo Variable de esta librería que se encarga de manejar las operaciones de los tensores y sus gradientes, que son más complejos que los que vimos en la parte anterior.
* `torchvision.datasets`: El módulo que ayudará a cargar el conjunto de datos que vamos a utilizar y explicaremos más adelante.
* `torchvision.transforms`: Este módulo contiene una serie de funciones que nos ayudarán modificando el dataset.
* `torch.optim`: De aquí usaremos el optimizador para entrenar la red neuronal y modificar sus pesos.
Para ello ejecutamos el siguiente código:
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.optim as optim
```
### Datos, datos, datos
Los datos son la parte más importante del *deep learning*, rivalizando con la arquitectura de red utilizada y cualquier otro parámetro. Para esta práctica-tutorial, vamos a utilizar el conjunto conocido como MNIST. Se trata de un conjunto de imágenes de dígitos escritos a mano, contiene un total de 60000 imágenes para entrenamiento y 10000 para *test*, o sea, para probar la efectividad de nuestro modelo. Todos los dígitos están normalizados en tamaño y centrados en la imagen de tamaño 28x28 en escala de grises. El objetivo de esta base de datos es clasificar cada imagen diciendo a que número entre el 0 y el 9 pertenece.
Para cargar de forma rápida el dataset vamos a utilizar el modulo `datasets` de `torchvision`, además debemos definir que transformaciones vamos a aplicarle a todas las muestras, como convertir el tipo de datos de la imagen a un `torch.Tensor`, el formato con el que esta librería puede realizar cálculos de forma eficiente. `torchvision.transforms` contiene una gran cantidad de transformaciones para aplicar a los datasets que quizá usemos más adelante, por ejemplo para normalizar el dataset, aunque dado que este dataset está normalizado a valores entre 0 y 1, no nos hará falta.
Definimos también en `root` el directorio donde guardamos los datos. Puede ser el que queráis, según el sistema operativo que estéis utilizando. Si juntamos la transformación (`transforms.ToTensor()`) y la guardamos en `trans`, podemos adjuntarla al cargador de los datos en `dset.MNIST` de la forma:
```
trans = transforms.Compose([transforms.ToTensor()]) #Transformador para el dataset
root = './data/'
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)
```
El entrenamiento de una red neuronal se realiza mediante *batches*, o lo que es lo mismo, por subconjuntos de los datos de entrenamiento (de 2 en 2, de 5 en 5 o de 256 en 256...), lo que acelera el entrenamiento y hace que la red neuronal aprenda de forma más efectiva. Lo veremos más adelante.
```
batch_size = 128
```
Después definimos un `Dataloader` para el dataset que hemos descargado. Un `Dataloader`, que no es más que un objeto que, cuando lo llamemos con un comando determinado, nos dará las muestras de entrenamiento o test en grupos del tamaño `batch_size`. También contiene funciones que pueden ser útiles, como reordenar aleatoriamente el dataset en cada iteración (`shuffle`). El código será:
```
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=batch_size,
shuffle=False)
```
Podemos saber la cantidad de conjuntos (batches) de tamaño 128 que tenemos simplemente utilizando la función `len` de Python. En el caso de que el tamaño del dataset no sea un múltiplo del tamaño de batch, el último batch será de un tamaño reducido.
```
print ('Trainning batch number: {}'.format(len(train_loader)))
print ('Testing batch number: {}'.format(len(test_loader)))
```
Y podemos mostrarlo utilizando la librería `matplotlib`, y la función `plt.imshow`, que muestra una imagen, así como la función `make_grid` de `torchvision`.
```
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
def imshow(img):
    """Display a (possibly normalized) image tensor with matplotlib.

    Expects a CHW tensor; channels are moved to the last axis because
    matplotlib expects HWC images.
    """
    img = img / 2 + 0.5  # undo a [-1, 1] normalization (harmless for data already in [0, 1])
    plt.imshow(img.permute(1, 2, 0))
    # permute puts the channel dimension last (matplotlib's default layout)

# Turn the DataLoader into an iterator and fetch one batch, a pair
# (images, labels). NOTE: the `.next()` method was removed from Python 3
# iterators (and from PyTorch's DataLoader iterator); the builtin next()
# is the supported way to advance an iterator.
dataiter = iter(train_loader)
images, labels = next(dataiter)

# Use the imshow helper defined above to show the whole batch as a grid.
imshow(make_grid(images))
```
## Creación del modelo
Una vez tenemos el dataset debemos decidir que topología de red neuronal vamos a utilizar. Nosotros vamos a utilizar una red muy simple con una sola capa oculta con 256 neuronas.
En PyTorch, las redes se definen como una clase que hereda de `nn.Module`. Si no sabéis qué es una clase o un objeto, este es un buen vídeo de [introducción a la programación orientada a objetos](https://www.youtube.com/watch?v=tTPeP5dVuA4) en youtube.
Las clases en python se definen con la palabra clave `class`, y para una red del tipo `nn.Module` debemos definir dos métodos fundamentales: `__init__` y `forward`. En `__init__` se define qué capas va a tener la red:
* La primera capa será nuestra capa oculta que recibe un tamaño de entrada de 28x28 y una salida de 256 neuronas.
* La segunda capa será la capa de salida, y recibirá las 256 salidas de la capa oculta y tendrá 10 neuronas de salida.
Ambas capas serán de tipo `nn.Linear`, que es el tipo básico de operación que hemos descrito con la ecuación anterior.
A continuación, se define la función `forward()`, que implementa las operaciones a realizar desde la entrada de la red a la salida. En nuestro caso, los pasos serán:
* Linealizar las imágenes, es decir, transformarlas en un vector de 784 elementos, ya que un perceptrón requiere un vector de entrada. Esto lo hacemos con la función `view()`.
* Después aplicamos la capa 1 (`self.fc1`) y a su salida, la función de activación `F.relu()`.
* Finalmente aplicamos la capa 2 (`self.fc2`) y a su salida, la función de activación `F.softmax()`.
ReLU es la función [rectified linear unit](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) y la función [softmax](https://en.wikipedia.org/wiki/Softmax_function), que es una función que toma como entrada un vector de K números reales, y lo normaliza en una distribución de probabilidad consistente en K probabilidades proporcionales a los exponenciales de los números de entrada. Esta última nos dará las probabilidades de pertenencia de cada entrada a cada una de las 10 clases, y cogeremos como "etiqueta" aquella que tenga máxima probabilidad.
El código para esta clase es:
```
class MLP(nn.Module):
    """Single-hidden-layer perceptron for 28x28 MNIST images (10 classes)."""

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(28*28, 256)  # hidden layer: 784 inputs -> 256 units
        self.fc2 = nn.Linear(256, 10)  # output layer: 256 inputs -> 10 class scores

    def forward(self, x):
        # Flatten images of shape (n, 1, 28, 28) into (n, 784) vectors.
        x = x.view(-1, 28*28)
        # ReLU activation on the hidden-layer output.
        x = F.relu(self.fc1(x))
        # Softmax on the *output* layer turns the 10 scores into probabilities.
        # NOTE(review): the training cell pairs this model with
        # nn.CrossEntropyLoss, which applies log-softmax internally, so the
        # softmax here is applied twice. It still trains (argmax is unchanged)
        # but weakens gradients — confirm whether this is intentional for the
        # tutorial, since the later cells plot these outputs as probabilities.
        x = F.softmax(self.fc2(x), dim=1)
        return x
```
A continuación definimos el criterio de *loss* y el optimizador. Como criterio de *loss* vamos a utilizar la entropía cruzada, una medida basada en la entropía que viene de la teoría de la información, y que calcula la diferencia entre dos distribuciones de probabilidad. Si recordáis, al usar "softmax" en la salida de la red nos da la probabilidad de cada neurona según la entrada. Si la comparamos a la distribución real (las etiquetas), nos dará una buena estimación del error.
En cuanto al optimizador, utilizaremos el [descenso de gradiente estocástico](https://medium.com/metadatos/todo-lo-que-necesitas-saber-sobre-el-descenso-del-gradiente-aplicado-a-redes-neuronales-19bdbb706a78), una técnica de aprendizaje que se basa en el gradiente de la salida con respecto a la referencia, y actualiza los pesos de la red neuronal ajustándolos a dicho gradiente. Para ello es necesario crear el objeto `model`, nuestro modelo, a partir de la clase `MLP()` que hemos definido, y posteriormente pasar sus parámetros al optimizador:
```
model = MLP()
criterion = nn.CrossEntropyLoss() # definimos la pérdida
# y utilizamos descenso de gradiente estocástico con un learning-rate
# (factor que cuantifica cuánto vamos a actualizar los pesos con respecto
# a su valor actual) de 0.01 y un momento de 0.9, que actualiza el learning
# rate en función de sus valores anteriores
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
```
### Entrenar el modelo
Ya tenemos nuestro modelo, con su arquitectura, loss y optimizador creados. Podemos empezar a entrenar el modelo. Esto se realiza mediante diversas iteraciones de entrenamiento, conocidas como **epoch**. La idea es básicamente hacer algo como (en pseudocódigo):
```
for e in epochs:
for (images,labels) in dataset:
salida = model(images) # conocido como "forward pass"
loss = criterio(salida, labels)
propagar gradiente de loss a todas las neuronas, conocido como "backward pass"
optimizar usando dicho gradiente
```
```
n_epochs = 20
for epoch in range(n_epochs):
    print("Train")  # training phase for this epoch
    running_loss = 0.0  # sum of per-sample losses this epoch
    running_acc = 0.0   # number of correct predictions this epoch
    total = 0           # number of samples seen this epoch
    for i, (images, labels) in enumerate(train_loader, 0):
        total += labels.shape[0]
        # zero the gradients accumulated on every parameter
        optimizer.zero_grad()
        outputs = model(images)            # forward pass
        loss = criterion(outputs, labels)  # batch-mean loss
        loss.backward()                    # backward pass
        optimizer.step()                   # parameter update
        # Statistics. criterion returns the *mean* loss over the batch, so
        # weight it by the batch size before accumulating; dividing a sum of
        # per-batch means by the sample count would understate the loss by a
        # factor of roughly the batch size.
        running_loss += loss.item() * labels.shape[0]
        # predicted label = index of the output neuron with maximum value
        _, predicted = torch.max(outputs, 1)
        # .item() converts the 0-d tensor to a plain number so the printed
        # accuracy is a float rather than a tensor repr
        running_acc += torch.sum(predicted == labels).item()
    print(f'>>> Epoch {epoch} >>>> Loss: {running_loss/total}, Acc: {running_acc/total}')
```
Y al parecer, obtenemos un accuracy alto, de más del 90%. Eso quiere decir que acumula más del 90% de predicciones correctas a partir de los datos. Pero por supuesto, esto es en el conjunto del training.
Sin embargo, para poder ver el poder predictivo de nuestro modelo entrenado, es importante probar su precisión en datos que dicho modelo no ha visto nunca. Y para eso es para lo que sirve el conjunto de test.
### Evaluación del modelo
En la evaluación, que se suele conocer como "test", en lugar del forward y el backward pass, solo vamos a utilizar el forward. Se le pasa el conjunto de test, se obtienen las predicciones y se comparan con las etiquetas reales para estimar la precisión en el test. Las etiquetas que obtenemos con `outputs` son un formato un poco extraño, conocido como *one-hot encoding*, y sería algo similar a: `[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]` para el número 3, o `[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]` para el número 9.
En realidad, al ser la salida de la última capa y pasar por `softmax`, los valores se convierten en probabilidades, así que serán cosas como: `[0.01, 0.00, 0.00, 0.04, 0.01, 0.10, 0.04, 0.07, 0.72, 0.00]`, donde tenemos dos posiciones (la 5 y la 8) con probabilidades altas (0.10, y 0.72). Para obtener el valor numérico donde se encuentra la máxima probabilidad (el número que nuestra red neuronal piensa que se corresponde con la entrada), usamos la orden `torch.max(outputs,1)`, que devolverá una lista con la localización del máximo en el eje 1 (el de las columnas).
Es importante deshabilitar la propagación del gradiente durante el forward pass, para que no modifiquemos los pesos de la red durante el test, y no contamine el entrenamiento. Así pues, con estas restricciones, el test se queda:
```
correct = 0
total = 0
# Gradient tracking is unnecessary (and wasteful) during evaluation.
with torch.no_grad():
    for batch_images, batch_labels in test_loader:
        batch_outputs = model(batch_images)  # forward pass only
        # numeric predicted label = index of the max output per row
        predicted_labels = batch_outputs.argmax(dim=1)
        # count samples seen and samples predicted correctly, for the
        # accuracy computed below
        total += batch_labels.size(0)
        correct += (predicted_labels == batch_labels).sum().item()
print(f'Precisión del modelo en las imágenes de test: {correct / total}')
```
Vemos que en test alcanzamos un valor de 93% de precisión, muy similar a la precisión de training, lo cual significa que nuestro modelo será bastante bueno para la detección de dígitos escritos a mano. Para comprender mejor qué ha pasado, vamos a visualizar algunos casos correctos y otros que han fallado:
```
def muestra_predicciones(model, loader, n=2):
    """Show n correctly and n incorrectly classified samples from `loader`,
    each next to a bar chart of the model's per-class outputs."""
    # running counts of collected correct / wrong examples (n of each)
    ncorrect = 0
    nwrong = 0
    # image buffer shape: (n, C, H, W), taken from the first dataset sample
    size = (n,) + loader.dataset[0][0].shape
    # number of model outputs, read from the model's last layer
    n_salida = list(model.children())[-1].out_features
    # buffers for the images and model outputs to display
    im_correct_display = torch.empty(size)
    im_wrong_display = torch.empty(size)
    output_correct_display = torch.empty((n, n_salida))
    output_wrong_display = torch.empty((n, n_salida))
    with torch.no_grad():  # disable gradient tracking during inference
        for inputs, labels in loader:
            outputs = model(inputs)  # forward pass
            _, predicted = torch.max(outputs, 1)  # numeric predicted labels
            aciertos = predicted == labels  # boolean mask of correct predictions
            if sum(aciertos) > 0 and ncorrect < n:
                # indices of the correctly classified elements in this batch
                indices = torch.where(aciertos)[0]
                # copy at most the remaining quota (n - ncorrect) of examples
                for i, ix in enumerate(indices[:n-ncorrect]):
                    im_correct_display[i+ncorrect] = inputs[ix]
                    output_correct_display[i+ncorrect] = outputs[ix]
                # i is the last loop index, so i + 1 items were just copied
                ncorrect = ncorrect + i + 1
            if sum(aciertos == False) > 0 and nwrong < n:
                # indices of the misclassified elements in this batch
                indices = torch.where(aciertos == False)[0]
                for i, ix in enumerate(indices[:n-nwrong]):
                    im_wrong_display[i+nwrong] = inputs[ix]
                    output_wrong_display[i+nwrong] = outputs[ix]
                nwrong = nwrong + i + 1
            if ncorrect >= n and nwrong >= n:
                break  # stop iterating once both quotas are filled
    # Display: first n rows show correct cases, the next n rows wrong cases.
    fig, ax = plt.subplots(n*2, 2)  # grid of (2n x 2) subplots
    for i in range(n):
        ax[i][0].imshow(im_correct_display[i, 0])
        ax[i][1].bar(range(10), output_correct_display[i])
        if i == 0:
            ax[i][0].set_title('Imagenes')
            ax[i][1].set_title('Probabilidades')
        ax[n+i][0].imshow(im_wrong_display[i, 0])
        ax[n+i][1].bar(range(10), output_wrong_display[i])

# and use the function:
muestra_predicciones(model, test_loader, n=2)
```
Esta visualización es muy interesante, porque nos permite ver donde están los fallos. La pregunta es... ¿podríamos mejorar esta precisión con otra arquitectura optimizada para imágenes? Lo veremos en la siguiente parte de la práctica, el notebook `02 Práctica Deep Learning - Redes Convolucionales.ipynb`.
| github_jupyter |
# Image Stacking Exercise
#### Written by Gautham Narayan (gnarayan@stsci.edu) for LSST DSFP #5
### In the last couple of exercises, you determined the WCS solution for a couple of images and reprojected them on to a common grid. That process is slow, so for this exercise I'm just going to give you a lot of reprojected images to begin with. While it's not "Big Data" it's bigger than GitHub will manage, so I've stored it on Dropbox. Get this and extract it.
## https://www.dropbox.com/s/7cpuegjdxv6lte7/bundle_sw.tar.gz?dl=0
### Eventually Adam and I will coordinate and this will be on Box and we can edit these notebooks, or add another README file or something, but the Dropbox link isn't going away for the foreseeable future.
### These images were reprojected using the SWarp code, and when we did this we also produced a (gzipped) noise (sigma) map and a weight (mask) map.
```
!ls ../wdd7_6/warped/ | head -n 10
```
### We used both of these extra images to generate a weight per image for image stacking or when doing image subtraction. Depending on how you proceed with this exercise, you could use one or both, or ignore them altogether.
### Begin by taking a look at a single image's FITS header
```
!imhead ../wdd7_6/warped/wdd7.040920_0452.051_6.sw.fits
```
### You might also want to look at the data (it's ALWAYS a good idea to check) with ds9 and get a sense for it. Are they really aligned or do I just have no idea what I did back when I was a wee grad student (it's ALWAYS a good idea to check...).
### For example, did I actually take out the background? Are the PSFs similar-ish? Are the exposure times the same. WCSTools gethead is your friend.
```
%matplotlib notebook
%pylab
import astropy.io.fits as afits
### astropy can seamlessly handle the gzipped fits images
```
### Then, write a function that takes a filename and loads data from the image and the header that you think might be useful to weight the images by when producing a stack. (hint: SKYADU might be useful, maybe AIRMASS, ZPTMAG, EXPTIME - I'm tossing out suggestions - you can pick and examine what happens with different choices).
```
#### You get to do this ####
```
### Load the data into whatever structure you like - numpy (masked array), list, dictionary - whatever you are comfortable with slicing and dicing.
```
#### You get to do this ####
```
### Now that the data is loaded, decide on how you'd like to weight the data. Normalize them in some sensible way. You'll be comparing this against a median stack and an unweighted simple average.
```
#### You get to do this ####
# Here's an example for weights
#
# weights = 10.**-0.4*zptmag # I'm just using the flux zeropoint to set the weights
# wsum = np.sum(weights)
# weights /= wsum
```
### Create the stacks - median, unweighted mean and using your weighting function
### If you decided to use the masks, make sure you use the corresponding functions in numpy.ma
### if you want to get fancy, you can even try astropy.stats.sigma_clip, and then you'll have tried the most commonly used stacking methods with SWarp
```
#### You get to do this ####
#
# from astropy.stats import sigma_clip
# You'll probably want these to look at the results
from astropy.visualization import ZScaleInterval
zscaler = ZScaleInterval(nsamples=1000, contrast=0.25)
```
### Plot up the stacks you made + one of the original images for comparison. I've saved my example output in the out directory
```
#### You get to do this ####
```
| github_jupyter |
# Welcome to Pynq Audio
This notebook shows the basic recording and playback features of the board.
It uses the audio jack to play back recordings from the built-in microphone, as well as a pre-recorded audio sample. Moreover, visualization with matplotlib and playback with IPython.Audio are shown.
## Create new audio object
```
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
pAudio = base.audio
```
## Record and save
Record a 3-second sample and save it into a file.
```
pAudio.record(3)
pAudio.save("Recording_1.pdm")
```
## Load and play
Load a sample and play the loaded sample.
```
pAudio.load("/home/xilinx/pynq/lib/tests/pynq_welcome.pdm")
pAudio.play()
```
## Play in notebook
Users can also play the audio directly in notebook. To do this, the file format has to be converted from Pulse Density Modulation (PDM) to Pulse Code Modulation (PCM).
For more information, please refer to: https://en.wikipedia.org/wiki/Pulse-density_modulation.
### Step 1: Preprocessing
In this step, we first convert the 32-bit integer buffer to 16-bit. Then we divide 16-bit words (16 1-bit samples each) into 8-bit words with 1-bit sample each.
```
import time
import numpy as np
start = time.time()
af_uint8 = np.unpackbits(pAudio.buffer.astype(np.int16)
.byteswap(True).view(np.uint8))
end = time.time()
print("Time to convert {:,d} PDM samples: {:0.2f} seconds"
.format(np.size(pAudio.buffer)*16, end-start))
print("Size of audio data: {:,d} Bytes"
.format(af_uint8.nbytes))
```
### Step 2: Converting PDM to PCM
We now convert PDM to PCM by decimation. The sample rate is reduced from 3MHz to 32kHz.
We will remove the first and last 10 samples in case there are outliers introduced by decimation. We will also remove the DC offset from the waveform.
```
import time
from scipy import signal
start = time.time()
af_dec = signal.decimate(af_uint8,8,zero_phase=True)
af_dec = signal.decimate(af_dec,6,zero_phase=True)
af_dec = signal.decimate(af_dec,2,zero_phase=True)
af_dec = (af_dec[10:-10]-af_dec[10:-10].mean())
end = time.time()
print("Time to convert {:,d} Bytes: {:0.2f} seconds"
.format(af_uint8.nbytes, end-start))
print("Size of audio data: {:,d} Bytes"
.format(af_dec.nbytes))
del af_uint8
```
### Step 3: Audio Playback in Web Browser
```
from IPython.display import Audio as IPAudio
IPAudio(af_dec, rate=32000)
```
## Plotting PCM data
Users can display the audio data in notebook:
1. Plot the audio signal's amplitude over time.
2. Plot the spectrogram of the audio signal.
#### Amplitude over time
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.figure(num=None, figsize=(15, 5))
time_axis = np.arange(0,((len(af_dec))/32000),1/32000)
plt.title('Audio Signal in Time Domain')
plt.xlabel('Time in s')
plt.ylabel('Amplitude')
plt.plot(time_axis, af_dec)
plt.show()
```
#### Frequency spectrum
```
from scipy.fftpack import fft
yf = fft(af_dec)
yf_2 = yf[1:len(yf)//2]
xf = np.linspace(0.0, 32000//2, len(yf_2))
plt.figure(num=None, figsize=(15, 5))
plt.plot(xf, abs(yf_2))
plt.title('Magnitudes of Audio Signal Frequency Components')
plt.xlabel('Frequency in Hz')
plt.ylabel('Magnitude')
plt.show()
```
#### Frequency spectrum over time
Use the `classic` plot style for better display.
```
import matplotlib
np.seterr(divide='ignore',invalid='ignore')
matplotlib.style.use("classic")
plt.figure(num=None, figsize=(15, 4))
plt.title('Audio Signal Spectogram')
plt.xlabel('Time in s')
plt.ylabel('Frequency in Hz')
_ = plt.specgram(af_dec, Fs=32000)
```
| github_jupyter |
```
import os
import cv2
import csv
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
%matplotlib inline
num_output = 8
input_shape = (512, 512, 3)
batch_size = 10
IMAGES_FOLDER = '/home/sanjeev309/Projects/posebox/resized_frames'
ANNOTATION_FILE = '/home/sanjeev309/Projects/posebox/annotation_formatted.csv'
OUTPUT = '/home/sanjeev309/Projects/posebox/output'
checkpoint_dir = OUTPUT + "/ckpt"
```
### Initialise empty numpy arrays
```
# Accumulator arrays, empty along axis 0 so samples can be np.vstack'ed on.
# The images appended later are floats (cv2.imread(...) / 255), so the image
# buffer is float64 from the start — the original int8 buffer was promoted to
# float64 on the first vstack anyway. The deprecated `np.float` alias (removed
# in NumPy 1.24, raises AttributeError) is replaced by the explicit dtype.
data = np.empty((0, 512, 512, 3), dtype=np.float64)
target = np.empty((0, 8), dtype=np.float64)
data.shape
target.shape
```
### Read annotation file, fetch image, normalise image and array, compose data and target arrays
```
with open(ANNOTATION_FILE, 'r') as csv_file:
    reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in reader:
        print(row)
        if line_count == 0:
            # skip the header row
            line_count += 1
        else:
            image_path = os.path.join(IMAGES_FOLDER, row[0])
            image = cv2.imread(image_path)
            # cv2.imread returns None for a missing/unreadable file; check
            # BEFORE normalizing, otherwise `None / 255` raises TypeError.
            if image is not None:
                image = image / 255  # normalize pixels to [0, 1]
                image = np.expand_dims(image, axis=0)
                points = row[1]
                # builtin float(); the original C-style "(float)(x)" cast is
                # just float(x) in Python
                dimen = float(row[2])
                # parse the "[x1, y1, ...]" string into a numeric array
                p = points.strip('][').split(', ')
                # builtin int dtype: the `np.int` alias was removed in NumPy 1.24
                p = np.array(p, dtype=int)
                p = np.divide(p, dimen)  # normalize keypoints by the image dimension
                p = np.expand_dims(p, axis=0)
                data = np.vstack((data, image))
                target = np.vstack((target, p))
            line_count += 1
```
### Shuffle data and target synchronously
```
num_samples = data.shape[0]
arr = np.arange(num_samples)
np.random.shuffle(arr)
arr
data = data[arr]
target = target[arr]
print(data.shape)
print(target.shape)
np.save(os.path.join(OUTPUT,'data.npy'), data)
np.save(os.path.join(OUTPUT,'target.npy'), target)
```
### Load data and target
```
data = np.load(os.path.join(OUTPUT,'data.npy'))
target = np.load(os.path.join(OUTPUT,'target.npy'))
```
### Train / Test split
```
TRAIN_RATIO = 0.8
# Split index for an 80/20 train/test split. Slicing with [:split] / [split:]
# keeps every sample: the original "- 1" upper bounds silently dropped one
# sample at the train/test boundary and the final sample of the dataset.
split = int(num_samples * TRAIN_RATIO)
X_train = data[:split]
y_train = target[:split]
X_test = data[split:]
y_test = target[split:]
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
def build_model():
    """Build a keypoint-regression head on top of a frozen ImageNet MobileNetV2.

    Input: (512, 512, 3) images. Two conv(2x2)+maxpool stages shrink the
    spatial size 512 -> 511 -> 255 -> 254 -> 127, matching the (127, 127, 3)
    input the frozen base model expects.
    Returns: a compiled keras.Sequential producing `num_output` sigmoid
    values in [0, 1] (keypoints normalized by the image dimension).
    """
    base_model = keras.applications.MobileNetV2(input_shape=(127, 127, 3),
                                                include_top=False,
                                                weights='imagenet')
    base_model.trainable = False  # freeze the pretrained backbone
    # summary() prints the architecture itself and returns None, so it must
    # not be wrapped in print() (that would print a stray "None" line).
    base_model.summary()

    model = keras.Sequential([
        layers.Input(shape=input_shape),
        layers.Conv2D(3, (2, 2), activation='relu'),
        layers.MaxPool2D((2, 2)),
        layers.Conv2D(3, (2, 2), activation='relu'),
        layers.MaxPool2D((2, 2)),
        base_model,
        layers.Flatten(),
        layers.Dense(64),
        layers.Dense(32),
        layers.Dense(16),
        layers.Dense(num_output, activation="sigmoid"),
    ])

    optimizer = keras.optimizers.SGD(0.01)
    model.compile(loss=keras.losses.mean_squared_error,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
def make_or_restore_model():
    """Restore the newest checkpoint if one exists, else build a fresh model."""
    # Ensure the checkpoint directory exists: on a first run, before the
    # ModelCheckpoint callback has saved anything, os.listdir would raise
    # FileNotFoundError.
    os.makedirs(checkpoint_dir, exist_ok=True)
    checkpoints = [checkpoint_dir + '/' + name
                   for name in os.listdir(checkpoint_dir)]
    if checkpoints:
        # pick the most recently created checkpoint
        latest_checkpoint = max(checkpoints, key=os.path.getctime)
        print('Restoring from', latest_checkpoint)
        return keras.models.load_model(latest_checkpoint)
    print('Creating a new model')
    return build_model()
model = make_or_restore_model()
callbacks = [
# This callback saves a SavedModel every 5 epochs.
# We include the training loss in the folder name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + '/ckpt-loss={loss:.2f}',
# save_freq=4)
period=5)
]
model.summary()
# del model
train_history = model.fit(data, target, batch_size=batch_size, validation_split= 0.2, epochs=5000,callbacks=callbacks)
```
| github_jupyter |
# NGFC Modelling with IDEAS - pse
This is a modelling example of a NGFC system using idaes-pse framework.
[Refer python modules from https://idaes-pse.readthedocs.io/en/stable/_modules/index.html]
```
from pyomo.environ import ConcreteModel, Constraint, Objective, SolverFactory, TransformationFactory, Constraint, Var
from pyomo.network import Arc
from idaes.core import FlowsheetBlock
from idaes.generic_models.unit_models import Mixer, HeatExchanger, Separator, GibbsReactor, Heater
from idaes.generic_models.unit_models.separator import SplittingType
from idaes.generic_models.unit_models.heat_exchanger import delta_temperature_amtd_callback
from idaes.core.util.model_statistics import degrees_of_freedom as dof
import idaes.generic_models.properties.activity_coeff_models.methane_combustion_ideal as thermo_props
```
### Building Base Flowsheet.
```
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.thermo_params = thermo_props.MethaneParameterBlock()
# Fuel ultilization (Uf): mole reductant consumed in FC per mole of reductant total
Uf = 0.8
# Air ultilization (Ua): mole of air consumed in FC per mole of air feed
Ua = 0.2
# Methane to steam ratio (MS): mole methane per mole water
MS = 2
# Electrical conversion efficiency (EE):
EE = 0.9
#Methane LHV: (https://www.engineeringtoolbox.com/fuels-higher-calorific-values-d_169.html)
LHV = 50000*16.04 # (J/g * g/mol = J/mol)
# Heat of water vaporization @ 25 C: (https://www.engineeringtoolbox.com/water-properties-d_1573.html)
enthalpy_vap = 43988 # (J/mol)
# Feed:
# Reaction:
# Reforming: CH4 + H2O -> CO + 3H2
# Water gas shift: CO + H2O -> CO2 + H2
# Methane combustion: CH4 + 2O2 -> CO2 + 2H2O
# Hydrogen combustion: H2 + 1/2O2 -> H2O
# Carbon monoxide combustion: CO + 1/2O2 -> CO2
n_CH4f = 10
print("mole of methane feed: "+str(n_CH4f)+" mole/s")
n_H2Of = n_CH4f*MS
print("mole of steam feed: "+str(n_H2Of)+" mole/s")
n_O2f = n_CH4f*Uf*2/Ua
n_N2f = n_O2f*0.79/0.21
print("mole of air feed: "+str(n_N2f+n_O2f)+" mole/s")
n_H2ex = 2
n_COex = n_CH4f*(1-Uf)*4-n_H2ex
n_CO2ex = n_CH4f-n_COex
n_H2Oex = n_H2Of+2*n_CH4f-n_H2ex
y_H2ex = n_H2ex/(n_H2ex + n_COex + n_CO2ex + n_H2Oex)
y_COex = n_COex/(n_H2ex + n_COex + n_CO2ex + n_H2Oex)
y_CO2ex = n_CO2ex/(n_H2ex + n_COex + n_CO2ex + n_H2Oex)
y_H2Oex = n_H2Oex/(n_H2ex + n_COex + n_CO2ex + n_H2Oex)
print("Anode exhaust: ")
print("y_H2ex: "+str(y_H2ex))
print("y_COex: "+str(y_COex))
print("y_CO2ex: "+str(y_CO2ex))
print("y_H2Oex: "+str(y_H2Oex))
print("Total mole/s: "+str(n_H2ex + n_COex + n_CO2ex + n_H2Oex))
n_N2ex = n_N2f
n_O2ex = n_O2f - n_CH4f*Uf*2
y_O2ex = n_O2ex/(n_O2ex+n_N2ex)
y_N2ex = n_N2ex/(n_O2ex+n_N2ex)
print("Cathode exhaust: ")
print("y_O2ex: "+str(y_O2ex))
print("y_N2ex: "+str(y_N2ex))
print("Total mole/s: "+str(n_O2ex+n_N2ex))
# Temperature input
# Need to know:
# _temperature of air coming in to fuel cell (FC) (T_FC_air_in)
# _temperature of fuel coming into (FC)/temperature of reformer (T_FC_fuel_in)
# _temperature of exhaust coming out of FC (T_FC_ex_out)
T_FC_air_in = 700 + 273.15
T_FC_fuel_in = 500 + 273.15
T_FC_ex_out = 800 + 273.15
```
### Blue coded solid material streams can be solved from given specifications
```
from IPython.display import Image
# Render the simplified P&ID of the SOFC flowsheet inline in the notebook.
Image(filename='../../assets/images/SOFC-P&ID-Simplified.jpeg')
```
### Declare all Units:
```
# Simple heaters stand in for the preheat exchangers at this flowsheet stage.
m.fs.HX1 = Heater(default={"property_package": m.fs.thermo_params})
m.fs.HX2a = Heater(default={"property_package": m.fs.thermo_params})
m.fs.HX2b = Heater(default={"property_package": m.fs.thermo_params})
# Mix1: methane + water feeds; Mix2: reformate + O2; Mix3: SOFC exhaust + air.
m.fs.Mix1 = Mixer(default={"dynamic": False,
                           "property_package": m.fs.thermo_params})
m.fs.Mix2 = Mixer(default={"dynamic": False,
                           "property_package": m.fs.thermo_params})
m.fs.Mix3 = Mixer(default={"dynamic": False,
                           "property_package": m.fs.thermo_params})
# Component-flow split of the air feed, so pure O2 can be routed separately.
m.fs.Split1 = Separator(default={"dynamic": False,
                                 "split_basis": SplittingType.componentFlow,
                                 "property_package": m.fs.thermo_params})
# Reformer, SOFC and Burner are all modeled as Gibbs (equilibrium) reactors
# with a heat-transfer term so their duties/temperatures can be specified.
m.fs.Reformer = GibbsReactor(default={"dynamic": False,
                                      "property_package": m.fs.thermo_params,
                                      "has_pressure_change": False,
                                      "has_heat_transfer": True})
m.fs.SOFC = GibbsReactor(default={"dynamic": False,
                                  "property_package": m.fs.thermo_params,
                                  "has_pressure_change": False,
                                  "has_heat_transfer": True})
m.fs.Burner = GibbsReactor(default={"dynamic": False,
                                    "property_package": m.fs.thermo_params,
                                    "has_pressure_change": False,
                                    "has_heat_transfer": True})
```
### Declare all Streams:
```
# Flowsheet connectivity (see the P&ID above):
#   Mix1 -> HX1 -> Reformer -> Mix2 -> SOFC -> Mix3 -> Burner
#   Split1.outlet_1 -> HX2b -> Mix2;  Split1.outlet_2 -> HX2a -> Mix3
m.fs.stream0 = Arc(source=m.fs.Mix1.outlet,
                   destination=m.fs.HX1.inlet)
m.fs.stream1 = Arc(source=m.fs.Split1.outlet_1,
                   destination=m.fs.HX2b.inlet)
m.fs.stream2 = Arc(source=m.fs.HX1.outlet,
                   destination=m.fs.Reformer.inlet)
m.fs.stream3 = Arc(source=m.fs.Split1.outlet_2,
                   destination=m.fs.HX2a.inlet)
m.fs.stream4 = Arc(source=m.fs.Reformer.outlet,
                   destination=m.fs.Mix2.inlet_1)
m.fs.stream5 = Arc(source=m.fs.HX2b.outlet,
                   destination=m.fs.Mix2.inlet_2)
m.fs.stream6 = Arc(source=m.fs.Mix2.outlet,
                   destination=m.fs.SOFC.inlet)
m.fs.stream7 = Arc(source=m.fs.HX2a.outlet,
                   destination=m.fs.Mix3.inlet_2)
m.fs.stream8 = Arc(source=m.fs.SOFC.outlet,
                   destination=m.fs.Mix3.inlet_1)
m.fs.stream9 = Arc(source=m.fs.Mix3.outlet,
                   destination=m.fs.Burner.inlet)
# Expand each Arc into the equality constraints linking its port variables.
TransformationFactory("network.expand_arcs").apply_to(m)
```
### Define known Material Streams:
```
# Fix methane flow to Mix1:
m.fs.Mix1.inlet_1.flow_mol.fix(n_CH4f)
m.fs.Mix1.inlet_1.mole_frac_comp[0.0,:].fix(0.0)
m.fs.Mix1.inlet_1.mole_frac_comp[0.0,"CH4"].fix(1.0)
m.fs.Mix1.inlet_1.temperature.fix(25+273.15)
m.fs.Mix1.inlet_1.pressure.fix(101325)
# Fix water flow to Mix1:
m.fs.Mix1.inlet_2.flow_mol.fix(n_H2Of)
m.fs.Mix1.inlet_2.mole_frac_comp[0.0,:].fix(0.0)
m.fs.Mix1.inlet_2.mole_frac_comp[0.0,"H2O"].fix(1.0)
m.fs.Mix1.inlet_2.temperature.fix(25+273.15)
m.fs.Mix1.inlet_2.pressure.fix(101325)
# Fix air flow to Split1:
m.fs.Split1.inlet.flow_mol.fix(n_N2f+n_O2f)
m.fs.Split1.inlet.mole_frac_comp[0.0,:].fix(0.0)
m.fs.Split1.inlet.mole_frac_comp[0.0,"O2"].fix(0.21)
m.fs.Split1.inlet.mole_frac_comp[0.0,"N2"].fix(0.79)
m.fs.Split1.inlet.temperature.fix(25+273.15)
m.fs.Split1.inlet.pressure.fix(101325)
# Fix O2 flow in Split1 outlet_1:
m.fs.Split1.outlet_1.flow_mol.fix(n_CH4f*Uf*2)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"CH4"].fix(0.0)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"CO"].fix(0.0)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"CO2"].fix(0.0)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"H2"].fix(0.0)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"H2O"].fix(0.0)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"N2"].fix(0.0)
m.fs.Split1.outlet_1.mole_frac_comp[0.0,"O2"].fix(1.0)
```
### Constraints: Adiabatic Burner
```
# Burner is adiabatic: no heat exchanged with the surroundings.
m.fs.Burner.heat_duty.fix(0.0)
```
### Constraints: Adiabatic Reformer
```
# Reformer is adiabatic: reforming heat must come in with the feed stream.
m.fs.Reformer.heat_duty.fix(0.0)
```
### Constraints: Reformer temperature
```
# Fix the reformer outlet (= SOFC fuel inlet) temperature; the upstream HX1
# duty remains free to satisfy this together with the adiabatic constraint.
m.fs.Reformer.outlet.temperature.fix(T_FC_fuel_in)
```
### Constraints: SOFC exit temperature
```
# Fix the SOFC exhaust at the specified stack exit temperature (800 C).
m.fs.SOFC.outlet.temperature.fix(T_FC_ex_out)
```
### Constraints: Air heat Exchanger tube-side outlet temperature
```
# Fix both air-preheat outlet temperatures, then report the remaining
# degrees of freedom (expected to be 0 before solving).
m.fs.HX2a.outlet.temperature.fix(T_FC_ex_out)
m.fs.HX2b.outlet.temperature.fix(T_FC_air_in)
dof(m)
```
### Initialize Blocks
```
# Initialize each unit to give the solver a good starting point.
# BUG FIX: the original referenced the bound method (e.g. `m.fs.Mix1.initialize`)
# without calling it, which is a no-op — no initialization ever ran.
m.fs.Mix1.initialize()
m.fs.Mix2.initialize()
m.fs.Mix3.initialize()
m.fs.HX1.initialize()
m.fs.HX2a.initialize()
m.fs.HX2b.initialize()
m.fs.Split1.initialize()
```
### Solve:
```
# BUG FIX: the Gibbs-reactor flowsheet is nonlinear; GLPK is an LP/MILP
# solver and cannot handle it. Use IPOPT, the NLP solver already used for
# the heat-exchanger network below.
solver = SolverFactory('ipopt')
results = solver.solve(m, tee=True)
# Solution:
print("Burner exhaust temperature: "+format(m.fs.Burner.outlet.temperature[0].value-273.15, ".2f")+ " oC")
print("SOFC energy output: "+format(-m.fs.SOFC.heat_duty[0].value*EE, ".2f")+ " J/s")
print("SOFC efficiency: "+format(-m.fs.SOFC.heat_duty[0].value*EE/(n_CH4f*LHV)*100, ".2f")+ " %")
# Temperature into Reformer:
print("Reformer entrance temperature: "+format(m.fs.Reformer.inlet.temperature[0].value-273.15, ".2f")+ " oC")
```
## Heat Exchanger network
```
# Second flowsheet for the heat-exchanger network, reusing the same methane
# property package as the SOFC flowsheet above.
h = ConcreteModel()
h.fs = FlowsheetBlock(default={"dynamic": False})
h.fs.thermo_params = thermo_props.MethaneParameterBlock()
```
### All Unit Blocks
```
# Split the burner exhaust three ways — one hot stream per exchanger.
h.fs.Split2 = Separator(default={"dynamic": False,
                                 "num_outlets": 3,
                                 "property_package": h.fs.thermo_params})
# Shell-and-tube exchangers using the arithmetic-mean temperature difference.
h.fs.HX1a = HeatExchanger(default={"dynamic": False,
                                   "delta_temperature_callback": delta_temperature_amtd_callback,
                                   "shell":{"property_package": h.fs.thermo_params},
                                   "tube":{"property_package": h.fs.thermo_params}})
h.fs.HX2a = HeatExchanger(default={"dynamic": False,
                                   "delta_temperature_callback": delta_temperature_amtd_callback,
                                   "shell":{"property_package": h.fs.thermo_params},
                                   "tube":{"property_package": h.fs.thermo_params}})
h.fs.HX2b = HeatExchanger(default={"dynamic": False,
                                   "delta_temperature_callback": delta_temperature_amtd_callback,
                                   "shell":{"property_package": h.fs.thermo_params},
                                   "tube":{"property_package": h.fs.thermo_params}})
# HX1b: simple heater representing the water-evaporation duty downstream of HX1a.
h.fs.HX1b = Heater(default={"property_package": h.fs.thermo_params})
```
### Streams:
```
# Boundary conditions for the heat-exchanger network: copy the solved stream
# states from flowsheet `m` onto the feed ports of flowsheet `h`.
_COMPONENTS = ("CH4", "CO", "CO2", "H2", "H2O", "N2", "NH3", "O2")

def _fix_port_from(port, source):
    """Fix flow, full composition, temperature and pressure of `port`
    to the solved values of `source` (a port on flowsheet `m`)."""
    port.flow_mol.fix(source.flow_mol[0].value)
    for comp in _COMPONENTS:
        port.mole_frac_comp[0, comp].fix(source.mole_frac_comp[0, comp].value)
    port.temperature.fix(source.temperature[0].value)
    port.pressure.fix(source.pressure[0].value)

# Hot side: burner exhaust feeds the exhaust splitter.
_fix_port_from(h.fs.Split2.inlet, m.fs.Burner.outlet)
# Cold sides: methane/steam feed and the two air split fractions.
_fix_port_from(h.fs.HX1a.tube_inlet, m.fs.Mix1.outlet)
_fix_port_from(h.fs.HX2a.tube_inlet, m.fs.Split1.outlet_1)
_fix_port_from(h.fs.HX2b.tube_inlet, m.fs.Split1.outlet_2)
```
### Flows:
```
# Hot-side routing: each splitter outlet feeds one exchanger shell; HX1a's
# hot outlet is finished in heater HX1b (the water-evaporation duty).
h.fs.stream0 = Arc(source=h.fs.Split2.outlet_1,
                   destination=h.fs.HX1a.shell_inlet)
h.fs.stream1 = Arc(source=h.fs.Split2.outlet_2,
                   destination=h.fs.HX2a.shell_inlet)
h.fs.stream2 = Arc(source=h.fs.Split2.outlet_3,
                   destination=h.fs.HX2b.shell_inlet)
h.fs.stream3 = Arc(source=h.fs.HX1a.shell_outlet,
                   destination=h.fs.HX1b.inlet)
# Expand arcs into the equality constraints linking the connected ports.
TransformationFactory("network.expand_arcs").apply_to(h)
```
### Design constraints:
```
# heat transfer coeff of heat exchanger:
h.fs.HX1a.overall_heat_transfer_coefficient.fix(200)
h.fs.HX2a.overall_heat_transfer_coefficient.fix(200)
h.fs.HX2b.overall_heat_transfer_coefficient.fix(200)
# HX4/Evaporator:
# Duty equals the heat given up to vaporize the water feed (negative sign:
# the hot exhaust stream is being cooled).
h.fs.HX1b.heat_duty.fix(-n_H2Of*enthalpy_vap)
# HX cold/tube side outlet temperature:
# HX1a must reproduce the reformer feed temperature found by flowsheet `m`.
h.fs.HX1a.tube_outlet.temperature.fix(m.fs.HX1.outlet.temperature[0].value)
h.fs.HX2a.tube_outlet.temperature.fix(T_FC_air_in)
h.fs.HX2b.tube_outlet.temperature.fix(T_FC_air_in)
# HX hot/shell side outlet temperature:
h.fs.HX2a.shell_outlet.temperature.fix(200+273.15)
h.fs.HX2b.shell_outlet.temperature.fix(200+273.15)
```
### DOF:
```
# Report remaining degrees of freedom of the exchanger network (expected 0).
dof(h)
```
### Solve:
```
# IPOPT (NLP solver) handles the nonlinear heat-exchanger equations.
solver = SolverFactory('ipopt')
results = solver.solve(h, tee=True)
```
### Solutions:
```
# Report the solved heat-exchanger network (all temperatures in Celsius).
def _celsius(kelvin):
    """Format an absolute temperature [K] as Celsius with two decimals."""
    return f"{kelvin - 273.15:.2f}"

hx1 = h.fs.HX1a
hx2 = h.fs.HX2a
print("HX1 - Steam, methane preheat heat exchanger: ")
print(f"\tTube inlet temperature: \t{_celsius(hx1.tube_inlet.temperature[0].value)} oC")
print(f"\tTube outlet temperature: \t{_celsius(hx1.tube_outlet.temperature[0].value)} oC")
print(f"\tShell inlet temperature: \t{_celsius(hx1.shell_inlet.temperature[0].value)} oC")
print(f"\tShell outlet temperature: \t{_celsius(h.fs.HX1b.outlet.temperature[0].value)} oC")
print("HX2 - Air heat exchanger: ")
print(f"\tTube inlet temperature: \t{_celsius(hx2.tube_inlet.temperature[0].value)} oC")
print(f"\tTube outlet temperature: \t{_celsius(hx2.tube_outlet.temperature[0].value)} oC")
print(f"\tShell inlet temperature: \t{_celsius(hx2.shell_inlet.temperature[0].value)} oC")
print(f"\tShell outlet temperature: \t{_celsius(hx2.shell_outlet.temperature[0].value)} oC")
print(f"\tAssumed heat transfer coefficient: \t{hx2.overall_heat_transfer_coefficient[0].value:.2f} W/m2.K")
print(f"\tHeat exchanger area: \t\t{hx2.area.value + h.fs.HX2b.area.value:.2f} m2")
frac_hx1 = h.fs.Split2.split_fraction[0, "outlet_1"].value
print("Exhaust split ratio: ")
print(f"\tSplit fraction to methane, steam heat exchanger: {frac_hx1:.3f}")
print(f"\tSplit fraction to air heat exchanger: {1 - frac_hx1:.3f}")
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Load images
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/images"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial provides a simple example of how to load an image dataset using `tf.data`.
The dataset used in this example is distributed as directories of images, with one class of image per directory.
## Setup
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
import tensorflow.compat.v2 as tf
except Exception:
pass
tf.enable_v2_behavior()
AUTOTUNE = tf.data.experimental.AUTOTUNE
```
## Download and inspect the dataset
### Retrieve the images
Before you start any training, you will need a set of images to teach the network about the new classes you want to recognize. You have already created an archive of creative-commons licensed flower photos to use initially:
```
import pathlib
# Download (and cache) the flower-photos archive; returns the local path.
data_root_orig = tf.keras.utils.get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
                                         fname='flower_photos', untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
```
After downloading 218MB, you should now have a copy of the flower photos available:
```
for item in data_root.iterdir():
    print(item)
import random
# Collect every image path (one class per subdirectory) and shuffle once,
# so downstream examples and splits are order-independent.
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
```
### Inspect the images
Now let's have a quick look at a couple of the images, so you know what you are dealing with:
```
import os
# NOTE(review): `os` appears unused in this cell — confirm before removing.
# Attribution records start after the 4-line header of LICENSE.txt; each line
# splits on ' CC-BY' into (relative path, photographer/license text).
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
    """Build the attribution caption ("Image (CC BY 2.0) <credit>") for an image."""
    relative = pathlib.Path(image_path).relative_to(data_root)
    credit_parts = attributions[str(relative)].split(' - ')[:-1]
    return "Image (CC BY 2.0) " + ' - '.join(credit_parts)
# Display three random images together with their attribution captions.
for n in range(3):
    image_path = random.choice(all_image_paths)
    display.display(display.Image(image_path))
    print(caption_image(image_path))
    print()
```
### Determine the label for each image
List the available labels:
```
# Class names = the dataset's subdirectory names, sorted alphabetically.
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
```
Assign an index to each label:
```
# Assign each label name an integer index (0..n-1, alphabetical order).
label_to_index = {name: index for index, name in enumerate(label_names)}
label_to_index
```
Create a list of every file, and its label index:
```
# Label of each image = index of its parent directory's name.
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
                    for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
```
### Load and format the images
TensorFlow includes all the tools you need to load and process images:
```
# Take the first path to demonstrate the loading steps.
img_path = all_image_paths[0]
img_path
```
Here is the raw data:
```
# Read the file contents as a raw byte string.
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
```
Decode it into an image tensor:
```
# Decode the raw bytes into an image tensor.
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
```
Resize it for your model:
```
# Resize to the model input size and rescale pixel values into [0, 1].
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
```
Wrap these up in simple functions for later.
```
def preprocess_image(image):
    """Decode a raw JPEG byte string into a 192x192 float image in [0, 1]."""
    decoded = tf.image.decode_jpeg(image, channels=3)
    resized = tf.image.resize(decoded, [192, 192])
    return resized / 255.0  # normalize to [0,1] range
def load_and_preprocess_image(path):
    """Read the file at `path` and run it through `preprocess_image`."""
    raw = tf.io.read_file(path)
    return preprocess_image(raw)
import matplotlib.pyplot as plt
image_path = all_image_paths[0]
label = all_image_labels[0]
# CONSISTENCY FIX: the original plotted `img_path` (a leftover variable from an
# earlier cell) instead of the `image_path` assigned just above. Both happen to
# hold all_image_paths[0] here, but the stale name breaks if the earlier cell
# changes or is skipped.
plt.imshow(load_and_preprocess_image(image_path))
plt.grid(False)
plt.xlabel(caption_image(image_path))
plt.title(label_names[label].title())
print()
```
## Build a `tf.data.Dataset`
### A dataset of images
The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method.
Slicing the array of strings, results in a dataset of strings:
```
# Dataset of the path strings themselves (one scalar string per image).
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
```
The `shapes` and `types` describe the content of each item in the dataset. In this case it is a set of scalar binary-strings
```
# Inspect the dataset's element spec (scalar binary strings).
print(path_ds)
```
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
```
# Decode/resize images lazily, in parallel, as the dataset is consumed.
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n, image in enumerate(image_ds.take(4)):
    plt.subplot(2,2,n+1)
    plt.imshow(image)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
```
### A dataset of `(image, label)` pairs
Using the same `from_tensor_slices` method you can build a dataset of labels:
```
# Labels as a dataset of int64 scalars, in the same order as the paths.
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
    print(label_names[label.numpy()])
```
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs:
```
# Pair each image with its label (datasets share the same ordering).
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
```
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
```
# Element spec is now a (image, label) tuple.
print(image_label_ds)
```
Note: When you have arrays like `all_image_labels` and `all_image_paths` an alternative to `tf.data.Dataset.zip` is to slice the pair of arrays.
```
# Alternative construction: slice (path, label) pairs directly.
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
    """Load/preprocess the image at `path`; pass `label` through unchanged."""
    return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
```
### Basic methods for training
To train a model with this dataset you will want the data:
* To be well shuffled.
* To be batched.
* To repeat forever.
* Batches to be available as soon as possible.
These features can be easily added using the `tf.data` api.
```
# Standard input pipeline: shuffle -> repeat -> batch -> prefetch.
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
```
There are a few things to note here:
1. The order is important.
* A `.shuffle` after a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all).
* A `.shuffle` after a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.
1. You use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.
1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.
1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.
This last point can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
```
# Fused shuffle+repeat avoids the buffer-refill stall at each epoch boundary.
ds = image_label_ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
```
### Pipe the dataset to a model
Fetch a copy of MobileNet v2 from `tf.keras.applications`.
This will be used for a simple transfer learning example.
Set the MobileNet weights to be non-trainable:
```
# Headless MobileNetV2 feature extractor; weights frozen for transfer learning.
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
```
This model expects its input to be normalized to the `[-1,1]` range:
```
# BUG FIX: `keras_applications` is never imported in this notebook (NameError);
# reach the helper through the tf.keras namespace instead.
help(tf.keras.applications.mobilenet_v2.preprocess_input)
```
<pre>
...
This function applies the "Inception" preprocessing which converts
the RGB values from [0, 255] to [-1, 1]
...
</pre>
Before you pass the input to the MobileNet model, you need to convert it from a range of `[0,1]` to `[-1,1]`:
```
def change_range(image, label):
    """Rescale image values from [0, 1] to [-1, 1]; pass the label through."""
    rescaled = image * 2 - 1
    return rescaled, label
# Apply the [-1, 1] rescaling expected by MobileNetV2.
keras_ds = ds.map(change_range)
```
The MobileNet returns a `6x6` spatial grid of features for each image.
Pass it a batch of images to see:
```
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
# Run one batch through the frozen feature extractor to inspect its output shape.
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
```
Build a model wrapped around MobileNet and use `tf.keras.layers.GlobalAveragePooling2D` to average over those space dimensions before the output `tf.keras.layers.Dense` layer:
```
# Classifier head: global average pool over the spatial grid, then softmax.
model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
```
Now it produces outputs of the expected shape:
```
# Sanity-check the model output range and shape on one batch.
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
```
Compile the model to describe the training procedure:
```
# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=["accuracy"])
```
There are 2 trainable variables - the Dense `weights` and `bias`:
```
# Only the Dense head's weights and bias are trainable (base is frozen).
len(model.trainable_variables)
model.summary()
```
You are ready to train the model.
Note that for demonstration purposes you will only run 3 steps per epoch, but normally you would specify the real number of steps, as defined below, before passing it to `model.fit()`:
```
# Full epoch length; only 3 steps are actually run below, for demonstration.
steps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
```
## Performance
Note: This section just shows a couple of easy tricks that may help performance. For an in depth guide see [Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets).
The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU, but may not be sufficient for GPU training and is totally inappropriate for any sort of distributed training.
To investigate, first build a simple function to check the performance of our datasets:
```
import time
# Benchmark helper: pull `steps` batches through a dataset and report throughput.
default_timeit_steps = 2*steps_per_epoch+1

def timeit(ds, steps=default_timeit_steps):
    """Iterate `steps` batches of `ds`, printing progress dots and images/sec."""
    wall_start = time.time()
    # Fetch a single batch to prime the pipeline (fill the shuffle buffer)
    # before starting the timer.
    batches = iter(ds.take(steps+1))
    next(batches)
    start = time.time()
    for step, (images, labels) in enumerate(batches):
        if step % 10 == 0:
            print('.', end='')
    print()
    end = time.time()
    elapsed = end - start
    print("{} batches: {} s".format(steps, elapsed))
    print("{:0.5f} Images/s".format(BATCH_SIZE*steps/elapsed))
    print("Total time: {}s".format(end-wall_start))
```
The performance of the current dataset is:
```
# Baseline pipeline throughput (no caching).
ds = image_label_ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
```
### Cache
Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is very efficient, especially when the data fits in memory.
Here the images are cached, after being pre-processed (decoded and resized):
```
# In-memory cache of the decoded/resized images.
ds = image_label_ds.cache()
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
```
One disadvantage to using an in memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
```
# Second run: the in-memory cache must be rebuilt after a restart.
timeit(ds)
```
If the data doesn't fit in memory, use a cache file:
```
# On-disk cache, for datasets larger than memory.
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
```
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
```
# Second run: served straight from the cache file.
timeit(ds)
```
### TFRecord File
#### Raw image data
TFRecord files are a simple format to store a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS.
First, build a TFRecord file from the raw image data:
```
# Write the raw (still-encoded) image bytes into a single TFRecord file.
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.io.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
```
Next, build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier:
```
# Read the records back, decoding/resizing each image on the fly.
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
```
Zip that dataset with the labels dataset you defined earlier to get the expected `(image,label)` pairs:
```
# Pair the decoded images with labels and time the TFRecord-backed pipeline.
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
```
This is slower than the `cache` version because you have not cached the preprocessing.
#### Serialized Tensors
To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before:
```
# Dataset of fully pre-processed image tensors (decode + resize + rescale).
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
```
Now instead of a dataset of `.jpeg` strings, you have a dataset of tensors.
To serialize this to a TFRecord file you first convert the dataset of tensors to a dataset of strings:
```
# Serialize each tensor to a string so it can be written as a TFRecord.
ds = image_ds.map(tf.io.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
```
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently - just remember to de-serialize the tensor before using it:
```
# Re-load the serialized tensors and restore their static shape.
ds = tf.data.TFRecordDataset('images.tfrec')

def parse(x):
    """Deserialize one tensor record and restore its (192, 192, 3) shape."""
    image = tf.io.parse_tensor(x, out_type=tf.float32)
    return tf.reshape(image, [192, 192, 3])

ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
```
Now, add the labels and apply the same standard operations, as before:
```
# Same shuffle/repeat/batch/prefetch pipeline over the pre-serialized tensors.
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
```
| github_jupyter |
# Using Time Series to Predict Financial Endowment
*This is a toy example for me to get acquainted with time series and presenting with Jupyter Notebooks, thus the prediction is not meant to be accurate by any measure. Expect a very introductory presentation, as this is my first foray into forecasting time series.*
## Extract Data
For the data to be analysed, I chose the financial endowment of a few British universities as listed on [Wikipedia](https://en.wikipedia.org/wiki/List_of_UK_universities_by_endowment). The data is *very* limited, and I don't suppose that one can accurately predict the future endowment only based on this data, but it may be a fun learning experience.
Here I go.
### Import the necessary modules
```
from collections import OrderedDict
import datetime
import warnings
# Parsing
from bs4 import BeautifulSoup
import requests
# Plotting
import matplotlib.pyplot as plt
# Numerical computation
import pandas as pd
import numpy as np
# Predicting
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima_model import ARIMA
%matplotlib inline
warnings.filterwarnings('ignore')
```
### Extract the 2nd table on the Wikipedia page:
```
#I'm interested in the 2nd table on the page
table_number = 2
url = "http://en.wikipedia.org/wiki/List_of_UK_universities_by_endowment"
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# class_="wikitable" matches every data table on the page; take the requested one.
table = soup.find_all('table', class_="wikitable")[table_number - 1] # 0-indexed structures
```
### Create a dictionary to store the data from the table
The dictionary format is such that the key is the university name, and the value is a list of strings indicating their annual endowment.
```
# Using `OrderedDict()` to have the legend ordered later on when plotting the results
unis = OrderedDict()
for row in table.find_all('tr')[1:]:
    data = row.text.split('\n')
    # data[1] is the university name; the remaining cells are yearly endowment
    # strings. Trailing "[n]" citation markers are stripped from each value.
    unis[data[1]] = [money.split('[')[0] for money in data[2:-1]]
```
### Plot all the data in the dictionary
```
years = list(range(2015, 2005, -1)) # Values are stored in reverse chronological order
for uni, money in unis.items():
    # BUG FIX: the original kept the endowments as strings (only stripping the
    # pound sign), so matplotlib received text values, and the enumerate()
    # index was never used. Convert to float (dropping the currency symbol and
    # any thousands separators) before plotting.
    y = [float(m.strip("£").replace(",", "")) for m in money]
    plt.figure(num=1, figsize=(15,6))
    plt.plot(years, y, label=uni)
plt.legend(unis.keys(), bbox_to_anchor=(0.5, 1),)
plt.xlabel('year')
plt.ylabel('$m endowment')
# Don't format the years in scientific notation
ax = plt.gca()
ax.get_xaxis().get_major_formatter().set_useOffset(False)
```
# Extract values for prediction and pre-process data
This should be easily extended to predict multiple series, but I'll use only one here for (my) clarity.
```
# Convert to `datetime` objects for the time series processing
date = [datetime.datetime.strptime(str(year), "%Y") for year in years]
# Single-column frame for Imperial College London, indexed by year;
# val[1:] drops the leading currency symbol from each string.
df = pd.DataFrame({'ICL': [float(val[1:]) for val in unis["Imperial College London"]], "Year": date})
df = df.set_index("Year")
# Values were scraped newest-first; reverse into chronological order.
ts = df["ICL"][::-1]
plt.plot(ts)
plt.title("ICL Financial Endowment")
plt.ylabel("$m")
plt.show()
```
### Eliminating Trend
#### What?
Now, time series often have two properties that are best eliminated before attempting any predicting algorithm: trend and seasonality:
- **Trend** refers to an overall tendency that the data follows. For instance, the endowment seems to increase over time in the data above.
- **Seasonality** refers to a predictable phenomenon that affects the data regularly. No seasonality for the data above, but think for example an increase in knitwear sales every winter.
#### Why?
To my understanding, there are simply more practical tools and theoretical background developed around **stationary** series (i.e. series without trend or seasonality aka series with stationary mean, variance, and autocorrelation).
#### How?
There are 2 how's:
- How do I tell if the data is stationary?
When you cannot tell from the plot, you may quantify the degree of stationarity using the [Dickey-Fuller Test](https://en.wikipedia.org/wiki/Dickey–Fuller_test).
- How do I make the data stationary?
There are many methods, but here I'll use [**differencing**](https://en.wikipedia.org/wiki/Data_differencing) because it seems to be effective and simple enough for an introduction tutorial.
##### Differencing
Differencing is a simple process that extracts the relative difference between any ~~two~~ number of adjacent instances, and for this case the number will be 2. Be aware that this means that the first instance will be invalid as there is no previous instance to subtract.
```
# First-order differencing: subtract the previous year's value to remove trend.
ts_diff = ts - ts.shift()
#Drop the invalid instances (i.e. the first one)
ts_diff.dropna(inplace=True)
print(ts) #Print original values
plt.plot(ts_diff)
# Plot the differences
plt.show()
```
## Training a Predictor
The time series above does not show a trend anymore and seems quite stationary (visually, at least). The next step is to use Auto-Regressive Integrated Moving Averages (ARIMA) to forecast the time series.
### Auto-Regressive Terms
**Lag:** In time series analysis, "lag" is a way to refer to previous values of a variable. For example, you may predict a new value based on `p` past values (lags).
Auto-Regressive terms (**AR**IMA) are just the number of lags used for predicting a new value. So if there are 5 auto-regressive terms, then the new value is dependent on 5 past terms.
### Moving Average Terms
**Moving average** refers to a technique to calculate the trend of the data. It chooses a time window and averages the values inside that time window to smooth out the noise. For a visual interpretation, check the graph below.
Moving Average terms (ARI**MA**) are lagged errors used for predicting a new value. So if there are 5 moving average terms, each term is also predicted by 5 errors calculated as the difference between the moving average value at that instance and the actual value.
### How to calculate the terms?
To decide on what values the terms should take, one may use techniques such as Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF), but there is very little data in this example, so I will simply pick both to be `1`.
```
# `pd.rolling_mean` was removed from pandas (0.18+); the modern API is
# `Series.rolling(window).mean()`.
moving_avg = ts.rolling(2).mean()
plt.plot(moving_avg)
plt.plot(ts)
# Fixed: the window above is 2 years, not 3.
plt.title('Moving average for a period of 2 years')
plt.show()

# Helper function
def plot_graphs(series1, series2, label1="ICL", label2="ARIMA prediction", title="Predicting ICL endowment"):
    """Plot two series on one figure with a legend and a title.

    series1/series2 are the series to draw; label1/label2 are their legend
    labels; title is the figure title.
    """
    plt.plot(series1, label=label1)
    plt.plot(series2, label=label2)
    plt.legend(loc='best')
    plt.title(title)
    plt.show()

# ARIMA(p=1, d=1, q=1) on the already-differenced series.
model = ARIMA(ts_diff, order=(1,1,1))
results_ARIMA = model.fit(disp=-1)
plot_graphs(ts_diff, results_ARIMA.fittedvalues)
```
## Visualising the results
Above, it can be seen that although the ARIMA predictions catch some of the peaks, it underestimates quite a bit at times -- for instance, the actual results are rarely ever negative, while the predictions go as low as `-15`.
Keep in mind that at this point, the plot only displays the difference between each instance. To visualise the actual results, one needs to scale back the results:
```
# Undo the differencing: cumulative-sum the predicted differences and shift
# by the first observed value. `.ix` was removed from pandas; positional
# access is `.iloc`.
preds = results_ARIMA.predict(end=13).cumsum() + ts.iloc[0]
plot_graphs(ts,preds)
```
## Discussion
In my defense, this is my first time doing time series analysis. I would suppose that this is far from enough data to get good results, but the results may also be bad because it predicted negative differences previously, thus the values have a harder time picking up. To confirm this, I will plot the unscaled predictions:
```
plot_graphs(ts_diff, results_ARIMA.predict(end=13))
```
## Improving results
To improve the results I will try running ARIMA with different parameters, and predict on the non-stationary series.
### No AR terms, 2 MA terms:
```
# MA-only model: no AR terms, no extra differencing, 2 MA terms.
results_ARIMA = ARIMA(ts_diff, order=(0, 0, 2)).fit(disp=-1)
# `.ix` was removed from pandas; positional access is `.iloc`.
preds = results_ARIMA.predict(end=13).cumsum() + ts.iloc[0]
plot_graphs(ts,preds)
```
### Predicting on non-stationary series:
```
# Let ARIMA perform the differencing itself (d=1) on the raw series.
nonstationary_model = ARIMA(ts, order=(1, 1, 0))
results_ARIMA = nonstationary_model.fit(disp=-1)
preds = results_ARIMA.predict(end=13)
plot_graphs(ts,preds)
```
## Conclusions
To obtain better results, I believe there's a need for more training data, and a better understanding of the domain -- in this case, better understanding of how ARIMA works and how to tune the parameters would probably help.
The experiment however was successful in the sense that it provided me with an interesting hands-on introduction to time series and using Jupyter Notebooks as a presentation medium.
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from os.path import join
# Global plot setup: "thesis" is a custom style sheet that must be installed
# locally. NOTE(review): the "seaborn" style key was renamed to
# "seaborn-v0_8" in matplotlib 3.6 — confirm the pinned matplotlib version.
plt.style.use(["seaborn", "thesis"])
plt.rcParams["figure.figsize"] = (8, 4)
```
# Dataset
```
from SCFInitialGuess.utilities.dataset import extract_triu_batch, AbstractDataset
from sklearn.model_selection import train_test_split
# Alternative dataset configurations kept for quick switching.
#data_path = "../thesis/dataset/EthenT/"
#postfix = "EthenT"
#dim = 72
#N_ELECTRONS = 16
#basis = "6-311++g**"
# Active configuration: "TSmall" dataset, 70 basis functions, 30 electrons.
data_path = "../thesis/dataset/TSmall/"
postfix = "TSmall"
dim = 70
N_ELECTRONS = 30
basis = "3-21++g*"
#data_path = "../../../butadien/data/"
#postfix = ""
#dim = 26
def split(x, y, ind):
    # Split paired arrays at index `ind`: (x_train, y_train, x_test, y_test).
    return x[:ind], y[:ind], x[ind:], y[ind:]
#[S, P] = np.load()
# S: overlap matrices, P: density matrices, stored flat on disk.
S = np.load(join(data_path, "S" + postfix + ".npy"))
P = np.load(join(data_path, "P" + postfix + ".npy"))
index = np.load(join(data_path, "index" + postfix + ".npy"))
#ind = int(0.8 * len(index))
# ind = 0 puts EVERYTHING into the test split (no training data here).
ind = 0
molecules = np.load(join(data_path, "molecules" + postfix + ".npy"))
molecules = (molecules[:ind], molecules[ind:])
#molecules = ([], molecules)
# Normalization statistics (mu, std) computed over S; s_triu_norm itself is
# unused below — presumably the normalized upper-triangle batch; TODO confirm.
s_triu_norm, mu, std = AbstractDataset.normalize(S)
s_train, p_train, s_test, p_test = split(S.reshape(-1, dim, dim), P.reshape(-1, dim, dim), ind)
#s_test = S.reshape(-1, dim, dim)
#p_test = S.reshape(-1, dim, dim)
from SCFInitialGuess.utilities.analysis import make_results_str, measure_all_quantities
from SCFInitialGuess.utilities.dataset import StaticDataset
dataset = StaticDataset(
    train=(s_train, p_train),
    #train=(None, None),
    validation=(None, None),
    test=(s_test, p_test),
    mu=mu,
    std=std
)
# NOTE(review): scratch cell leftover — `f` is unrelated to the experiment.
def f(x,y):
    return 4*x + 6 * y
f(13,3)
from pyscf.scf import hf
# Core Hamiltonian for every test molecule.
h_test = [hf.get_hcore(mol.get_pyscf_molecule()) for mol in molecules[1]]
```
# Utilities
```
from SCFInitialGuess.utilities.dataset import reconstruct_from_triu
def embedd(x, y):
    """Overlay ``y`` onto ``x`` wherever the global boolean ``mask`` is set.

    Returns a new array; neither input is modified.
    """
    combined = x.copy()
    combined[mask] = y[mask]
    return combined
def embedd_batch(p_batch):
    """Apply :func:`embedd` pairwise to ``p_batch`` and the global ``p_test``
    batch of converged densities, returning the stacked result."""
    return np.array([
        embedd(guess, converged)
        for guess, converged in zip(p_batch, p_test)
    ])
from SCFInitialGuess.utilities.constants import number_of_basis_functions as N_BASIS
# Build a boolean mask over the density matrix selecting the atomic
# (block-diagonal) blocks: one square block per atom, sized by the number of
# basis functions that atom contributes in the chosen basis.
mol = molecules[1][0]
mask = np.zeros((dim, dim))
current_dim = 0
for atom in mol.species:
    # calculate block range
    index_start = current_dim
    current_dim += N_BASIS[basis][atom]
    index_end = current_dim
    # calculate logical vector
    L = np.arange(dim)
    L = np.logical_and(index_start <= L, L < index_end)
    # Outer AND marks the square block; OR accumulates it into the mask.
    m = np.logical_and.outer(L, L)
    mask = np.logical_or(mask, m)
#mask
import seaborn as sns
sns.heatmap(mask.astype("int"))
# Sanity check: embedding p_test into itself must leave it unchanged (mean 0).
np.mean(np.abs(p_test.flatten() - embedd_batch(p_test).flatten()))
from pyscf.scf import hf
def fock_from_density_batch(p_batch):
    """Build a Fock matrix for every density in ``p_batch``, pairing each
    with the matching overlap (``s_test``), core Hamiltonian (``h_test``)
    and molecule from the global test set."""
    focks = []
    for density, overlap, core, mol in zip(p_batch, s_test, h_test, molecules[1]):
        effective_potential = hf.get_veff(mol=mol.get_pyscf_molecule(), dm=density)
        focks.append(hf.get_fock(None, h1e=core, s1e=overlap, vhf=effective_potential, dm=density))
    return np.array(focks)
from SCFInitialGuess.utilities.dataset import density_from_fock
def density_from_fock_batch(f_batch):
    """Turn every Fock matrix in ``f_batch`` back into a density matrix,
    using the matching overlap and molecule from the global test set."""
    return np.array([
        density_from_fock(fock, overlap, mol.get_pyscf_molecule())
        for overlap, fock, mol in zip(s_test, f_batch, molecules[1])
    ])
```
# GWH
```
from pyscf.scf import hf
# Generalized Wolfsberg-Helmholtz (GWH) initial-guess density for every
# test molecule.
p_gwh = np.array([
    hf.init_guess_by_wolfsberg_helmholtz(mol.get_pyscf_molecule()) for mol in molecules[1]
]).astype("float64")
from SCFInitialGuess.utilities.analysis import make_results_str, measure_all_quantities, mf_initializer
# Score the raw GWH guess; matrices are dense, hence is_triu=False.
print(make_results_str(measure_all_quantities(
    p_gwh,
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
```
# Embedded GWH
```
# Replace the atomic (block-diagonal, per `mask`) part of the GWH guess with
# the converged reference densities; the remaining entries keep GWH values.
p_embedded_gwh = embedd_batch(p_gwh)
from SCFInitialGuess.utilities.analysis import mf_initializer as mf_initializer
print(make_results_str(measure_all_quantities(
    p_embedded_gwh,
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
# Scratch ratio — presumably failed cases out of 1001 test molecules; TODO confirm.
51/1001
```
# Embedded GWH + 1 Iteration
```
# One SCF-like refinement step: build a Fock matrix from the embedded GWH
# density, then map it back to a density, and re-score the result.
f_embedded_gwh = fock_from_density_batch(p_embedded_gwh)
p_embedded_gwh_test = density_from_fock_batch(f_embedded_gwh)
from SCFInitialGuess.utilities.analysis import mf_initializer as mf_initializer
print(make_results_str(measure_all_quantities(
    p_embedded_gwh_test,
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
```
# SAD
```
from pyscf.scf import hf
# Superposition of Atomic Densities (SAD) guess for every test molecule.
p_sad = np.array([
    hf.init_guess_by_atom(mol.get_pyscf_molecule()) for mol in molecules[1]
]).astype("float64")
# Sparsity-pattern heatmaps to visually compare the guesses against a
# converged density (thresholds differ: exact zeros for SAD, 0.05 otherwise).
sns.heatmap((abs(p_sad[0]) < 1e-12).astype("int"))
sns.heatmap((abs(p_gwh[0]) < 0.05).astype("int"))
sns.heatmap((abs(p_test[30]) < 0.05).astype("int"))
from SCFInitialGuess.utilities.analysis import make_results_str, measure_all_quantities, mf_initializer
print(make_results_str(measure_all_quantities(
    p_sad,
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
# Scratch ratio — presumably failed cases out of 1001; TODO confirm.
47/1001
```
# Embedded zeros
```
# Baseline guess: all zeros except the embedded block-diagonal reference values.
p_embedded_zeros = embedd_batch(np.zeros(p_test.shape))
sns.heatmap((abs(p_embedded_zeros[0]) < 0.05).astype("int"))
from SCFInitialGuess.utilities.analysis import mf_initializer as mf_initializer
print(make_results_str(measure_all_quantities(
    p_embedded_zeros,
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
```
# Embedded zeros + 1 Iteration?
```
# One Fock-rebuild iteration on the embedded-zeros guess, then re-score.
f_embedded_zeros = fock_from_density_batch(p_embedded_zeros)
# (Trailing comma is harmless: single positional argument.)
p_embedded_zeros_test = density_from_fock_batch(f_embedded_zeros,)
from SCFInitialGuess.utilities.analysis import mf_initializer, make_results_str, measure_all_quantities
print(make_results_str(measure_all_quantities(
    p_embedded_zeros_test,
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
```
#
```
77/1001
```
# Embedded GWH w/ Self-Overlap
```
from SCFInitialGuess.utilities.constants import number_of_basis_functions as N_BASIS
mol = molecules[1][0]
# Mask selecting off-diagonal blocks that couple two DIFFERENT atoms of the
# SAME element (i == j, i.e. an atom with itself, is skipped).
mask_self_overlap = np.zeros((dim, dim))
current_dim_i = 0
for i, atom_i in enumerate(mol.species):
    # calculate block range
    index_start_i = current_dim_i
    current_dim_i += N_BASIS[basis][atom_i]
    index_end_i = current_dim_i
    # calculate logical vector
    L_i = np.arange(dim)
    L_i = np.logical_and(index_start_i <= L_i, L_i < index_end_i)
    current_dim_j = 0
    for j, atom_j in enumerate(mol.species):
        #print(str(i) + ", " + str(j))
        #print(str(atom_i) + ", " + str(atom_j))
        # calculate block range
        # (must run before the `continue` so the j-offset keeps advancing)
        index_start_j = current_dim_j
        current_dim_j += N_BASIS[basis][atom_j]
        index_end_j = current_dim_j
        if i == j:
            continue
        if atom_i == atom_j:
            # calculate logical vector
            L_j = np.arange(dim)
            L_j = np.logical_and(index_start_j <= L_j, L_j < index_end_j)
            m = np.logical_and.outer(L_i, L_j)
            mask_self_overlap = np.logical_or(mask_self_overlap, m)
#mask
import seaborn as sns
sns.heatmap(mask_self_overlap.astype("int"))
from SCFInitialGuess.utilities.dataset import reconstruct_from_triu
def embedd_self_ovlp(x, y):
    """Copy ``x`` and overwrite the entries selected by the global
    ``mask_self_overlap`` with the matching entries of ``y``."""
    patched = x.copy()
    patched[mask_self_overlap] = y[mask_self_overlap]
    return patched
def embedd_batch_self_ovlp(p_batch):
    """Apply :func:`embedd_self_ovlp` to each guess in ``p_batch`` paired
    with the corresponding converged density in the global ``p_test``."""
    return np.array([
        embedd_self_ovlp(guess, converged)
        for guess, converged in zip(p_batch, p_test)
    ])
# Additionally embed the same-element off-diagonal blocks into the GWH guess.
p_embedded_gwh_self_ovlp = embedd_batch_self_ovlp(p_embedded_gwh)
# Residual against the converged density for the first molecule.
sns.heatmap(p_embedded_gwh_self_ovlp[0] - p_test[0], square=True)
from SCFInitialGuess.utilities.analysis import mf_initializer, make_results_str, measure_all_quantities
# Score after one Fock-rebuild iteration on this guess.
print(make_results_str(measure_all_quantities(
    density_from_fock_batch(fock_from_density_batch(p_embedded_gwh_self_ovlp)),
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
# Scratch ratio — presumably failed cases out of the test set; TODO confirm.
6/(len(p_test))
```
# Embedded GWH w/ OFF overlap
```
from SCFInitialGuess.utilities.constants import number_of_basis_functions as N_BASIS
mol = molecules[1][0]
# Mask selecting off-diagonal blocks that couple atoms of DIFFERENT elements
# (i == j skipped; complements `mask_self_overlap` off the diagonal).
mask_off_overlap = np.zeros((dim, dim))
current_dim_i = 0
for i, atom_i in enumerate(mol.species):
    # calculate block range
    index_start_i = current_dim_i
    current_dim_i += N_BASIS[basis][atom_i]
    index_end_i = current_dim_i
    # calculate logical vector
    L_i = np.arange(dim)
    L_i = np.logical_and(index_start_i <= L_i, L_i < index_end_i)
    current_dim_j = 0
    for j, atom_j in enumerate(mol.species):
        #print(str(i) + ", " + str(j))
        #print(str(atom_i) + ", " + str(atom_j))
        # calculate block range
        # (must run before the `continue` so the j-offset keeps advancing)
        index_start_j = current_dim_j
        current_dim_j += N_BASIS[basis][atom_j]
        index_end_j = current_dim_j
        if i == j:
            continue
        if atom_i != atom_j:
            # calculate logical vector
            L_j = np.arange(dim)
            L_j = np.logical_and(index_start_j <= L_j, L_j < index_end_j)
            m = np.logical_and.outer(L_i, L_j)
            mask_off_overlap = np.logical_or(mask_off_overlap, m)
#mask
import seaborn as sns
sns.heatmap(mask_off_overlap.astype("int"))
from SCFInitialGuess.utilities.dataset import reconstruct_from_triu
def embedd_off_ovlp(x, y):
    """Copy ``x`` and overwrite the entries selected by the global
    ``mask_off_overlap`` with the matching entries of ``y``."""
    patched = x.copy()
    patched[mask_off_overlap] = y[mask_off_overlap]
    return patched
def embedd_batch_off_ovlp(p_batch):
    """Apply :func:`embedd_off_ovlp` to each guess in ``p_batch`` paired
    with the corresponding converged density in the global ``p_test``."""
    return np.array([
        embedd_off_ovlp(guess, converged)
        for guess, converged in zip(p_batch, p_test)
    ])
# Additionally embed the different-element off-diagonal blocks into the GWH guess.
p_embedded_gwh_off_ovlp = embedd_batch_off_ovlp(p_embedded_gwh)
# Residual against the converged density for the first molecule.
sns.heatmap(p_embedded_gwh_off_ovlp[0] - p_test[0], square=True)
from SCFInitialGuess.utilities.analysis import mf_initializer, make_results_str, measure_all_quantities
# Score after one Fock-rebuild iteration on this guess.
print(make_results_str(measure_all_quantities(
    density_from_fock_batch(fock_from_density_batch(p_embedded_gwh_off_ovlp)),
    dataset,
    molecules[1],
    N_ELECTRONS,
    mf_initializer,
    dim,
    is_triu=False,
    is_dataset_triu=False,
    s=S[ind:]
)))
# Scratch ratio — presumably failed cases out of the test set; TODO confirm.
7/len(p_test)
```
| github_jupyter |
# Prologos
## List of Acronyms
+ DS - Dividing Surface.
+ PODS - Periodic Orbit Dividing Surface.
+ NHIM - Normally Hyperbolic Invariant Manifold.
+ UOP - Unstable Periodic Orbit.
+ RI - Reactive Islands.
+ LDs - Lagrangian Descriptors.
+ PSOS - Poincare Surface of Section.
+ TST - Transition State Theory.
+ TS - Transition State.
+ PES - Potential Energy Surface.
## Introduction & Background
Concepts arising in nonlinear dynamical systems theory, such as periodic orbits, normally hyperbolic invariant manifolds (NHIMs), and stable and unstable manifolds have been introduced into the study of chemical reaction dynamics from the phase space perspective. A fairly substantial literature has been developed on this topic in recent years (see, for example \cite{wiggins2016role, waalkens2010geometrical, waalkens2007wigner, wiggins2013normally}, and references therein), but it is fair to say that it has a more mathematical flavour, which is not surprising since these concepts originated in the dynamical systems literature. In this book we will describe how these dynamical notions arise in a variety of physically motivated settings with the hope of providing a more gentle entry into the field for both applied mathematicians and chemists.
An obstacle in interdisciplinary work is the lack of a common language for describing concepts that are common to different fields. We begin with a list of commonly used terms that will arise repeatedly throughout this book and provide a working definition.
Molecules are made up of a collection of atoms that are connected by chemical bonds and a reaction is concerned with the breaking, and creation, of these bonds. Hence, the following concepts are fundamental to the description of this phenomena.
+ **Coordinates.** The locations of the atoms in a molecule are described by a set of coordinates. The space (that is, all possible values) described by these coordinates is referred to as *configuration space*.
+ **Degrees-of-Freedom (DoF).** The number of DoF is the number of independent coordinates required to describe the configuration of the molecule, that is the dimension of the configuration space.
+ **Reaction.** The breaking of a bond can be described by one or more coordinates characterizing the bond becoming unbound as it evolves in time.
+ **Reaction coordinate(s).** The particular coordinate(s) that describe the breaking of the bond are referred to as the *reaction coordinate(s)*.
+ **Energy.** The ability of a bond to break can be characterised by its energy. A bond can be ''energized'' by transferring energy from other bonds in the molecule to a particular bond of interest, or from some external energy source, such as electromagnetic radiation, collision with other molecules, for example.
+ **Total Energy, Hamiltonian, momenta.** The total energy (that is, the sum of kinetic energy and potential energy) can be described by a scalar valued function called the *Hamiltonian*. The Hamiltonian is a function of the configuration space coordinates *and* their corresponding canonically conjugate coordinates, which are referred to as *momentum coordinates*.
+ **Phase Space.** The collection of all the configuration and momentum coordinates is referred to as the phase space of the system. The dynamics of the system (that is, how it changes in time) is described by Hamilton's (differential) equations of motion defined on phase space.
+ **Dimension count.** If the system has *n* configuration space coordinates, it has *n* momentum coordinates and then the phase space dimension is *2n*. The Hamiltonian is a scalar valued function of these *2n* coordinates. The level set of the Hamiltonian, that is the energy surface, is *2n-1* dimensional. For a time-independent (autonomous) Hamiltonian, the system *conserves energy* and the energy surface is invariant.
+ **Transition State Theory (TST).** An approach in reaction rate calculations that is based on the flux across a dividing surface. We give a brief description of the theory in the next section.
+ **Dividing Surface (DS).** A DS on the energy surface is of dimension *2n-2*, that is, 1 dimension less (codimension 1) than the *2n-1* dimensional energy surface. While a codimension one surface has the dimensionality necessary to divide the energy surface into two distinct regions and forms the boundary between them. If properly chosen, the two regions are referred to as *reactants* and *products*, and reaction occurs when trajectories evolve from reactants to products through the DS. A DS has the ''locally no-recrossing'' property, which is equivalent to the Hamiltonian vector field being everywhere transverse to a DS, that is at no point is it tangent to the DS.
+ **Locally no-recrossing.** Computation of the flux is accurate only if the DS has the ''locally no-recrossing'' property. A surface has the ''locally no-recrossing'' property if any trajectory that crosses the surface leaves a neighbourhood of the surface before it can return.
+ **Globally no-recrossing.** A surface has the ''globally no-recrossing'' property if any trajectory that crosses the surface does so only once.
+ **The DS and the Reaction Coordinate.** Following our definitions of reaction, reaction coordinate, and DS it follows that the reaction coordinate should play a role in the definition of the DS. This will be an important point in our discussions that follow.
## Transition State Theory
Transition State Theory is a classical theory developed by Wigner, Eyring, Gwynne Evans and Polanyi,\cite{Wigner1932, Eyring1935,Polanyi1935,Eyring1941} that calculates the rate of the reaction as the equilibrium flux of reactive trajectories through the DS. Although there has been much discussion of quantum mechanical analogs of TST \cite{Pechukas1982, WaalkensWiggins2007}, we will briefly discuss in this book the original version based on classical mechanics, since the dividing surface concept, that we use, is incompatible with quantum mechanics. The overview that we give is not supposed to be a review of the vast literature that exists in this area, but to connect the mathematical study of the geometrical structures that characterize reaction dynamics in phase space.
It is commonly claimed that conventional TST makes two main assumptions. \cite{Mahan1974}
- The first, called the equilibrium assumption, requires that the reactant state and TS be in thermal equilibrium. The maintenance of energetic equilibrium means that the thermalization maintaining this equilibrium is (at least) as fast as the rate at which these states are depopulated. \cite{TruhlarVTST2017,GarretTruhlarGTST1979} The equilibrium condition is usually satisfied for most gas-phase bimolecular reactions and for reactions in the liquid phase, because energy exchange between solutes and solvent is usually rapid enough to maintain the equilibrium.\cite{JBAnderson1973,JBAnderson1995} However, there are cases where equilibrium is not maintained, even in solution.\cite{EssafiHarvey2018} In addition, for unimolecular reactions of intermediates with low barriers to product formation, it is commonly the case that most trajectories coming from the reactant state will have enough energy in a product-forming reaction coordinate to cross the second barrier as soon as they reach it.\cite{Carpenter1985,EzraWiggins2014}
- The second claimed assumption specifies that any trajectory crossing the TS dividing surface from the reactant state is on a path towards the product state and will reach it without recrossing the dividing surface prior to the product being reached.\cite{TruhlarVTST1980} Because the rate is calculated as the flux through the TS, any non-reactive trajectory that crosses the TS dividing surface, or reactive trajectory that crosses it more than once will increase the flux through the dividing surface, thus leading to an overestimate of the rate constant. This means that TST gives us an upper limit on the true rate constant, and that if we found a dividing surface without any recrossing then TST would give the exact value of the rate constant (subject to certain caveats\cite{JCP2016Maug}).
In conventional TST the transition state dividing surface is located at the saddle point, which is the maximum energy point on the minimum energy path from reactants to products. However, TST is most powerful in the form of Variational Transition State Theory (VTST), \cite{TruhlarVTST2017,GarretTruhlarGTST1979,TruhlarVTST1980,TruhlarGarretVTST1984, GarretTruhlarJCP1979,Keck1960,Horiuti1938,Wigner1937} which is a generalization of TST that removes the restriction on the dividing surface to cross the saddle point. In VTST the dividing surface is variationally optimized to minimize the rate constant, usually by finding the maximum free energy along the reaction path. Although this surface is properly located in phase space, most of the VTST calculations assume that the TS can be found in configuration space.\cite{Carpenter1985}
## The index of a saddle point
We consider a *n* degree-of-freedom Hamiltonian of the following form:
\begin{equation}
H(q, p) = \sum_{i=1}^{n} \frac{p_i^2}{2} + V(q), \quad (q,p) \in \mathbb{R}^n \times \mathbb{R}^n,
\label{ham_int}
\end{equation}
where $q \in \mathbb{R}^n$ denote the configuration space variables and $p \in \mathbb{R}^n$ denote the corresponding conjugate momentum variables. This Hamiltonian function gives rise to the corresponding Hamilton's differential equations (or just ''Hamilton's equations'') having the following form:
\begin{eqnarray}
\dot{q}_i & = & p_i, \nonumber \\
\dot{p}_i & = & -\frac{\partial V}{\partial q_i} (q), \quad i=1. \ldots , n.
\label{hameq_int}
\end{eqnarray}
These are a set of *2n* first order differential equations defined on the phase space
$\mathbb{R}^n \times \mathbb{R}^n$.
A critical point of the potential energy function is a point $\bar{q} \in \mathbb{R}^n$ satisfying the following equations:
\begin{equation}
\frac{\partial V}{\partial q_i} (\bar{q}) =0, \quad i=1, \ldots n.
\end{equation}
Once a critical point of the potential energy function is located, we want to ''classify'' it. This is done by examining the second derivative of the potential energy function evaluated at the critical point. The second derivative matrix is referred to as the *Hessian matrix*, and it is given by:
\begin{equation}
\frac{\partial^2 V}{\partial q_i \partial q_j} (\bar{q}) =0, \quad i,j=1, \ldots n,
\label{hessian}
\end{equation}
which is a $n \times n$ symmetric matrix. Hence \eqref{hessian} has *n* real eigenvalues, which we denote by:
\begin{equation}
\sigma_k, \quad k=1, \ldots, n.
\label{eiv_Hess}
\end{equation}
However, returning to dynamics as given by Hamilton's equations \eqref{hameq_int}, the point $(\bar{q}, 0)$ is an equilibrium point of Hamilton's equations, i.e. when this point is substituted into the right-hand-side of \eqref{hameq_int} we obtain $(\dot{q}_1, \ldots, \dot{q}_n, \dot{p}_1, \ldots, \dot{p}_n) = (0, \ldots, 0, 0, \ldots, 0)$, i.e. the point $(\bar{q}, 0)$ does not change in time.
Next, we want to determine the nature of the stability of this equilibrium point. Linearized stability is determined by computing the Jacobian of the right hand side of \eqref{hameq_int}, which we will denote by $M$, evaluating it at the equilibrium point $(\bar{q}, 0)$, and determining its eigenvalues. The following calculation is from \cite{ezra2004impenetrable}.
The Jacobian of the Hamiltonian vector field \eqref{hameq_int} evaluated at $(\bar{q}, 0)$ is given by:
\begin{equation}
M =
\left(
\begin{array}{cc}
0_{n\times n} & \rm{id}_{n \times n} \\
-\frac{\partial^2 V}{\partial q_i \partial q_j} (\bar{q}) & 0_{n\times n}
\end{array}
\right),
\end{equation}
which is a $2n \times 2n$ matrix. The eigenvalues of $M$, denoted by $\lambda$, are given by the solutions of the following characteristic equation:
\begin{equation}
{\rm det} \, \left( M - \lambda \, {\rm id}_{2n \times 2n} \right) =0,
\label{eivM}
\end{equation}
where ${\rm id}_{2n \times 2n}$ denoted the $2n \times 2n$ identity matrix. Writing \eqref{eivM} in detail (i.e. using the explicit expression for the Jacobian of \eqref{hameq_int}) gives:
\begin{equation}
{\rm det} \,
\left(
\begin{array}{cc}
-\lambda \, \rm{id}_{n \times n} & \rm{id}_{n \times n} \\
-\frac{\partial^2 V}{\partial q_i \partial q_j} (\bar{q}) & -\lambda \rm{id}_{n \times n}
\end{array}
\right) = {\rm det} \, \left(\lambda^2 \, \rm{id}_{n \times n} + \frac{\partial^2 V}{\partial q_i \partial q_j} (\bar{q}) \right) =0.
\end{equation}
We can conclude from this calculation that the eigenvalues of the $n \times n$ symmetric matrix $\frac{\partial^2 V}{\partial q_i \partial q_j} (\bar{q})$ are $-\lambda^2$, where $\lambda$ are the eigenvalues of the $n \times n$ matrix $M$. Hence, the eigenvalues of $M$ occur in pairs, denoted by
$\lambda_k, \, \lambda_{k+n}, \, k=1, \ldots n$, which have the form:
\begin{equation}
\lambda_k, \, \lambda_{k+n} = \pm \sqrt{-\sigma_k}, \quad k=1, \ldots, n,
\end{equation}
where $\sigma_k$ are the eigenvalues of the Hessian of the potential energy evaluated at the critical point $\bar{q}$ as denoted in \eqref{eiv_Hess}. Hence,
we see that the existence of equilibrium points of Hamilton's equations of ''saddle-like stability'' implies that there must be *at least* one negative eigenvalue of \eqref{hessian}. In fact, we have the following classification of the linearized stability of saddle-type equilibrium points of Hamilton's equations in terms of the critical points of the potential energy surface.
+ **Index 1 saddle.** One eigenvalue of \eqref{hessian} is positive, the rest are negative. We will assume that none of the eigenvalues of \eqref{hessian} are zero. Zero eigenvalues give rise to special cases that must be dealt with separately. In the mathematics literature, these are often referred to as *saddle-center-$\cdots$-center equilibria*, with the number of center-$\cdots$-center terms equal to the number of pairs of pure imaginary eigenvalues.
+ **Index 2 saddle.** Two eigenvalues of \eqref{hessian} are positive, the rest are negative
and in general,
+ **Index k saddle.** *k* eigenvalues of \eqref{hessian} are positive, the rest are negative ($k \le n$).
## References
\bibliography{myBib}
| github_jupyter |
# NumPy
## Задание 1
**1. Импортируйте библиотеку Numpy и дайте ей псевдоним np.
Создать одномерный массив Numpy под названием a из 12 последовательных целых чисел чисел от 12 до 24 не включительно
Создать 5 двумерных массивов разной формы из массива a. Не использовать в аргументах метода reshape число -1.
Создать 5 двумерных массивов разной формы из массива a.
Использовать в аргументах метода reshape число -1 (в трех примерах - для обозначения числа столбцов, в двух - для строк).
Можно ли массив Numpy, состоящий из одного столбца и 12 строк, назвать одномерным?**
```
import numpy as np
# One-dimensional array of the 12 integers 12..23.
a = np.arange(12, 24)
print(a)
# `reshape` returns a new array/view; the results below are only displayed
# by the notebook, `a` itself stays unchanged.
a.reshape(3, 4)
a.reshape(4, 3)
a.reshape(2, 6)
a.reshape(6, 2)
# `resize` (unlike `reshape`) modifies `a` in place: now 12 rows x 1 column.
a.resize(12, 1)
print(a)
# With -1 NumPy infers the remaining dimension from the total size.
a.reshape(-1, 3)
np.reshape(a, (-1, 6))
np.reshape(a, (-1, 2))
a.reshape(4, -1)
a.reshape(12, -1)
```
*Можно ли массив Numpy, состоящий из одного столбца и 12 строк, назвать одномерным?*
Нет.
```
a.ndim
```
**2. Создать массив из 3 строк и 4 столбцов, состоящий из случайных чисел с плавающей запятой из нормального распределения со средним, равным 0 и среднеквадратичным отклонением, равным 1.0.
Получить из этого массива одномерный массив с таким же атрибутом size, как и исходный массив.**
```
# 3x4 samples from the standard normal distribution (mean 0, std 1).
a = np.random.randn(3, 4)
print(a)
# `flatten` returns a 1-D copy with the same number of elements.
b = a.flatten()
print(b)
a.size == b.size
```
**3. Создать массив a, состоящий из целых чисел, убывающих от 20 до 0 невключительно с интервалом 2.
Создать массив b, состоящий из 1 строки и 10 столбцов: целых чисел, убывающих от 20 до 1 невключительно с интервалом 2.
В чем разница между массивами a и b?**
```
# Integers from 20 down with step -2; the stop value is exclusive, so both
# stop=0 and stop=1 end at 2.
a = np.arange(20, 0, -2)
print(a)
b = np.arange(20, 1, -2)
print(b)
```
Никакой разницы. В обоих случаях получаются одинаковые массивы
```
np.array_equal(a, b)
```
**4. Вертикально соединить массивы a и b. a - двумерный массив из нулей, число строк которого больше 1 и на 1 меньше, чем число строк двумерного массива b, состоящего из единиц. Итоговый массив v должен иметь атрибут size, равный 10.**
```
a = np.zeros((3, 2))
b = np.ones((2, 2))
print('Массив a ', a, sep='\n')
print('Массив b ', b, sep='\n')
# Vertical (row-wise) concatenation: shapes (3,2) + (2,2) -> (5,2), size 10.
v = np.concatenate((a, b), axis = 0)
print(v)
v.size
```
**5. Создать одномерный массив а, состоящий из последовательности целых чисел от 0 до 12.
Поменять форму этого массива, чтобы получилась матрица A (двумерный массив Numpy), состоящая из 4 строк и 3 столбцов.
Получить матрицу At путем транспонирования матрицы A.
Получить матрицу B, умножив матрицу A на матрицу At с помощью матричного умножения.
Какой размер имеет матрица B? Получится ли вычислить обратную матрицу для матрицы B и почему?**
```
a = np.arange(0, 12)
print(a)
A = a.reshape(4, 3)
print(A)
# Transpose: At has shape (3, 4).
At = A.transpose()
print(At)
# Matrix product: (4,3) @ (3,4) -> (4,4).
B = np.dot(A, At)
print(B)
```
При умножении матриц число строк итоговой матрицы (B) равно число строк первой матрицы (A), а число столбцов - второй матрицы (At).
```
B.size
```
Вычислить обратную матрицу для B невозможно, т.к. определитель матрицы B равен 0.
```
np.linalg.det(B)
```
**6. Инициализируйте генератор случайных числе с помощью объекта seed, равного 42.
Создайте одномерный массив c, составленный из последовательности 16-ти случайных равномерно распределенных целых чисел от 0 до 16 невключительно.
Поменяйте его форму так, чтобы получилась квадратная матрица C.
Получите матрицу D, поэлементно прибавив матрицу B из предыдущего вопроса к матрице C, умноженной на 10.
Вычислите определитель, ранг и обратную матрицу D_inv для D.**
```
# Seed the legacy global RNG so the results are reproducible.
np.random.seed(42)
# The task asks for 16 uniformly distributed random integers in [0, 16);
# the original cell used `np.arange(0, 16)`, which is not random — fixed to
# use `randint` as specified.
c = np.random.randint(0, 16, 16)
print(c)
# 16 elements -> a 4x4 square matrix is possible.
np.sqrt(c.size)
C = c.reshape((4, 4))
print(C)
print(B)
C.shape == B.shape
# D = 10*C + B, element-wise.
D = 10 * C + B
print(D)
np.linalg.det(D)
# Rank and inverse of D.
D_inv = np.linalg.inv(D)
print(D_inv)
```
**7. Приравняйте к нулю отрицательные числа в матрице D_inv, а положительные - к единице. Убедитесь, что в матрице D_inv остались только нули и единицы.
С помощью функции numpy.where, используя матрицу D_inv в качестве маски, а матрицы B и C - в качестве источников данных, получите матрицу E размером 4x4.
Элементы матрицы E, для которых соответствующий элемент матрицы D_inv равен 1, должны быть равны соответствующему элементу матрицы B, а элементы матрицы E, для которых соответствующий элемент матрицы D_inv равен 0, должны быть равны соответствующему элементу матрицы C.**
```
# Binarize D_inv in place: positives -> 1, negatives -> 0 (entries that are
# exactly zero, if any, are left as 0 by either mask).
mask_positive = D_inv > 0
mask_negative = D_inv < 0
D_inv[mask_positive] = 1
D_inv[mask_negative] = 0
print(D_inv)
# Element-wise select: take from B where D_inv is 1 (truthy), from C where 0.
E = np.where(D_inv, B, C)
print(E)
```
## Задание 2
**Создайте массив Numpy под названием a размером 5x2, то есть состоящий из 5 строк и 2 столбцов.
Первый столбец должен содержать числа 1, 2, 3, 3, 1, а второй - числа 6, 8, 11, 10, 7.
Будем считать, что каждый столбец - это признак, а строка - наблюдение.
Затем найдите среднее значение по каждому признаку, используя метод mean массива Numpy.
Результат запишите в массив mean_a, в нем должно быть 2 элемента.**
```
# 5 observations (rows) x 2 features (columns); entered transposed for brevity.
a = np.array(
    [[1, 2, 3, 3, 1],
     [6, 8, 11, 10, 7]]
).transpose()
print(a)
# Column-wise means: one value per feature (2 elements).
mean_a = np.mean(a, axis = 0)
print(mean_a)
```
## Задание 3
**Вычислите массив a_centered, отняв от значений массива а средние значения соответствующих признаков, содержащиеся в массиве mean_a.
Вычисление должно производиться в одно действие.
Получившийся массив должен иметь размер 5x2.**
```
# Center each feature by subtracting its mean (broadcast over rows); 5x2.
a_centered = a - mean_a
print(a_centered)
```
## Задание 4
**Найдите скалярное произведение столбцов массива a_centered.
В результате должна получиться величина a_centered_sp.
Затем поделите a_centered_sp на N-1, где N - число наблюдений.**
```
# Dot product of the two centered feature columns.
a_centered_sp = a_centered.T[0] @ a_centered.T[1]
print(a_centered_sp)
# Unbiased covariance estimate: divide by N - 1 observations.
a_centered_sp / (a_centered.shape[0] - 1)
```
## Задание 5
**Число, которое мы получили в конце задания 3 является ковариацией двух признаков, содержащихся в массиве а. В задании 4 мы делили сумму произведений центрированных признаков на N-1, а не на N, поэтому полученная нами величина является несмещенной оценкой ковариации.
В этом задании проверьте получившееся число, вычислив ковариацию еще одним способом - с помощью функции np.cov.
В качестве аргумента m функция np.cov должна принимать транспонированный массив a.
В получившейся ковариационной матрице (массив Numpy размером 2x2) искомое значение
ковариации будет равно элементу в строке с индексом 0 и столбце с индексом 1.**
*Подробнее узнать о ковариации можно здесь:
[Ссылка](https://studopedia.ru/9_153900_viborochnaya-kovariatsiya-i-viborochnaya-dispersiya.html)*
```
np.cov(a.T)[0, 1]
```
# Pandas
## Задание 1
**A. Импортируйте библиотеку Pandas и дайте ей псевдоним pd.**
```
import pandas as pd
```
**B. Создайте датафрейм authors со столбцами author_id и author_name, в которых соответственно содержатся данные:
[1, 2, 3] и ['Тургенев', 'Чехов', 'Островский'].**
```
authors = pd.DataFrame({'author_id':[1, 2, 3],
'author_name':['Тургенев', 'Чехов', 'Островский']},
columns=['author_id', 'author_name'])
print(authors)
```
**C. Затем создайте датафрейм book cо столбцами author_id, book_title и price,в которых соответственно содержатся данные:
[1, 1, 1, 2, 2, 3, 3],
['Отцы и дети', 'Рудин', 'Дворянское гнездо', 'Толстый и тонкий', 'Дама с собачкой', 'Гроза', 'Таланты и поклонники'],
[450, 300, 350, 500, 450, 370, 290].**
```
book = pd.DataFrame({'author_id':[1, 1, 1, 2, 2, 3, 3],
'book_title':['Отцы и дети', 'Рудин', 'Дворянское гнездо', 'Толстый и тонкий', 'Дама с собачкой', 'Гроза', 'Таланты и поклонники'],
'price':[450, 300, 350, 500, 450, 370, 290]},
columns=['author_id', 'book_title', 'price'])
print(book)
```
## Задание 2
**Получите датафрейм authors_price, соединив датафреймы authors и books по полю author_id.**
```
authors_price = pd.merge(authors, book, on = 'author_id', how = 'outer')
print(authors_price)
```
## Задание 3
**Создайте датафрейм top5, в котором содержатся строки из authors_price с пятью самыми дорогими книгами.**
```
top5 = authors_price.nlargest(5, 'price')
print(top5)
```
## Задание 4
**A. Создайте датафрейм authors_stat на основе информации из authors_price.**
```
authors_stat = authors_price['author_name'].value_counts()
print(authors_stat)
```
**B. В датафрейме authors_stat должны быть четыре столбца:
author_name, min_price, max_price и mean_price, в которых должны содержаться соответственно имя автора, минимальная, максимальная и средняя цена на книги этого автора.**
```
authors_stat = authors_price.groupby('author_name').agg({'price':['min', 'max', 'mean']})
authors_stat = authors_stat.rename(columns={'min':'min_price', 'max':'max_price', 'mean':'mean_price'})
print(authors_stat)
```
## Задание 5
**Создайте новый столбец в датафрейме authors_price под названием cover, в нем будут располагаться данные о том, какая обложка у данной книги - твердая или мягкая.
В этот столбец поместите данные из следующего списка:
['твердая', 'мягкая', 'мягкая', 'твердая', 'твердая', 'мягкая', 'мягкая'].
Просмотрите документацию по функции pd.pivot_table с помощью вопросительного знака.
Для каждого автора посчитайте суммарную стоимость книг в твердой и мягкой обложке.Используйте для этого функцию pd.pivot_table. При этом столбцы должны называться "твердая" и "мягкая",а индексами должны быть фамилии авторов. Пропущенные значения стоимостей заполните нулями,при необходимости загрузите библиотеку Numpy.
Назовите полученный датасет book_info и сохраните его в формат pickle под названием "book_info.pkl".Затем загрузите из этого файла датафрейм и назовите его book_info2.Удостоверьтесь, что датафреймы book_info и book_info2 идентичны**
```
# Add the cover-type column for each of the 7 books (order matches the rows).
authors_price['cover'] = ['твердая', 'мягкая', 'мягкая', 'твердая', 'твердая', 'мягкая', 'мягкая']
print(authors_price)
# Total price per author per cover type.
book_info = pd.pivot_table(authors_price, values='price', index=['author_name'], columns=['cover'], aggfunc=np.sum)
# Fill missing author/cover combinations with 0 in one call instead of
# repeating fillna per column (same result: only these two columns exist).
book_info = book_info.fillna(0)
print(book_info)
# Round-trip through pickle and verify the frames are identical.
book_info.to_pickle('book_info.pkl')
book_info2 = pd.read_pickle('book_info.pkl')
book_info.equals(book_info2)
```
| github_jupyter |
<div align="center">
<h1><img width="30" src="https://madewithml.com/static/images/rounded_logo.png"> <a href="https://madewithml.com/">Made With ML</a></h1>
Applied ML · MLOps · Production
<br>
Join 30K+ developers in learning how to responsibly <a href="https://madewithml.com/about/">deliver value</a> with ML.
<br>
</div>
<br>
<div align="center">
<a target="_blank" href="https://newsletter.madewithml.com"><img src="https://img.shields.io/badge/Subscribe-30K-brightgreen"></a>
<a target="_blank" href="https://github.com/GokuMohandas/MadeWithML"><img src="https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star"></a>
<a target="_blank" href="https://www.linkedin.com/in/goku"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
<a target="_blank" href="https://twitter.com/GokuMohandas"><img src="https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social"></a>
<br>
🔥 Among the <a href="https://github.com/topics/deep-learning" target="_blank">top ML</a> repositories on GitHub
</div>
<br>
<hr>
# Recurrent Neural Networks (RNN)
In this lesson we will learn how to process sequential data (sentences, time-series, etc.) with recurrent neural networks (RNNs).
<div align="left">
<a target="_blank" href="https://madewithml.com/courses/foundations/recurrent-neural-networks/"><img src="https://img.shields.io/badge/📖 Read-blog post-9cf"></a>
<a href="https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/13_Recurrent_Neural_Networks.ipynb" role="button"><img src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
<a href="https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/13_Recurrent_Neural_Networks.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
</div>
So far we've processed inputs as whole (ex. applying filters across the entire input to extract features) but we can also process our inputs sequentially. For example we can think of each token in our text as an event in time (timestep). We can process each timestep, one at a time, and predict the class after the last timestep (token) has been processed. This is very powerful because the model now has a meaningful way to account for the sequential order of tokens in our sequence and predict accordingly.
# Overview
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/vanilla.png" width="500">
</div>
RNN forward pass for a single time step $X_t$:
$h_t = tanh(W_{hh}h_{t-1} + W_{xh}X_t+b_h)$
*where*:
* $W_{hh}$ = hidden units weights $\in \mathbb{R}^{H \times H}$ ($H$ is the hidden dim)
* $h_{t-1}$ = previous timestep's hidden state $\in \mathbb{R}^{N \times H}$
* $W_{xh}$ = input weights $\in \mathbb{R}^{E \times H}$
* $X_t$ = input at time step $t$ $\in \mathbb{R}^{N \times E}$ ($N$ is the batch size, $E$ is the embedding dim)
* $b_h$ = hidden units bias $\in \mathbb{R}^{H \times 1}$
* $h_t$ = output from RNN for timestep $t$
* **Objective:** Process sequential data by accounting for the current input and also what has been learned from previous inputs.
* **Advantages:**
* Account for order and previous inputs in a meaningful way.
* Conditioned generation for generating sequences.
* **Disadvantages:**
* Each time step's prediction depends on the previous prediction so it's difficult to parallelize RNN operations.
* Processing long sequences can yield memory and computation issues.
* Interpretability is difficult but there are few [techniques](https://arxiv.org/abs/1506.02078) that use the activations from RNNs to see what parts of the inputs are processed.
* **Miscellaneous:**
* Architectural tweaks to make RNNs faster and interpretable is an ongoing area of research.
# Set up
```
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn

SEED = 1234

def set_seeds(seed=1234):
    """Seed numpy, random and torch (CPU + all GPUs) for reproducibility."""
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # multi-GPU

# Set seeds for reproducibility (the original cell called this twice and had
# a second comment fused onto the line above; both fixed here).
set_seeds(seed=SEED)

# Set device: use CUDA when available and enabled, otherwise CPU.
cuda = True
device = torch.device('cuda' if (
    torch.cuda.is_available() and cuda) else 'cpu')
torch.set_default_tensor_type('torch.FloatTensor')
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
print(device)
```
## Load data
We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120K text samples from 4 unique classes (`Business`, `Sci/Tech`, `Sports`, `World`)
```
import numpy as np
import pandas as pd
import re
import urllib
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/news.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
```
## Preprocessing
We're going to clean up our input data first by doing operations such as lower text, removing stop (filler) words, filters using regular expressions, etc.
```
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import re
nltk.download('stopwords')
STOPWORDS = stopwords.words('english')
print (STOPWORDS[:5])
porter = PorterStemmer()
def preprocess(text, stopwords=STOPWORDS):
    """Conditional preprocessing on our text unique to our task."""
    lowered = text.lower()
    # Strip stopwords (word-boundary match, trailing whitespace included).
    stopword_pattern = re.compile(r'\b(' + r'|'.join(stopwords) + r')\b\s*')
    cleaned = stopword_pattern.sub('', lowered)
    # Drop any parenthesised spans entirely.
    cleaned = re.sub(r'\([^)]*\)', '', cleaned)
    # Pad punctuation with spaces, then keep alphanumerics only and
    # collapse repeated spaces.
    cleaned = re.sub(r"([-;;.,!?<=>])", r" \1 ", cleaned)
    cleaned = re.sub('[^A-Za-z0-9]+', ' ', cleaned)
    cleaned = re.sub(' +', ' ', cleaned)
    return cleaned.strip()
# Sample
text = "Great week for the NYSE!"
preprocess(text=text)
# Apply to dataframe
preprocessed_df = df.copy()
preprocessed_df.title = preprocessed_df.title.apply(preprocess)
print (f"{df.title.values[0]}\n\n{preprocessed_df.title.values[0]}")
```
## Split data
```
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
    """Split (X, y) into train/val/test splits.

    The remainder after the train split is divided evenly between the
    validation and test splits; every split is stratified on the labels.

    Args:
        X: feature array.
        y: label array (used for stratification).
        train_size: fraction of the data to allocate to the train split.

    Returns:
        X_train, X_val, X_test, y_train, y_val, y_test
    """
    # Bug fix: use the `train_size` argument instead of silently reading
    # the module-level TRAIN_SIZE constant.
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)
    X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
    return X_train, X_val, X_test, y_train, y_val, y_test
# Data
X = preprocessed_df["title"].values
y = preprocessed_df["category"].values
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
```
## LabelEncoder
Next we'll define a `LabelEncoder` to encode our text labels into unique indices
```
import itertools
class LabelEncoder(object):
    """Encode/decode categorical string labels to/from integer indices."""
    def __init__(self, class_to_index=None):
        # Bug fix: the original used a mutable default argument ({}), so every
        # instance created without an explicit mapping shared (and, via fit(),
        # mutated) the very same dict. Default to a fresh dict per instance.
        self.class_to_index = {} if class_to_index is None else class_to_index
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        self.classes = list(self.class_to_index.keys())
    def __len__(self):
        return len(self.class_to_index)
    def __str__(self):
        return f"<LabelEncoder(num_classes={len(self)})>"
    def fit(self, y):
        """Learn the label -> index mapping from the sorted unique values of y."""
        classes = np.unique(y)
        for i, class_ in enumerate(classes):
            self.class_to_index[class_] = i
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        self.classes = list(self.class_to_index.keys())
        return self
    def encode(self, y):
        """Map an iterable of labels to an int ndarray of indices."""
        encoded = np.zeros((len(y)), dtype=int)
        for i, item in enumerate(y):
            encoded[i] = self.class_to_index[item]
        return encoded
    def decode(self, y):
        """Map an iterable of indices back to their labels."""
        classes = []
        for i, item in enumerate(y):
            classes.append(self.index_to_class[item])
        return classes
    def save(self, fp):
        """Persist the label mapping as JSON."""
        with open(fp, 'w') as fp:
            contents = {'class_to_index': self.class_to_index}
            json.dump(contents, fp, indent=4, sort_keys=False)
    @classmethod
    def load(cls, fp):
        """Restore an encoder from a JSON file produced by `save`."""
        with open(fp, 'r') as fp:
            kwargs = json.load(fp=fp)
        return cls(**kwargs)
# Encode
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
NUM_CLASSES = len(label_encoder)
label_encoder.class_to_index
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.encode(y_train)
y_val = label_encoder.encode(y_val)
y_test = label_encoder.encode(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
```
## Tokenizer
We'll define a `Tokenizer` to convert our text input data into token indices.
```
import json
from collections import Counter
from more_itertools import take
class Tokenizer(object):
    """Word- or character-level tokenizer mapping tokens <-> integer indices.

    Index 0 is reserved for the pad token and index 1 for the
    out-of-vocabulary (OOV) token.
    """
    def __init__(self, char_level, num_tokens=None,
                 pad_token='<PAD>', oov_token='<UNK>',
                 token_to_index=None):
        # char_level=True tokenizes per character, otherwise per
        # whitespace-separated word.
        self.char_level = char_level
        self.separator = '' if self.char_level else ' '
        if num_tokens: num_tokens -= 2 # pad + unk tokens
        self.num_tokens = num_tokens
        self.pad_token = pad_token
        self.oov_token = oov_token
        if not token_to_index:
            token_to_index = {pad_token: 0, oov_token: 1}
        self.token_to_index = token_to_index
        self.index_to_token = {v: k for k, v in self.token_to_index.items()}
    def __len__(self):
        return len(self.token_to_index)
    def __str__(self):
        return f"<Tokenizer(num_tokens={len(self)})>"
    def fit_on_texts(self, texts):
        """Build the vocabulary from the `num_tokens` most frequent tokens."""
        if not self.char_level:
            texts = [text.split(" ") for text in texts]
        all_tokens = [token for text in texts for token in text]
        # most_common(None) keeps every token when num_tokens is None.
        counts = Counter(all_tokens).most_common(self.num_tokens)
        # Frequency of the rarest kept token; useful for tuning num_tokens.
        self.min_token_freq = counts[-1][1]
        for token, count in counts:
            index = len(self)
            self.token_to_index[token] = index
            self.index_to_token[index] = token
        return self
    def texts_to_sequences(self, texts):
        """Convert raw texts to a list of index arrays (unknown token -> OOV id)."""
        sequences = []
        for text in texts:
            if not self.char_level:
                text = text.split(' ')
            sequence = []
            for token in text:
                sequence.append(self.token_to_index.get(
                    token, self.token_to_index[self.oov_token]))
            sequences.append(np.asarray(sequence))
        return sequences
    def sequences_to_texts(self, sequences):
        """Convert index sequences back to strings (unknown index -> oov_token)."""
        texts = []
        for sequence in sequences:
            text = []
            for index in sequence:
                text.append(self.index_to_token.get(index, self.oov_token))
            texts.append(self.separator.join([token for token in text]))
        return texts
    def save(self, fp):
        """Persist the tokenizer configuration as JSON."""
        with open(fp, 'w') as fp:
            contents = {
                'char_level': self.char_level,
                'oov_token': self.oov_token,
                'token_to_index': self.token_to_index
            }
            json.dump(contents, fp, indent=4, sort_keys=False)
    @classmethod
    def load(cls, fp):
        """Restore a tokenizer from a JSON file produced by `save`."""
        with open(fp, 'r') as fp:
            kwargs = json.load(fp=fp)
        return cls(**kwargs)
# Tokenize
tokenizer = Tokenizer(char_level=False, num_tokens=5000)
tokenizer.fit_on_texts(texts=X_train)
VOCAB_SIZE = len(tokenizer)
print (tokenizer)
# Sample of tokens
print (take(5, tokenizer.token_to_index.items()))
print (f"least freq token's freq: {tokenizer.min_token_freq}") # use this to adjust num_tokens
# Convert texts to sequences of indices
X_train = tokenizer.texts_to_sequences(X_train)
X_val = tokenizer.texts_to_sequences(X_val)
X_test = tokenizer.texts_to_sequences(X_test)
preprocessed_text = tokenizer.sequences_to_texts([X_train[0]])[0]
print ("Text to indices:\n"
f" (preprocessed) → {preprocessed_text}\n"
f" (tokenized) → {X_train[0]}")
```
## Padding
We'll need to do 2D padding to our tokenized text.
```
def pad_sequences(sequences, max_seq_len=0):
    """Right-pad every sequence with zeros to a common length.

    The target length is the longer of `max_seq_len` and the longest
    sequence in the batch. Returns a float ndarray of shape
    (num_sequences, target_length).
    """
    longest = max(len(seq) for seq in sequences)
    width = max_seq_len if max_seq_len > longest else longest
    padded = np.zeros((len(sequences), width))
    for row, seq in enumerate(sequences):
        padded[row, :len(seq)] = seq
    return padded
# 2D sequences
padded = pad_sequences(X_train[0:3])
print (padded.shape)
print (padded)
```
## Datasets
We're going to create Datasets and DataLoaders to be able to efficiently create batches with our data splits.
```
class Dataset(torch.utils.data.Dataset):
    """Torch dataset over pre-tokenized sequences X and integer labels y."""
    def __init__(self, X, y,):
        self.X = X
        self.y = y
    def __len__(self):
        return len(self.y)
    def __str__(self):
        return f"<Dataset(N={len(self)})>"
    def __getitem__(self, index):
        # Return the sequence, its true (unpadded) length, and the label.
        X = self.X[index]
        y = self.y[index]
        return [X, len(X), y]
    def collate_fn(self, batch):
        """Processing on a batch: pad the ragged sequences and cast to tensors."""
        # Get inputs (dtype=object because the sequences have ragged lengths)
        batch = np.array(batch, dtype=object)
        X = batch[:, 0]
        seq_lens = batch[:, 1]
        y = np.stack(batch[:, 2], axis=0)
        # Pad inputs to the longest sequence in this batch
        X = pad_sequences(sequences=X)
        # Cast to LongTensors (token indices feed an embedding layer)
        X = torch.LongTensor(X.astype(np.int32))
        seq_lens = torch.LongTensor(seq_lens.astype(np.int32))
        y = torch.LongTensor(y.astype(np.int32))
        return X, seq_lens, y
    def create_dataloader(self, batch_size, shuffle=False, drop_last=False):
        """Build a DataLoader that batches via this dataset's collate_fn."""
        return torch.utils.data.DataLoader(
            dataset=self, batch_size=batch_size, collate_fn=self.collate_fn,
            shuffle=shuffle, drop_last=drop_last, pin_memory=True)
# Create datasets
train_dataset = Dataset(X=X_train, y=y_train)
val_dataset = Dataset(X=X_val, y=y_val)
test_dataset = Dataset(X=X_test, y=y_test)
print ("Datasets:\n"
f" Train dataset:{train_dataset.__str__()}\n"
f" Val dataset: {val_dataset.__str__()}\n"
f" Test dataset: {test_dataset.__str__()}\n"
"Sample point:\n"
f" X: {train_dataset[0][0]}\n"
f" seq_len: {train_dataset[0][1]}\n"
f" y: {train_dataset[0][2]}")
# Create dataloaders
batch_size = 64
train_dataloader = train_dataset.create_dataloader(
batch_size=batch_size)
val_dataloader = val_dataset.create_dataloader(
batch_size=batch_size)
test_dataloader = test_dataset.create_dataloader(
batch_size=batch_size)
batch_X, batch_seq_lens, batch_y = next(iter(train_dataloader))
print ("Sample batch:\n"
f" X: {list(batch_X.size())}\n"
f" seq_lens: {list(batch_seq_lens.size())}\n"
f" y: {list(batch_y.size())}\n"
"Sample point:\n"
f" X: {batch_X[0]}\n"
f" seq_len: {batch_seq_lens[0]}\n"
f" y: {batch_y[0]}")
```
## Trainer
Let's create the `Trainer` class that we'll use to facilitate training for our experiments.
```
class Trainer(object):
    """Bundles model, loss, optimizer and scheduler for train/eval/predict loops.

    loss_fn/optimizer/scheduler may be omitted for inference-only usage
    (only predict_step is safe to call in that case).
    """
    def __init__(self, model, device, loss_fn=None, optimizer=None, scheduler=None):
        # Set params
        self.model = model
        self.device = device
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
    def train_step(self, dataloader):
        """Train step. Returns the running mean of the batch losses."""
        # Set model to train mode (enables dropout etc.)
        self.model.train()
        loss = 0.0
        # Iterate over train batches
        for i, batch in enumerate(dataloader):
            # Step
            batch = [item.to(self.device) for item in batch] # Set device
            inputs, targets = batch[:-1], batch[-1] # last element is the label
            self.optimizer.zero_grad() # Reset gradients
            z = self.model(inputs) # Forward pass
            J = self.loss_fn(z, targets) # Define loss
            J.backward() # Backward pass
            self.optimizer.step() # Update weights
            # Cumulative metrics: incremental running mean of the loss
            loss += (J.detach().item() - loss) / (i + 1)
        return loss
    def eval_step(self, dataloader):
        """Validation or test step.

        Returns (mean loss, stacked true labels, stacked predicted scores).
        """
        # Set model to eval mode
        self.model.eval()
        loss = 0.0
        y_trues, y_probs = [], []
        # Iterate over val batches without building a computation graph
        with torch.no_grad():
            for i, batch in enumerate(dataloader):
                # Step
                batch = [item.to(self.device) for item in batch] # Set device
                inputs, y_true = batch[:-1], batch[-1]
                z = self.model(inputs) # Forward pass
                J = self.loss_fn(z, y_true).item()
                # Cumulative metrics: incremental running mean of the loss
                loss += (J - loss) / (i + 1)
                # Store outputs
                # NOTE(review): sigmoid is applied although training uses
                # CrossEntropyLoss; the downstream argmax is unaffected since
                # sigmoid is monotonic — confirm softmax wasn't intended.
                y_prob = torch.sigmoid(z).cpu().numpy()
                y_probs.extend(y_prob)
                y_trues.extend(y_true.cpu().numpy())
        return loss, np.vstack(y_trues), np.vstack(y_probs)
    def predict_step(self, dataloader):
        """Prediction step. Returns stacked class probabilities."""
        # Set model to eval mode
        self.model.eval()
        y_probs = []
        # Iterate over batches
        with torch.no_grad():
            for i, batch in enumerate(dataloader):
                # Forward pass w/ inputs
                # NOTE(review): unlike train/eval steps, the batch is NOT
                # moved to self.device here — fine for CPU inference; verify
                # before running this on GPU.
                inputs, targets = batch[:-1], batch[-1]
                y_prob = self.model(inputs, apply_softmax=True)
                # Store outputs
                y_probs.extend(y_prob)
        return np.vstack(y_probs)
    def train(self, num_epochs, patience, train_dataloader, val_dataloader):
        """Full training loop with early stopping on the validation loss."""
        best_val_loss = np.inf
        for epoch in range(num_epochs):
            # Steps
            train_loss = self.train_step(dataloader=train_dataloader)
            val_loss, _, _ = self.eval_step(dataloader=val_dataloader)
            self.scheduler.step(val_loss) # ReduceLROnPlateau expects the metric
            # Early stopping: reset patience on improvement, else count down
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_model = self.model
                _patience = patience # reset _patience
            else:
                _patience -= 1
            if not _patience: # 0
                print("Stopping early!")
                break
            # Logging
            print(
                f"Epoch: {epoch+1} | "
                f"train_loss: {train_loss:.5f}, "
                f"val_loss: {val_loss:.5f}, "
                f"lr: {self.optimizer.param_groups[0]['lr']:.2E}, "
                f"_patience: {_patience}"
            )
        return best_model
```
# Vanilla RNN
Inputs to RNNs are sequential like text or time-series.
```
BATCH_SIZE = 64
EMBEDDING_DIM = 100
# Input
sequence_size = 8 # words per input
x = torch.rand((BATCH_SIZE, sequence_size, EMBEDDING_DIM))
seq_lens = torch.randint(high=sequence_size, size=(1, BATCH_SIZE))
print (x.shape)
print (seq_lens.shape)
```
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/vanilla.png" width="500">
</div>
RNN forward pass for a single time step $X_t$:
$h_t = tanh(W_{hh}h_{t-1} + W_{xh}X_t+b_h)$
*where*:
* $W_{hh}$ = hidden units weights $\in \mathbb{R}^{H \times H}$ ($H$ is the hidden dim)
* $h_{t-1}$ = previous timestep's hidden state $\in \mathbb{R}^{N \times H}$
* $W_{xh}$ = input weights $\in \mathbb{R}^{E \times H}$
* $X_t$ = input at time step $t$ $\in \mathbb{R}^{N \times E}$ ($N$ is the batch size, $E$ is the embedding dim)
* $b_h$ = hidden units bias $\in \mathbb{R}^{H \times 1}$
* $h_t$ = output from RNN for timestep $t$
> At the first time step, the previous hidden state $h_{t-1}$ can either be a zero vector (unconditioned) or initialized (conditioned). If we are conditioning the RNN, the first hidden state $h_0$ can belong to a specific condition or we can concat the specific condition to the randomly initialized hidden vectors at each time step. More on this in the subsequent notebooks on RNNs.
```
RNN_HIDDEN_DIM = 128
DROPOUT_P = 0.1
RNN_DROPOUT_P = 0.1
# Initialize hidden state
hidden_t = torch.zeros((BATCH_SIZE, RNN_HIDDEN_DIM))
print (hidden_t.size())
```
We'll show how to create an RNN cell using PyTorch's [`RNNCell`](https://pytorch.org/docs/stable/generated/torch.nn.RNNCell.html#torch.nn.RNNCell) and the more abstracted [`RNN`](https://pytorch.org/docs/stable/generated/torch.nn.RNN.html#torch.nn.RNN).
```
# Initialize RNN cell
rnn_cell = nn.RNNCell(EMBEDDING_DIM, RNN_HIDDEN_DIM)
print (rnn_cell)
# Forward pass through RNN
x = x.permute(1, 0, 2) # RNN needs batch_size to be at dim 1
# Loop through the inputs time steps
hiddens = []
for t in range(sequence_size):
hidden_t = rnn_cell(x[t], hidden_t)
hiddens.append(hidden_t)
hiddens = torch.stack(hiddens)
hiddens = hiddens.permute(1, 0, 2) # bring batch_size back to dim 0
print (hiddens.size())
# We also could've used a more abstracted layer
x = torch.rand((BATCH_SIZE, sequence_size, EMBEDDING_DIM))
rnn = nn.RNN(EMBEDDING_DIM, RNN_HIDDEN_DIM, batch_first=True)
out, h_n = rnn(x) # h_n is the last hidden state
print ("out: ", out.shape)
print ("h_n: ", h_n.shape)
# The same tensors
print (out[:,-1,:])
print (h_n.squeeze(0))
```
In our model, we want to use the RNN's output after the last relevant token in the sentence is processed. The last relevant token doesn't refer to the `<PAD>` tokens but to the last actual word in the sentence, and its index is different for each input in the batch. This is why we included a `seq_lens` tensor in our batches.
```
def gather_last_relevant_hidden(hiddens, seq_lens):
    """Pick, for each batch item, the hidden state at its last real
    (non-padding) timestep, as given by `seq_lens`."""
    last_positions = seq_lens.long().detach().cpu().numpy() - 1
    selected = [hiddens[row, position] for row, position in enumerate(last_positions)]
    return torch.stack(selected)
# Get the last relevant hidden state
gather_last_relevant_hidden(hiddens=out, seq_lens=seq_lens).squeeze(0).shape
```
There are many different ways to use RNNs. So far we've processed our inputs one timestep at a time and we could either use the RNN's output at each time step or just use the final input timestep's RNN output. Let's look at a few other possibilities.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/architectures.png" width="1000">
</div>
## Model
```
import torch.nn.functional as F
HIDDEN_DIM = 100
class RNN(nn.Module):
    """RNN classifier: embedding -> vanilla RNN -> two FC layers."""
    def __init__(self, embedding_dim, vocab_size, rnn_hidden_dim,
                 hidden_dim, dropout_p, num_classes, padding_idx=0):
        super(RNN, self).__init__()
        # Initialize embeddings (the padding_idx row stays zero / gets no gradient)
        self.embeddings = nn.Embedding(
            embedding_dim=embedding_dim, num_embeddings=vocab_size,
            padding_idx=padding_idx)
        # RNN (batch_first: inputs are (N, seq_len, embedding_dim))
        self.rnn = nn.RNN(embedding_dim, rnn_hidden_dim, batch_first=True)
        # FC weights
        self.dropout = nn.Dropout(dropout_p)
        self.fc1 = nn.Linear(rnn_hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)
    def forward(self, inputs, apply_softmax=False):
        """Forward pass over (x_in, seq_lens); returns logits, or class
        probabilities when apply_softmax=True."""
        # Embed
        x_in, seq_lens = inputs
        x_in = self.embeddings(x_in)
        # Rnn outputs: keep the hidden state at each input's last real token
        out, h_n = self.rnn(x_in)
        z = gather_last_relevant_hidden(hiddens=out, seq_lens=seq_lens)
        # FC layers
        z = self.fc1(z)
        z = self.dropout(z)
        y_pred = self.fc2(z)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred
# Simple RNN cell
model = RNN(
embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
rnn_hidden_dim=RNN_HIDDEN_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
model = model.to(device) # set device
print (model.named_parameters)
```
## Training
```
from torch.optim import Adam
NUM_LAYERS = 1
LEARNING_RATE = 1e-4
PATIENCE = 10
NUM_EPOCHS = 50
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Define optimizer & scheduler
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.1, patience=3)
# Trainer module
trainer = Trainer(
model=model, device=device, loss_fn=loss_fn,
optimizer=optimizer, scheduler=scheduler)
# Train
best_model = trainer.train(
NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)
```
## Evaluation
```
import json
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
    """Per-class performance metrics.

    Returns a dict with weighted-average precision/recall/f1 under
    "overall" and unaveraged metrics per class name under "class".
    """
    performance = {"overall": {}, "class": {}}
    # Weighted-average metrics across all classes.
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average="weighted")
    performance["overall"]["precision"] = precision
    performance["overall"]["recall"] = recall
    performance["overall"]["f1"] = f1
    performance["overall"]["num_samples"] = np.float64(len(y_true))
    # Unaveraged metrics, one entry per class.
    precisions, recalls, f1s, supports = precision_recall_fscore_support(
        y_true, y_pred, average=None)
    for name, p, r, f, s in zip(classes, precisions, recalls, f1s, supports):
        performance["class"][name] = {
            "precision": p,
            "recall": r,
            "f1": f,
            "num_samples": np.float64(s),
        }
    return performance
# Get predictions
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# Determine performance
performance = get_performance(
y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance['overall'], indent=2))
```
# Gated RNNs: LSTMs & GRUs
While our simple RNNs so far are great for sequentially processing our inputs, they have quite a few disadvantages. They commonly suffer from exploding or vanishing gradients as a result using the same set of weights ($W_{xh}$ and $W_{hh}$) with each timestep's input. During backpropagation, this can cause gradients to explode (>1) or vanish (<1). If you multiply any number greater than 1 with itself over and over, it moves towards infinity (exploding gradients) and similarly, If you multiply any number less than 1 with itself over and over, it moves towards zero (vanishing gradients). To mitigate this issue, gated RNNs were devised to selectively retrain information. If you're interested in learning more of the specifics, this [post](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) is a must-read.
There are two popular types of gated RNNs: Long Short-term Memory (LSTMs) units and Gated Recurrent Units (GRUs).
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/gated.png" width="600">
</div>
<a href="http://colah.github.io/posts/2015-08-Understanding-LSTMs/">Understanding LSTM Networks</a> - Chris Olah
> When deciding between LSTMs and GRUs, empirical performance is the best factor but in general GRUs offer similar performance with less complexity (fewer weights).
```
# Input
sequence_size = 8 # words per input
x = torch.rand((BATCH_SIZE, sequence_size, EMBEDDING_DIM))
print (x.shape)
# GRU
gru = nn.GRU(input_size=EMBEDDING_DIM, hidden_size=RNN_HIDDEN_DIM, batch_first=True)
# Forward pass
out, h_n = gru(x)
print (f"out: {out.shape}")
print (f"h_n: {h_n.shape}")
```
## Bidirectional RNN
We can also have RNNs that process inputs from both directions (first token to last token and vice versa) and combine their outputs. This architecture is known as a bidirectional RNN.
```
# GRU
gru = nn.GRU(input_size=EMBEDDING_DIM, hidden_size=RNN_HIDDEN_DIM,
batch_first=True, bidirectional=True)
# Forward pass
out, h_n = gru(x)
print (f"out: {out.shape}")
print (f"h_n: {h_n.shape}")
```
Notice that the output for each sample at each timestamp has size 256 (double the `RNN_HIDDEN_DIM`). This is because this includes both the forward and backward directions from the BiRNN.
## Model
```
class GRU(nn.Module):
    """Bidirectional GRU classifier: embedding -> BiGRU -> two FC layers."""
    def __init__(self, embedding_dim, vocab_size, rnn_hidden_dim,
                 hidden_dim, dropout_p, num_classes, padding_idx=0):
        super(GRU, self).__init__()
        # Initialize embeddings (the padding_idx row stays zero / gets no gradient)
        self.embeddings = nn.Embedding(embedding_dim=embedding_dim,
                                       num_embeddings=vocab_size,
                                       padding_idx=padding_idx)
        # RNN: bidirectional, so outputs carry 2*rnn_hidden_dim features
        self.rnn = nn.GRU(embedding_dim, rnn_hidden_dim,
                          batch_first=True, bidirectional=True)
        # FC weights (input is the concatenated forward+backward state)
        self.dropout = nn.Dropout(dropout_p)
        self.fc1 = nn.Linear(rnn_hidden_dim*2, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)
    def forward(self, inputs, apply_softmax=False):
        """Forward pass over (x_in, seq_lens); returns logits, or class
        probabilities when apply_softmax=True."""
        # Embed
        x_in, seq_lens = inputs
        x_in = self.embeddings(x_in)
        # Rnn outputs: keep the hidden state at each input's last real token
        out, h_n = self.rnn(x_in)
        z = gather_last_relevant_hidden(hiddens=out, seq_lens=seq_lens)
        # FC layers
        z = self.fc1(z)
        z = self.dropout(z)
        y_pred = self.fc2(z)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred
# Simple RNN cell
model = GRU(
embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
rnn_hidden_dim=RNN_HIDDEN_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
model = model.to(device) # set device
print (model.named_parameters)
```
## Training
```
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Define optimizer & scheduler
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.1, patience=3)
# Trainer module
trainer = Trainer(
model=model, device=device, loss_fn=loss_fn,
optimizer=optimizer, scheduler=scheduler)
# Train
best_model = trainer.train(
NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)
```
## Evaluation
```
from pathlib import Path
# Get predictions
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# Determine performance
performance = get_performance(
y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance['overall'], indent=2))
# Save artifacts
dir = Path("gru")
dir.mkdir(parents=True, exist_ok=True)
label_encoder.save(fp=Path(dir, 'label_encoder.json'))
tokenizer.save(fp=Path(dir, 'tokenizer.json'))
torch.save(best_model.state_dict(), Path(dir, 'model.pt'))
with open(Path(dir, 'performance.json'), "w") as fp:
json.dump(performance, indent=2, sort_keys=False, fp=fp)
```
## Inference
```
def get_probability_distribution(y_prob, classes):
    """Map each class name to its probability, sorted descending by probability."""
    paired = {name: np.float64(y_prob[i]) for i, name in enumerate(classes)}
    # Plain dicts preserve insertion order, so sorting the items once is enough.
    return dict(sorted(paired.items(), key=lambda kv: kv[1], reverse=True))
# Load artifacts saved by the evaluation cell (CPU suffices for inference)
device = torch.device("cpu")
label_encoder = LabelEncoder.load(fp=Path(dir, 'label_encoder.json'))
tokenizer = Tokenizer.load(fp=Path(dir, 'tokenizer.json'))
model = GRU(
embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE,
rnn_hidden_dim=RNN_HIDDEN_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
model.load_state_dict(torch.load(Path(dir, 'model.pt'), map_location=device))
model.to(device)
# Initialize trainer (no loss/optimizer needed for prediction only)
trainer = Trainer(model=model, device=device)
# Dataloader for a single probe sentence
text = "The final tennis tournament starts next week."
X = tokenizer.texts_to_sequences([preprocess(text)])
print (tokenizer.sequences_to_texts(X))
# Dataset requires labels, so fill with an arbitrary valid class
y_filler = label_encoder.encode([label_encoder.classes[0]]*len(X))
dataset = Dataset(X=X, y=y_filler)
# NOTE(review): lowercase batch_size is not defined in this notebook's
# visible cells (constants are uppercase) — confirm it exists upstream.
dataloader = dataset.create_dataloader(batch_size=batch_size)
# Inference
y_prob = trainer.predict_step(dataloader)
y_pred = np.argmax(y_prob, axis=1)
label_encoder.decode(y_pred)
# Class distributions, sorted by probability
prob_dist = get_probability_distribution(y_prob=y_prob[0], classes=label_encoder.classes)
print (json.dumps(prob_dist, indent=2))
```
> We will learn how to create more context-aware representations and a little bit of interpretability with RNNs in the next lesson on <a target="_blank" href="https://madewithml.com/courses/foundations/attention/">attention</a>.
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import os
import json
import yaml
from IPython.display import display, Markdown
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
# from decode_trf import delivery_data_from_logfile
from pymedphys.msq import mosaiq_connect
from pymedphys.utilities import get_index, get_data_directory, get_filepath, get_gantry_tolerance
from pymedphys.logfile import *
from pymedphys.trf import *
from pymedphys.mudensity import *
from pymedphys.plt import pcolormesh_grid
with open('../config.json') as config_file:
config = json.load(config_file)
index = get_index(config)
data_directory = get_data_directory(config)
cache_filepath = os.path.join(data_directory, 'cache', 'dmlc_comparison.yaml')
cache_scratch_filepath = os.path.join(data_directory, 'cache', 'dmlc_comparison_scratch.yaml')
# NOTE(review): yaml.load without an explicit Loader is deprecated and
# unsafe on untrusted input; prefer yaml.safe_load(cache_file) if the
# cache contains only plain data.
with open(cache_filepath, 'r') as cache_file:
cache = yaml.load(cache_file)
# Top-level cache keys are patient ids
patient_ids = list(cache.keys())
len(patient_ids)
# Flatten the nested cache (patient -> field -> gantry angle) into rows
# for a comparison table.
data = []
for patient_id in patient_ids:
for byfield in cache[patient_id]:
for angle in cache[patient_id][byfield]:
# print(cache[patient_id][byfield][angle].keys())
comparison = cache[patient_id][byfield][angle]['median']
file_hashes = cache[patient_id][byfield][angle]['median_filehash_group']
all_comparisons = cache[patient_id][byfield][angle]['comparisons']
all_file_hashes = cache[patient_id][byfield][angle]['filehash_groups']
all_comparisons_flat = []
for key, value in all_comparisons.items():
all_comparisons_flat.append(value)
data.append([
patient_id, byfield, angle, comparison, file_hashes, tuple(all_comparisons_flat), tuple(all_file_hashes)
])
comparisons_table = pd.DataFrame(
columns=[
'patient_id', 'field_id',
'gantry_angle', 'comparison',
'file_hashes', 'all_comparisons',
'all_file_hashes'
],
data=data
)
# Worst-agreeing fields first
comparisons_table = comparisons_table.sort_values('comparison', ascending=False)
top_ten = comparisons_table.iloc[0:10]
top_ten
# Drill into the worst-agreeing gantry angle of one field of interest.
field_id = 77630
field_ref = comparisons_table['field_id'] == field_id
comparisons_table[field_ref]
worst_row_of_field = comparisons_table[field_ref].iloc[0]
gantry_angle = worst_row_of_field['gantry_angle']
gantry_angle
gantry_angles = comparisons_table[field_ref]['gantry_angle'].values
gantry_angles
worst_row_of_field['all_file_hashes']
# Pick the logfile group with the largest comparison value (worst agreement).
worst_file_ref = np.argmax(worst_row_of_field['all_comparisons'])
logfile_group = worst_row_of_field['all_file_hashes'][worst_file_ref]
logfile_group
file_hash = logfile_group[0]
file_hash
# with mosaiq_connect('msqsql') as cursor:
# comparison = compare_logfile_group_bygantry(
# index, config, cursor, logfile_group, gantry_angle)
filepath = get_filepath(index, config, file_hash)
filepath
# Decode the linac logfile both as delivery data and as the raw table.
logfile_delivery_data = delivery_data_from_logfile(filepath)
full_logfile = decode_trf(filepath)
len(logfile_delivery_data.monitor_units)
control_point_label = full_logfile.columns[0]
control_point_label
mu_label = 'Step Dose/Actual Value (Mu)'
np.cumsum([0,1,2,0,0,2])
# Rebuild cumulative MU from the per-step dose column; negative diffs
# (presumably counter resets) are clamped to zero before re-accumulating.
diff = np.diff(
np.concatenate([[0], full_logfile[mu_label].values])
)
diff[diff < 0] = 0
mu_test = np.cumsum(diff)
# mu_test = np.cumsum(
# np.diff(
# np.concatenate([[0], full_logfile[mu_label].values])
# )
# # )
monitor_units = np.array(logfile_delivery_data.monitor_units)
monitor_units
plt.plot(mu_test)
plt.plot(monitor_units)
np.allclose(mu_test, monitor_units)
np.sum(find_relevant_control_points(monitor_units))
jaw_pos_error_label = 'X1 Diaphragm/Positional Error (mm)'
# NOTE(review): jaw_pos_error is only assigned further below — this lookup
# relies on leftover kernel state from a previous run of this notebook.
jaw_pos_error[3249]
full_logfile[mu_label].values[3249]
# jaw_pos_error_label = 'Dlg Y2/Positional Error (mm)'
# gantry_angles = np.array(delivery_data.gantry)
# jaw_pos_error = full_logfile[jaw_pos_error_label].values
# gantry_angle_within_tolerance = (
# np.abs(gantry_angles - gantry_angle) <= gantry_tolerance)
# np.max(np.abs(jaw_pos_error[gantry_angle_within_tolerance]))
# Restrict the delivery data to control points that actually deliver MU ...
delivery_data = logfile_delivery_data
gantry_tolerance = 0.5
monitor_units = np.array(delivery_data.monitor_units)
relevant_control_points = find_relevant_control_points(monitor_units)
mu = monitor_units[relevant_control_points]
mlc = np.array(delivery_data.mlc)[relevant_control_points]
jaw = np.array(delivery_data.jaw)[relevant_control_points]
gantry_angles = np.array(delivery_data.gantry)[relevant_control_points]
control_points = full_logfile[control_point_label].values[relevant_control_points]
jaw_pos_error = full_logfile[jaw_pos_error_label].values[relevant_control_points]
# ... and to the gantry angle under investigation (within tolerance).
gantry_angle_within_tolerance = (
np.abs(gantry_angles - gantry_angle) <= gantry_tolerance)
diff_mu = np.concatenate([[0], np.diff(mu)])[gantry_angle_within_tolerance]
mu = np.cumsum(diff_mu)
mlc = mlc[gantry_angle_within_tolerance]
jaw = jaw[gantry_angle_within_tolerance]
control_points = control_points[gantry_angle_within_tolerance]
jaw_pos_error = jaw_pos_error[gantry_angle_within_tolerance]
logfile_mu, logfile_mlc, logfile_jaw = mu, mlc, jaw
# logfile_mu, logfile_mlc, logfile_jaw = extract_angle_from_delivery_data(logfile_delivery_data, gantry_angle, gantry_tolerance=0.5)
# Fetch the corresponding planned delivery from Mosaiq for the same angle.
with mosaiq_connect('msqsql') as cursor:
mosaiq_delivery_data = multi_fetch_and_verify_mosaiq(cursor, field_id)
mosaiq_mu, mosaiq_mlc, mosaiq_jaw = extract_angle_from_delivery_data(mosaiq_delivery_data, gantry_angle)
# logfile_delivery_data
grid_resolution = 0.25
# mosaiq_mu_density = calc_mu_density_bygantry(
# mosaiq_delivery_data, gantry_angle, grid_resolution)
# normalisation = calc_normalisation(mosaiq_delivery_data)
# logfile_mu_density = calc_logfile_mu_density_bygantry(
# index, config, logfile_group, gantry_angle, grid_resolution)
# grid_xx = logfile_mu_density[0]
# grid_yy = logfile_mu_density[1]
# logfile_mu_density = logfile_mu_density[2]
# mosaiq_mu_density = mosaiq_mu_density[2]
# Load previously computed MU densities from disk instead of recomputing
# (the commented code above produced this file).
save_file_path = r'S:\Physics\Programming\data\LinacLogFiles\results\EPSM2018_77630_investigation.json'
with open(save_file_path, 'r') as save_file:
loaded_data = json.load(save_file)
x = np.array(loaded_data['x'])
y = np.array(loaded_data['y'])
logfile_mu_density = np.array(loaded_data['logfile'])
mosaiq_mu_density = np.array(loaded_data['mosaiq'])
# Shared colour scale across both density maps
min_val = np.min([logfile_mu_density, mosaiq_mu_density])
max_val = np.max([logfile_mu_density, mosaiq_mu_density])
# x = grid_xx[0,:]
# y = grid_yy[:,0]
# x, y = pcolormesh_grid(x, y)
# x = -x
# x
# to_save = {
# 'x': x.tolist(),
# 'y': y.tolist(),
# 'logfile': np.round(logfile_mu_density, 2).tolist(),
# 'mosaiq': np.round(mosaiq_mu_density, 2).tolist()
# }
# save_file_path = r'S:\Physics\Programming\data\LinacLogFiles\results\EPSM2018_77630_investigation.json'
# with open(save_file_path, 'w') as save_file:
# json.dump(to_save, save_file)
# Render the logfile and Mosaiq MU densities on a common colour scale,
# then their difference on a symmetric diverging scale.
figsize = (6.5, 9.5)
plt.figure(figsize=figsize)
plt.pcolormesh(x, y, logfile_mu_density, vmin=min_val, vmax=max_val)
plt.colorbar(label='MU density')
plt.title('Logfile MU density')
plt.xlabel('MLC direction (mm)')
plt.ylabel('Jaw direction (mm)')
plt.axis('equal')
plt.xlim([-67, 60])
plt.ylim([60, -75])
plt.savefig('logfile.png')
plt.figure(figsize=figsize)
plt.pcolormesh(x, y, mosaiq_mu_density, vmin=min_val, vmax=max_val)
plt.colorbar(label='MU density')
plt.title('Mosaiq MU density')
plt.xlabel('MLC direction (mm)')
plt.ylabel('Jaw direction (mm)')
plt.axis('equal')
plt.xlim([-67, 60])
plt.ylim([60, -75])
plt.savefig('mosaiq.png')
difference = logfile_mu_density - mosaiq_mu_density
max_range = np.max(np.abs(difference))
plt.figure(figsize=figsize)
plt.pcolormesh(x, y, difference, vmin=-max_range, vmax=max_range, cmap='bwr')
plt.colorbar(label='Logfile - Mosaiq MU density')
plt.title('MU density difference')
plt.xlabel('MLC direction (mm)')
plt.ylabel('Jaw direction (mm)')
plt.axis('equal')
plt.xlim([-67, 60])
plt.ylim([60, -75])
plt.savefig('diff.png')
# Same difference map again at a wider figure size.
figsize2 = (8.5, 9.5)
plt.figure(figsize=figsize2)
plt.pcolormesh(x, y, difference, vmin=-max_range, vmax=max_range, cmap='bwr')
plt.colorbar(label='Logfile - Mosaiq MU density')
plt.title('MU density difference')
plt.xlabel('MLC direction (mm)')
plt.ylabel('Jaw direction (mm)')
plt.axis('equal')
plt.xlim([-67, 60])
plt.ylim([60, -75])
# NOTE(review): overwrites the 'diff.png' saved just above — use a distinct
# filename if both versions are wanted.
plt.savefig('diff.png')
np.shape(mosaiq_mu)
np.shape(mosaiq_jaw)
# MU values at which the logfile's control point number changes.
new_control_point = logfile_mu[np.diff(np.concatenate([[-1], control_points])) != 0]
new_control_point
# Resample the Mosaiq jaw trace onto the logfile's MU axis to compare.
jaw_interp = interp1d(mosaiq_mu, mosaiq_jaw[:,0])
mosaiq_resampled = jaw_interp(logfile_mu)
np.max(np.abs(logfile_jaw[:,0] - mosaiq_resampled))
np.max(np.abs(jaw_pos_error))
plt.plot(mosaiq_mu, mosaiq_jaw[:,0])
plt.plot(logfile_mu, logfile_jaw[:,0])
# plt.plot(logfile_mu, control_points)
for mu in new_control_point:
plt.plot([mu,mu], [40,65], 'k--', alpha=0.2)
plt.ylim([44,61])
plot_new_control_point = np.concatenate([new_control_point, [np.max(mosaiq_mu)]])
jaw_pos_error
np.max(np.abs(jaw_pos_error))
plt.figure(figsize=figsize)
plt.title('Jaw vs MU')
plt.xlabel('Cumulative Monitor Units')
plt.ylabel('Jaw position (mm)')
plt.plot(mosaiq_mu, mosaiq_jaw[:,0], label='Mosaiq Record')
plt.plot(logfile_mu, logfile_jaw[:,0], label='Logfile Record')
# NOTE(review): duplicate legend label — this third trace adds the recorded
# positional error; consider labelling it 'Logfile + recorded error'.
plt.plot(logfile_mu, logfile_jaw[:,0] + jaw_pos_error, label='Logfile Record')
for mu in plot_new_control_point:
plt.plot([mu,mu], [40,65], 'k--', alpha=0.2)
# Off-screen dummy line so the control-point style appears once in the legend.
plt.plot([mu,mu], [100,100], 'k--', alpha=0.2, label='Control Point Boundaries')
# plt.plot([mu2,mu2], [40,65], 'k-', alpha=0.5)
plt.ylim([44,61])
plt.legend()
plt.savefig('jaw.png')
plt.show()
# plt.plot(logfile_mu, control_points)
# for mu in new_control_point:
# plt.plot([mu,mu], [40,65], 'k--', alpha=0.2)
# One figure per control-point interval, highlighting its boundaries.
for i, (mu1, mu2) in enumerate(zip(plot_new_control_point[0:-1], plot_new_control_point[1::])):
plt.figure()
plt.title('Jaw vs MU')
plt.xlabel('Cumulative Monitor Units')
plt.ylabel('Jaw position (mm)')
plt.plot(mosaiq_mu, mosaiq_jaw[:,0])
plt.plot(logfile_mu, logfile_jaw[:,0])
for mu in plot_new_control_point:
plt.plot([mu,mu], [40,65], 'k--', alpha=0.2)
plt.plot([mu1,mu1], [40,65], 'k-', alpha=0.5)
plt.plot([mu2,mu2], [40,65], 'k-', alpha=0.5)
plt.ylim([44,61])
plt.savefig('{}_jaw.png'.format(i))
plt.show()
len(new_control_point)
np.sort(np.unique(control_points))
# slice_ref = control_point == control_points
# index_ref = np.where(slice_ref)[0]
# index_ref
# slice_ref = control_point == control_points
# index_ref = np.where(slice_ref)[0]
# if index_ref[0] != 0:
# index_ref = np.concatenate([[index_ref[0] - 1], index_ref])
# index_ref
# Per-control-point delivered MU for each record: Mosaiq has one row per
# control point (slice i..i+2); the logfile groups many rows per point.
mosaiq_cp_mu = []
logfile_cp_mu = []
for i, control_point in enumerate(np.sort(np.unique(control_points))):
a_slice = slice(i, i + 2, 1)
slice_ref = control_point == control_points
index_ref = np.where(slice_ref)[0]
if index_ref[0] != 0:
index_ref = np.concatenate([[index_ref[0] - 1], index_ref])
mosaiq_cp_mu.append(np.max(mosaiq_mu[a_slice]) - np.min(mosaiq_mu[a_slice]))
logfile_cp_mu.append(np.max(logfile_mu[index_ref]) - np.min(logfile_mu[index_ref]))
plt.plot(mosaiq_cp_mu)
plt.plot(logfile_cp_mu)
# MU density per control point for both records, using the same row
# grouping as the per-control-point MU comparison above.
mu_density_by_slice = []
for i, control_point in enumerate(np.sort(np.unique(control_points))):
results = dict()
a_slice = slice(i, i + 2, 1)
slice_ref = control_point == control_points
index_ref = np.where(slice_ref)[0]
if index_ref[0] != 0:
index_ref = np.concatenate([[index_ref[0] - 1], index_ref])
results['mosaiq'] = calc_mu_density(
mosaiq_mu[a_slice], mosaiq_mlc[a_slice, :, :],
mosaiq_jaw[a_slice, :], grid_resolution=grid_resolution)
results['logfile'] = calc_mu_density(
logfile_mu[index_ref], logfile_mlc[index_ref, :, :],
logfile_jaw[index_ref, :], grid_resolution=grid_resolution)
mu_density_by_slice.append(results)
# a_slice = slice(0, 2, 1)
# mu = mosaiq_mu[a_slice]
# mlc = mosaiq_mlc[a_slice, :, :]
# jaw = mosaiq_jaw[a_slice, :]
# mosaiq_control_point_mu_density = calc_mu_density(mu, mlc, jaw, grid_resolution=grid_resolution)
# slice_ref = control_points[0] == control_points
# mu = logfile_mu[slice_ref]
# mlc = logfile_mlc[slice_ref, :, :]
# jaw = logfile_jaw[slice_ref, :]
# logfile_control_point_mu_density = calc_mu_density(mu, mlc, jaw, grid_resolution=grid_resolution)
# mosaiq_control_point_mu_density
# Common colour scales across all control-point slices.
max_all = np.array([
np.max([a_mu_slice['logfile'], a_mu_slice['mosaiq']])
for a_mu_slice in mu_density_by_slice
])
max_val = np.max(max_all)
differences = [
a_mu_slice['logfile'] - a_mu_slice['mosaiq']
for a_mu_slice in mu_density_by_slice
]
max_diff_per_control_point = np.max(np.abs(differences))
def plot_and_save_per_control_point(i):
    """Save logfile, Mosaiq and difference MU-density maps for control point i."""
    logfile_density = mu_density_by_slice[i]['logfile']
    mosaiq_density = mu_density_by_slice[i]['mosaiq']
    density_diff = differences[i]

    def render(density, title, cbar_label, fname, **mesh_kwargs):
        # One figure per panel; identical axes/extent across all three.
        plt.figure(figsize=(6.5, 9.5))
        plt.pcolormesh(x, y, density, **mesh_kwargs)
        plt.colorbar(label=cbar_label)
        plt.title(title)
        plt.xlabel('MLC direction (mm)')
        plt.ylabel('Jaw direction (mm)')
        plt.axis('equal')
        plt.xlim([-67, 60])
        plt.ylim([60, -75])
        plt.savefig(fname)
        plt.show()

    render(logfile_density, 'Logfile MU density', 'MU density',
           "{}_logfile.png".format(i), vmin=0, vmax=max_val)
    render(mosaiq_density, 'Mosaiq MU density', 'MU density',
           "{}_mosaiq.png".format(i), vmin=0, vmax=max_val)
    render(density_diff, 'MU density difference', 'Logfile - Mosaiq MU density',
           "{}_diff.png".format(i),
           vmin=-max_diff_per_control_point, vmax=max_diff_per_control_point,
           cmap='bwr')
# Render every control-point slice. NOTE(review): 19 is hard-coded —
# presumably the number of control points here; len(mu_density_by_slice)
# would be safer.
for i in range(19):
plot_and_save_per_control_point(i)
# plot_and_save_per_control_point(1)
# Compare the other jaw axis (column 1) as well.
plt.plot(mosaiq_mu, mosaiq_jaw[:,1])
plt.plot(logfile_mu, logfile_jaw[:,1])
# logfile_delivery_data
control_points
control_points
```
| github_jupyter |
# Chapter 06 -- Hierarchical Indexing
## Topics Covered
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#MultiIndexing">MultiIndexing </a>
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#Basic-Indexing">Multi-Indexed Selection</a>
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#xs()-method-for-cross-sections"> xs() method for cross sections </a>
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#Advanced-Indexing-with-.loc-indexer"> Advanced Indexing with .loc indexer
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#Using-Boolean-operators-with-.loc-indexer"> Using Boolean operators with .loc indexer </a>
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#stack()-and-unstack()-methods"> stack() and unstack() methods </a>
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%2006%20--%20Hierarchical%20Indexing.ipynb#Resources"> Resources </a>
```
import pandas as pd
import numpy as np
from pandas import Series, DataFrame, Index
from IPython.display import Image
```
Simply put, a MultiIndex (also referred to as a hierarchical index) allows multiple index levels within a single index. Higher dimensional data can be represented in a one-dimensional Series or a two-dimensional DataFrame. Start with the creation of a synthetic DataFrame whose values are used to represent the number of traffic violations issued in cell #3 below.
pandas provide constructor methods allowing components to be defined separately. The construction of the index and columns below is self-explanatory.
For the data component: The first data = assignment constructs an array using numpy's random number generator, with the round() method keeping two decimal places. The second data = assignment multiplies the array by 100, floor divides by 5, and finally takes the absolute value.
The tickets = assignment constructs the DataFrame from the constituents.
## MultiIndexing
```
# Build a 12x6 frame of synthetic ticket counts with hierarchical row
# ('year' > 'month') and column ('area' > 'when') indexes.
index = pd.MultiIndex.from_product(
    [[2012, 2013, 2014, 2015], ['1', '2', '3']], names=['year', 'month'])
columns = pd.MultiIndex.from_product(
    [['city', 'suburbs', 'rural'], ['day', 'night']], names=['area', 'when'])
# Random draws, scaled and truncated to non-negative whole numbers.
data = abs(np.floor_divide(np.round(np.random.randn(12, 6), 2) * 100, 5))
tickets = (pd.DataFrame(data, index=index, columns=columns)
           .sort_index()
           .sort_index(axis=1))
```
```
index
```
Notice how the MultiIndex displayed above contains multiple levels of indexing, in this case 'year' and 'month' along the row dimension. The levels act as labels for each data value encoding these names.
In SAS, the terminology is 'month' nested within 'year'. Said another way, 'month' is the inner-most level of the index.
```
tickets.columns
```
Similarly, the indexing for columns contains multiple levels, with the name 'area' labeling the outer levels 'city', 'rural', and 'suburbs'. 'when' is the name for the inner levels 'day' and 'night'.
```
tickets
```
```
tickets.shape
```
Another SAS-like way to think of a MultiIndex is nested group-by's illustrated in cell below with PROC SORT. The resulting SAS data set tickets is 'tall' and 'skinny' with one observation for each of the nested levels.
Below, we will illustrate how <a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%206%20--%20Hierarchical%20Indexing.ipynb#stack-and-unstack-methods"> 'stacking' </a> the DataFrame can be used to reshape it to a form similar to the SAS data set tickets.
````
/******************************************************/
/* c06_tickets_proc_sort.sas */
/******************************************************/
34 proc sort;
35 by year month area nd;
NOTE: 72 observations were read from "WORK.tickets"
NOTE: Data set "WORK.tickets" has 72 observation(s) and 5 variable(s)
````
A SAS analog of the tickets Dataframe is rendered using PROC TABULATE. Unlike the DataFrame methods illustrated here, the physical shape of the SAS data set is not changed. In this case PROC TABULATE processes the 'tall' SAS data set to produce printed output shaped like the 'tickets' DataFrame.
````
/******************************************************/
/* c06_tickets_proc_tabulate.sas */
/******************************************************/
12 data tickets;
13 length area $ 7;
14 call streaminit(time());
15 do year = 2012 to 2015;
16 do month = 1 to 3;
17 do area = 'City', 'Suburbs', 'Rural';
18 do nd = 'Night', 'Day';
19 tickets = abs(int(rand( 'NORMAL')*100));
20 output;
21 end;
22 end;
23 end;
24 end;
25 proc tabulate;
26 var tickets;;
27 class area nd year month;
28 table year * month ,
29 area=' ' * nd=' ' * sum=' ' * tickets=' ';
````
```
Image(filename='Anaconda3\\output\\tabulate.JPG')
```
## Multi-Indexed Selection
The tickets DataFrame is essentially four-dimensional with area nested within when for columns, and month nested within year for rows. The multi-indexing for both rows and columns make sub-setting operations straightforward. In this case, selecting by the index label 'rural' returns an entire DataFrame. Strictly speaking, 'rural' is a level in the column MultiIndex illustrated in cell #5 above.
Select all rows by the 'rural' level.
```
tickets['rural']
```
Select all rows for tickets issued in the city at night.
```
tickets['city', 'night']
```
Multiple keys can be specified. Above, the request is for those tickets issued at 'night' in the 'city'. In this instance a Series is returned.
The examples in cells #8 and #9 work by selecting labels from 'area' which is the outer-most MultiIndex level for columns in the tickets DataFrame. Said another way, 'when' ('day' or 'night') is nested inside 'area'.
Another method to select rows and columns nested inside a hierarchical index is the DataFrame xs() method. This cross-section method generally results in a smaller DataFrame than the original.
## xs() method for cross sections
The DataFrame xs() method uses the level argument to select data for rows and columns. The example in the cell below selects the 1st month for each year.
```
tickets.xs(('1'), level='month')
```
Likewise, you can request just the rows for 2013.
```
tickets.xs((2013), level='year')
```
Selections can use multiple keys. In this case, 2013 and month '1'.
```
tickets.xs((2013, '1'), level=('year', 'month'))
```
The .xs method also works for columns with the optional axis=1 (for columns) argument. In this case, the column 'night' in each of the 3 areas is selected.
```
tickets.xs(('night'), level='when', axis=1)
```
## Advanced Indexing with .loc indexer
Chapter 5, Understanding Indexes covers the <a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%205%20--%20Understanding%20Indexes.ipynb#.loc-Indexer"> .loc indexer </a> which can also be used with hierarchical indicies.
The colon (:) to the left of the comma (,) selects all rows. The selection to the right of the comma (,) requests the levels 'city' and 'suburbs' from 'area'.
```
tickets.loc[:,['city', 'suburbs']]
```
Below is an example of partial slicing.
```
tickets.loc[2013:2014, ['city', 'suburbs']]
```
The example below illustrates slicing with a range of values by providing <a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/blob/master/Chapter%202%20--%20Data%20Structures.ipynb#tuple"> tuples </a> containing both the row slice ( 2nd month, 2013 to 3rd month 2014) and the column slice ('rural' 'day' to 'suburbs' 'day').
```
tickets.loc[(2013, '2') : (2014, '3'), ('rural', 'day') : ('suburbs', 'day')]
```
The example below illustrates the .loc() indexer using multiple keys for row and column slices.
```
tickets.loc[(2013, '3'): (2014, '1')]
```
```
tickets.loc[(2013, '3'): (2014, '1'), "city"]
```
## Using Boolean operators with .loc indexer
Using boolean operators with the .loc indexer permits boolean evaluations across the DataFrame values. You can use a pd.IndexSlice to permit syntax using the colon (:) rather than slice(None) syntax when specifying ranges.
The mask object returns the boolean value True which is applied to the column MultiIndex 'night' as the inner-most index level within 'city'. Notice the shape of the DataFrame is defined by just those rows in the 'night' within 'city' column construct that evaluate True.
```
# Boolean row mask over the ('city','night') column; IndexSlice lets us use
# ':' syntax inside .loc with a MultiIndex.
idx = pd.IndexSlice
mask = tickets[('city','night')]>25
tickets.loc[idx[mask,:,]]
```
The example in the cell below applies the boolean 'mask2' to all values in the DataFrame
```
# Boolean mask over the ('rural','day') column, applied to all rows.
mask2 = tickets[('rural', 'day')]>15
# Fix: 'rurual' was a typo for 'rural'. The lexicographic slice happened to
# select the same columns, but the label should be spelled correctly.
tickets.loc[idx[mask2, 'city':'rural']]
```
The example below applies boolean 'mask2' and also scopes the column request.
```
tickets.loc[idx[mask2, 'rural']]
```
## stack() and unstack() methods
DataFrames with hierarchical indices can use the .stack and .unstack attributes to reshape data from 'tall and skinny' to 'short and fat' formats and vice-versa.
The .stack() attribute pivots columns into rows. The .unstack() attribute pivots rows into columns. Consider the examples in the cell below.
The .unstack() attribute used without any arguments in this case pivots the 'month' rows to the inner-most index level of the columns. 'month' was the inner-most index level for rows. Also notice how we assigned the 'unstacked' DataFrame to create the new one, df_u.
```
df_u = tickets.unstack()
df_u
```
The MultiIndex for the columns from DataFrame df_u is shown below. Contrast this column index with the one for tickets in cell #4 above.
```
df_u.columns
```
The .stack() attribute performs the reverse operation of the .unstack() attribute. The .stack() attribute takes the inner most index level of the columns level and pivots them to the inner-most index level of the rows. In this case the new DataFrame is called df_s.
```
df_s = tickets.stack()
df_s
```
DataFrames containing MultiIndexes can 'stack' and 'unstack' multiple levels at a time.
```
df_like_sas = pd.DataFrame(tickets.stack(level=['when', 'area']))
```
We can 'stack' the original tickets DataFrame by the levels 'when' and 'area' from the column MultiIndex to create a new DataFrame shaped similarly to the SAS data set tickets.
```
df_like_sas.shape
```
Display the first 12 rows.
```
df_like_sas.head(12)
```
## Resources
<a href="http://shop.oreilly.com/product/0636920034919.do"> Python Data Science Handbook </a>, Essential Tools for Working With Data, by Jake VanderPlas.
<a href="http://pandas.pydata.org/pandas-docs/stable/reshaping.html#reshaping-by-pivoting-dataframe-objects"> Reshaping by pivoting DataFrame objects </a>, from the pandas 0.19.0 documentation.
<a href="http://pandas.pydata.org/pandas-docs/stable/reshaping.html#reshaping-by-stacking-and-unstacking"> Reshaping by stacking and unstacking </a> from the pandas 0.19.0 documentation.
<a href="http://pandas.pydata.org/pandas-docs/stable/advanced.html#multiindex-advanced-indexing"> MultiIndex / Advanced Indexing </a> from the pandas 0.19.0 documentation.
## Navigation
<a href="http://nbviewer.jupyter.org/github/RandyBetancourt/PythonForSASUsers/tree/master/"> Return to Chapter List </a>
| github_jupyter |
```
import tqdm
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import warnings
warnings.filterwarnings("ignore")
### LOAD DATA ###
df = pd.read_csv('Steel_Plates_Faults.csv')
# Separate the target column from the features.
label = df.Fault
df = df.drop('Fault',axis=1)
print(df.shape)
df.head()
### TARGET DISTRIBUTION ###
label.value_counts().plot.pie(figsize=(6,6))
### GENERATE TRAIN TEST ###
# NOTE(review): no stratify= on an imbalanced target — consider
# stratify=label to keep class proportions across the split.
X_train, X_test, y_train, y_test = train_test_split(df, label, random_state = 42,test_size=0.2)
print("train:", X_train.shape, "test:", X_test.shape)
```
# FIRST MODEL
```
# Baseline multi-class model trained on all seven fault labels.
gbc = GradientBoostingClassifier(n_estimators=500, random_state=42)
gbc.fit(X_train, y_train)
# Predict once and reuse: running GBC inference over the test set twice
# was pure duplicated work.
y_pred_gbc = gbc.predict(X_test)
print(accuracy_score(y_test, y_pred_gbc))
print(classification_report(y_test, y_pred_gbc))
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render a row-normalized confusion matrix as an annotated heatmap."""
    # Normalize each row so cells read as fractions of the true class.
    normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(normalized, interpolation='nearest', cmap=cmap)
    plt.title(title, fontsize=25)
    #plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=90, fontsize=15)
    plt.yticks(ticks, classes, fontsize=15)
    # Flip the annotation colour on dark cells for readability.
    midpoint = normalized.max() / 2.
    for row in range(normalized.shape[0]):
        for col in range(normalized.shape[1]):
            cell = normalized[row, col]
            plt.text(col, row, format(cell, '.2f'),
                     horizontalalignment="center",
                     color="white" if cell > midpoint else "black", fontsize=14)
    plt.ylabel('True label', fontsize=20)
    plt.xlabel('Predicted label', fontsize=20)
# Confusion matrix of the baseline model on the full test set.
plt.figure(figsize=(7,7))
cnf_matrix = confusion_matrix(y_test, gbc.predict(X_test))
plot_confusion_matrix(cnf_matrix, classes=np.unique(label), title="Confusion matrix")
```
# VISUALIZATION
```
### PLOT WITH OTHER_FAULTS ###
# Scaler is fit on the training split only, then applied to everything.
scaler = StandardScaler()
scaler.fit(X_train)
tsne = TSNE(n_components=2, random_state=42, n_iter=300, perplexity=5)
T = tsne.fit_transform(scaler.transform(df))
plt.figure(figsize=(16,9))
colors = {0:'red', 1:'blue', 2:'green', 3:'pink', 4:'black', 5:'orange', 6:'cyan'}
plt.scatter(T.T[0], T.T[1], c=[colors[i] for i in LabelEncoder().fit_transform(label)])
### PLOT WITHOUT OTHER_FAULTS ###
# Same embedding, but dropping the catch-all class (one fewer colour).
tsne = TSNE(n_components=2, random_state=42, n_iter=300, perplexity=5)
T = tsne.fit_transform(scaler.transform(df[label != 'Other_Faults']))
plt.figure(figsize=(16,9))
colors = {0:'red', 1:'blue', 2:'green', 3:'black', 4:'orange', 5:'cyan'}
plt.scatter(T.T[0], T.T[1], c=[colors[i] for i in LabelEncoder().fit_transform(label[label != 'Other_Faults'])])
### EXCLUDE OTHER_FAULTS FROM TRAIN & TEST ###
X_train2, y_train2 = X_train[y_train != 'Other_Faults'].copy(), y_train[y_train != 'Other_Faults'].copy()
X_test2, y_test2 = X_test[y_test != 'Other_Faults'].copy(), y_test[y_test != 'Other_Faults'].copy()
print("train:", X_train2.shape, "test:", X_test2.shape)
```
# SECOND MODEL
```
# Second model: trained only on the six named fault classes
# (Other_Faults excluded).
gbc2 = GradientBoostingClassifier(n_estimators=500, random_state=42)
gbc2.fit(X_train2, y_train2)
# Predict once and reuse instead of running inference twice.
y_pred_gbc2 = gbc2.predict(X_test2)
print(accuracy_score(y_test2, y_pred_gbc2))
print(classification_report(y_test2, y_pred_gbc2))
```
# CREATE THRESHOLD
```
def predict(feature, model, threshold_map=None):
    """Predict a label for `feature`, optionally gated by per-class thresholds.

    Parameters
    ----------
    feature : 2d array-like with a single row, as accepted by the model.
    model : fitted classifier exposing predict / predict_proba.
    threshold_map : optional dict mapping label -> minimum confidence.
        A prediction below its label's threshold is relabelled 'OTHERS'.

    Returns a dict with 'label' and 'confidence' keys, or None (after
    printing a warning) when a non-empty threshold map is given that lacks
    the predicted label.
    """
    confidence = model.predict_proba(feature).max()
    label = model.predict(feature)[0]
    if threshold_map and label in threshold_map:
        if confidence >= threshold_map[label]:
            return {"label": label, "confidence": confidence}
        return {"label": "OTHERS", "confidence": confidence}
    if threshold_map is None:  # was `== None`; identity check is the idiom
        return {"label": label, "confidence": confidence}
    # Fall-through: a (possibly empty) threshold map without this label.
    print(label, 'not in threshold map')
### TRY PREDICT FUNCTION ###
predict([df[label == 'Other_Faults'].values[4]], gbc2)
### MAKE PRED ON OTHER_FAULTS ###
# Score the Other_Faults training rows with the 6-class model to see how
# confidently they get absorbed into the named classes.
pred_lab = []
pred_conf = []
for row in tqdm.tqdm(X_train[y_train == 'Other_Faults'].values):
pred = predict([row], gbc2)
pred_lab.append(pred['label'])
pred_conf.append(pred['confidence'])
### PLOT SCORES ###
other_pred = pd.DataFrame({'label':pred_lab, 'pred':pred_conf})
diz_score = other_pred.groupby('label')['pred'].apply(list).to_dict()
plt.figure(figsize=(18,5))
plt.boxplot(diz_score.values(), labels=diz_score.keys())
plt.grid(False); plt.show()
### IMPOSE 30 PERCENTILE THRESHOLD ###
# Per-class confidence threshold: the 30th percentile of the confidences
# the model assigned to Other_Faults samples.
threshold_p= {}
for lab in diz_score.keys():
threshold_p[lab] = np.percentile(diz_score[lab],30)
print(threshold_p)
### PLOT SCORES WITH THRESHOLD ###
plt.figure(figsize=(18,5))
plt.boxplot(list(diz_score.values()), labels=list(diz_score.keys()))
plt.plot(range(1,len(threshold_p.keys())+1), list(threshold_p.values()), 'rs')
plt.grid(False); plt.show()
### MAKE PREDICTION ON TEST WITHOUT OTHER_FAULTS + THRESHOLD ###
final_pred = []
for row in tqdm.tqdm(X_test2.values):
final_pred.append(predict([row], gbc2, threshold_map=threshold_p)["label"])
print(accuracy_score(y_test2, final_pred))
print(classification_report(y_test2, final_pred))
### TRY TO PREDICT INCLUDING OTHER_FAULTS ###
# NOTE(review): this deliberately OVERWRITES y_train / y_test with the
# thresholded predicted labels; the new labels feed the final model below.
y_train = []
for row in tqdm.tqdm(X_train.values):
y_train.append(predict([row], gbc2, threshold_map=threshold_p)["label"])
y_test = []
for row in tqdm.tqdm(X_test.values):
y_test.append(predict([row], gbc2, threshold_map=threshold_p)["label"])
other_final_pred = y_train + y_test
pd.value_counts(other_final_pred)
```
# FINAL VISUALIZATION
```
### PLOT WITH THE 'NEW' OTHER_FAULTS CLASS ###
# Embed all samples, coloured by the relabelled classes (OTHERS included).
tsne = TSNE(n_components=2, random_state=42, n_iter=300, perplexity=5)
T = tsne.fit_transform(scaler.transform(pd.concat([X_train,X_test])))
plt.figure(figsize=(16,9))
colors = {0:'red', 1:'blue', 2:'green', 3:'pink', 4:'black', 5:'orange', 6:'cyan'}
plt.scatter(T.T[0], T.T[1], c=[colors[i] for i in LabelEncoder().fit_transform(other_final_pred)])
```
# FINAL MODEL
```
# Final model trained on the relabelled targets (y_train now holds the
# thresholded labels produced above, with the 'OTHERS' class).
gbc3 = GradientBoostingClassifier(n_estimators=500, random_state=42)
gbc3.fit(X_train, y_train)
# Predict once and reuse instead of running inference twice.
y_pred_gbc3 = gbc3.predict(X_test)
print(accuracy_score(y_test, y_pred_gbc3))
print(classification_report(y_test, y_pred_gbc3))
```
| github_jupyter |
```
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
import re
import math
if "original_dir" not in locals():
original_dir = os.getcwd()
os.chdir(original_dir)
os.chdir('..')
os.getcwd()
dataset_names = {0:'usps',\
1:'satimage.scale',\
2:'dna.scale',\
3:'letter.scale',\
4:'pendigits',\
5:'news20.scale',\
6:'sector.scale',\
7:'mnist.scale',\
8:'cifar10',\
9:'toy',\
10:'rcv1',\
11:'aloi.scale'\
}
# Number of classes for each dataset.
nr_classes = {'usps': 10,
              'satimage.scale': 6,
              'dna.scale': 3,
              # was `.26` — the letter dataset has 26 classes (a-z)
              'letter.scale': 26,
              'pendigits': 10,
              'news20.scale': 20,
              'sector.scale': 105,
              'mnist.scale': 10,
              'cifar10': 10,
              'toy': 3,
              'rcv1': 53,
              'aloi.scale': 1000
              }
# as powers of ten
regs = {'usps':.1,\
'satimage.scale':1,\
'dna.scale':1,\
'letter.scale':.1,\
'pendigits':.01,\
'news20.scale':.1,\
'sector.scale':1,\
'mnist.scale':.01,\
'cifar10':1,\
'toy':1,\
'rcv1':1,\
'aloi.scale':.01\
}
iters = {'usps':2048,\
'satimage.scale':64,\
'dna.scale':1024,\
'letter.scale':64,\
'pendigits':8192,\
'news20.scale':32,\
'sector.scale':32,\
'mnist.scale':64,\
'cifar10':32,\
'toy':32,\
'rcv1':32,\
'aloi.scale':32\
}
ds = dataset_names[1]
ds
def get_exp_name(ds, reg):
    """Build the experiment identifier: dataset name plus the base-10 exponent of the regularizer."""
    exponent = int(math.log10(reg))
    return '{}_{}'.format(ds, exponent)
exp_name = get_exp_name(ds,regs[ds])
exp_name
def run_experiment(dataset_name, regularizer, num_iterations, variant):
    """Run the external multiclass-SVM trainer and capture its output.

    Parameters
    ----------
    dataset_name : str
        Name of the dataset file under data/.
    regularizer : float
        Regularization constant passed as -c.
    num_iterations : int
        Iteration budget passed as -t.
    variant : str
        Either "Ours" (solver -s 30) or "Shark" (solver -s 31).

    Results are appended to results/<variant>/<exp_name>; any stale
    result file is removed first. Returns None.
    """
    # Map the variant to its solver flag; bail out on anything unknown.
    solvers = {"Ours": 30, "Shark": 31}
    if variant not in solvers:
        print("Invalid variant")
        return
    FILENAME = 'results/' + variant + '/' + get_exp_name(dataset_name, regularizer)
    # NOTE: the command is executed through a shell — only ever pass
    # trusted, notebook-local names here.
    COMMAND = ('./train -s ' + str(solvers[variant]) + ' -c ' + str(regularizer)
               + ' -t ' + str(num_iterations) + ' data/' + dataset_name + ' >> ' + FILENAME)
    # Remove the old result file directly instead of shelling out to `rm`.
    if os.path.exists(FILENAME):
        os.remove(FILENAME)
    os.system(COMMAND)
run_experiment(ds, regs[ds], iters[ds], "Ours")
run_experiment(ds, regs[ds], iters[ds], "Shark")
def get_results(FILENAME):
    """Load a comma-separated training log and derive the dual objective.

    The log is expected to contain numeric columns starting with
    (time in ms, sum of dual vars, norm/2, ...) — see `colnames` below
    for the full layout produced by ./train.

    Returns a numpy array in which:
    - column 0 has been converted from milliseconds to seconds, and
    - an extra trailing column holds column1 - column2
      (sum of dual vars minus norm/2, i.e. the dual objective).
    """
    result_np = np.genfromtxt(FILENAME, delimiter=',')
    # ms -> s
    result_np[:, 0] = result_np[:, 0] / 1000
    # Dual objective appended as a new last column.
    new_col = (result_np[:, 1] - result_np[:, 2]).reshape(-1, 1)
    return np.append(result_np, new_col, 1)
result_RC = get_results("results/Ours/"+exp_name)
result_Shark = get_results("results/Shark/"+exp_name)
colnames = ['time (sec)', 'sum of dual vars', 'norm/2', 'C*hinge risk', 'primal obj', 'dual obj']
cn = {colnames[i] : i for i in range(len(colnames))}
fig, ax = plt.subplots()
ax.semilogx(result_RC[:,cn['time (sec)']],\
result_RC[:,cn['primal obj']],\
linestyle='-', \
color='k')
ax.semilogx(result_RC[:,cn['time (sec)']], \
result_RC[:,cn['dual obj']], \
linestyle='--', \
color='k')
orange = (0.9,0.6,0)
ax.semilogx(result_Shark[:,cn['time (sec)']], \
result_Shark[:,cn['primal obj']], \
linestyle = '-.', \
color = orange,\
linewidth = 2)
ax.semilogx(result_Shark[:,cn['time (sec)']],\
result_Shark[:,cn['dual obj']],\
linestyle = ':', \
color = orange,\
linewidth = 2)
plt.xlabel("Time (sec)")
plt.ylabel("Objective value")
plt.title(ds + ', k = ' + str(nr_classes[ds]) +', C = ' +str(regs[ds]))
ax.legend(['IC primal','IC dual','Shark primal','Shark dual'])
plt.savefig('figures/' + ds + '_' + str(int(math.log10(regs[ds])))+'_ob.eps',format='eps')
fig, ax = plt.subplots()
ax.loglog(result_RC[:,cn['time (sec)']],\
result_RC[:,cn['primal obj']]-result_RC[:,cn['dual obj']] ,\
linestyle='-', \
color='k')
orange = (0.9,0.6,0)
ax.loglog(result_Shark[:,cn['time (sec)']], \
result_Shark[:,cn['primal obj']]-result_Shark[:,cn['dual obj']], \
linestyle = '-.', \
color = orange,\
linewidth = 2)
plt.xlabel("Time (sec)")
plt.ylabel("Duality gap")
plt.title(ds + ', k = ' + str(nr_classes[ds]) +', C = ' +str(regs[ds]))
ax.legend(['IC','Shark'])
plt.savefig('figures/'+ds+'_'+ str(int(math.log10(regs[ds])))+'_dg.eps',format='eps')
```
| github_jupyter |
# Exploratory Data Analysis of Cancer Genomics data using TCGA
In this notebook, we will take a look at one of the canonical datasets, if not _the_ dataset, in cancer genomics: TCGA.
We'll start with investigating the RNA Sequencing (rnaseq) and Clinical data available for a type of liver cancer known as hepatocellular carcinoma (HCC). Hepatocellular carcinoma is the most common form of liver cancer in the United States, making up [more than 80% of cases](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga/studied-cancers/liver). The TCGA dataset is abbreviated LIHC.
Some examples of what researchers have learned from the LIHC dataset at the DNA-level include confirmed [frequent mutations](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga/studied-cancers/liver) in:
- The TERT promotor region, associated with regulating cell survival
- TP53, one of the most frequently mutated genes in cancer
- CTNNB1, a member of the Wnt signaling pathway that mediates cell growth and differentiation
There are currently several therapies under development that target these genes.
In addition to DNA alterations however, different biological and tumor microenvironment factors can [influence disease progression](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6237857/). A transcriptomic survey of tissues at various stages of disease progression could help elucidate some of the underlying pathways contributing to tumorigenesis.
### Today, we'll be focusing on using RNA-seq data from LIHC combined with clinical attributes to identify biomarkers for disease progression.
The data is stored in the R package _[RTCGA](http://rtcga.github.io/RTCGA/)_
## Load libraries
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
## Set variables
```
data_dir=""
response_name="patient.race"
rnaseq_file=data_dir+"lihc_rnaseq.csv.gz"
clinical_file=data_dir+"lihc_clinical.csv.gz"
```
## Load data
The data is stored in the RTCGA package in the R programming language. I've outputted it for easy use within python.
We will be investigating the Hepatocellular carcinoma dataset. Read about it [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5680778/).
The TCGA RNASeq data is illumina hiseq Level 3 RSEM normalized expression data. You can read about the RSEM method [here](https://academic.oup.com/bioinformatics/article/26/4/493/243395).
Essentially this is the raw counts of reads that aligned to the gene transcript, though it's only a guess by the program. Since it's a guess, the values are rational numbers. To simplify things, we'll round the values to the next whole integer.
We will be using clinical data from the [GDC Data portal](https://portal.gdc.cancer.gov/projects/TCGA-LIHC).
```
rnaseq = (pd.
read_csv(rnaseq_file,compression="gzip").
set_index('bcr_patient_barcode').
applymap(lambda x : int(np.ceil(x)))
)
display(rnaseq.shape)
display(rnaseq.head())
gene_name_logical = [len(x[0])>1 for x in rnaseq.columns.str.split('|')]
sub = rnaseq.loc[:,gene_name_logical]
sub.columns = [x[0] for x in sub.columns.str.split('|')]
rnaseq_sub = sub.copy()
rnaseq_sub.head()
```
The clinical data is within the RTCGA package, but is also available [here](https://portal.gdc.cancer.gov/projects/TCGA-LIHC). More description of the clinical attributes is available [here](https://gdc.cancer.gov/about-data/data-harmonization-and-generation/clinical-data-harmonization).
```
clinical = pd.read_csv('clinical.tsv', sep='\t')
clinical['submitter_id'] = clinical['submitter_id'].map(lambda x: x.lower())
clinical.head()
rnaseq_sub.index = rnaseq_sub.index.map(lambda x: '-'.join(x.split('-')[:3]).lower())
rnaseq_sub.head()
rnaseq_sub.reset_index()
full_df = pd.merge(rnaseq_sub.reset_index(), clinical[['submitter_id','tumor_stage']], left_on='bcr_patient_barcode', right_on='submitter_id', how='inner') \
.set_index('bcr_patient_barcode') \
.drop('submitter_id', axis=1)
full_df.head()
tumor_stages = clinical['tumor_stage'].value_counts()
plt.hist(tumor_stages.values)
tumor_stages
# Subset out the recognizable stages
tumor_stages[tumor_stages.index.str.startswith('stage')]
# Subset full dataframe for patient samples that have a corresponding tumor stage
full_df = full_df.loc[full_df['tumor_stage'].str.startswith('stage')]
# Since there are substages (eg, stage iia and stage iib), we will conver them to the 4 main stages
full_df['tumor_stage'] = full_df['tumor_stage'].str.replace('stage ', '') \
.str.replace('a', '') \
.str.replace('b', '') \
.str.replace('c', '') \
.str.replace('v', '')
full_df
```
# Pre-process
With ~20k features, we can reduce the feature set in a number of ways:
- Removing lowly expressed genes (one way we can define this as less than 1 copy of a gene per person)
- Subsetting the genes that show the most variation between samples
- Using genes known to be associated with the hallmarks of cancer
Using all or some of these techniques, reduce the number of features from our dataset. A dictionary with genes from the hallmarks of cancer can be loaded as follows:
```
# Filter out low expressed genes
processed_df = full_df.drop('tumor_stage', axis=1)
processed_df = processed_df.T[processed_df.sum() > processed_df.shape[1]].T
processed_df.shape
import pickle
descr_dict = pickle.load(open('../../hallmarks_of_cancer_description_dictionary.pkl','rb'))
geneset_dict = pickle.load(open('../../hallmarks_of_cancer_geneset_dictionary.pkl','rb'))
processed_df
# Filter out non-Hallmark genes
gene_list = list(set([a for b in geneset_dict.values() for a in b]))
common_cols = [gene for gene in gene_list if gene in processed_df.columns]
processed_df = processed_df[common_cols]
```
As high dimensionality can be problematic for certain machine learning models, another way to reduce the feature space is dimension reduction. One such technique is [sparse principal component analysis](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-019-2929-8) (SPCA): a variation on the widely-used principal component analysis (PCA) that uses the lasso to produce principal components with sparse loadings, making the components more interpretable.
Try to further reduce the dimensional space using SPCA We can start with 10 components.
```
from sklearn.decomposition import MiniBatchSparsePCA
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
pca = PCA(n_components=2)
pca_vals = pca.fit_transform(full_df.drop('tumor_stage', axis=1))
cmapping = full_df['tumor_stage'].str.replace('iii', 'orange') \
.str.replace('ii', 'maroon') \
.str.replace('i', 'blue') \
.values
fig = plt.figure(figsize=(6,6))
plt.scatter(pca_vals[:,0], pca_vals[:,1], c=cmapping)
plt.title('PCA plot')
plt.xlabel('Component 1')
plt.ylabel('Component 2');
tsne = TSNE(n_components=2)
tsne_vals = tsne.fit_transform(full_df.drop('tumor_stage', axis=1))
cmapping = full_df['tumor_stage'].str.replace('iii', 'orange') \
.str.replace('ii', 'maroon') \
.str.replace('i', 'blue') \
.values
fig = plt.figure(figsize=(6,6))
plt.scatter(tsne_vals[:,0], tsne_vals[:,1], c=cmapping)
plt.title('t-SNE plot')
plt.xlabel('t-SNE 1')
plt.ylabel('t-SNE 2');
```
## Processed df
```
pca = PCA(n_components=2)
pca_vals = pca.fit_transform(processed_df)
cmapping = full_df['tumor_stage'].str.replace('iii', 'orange') \
.str.replace('ii', 'maroon') \
.str.replace('i', 'blue') \
.values
fig = plt.figure(figsize=(6,6))
plt.scatter(pca_vals[:,0], pca_vals[:,1], c=cmapping)
plt.title('PCA plot')
plt.xlabel('Component 1')
plt.ylabel('Component 2');
tsne = TSNE(n_components=2)
tsne_vals = tsne.fit_transform(processed_df)
cmapping = full_df['tumor_stage'].str.replace('iii', 'orange') \
.str.replace('ii', 'maroon') \
.str.replace('i', 'blue') \
.values
fig = plt.figure(figsize=(6,6))
plt.scatter(tsne_vals[:,0], tsne_vals[:,1], c=cmapping)
plt.title('t-SNE plot')
plt.xlabel('t-SNE 1')
plt.ylabel('t-SNE 2');
spca = MiniBatchSparsePCA(n_components=10)
full_df_spca = spca.fit_transform(full_df.drop('tumor_stage', axis=1))
```
## Semi-supervised clustering of patient samples
```
from sklearn.semi_supervised import LabelPropagation, LabelSpreading
lp = LabelPropagation()
lp.fit()
```
## Supervised learning for tumor stage imputation
We'll be investigating how gene expression varies based on tumor stage. We can subset and filter out any lowly-expressed genes, genes that haven't been identified as being significant within the [Hallmarks of Cancer](http://software.broadinstitute.org/gsea/msigdb/search.jsp), and genes that have shown high variation within our samples.
Let's start by generating a Random Forest model, since we're interested in the gene signature that may be responsible for progressing tumor stage.
An example of how models can be used to identify genes of interest can be found in [Wenrick and Shemirani, 2018](https://www.frontiersin.org/articles/10.3389/fgene.2018.00297/full) and [Rendleman et al, 2019](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-019-2929-8)
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
```
Perform a grid search of params for random forest classifier with the full set of sample data
```
X, y = full_df.drop('tumor_stage', axis=1), full_df['tumor_stage']
# train_test_split returns (X_train, X_test, y_train, y_test) in that order;
# the original unpacking (X_train, y_train, x_test, y_test) mislabeled the splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
rf = RandomForestClassifier()
params = {'n_estimators': [10,100,300],
'min_samples_split': [2,4, 8],
'min_samples_leaf': [1, 3, 6],
'min_weight_fraction_leaf': [0.0],
'max_features': ['auto'],
'max_leaf_nodes': [None],
}
clf = GridSearchCV(rf, params)
clf.fit(X, y)
clf.best_score_
clf.best_params_
```
Let's try the pre-processed data now...
```
X, y = processed_df, full_df['tumor_stage']
# train_test_split returns (X_train, X_test, y_train, y_test) in that order;
# the original unpacking (X_train, y_train, x_test, y_test) mislabeled the splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
rf = RandomForestClassifier()
params = {'n_estimators': [10,100,1000],
'min_samples_split': [2,4],
'min_samples_leaf': [1, 3],
'min_weight_fraction_leaf': [0.0],
'max_features': ['auto'],
'max_leaf_nodes': [None],
}
clf = GridSearchCV(rf, params)
clf.fit(X, y)
clf.best_score_
# Plotting feature importances
def plot_feat_importances(feat_importances, n=20):
    """Plot a horizontal bar chart of the top-n feature importances.

    Parameters
    ----------
    feat_importances : pd.DataFrame
        Single-column frame ('importance') indexed by gene/feature name.
    n : int
        Number of top features to display (default 20).
    """
    # Sort on a copy: the original used inplace=True, silently reordering
    # the caller's DataFrame as a side effect.
    ranked = feat_importances.sort_values('importance', ascending=False)
    fig = plt.figure(figsize=(8, 8))
    # Reverse so the most important feature ends up at the top of the chart.
    genes = ranked[:n][::-1].index
    importances = ranked[:n].values.T[0][::-1]
    plt.barh(genes, importances)
    plt.title('Feature Importances')
    plt.xlabel('Feature Importances')
plot_feat_importances(feat_importances)
def reduce_features(X, y, clf, params, cv=3, max_tries=3):
    """Iteratively shrink the feature set by keeping only features with
    non-zero random-forest importance, re-running a grid search each round.

    Stops after `max_tries` rounds or once <= 10 features remain, then plots
    the final importances. Returns (feat_importances, X, y, clf).

    NOTE(review): `params` is mutated in place (its 'max_features' list is
    filtered each round) — callers should pass a copy if they reuse it.
    NOTE(review): the inner helper returns the *outer* `clf` argument via
    closure, never the fitted GridSearchCV / RandomForest it builds —
    presumably it was meant to return `clf_gs` or `rf`; confirm intent.
    """
    tries = 0
    print('Performing grid search {}'.format(tries))
    def perform_grid_search_return_feat_importances(X, y, params, cv):
        # Grid-search a fresh RandomForest, then refit one model with the
        # best params to extract per-feature importances.
        rf = RandomForestClassifier()
        clf_gs = GridSearchCV(rf, params, cv=cv)
        clf_gs.fit(X, y)
        best_score = clf_gs.best_score_
        print(best_score)
        # Use best params to get feature_importances
        best_params = clf_gs.best_params_
        # if best_params['max_features'] > X.shape[1]:
        #     best_params['max_features'] = X.shape[1]
        rf = RandomForestClassifier(
            max_features = best_params['max_features'],
            max_leaf_nodes = best_params['max_leaf_nodes'],
            min_samples_leaf = best_params['min_samples_leaf'],
            min_samples_split = best_params['min_samples_split'],
            min_weight_fraction_leaf = best_params['min_weight_fraction_leaf'],
            n_estimators = best_params['n_estimators'],
        )
        rf.fit(X, y)
        feat_importances = pd.DataFrame(data=rf.feature_importances_, index=X.columns, columns=['importance'])
        return feat_importances, clf
    feat_importances, clf = perform_grid_search_return_feat_importances(X, y, params, cv)
    while tries < max_tries:
        tries += 1
        print('Performing grid search {}'.format(tries))
        # Keep only features the forest actually used (importance > 0).
        most_important_genes = list(feat_importances[feat_importances['importance'] > 0].index)
        X = X[most_important_genes]
        print(X.shape, y.shape)
        if X.shape[1] <= 10:
            break
        # Drop max_features candidates that now exceed the shrunken feature count.
        params['max_features'] = [max_feat for max_feat in params['max_features'] if max_feat <= X.shape[1]]
        feat_importances, clf = perform_grid_search_return_feat_importances(X, y, params, cv)
    plot_feat_importances(feat_importances)
    return feat_importances, X, y, clf
params = {'n_estimators': [10,100,200],
'min_samples_split': [4,8],
'min_samples_leaf': [1, 3,],
'min_weight_fraction_leaf': [0.0, 0.25, 0.5],
'max_features': [1,20],
'max_leaf_nodes': [None, 5],
}
X, y = full_df.drop('tumor_stage', axis=1), full_df['tumor_stage']
clf = RandomForestClassifier()
feat_importances1, x1, y1, clf1 = reduce_features(X, y, clf, params, cv=3, max_tries=3)
```
Try using SPCA to reduce the feature set further
```
spca = MiniBatchSparsePCA(n_components=10)
processed_df_spca = spca.fit_transform(processed_df)
X, y = processed_df_spca, full_df['tumor_stage']
# train_test_split returns (X_train, X_test, y_train, y_test) in that order;
# the original unpacking (X_train, y_train, x_test, y_test) mislabeled the splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
rf = RandomForestClassifier()
params = {'n_estimators': [10,100,200],
'min_samples_split': [4, 8],
'min_samples_leaf': [1, 3, 6],
'min_weight_fraction_leaf': [0.0],
'max_features': ['auto'],
'max_leaf_nodes': [None],
}
clf = GridSearchCV(rf, params)
clf.fit(X, y)
clf.best_score_
clf.best_params_
```
We can try taking the genes with the highest variation...
```
# The intent is the *highest*-variance genes, but sort_values() is ascending
# by default, so the original actually selected the 100 LOWEST-variance genes.
genes_highest_var = processed_df.var().sort_values(ascending=False)[:100].index
X, y = processed_df[genes_highest_var], full_df['tumor_stage']
rf = RandomForestClassifier()
params = {'n_estimators': [10,100,200],
'min_samples_split': [4, 8],
'min_samples_leaf': [1, 3, 6],
'min_weight_fraction_leaf': [0.0],
'max_features': ['auto'],
'max_leaf_nodes': [None],
}
clf = GridSearchCV(rf, params)
clf.fit(X, y)
clf.best_score_
clf.best_params_
test_df = full_df.copy()
test_df['tumor_stage'] = test_df['tumor_stage'].str.replace('iv', '4') \
.str.replace('iii', '3') \
.str.replace('ii', '2') \
.str.replace('i', '1') \
.astype(int)
test_df['tumor_stage']
test_df['tumor_stage'].value_counts()
X, y = test_df.drop('tumor_stage', axis=1)[common_cols], test_df['tumor_stage']
rf = RandomForestClassifier()
params = {'n_estimators': [10, 50],
'min_samples_split': [4, 8],
'min_samples_leaf': [1, 3],
'min_weight_fraction_leaf': [0.0, 0.5],
'max_features': [100],
'max_leaf_nodes': [None, 5, 10],
}
clf = GridSearchCV(rf, params, cv=5)
clf.fit(X, y)
clf.best_score_
clf.best_params_
max_features = 100,
max_leaf_nodes = 5,
min_samples_leaf = 1,
min_samples_split = 4,
min_weight_fraction_leaf = 0.0,
n_estimators = 10
rf = RandomForestClassifier(max_features = 100,
max_leaf_nodes = 5,
min_samples_leaf = 1,
min_samples_split = 4,
min_weight_fraction_leaf = 0.0,
n_estimators = 10)
rf.fit(X, y)
rf.feature_importances_
importances = pd.DataFrame(data=rf.feature_importances_, index=X.columns, columns=['importance'])
important_genes = importances[importances['importance'] > 0].index
important_genes
X, y = test_df.drop('tumor_stage', axis=1)[important_genes], test_df['tumor_stage']
rf = RandomForestClassifier()
params = {'n_estimators': [10, 50, 100],
'min_samples_split': [4, 8, 12, 16],
'min_samples_leaf': [1, 3, 6],
'min_weight_fraction_leaf': [0.0, 0.5],
'max_features': [1, 20],
'max_leaf_nodes': [None, 5, 10],
}
clf = GridSearchCV(rf, params, cv=5)
clf.fit(X, y)
clf.best_score_
clf.best_params_
```
## Annotate genes with Hallmarks
```
from functools import partial
from collections import Counter
geneset_dict
def get_hallmark(gene, geneset_dict):
    """Return the names of every hallmark whose gene set contains *gene*."""
    return [hallmark for hallmark, genes in geneset_dict.items() if gene in genes]
mapfunc = partial(get_hallmark, geneset_dict=geneset_dict)
repr_hallmarks = [i for j in list(map(mapfunc, important_genes)) for i in j]
Counter(repr_hallmarks)
```
It looks like oxidative phosphorylation and G2M checkpoints are the most predictive pathways of tumor stage. The unfolded protein response, E2F targets, MTORC1 signaling, IL2/Stat5 signaling, and cholesterol homeostasis were also represented by several genes.
| github_jupyter |
# Lecture 6: Monty Hall, Simpson's Paradox
## Stat 110, Prof. Joe Blitzstein, Harvard University
----
## The Monty Hall Problem
In case you did not grow up watching way too much daytime television in America during the 70's and early 80's, here is [Monty Hall on YouTube talking about the background of this math problem involving his popular game show, Let's Make A Deal](https://www.youtube.com/watch?v=c1BSkquWkDo).
* There are three doors.
* A car is behind one of the doors.
* The other two doors have goats behind them.
* You choose a door, but before you see what's behind your choice, Monty opens one of the other doors to reveal a goat.
* Monty offers you the chance to switch doors.
_Should you switch?_
### Defining the problem
Let $S$ be the event of winning when you switch.
Let $D_j$ be the event of the car being behind door $j$.
### Solving with a probability tree

With a probability tree, it is easy to represent the case where you condition on Monty opening door 2. Given that you initially choose door 1, you can quickly see that if you stick with door 1, you have a $\frac{1}{3}~$ chance of winning.
You have a $\frac{2}{3}~$ chance of winning if you switch.
### Solving with the Law of Total Probability
This is even easier to solve using the Law of Total Probability.
\begin{align}
P(S) &= P(S|D_1)P(D_1) + P(S|D_2)P(D_2) + P(S|D_3)P(D_3) \\
&= 0 \frac{1}{3} + 1 \frac{1}{3} + 1 \frac{1}{3} \\
&= \frac{2}{3}
\end{align}
### A more general solution
Let $n = 7$ be the number of doors in the game.
Let $m=3$ be the number of doors with goats that Monty opens after you select your initial door choice.
Let $S$ be the event where you win _by sticking with your original door choice of door 1_.
Let $C_j$ be the event that the car is actually behind door $j$.
Conditioning only on which door has the car, we have
\begin{align}
& &P(S) &= P(S|C_1)P(C_1) + \dots + P(S|C_n)P(C_n) & &\text{Law of Total Probability} \\
& & &= P(C_1) \\
& & &= \frac{1}{7} \\
\end{align}
Let $M_{i,j,k}$ be the event that Monty opens doors $i,j,k$. Conditioning on Monty opening up doors $i,j,k$, we have
\begin{align}
& &P(S) &= \sum_{i,j,k} P(S|M_{i,j,k})P(M_{i,j,k}) & &\text{summed over all i, j, k with } 2 \le i \lt j \lt k \le 7 \\
\\
& &\Rightarrow P(S|M_{i,j,k}) &= P(S) & &\text{by symmetry} \\
& & &=\frac{1}{7}
\end{align}
Note that we can now generalize this to the case where:
* there are $n \ge 3$ doors
* after you choose a door, Monty opens $m$ of the remaining $n-1$ doors to reveal goats (with $1 \le m \le n-2$)
The probability of winning with the strategy of _sticking to your initial choice_ is $\frac{1}{n}$, whether __unconditional or conditioning on the doors Monty opens__.
After Monty opens $m$ doors, each of the remaining $n-m-1$ doors has __conditional__ probability of $\left(\frac{n-1}{n-m-1}\right) \left(\frac{1}{n}\right)$.
Since $\frac{1}{n} \lt \left(\frac{n-1}{n-m-1}\right) \left(\frac{1}{n}\right)$, you will always have a greater chance of winning if you switch.
----
## Simpson's Paradox
_Is it possible for a certain set of events to be more (or less) probable than another without conditioning, and then be less (or more) probable with conditioning?_

Assume that we have the above rates of success/failure for Drs. [Hibbert](http://simpsons.wikia.com/wiki/Julius_Hibbert) and [Nick](http://simpsons.wikia.com/wiki/Nick_Riviera) for two types of surgery: heart surgery and band-aid removal.
### Defining the problem
Let $A$ be the event of a successful operation.
Let $B$ be the event of treatment by Dr. Nick.
Let $C$ be the event of heart surgery.
\begin{align}
P(A|B,C) &< P(A|B^c,C) & &\text{Dr. Nick is not as skilled as Dr. Hibbert in heart surgery} \\
P(A|B,C^c) &< P(A|B^c,C^c) & &\text{neither is he all that good at band-aid removal} \\
\end{align}
And yet $P(A|B) > P(A|B^c)$?
### Explaining with the Law of Total Probability
To explain this paradox, let's try to use the Law of Total Probability.
\begin{align}
P(A|B) &= P(A|B,C)P(C|B) + P(A|B,C^c)P(C^c|B) \\
\\
\text{but } P(A|B,C) &< P(A|B^c,C) \\
\text{and } P(A|B,C^c) &< P(A|B^c,C^c)
\end{align}
Look at $P(C|B)$ and $P(C|B^c)$. These weights are what makes this paradox possible, as they are what make the inequality relation sign flip.
Event $C$ is a case of __confounding__
### Another example
_Is it possible to have events $A_1, A_2, B, C$ such that_
\begin{align}
P(A_1|B) &\gt P(A_1|C) \text{ and } P(A_2|B) \gt P(A_2|C) & &\text{ ... yet...} \\
P(A_1 \cup A_2|B) &\lt P(A_1 \cup A_2|C)
\end{align}
Yes, and this is just another case of Simpson's Paradox.
Note that
\begin{align}
P(A_1 \cup A_2|B) &= P(A_1|B) + P(A_2|B) - P(A_1 \cap A_2|B)
\end{align}
So this is _not_ possible if $A_1$ and $A_2$ are disjoint and $P(A_1 \cup A_2|B) = P(A_1|B) + P(A_2|B)$.
It is crucial, therefore, to consider the _intersection_ $P(A_1 \cap A_2|B)$, so let's look at the following example where $P(A_1 \cap A_2|B) \gg P(A_1 \cap A_2|C)$ in order to offset the other inequalities.
Consider two basketball players each shooting a pair of free throws.
Let $A_j$ be the event basketball free throw scores on the $j^{th}$ try.
Player $B$ always either makes both $P(A_1 \cap A_2|B) = 0.8$, or misses both.
\begin{align}
P(A_1|B) = P(A_2|B) = P(A_1 \cap A_2|B) = P(A_1 \cup A_2|B) = 0.8
\end{align}
Player $C$ makes free throw shots with probability $P(A_j|C) = 0.7$, independently, so we have
\begin{align}
P(A_1|C) &= P(A_2|C) = 0.7 \\
P(A_1 \cap A_2|C) &= P(A_1|C) P(A_2|C) = 0.49 \\
P(A_1 \cup A_2|C) &= P(A_1|C) + P(A_2|C) - P(A_1 \cap A_2|C) \\
&= 2 \times 0.7 - 0.49 \\
&= 0.91
\end{align}
And so we have our case where
\begin{align}
P(A_1|B) = 0.8 &\gt P(A_1|C) = 0.7 \\
P(A_2|B) = 0.8 &\gt P(A_2|C) = 0.7 \\
\\
\text{ ... and yet... } \\
\\
P(A_1 \cup A_2|B) &\lt P(A_1 \cup A_2|C) ~~~~ \blacksquare
\end{align}
----
View [Lecture 6: Monty Hall, Simpson's Paradox | Statistics 110](http://bit.ly/2nPjJ7S) on YouTube.
| github_jupyter |
# Quantization of Signals
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).*
## Requantization of a Speech Signal
The following example illustrates the requantization of a speech signal. The signal was originally recorded with a wordlength of $w=16$ bits. It is requantized with a [uniform mid-tread quantizer](linear_uniform_characteristic.ipynb#Mid-Tread-Chacteristic-Curve) to various wordlengths. The SNR is computed and a portion of the (quantized) signal is plotted. It is further possible to listen to the requantized signal and the quantization error.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
idx = 130000 # index to start plotting
def uniform_midtread_quantizer(x, w):
    """Quantize signal x with a uniform mid-tread characteristic of wordlength w bits.

    The input is limited to the representable range [-1, 1 - Q] before
    rounding to the nearest quantization level.
    """
    # quantization step for a signal range of [-1, 1)
    step = 1 / (2 ** (w - 1))
    # limiter: clamp to the representable range (does not modify the input array)
    limited = np.clip(x, -1, 1 - step)
    # linear uniform mid-tread quantization
    return step * np.floor(limited / step + 0.5)
def evaluate_requantization(x, xQ):
    """Print the SNR of a requantized signal, plot a short excerpt of the
    signals and error, and return the peak-normalized quantization error.

    Relies on the module-level variable ``idx`` for the start index of the
    100-sample excerpt that is plotted.
    """
    e = xQ - x
    # SNR
    SNR = 10*np.log10(np.var(x)/np.var(e))
    print('SNR: %f dB'%SNR)
    # plot signals
    plt.figure(figsize=(10, 4))
    plt.plot(x[idx:idx+100], label=r'signal $x[k]$')
    plt.plot(xQ[idx:idx+100], label=r'requantized signal $x_Q[k]$')
    plt.plot(e[idx:idx+100], label=r'quantization error $e[k]$')
    plt.xlabel(r'$k$')
    plt.legend()
    # normalize error
    # scaled to +-0.2 full scale — presumably a comfortable playback level
    # for the exported error audio files; confirm.
    e = .2 * e / np.max(np.abs(e))
    return e
# load speech sample
x, fs = sf.read('../data/speech.wav')
x = x/np.max(np.abs(x))
```
Original Signal
<audio src="../data/speech.wav" controls>Your browser does not support the audio element.</audio>
[../data/speech.wav](../data/speech.wav)
### Requantization to 8 bit
```
xQ = uniform_midtread_quantizer(x, 8)
e = evaluate_requantization(x, xQ)
sf.write('speech_8bit.wav', xQ, fs)
sf.write('speech_8bit_error.wav', e, fs)
```
Requantized Signal
<audio src="speech_8bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_8bit.wav](speech_8bit.wav)
Quantization Error
<audio src="speech_8bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_8bit_error.wav](speech_8bit_error.wav)
### Requantization to 6 bit
```
xQ = uniform_midtread_quantizer(x, 6)
e = evaluate_requantization(x, xQ)
sf.write('speech_6bit.wav', xQ, fs)
sf.write('speech_6bit_error.wav', e, fs)
```
Requantized Signal
<audio src="speech_6bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_6bit.wav](speech_6bit.wav)
Quantization Error
<audio src="speech_6bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_6bit_error.wav](speech_6bit_error.wav)
### Requantization to 4 bit
```
xQ = uniform_midtread_quantizer(x, 4)
e = evaluate_requantization(x, xQ)
sf.write('speech_4bit.wav', xQ, fs)
sf.write('speech_4bit_error.wav', e, fs)
```
Requantized Signal
<audio src="speech_4bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_4bit.wav](speech_4bit.wav)
Quantization Error
<audio src="speech_4bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_4bit_error.wav](speech_4bit_error.wav)
### Requantization to 2 bit
```
xQ = uniform_midtread_quantizer(x, 2)
e = evaluate_requantization(x, xQ)
sf.write('speech_2bit.wav', xQ, fs)
sf.write('speech_2bit_error.wav', e, fs)
```
Requantized Signal
<audio src="speech_2bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_2bit.wav](speech_2bit.wav)
Quantization Error
<audio src="speech_2bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_2bit_error.wav](speech_2bit_error.wav)
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2017*.
| github_jupyter |
```
### instructions:
# 0.
# update fork from upstream:
# git fetch upstream
# git checkout master
# git rebase upstream/master
# git push origin master
# 1.
# * copy-paste ipythonwidget.py
# 2. for debug:
# * comment self._init_static()
# 3. for release check:
# * uncomment self._init_static()
# * replace
# return os.path.join(os.path.dirname(__file__), file_name)
# with
# return os.path.join(os.path.dirname('__your_local_repo_dir__/catboost/python-package/catboost/widget/'), file_name)
import os
import time
import json
from threading import Thread
from IPython.core.display import display, HTML
from traitlets import Unicode, Dict, default
from ipywidgets import DOMWidget, Layout, widget_serialization
class MetricVisualizer(DOMWidget):
    """Live CatBoost training-metric plot for Jupyter.

    Polls one or more training directories for ``catboost_training.json``
    and syncs their contents to the JS front-end view
    ('CatboostIpythonWidgetView' registered in module 'catboost_module').
    """
    _view_name = Unicode('CatboostIpythonWidgetView').tag(sync=True)
    _view_module = Unicode('catboost_module').tag(sync=True)
    # Payload synced to the browser: {train_dir_path: {'path', 'name', 'content'}}
    data = Dict({}).tag(sync=True, **widget_serialization)

    def __init__(self, train_dirs, subdirs=False):
        """
        train_dirs: a single directory path or a list of them.
        subdirs:    if True, track every immediate subdirectory of each
                    entry instead of the entries themselves.
        """
        # The original called super(self.__class__, self).__init__(), which
        # recurses infinitely if this class is ever subclassed; the
        # zero-argument form is equivalent here and safe.
        super().__init__()
        if isinstance(train_dirs, str):
            train_dirs = [train_dirs]
        if subdirs:
            train_subdirs = []
            for train_dir in train_dirs:
                train_subdirs.extend(self._get_subdirectories(train_dir))
            train_dirs = train_subdirs
        self._train_dirs = train_dirs[:]
        # Human-readable run labels: the directory basename, or 'current'
        # for the working directory.
        self._names = []
        curdir = os.path.abspath(os.path.curdir)
        for train_dir in train_dirs:
            abspath = os.path.abspath(train_dir)
            self._names.append(os.path.basename(abspath) if abspath != curdir else 'current')

    @default('layout')
    def _default_layout(self):
        return Layout(height='500px', align_self='stretch')

    def start(self):
        """Display the widget and keep polling until every run has finished."""
        # wait for start train (meta.tsv)
        # self._init_static()
        time.sleep(1.0)
        self._update_data()
        display(self)
        while self._need_update:
            self._update_data()
            time.sleep(2.0)

    def _run_update(self):
        # Run start() on a background thread so the notebook stays responsive.
        thread = Thread(target=self.start, args=())
        thread.start()

    def _get_subdirectories(self, a_dir):
        # Immediate (non-recursive) subdirectories of a_dir.
        return [os.path.join(a_dir, name) for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]

    def _update_data(self):
        """Re-read every tracked directory and push the result to the view.

        Sets self._need_update to False once all runs have reported their
        final iteration, which stops the polling loop in start().
        """
        data = {}
        need_update = False
        dirs = [{'name': name, 'path': path} for name, path in zip(self._names, self._train_dirs)]
        for dir_info in dirs:
            path = dir_info.get('path')
            content = self._update_data_from_dir(path)
            if not content:
                continue
            data[path] = {
                'path': path,
                'name': dir_info.get('name'),
                'content': content
            }
            if not need_update:
                # Keep polling while any run is still short of its final iteration.
                need_update = data[path]['content']['passed_iterations'] + 1 < data[path]['content']['total_iterations']
        self.data = data
        self._need_update = need_update

    def _update_data_from_dir(self, path):
        """Parse <path>/catboost_training.json; returns None if the file
        does not exist yet (training has not started)."""
        data = {
            'iterations': {},
            'meta': {}
        }
        training_json = os.path.join(path, 'catboost_training.json')
        if os.path.isfile(training_json):
            with open(training_json, 'r') as json_data:
                training_data = json.load(json_data)
                data['meta'] = training_data['meta']
                data['iterations'] = training_data['iterations']
        else:
            return None
        return {
            'passed_iterations': data['iterations'][-1]['iteration'] if data['iterations'] else 0,
            'total_iterations': data['meta']['iteration_count'],
            'data': data
        }

    @staticmethod
    def _get_static_path(file_name):
        # NOTE(review): hardcoded absolute path is the notebook's deliberate
        # debug setup (see the instructions cell at the top); for release this
        # should be os.path.join(os.path.dirname(__file__), file_name).
        return os.path.join(os.path.dirname('/Users/ivan-karev/github/catboost/catboost/python-package/catboost/widget/'), file_name)

    def _init_static(self):
        """Inject the widget's CSS and JS (plotly + view code) into the notebook."""
        with open(self._get_static_path('CatboostIpython.css')) as f:
            css = f.read()
        js = ''
        # never use require in your projects
        js += 'window.__define = window.define;window.__require = window.require;window.define = undefined;window.require = undefined;'
        with open(self._get_static_path('plotly-basic.min.js')) as f:
            js += f.read()
        js += 'window.define = window.__define;window.require = window.__require;window.__define = undefined; window.__require = undefined;'
        with open(self._get_static_path('CatboostIpythonPlotly.js')) as f:
            js += f.read()
        with open(self._get_static_path('CatboostIpythonInit.js')) as f:
            js += f.read()
        html = """
<style>
{}
</style>
<script>
{}
</script>
""".format(css, js)
        display(HTML(html))
%%javascript
// http://stackoverflow.com/questions/32170197/how-do-can-i-use-a-custom-js-file-under-jupyter-notebook
// instructions:
// 1.
// copy-paste CatboostIpythonInit.js
// 2.
// replace
// var debug = false;
// with
// var debug = true;
// 3. setup local notebook:
/*
mkdir ~/.jupyter/custom
cd ~/.jupyter/custom
ln -s __your_local_repo_dir__/catboost/python-package/catboost/widget/CatboostIpythonPlotly.js CatboostIpythonPlotly.js
ln -s __your_local_repo_dir__/catboost/python-package/catboost/widget/CatboostIpython.css CatboostIpython.css
ln -s __your_local_repo_dir__/catboost/python-package/catboost/widget/plotly-basic.min.js plotly-basic.min.js
*/
// Debug mode: load the widget's JS/CSS from ~/.jupyter/custom instead of the
// bundle shipped with the package (see setup instructions above).
var debug = true;
if (debug) {
    // Plotly must be loaded before CatboostIpythonPlotly.
    require.config({
        shim:{
            "custom/CatboostIpythonPlotly":{
                deps:["custom/plotly-basic.min"]
            }
        }
    })
    // Drop previously registered definitions so re-running this cell picks
    // up fresh copies of the module files.
    require.undef('catboost_module');
    require.undef('custom/CatboostIpythonPlotly');
}
var moduleBase = '@jupyter-widgets/base';
var modules = [moduleBase];
if (debug) {
    modules.push('custom/CatboostIpythonPlotly');
}
// Front-end counterpart of the Python MetricVisualizer widget.
define('catboost_module', modules, function(widgets) {
    // Look up the CatboostIpython instance bound to a DOM element, or null.
    var getInstance = function(el) {
        var id = $(el).attr('catboost-id');
        if (!id) {
            return null;
        }
        id = id.replace('catboost_', '');
        if (!window.catboostIpythonInstances[id]) {
            return null;
        }
        return window.catboostIpythonInstances[id];
    },
    // Create a CatboostIpython instance, tag the element with its id and
    // register it in the shared global instance table.
    addInstance = function(el) {
        $(el).attr('catboost-id', 'catboost_' + window.catboostIpythonIndex);
        var catboostIpython = new CatboostIpython();
        catboostIpython.index = window.catboostIpythonIndex;
        catboostIpython.plotly = window.Plotly;
        if (debug) {
            // In debug mode the CSS is not injected by Python; load it here.
            catboostIpython.loadStyles('/custom/CatboostIpython.css', function(){catboostIpython.resizeCharts();})
        }
        window.catboostIpythonInstances[window.catboostIpythonIndex] = catboostIpython;
        window.catboostIpythonIndex++;
        return catboostIpython;
    };
    var CatboostIpythonWidget = widgets.DOMWidgetView.extend({
        initialize: function() {
            CatboostIpythonWidget.__super__.initialize.apply(this, arguments);
            // Lazily create the global registry shared by all widget views.
            if (!window.catboostIpythonInstances) {
                window.catboostIpythonInstances = {};
            }
            if (typeof window.catboostIpythonIndex === 'undefined') {
                window.catboostIpythonIndex = 0;
            }
            var catboostIpythonInstance = getInstance(this.el);
            if (!catboostIpythonInstance) {
                catboostIpythonInstance = addInstance(this.el);
            }
            catboostIpythonInstance.init();
        },
        render: function() {
            this.value_changed();
            // Redraw whenever the synced model value changes.
            this.model.on('change:value', this.value_changed, this);
        },
        update: function() {
            this.value_changed();
        },
        value_changed: function() {
            this.el.style['width'] = this.model.get('width');
            this.el.style['height'] = this.model.get('height');
            // Wait until the element is attached to the DOM before plotting.
            this.displayed.then(_.bind(this.render_charts, this));
        },
        // Feed every watched training directory's data to the chart instance.
        process_all: function(parent, params) {
            var data = params.data;
            for (var path in data) {
                if (data.hasOwnProperty(path)) {
                    this.process_row(parent, data[path])
                }
            }
        },
        // Push only the not-yet-plotted iterations of one run to the charts.
        process_row: function(parent, data) {
            var catboostIpython = getInstance(parent),
                path = data.path,
                content = data.content,
                items = content.data.iterations,
                firstIndex = 0,
                chunks = [];
            if (!items || !items.length) {
                return;
            }
            if (!catboostIpython.lastIndex) {
                catboostIpython.lastIndex = {}
            }
            if (catboostIpython.lastIndex[path]) {
                // Resume after the last iteration already plotted for this run.
                firstIndex = catboostIpython.lastIndex[path] + 1;
            }
            catboostIpython.lastIndex[path] = items.length - 1;
            for (var i = firstIndex; i < items.length; i++) {
                chunks.push(items[i]);
            }
            catboostIpython.addMeta(data.path, content.data.meta);
            catboostIpython.addPoints(parent, {
                chunks: chunks,
                train: data.name,
                path: data.path
            });
        },
        render_charts: function () {
            this.process_all(this.el, {
                data: this.model.get('data')
            });
            return this;
        }
    });
    return {
        CatboostIpythonWidgetView: CatboostIpythonWidget
    };
});
# Visualize each experiment directory in turn. start() blocks until the
# corresponding training run reports its final iteration.
# NOTE(review): 'accurancy' looks misspelled, but it must match the actual
# directory name on disk -- verify before renaming.
w = MetricVisualizer('accurancy')
w.start()
# subdirs=True watches every immediate subdirectory (e.g. CV fold dirs).
w = MetricVisualizer('new_cv_subdirs', subdirs = True)
w.start()
w = MetricVisualizer('new_cv_1')
w.start()
w = MetricVisualizer('new_cv_2')
w.start()
w = MetricVisualizer('new_cv_3')
w.start()
w = MetricVisualizer('new_train')
w.start()
w = MetricVisualizer('new_train_with_nan')
w.start()
w = MetricVisualizer('new_train_best')
w.start()
w = MetricVisualizer('new_eval')
w.start()
w = MetricVisualizer('subdirs', subdirs = True)
w.start()
```
| github_jupyter |
# Predicting Red Wine Quality Using Regression Techniques
## Introduction
This notebook describes how wine quality can be predicted using Machine Learning regression techniques.
[Wine Quality Data Set](https://archive.ics.uci.edu/ml/datasets/wine+quality) from [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) is used to build various regression models to predict the quality of a wine.
## Importing the libraries
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
```
## Importing the dataset
Red wine data is available in file './data/winequality-red.csv' and the columns are separated by ';' (semicolon). Let us use [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) method to read the file into a DataFrame.
```
dataset = pd.read_csv('./data/winequality-red.csv', sep=';')
```
## Exploratory Data Analysis (EDA)
Let us understand the data first and try to gather as many insights from it.
### DataFrame summary
To start with, let us print a concise summary of the DataFrame using the [info()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.info.html) method.
```
dataset.info()
```
From the above output we can understand that:
- There are 1599 rows and 12 columns.
- Data has only float and integer values.
- Data looks very clean and there are no missing values.
### A closer look at the Data
Let us check the first five rows of the dataset.
```
dataset.head(5)
```
By looking at the above output we can understand that the first 11 columns are input variables and the last column (quality) is the output variable. We will be using various regression techniques to predict the value of the quality column.
### Correlation matrix
Let us see the correlations between the variables we have. This will help us to get a much better understanding of the relationships between the variables.
```
# Pairwise correlations between all numeric columns, shown as an annotated
# heatmap (diverging palette: blue = negative, red = positive).
corr = dataset.corr()
fig, ax = plt.subplots(figsize=(12, 8))
sns.heatmap(
    corr,
    xticklabels=corr.columns,
    yticklabels=corr.columns,
    annot=True,
    cmap=sns.diverging_palette(220, 20, as_cmap=True),
    ax=ax,
)
```
From above plot we can see that some variables are strongly correlated to quality. Probably, these variables may become important features in machine learning model we will create later.
### Distribution of the quality variable
Let us have a look at the distribution of the quality variable we are trying to predict.
```
print(dataset['quality'].value_counts())
sns.displot(dataset, x="quality", discrete=True, shrink=.8)
```
From the above results we can understand that quality variable has discrete values ranging from 3 to 8. And number of poor (quality <= 4) quality or excellent(quality >= 7) wines are much lesser than normal wines. This indicates that the dataset is imbalanced.
### Insights after EDA
- There are 1599 rows (observations) and 12 columns (11 + output attribute).
- Data has only float and integer values.
- Data looks very clean and there are no missing values.
- Some variables are strongly correlated to quality.
- Quality variable has discrete values ranging from 3 to 8.
- The dataset is unbalanced. Number of normal wines is much higher than excellent or poor ones.
## ML Modeling
Let us split the dataset into the training and test sets and then apply different regression algorithms to see which algorithm gives better accuracy.
### Splitting the dataset into the Training set and Test set
```
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix
# All columns except the last are features; the last column (quality) is the target.
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# Hold out 20% of the samples for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
```
### Model 1: Linear Regression
```
# Model 1: ordinary least-squares linear regression.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression().fit(X_train, y_train)
# Round predictions to the nearest integer quality score before scoring.
y_pred = np.round(regressor.predict(X_test))
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print('Linear Regression RMSE: ' + str(rmse))
```
### Model 2: Polynomial Regression
```
# Model 2: expand the features with all degree-2 polynomial terms, then fit OLS.
from sklearn.preprocessing import PolynomialFeatures
poly_feat = PolynomialFeatures(degree = 2)
X_poly = poly_feat.fit_transform(X_train)
regressor = LinearRegression()
regressor.fit(X_poly, y_train)
# Apply the same polynomial mapping to the test set before predicting.
y_pred = np.round(regressor.predict(poly_feat.transform(X_test)))
print('Polynomial Regression RMSE: ' + str(np.sqrt(mean_squared_error(y_test, y_pred))))
```
### Model 3: Support Vector Regression
```
# Model 3: support vector regression with an RBF kernel. SVR is sensitive to
# feature scale, so both X and y are standardized before fitting.
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
sc_X = StandardScaler()
sc_y = StandardScaler()
X_train_sc = sc_X.fit_transform(X_train)
y_train_sc = sc_y.fit_transform(y_train.reshape(-1, 1))
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train_sc, np.ravel(y_train_sc, order='C'))
# predict() returns a 1-D array, but StandardScaler.inverse_transform requires
# a 2-D array (scikit-learn >= 1.0 raises otherwise): reshape before
# inverting the target scaling and flatten the result back afterwards.
y_pred_scaled = regressor.predict(sc_X.transform(X_test))
y_pred = np.round(sc_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).ravel())
print('SVR RMSE: ' + str(np.sqrt(mean_squared_error(y_test, y_pred))))
```
### Model 4: Decision Tree Regression
```
# Model 4: a single decision tree; random_state fixed for reproducibility.
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X_train, y_train)
# Round predictions to the nearest integer quality score before scoring.
y_pred = np.round(regressor.predict(X_test))
print('Decision Tree Regressor RMSE: ' + str(np.sqrt(mean_squared_error(y_test, y_pred))))
```
### Model 5: Random Forest Regression
```
# Model 5: random forest with 10 trees; random_state fixed for reproducibility.
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
# ravel() flattens y_train into the 1-D shape fit() expects.
regressor.fit(X_train, np.ravel(y_train,order='C'))
y_pred = np.round(regressor.predict(X_test))
print('Random Forest Regressor RMSE: ' + str(np.sqrt(mean_squared_error(y_test, y_pred))))
```
### Model Evaluation
From the above results we can see that Random Forest Regression model has the lowest RMSE (.6349) and hence it is the best model.
## Conclusion
Even though we were able to predict the quality of wines with reasonable accuracy, the dataset, analysis & modeling techniques we used have some limitations.
First the data set was unbalanced - Number of normal wines is much higher than excellent or poor ones. These values made it harder to identify different variables influencing “high” or “low” quality of a wine. In order to improve our predictive model, we need more balanced data.
In future, we can try better feature engineering techniques and other machine learning algorithms for getting better results.
| github_jupyter |
```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
from keras.datasets import fashion_mnist, cifar10
from keras.layers import Dense, Flatten, Normalization, Dropout, Conv2D, MaxPooling2D, RandomFlip, RandomRotation, RandomZoom, BatchNormalization, Activation, InputLayer
from keras.models import Sequential
from keras.losses import SparseCategoricalCrossentropy, CategoricalCrossentropy
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import utils
import os
from keras.preprocessing.image import ImageDataGenerator
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
```
# Transfer Learning
### Feature Extraction and Classification
One of the key concepts needed with transfer learning is the separating of the feature extraction from the convolutional layers and the classification done in the fully connected layers.
<ul>
<li> The convolutional layer finds features in the image. I.e. the output of the end of the convolutional layers is a set of image-y features.
<li> The fully connected layers take those features and classify the thing.
</ul>
The idea behind this is that we allow someone (like Google) to train their fancy network on a bunch of fast computers, using millions and millions of images. These classifiers get very good at extracting features from objects.
When using these models we take those convolutional layers and slap on our own classifier at the end, so the pretrained convolutional layers extract a bunch of features with their massive amount of training, then we use those features to predict our data!
```
# Shared training configuration: epoch budget and the evaluation metrics
# collected during fit().
epochs = 10
acc = keras.metrics.CategoricalAccuracy(name="accuracy")
pre = keras.metrics.Precision(name="precision")
rec = keras.metrics.Recall(name="recall")
metric_list = [acc, pre, rec]
```
### Download Model
There are several models that are pretrained and available to us to use. VGG16 is one developed to do image recognition, the name stands for "Visual Geometry Group" - a group of researchers at the University of Oxford who developed it, and ‘16’ implies that this architecture has 16 layers. The model got ~93% on the ImageNet test that we mentioned a couple of weeks ago.

#### Slide Convolutional Layers from Classifier
When downloading the model we specify that we don't want the top - that's the classification part. When we remove the top we also allow the model to adapt to the shape of our images, so we specify the input size as well.
```
from keras.applications.vgg16 import VGG16
from keras.layers import Input
from keras.models import Model
from keras.applications.vgg16 import preprocess_input
```
### Preprocessing Data
Our VGG 16 model comes with a preprocessing function to prepare the data in a way it is happy with. For this model the color encoding that it was trained on is different, so we should prepare the data properly to get good results.
```
import pathlib
import PIL
from keras.applications.vgg16 import preprocess_input
# Download (and cache) the TensorFlow flowers dataset: photos of 5 flower classes.
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file(origin=dataset_url,
                                   fname='flower_photos',
                                   untar=True)
data_dir = pathlib.Path(data_dir)
#Flowers
# Image/batch geometry used for both the datasets and the model input layer.
batch_size = 32
img_height = 180
img_width = 180
# 80/20 train/validation split; the shared seed keeps the subsets disjoint.
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
def preprocess(images, labels):
    """Apply VGG16's expected input preprocessing to a batch of images."""
    return tf.keras.applications.vgg16.preprocess_input(images), labels
# Map the preprocessing over both datasets so batches match VGG16's training regime.
train_ds = train_ds.map(preprocess)
val_ds = val_ds.map(preprocess)
```
#### Add on New Classifier
If we look at the previous summary of the model we can see that the last layer we have is a MaxPool layer. When making our own CNN this is the last layer before we add in the "normal" stuff for making predictions, this is the same.
We need to flatten the data, then use dense layers and an output layer to classify the predictions.
We end up with the pretrained parts finding features in images, and the custom part classifying images based on those features.
### Make Model
We take the model without the top, set the input image size, and then add our own classifier. Loading the model is simple, there are just a few things to specify:
<ul>
<li> weights="imagenet" - tells the model to use the weights from its imagenet training. This is what brings the "smarts", so we want it.
<li> include_top=False - tells the model to not bring over the classifier bits that we want to replace.
<li> input_shape - the model is trained on specific data sizes (224x224x3). We can repurpose it by changing the input size.
</ul>
We also set the VGG model that we download to be not trainable. We don't want to overwrite all of the training that already exists, coming from the original training. What we want to be trained are the final dense parts we added on to classify our specific scenario.
```
## Loading VGG16 model
from keras.applications.vgg16 import preprocess_input
# weights="imagenet": keep the pretrained convolutional feature extractor.
# include_top=False: drop the original ImageNet classifier head.
# input_shape: adapt the input layer to our 180x180 RGB flower images.
base_model = VGG16(weights="imagenet", include_top=False, input_shape=(180,180,3))
base_model.trainable = False ## Not trainable weights
# Add Dense Stuff
# New classifier head: flatten the VGG features, two small dense layers, and
# a 5-unit output layer (one logit per flower class -- no softmax here).
flatten_layer = Flatten()
dense_layer_1 = Dense(50, activation='relu')
dense_layer_2 = Dense(20, activation='relu')
prediction_layer = Dense(5)
model = Sequential([
    base_model,
    flatten_layer,
    dense_layer_1,
    dense_layer_2,
    prediction_layer
])
model.summary()
```
#### Compile and Train
```
# Model
# from_logits=True because the prediction layer has no softmax activation.
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer="adam",
              metrics=keras.metrics.SparseCategoricalAccuracy(name="accuracy"))
# TensorBoard logging plus early stopping on the training loss.
log_dir = "logs/fit/VGG" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
callback = EarlyStopping(monitor='loss', patience=3, restore_best_weights=True)
model.fit(train_ds,
          epochs=epochs,
          verbose=1,
          validation_data=val_ds,
          callbacks=[tensorboard_callback, callback])
```
### Fine Tune Models
Lastly, we can adapt the entire model to our data. We'll unfreeze the original model, and then train the model. The key addition here is that we set the learning rate to be extremely low (here it is 2 orders of magnitude smaller than the default) so the model doesn't totally rewrite all of the weights while training, rather it will only change a little bit - fine tuning its predictions to the actual data!
The end result is a model that can take advantage of all of the training that the original model received before we downloaded it. That ability of extracting features from images is then reapplied to our data for making predictions based on the features identified in the original model. Finally we take the entire model and just gently train it to be a little more suited to our data. The best of all worlds!
```
# Save a copy of the above model for the next test.
# NOTE: the original `copy_model = model` only aliased the same object, so the
# "copy" would have been mutated by the fine-tuning below. Clone the
# architecture and copy the weights to get a genuinely independent snapshot.
from keras.models import clone_model
copy_model = clone_model(model)
copy_model.set_weights(model.get_weights())
# Unfreeze the VGG16 base so every layer participates in fine-tuning.
base_model.trainable = True
model.summary()
model.compile(
    optimizer=tf.keras.optimizers.Adam(1e-5),  # Low learning rate so the pretrained weights shift only slightly
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=keras.metrics.SparseCategoricalAccuracy(name="accuracy")
)
model.fit(train_ds, epochs=epochs, validation_data=val_ds)
```
Yay, that's probably pretty accurate!
### More Retraining
If we are extra ambitious we can also potentially slice the model even deeper, and take smaller portions to mix with our own models.
The farther "into" the model you slice, the more of the original training will be removed and the more the model will learn from our training data. If done, this is a balancing act - we want to keep all of the smarts that the model has gotten from the original training, while getting the benefits of adaptation to our data.
```
## Loading VGG16 model
base_model = VGG16(weights="imagenet", include_top=False, input_shape=(180,180,3))
#base_model.trainable = False ## Not trainable weights
base_model.summary()
# Freeze only the first 12 layers; the remaining (deeper) layers stay
# trainable and will adapt to the flower data.
for layer in base_model.layers[:12]:
    layer.trainable = False
base_model.summary()
```
Now we have larger portions of the model that can be trained. We will be losing some of the pretrained knowledge, replacing it with the training coming from our data.
```
# Add Dense Stuff
# Same classifier head as before: flatten, two small dense layers, 5 logits.
flatten_layer = Flatten()
dense_layer_1 = Dense(50, activation='relu')
dense_layer_2 = Dense(20, activation='relu')
prediction_layer = Dense(5)
model = Sequential([
    base_model,
    flatten_layer,
    dense_layer_1,
    dense_layer_2,
    prediction_layer
])
model.summary()
# Model
# from_logits=True because the prediction layer has no softmax activation.
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer="adam",
              metrics=keras.metrics.SparseCategoricalAccuracy(name="accuracy"))
# TensorBoard logging plus early stopping on the training loss.
log_dir = "logs/fit/VGG" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
callback = EarlyStopping(monitor='loss', patience=3, restore_best_weights=True)
model.fit(train_ds,
          epochs=epochs,
          verbose=1,
          validation_data=val_ds,
          callbacks=[tensorboard_callback, callback])
```
## Exercise - ResNet50
This is another pretrained network, containing 50 layers. We can use this one similarly to the last.
```
def preprocess50(images, labels):
    """Apply ResNet50's expected input preprocessing to a batch of images."""
    return tf.keras.applications.resnet50.preprocess_input(images), labels
# NOTE(review): train_ds/val_ds were already mapped through the VGG16
# preprocessing above, so this applies a second preprocessing pass on top --
# confirm whether the datasets should be rebuilt from disk first.
train_ds = train_ds.map(preprocess50)
val_ds = val_ds.map(preprocess50)
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
# Model Stuff...
# Fine Tune
```
| github_jupyter |
> Code to accompany **Chapter 6: Methods for Generating Adversarial Perturbation**
# Fashion-MNIST Foolbox Boundary Attack
The boundary attack iteratively refines the adversarial perturbation based on query responses from the DNN model. It is a black-box attack requiring no knowledge of the model architecture or its parameters.
See chapter 6 for a detailed description of this attack.
The boundary attack was proposed by Brendel et al. in ["Decision-Based Adversarial Attacks: Reliable Attacks Against Black-Box Machine Learning Models", Proceedings of the International Conference on Learning Representations (2018)](https://arxiv.org/abs/1712.04248).
## Load in the model and its predictions
Import the key libraries.
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
```
Load in the Fashion-MNIST model and get its predictions.
```
# Load the Fashion-MNIST test split; pixel values are scaled to [0, 1] to
# match the classifier's expected input range.
fashion_mnist = keras.datasets.fashion_mnist
_, (test_images, test_labels) = fashion_mnist.load_data()
test_images = test_images/255.0
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Previously trained Fashion-MNIST classifier; get its predictions for the
# whole test set once, up front.
model = tf.keras.models.load_model('../models/fashionMNIST.h5')
predictions = model.predict(test_images)
```
## Select the original (non-adversarial) image
The boundary attack begins with the target classification image rather than the original image. It iteratively moves the image closer to the original, while never allowing each iterative step to alter the adversarial classification by straying across the classification boundary.
First, we select and display the original image - this is the one that we want the adversarial image to look like.
```
# Index of the test image the adversarial example should come to resemble.
original_image_num = 9
x = test_images[original_image_num]
# The model's predicted class for the original image.
y = np.argmax(predictions[original_image_num])
y_name = class_names[y]
print("Prediction for original image:", y, y_name)
plt.imshow(x, cmap=plt.cm.binary)
```
Now, let's get a target image. This gives us our target adversarial classification.
```
# The boundary attack starts from an image that is already classified as the
# desired target class; its prediction becomes the attack's target label.
starting_point_image_num = 52
starting_point_image = test_images[starting_point_image_num]
y_adv = np.argmax(predictions[starting_point_image_num])
y_adv_name = class_names[y_adv]
print("Prediction for starting point image:", y_adv, y_adv_name)
import matplotlib.pyplot as plt
plt.imshow(starting_point_image, cmap=plt.cm.binary)
```
## Get a Foolbox model from our Keras one
Create a Foolbox model from our Keras one.
```
import foolbox
fmodel = foolbox.models.TensorFlowModel.from_keras(model, bounds=(0, 1))
```
## Define the attack specificity
[foolbox.criteria](https://foolbox.readthedocs.io/en/latest/modules/criteria.html) defines the attack specificity (targeted versus untargeted) and the probability threshold required. In this case, we specify a targeted attack and the target class.
```
attack_criterion = foolbox.criteria.TargetClass(y_adv)
```
## Define the attack method
```
attack = foolbox.attacks.BoundaryAttack(fmodel, criterion=attack_criterion)
```
## Run the attack
Remember, we start from the target and gradually creep back to the original. We are making the target look more and more like the original, without ever allowing it to take a step that will change its classification.
```
# Run the boundary attack: start at starting_point_image (already classified
# as the target class) and iteratively move toward x without ever crossing
# the decision boundary. unpack=False returns an Adversarial object (with
# .image) rather than a bare array.
x_adv = attack(input_or_adv=x,
               label = y,
               starting_point = starting_point_image,
               unpack = False,
               iterations = 2000,
               log_every_n_steps = 500)
```
## Display the results
If the attack was successful, the images should be different. Take a look to see whether this is the case.
```
# If the attack succeeded, show the original, the adversarial image and
# their pixel-wise difference side by side.
if ((x_adv is None) or (np.array_equal(x, x_adv.image))):
    print("No adversarial example was found")
else:
    preds = model.predict(np.array([x_adv.image]))
    plt.figure()
    # Plot the original image
    plt.subplot(1, 3, 1)
    plt.title(y_name)
    plt.imshow(x, cmap=plt.cm.binary)
    plt.axis('off')
    # Plot the adversarial image
    plt.subplot(1, 3, 2)
    plt.title(class_names[np.argmax(preds[0])])
    plt.imshow(x_adv.image, cmap=plt.cm.binary)
    plt.axis('off')
    # Plot the difference
    plt.subplot(1, 3, 3)
    plt.title('Difference')
    difference = x_adv.image - x
    plt.imshow(difference, vmin=0, vmax=1, cmap=plt.cm.binary)
    plt.axis('off')
    plt.show()
```
## Create and view the intermediate perturbations
If you are interested in seeing how the adversarial image evolves, the following cell re-runs the attack with increasing iteration budgets and plots the intermediate perturbed images.
```
# Capture snapshots of the attack at increasing iteration budgets
# (0, 100, ..., 1100 iterations) to visualise how the perturbation evolves.
num_iterations = 12
perturbed_images = np.empty((num_iterations,28,28))
for iterations in range (num_iterations):
    print(iterations)
    # unpack defaults to True here, so the call returns a plain image array.
    # NOTE(review): a failed attack returns None, which would make the
    # assignment below raise -- presumably acceptable for this demo.
    x_adv = attack(input_or_adv = x,
                   label = y,
                   iterations = iterations*100,
                   starting_point = starting_point_image,
                   log_every_n_steps = 100)
    perturbed_images[iterations] = x_adv
plt.figure(figsize=(15, 10))
perturbed_predictions = model.predict(perturbed_images)
# 3x4 grid of snapshots, each labelled with predicted class and confidence.
for i in range(num_iterations):
    plt.subplot(3,4,i+1)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(perturbed_images[i], cmap=plt.cm.binary)
    predicted_label = np.argmax(perturbed_predictions[i]) # the label should always be the target
    plt.xlabel("{},({:.2f})".format(class_names[predicted_label],
                                    perturbed_predictions[i][predicted_label]))
```
## Where next?
Take a look at other attacks. For example:
* The Foolbox Gradient Attack (white box). Here's the [Jupyter notebook on GitHub](https://github.com/katywarr/strengthening-dnns/blob/master/chapter06/fashionMNIST_foolbox_gradient.ipynb) and here's the [relative path to the Jupyter notebook](./fashionMNIST_foolbox_gradient.ipynb) if you want to run it.
Want to move on to more complicated images? Try:
* The Foolbox saliency attack using the ResNet50 classifier. Here's the [Jupyter notebook on GitHub](https://github.com/katywarr/strengthening-dnns/blob/master/chapter06/resnet50_foolbox_saliency.ipynb) and here's the [relative path to the Jupyter notebook](./resnet50_foolbox_saliency.ipynb) if you want to run it.
There are many more examples of adversarial attacks online. See [RESOURCES.md](https://github.com/katywarr/strengthening-dnns/blob/master/RESOURCES.md) for links to collaborative projects for creating and defending against attacks.
| github_jupyter |
# ETL Pipeline Preparation
Follow the instructions below to help you create your ETL pipeline.
### 1. Import libraries and load datasets.
- Import Python libraries
- Load `messages.csv` into a dataframe and inspect the first few lines.
- Load `categories.csv` into a dataframe and inspect the first few lines.
```
# import libraries
import pandas as pd
from sqlalchemy import create_engine
# load messages dataset
messages = pd.read_csv('messages.csv')
messages.head()
# load categories dataset
categories = pd.read_csv('categories.csv')
categories.head()
```
### 2. Merge datasets.
- Merge the messages and categories datasets using the common id
- Assign this combined dataset to `df`, which will be cleaned in the following steps
```
# merge datasets
df = pd.merge(messages, categories, how='inner', on='id')
df.head()
```
### 3. Split `categories` into separate category columns.
- Split the values in the `categories` column on the `;` character so that each value becomes a separate column. You'll find [this method](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.str.split.html) very helpful! Make sure to set `expand=True`.
- Use the first row of categories dataframe to create column names for the categories data.
- Rename columns of `categories` with new column names.
```
# create a dataframe of the 36 individual category columns
categories = df.categories.str.split(';',expand=True)
categories.head()
# select the first row of the categories dataframe
row = categories.loc[0]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = list(row.apply(lambda x: x[:-2])) #[x[:-2] for x in row]
print(category_colnames)
# rename the columns of `categories`
categories.columns = category_colnames
categories.head()
```
### 4. Convert category values to just numbers 0 or 1.
- Iterate through the category columns in df to keep only the last character of each string (the 1 or 0). For example, `related-0` becomes `0`, `related-1` becomes `1`. Convert the string to a numeric value.
- You can perform [normal string actions on Pandas Series](https://pandas.pydata.org/pandas-docs/stable/text.html#indexing-with-str), like indexing, by including `.str` after the Series. You may need to first convert the Series to be of type string, which you can do with `astype(str)`.
```
# Reduce each category entry like 'related-1' to its trailing digit and
# store it as an integer flag.
for col_name in categories:
    categories[col_name] = categories[col_name].str[-1].astype(int)
categories.head()
```
### 5. Replace `categories` column in `df` with new category columns.
- Drop the categories column from the df dataframe since it is no longer needed.
- Concatenate df and categories data frames.
```
# drop the original categories column from `df`
# (its contents now live in the expanded per-category columns)
df.drop(columns='categories', inplace=True)
df.head()
# concatenate the original dataframe with the new `categories` dataframe
# (column-wise: rows are aligned by index)
df = pd.concat([df,categories], axis=1)
df.head()
print(categories.shape)
```
### 6. Remove duplicates.
- Check how many duplicates are in this dataset.
- Drop the duplicates.
- Confirm duplicates were removed.
```
# How many fully duplicated rows are present?
print(df.duplicated().sum())
# Remove the duplicated rows in place, then confirm none remain.
df.drop_duplicates(inplace=True)
print(df.duplicated().sum())
# Report any category columns that still contain values other than 0 or 1.
for col in categories.columns:
    invalid = df[(df[col] != 1) & (df[col] != 0)]
    if invalid.shape[0] > 0:
        print(col, ":", invalid.shape)
# Keep only the rows where every category flag is strictly binary.
for col in categories.columns:
    df = df[df[col].isin([0, 1])]
# Re-check: nothing should be reported after the filter above.
for col in categories.columns:
    invalid = df[(df[col] != 1) & (df[col] != 0)]
    if invalid.shape[0] > 0:
        print(col, ":", invalid.shape)
```
### 7. Save the clean dataset into an sqlite database.
You can do this with pandas [`to_sql` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) combined with the SQLAlchemy library. Remember to import SQLAlchemy's `create_engine` in the first cell of this notebook to use it below.
```
# Persist the cleaned dataset into a local SQLite database for the ML pipeline.
engine = create_engine('sqlite:///InsertDatabaseName.db')
df.to_sql('InsertTableName', engine, index=False)
```
### 8. Use this notebook to complete `etl_pipeline.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database based on new datasets specified by the user. Alternatively, you can complete `etl_pipeline.py` in the classroom on the `Project Workspace IDE` coming later.
| github_jupyter |
## Coast Train metadata plots: class distribution
Daniel Buscombe, Marda Science contracted to U.S Geological Survey Pacific Coastal and Marine Science Center
January, 2022
These plots describe the Coast Train "v1" dataset
Import libraries
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
```
Read data
```
# Load the served metadata CSV for each Coast Train data record (A through J).
_meta_files = [
    'metadata/A_naip_meta_served.csv',
    'metadata/B_quads_gulf_meta_served.csv',
    'metadata/C_s2_meta_served.csv',
    'metadata/D_s2_4classes_meta_served.csv',
    'metadata/E_L8_meta_all_served.csv',
    'metadata/F_elwha_l8_served.csv',
    'metadata/G_madeira_meta_served.csv',
    'metadata/H_dauphin_meta_served.csv',
    'metadata/I_sandwich_metadata_served.csv',
    'metadata/J_naip_6class_meta_served.csv',
]
A, B, C, D, E, F, G, H, I, J = (pd.read_csv(f) for f in _meta_files)
```
Define a list of dataset names
```
# Panel titles, one per plotted data record. Record I (Sandwich) is split into
# two panels — images [:99] and [99:] — which is why "I:" appears twice.
names = [
'A: NAIP',
'B: Quads',
'C: S2',
'D: S2-4class',
'E: Landsat-8',
'F: Landsat-8-Elwha',
'G: Madeira',
'H: Dauphin',
'I: Sandwich-8',
'I: Sandwich-11',
'J: NAIP-6class'
]
```
Cycle through each dataset and plot the number of images that contain each class
```
plt.rcParams["figure.autolayout"] = True

# Per-dataset lists of the class names present in each labeled image;
# reused later to enumerate the unique labels across all records.
C_per_set = []

plt.figure(figsize=(16, 18))
# plt.subplots_adjust(hspace=0.5)
for counter, (dat, name) in enumerate(zip([A, B, C, D, E, F, G, H, I[:99], I[99:], J], names)):
    # Class-name vocabulary for this record (stored once per metadata CSV).
    classes = dat['classes_array'].values
    classes = [c.strip() for c in classes[0].split(',')]

    # `classes_present_integer` holds comma-separated class indices per image.
    # Renamed from `all` — the original shadowed the builtin all().
    present = dat['classes_present_integer'].values
    classes_present = [np.array(tmp.split(','), 'int') for tmp in present]
    i = np.hstack(classes_present)

    n_bins = dat['num_classes'].values[0]
    bins = np.arange(n_bins + 1)
    C_per_set.append([classes[item] for item in i])

    plt.subplot(6, 2, counter + 1)
    # Count how many labeled images contain each class index.
    v = [np.sum(i == n) for n in bins]
    plt.bar(bins, v, width=1, edgecolor='white', linewidth=1)
    plt.xticks(bins[:-1], classes, rotation=45)
    plt.ylabel('Number of Labeled Images')
    plt.xlabel('Class Label')
    plt.title(name.replace(':', ')'), loc='left')

# Save once after all subplots are drawn (avoids rewriting the PNG per panel).
plt.savefig('Num_images_per_datarecord_containing_class.png', dpi=200, bbox_inches='tight')
# plt.show()
```
## Merge classes and enumerate merged classes
All the unique class labels in the dataset
```
# Flatten the per-record label lists and enumerate the distinct class names.
all_labels = np.hstack(C_per_set)
uniq_classes = np.unique(all_labels)
print(len(uniq_classes))
print(sorted(uniq_classes))
```
Define super classes and make a remapping to integer
```
# common_classes = ['water', 'whitewater']
super_classes = ['water', 'whitewater', 'sediment','developed', 'vegetation','natural_terrain', 'other']
super_integers = np.arange(len(super_classes))
remap_super = dict(zip(super_classes, super_integers))
remap_super
```
Define all aliases, one per unique class --> superclass
```
# Map every raw class label that appears in any data record onto one of the
# seven super-classes defined in `super_classes`.
aliases = {
    "water": "water",
    "sediment_plume": "water",
    "whitewater": "whitewater",
    "surf": "whitewater",
    "sediment": "sediment",
    "sand": "sediment",
    "gravel": "sediment",
    "gravel_shell": "sediment",
    "cobble_boulder": "sediment",
    "mud_silt": "sediment",
    "developed": "developed",
    "dev": "developed",
    "coastal_defense": "developed",
    "pavement_road": "developed",
    "other_anthro": "developed",
    "vehicles": "developed",
    "development": "developed",
    "buildings": "developed",
    "other_natural_terrain": "natural_terrain",
    "other_bare_natural_terrain": "natural_terrain",
    "bare_ground": "natural_terrain",  # fix: this key appeared twice in the original literal
    "bedrock": "natural_terrain",
    "non-vegetated-wet": "natural_terrain",
    "non-vegetated-dry": "natural_terrain",
    "non_vegetated_dry": "natural_terrain",
    "non_vegetated_wet": "natural_terrain",
    "agricultural": "natural_terrain",
    "vegetated": "vegetation",
    "vegetated_surface": "vegetation",
    "vegtated_ground": "vegetation",
    "terrestrial_vegetation": "vegetation",
    "marsh_vegetation": "vegetation",
    "herbaceous vegetation": "vegetation",
    "herbaceous_veg": "vegetation",
    "woody vegetation": "vegetation",
    "woody_veg": "vegetation",
    "unknown": "other",
    "unusual": "other",
    "nodata": "other",
    "people": "other",
    "ice_snow": "other",
    "cloud": "other",
    "other": "other",
}
len(aliases)
# sorted(aliases.keys())
```
Plot number of images per recoded superclass per dataset
```
plt.rcParams["figure.autolayout"] = True

# Per-dataset arrays of super-class integer codes, one entry per labeled image.
S_per_set = []

plt.figure(figsize=(16, 18))
for counter, (dat, name) in enumerate(zip([A, B, C, D, E, F, G, H, I[:99], I[99:], J], names)):
    classes = dat['classes_array'].values
    classes = [c.strip() for c in classes[0].split(',')]

    # Renamed from `all` — the original shadowed the builtin all().
    present = dat['classes_present_integer'].values
    classes_present = [np.array(tmp.split(','), 'int') for tmp in present]
    idx = np.hstack(classes_present)

    # Raw class index -> raw class name -> super-class name -> super-class code.
    classes_present_string = [classes[item].strip("'") for item in idx]
    recoded = [aliases[c] for c in classes_present_string]
    recoded_integer = np.hstack([remap_super[c] for c in recoded])
    S_per_set.append(recoded_integer)

    n_bins = len(remap_super)
    bins = np.arange(n_bins)
    plt.subplot(6, 2, counter + 1)
    v = [np.sum(recoded_integer == n) for n in bins]
    plt.bar(bins, v, width=1, edgecolor='white', linewidth=1)
    plt.xticks(bins, super_classes, rotation=90)
    plt.ylabel('Number of Labeled Images')
    plt.xlabel('Super-class Label')
    plt.title(name.replace(':', ')'), loc='left')

# Save once after all subplots are drawn.
plt.savefig('Num_images_per_datarecord_containing_superclass.png', dpi=200, bbox_inches='tight')
plt.close('all')

# Totals across all data records, per super-class (hstack hoisted out of the loop).
stacked = np.hstack(S_per_set)
all_superclasses_summed = [np.sum(stacked == i) for i in super_integers]
plt.bar(super_integers, all_superclasses_summed)
plt.xticks(super_integers, super_classes, rotation=90)
plt.xlabel('Super-class')
plt.ylabel('Number of images labeled')
plt.savefig('NumLabel_all_datarecords_per_superlabel.png', dpi=200, bbox_inches='tight')
plt.close('all')
```
| github_jupyter |
<img src="img/Act2_Pic01_Short.png">
<img src="img/Act2_Pic02_Alt.png">
### Setup
```
import arcpy
import arcgis
import pandas as pd
import os
gis = arcgis.gis.GIS(url="https://ndirt.maps.arcgis.com", username="ANieto_ndirt")
# Look up the hosted feature services used by this analysis.
# NOTE(review): gis.content.search() result order is not guaranteed — the
# hard-coded [0]/[1]/[2] indexes below depend on the org's catalog state at
# run time; verify the right items are returned.
bridge_item = gis.content.search("DCVAMD_NBI_Bridges", item_type="feature service")[0]
bridge_item
deficient_bridge_item = gis.content.search("DCVAMD_CBSA_DeficientBridges", item_type="feature service")[0]
deficient_bridge_item
analyzed_bridges_item = gis.content.search("Bridges_Analyzed", item_type="feature service")[0]
analyzed_bridges_item
analyzed_bridges_lyr = analyzed_bridges_item.layers[0]
analyzed_bridges_lyr
analyzed_bridges_item.layers[0].query("1=1")
# Bridges ranked #1 for commute impact within the CBSA (computed further below).
analyzed_bridges_top_ranked = analyzed_bridges_item.layers[0].query("cbsa_rank = 1")
analyzed_bridges_top_ranked_df = analyzed_bridges_top_ranked.df
analyzed_bridges_top_ranked_df
# NOTE(review): pd.DataFrame.from_csv is deprecated (removed in pandas 1.0);
# pd.read_csv(path, index_col=0) is the modern equivalent.
commute_df = pd.DataFrame.from_csv(r"D:\5_Data\Transportation\Transit\commute_table.csv")
tract_polys = gis.content.search("DCVAMD_CBSA_Tracts_Polygons", item_type="feature service")[0]
tract_points = gis.content.search("DCVAMD_CBSA_Tracts_Centroids", item_type="feature service")[1]
tract_polys
tract_points
# Pre-built demo routes: origin/destination points plus a normal and an
# impaired (bridge-closed) route.
route_items = gis.content.search("Test Routes", item_type="feature service")
origin_dest_points = route_items[0]
normal_route = route_items[2]
impaired_route = route_items[1]
normal_route
impaired_route
import time
def animate_layer_addition_to_map(map_widget, list_of_items, zoom_level, basemap='gray-vector'):
    """Draw find-routes results onto a map widget with staged symbology.

    NOTE(review): this function reads the free variables `result` and
    `stops_layer` from the notebook's global scope at call time — they are not
    parameters. The `list_of_items` parameter and the `lines_fset` local are
    never used. Consider passing `result`/`stops_layer` explicitly.
    """
    # The map widget
    m = map_widget
    m.basemap = basemap
    # 1. Parse the find-routes analysis results
    # Extract the output data from the analysis results
    # Store the output points and lines in pandas dataframes
    lines_df = result.output_routes.df
    lines_fset = arcgis.features.FeatureSet.from_dataframe(lines_df)
    # 2. Define the map symbology
    # Allocation lines — widest/darkest drawn first so narrower symbols overlay them.
    allocation_line_symbol_1 = {'type': 'esriSLS', 'style': 'esriSLSSolid',
                                'color': [255,255,255,153], 'width': 0.7}
    allocation_line_symbol_2 = {'type': 'esriSLS', 'style': 'esriSLSSolid',
                                'color': [0,255,197,39], 'width': 3}
    allocation_line_symbol_3 = {'type': 'esriSLS', 'style': 'esriSLSSolid',
                                'color': [0,197,255,39], 'width': 5}
    allocation_line_symbol_4 = {'type': 'esriSLS', 'style': 'esriSLSSolid',
                                'color': [0,92,230,39], 'width': 7}
    # Brief pause so the widget renders before drawing (animation effect).
    time.sleep(1.5)
    m.draw(shape=result.output_routes, symbol=allocation_line_symbol_4)
    m.draw(shape=result.output_routes, symbol=allocation_line_symbol_2)
    m.draw(shape=result.output_routes, symbol=allocation_line_symbol_1)
    m.add_layer(stops_layer)
    m.zoom = zoom_level
```
# Spatial Analysis Methodology
### 1. Ask questions:
Formulate hypotheses and spatial
questions.
### 2. Explore the data:
Examine the data quality,
completeness, and measurement limitations (scale
and resolution) to determine the level of analysis and
interpretation that can be supported.
### 3. Analyze and model:
Break the problem down into
solvable components that can be modeled. Quantify
and evaluate the spatial questions.
### 4. Interpret the results:
Evaluate and analyze the results
in the context of the question posed, data limitations,
accuracy, and other implications.
### 5. Repeat as necessary:
Spatial analysis is a continuous
and iterative process that often leads to further
questions and refinements.
### 6. Present the results:
The best information and
analysis becomes increasingly valuable when it can be
effectively presented and shared with a larger audience.
### 7. Make a decision:
Spatial analysis and GIS are used to support the
decision-making process. A successful spatial analysis
process often leads to the understanding necessary to
drive decisions and action.
# 1. Ask Questions
## What would be the impact to commuters if a structurally deficient bridge is impaired?
# 2. Explore Data
### Exploring Bridge Data
#### Bridges in the DC Area (CBSA)
```
# Map of all NBI bridges in the DC CBSA.
bridges_map = gis.map('Fairfax County', zoomlevel=8)
bridges_map.basemap = 'gray-vector'
display(bridges_map)
bridges_map.add_layer(bridge_item)
# Pull the bridge layer into a SpatialDataFrame for tabular inspection.
bridges_sdf = arcgis.features.SpatialDataFrame.from_layer(bridge_item.layers[0]); bridges_sdf.head()
print([col for col in bridges_sdf.columns])
```
National Bridge Inventory (NBI) Schema: https://www.fhwa.dot.gov/bridge/mtguide.pdf
#### Structurally Deficient Bridges in the DC Area (CBSA)
```
# Map of only the structurally deficient bridges in the DC CBSA.
deficient_bridges_map = gis.map('Fairfax County', zoomlevel=8)
deficient_bridges_map.basemap = 'gray-vector'
display(deficient_bridges_map)
deficient_bridges_map.add_layer(deficient_bridge_item)
```
### Exploring Commuting Data
#### Map of all DC CBSA Tracts
```
# Map of the census-tract centroids used as commute origins/destinations.
tracts_map = gis.map('Arlington, VA', zoomlevel=9)
tracts_map.basemap = 'streets-night-vector'
tracts_map.add_layer(tract_points)
display(tracts_map)
```
#### Table of Commuting Patterns by Tract
Census Journey to Work Data: https://www.census.gov/topics/employment/commuting.html
```
# Census Journey-to-Work commuting flows, keyed by tract pair.
# NOTE(review): pd.DataFrame.from_csv is deprecated (removed in pandas 1.0);
# pd.read_csv(path, index_col=0) is the modern equivalent.
commute_df = pd.DataFrame.from_csv(r"D:\5_Data\Transportation\Transit\commute_table.csv")
commute_df
```
# 3. Analyze and model:
## Analysis Question: What would be the impact to commuters if a structurally deficient bridge is impaired?
```
# Tract centroids and deficient bridges on one map, for visual exploration.
exploration_map = gis.map('Arlington, VA', zoomlevel=9)
exploration_map.basemap = 'streets-night-vector'
exploration_map.add_layer(tract_points)
display(exploration_map)
exploration_map.add_layer(deficient_bridge_item)
```
### Let's explore an example commute...
```
# Example commute: normal route vs. route with a deficient bridge impaired.
m2 = gis.map('Washington Navy Yard', zoomlevel=12)
m2.basemap = 'gray-vector'
display(m2)
m2.add_layer(origin_dest_points)
m2.add_layer(normal_route)
m2.add_layer(deficient_bridge_item)
m2.add_layer(impaired_route)
```
## Prototype Analysis Workflow
<img src="img/Analysis_Process.png">
<img src="img/odmc_restriction_01.png">
## Automated Workflow Steps:
### 1. Set Environment and Retrieve Bridge and Commute Pattern Data
```
# Set workspace
# NOTE(review): all paths below are machine-specific absolute paths — move to
# a config file before sharing this workflow.
processing_workspace = r"D:\ANieto_SolutionEngineer\Projects\FedGIS\ArcGISAPIforPython_Workspace\bridge_processing"
workspace_gdb = r"D:\ANieto_SolutionEngineer\Projects\FedGIS\ArcGISAPIforPython_Workspace\BridgeCriticality_Arcpy_Workspace.gdb"
# workspace_gdb = "C:\\Users\\albe9057\\Documents\\ANieto_SolutionEngineering\\Projects\\FedGIS\\FedGIS_2018\\Plenary_ArcGISAPIforPython\\Work\\Bridge_Criticality_Analysis\\BridgeCriticality_Arcpy_Workspace.gdb"
# Set Arcpy environment
arcpy.env.workspace = workspace_gdb
arcpy.env.overwriteOutput = True
# Set reference to origins (tract centroids double as both origins and destinations)
origin_tracts = r"D:\ANieto_SolutionEngineer\Projects\FedGIS\ArcGISAPIforPython_Workspace\BridgeCriticality_Arcpy_Workspace.gdb\DCVAMD_CBSA_Tracts_Centroids"
# origin_tracts = "C:\\Users\\albe9057\\Documents\\ANieto_SolutionEngineering\\Projects\\FedGIS\\FedGIS_2018\\Plenary_ArcGISAPIforPython\\Work\\Bridge_Criticality_Analysis\\Bridge_Criticality_Analysis.gdb\\DCVAMD_CBSA_Tracts_Centroids"
origins_id_field = "ID"
origins_name_field = "NAME"
# Set reference to destinations (same centroid feature class as origins)
dest_tracts = r"D:\ANieto_SolutionEngineer\Projects\FedGIS\ArcGISAPIforPython_Workspace\BridgeCriticality_Arcpy_Workspace.gdb\DCVAMD_CBSA_Tracts_Centroids"
# dest_tracts = "C:\\Users\\albe9057\\Documents\\ANieto_SolutionEngineering\\Projects\\FedGIS\\FedGIS_2018\\Plenary_ArcGISAPIforPython\\Work\\Bridge_Criticality_Analysis\\Bridge_Criticality_Analysis.gdb\\DCVAMD_CBSA_Tracts_Centroids"
dest_id_field = "ID"
dest_name_field = "NAME"
# Set reference to bridges (point features and their buffered polygon barriers)
bridges_fc = "C:\\Users\\albe9057\\Documents\\ANieto_SolutionEngineering\\Projects\\FedGIS\\FedGIS_2018\\Plenary_ArcGISAPIforPython\\Work\\Bridge_Criticality_Analysis\\Bridge_Criticality_Analysis.gdb\\DCVAMD_CBSA_DeficientBridges"
polybarrier_bridges_fc = "C:\\Users\\albe9057\\Documents\\ANieto_SolutionEngineering\\Projects\\FedGIS\\FedGIS_2018\\Plenary_ArcGISAPIforPython\\Work\\Bridge_Criticality_Analysis\\Bridge_Criticality_Analysis.gdb\\DCVAMD_CBSA_DeficientBridges_Polybarriers"
polybarriers_id_field = "OBJECTID"
# Set reference to the network dataset
network_dataset = "C:\\ArcGIS\\Business Analyst\\US_2015\\Data\\Streets Data\\NAVTEQ_2014_Q3_NA.gdb\\Routing\\Routing_ND"
# Set reference to commute table
commute_table = "C:\\Users\\albe9057\\Documents\\ANieto_SolutionEngineering\\Projects\\FedGIS\\FedGIS_2018\\Plenary_ArcGISAPIforPython\\Work\\Bridge_Criticality_Analysis\\Bridge_Criticality_Analysis.gdb\\ctpp_journey_to_work"
# Set reference to impedance values needed for odcm
# (99999 effectively disables the cutoff so all tract pairs are solved)
impedance_value=99999
impedance_attribute="Minutes"
accumulate_attributes = ["Minutes", "Miles"]
```
### 2. Perform Bridge Commuting Impacts Analysis
#### Option 1: Python API ODCM via WebGIS
```
arcgis.network.analysis.generate_origin_destination_cost_matrix?
```
#### Option 2: Custom ODCM via ArcPy
##### Custom ODCM Helper Function
```
def create_odcm(gdb,
                origins_fc,
                origins_id_field,
                origins_name_field,
                destinations_fc,
                destinations_id_field,
                destinations_name_field,
                odcm_name,
                network_dataset,
                impedance_value,
                impedance_attribute,
                impedance_attribute_field_name="Dij",
                use_lines=False,
                out_na_layer_name="Origins2Destinations",
                validate_inputs=False,
                method_message="\t\tcreate_odcm: ",
                output_origin_id_field_name='origin_id',
                output_origin_name_field_name='origin_name',
                output_dest_id_field_name='destination_id',
                output_dest_name_field_name='destination_name',
                logger_object=None):
    """
    create_odcm: Creates an origin-destination cost matrix with additional output handling.

    Solves a Network Analyst OD cost matrix between `origins_fc` and
    `destinations_fc` over `network_dataset`, exports the Lines sublayer to a
    geodatabase table named ``ODCM_<odcm_name>`` in `gdb`, and post-processes
    that table: splits the composite "origin - destination" Name field into id
    fields, and copies the ``Total_<impedance_attribute>`` cost into a rounded
    `impedance_attribute_field_name` column (default "Dij").

    Args:
        gdb: workspace geodatabase path; also where the output table lands.
        origins_fc / destinations_fc: point feature classes to load.
        *_id_field / *_name_field: attribute fields identifying each feature.
        odcm_name: suffix for the output table name ODCM_<odcm_name>.
        network_dataset: path to the routing network dataset.
        impedance_value: cost cutoff passed to MakeODCostMatrixLayer.
        impedance_attribute: network cost attribute to minimize (e.g. "Minutes").
        use_lines: if True, keep straight-line geometry and the saved .lyr file.
        validate_inputs: if True, arcpy.Exists-check origins/destinations first.
        logger_object: passed through to general_utils.log_print.

    Returns:
        str: full path to the hydrated ODCM table.

    Raises:
        ValueError: no Network Analyst license, invalid inputs, or an
            ambiguous/missing Lines layer in the template MXD.

    NOTE(review): relies on `general_utils` and `ArcGISVersionChecker` being in
    scope (not defined in this notebook as shown), and on `arcpy.mapping`,
    i.e. ArcMap / Python 2. Callers further below pass extra keyword arguments
    (accumulate_attribute_name, polybarrier_fc, polybarrier_id_field,
    polybarrier_name_field) that this signature does NOT accept — confirm this
    is the intended version of the function.
    """
    # Set standardized method messaging title
    general_utils.log_print("{0}Initializing Origin-Destination Cost Matrix process...".format(method_message),
                            "INFO",
                            logger_object)
    # Establish workspace parameters
    workspace = gdb
    arcpy.env.workspace = workspace
    arcpy.env.overwriteOutput = True
    # Determine which version of arcgis desktop is being used
    # (needed later to locate the versioned ESRI map-template directory)
    DesktopVersion = ArcGISVersionChecker()[2]
    lines_param = "STRAIGHT_LINES" if use_lines else "NO_LINES"
    general_utils.log_print("DEVNOTE: lines_param={0}".format(lines_param), "DEBUG", logger_object)
    general_utils.log_print("{0}Acquiring Network Analyst extension...".format(method_message), "DEBUG", logger_object)
    # Acquire Network Analyst extension
    if arcpy.CheckExtension("Network") != "Available":
        # Raise a custom exception
        ## raise LicenseError
        general_utils.log_print("{0}ERROR: A Network Analyst License is required in order to create the ODCM; the ODCM will not be produced. Please consult with the GIS Developer if a license is expected to be available...".format(method_message),
                                "ERROR",
                                logger_object)
        raise ValueError("Unable to acquire a network analyst license!")
    elif arcpy.CheckExtension("Network") == "Available":
        arcpy.CheckOutExtension("Network")
    if validate_inputs:
        # Perform verification of origins and destinations feature classes
        general_utils.log_print("{0}Acquiring Origins...".format(method_message), "DEBUG", logger_object)
        if arcpy.Exists(origins_fc):
            pass
        else:
            general_utils.log_print("Unable to run create_odcm with provided origins!", "ERROR", logger_object)
            raise ValueError("Unable to run create_odcm with provided origins!")
        print("{0}Acquiring Destinations...".format(method_message))
        if arcpy.Exists(destinations_fc):
            pass
        else:
            general_utils.log_print("Unable to run create_odcm with provided destinations!", "ERROR", logger_object)
            raise ValueError("Unable to run create_odcm with provided destinations!")
    general_utils.log_print("{0}Establishing Network Analyst Layer...".format(method_message), "DEBUG", logger_object)
    outlayerfile = out_na_layer_name + ".lyr"
    general_utils.log_print("{0}The established impedance attribute is: {1}".format(method_message, str(impedance_attribute)), "DEBUG", logger_object)
    # Create variable that refers to the Impedance Attribute Field from the default ODCM Table
    impedance_attribute_field = "Total_" + impedance_attribute
    general_utils.log_print("{0}Establishing Destination Search Distance Cut-Off...".format(method_message), "DEBUG", logger_object)
    # Import user parameter 'Impedance Cutoff'
    general_utils.log_print("{0}Impedance Cutoff: {1}".format(method_message, str(impedance_value)), "DEBUG", logger_object)
    # Create the Composite Origin-Destination Cost Matrix Network Analysis Layer.
    general_utils.log_print("{0}Creating Origin-Destination Cost Matrix...".format(method_message), "DEBUG", logger_object)
    out_na_layer = arcpy.MakeODCostMatrixLayer_na(network_dataset,
                                                  out_na_layer_name,
                                                  impedance_attribute,
                                                  impedance_value, "", "", "", "",
                                                  "USE_HIERARCHY", "",
                                                  lines_param).getOutput(0)
    # Acquire the SubLayers from the Composite Origin-Destination Cost Matrix Network Analysis Layer
    general_utils.log_print("{0}Acquiring Composite Network Analysis SubLayers...".format(method_message), "DEBUG", logger_object)
    sublayer_names = arcpy.na.GetNAClassNames(out_na_layer)
    # Acquire the Origin's SubLayer
    general_utils.log_print("{0}Acquiring Origins SubLayer...".format(method_message), "DEBUG", logger_object)
    origins_layername = sublayer_names["Origins"]
    # Create a Field Map object to Map the 'CovLogic_Centroid' IDs to the Origins field of the Origin-Destination Cost Matrix
    origins_fieldmap = arcpy.na.NAClassFieldMappings(out_na_layer, origins_layername)
    # Map the user's id field into the NA "Name" field so solved rows can be
    # split back into origin/destination ids later.
    origins_fieldmap["Name"].mappedFieldName = origins_id_field
    # Load the Origins into the Composite Network Analysis Layer.
    general_utils.log_print("{0}Loading Origins into Composite Network Analysis Layer...".format(method_message), "DEBUG", logger_object)
    arcpy.na.AddLocations(out_na_layer, origins_layername, origins_fc, origins_fieldmap)
    # Acquire the Destinations SubLayer.
    general_utils.log_print("{0}Acquiring Destinations SubLayer...".format(method_message), "DEBUG", logger_object)
    destinations_layername = sublayer_names["Destinations"]
    # Create a Field Map object to map the 'proForma' DIDs to the Destinations field of the Origin-Destination Cost Matrix.
    destinations_fieldmap = arcpy.na.NAClassFieldMappings(out_na_layer, destinations_layername)
    destinations_fieldmap["Name"].mappedFieldName = destinations_id_field
    # Load the Destinations into the Composite Network Analysis Layer.
    general_utils.log_print("{0}Loading Destinations into Composite Network Analysis Layer...".format(method_message), "DEBUG", logger_object)
    arcpy.na.AddLocations(out_na_layer, destinations_layername, destinations_fc, destinations_fieldmap)
    # Solve the Network
    general_utils.log_print("{0}Solving Network 'Origins2Destinations' Origin-Destination Cost Matrix...".format(method_message), "DEBUG", logger_object)
    arcpy.na.Solve(out_na_layer)
    # Verify if the directory, C:\Temp exists on the client system
    if not os.path.exists(r"C:\Temp"):
        # IF the directory, C:\Temp does not exist, create it
        os.makedirs(r"C:\Temp")
    # Set the Workspace to C:\Temp
    general_utils.log_print("{0}Resetting Workspace to C:\Temp...".format(method_message), "DEBUG", logger_object)
    arcpy.env.workspace = r"C:\Temp"
    # Extract the 'in_memory' result layer and save it as a Layer File in the workspace.
    general_utils.log_print("{0}Extracting Result Layer from memory...".format(method_message), "DEBUG", logger_object)
    arcpy.SaveToLayerFile_management(out_na_layer, outlayerfile, "RELATIVE")
    # Establish a reference to the Result Layer
    general_utils.log_print("{0}Acquiring Result Layer...".format(method_message), "DEBUG", logger_object)
    ResultLayer = arcpy.mapping.Layer(r"C:\Temp\{0}.lyr".format(out_na_layer_name))
    # Reset the Workspace to the workspace
    general_utils.log_print("{0}Resetting Workspace to {1}...".format(method_message, str(workspace)), "DEBUG", logger_object)
    arcpy.env.workspace = workspace
    # Establish a reference to a standard ESRI Map Template
    # (a throwaway MXD is needed so the Lines sublayer table can be exported)
    general_utils.log_print("{0}Acquiring ESRI Template MXD...".format(method_message), "DEBUG", logger_object)
    TempMXD = arcpy.mapping.MapDocument(r"C:\Program Files (x86)\ArcGIS\\{0}\\MapTemplates\Traditional Layouts\LetterPortrait.mxd".format(str(DesktopVersion)))
    # Establish a reference to the DataFrame within the ESRI Map Template
    general_utils.log_print("{0}Acquiring ESRI Template MXD DataFrame...".format(method_message), "DEBUG", logger_object)
    TempDF = arcpy.mapping.ListDataFrames(TempMXD)[0]
    # Add the 'ResultLayer' to the DataFrame in the 'TempMXD'
    general_utils.log_print("{0}Adding Result Layer to ESRI Template MXD...".format(method_message), "DEBUG", logger_object)
    arcpy.mapping.AddLayer(TempDF, ResultLayer)
    # Create a container and dynamically populate it with the layer in the Dataframe named 'Lines'
    lines_lyr = arcpy.mapping.ListLayers(TempMXD, "Lines", TempDF)
    if len(lines_lyr) > 1:
        raise ValueError("Multiple OD Cost Matrices populated in Template MXD. Cannot identify correct OD Cost Matrix.")
    elif len(lines_lyr) < 1:
        raise ValueError("OD Cost Matrix was not populated in Template MXD. Unable to extract result.")
    else:
        for lyr in lines_lyr:
            # Export the table associated with the 'Lines' layer to a new table in the Workspace
            general_utils.log_print("{0}Extracting Retail Node Sites Origin-Destination Cost Matrix...".format(method_message), "DEBUG", logger_object)
            arcpy.TableToTable_conversion(lyr, workspace, "ODCM_{0}".format(str(odcm_name)))
            # Remove the layer from the TempMXD's DataFrame
            general_utils.log_print("{0}Removing Result Layer from ESRI Template MXD...".format(method_message), "DEBUG", logger_object)
            arcpy.mapping.RemoveLayer(TempDF, lyr)
    # Delete the 'ResultLayer' file from disk
    if not use_lines:
        general_utils.log_print("{0}Deleting Result Layer from disk...".format(method_message), "DEBUG", logger_object)
        arcpy.Delete_management(r"C:\Temp\{0}.lyr".format(out_na_layer_name))
    # Establish a reference to the ProForma Sites Origin-Destination Cost Matrix
    general_utils.log_print("{0}Acquiring Origin-Destination Cost Matrix...".format(method_message), "DEBUG", logger_object)
    odcm = "{0}\\ODCM_{1}".format(workspace, str(odcm_name))
    # Display a message to the user that the Origin-Destination Cost Matrix generation process completed
    general_utils.log_print("{0}Origin-Destination Cost Matrix data loading process complete.".format(method_message), "DEBUG", logger_object)
    """ [SP] Hydrate Origin-Destination Cost Matrix"""
    # Delete any unnecessary fields ('DestinationID', 'OriginID', 'DestinationRank') from the current odcm
    general_utils.log_print(
        "{0}Performing ODCM preparation and preliminary calculations...".format(method_message), "DEBUG", logger_object)
    general_utils.log_print(
        "{0}Deleting fields 'DestinationID' | 'OriginID' from ODCM...".format(method_message), "DEBUG", logger_object)
    arcpy.DeleteField_management(odcm, ["DestinationID", "OriginID"])
    # Create a new fields for origin and destinations in the 'ODCM' table
    # Add id field for origin ids
    general_utils.log_print("{0}Creating new field '{1}'...".format(method_message, output_origin_id_field_name), "DEBUG", logger_object)
    arcpy.AddField_management(odcm, output_origin_id_field_name, "TEXT", "", "", 20, output_origin_id_field_name,
                              "NULLABLE",
                              "REQUIRED")
    # Add name field for origins
    general_utils.log_print("{0}Creating new field '{1}'...".format(method_message, output_origin_name_field_name), "DEBUG", logger_object)
    arcpy.AddField_management(odcm, output_origin_name_field_name, "TEXT", "", "", 100,
                              output_origin_name_field_name,
                              "NULLABLE", "REQUIRED")
    # Destinations fields
    # Add id field for destinations
    general_utils.log_print("{0}Creating new field '{1}'...".format(method_message, output_dest_id_field_name), "DEBUG", logger_object)
    arcpy.AddField_management(odcm, output_dest_id_field_name, "TEXT", "", "", 20,
                              output_dest_id_field_name,
                              "NULLABLE", "REQUIRED")
    # Add name field for destinations
    general_utils.log_print("{0}Creating new field '{1}'...".format(method_message, output_dest_name_field_name), "DEBUG", logger_object)
    arcpy.AddField_management(odcm, output_dest_name_field_name, "TEXT", "", "", 100,
                              output_dest_name_field_name,
                              "NULLABLE", "REQUIRED")
    # Calculate the 'OriginID' and 'DestinationID' fields in the 'ODCM' table,
    # populating the field with the components from the default 'Name' field in the odcm table
    # (NA writes Name as "<origin_id> - <dest_id>"; split on ' - ')
    general_utils.log_print("{0}Calculating '{1}', '{2}' fields...".format(method_message,
                                                                           output_origin_id_field_name,
                                                                           output_dest_id_field_name), "DEBUG", logger_object)
    with arcpy.da.UpdateCursor(odcm, ['Name', output_origin_id_field_name, output_dest_id_field_name]) as cursor:
        for row in cursor:
            string = row[0]
            origin_id = string.split(' - ')[0]
            dest_id = string.split(' - ')[1]
            row[1] = origin_id
            row[2] = dest_id
            cursor.updateRow(row)
    # Create a new field 'Dij' in the 'ODCM' table
    general_utils.log_print("{0}Creating new field '{1}'...".format(method_message, impedance_attribute_field_name), "DEBUG", logger_object)
    arcpy.AddField_management(odcm,
                              impedance_attribute_field_name,
                              "DOUBLE", 15, 5, "",
                              impedance_attribute_field_name, "NULLABLE",
                              "REQUIRED")
    # Calculate the 'Dij' field in the 'ODCM' table
    general_utils.log_print("{0}Calculating '{1}' field...".format(method_message, impedance_attribute_field_name), "DEBUG", logger_object)
    arcpy.CalculateField_management(odcm,
                                    impedance_attribute_field_name,
                                    "!" + impedance_attribute_field + "!",
                                    "PYTHON")
    # Round the values held in the 'Dij' field in the 'ODCM' table to the nearest 5 significant digits
    arcpy.CalculateField_management(odcm,
                                    impedance_attribute_field_name,
                                    "round(!{0}!, 5)".format(impedance_attribute_field_name),
                                    "PYTHON")
    # Delete the default impedence attribute field from the 'ODCM' table
    general_utils.log_print("{0}Removing default impedance attribute field...".format(method_message), "DEBUG", logger_object)
    arcpy.DeleteField_management(odcm, str(impedance_attribute_field))
    general_utils.log_print("{0}Operation complete. Go Gators.".format(method_message), "DEBUG", logger_object)
    arcpy.CheckInExtension("Network")
    return odcm
```
##### Run ODCM for Nominal Commute
```
# Reuse a cached nominal (unimpaired) ODCM if present; otherwise solve it.
# NOTE(review): this call passes accumulate_attribute_name and polybarrier_*
# keyword arguments that the create_odcm defined above does not accept — as
# written the else-branch would raise TypeError; confirm which version of
# create_odcm is intended.
if os.path.isfile("nominal_odcm.csv"):
    print("Found nominal ODCM. Using processed data...")
    nominal_odcm_df = pd.DataFrame.from_csv("nominal_odcm.csv")
else:
    # Run nominal ODCM using tracts to tracts
    nominal_odcm = create_odcm(gdb=workspace_gdb,
                               origins_fc=origin_tracts,
                               origins_id_field=origins_id_field,
                               origins_name_field=origins_name_field,
                               destinations_fc=dest_tracts,
                               destinations_id_field=dest_id_field,
                               destinations_name_field=dest_name_field,
                               odcm_name="nominal_baseline",
                               network_dataset=network_dataset,
                               impedance_value=impedance_value,
                               impedance_attribute=impedance_attribute,
                               accumulate_attribute_name=accumulate_attributes,
                               polybarrier_fc=None,
                               polybarrier_id_field=None,
                               polybarrier_name_field=None,
                               impedance_attribute_field_name="Dij",
                               use_lines=False,
                               out_na_layer_name="Origins2Destinations",
                               validate_inputs=False,
                               method_message="create_odcm: ",
                               output_origin_id_field_name='origin_id',
                               output_origin_name_field_name='origin_name',
                               output_dest_id_field_name='destination_id',
                               output_dest_name_field_name='destination_name',
                               logger_object=None)
    # Convert the odcm gis table to a pandas dataframe
    print("Calculating ID Fields...")
    # NOTE(review): convert_gis_table_to_pddataframe, get_origin_id_from_odid
    # and get_dest_id_from_odid are not defined in this notebook as shown —
    # presumably helper functions from another module; verify they are imported.
    nominal_odcm_df = convert_gis_table_to_pddataframe(nominal_odcm)
    nominal_odcm_df['OriginID'] = nominal_odcm_df.apply(lambda row: get_origin_id_from_odid(row['Name']), axis=1)
    nominal_odcm_df['DestinationID'] = nominal_odcm_df.apply(lambda row: get_dest_id_from_odid(row['Name']), axis=1)
    # Cache so subsequent runs take the fast path above.
    nominal_odcm_df.to_csv("nominal_odcm.csv")
nominal_odcm_df.head()
```
##### Run Bridge Iteration: For each Bridge, Run ODCM with Bridge Feature as a Network Barrier
```
# Set iteration: Structurally Deficient Bridges in DC Metropolitan Region from NBI data
# Build the list of buffered-bridge barrier feature IDs to iterate over.
bridges_list = [row[0] for row in arcpy.da.SearchCursor(polybarrier_bridges_fc, polybarriers_id_field)]

# Create (or reuse) the bridge-processing scratch directory and work from it.
bridge_dir_path = r"D:\ANieto_SolutionEngineer\Projects\FedGIS\ArcGISAPIforPython_Workspace\bridge_processing"
if not os.path.isdir(bridge_dir_path):
    os.mkdir(bridge_dir_path)
# BUG FIX: the original assigned the misspelled name `brigde_dir` in the
# creation branch, leaving `bridge_dir` undefined (NameError at os.chdir)
# on the first run against a fresh machine.
bridge_dir = bridge_dir_path
os.chdir(bridge_dir)
# For each bridge
for bridge in bridges_list:
print("\nChecking bridge {0} of {1}...".format(str(bridge), str(len(bridges_list))))
if os.path.isfile("impacted_commuters_odcm_{0}.csv".format(str(bridge))):
print("Found bridge. Using processed data...")
impacted_commuters_df = pd.DataFrame.from_csv("impacted_commuters_odcm_{0}.csv".format(str(bridge)))
else:
print("\nProcessing bridge {0} of {1}...".format(str(bridge), str(len(bridges_list))))
# Set where clause
bridge_sql = "{0} = {1}".format(arcpy.AddFieldDelimiters(polybarrier_bridges_fc, polybarriers_id_field), bridge)
# Export feature to act as a single polyline barrier
polybarrier_bridge_fc = arcpy.Select_analysis(polybarrier_bridges_fc, "{0}/Bridge_{1}".format(workspace_gdb, str(bridge)), bridge_sql)
# Run impaired ODCM using tracts to tracts, using a bridge feature as a polygon barrier
impaired_odcm = create_odcm(gdb=workspace_gdb,
origins_fc=origin_tracts,
origins_id_field=origins_id_field,
origins_name_field=origins_name_field,
destinations_fc=dest_tracts,
destinations_id_field=dest_id_field,
destinations_name_field=dest_name_field,
odcm_name="impaired_test",
network_dataset=network_dataset,
impedance_value=impedance_value,
impedance_attribute=impedance_attribute,
accumulate_attribute_name=accumulate_attributes,
polybarrier_fc=polybarrier_bridge_fc,
polybarrier_id_field=polybarriers_id_field,
polybarrier_name_field="ITEM6A",
impedance_attribute_field_name="Dij",
use_lines=False,
out_na_layer_name="Origins2Destinations",
validate_inputs=False,
method_message="create_odcm: ",
output_origin_id_field_name='origin_id',
output_origin_name_field_name='origin_name',
output_dest_id_field_name='destination_id',
output_dest_name_field_name='destination_name',
logger_object=None)
# Convert the ODCM GIS table to a pandas DataFrame via an intermediate CSV.
impaired_odcm_csv = gis_table_to_csv(impaired_odcm, bridge_dir, "impaired_odcm_{0}.csv".format(str(bridge)))
# pd.DataFrame.from_csv is deprecated (and removed in pandas 1.0);
# read_csv(path, index_col=0, parse_dates=True) is the documented equivalent.
impaired_odcm_df = pd.read_csv(impaired_odcm_csv, index_col=0, parse_dates=True)
# Recover the origin/destination tract IDs from the combined route "Name" field.
impaired_odcm_df['OriginID'] = impaired_odcm_df.apply(lambda row: get_origin_id_from_odid(row['Name']), axis=1)
impaired_odcm_df['DestinationID'] = impaired_odcm_df.apply(lambda row: get_dest_id_from_odid(row['Name']), axis=1)
impaired_odcm_df.to_csv("impaired_odcm_{0}.csv".format(str(bridge)))
# Join nominal and impaired ODCM dataframes on the shared route "Name" key.
nom_imp_odcm_df = pd.merge(nominal_odcm_df, impaired_odcm_df, how="left", on="Name")
# Join nominal+impaired ODCM dataframe to commute dataframe (inner join; remove anything not common)
commute_impacts_df = pd.merge(commute_df, nom_imp_odcm_df, how="inner", left_on="ORIGIN_DESTINATION_ID", right_on="Name")
commute_impacts_df.to_csv("commute_impacts_odcm_{0}.csv".format(str(bridge)))
# Identify deltas in impedance between the merged columns.
# NOTE(review): after the merge, _x columns come from nominal_odcm_df and _y
# from the impaired ODCM; as written, minutes_diff > 0 selects routes whose
# nominal time exceeds the impaired time — confirm the intended sign.
commute_impacts_df['minutes_diff'] = commute_impacts_df['Total_Minutes_x'] - commute_impacts_df['Total_Minutes_y']
commute_impacts_df['miles_diff'] = commute_impacts_df['Total_Miles_x'] - commute_impacts_df['Total_Miles_y']
impacted_commuters_df = commute_impacts_df.loc[commute_impacts_df['minutes_diff'] > 0]
impacted_commuters_df.to_csv("impacted_commuters_odcm_{0}.csv".format(str(bridge)))
# Calculate count and impedance sum in deltas.
routes_impacted = impacted_commuters_df.shape[0]
# Fixed an accidental duplicated assignment
# (`commuters_impacted = commuters_impacted = ...`).
commuters_impacted = impacted_commuters_df['EST'].sum()
total_additional_minutes = impacted_commuters_df['minutes_diff'].sum()
total_additional_miles = impacted_commuters_df['miles_diff'].sum()
# Record this bridge's impact metrics on the bridges table.
bridges_df.loc[bridge, "routes_impacted"] = routes_impacted
bridges_df.loc[bridge, "commuters_impacted"] = commuters_impacted
bridges_df.loc[bridge, "total_additional_minutes"] = total_additional_minutes
bridges_df.loc[bridge, "total_additional_miles"] = total_additional_miles
print("Complete.")
bridges_df.to_csv(r"C:\Users\albe9057\Documents\GitHub\ArcGISPythonAPI_Projects\Presentation\FedGIS2018\bridges_processed.csv")
```
##### Calculate "Impact Rank" for bridges for entire study area (CBSA) and for each county
```
# Stack rank all bridges based on criticality score
# (largest total_additional_miles gets rank 1 = most critical).
bridges_df['cbsa_rank'] = bridges_df['total_additional_miles'].rank(ascending=False); bridges_df
# Group-by for county rankings
# (ITEM3 appears to be the county field — confirm against the bridges schema).
bridges_df['county_rank'] = bridges_df.groupby('ITEM3')['total_additional_miles'].rank(ascending=False); bridges_df
```
#### 3. Publish Outputs to WebGIS
```
# Publish outputs: write the analyzed table to CSV, then push it to WebGIS.
bridges_df.to_csv(r"C:\Users\albe9057\Documents\GitHub\ArcGISPythonAPI_Projects\Presentation\FedGIS2018\bridges_analyzed.csv")
# bridges_processed_csv = r"C:\Users\albe9057\Documents\GitHub\ArcGISPythonAPI_Projects\Presentation\FedGIS2018\bridges_processed.csv"
bridges_analyzed_csv = r"C:\Users\albe9057\Documents\GitHub\ArcGISPythonAPI_Projects\Presentation\FedGIS2018\bridges_analyzed.csv"
# Publish csv item ({} = default item properties for the portal item).
bridges_analyzed_csv_item = gis.content.add({}, bridges_analyzed_csv)
# Convert csv item to hosted layer in ArcGIS Online
bridges_analyzed_lyr = bridges_analyzed_csv_item.publish()
```
### The ArcGIS API for Python let us document, design, prototype, and run our workflow, helping us get to an analysis that can now be deployed.
# 4. Interpret Results
```
display(analyzed_bridges_item)
```
#### Map of Analyzed Bridges
```
analyzed_bridges_map = gis.map('Fairfax County', zoomlevel=8)
analyzed_bridges_map.basemap = 'gray-vector'
display(analyzed_bridges_map)
analyzed_bridges_map.add_layer(analyzed_bridges_item)
```
#### Map of Most Critical Bridge, with nominal commutes, and alternative commutes
```
most_critical_bridge_map = gis.map('Fairfax County', zoomlevel=8)
most_critical_bridge_map.basemap = 'gray-vector'
display(most_critical_bridge_map)
```
# 5. Repeat as Necessary:
# 6. Present the Results:
# 7. Make a Decision:
| github_jupyter |
# <img style="float: left; padding-right: 10px; width: 150px" src="https://sc.acamica.com/icons/1j7w9h/social-300x300.png"> Acámica DS-COR3 - Ejercicio de Machine Learning
### Mayo 2019
<hr style="height:2pt">
## Descripción
El objetivo de este notebook es ejercitar el "ajuste fino" de un modelo de Machine Learning sobre un conjunto de datos referido a pasajeros del Titanic. Se parte del ejercicio de la semana anterior (predecir la condición de supervivencia de un pasajero del Titanic) para concentrarse ahora en aplicar técnicas de búsqueda de hiper-parámetros para encontrar la mejor configuración posible del modelo a ser ajustado con los datos de entrada.
**Link a dataset:** https://www.kaggle.com/c/titanic/data
**Ejemplos asociados:** Dado que es un dataset popular, se puede encontrar una gran variedad de kernels disponibles para sacar ideas y corroborar otras: https://www.kaggle.com/c/titanic/kernels.
Como guía, se pretende que el pipeline a desarrollar cubra los siguientes aspectos (no necesariamente en este estricto orden):
- **Carga de datos:** Lectura de CSV "train.csv" en un DataFrame para ajustar el modelo, y "test.csv" para validaciones al final.
- **Pre-procesamiento:** Se puede utilizar las features que se obtuvieron en el ejercicio anterior, o bien las de [este notebook](https://www.kaggle.com/sinakhorami/titanic-best-working-classifier) ya sugerido anteriormente.
- **Configurar modelo a ajustar:** En particular, utilizar el algoritmo `sklearn.neighbors.KNeighborsClassifier` para modelar el clasificador. A partir de ello, proponer un rango de valores a explorar por cada uno de los siguientes hiperparámetros:
- `n_neighbors` (decidir el mayor valor posible de K que tenga sentido y no tienda al overfitting).
- `algorithm` (probar los 3 casos: "ball_tree", "kd_tree", "brute")
- `metric` (basarse en la [documentación](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html), y extraer aquellas que tengan sentido a partir del tipo de las features usadas)
- `weights` (probar tanto "uniform" como "distance" para medir impacto).
- **Ajustar modelo:** Aplicar [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) para explorar el espacio de hiper-parámetros definido, utilizando la estrategia de CV que se desee (e.g. K-fold, leave k out, etc).
- **Validar modelo:** Utilizar conjunto de validación para validar el desempeño del mejor modelo ajustado usando métricas adecuadas para problemas de clasificación.
- En este caso se sugiere utilizar precision, recall y f1-score mediante la [API de scikit-learn](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics).
- Además, plotear curva ROC de al menos 3 de los modelos obtenidos en cada experimento de la búsqueda.
> TIP: Recordar que la función [roc_curve](https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html) debe recibir en "y_score" las probabilidades de la clase positiva, no las clases binarias de dichas predicciones (i.e. debe recibir la probabilidad 0.7 en lugar de la clase 1 de una predicción dada).
- **Reajuste del modelo:** A partir de la configuración ganadora, obtener un nuevo modelo ajustado sobre el total de los datos de "train.csv" y validar su desempeño con los de "test.csv" para definir si los resultados son buenos para la tarea asignada.
### Ejercicios extra:
- Responder: Por qué conviene aplicar Grid Search en lugar de Random Search? Cuando conviene al revés? Pensar en el espacio de parámetros y el esfuerzo de ajustar un modelo...
- Utilizar una métrica "personalizada" mediante una función de Python, para medir la distancia entre los puntos del KNN.
- Plotear curva Precision - Recall y responder:
- Cuál es la diferencia principal entre este gráfico y el ROC?
- Cuál sería la mejor métrica a priorizar para el caso "detectar la mayor cantidad de NO supervivientes posible"?
**Recursos**:
- https://medium.com/fintechexplained/how-to-fine-tune-your-machine-learning-models-to-improve-forecasting-accuracy-e18e67e58898
- https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
- https://classeval.wordpress.com/introduction/introduction-to-the-precision-recall-plot/
| github_jupyter |
```
import pycalphad.io.tdb
from pycalphad.io.tdb import expand_keyword
def _process_typedef(targetdb, typechar, line):
    """
    Process the TYPE_DEFINITION command.

    Parses one TDB TYPE_DEFINITION line and stores the resulting model hints
    under ``targetdb.tdbtypedefs[typechar]``.  For order/disorder and
    "never disorders" definitions, the hints are also copied onto the phase
    if it has already been defined (the other ordering is handled in
    _process_phase()).
    """
    # Example: GES A_P_D BCC_A2 MAGNETIC -1 0.4
    tokens = line.replace(',', '').split()
    if len(tokens) < 4:
        # No keyword present; nothing to do.
        return
    # expand_keyword returns a list of matching keywords.  The original code
    # indexed [0] before checking for emptiness, which raised IndexError
    # instead of the intended ValueError on an unknown keyword.
    matches = expand_keyword(['DISORDERED_PART', 'MAGNETIC', 'NEVER_DISORDERS'],
                             tokens[3].upper())
    if len(matches) == 0:
        raise ValueError('Unknown keyword: {}'.format(tokens[3]))
    keyword = matches[0]
    if keyword == 'MAGNETIC':
        # magnetic model (IHJ model assumed by default)
        if len(tokens) < 6:
            # Guard the tokens[4]/tokens[5] accesses with a clear error.
            raise ValueError('Magnetic TYPE_DEFINITION requires AFM and '
                             'structure factors: {}'.format(line))
        targetdb.tdbtypedefs[typechar] = {
            'ihj_magnetic': [float(tokens[4]), float(tokens[5])]
        }
    # Example: GES A_P_D L12_FCC DIS_PART FCC_A1
    if keyword == 'DISORDERED_PART':
        # order-disorder model
        if len(tokens) < 5:
            raise ValueError('DISORDERED_PART TYPE_DEFINITION requires a '
                             'disordered phase name: {}'.format(line))
        targetdb.tdbtypedefs[typechar] = {
            'disordered_phase': tokens[4].upper(),
            'ordered_phase': tokens[2].upper()
        }
        if tokens[2].upper() in targetdb.phases:
            # Since TDB files do not enforce any kind of ordering
            # on the specification of ordered and disordered phases,
            # we need to handle the case of when either phase is specified
            # first. In this case, we imagine the ordered phase is
            # specified first. If the disordered phase is specified
            # first, we will have to catch it in _process_phase().
            targetdb.phases[tokens[2].upper()].model_hints.update(
                targetdb.tdbtypedefs[typechar]
            )
    # Example: GES AMEND_PHASE_DESCRIPTION SIGMA NEVER_DIS SIGMA_DIS
    if keyword == 'NEVER_DISORDERS':
        # (The original re-checked len(tokens) < 4 here; that is already
        # guaranteed false by the guard at the top of the function.)
        targetdb.tdbtypedefs[typechar] = {'never_disorders': True}
        if tokens[2].upper() in targetdb.phases:
            targetdb.phases[tokens[2].upper()].model_hints.update(
                targetdb.tdbtypedefs[typechar]
            )
def _process_phase(targetdb, name, typedefs, subls):
"""
Process the PHASE command.
"""
splitname = name.split(':')
phase_name = splitname[0].upper()
options = None
if len(splitname) > 1:
options = splitname[1]
targetdb.add_structure_entry(phase_name, phase_name)
model_hints = {}
for typedef in list(typedefs):
if typedef in targetdb.tdbtypedefs.keys():
if 'ihj_magnetic' in targetdb.tdbtypedefs[typedef].keys():
model_hints['ihj_magnetic_afm_factor'] = \
targetdb.tdbtypedefs[typedef]['ihj_magnetic'][0]
model_hints['ihj_magnetic_structure_factor'] = \
targetdb.tdbtypedefs[typedef]['ihj_magnetic'][1]
if 'ordered_phase' in targetdb.tdbtypedefs[typedef].keys():
model_hints['ordered_phase'] = \
targetdb.tdbtypedefs[typedef]['ordered_phase']
model_hints['disordered_phase'] = \
targetdb.tdbtypedefs[typedef]['disordered_phase']
if model_hints['disordered_phase'] in targetdb.phases:
targetdb.phases[model_hints['disordered_phase']]\
.model_hints.update({'ordered_phase': model_hints['ordered_phase'],
'disordered_phase': model_hints['disordered_phase']})
if 'never_disorders' in targetdb.tdbtypedefs[typedef].keys():
model_hints['never_disorders'] = \
targetdb.tdbtypedefs[typedef]['never_disorders']
targetdb.add_phase(phase_name, model_hints, subls)
# Monkey-patch pycalphad's TDB reader so TYPE_DEFINITION and PHASE commands
# are handled by the extended processors defined above (adding the
# NEVER_DISORDERS keyword support).
pycalphad.io.tdb._TDB_PROCESSOR['TYPE_DEFINITION'] = _process_typedef
pycalphad.io.tdb._TDB_PROCESSOR['PHASE'] = _process_phase
from pycalphad import Model
from sympy import S, Add
import pycalphad.variables as v
class NeverDisordersModel(Model):
    # Extends the standard pycalphad Model with an ordering energy that
    # honours the 'never_disorders' model hint set by the patched
    # TYPE_DEFINITION processor above.
    def atomic_ordering_energy(self, dbe):
        """
        Return the atomic ordering contribution in symbolic form.
        Description follows Servant and Ansara, Calphad, 2001.
        Also includes "never disordering" model following Lukas, Fries and Sundman, 2007, p. 145.
        """
        phase = dbe.phases[self.phase_name]
        ordered_phase_name = phase.model_hints.get('ordered_phase', None)
        disordered_phase_name = phase.model_hints.get('disordered_phase', None)
        never_disorders = phase.model_hints.get('never_disorders', False)
        # Only the ordered member of an order/disorder pair carries this term.
        if phase.name != ordered_phase_name:
            return S.Zero
        # Build a model of the disordered partner so its energy can be
        # re-expressed in terms of the ordered phase's site fractions.
        disordered_model = self.__class__(dbe, sorted(self.components),
                                          disordered_phase_name)
        constituents = [sorted(set(c).intersection(self.components)) \
                        for c in dbe.phases[ordered_phase_name].constituents]
        # Fix variable names
        variable_rename_dict = {}
        for atom in disordered_model.energy.atoms(v.SiteFraction):
            # Replace disordered phase site fractions with mole fractions of
            # ordered phase site fractions.
            # Special case: Pure vacancy sublattices
            all_species_in_sublattice = \
                dbe.phases[disordered_phase_name].constituents[
                    atom.sublattice_index]
            if atom.species == 'VA' and len(all_species_in_sublattice) == 1:
                # Assume: Pure vacancy sublattices are always last
                vacancy_subl_index = \
                    len(dbe.phases[ordered_phase_name].constituents)-1
                variable_rename_dict[atom] = \
                    v.SiteFraction(
                        ordered_phase_name, vacancy_subl_index, atom.species)
            else:
                # All other cases: replace site fraction with mole fraction
                variable_rename_dict[atom] = \
                    self.mole_fraction(
                        atom.species,
                        ordered_phase_name,
                        constituents,
                        dbe.phases[ordered_phase_name].sublattices
                    )
        # Save all of the ordered energy contributions
        # This step is why this routine must be called _last_ in build_phase
        ordered_energy = Add(*list(self.models.values()))
        self.models.clear()
        # Copy the disordered energy contributions into the correct bins
        for name, value in disordered_model.models.items():
            self.models[name] = value.xreplace(variable_rename_dict)
        # All magnetic parameters will be defined in the disordered model
        self.TC = self.curie_temperature = disordered_model.TC
        self.TC = self.curie_temperature = self.TC.xreplace(variable_rename_dict)
        molefraction_dict = {}
        # Construct a dictionary that replaces every site fraction with its
        # corresponding mole fraction in the disordered state
        for sitefrac in ordered_energy.atoms(v.SiteFraction):
            all_species_in_sublattice = \
                dbe.phases[ordered_phase_name].constituents[
                    sitefrac.sublattice_index]
            if sitefrac.species == 'VA' and len(all_species_in_sublattice) == 1:
                # pure-vacancy sublattices should not be replaced
                # this handles cases like AL,NI,VA:AL,NI,VA:VA and
                # ensures the VA's don't get mixed up
                continue
            molefraction_dict[sitefrac] = \
                self.mole_fraction(sitefrac.species,
                                   ordered_phase_name, constituents,
                                   dbe.phases[ordered_phase_name].sublattices)
        if never_disorders:
            # "Never disorders" variant: keep the full ordered energy and
            # drop the disordered ideal-mixing bin instead of subtracting
            # the energy evaluated at the disordered composition.
            # Remove disordered model ideal mixing contribution
            self.models['idmix'] = 0.0
            return ordered_energy
        else:
            return ordered_energy - ordered_energy.subs(molefraction_dict,
                                                        simultaneous=True)
# Render an isothermal ternary section of the Mo-Ni-Re system.
%matplotlib inline
import matplotlib.pyplot as plt
from pycalphad import Database, ternplot
dbf = Database('MoNiRe_BEF.TDB')
fig = plt.figure(figsize=(9,6))
# Conditions: fixed T (500 K) and P; sweep Ni and Re mole fractions in
# steps of 0.015 over the composition simplex.
conds = {v.T: 500, v.P:101325, v.X('NI'): (0,1,0.015), v.X('RE'): (0,1,0.015)}
phases = sorted(dbf.phases.keys())
# Use the custom model class so the "never disorders" hint is honoured.
ternplot(dbf, ['MO', 'NI', 'RE', 'VA'], phases, conds,
         x=v.X('NI'), y=v.X('RE'), eq_kwargs={'model': NeverDisordersModel})
plt.show()
# Display the phase list in the notebook output.
sorted(dbf.phases.keys())
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Introduction to graphs and functions
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/intro_to_graphs"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
# Introduction to Graphs and `tf.function`
This guide goes beneath the surface of TensorFlow and Keras to see how TensorFlow works. If you instead want to immediately get started with Keras, please see [our collection of Keras guides](keras/).
In this guide you'll see the core of how TensorFlow allows you to make simple changes to your code to get graphs, and how they are stored and represented, and how you can use them to accelerate and export your models.
Note: For those of you who are only familiar with TensorFlow 1.x, this guide demonstrates a very different view of graphs.
This is a short-form introduction; for a full introduction to these concepts, see [the `tf.function` guide](function).
## What are graphs?
In the previous three guides, you have seen TensorFlow running **eagerly**. This means TensorFlow operations are executed by Python, operation by operation, and returning results back to Python. Eager TensorFlow takes advantage of GPUs, allowing you to place variables, tensors, and even operations on GPUs and TPUs. It is also easy to debug.
For some users, you may never need or want to leave Python.
However, running TensorFlow op-by-op in Python prevents a host of accelerations otherwise available. If you can extract tensor computations from Python, you can make them into a *graph*.
**Graphs are data structures that contain a set of `tf.Operation` objects, which represent units of computation; and `tf.Tensor` objects, which represent the units of data that flow between operations.** They are defined in a `tf.Graph` context. Since these graphs are data structures, they can be saved, run, and restored all without the original Python code.
This is what a simple two-layer graph looks like when visualized in TensorBoard.

## The benefits of graphs
With a graph, you have a great deal of flexibility. You can use your TensorFlow graph in environments that don't have a Python interpreter, like mobile applications, embedded devices, and backend servers. TensorFlow uses graphs as the format for saved models when it exports them from Python.
Graphs are also easily optimized, allowing the compiler to do transformations like:
* Statically infer the value of tensors by folding constant nodes in your computation *("constant folding")*.
* Separate sub-parts of a computation that are independent and split them between threads or devices.
* Simplify arithmetic operations by eliminating common subexpressions.
There is an entire optimization system, [Grappler](./graph_optimization.ipynb), to perform this and other speedups.
In short, graphs are extremely useful and let your TensorFlow run **fast**, run **in parallel**, and run efficiently **on multiple devices**.
However, you still want to define our machine learning models (or other computations) in Python for convenience, and then automatically construct graphs when you need them.
# Tracing graphs
The way you create a graph in TensorFlow is to use `tf.function`, either as a direct call or as a decorator.
```
import tensorflow as tf
import timeit
from datetime import datetime
# Define a Python function
def function_to_get_faster(x, y, b):
x = tf.matmul(x, y)
x = x + b
return x
# Create a `Function` object that contains a graph
a_function_that_uses_a_graph = tf.function(function_to_get_faster)
# Make some tensors
x1 = tf.constant([[1.0, 2.0]])
y1 = tf.constant([[2.0], [3.0]])
b1 = tf.constant(4.0)
# It just works!
a_function_that_uses_a_graph(x1, y1, b1).numpy()
```
`tf.function`-ized functions are [Python callables]() that work the same as their Python equivalents. They have a particular class (`python.eager.def_function.Function`), but to you they act just as the non-traced version.
`tf.function` recursively traces any Python function it calls.
```
def inner_function(x, y, b):
x = tf.matmul(x, y)
x = x + b
return x
# Use the decorator
@tf.function
def outer_function(x):
y = tf.constant([[2.0], [3.0]])
b = tf.constant(4.0)
return inner_function(x, y, b)
# Note that the callable will create a graph that
# includes inner_function() as well as outer_function()
outer_function(tf.constant([[1.0, 2.0]])).numpy()
```
If you have used TensorFlow 1.x, you will notice that at no time did you need to define a `Placeholder` or `tf.Session`.
## Flow control and side effects
Flow control and loops are converted to TensorFlow via `tf.autograph` by default. Autograph uses a combination of methods, including standardizing loop constructs, unrolling, and [AST](https://docs.python.org/3/library/ast.html) manipulation.
```
def my_function(x):
if tf.reduce_sum(x) <= 1:
return x * x
else:
return x-1
a_function = tf.function(my_function)
print("First branch, with graph:", a_function(tf.constant(1.0)).numpy())
print("Second branch, with graph:", a_function(tf.constant([5.0, 5.0])).numpy())
```
You can directly call the Autograph conversion to see how Python is converted into TensorFlow ops. This is, mostly, unreadable, but you can see the transformation.
```
# Don't read the output too carefully.
print(tf.autograph.to_code(my_function))
```
Autograph automatically converts `if-then` clauses, loops, `break`, `return`, `continue`, and more.
Most of the time, Autograph will work without special considerations. However, there are some caveats, and the [tf.function guide](./function.ipynb) can help here, as well as the [complete autograph reference](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md)
## Seeing the speed up
Just wrapping a tensor-using function in `tf.function` does not automatically speed up your code. For small functions called a few times on a single machine, the overhead of calling a graph or graph fragment may dominate runtime. Also, if most of the computation was already happening on an accelerator, such as stacks of GPU-heavy convolutions, the graph speedup won't be large.
For complicated computations, graphs can provide a significant speedup. This is because graphs reduce the Python-to-device communication and perform some speedups.
This code times a few runs on some small dense layers.
```
# Create an override model to classify pictures
class SequentialModel(tf.keras.Model):
    """Small dense classifier used to compare eager vs. graph call timing."""

    def __init__(self, **kwargs):
        super(SequentialModel, self).__init__(**kwargs)
        self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
        self.dense_1 = tf.keras.layers.Dense(128, activation="relu")
        self.dropout = tf.keras.layers.Dropout(0.2)
        self.dense_2 = tf.keras.layers.Dense(10)

    def call(self, x):
        # Flatten -> dense(relu) -> dropout -> dense(logits)
        x = self.flatten(x)
        x = self.dense_1(x)
        x = self.dropout(x)
        x = self.dense_2(x)
        return x

input_data = tf.random.uniform([60, 28, 28])

eager_model = SequentialModel()
# Wrapping the model in tf.function traces it into a graph on first call.
graph_model = tf.function(eager_model)

print("Eager time:", timeit.timeit(lambda: eager_model(input_data), number=10000))
print("Graph time:", timeit.timeit(lambda: graph_model(input_data), number=10000))
```
### Polymorphic functions
When you trace a function, you create a `Function` object that is **polymorphic**. A polymorphic function is a Python callable that encapsulates several concrete function graphs behind one API.
You can use this `Function` on all different kinds of `dtypes` and shapes. Each time you invoke it with a new argument signature, the original function gets re-traced with the new arguments. The `Function` then stores the `tf.Graph` corresponding to that trace in a `concrete_function`. If the function has already been traced with that kind of argument, you just get your pre-traced graph.
Conceptually, then:
* A **`tf.Graph`** is the raw, portable data structure describing a computation
* A **`Function`** is a caching, tracing, dispatcher over ConcreteFunctions
* A **`ConcreteFunction`** is an eager-compatible wrapper around a graph that lets you execute the graph from Python
### Inspecting polymorphic functions
You can inspect `a_function`, which is the result of calling `tf.function` on the Python function `my_function`. In this example, calling `a_function` with three kinds of arguments results in three different concrete functions.
```
print(a_function)
print("Calling a `Function`:")
print("Int:", a_function(tf.constant(2)))
print("Float:", a_function(tf.constant(2.0)))
print("Rank-1 tensor of floats", a_function(tf.constant([2.0, 2.0, 2.0])))
# Get the concrete function that works on floats
print("Inspecting concrete functions")
print("Concrete function for float:")
print(a_function.get_concrete_function(tf.TensorSpec(shape=[], dtype=tf.float32)))
print("Concrete function for tensor of floats:")
print(a_function.get_concrete_function(tf.constant([2.0, 2.0, 2.0])))
# Concrete functions are callable
# Note: You won't normally do this, but instead just call the containing `Function`
cf = a_function.get_concrete_function(tf.constant(2))
print("Directly calling a concrete function:", cf(tf.constant(2)))
```
In this example, you are seeing pretty far into the stack. Unless you are specifically managing tracing, you will not normally need to call concrete functions directly as shown here.
# Reverting to eager execution
You may find yourself looking at long stack traces, specially ones that refer to `tf.Graph` or `with tf.Graph().as_default()`. This means you are likely running in a graph context. Core functions in TensorFlow use graph contexts, such as Keras's `model.fit()`.
It is often much easier to debug eager execution. Stack traces should be relatively short and easy to comprehend.
In situations where the graph makes debugging tricky, you can revert to using eager execution to debug.
Here are ways you can make sure you are running eagerly:
* Call models and layers directly as callables
* When using Keras compile/fit, at compile time use **`model.compile(run_eagerly=True)`**
* Set global execution mode via **`tf.config.run_functions_eagerly(True)`**
### Using `run_eagerly=True`
```
# Define an identity layer with an eager side effect
class EagerLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(EagerLayer, self).__init__(**kwargs)
# Do some kind of initialization here
def call(self, inputs):
print("\nCurrently running eagerly", str(datetime.now()))
return inputs
# Create an override model to classify pictures, adding the custom layer
class SequentialModel(tf.keras.Model):
def __init__(self):
super(SequentialModel, self).__init__()
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
self.dense_1 = tf.keras.layers.Dense(128, activation="relu")
self.dropout = tf.keras.layers.Dropout(0.2)
self.dense_2 = tf.keras.layers.Dense(10)
self.eager = EagerLayer()
def call(self, x):
x = self.flatten(x)
x = self.dense_1(x)
x = self.dropout(x)
x = self.dense_2(x)
return self.eager(x)
# Create an instance of this model
model = SequentialModel()
# Generate some nonsense pictures and labels
input_data = tf.random.uniform([60, 28, 28])
labels = tf.random.uniform([60])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
```
First, compile the model without eager. Note that the model is not traced; despite its name, `compile` only sets up loss functions, optimization, and other training parameters.
```
model.compile(run_eagerly=False, loss=loss_fn)
```
Now, call `fit` and see that the function is traced (twice) and then the eager effect never runs again.
```
model.fit(input_data, labels, epochs=3)
```
If you run even a single epoch in eager, however, you can see the eager side effect twice.
```
print("Running eagerly")
# When compiling the model, set it to run eagerly
model.compile(run_eagerly=True, loss=loss_fn)
model.fit(input_data, labels, epochs=1)
```
### Using `run_functions_eagerly`
You can also globally set everything to run eagerly. This is a switch that bypasses the polymorphic function's traced functions and calls the original function directly. You can use this for debugging.
```
# Now, globally set everything to run eagerly
tf.config.run_functions_eagerly(True)
print("Run all functions eagerly.")

# Create a polymorphic function
polymorphic_function = tf.function(model)

print("Tracing")
# This does, in fact, trace the function
print(polymorphic_function.get_concrete_function(input_data))

print("\nCalling twice eagerly")
# When you run the function again, you will see the side effect
# twice, as the function is running eagerly.
result = polymorphic_function(input_data)
result = polymorphic_function(input_data)

# Don't forget to set it back when you are done.
# Use the same non-experimental API that enabled eager mode above;
# tf.config.experimental_run_functions_eagerly is deprecated in favor of
# tf.config.run_functions_eagerly.
tf.config.run_functions_eagerly(False)
```
# Tracing and performance
Tracing costs some overhead. Although tracing small functions is quick, large models can take noticeable wall-clock time to trace. This investment is usually quickly paid back with a performance boost, but it's important to be aware that the first few epochs of any large model training can be slower due to tracing.
No matter how large your model, you want to avoid tracing frequently. This [section of the tf.function guide](function.ipynb#when_to_retrace) discusses how to set input specifications and use tensor arguments to avoid retracing. If you find you are getting unusually poor performance, it's good to check to see if you are retracing accidentally.
You can add an eager-only side effect (such as printing a Python argument) so you can see when the function is being traced. Here, you see extra retracing because new Python arguments always trigger retracing.
```
# Use @tf.function decorator
@tf.function
def a_function_with_python_side_effect(x):
print("Tracing!") # This eager
return x * x + tf.constant(2)
# This is traced the first time
print(a_function_with_python_side_effect(tf.constant(2)))
# The second time through, you won't see the side effect
print(a_function_with_python_side_effect(tf.constant(3)))
# This retraces each time the Python argument changes,
# as a Python argument could be an epoch count or other
# hyperparameter
print(a_function_with_python_side_effect(2))
print(a_function_with_python_side_effect(3))
```
# Next steps
You can read a more in-depth discussion at both the `tf.function` API reference page and at the [guide](./function.ipynb).
| github_jupyter |
```
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import metrics
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.utils import shuffle
df = pd.read_csv('output_http_csic_2010_weka_with_duplications_utf8_escd_v02_full.csv')
df.head()
# Remove columns that contain the same value
df = df.drop(['userAgent', 'pragma', 'cacheControl', 'acceptEncoding', 'acceptCharset', 'acceptLanguage'], 1)
df = df.drop(['connection', 'cookie', 'accept', 'protocol'], 1)
# Since everything is localhost let's keep only the port and remove the host part
df['port'] = df['host'].str.split(':', expand=True)[1]
df = df.drop(['host'], 1)
df.head()
# Split the dataset in two to avoid mixed indices
df_anom = df[df['label']=='anom']
df_norm = df[df['label']=='norm']
print df_anom.describe()
print df_norm.describe()
df2_anom = df_anom[['index', 'payload', 'label']]
df2_anom = df2_anom.dropna()
print(df2_anom.head())
df2_norm = df_norm[['index', 'payload', 'label']]
df2_norm = df2_norm.dropna()
print(df2_norm.head())
#df3 = df2[['payload','label']].groupby(df2['index']).sum()
#df3 = df2[['payload','label']].groupby(df2['index']).agg(lambda x: ' '.join(set(x)))
df3_anom = df2_anom[['payload','label']].groupby(df2_anom['index']).agg(lambda x: ' '.join(set(x)))
df3_anom["payload"] = df3_anom['payload'].apply(lambda x: x.replace("=", " "))
print(df3_anom.head())
df3_anom['label'] = 1
print(df3_anom.head())
df3_norm = df2_norm[['payload','label']].groupby(df2_norm['index']).agg(lambda x: ' '.join(set(x)))
df3_norm["payload"] = df3_norm['payload'].apply(lambda x: x.replace("=", " "))
print(df3_norm.head())
df3_norm['label'] = 0
print(df3_norm.head())
df4 = pd.concat([df3_norm, df3_anom])
print(df4.head())
print(df4.describe())
print(df4.label.value_counts())
# Vectorize the payload with TF-IDF over word tri-grams
# (the original comment said "character n-grams", but analyzer='word' is used;
# the commented-out alternative below is the character 6-gram variant).
vec = TfidfVectorizer(analyzer='word',ngram_range=(3,3))
#vec = HashingVectorizer(analyzer='char',ngram_range=(6,6))
y = df4['label']
X = vec.fit_transform(df4['payload'].dropna())
print(X.shape, y.shape)
# Use a chi-squared test to select the 600 features most dependent on the label
ch2 = SelectKBest(chi2, k=600)
X_train = ch2.fit_transform(X, y)
print(X_train.shape)
# Shuffle, then hold out the last 20% of rows as a test split
X1, y1 = shuffle(X_train, y)
offset = int(X1.shape[0] * 0.8)
def _report(y_true, pred):
    """Print and return (accuracy, F1 score, confusion matrix) for one run.

    Factors out the metric/print boilerplate that was copy-pasted after
    every classifier below.
    """
    accuracy = metrics.accuracy_score(y_true, pred)
    f1_score = metrics.f1_score(y_true, pred)
    conf_matrix = metrics.confusion_matrix(y_true, pred)
    print(accuracy, f1_score)
    print(conf_matrix)
    return accuracy, f1_score, conf_matrix

# Random Forest Classifier
clf = RandomForestClassifier(n_estimators=1000)
clf.fit(X1[:offset], y1[:offset])
pred = clf.predict(X1[offset:, :])
accuracy, f1_score, conf_matrix = _report(y1[offset:], pred)

# Gradient Boosting Classifier (needs a dense matrix for prediction)
clf = GradientBoostingClassifier(n_estimators=1000)
clf.fit(X1[:offset], y1[:offset])
pred = clf.predict(X1[offset:, :].toarray())
accuracy, f1_score, conf_matrix = _report(y1[offset:], pred)

# Linear SVM
clf = LinearSVC(penalty="l2", dual=False, tol=2, C=1, max_iter=10000)
clf.fit(X1[:offset], y1[:offset])
pred = clf.predict(X1[offset:, :])
accuracy, f1_score, conf_matrix = _report(y1[offset:], pred)

# k-Nearest Neighbours (dense input for both fit and predict)
clf = KNeighborsClassifier(n_neighbors=20)
clf.fit(X1[:offset].toarray(), y1.values[:offset])
pred = clf.predict(X1[offset:, :].toarray())
accuracy, f1_score, conf_matrix = _report(y1[offset:], pred)

# Linear model trained with stochastic gradient descent
clf = SGDClassifier(alpha=.01, n_iter=10000)
clf.fit(X1[:offset], y1[:offset])
pred = clf.predict(X1[offset:, :])
accuracy, f1_score, conf_matrix = _report(y1[offset:], pred)

# Passive-Aggressive classifier (dense input)
clf = PassiveAggressiveClassifier(n_iter=50)
clf.fit(X1[:offset].toarray(), y1[:offset])
pred = clf.predict(X1[offset:, :].toarray())
accuracy, f1_score, conf_matrix = _report(y1[offset:], pred)
```
| github_jupyter |
# Introduction
In this study, we will start by studying the applicability of using the raw data from the seismometer as an input to the neural network for recognition and prediction of earthquakes. The raw data is a time series data of 100 Hz frequency which describes the vertical motion of the earth. The figure below shows a simple illustration of how it is measured. Note that the current tools rely on more sophisticated and accurate instruments, but the concept is still the same.
As mentioned above, two problems are studied described in the figure above. Obviously, the second problem is more complicated as there is no known method to do this. In fact, it is not known that the signal contain information for the prediction. The two problems are:
1. The recognition of the earthquakes signal after they occur.
2. The prediction of earthquakes signal before they occur.
<img src="Figures/For Notbooks/Seismometer.png" width="700">
# Dataset
The dataset was harvested from servers specifically for this study. After compiling numerous datasets with varying [minimum earthquake magnitude](https://en.wikipedia.org/wiki/Richter_magnitude_scale), lateral coverage, and number of stations, this study area ([The Geysers](https://en.wikipedia.org/wiki/The_Geysers)) is specifically chosen because:
1. It contains a spatially dense seismometers coverage.
2. The seismometers have been operating for a relatively long time.
3. The area is tectonically active because of the existence of the geysers.
4. Enhanced geothermal systems are operating in the area, creating more earthquakes.
<img src="Figures/For Notbooks/Map.png" width="600">
# Package and helper functions
We will use [Keras](https://keras.io/) to build the neural network, [Matplotlib](https://matplotlib.org/) for plotting results if needed, and [Numpy](http://www.numpy.org/) for matrix manipulation.
```
# Numpy (The God package)
import numpy as np
# MatplotLib
import matplotlib.pyplot as plt
from matplotlib import mlab
from matplotlib.colors import Normalize
# Other
import math as M
from skimage.transform import resize
from obspy.imaging.cm import obspy_sequential
# Keras
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Conv2D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape, Flatten, MaxPooling1D, MaxPooling2D
from keras.optimizers import Adam
# Sklearn
from sklearn.metrics import confusion_matrix
```
We will use this cell to create some helper functions that will be used in the analysis. We decided to have all the functions needed in one script to facilitate readability and understanding.
```
def splitDataset2D(X, Y, proportions=np.array([.8, .2])):
    """
    Splits the dataset into a number of smaller datasets with given ratios.

    Arguments:
    X -- Input dataset of shape (m, height, width, channels)
    Y -- Labels of shape (m, nLabels)
    proportions -- Ratio of each split; if they do not sum to one, a final
                   split holding the remainder is appended automatically.

    Returns:
    Xs -- List of split datasets
    Ys -- List of split labels
    """
    # Make sure proportions are a numpy array
    proportions = np.array(proportions)
    # BUG FIX: the remainder split used to be computed into an unused local
    # ('ratios'); it must replace 'proportions' to actually take effect.
    if np.sum(proportions) != 1:
        proportions = np.concatenate((proportions, np.array([1 - np.sum(proportions)])))
    # Shuffle sample indices so each split is a random subset.
    m = X.shape[0]
    randomIndecies = np.random.permutation(m)
    # Cumulative sample-count boundaries of each split (dead duplicate
    # computation of nSamples removed).
    nSamples = np.concatenate((np.array([0]), np.round(proportions * m)))
    nSamples = np.cumsum(nSamples).astype(int)
    # Extract the splits.
    Xs = []
    Ys = []
    for i in range(proportions.size):
        chosenSamples = randomIndecies[nSamples[i]:nSamples[i + 1]]
        Xs.append(X[chosenSamples, :, :, :])
        Ys.append(Y[chosenSamples, :])
    return Xs, Ys
# ======================================================================
def normalizeData(data, axis=None, keepdims=True):
    """
    Z-score data along a given axis.

    Arguments:
    data -- Array to be normalized
    axis -- Axis along which to normalize (None normalizes over all elements)
    keepdims -- Whether reduced axes are kept with size one for broadcasting

    Returns:
    Array of the same shape with zero mean and unit standard deviation
    along the chosen axis.
    """
    mean = np.mean(data, axis=axis, keepdims=keepdims)
    std = np.std(data, axis=axis, keepdims=keepdims)
    return (data - mean) / std
# ======================================================================
def spectrogramAnalysis(signal, fs, newShape=None, isNormalize=False, isPlot=False):
    """
    Compute a spectrogram and optionally resize, normalize, and plot it.

    Arguments:
    signal -- 1D data to be analyzed
    fs -- Sampling frequency (Hz)
    newShape -- Target (height, width) to resize the spectrogram to, or None
    isNormalize -- Z-score normalize the spectrogram when True
    isPlot -- Show the spectrogram in a figure when True

    Returns:
    specgram -- Spectrogram (2D image, 1 channel)
    freq -- Frequency vector
    time -- Time vector
    """
    specgram, freq, time = getSpectrogram(signal, fs, dbscale=True)
    # 'is not None' instead of '!= None': identity test is the correct idiom
    # and avoids ambiguous elementwise comparison if an array is ever passed.
    if newShape is not None:
        specgram = resize(specgram, newShape)
    if isNormalize:
        specgram = normalizeData(specgram)
    if isPlot:
        fig = plt.figure(num=None, figsize=(10, 8), dpi=100)
        plt.imshow(specgram)
        # Put low frequencies at the bottom of the image.
        plt.gca().invert_yaxis()
    return specgram, freq, time
# ======================================================================
def plotAccuracy(history, isError=True):
    """
    Plot training/validation accuracy (or error) per epoch.

    Arguments:
    history -- Keras History object (reads history.history['acc'] and
               history.history['val_acc'])
    isError -- Plot error (1 - accuracy) instead of accuracy when True
    """
    curves = {
        "Training set": np.array(history.history["acc"]),
        "Validation set": np.array(history.history["val_acc"]),
    }
    handles = []
    for label, values in curves.items():
        if isError:
            values = 1 - values
        line, = plt.plot(values, label=label)
        handles.append(line)
    plt.ylabel('Error (fraction)' if isError else 'Accuracy (fraction)')
    plt.xlabel('Epoch')
    plt.legend(handles=handles)
# ======================================================================
def getSpectrogram(data, samp_rate, per_lap=0.9, wlen=None, log=False,
                   outfile=None, fmt=None, axes=None, dbscale=False,
                   mult=8.0, zorder=None, title=None,
                   show=True, sphinx=False, clip=(0.0, 1.0)):
    """
    Compute the spectrogram of a 1D signal (adapted from ObsPy internals).

    Arguments:
    data -- 1D signal
    samp_rate -- Sampling rate (Hz)
    per_lap -- Fraction of overlap between consecutive FFT windows
    wlen -- Window length in seconds (defaults to samp_rate / 100)
    dbscale -- Return 10*log10(power) instead of sqrt(power)
    mult -- Zero-padding factor (pad_to becomes nearest_pow_2(mult) * nfft)
    clip -- (vmin, vmax) fractions in [0, 1]; validated here. Default is now
            an immutable tuple instead of a mutable list.
    log, outfile, fmt, axes, zorder, title, show, sphinx -- Unused here; kept
            only so callers of the original ObsPy-style signature keep working.

    Returns:
    specgram -- 2D spectrogram (freq x time) with the zero-frequency row removed
    freq -- Frequency vector (zero-frequency entry removed)
    time -- Time vector
    """
    # enforce float for samp_rate
    samp_rate = float(samp_rate)
    # set wlen from samp_rate if not specified otherwise
    if not wlen:
        wlen = samp_rate / 100.
    npts = len(data)
    # nfft needs to be an integer, otherwise a deprecation will be raised
    nfft = int(_nearest_pow_2(wlen * samp_rate))
    if nfft > npts:
        nfft = int(_nearest_pow_2(npts / 8.0))
    if mult is not None:
        mult = int(_nearest_pow_2(mult))
        mult = mult * nfft
    nlap = int(nfft * float(per_lap))
    # Remove the DC offset before the FFT.
    data = data - data.mean()
    specgram, freq, time = mlab.specgram(data, Fs=samp_rate, NFFT=nfft,
                                         pad_to=mult, noverlap=nlap)
    # db scale and remove zero/offset for amplitude
    if dbscale:
        specgram = 10 * np.log10(specgram[1:, :])
    else:
        specgram = np.sqrt(specgram[1:, :])
    freq = freq[1:]
    # Validate clip for API compatibility. The Normalize object the original
    # ObsPy code built from it was dead code here (plotting only) and has
    # been removed, along with the unused 'end' variable.
    vmin, vmax = clip
    if vmin < 0 or vmax > 1 or vmin >= vmax:
        msg = "Invalid parameters for clip option."
        raise ValueError(msg)
    return specgram, freq, time
# ======================================================================
def _nearest_pow_2(x):
"""
Finds power of two nearest to x.
Argument:
x -- number
Returns:
a/b -- Nearest power of 2 to x
"""
a = M.pow(2, M.ceil(np.log2(x)))
b = M.pow(2, M.floor(np.log2(x)))
if abs(a - x) < abs(b - x):
return a
else:
return b
```
# Dataset
Now, we will load the dataset. The dataset is already saved in binary numpy format using another script written specifically for this study. The data loaded is the time series data and the labels. Note that we have two datasets.
```
# Experiment configuration.
folderName = "Datasets"
datasetNumber = 9
testPercent = .1
fs = 100  # sampling frequency of the input time series (Hz)
spectogramShape = [64, 128]
# os.path.join keeps the paths portable; the original 'folderName + "\\..."'
# hard-coded a Windows path separator.
import os
data = np.load(os.path.join(folderName, "Data_D" + str(datasetNumber) + ".npy"))
label = np.load(os.path.join(folderName, "Label_D" + str(datasetNumber) + ".npy"))
# for recognition datasets (12, 13, 14)
if datasetNumber in (12, 13, 14):
    data = data[25000:35000, :, :]
# for prediction datasets (9, 10, 11)
if datasetNumber in (9, 10, 11):
    data = data[29500:30000, :, :]
```
We will calculate the spectrogram for each 1D signal. The spectrogram images are then collected as a dataset that will be used in the analysis.
```
# Preview one example spectrogram (index 80 along axis 1, channel 0) to
# sanity-check the settings before the full conversion.
specgram, freq, time = spectrogramAnalysis(data[:,80,0], fs, spectogramShape, isPlot=True)
# Calculate all spectrograms (one image per axis-1 / axis-2 index pair)
dataSpectrogram = np.zeros((data.shape[1], spectogramShape[0], spectogramShape[1], data.shape[2]))
for i in range(data.shape[1]):
    for k in range(data.shape[2]):
        dataToAnalyze = data[:,i,k]
        specgram, freq, time = spectrogramAnalysis(dataToAnalyze, fs, spectogramShape)
        dataSpectrogram[i,:, :,k] = specgram
# Calculate the training percentage
trainingPercent = 1 - testPercent;
# Split the dataset into train/test subsets
Xs, Ys = splitDataset2D(dataSpectrogram, label, proportions=[trainingPercent, testPercent])
# Get training data (only the first label column is used)
xTrain = Xs[0]
yTrain = Ys[0][:,0]
# Get test data
xTest = Xs[1]
yTest = Ys[1][:,0]
```
# Neural network model
The model that will be used for this 2D convolutional network is as described in the figure.
```
def constructModel(inputShape):
    """
    Build the 2D-convolutional Keras model used for spectrogram classification.

    Argument:
    inputShape -- shape of one input sample (using Keras conventions)

    Returns:
    model -- Keras Model instance with a single sigmoid output unit
    """
    # (filters, kernel, dilation, pool, dropout?) for each conv block;
    # kernels alternate between [1,4] and [4,1] orientations.
    convBlocks = [
        (16, [1, 4], 1, [1, 4], True),
        (32, [4, 1], 2, [2, 1], True),
        (32, [1, 4], 3, [1, 2], False),
        (32, [4, 1], 4, [2, 1], False),
    ]
    xInput = Input(shape=inputShape)
    net = xInput
    for filters, kernel, dilation, pool, useDropout in convBlocks:
        net = Conv2D(filters=filters, kernel_size=kernel, strides=1,
                     dilation_rate=dilation)(net)
        net = MaxPooling2D(pool_size=pool, strides=None, padding='valid')(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
        if useDropout:
            net = Dropout(.2)(net)
    net = Flatten()(net)
    # Single sigmoid unit for binary classification.
    net = Dense(1, activation="sigmoid")(net)
    model = Model(inputs=xInput, outputs=net)
    return model
# Instantiate the model for the spectrogram input shape and print its layers.
model = constructModel(inputShape = (xTrain.shape[1], xTrain.shape[2], xTrain.shape[3]))
model.summary()
```
# Training the model
```
# Re-create the model so training always starts from fresh weights.
model = constructModel(inputShape = (xTrain.shape[1], xTrain.shape[2], xTrain.shape[3]))
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
# 10% of the training data is held out for validation each epoch.
history = model.fit(xTrain, yTrain, batch_size = 256, epochs=5, validation_split=.1)
loss, acc = model.evaluate(xTest, yTest)
print("Test set accuracy = ", acc)
# Baseline: accuracy of always predicting the majority (label 0) class.
print("Guess Accuracy = ", 1-np.sum(label)/label.size)
# Threshold the sigmoid output at 0.5 for the confusion matrix.
yPred = model.predict(xTest) > .5
print(confusion_matrix(yTest, yPred)/yTest.size)
plotAccuracy(history, False)
```
| github_jupyter |
# Kernel Density Estimation
Kernel density estimation is the process of estimating an unknown probability density function using a *kernel function* $K(u)$. While a histogram counts the number of data points in somewhat arbitrary regions, a kernel density estimate is a function defined as the sum of a kernel function on every data point. The kernel function typically exhibits the following properties:
1. Symmetry such that $K(u) = K(-u)$.
2. Normalization such that $\int_{-\infty}^{\infty} K(u) \ du = 1$ .
3. Monotonically decreasing such that $K'(u) < 0$ when $u > 0$.
4. Expected value equal to zero such that $\mathrm{E}[K] = 0$.
For more information about kernel density estimation, see for instance [Wikipedia - Kernel density estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation).
A univariate kernel density estimator is implemented in `sm.nonparametric.KDEUnivariate`.
In this example we will show the following:
* Basic usage, how to fit the estimator.
* The effect of varying the bandwidth of the kernel using the `bw` argument.
* The various kernel functions available using the `kernel` argument.
```
%matplotlib inline
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.distributions.mixture_rvs import mixture_rvs
```
## A univariate example
```
np.random.seed(12345) # Seed the random number generator for reproducible results
```
We create a bimodal distribution: a mixture of two normal distributions with locations at `-1` and `1`.
```
# Location, scale and weight for the two distributions
dist1_loc, dist1_scale, weight1 = -1 , .5, .25
dist2_loc, dist2_scale, weight2 = 1 , .5, .75
# Sample 250 observations from the two-component normal mixture
obs_dist = mixture_rvs(prob=[weight1, weight2], size=250,
                       dist=[stats.norm, stats.norm],
                       kwargs = (dict(loc=dist1_loc, scale=dist1_scale),
                                 dict(loc=dist2_loc, scale=dist2_scale)))
```
The simplest non-parametric technique for density estimation is the histogram.
```
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Scatter plot of data samples and histogram
# (the random y values only jitter the sample markers vertically)
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size)),
           zorder=15, color='red', marker='x', alpha=0.5, label='Samples')
lines = ax.hist(obs_dist, bins=20, edgecolor='k', label='Histogram')
ax.legend(loc='best')
ax.grid(True, zorder=-5)
```
## Fitting with the default arguments
The histogram above is discontinuous. To compute a continuous probability density function,
we can use kernel density estimation.
We initialize a univariate kernel density estimator using `KDEUnivariate`.
```
# Fit the KDE with default settings.
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit() # Estimate the densities
```
We present a figure of the fit, as well as the true distribution.
```
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(obs_dist, bins=20, density=True, label='Histogram from samples',
        zorder=5, edgecolor='k', alpha=0.5)
# Plot the KDE as fitted using the default arguments
ax.plot(kde.support, kde.density, lw=3, label='KDE from samples', zorder=10)
# Plot the true distribution (weighted sum of the two normal densities)
true_values = (stats.norm.pdf(loc=dist1_loc, scale=dist1_scale, x=kde.support)*weight1
               + stats.norm.pdf(loc=dist2_loc, scale=dist2_scale, x=kde.support)*weight2)
ax.plot(kde.support, true_values, lw=3, label='True distribution', zorder=15)
# Plot the samples (jittered vertically for visibility)
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/40,
           marker='x', color='red', zorder=20, label='Samples', alpha=0.5)
ax.legend(loc='best')
ax.grid(True, zorder=-5)
```
In the code above, default arguments were used. We can also vary the bandwidth of the kernel, as we will now see.
## Varying the bandwidth using the `bw` argument
The bandwidth of the kernel can be adjusted using the `bw` argument.
In the following example, a bandwidth of `bw=0.2` seems to fit the data well.
```
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(obs_dist, bins=25, label='Histogram from samples',
        zorder=5, edgecolor='k', density=True, alpha=0.5)
# Plot the KDE for various bandwidths (re-fitting the same estimator each time)
for bandwidth in [0.1, 0.2, 0.4]:
    kde.fit(bw=bandwidth) # Estimate the densities
    ax.plot(kde.support, kde.density, '--', lw=2, color='k', zorder=10,
            label='KDE from samples, bw = {}'.format(round(bandwidth, 2)))
# Plot the true distribution
ax.plot(kde.support, true_values, lw=3, label='True distribution', zorder=15)
# Plot the samples (jittered vertically for visibility)
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/50,
           marker='x', color='red', zorder=20, label='Data samples', alpha=0.5)
ax.legend(loc='best')
ax.set_xlim([-3, 3])
ax.grid(True, zorder=-5)
```
## Comparing kernel functions
In the example above, a Gaussian kernel was used. Several other kernels are also available.
```
# Mapping of kernel names to their implementing classes.
from statsmodels.nonparametric.kde import kernel_switch
list(kernel_switch.keys())
```
### The available kernel functions
```
# Create a figure
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, (ker_name, ker_class) in enumerate(kernel_switch.items()):
    # Initialize the kernel object
    kernel = ker_class()
    # Sample from the kernel's own domain, falling back to [-3, 3] when unbounded
    domain = kernel.domain or [-3, 3]
    x_vals = np.linspace(*domain, num=2**10)
    y_vals = kernel(x_vals)
    # Create a subplot, set the title
    ax = fig.add_subplot(2, 4, i + 1)
    ax.set_title('Kernel function "{}"'.format(ker_name))
    ax.plot(x_vals, y_vals, lw=3, label='{}'.format(ker_name))
    # Mark the origin for reference
    ax.scatter([0], [0], marker='x', color='red')
    plt.grid(True, zorder=-5)
    ax.set_xlim(domain)
plt.tight_layout()
```
### The available kernel functions on three data points
We now examine how the kernel density estimate will fit to three equally spaced data points.
```
# Create three equidistant points
data = np.linspace(-1, 1, 3)
kde = sm.nonparametric.KDEUnivariate(data)
# Create a figure
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, kernel in enumerate(kernel_switch.keys()):
    # Create a subplot, set the title
    ax = fig.add_subplot(2, 4, i + 1)
    ax.set_title('Kernel function "{}"'.format(kernel))
    # Fit the model (estimate densities); fft=False presumably to allow
    # non-Gaussian kernels — verify against the statsmodels documentation
    kde.fit(kernel=kernel, fft=False, gridsize=2**10)
    # Create the plot
    ax.plot(kde.support, kde.density, lw=3, label='KDE from samples', zorder=10)
    ax.scatter(data, np.zeros_like(data), marker='x', color='red')
    plt.grid(True, zorder=-5)
    ax.set_xlim([-3, 3])
plt.tight_layout()
```
## A more difficult case
The fit is not always perfect. See the example below for a harder case.
```
# A normal + beta mixture: a harder case for the default-bandwidth KDE.
obs_dist = mixture_rvs([.25, .75], size=250, dist=[stats.norm, stats.beta],
                       kwargs = (dict(loc=-1, scale=.5), dict(loc=1, scale=1, args=(1, .5))))
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit()
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.hist(obs_dist, bins=20, density=True, edgecolor='k', zorder=4, alpha=0.5)
ax.plot(kde.support, kde.density, lw=3, zorder=7)
# Plot the samples (jittered vertically for visibility)
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/50,
           marker='x', color='red', zorder=20, label='Data samples', alpha=0.5)
ax.grid(True, zorder=-5)
```
## The KDE is a distribution
Since the KDE is a distribution, we can access attributes and methods such as:
- `entropy`
- `evaluate`
- `cdf`
- `icdf`
- `sf`
- `cumhazard`
```
# Fit a KDE on a fresh, larger sample to inspect its distribution interface.
obs_dist = mixture_rvs([.25, .75], size=1000, dist=[stats.norm, stats.norm],
                       kwargs = (dict(loc=-1, scale=.5), dict(loc=1, scale=.5)))
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit(gridsize=2**10)
# Scalar summaries / pointwise evaluation of the fitted density.
kde.entropy
kde.evaluate(-1)
```
### Cumulative distribution, its inverse, and the survival function
```
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cdf, lw=3, label='CDF')
# The inverse CDF is tabulated on a uniform [0, 1] grid of the same length.
ax.plot(np.linspace(0, 1, num = kde.icdf.size), kde.icdf, lw=3, label='Inverse CDF')
ax.plot(kde.support, kde.sf, lw=3, label='Survival function')
ax.legend(loc = 'best')
ax.grid(True, zorder=-5)
```
### The Cumulative Hazard Function
```
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Cumulative hazard of the fitted KDE distribution.
ax.plot(kde.support, kde.cumhazard, lw=3, label='Cumulative Hazard Function')
ax.legend(loc = 'best')
ax.grid(True, zorder=-5)
```
| github_jupyter |
# Multivariate Resemblance Analysis (MRA) Dataset F
```
#import libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os
print('Libraries imported!!')
#define directory of functions and actual directory
HOME_PATH = '' #home path of the project
FUNCTIONS_DIR = 'EVALUATION FUNCTIONS/RESEMBLANCE'
ACTUAL_DIR = os.getcwd()
#change directory to functions directory
os.chdir(HOME_PATH + FUNCTIONS_DIR)
#import functions for univariate resemblance analisys
from multivariate_resemblance import get_numerical_correlations
from multivariate_resemblance import plot_correlations
from multivariate_resemblance import get_categorical_correlations
from multivariate_resemblance import compute_mra_score
#change directory to actual directory
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
```
## 1. Read real and synthetic datasets
In this part real and synthetic datasets are read.
```
#Define global variables
DATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']
SYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']
#paths to the real training split and to each synthesizer's output
FILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/F_IndianLiverPatient_Real_Train.csv',
             'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/F_IndianLiverPatient_Synthetic_GM.csv',
             'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/F_IndianLiverPatient_Synthetic_SDV.csv',
             'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/F_IndianLiverPatient_Synthetic_CTGAN.csv',
             'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/F_IndianLiverPatient_Synthetic_WGANGP.csv'}
categorical_columns = ['gender','class']
data = dict()
#iterate over all datasets filepaths and read each dataset
for name, path in FILEPATHS.items() :
    data[name] = pd.read_csv(path)
    #mark the categorical columns so the correlation helpers can treat them separately
    for col in categorical_columns :
        data[name][col] = data[name][col].astype('category')
data
```
## 2. Plot PPC matrices and calculate matrix norms
```
#compute correlation matrixes for all datasets
cors_numerical = dict()
norms_numerical = dict()
for name in DATA_TYPES :
    #pairwise correlation matrix of the numerical columns plus its norm
    cors_numerical[name], norms_numerical[name] = get_numerical_correlations(data[name])
norms_numerical
# One panel per dataset; only the last panel carries the shared color bar.
fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5))
# FIX: index panels by the actual number of datasets (was hard-coded range(6);
# zip silently truncated the extra index).
axs_idxs = range(len(DATA_TYPES))
idx = dict(zip(DATA_TYPES, axs_idxs))
for name_idx, name in enumerate(DATA_TYPES):
    ax = axs[idx[name]]
    matrix = cors_numerical[name]
    if name_idx != len(DATA_TYPES) - 1:
        plot_correlations(matrix, ax, color_bar=False)
    else:
        plot_correlations(matrix, ax, color_bar=True)
    # Hide duplicated y tick labels on all but the first panel.
    if name_idx > 0:
        ax.set_yticks([])
    if name == 'Real':
        ax.set_title(name)
    else:
        # MRA score: similarity of this synthetic correlation matrix to the real one.
        score = compute_mra_score(cors_numerical['Real'], matrix)
        ax.set_title(name + ' (' + str(score) + ')')
fig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/PPC_Matrices.svg', bbox_inches='tight')
```
## 3. Plot correlations for categorical variables and calculate matrix norms
```
#compute correlation matrixes for all datasets
cors_categorical = dict()
norms_categorical = dict()
for name in DATA_TYPES :
    #association matrix of the categorical columns plus its norm
    cors_categorical[name], norms_categorical[name] = get_categorical_correlations(data[name])
norms_categorical
# One panel per dataset; only the last panel carries the shared color bar.
fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5))
axs_idxs = range(len(DATA_TYPES))  # FIX: was hard-coded range(6)
idx = dict(zip(DATA_TYPES, axs_idxs))
# (unused 'first' flag removed)
for name_idx, name in enumerate(DATA_TYPES):
    ax = axs[idx[name]]
    matrix = cors_categorical[name]
    if name_idx != len(DATA_TYPES) - 1:
        plot_correlations(matrix, ax, color_bar=False)
    else:
        plot_correlations(matrix, ax, color_bar=True)
    # Hide duplicated y tick labels on all but the first panel.
    if name_idx > 0:
        ax.set_yticks([])
    if name == 'Real':
        ax.set_title(name)
    else:
        # BUG FIX: compare against the REAL categorical correlation matrix;
        # this previously used cors_numerical['Real'] (copy-paste error from
        # the numerical section above).
        score = compute_mra_score(cors_categorical['Real'], matrix)
        ax.set_title(name + ' (' + str(score) + ')')
fig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/Categorical_Matrices.svg', bbox_inches='tight')
```
## 4. Explore the results
```
# Collect the matrix norms of every dataset into one table and save it as CSV.
norms_numerical
norms_categorical
norms_data = [np.asarray(list(norms_numerical.values())), np.asarray(list(norms_categorical.values()))]
df_norms = pd.DataFrame(data=norms_data, columns=DATA_TYPES, index=['PPC_MATRIX_NORMS','CATEGORICAL_CORS_MATRIX_NORMS'])
df_norms.to_csv('MULTIVARIATE RESEMBLANCE RESULTS/Correlation_Matrix_Norms.csv')
df_norms
```
| github_jupyter |
```
import os
import sys
from typing import Union, Any, Optional, Callable, Tuple
ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)
import numpy as np
import pandas as pd
import proplot as plot
import eagerpy as ep
import torch
from scipy.stats import pearsonr as linear_correlation
from torch import nn, optim
from torch.nn import functional as F
from DeepSparseCoding.tf1x.utils.logger import Logger as tfLogger
import DeepSparseCoding.tf1x.analysis.analysis_picker as ap
from DeepSparseCoding.tf1x.data.dataset import Dataset
import DeepSparseCoding.tf1x.utils.data_processing as tfdp
from DeepSparseCoding.utils.file_utils import Logger
import DeepSparseCoding.utils.dataset_utils as dataset_utils
import DeepSparseCoding.utils.loaders as loaders
import DeepSparseCoding.utils.plot_functions as pf
import foolbox
from foolbox import PyTorchModel
from foolbox.attacks.projected_gradient_descent import LinfProjectedGradientDescentAttack
from foolbox.types import Bounds
from foolbox.models.base import Model
from foolbox.attacks.base import T
from foolbox.criteria import Misclassification
from foolbox.attacks.base import raise_if_kwargs
from foolbox.attacks.base import get_criterion
rand_state = np.random.RandomState(123)
```
### Load PyTorch Foolbox models & data
```
class ModelWithTemperature(nn.Module):
    """
    Thin decorator that applies temperature scaling to a classifier's logits.

    model (nn.Module): a classification network whose forward() returns raw
    classification logits (NOT softmax / log-softmax) and which exposes a
    `params` attribute.
    init_temp: initial value of the learnable temperature.
    """

    def __init__(self, model, init_temp):
        super(ModelWithTemperature, self).__init__()
        self.model = model
        # Mirror the wrapped model's params so downstream code keeps working.
        self.params = model.params
        # Single learnable scalar temperature.
        self.temperature = nn.Parameter(torch.ones(1) * init_temp)

    def forward(self, input_tensor):
        raw_logits = self.model.forward(input_tensor)
        return self.temperature_scale(raw_logits)

    def temperature_scale(self, logits):
        """Divide every logit by the learnable temperature."""
        n_rows, n_cols = logits.size(0), logits.size(1)
        # Expand the scalar temperature to match the logits' shape.
        temp = self.temperature.unsqueeze(1).expand(n_rows, n_cols)
        return logits / temp
def bin_conf_acc_prop(softmaxes, labels, bin_boundaries):
    """
    Per-confidence-bin statistics for calibration analysis.

    Arguments:
    softmaxes -- (N, C) tensor of predicted class probabilities
    labels -- (N,) tensor of integer ground-truth labels
    bin_boundaries -- 1D tensor of B+1 increasing bin edges in [0, 1]

    Returns:
    bin_confidence -- mean max-probability per bin (0 for empty bins)
    bin_accuracy -- mean accuracy per bin (0 for empty bins)
    bin_prop -- fraction of samples falling in each bin

    All three lists have exactly B entries so they stay index-aligned.
    (BUG FIX: the original appended to bin_confidence/bin_accuracy only for
    non-empty bins while always appending to bin_prop, so zipping the three
    lists downstream silently mis-paired bins whenever a bin was empty.)
    """
    bin_lowers = bin_boundaries[:-1]
    bin_uppers = bin_boundaries[1:]
    confidences, predictions = softmaxes.max(axis=1)
    accuracies = predictions.eq(labels)
    bin_confidence = []
    bin_accuracy = []
    bin_prop = []
    for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
        # Samples whose confidence falls in (lower, upper].
        in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
        bin_prop.append(in_bin.float().mean())
        if bin_prop[-1].item() > 0:
            bin_accuracy.append(accuracies[in_bin].float().mean())
            bin_confidence.append(confidences[in_bin].mean())
        else:
            # Empty bins contribute aligned zeros; consumers skip them
            # because bin_prop is 0.
            bin_accuracy.append(torch.zeros(()))
            bin_confidence.append(torch.zeros(()))
    return bin_confidence, bin_accuracy, bin_prop
class _ECELoss(nn.Module):
    """
    Expected Calibration Error (ECE) of a classifier.

    The input to forward() is the model's logits, NOT the softmax scores.
    Confidences are partitioned into equally-sized interval bins; within
    each bin the gap |avg confidence - accuracy| is computed, and the gaps
    are averaged weighted by the fraction of samples per bin.

    See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
    "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI.
    2015.
    """

    def __init__(self, n_bins=15):
        """n_bins (int): number of confidence interval bins."""
        super(_ECELoss, self).__init__()
        self.bin_boundaries = torch.linspace(0, 1, n_bins + 1)

    def forward(self, logits, labels):
        probs = F.softmax(logits, dim=1)
        ece = torch.zeros(1, device=logits.device)
        # Per-bin confidence / accuracy / proportion come from the shared helper.
        confs, accs, props = bin_conf_acc_prop(probs, labels, self.bin_boundaries)
        for conf_in_bin, acc_in_bin, prop_in_bin in zip(confs, accs, props):
            # Empty bins contribute nothing to the weighted average.
            if prop_in_bin.item() > 0:
                ece = ece + torch.abs(conf_in_bin - acc_in_bin) * prop_in_bin
        return ece
def set_temperature(model, valid_loader, device):
    """
    Tune the temperature of `model` (a ModelWithTemperature) on a validation
    set by minimizing NLL with LBFGS; prints NLL and ECE before and after.

    Arguments:
    model -- wrapped model exposing .temperature and .temperature_scale()
    valid_loader -- DataLoader yielding (input, label) batches
    device -- torch device used for the optimization

    Returns:
    model -- the same model, with its temperature parameter optimized
    """
    nll_criterion = nn.CrossEntropyLoss().to(device)
    ece_criterion = _ECELoss().to(device)
    # First: collect all the logits and labels for the validation set.
    logits_list = []
    labels_list = []
    with torch.no_grad():
        for input_tensor, label_tensor in valid_loader:
            # Flatten 28x28 images into 784-vectors (MNIST-specific).
            input_tensor = input_tensor.reshape((input_tensor.shape[0], 784))
            input_tensor = input_tensor.to(device)
            logits_list.append(model(input_tensor))
            labels_list.append(label_tensor)
    logits = torch.cat(logits_list).to(device)
    labels = torch.cat(labels_list).to(device)
    # Calculate NLL and ECE before temperature scaling.
    before_temperature_nll = nll_criterion(logits, labels).item()
    before_temperature_ece = ece_criterion(logits, labels).item()
    print('Before temperature - NLL: %.3f, ECE: %.3f' % (before_temperature_nll, before_temperature_ece))
    # Next: optimize the temperature w.r.t. NLL.
    optimizer = optim.LBFGS([model.temperature], lr=0.01, max_iter=50)

    def _closure():
        # BUG FIX: LBFGS calls this closure many times per step; gradients
        # must be cleared on every evaluation or they accumulate across
        # calls. (Also renamed from 'eval', which shadowed the builtin.)
        optimizer.zero_grad()
        loss = nll_criterion(model.temperature_scale(logits), labels)
        loss.backward()
        return loss

    optimizer.step(_closure)
    # Calculate NLL and ECE after temperature scaling.
    after_temperature_nll = nll_criterion(model.temperature_scale(logits), labels).item()
    after_temperature_ece = ece_criterion(model.temperature_scale(logits), labels).item()
    print('Optimal temperature: %.3f' % model.temperature.item())
    print('After temperature - NLL: %.3f, ECE: %.3f' % (after_temperature_nll, after_temperature_ece))
    return model
def create_mnist_dsc(log_file, cp_file, calibrate=False, init_temp=1.0):
    """Build a foolbox-wrapped DeepSparseCoding model for MNIST.

    Loads the run parameters recorded in ``log_file``, restores the checkpoint
    at ``cp_file``, optionally wraps the network with a fixed-temperature
    scaling layer, and returns
    (fmodel, model, test_loader, batch_size, device).
    """
    run_logger = Logger(log_file, overwrite=False)
    run_params = run_logger.read_params(run_logger.load_file())[-1]
    # Override the recorded data settings for this analysis run.
    run_params.cp_latest_filename = cp_file
    run_params.standardize_data = False
    run_params.rescale_data_to_one = True
    run_params.shuffle_data = False
    run_params.batch_size = 50
    train_loader, val_loader, test_loader, data_params = dataset_utils.load_dataset(run_params)
    for key, value in data_params.items():
        setattr(run_params, key, value)
    model = loaders.load_model(run_params.model_type)
    model.setup(run_params, run_logger)
    # Output directories for analysis artifacts.
    model.params.analysis_out_dir = os.path.join(
        model.params.model_out_dir, 'analysis', model.params.version)
    model.params.analysis_save_dir = os.path.join(model.params.analysis_out_dir, 'savefiles')
    os.makedirs(model.params.analysis_save_dir, exist_ok=True)
    model.load_checkpoint()
    if calibrate:
        # Wrap with a fixed-temperature scaling layer (temperature fit offline).
        model = ModelWithTemperature(model, init_temp)
        #model = set_temperature(model, test_loader, run_params.device)
    model.to(run_params.device)
    fmodel = PyTorchModel(model.eval(), bounds=(0, 1))
    fmodel.params = run_params
    return fmodel, model, test_loader, model.params.batch_size, model.params.device
# Experiment configuration: run the whole test set with temperature-calibrated
# models. The temperatures below are hard-coded per model — presumably fit
# offline with set_temperature; TODO confirm their provenance.
run_full_test_set = True
calibrate = True
fb_mlp_temp = 1.69
fb_lca_temp = 1.50
log_files = [
    os.path.join(*[ROOT_DIR, 'Torch_projects', 'mlp_768_mnist', 'logfiles', 'mlp_768_mnist_v0.log']),
    os.path.join(*[ROOT_DIR, 'Torch_projects', 'lca_768_mlp_mnist', 'logfiles', 'lca_768_mlp_mnist_v0.log'])
]
cp_latest_filenames = [
    os.path.join(*[ROOT_DIR,'Torch_projects', 'mlp_768_mnist', 'checkpoints', 'mlp_768_mnist_latest_checkpoint_v0.pt']),
    os.path.join(*[ROOT_DIR, 'Torch_projects', 'lca_768_mlp_mnist', 'checkpoints', 'lca_768_mlp_mnist_latest_checkpoint_v0.pt'])
]
# Build the two foolbox models; the MLP call also supplies the shared
# test_loader, batch_size, and device used throughout the notebook.
fmodel_mlp, dsc_model_mlp, test_loader, batch_size, device = create_mnist_dsc(log_files[0], cp_latest_filenames[0], calibrate=calibrate, init_temp=fb_mlp_temp)
if calibrate:
    fmodel_mlp.model_type = 'MLP_calibrated'
else:
    fmodel_mlp.model_type = 'MLP'
print(fmodel_mlp.model_type)
fmodel_lca, dsc_model_lca = create_mnist_dsc(log_files[1], cp_latest_filenames[1], calibrate=calibrate, init_temp=fb_lca_temp)[:2]
if calibrate:
    fmodel_lca.model_type = 'LCA_calibrated'
else:
    fmodel_lca.model_type = 'LCA'
print(fmodel_lca.model_type)
fmodels = [fmodel_mlp, fmodel_lca]
# Grab one test batch and flatten it to (batch, 784) for the MLP input.
fb_image_batch, fb_label_batch = next(iter(test_loader))
fb_image_batch = fb_image_batch.reshape((batch_size, 784))
```
### Load DeepSparseCoding analyzer & data
```
class params(object):
    """Configuration bundle for the DeepSparseCoding adversarial analysis."""
    def __init__(self):
        # Execution / bookkeeping settings.
        self.device = "/gpu:0"
        self.analysis_dataset = "test"
        self.save_info = "analysis_temp_" + self.analysis_dataset
        self.overwrite_analysis_log = False
        # Analysis switches: only the class-adversary analysis is enabled.
        self.do_class_adversaries = True
        self.do_run_analysis = False
        self.do_evals = False
        self.do_basis_analysis = False
        self.do_inference = False
        self.do_atas = False
        self.do_recon_adversaries = False
        self.do_neuron_visualization = False
        self.do_full_recon = False
        self.do_orientation_analysis = False
        self.do_group_recons = False
        # Adversarial attack settings.
        self.adversarial_attack_method = "kurakin_targeted"
        self.adversarial_step_size = 0.005  # optimizer learning rate
        self.adversarial_num_steps = 500  # iteration budget per attack
        self.confidence_threshold = 0.9
        self.adversarial_max_change = 1.0  # L-inf perturbation bound (epsilon)
        self.carlini_change_variable = False  # Carlini et al. change-of-variable trick
        self.adv_optimizer = "sgd"  # attack optimizer
        self.adversarial_target_method = "random"  # ignored for untargeted attacks; TODO support "specified"
        self.adversarial_clip = True  # clip the final perturbed image
        self.adversarial_clip_range = [0.0, 1.0]  # valid image value range
        self.adversarial_save_int = 1  # interval for saving adv examples to the npz file
        self.eval_batch_size = 50  # batch size for computing adv examples
        self.adversarial_input_id = None  # which adv images to use; None = all
        self.adversarial_target_labels = None  # for "specified" targeting only; list/array of size [adv_batch_size]
# Build a DSC analyzer per model, reading each model's recorded parameters
# from its logfile and tagging the analyzer with a short model_type label.
analysis_params = params()
analysis_params.projects_dir = os.path.expanduser("~")+"/Work/Projects/"
#model_names = ['mlp_cosyne_mnist', 'slp_lca_768_latent_75_steps_mnist']
#model_names = ['mlp_768_mnist', 'slp_lca_768_latent_mnist']
model_names = ['mlp_1568_mnist', 'slp_lca_1568_latent_mnist']
model_types = ['MLP', 'LCA']
analyzers = []
for model_type, model_name in zip(model_types, model_names):
    # NOTE: the single analysis_params object is mutated per iteration and
    # passed to every analyzer's setup — the analyzers share its final state.
    analysis_params.model_name = model_name
    analysis_params.version = '0.0'
    analysis_params.model_dir = analysis_params.projects_dir+analysis_params.model_name
    model_log_file = (analysis_params.model_dir+"/logfiles/"+analysis_params.model_name
        +"_v"+analysis_params.version+".log")
    model_logger = tfLogger(model_log_file, overwrite=False)
    model_log_text = model_logger.load_file()
    model_params = model_logger.read_params(model_log_text)[-1]
    analysis_params.model_type = model_params.model_type
    analyzer = ap.get_analyzer(analysis_params.model_type)
    # Encode the attack hyperparameters into the savefile tag.
    analysis_params.save_info = "analysis_tmp_test_" + analysis_params.analysis_dataset
    analysis_params.save_info += (
        "_linf_"+str(analysis_params.adversarial_max_change)
        +"_ss_"+str(analysis_params.adversarial_step_size)
        +"_ns_"+str(analysis_params.adversarial_num_steps)
        +"_pgd_targeted"
    )
    analyzer.setup(analysis_params)
    analyzer.model_type = model_type
    analyzer.confidence_threshold = analysis_params.confidence_threshold
    analyzers.append(analyzer)
# Rebuild the PyTorch test set as a DSC Dataset (float images scaled to
# [0, 1], one-hot labels) so both codebases see identical data.
mnist_data = test_loader.dataset.data.numpy().astype(np.float32)
mnist_data /= 255
dsc_data = {
    'test':Dataset(
        np.expand_dims(mnist_data, axis=-1),
        tfdp.dense_to_one_hot(test_loader.dataset.targets.numpy(), 10),
        None,
        rand_state
    )
}
# NOTE(review): `analyzer` here is the leftover loop variable from the setup
# loop above (the last analyzer) — presumably intentional; verify.
dsc_data = analyzers[0].model.reshape_dataset(dsc_data, analyzer.model_params)
for analyzer in analyzers:
    analyzer.model_params.data_shape = list(dsc_data["test"].shape[1:])
    analyzer.setup_model(analyzer.model_params)
# One un-shuffled batch, then reset so later passes start from image 0.
dsc_image_batch, dsc_label_batch, _ = dsc_data['test'].next_batch(batch_size, shuffle_data=False)
dsc_data['test'].reset_counters()
dsc_all_images = dsc_data['test'].images
dsc_all_images = dsc_all_images.reshape((dsc_all_images.shape[0], 784))
dsc_all_labels = dsc_data['test'].labels
```
### Compare DeepSparseCoding & Foolbox data
```
# Visually verify that the PyTorch loader and the DSC dataset contain the
# same images: show one random digit from each plus their difference.
img_idx = np.random.randint(batch_size)
fig, axs = plot.subplots(ncols=3)
im = axs[0].imshow(fb_image_batch.numpy()[img_idx,...].reshape(28, 28), cmap='greys_r')
axs[0].format(title=f'PyTorch loader digit class {fb_label_batch[img_idx]}')
axs[0].colorbar(im)
im = axs[1].imshow(dsc_image_batch[img_idx,...].reshape(28, 28), cmap='greys_r')
axs[1].format(title=f'DSC dataset digit class {tfdp.one_hot_to_dense(dsc_label_batch)[img_idx]}')
axs[1].colorbar(im)
# Pixel-wise absolute difference; should be all zeros if the two agree.
diff_img = np.abs(dsc_image_batch[img_idx,...].reshape(28, 28) - fb_image_batch.numpy()[img_idx,...].reshape(28, 28))
im = axs[2].imshow(diff_img, cmap='greys_r')
axs[2].format(title=f'Difference image')
axs[2].colorbar(im)
pf.clear_axes(axs)
plot.show()
```
### Compare DeepSparseCoding & Foolbox confidence and accuracy
```
# Run a forward pass over the whole test set with each foolbox model,
# collecting per-image logits and softmax outputs as flat numpy arrays.
for fmodel in fmodels:
    fmodel.softmaxes = []
    fmodel.logits = []
    for input_tensor, label_tensor in test_loader:
        input_tensor = input_tensor.reshape((input_tensor.shape[0], 784))
        input_tensor = input_tensor.to(fmodel.params.device)
        label_tensor = label_tensor.to(fmodel.params.device)
        input_tensor, label_tensor = ep.astensors(input_tensor, label_tensor)
        fmodel.logits.append(fmodel(input_tensor))
        # NOTE(review): applies torch softmax to an eagerpy tensor — presumably
        # works via eagerpy's torch backend; confirm.
        fmodel.softmaxes.append(torch.nn.functional.softmax(fmodel.logits[-1], dim=-1))
    # Stack per-batch results and flatten to (num_images, num_classes).
    fmodel.softmaxes = ep.stack(fmodel.softmaxes, axis=0)
    fmodel.num_batches, fmodel.batch_size, fmodel.num_classes = fmodel.softmaxes.shape
    fmodel.softmaxes = fmodel.softmaxes.reshape((fmodel.num_batches*fmodel.batch_size, fmodel.num_classes)).numpy()
    fmodel.logits = ep.stack(fmodel.logits, axis=0)
    fmodel.logits = fmodel.logits.reshape((fmodel.num_batches*fmodel.batch_size, fmodel.num_classes)).numpy()
# Compute per-image logits and softmax outputs with each DSC analyzer,
# resetting the dataset batch counters after every full pass.
# (Removed a leftover debug print between the two compute_activations calls.)
for analyzer in analyzers:
    print(analyzer.analysis_params.model_name)
    analyzer.logits = np.squeeze(analyzer.compute_activations(dsc_all_images, batch_size=50, activation_operation=analyzer.model.get_logits))
    dsc_data['test'].reset_counters()
    analyzer.softmaxes = np.squeeze(analyzer.compute_activations(dsc_all_images, batch_size=50, activation_operation=analyzer.model.get_label_est))
    dsc_data['test'].reset_counters()
# Reliability diagram (confidence vs accuracy per confidence bin) for all
# four models, with the Expected Calibration Error (ECE) in each title.
n_bins = 50
bin_boundaries = np.linspace(0, 1, n_bins + 1)
fig, axs = plot.subplots(ncols=4)
for ax, model, codebase in zip(axs, fmodels+analyzers, ['FB', 'FB', 'DSC', 'DSC']):
    confidence, accuracy, props = bin_conf_acc_prop(
        torch.from_numpy(model.softmaxes).to(device),
        test_loader.dataset.targets.to(device),
        bin_boundaries
    )
    confidence = [conf.cpu().numpy() for conf in confidence]
    accuracy = [acc.cpu().numpy() for acc in accuracy]
    props = [prop.cpu().numpy() for prop in props]
    # ECE: confidence/accuracy gap weighted by the proportion of samples in
    # each bin, expressed as a percentage.
    ece = 0
    for avg_confidence_in_bin, accuracy_in_bin, prop_in_bin in zip(confidence, accuracy, props):
        ece += np.abs(avg_confidence_in_bin.item() - accuracy_in_bin.item()) * prop_in_bin.item()
    ece *= 100
    # Bug fix: the size list must align 1:1 with the scatter data; the old
    # `if prop > 0` filter made it shorter whenever a bin was empty.
    ax.scatter(confidence, accuracy, s=[prop*500 for prop in props], color='k')
    ax.plot([0,1], [0,1], 'k--', linewidth=0.1)
    # Bug fix: `ece` is a plain Python float, which has no .round() method.
    ax.format(title=f'{codebase}_{model.model_type}\nECE = {round(ece, 3)}%')
axs.format(
    suptitle='Reliability of classifier confidence on test set',
    xlabel='Confidence',
    ylabel='Accuracy',
    xlim=[0, 1],
    ylim=[0, 1]
)
plot.show()
# Stack the per-model outputs into (2, num_images, num_classes) arrays and
# compare logits / softmax outputs for a single image across all four models.
fb_logit_forward = [fmodel.logits for fmodel in fmodels]
fb_logit_forward = np.stack(fb_logit_forward, axis=0)
fb_softmax_forward = [fmodel.softmaxes for fmodel in fmodels]
fb_softmax_forward = np.stack(fb_softmax_forward, axis=0)
dsc_logit_forward = [analyzer.logits for analyzer in analyzers]
dsc_logit_forward = np.stack(dsc_logit_forward, axis=0)
dsc_softmax_forward = [analyzer.softmaxes for analyzer in analyzers]
dsc_softmax_forward = np.stack(dsc_softmax_forward, axis=0)
# One column per model, one row per (model, image) softmax value.
all_softmax_results = np.concatenate((fb_softmax_forward.reshape(2, -1), dsc_softmax_forward.reshape(2, -1)), axis=0).T
fig, axs = plot.subplots(ncols=2, nrows=2)
axs[0,0].bar(np.arange(10), np.squeeze(fb_logit_forward[0, img_idx, :]))
axs[0,0].format(title=f'FB_{fmodels[0].model_type}')
axs[0,1].bar(np.arange(10), np.squeeze(fb_logit_forward[1, img_idx, :]))
axs[0,1].format(title=f'FB_{fmodels[1].model_type}')
axs[1,0].bar(np.arange(10), np.squeeze(dsc_logit_forward[0, img_idx, :]))
axs[1,0].format(title=f'DSC_{analyzers[0].model_type}')
axs[1,1].bar(np.arange(10), np.squeeze(dsc_logit_forward[1, img_idx, :]))
axs[1,1].format(title=f'DSC_{analyzers[1].model_type}')
axs.format(suptitle='Logit outputs for a single image', xtickminor=False, xticks=1)
plot.show()
fig, axs = plot.subplots(ncols=2, nrows=2)
# NOTE(review): these bar() calls pass only heights (no x positions), unlike
# the logit plots above — confirm the plotting library accepts this form.
axs[0,0].bar(np.squeeze(fb_softmax_forward[0, img_idx, :]))
axs[0,0].format(title=f'FB_{fmodels[0].model_type}')
axs[0,1].bar(np.squeeze(fb_softmax_forward[1, img_idx, :]))
axs[0,1].format(title=f'FB_{fmodels[1].model_type}')
axs[1,0].bar(np.squeeze(dsc_softmax_forward[0, img_idx, :]))
axs[1,0].format(title=f'DSC_{analyzers[0].model_type}')
axs[1,1].bar(np.squeeze(dsc_softmax_forward[1, img_idx, :]))
axs[1,1].format(title=f'DSC_{analyzers[1].model_type}')
axs.format(suptitle='Softmax confidence for a single image', xtickminor=False, xticks=1, ylim=[0, 1])
plot.show()
# Distribution of all softmax values per model, on a log scale.
names = ['FB_MLP', 'FB_LCA', 'DSC_MLP', 'DSC_LCA']
data = pd.DataFrame(
    all_softmax_results,
    columns=pd.Index(names, name='Model')
)
fig, ax = plot.subplots(ncols=1, axwidth=2.5, share=0)
ax.format(
    grid=False,
    suptitle='Softmax confidence for the test set'
)
obj1 = ax.boxplot(
    data, linewidth=0.7, marker='.', fillcolor='gray5',
    medianlw=1, mediancolor='k', meancolor='k', meanlw=1
)
ax.format(yscale='log', yformatter='sci')
# Histogram of the maximum softmax confidence per image, for each model.
num_bins = 100
fig, axs = plot.subplots(ncols=2, nrows=2)
for ax, model, atk_type in zip(axs, fmodels+analyzers, ['FB', 'FB', 'DSC', 'DSC']):
    max_confidence = np.max(model.softmaxes, axis=1) # max is across categories, per image
    conf_lim = [0, 1]
    bins = np.linspace(conf_lim[0], conf_lim[1], num_bins)
    count, bin_edges = np.histogram(max_confidence, bins)
    bin_left, bin_right = bin_edges[:-1], bin_edges[1:]
    bin_centers = bin_left + (bin_right - bin_left)/2
    ax.bar(bin_centers, count, color='k')
    # Snap the mean confidence to the nearest bin edge for display.
    mean_confidence = np.mean(max_confidence)
    mean_idx = np.abs(bin_edges - mean_confidence).argmin()
    mean_conf_bin = bin_edges[mean_idx].round(4)
    ax.axvline(mean_conf_bin, lw=1, ls='--', color='r')
    ax.format(
        title=f'{atk_type}_{model.model_type}\nMean confidence = {mean_conf_bin}',
        yscale='log',
        xlim=conf_lim
    )
axs.format(
    suptitle='Softmax confidence on the clean test set correct label',
    ylabel='Count',
    xlabel='Confidence'
)
```
### Run DeepSparseCoding adversarial attack
```
def get_adv_indices(softmax_conf, all_kept_indices, confidence_threshold, num_images, labels):
    """Find images whose best non-true-label confidence crossed the threshold.

    NOTE: zeroes the true-label column of ``softmax_conf`` IN PLACE — pass a
    copy if the caller needs the original array.

    Args:
        softmax_conf: (num_images, num_classes) softmax outputs.
        all_kept_indices: indices already marked adversarial in earlier calls.
        confidence_threshold: adversarial confidence must be strictly above this.
        num_images: number of rows in softmax_conf.
        labels: (num_images,) integer true labels.

    Returns:
        (keep_indices, confidence_indices, adversarial_labels): newly successful
        image indices (ascending), per-image best non-true-label confidence,
        and the corresponding adversarial label per image.
    """
    softmax_conf[np.arange(num_images, dtype=np.int32), labels] = 0 # zero confidence at true label
    confidence_indices = np.max(softmax_conf, axis=-1) # highest non-true label confidence
    adversarial_labels = np.argmax(softmax_conf, axis=-1) # index of highest non-true label
    all_above_thresh = np.nonzero(np.squeeze(confidence_indices>confidence_threshold))[0]
    # Perf fix: build the exclusion set once instead of reconstructing it (and
    # growing an array with np.append) on every loop iteration.
    already_kept = set(all_kept_indices)
    keep_indices = np.array([idx for idx in all_above_thresh if idx not in already_kept], dtype=np.int32)
    return keep_indices, confidence_indices, adversarial_labels
#if run_full_test_set:
#    data = dsc_all_images
#    labels = dsc_all_labels
#else:
# Attack only the first batch for the DSC models (full-set branch commented out).
data = dsc_image_batch
labels = dsc_label_batch
for analyzer in analyzers:
    analyzer.class_adversary_analysis(
        data,
        labels,
        batch_size=analyzer.analysis_params.eval_batch_size,
        input_id=analyzer.analysis_params.adversarial_input_id,
        target_method = analyzer.analysis_params.adversarial_target_method,
        target_labels = analyzer.analysis_params.adversarial_target_labels,
        save_info=analyzer.analysis_params.save_info)
# Convert one-hot labels to dense class indices for get_adv_indices below.
labels = tfdp.one_hot_to_dense(labels.astype(np.int32))
# For each analyzer, scan the stored per-step attack outputs and record, per
# image, the first step at which the adversarial confidence crossed the
# threshold (mirroring the early-stopping foolbox attack below).
for analyzer in analyzers:
    # Per-image buffers; store_time_step == -1 marks "never crossed threshold".
    store_data = np.zeros_like(data)
    store_time_step = -1*np.ones(data.shape[0], dtype=np.int32)
    store_labels = np.zeros(data.shape[0], dtype=np.int32)
    store_confidence = np.zeros(data.shape[0], dtype=np.float32)
    store_mses = np.zeros(data.shape[0], dtype=np.float32)
    all_kept_indices = []
    for adv_step in range(1, analyzer.analysis_params.adversarial_num_steps+1): # first one is original
        # NOTE(review): get_adv_indices zeroes the true-label column of the
        # passed slice IN PLACE, mutating analyzer.adversarial_outputs.
        keep_indices, confidence_indices, adversarial_labels = get_adv_indices(
            analyzer.adversarial_outputs[0, adv_step, ...],
            all_kept_indices,
            analyzer.confidence_threshold,
            data.shape[0],
            labels)
        if keep_indices.size > 0:
            # Freeze results for images that just crossed the threshold.
            all_kept_indices.extend(keep_indices)
            store_data[keep_indices, ...] = analyzer.adversarial_images[0, adv_step, keep_indices, ...]
            store_time_step[keep_indices] = adv_step
            store_confidence[keep_indices] = confidence_indices[keep_indices]
            store_mses[keep_indices] = analyzer.adversarial_input_adv_mses[0, adv_step, keep_indices]
            store_labels[keep_indices] = adversarial_labels[keep_indices]
    # NOTE(review): batch_indices has shape (N, 1); `val not in all_kept_indices`
    # compares a length-1 array elementwise — works here but is fragile.
    batch_indices = np.arange(data.shape[0], dtype=np.int32)[:,None]
    failed_indices = np.array([val for val in batch_indices if val not in all_kept_indices])
    if len(failed_indices) > 0:
        # Failures keep the last-step confidence/labels, the ORIGINAL images,
        # and the final-step MSE.
        store_confidence[failed_indices] = confidence_indices[failed_indices]
        store_labels[failed_indices] = adversarial_labels[failed_indices]
        store_data[failed_indices, ...] = data[failed_indices, ...]
        store_mses[failed_indices] = analyzer.adversarial_input_adv_mses[0, -1, failed_indices]
    # Wrap in single-element lists to mirror the analyzer's per-epsilon layout.
    analyzer.adversarial_images = [store_data]
    analyzer.adversarial_time_step = [store_time_step]
    analyzer.adversarial_confidence = [store_confidence]
    analyzer.failed_indices = [failed_indices]
    analyzer.success_indices = [list(set(all_kept_indices))]
    analyzer.adversarial_labels = [store_labels]
    analyzer.mean_squared_distances = [store_mses]
    analyzer.num_failed = [data.shape[0] - len(set(all_kept_indices))]
    print(f'model {analyzer.model_type} had {analyzer.num_failed} failed indices')
```
### Run Foolbox adversarial attack
```
class LinfProjectedGradientDescentAttackWithStopping(LinfProjectedGradientDescentAttack):
    """L-inf PGD attack with per-image early stopping.

    Iterates PGD until every image's highest non-true-label softmax confidence
    exceeds ``model.confidence_threshold`` (or the step budget runs out), and
    records per-image results (image, label, step, confidence, MSD) by
    appending to lists stored on the model object.
    """
    def __init__(
        self,
        *,
        rel_stepsize: float = 0.025,
        abs_stepsize: Optional[float] = None,
        steps: int = 50,
        random_start: bool = True,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )
    #def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
    #    return x0 + ep.clip(x - x0, -epsilon, epsilon)
    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        # Standard L-inf PGD step direction: the sign of the gradient.
        return gradients.sign()
    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, T],
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        """Run the attack on one batch; returns the final perturbed images."""
        raise_if_kwargs(kwargs)
        x0, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs
        if not isinstance(criterion_, Misclassification):
            raise ValueError("unsupported criterion")
        labels = criterion_.labels
        loss_fn = self.get_loss_fn(model, labels)
        if self.abs_stepsize is None:
            stepsize = self.rel_stepsize * epsilon
        else:
            stepsize = self.abs_stepsize
        # Keep the unperturbed batch for the mean-squared-distance below.
        orig_x = x0.numpy().copy()
        x = x0
        if self.random_start:
            x = self.get_random_start(x0, epsilon)
            x = ep.clip(x, *model.bounds)
        else:
            x = x0
        # Per-image result buffers; time_step == -1 marks "never succeeded".
        store_x = np.zeros_like(x, dtype=np.float32)
        store_time_step = -1*np.ones(x.shape[0], dtype=np.int32)
        store_labels = np.zeros(x.shape[0], dtype=np.int32)
        store_confidence = np.zeros(x.shape[0], dtype=np.float32)
        all_kept_indices = []
        time_step = 0
        num_failed = 0
        # Step until every image crosses the threshold or the budget runs out.
        while len(set(all_kept_indices)) < x.shape[0]:
            loss, gradients = self.value_and_grad(loss_fn, x)
            gradients = self.normalize(gradients=gradients, x=x, bounds=model.bounds)
            x = x + stepsize * gradients
            x = self.project(x, x0, epsilon)
            x = ep.clip(x, *model.bounds)
            keep_indices, confidence_indices, adversarial_labels = get_adv_indices(
                ep.softmax(model(x)).numpy().copy(),
                all_kept_indices,
                model.confidence_threshold,
                x.shape[0],
                labels.numpy())
            if keep_indices.size > 0:
                # Freeze results for images that just crossed the threshold.
                all_kept_indices.extend(keep_indices)
                store_x[keep_indices, ...] = x.numpy()[keep_indices, ...]
                store_labels[keep_indices] = adversarial_labels[keep_indices]
                store_time_step[keep_indices] = time_step
                store_confidence[keep_indices] = confidence_indices[keep_indices]
            time_step += 1
            # NOTE(review): breaking at steps-1 performs one fewer gradient
            # step than self.steps — confirm whether that is intended.
            if time_step == self.steps-1:
                num_failed = x.shape[0] - len(set(all_kept_indices))
                print(f'Max steps = {self.steps} reached for model {model.model_type}, {num_failed} images did not achieve adversarial confidence threshold of {model.confidence_threshold}')
                break
        # NOTE(review): batch_indices has shape (N, 1); the `not in` membership
        # test compares length-1 arrays elementwise — works here but fragile.
        batch_indices = np.arange(x.shape[0], dtype=np.int32)[:,None]
        failed_indices = np.array([val for val in batch_indices if val not in all_kept_indices])
        if len(failed_indices) > 0:
            # Failures keep the last-step stats and the final perturbed image.
            store_confidence[failed_indices] = confidence_indices[failed_indices]
            store_x[failed_indices, ...] = x[failed_indices, ...]
        # Mean squared distance between final and original images, per image.
        reduc_dim = tuple(range(1, len(orig_x.shape)))
        msd = np.mean((store_x - orig_x)**2, axis=reduc_dim)
        # Append this batch's results to the lists initialized on the model.
        model.adversarial_images.append(store_x)
        model.adversarial_time_step.append(store_time_step)
        model.adversarial_labels.append(store_labels)
        model.adversarial_confidence.append(store_confidence)
        model.success_indices.append(np.array(all_kept_indices, dtype=np.int32))
        model.failed_indices.append(failed_indices)
        model.mean_squared_distances.append(msd)
        model.num_failed.append(len(failed_indices))
        return restore_type(x)
# Foolbox attack configuration mirroring the DSC analysis_params above.
attack_params = {
    'LinfPGD': {
        'random_start':False,
        'abs_stepsize':analysis_params.adversarial_step_size,
        'steps':analysis_params.adversarial_num_steps # maximum number of steps
    }
}
epsilons = [analysis_params.adversarial_max_change]
attack = LinfProjectedGradientDescentAttackWithStopping(**attack_params['LinfPGD'])
# Run the early-stopping PGD attack over the test set for each foolbox model,
# then flatten the per-batch result lists into per-image arrays.
for fmodel in fmodels:
    # Per-model accumulators filled in by the attack's run() method.
    fmodel.confidence_threshold = analysis_params.confidence_threshold
    fmodel.adversarial_images = []
    fmodel.adversarial_labels = []
    fmodel.adversarial_time_step = []
    fmodel.adversarial_confidence = []
    fmodel.failed_indices = []
    fmodel.mean_squared_distances = []
    fmodel.num_failed = []
    fmodel.success_indices = []
    fmodel.success = []
    for batch_idx, (input_tensor, label_tensor) in enumerate(test_loader):
        if not run_full_test_set and batch_idx >= 1:
            break  # bug fix: was `pass`, which never limited the run to one batch
        input_tensor = input_tensor.reshape((input_tensor.shape[0], 784))
        input_tensor = input_tensor.to(fmodel.params.device)
        label_tensor = label_tensor.to(fmodel.params.device)
        input_tensor, label_tensor = ep.astensors(input_tensor, label_tensor)
        advs, _, success = attack(
            fmodel,
            input_tensor,
            label_tensor,
            epsilons=epsilons
        )
        fmodel.success.append(success.numpy())
    #fmodel.num_batches, fmodel.batch_size, fmodel.num_classes = fmodel.softmaxes.shape
    fmodel.num_batches = batch_idx+1
    # Flatten per-batch lists into (num_batches*batch_size, ...) arrays.
    fmodel.adversarial_images = np.stack(fmodel.adversarial_images, axis=0)
    fmodel.adversarial_images = fmodel.adversarial_images.reshape(
        (fmodel.num_batches*fmodel.batch_size,
        *list(fmodel.adversarial_images.shape[2:]))
    )
    fmodel.adversarial_labels = np.stack(fmodel.adversarial_labels, axis=0)
    fmodel.adversarial_labels = fmodel.adversarial_labels.reshape(
        (fmodel.num_batches*fmodel.batch_size,
        *list(fmodel.adversarial_labels.shape[2:]))
    )
    fmodel.adversarial_time_step = np.stack(fmodel.adversarial_time_step, axis=0)
    fmodel.adversarial_time_step = fmodel.adversarial_time_step.reshape(
        (fmodel.num_batches*fmodel.batch_size,
        *list(fmodel.adversarial_time_step.shape[2:]))
    )
    fmodel.adversarial_confidence = np.stack(fmodel.adversarial_confidence, axis=0)
    fmodel.adversarial_confidence = fmodel.adversarial_confidence.reshape(
        (fmodel.num_batches*fmodel.batch_size,
        *list(fmodel.adversarial_confidence.shape[2:]))
    )
    # NOTE(review): np.stack requires equal-length entries; failed_indices and
    # success_indices can vary per batch — confirm this holds for your runs.
    fmodel.failed_indices = np.stack(fmodel.failed_indices, axis=0)
    fmodel.mean_squared_distances = np.stack(fmodel.mean_squared_distances, axis=0)
    fmodel.mean_squared_distances = fmodel.mean_squared_distances.reshape(
        (fmodel.num_batches*fmodel.batch_size,
        *list(fmodel.mean_squared_distances.shape[2:]))
    )
    fmodel.num_failed = np.stack(fmodel.num_failed, axis=0)
    fmodel.success_indices = np.stack(fmodel.success_indices, axis=0)
    fmodel.success_indices = fmodel.success_indices.reshape(
        (fmodel.num_batches*fmodel.batch_size,
        *list(fmodel.success_indices.shape[2:]))
    )
    fmodel.success = np.stack(fmodel.success, axis=0)
    print(f'model {fmodel.model_type} had {fmodel.num_failed.sum()} failed indices')
```
### Compare DeepSparseCoding & Foolbox adversarial attacks
```
# Report clean vs adversarial accuracy for both codebases.
for analyzer in analyzers:
    analyzer.accuracy = analyzer.adversarial_clean_accuracy.item()
    print(f'DSC {analyzer.model_type} clean accuracy = {analyzer.accuracy} and adv accuracy = {analyzer.adversarial_adv_accuracy}')
for fmodel in fmodels:
    fmodel.accuracy = foolbox.accuracy(fmodel, fb_image_batch.to(device), fb_label_batch.to(device))
    # success==True means the attack flipped the prediction, so
    # adversarial accuracy is 1 - mean success rate of the first batch.
    print(f'FB {fmodel.model_type} clean accuracy = {fmodel.accuracy} and adv accuracy = {1.0 - fmodel.success[0].mean(axis=-1).round(2)}')
def stars(p):
    """Map a p-value to the conventional significance annotation."""
    significance_levels = (
        (0.0001, '****'),
        (0.001, '***'),
        (0.01, '**'),
        (0.05, '*'),
    )
    for threshold, marker in significance_levels:
        if p < threshold:
            return marker
    return 'n.s.'
# Compare the mean squared distances of successful attacks across the two
# models, restricted to images successfully attacked under BOTH models.
names = ['MLP 2L;768N','LCA 2L;768N']
fb_all_success_indices = np.intersect1d(*[fmodel.success_indices for fmodel in fmodels]).astype(np.int32)
fb_adv_results_list = [np.array(fmodel.mean_squared_distances)[fb_all_success_indices] for fmodel in fmodels]
fb_all_results = np.stack(fb_adv_results_list, axis=-1).squeeze()
fb_dataframe = pd.DataFrame(
    fb_all_results,
    columns=pd.Index(names, name='Model')
)
dsc_all_success_indices = np.intersect1d(*[analyzer.success_indices for analyzer in analyzers]).astype(np.int32)
dsc_adv_results_list = [analyzer.mean_squared_distances[0][dsc_all_success_indices] for analyzer in analyzers]
dsc_all_results = np.stack(dsc_adv_results_list, axis=-1).squeeze()
dsc_dataframe = pd.DataFrame(
    dsc_all_results,
    columns=pd.Index(names, name='Model')
)
# p-values of the linear correlation between the two models' per-image MSDs.
dsc_p_value = linear_correlation(dsc_all_results[:,0], dsc_all_results[:,1])[1]
fb_p_value = linear_correlation(fb_all_results[:,0], fb_all_results[:,1])[1]
fig, axs = plot.subplots(ncols=2, axwidth=2.5, share=0)
axs.format(grid=False, suptitle='L infinity Attack Mean Squared Distances')
ax = axs[0]
obj1 = ax.boxplot(
    fb_dataframe, linewidth=0.7, marker='.', fillcolor='gray5',
    medianlw=1, mediancolor='k', meancolor='k', meanlw=1
)
# Place the significance marker near the top of the panel.
ax_y_max = max(ax.get_ylim())
ax.text(0.5, ax_y_max-0.1*(ax_y_max), stars(fb_p_value),
    horizontalalignment='center',
    verticalalignment='center',
    fontsize=14)
ax.format(title='Foolbox')
ax = axs[1]
obj2 = ax.boxplot(
    dsc_dataframe, linewidth=0.7, marker='.', fillcolor='gray5',
    medianlw=1, mediancolor='k', meancolor='k', meanlw=1
)
ax_y_max = max(ax.get_ylim())
ax.text(0.5, ax_y_max-0.1*(ax_y_max), stars(dsc_p_value),
    horizontalalignment='center',
    verticalalignment='center',
    fontsize=24)
ax.format(title='Deep Sparse Coding')
```
## attack images
```
# Show, per model, the attacked images with the smallest, closest-to-mean,
# and largest mean squared distance among the shared-success subset.
fig, axs = plot.subplots(nrows=3, ncols=len(fmodels+analyzers))
pf.clear_axes(axs)
top_level = zip(fmodels+analyzers, fb_adv_results_list+dsc_adv_results_list, ['FB', 'FB', 'DSC', 'DSC'])
for model_idx, (model, adv_results_list, atk_type) in enumerate(top_level):
    # DSC analyzers store results wrapped in a single-element (per-epsilon) list.
    if atk_type == 'DSC':
        adv_imgs = model.adversarial_images[0]
        adv_labels = model.adversarial_labels[0]
    else:
        adv_imgs = model.adversarial_images
        adv_labels = model.adversarial_labels
    adv_results = adv_results_list
    adv_min_idx = np.abs(adv_results - adv_results.min()).argmin()
    adv_mean_idx = np.abs(adv_results - adv_results.mean()).argmin()
    adv_max_idx = np.abs(adv_results - adv_results.max()).argmin()
    for row_idx, image_idx in enumerate([adv_min_idx, adv_mean_idx, adv_max_idx]):
        img = adv_imgs[image_idx, ...].reshape(28, 28).astype(np.float32)
        h = axs[row_idx, model_idx].imshow(img, cmap='grays')
        axs[row_idx, model_idx].colorbar(h, loc='r', ticks=1)
        # Bug fix: report the label of the DISPLAYED image (image_idx);
        # indexing by row_idx showed the labels of images 0-2 instead.
        axs[row_idx, model_idx].format(title=f'{atk_type}_{model.model_type} adversarial label = {adv_labels[image_idx]}')
axs[row_idx, 0].format(llabels=['Min MSD', 'Mean MSD', 'Max MSD'])
plot.show()
```
| github_jupyter |
# GATK Tutorial | Hard Filtering | March 2019
This GATK tutorial corresponds to a section of the GATK Workshop _2b. Germline Hard Filtering Tutorial_ worksheet. The goal is to become familiar with germline variant annotations. The notebook illustrates the following steps.
- Use GATK to stratify a variant callset against a truthset
- Use R's ggplot2 package to plot the distribution of various annotation values
- Hard-filter based on annotation thresholds and calculate concordance metrics
### First, make sure the notebook is using a Python 3 kernel in the top right corner.
A kernel is a _computational engine_ that executes the code in the notebook. We use Python 3 in this notebook to execute GATK commands using _Python Magic_ (`!`). Later we will switch to another notebook to do some plotting in R.
### How to run this notebook:
- **Click to select a gray cell and then press SHIFT+ENTER to run the cell.**
- **Write results to `/home/jupyter-user/`. To access the directory, click on the upper-left jupyter icon.**
### Enable reading Google bucket data
```
# Check if data is accessible. The command should list several gs:// URLs.
! gsutil ls gs://gatk-tutorials/workshop_1702/variant_discovery/data/resources/
! gsutil ls gs://gatk-tutorials/workshop_1702/variant_discovery/data/intervals/motherHighconf.bed
! gsutil ls gs://gatk-tutorials/workshop_1702/variant_discovery/data/inputVcfs/
# If you do not see gs:// URLs listed above, run this cell to install Google Cloud Storage.
# Afterwards, restart the kernel with Kernel > Restart.
#! pip install google-cloud-storage
```
---
## 1. Subset variants to SNPs of a single sample with SelectVariants
Subset the trio callset to just the SNPs of the mother (sample NA12878). Make sure to remove sites for which the sample genotype is homozygous-reference and remove unused alleles, including spanning deletions.
> The tool recalculates depth of coverage (DP) per site as well as the allele count in genotypes for each ALT allele (AC), allele frequency for each ALT allele (AF), and total number of alleles in called genotypes (AN), to reflect only the subset sample(s).
```
! gatk SelectVariants \
-V gs://gatk-tutorials/workshop_1702/variant_discovery/data/inputVcfs/trio.vcf.gz \
-sn NA12878 \
-select-type SNP \
--exclude-non-variants \
--remove-unused-alternates \
-O /home/jupyter-user/motherSNP.vcf.gz
# Peruse the resulting file
! zcat /home/jupyter-user/motherSNP.vcf.gz | grep -v '##' | head
```
---
## 2. Annotate intersecting true positives with VariantAnnotator
We use VariantAnnotator to annotate which variants in our callset are also present in the truthset (GIAB), which are considered true positives. Variants not present in the truthset are considered false positives. Here we produce a callset where variants that are present in the truthset are annotated with the giab.callsets annotation plus a value indicating how many of the callsets used to develop the truthset agreed with that call.
```
! gatk VariantAnnotator \
-V /home/jupyter-user/motherSNP.vcf.gz \
--resource:giab gs://gatk-tutorials/workshop_1702/variant_discovery/data/resources/motherGIABsnps.vcf.gz \
-E giab.callsets \
-O /home/jupyter-user/motherSNP.giab.vcf.gz
# Peruse the resulting file
! zcat /home/jupyter-user/motherSNP.giab.vcf.gz | grep -v '##' | head
```
---
## 3. Tabulate annotations of interest with VariantsToTable
Convert the information from the callset into a tab delimited table using VariantsToTable, so that we can parse it easily in R. The tool parameters differentiate INFO/site-level fields (`-F`) and FORMAT/sample-level genotype fields (`-GF`). This step produces a table where each line represents a variant record from the VCF, and each column represents an annotation we have specified. Wherever the requested annotations are not present, e.g. RankSum annotations at homozygous sites, the value will be replaced by NA.
```
! gatk VariantsToTable \
-V /home/jupyter-user/motherSNP.giab.vcf.gz \
-F CHROM -F POS -F QUAL \
-F BaseQRankSum -F MQRankSum -F ReadPosRankSum \
-F DP -F FS -F MQ -F QD -F SOR \
-F giab.callsets \
-GF GQ \
-O /home/jupyter-user/motherSNP.giab.txt
# Peruse the resulting file
! cat /home/jupyter-user/motherSNP.giab.txt | head -n300
# Focus in on a few columns
! cat /home/jupyter-user/motherSNP.giab.txt | cut -f1,2,7,12 | head -n300
```
---
## 4. Make density and scatter plots in R and determine filtering thresholds
<span style="color:red">Load the R notebook now to run the plots for this next section. Continue below only after you've finished with the other notebook.</span>
---
## 5. Apply filters with VariantFiltration and evaluate results
### A. Filter on QUAL and tabulate baseline concordance
Based on the plots we generated, we're going to apply some filters to weed out false positives. To illustrate how VariantFiltration works, and to establish baseline performance, we first filter on QUAL < 30. By default, GATK GenotypeGVCFs filters out variants with QUAL < 10. This step produces a VCF with all the original variants; those that failed the filter are annotated with the filter name in the FILTER column.
```
# Filter callset on one annotation, QUAL < 30
! gatk VariantFiltration \
-R gs://gatk-tutorials/workshop_1702/variant_discovery/data/ref/ref.fasta \
-V /home/jupyter-user/motherSNP.vcf.gz \
--filter-expression "QUAL < 30" \
--filter-name "qual30" \
-O /home/jupyter-user/motherSNPqual30.vcf.gz
# Peruse the results; try adding 'grep "qual30"'
! zcat /home/jupyter-user/motherSNPqual30.vcf.gz | grep -v '##' | head -n10
# Calculate concordance metrics using GATK4 BETA tool Concordance
! gatk Concordance \
-eval /home/jupyter-user/motherSNPqual30.vcf.gz \
-truth gs://gatk-tutorials/workshop_1702/variant_discovery/data/resources/motherGIABsnps.vcf.gz \
-L gs://gatk-tutorials/workshop_1702/variant_discovery/data/intervals/motherHighconf.bed \
-S /home/jupyter-user/motherSNPqual30.txt
# View the results
! echo ""
! cat /home/jupyter-user/motherSNPqual30.txt
```
### B. Filter on multiple annotations simultaneously using VariantFiltration
To filter on multiple annotations, provide each in a separate expression. For INFO level annotations, the parameter is `-filter`, which should be immediately followed by the corresponding `--filter-name` label. Here we show basic hard-filtering thresholds.
- If an annotation is missing, VariantFiltration skips any judgement on that annotation. To conservatively fail such missing annotation sites, set the `--missing-values-evaluate-as-failing` flag.
- To filter based on FORMAT level annotations, use `--genotype-filter-expression` and `--genotype-filter-name`.
```
# Filter callset on multiple annotations.
# Iterate on thresholds to improve precision while maintaining high sensitivity.
! gatk VariantFiltration \
-V /home/jupyter-user/motherSNP.vcf.gz \
-filter "QD < 2.0" --filter-name "QD2" \
-filter "QUAL < 30.0" --filter-name "QUAL30" \
-filter "SOR > 3.0" --filter-name "SOR3" \
-filter "FS > 60.0" --filter-name "FS60" \
-filter "MQ < 40.0" --filter-name "MQ40" \
-filter "MQRankSum < -12.5" --filter-name "MQRankSum-12.5" \
-filter "ReadPosRankSum < -8.0" --filter-name "ReadPosRankSum-8" \
-O /home/jupyter-user/motherSNPfilters.vcf.gz
# Sanity-check that filtering is as expected by examining filtered records and PASS records.
! zcat /home/jupyter-user/motherSNPfilters.vcf.gz | grep -v '##' | grep -v 'PASS' | head -n20 | cut -f6-10
! zcat /home/jupyter-user/motherSNPfilters.vcf.gz | grep -v '#' | grep 'PASS' | head | cut -f6-10
# Calculate concordance metrics using GATK4 BETA tool Concordance
! gatk Concordance \
-eval /home/jupyter-user/motherSNPfilters.vcf.gz \
-truth gs://gatk-tutorials/workshop_1702/variant_discovery/data/resources/motherGIABsnps.vcf.gz \
-L gs://gatk-tutorials/workshop_1702/variant_discovery/data/intervals/motherHighconf.bed \
-S /home/jupyter-user/motherSNPfilters.txt
# For comparison, review the earlier concordance results from QUAL-only filtering
!cat /home/jupyter-user/motherSNPqual30.txt
# View the results from filtering on multiple annotations
! echo ""
! cat /home/jupyter-user/motherSNPfilters.txt
```
---
We performed hard-filtering to learn about germline variant annotations. Remember that GATK recommends _Variant Quality Score Recalibration_ (VQSR) for germline variant callset filtering. For more complex variant filtering and annotation, see the Broad [Hail.is](https://hail.is/index.html) framework.
| github_jupyter |
```
import copy
import glob
import os
import time
from collections import deque
import csv
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from a2c_ppo_acktr import algo
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.icm import ICM
from a2c_ppo_acktr.storage import RolloutStorage
from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule
from a2c_ppo_acktr.visualize import visdom_plot
# Roll out one evaluation episode of a trained PPO agent on Super Mario Bros
# level 1-1 and record how far the agent progressed through the level.
device = torch.device("cuda:0")
device
norm_pos = 3150  # x position of the end of the level; used to normalize progress to [0, 1]
num_proc = 1
eval_envs = make_vec_envs(
    'SuperMarioBrosNoFrameskip-1-1-v0', 1, num_proc,
    0.9, '', False, device, True, 'dense')
actor_critic = torch.load('./trained_models/ppo/SuperMarioBrosNoFrameskip-1-1-v0.pt')[0]
actor_critic.to(device)
vec_norm = get_vec_normalize(eval_envs)
if vec_norm is not None:
    vec_norm.eval()
    # NOTE(review): `envs` is not defined in this cell -- presumably the
    # vectorized training envs from a previous session; confirm before running.
    vec_norm.ob_rms = get_vec_normalize(envs).ob_rms
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(num_proc,
                                           actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(num_proc, 1, device=device)
positions = deque(maxlen=400)  # recent x positions, used to detect a stuck agent
while len(eval_episode_rewards) < 1:
    with torch.no_grad():
        _, action, _, eval_recurrent_hidden_states = actor_critic.act(
            obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)
    # Observe reward and next obs
    obs, reward, done, infos = eval_envs.step(action)
    eval_envs.render()
    eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                    for done_ in done]).cuda()
    for i, finished in enumerate(done):
        if finished:
            # Fraction of the level completed when the episode ended.
            percentile = infos[i]['x_pos']/norm_pos
            eval_episode_rewards.append(percentile)
            # NOTE(review): `eval_file` is not defined in this cell; set it
            # to the output CSV path before running.
            with open(eval_file, 'a', newline='') as sfile:
                writer = csv.writer(sfile)
                writer.writerows([[percentile]])
    #to prevent the agent from getting stuck
    positions.append(infos[0]['x_pos'])
    pos_ar = np.array(positions)
    # If at least 200 recent positions all lie within +/-20 of the current one,
    # treat the agent as stuck and end the evaluation with its current progress.
    if (len(positions) >= 200) and (pos_ar < pos_ar[-1] + 20).all() and (pos_ar > pos_ar[-1] - 20).all():
        percentile = infos[0]['x_pos']/norm_pos
        eval_episode_rewards.append(percentile)
        with open(eval_file, 'a', newline='') as sfile:
            writer = csv.writer(sfile)
            writer.writerows([[percentile]])
eval_envs.close()
positions.clear()
import pandas as pd
import numpy as np
import glob

# Collect per-run training curves (one CSV per run) and stack them so
# statistics can be computed across runs at each training iteration.
path ='./log_dir/model_ppo_dense/' # use your path
default = glob.glob(path + "train*.csv")
list_ = []
for file_ in default:
    df = pd.read_csv(file_, header=None)
    list_.append(df)
# One column per run; rows are training iterations.
frame_def = pd.concat(list_, axis = 1, ignore_index = True)
# Per-iteration statistics across runs (NaN-aware, so runs may have
# different lengths).
means_def = np.nanmean(frame_def.values, axis=1)
std_def = np.nanstd(frame_def.values, axis=1)
maxi_def = np.nanmax(frame_def.values, axis=1)
mini_def = np.nanmin(frame_def.values, axis=1)
n_itr_def = frame_def.shape[0]
def moving_average(a, n=3):
    """Return the simple moving average of `a` over windows of length `n`.

    The result has length ``len(a) - n + 1``; element ``i`` is the mean of
    ``a[i:i + n]``.
    """
    # Prefix sums with a leading zero make every window mean a single
    # subtraction: (prefix[i + n] - prefix[i]) / n.
    prefix = np.concatenate(([0.0], np.cumsum(a, dtype=float)))
    return (prefix[n:] - prefix[:-n]) / n
import matplotlib.pyplot as plt
# Smoothed mean training return (window of 10) with a +/- one-std band.
plt.plot(moving_average(means_def[:-10], 10), color='red')
plt.fill_between(np.arange(n_itr_def), means_def + std_def, means_def - std_def , alpha=0.3 )
plt.show()
# NOTE(review): `std_cur` is not defined in this chunk -- presumably computed
# in a parallel cell for a second ("curiosity"?) run; confirm before running.
std_cur[-50:].mean()
std_def[-50:].mean()
```
| github_jupyter |
```
%load_ext watermark
%watermark -p torch,pytorch_lightning,torchmetrics,matplotlib
```
The three extensions below are optional, for more information, see
- `watermark`: https://github.com/rasbt/watermark
- `pycodestyle_magic`: https://github.com/mattijn/pycodestyle_magic
- `nb_black`: https://github.com/dnanhkhoa/nb_black
```
%load_ext pycodestyle_magic
%flake8_on --ignore W291,W293,E703,E402,E999 --max_line_length=100
%load_ext nb_black
```
<a href="https://pytorch.org"><img src="https://raw.githubusercontent.com/pytorch/pytorch/master/docs/source/_static/img/pytorch-logo-dark.svg" width="90"/></a> <a href="https://www.pytorchlightning.ai"><img src="https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/docs/source/_static/images/logo.svg" width="150"/></a>
# All-Convolutional Neural Network Trained on MNIST.
All-Convolutional neural network [1][2] Trained on MNIST [3]
### References
- [1] L14.4.1 Replacing Max-Pooling with Convolutional Layers (08:19), https://www.youtube.com/watch?v=Lq83NFkkJCk
- [2] Striving for Simplicity: The All Convolutional Net, https://arxiv.org/abs/1412.6806
- [3] https://en.wikipedia.org/wiki/MNIST_database
## General settings and hyperparameters
- Here, we specify some general hyperparameter values and general settings.
```
BATCH_SIZE = 128       # mini-batch size used by all data loaders
NUM_EPOCHS = 20        # number of training epochs
LEARNING_RATE = 0.005  # Adam learning rate
NUM_WORKERS = 4        # data-loader worker processes (set to 0 if loading hangs)
```
- Note that using multiple workers can sometimes cause issues with too many open files in PyTorch for small datasets. If we have problems with the data loader later, try setting `NUM_WORKERS = 0` and reload the notebook.
## Implementing a Neural Network using PyTorch Lightning's `LightningModule`
- In this section, we set up the main model architecture using the `LightningModule` from PyTorch Lightning.
- In essence, `LightningModule` is a wrapper around a PyTorch module.
- We start with defining our neural network model in pure PyTorch, and then we use it in the `LightningModule` to get all the extra benefits that PyTorch Lightning provides.
```
import torch
import torch.nn.functional as F
class PyTorchModel(torch.nn.Module):
    """All-convolutional classifier for 28x28x1 inputs (e.g. MNIST).

    Downsampling is done by stride-2 convolutions instead of max-pooling.
    The last convolution maps to ``num_classes`` channels, which are then
    globally average-pooled and flattened into (batch, num_classes) logits.
    """

    def __init__(self, num_classes):
        super().__init__()
        self.num_classes = num_classes
        # (in_channels, out_channels, stride) for each 3x3 conv.
        # With padding=1, stride 1 keeps the spatial size and stride 2
        # halves it (rounding up):
        # 28x28 -> 28x28 -> 14x14 -> 14x14 -> 7x7 -> 7x7 -> 4x4 -> 4x4.
        conv_plan = [
            (1, 4, 1),
            (4, 4, 2),
            (4, 8, 1),
            (8, 8, 2),
            (8, 16, 1),
            (16, 16, 2),
            (16, num_classes, 1),
        ]
        layers = []
        for in_ch, out_ch, stride in conv_plan:
            layers.append(torch.nn.Conv2d(in_channels=in_ch,
                                          out_channels=out_ch,
                                          kernel_size=(3, 3),
                                          stride=(stride, stride),
                                          padding=1))
            layers.append(torch.nn.ReLU())
        # Global average pooling over the remaining spatial grid,
        # then flatten to (batch, num_classes).
        layers.append(torch.nn.AdaptiveAvgPool2d(output_size=1))
        layers.append(torch.nn.Flatten())
        self.features = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Return per-class logits of shape (batch, num_classes)."""
        return self.features(x)
# %load ../code_lightningmodule/lightningmodule_classifier_basic.py
import pytorch_lightning as pl
import torchmetrics
# LightningModule that receives a PyTorch model as input
class LightningModel(pl.LightningModule):
    """LightningModule wrapper around a plain PyTorch classifier.

    Adds cross-entropy loss and accuracy logging for the train, validation,
    and test splits while delegating the forward pass to the wrapped model.
    """

    def __init__(self, model, learning_rate):
        super().__init__()
        self.learning_rate = learning_rate
        # The wrapped PyTorch module that does the actual computation.
        self.model = model
        if hasattr(model, "dropout_proba"):
            self.dropout_proba = model.dropout_proba

        # Save settings and hyperparameters to the log directory,
        # but skip the (potentially large) model parameters.
        self.save_hyperparameters(ignore=["model"])

        # One stateful accuracy metric per data split.
        self.train_acc = torchmetrics.Accuracy()
        self.valid_acc = torchmetrics.Accuracy()
        self.test_acc = torchmetrics.Accuracy()

    def forward(self, x):
        # Only required when using a Trainer's .predict() method (optional).
        return self.model(x)

    def _shared_step(self, batch):
        # Common forward pass used by the train/val/test hooks below.
        inputs, targets = batch
        logits = self(inputs)
        return (
            torch.nn.functional.cross_entropy(logits, targets),
            targets,
            logits.argmax(dim=1),
        )

    def training_step(self, batch, batch_idx):
        loss, _, _ = self._shared_step(batch)
        self.log("train_loss", loss)

        # Second forward pass in .eval() mode so the logged accuracy reflects
        # the inference-time behavior of Dropout, BatchNorm, etc.
        self.model.eval()
        with torch.no_grad():
            _, targets, preds = self._shared_step(batch)
        self.train_acc(preds, targets)
        self.log("train_acc", self.train_acc, on_epoch=True, on_step=False)
        self.model.train()

        return loss  # handed back to Lightning for the optimizer step

    def validation_step(self, batch, batch_idx):
        loss, targets, preds = self._shared_step(batch)
        self.log("valid_loss", loss)
        self.valid_acc(preds, targets)
        self.log(
            "valid_acc",
            self.valid_acc,
            on_epoch=True,
            on_step=False,
            prog_bar=True,
        )

    def test_step(self, batch, batch_idx):
        _, targets, preds = self._shared_step(batch)
        self.test_acc(preds, targets)
        self.log("test_acc", self.test_acc, on_epoch=True, on_step=False)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.learning_rate)
```
## Setting up the dataset
- In this section, we are going to set up our dataset.
### Inspecting the dataset
```
# %load ../code_dataset/dataset_mnist_check.py
# Download MNIST and inspect the label distribution of both splits.
from collections import Counter

from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader

train_dataset = datasets.MNIST(
    root="./data", train=True, transform=transforms.ToTensor(), download=True
)

train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    drop_last=True,
    shuffle=True,
)

test_dataset = datasets.MNIST(
    root="./data", train=False, transform=transforms.ToTensor()
)

test_loader = DataLoader(
    dataset=test_dataset,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    drop_last=False,
    shuffle=False,
)

# Count how often each label occurs in each split.
train_counter = Counter()
for images, labels in train_loader:
    train_counter.update(labels.tolist())

test_counter = Counter()
for images, labels in test_loader:
    test_counter.update(labels.tolist())

print("\nTraining label distribution:")
sorted(train_counter.items())

print("\nTest label distribution:")
sorted(test_counter.items())
```
### Performance baseline
- Especially for imbalanced datasets, it's pretty helpful to compute a performance baseline.
- In classification contexts, a useful baseline is to compute the accuracy for a scenario where the model always predicts the majority class -- we want our model to be better than that!
```
# %load ../code_dataset/performance_baseline.py
# Majority-class baseline: the accuracy achieved by always predicting the
# most frequent label in the test set.
majority_class = test_counter.most_common(1)[0]
label, count = majority_class
print("Majority class:", label)

baseline_acc = count / sum(test_counter.values())
print("Accuracy when always predicting the majority class:")
print(f"{baseline_acc:.2f} ({baseline_acc*100:.2f}%)")
```
## A quick visual check
```
# %load ../code_dataset/plot_visual-check_basic.py
# Plot a grid of the first 64 training images as a quick sanity check.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torchvision

# Grab one batch from the training loader.
for images, labels in train_loader:
    break

plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training images")
# make_grid returns CHW; transpose to HWC for imshow.
plt.imshow(np.transpose(torchvision.utils.make_grid(
    images[:64],
    padding=2,
    normalize=True),
    (1, 2, 0)))
plt.show()
```
### Setting up a `DataModule`
- There are three main ways we can prepare the dataset for Lightning. We can
1. make the dataset part of the model;
2. set up the data loaders as usual and feed them to the fit method of a Lightning Trainer -- the Trainer is introduced in the following subsection;
3. create a LightningDataModule.
- Here, we will use approach 3, which is the most organized approach. The `LightningDataModule` consists of several self-explanatory methods, as we can see below:
```
# %load ../code_lightningmodule/datamodule_mnist_basic.py
from torch.utils.data.dataset import random_split
class DataModule(pl.LightningDataModule):
    """MNIST LightningDataModule: 55k train / 5k validation / 10k test."""

    def __init__(self, data_path="./"):
        super().__init__()
        self.data_path = data_path

    def prepare_data(self):
        # Runs once (per node): download the data; setup() reloads it
        # without downloading.
        datasets.MNIST(root=self.data_path, download=True)

    def setup(self, stage=None):
        # Note transforms.ToTensor() scales input images to the 0-1 range.
        mnist_kwargs = dict(
            root=self.data_path,
            transform=transforms.ToTensor(),
            download=False,
        )
        full_train = datasets.MNIST(train=True, **mnist_kwargs)
        self.test = datasets.MNIST(train=False, **mnist_kwargs)
        self.train, self.valid = random_split(full_train, lengths=[55000, 5000])

    def _loader(self, dataset, shuffle, drop_last):
        # Single place for the DataLoader settings shared by all three splits.
        return DataLoader(
            dataset=dataset,
            batch_size=BATCH_SIZE,
            drop_last=drop_last,
            shuffle=shuffle,
            num_workers=NUM_WORKERS,
        )

    def train_dataloader(self):
        return self._loader(self.train, shuffle=True, drop_last=True)

    def val_dataloader(self):
        return self._loader(self.valid, shuffle=False, drop_last=False)

    def test_dataloader(self):
        return self._loader(self.test, shuffle=False, drop_last=False)
```
- Note that the `prepare_data` method is usually used for steps that only need to be executed once, for example, downloading the dataset; the `setup` method defines the dataset loading -- if we run our code in a distributed setting, this will be called on each node / GPU.
- Next, let's initialize the `DataModule`; we use a random seed for reproducibility (so that the data set is shuffled the same way when we re-execute this code):
```
import torch

# Fix the RNG seed so the train/validation split is reproducible.
torch.manual_seed(1)
data_module = DataModule(data_path='./data')
```
## Training the model using the PyTorch Lightning Trainer class
- Next, we initialize our model.
- Also, we define a callback to obtain the model with the best validation set performance after training.
- PyTorch Lightning offers [many advanced logging services](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) like Weights & Biases. However, here, we will keep things simple and use the `CSVLogger`:
```
pytorch_model = PyTorchModel(num_classes=10)

# %load ../code_lightningmodule/logger_csv_acc_basic.py
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import CSVLogger

lightning_model = LightningModel(pytorch_model, learning_rate=LEARNING_RATE)

# Keep only the single checkpoint with the highest validation accuracy.
callbacks = [
    ModelCheckpoint(
        save_top_k=1, mode="max", monitor="valid_acc"
    )  # save top 1 model
]
logger = CSVLogger(save_dir="logs/", name="my-model")
```
- Now it's time to train our model:
```
# %load ../code_lightningmodule/trainer_nb_basic.py
import time

trainer = pl.Trainer(
    max_epochs=NUM_EPOCHS,
    callbacks=callbacks,
    # NOTE(review): progress_bar_refresh_rate was removed in newer
    # PyTorch Lightning releases; confirm the pinned version supports it.
    progress_bar_refresh_rate=50,  # recommended for notebooks
    accelerator="auto",  # Uses GPUs or TPUs if available
    devices="auto",  # Uses all available GPUs/TPUs if applicable
    logger=logger,
    deterministic=False,
    log_every_n_steps=10,
)

start_time = time.time()
trainer.fit(model=lightning_model, datamodule=data_module)

runtime = (time.time() - start_time) / 60
print(f"Training took {runtime:.2f} min in total.")
```
## Evaluating the model
- After training, let's plot our training ACC and validation ACC using pandas, which, in turn, uses matplotlib for plotting (PS: you may want to check out [more advanced logger](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) later on, which take care of it for us):
```
# %load ../code_lightningmodule/logger_csv_plot_basic.py
import pandas as pd
import matplotlib.pyplot as plt

# The CSVLogger writes one row per logging step; average the rows that
# belong to the same epoch to get one value per epoch for plotting.
metrics = pd.read_csv(f"{trainer.logger.log_dir}/metrics.csv")

aggreg_metrics = []
agg_col = "epoch"
for i, dfg in metrics.groupby(agg_col):
    agg = dict(dfg.mean())
    agg[agg_col] = i
    aggreg_metrics.append(agg)

df_metrics = pd.DataFrame(aggreg_metrics)
df_metrics[["train_loss", "valid_loss"]].plot(
    grid=True, legend=True, xlabel="Epoch", ylabel="Loss"
)
df_metrics[["train_acc", "valid_acc"]].plot(
    grid=True, legend=True, xlabel="Epoch", ylabel="ACC"
)
plt.show()
```
- The `trainer` automatically saves the model with the best validation accuracy for us, which we can load from the checkpoint via the `ckpt_path='best'` argument; below we use the `trainer` instance to evaluate the best model on the test set:
```
# Evaluate the checkpoint with the best validation accuracy on the test set.
trainer.test(model=lightning_model, datamodule=data_module, ckpt_path='best')
```
## Predicting labels of new data
- We can use the `trainer.predict` method either on a new `DataLoader` (`trainer.predict(dataloaders=...)`) or `DataModule` (`trainer.predict(datamodule=...)`) to apply the model to new data.
- Alternatively, we can also manually load the best model from a checkpoint as shown below:
```
# Manually reload the best checkpoint (alternative to ckpt_path='best').
path = trainer.checkpoint_callback.best_model_path
print(path)

lightning_model = LightningModel.load_from_checkpoint(path, model=pytorch_model)
lightning_model.eval();
```
- For simplicity, we reused our existing `pytorch_model` above. However, we could also reinitialize the `pytorch_model`, and the `.load_from_checkpoint` method would load the corresponding model weights for us from the checkpoint file.
- Now, below is an example applying the model manually. Here, pretend that the `test_dataloader` is a new data loader.
```
# %load ../code_lightningmodule/datamodule_testloader.py
test_dataloader = data_module.test_dataloader()
acc = torchmetrics.Accuracy()
for batch in test_dataloader:
features, true_labels = batch
with torch.no_grad():
logits = lightning_model(features)
predicted_labels = torch.argmax(logits, dim=1)
acc(predicted_labels, true_labels)
predicted_labels[:5]
```
- As an internal check, if the model was loaded correctly, the test accuracy below should be identical to the test accuracy we saw earlier in the previous section.
```
# Should match the test accuracy reported by trainer.test() above.
test_acc = acc.compute()
print(f'Test accuracy: {test_acc:.4f} ({test_acc*100:.2f}%)')
```
## Inspecting Failure Cases
- In practice, it is often informative to look at failure cases like wrong predictions for particular training instances as it can give us some insights into the model behavior and dataset.
- Inspecting failure cases can sometimes reveal interesting patterns and even highlight dataset and labeling issues.
```
# Human-readable names for the ten MNIST classes (the digits 0-9).
# BUGFIX: the previous mapping listed CIFAR-10 class names (airplane,
# automobile, ...), evidently copied from a CIFAR notebook, but this
# notebook trains on datasets.MNIST.
class_dict = {i: str(i) for i in range(10)}
# %load ../code_lightningmodule/plot_failurecases_basic.py
# Append the folder that contains the
# helper_data.py, helper_plotting.py, and helper_evaluate.py
# files so we can import from them

import sys

sys.path.append("../../pytorch_ipynb")
from helper_plotting import show_examples

# Plot misclassified examples with their predicted vs. true labels.
show_examples(
    model=lightning_model, data_loader=test_dataloader, class_dict=class_dict
)
```
- In addition to inspecting failure cases visually, it is also informative to look at which classes the model confuses the most via a confusion matrix:
```
# %load ../code_lightningmodule/plot_confusion-matrix_basic.py
from torchmetrics import ConfusionMatrix
import matplotlib
from mlxtend.plotting import plot_confusion_matrix

# Accumulate the confusion matrix over the whole test set.
cmat = ConfusionMatrix(num_classes=len(class_dict))
for x, y in test_dataloader:
    with torch.no_grad():
        pred = lightning_model(x)
    cmat(pred, y)
cmat_tensor = cmat.compute()
cmat = cmat_tensor.numpy()

fig, ax = plot_confusion_matrix(
    conf_mat=cmat,
    class_names=class_dict.values(),
    norm_colormap=matplotlib.colors.LogNorm()
    # normed colormaps highlight the off-diagonals
    # for high-accuracy models better
)

plt.show()
%watermark --iversions
```
| github_jupyter |
```
# default_exp Main
```
# Main module
> API details
```
#hide
from nbdev.showdoc import *
#export
from argparse import ArgumentParser, ArgumentTypeError, RawDescriptionHelpFormatter, SUPPRESS
import os, glob, platform
from multiprocessing import cpu_count, Queue
from SEQLinkage.Utils import *
from SEQLinkage.Runner import *
from SEQLinkage.Core import *
from multiprocessing import Process, Queue
from collections import OrderedDict
import itertools
from copy import deepcopy
import sys, faulthandler, platform
import numpy as np
import os
if sys.version_info.major == 2:
from cstatgen import cstatgen_py2 as cstatgen
from cstatgen.egglib import Align
else:
from cstatgen import cstatgen_py3 as cstatgen
import egglib
from egglib import Align
HOMEPAGE = 'http://bioinformatics.org/seqlink' #fixme
class Args:
    """Command-line interface builder for seqlink.

    Assembles an ArgumentParser with four option groups (CHP encoder, I/O,
    LINKAGE, runtime) and exposes the parsed namespace via get().
    """

    def __init__(self):
        self.parser = ArgumentParser(
            description = '''\t{}, linkage analysis using sequence data\n\t[{}]'''.\
            format("SEQLinkage", VERSION),
            formatter_class = RawDescriptionHelpFormatter,
            prog = 'seqlink',
            fromfile_prefix_chars = '@', add_help = False,
            epilog = '''\tCopyright (c) 2013 - 2014 Gao Wang <wang.gao@columbia.edu>\n\tDistributed under GNU General Public License\n\tHome page: {}'''.format(HOMEPAGE))
        self.getEncoderArguments(self.parser)
        self.getIOArguments(self.parser)
        self.getLinkageArguments(self.parser)
        self.getRuntimeArguments(self.parser)

    def isalnum(self, string):
        """argparse ``type=`` validator: require an alphanumeric basename.

        Returns `string` unchanged, or raises ArgumentTypeError.
        """
        if not os.path.basename(string).isalnum():
            # BUGFIX: the format string previously read "[%]", an invalid
            # printf-style conversion that raised ValueError at runtime
            # instead of the intended ArgumentTypeError with a message.
            raise ArgumentTypeError("Illegal path name [%s]: must be alphanumerical string." % string)
        return string

    def get(self):
        """Parse sys.argv and return the argparse namespace."""
        return self.parser.parse_args()

    def getEncoderArguments(self, parser):
        """Options for the collapsed haplotype pattern (CHP) encoder."""
        vargs = parser.add_argument_group('Collapsed haplotype pattern method arguments')
        vargs.add_argument('--bin', metavar = "FLOAT", default = 0, type = float,
                           help='''Defines theme to collapse variants. Set to 0 for "complete collapsing",
                           1 for "no collapsing", r2 value between 0 and 1 for "LD based collapsing" and other integer values for customized
                           collapsing bin sizes. Default to 0 (all variants will be collapsed).''')
        vargs.add_argument('-b', '--blueprint', metavar = 'FILE',
                           help='''Blueprint file that defines regional marker
                           (format: "chr startpos endpos name avg.distance male.distance female.distance").''')
        vargs.add_argument('--single-markers', action='store_true', dest = "single_markers",
                           help='''Use single variant markers. This switch will overwrite
                           "--bin" and "--blueprint" arguments.''')

    def getIOArguments(self, parser):
        """Input / output options (VCF, FAM, frequency sources, formats)."""
        vargs = parser.add_argument_group('Input / output options')
        vargs.add_argument('--fam', metavar='FILE', required=True, dest = "tfam",
                           help='''Input pedigree and phenotype information in FAM format.''')
        vargs.add_argument('--vcf', metavar='FILE', required=True, help='''Input VCF file, bgzipped.''')
        vargs.add_argument('--anno', metavar='FILE', required=False, help='''Input annotation file from annovar.''')
        vargs.add_argument('--pop', metavar='FILE', required=False, help='''Input two columns file, first column is family ID, second column population information.''')
        vargs.add_argument('--build', metavar='STRING', default='hg19', choices = ["hg19", "hg38"], help='''Reference genome version for VCF file.''')
        vargs.add_argument('--prephased', action='store_true', help=SUPPRESS)
        vargs.add_argument('--freq', metavar='INFO', default = None,help='''Info field name for allele frequency in VCF file.''')
        vargs.add_argument('--freq_by_fam', metavar='INFO', help='''Per family info field name for allele frequency in VCF file.''')
        vargs.add_argument('--mle', action='store_true', help='''Estimate allele frequency using MERLIN's MLE method.''')
        vargs.add_argument('--rvhaplo', action='store_true', help='''Only using rare variants for haplotyping''')
        vargs.add_argument('--recomb_max', metavar='INT', default = 1, type = int, help='''Maximum recombination events allowed per region.''')
        vargs.add_argument('--recomb_cross_fam', action='store_true', help='''Code sub-regions with cross family recombination events; otherwise sub-regions are generated on per family basis.''')
        vargs.add_argument('--rsq', metavar='R', default=0.0,type=float, help=SUPPRESS)
        vargs.add_argument('--include_vars', metavar='FILE', help='''Variants to be included in CHP construction''')
        vargs.add_argument('-c', '--maf-cutoff', metavar='P', default=1.0, type=float, dest = "maf_cutoff",
                           help='''MAF cutoff to define "common" variants to be excluded from analyses.''')
        vargs.add_argument('--chrom-prefix', metavar='STRING', dest = 'chr_prefix',
                           help='''Prefix to chromosome name in VCF file if applicable, e.g. "chr".''')
        vargs.add_argument('-o', '--output', metavar='Name', type = self.isalnum,
                           help='''Output name prefix.''')
        vargs.add_argument('-f', '--format', metavar = 'FORMAT', nargs='+',
                           choices = ["LINKAGE", "MERLIN", "MEGA2", "PLINK"], default=['LINKAGE'],
                           help='''Output format. Default to LINKAGE.''')

    def getRuntimeArguments(self, parser):
        """Runtime options: help, parallelism, temp dir, caching, verbosity."""
        vargs = parser.add_argument_group('Runtime arguments')
        vargs.add_argument("-h", "--help", action="help", help="Show help message and exit.")
        # Default job count: half the CPUs, clamped to the range [1, 8].
        vargs.add_argument('-j', '--jobs', metavar='N', type = int, default = max(min(int(cpu_count() / 2), 8), 1),
                           help='''Number of CPUs to use.''')
        vargs.add_argument('--tempdir', metavar='PATH',
                           help='''Temporary directory to use.''')
        vargs.add_argument('--cache', action='store_false', dest = 'vanilla',
                           help='''Load cache data for analysis instead of starting from scratch.''')
        vargs.add_argument('-q', '--quiet', action='store_true', help='Disable the display of runtime MESSAGE.')
        vargs.add_argument('--debug', action='store_true', help=SUPPRESS)
        vargs.add_argument('--no-save', action='store_true', dest='no_save', help=SUPPRESS)

    def getLinkageArguments(self, parser):
        """Options forwarded to the LINKAGE/FASTLINK analysis."""
        vargs = parser.add_argument_group('LINKAGE options')
        vargs.add_argument('-K', '--prevalence', metavar='FLOAT', type=float,
                           help='Disease prevalence.')
        vargs.add_argument('--moi', metavar='STRING', dest = "inherit_mode",
                           # choices=['AD', 'AR', 'Xlinked', 'Y'],
                           choices=['AD', 'AR'],
                           help='Mode of inheritance, AD/AR: autosomal dominant/recessive.')
        vargs.add_argument('-W', '--wt-pen', metavar='FLOAT', type=float, dest = "wild_pen",
                           help='Penetrance for wild type.')
        vargs.add_argument('-M', '--mut-pen', metavar='FLOAT', type=float, dest = "muta_pen",
                           help='Penetrance for mutation.')
        vargs.add_argument('--theta-max', metavar='FLOAT', type=float, dest = "theta_max", default = 0.5,
                           help='Theta upper bound. Default to 0.5.')
        vargs.add_argument('--theta-inc', metavar='FLOAT', type=float, dest = "theta_inc", default = 0.05,
                           help='Theta increment. Default to 0.05.')
        # FASTLINK binaries are only shipped for 64-bit Linux/Darwin, so the
        # --run-linkage switch exists only on those platforms (consumers must
        # therefore use getattr(args, 'run_linkage', False)).
        if ((platform.system() == 'Linux' or platform.system() == 'Darwin') and platform.architecture()[0] == '64bit'):
            vargs.add_argument('--run-linkage', action='store_true', dest = "run_linkage",
                               help='''Perform Linkage analysis using FASTLINK program.''')
        vargs.add_argument('--output-entries', metavar='N', type=int, dest = "output_limit", default = 10,
                           help='Write the highest N LOD/HLOD scores to output tables. Default to 10.')
#export
def checkParams(args):
    '''Set defaults, validate inputs and normalize the parsed arguments.

    Mutates `args` in place (paths, blueprint, format list), configures the
    global `env`, and returns True on success; fatal problems go through
    env.error(..., exit=True).
    '''
    env.setoutput(args.output)
    env.debug = args.debug
    env.quiet = args.quiet
    env.prephased = args.prephased
    # Normalize input paths and make sure both files exist before going on.
    args.vcf = os.path.abspath(os.path.expanduser(args.vcf))
    args.tfam = os.path.abspath(os.path.expanduser(args.tfam))
    for item in [args.vcf, args.tfam]:
        if not os.path.exists(item):
            env.error("Cannot find file [{}]!".format(item), exit = True)
    # More than two distinct non-missing phenotype codes (FAM column 6)
    # implies a quantitative rather than binary trait.
    if len([x for x in set(getColumn(args.tfam, 6)) if x.lower() not in env.ped_missing]) > 2:
        env.trait = 'quantitative'
    env.log('{} trait detected in [{}]'.format(env.trait.capitalize(), args.tfam))
    if not args.blueprint:
        if not args.anno:
            # Fall back to the bundled genemap for the requested genome build.
            args.blueprint = os.path.join(env.resource_dir, 'genemap.{}.txt'.format(args.build))
        else:
            env.log('Generate regions by annotation')
    args.format = [x.lower() for x in set(args.format)]
    # BUGFIX: --run-linkage is only registered on 64-bit Linux/Darwin
    # (see Args.getLinkageArguments), so `args.run_linkage` may not exist
    # on other platforms; getattr avoids an AttributeError there.
    if getattr(args, "run_linkage", False) and "linkage" not in args.format:
        args.format.append('linkage')
    if None in [args.inherit_mode, args.prevalence, args.wild_pen, args.muta_pen] and "linkage" in args.format:
        env.error('To generate LINKAGE format or run LINKAGE analysis, please specify all options below:\n\t--prevalence, -K\n\t--moi\n\t--wild-pen, -W\n\t--muta-pen, -M', show_help = True, exit = True)
    if args.tempdir is not None:
        env.ResetTempdir(args.tempdir)
    return True
#export
def main():
    '''the main encoder function'''
    args = Args().get()
    checkParams(args)
    # Fetch the genemap blueprint plus the FASTLINK binaries for this platform.
    download_dir = 'http://bioinformatics.org/spower/download/.private'
    downloadResources([('{}/genemap.{}.txt'.format(download_dir, args.build), env.resource_dir),
                       ('{}/{}/mlink'.format(download_dir, platform.system().lower()), env.resource_bin),
                       ('{}/{}/unknown'.format(download_dir, platform.system().lower()), env.resource_bin),
                       ('{}/{}/makeped'.format(download_dir, platform.system().lower()), env.resource_bin),
                       ('{}/{}/pedcheck'.format(download_dir, platform.system().lower()), env.resource_bin)])
    if args.no_save:
        cache = NoCache()
    else:
        cache = Cache(env.cache_dir, env.output, vars(args))
    cache.setID('vcf')
    # STEP 1: write encoded data to TPED format
    if not args.vanilla and cache.check():
        # Reuse previously encoded regional markers from the cache.
        env.log('Loading regional marker data from archive ...')
        cache.load(target_dir = env.tmp_dir, names = ['CACHE'])
        env.success_counter.value = sum(map(fileLinesCount, glob.glob('{}/*.tped'.format(env.tmp_cache))))
        env.batch = 10
    else:
        # load data
        data = RData(args.vcf, args.tfam,args.anno,args.pop,allele_freq_info=args.freq)
        samples_vcf = data.samples_vcf
        if len(samples_vcf) == 0:
            env.error("Fail to extract samples from [{}]".format(args.vcf), exit = True)
        env.log('{:,d} samples found in [{}]'.format(len(samples_vcf), args.vcf))
        samples_not_vcf = data.samples_not_vcf
        if len(data.families) == 0:
            env.error('No valid family to process. ' \
                      'Families have to be at least trio with at least one member in VCF file.', exit = True)
        if len(data.samples) == 0:
            env.error('No valid sample to process. ' \
                      'Samples have to be in families, and present in both TFAM and VCF files.', exit = True)
        rewriteFamfile(os.path.join(env.tmp_cache, '{}.tfam'.format(env.output)),
                       data.tfam.samples, list(data.samples.keys()) + samples_not_vcf)
        if args.single_markers:
            # One regional marker per variant site.
            regions = [(x[0], x[1], x[1], "{}:{}".format(x[0], x[1]), '.', '.', '.')
                       for x in data.vs.GetGenomeCoordinates()]
            args.blueprint = None
        elif args.blueprint is not None:
            # load blueprint
            try:
                env.log('Loading marker map from [{}] ...'.format(args.blueprint))
                with open(args.blueprint, 'r') as f:
                    regions = [x.strip().split() for x in f.readlines()]
            except IOError:
                env.error("Cannot load regional marker blueprint [{}]. ".format(args.blueprint), exit = True)
        else:
            env.log('separate chromosome to regions')
            regions=data.get_regions(step=1000)
        env.log('{:,d} families with a total of {:,d} samples will be scanned for {:,d} pre-defined units'.\
                format(len(data.families), len(data.samples), len(regions)))
        env.jobs = max(min(args.jobs, len(regions)), 1)
        env.log('Phasing haplotypes log file: [{}]'.format(env.tmp_log + str(os.getpid()) + '.log'))
        try:
            if env.jobs>1:
                # Parallel path: one EncoderWorker per job; the trailing None
                # entries act as per-worker stop sentinels on the queue.
                regions.extend([None] * env.jobs)
                queue = Queue()
                faulthandler.enable(file=open(env.tmp_log + '.SEGV', 'w'))
                for i in regions:
                    queue.put(i)
                jobs = [EncoderWorker(
                    queue, len(regions), data,
                    RegionExtractor(args.vcf, build = args.build, chr_prefix = args.chr_prefix),
                    MarkerMaker(args.bin, maf_cutoff = args.maf_cutoff),
                    LinkageWriter(len(samples_not_vcf)),
                ) for i in range(env.jobs)]
                for j in jobs:
                    j.start()
                for j in jobs:
                    j.join()
                faulthandler.disable()
            else:
                # Serial path when only one job is requested/needed.
                run_each_region_genotypes(regions,data,RegionExtractor(args.vcf, build = args.build, chr_prefix = args.chr_prefix),
                                          MarkerMaker(args.bin, maf_cutoff = args.maf_cutoff),
                                          LinkageWriter(len(samples_not_vcf)))
        except KeyboardInterrupt:
            # FIXME: need to properly close all jobs
            raise ValueError("Use 'killall {}' to properly terminate all processes!".format(env.prog))
        else:
            env.log('{:,d} units (from {:,d} variants) processed; '\
                    '{:,d} Mendelian inconsistencies and {:,d} recombination events handled\n'.\
                    format(env.success_counter.value,
                           env.variants_counter.value,
                           env.mendelerror_counter.value,
                           env.recomb_counter.value), flush = True)
        # Report the skip/ignore counters accumulated during encoding.
        if env.triallelic_counter.value:
            env.log('{:,d} tri-allelic loci were ignored'.format(env.triallelic_counter.value))
        if env.commonvar_counter.value:
            env.log('{:,d} variants ignored due to having MAF > {}'.\
                    format(env.commonvar_counter.value, args.maf_cutoff))
        if env.null_counter.value:
            env.log('{:,d} units ignored due to absence in VCF file'.format(env.null_counter.value))
        if env.trivial_counter.value:
            env.log('{:,d} units ignored due to absence of variation in samples'.format(env.trivial_counter.value))
        fatal_errors = 0
        try:
            # Error msg from C++ extension
            os.system("cat {}/*.* > {}".format(env.tmp_dir, env.tmp_log))
            fatal_errors = wordCount(env.tmp_log)['fatal']
        except KeyError:
            pass
        if env.chperror_counter.value:
            env.error("{:,d} regional markers failed to be generated due to haplotyping failures!".\
                      format(env.chperror_counter.value))
        if fatal_errors:
            env.error("{:,d} or more regional markers failed to be generated due to runtime errors!".\
                      format(fatal_errors))
        env.log('Archiving regional marker data to directory [{}]'.format(env.cache_dir))
        cache.write(arcroot = 'CACHE', source_dir = env.tmp_cache)
    env.jobs = args.jobs
    # STEP 2: write to PLINK or mega2 format
    tpeds = [os.path.join(env.tmp_cache, item) for item in os.listdir(env.tmp_cache) if item.startswith(env.output) and item.endswith('.tped')]
    for fmt in args.format:
        cache.setID(fmt.lower())
        if not args.vanilla and cache.check(path=os.path.join(env.outdir,fmt.upper())):
            env.log('Loading {} data from archive ...'.format(fmt.upper()))
            cache.load(target_dir = env.tmp_dir, names = [fmt.upper()])
        else:
            env.log('{:,d} units will be converted to {} format'.format(env.success_counter.value, fmt.upper()))
            env.format_counter.value = 0
            # NOTE(review): this calls the *builtin* `format` with converter
            # arguments -- almost certainly a lost reference to a conversion
            # function (the name appears to have been dropped during a
            # refactor); confirm against the upstream SEQLinkage source.
            format(tpeds, os.path.join(env.tmp_cache, "{}.tfam".format(env.output)),
                   args.prevalence, args.wild_pen, args.muta_pen, fmt,
                   args.inherit_mode, args.theta_max, args.theta_inc)
            env.log('{:,d} units successfully converted to {} format\n'.format(env.format_counter.value, fmt.upper()), flush = True)
            if env.skipped_counter.value:
                # FIXME: perhaps we need to rephrase this message?
                env.log('{} region - family pairs skipped'.format(env.skipped_counter.value))
            env.log('Archiving {} format to directory [{}]'.format(fmt.upper(), env.cache_dir))
            cache.write(arcroot = fmt.upper(),
                        source_dir = os.path.join(env.tmp_dir, fmt.upper()), mode = 'a')
    # STEP 3 (optional): run the LINKAGE analysis with FASTLINK.
    if args.run_linkage:
        cache.setID('analysis')
        if not args.vanilla and cache.check(path=os.path.join(env.outdir,'heatmap')):
            # NOTE(review): the .format(fmt.upper()) calls below have no
            # placeholder in the string (harmless no-ops); likely copy-paste
            # leftovers.
            env.log('Loading linkage analysis result from archive ...'.format(fmt.upper()))
            cache.load(target_dir = env.outdir, names = ['heatmap'])
        else:
            env.log('Running linkage analysis ...'.format(fmt.upper()))
            run_linkage(args.blueprint, args.theta_inc, args.theta_max, args.output_limit)
            env.log('Linkage analysis succesfully performed for {:,d} units\n'.\
                    format(env.run_counter.value, fmt.upper()), flush = True)
            if env.makeped_counter.value:
                env.log('{} "makeped" runtime errors occurred'.format(env.makeped_counter.value))
            if env.pedcheck_counter.value:
                env.log('{} "pedcheck" runtime errors occurred'.format(env.pedcheck_counter.value))
            if env.unknown_counter.value:
                env.log('{} "unknown" runtime errors occurred'.format(env.unknown_counter.value))
            if env.mlink_counter.value:
                env.log('{} "mlink" runtime errors occurred'.format(env.mlink_counter.value))
            cache.write(arcroot = 'heatmap', source_dir = os.path.join(env.outdir, 'heatmap'), mode = 'a')
        html(args.theta_inc, args.theta_max, args.output_limit)
    else:
        env.log('Saving data to [{}]'.format(os.path.abspath(env.outdir)))
        cache.load(target_dir = env.outdir)

if __name__ == '__main__':
    main()
```
| github_jupyter |
# What is probability? A simulated introduction
```
#Import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set()
```
## Learning Objectives of Part 1
- To have an understanding of what "probability" means, in both Bayesian and Frequentist terms;
- To be able to simulate probability distributions that model real-world phenomena;
- To understand how probability distributions relate to data-generating **stories**.
## Probability
> To the pioneers such as Bernoulli, Bayes and Laplace, a probability represented a _degree-of-belief_ or plausibility; how much they thought that something was true, based on the evidence at hand. To the 19th century scholars, however, this seemed too vague and subjective an idea to be the basis of a rigorous mathematical theory. So they redefined probability as the _long-run relative frequency_ with which an event occurred, given (infinitely) many repeated (experimental) trials. Since frequencies can be measured, probability was now seen as an objective tool for dealing with _random_ phenomena.
-- _Data Analysis, A Bayesian Tutorial_, Sivia & Skilling (p. 9)
What type of random phenomena are we talking about here? One example is:
- Knowing that a website has a click-through rate (CTR) of 10%, we can calculate the probability of having 10 people, 9 people, 8 people ... and so on click through, upon drawing 10 people randomly from the population;
- But given the data of how many people click through, how can we calculate the CTR? And how certain can we be of this CTR? Or how likely is a particular CTR?
Science mostly asks questions of the second form above & Bayesian thinking provides a wonderful framework for answering such questions. Essentially Bayes' Theorem gives us a way of moving from the probability of the data given the model (written as $P(data|model)$) to the probability of the model given the data ($P(model|data)$).
We'll first explore questions of the 1st type using simulation: knowing the model, what is the probability of seeing certain data?
## Simulating probabilities
* Let's say that a website has a CTR of 50%, i.e. that 50% of people click through. If we picked 1000 people at random from the population, how likely would it be to find that a certain number of people click?
We can simulate this using `numpy`'s random number generator.
To do so, first note we can use `np.random.rand()` to randomly select floats between 0 and 1 (known as the _uniform distribution_). Below, we do so and plot a histogram:
```
# Draw 1,000 samples from uniform & plot results
x = ___
___;
```
To then simulate the sampling from the population, we check whether each float was greater or less than 0.5. If less than or equal to 0.5, we say the person clicked.
```
# Computed how many people click
clicks = ___
n_clicks = ___
f"Number of clicks = {n_clicks}"
```
The proportion of people who clicked can be calculated as the total number of clicks over the number of people:
```
# Computed proportion of people who clicked
f"Proportion who clicked = {___}"
```
**Discussion**: Did you get the same answer as your neighbour? If you did, why? If not, why not?
**Up for discussion:** Let's say that all you had was this data and you wanted to figure out the CTR (probability of clicking).
* What would your estimate be?
* Bonus points: how confident would you be of your estimate?
**Note:** Although, in the above, we have described _probability_ in two ways, we have not described it mathematically. We're not going to do so rigorously here, but we will say that _probability_ defines a function from the space of possibilities (in the above, the interval $[0,1]$) that describes how likely it is to get a particular point or region in that space. Mike Betancourt has an elegant [Introduction to Probability Theory (For Scientists and Engineers)](https://betanalpha.github.io/assets/case_studies/probability_theory.html) that I can recommend.
### Hands-on: clicking
Use random sampling to simulate how many people click when the CTR is 0.7. How many click? What proportion?
```
# Solution
clicks = ___
n_clicks = ___
print(f"Number of clicks = {___}")
print(f"Proportion who clicked = {___}")
```
_Discussion point_: This model is known as the biased coin flip.
- Can you see why?
- Can it be used to model other phenomena?
### Galapagos finch beaks
You can also calculate such proportions with real-world data. Here we import a dataset of Finch beak measurements from the Galápagos islands. You can find the data [here](https://datadryad.org/resource/doi:10.5061/dryad.9gh90).
```
# Import and view head of data
df_12 = pd.read_csv('../data/finch_beaks_2012.csv')
df_12.head()
# Store lengths in a pandas series
lengths = ___
```
* What proportion of birds have a beak length > 10 ?
```
p = ___
p
```
**Note:** This is the proportion of birds that have beak length $>10$ in your empirical data, not the probability that any bird drawn from the population will have beak length $>10$.
### Proportion: A proxy for probability
As stated above, we have calculated a proportion, not a probability. As a proxy for the probability, we can simulate drawing random samples (with replacement) from the data seeing how many lengths are > 10 and calculating the proportion (commonly referred to as [hacker statistics](https://speakerdeck.com/jakevdp/statistics-for-hackers)):
```
n_samples = 10000
___
```
### Another way to simulate coin-flips
In the above, you have used the uniform distribution to sample from a series of biased coin flips. I want to introduce you to another distribution that you can also use to do so: the **binomial distribution**.
The **binomial distribution** with parameters $n$ and $p$ is defined as the probability distribution of
> the number of heads seen when flipping a coin $n$ times with $p(heads)=p$.
**Note** that this distribution essentially tells the **story** of a general model in the following sense: if we believe that the underlying process generating the observed data has a binary outcome (affected by disease or not, head or not, 0 or 1, clicked through or not), and that one of the two outcomes occurs with probability $p$, then the probability of seeing a particular outcome is given by the **binomial distribution** with parameters $n$ and $p$.
Any process that matches the coin flip story is a Binomial process (note that you'll see such coin flips also referred to as Bernoulli trials in the literature). So we can also formulate the story of the Binomial distribution as
> the number $r$ of successes in $n$ Bernoulli trials with probability $p$ of success, is Binomially distributed.
We'll now use the binomial distribution to answer the same question as above:
* If P(heads) = 0.7 and you flip the coin ten times, how many heads will come up?
We'll also set the seed to ensure reproducible results.
```
# Set seed
___
# Simulate one run of flipping the biased coin 10 times
___
```
### Simulating many times to get the distribution
In the above, we have simulated the scenario once. But this only tells us one potential outcome. To see how likely it is to get $n$ heads, for example, we need to simulate it a lot of times and check what proportion ended up with $n$ heads.
```
# Simulate 1,000 run of flipping the biased coin 10 times
x = ___
# Plot normalized histogram of results
plt.hist(x, density=True, bins=10);
```
* Group chat: what do you see in the above?
### Hands-on: Probabilities
- If I flip a biased coin ($P(H)=0.3$) 20 times, what is the probability of 5 or more heads?
```
# Calculate the probability of 5 or more heads for p=0.3
___
```
- If I flip a fair coin 20 times, what is the probability of 5 or more heads?
```
# Calculate the probability of 5 or more heads for p=0.5
___
```
- Plot the normalized histogram of number of heads of the following experiment: flipping a fair coin 10 times.
```
# Plot histogram
x = ___
___;
```
**Note:** you may have noticed that the _binomial distribution_ can take on only a finite number of values, whereas the _uniform distribution_ above can take on any number between $0$ and $1$. These are different enough cases to warrant special mention of this & two different names: the former is called a _probability mass function_ (PMF) and the latter a _probability distribution function_ (PDF). Time permitting, we may discuss some of the subtleties here. If not, all good texts will cover this. I like (Sivia & Skilling, 2006), among many others.
**Question:**
* Looking at the histogram, can you tell me the probability of seeing 4 or more heads?
Enter the ECDF.
## Empirical cumulative distribution functions (ECDFs)
An ECDF is, as an alternative to a histogram, a way to visualize univariate data that is rich in information. It allows you to visualize all of your data and, by doing so, avoids the very real problem of binning.
- can plot control plus experiment
- data plus model!
- many populations
- can see multimodality (though less pronounced) -- a mode becomes a point of inflexion!
- can read off so much: e.g. percentiles.
See Eric Ma's great post on ECDFS [here](https://ericmjl.github.io/blog/2018/7/14/ecdfs/) AND [this twitter thread](https://twitter.com/allendowney/status/1019171696572583936) (thanks, Allen Downey!).
So what is this ECDF?
**Definition:** In an ECDF, the x-axis is the range of possible values for the data & for any given x-value, the corresponding y-value is the proportion of data points less than or equal to that x-value.
Let's define a handy ECDF function that takes in data and outputs $x$ and $y$ data for the ECDF.
```
def ecdf(data):
    """Compute the empirical CDF of a one-dimensional array of measurements.

    Returns a pair ``(x, y)`` where ``x`` is the data sorted ascending and
    ``y[i]`` is the fraction of observations less than or equal to ``x[i]``.
    """
    sorted_vals = np.sort(data)
    n_points = len(data)
    # Cumulative fractions 1/n, 2/n, ..., n/n matched to the sorted values.
    fractions = np.arange(1, n_points + 1) / n_points
    return sorted_vals, fractions
```
### Hands-on: Plotting ECDFs
Plot the ECDF for the previous hands-on exercise. Read the answer to the following question off the ECDF: what is the probability of seeing 4 or more heads?
```
# Generate x- and y-data for the ECDF
x_flips, y_flips = ___
# Plot the ECDF
___;
```
## Probability distributions and their stories
**Credit:** Thank you to [Justin Bois](http://bois.caltech.edu/) for countless hours of discussion, work and collaboration on thinking about probability distributions and their stories. All of the following is inspired by Justin & his work, if not explicitly drawn from.
___
In the above, we saw that we could match data-generating processes with binary outcomes to the story of the binomial distribution.
> The Binomial distribution's story is as follows: the number $r$ of successes in $n$ Bernoulli trials with probability $p$ of success, is Binomially distributed.
There are many other distributions with stories also!
### Poisson processes and the Poisson distribution
In the book [Information Theory, Inference and Learning Algorithms](https://www.amazon.com/Information-Theory-Inference-Learning-Algorithms/dp/0521642981) David MacKay tells the tale of a town called Poissonville, in which the buses have an odd schedule. Standing at a bus stop in Poissonville, the amount of time you have to wait for a bus is totally independent of when the previous bus arrived. This means you could watch a bus drive off and another arrive almost instantaneously, or you could be waiting for hours.
Arrival of buses in Poissonville is what we call a Poisson process. The timing of the next event is completely independent of when the previous event happened. Many real-life processes behave in this way.
* natural births in a given hospital (there is a well-defined average number of natural births per year, and the timing of one birth is independent of the timing of the previous one);
* Landings on a website;
* Meteor strikes;
* Molecular collisions in a gas;
* Aviation incidents.
Any process that matches the buses in Poissonville **story** is a Poisson process.
The number of arrivals of a Poisson process in a given amount of time is Poisson distributed. The Poisson distribution has one parameter, the average number of arrivals in a given length of time. So, to match the story, we could consider the number of hits on a website in an hour with an average of six hits per hour. This is Poisson distributed.
```
# Generate Poisson-distributed data
samples = ___
# Plot histogram
___;
```
**Question:** Does this look like anything to you?
In fact, the Poisson distribution is the limit of the Binomial distribution for low probability of success and large number of trials, that is, for rare events.
To see this, think about the stories. Picture this: you're doing a Bernoulli trial once a minute for an hour, each with a success probability of 0.05. We would do 60 trials, and the number of successes is Binomially distributed, and we would expect to get about 3 successes. This is just like the Poisson story of seeing 3 buses on average arrive in a given interval of time. Thus the Poisson distribution with arrival rate equal to np approximates a Binomial distribution for n Bernoulli trials with probability p of success (with n large and p small). This is useful because the Poisson distribution can be simpler to work with as it has only one parameter instead of two for the Binomial distribution.
#### Hands-on: Poisson
Plot the ECDF of the Poisson-distributed data that you generated above.
```
# Generate x- and y-data for the ECDF
x_p, y_p = ___
# Plot the ECDF
___;
```
#### Example Poisson distribution: field goals attempted per game
This section is explicitly taken from the great work of Justin Bois. You can find more [here](https://github.com/justinbois/dataframed-plot-examples/blob/master/lebron_field_goals.ipynb).
Let's first remind ourselves of the story behind the Poisson distribution.
> The number of arrivals of a Poisson process in a given set time interval is Poisson distributed.
To quote Justin Bois:
> We could model field goal attempts in a basketball game using a Poisson distribution. When a player takes a shot is a largely stochastic process, being influenced by the myriad ebbs and flows of a basketball game. Some players shoot more than others, though, so there is a well-defined rate of shooting. Let's consider LeBron James's field goal attempts for the 2017-2018 NBA season.
First thing's first, the data ([from here](https://www.basketball-reference.com/players/j/jamesle01/gamelog/2018)):
```
fga = [19, 16, 15, 20, 20, 11, 15, 22, 34, 17, 20, 24, 14, 14,
24, 26, 14, 17, 20, 23, 16, 11, 22, 15, 18, 22, 23, 13,
18, 15, 23, 22, 23, 18, 17, 22, 17, 15, 23, 8, 16, 25,
18, 16, 17, 23, 17, 15, 20, 21, 10, 17, 22, 20, 20, 23,
17, 18, 16, 25, 25, 24, 19, 17, 25, 20, 20, 14, 25, 26,
29, 19, 16, 19, 18, 26, 24, 21, 14, 20, 29, 16, 9]
```
To show that LeBron's attempts are ~ Poisson distributed, you're now going to plot the ECDF and compare it with the ECDF of the Poisson distribution that has the mean of the data (technically, this is the maximum likelihood estimate).
#### Hands-on: Simulating Data Generating Stories
Generate the x and y values for the ECDF of LeBron's field attempt goals.
```
# Generate x & y data for ECDF
x_ecdf, y_ecdf = ___
```
Now we'll draw samples out of a Poisson distribution to get the theoretical ECDF, plot it with the ECDF of the data and see how they look.
```
# Number of times we simulate the model
n_reps = 1000
# Plot ECDF of data
plt.plot(x_ecdf, y_ecdf, '.', color='black');
# Plot ECDF of model
for _ in range(n_reps):
samples = ___
x_theor, y_theor = ___
plt.plot(x_theor, y_theor, '.', alpha=0.01, color='lightgray');
# Label your axes
plt.xlabel('field goal attempts')
plt.ylabel('ECDF')
```
You can see from the ECDF that LeBron's field goal attempts per game are Poisson distributed.
### Exponential distribution
We've encountered a variety of named _discrete distributions_. There are also named _continuous distributions_, such as the Exponential distribution and the Normal (or Gaussian) distribution. To see what the story of the Exponential distribution is, let's return to Poissonville, in which the number of buses that will arrive per hour are Poisson distributed.
However, the waiting time between arrivals of a Poisson process are exponentially distributed.
So: the exponential distribution has the following story: the waiting time between arrivals of a Poisson process are exponentially distributed. It has a single parameter, the mean waiting time. This distribution is not peaked, as we can see from its PDF.
For an illustrative example, lets check out the time between all incidents involving nuclear power since 1974. It's a reasonable first approximation to expect incidents to be well-modeled by a Poisson process, which means the timing of one incident is independent of all others. If this is the case, the time between incidents should be Exponentially distributed.
To see if this story is credible, we can plot the ECDF of the data with the CDF that we'd get from an exponential distribution with the sole parameter, the mean, given by the mean inter-incident time of the data.
```
# Load nuclear power accidents data & create array of inter-incident times
df = pd.read_csv('../data/nuclear_power_accidents.csv')
df.Date = pd.to_datetime(df.Date)
df = df[df.Date >= pd.to_datetime('1974-01-01')]
inter_times = np.diff(np.sort(df.Date)).astype(float) / 1e9 / 3600 / 24
# Compute mean and sample from exponential
mean = ___
samples = ___
# Compute ECDFs for sample & model
x, y = ___
x_theor, y_theor = ___
# Plot sample & model ECDFs
___;
plt.plot(x, y, marker='.', linestyle='none');
```
We see that the data is close to being Exponentially distributed, which means that we can model the nuclear incidents as a Poisson process.
### Normal distribution
The Normal distribution, also known as the Gaussian or Bell Curve, appears everywhere. There are many reasons for this. One is the following:
> When doing repeated measurements, we expect them to be Normally distributed, owing to the many subprocesses that contribute to a measurement. This is because (a formulation of the Central Limit Theorem) **any quantity that emerges as the sum of a large number of subprocesses tends to be Normally distributed** provided none of the subprocesses is very broadly distributed.
Now it's time to see if this holds for the measurements of the speed of light in the famous Michelson–Morley experiment:
Below, I'll plot the histogram with a Gaussian curve fitted to it. Even if that looks good, though, that could be due to binning bias. So then you'll plot the ECDF of the data and the CDF of the model!
```
# Load data, plot histogram
import scipy.stats as st
df = pd.read_csv('../data/michelson_speed_of_light.csv')
df = df.rename(columns={'velocity of light in air (km/s)': 'c'})
c = df.c.values
x_s = np.linspace(299.6, 300.1, 400) * 1000
plt.plot(x_s, st.norm.pdf(x_s, c.mean(), c.std(ddof=1)))
plt.hist(c, bins=9, density=True)
plt.xlabel('speed of light (km/s)')
plt.ylabel('PDF')
```
#### Hands-on: Simulating Normal
```
# Get speed of light measurement + mean & standard deviation
michelson_speed_of_light = df.c.values
mean = np.mean(michelson_speed_of_light)
std = np.std(michelson_speed_of_light, ddof=1)
# Generate normal samples w/ mean, std of data
samples = ___
# Generate data ECDF & model CDF
x, y = ___
x_theor, y_theor = ___
# Plot data & model (E)CDFs
___;
___;
plt.xlabel('speed of light (km/s)')
plt.ylabel('CDF')
```
Some of you may ask but is the data really normal? I urge you to check out Allen Downey's post [_Are your data normal? Hint: no._ ](http://allendowney.blogspot.com/2013/08/are-my-data-normal.html)
| github_jupyter |
### [Intuition] Test how the algorithm reacts to differently scaled input
Investigate numerical stability... just from plain eyesight, this does not seem to affect the hypothesis in the constant case.
```
import os, sys
os.chdir(os.path.join('..','..','..')) # set working directory to root of github repo
%matplotlib inline
import matplotlib.pyplot as plt
from active_learning.evaluation import *
# rate according to different constant values
algorithm = UncertaintySamplingAlgorithm
algo_params_length = {'hypothesis': 'Gaussian Process', 'hypothesis_params':{'transformation':'length'}}
algo_params_tfidf = {'hypothesis': 'Gaussian Process', 'hypothesis_params':{'transformation':'tfidf'}}
def rating_func_constant(c):
    """Build a rating function that ignores its input and always returns c."""
    def rate(_sample):
        return c
    return rate
# Sweep over constant oracle ratings: for each constant c, run uncertainty
# sampling with both feature transformations and overlay their MSE curves.
for c in [0.01,0.02,1,2,4,8,16,32,64]:
    rating_func = rating_func_constant(c)
    # Evaluate with the 'length' transformation on the Rotten Tomato dataset.
    stats_length = Evaluator(algorithm=algorithm, algo_params=algo_params_length,
                             oracle=FunctionalOracle, oracle_params={'rating_func':rating_func},
                             batch_size=1, dataset_name='Rotten Tomato').compute()
    # Same evaluation with the 'tfidf' transformation.
    stats_tfidf = Evaluator(algorithm=algorithm, algo_params=algo_params_tfidf,
                            oracle=FunctionalOracle, oracle_params={'rating_func':rating_func},
                            batch_size=1, dataset_name='Rotten Tomato').compute()
    print("Constant: {}".format(c))
    # Plot both MSE series on one figure for visual comparison.
    stats_length['mse'].plot()
    stats_tfidf['mse'].plot()
    plt.show()
# rate according to the length of the path multiplied by a constant
algorithm = UncertaintySamplingAlgorithm
algo_params_length = {'hypothesis': 'Gaussian Process', 'hypothesis_params':{'transformation':'length'}}
algo_params_tfidf = {'hypothesis': 'Gaussian Process', 'hypothesis_params':{'transformation':'tfidf'}}
def rating_func_length_for_constant(c):
    """Build a rating function that scores an item as its length times c."""
    def rate(item):
        return len(item) * c
    return rate
# Sweep over length-proportional oracle ratings: for each multiplier c the
# oracle rates an item as len(item) * c; compare both transformations.
for c in [0.01,0.02,1,2,4,8,16,32,64]:
    rating_func = rating_func_length_for_constant(c)
    # Evaluate with the 'length' transformation on the Rotten Tomato dataset.
    stats_length = Evaluator(algorithm=algorithm, algo_params=algo_params_length,
                             oracle=FunctionalOracle, oracle_params={'rating_func':rating_func},
                             batch_size=1, dataset_name='Rotten Tomato').compute()
    # Same evaluation with the 'tfidf' transformation.
    stats_tfidf = Evaluator(algorithm=algorithm, algo_params=algo_params_tfidf,
                            oracle=FunctionalOracle, oracle_params={'rating_func':rating_func},
                            batch_size=1, dataset_name='Rotten Tomato').compute()
    print("Constant: {}".format(c))
    # Plot both MSE series on one figure for visual comparison.
    stats_length['mse'].plot()
    stats_tfidf['mse'].plot()
    plt.show()
```
| github_jupyter |
# Assessment - Object-oriented programming
In this exercise, we'll create a few classes to simulate a server that's taking connections from the outside and then a load balancer that ensures that there are enough servers to serve those connections.
<br><br>
To represent the servers that are taking care of the connections, we'll use a Server class. Each connection is represented by an id, that could, for example, be the IP address of the computer connecting to the server. For our simulation, each connection creates a random amount of load in the server, between 1 and 10.
<br><br>
Run the following code that defines this Server class.
```
#Begin Portion 1#
import random
class Server:
    """A simulated server that tracks the load of its active connections."""

    def __init__(self):
        """Create a new server instance with no active connections."""
        # Maps connection_id -> the load generated by that connection.
        self.connections = {}

    def add_connection(self, connection_id):
        """Add a new connection to this server with a random load."""
        # Each connection contributes a random load in the interval [1, 11).
        self.connections[connection_id] = random.random() * 10 + 1

    def close_connection(self, connection_id):
        """Close a connection on this server."""
        del self.connections[connection_id]

    def load(self):
        """Return the total load across all active connections."""
        return sum(self.connections.values())

    def __str__(self):
        """Return this server's current load formatted as a percentage."""
        return "{:.2f}%".format(self.load())
#End Portion 1#
```
Now run the following cell to create a Server instance and add a connection to it, then check the load:
```
server = Server()
server.add_connection("192.168.1.1")
print(server.load())
```
After running the above code cell, if you get a **<font color =red>NameError</font>** message, be sure to run the Server class definition code block first.
The output should be 0. This is because some things are missing from the Server class. So, you'll need to go back and fill in the blanks to make it behave properly.
<br><br>
Go back to the Server class definition and fill in the missing parts for the `add_connection` and `load` methods to make the cell above print a number different than zero. As the load is calculated randomly, this number should be different each time the code is executed.
<br><br>
**Hint:** Recall that you can iterate through the values of your connections dictionary just as you would any sequence.
Great! If your output is a random number between 1 and 10, you have successfully coded the `add_connection` and `load` methods of the Server class. Well done!
<br><br>
What about closing a connection? Right now the `close_connection` method doesn't do anything. Go back to the Server class definition and fill in the missing code for the `close_connection` method to make the following code work correctly:
```
server.close_connection("192.168.1.1")
print(server.load())
```
You have successfully coded the `close_connection` method if the cell above prints 0.
<br><br>
**Hint:** Remember that `del` dictionary[key] removes the item with key *key* from the dictionary.
Alright, we now have a basic implementation of the server class. Let's look at the basic LoadBalancing class. This class will start with only one server available. When a connection gets added, it will randomly select a server to serve that connection, and then pass on the connection to the server. The LoadBalancing class also needs to keep track of the ongoing connections to be able to close them. This is the basic structure:
```
#Begin Portion 2#
class LoadBalancing:
    """Distribute incoming connections across a pool of Server instances.

    Starts with a single server. Whenever the average load across all
    servers rises above 50, a new server is added automatically.
    """

    def __init__(self):
        """Initialize the load balancing system with one server"""
        # Maps connection_id -> the Server instance handling that connection.
        self.connections = {}
        self.servers = [Server()]

    def add_connection(self, connection_id):
        """Randomly selects a server and adds a connection to it."""
        # Fix: the original also called the module-global `server` (adding the
        # connection twice) and the module-global `l` instead of `self`.
        chosen = random.choice(self.servers)
        # Remember which server handles this connection so we can close it later.
        self.connections[connection_id] = chosen
        # Add the connection to the selected server (it assigns a random load).
        chosen.add_connection(connection_id)
        # Spin up extra capacity if the pool is now overloaded.
        self.ensure_availability()

    def close_connection(self, connection_id):
        """Closes the connection on the server corresponding to connection_id."""
        # Fix: close on the server that was assigned to this connection, not
        # the unrelated module-global `server`.
        self.connections[connection_id].close_connection(connection_id)
        # Remove the connection from the load balancer.
        del self.connections[connection_id]

    def avg_load(self):
        """Calculates the average load of all servers"""
        # Fix: sum each server's load exactly once (the original summed per
        # connection, double-counting servers with multiple connections).
        total = sum(server.load() for server in self.servers)
        return total / len(self.servers)

    def ensure_availability(self):
        """If the average load is higher than 50, spin up a new server"""
        # Fix: threshold was 5, contradicting the documented 50% target;
        # also use `self` instead of the module-global `l`.
        if self.avg_load() > 50:
            self.servers.append(Server())

    def __str__(self):
        """Returns a string with the load for each server."""
        loads = [str(server) for server in self.servers]
        return "[{}]".format(",".join(loads))
#End Portion 2#
```
As with the Server class, this class is currently incomplete. You need to fill in the gaps to make it work correctly. For example, this snippet should create a connection in the load balancer, assign it to a running server and then the load should be more than zero:
```
l = LoadBalancing()
l.add_connection("fdca:83d2::f20d")
print(l.avg_load())
```
After running the above code, the output is 0. Fill in the missing parts for the `add_connection` and `avg_load` methods of the LoadBalancing class to make this print the right load. Be sure that the load balancer now has an average load more than 0 before proceeding.
What if we add a new server?
```
l.servers.append(Server())
print(l.avg_load())
```
The average load should now be half of what it was before. If it's not, make sure you correctly fill in the missing gaps for the `add_connection` and `avg_load` methods so that this code works correctly.
<br><br>
**Hint:** You can iterate through the all servers in the *self.servers* list to get the total server load amount and then divide by the length of the *self.servers* list to compute the average load amount.
Fantastic! Now what about closing the connection?
```
l.close_connection("fdca:83d2::f20d")
print(l.avg_load())
```
Fill in the code of the LoadBalancing class to make the load go back to zero once the connection is closed.
<br><br>
Great job! Before, we added a server manually. But we want this to happen automatically when the average load is more than 50%. To make this possible, fill in the missing code for the `ensure_availability` method and call it from the `add_connection` method after a connection has been added. You can test it with the following code:
```
for connection in range(20):
l.add_connection(connection)
print(l)
```
The code above adds 20 new connections and then prints the loads for each server in the load balancer. If you coded correctly, new servers should have been added automatically to ensure that the average load of all servers is not more than 50%.
<br><br>
Run the following code to verify that the average load of the load balancer is not more than 50%.
```
print(l.avg_load())
```
Awesome! If the average load is indeed less than 50%, you are all done with this assessment.
| github_jupyter |
# Operating on Data in Pandas
One of the essential pieces of NumPy is the ability to perform quick element-wise operations, both with basic arithmetic (addition, subtraction, multiplication, etc.) and with more sophisticated operations (trigonometric functions, exponential and logarithmic functions, etc.).
Pandas inherits much of this functionality from NumPy, and the ufuncs that we introduced in [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) are key to this.
Pandas includes a couple useful twists, however: for unary operations like negation and trigonometric functions, these ufuncs will *preserve index and column labels* in the output, and for binary operations such as addition and multiplication, Pandas will automatically *align indices* when passing the objects to the ufunc.
This means that keeping the context of data and combining data from different sources–both potentially error-prone tasks with raw NumPy arrays–become essentially foolproof ones with Pandas.
We will additionally see that there are well-defined operations between one-dimensional ``Series`` structures and two-dimensional ``DataFrame`` structures.
## Ufuncs: Index Preservation
Because Pandas is designed to work with NumPy, any NumPy ufunc will work on Pandas ``Series`` and ``DataFrame`` objects.
Let's start by defining a simple ``Series`` and ``DataFrame`` on which to demonstrate this:
```
import pandas as pd
import numpy as np
rng = np.random.RandomState(42)
ser = pd.Series(rng.randint(0, 10, 4))
ser
df = pd.DataFrame(rng.randint(0, 10, (3, 4)),
columns=['A', 'B', 'C', 'D'])
df
```
If we apply a NumPy ufunc on either of these objects, the result will be another Pandas object *with the indices preserved:*
```
np.exp(ser)
```
Or, for a slightly more complex calculation:
```
np.sin(df * np.pi / 4)
```
Any of the ufuncs discussed in [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) can be used in a similar manner.
## UFuncs: Index Alignment
For binary operations on two ``Series`` or ``DataFrame`` objects, Pandas will align indices in the process of performing the operation.
This is very convenient when working with incomplete data, as we'll see in some of the examples that follow.
### Index alignment in Series
As an example, suppose we are combining two different data sources, and find only the top three US states by *area* and the top three US states by *population*:
```
area = pd.Series({'Alaska': 1723337, 'Texas': 695662,
'California': 423967}, name='area')
population = pd.Series({'California': 38332521, 'Texas': 26448193,
'New York': 19651127}, name='population')
```
Let's see what happens when we divide these to compute the population density:
```
population / area
```
The resulting array contains the *union* of indices of the two input arrays, which could be determined using standard Python set arithmetic on these indices:
```
area.index | population.index
```
Any item for which one or the other does not have an entry is marked with ``NaN``, or "Not a Number," which is how Pandas marks missing data (see further discussion of missing data in [Handling Missing Data](03.04-Missing-Values.ipynb)).
This index matching is implemented this way for any of Python's built-in arithmetic expressions; any missing values are filled in with NaN by default:
```
A = pd.Series([2, 4, 6], index=[0, 1, 2])
B = pd.Series([1, 3, 5], index=[1, 2, 3])
A + B
```
If using NaN values is not the desired behavior, the fill value can be modified using appropriate object methods in place of the operators.
For example, calling ``A.add(B)`` is equivalent to calling ``A + B``, but allows optional explicit specification of the fill value for any elements in ``A`` or ``B`` that might be missing:
```
A.add(B, fill_value=0)
```
### Index alignment in DataFrame
A similar type of alignment takes place for *both* columns and indices when performing operations on ``DataFrame``s:
```
A = pd.DataFrame(rng.randint(0, 20, (2, 2)),
columns=list('AB'))
A
B = pd.DataFrame(rng.randint(0, 10, (3, 3)),
columns=list('BAC'))
B
A + B
```
Notice that indices are aligned correctly irrespective of their order in the two objects, and indices in the result are sorted.
As was the case with ``Series``, we can use the associated object's arithmetic method and pass any desired ``fill_value`` to be used in place of missing entries.
Here we'll fill with the mean of all values in ``A`` (computed by first stacking the rows of ``A``):
```
fill = A.stack().mean()
A.add(B, fill_value=fill)
```
The following table lists Python operators and their equivalent Pandas object methods:
| Python Operator | Pandas Method(s) |
|-----------------|---------------------------------------|
| ``+`` | ``add()`` |
| ``-`` | ``sub()``, ``subtract()`` |
| ``*`` | ``mul()``, ``multiply()`` |
| ``/`` | ``truediv()``, ``div()``, ``divide()``|
| ``//`` | ``floordiv()`` |
| ``%`` | ``mod()`` |
| ``**`` | ``pow()`` |
## Ufuncs: Operations Between DataFrame and Series
When performing operations between a ``DataFrame`` and a ``Series``, the index and column alignment is similarly maintained.
Operations between a ``DataFrame`` and a ``Series`` are similar to operations between a two-dimensional and one-dimensional NumPy array.
Consider one common operation, where we find the difference of a two-dimensional array and one of its rows:
```
A = rng.randint(10, size=(3, 4))
A
A - A[0]
```
According to NumPy's broadcasting rules (see [Computation on Arrays: Broadcasting](02.05-Computation-on-arrays-broadcasting.ipynb)), subtraction between a two-dimensional array and one of its rows is applied row-wise.
In Pandas, the convention similarly operates row-wise by default:
```
df = pd.DataFrame(A, columns=list('QRST'))
df - df.iloc[0]
```
If you would instead like to operate column-wise, you can use the object methods mentioned earlier, while specifying the ``axis`` keyword:
```
df.subtract(df['R'], axis=0)
```
Note that these ``DataFrame``/``Series`` operations, like the operations discussed above, will automatically align indices between the two elements:
```
halfrow = df.iloc[0, ::2]
halfrow
df - halfrow
```
| github_jupyter |
# Bayesian Parametric Survival Analysis with PyMC3
```
import warnings
import arviz as az
import numpy as np
import pymc3 as pm
import scipy as sp
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from statsmodels import datasets
from theano import shared
from theano import tensor as tt
print(f"Running on PyMC3 v{pm.__version__}")
warnings.filterwarnings("ignore")
%config InlineBackend.figure_format = 'retina'
az.style.use("arviz-darkgrid")
```
[Survival analysis](https://en.wikipedia.org/wiki/Survival_analysis) studies the distribution of the time between when a subject comes under observation and when that subject experiences an event of interest. One of the fundamental challenges of survival analysis (which also makes is mathematically interesting) is that, in general, not every subject will experience the event of interest before we conduct our analysis. In more concrete terms, if we are studying the time between cancer treatment and death (as we will in this post), we will often want to analyze our data before every subject has died. This phenomenon is called <a href="https://en.wikipedia.org/wiki/Censoring_(statistics)">censoring</a> and is fundamental to survival analysis.
I have previously [written](http://austinrochford.com/posts/2015-10-05-bayes-survival.html) about Bayesian survival analysis using the [semiparametric](https://en.wikipedia.org/wiki/Semiparametric_model) [Cox proportional hazards model](https://en.wikipedia.org/wiki/Proportional_hazards_model#The_Cox_model). Implementing that semiparametric model in PyMC3 involved some fairly complex `numpy` code and nonobvious probability theory equivalences. This post illustrates a parametric approach to Bayesian survival analysis in PyMC3. Parametric models of survival are simpler to both implement and understand than semiparametric models; statistically, they are also more [powerful](https://en.wikipedia.org/wiki/Statistical_power) than non- or semiparametric methods _when they are correctly specified_. This post will not further cover the differences between parametric and nonparametric models or the various methods for chosing between them.
As in the previous post, we will analyze [mastectomy data](https://vincentarelbundock.github.io/Rdatasets/doc/HSAUR/mastectomy.html) from `R`'s [`HSAUR`](https://cran.r-project.org/web/packages/HSAUR/index.html) package. First, we load the data.
```
sns.set()
blue, green, red, purple, gold, teal = sns.color_palette(n_colors=6)
pct_formatter = StrMethodFormatter("{x:.1%}")
df = datasets.get_rdataset("mastectomy", "HSAUR", cache=True).data.assign(
metastized=lambda df: 1.0 * (df.metastized == "yes"), event=lambda df: 1.0 * df.event
)
df.head()
```
The column `time` represents the survival time for a breast cancer patient after a mastectomy, measured in months. The column `event` indicates whether or not the observation is censored. If `event` is one, the patient's death was observed during the study; if `event` is zero, the patient lived past the end of the study and their survival time is censored. The column `metastized` indicates whether the cancer had [metastized](https://en.wikipedia.org/wiki/Metastasis) prior to the mastectomy. In this post, we will use Bayesian parametric survival regression to quantify the difference in survival times for patients whose cancer had and had not metastized.
## Accelerated failure time models
[Accelerated failure time models](https://en.wikipedia.org/wiki/Accelerated_failure_time_model) are the most common type of parametric survival regression models. The fundamental quantity of survival analysis is the [survival function](https://en.wikipedia.org/wiki/Survival_function); if $T$ is the random variable representing the time to the event in question, the survival function is $S(t) = P(T > t)$. Accelerated failure time models incorporate covariates $\mathbf{x}$ into the survival function as
$$S(t\ |\ \beta, \mathbf{x}) = S_0\left(\exp\left(\beta^{\top} \mathbf{x}\right) \cdot t\right),$$
where $S_0(t)$ is a fixed baseline survival function. These models are called "accelerated failure time" because, when $\beta^{\top} \mathbf{x} > 0$, $\exp\left(\beta^{\top} \mathbf{x}\right) \cdot t > t$, so the effect of the covariates is to accelerate the _effective_ passage of time for the individual in question. The following plot illustrates this phenomenon using an exponential survival function.
```
S0 = sp.stats.expon.sf
fig, ax = plt.subplots(figsize=(8, 6))
t = np.linspace(0, 10, 100)
ax.plot(t, S0(5 * t), label=r"$\beta^{\top} \mathbf{x} = \log\ 5$")
ax.plot(t, S0(2 * t), label=r"$\beta^{\top} \mathbf{x} = \log\ 2$")
ax.plot(t, S0(t), label=r"$\beta^{\top} \mathbf{x} = 0$ ($S_0$)")
ax.plot(t, S0(0.5 * t), label=r"$\beta^{\top} \mathbf{x} = -\log\ 2$")
ax.plot(t, S0(0.2 * t), label=r"$\beta^{\top} \mathbf{x} = -\log\ 5$")
ax.set_xlim(0, 10)
ax.set_xlabel(r"$t$")
ax.yaxis.set_major_formatter(pct_formatter)
ax.set_ylim(-0.025, 1)
ax.set_ylabel(r"Survival probability, $S(t\ |\ \beta, \mathbf{x})$")
ax.legend(loc=1)
ax.set_title("Accelerated failure times");
```
Accelerated failure time models are equivalent to log-linear models for $T$,
$$Y = \log T = \beta^{\top} \mathbf{x} + \varepsilon.$$
A choice of distribution for the error term $\varepsilon$ determines baseline survival function, $S_0$, of the accelerated failure time model. The following table shows the correspondence between the distribution of $\varepsilon$ and $S_0$ for several common accelerated failure time models.
<center>
<table border="1">
<tr>
<th>Log-linear error distribution ($\varepsilon$)</th>
<th>Baseline survival function ($S_0$)</th>
</tr>
<tr>
<td>[Normal](https://en.wikipedia.org/wiki/Normal_distribution)</td>
<td>[Log-normal](https://en.wikipedia.org/wiki/Log-normal_distribution)</td>
</tr>
<tr>
<td>Extreme value ([Gumbel](https://en.wikipedia.org/wiki/Gumbel_distribution))</td>
<td>[Weibull](https://en.wikipedia.org/wiki/Weibull_distribution)</td>
</tr>
<tr>
<td>[Logistic](https://en.wikipedia.org/wiki/Logistic_distribution)</td>
<td>[Log-logistic](https://en.wikipedia.org/wiki/Log-logistic_distribution)</td>
</tr>
</table>
</center>
Accelerated failure time models are conventionally named after their baseline survival function, $S_0$. The rest of this post will show how to implement Weibull and log-logistic survival regression models in PyMC3 using the mastectomy data.
### Weibull survival regression
In this example, the covariates are $\mathbf{x}_i = \left(1\ x^{\textrm{met}}_i\right)^{\top}$, where
$$
\begin{align*}
x^{\textrm{met}}_i
& = \begin{cases}
0 & \textrm{if the } i\textrm{-th patient's cancer had not metastized} \\
1 & \textrm{if the } i\textrm{-th patient's cancer had metastized}
\end{cases}.
\end{align*}
$$
We construct the matrix of covariates $\mathbf{X}$.
```
n_patient, _ = df.shape
X = np.empty((n_patient, 2))
X[:, 0] = 1.0
X[:, 1] = df.metastized
```
We place independent, vague normal prior distributions on the regression coefficients,
$$\beta \sim N(0, 5^2 I_2).$$
```
VAGUE_PRIOR_SD = 5.0
with pm.Model() as weibull_model:
β = pm.Normal("β", 0.0, VAGUE_PRIOR_SD, shape=2)
```
The covariates, $\mathbf{x}$, affect value of $Y = \log T$ through $\eta = \beta^{\top} \mathbf{x}$.
```
X_ = shared(X)
with weibull_model:
η = β.dot(X_.T)
```
For Weibull regression, we use
$$
\begin{align*}
\varepsilon
& \sim \textrm{Gumbel}(0, s) \\
s
& \sim \textrm{HalfNormal(5)}.
\end{align*}
$$
```
with weibull_model:
s = pm.HalfNormal("s", 5.0)
```
We are nearly ready to specify the likelihood of the observations given these priors. Before doing so, we transform the observed times to the log scale and standardize them.
```
y = np.log(df.time.values)
y_std = (y - y.mean()) / y.std()
```
The likelihood of the data is specified in two parts, one for uncensored samples, and one for censored samples. Since $Y = \eta + \varepsilon$, and $\varepsilon \sim \textrm{Gumbel}(0, s)$, $Y \sim \textrm{Gumbel}(\eta, s)$. For the uncensored survival times, the likelihood is implemented as
```
cens = df.event.values == 0.0
cens_ = shared(cens)
with weibull_model:
y_obs = pm.Gumbel("y_obs", η[~cens_], s, observed=y_std[~cens])
```
For censored observations, we only know that their true survival time exceeded the total time that they were under observation. This probability is given by the survival function of the Gumbel distribution,
$$P(Y \geq y) = 1 - \exp\left(-\exp\left(-\frac{y - \mu}{s}\right)\right).$$
This survival function is implemented below.
```
def gumbel_sf(y, μ, σ):
    """Survival function P(Y >= y) of a Gumbel(μ, σ) distribution.

    Mathematically equal to ``1 - exp(-exp(-(y - μ) / σ))``, but written with
    ``expm1`` so that precision is kept when ``exp(-(y - μ) / σ)`` is tiny
    (i.e. for large standardized ``y``), where the naive ``1 - exp(-x)`` form
    would round to exactly 0 and yield a degenerate likelihood contribution.
    """
    # -expm1(-x) == 1 - exp(-x), computed without cancellation for small x.
    return -tt.expm1(-tt.exp(-(y - μ) / σ))
```
We now specify the likelihood for the censored observations.
```
with weibull_model:
y_cens = pm.Potential("y_cens", gumbel_sf(y_std[cens], η[cens_], s))
```
We now sample from the model.
```
SEED = 845199 # from random.org, for reproducibility
SAMPLE_KWARGS = {"chains": 3, "tune": 1000, "random_seed": [SEED, SEED + 1, SEED + 2]}
with weibull_model:
weibull_trace = pm.sample(**SAMPLE_KWARGS)
```
The energy plot and Bayesian fraction of missing information give no cause for concern about poor mixing in NUTS.
```
az.plot_energy(weibull_trace);
az.bfmi(weibull_trace)
```
The Gelman-Rubin statistics also indicate convergence.
```
max(np.max(gr_stats) for gr_stats in az.rhat(weibull_trace).values())
```
Below we plot posterior distributions of the parameters.
```
az.plot_posterior(weibull_trace, lw=0, alpha=0.5);
```
These are somewhat interesting (especially the fact that the posterior of $\beta_1$ is fairly well-separated from zero), but the posterior predictive survival curves will be much more interpretable.
The advantage of using [`theano.shared`](http://deeplearning.net/software/theano_versions/dev/library/compile/shared.html) variables is that we can now change their values to perform posterior predictive sampling. For posterior prediction, we set $X$ to have two rows, one for a subject whose cancer had not metastized and one for a subject whose cancer had metastized. Since we want to predict actual survival times, none of the posterior predictive rows are censored.
```
X_pp = np.empty((2, 2))
X_pp[:, 0] = 1.0
X_pp[:, 1] = [0, 1]
X_.set_value(X_pp)
cens_pp = np.repeat(False, 2)
cens_.set_value(cens_pp)
with weibull_model:
pp_weibull_trace = pm.sample_posterior_predictive(weibull_trace, samples=1500)
```
The posterior predictive survival times show that, on average, patients whose cancer had not metastized survived longer than those whose cancer had metastized.
```
t_plot = np.linspace(0, 230, 100)
weibull_pp_surv = np.greater_equal.outer(
np.exp(y.mean() + y.std() * pp_weibull_trace["y_obs"]), t_plot
)
weibull_pp_surv_mean = weibull_pp_surv.mean(axis=0)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(t_plot, weibull_pp_surv_mean[0], c=blue, label="Not metastized")
ax.plot(t_plot, weibull_pp_surv_mean[1], c=red, label="Metastized")
ax.set_xlim(0, 230)
ax.set_xlabel("Weeks since mastectomy")
ax.set_ylim(top=1)
ax.yaxis.set_major_formatter(pct_formatter)
ax.set_ylabel("Survival probability")
ax.legend(loc=1)
ax.set_title("Weibull survival regression model");
```
### Log-logistic survival regression
Other accelerated failure time models can be specified in a modular way by changing the prior distribution on $\varepsilon$. A log-logistic model corresponds to a [logistic](https://en.wikipedia.org/wiki/Logistic_distribution) prior on $\varepsilon$. Most of the model specification is the same as for the Weibull model above.
```
X_.set_value(X)
cens_.set_value(cens)
with pm.Model() as log_logistic_model:
β = pm.Normal("β", 0.0, VAGUE_PRIOR_SD, shape=2)
η = β.dot(X_.T)
s = pm.HalfNormal("s", 5.0)
```
We use the prior $\varepsilon \sim \textrm{Logistic}(0, s)$. The survival function of the logistic distribution is
$$P(Y \geq y) = 1 - \frac{1}{1 + \exp\left(-\left(\frac{y - \mu}{s}\right)\right)},$$
so we get the likelihood
```
def logistic_sf(y, μ, s):
    """Survival function P(Y >= y) of a Logistic(μ, s) distribution.

    Uses the exact identity ``1 - sigmoid(x) == sigmoid(-x)``, which avoids
    the catastrophic cancellation of the literal ``1.0 - sigmoid((y - μ) / s)``
    form when the sigmoid saturates near 1 (large standardized ``y``).
    """
    return pm.math.sigmoid(-(y - μ) / s)
with log_logistic_model:
y_obs = pm.Logistic("y_obs", η[~cens_], s, observed=y_std[~cens])
y_cens = pm.Potential("y_cens", logistic_sf(y_std[cens], η[cens_], s))
```
We now sample from the log-logistic model.
```
with log_logistic_model:
log_logistic_trace = pm.sample(**SAMPLE_KWARGS)
```
All of the sampling diagnostics look good for this model.
```
az.plot_energy(log_logistic_trace);
az.bfmi(log_logistic_trace)
max(np.max(gr_stats) for gr_stats in az.rhat(log_logistic_trace).values())
```
Again, we calculate the posterior expected survival functions for this model.
```
X_.set_value(X_pp)
cens_.set_value(cens_pp)
with log_logistic_model:
pp_log_logistic_trace = pm.sample_posterior_predictive(log_logistic_trace, samples=1500)
log_logistic_pp_surv = np.greater_equal.outer(
np.exp(y.mean() + y.std() * pp_log_logistic_trace["y_obs"]), t_plot
)
log_logistic_pp_surv_mean = log_logistic_pp_surv.mean(axis=0)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(t_plot, weibull_pp_surv_mean[0], c=blue, label="Weibull, not metastized")
ax.plot(t_plot, weibull_pp_surv_mean[1], c=red, label="Weibull, metastized")
ax.plot(t_plot, log_logistic_pp_surv_mean[0], "--", c=blue, label="Log-logistic, not metastized")
ax.plot(t_plot, log_logistic_pp_surv_mean[1], "--", c=red, label="Log-logistic, metastized")
ax.set_xlim(0, 230)
ax.set_xlabel("Weeks since mastectomy")
ax.set_ylim(top=1)
ax.yaxis.set_major_formatter(pct_formatter)
ax.set_ylabel("Survival probability")
ax.legend(loc=1)
ax.set_title("Weibull and log-logistic\nsurvival regression models");
```
This post has been a short introduction to implementing parametric survival regression models in PyMC3 with a fairly simple data set. The modular nature of probabilistic programming with PyMC3 should make it straightforward to generalize these techniques to more complex and interesting data sets.
## Authors
- Originally authored as a blog post by [Austin Rochford](https://austinrochford.com/posts/2017-10-02-bayes-param-survival.html) on October 2, 2017.
- Updated by [George Ho](https://eigenfoo.xyz/) on July 18, 2018.
```
%load_ext watermark
%watermark -n -u -v -iv -w
```
| github_jupyter |
<a href="https://colab.research.google.com/github/BreakoutMentors/Data-Science-and-Machine-Learning/blob/main/machine_learning/lesson%202%20-%20logistic%20regression/challenges/challenge_softmax_regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Softmax regression with Airbnb data
The goal in this challenge is to build a softmax regression classifier to predict the room type of New York City Airbnb listings using other features. Use PyTorch to build, train, and evaluate the model.
Challenges:
1. Load and prepare the Airbnb dataset.
2. Build the model.
3. Train the model.
4. Evaluate the model.
5. Draw conclusions.
```
# import the libraries we need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# PyTorch
import torch
import torch.nn
```
# 1. Load and prepare the data
Load the dataset into a pandas dataframe, and prepare it for the model.
Hints:
- Define the features ($\mathbf{x}$) and labels ($y$). You will probably want to use the Pandas `get_dummies` function to convert the `room_type` column to the proper numerical representation, think *one-hot encoding*. The model will predict whether the listing is an entire home/apartment, private room, or shared room
- Split the dataset into training and test sets.
- Separate the features and labels in training set and test set.
```
data_url = 'https://raw.githubusercontent.com/BreakoutMentors/Data-Science-and-Machine-Learning/main/datasets/AB_NYC_2019.csv'
# your code here
```
# 2. Build your model
Build a model to model the relationship between the features $x$ (multiple features) and labels $y$ (Type 1).
Hints:
- use the `nn.Linear` class to define your linear model layer
- define your loss function with the [`nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) class from PyTorch, which uses `nn.Softmax` activation function built-in so no need to use it in the model.
- configure the optimization algorithm with stochastic gradient descent
- track the accuracy metric
```
# your code here
class Logistic_Model(nn.Module):
# Constructor
def __init__(self, num_features, num_classes):
# Todo
# Forward Method
def forward(self, x):
# Todo
return x
num_features = #
num_classes = #
model = Logistic_Model(num_features, num_classes)
loss_fn = #
optimizer = #
```
# 3. Train your model
Now that you have a model, it's time to train it. Train your model for 100 epochs (i.e., iterations), and record the training, validation, and accuracy metrics in lists.
```
# your code here
```
Visualize the accuracy metric or crossentropy over the training process. Hint: create a line chart with the epoch (x) and the accuracy (y).
```
# your code here
```
# 4. Evaluate the model
Now that the model is trained, it's time to evaluate it using the test dataset, which you did not use when training the model. This gives you a sense of how well the model predicts unseen data, which is the case when you use it in the real world. Make sure to evaluate the model and visualize it's predictions against the true values.
Hints:
- Calculate test accuracy.
```
# your code here
```
# 5. Draw conclusions
Write up your conclusions about the model. Report the goal, the model design, and the results. Make sure to contextualize the model results as best you can.
| github_jupyter |
## xcube Data Store Framework - Climate Data Store
*Please checkout the general introduction to xcube data stores in the Jupyter Notebook [Getting Started](./1_getting_started.ipynb) before jumping into this notebook :)*
This notebook provides a walk-through demonstrating how to use xcube and the xcube plugin for the [Climate Data Store](https://cds.climate.copernicus.eu) (CDS) to read and explore temperature data from the CDS.
In order to run this notebook you need to install the `xcube_cds` plugin. You may do so by executing the following line in your terminal:
```
$ conda install -c conda-forge xcube-cds
```
Or you can install `xcube_cds` from sources by following the instructions on https://github.com/dcs4cop/xcube-cds.
**Please note:**
To access data from the Climate Data Store, you need a CDS API key. You can obtain the UID and API key as follows:
1. Create a user account on the [CDS Website](https://cds.climate.copernicus.eu/user/register).
2. Log in to the website with your user name and password.
3. Navigate to your user profile on the website. Your API key is shown at the bottom of the page.
Then export the `CDSAPI_URL` and `CDSAPI_KEY` environment variables:
```
$ export CDSAPI_URL=https://cds.climate.copernicus.eu/api/v2
$ export CDSAPI_KEY=<UID>:<API-KEY>
```
Or do it for this Notebook: (Note, we do not recommend this since it introduces a security risk!)
```
# import os
# os.environ["CDSAPI_URL"] = https://cds.climate.copernicus.eu/api/v2
# os.environ["CDSAPI_KEY"] = <UID>:<API-KEY>
# mandatory imports
from xcube.core.store import find_data_store_extensions
from xcube.core.store import get_data_store_params_schema
from xcube.core.store import new_data_store
# Utilities for notebook visualization
from IPython.display import JSON
import matplotlib as mpl
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
```
Configure matplotlib to display graphs inline directly in the notebook and set a sensible default figure size.
```
%matplotlib inline
plt.rcParams["figure.figsize"] = 16,12
```
Check whether the `cds` store is among the available stores, if not please follow the installation information from the top of this notebook.
```
JSON({e.name: e.metadata for e in find_data_store_extensions()})
```
Usually we need more information to get the actual data store object. Which data store parameters are available for `cds`?
```
get_data_store_params_schema('cds')
```
Provide mandatory parameters to instantiate the store class:
```
store = new_data_store('cds')
store
```
Which datasets are provided? (the list may contain both gridded and vector datasets):
```
JSON(list(store.get_data_ids()))
```
Get more info about a specific dataset. This includes a description of the possible open formats:
```
store.describe_data('reanalysis-era5-single-levels-monthly-means:monthly_averaged_reanalysis')
```
There are 4 required parameters, so we need to provide them to open a dataset:
```
dataset = store.open_data('reanalysis-era5-single-levels-monthly-means:monthly_averaged_reanalysis',
variable_names=['2m_temperature'],
bbox=[-10, 45, 40, 65],
spatial_res=0.25,
time_range=['2001-01-01', '2010-12-31'])
dataset
```
Plot the differences between successive time points in the dataset. We can see that the times are monotonically increasing (all the difference values are positive), but not equally spaced, since months are not all of the same length. The lowest values correspond to February; the four-year leap year cycle can also be discerned.
```
dataset.time.diff(dim='time').plot.line(figsize=(20, 4))
```
We can explore these data by plotting a temperature map for selected time points. First, we select January 2001. Land areas – and mountain ranges in particular – show up on the map as colder regions.
```
t2m_2001_jan = dataset.t2m.sel(time='2001-01-01 00:00:00', method='nearest')
t2m_2001_jan.plot.imshow(vmin=260, vmax=300, figsize=(14, 8))
```
For a more elegant and informative map, we define a function to plot a customized orthographic projection with overlaid coastlines and a grid.
```
def plot_map(data_array, colour_scale=(None, None), cmap=None):
    """Draw ``data_array`` on an orthographic projection centred on NW Europe.

    Overlays 50 m-resolution coastlines, a dashed lat/lon grid and a colour
    bar on the current matplotlib figure.

    Args:
        data_array: 2-D data array with lat/lon coordinates (as produced by
            the CDS store in this notebook).
        colour_scale: optional ``(vmin, vmax)`` pair for the colour mapping;
            ``(None, None)`` lets matplotlib autoscale.
        cmap: optional matplotlib colormap.
    """
    centre_lat = 55
    centre_lon = 15
    projection = ccrs.Orthographic(central_longitude=centre_lon, central_latitude=centre_lat)
    ax = plt.axes(projection=projection)
    vmin, vmax = colour_scale
    image = data_array.plot.imshow(ax=ax, transform=ccrs.PlateCarree(), add_colorbar=False, vmin=vmin, vmax=vmax, cmap=cmap)
    ax.coastlines(resolution='50m')
    ax.set_extent((-1.9e6, 1.9e6, -1.1e6, 1.3e6), crs=projection)
    draw_labels = False  # only supported from cartopy 0.18.0 onwards
    gridlines = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=draw_labels, color='#000000', alpha=0.5, linestyle='--')
    gridlines.xlocator = mpl.ticker.FixedLocator([-10, 0, 10, 20, 30, 40])
    gridlines.ylocator = mpl.ticker.FixedLocator([40, 45, 50, 55, 60, 65])
    plt.colorbar(image, fraction=0.027, pad=0.04)
common_scale = (260, 300)
plot_map(t2m_2001_jan, common_scale)
```
| github_jupyter |
# How to use `qrng`
```
import sys
sys.path.append('../')
import CreativeQiskit
```
The simplest application of a quantum computer is as a random number generator. We will do this by use of the `qrng` object. When created, this runs a quantum program to generate 40,960 random bits$^1$.
These random bits are then combined into `num` integers of `precision` bits in length. Here `num` and `precision` are set such that `num`$\times$`precision` does not exceed the 40,960 limit. By default, `num=1280` and `precision=32`.
The value of `num` is a kwarg that can be set when defining the object. The `precision` will then be set to the best it can be given the limits. Similarly, the value of `precision` can be set when defining the object and `num` will then be set to the largest value it can be. If values of both are supplied, that of `num` will be ignored.
Only one IBM quantum device is currently compatible with `qrng`. As such, rather than the standard `device` kwarg, there is simply a `sim` kwarg. This is `True` by default, meaning that a simulator is used (and so the random numbers are in fact pseudo-random). Setting it to `False` means that the `'ibmq_5_tenerife'` device is used. Results are therefore truly quantumly generated random numbers. However, you are not advised to use them for cryptographic purposes.
Though roles of the standard kwargs `shots` and `device` are replaced by the above, the `noisy` kwarg is still present and works as explained in [the README](README.md).
$^1$ <sup>This number is due to two constraints. First, there are 5 qubits that can be used on the device than supports the data output required. Secondly, we are limited to taking no more than 8192 samples. So that gives us a total of $5\times8192$ bits. There are ways around this limit, but they haven't been implemented here. </sup>
```
rng = CreativeQiskit.qrng()
```
The `int_list` attribute lets us directly read out the random integers, and `bit_list` lets us read out the corresponding bit strings. But we can also use the `rand_int()` method to give us the values one-by-one.
For example, here's the first five.
```
for _ in range(5):
print( rng.rand_int() )
```
Similarly, `rand()` is used to output values uniformly sampled from between $0$ and $1$, generated by dividing the integers by their maximum value.
```
for _ in range(5):
print( rng.rand() )
```
The random number generation is done by creating superpositions on five qubits (so as to be compatible with the `'ibmq_5_tenerife'` device). We can also do it purely using noise.
For a noiseless simulation, this would always just output 0.
```
rng = CreativeQiskit.qrng(noise_only=True)
for _ in range(5):
print( rng.rand() )
```
For a noisy simulation, or a run on the real device, it will be random but biased towards the lower end of the range.
```
rng = CreativeQiskit.qrng(noise_only=True,noisy=True)
for _ in range(5):
print( rng.rand() )
```
The amount of noise can be varied on a simulator by assigning a float to the `noisy` kwarg instead of just `True`. For example, here's a lot more noise than for the default.
```
rng = CreativeQiskit.qrng(noise_only=True,noisy=0.2)
for _ in range(5):
print( rng.rand() )
```
| github_jupyter |
```
# !wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.train
# !wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testa
def parse(file):
    """Parse a CoNLL-2003 file into parallel token and tag lists.

    Each data line has whitespace-separated columns of the form
    ``token POS chunk NER-tag``.  Document-separator lines (containing
    ``-DOCSTART-``) and blank sentence-separator lines are skipped.

    Args:
        file: path to the CoNLL-formatted file.

    Returns:
        Tuple ``(tokens, tags)`` of two equal-length lists holding the first
        column (token) and the last column (NER tag) of every data line.
    """
    tokens, tags = [], []
    with open(file) as fopen:
        for line in fopen.read().split('\n'):
            # Skip document boundaries and empty separator lines.
            if '-DOCSTART-' in line or not line:
                continue
            columns = line.split()
            tokens.append(columns[0])
            tags.append(columns[-1])
    return tokens, tags
left_train, right_train = parse('eng.train')
left_test, right_test = parse('eng.testa')
import re
import numpy as np
import tensorflow as tf
from tqdm import tqdm
def process_string(string):
    """Strip non [A-Za-z0-9-/ ] characters, then title-case all-caps words.

    Runs of disallowed characters collapse to a single space before the
    string is tokenised; each token is passed through ``to_title``.
    """
    cleaned = re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string)
    tokens = [to_title(token.strip()) for token in cleaned.split()]
    return ' '.join(tokens)
def to_title(string):
    """Return ``string`` title-cased if it is entirely uppercase, else unchanged."""
    return string.title() if string.isupper() else string
np.unique(right_train,return_counts=True)
word2idx = {'PAD': 0,'NUM':1,'UNK':2}
tag2idx = {'PAD': 0}
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
def parse_XY(texts, labels):
    """Encode token/tag sequences as integer ids, growing the global vocabularies.

    Unseen lower-cased tokens, tags and characters are assigned fresh ids in
    the module-level ``word2idx`` / ``tag2idx`` / ``char2idx`` dictionaries
    (with the matching ``*_idx`` counters advanced as a side effect).

    Returns:
        Tuple ``(X, Y)`` where ``X`` is a list of word ids and ``Y`` is a
        numpy array of tag ids, both aligned with the input order.
    """
    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
    X, Y = [], []
    for position, token in enumerate(texts):
        token = token.lower()
        label = labels[position]
        # Register every previously unseen character of the token.
        for character in token:
            if character not in char2idx:
                char2idx[character] = char_idx
                char_idx += 1
        if label not in tag2idx:
            tag2idx[label] = tag_idx
            tag_idx += 1
        Y.append(tag2idx[label])
        if token not in word2idx:
            word2idx[token] = word_idx
            word_idx += 1
        X.append(word2idx[token])
    return X, np.array(Y)
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50  # sliding-window length used to cut token streams into sequences
def iter_seq(x):
    # All overlapping windows of length seq_len (stride 1) over x
    return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
    # Apply iter_seq to each argument (e.g. word ids and tag ids together)
    return [iter_seq(x) for x in args]
def generate_char_seq(batch):
    # Expand a (batch, seq_len) array of word ids into a
    # (batch, seq_len, maxlen) array of char ids, where maxlen is the longest
    # word in the batch. Uses module-level idx2word / char2idx lookups.
    x = [[len(idx2word[i]) for i in k] for k in batch]
    maxlen = max([j for i in x for j in i])
    temp = np.zeros((batch.shape[0],batch.shape[1],maxlen),dtype=np.int32)
    for i in range(batch.shape[0]):
        for k in range(batch.shape[1]):
            # Characters are written right-aligned (-1-no), i.e. left-padded
            # with zeros for words shorter than maxlen
            for no, c in enumerate(idx2word[batch[i,k]]):
                temp[i,k,-1-no] = char2idx[c]
    return temp
X_seq, Y_seq = to_train_seq(train_X, train_Y)
X_char_seq = generate_char_seq(X_seq)
X_seq.shape
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
X_seq_test.shape
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
class Model:
    # BiLSTM-CRF sequence tagger (TensorFlow 1.x graph mode).
    # Char-level BiRNN features are concatenated onto word embeddings, passed
    # through word-level BiRNNs wrapped in Bahdanau attention, projected to
    # per-tag logits, and decoded with a linear-chain CRF.
    # NOTE(review): relies on module-level word2idx / char2idx / idx2tag for
    # vocabulary sizes — must be built before the graph is constructed.
    def __init__(
        self,
        dim_word,
        dim_char,
        dropout,
        learning_rate,
        hidden_size_char,
        hidden_size_word,
        num_layers,
    ):
        # LSTM cell factory with dropout on the outputs
        def cells(size, reuse = False):
            return tf.contrib.rnn.DropoutWrapper(
                tf.nn.rnn_cell.LSTMCell(
                    size,
                    initializer = tf.orthogonal_initializer(),
                    reuse = reuse,
                ),
                output_keep_prob = dropout,
            )
        # Bahdanau-attention-wrapped cell attending over `embedded`
        def bahdanau(embedded, size):
            attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                num_units = hidden_size_word, memory = embedded
            )
            return tf.contrib.seq2seq.AttentionWrapper(
                cell = cells(hidden_size_word),
                attention_mechanism = attention_mechanism,
                attention_layer_size = hidden_size_word,
            )
        # Placeholders: word ids (batch, time), char ids (batch, time, chars),
        # gold tag ids (batch, time)
        self.word_ids = tf.placeholder(tf.int32, shape = [None, None])
        self.char_ids = tf.placeholder(tf.int32, shape = [None, None, None])
        self.labels = tf.placeholder(tf.int32, shape = [None, None])
        self.maxlen = tf.shape(self.word_ids)[1]
        # True sequence lengths; assumes word id 0 == PAD
        self.lengths = tf.count_nonzero(self.word_ids, 1)
        self.word_embeddings = tf.Variable(
            tf.truncated_normal(
                [len(word2idx), dim_word], stddev = 1.0 / np.sqrt(dim_word)
            )
        )
        self.char_embeddings = tf.Variable(
            tf.truncated_normal(
                [len(char2idx), dim_char], stddev = 1.0 / np.sqrt(dim_char)
            )
        )
        word_embedded = tf.nn.embedding_lookup(
            self.word_embeddings, self.word_ids
        )
        char_embedded = tf.nn.embedding_lookup(
            self.char_embeddings, self.char_ids
        )
        # Fold batch and time together so the char RNN sees one word per row
        s = tf.shape(char_embedded)
        char_embedded = tf.reshape(
            char_embedded, shape = [s[0] * s[1], s[-2], dim_char]
        )
        # Stacked char-level BiRNN; each layer consumes the previous layer's
        # concatenated forward/backward outputs
        for n in range(num_layers):
            (out_fw, out_bw), (
                state_fw,
                state_bw,
            ) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = cells(hidden_size_char),
                cell_bw = cells(hidden_size_char),
                inputs = char_embedded,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_char_%d' % (n),
            )
            char_embedded = tf.concat((out_fw, out_bw), 2)
        # Keep only the last time step per word, unfold back to (batch, time)
        output = tf.reshape(
            char_embedded[:, -1], shape = [s[0], s[1], 2 * hidden_size_char]
        )
        word_embedded = tf.concat([word_embedded, output], axis = -1)
        # Stacked word-level BiRNN with attention over the current features
        for n in range(num_layers):
            (out_fw, out_bw), (
                state_fw,
                state_bw,
            ) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = bahdanau(word_embedded, hidden_size_word),
                cell_bw = bahdanau(word_embedded, hidden_size_word),
                inputs = word_embedded,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_word_%d' % (n),
            )
            word_embedded = tf.concat((out_fw, out_bw), 2)
        # Per-token tag logits
        logits = tf.layers.dense(word_embedded, len(idx2tag))
        y_t = self.labels
        # CRF negative log-likelihood as the training loss
        log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
            logits, y_t, self.lengths
        )
        self.cost = tf.reduce_mean(-log_likelihood)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate = learning_rate
        ).minimize(self.cost)
        # Mask out padding positions when scoring accuracy
        mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)
        # Viterbi decode of the best tag sequence
        self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
            logits, transition_params, self.lengths
        )
        self.tags_seq = tf.identity(self.tags_seq, name = 'logits')
        y_t = tf.cast(y_t, tf.int32)
        self.prediction = tf.boolean_mask(self.tags_seq, mask)
        mask_label = tf.boolean_mask(y_t, mask)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)  # (unused)
        # Token-level accuracy over non-padding positions
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.reset_default_graph()
sess = tf.InteractiveSession()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 128
model = Model(dim_word,dim_char,dropout,learning_rate,
hidden_size_char,hidden_size_word,num_layers)
sess.run(tf.global_variables_initializer())
import time
for e in range(3):
lasttime = time.time()
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
pbar = tqdm(
range(0, len(train_X), batch_size), desc = 'train minibatch loop'
)
for i in pbar:
batch_x = train_X[i : min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i : min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i : min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
assert not np.isnan(cost)
train_loss += cost
train_acc += acc
pbar.set_postfix(cost = cost, accuracy = acc)
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'test minibatch loop'
)
for i in pbar:
batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i : min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict = {
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
assert not np.isnan(cost)
test_loss += cost
test_acc += acc
pbar.set_postfix(cost = cost, accuracy = acc)
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('time taken:', time.time() - lasttime)
print(
'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc)
)
def pred2label(pred):
    """Translate batches of predicted tag indices into tag-name strings.

    Uses the module-level idx2tag mapping; returns a list of lists of tags.
    """
    return [[idx2tag[index] for index in sequence] for sequence in pred]
real_Y, predict_Y = [], []
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i : min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]
predicted = pred2label(sess.run(model.tags_seq,
feed_dict = {
model.word_ids: batch_x,
model.char_ids: batch_char,
},
))
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
from sklearn.metrics import classification_report
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))
```
| github_jupyter |
# Project 1
**Use <code>for</code>, .split(), and <code>if</code> to create a statement that will print out the words that start with 's':**
```
st = 'Print only the words that start with s in this sentence'
for word in st.split():
if word[0] == 's':
print(word)
```
**Use range() to print all the even numbers from 0 to 10.**
```
list(range(0,11,2))
```
**Use List comprehension to create a list of all numbers between 1 and 50 that are divisible by 3.**
```
[x for x in range(1,51) if x%3 == 0]
```
**Go through the string below and if the length of a word is even print "even!"**
```
st = 'Print every word in this sentence that has an even number of letters'
for word in st.split():
if len(word)%2 == 0:
print(word+" <-- has an even length!")
```
____
**Write a program that prints the integers from 1 to 100. But for multiples of three print "Fizz" instead of the number, and for the multiples of five print "Buzz". For numbers which are multiples of both three and five print "FizzBuzz".**
```
for num in range(1,101):
if num % 3 == 0 and num % 5 == 0:
print("FizzBuzz")
elif num % 3 == 0:
print("Fizz")
elif num % 5 == 0:
print("Buzz")
else:
print(num)
```
**Use a List Comprehension to create a list of the first letters of every word in the string below:**
```
st = 'Create a list of the first letters of every word in this string'
[word[0] for word in st.split()]
```
# Project 2
**Write a Python function that accepts a string and calculates the number of upper case letters and lower case letters.**
```
def up_low(s):
    """Print how many upper-case and lower-case characters appear in s."""
    upper_count = 0
    lower_count = 0
    for ch in s:
        if ch.isupper():
            upper_count += 1
        elif ch.islower():
            lower_count += 1
        # Anything else (digits, punctuation, spaces) is ignored
    print("Original String : ", s)
    print("No. of Upper case characters : ", upper_count)
    print("No. of Lower case Characters : ", lower_count)
s = 'Hello Mr. Rogers, how are you this fine Tuesday?'
up_low(s)
```
**Write a Python function that takes a list and returns a new list with unique elements of the first list.**
Sample List : [1,1,1,1,2,2,3,3,3,3,4,5]
Unique List : [1, 2, 3, 4, 5]
```
def unique_list(lst):
    """Return a new list with duplicates removed, preserving first-seen order."""
    result = []
    for item in lst:
        if item in result:
            continue
        result.append(item)
    return result
unique_list([1,1,1,1,2,2,3,3,3,3,4,5])
```
**Write a Python function that checks whether a passed string is palindrome or not.**
Note: A palindrome is a word, phrase, or sequence that reads the same backward as forward, e.g., madam or nurses run.
```
def palindrome(s):
    """Return True if s reads the same backward as forward, ignoring spaces."""
    squeezed = s.replace(' ', '')
    return squeezed == squeezed[::-1]
palindrome('nurses run')
```
**Hard**:
Write a Python function to check whether a string is pangram or not.
Note : Pangrams are words or sentences containing every letter of the alphabet at least once.
For example : "The quick brown fox jumps over the lazy dog"
Hint: Look at the string module
```
import string
def ispangram(str1, alphabet=string.ascii_lowercase):
    """Return True if str1 contains every letter of alphabet at least once."""
    missing = set(alphabet) - set(str1.lower())
    return not missing
ispangram("The quick brown fox jumps over the lazy dog")
string.ascii_lowercase
```
# Project 3
#### OLD MACDONALD: Write a function that capitalizes the first and fourth letters of a name
```
def old_macdonald(name):
    """Capitalize the first and fourth letters of name (length must exceed 3)."""
    if len(name) <= 3:
        return 'Name is too short!'
    head, tail = name[:3], name[3:]
    return head.capitalize() + tail.capitalize()
# Check
old_macdonald('macdonald')
```
#### SUMMER OF '69: Return the sum of the numbers in the array, except ignore sections of numbers starting with a 6 and extending to the next 9 (every 6 will be followed by at least one 9). Return 0 for no numbers.
summer_69([1, 3, 5]) --> 9
summer_69([4, 5, 6, 7, 8, 9]) --> 9
summer_69([2, 1, 6, 9, 11]) --> 14
```
def summer_69(arr):
    """Sum the numbers in arr, skipping every section that starts at a 6 and
    runs through the next 9 (the 6, the 9, and everything between are ignored)."""
    total = 0
    skipping = False
    for number in arr:
        if skipping:
            # Inside a 6...9 section: resume adding after the closing 9
            if number == 9:
                skipping = False
        elif number == 6:
            skipping = True
        else:
            total += number
    return total
# Check
summer_69([2, 1, 6, 9, 11])
```
#### SPY GAME: Write a function that takes in a list of integers and returns True if it contains 007 in order
spy_game([1,2,4,0,0,7,5]) --> True
spy_game([1,0,2,4,0,5,7]) --> True
spy_game([1,7,2,0,4,5,0]) --> False
```
def spy_game(nums):
    """Return True if the digits 0, 0, 7 appear in nums in that order
    (not necessarily adjacent)."""
    code = (0, 0, 7)
    matched = 0
    for number in nums:
        # Advance through the code one digit at a time
        if matched < len(code) and number == code[matched]:
            matched += 1
    return matched == len(code)
# Check
spy_game([1,2,4,0,0,7,5])
```
#### COUNT PRIMES: Write a function that returns the *number* of prime numbers that exist up to and including a given number
count_primes(100) --> 25
By convention, 0 and 1 are not prime.
```
def count_primes(num):
    """Print the primes up to and including num and return how many there are.

    By convention 0 and 1 are not prime.
    """
    if num < 2:  # nothing below 2 is prime
        return 0
    primes = [2]
    candidate = 3
    while candidate <= num:
        # Trial-divide by every odd number below the candidate
        if all(candidate % divisor != 0 for divisor in range(3, candidate, 2)):
            primes.append(candidate)
        candidate += 2
    print(primes)
    return len(primes)
# Check
count_primes(100)
```
BONUS: Here's a faster version that makes use of the prime numbers we're collecting as we go!
```
def count_primes2(num):
    """Print and count the primes up to num, trial-dividing only by the
    primes already collected (faster than checking every odd number)."""
    if num < 2:
        return 0
    primes = [2]
    candidate = 3
    while candidate <= num:
        if all(candidate % p != 0 for p in primes):
            primes.append(candidate)
        candidate += 2
    print(primes)
    return len(primes)
count_primes2(100)
```
# Project 4
**Use reduce() to transform the digit list `[3,4,3,2,1]` into the single integer `34321`:**
```
from functools import reduce
def digits_to_num(digits):
    """Fold a sequence of digits into the integer they spell, e.g. [3,4,3] -> 343."""
    return reduce(lambda acc, digit: acc * 10 + digit, digits)
digits_to_num([3,4,3,2,1])
```
**Use filter() to return the words from a list of words which start with a target letter.**
```
def filter_words(word_list, letter):
    """Return the words in word_list whose first character equals letter."""
    return [word for word in word_list if word[0] == letter]
words = ['hello','are','cat','dog','ham','hi','go','to','heart']
filter_words(words,'h')
```
**Transform `['A','B'],['a','b']` to `['A-a', 'B-b']`**
```
def concatenate(L1, L2, connector):
    """Join L1 and L2 element-wise with connector between each pair."""
    return [connector.join(pair) for pair in zip(L1, L2)]
concatenate(['A','B'],['a','b'],'-')
```
**Transform `['a','b','c']` to {'a': 0, 'b': 1, 'c': 2}**
```
def d_list(L):
    """Map each element of L to its index, e.g. ['a','b'] -> {'a': 0, 'b': 1}."""
    return dict(zip(L, range(len(L))))
d_list(['a','b','c'])
```
**In `[0,2,2,1,5,5,6,10]` calculate the count of the number of items in the list whose value equals its index.**
```
def count_match_index(L):
    """Count the positions in L where the value equals its own index."""
    return sum(1 for position, value in enumerate(L) if position == value)
count_match_index([0,2,2,1,5,5,6,10])
```
# Project 5 Bank Account Manager
## Bank Account Manager
Under the Classes section in the list of suggested final capstone projects is a Bank Account Manager program. The goal is to create a class called Account which will be an abstract class for three other classes called CheckingAccount, SavingsAccount and BusinessAccount. Then you should manage credits and debits from these accounts through an ATM style program.
### Project Scope
To tackle this project, first consider what has to happen.
1. There will be three different types of bank account (Checking, Savings, Business)
2. Each account will accept deposits and withdrawals, and will need to report balances
### Project Wishlist
We might consider additional features, like:
* impose a monthly maintenance fee
* waive fees for minimum combined deposit balances
* each account may have additional properties unique to that account:
* Checking allows unlimited transactions, and may keep track of printed checks
* Savings limits the number of withdrawals per period, and may earn interest
* Business may impose transaction fees
* automatically transfer the "change" for debit card purchases from Checking to Savings, <br>where "change" is the amount needed to raise a debit to the nearest whole dollar
* permit savings autodraft overdraft protection
### Project implementation
#### Step 1: Establish an abstract Account class with features shared by all accounts.
Note that abstract classes are never instantiated, they simply provide a base class with attributes and methods to be inherited by any derived class.
```
class Account:
    """Abstract base for all account types: holds a number and a balance.

    Never instantiated directly in the final design; Checking, Savings and
    Business derive from it.
    """

    def __init__(self, acct_nbr, opening_deposit):
        self.acct_nbr = acct_nbr
        self.balance = opening_deposit

    def __str__(self):
        # Render the current balance as a dollar amount
        return f'${self.balance:.2f}'

    def deposit(self, dep_amt):
        """Add dep_amt to the balance."""
        self.balance += dep_amt

    def withdraw(self, wd_amt):
        """Deduct wd_amt if covered; otherwise return a failure message."""
        if wd_amt > self.balance:
            return 'Funds Unavailable'
        self.balance -= wd_amt
```
#### Step 2: Establish a Checking Account class that inherits from Account, and adds Checking-specific traits.
```
class Checking(Account):
    """Checking-account variant of Account."""

    def __init__(self, acct_nbr, opening_deposit):
        # Number/balance setup is handled entirely by the base class
        super().__init__(acct_nbr, opening_deposit)

    def __str__(self):
        # Prefix the base-class balance string with the account headline
        return f'Checking Account #{self.acct_nbr}\n Balance: {Account.__str__(self)}'
```
#### Step 3: TEST setting up a Checking Account object
```
x = Checking(54321,654.33)
print(x)
x.withdraw(1000)
x.withdraw(30)
x.balance
```
#### Step 4: Set up similar Savings and Business account classes
```
class Savings(Account):
    """Savings-account variant of Account."""

    def __init__(self, acct_nbr, opening_deposit):
        # Number/balance setup is handled entirely by the base class
        super().__init__(acct_nbr, opening_deposit)

    def __str__(self):
        return f'Savings Account #{self.acct_nbr}\n Balance: {Account.__str__(self)}'


class Business(Account):
    """Business-account variant of Account."""

    def __init__(self, acct_nbr, opening_deposit):
        super().__init__(acct_nbr, opening_deposit)

    def __str__(self):
        return f'Business Account #{self.acct_nbr}\n Balance: {Account.__str__(self)}'
```
**At this point** we've met the minimum requirement for the assignment. We have three different bank account classes. Each one can accept deposits, make withdrawals and report a balance, as they each inherit from an abstract Account base class.
So now the fun part - let's add some features!
#### Step 5: Create a Customer class
For this next phase, let's set up a Customer class that holds a customer's name and PIN and can contain any number and/or combination of Account objects.
```
class Customer:
    """A bank customer: a name, a PIN, and any number of accounts per type."""

    def __init__(self, name, PIN):
        self.name = name
        self.PIN = PIN
        # One list per account category: (C)hecking, (S)avings, (B)usiness
        self.accts = {'C': [], 'S': [], 'B': []}

    def __str__(self):
        return self.name

    def open_checking(self, acct_nbr, opening_deposit):
        self.accts['C'].append(Checking(acct_nbr, opening_deposit))

    def open_savings(self, acct_nbr, opening_deposit):
        self.accts['S'].append(Savings(acct_nbr, opening_deposit))

    def open_business(self, acct_nbr, opening_deposit):
        self.accts['B'].append(Business(acct_nbr, opening_deposit))

    def get_total_deposits(self):
        """Print every account and the combined balance, computed on demand
        rather than kept as a running total."""
        total = 0
        for category in ('C', 'S', 'B'):
            for acct in self.accts[category]:
                print(acct)
                total += acct.balance
        print(f'Combined Deposits: ${total}')
```
#### Step 6: TEST setting up a Customer, adding accounts, and checking balances
```
bob = Customer('Bob',1)
bob.open_checking(321,555.55)
bob.get_total_deposits()
bob.open_savings(564,444.66)
bob.get_total_deposits()
nancy = Customer('Nancy',2)
nancy.open_business(2018,8900)
nancy.get_total_deposits()
```
**Wait!** Why don't Nancy's combined deposits show a decimal? <br>This is easily fixed in the class definition (mostly copied from above, with a change made to the last line of code):
```
class Customer:
    # Revised Customer: identical to the first version except that the
    # combined-deposits line is formatted with two decimal places.
    def __init__(self, name, PIN):
        self.name = name
        self.PIN = PIN
        # One list of accounts per category: (C)hecking, (S)avings, (B)usiness
        self.accts = {'C':[],'S':[],'B':[]}
    def __str__(self):
        return self.name
    def open_checking(self,acct_nbr,opening_deposit):
        self.accts['C'].append(Checking(acct_nbr,opening_deposit))
    def open_savings(self,acct_nbr,opening_deposit):
        self.accts['S'].append(Savings(acct_nbr,opening_deposit))
    def open_business(self,acct_nbr,opening_deposit):
        self.accts['B'].append(Business(acct_nbr,opening_deposit))
    def get_total_deposits(self):
        # Compute the combined balance on demand instead of maintaining
        # a running total
        total = 0
        for acct in self.accts['C']:
            print(acct)
            total += acct.balance
        for acct in self.accts['S']:
            print(acct)
            total += acct.balance
        for acct in self.accts['B']:
            print(acct)
            total += acct.balance
        print(f'Combined Deposits: ${total:.2f}') # added precision formatting here
```
**So it's fixed, right?**
```
nancy.get_total_deposits()
```
**Nope!** Changes made to the class definition do *not* affect objects created under different sets of instructions.<br>To fix Nancy's account, we have to build her record from scratch.
```
nancy = Customer('Nancy',2)
nancy.open_business(2018,8900)
nancy.get_total_deposits()
```
#### This is why testing is so important!
#### Step 7: Let's write some functions for making deposits and withdrawals.
Be sure to include a docstring that explains what's expected by the function!
```
def make_dep(cust, acct_type, acct_num, dep_amt):
    """
    make_dep(cust, acct_type, acct_num, dep_amt)
    cust      = Customer object
    acct_type = string 'C', 'S' or 'B'
    acct_num  = integer account number
    dep_amt   = amount to deposit
    """
    # Find the matching account in the requested category and deposit into it
    for account in cust.accts[acct_type]:
        if account.acct_nbr == acct_num:
            account.deposit(dep_amt)
make_dep(nancy,'B',2018,67.45)
nancy.get_total_deposits()
def make_wd(cust, acct_type, acct_num, wd_amt):
    """
    make_wd(cust, acct_type, acct_num, wd_amt)
    cust      = Customer object
    acct_type = string 'C', 'S' or 'B'
    acct_num  = integer account number
    wd_amt    = amount to withdraw
    """
    # Find the matching account in the requested category and withdraw from it
    for account in cust.accts[acct_type]:
        if account.acct_nbr == acct_num:
            account.withdraw(wd_amt)
make_wd(nancy,'B',2018,1000000)
nancy.get_total_deposits()
```
**What happened??** We seemed to successfully make a withdrawal, but nothing changed!<br>This is because, at the very beginning, we had our Account class *return* the string 'Funds Unavailable' instead of print it. If we change that here, we'll have to also run the derived class definitions, and Nancy's creation, but *not* the Customer class definition. Watch:
```
class Account:
    """Abstract base account: tracks an account number and a running balance.

    In this revision a failed withdrawal *prints* its message instead of
    returning it, so interactive callers see the failure immediately.
    """

    def __init__(self, acct_nbr, opening_deposit):
        self.acct_nbr = acct_nbr
        self.balance = opening_deposit

    def __str__(self):
        return f'${self.balance:.2f}'

    def deposit(self, dep_amt):
        self.balance += dep_amt

    def withdraw(self, wd_amt):
        if wd_amt > self.balance:
            print('Funds Unavailable')  # changed "return" to "print"
        else:
            self.balance -= wd_amt


class Checking(Account):
    """Checking-account variant."""

    def __init__(self, acct_nbr, opening_deposit):
        super().__init__(acct_nbr, opening_deposit)

    def __str__(self):
        return f'Checking Account #{self.acct_nbr}\n Balance: {Account.__str__(self)}'


class Savings(Account):
    """Savings-account variant."""

    def __init__(self, acct_nbr, opening_deposit):
        super().__init__(acct_nbr, opening_deposit)

    def __str__(self):
        return f'Savings Account #{self.acct_nbr}\n Balance: {Account.__str__(self)}'


class Business(Account):
    """Business-account variant."""

    def __init__(self, acct_nbr, opening_deposit):
        super().__init__(acct_nbr, opening_deposit)

    def __str__(self):
        return f'Business Account #{self.acct_nbr}\n Balance: {Account.__str__(self)}'
nancy = Customer('Nancy',2)
nancy.open_business(2018,8900)
nancy.get_total_deposits()
make_wd(nancy,'B',2018,1000000)
nancy.get_total_deposits()
```
| github_jupyter |
# Adware vs Benign
Here, we are attempting to classify between 'Adware' and 'Benign' using the features generated from the `feature selection` notebook on this data file. While this notebook is specifically tailored for the 'Adware' vs 'Benign' problem, only two items need to be changed for this to be applicable to any other 'vs' file we have available. These will be noted below.
```
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, normalize
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
import keras
from keras.models import Sequential
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Dense, Dropout
from keras.metrics import CategoricalAccuracy, TruePositives, TrueNegatives, FalsePositives, FalseNegatives
print('Imports complete.')
```
## Cleaning the Data File
The `adware_vs_benign.csv` file was generated by the `create benign vs x files` notebook. This notebook will generate all of the data files for these experiments, however modifications must be made upstream to allow for these data files to have all of the available features. Here, we import the data file one chunk at a time since we can filter off the necessary columns every chunk.
If you want to change this notebook for another type of 'vs' file, you would only change a handful of variables:
- Change `path` and `datafile` to whatever you need it to be
- Either run the `feature_selection` notebook on your given datafile and pull out the list of features <i>or</i> you can add your own below.
- That's it, actually. A list without three items is a weak list.
```
# Import the data file
path = '../../malware_dataset/'
datafile = 'adware_vs_benign.csv'
# Technique acquired from https://towardsdatascience.com/why-and-how-to-use-pandas-with-large-data-9594dda2ea4c
# I'm using this chunk technique because these files are kind of large and I want to make it as easy on us as
# possible.
df_chunk = pd.read_csv(path + datafile, chunksize=50000)
chunk_list = [] # append each chunk df here
cols_to_keep = ['Down/Up Ratio',
'Fwd PSH Flags',
'SYN Flag Count',
'ACK Flag Count',
'Packet Length Mean',
'Average Packet Size',
'URG Flag Count',
'Protocol',
'Init_Win_bytes_forward',
'Packet Length Variance',
'Total Fwd Packets',
'Subflow Fwd Packets',
'Bwd Packet Length Std',
'Init_Win_bytes_backward',
'Bwd Packet Length Max',
'Min Packet Length',
'Source Port',
'Destination Port',
'Packet Length Std',
'Bwd Packet Length Mean']
cols_to_keep.append('Label')
def filter_columns(chk, chknum):
    # Drop every column not in the module-level cols_to_keep list, then drop
    # rows with missing values. Mutates and returns the chunk DataFrame.
    # chk    : pandas DataFrame chunk
    # chknum : chunk index, used only for progress output
    # Clean the data for the features we want
    print('Dropping unnecessary columns for chunk {}...'.format(chknum), end='')
    chk.drop(columns=[col for col in chk.columns if col not in cols_to_keep], inplace=True, errors='raise') # Some systems raise an error about this errors='raise' argument (which is ironic, I guess). Feel free to remove it if need be.
    chk.dropna(inplace=True)
    print('done')
    return chk
# Each chunk is in df format
chunkn = 1
for chunk in df_chunk:
# perform data filtering
chunk_filter = filter_columns(chunk, chunkn)
chunkn += 1
# Once the data filtering is done, append the chunk to list
chunk_list.append(chunk_filter)
# concat the list into dataframe
df = pd.concat(chunk_list)
dep_var = 'Label'
print('\nDataset Composition:\n{}\n'.format(df[dep_var].value_counts()))
# Output the features we have active
print('Active Features:')
n=1
for col in df.columns[:-1]:
print('\t{}. {}'.format(n, col))
n+=1
random_state = 1
```
Above, we see the features that have been chosen by the `feature selection` notebook.
```
print('Data before encoding:')
print(df.head())
# Separate the data out into the data and target classification
X = normalize(( df.loc[:, df.columns != dep_var] ).values)
y = df[dep_var]
# One-Hot Encoding for the target classification
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
#y = y.map(lambda classif : 0 if classif == 'BENIGN' else 1)
#y = keras.utils.to_categorical(y, num_classes=y.nunique())
print('Data after encoding:')
for i in range(5):
print('{} {}'.format(X[i, :], y[i]))
#print(X[:5, :])
#print('Label column:')
#print(y[:5])
```
## Training and Testing sets
Here, we are splitting up the X (data) and y (target) sets into specifically a training and testing set. We fit the models on the training set and then record their performance on the testing set to appropriately determine how the models are generalizing the data coming in. It would also be appropriate here to create an additional validation set or conduct 10-fold cross-validation for every model. However, since we pass `stratify=y` to the `train_test_split` function provided by `sklearn`, the class proportions are preserved in both splits, and this will do for the time being as it is a general statement towards the model's performance. Once the results are needed for a formal paper or presentation, 10-fold cross-validation will be conducted.
```
# Split the dataset up into training and testing sets
# This split is not stratified by default according to
# the documentation found here: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, shuffle=True, stratify=y, random_state=random_state)
```
## Model Fitting and Evaluation
The models we are training here are the Random Forest (`RandomForestClassifier`), Decision Tree (`DecisionTreeClassifier`), and k-Nearest Neighbors (`KNeighborsClassifier`) implementations provided by `sklearn`. We can see that the models perform modestly at around 60% accuracy. Just for fun, we are also running the `AdaBoost` model provided by `sklearn`, which extends off of the work of an already-existing model. This `AdaBoost` algorithm doesn't improve the performance of the models enough, though.
```
# Random Forest model training and evaluation
rf = RandomForestClassifier(n_estimators=100, random_state=random_state)
rf.fit(X_train, y_train)
print('Random Forest testing accuracy: {:.2f}%'.format(100*rf.score(X_test, y_test)))
# Decision Tree model training and evaluation
dt = DecisionTreeClassifier(random_state=random_state)
dt.fit(X_train, y_train)
print('Decision Tree testing accuracy: {:.2f}%'.format(100*dt.score(X_test, y_test)))
# k-Nearest Neighbors model training and evaluation
knn = KNeighborsClassifier(n_neighbors=5, algorithm='kd_tree')
knn.fit(X_train, y_train)
print('k-Nearest Neighbors testing accuracy: {:.2f}%'.format(100*knn.score(X_test, y_test)))
# AdaBoost-Decision Tree model training and evaluation
adadt = AdaBoostClassifier(base_estimator=dt, n_estimators=100, random_state=random_state)
adadt.fit(X_train, y_train)
print('AdaBoost-Decision Tree testing accuracy: {:.2f}%'.format(100*adadt.score(X_test, y_test)))
```
## Cleaning the Data File
Wait...what? Why are we here, again? Well, the way we set up the data for the `sklearn` models is different from the way that `keras` wants their deep learning models trained. Removing the columns and selecting the features is exactly the same as before, however configuring `y` (target) should be categorical, hot-encoded scheme rather than a simple 1D array. It can work either way, however I believe this is the best method for the time being<sup>[citation required]</sup>.
```
# Import the data file again because we've had to mess with it and the DL algorithms we're using want the y-data
# in a different format
df_chunk = pd.read_csv(path + datafile, chunksize=50000)
chunk_list = [] # append each chunk df here
# Each chunk is in df format
chunkn = 1
for chunk in df_chunk:
# perform data filtering
chunk_filter = filter_columns(chunk, chunkn)
chunkn += 1
# Once the data filtering is done, append the chunk to list
chunk_list.append(chunk_filter)
# concat the list into dataframe
df = pd.concat(chunk_list)
dep_var = 'Label'
print(df[dep_var].value_counts())
#print('Available features: {}'.format(df.columns))
# Output the features we have active
print('Active Features:')
for col in df.columns[:-1]:
print('\t' + col)
random_state = 1
# Separate the data out into the data and target classification
X = normalize( ( df.loc[:, df.columns != dep_var] ).values )
y = df[dep_var]
# One-Hot Encoding for the target classification
label_encoder = LabelEncoder()
#y = label_encoder.fit_transform(y)
y = y.map(lambda classif : 0 if classif == 'BENIGN' else 1)
y = keras.utils.to_categorical(y, num_classes=y.nunique())
print('Data:')
print(X[:5, :])
print('Targets:')
print(y[:5])
```
## Training and Testing data
This is the same idea as before.
<pre>
<i>"How can you really say that the data
sets are the same as before? Is that
just bad science? Who's running this
circus?"</i>
- you
</pre>
I hear the concerns that you is expressing. However, thanks to someone smarter<sub>(everyday?)</sub> than me, by passing the same `random_state` value, I can control this variable such that I get back the same splits as long as I'm providing the same data (which I am).
```
# Split the dataset up into training and testing sets
# This split is not stratified by default according to
# the documentation found here: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, shuffle=True, stratify=y, random_state=random_state)
```
## Deep Neural Network Model Training and Evaluation
Now, we begin with the crazier stuff. We've set up a `Sequential` model from the `keras` library and added a bunch of `Dense` layers to it. Playing around with the activation and loss functions, this is what we've currently settled on. As we can see, though, the model still only performs as well as flipping a coin (performing worse than some of the simpler models we saw before). We are still investigating potential solutions to this problem and haven't had much success yet.
```
# Deep Neural Network model training and evaluation
# Set up the metrics we want to collect
accuracy = CategoricalAccuracy() # Will change this to Categorical if the target classification is categorical
tp = TruePositives() # These could be collected with a confusion matrix, however translating back
tn = TrueNegatives() # and forth from an image may be frustrating (it was last time I did it)
fp = FalsePositives()
fn = FalseNegatives()
metrics = [accuracy, tp, tn, fp, fn]
# The model must be reinitialized otherwise the model will have trained on all of the data (that wouldn't be true 10-fold cv)
model = Sequential()
model.add(Dense(64, input_shape=(len(cols_to_keep) - 1 ,))) # Input layer: one unit per selected feature (cols_to_keep minus the Label column)
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(128)) # Hidden layer of nodes
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(32)) # Hidden layer of nodes
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(8)) # Hidden layer of nodes
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(2, activation='sigmoid')) # Output layer of 2 nodes (one per class of the one-hot target)
# "Configures the model for training"
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=metrics)
# Fit and test the model
model.fit(x=X_train, y=y_train, epochs=20, batch_size=512, verbose=0, validation_data=(X_test, y_test))
# Evaluate the performance of the model on the test set
scores = model.evaluate(X_test, y_test, verbose=2)
acc, loss, tpn, tnn, fpn, fnn = scores[1]*100, scores[0]*100, scores[2], scores[3], scores[4], scores[5]
totaln = tpn + tnn + fpn + fnn
print('Baseline: accuracy: {:.2f}%: loss: {:2f}'.format(acc, loss))
print('\tTrue Positive Rate: {} ({})'.format(tpn/totaln, tpn))
print('\tTrue Negative Rate: {} ({})'.format(tnn/totaln, tnn))
print('\tFalse Positive Rate: {} ({})'.format(fpn/totaln, fpn))
print('\tFalse Negative Rate: {} ({})'.format(fnn/totaln, fnn))
```
## Conclusion
Really, the moral of the story right now is <b>growth</b>. <i>Is this the best data science possible?</i> No. But, from here, we can find ways to improve our models and <b>grow</b> as humans. While, right now, we don't have the highest-performing models and we aren't using the most l33t techniques, <i>we can always improve ourselves</i>.
| github_jupyter |
<a href="https://colab.research.google.com/github/crystalloide/Notebooks-SQL-serie-1/blob/main/ex07_Agr%C3%A9gation_des_donn%C3%A9es_avec_GROUP_BY_et_ORDER_BY.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## ex07 - Agrégation des données avec GROUP BY et ORDER BY
La clause ***GROUP BY*** est une clause facultative de l'instruction SELECT.
La clause GROUP BY regroupe les lignes sélectionnées en lignes récapitulatives, selon les valeurs d'une ou plusieurs colonnes.
La clause GROUP BY renvoie une ligne pour chaque groupe.
Pour chaque groupe, vous pouvez appliquer une fonction d'agrégation telle que ***MIN, MAX, SUM, COUNT ou AVG*** pour fournir plus d'informations sur chaque groupe.
```
%load_ext sql
```
### 0. Reconnectons notre Google Drive
```
from google.colab import drive
# drive.mount('/content/gdrive')
drive.mount("/content/gdrive", force_remount=True)
```
### 1. Connexion à la Database fournie pour le TP : demo.db3
```
%sql sqlite:////content/gdrive/MyDrive/Partage/Notebooks_Serie_1/demo.db3
```
Si vous ne connaissez toujours pas les tables présentes dans la database de démonstration :-), vous pouvez toujours utiliser la commande suivante pour le savoir.
```
%sql SELECT name FROM sqlite_master WHERE type='table'
```
### 2. Regroupement des données
Prenons ici la table ***rch*** de notre base comme exemple
#### 2.1 Regardons tout d'abord les colonnes de la table
```
%sql SELECT * From rch LIMIT 3
```
#### 2.2 Vérifier les valeurs uniques (distinctes)
Tout d'abord, regardons le nombre de valeurs distinctes que l'on trouve dans la colonne RCH de la table rch.
Nous pouvons utiliser le mot clé ***DISTINCT*** conjointement avec l'instruction SELECT, afin d'éliminer tous les enregistrements en double et récupérer uniquement les enregistrements uniques.
```
%%sql sqlite://
SELECT COUNT(DISTINCT RCH) AS nRCH
FROM rch
```
Nous pourrions également utiliser la clause ***GROUP BY*** afin d'obtenir une sortie plus lisible que la colonne de la table originelle car comportant moins de lignes : seules les valeurs uniques seront renvoyées.
```
%%sql sqlite://
SELECT RCH
FROM rch
GROUP BY RCH
```
#### 2.3 Utiliser les fonctions d'agrégation sur les groupes
Nous pouvons obtenir davantage de détails en agrégeant les données par groupe plutôt que sur des colonnes entières.
```
%%sql sqlite://
SELECT RCH, AVG(FLOW_INcms), AVG(FLOW_OUTcms)
FROM rch
GROUP BY RCH
```
### 3. Ordonner / Trier les enregistrements
Tout d'abord, vérifions les années et les mois avec le maximum FLOW_INcms
```
%%sql sqlite://
SELECT RCH, YR, MO, MAX(FLOW_INcms)
FROM rch
GROUP BY RCH
```
Il est évident que les colonnes ***YR*** "année" et ***MO*** "mois" ne sont pas triées dans un ordre normal.
C'est le bon moment pour utiliser la clause ***ORDER BY***, que vous pouvez placer à la fin d'une instruction SQL (après un WHERE et un GROUP BY).
Nous pouvons trier les résultats de la requête par année, puis par mois.
```
%%sql sqlite://
SELECT RCH, YR, MO, ROUND(MAX(FLOW_INcms),2)
FROM rch
GROUP BY RCH
ORDER BY YR, MO
```
Par défaut, le tri se fait avec le paramètre ***ASC***, qui classe donc les données par ordre croissant.
Nous pouvons trier par ordre décroissant en appliquant l'option ***DESC***.
```
%%sql sqlite://
SELECT RCH, YR, MO, ROUND(MAX(FLOW_INcms),2)
FROM rch
GROUP BY RCH
ORDER BY YR DESC, MO
```
### 4. Filtrer les données sur les groupes avec la clause HAVING
Parfois, nous souhaitons filtrer les enregistrements en fonction d'un groupe ou d'une valeur agrégée.
Alors que le premier réflexe serait peut être d'utiliser une instruction WHERE, cela ne fonctionnera pas car le WHERE filtre les enregistrements et ne filtre pas les agrégations.
Par exemple, nous essayons d'utiliser un WHERE pour filtrer les résultats où MAX (FLOW_INcms) est supérieur à 3000.
Cela obtiendra une sqlite3.OperationalError pour cause de mauvaise utilisation de la fonction d'agrégation.
```
%%sql sqlite://
SELECT RCH, YR, MO, MAX(FLOW_INcms) as MAX_FLOWIN
FROM rch
WHERE MAX_FLOWIN > 3000.0
GROUP BY RCH
ORDER BY YR DESC, MO
```
Dans ce cas, nous utiliserons la clause ***HAVING*** pour spécifier une condition de filtre pour un groupe ou un agrégat.
La clause HAVING est une clause facultative de l'instruction SELECT.
Nous utilisons souvent la clause HAVING avec la clause GROUP BY.
La clause GROUP BY regroupe un ensemble de lignes en un ensemble de lignes ou de groupes récapitulatifs.
La clause HAVING filtre ensuite les groupes en fonction des conditions spécifiées.
***Il est à noter que la clause HAVING doit suivre strictement la clause GROUP BY.***
```
%%sql sqlite://
SELECT RCH, YR, MO, MAX(FLOW_INcms) as MAX_FLOWIN
FROM rch
GROUP BY RCH
HAVING MAX_FLOWIN > 3000.0
ORDER BY YR DESC, MO
```
### Conclusion du notebook
Dans ce TP, nous avons appris à utiliser l'opérateur DISTINCT afin d'obtenir des résultats distincts dans nos requêtes, et ainsi éliminer les doublons.
Ensuite, nous avons appris comment agréger et trier les données à l'aide de GROUP BY et ORDER BY.
Nous avons également montré la puissance des fonctions d'agrégation de SUM(), MAX(), MIN(), AVG() et COUNT().
De plus, nous avons utilisé la clause HAVING pour filtrer les champs agrégés, ce qui ne peut pas être traité avec la clause WHERE.
| github_jupyter |
```
%matplotlib inline
```
Braginskii coefficients
=========================
A short example of how to calculate classical transport coefficients
from Braginskii's theory.
```
from astropy import units as u
from plasmapy.formulary import ClassicalTransport
```
We'll use some sample ITER data, without much regard for whether
the regime is even fit for classical transport theory:
```
thermal_energy_per_electron = 8.8 * u.keV
electron_concentration = 10.1e19 / u.m**3
thermal_energy_per_ion = 8.0 * u.keV
ion_concentration = electron_concentration
ion = 'D+' # a crude approximation
```
We now make the default ClassicalTransport object:
```
braginskii = ClassicalTransport(thermal_energy_per_electron,
electron_concentration,
thermal_energy_per_ion,
ion_concentration,
ion)
```
These variables are calculated during initialization and can be
referred to straight away:
```
print(braginskii.coulomb_log_ei)
print(braginskii.coulomb_log_ii)
print(braginskii.hall_e)
print(braginskii.hall_i)
```
These quantities are not calculated during initialization and can be
referred to via methods. To signify the need to calculate them, we
call them via ().
```
print(braginskii.resistivity)
print(braginskii.thermoelectric_conductivity)
print(braginskii.electron_thermal_conductivity)
print(braginskii.ion_thermal_conductivity)
```
They also change with magnetization:
```
mag_braginskii = ClassicalTransport(thermal_energy_per_electron,
electron_concentration,
thermal_energy_per_ion,
ion_concentration,
ion,
B = 0.1 * u.T)
print(mag_braginskii.resistivity)
print(mag_braginskii.thermoelectric_conductivity)
print(mag_braginskii.electron_thermal_conductivity)
print(mag_braginskii.ion_thermal_conductivity)
```
They also change with direction with respect to the magnetic field. Here,
we choose to print out, as arrays, the (parallel, perpendicular,
and cross) directions. Take a look at the docs to `ClassicalTransport`
for more information on these.
```
all_direction_braginskii = ClassicalTransport(thermal_energy_per_electron,
electron_concentration,
thermal_energy_per_ion,
ion_concentration,
ion,
B = 0.1 * u.T,
field_orientation = 'all')
print(all_direction_braginskii.resistivity)
print(all_direction_braginskii.thermoelectric_conductivity)
print(all_direction_braginskii.electron_thermal_conductivity)
print(all_direction_braginskii.ion_thermal_conductivity)
```
The viscosities return arrays:
```
print(braginskii.electron_viscosity)
print(mag_braginskii.electron_viscosity)
print(braginskii.ion_viscosity)
print(mag_braginskii.ion_viscosity)
```
| github_jupyter |
```
```
#Import statemets
```
import matplotlib.pyplot as plt
import numpy as np
!pip install pickle5
import pickle5 as pickle
import PIL
from sklearn.preprocessing import OneHotEncoder
import itertools
from datetime import datetime
from scipy.special import logsumexp
import math
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from google.colab import drive
drive.mount('/content/drive')
```
#Collab connect
```
```
#Utility Functions
### saving model
```
# Saving and Loading models using pickle
def save(filename, obj):
    """Pickle *obj* to *filename* using the highest available protocol."""
    with open(filename, 'wb') as out_file:
        pickle.dump(obj, out_file, protocol=pickle.HIGHEST_PROTOCOL)

def load(filename):
    """Unpickle and return the object stored at *filename*."""
    with open(filename, 'rb') as in_file:
        return pickle.load(in_file)
```
### Preprocess
```
# train_data = load(train_path)
# val_data = load(val_path)
# # Preprocess data to get labels in image as np array
# def get_data(X):
# data = []
# label = []
# for row in X:
# temp = np.array(row[0])
# data.append(temp.flatten())
# label.append(row[1])
# return np.array(data),np.array(label)
# X_train , y_train = get_data(train_data.to_numpy())
# X_val , y_val = get_data(val_data.to_numpy())
# save(data_path + "X_train.pkl", X_train)
# save(data_path + "X_val.pkl", X_val)
# save(data_path + "y_train.pkl", y_train)
# save(data_path + "y_val.pkl", y_val)
# Utility function to normalize the data and one hot encode the labels
def pre_process_data(train_x, train_y, test_x, test_y):
    """Scale pixel values into [0, 1] and one-hot encode the label vectors.

    The encoder is fitted on the training labels only; the test labels are
    transformed with the same fitted mapping.
    """
    # Pixel intensities are stored as 0-255; normalize to the unit interval.
    train_x, test_x = train_x / 255., test_x / 255.
    encoder = OneHotEncoder(sparse=False, categories='auto')
    train_y = encoder.fit_transform(train_y.reshape(len(train_y), -1))
    test_y = encoder.transform(test_y.reshape(len(test_y), -1))
    return train_x, train_y, test_x, test_y
```
### Confusion matrix plotting functions
```
# Finding confusion matrix
def confusion_matrix_find(y, y_hat, nclasses):
    """
    y [np array]: actual labels [values between 0 to nclasses-1]
    y_hat [np array]: predicted labels [values between 0 to nclasses-1]
    nclasses [integer]: number of classes in the dataset.
    return: confusion matrix of shape [nclasses, nclasses]
    """
    y = y.astype(np.int64)
    y_hat = y_hat.astype(np.int64)
    conf_mat = np.zeros((nclasses, nclasses))
    # Vectorized scatter-add: one increment per (true, predicted) pair.
    # np.add.at handles repeated index pairs correctly, replacing the
    # original per-sample Python loop with a single C-level pass.
    np.add.at(conf_mat, (y, y_hat), 1)
    return conf_mat
# Plotting confusion matrix
def confusion_matrix_plot(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues, figsize=(7,7), path=None, filename=None):
    """
    cm: confusion matrix to be plotted.
    classes: array of labels or class names.
    title: title of the confusion matrix.
    cmap: color of the plot matrix.
    figsize: tupple (width, height) representiong size of the plot.
    path: destination where the plot image will be saved.
    filename: name to save the file with on the specified path. (if None, title is used)
    # Source: https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    cm = cm.astype(np.int64)
    plt.figure(figsize=figsize)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    # Annotate every cell with its count; switch to white text on dark cells.
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, format(cm[row, col], 'd'),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    # Save only when both a directory and a file stem were supplied.
    if path and filename:
        plt.savefig(path + filename +'-confusion.png')
    plt.show()
# confusion_matrix(np.array([[10,2],[4,5]]),[0,1], title='Confusion matrix', cmap=plt.cm.Blues, figsize=(7,7), path=None, filename=None)
```
### ROC curve Plotting functions
```
# (7,7)
#https://www.dlology.com/blog/simple-guide-on-how-to-generate-roc-plot-for-keras-classifier/
def plot_roc(classes, y_test, y_score, figsize=(7,7), path=None, filename=None):
    """Plot per-class, micro-average and macro-average ROC curves.

    classes: iterable of class identifiers (only its length is used).
    y_test: one-hot ground-truth matrix, shape (num_examples, num_classes).
    y_score: predicted class probabilities, same shape as y_test.
    path/filename: when both given, the figure is saved as
        '<path><filename>-roc.png' before being shown.
    # Source: https://www.dlology.com/blog/simple-guide-on-how-to-generate-roc-plot-for-keras-classifier/
    """
    n_classes = len(classes)
    # Plot linewidth.
    lw = 2
    # Compute ROC curve and ROC area for each class (one-vs-rest).
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    # (pools all class decisions into one binary problem via ravel()).
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Plot all ROC curves
    plt.figure(1)
    plt.figure(figsize=figsize)
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    # cycle() repeats the palette if there are more classes than colors.
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(range(n_classes), colors):
        plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                 label='ROC curve of class {0} (area = {1:0.2f})'
                       ''.format(i, roc_auc[i]))
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")
    if path:
        if filename:
            plt.savefig(path + filename +'-roc.png')
    plt.show()
# Calling ROC
# classes = np.unique(y_val2)
# y_pred = mlp.predict_proba(X_val)
# plot_roc(classes, y_val, y_pred)
```
### Graph plotting functions
```
# function to plot Multi line graph
# Plot Multi line between all the models
def plot_multi_line_graph(X, listModel, title, y_name, path=None, filename=None, f_size=(7,5)):
    """Plot one line per model on shared x-values.

    X: shared x-axis values (epochs).
    listModel: iterable of (y_values, label, color) triples, one per model.
    path/filename: when both given, the figure is saved as
        '<path><filename>-multi-line-graph.png'.
    """
    plt.figure(figsize=f_size)
    plt.subplot(111)
    plt.title(title)
    plt.ylabel(y_name)
    plt.xlabel('Epochs')
    for entry in listModel:
        plt.plot(X, entry[0], label=entry[1], color=entry[2])
    # Fix 1: 'bottom right' is not a valid matplotlib legend location
    # (the valid spelling is 'lower right').
    # Fix 2: build the legend BEFORE saving, so the exported image includes it
    # (matches the sibling plot_double/single helpers, which legend first).
    plt.legend(loc='lower right', prop={'size': 13})
    if path:
        if filename:
            plt.savefig(path + filename + '-multi-line-graph.png')
    plt.show()
def plot_double_line_graph(X1, Y1, label1, X2, Y2, label2, title, y_name, path=None, filename=None, f_size=(7,5)):
    """Plot two series (e.g. validation vs. training) against epochs on one axes."""
    plt.figure(figsize=f_size)
    plt.subplot(111)
    # Same marker for both series; color tells them apart.
    for xs, ys, lbl, col in ((X1, Y1, label1, "blue"), (X2, Y2, label2, "red")):
        plt.plot(xs, ys, label=lbl, marker="x", color=col)
    plt.title(title)
    plt.ylabel(y_name)
    plt.xlabel('Epochs')
    plt.legend(loc='upper left', prop={'size': 13})
    # Save only when both a directory and a file stem were supplied.
    if path and filename:
        plt.savefig(path + filename + '-double-line-graph.png')
    plt.show()
# Plot single line using X1 , Y1
def plot_single_line_graph(X1,Y1,label1, title,name_y, path=None, filename=None, f_size=(7,5)):
fig = plt.figure(figsize=f_size)
plt.subplot(111)
plt.plot(X1,Y1 ,label=label1 ,marker = "x" , color="blue")
plt.title(title)
plt.ylabel(name_y)
plt.xlabel('Epochs')
plt.legend( loc='lower right',prop={'size': 13})
if path:
if filename:
plt.savefig(path + filename +'-single-line-graph.png')
plt.show()
```
#Load and Preprocess data
```
# Original Colab Drive locations of the pickled splits.
train_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A2/data/train_set.pkl"
val_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A2/data/val_set.pkl"
data_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A2/data/"
model_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A2/models/"
# NOTE(review): the four assignments below immediately overwrite the paths above,
# making them dead code — keep only the set matching the current Drive layout.
train_path = "drive/My Drive/DL/Assignment2/train_set.pkl"
val_path = "drive/My Drive/DL/Assignment2/val_set.pkl"
data_path = "drive/My Drive/DL/Assignment2/"
model_path = "drive/My Drive/DL/Assignment2/"
# Load the pre-split raw arrays, then normalize pixels and one-hot encode labels.
X_train2 , y_train2 = load(data_path + "X_train.pkl"), load(data_path + "y_train.pkl")
X_val2 , y_val2 = load(data_path + "X_val.pkl"), load(data_path + "y_val.pkl")
X_train, y_train, X_val, y_val = pre_process_data(X_train2, y_train2, X_val2, y_val2)
```
#Network
```
#Neuron activation
class MLPClassifier:
    """Multi-layer perceptron classifier implemented from scratch with NumPy.

    layers is a list of layer widths, e.g. [784, 128, 24, 10]. Hidden layers
    use a configurable activation (sigmoid/relu/tanh); the output layer uses
    softmax with cross-entropy loss. Several optimizers are supported:
    gradient_descent, momentum, nesterov, adagrad, rmsprop and adam.
    """
    def __init__(self, layers, num_epochs=4, dropout=0.2, learning_rate=1e-5, activation_function='relu', optimizer='gradient_descent',
                 weight_init='random', regularization='l2', batch=64, **kwargs):
        # weights and biases of the network, one entry per layer transition
        self._weights = [None] * (len(layers)-1)
        self._bias = [None] * (len(layers)-1)
        # first-moment / velocity accumulators used by the optimizers
        self._optimizer_weight = [None] * (len(layers)-1)
        self._optimizer_bias = [None] * (len(layers)-1)
        # For storing back-propagated deltas for each layer
        self._delta = [None] * (len(layers)-1)
        # dw and db accumulate the gradients used to update weights and biases
        self._db = [None] * (len(layers)-1)
        self._dw = [None] * (len(layers)-1)
        # For storing the post-activation values (self._y, index 0 is the input)
        # and the pre-activation affine values (self._z)
        self._y = [None] * (len(layers))
        self._z = [None] * (len(layers)-1)
        # Current normalization size used by the loss / gradient averaging
        self._batch_size = None
        self._layers = layers
        self._num_epochs = num_epochs
        self._dropout = dropout  # NOTE(review): stored but never applied during training
        self._learning_rate = learning_rate
        # vectorize the scalar activation so it applies element-wise to arrays
        self._activation = np.vectorize(self.get_activation(activation_function))
        self._activation_derivative = np.vectorize(self.get_activation_derivative(activation_function))
        self._activation_function = activation_function
        self._optimizer = optimizer
        self._weight_init = weight_init
        self._regularization = regularization  # NOTE(review): stored but no penalty term is added
        self._batch = batch
        self._kwargs = kwargs
        self._beta = 0.9   # momentum / first-moment decay
        self._eps = 1e-8   # numerical-stability epsilon
        self._train_losses = []
        self._test_losses = []
        self._train_acc = []
        self._test_acc = []
        # initialise the network
        self.initialize_network()

    def initialize_network(self):
        """Initialize weights/biases uniformly in [-1, 1) and zero all optimizer state."""
        if self._weight_init == 'random':
            np.random.seed(7)  # fixed seed for reproducible initialization
        #np.seterr(over='raise')
        for i in range(len(self._layers)-1):
            self._weights[i] = np.random.rand(self._layers[i+1], self._layers[i])*2-1
            self._bias[i] = np.random.rand(self._layers[i+1], 1)*2-1
            self._db[i] = np.zeros((self._layers[i+1], 1))
            self._dw[i] = np.zeros((self._layers[i+1], self._layers[i]))
            self._optimizer_weight[i] = np.zeros((self._layers[i+1], self._layers[i]))
            self._optimizer_bias[i] = np.zeros((self._layers[i+1], 1))
        if self._optimizer == 'adam':
            # Adam additionally needs a second (RMS) moment accumulator per layer.
            self._beta2 = 0.9
            self._optimizer2_weight = [None] * (len(self._layers)-1)
            self._optimizer2_bias = [None] * (len(self._layers)-1)
            for i in range(len(self._layers)-1):
                self._optimizer2_weight[i] = np.zeros((self._layers[i+1], self._layers[i]))
                self._optimizer2_bias[i] = np.zeros((self._layers[i+1], 1))

    def zero_grad(self):
        """Reset the accumulated weight gradients (bias gradients are overwritten each batch)."""
        for layer in self._dw:
            layer.fill(0)

    # Compute the average loss across one batch passing the true labels of batch
    def get_loss_item(self,log_p,labels,batch_size):
        """Mean cross-entropy of predicted probabilities log_p against one-hot labels.

        NOTE(review): the batch_size parameter is ignored — the division uses
        self._batch_size (set in fit) instead; confirm this is intended.
        """
        loss = -1*np.sum(np.multiply(labels ,np.log(log_p+self._eps)),axis=1)
        avg_loss = np.sum((loss))* 1/self._batch_size
        return avg_loss

    # Utility function to load the data into batches
    # (caller is expected to shuffle the data first; the loader itself does not)
    def loader(self,datas,labels,batch):
        # Yields consecutive (data, label) slices of size `batch`
        # (the final slice may be smaller).
        for idx in range(0,datas.shape[0],batch):
            if idx == 0:
                yield datas[:batch,:], labels[:batch,:]
            else:
                yield datas[idx:idx+batch,:], labels[idx:idx+batch,:]

    # functions for activations and derivative of activations
    def get_activation(self,name):
        """Return the scalar activation function selected by name."""
        if(name == 'sigmoid'):
            return self.sigmoid
        elif(name == 'relu'):
            return self.relu
        elif(name == 'tanh'):
            return self.tanh

    def sigmoid(self, x):
        return 1/(1+np.exp(-x))

    def relu(self, x):
        # scalar form; np.vectorize in __init__ applies it element-wise
        return max(0,x)

    def tanh(self, x):
        # NOTE(review): may overflow in exp() for large |x| — consider np.tanh
        a = np.exp(x)
        b = np.exp(-x)
        return (a - b)/(a + b)

    def get_activation_derivative(self,name):
        """Return the derivative matching get_activation(name).

        Each derivative is expressed in terms of the activation OUTPUT
        (it is applied to self._y, not self._z, during back-prop).
        """
        if(name == 'sigmoid'):
            return self.der_sigmoid
        elif(name == 'relu'):
            return self.der_relu
        elif(name == 'tanh'):
            return self.der_tanh

    def der_sigmoid(self, x):
        return x*(1-x)

    def der_relu(self, x):
        return 1 if x>0 else 0

    def der_tanh(self, x):
        return 1-(x**2)

    def Mysoftmax(self,a, axis=None):
        """
        Computes exp(a)/sumexp(a); relies on scipy logsumexp implementation.
        :param a: ndarray/tensor
        :param axis: axis to sum over; default (None) sums over everything
        """
        lse = logsumexp(a, axis=axis) # this reduces along axis
        if axis is not None:
            lse = np.expand_dims(lse, axis) # restore that axis for subtraction
        return np.exp(a - lse)

    def gd(self):
        """Vanilla gradient descent: w -= lr * dw for every layer."""
        # old = self._weights[0]
        for i in range(len(self._weights)):
            self._weights[i] = self._weights[i] - self._learning_rate* self._dw[i]
            self._bias[i] = self._bias[i] - self._learning_rate* self._db[i]
        # print(np.where((old == self._weights[0]) == False,1,0))
        # print(np.sum(np.where((old == self._weights[0]) == False,1,0)))

    def momentum_gd(self):
        """Momentum: velocity = beta*velocity - lr*grad; weights += velocity."""
        for ix in range(len(self._weights)):
            self._optimizer_weight[ix] = self._optimizer_weight[ix]*self._beta - self._learning_rate*self._dw[ix]
            self._optimizer_bias[ix] = self._optimizer_bias[ix]*self._beta - self._learning_rate*self._db[ix]
            self._weights[ix] += self._optimizer_weight[ix]
            self._bias[ix] += self._optimizer_bias[ix]

    def nesterov_accelerated_gd(self):
        """Nesterov update step.

        The update rule here is identical to momentum_gd; the Nesterov
        look-ahead itself is applied earlier, inside back_propagate,
        where the gradient is evaluated at (weights + beta*velocity).
        """
        for ix in range(len(self._weights)):
            self._optimizer_weight[ix] = self._optimizer_weight[ix]*self._beta - self._learning_rate*self._dw[ix]
            self._optimizer_bias[ix] = self._optimizer_bias[ix]*self._beta - self._learning_rate*self._db[ix]
            self._weights[ix] += self._optimizer_weight[ix]
            self._bias[ix] += self._optimizer_bias[ix]

    def adagrad(self):
        """Adagrad: accumulate squared gradients; scale lr by 1/sqrt(accumulator)."""
        for ix in range(len(self._weights)):
            self._optimizer_weight[ix] += np.square(self._dw[ix])
            self._optimizer_bias[ix] += np.square(self._db[ix])
            self._weights[ix] -= self._dw[ix]*self._learning_rate/np.sqrt(self._optimizer_weight[ix]+self._eps)
            self._bias[ix] -= self._db[ix]*self._learning_rate/np.sqrt(self._optimizer_bias[ix]+self._eps)

    def rmsprop(self):
        """RMSProp: exponential moving average of squared gradients scales the step."""
        for ix in range(len(self._weights)):
            self._optimizer_weight[ix] = self._optimizer_weight[ix]*self._beta + (1-self._beta)*self._dw[ix]*self._dw[ix]
            self._optimizer_bias[ix] = self._optimizer_bias[ix]*self._beta + (1-self._beta)*self._db[ix]*self._db[ix]
            self._weights[ix] -= (self._dw[ix]*self._learning_rate)/np.sqrt(self._optimizer_weight[ix]+self._eps)
            self._bias[ix] -= (self._db[ix]*self._learning_rate)/np.sqrt(self._optimizer_bias[ix]+self._eps)

    # optimizer 1 is for momentum and uses beta1
    # optimizer 2 is for rms and uses beta2
    def adam(self, batch_no):
        """Adam: bias-corrected first and second moment estimates drive the step."""
        for ix in range(len(self._weights)):
            # bias-correction factors for the moment estimates
            n_beta1 = 1/(1-np.power(self._beta, batch_no+1))
            n_beta2 = 1/(1-np.power(self._beta2, batch_no+1))
            self._optimizer_weight[ix] = self._optimizer_weight[ix]*self._beta + (1-self._beta)*self._dw[ix]
            self._optimizer_bias[ix] = self._optimizer_bias[ix]*self._beta + (1-self._beta)*self._db[ix]
            self._optimizer2_weight[ix] = self._optimizer2_weight[ix]*self._beta2 + (1-self._beta2)*self._dw[ix]*self._dw[ix]
            self._optimizer2_bias[ix] = self._optimizer2_bias[ix]*self._beta2 + (1-self._beta2)*self._db[ix]*self._db[ix]
            self._weights[ix] -= (self._optimizer_weight[ix]*self._learning_rate*n_beta1)/(np.sqrt(self._optimizer2_weight[ix]*n_beta2)+self._eps)
            self._bias[ix] -= (self._optimizer_bias[ix]*self._learning_rate*n_beta1)/(np.sqrt(self._optimizer2_bias[ix]*n_beta2)+self._eps)

    def forward_propagate_predict(self,X):
        """Stateless forward pass over X; returns the softmax output activations."""
        temp = X
        z = [None]*(len(self._layers)-1)
        y = [None]*(len(self._layers)-1)
        for idx, (w_i, b_i) in enumerate(zip(self._weights,self._bias)):
            z_i = np.dot(temp,w_i.T) + b_i.T
            z[idx] = z_i
            if (idx == len(self._weights)-1):
                # output layer: row-wise softmax
                y_i = self.Mysoftmax(z_i,axis=1)
            else:
                y_i = self._activation(z_i)
            y[idx] = y_i
            temp = y_i
        return y[-1]

    def forward_propagate(self):
        """Forward pass that caches activations into self._y / self._z for back-prop.

        Expects the input batch to have been placed in self._y[0].
        """
        temp = self._y[0]
        for idx, (w_i, b_i) in enumerate(zip(self._weights,self._bias)):
            z_i = np.dot(temp,w_i.T) + b_i.T
            self._z[idx] = z_i
            if (idx == len(self._weights)-1):
                # output layer: row-wise softmax
                y_i = self.Mysoftmax(z_i,axis=1)
            else:
                y_i = self._activation(z_i)
            self._y[idx+1] = y_i
            temp = y_i

    def back_propagate(self, label):
        """Propagate the error backwards, filling self._delta, self._db and self._dw."""
        for i in reversed(range(len(self._layers)-1)):
            if i == len(self._layers) - 2:
                # softmax + cross-entropy: output delta is simply (y_hat - y)
                self._delta[-1] = self._y[-1] - label
            else:
                if self._optimizer == 'nesterov':
                    # Nesterov look-ahead: decay the velocity here and evaluate
                    # the backward pass at the "peeked" weights (w + velocity).
                    self._optimizer_weight[i+1] = self._beta * self._optimizer_weight[i+1]
                    self._optimizer_bias[i+1] = self._beta * self._optimizer_bias[i+1]
                    a1 = np.dot(self._delta[i+1], self._weights[i+1]+self._optimizer_weight[i+1])
                else:
                    a1 = np.dot(self._delta[i+1], self._weights[i+1])
                # derivative is expressed in terms of the activation output
                b1 = self._activation_derivative(self._y[i+1])
                self._delta[i] = np.multiply(a1,b1)
            cur_delta = self._delta[i]/self._batch_size
            self._db[i] = np.expand_dims(np.sum(cur_delta,axis=0),axis=1)
            # accumulate the weight gradient as a sum of per-sample outer products
            for del_,inp in zip(cur_delta, self._y[i]):
                self._dw[i] += np.matmul(np.expand_dims(del_,axis=1), np.expand_dims(inp,axis=0))

    def update_weights(self, b_no):
        """Dispatch to the configured optimizer's update step (b_no used by adam)."""
        #print('update_weights')
        if self._optimizer == 'gradient_descent':
            self.gd()
        if self._optimizer == 'momentum':
            self.momentum_gd()
        elif self._optimizer == 'nesterov':
            self.nesterov_accelerated_gd()
        elif self._optimizer == 'adagrad':
            self.adagrad()
        elif self._optimizer == 'rmsprop':
            self.rmsprop()
        elif self._optimizer == 'adam':
            self.adam(b_no)

    def fit(self, X_train, y_train, X_val, y_val):
        """Mini-batch training loop; records per-epoch losses and accuracies."""
        # recover integer labels from the one-hot encoding passed in
        y_val2 = np.argmax(y_val, axis=1)
        y_train2 = np.argmax(y_train, axis=1)
        test_batch_count = math.ceil(len(X_val)/self._batch)
        batch_count = math.ceil(len(X_train)/self._batch)
        for epoch in range(self._num_epochs):
            print("-----------------------------------------------------")
            running_loss = 0
            running_loss_test = 0
            for b_no, (images, labels) in enumerate(self.loader(X_train, y_train,self._batch)):
                # batch_count = batch_count+1
                # NOTE(review): this stores the FULL training-set size, not the
                # mini-batch size; get_loss_item and back_propagate divide by
                # it — confirm this normalization is intended.
                self._batch_size = X_train.shape[0]
                self._y[0] = images
                self.forward_propagate()
                self.back_propagate(labels)
                # updating weights
                self.update_weights(b_no)
                self.zero_grad()
                # computing running loss
                l1 = self.get_loss_item(self._y[-1],labels,X_train.shape[0])
                running_loss += l1
                # print statistics
                if self._optimizer != 'gradient_descent' and b_no % 50 == 0:
                    # print every 50 mini-batches
                    print("MiniBatch: {}.. ".format((b_no)+1),"Training Loss: {:.3f}.. ".format(running_loss/(b_no+1)))
            else:
                # for/else: runs once after the batch loop completes (no break),
                # i.e. one recorded training loss per epoch.
                self._train_losses.append(running_loss/batch_count)
                # print("Epoch: {}.. ".format(epoch+1),
                #       "Training Loss: {:.3f}.. ".format(running_loss),
                #       )
            # Testing the validation loss after one epoch
            for test_images, test_labels in self.loader(X_val, y_val,self._batch):
                # test_batch_count = test_batch_count+1
                test_size = X_train.shape[0]
                # forward
                test_y_hat = self.forward_propagate_predict(test_images)
                # get loss
                l2 = self.get_loss_item(test_y_hat,test_labels,test_size)
                running_loss_test += l2
            self._test_losses.append(running_loss_test/test_batch_count)
            # print("----",running_loss,running_loss/batch_count,batch_count)
            # print("----",running_loss_test,running_loss_test/test_batch_count,test_batch_count)
            print("Epoch: {}.. ".format(epoch+1),
                  "Training Loss: {:.3f}.. ".format(running_loss/batch_count),
                  "Test Loss: {:.3f}.. ".format(running_loss_test/test_batch_count)
                  )
            # accuracy on a 1000-sample training subset (for speed) and full val set
            self._train_acc.append(self.score(X_train[:1000],y_train2[:1000]))
            self._test_acc.append(self.score(X_val,y_val2))
            print("Stats:", "Train_acc" ,self._train_acc[-1]*100,"Test_acc",self._test_acc[-1]*100)

    def predict_proba(self,X):
        '''X: a numpy array of shape (num_examples, num_features)
        Output: numpy array of shape (num_examples, num_classes): This 2d matrix contains the
        probabilities of each class for all the examples.
        '''
        return self.forward_propagate_predict(X)

    def get_params(self):
        # Output: the per-layer weight matrices and bias vectors of the model.
        return self._weights , self._bias

    def accuracy(self,y , y_hat):
        """Fraction of positions where predicted labels equal true labels."""
        return np.sum(y == y_hat) / len(y)

    def predict(self,X):
        # - X: a numpy array of shape (num_examples, num_features)
        # Returns a numpy array of shape (num_examples,) with the predicted
        # class label (argmax of the class probabilities) for each example.
        prob = self.predict_proba(X)
        return np.argmax(prob,axis=1)

    def score(self,X, y):
        # - X: a numpy array of shape (num_examples, num_features): This 2d matrix contains the
        #      complete dataset.
        # - y: a numpy array of shape (num_examples): integer classification labels.
        # Returns the accuracy of self.predict(X) against y.
        y_hat = self.predict(X)
        return self.accuracy(y,y_hat)

    def display_architecture(self):
        """Pretty-print the hyper-parameters and per-layer weight shapes."""
        print(f'''
============================================================================================
Optimizer: "{self._optimizer}"
--------------------------------------------------------------------------------------------
Epochs: {self._num_epochs}
--------------------------------------------------------------------------------------------
Activation Fn(Hidden Layers): "{self._activation_function}"
--------------------------------------------------------------------------------------------
Activation Fn(Output Layer): "softmax"
--------------------------------------------------------------------------------------------
Step size: {self._learning_rate}
--------------------------------------------------------------------------------------------
Weight initialization strategy: "{self._weight_init}"
--------------------------------------------------------------------------------------------
Regularization: "{self._regularization}"
--------------------------------------------------------------------------------------------
Dropout: {self._dropout}
--------------------------------------------------------------------------------------------
Batch size: {self._batch}''')
        for i in range(len(self._weights)):
            print(f'''
--------------------------------------------------------------------------------------------
Layer {i+1}: {self._weights[i].shape}''')
        print(f'''
============================================================================================''')
```
# Train
```
# mlp = MLPClassifier([784,128,24,10],num_epochs=450, dropout=0.2, learning_rate=0.01, activation_function='tanh', optimizer='adam',
# weight_init='random', regularization='l2', batch=64)
# mlp.fit(X_train, y_train,X_val, y_val)
```
# saving
```
# activation='tanh'
# epochs = 150
# lr=0.01
# layers=[784, 128, 24, 10]
# for optimizers in ['adam']:
# mlp = MLPClassifier(layers=layers,num_epochs=epochs, dropout=0.2, learning_rate=lr,
# activation_function=activation, weight_init='random', regularization='l2',
# batch=64, optimizer=optimizers)
# mlp.display_architecture()
# mlp.fit(X_train, y_train,X_val, y_val)
# #save(model_path+optimizers, mlp)
# print(f'''
# ==============================================================================
# Final Train Accuracy: {mlp.score(X_train,y_train2)*100}
# Final Test Accuracy: {mlp.score(X_val,y_val2)*100}
# ==============================================================================
# ''')
models = []  # NOTE(review): never populated — the loop below only prints architectures
layers=[784, 128, 24, 10]
# Load every pre-trained variant (named "<optimizer>-<activation>-<epochs>")
# and print its architecture summary.
for optimizers in ['adam-tanh-450','momentum-tanh-450','adagrad-tanh-450','rmsprop-tanh-450','gradient_descent-tanh-450', 'nesterov-tanh-450',
                   'gradient_descent-relu-200','gradient_descent-tanh-200','gradient_descent-sigmoid-200']:
    fname = data_path + f'{optimizers}-0.01.model'
    mlp = load(fname)
    mlp.display_architecture()
```
# Final accuracies
```
# Reload the chosen model and report its final accuracies
# (bare expressions so the notebook displays the percentage values).
filename = 'adam-tanh-450-0.01.model'
mlp = load(model_path+filename)
print("TESTING ACCURACY")
mlp.score(X_val,y_val2) * 100
#On complete dataset
print("TRAINING ACCURACY")
mlp.score(X_train,y_train2) * 100
```
# Plotting
### Loss vs Epochs
```
# Plot loss curves for the loaded model.
e = list(range(mlp._num_epochs))
# Training loss vs epoch (fixed user-facing typo: "TRANNING" -> "TRAINING")
plot_single_line_graph(e, mlp._train_losses, "train loss", f"TRAINING LOSS VS EPOCHS ({mlp._optimizer})", "Train Loss", path=data_path, filename=mlp._optimizer+'-train loss', f_size=(15,10))
# Validation loss vs epoch
plot_single_line_graph(e, mlp._test_losses, "Val loss", f"VALIDATION LOSS VS EPOCHS ({mlp._optimizer})", "Val Loss", path=data_path, filename=mlp._optimizer+'-val loss', f_size=(15,10))
# Combined train/val loss; given a distinct filename so it no longer
# overwrites the single-line '-val loss' figure saved just above.
plot_double_line_graph(e, mlp._test_losses, "Val Loss", e, mlp._train_losses, "Train Loss", f"LOSS VS EPOCHS ({mlp._optimizer})", "Loss", path=data_path, filename=mlp._optimizer+'-loss', f_size=(15,10))
# colors = ['darkviolet', 'crimson', 'orangered', 'darkmagenta', 'forestgreen', 'midnightblue']
# modelList = []
# e = [i for i in range(mlp._num_epochs)]
# for color, model in zip(colors, models):
# modelList.append([model._test_acc, model._optimizer, color])
# print(len(modelList))
# #Multi line graph for LOSS vs Epochs
# plot_multi_line_graph(e, modelList ,f"ACC VS EPOCHS (comparison)","Test Acc",path=data_path, filename='optimizers-val accuracy',f_size=(15,10))
```
### Accuracy vs Epochs
```
# Plot accuracy curves for the loaded model.
# Training accuracy vs epoch (fixed user-facing typo: "TRANNING" -> "TRAINING")
plot_single_line_graph(e, mlp._train_acc, "train acc", f"TRAINING ACC VS EPOCHS ({mlp._optimizer})", "Train Acc", path=data_path, filename=mlp._optimizer+'-train accuracy', f_size=(15,10))
# Validation accuracy vs epoch
plot_single_line_graph(e, mlp._test_acc, "val acc", f"VALIDATION ACC VS EPOCHS ({mlp._optimizer})", "Val Acc", path=data_path, filename=mlp._optimizer+'-val accuracy', f_size=(15,10))
# Combined train/val accuracy; given a distinct filename so it no longer
# overwrites the single-line '-val accuracy' figure saved just above.
plot_double_line_graph(e, mlp._test_acc, "Val acc", e, mlp._train_acc, "Train Acc", f"ACC VS EPOCHS ({mlp._optimizer})", "Acc", path=data_path, filename=mlp._optimizer+'-accuracy', f_size=(15,10))
```
```
```
### ROC curves
```
plot_roc([0,1,2,3,4,5,6,7,8,9], y_val, mlp.predict_proba(X_val), (10,10),path=data_path, filename=mlp._optimizer)
```
## Confusion Matrices
## Test
```
conf_mat_test = confusion_matrix_find(y_val2, mlp.predict(X_val), 10)
confusion_matrix_plot(np.array(conf_mat_test), [0,1,2,3,4,5,6,7,8,9], title=f'Confusion matrix test ({mlp._optimizer})', cmap=plt.cm.Blues, figsize=(11,11),path=data_path, filename=mlp._optimizer+'-val')
```
## Train
```
conf_mat_train = confusion_matrix_find(y_train2, mlp.predict(X_train), 10)
confusion_matrix_plot(np.array(conf_mat_train), [0,1,2,3,4,5,6,7,8,9], title=f'Confusion matrix train ({mlp._optimizer})', cmap=plt.cm.Blues, figsize=(11,11),path=data_path, filename=mlp._optimizer+'-train')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import geopandas
import libpysal
import scipy
from dask.distributed import Client, LocalCluster, as_completed
workers = 8
client = Client(LocalCluster(n_workers=workers, threads_per_worker=1))
client
cross_chunk = pd.read_parquet('../../urbangrammar_samba/spatial_signatures/cross-chunk_indices.pq')
# chunks = geopandas.read_parquet('../../urbangrammar_samba/spatial_signatures/local_auth_chunks.pq')
# user = os.environ.get('DB_USER')
# pwd = os.environ.get('DB_PWD')
# host = os.environ.get('DB_HOST')
# port = os.environ.get('DB_PORT')
# db_connection_url = f"postgres+psycopg2://{user}:{pwd}@{host}:{port}/built_env"
def measure(chunk_id):
    """Compute contiguity-based morphometric characters for one chunk of cells.

    Loads the chunk's tessellation cells plus the cells borrowed from
    neighbouring chunks (so edge cells have complete neighbourhoods),
    computes a set of characters from pre-built libpysal spatial weights,
    and writes the result back to the chunk's parquet file (borrowed cells
    are dropped before saving).  Returns a status string.

    Relies on module-level `cross_chunk` and on parquet/npz files laid out
    under ../../urbangrammar_samba/spatial_signatures/.
    """
    # load cells of a chunk
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    cells['keep'] = True
    # add neighbouring cells from other chunks (marked keep=False so they
    # can be dropped before the final write-back)
    cross_chunk_cells = []
    for chunk, inds in cross_chunk.loc[chunk_id].indices.iteritems():
        add_cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk}.pq").iloc[inds]
        add_cells['keep'] = False
        cross_chunk_cells.append(add_cells)
    # NOTE(review): DataFrame.append and Series.iteritems were removed in
    # pandas 2.x — this code assumes an older pandas.
    df = cells.append(pd.concat(cross_chunk_cells, ignore_index=True), ignore_index=True)
    # read W (first-order contiguity weights pre-computed for this chunk)
    w = libpysal.weights.WSP(scipy.sparse.load_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w_{chunk_id}.npz")).to_W()
    # alignment: mean absolute orientation difference with contiguous neighbours
    def alignment(x, orientation='stbOri'):
        orientations = df[orientation].iloc[w.neighbors[x]]
        return abs(orientations - df[orientation].iloc[x]).mean()
    df['mtbAli'] = [alignment(x) for x in range(len(df))]
    # mean neighbour distance (NaN where the cell has no building geometry)
    def neighbor_distance(x):
        geom = df.buildings.iloc[x]
        if geom is None:
            return np.nan
        return df.buildings.iloc[w.neighbors[x]].distance(df.buildings.iloc[x]).mean()
    df['mtbNDi'] = [neighbor_distance(x) for x in range(len(df))]
    # weighted neighbours: neighbour count normalised by tessellation perimeter
    df['mtcWNe'] = pd.Series([w.cardinalities[x] for x in range(len(df))], index=df.index) / df.tessellation.length
    # area covered by neighbours (the cell itself is included)
    def area_covered(x, area='sdcAre'):
        neighbours = [x]
        neighbours += w.neighbors[x]
        return df[area].iloc[neighbours].sum()
    df['mdcAre'] = [area_covered(x) for x in range(len(df))]
    # read W3 here (3-topological-step contiguity weights)
    w3 = libpysal.weights.WSP(scipy.sparse.load_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w3_{chunk_id}.npz")).to_W()
    # weighted reached enclosures: unique enclosures per unit of reached area
    def weighted_reached_enclosures(x, area='sdcAre', enclosure_id='enclosureID'):
        neighbours = [x]
        neighbours += w3.neighbors[x]
        vicinity = df[[area, enclosure_id]].iloc[neighbours]
        return vicinity[enclosure_id].unique().shape[0] / vicinity[area].sum()
    df['ltcWRE'] = [weighted_reached_enclosures(x) for x in range(len(df))]
    # mean interbuilding distance - it takes ages
    # define adjacency list from lipysal
    adj_list = w.to_adjlist(remove_symmetric=True)
    adj_list["distance"] = (
        df.buildings.iloc[adj_list.focal]
        .reset_index(drop=True)
        .distance(df.buildings.iloc[adj_list.neighbor].reset_index(drop=True))
    )
    adj_list = adj_list.set_index(['focal', 'neighbor'])
    def mean_interbuilding_distance(x):
        neighbours = [x]
        neighbours += w3.neighbors[x]
        return adj_list.distance.loc[neighbours, neighbours].mean()
    df['ltbIBD'] = [mean_interbuilding_distance(x) for x in range(len(df))]
    # Reached neighbors and area on 3 topological steps on tessellation
    df['ltcRea'] = [w3.cardinalities[i] for i in range(len(df))]
    df['ltcAre'] = [df.sdcAre.iloc[w3.neighbors[i]].sum() for i in range(len(df))]
    # drop the cells borrowed from other chunks before persisting
    df[df['keep']].drop(columns=['keep']).to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    # (street-network metrics below are disabled; kept for reference)
    # chunk_area = chunks.geometry.iloc[chunk_id].buffer(5000)
    # engine = create_engine(db_connection_url)
    # sql = f"SELECT * FROM openroads_200803_topological WHERE ST_Intersects(geometry, ST_GeomFromText('{chunk_area.wkt}',27700))"
    # streets = geopandas.read_postgis(sql, engine, geom_col='geometry')
    # sp = street_profile(streets, blg)
    # streets['sdsSPW'] = sp[0]
    # streets['sdsSWD'] = sp[1]
    # streets['sdsSPO'] = sp[2]
    # streets['sdsLen'] = streets.length
    # streets['sssLin'] = momepy.Linearity(streets).series
    # G = momepy.gdf_to_nx(streets)
    # G = momepy.node_degree(G)
    # G = momepy.subgraph(
    #     G,
    #     radius=5,
    #     meshedness=True,
    #     cds_length=False,
    #     mode="sum",
    #     degree="degree",
    #     length="mm_len",
    #     mean_node_degree=False,
    #     proportion={0: True, 3: True, 4: True},
    #     cyclomatic=False,
    #     edge_node_ratio=False,
    #     gamma=False,
    #     local_closeness=True,
    #     closeness_weight="mm_len",
    #     verbose=False
    # )
    # G = momepy.cds_length(G, radius=3, name="ldsCDL", verbose=False)
    # G = momepy.clustering(G, name="xcnSCl")
    # G = momepy.mean_node_dist(G, name="mtdMDi", verbose=False)
    # nodes, edges, sw = momepy.nx_to_gdf(G, spatial_weights=True)
    # edges_w3 = momepy.sw_high(k=3, gdf=edges)
    # edges["ldsMSL"] = momepy.SegmentsLength(edges, spatial_weights=edges_w3, mean=True, verbose=False).series
    # nodes_w5 = momepy.sw_high(k=5, weights=sw)
    # nodes["lddNDe"] = momepy.NodeDensity(nodes, edges, nodes_w5, verbose=False).series
    # nodes["linWID"] = momepy.NodeDensity(nodes, edges, nodes_w5, weighted=True, node_degree="degree", verbose=False).series
    # edges.to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/edges/edges_{chunk_id}.pq")
    # nodes.to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/nodes/nodes_{chunk_id}.pq")
    return f"Chunk {chunk_id} processed sucessfully."
# Keep the cluster saturated: prime one future per worker, then submit the
# next chunk id each time a running future completes, until the range is done.
inputs = iter(range(28, 103))
futures = [client.submit(measure, next(inputs)) for i in range(workers)]
ac = as_completed(futures)
for finished_future in ac:
    # submit new future
    try:
        new_future = client.submit(measure, next(inputs))
        ac.add(new_future)
    except StopIteration:
        # no chunks left — just drain the remaining futures
        pass
    print(finished_future.result())
import tracemalloc
%%time
tracemalloc.start()
ret = measure(26)
current, peak = tracemalloc.get_traced_memory()
print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB")
tracemalloc.stop()
tracemalloc.stop()
```
Current memory usage is 56.681588MB; Peak was 1160.209484MB
CPU times: user 11min 40s, sys: 53.6 s, total: 12min 34s
Wall time: 11min 15s
Excluding IBD
Current memory usage is 38.199543MB; Peak was 1145.271618MB
CPU times: user 1h 41min 3s, sys: 7.36 s, total: 1h 41min 11s
Wall time: 1h 41min 16s
Including IBD
```
client.close()
```
| github_jupyter |
```
#default_exp torch_core
#export
from fastai.imports import *
from fastai.torch_imports import *
from PIL import Image
#hide
from nbdev.showdoc import *
#export
_all_ = ['progress_bar','master_bar']
#export
# Select the default GPU (env var DEFAULT_GPU, falling back to 0) and enable
# cudnn autotuning whenever CUDA is available.
if torch.cuda.is_available():
    if torch.cuda.current_device()==0:
        def_gpu = int(os.environ.get('DEFAULT_GPU') or 0)
        if torch.cuda.device_count()>=def_gpu: torch.cuda.set_device(def_gpu)
    torch.backends.cudnn.benchmark = True
```
# Torch Core
> Basic pytorch functions used in the fastai library
## Arrays and show
```
#export
@delegates(plt.subplots, keep=True)
def subplots(nrows=1, ncols=1, figsize=None, imsize=3, add_vert=0, **kwargs):
    "Create a figure and grid of axes, sized `imsize` per cell; axes always returned as an array."
    if figsize is None:
        figsize = (ncols * imsize, nrows * imsize + add_vert)
    fig, axs = plt.subplots(nrows, ncols, figsize=figsize, **kwargs)
    # wrap a lone Axes so callers can always index/iterate the result
    if nrows * ncols == 1:
        axs = array([axs])
    return fig, axs
#hide
_,axs = subplots()
test_eq(axs.shape,[1])
plt.close()
_,axs = subplots(2,3)
test_eq(axs.shape,[2,3])
plt.close()
#export
def _fig_bounds(x):
r = x//32
return min(5, max(1,r))
#export
@delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])
def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):
    "Show a PIL or PyTorch image on `ax`."
    # Handle pytorch axis order: tensors are moved to CPU, and CHW is
    # permuted to HWC (first dim <5 is taken to mean "channels first")
    if hasattrs(im, ('data','cpu','permute')):
        im = im.data.cpu()
        if im.shape[0]<5: im=im.permute(1,2,0)
    elif not isinstance(im,np.ndarray): im=array(im)
    # Handle 1-channel images
    if im.shape[-1]==1: im=im[...,0]
    # `ctx` is an alias for `ax` used throughout the show API
    ax = ifnone(ax,ctx)
    if figsize is None: figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
    if ax is None: _,ax = plt.subplots(figsize=figsize)
    ax.imshow(im, **kwargs)
    if title is not None: ax.set_title(title)
    ax.axis('off')
    return ax
```
`show_image` can show PIL images...
```
im = Image.open(TEST_IMAGE_BW)
ax = show_image(im, cmap="Greys")
```
...and color images with standard `CHW` dim order...
```
im2 = np.array(Image.open(TEST_IMAGE))
ax = show_image(im2, figsize=(2,2))
```
...and color images with `HWC` dim order...
```
im3 = torch.as_tensor(im2).permute(2,0,1)
ax = show_image(im3, figsize=(2,2))
#export
@delegates(show_image, keep=True)
def show_titled_image(o, **kwargs):
"Call `show_image` destructuring `o` to `(img,title)`"
show_image(o[0], title=str(o[1]), **kwargs)
show_titled_image((im3,'A puppy'), figsize=(2,2))
#export
@delegates(subplots)
def show_images(ims, nrows=1, ncols=None, titles=None, **kwargs):
"Show all images `ims` as subplots with `rows` using `titles`"
if ncols is None: ncols = int(math.ceil(len(ims)/nrows))
if titles is None: titles = [None]*len(ims)
axs = subplots(nrows, ncols, **kwargs)[1].flat
for im,t,ax in zip(ims, titles, axs): show_image(im, ax=ax, title=t)
show_images((im,im3), titles=('number','puppy'), imsize=2)
```
`ArrayImage`, `ArrayImageBW` and `ArrayMask` are subclasses of `ndarray` that know how to show themselves.
```
#export
class ArrayBase(ndarray):
"An `ndarray` that can modify casting behavior"
@classmethod
def _before_cast(cls, x): return x if isinstance(x,ndarray) else array(x)
#export
class ArrayImageBase(ArrayBase):
"Base class for arrays representing images"
_show_args = {'cmap':'viridis'}
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
#export
class ArrayImage(ArrayImageBase):
"An array representing an image"
pass
#export
class ArrayImageBW(ArrayImage):
"An array representing an image"
_show_args = {'cmap':'Greys'}
#export
class ArrayMask(ArrayImageBase):
"An array representing an image mask"
_show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'}
im = Image.open(TEST_IMAGE)
im_t = cast(im, ArrayImage)
test_eq(type(im_t), ArrayImage)
ax = im_t.show(figsize=(2,2))
test_fig_exists(ax)
```
## Basics
```
#export
@patch
def __array_eq__(self:Tensor,b):
return torch.equal(self,b) if self.dim() else self==b
#export
def _array2tensor(x):
if x.dtype==np.uint16: x = x.astype(np.float32)
return torch.from_numpy(x)
#export
@use_kwargs_dict(dtype=None, device=None, requires_grad=False, pin_memory=False)
def tensor(x, *rest, **kwargs):
    "Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
    # tensor(1,2,3) is shorthand for tensor((1,2,3))
    if len(rest): x = (x,)+rest
    # There was a Pytorch bug in dataloader using num_workers>0. Haven't confirmed if fixed
    # if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)
    # Dispatch on input type; order matters (Tensor passthrough comes first).
    res = (x if isinstance(x, Tensor)
           else torch.tensor(x, **kwargs) if isinstance(x, (tuple,list))
           else _array2tensor(x) if isinstance(x, ndarray)
           else as_tensor(x.values, **kwargs) if isinstance(x, (pd.Series, pd.DataFrame))
           else as_tensor(x, **kwargs) if hasattr(x, '__array__') or is_iter(x)
           else _array2tensor(array(x), **kwargs))
    # float64 results are downcast to float32, the default float dtype here
    if res.dtype is torch.float64: return res.float()
    return res
test_eq(tensor(torch.tensor([1,2,3])), torch.tensor([1,2,3]))
test_eq(tensor(array([1,2,3])), torch.tensor([1,2,3]))
test_eq(tensor(1,2,3), torch.tensor([1,2,3]))
test_eq_type(tensor(1.0), torch.tensor(1.0))
#export
def set_seed(s, reproducible=False):
    "Set random seed for `random`, `torch`, and `numpy` (where available)"
    # Each library is seeded through a guarded call: if it was never
    # imported, the NameError is swallowed and seeding continues.
    seeders = (
        lambda: torch.manual_seed(s),
        lambda: torch.cuda.manual_seed_all(s),
        lambda: np.random.seed(s % (2**32 - 1)),
    )
    for seed_fn in seeders:
        try:
            seed_fn()
        except NameError:
            pass
    random.seed(s)
    if reproducible:
        # trade speed for determinism in cudnn kernels
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
set_seed(2*33)
a1 = np.random.random()
a2 = torch.rand(())
a3 = random.random()
set_seed(2*33)
b1 = np.random.random()
b2 = torch.rand(())
b3 = random.random()
test_eq(a1,b1)
test_eq(a2,b2)
test_eq(a3,b3)
#export
def unsqueeze(x, dim=-1, n=1):
    "Same as `torch.unsqueeze` but can add `n` dims"
    res = x
    for _ in range(n):
        res = res.unsqueeze(dim)
    return res
t = tensor([1])
t2 = unsqueeze(t, n=2)
test_eq(t2,t[:,None,None])
#export
def unsqueeze_(x, dim=-1, n=1):
    "Same as `torch.unsqueeze_` but can add `n` dims (modifies `x` in place and returns it)."
    remaining = n
    while remaining > 0:
        x.unsqueeze_(dim)
        remaining -= 1
    return x
t = tensor([1])
unsqueeze_(t, n=2)
test_eq(t, tensor([1]).view(1,1,1))
#export
def _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))
def _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))
#export
def apply(func, x, *args, **kwargs):
    "Apply `func` recursively to `x`, passing on args"
    # recurse through list-like containers and dicts, preserving container type
    if is_listy(x): return type(x)([apply(func, o, *args, **kwargs) for o in x])
    if isinstance(x,dict):  return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}
    res = func(x, *args, **kwargs)
    # retain_type casts the result back to x's (sub)class; skipped for None input
    return res if x is None else retain_type(res, x)
#export
def maybe_gather(x, axis=0):
    "Gather copies of `x` on `axis` (if training is distributed)"
    # single-process training: nothing to gather
    if num_distrib()<=1: return x
    ndim = x.ndim
    # 0-dim tensors are promoted to shape (1,) for all_gather, then reduced back via mean
    res = [x.new_zeros(*x.shape if ndim > 0 else (1,)) for _ in range(num_distrib())]
    torch.distributed.all_gather(res, x.contiguous() if ndim > 0 else x[None])
    return torch.cat(res, dim=axis) if ndim > 0 else torch.cat(res, dim=axis).mean()
#export
def to_detach(b, cpu=True, gather=True):
    "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
    def _inner(x, cpu=True, gather=True):
        # non-tensors pass through untouched
        if not isinstance(x,Tensor): return x
        x = x.detach()
        # in distributed training, optionally collect copies from all processes
        if gather: x = maybe_gather(x)
        return x.cpu() if cpu else x
    return apply(_inner, b, cpu=cpu, gather=gather)
```
`gather` only applies during distributed training and the result tensor will be the one gathered across processes if `gather=True` (as a result, the batch size will be multiplied by the number of processes).
```
#export
def to_half(b):
    "Recursively map lists of tensors in `b ` to FP16."
    def _half(t):
        # only floating-point tensors are converted; ints/bools stay as-is
        return t.half() if torch.is_floating_point(t) else t
    return apply(_half, b)
#export
def to_float(b):
    "Recursively map lists of int tensors in `b ` to float."
    def _float(t):
        # only floating-point tensors (e.g. fp16) are widened; ints stay as-is
        return t.float() if torch.is_floating_point(t) else t
    return apply(_float, b)
#export
# None: True if available; True: error if not available; False: use CPU
defaults.use_cuda = None
#export
def default_device(use_cuda=-1):
    "Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU"
    # -1 is a sentinel meaning "leave defaults.use_cuda unchanged"
    if use_cuda != -1: defaults.use_cuda=use_cuda
    use = defaults.use_cuda or (torch.cuda.is_available() and defaults.use_cuda is None)
    # use_cuda=True on a CPU-only machine fails loudly here
    assert torch.cuda.is_available() or not use
    return torch.device(torch.cuda.current_device()) if use else torch.device('cpu')
# cuda
_td = torch.device(torch.cuda.current_device())
test_eq(default_device(None), _td)
test_eq(default_device(True), _td)
test_eq(default_device(False), torch.device('cpu'))
default_device(None);
#export
def to_device(b, device=None):
    "Recursively put `b` on `device`."
    # defaults.use_cuda==False forces CPU regardless of the requested device
    if defaults.use_cuda==False: device='cpu'
    elif device is None: device=default_device()
    # non-tensor objects may implement their own `to_device` hook
    def _inner(o): return o.to(device, non_blocking=True) if isinstance(o,Tensor) else o.to_device(device) if hasattr(o, "to_device") else o
    return apply(_inner, b)
t = to_device((3,(tensor(3),tensor(2))))
t1,(t2,t3) = t
# cuda
test_eq_type(t,(3,(tensor(3).cuda(),tensor(2).cuda())))
test_eq(t2.type(), "torch.cuda.LongTensor")
test_eq(t3.type(), "torch.cuda.LongTensor")
#export
def to_cpu(b):
    "Recursively move all tensors in `b` to the CPU."
    return to_device(b, device='cpu')
t3 = to_cpu(t3)
test_eq(t3.type(), "torch.LongTensor")
test_eq(t3, 2)
#export
def to_np(x):
    "Convert a tensor to a numpy array."
    def _to_numpy(t):
        return t.data.cpu().numpy()
    return apply(_to_numpy, x)
t3 = to_np(t3)
test_eq(type(t3), np.ndarray)
test_eq(t3, 2)
#export
def to_concat(xs, dim=0):
    "Concat the element in `xs` (recursively if they are tuples/lists of tensors)"
    if not xs: return xs
    # recurse into tuples/lists (position-wise) and dicts (key-wise)
    if is_listy(xs[0]): return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
    if isinstance(xs[0],dict): return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
    # We may receive xs that are not concatenable (inputs of a text classifier
    # for instance), in this case we return a big list
    try: return retain_type(torch.cat(xs, dim=dim), xs[0])
    # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
                      for i in range_of(o_)) for o_ in xs], L())
test_eq(to_concat([tensor([1,2]), tensor([3,4])]), tensor([1,2,3,4]))
test_eq(to_concat([tensor([[1,2]]), tensor([[3,4]])], dim=1), tensor([[1,2,3,4]]))
test_eq_type(to_concat([(tensor([1,2]), tensor([3,4])), (tensor([3,4]), tensor([5,6]))]), (tensor([1,2,3,4]), tensor([3,4,5,6])))
test_eq_type(to_concat([[tensor([1,2]), tensor([3,4])], [tensor([3,4]), tensor([5,6])]]), [tensor([1,2,3,4]), tensor([3,4,5,6])])
test_eq_type(to_concat([(tensor([1,2]),), (tensor([3,4]),)]), (tensor([1,2,3,4]),))
test_eq(to_concat([tensor([[1,2]]), tensor([[3,4], [5,6]])], dim=1), [tensor([1]),tensor([3, 5]),tensor([4, 6])])
test_eq(type(to_concat([dict(foo=tensor([1,2]), bar=tensor(3,4))])), dict)
```
## Tensor subtypes
```
#export
@patch
def set_meta(self:Tensor, x, copy_meta=False):
"Set all metadata in `__dict__`"
if not hasattr(x,'__dict__'): return
d = x.__dict__
if copy_meta:
d = copy(d)
if '_meta' in d: d['_meta'] = copy(d['_meta'])
self.__dict__ = d
#export
@patch
def get_meta(self:Tensor, n, d=None):
"Set `n` from `self._meta` if it exists and returns default `d` otherwise"
return getattr(self, '_meta', {}).get(n, d)
#export
if not hasattr(torch,'as_subclass'):
setattr(torch, 'as_subclass', torch.Tensor.as_subclass)
#export
@patch
def as_subclass(self:Tensor, typ):
"Cast to `typ` and include `__dict__` and meta"
return retain_meta(self, torch.as_subclass(self, typ))
```
`Tensor.set_meta` and `Tensor.as_subclass` work together to maintain `_meta` after casting.
```
class _T(Tensor): pass
t = tensor(1.).requires_grad_()
t._meta = {'img_size': 1}
t2 = t.as_subclass(_T)
test_eq(t._meta, t2._meta)
test_eq(t2.get_meta('img_size'), 1)
assert(t2.requires_grad_)
#export
class TensorBase(Tensor):
def __new__(cls, x, **kwargs):
res = cast(tensor(x), cls)
if kwargs: res._meta = kwargs
return res
@classmethod
def _before_cast(cls, x): return tensor(x)
def __reduce_ex__(self,proto):
torch.utils.hooks.warn_if_has_hooks(self)
args = (type(self), self.storage(), self.storage_offset(), tuple(self.size()), self.stride())
if self.is_quantized: args = args + (self.q_scale(), self.q_zero_point())
f = _fa_rebuild_qtensor if self.is_quantized else _fa_rebuild_tensor
return (f, args + (self.requires_grad, OrderedDict()))
def gi(self, i):
res = self[i]
return res.as_subclass(type(self)) if isinstance(res,Tensor) else res
def __repr__(self):
return re.sub('tensor', self.__class__.__name__, super().__repr__())
#export
def _patch_tb():
if getattr(TensorBase,'_patched',False): return
TensorBase._patched = True
def get_f(fn):
def _f(self, *args, **kwargs):
cls = self.__class__
res = getattr(super(TensorBase, self), fn)(*args, **kwargs)
return retain_type(res, self, copy_meta=True)
return _f
t = tensor([1])
skips = 'as_subclass imag real __getitem__ __class__ __deepcopy__ __delattr__ __dir__ __doc__ __getattribute__ __hash__ __init__ \
__init_subclass__ __new__ __reduce__ __reduce_ex__ __repr__ __module__ __setstate__'.split()
for fn in dir(t):
if fn in skips: continue
f = getattr(t, fn)
if isinstance(f, (MethodWrapperType, BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType)):
setattr(TensorBase, fn, get_f(fn))
_patch_tb()
#export
class TensorCategory(TensorBase): pass
#export
class TensorMultiCategory(TensorCategory): pass
class _T(TensorBase): pass
t = _T(range(5))
test_eq(t[0], 0)
test_eq_type(t.gi(0), _T(0))
test_eq_type(t.gi(slice(2)), _T([0,1]))
test_eq_type(t+1, _T(range(1,6)))
test_eq(repr(t), '_T([0, 1, 2, 3, 4])')
test_eq(type(pickle.loads(pickle.dumps(t))), _T)
t = tensor([1,2,3])
m = TensorBase([False,True,True])
test_eq(t[m], tensor([2,3]))
t = tensor([[1,2,3],[1,2,3]])
m = cast(tensor([[False,True,True],
[False,True,True]]), TensorBase)
test_eq(t[m], tensor([2,3,2,3]))
t = tensor([[1,2,3],[1,2,3]])
t._meta = {'img_size': 1}
t2 = cast(t, TensorBase)
test_eq(t2._meta, t._meta)
x = retain_type(tensor([4,5,6]), t2)
test_eq(x._meta, t._meta)
t3 = TensorBase([[1,2,3],[1,2,3]], img_size=1)
test_eq(t3._meta, t._meta)
t4 = t2+1
t4._meta['img_size'] = 2
test_eq(t2._meta, {'img_size': 1})
test_eq(t4._meta, {'img_size': 2})
#export
class TensorImageBase(TensorBase):
_show_args = ArrayImageBase._show_args
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
#export
class TensorImage(TensorImageBase): pass
#export
class TensorImageBW(TensorImage): _show_args = ArrayImageBW._show_args
#export
class TensorMask(TensorImageBase):
_show_args = ArrayMask._show_args
def show(self, ctx=None, **kwargs):
codes = self.get_meta('codes')
if codes is not None: kwargs = merge({'vmin': 1, 'vmax': len(codes)}, kwargs)
return super().show(ctx=ctx, **kwargs)
im = Image.open(TEST_IMAGE)
im_t = cast(array(im), TensorImage)
test_eq(type(im_t), TensorImage)
im_t2 = cast(tensor(1), TensorMask)
test_eq(type(im_t2), TensorMask)
test_eq(im_t2, tensor(1))
ax = im_t.show(figsize=(2,2))
test_fig_exists(ax)
#hide (last test of to_concat)
test_eq_type(to_concat([TensorImage([1,2]), TensorImage([3,4])]), TensorImage([1,2,3,4]))
#export
class TitledTensorScalar(TensorBase):
"A tensor containing a scalar that has a `show` method"
def show(self, **kwargs): show_title(self.item(), **kwargs)
```
## L -
```
#export
@patch
def tensored(self:L):
"`mapped(tensor)`"
return self.map(tensor)
@patch
def stack(self:L, dim=0):
"Same as `torch.stack`"
return torch.stack(list(self.tensored()), dim=dim)
@patch
def cat (self:L, dim=0):
"Same as `torch.cat`"
return torch.cat (list(self.tensored()), dim=dim)
show_doc(L.tensored)
```
There are shortcuts for `torch.stack` and `torch.cat` if your `L` contains tensors or something convertible. You can manually convert with `tensored`.
```
t = L(([1,2],[3,4]))
test_eq(t.tensored(), [tensor(1,2),tensor(3,4)])
show_doc(L.stack)
test_eq(t.stack(), tensor([[1,2],[3,4]]))
show_doc(L.cat)
test_eq(t.cat(), tensor([1,2,3,4]))
```
## Chunks
```
#export
def concat(*ls):
    "Concatenate tensors, arrays, lists, or tuples"
    if not len(ls): return []
    # dispatch on the type of the first element; the result keeps that type
    it = ls[0]
    if isinstance(it,torch.Tensor): res = torch.cat(ls)
    elif isinstance(it,ndarray): res = np.concatenate(ls)
    else:
        # generic iterables are chained, then coerced back to tuple/list/L
        res = itertools.chain.from_iterable(map(L,ls))
        if isinstance(it,(tuple,list)): res = type(it)(res)
        else: res = L(res)
    return retain_type(res, it)
a,b,c = [1],[1,2],[1,1,2]
test_eq(concat(a,b), c)
test_eq_type(concat(tuple (a),tuple (b)), tuple (c))
test_eq_type(concat(array (a),array (b)), array (c))
test_eq_type(concat(tensor(a),tensor(b)), tensor(c))
test_eq_type(concat(TensorBase(a),TensorBase(b)), TensorBase(c))
test_eq_type(concat([1,1],1), [1,1,1])
test_eq_type(concat(1,1,1), L(1,1,1))
test_eq_type(concat(L(1,2),1), L(1,2,1))
#export
class Chunks:
    "Slice and int indexing into a list of lists"
    def __init__(self, chunks, lens=None):
        self.chunks = chunks
        # per-chunk lengths and their cumulative sums (leading 0 included)
        self.lens = L(map(len,self.chunks) if lens is None else lens)
        self.cumlens = np.cumsum(0+self.lens)
        self.totlen = self.cumlens[-1]
    def __getitem__(self,i):
        # slices may span chunks; int indices map to a (chunk, offset) pair
        if isinstance(i,slice): return retain_type(self.getslice(i), old=self.chunks[0])
        di,idx = self.doc_idx(i)
        return retain_type(self.chunks[di][idx], old=self.chunks[0])
    def getslice(self, i):
        # first and last chunks are partially sliced; middle chunks taken whole
        st_d,st_i = self.doc_idx(ifnone(i.start,0))
        en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))
        res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]
        for b in range(st_d+1,en_d): res.append(self.chunks[b])
        if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])
        return concat(*res)
    def doc_idx(self, i):
        "Map flat index `i` to a (chunk index, offset within chunk) pair."
        if i<0: i=self.totlen+i # count from end
        docidx = np.searchsorted(self.cumlens, i+1)-1
        cl = self.cumlens[docidx]
        return docidx,i-cl
docs = L(list(string.ascii_lowercase[a:b]) for a,b in ((0,3),(3,7),(7,8),(8,16),(16,24),(24,26)))
b = Chunks(docs)
test_eq([b[ o] for o in range(0,5)], ['a','b','c','d','e'])
test_eq([b[-o] for o in range(1,6)], ['z','y','x','w','v'])
test_eq(b[6:13], 'g,h,i,j,k,l,m'.split(','))
test_eq(b[20:77], 'u,v,w,x,y,z'.split(','))
test_eq(b[:5], 'a,b,c,d,e'.split(','))
test_eq(b[:2], 'a,b'.split(','))
t = torch.arange(26)
docs = L(t[a:b] for a,b in ((0,3),(3,7),(7,8),(8,16),(16,24),(24,26)))
b = Chunks(docs)
test_eq([b[ o] for o in range(0,5)], range(0,5))
test_eq([b[-o] for o in range(1,6)], [25,24,23,22,21])
test_eq(b[6:13], torch.arange(6,13))
test_eq(b[20:77], torch.arange(20,26))
test_eq(b[:5], torch.arange(5))
test_eq(b[:2], torch.arange(2))
docs = L(TensorBase(t[a:b]) for a,b in ((0,3),(3,7),(7,8),(8,16),(16,24),(24,26)))
b = Chunks(docs)
test_eq_type(b[:2], TensorBase(range(2)))
test_eq_type(b[:5], TensorBase(range(5)))
test_eq_type(b[9:13], TensorBase(range(9,13)))
```
## Simple types
```
#export
def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs):
    "Set title of `ax` to `o`, or print `o` if `ax` is `None`"
    ax = ifnone(ax,ctx)
    if ax is None: print(o)
    elif hasattr(ax, 'set_title'):
        # append to any existing title rather than replacing it
        t = ax.title.get_text()
        if len(t) > 0: o = t+'\n'+str(o)
        ax.set_title(o, color=color)
    elif isinstance(ax, pd.Series):
        # uniquify the label so repeated shows don't clobber existing entries
        while label in ax: label += '_'
        # Series.append was removed in pandas 2.0; pd.concat is the equivalent
        ax = pd.concat([ax, pd.Series({label: o})])
    return ax
test_stdout(lambda: show_title("title"), "title")
# ensure that col names are unique when showing to a pandas series
assert show_title("title", ctx=pd.Series(dict(a=1)), label='a').equals(pd.Series(dict(a=1,a_='title')))
#export
class ShowTitle:
"Base class that adds a simple `show`"
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledInt(Int, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledFloat(Float, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledStr(Str, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledTuple(fastuple, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
add_docs(TitledInt, "An `int` with `show`"); add_docs(TitledStr, "An `str` with `show`");
add_docs(TitledFloat, "A `float` with `show`"); add_docs(TitledTuple, "A `fastuple` with `show`")
show_doc(TitledInt, title_level=3)
show_doc(TitledStr, title_level=3)
show_doc(TitledFloat, title_level=3)
test_stdout(lambda: TitledStr('s').show(), 's')
test_stdout(lambda: TitledInt(1).show(), '1')
show_doc(TitledTuple, title_level=3)
#hide
df = pd.DataFrame(index = range(1))
row = df.iloc[0]
x = TitledFloat(2.56)
row = x.show(ctx=row, label='lbl')
test_eq(float(row.lbl), 2.56)
#export
@patch
def truncate(self:TitledStr, n):
    "Truncate self to `n`"
    # keep only the first n space-separated words
    return TitledStr(' '.join(self.split(' ')[:n]))
```
## Other functions
```
#export
if not hasattr(pd.DataFrame,'_old_init'): pd.DataFrame._old_init = pd.DataFrame.__init__
#export
@patch
def __init__(self:pd.DataFrame, data=None, index=None, columns=None, dtype=None, copy=False):
if data is not None and isinstance(data, Tensor): data = to_np(data)
self._old_init(data, index=index, columns=columns, dtype=dtype, copy=copy)
#export
def get_empty_df(n):
    "Return `n` empty rows of a dataframe"
    frame = pd.DataFrame(index=range(n))
    return [frame.iloc[row] for row in range(n)]
#export
def display_df(df):
"Display `df` in a notebook or defaults to print"
try: from IPython.display import display, HTML
except: return print(df)
display(HTML(df.to_html()))
#export
def get_first(c):
    "Get the first element of c, even if c is a dataframe"
    # positional access via .iloc when available (pandas), plain [0] otherwise
    indexer = getattr(c, 'iloc', None)
    return c[0] if indexer is None else indexer[0]
#export
def one_param(m):
    "First parameter in `m`"
    # first item of the parameters iterator, or None if m has no parameters
    return next(iter(m.parameters()), None)
#export
def item_find(x, idx=0):
    "Recursively takes the `idx`-th element of `x`"
    if is_listy(x):
        return item_find(x[idx])
    if isinstance(x, dict):
        # int idx selects the idx-th key; anything else is used as the key itself
        key = list(x.keys())[idx] if isinstance(idx, int) else idx
        return item_find(x[key])
    return x
#export
def find_device(b):
"Recursively search the device of `b`."
return item_find(b).device
t2 = to_device(tensor(0))
dev = default_device()
test_eq(find_device(t2), dev)
test_eq(find_device([t2,t2]), dev)
test_eq(find_device({'a':t2,'b':t2}), dev)
test_eq(find_device({'a':[[t2],[t2]],'b':t2}), dev)
#export
def find_bs(b):
"Recursively search the batch size of `b`."
return item_find(b).shape[0]
x = torch.randn(4,5)
test_eq(find_bs(x), 4)
test_eq(find_bs([x, x]), 4)
test_eq(find_bs({'a':x,'b':x}), 4)
test_eq(find_bs({'a':[[x],[x]],'b':x}), 4)
#export
def np_func(f):
    "Convert a function taking and returning numpy arrays to one taking and returning tensors"
    def _inner(*args, **kwargs):
        converted = [to_np(a) if isinstance(a, Tensor) else a for a in args]
        return tensor(f(*converted, **kwargs))
    # preserve the wrapped function's name/docstring for introspection
    functools.update_wrapper(_inner, f)
    return _inner
```
This decorator is particularly useful for using numpy functions as fastai metrics, for instance:
```
from sklearn.metrics import f1_score
@np_func
def f1(inp,targ): return f1_score(targ, inp)
a1,a2 = array([0,1,1]),array([1,0,1])
t = f1(tensor(a1),tensor(a2))
test_eq(f1_score(a1,a2), t)
assert isinstance(t,Tensor)
#export
class Module(nn.Module, metaclass=PrePostInitMeta):
    "Same as `nn.Module`, but no need for subclasses to call `super().__init__`"
    # `PrePostInitMeta` invokes `__pre_init__` before any subclass `__init__`,
    # so `nn.Module.__init__` has always already run when the subclass body starts.
    def __pre_init__(self, *args, **kwargs): super().__init__()
    def __init__(self): pass
show_doc(Module, title_level=3)
class _T(Module):
def __init__(self): self.f = nn.Linear(1,1)
def forward(self,x): return self.f(x)
t = _T()
t(tensor([1.]))
# export
from torch.nn.parallel import DistributedDataParallel
# export
def get_model(model):
    "Return the model maybe wrapped inside `model`."
    # Both parallel wrappers expose the real network as `.module`.
    is_wrapped = isinstance(model, (DistributedDataParallel, nn.DataParallel))
    return model.module if is_wrapped else model
# export
def one_hot(x, c):
    "One-hot encode `x` with `c` classes."
    res = torch.zeros(c, dtype=torch.uint8)
    # A non-empty tensor can index directly; anything else (list, int, empty
    # tensor) is normalized into an index list via `L`.
    if isinstance(x, Tensor) and x.numel() > 0:
        idx = x
    else:
        idx = list(L(x, use_list=None))
    res[idx] = 1.
    return res
test_eq(one_hot([1,4], 5), tensor(0,1,0,0,1).byte())
test_eq(one_hot(torch.tensor([]), 5), tensor(0,0,0,0,0).byte())
test_eq(one_hot(2, 5), tensor(0,0,1,0,0).byte())
#export
def one_hot_decode(x, vocab=None):
    # Inverse of `one_hot`: indices set to 1 (or their `vocab` entries, if given).
    return L(vocab[i] if vocab else i for i,x_ in enumerate(x) if x_==1)
test_eq(one_hot_decode(tensor(0,1,0,0,1)), [1,4])
test_eq(one_hot_decode(tensor(0,0,0,0,0)), [ ])
test_eq(one_hot_decode(tensor(0,0,1,0,0)), [2 ])
#export
def params(m):
    "Return all parameters of `m`"
    return list(m.parameters())
#export
def trainable_params(m):
    "Return all trainable parameters of `m`"
    # Keep only parameters that still participate in autograd.
    return list(filter(lambda p: p.requires_grad, m.parameters()))
m = nn.Linear(4,5)
test_eq(trainable_params(m), [m.weight, m.bias])
m.weight.requires_grad_(False)
test_eq(trainable_params(m), [m.bias])
#export
norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm)
#export
def norm_bias_params(m, with_bias=True):
    "Return all bias and BatchNorm parameters"
    # Normalization layers contribute ALL their parameters (weight and bias).
    if isinstance(m, norm_types): return L(m.parameters())
    # Recurse into children, then optionally add this module's own bias tensor.
    res = L(m.children()).map(norm_bias_params, with_bias=with_bias).concat()
    if with_bias and getattr(m, 'bias', None) is not None: res.append(m.bias)
    return res
for norm_func in [nn.BatchNorm1d, partial(nn.InstanceNorm1d, affine=True)]:
model = nn.Sequential(nn.Linear(10,20), norm_func(20), nn.Conv1d(3,4, 3))
test_eq(norm_bias_params(model), [model[0].bias, model[1].weight, model[1].bias, model[2].bias])
model = nn.ModuleList([nn.Linear(10,20, bias=False), nn.Sequential(norm_func(20), nn.Conv1d(3,4,3))])
test_eq(norm_bias_params(model), [model[1][0].weight, model[1][0].bias, model[1][1].bias])
model = nn.ModuleList([nn.Linear(10,20), nn.Sequential(norm_func(20), nn.Conv1d(3,4,3))])
test_eq(norm_bias_params(model, with_bias=False), [model[1][0].weight, model[1][0].bias])
#export
def batch_to_samples(b, max_n=10):
    "'Transposes' a batch to (at most `max_n`) samples"
    # A tensor batch is split along dim 0; `retain_types` preserves tensor subclasses.
    if isinstance(b, Tensor): return retain_types(list(b[:max_n]), [b])
    else:
        # For collections, transpose recursively: each output element zips
        # together the i-th sample from every part of the batch.
        res = L(b).map(partial(batch_to_samples,max_n=max_n))
        return retain_types(res.zip(), [b])
t = tensor([1,2,3])
test_eq(batch_to_samples([t,t+1], max_n=2), ([1,2],[2,3]))
test_eq(batch_to_samples(tensor([1,2,3]), 10), [1, 2, 3])
test_eq(batch_to_samples([tensor([1,2,3]), tensor([4,5,6])], 10), [(1, 4), (2, 5), (3, 6)])
test_eq(batch_to_samples([tensor([1,2,3]), tensor([4,5,6])], 2), [(1, 4), (2, 5)])
test_eq(batch_to_samples([tensor([1,2,3]), [tensor([4,5,6]),tensor([7,8,9])]], 10),
[(1, (4, 7)), (2, (5, 8)), (3, (6, 9))])
test_eq(batch_to_samples([tensor([1,2,3]), [tensor([4,5,6]),tensor([7,8,9])]], 2), [(1, (4, 7)), (2, (5, 8))])
t = fastuple(tensor([1,2,3]),TensorBase([2,3,4]))
test_eq_type(batch_to_samples(t)[0][1], TensorBase(2))
test_eq(batch_to_samples(t).map(type), [fastuple]*3)
#export
@patch
def interp_1d(x:Tensor, xp, fp):
    "Same as `np.interp`"
    # Piecewise-linear coefficients between consecutive breakpoints:
    # segment slope and its intercept (fp - slope*xp at the left endpoint).
    slopes = (fp[1:]-fp[:-1])/(xp[1:]-xp[:-1])
    incx = fp[:-1] - (slopes*xp[:-1])
    # For each query point, index of its segment = count of xp values <= x, minus 1,
    # clamped so out-of-range points extrapolate from the first/last segment.
    locs = (x[:,None]>=xp[None,:]).long().sum(1)-1
    locs = locs.clamp(0,len(slopes)-1)
    return slopes[locs]*x + incx[locs]
brks = tensor(0,1,2,4,8,64).float()
ys = tensor(range_of(brks)).float()
ys /= ys[-1].item()
pts = tensor(0.2,0.5,0.8,3,5,63)
preds = pts.interp_1d(brks, ys)
test_close(preds.numpy(), np.interp(pts.numpy(), brks.numpy(), ys.numpy()))
plt.scatter(brks,ys)
plt.scatter(pts,preds)
plt.legend(['breaks','preds']);
#export
@patch
def pca(x:Tensor, k=2):
    "Compute PCA of `x` with `k` dimensions."
    # Center the data, then project onto the top-`k` left singular vectors of x.T
    # (the principal directions).
    x = x-torch.mean(x,0)
    U,S,V = torch.svd(x.t())
    return torch.mm(x,U[:,:k])
# export
def logit(x):
    "Logit of `x`, clamped to avoid inf."
    # Keep x strictly inside (0, 1) so the log never sees 0 or a negative value.
    clamped = x.clamp(1e-7, 1 - 1e-7)
    # log(x / (1-x)) - algebraically the same as -(1/x - 1).log().
    return (clamped / (1 - clamped)).log()
#export
def num_distrib():
    "Return the number of processes in distributed training (if applicable)."
    # WORLD_SIZE is set by the distributed launcher; absent means single-process.
    world_size = os.environ.get('WORLD_SIZE')
    return 0 if world_size is None else int(world_size)
#export
def rank_distrib():
    "Return the distributed rank of this process (if applicable)."
    # RANK is set by the distributed launcher; absent means rank 0.
    rank = os.environ.get('RANK')
    return 0 if rank is None else int(rank)
#export
def distrib_barrier():
    "Place a synchronization barrier in distributed training so that ALL sub-processes in the pytorch process group must arrive here before proceeding."
    # No-op unless actually running distributed with an initialized process
    # group, so this is always safe to call.
    if num_distrib() > 1 and torch.distributed.is_initialized(): torch.distributed.barrier()
#export
# Saving arrays requires pytables - optional dependency
try: import tables
except: pass
#export
# Build a pytables compression filter: blosc meta-compressor with the given
# codec (`lib`) at compression level `lvl` (0-9).
def _comp_filter(lib='lz4',lvl=3): return tables.Filters(complib=f'blosc:{lib}', complevel=lvl)
#export
@patch
def save_array(p:Path, o, complib='lz4', lvl=3):
    "Save numpy array to a compressed `pytables` file, using compression level `lvl`"
    if isinstance(o,Tensor): o = to_np(o)
    # Store the array as a compressed chunked array at node '/data' of the file.
    with tables.open_file(p, mode='w', filters=_comp_filter(lib=complib,lvl=lvl)) as f: f.create_carray('/', 'data', obj=o)
```
Compression lib can be any of: blosclz, lz4, lz4hc, snappy, zlib or zstd.
```
#export
@patch
def load_array(p:Path):
    "Load a numpy array saved with `save_array` from a `pytables` file"
    with tables.open_file(p, 'r') as f: return f.root.data.read()
inspect.getdoc(load_array)
str(inspect.signature(load_array))
#export
def base_doc(elt):
    "Print a base documentation of `elt`"
    # Prefer the qualified name, fall back to the plain name, then to ''.
    name = getattr(elt, '__qualname__', getattr(elt, '__name__', ''))
    sig = inspect.signature(elt)
    docs = inspect.getdoc(elt)
    print(f'{name}{sig}\n{docs}\n')
    print('To get a prettier result with hyperlinks to source code and documentation, install nbdev: pip install nbdev')
#export
def doc(elt):
    "Try to use doc from nbdev and fall back to `base_doc`"
    try:
        from nbdev.showdoc import doc
        doc(elt)
    # Only swallow genuine failures (nbdev missing or unable to render); the
    # previous bare `except:` also hid KeyboardInterrupt and SystemExit.
    except Exception: base_doc(elt)
#export
def nested_reorder(t, idxs):
    "Reorder all tensors in `t` using `idxs`"
    # Tensors and `L` lists support fancy indexing directly.
    if isinstance(t, (Tensor,L)): return t[idxs]
    # Plain tuples/lists are rebuilt (same type) with each element reordered.
    elif is_listy(t): return type(t)(nested_reorder(t_, idxs) for t_ in t)
    if t is None: return t
    raise TypeError(f"Expected tensor, tuple, list or L but got {type(t)}")
x = tensor([0,1,2,3,4,5])
idxs = tensor([2,5,1,0,3,4])
test_eq_type(nested_reorder(([x], x), idxs), ([idxs], idxs))
y = L(0,1,2,3,4,5)
z = L(i.item() for i in idxs)
test_eq_type(nested_reorder((y, x), idxs), (z,idxs))
```
## Image helpers
```
#export
def make_cross_image(bw=True):
    "Create a tensor containing a cross image, either `bw` (True) or color"
    if bw:
        # Single-channel 5x5: white row and column through the center pixel.
        img = torch.zeros(5,5)
        img[2,:] = 1.
        img[:,2] = 1.
        return img
    # Color 3x5x5: horizontal bar in the red channel, vertical in the green.
    img = torch.zeros(3,5,5)
    img[0,2,:] = 1.
    img[1,:,2] = 1.
    return img
plt.imshow(make_cross_image(), cmap="Greys");
plt.imshow(make_cross_image(False).permute(1,2,0));
#export
def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):
    "Display batch `b` in a grid of size `items` with `cols` width"
    # Never use more columns than there are items to show.
    if items<cols: cols=items
    # Ceiling division: enough rows to fit all items.
    rows = (items+cols-1) // cols
    if figsize is None: figsize = (cols*3, rows*3)
    fig,axs = plt.subplots(rows, cols, figsize=figsize)
    # Transpose the batch so each iteration yields one sample (plus its axis).
    for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)
show_image_batch(([Image.open(TEST_IMAGE_BW),Image.open(TEST_IMAGE)],['bw','color']), items=2)
```
## Model init
```
#export
def requires_grad(m):
    "Check if the first parameter of `m` requires grad or not"
    # Only the first parameter matters; a parameter-less module counts as frozen.
    for p in m.parameters():
        return p.requires_grad
    return False
tst = nn.Linear(4,5)
assert requires_grad(tst)
for p in tst.parameters(): p.requires_grad_(False)
assert not requires_grad(tst)
#export
def init_default(m, func=nn.init.kaiming_normal_):
    "Initialize `m` weights with `func` and set `bias` to 0."
    if not func:
        return m
    if hasattr(m, 'weight'): func(m.weight)
    # `bias` may be None (e.g. Linear(..., bias=False)); only fill real tensors.
    if getattr(m, 'bias', None) is not None and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
    return m
tst = nn.Linear(4,5)
tst.weight.data.uniform_(-1,1)
tst.bias.data.uniform_(-1,1)
tst = init_default(tst, func = lambda x: x.data.fill_(1.))
test_eq(tst.weight, torch.ones(5,4))
test_eq(tst.bias, torch.zeros(5))
#export
def cond_init(m, func):
    "Apply `init_default` to `m` unless it's a batchnorm module"
    # Skip normalization layers and modules whose parameters are frozen.
    if isinstance(m, norm_types) or not requires_grad(m): return
    init_default(m, func)
tst = nn.Linear(4,5)
tst.weight.data.uniform_(-1,1)
tst.bias.data.uniform_(-1,1)
cond_init(tst, func = lambda x: x.data.fill_(1.))
test_eq(tst.weight, torch.ones(5,4))
test_eq(tst.bias, torch.zeros(5))
tst = nn.BatchNorm2d(5)
init = [tst.weight.clone(), tst.bias.clone()]
cond_init(tst, func = lambda x: x.data.fill_(1.))
test_eq(tst.weight, init[0])
test_eq(tst.bias, init[1])
#export
def apply_leaf(m, f):
    "Apply `f` to children of `m`."
    # Apply to the module itself first, then depth-first into its children.
    if isinstance(m, nn.Module): f(m)
    for child in m.children():
        apply_leaf(child, f)
tst = nn.Sequential(nn.Linear(4,5), nn.Sequential(nn.Linear(4,5), nn.Linear(4,5)))
apply_leaf(tst, partial(init_default, func=lambda x: x.data.fill_(1.)))
for l in [tst[0], *tst[1]]: test_eq(l.weight, torch.ones(5,4))
for l in [tst[0], *tst[1]]: test_eq(l.bias, torch.zeros(5))
#export
def apply_init(m, func=nn.init.kaiming_normal_):
    "Initialize all non-batchnorm layers of `m` with `func`."
    # Walk every submodule; `cond_init` itself skips norm layers and frozen ones.
    apply_leaf(m, lambda o: cond_init(o, func=func))
tst = nn.Sequential(nn.Linear(4,5), nn.Sequential(nn.Linear(4,5), nn.BatchNorm1d(5)))
init = [tst[1][1].weight.clone(), tst[1][1].bias.clone()]
apply_init(tst, func=lambda x: x.data.fill_(1.))
for l in [tst[0], tst[1][0]]: test_eq(l.weight, torch.ones(5,4))
for l in [tst[0], tst[1][0]]: test_eq(l.bias, torch.zeros(5))
test_eq(tst[1][1].weight, init[0])
test_eq(tst[1][1].bias, init[1])
```
## autograd jit functions
```
#export
def script_use_ctx(f):
    "Decorator: create jit script and pass everything in `ctx.saved_variables` to `f`, after `*args`"
    sf = torch.jit.script(f)
    # The wrapper matches the autograd.Function backward signature (ctx first),
    # appending the tensors stashed by the forward pass to the call.
    def _f(ctx, *args, **kwargs): return sf(*args, *ctx.saved_variables, **kwargs)
    return update_wrapper(_f,f)
#export
def script_save_ctx(static, *argidx):
    "Decorator: create jit script and save args with indices `argidx` using `ctx.save_for_backward`"
    def _dec(f):
        sf = torch.jit.script(f)
        def _f(ctx, *args, **kwargs):
            if argidx:
                # Stash the selected tensors for the backward pass.
                save = [args[o] for o in argidx]
                ctx.save_for_backward(*save)
            # Without `argidx`, `f` expects `ctx` itself as its first argument.
            # `args` is a tuple, so build a tuple here: the previous `[ctx]+args`
            # raised TypeError (cannot concatenate list and tuple).
            if not argidx: args = (ctx,)+args
            return sf(*args, **kwargs)
        if static: _f = staticmethod(_f)
        return update_wrapper(_f,f)
    return _dec
#export
def script_fwd(*argidx):
    "Decorator: create static jit script and save args with indices `argidx` using `ctx.save_for_backward`"
    # Forward methods of autograd Functions are static methods, hence static=True.
    return script_save_ctx(True, *argidx)
#export
def script_bwd(f):
    "Decorator: create static jit script and pass everything in `ctx.saved_variables` to `f`, after `*args`"
    # Backward methods are static too; reuse `script_use_ctx` for the ctx plumbing.
    return staticmethod(script_use_ctx(f))
#export
def grad_module(cls):
    "Decorator: convert `cls` into an autograd function"
    # Wrap `cls.apply` (the autograd entry point) in a minimal nn.Module so the
    # function can be used like any other layer.
    class _c(nn.Module):
        def forward(self, *args, **kwargs):
            return cls.apply(*args, **kwargs)
    return _c
```
# Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive', force_remount = True)
%tensorflow_version 2.x
!pip install tiffile
!pip install gputools
!pip install imagecodecs
!pip install vollseg
!pip install napari[all]
import os
import glob
from tifffile import imread, imwrite
from vollseg import Augmentation2D
from pathlib import Path
import numpy as np
from scipy.ndimage import gaussian_filter
```
# The size of the images in the directory should be the same, otherwise concatenation of the arrays will not work
In the cell below we duplicate the image pixels in XY to a desired size, creating an optimal training patch.
```
image_dir = '/content/drive/My Drive/raw_dir/'
label_dir = '/content/drive/My Drive/label_dir/'
Aug_image_dir = '/content/drive/My Drive/aug_raw_dir/'
Aug_label_dir = '/content/drive/My Drive/aug_label_dir/'
Path(Aug_image_dir).mkdir(exist_ok=True)
Path(Aug_label_dir).mkdir(exist_ok=True)
```
All the parameters that would be needed are provided in the cell below
```
#All choices below are optional
size = [800,800]
gauss_filter_size = 0
#choices for augmentation below are 1 or 2 or None
flip_axis= 1
shift_axis= 1
zoom_axis= 1
#shift range can be between -1 and 1 (-1 and 1 will translate the pixels completely out), zoom range > 0
shift_range= 0.2
zoom_range= 2
rotate_axis= 1
rotate_angle= 'random'
#if zero padding also needs to be done
size_zero = [1000,1000]
#Add poisson noise to data with mu
mu = 5
# Collect matching raw/label image pairs (same basename) from the two folders.
Raw_path = os.path.join(image_dir, '*tif')
filesRaw = glob.glob(Raw_path)
filesRaw.sort()  # bug fix: `filesRaw.sort` without () was a no-op
Label_path = os.path.join(label_dir, '*tif')
filesLabel = glob.glob(Label_path)
filesLabel.sort()
Data = []
Label = []
# Index labels by basename once instead of the previous quadratic nested scan.
labels_by_name = {os.path.basename(os.path.splitext(f)[0]): f for f in filesLabel}
for fname in filesRaw:
    Name = os.path.basename(os.path.splitext(fname)[0])
    secondfname = labels_by_name.get(Name)
    if secondfname is None: continue
    Data.append(imread(fname))
    # Optionally smooth the label image (gauss_filter_size == 0 leaves it unchanged).
    Label.append(gaussian_filter(imread(secondfname), gauss_filter_size))
Data = np.asarray(Data)
Label = np.asarray(Label)
```
First perform the extend pixels to a certain size and zero padding operations
```
# Extend (tile) the image pixels to `size` and write each augmented pair to disk.
extend_pixels = Augmentation2D(size=size)
aug_extend_pixels = extend_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_extend_pixels_pair = np.asarray(next(aug_extend_pixels))
# The loop index already numbers the pairs; the separate `count` was redundant.
for i in range(aug_extend_pixels_pair.shape[1]):
    Name = 'aug_extend_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_extend_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_extend_pixels_pair[1,i,:,:].astype('uint16'))
# Zero-pad (embed) the images into a `size_zero` canvas and save those as well.
embed_pixels = Augmentation2D(size_zero=size_zero)
aug_embed_pixels = embed_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_embed_pixels_pair = np.asarray(next(aug_embed_pixels))
for i in range(aug_embed_pixels_pair.shape[1]):
    Name = 'aug_embed_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_embed_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_embed_pixels_pair[1,i,:,:].astype('uint16'))
```
Now we can perform rotations, flips
```
# Random rotations about `rotate_axis`, saved as a new set of pairs.
rotate_pixels = Augmentation2D(rotate_axis = rotate_axis, rotate_angle = rotate_angle)
aug_rotate_pixels = rotate_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_rotate_pixels_pair = np.asarray(next(aug_rotate_pixels))
# The loop index already numbers the pairs; the separate `count` was redundant.
for i in range(aug_rotate_pixels_pair.shape[1]):
    Name = 'rotate_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_rotate_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_rotate_pixels_pair[1,i,:,:].astype('uint16'))
# Flips along `flip_axis`.
flip_pixels = Augmentation2D(flip_axis = flip_axis)
aug_flip_pixels = flip_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_flip_pixels_pair = np.asarray(next(aug_flip_pixels))
for i in range(aug_flip_pixels_pair.shape[1]):
    Name = 'aug_flip_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_flip_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_flip_pixels_pair[1,i,:,:].astype('uint16'))
```
Then Zoom and Shift augmentation
```
# Zoom augmentation along `zoom_axis` by up to `zoom_range`.
zoom_pixels = Augmentation2D(zoom_axis = zoom_axis, zoom_range = zoom_range)
aug_zoom_pixels = zoom_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_zoom_pixels_pair = np.asarray(next(aug_zoom_pixels))
# The loop index already numbers the pairs; the separate `count` was redundant.
for i in range(aug_zoom_pixels_pair.shape[1]):
    Name = 'aug_zoom_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_zoom_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_zoom_pixels_pair[1,i,:,:].astype('uint16'))
# Translation augmentation along `shift_axis` by up to `shift_range`.
shift_pixels = Augmentation2D(shift_axis = shift_axis, shift_range = shift_range)
aug_shift_pixels = shift_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_shift_pixels_pair = np.asarray(next(aug_shift_pixels))
for i in range(aug_shift_pixels_pair.shape[1]):
    Name = 'aug_shift_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_shift_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_shift_pixels_pair[1,i,:,:].astype('uint16'))
```
Finally add noise to all the data generated so far
```
# Re-read every pair generated so far so that noise can be added on top of it.
Raw_path = os.path.join(Aug_image_dir, '*tif')
filesRaw = glob.glob(Raw_path)
filesRaw.sort()  # bug fix: `.sort` without () did nothing
Label_path = os.path.join(Aug_label_dir, '*tif')
filesLabel = glob.glob(Label_path)
filesLabel.sort()
Data = []
Label = []
# Pair raw/label files by basename via a dict instead of a quadratic scan.
labels_by_name = {os.path.basename(os.path.splitext(f)[0]): f for f in filesLabel}
for fname in filesRaw:
    Name = os.path.basename(os.path.splitext(fname)[0])
    secondfname = labels_by_name.get(Name)
    if secondfname is None: continue
    Data.append(imread(fname))
    Label.append(imread(secondfname))
Data = np.asarray(Data)
Label = np.asarray(Label)
# Add Poisson noise (mean `mu`) and save the noisy pairs.
noise_pixels = Augmentation2D(mu = mu)
aug_noise_pixels = noise_pixels.build(data=Data, label=Label, batch_size = Data.shape[0])
aug_noise_pixels_pair = np.asarray(next(aug_noise_pixels))
for i in range(aug_noise_pixels_pair.shape[1]):
    Name = 'aug_noise_pixels' + str(i)
    imwrite(Aug_image_dir + '/' + Name + '.tif', aug_noise_pixels_pair[0,i,:,:].astype('float32'))
    imwrite(Aug_label_dir + '/' + Name + '.tif', aug_noise_pixels_pair[1,i,:,:].astype('uint16'))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/simonsanvil/ECG-classification-MLH/blob/master/notebooks/zijun-FeatureExtraction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
ls
%cd ./drive/MyDrive/MLHC
#!git clone 'https://github.com/Seb-Good/ecg-features.git'
%cd ./ecg-features
!pip install biosppy
# Import 3rd party libraries
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import shutil
from sklearn.model_selection import train_test_split
# Import local Libraries
sys.path.insert(0, os.path.dirname(os.getcwd()))
from features.feature_extractor import Features
from utils.plotting.waveforms import plot_waveforms
# Configure Notebook
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%load_ext autoreload
%autoreload 2
%cd './ecg-features'
%ls
sourcepath=os.path.join('./data','trainingData')
sourcefiles = os.listdir(sourcepath)
waveform_path = os.path.join('./data', 'waveforms')
for file in sourcefiles:
if file.endswith('.mat'):
shutil.move(os.path.join(sourcepath,file), os.path.join(waveform_path,file))
# Sampling frequency (Hz)
fs = 300
# Data paths
label_path = os.path.join('./data', 'labels')
waveform_path = os.path.join('./data', 'waveforms')
feature_path = os.path.join('./data', 'features')
# Read labels CSV
labels = pd.read_csv(os.path.join(label_path, 'REFERENCE.csv'), names=['file_name', 'label'])
## encode label
all_label = []
for i in labels['label']:
if i == 'N':
all_label.append(str(0))
elif i == 'A':
all_label.append(str(1))
elif i == 'O':
all_label.append(str(2))
elif i == '~':
all_label.append(str(3))
labels.label = all_label
# View DataFrame
labels.head(10)
# Launch interactive plotting widget
plot_waveforms(labels=labels, waveform_path=waveform_path, fs=fs)
# Instantiate
#ecg_features = Features(file_path=waveform_path, fs=fs, feature_groups=['full_waveform_features'])
# Calculate ECG features
#ecg_features.extract_features(
# filter_bandwidth=[3, 45], n_signals=None, show=True,
# labels=labels, normalize=True, polarity_check=True,
# template_before=0.25, template_after=0.4
#)
# Get features DataFrame
#features = ecg_features.get_features()
# View DataFrame
#features.head(10)
# Save features DataFrame to CSV
features.to_csv(os.path.join(feature_path, 'newFeatures.csv'), index=False)
features.head(10)
# split train val test
features=pd.read_csv(os.path.join(feature_path, 'newFeatures.csv'))
Y=features.label
X=features.drop(['label'],axis=1)
X_train, X_test = train_test_split(features, test_size=0.2, random_state=0)
X_val, X_test = train_test_split(X_test, test_size=0.5, random_state=0)
features2=pd.read_csv(os.path.join(feature_path, 'newFeatures.csv'))
# Read labels CSV
labels = pd.read_csv(os.path.join(label_path, 'REFERENCE.csv'), names=['file_name', 'label'])
## encode label
all_label = []
for i in features2['label']:
if i == 0:
all_label.append('N')
elif i == 1:
all_label.append('A')
elif i == 2:
all_label.append('O')
elif i == 3:
all_label.append('~')
print(len(all_label))
features2.label = all_label
X_train2, X_test2 = train_test_split(features2, test_size=0.2, random_state=0)
X_val2, X_test2 = train_test_split(X_test2, test_size=0.5, random_state=0)
```
###Using H2O to try different models
```
!pip install requests
!pip install tabulate
!pip install future
!pip uninstall h2o
!pip install -f http://h2o-release.s3.amazonaws.com/h2o/latest_stable_Py.html h2o
import h2o
from h2o.automl import H2OAutoML
# Start the H2O cluster (locally)
h2o.init()
# Import a sample binary outcome train/test set into H2O
train = h2o.H2OFrame(X_train2) #X_train[['hosp_exp_flg','mort_day_censored']]
test = h2o.H2OFrame(X_test2)
val= h2o.H2OFrame(X_val2)
# Identify predictors and response
x = train.columns
y = "label"
x.remove(y)
# For binary classification, response should be a factor
#train[y] = train[y].asfactor()
#test[y] = test[y].asfactor()
# Run AutoML for 20 base models
aml = H2OAutoML(max_models=5, seed=1)
aml.train(x=x, y=y, training_frame=train)
#aml.predict(test)
# View the AutoML Leaderboard
lb = aml.leaderboard
lb.head(rows=lb.nrows)
aml.predict(test)
print(X_test2.label)
from h2o.estimators import H2OXGBoostEstimator
h2o.init()
# Import the prostate dataset
prostate = h2o.import_file(os.path.join(feature_path, 'newFeatures.csv'))
# Set the predictor names and the response column name
predictors = prostate.columns
response = "label"
#predictors.remove(response)
# Convert the response column to a factor
prostate['label'] = prostate['label'].asfactor()
# Train a GBM model setting nfolds to 5
prostate_gbm = H2OXGBoostEstimator(nfolds = 6, seed = 1)
prostate_gbm.train(x=predictors, y=response, training_frame=prostate)
# AUC of cross-validated holdout predictions
prostate_gbm.auc(xval=True)
prostate
```
###Using sktime and TFresh
```
!pip install sktime
!pip install --upgrade tsfresh
#https://www.sktime.org/en/latest/examples/feature_extraction_with_tsfresh.html
def read_data_physionet_4_with_val(window_size=3000, stride=500,ind=0):
    """Load the PhysioNet 2017 challenge pickle, normalize each record,
    encode labels, split into train/val/test and window via `slide_and_cut`.

    NOTE(review): relies on module-level `file_path`, `pickle`, `np`,
    `train_test_split` and `Counter` being in scope - confirm they are
    imported/defined before this is called.  Also, the `ind` parameter is
    accepted but `slide_and_cut` is always called with `ind=0`, so it is
    effectively ignored - possibly unintended.
    """
    # read pkl
    with open(os.path.join(file_path,'challenge2017.pkl'), 'rb') as fin:
        res = pickle.load(fin)
    ## scale data: z-score each record individually
    all_data = res['data']
    for i in range(len(all_data)):
        tmp_data = all_data[i]
        tmp_std = np.std(tmp_data)
        tmp_mean = np.mean(tmp_data)
        all_data[i] = (tmp_data - tmp_mean) / tmp_std
    ## encode label: N->0, A->1, O->2, ~->3
    all_label = []
    for i in res['label']:
        if i == 'N':
            all_label.append(0)
        elif i == 'A':
            all_label.append(1)
        elif i == 'O':
            all_label.append(2)
        elif i == '~':
            all_label.append(3)
    all_label = np.array(all_label)
    # split train val test (80/10/10)
    X_train, X_test, Y_train, Y_test = train_test_split(all_data, all_label, test_size=0.2, random_state=0)
    X_val, X_test, Y_val, Y_test = train_test_split(X_test, Y_test, test_size=0.5, random_state=0)
    # slide and cut
    print('before: ')
    print(Counter(Y_train), Counter(Y_val), Counter(Y_test))
    X_train, Y_train = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride,ind=0)
    X_val, Y_val, pid_val = slide_and_cut(X_val, Y_val, window_size=window_size, stride=stride, output_pid=True,ind=0)
    X_test, Y_test, pid_test = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True,ind=0)
    print('after: ')
    print(Counter(Y_train), Counter(Y_val), Counter(Y_test))
    # shuffle train
    shuffle_pid = np.random.permutation(Y_train.shape[0])
    X_train = X_train[shuffle_pid]
    Y_train = Y_train[shuffle_pid]
    # add a channel dimension: (n, 1, window)
    X_train = np.expand_dims(X_train, 1)
    X_val = np.expand_dims(X_val, 1)
    X_test = np.expand_dims(X_test, 1)
    return X_train, X_val, X_test, Y_train, Y_val, Y_test, pid_val, pid_test
def slide_and_cut(X, Y, window_size, stride, output_pid=False, datatype=4, ind=0):
    """Cut each signal in `X` into overlapping windows of `window_size`.

    The stride is shrunk for under-represented classes so rarer labels yield
    more (more densely overlapping) windows, rebalancing the class
    distribution.  When `ind > 0`, only the first `ind` signals are used.
    Returns (windows, labels) and, if `output_pid`, also the source-signal
    index of every window.
    """
    if ind > 0:
        X = X[:ind]
        Y = Y[:ind]
    out_X, out_Y, out_pid = [], [], []
    for i in range(X.shape[0]):
        tmp_ts = X[i]
        tmp_Y = Y[i]
        # Per-class stride: denser slicing (smaller stride) for rarer classes.
        if tmp_Y == 0:
            i_stride = stride
        elif tmp_Y == 1:
            if datatype == 4:
                i_stride = stride//6
            elif datatype == 2:
                i_stride = stride//10
            elif datatype == 2.1:
                i_stride = stride//7
            else:
                i_stride = stride  # fix: was unbound for other datatypes
        elif tmp_Y == 2:
            i_stride = stride//2
        elif tmp_Y == 3:
            i_stride = stride//20
        else:
            i_stride = stride  # fix: was UnboundLocalError for unexpected labels
        for j in range(0, len(tmp_ts)-window_size, i_stride):
            out_X.append(tmp_ts[j:j+window_size])
            out_Y.append(tmp_Y)
            out_pid.append(i)
    if output_pid:
        return np.array(out_X), np.array(out_Y), np.array(out_pid)
    return np.array(out_X), np.array(out_Y)
def featureExtraction(X):
    # Extract the tsfresh "efficient" feature set from the time-series panel `X`.
    # NOTE(review): requires sktime's TSFreshFeatureExtractor (imported in a
    # later cell) to already be in scope when this is called.
    t = TSFreshFeatureExtractor(default_fc_parameters="efficient", show_warnings=False)
    X = t.fit_transform(X)
    return X
%cd './drive/My Drive/MLHC/ecg-features/'
import pickle
import os
import pandas as pd
from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor
#with open(os.path.join('./data/files','challenge2017.pkl'), 'rb') as fin:
data = pd.read_pickle(os.path.join('./data/files','challenge2017.pkl'))
X = data['data']
Y=data['label']
F=featureExtraction(X)
F.head()
```
| github_jupyter |
# About: VM - Go! with prepared VM image
---
Start a VM instance from a prepared VM image using libvirt.
libvirtがインストールされている仮想化基盤上で、VMを起動するためのNotebook。
すでに**VMイメージ作成Notebook**により、イメージが作成されているものとする。
## *Operation Note*
*This is a cell for your own recording. ここに経緯を記述*
# Notebookと環境のBinding
Inventory中のgroup名でBind対象を指示する。
```
target_group = 'test-hypervisor'
```
Bind対象への疎通状態を確認する。
```
!ansible -m ping {target_group}
```
対象マシンにlibvirtがインストールされているかを確認する。
```
!ansible -b -a 'virsh version' {target_group}
```
# VMイメージの指定
作成対象のVMのあるディレクトリを指定する。**VMイメージ作成Notebook**により生成されたイメージが格納されているディレクトリを指定すること。
```
image_base_dir = '/mnt/ubuntu14.04-base-vm'
```
以下の2つのファイルが存在している必要がある。
- base.img
- libvirt-base.xml
```
!ansible -b -a "ls -la {image_base_dir}" {target_group}
```
作成するVM名のリストを指定する。お手本では例として2つのVMを指定している。
起動したいVM名をlistで指定すること。**既存のVMと重複してはならない。**
```
vm_names = ['testvm-001']
vm_names
```
# VMの作成
VM用のファイルは以下のように作成される。
- /mnt
- (VM名).xml ... libvirtに与えるXML定義
- (VM名).img ... VM用の仮想ディスク
## XML定義の生成
基本となるXML定義を得る。
```
import tempfile
work_dir = tempfile.mkdtemp()
work_dir
!ansible -b -m fetch -a 'src={image_base_dir}/libvirt-base.xml dest={work_dir}/libvirt-base.xml flat=yes' {target_group}
```
基本のXML定義に基づいて、VM用定義を生成する。
```
import xml.etree.ElementTree as ET
import virtinst.util
import os
for n in vm_names:
vmxml = ET.parse(os.path.join(work_dir, 'libvirt-base.xml')).getroot()
vmxml.find('name').text = n
vmxml.find('devices').find('disk').find('source').attrib['file'] = os.path.join('/mnt', n + '.img')
vmxml.find('devices').find('interface').find('mac').attrib['address'] = virtinst.util.randomMAC()
ET.ElementTree(vmxml).write(os.path.join(work_dir, n + '.xml'))
!ls -la {work_dir}/*.xml
```
ホストに定義ファイルをコピーする。
```
for n in vm_names:
!ansible -b -m copy -a 'src={work_dir}/{n}.xml dest=/mnt/{n}.xml' {target_group}
```
## イメージファイルのコピー
イメージファイルをVM用に複製する。
```
for n in vm_names:
!ansible -b -a 'cp {image_base_dir}/base.img /mnt/{n}.img' {target_group}
```
## VMの起動
XMLファイル、仮想ディスクファイルがあるかどうかを確認する。
```
for n in vm_names:
!ansible -a 'ls -la /mnt/{n}.img /mnt/{n}.xml' {target_group}
```
VMを起動する。
```
import time
for n in vm_names:
!ansible -b -a 'virsh create /mnt/{n}.xml' {target_group}
time.sleep(60)
```
VMに設定されたIPアドレスを確認する。
```
import re
def get_mac_address(vmname):
    # Query libvirt (via ansible) for the VM's interfaces and return the MAC of
    # the bridge-attached one.  Relies on IPython's `!` shell-capture syntax,
    # so this only runs inside a notebook/IPython session.
    domiflist_stdio = !ansible -b -a "virsh domiflist {vmname}" {target_group}
    mac_pattern = re.compile(r'.*bridge.*\s([0-9a-f\:]+)\s*')
    # First line mentioning a bridge interface; raises IndexError if none match.
    vmmac = [mac_pattern.match(line).group(1) for line in domiflist_stdio if mac_pattern.match(line)][0]
    return vmmac
def get_ip_address(vmmac):
    # Look up the dnsmasq DHCP lease for the MAC and return the leased IPv4
    # address.  Uses IPython's `!` shell-capture syntax (notebook-only).
    leases_stdio = !ansible -b -a "grep {vmmac} /var/lib/dnsmasq/dnsmasq.leases" {target_group}
    ip_pattern = re.compile(r'.*\s([0-9a-f\:]+)\s+([0-9\.]+)\s.*')
    # Second capture group is the IP; raises IndexError when no lease matches.
    ipaddr = [ip_pattern.match(line).group(2) for line in leases_stdio if ip_pattern.match(line)][0]
    return ipaddr
vmdescs = zip(vm_names, map(lambda mac: get_ip_address(mac), map(lambda n: get_mac_address(n), vm_names)))
vmdescs
```
# Inventoryの更新
Inventoryに、作成したマシンのIPアドレスを追加する。変更する前に、現在の内容をコピーしておく。
```
!cp inventory {work_dir}/inventory-old
```
[Inventory](../edit/inventory) を修正する。
```
!diff -ur {work_dir}/inventory-old inventory
```
追加したグループ名でpingが通じるかどうかを確認する。
```
target_vmgroup = 'test-vm'
!ansible -m ping {target_vmgroup}
```
# 後始末
一時ディレクトリを削除する。
```
!rm -fr {work_dir}
```
| github_jupyter |
# SBTi-Finance Tool - Portfolio Aggregation
In this notebook we'll give some examples on how the portfolio aggregation methods can be used.
Please see the [methodology](https://sciencebasedtargets.org/wp-content/uploads/2020/09/Temperature-Rating-Methodology-V1.pdf), [guidance](https://sciencebasedtargets.org/wp-content/uploads/2020/10/Financial-Sector-Science-Based-Targets-Guidance-Pilot-Version.pdf) and the [technical documentation](https://sciencebasedtargets.github.io/SBTi-finance-tool/) for more details on the different aggregation methods.
See 1_analysis_example (on [Colab](https://colab.research.google.com/github/ScienceBasedTargets/SBTi-finance-tool/blob/main/examples/1_analysis_example.ipynb) or [Github](https://github.com/ScienceBasedTargets/SBTi-finance-tool/blob/main/examples/1_analysis_example.ipynb)) for more in depth example of how to work with Jupyter Notebooks in general and SBTi notebooks in particular.
## Setting up
First we will set up the imports, data providers, and load the portfolio.
For more examples of this process, please refer to notebook 1 & 2 (analysis and quick calculation example).
```
%pip install sbti-finance-tool
%load_ext autoreload
%autoreload 2
import SBTi
from SBTi.data.excel import ExcelProvider
from SBTi.portfolio_aggregation import PortfolioAggregationMethod
from SBTi.portfolio_coverage_tvp import PortfolioCoverageTVP
from SBTi.temperature_score import TemperatureScore, Scenario, ScenarioType, EngagementType
from SBTi.target_validation import TargetProtocol
from SBTi.interfaces import ETimeFrames, EScope
%aimport -pandas
import pandas as pd
# Download the dummy data
import urllib.request
import os
if not os.path.isdir("data"):
os.mkdir("data")
if not os.path.isfile("data/data_provider_example.xlsx"):
urllib.request.urlretrieve("https://github.com/ScienceBasedTargets/SBTi-finance-tool/raw/main/examples/data/data_provider_example.xlsx", "data/data_provider_example.xlsx")
if not os.path.isfile("data/example_portfolio.csv"):
urllib.request.urlretrieve("https://github.com/ScienceBasedTargets/SBTi-finance-tool/raw/main/examples/data/example_portfolio.csv", "data/example_portfolio.csv")
provider = ExcelProvider(path="data/data_provider_example.xlsx")
df_portfolio = pd.read_csv("data/example_portfolio.csv", encoding="iso-8859-1")
companies = SBTi.utils.dataframe_to_portfolio(df_portfolio)
scores_collection = {}
temperature_score = TemperatureScore(time_frames=list(SBTi.interfaces.ETimeFrames), scopes=[EScope.S1S2, EScope.S3, EScope.S1S2S3])
amended_portfolio = temperature_score.calculate(data_providers=[provider], portfolio=companies)
```
## Calculate the aggregated temperature score
Calculate an aggregated temperature score. This can be done using different aggregation methods. The temperature scores are calculated per time-frame/scope combination.
### WATS
Weighted Average Temperature Score (WATS): Temperature scores are allocated based on portfolio weights.
This method uses the "investment_value" field to be defined in your portfolio data.
```
# WATS: weight temperature scores by portfolio investment weights.
temperature_score.aggregation_method = PortfolioAggregationMethod.WATS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_wats = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['WATS'] = df_wats
df_wats
```
### TETS
Total emissions weighted temperature score (TETS): Temperature scores are allocated based on historical emission weights using total company emissions.
In addition to the portfolio's "investment value", the TETS method requires company emissions; please refer to [Data Legends - Fundamental Data](https://ofbdabv.github.io/SBTi/Legends.html#fundamental-data) for more details
```
# TETS: weight temperature scores by total company emissions.
temperature_score.aggregation_method = PortfolioAggregationMethod.TETS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_tets = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['TETS'] = df_tets
df_tets
```
### MOTS
Market Owned emissions weighted temperature score (MOTS): Temperature scores are allocated based on an equity ownership approach.
In addition to the portfolio's "investment value", the MOTS method requires company emissions and market cap; please refer to [Data Legends - Fundamental Data](https://ofbdabv.github.io/SBTi/Legends.html#fundamental-data) for more details
```
# MOTS: weight temperature scores via an equity-ownership (market cap) approach.
temperature_score.aggregation_method = PortfolioAggregationMethod.MOTS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_mots = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['MOTS'] = df_mots
df_mots
```
### EOTS
Enterprise Owned emissions weighted temperature score (EOTS): Temperature scores are allocated based
on an enterprise ownership approach.
In addition to the portfolio's "investment value", the EOTS method requires company emissions and enterprise value; please refer to [Data Legends - Fundamental Data](https://ofbdabv.github.io/SBTi/Legends.html#fundamental-data) for more details
```
# EOTS: weight temperature scores via an enterprise-value ownership approach.
temperature_score.aggregation_method = PortfolioAggregationMethod.EOTS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_eots = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['EOTS'] = df_eots
df_eots
```
### ECOTS
Enterprise Value + Cash emissions weighted temperature score (ECOTS): Temperature scores are allocated based on an enterprise value (EV) plus cash & equivalents ownership approach.
In addition to the portfolio's "investment value", the ECOTS method requires company emissions, company cash equivalents and enterprise value; please refer to [Data Legends - Fundamental Data](https://sciencebasedtargets.github.io/SBTi-finance-tool/Legends.html#fundamental-data) for more details
```
# ECOTS: weight temperature scores via enterprise value + cash ownership.
temperature_score.aggregation_method = PortfolioAggregationMethod.ECOTS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_ecots = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['ECOTS'] = df_ecots
df_ecots
```
### AOTS
Total Assets emissions weighted temperature score (AOTS): Temperature scores are allocated based on a total assets ownership approach.
In addition to the portfolio's "investment value", the AOTS method requires company emissions and company total assets; please refer to [Data Legends - Fundamental Data](https://sciencebasedtargets.github.io/SBTi-finance-tool/Legends.html#fundamental-data) for more details
```
# AOTS: weight temperature scores via a total-assets ownership approach.
temperature_score.aggregation_method = PortfolioAggregationMethod.AOTS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_aots = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['AOTS'] = df_aots
df_aots
```
### ROTS
Revenue owned emissions weighted temperature score (ROTS): Temperature scores are allocated based on the share of revenue.
In addition to the portfolio's "investment value", the ROTS method requires company emissions and company revenue; please refer to [Data Legends - Fundamental Data](https://sciencebasedtargets.github.io/SBTi-finance-tool/Legends.html#fundamental-data) for more details
```
# ROTS: weight temperature scores by share of company revenue.
temperature_score.aggregation_method = PortfolioAggregationMethod.ROTS
aggregated_scores = temperature_score.aggregate_scores(amended_portfolio)
round_two = lambda entry: round(entry['all']['score'], 2)
df_rots = pd.DataFrame(aggregated_scores.dict()).applymap(round_two)
scores_collection['ROTS'] = df_rots
df_rots
```
See below how each aggregation method impacts the scores for each time-frame and scope combination
```
# Stack every method's result into one frame (keyed by method name) for comparison.
pd.concat(scores_collection, axis=0)
```
| github_jupyter |
# Deep Q-Network (DQN)
We will implement a DQN agent using OpenAI Gym's LunarLander-v2 environment
## LunarLander-v2
https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
Rocket trajectory optimization is a classic topic in Optimal Control.
According to Pontryagin's maximum principle it's optimal to fire engine full throttle or turn it off.
That's the reason this environment is OK to have discrete actions (engine on or off).
To understand LunarLander
- Landing pad is always at coordinates (0,0).
- The coordinates are the first two numbers in the state vector.
- Reward for moving from the top of the screen to landing pad and zero speed is about 100..140 points.
- If lander moves away from landing pad it loses reward back.
- Episode finishes if the lander crashes or comes to rest, receiving additional -100 or +100 points.
- Each leg with ground contact is +10 points.
- Firing the main engine is -0.3 points each frame.
- Firing the side engine is -0.03 points each frame.
- Solved is 200 points.
- Landing outside the landing pad is possible.
- Fuel is infinite, so an agent can learn to fly and then land on its first attempt.
Four discrete actions available:
1. Do nothing.
2. Fire left orientation engine.
3. Fire main engine.
4. Fire right orientation engine.
Please see the source code for details.
https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
- To see a heuristic landing, run: python gym/envs/box2d/lunar_lander.py
- To play yourself, run: python examples/agents/keyboard_agent.py LunarLander-v2
References:
https://github.com/RMiftakhov/LunarLander-v2-drlnd
https://www.katnoria.com/nb_dqn_lunar/
https://drawar.github.io/blog/2019/05/12/lunar-lander-dqn.html
https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
Learn Reinforcement Learning (3) - DQN improvement and Deep SARSA
https://greentec.github.io/reinforcement-learning-third-en/
Introduction to Double Deep Q Learning (DDQN)
https://mc.ai/introduction-to-double-deep-q-learning-ddqn/
CONTINUOUS CONTROL WITH DEEP REINFORCEMENTLEARNING
https://arxiv.org/pdf/1509.02971.pdf
Other references:
https://cugtyt.github.io/blog/rl-notes/201807201658.html
https://www.freecodecamp.org/news/improvements-in-deep-q-learning-dueling-double-dqn-prioritized-experience-replay-and-fixed-58b130cc5682/
https://www.freecodecamp.org/news/an-introduction-to-deep-q-learning-lets-play-doom-54d02d8017d8/
https://adgefficiency.com/dqn-tuning/
https://stackoverflow.com/questions/57106676/weird-results-when-playing-with-dqn-with-targets
### 1. Import the necessary packages
```
%matplotlib inline
%config InlineBackend.figure_formmat = 'retina'
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
```
### 2. Instantiate the environment and agent
```
# Create and seed the environment for reproducibility.
env = gym.make('LunarLander-v2')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)  # fixed typo: "Nunber"

from dqn_agent import Agent

agent = Agent(state_size=8, action_size=4, seed=0)

# Watch an untrained agent for one episode (at most 6000 steps).
# NOTE: indentation of the loop body was restored; it was lost in the export.
state = env.reset()
for j in range(6000):
    action = agent.act(state)
    env.render()
    state, reward, done, _ = env.step(action)
    if done:
        break

env.close()
```
### 3. Train the Agent with DQN
```
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
    '''
    Deep Q-Learning training loop for the module-level `env` and `agent`.

    Params:
    =======
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting epsilon value (epsilon-greedy action selection)
        eps_end (float): minimum epsilon value
        eps_decay (float): multiplicative factor for decreasing epsilon

    Returns:
        list: score obtained in each episode.
    '''
    scores = []                          # per-episode scores
    scores_window = deque(maxlen=100)    # rolling window of the last 100 scores
    eps = eps_start
    for i_episode in range(1, n_episodes + 1):
        state = env.reset()
        score = 0
        for t in range(max_t):
            action = agent.act(state, eps)
            next_state, reward, done, _ = env.step(action)
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)
        scores.append(score)
        eps = max(eps_end, eps_decay * eps)  # decay epsilon, never below eps_end
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            # Start a fresh line every 100 episodes so progress stays visible.
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        if np.mean(scores_window) >= 200.0:
            # BUG FIX: removed a stray "0" after .format(...) that made this
            # line a syntax error in the original.
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))
            torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
            break
    return scores
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.