text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
<a href="https://colab.research.google.com/github/kyle-gao/GRSS_TrackMSD2021/blob/main/MakeTilesDeepGlobe.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
"""
Copyright 2021 Yi Lin(Kyle) Gao
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
```
#DeepGlobe Dataset
https://www.kaggle.com/balraj98/deepglobe-land-cover-classification-dataset#__sid=js0
```
# Environment setup: mount Google Drive (Colab) and extract the DeepGlobe
# archive into the local filesystem.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import shutil
from PIL import Image
import os
from google.colab import drive
import PIL
drive.mount('/content/drive')
# NOTE(review): the ".zip.zip" double extension looks intentional (archive was
# zipped twice when uploaded) — confirm against the actual Drive file name.
shutil.unpack_archive("/content/drive/MyDrive/DeepGlobeLandCover.zip.zip",'/content/DeepGlobe')
# Sanity check: load one sample satellite image as a PIL image.
test_x = tf.keras.preprocessing.image.load_img(
    "/content/DeepGlobe/train/100694_sat.jpg", grayscale=False, color_mode='rgb', target_size=None,
    interpolation='nearest')
test_x
data_dir = "/content/DeepGlobe/train"
# List the *_mask.png files; ordering must stay deterministic so that the
# image/mask pairs derived from the same filename stem line up.
list_ds = tf.data.Dataset.list_files(str(data_dir+"/*.png"),shuffle=False) #DO NOT SHUFFLE
#dataset is made up of strings
def to_categorical(tensor, class_dict):
    """Map pixel values to class indices according to class_dict.

    Args:
        tensor: numpy array whose entries are keys of class_dict.
        class_dict: mapping {pixel_value: class_index}.

    Returns:
        The same array, modified in place, with values replaced by class
        indices.
    """
    # Compute all masks BEFORE writing: assigning sequentially can corrupt the
    # result when a class index collides with a later key (e.g. the DeepGlobe
    # dict maps key 0 -> 6, which would re-map pixels already assigned class
    # index 0 by an earlier iteration).
    masks = [(tensor == k, v) for k, v in class_dict.items()]
    for mask, v in masks:
        tensor[mask] = v
    return tensor
```
##Label
Each satellite image is paired with a mask image for land cover annotation. The mask is a RGB image with 7 classes of labels, using color-coding (R, G, B) as follows.
Urban land: 0,255,255 - Man-made, built up areas with human artifacts (can ignore roads for now which is hard to label)
Agriculture land: 255,255,0 - Farms, any planned (i.e. regular) plantation, cropland, orchards, vineyards, nurseries, and ornamental horticultural areas; confined feeding operations.
Rangeland: 255,0,255 - Any non-forest, non-farm, green land, grass
Forest land: 0,255,0 - Any land with x% tree crown density plus clearcuts.
Water: 0,0,255 - Rivers, oceans, lakes, wetland, ponds.
Barren land: 255,255,255 - Mountain, land, rock, desert, beach, no vegetation
Unknown: 0,0,0 - Clouds and others
File names for satellite images and the corresponding mask image are id _sat.jpg and id _mask.png. id is a randomized integer.
Please note:
The values of the mask image may not be pure 0 and 255. When converting to labels, please binarize them at threshold 128.
```
# Tiling configuration: 512x512 tiles, VALID padding (drop partial edge tiles).
size = (512,512)
filenames = list(list_ds)
padding = 'VALID'
def deepglobe_write_tiles(filenames, size=(512, 512), padding='VALID', save_dir="/content/DeepGlobe224/"):
    """Cut each DeepGlobe image/mask pair into fixed-size tiles and save them.

    Args:
        filenames: iterable of string tensors (tf.data.Dataset.list_files),
            one entry per "<id>_mask.png" file.
        size: (height, width) of each tile.
        padding: one of "VALID" (drop partial edge tiles) or "SAME"
            (zero-pad), as accepted by tf.image.extract_patches.
        save_dir: output directory; created if missing.

    Returns:
        Nothing. Writes "<id>_<i>_sat.jpg" / "<id>_<i>_mask.png" tile files.
    """
    (h, w) = size
    # Create the output directory once, up front, instead of re-checking it
    # inside the per-file loop (os.makedirs also creates missing parents,
    # unlike os.mkdir).
    os.makedirs(save_dir, exist_ok=True)
    for f in filenames:
        fn = tf.strings.split(f, "_")
        stem = fn[0]
        image_fn = (stem + "_sat.jpg").numpy()
        label_fn = (stem + "_mask.png").numpy()
        image = tf.keras.preprocessing.image.img_to_array(
            tf.keras.preprocessing.image.load_img(image_fn))
        label = tf.keras.preprocessing.image.img_to_array(
            tf.keras.preprocessing.image.load_img(label_fn))  # (H, W, 3)
        # NOTE: binarization / categorical encoding of the mask is deliberately
        # deferred to preprocessing, because saving re-encodes pixel values.
        image = tf.expand_dims(image, axis=0)
        image_tiles = tf.image.extract_patches(images=image,
                                               sizes=[1, h, w, 1],
                                               strides=[1, h, w, 1],
                                               rates=[1, 1, 1, 1],
                                               padding=padding)
        image_tiles = tf.reshape(image_tiles, [-1, h, w, 3])
        label = tf.expand_dims(label, axis=0)
        label_tiles = tf.image.extract_patches(images=label,
                                               sizes=[1, h, w, 1],
                                               strides=[1, h, w, 1],
                                               rates=[1, 1, 1, 1],
                                               padding=padding)
        label_tiles = tf.reshape(label_tiles, [-1, h, w, 3])
        # Decode the file stem once per image instead of twice per tile.
        base = save_dir + stem.numpy().decode("utf-8").split("/")[-1]
        for i in range(label_tiles.shape[0]):
            tf.keras.preprocessing.image.save_img(
                base + "_" + str(i) + "_sat.jpg", image_tiles[i, :, :, :])
            tf.keras.preprocessing.image.save_img(
                base + "_" + str(i) + "_mask.png", label_tiles[i, :, :, :])
        print(image_fn)
# Run the tiling, then zip the tiles and copy the archive back to Drive.
deepglobe_write_tiles(filenames)
# NOTE(review): output directory is named "DeepGlobe224" but holds 512x512
# tiles per the call above — confirm the intended directory name.
shutil.make_archive("/content/DeepGlobe_512",'zip',"/content/DeepGlobe224")
shutil.copy2("/content/DeepGlobe_512.zip","/content/drive/MyDrive")
```
| github_jupyter |
# Autoencoder
---
# Tutorial Objectives
## Architecture

```
# @title Video 1: Extensions
# Embed the tutorial video (Colab/Jupyter display helper).
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="pgkrU9UqXiU", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
---
# Setup
Please execute the cell(s) below to initialize the notebook environment.
```
# Imports
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
# @title Figure settings
#!pip install plotly --quiet
import plotly.graph_objects as go
from plotly.colors import qualitative
# IPython magic: render matplotlib figures at retina resolution.
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def downloadMNIST():
    """
    Download MNIST dataset and transform it to torch.Tensor

    Args:
        None

    Returns:
        x_train : training images (torch.Tensor) (60000, 28, 28)
        x_test : test images (torch.Tensor) (10000, 28, 28)
        y_train : training labels (torch.Tensor) (60000, )
        y_test : test labels (torch.Tensor) (10000, )
    """
    # NOTE(review): fetch_openml is never imported in this notebook — needs
    # `from sklearn.datasets import fetch_openml` before this cell runs.
    # NOTE(review): recent scikit-learn returns a DataFrame here unless
    # as_frame=False is passed; the positional indexing below assumes ndarrays.
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
    # Trunk the data: first 60k rows are the canonical train split, last 10k test.
    n_train = 60000
    n_test = 10000
    train_idx = np.arange(0, n_train)
    test_idx = np.arange(n_train, n_train + n_test)
    x_train, y_train = X[train_idx], y[train_idx]
    x_test, y_test = X[test_idx], y[test_idx]
    # Transform np.ndarrays to torch.Tensor, reshaping 784-vectors to 28x28.
    x_train = torch.from_numpy(np.reshape(x_train,
                                          (len(x_train),
                                           28, 28)).astype(np.float32))
    x_test = torch.from_numpy(np.reshape(x_test,
                                         (len(x_test),
                                          28, 28)).astype(np.float32))
    # Labels arrive as strings from OpenML; cast to int before wrapping.
    y_train = torch.from_numpy(y_train.astype(int))
    y_test = torch.from_numpy(y_test.astype(int))
    return (x_train, y_train, x_test, y_test)
def init_weights_kaiming_uniform(layer):
    """Apply Kaiming-uniform initialization to a linear layer's weights.

    Intended to be passed to nn.Module.apply, which visits every submodule.

    Args:
        layer (torch.nn.Module): module visited by .apply.

    Returns:
        Nothing; modules other than nn.Linear are left untouched.
    """
    if not isinstance(layer, nn.Linear):
        return
    nn.init.kaiming_uniform_(layer.weight.data)
def init_weights_kaiming_normal(layer):
    """Apply Kaiming-normal initialization to a linear layer's weights.

    Intended to be passed to nn.Module.apply, which visits every submodule.

    Args:
        layer (torch.nn.Module): module visited by .apply.

    Returns:
        Nothing; modules other than nn.Linear are left untouched.
    """
    if not isinstance(layer, nn.Linear):
        return
    nn.init.kaiming_normal_(layer.weight.data)
def get_layer_weights(layer):
    """Return the learnable parameters of a PyTorch layer as numpy arrays.

    Args:
        layer (torch.nn.Module): layer to inspect.

    Returns:
        list of numpy arrays, one per parameter tensor (empty if the layer
        has no learnable parameters).
    """
    # The original `if layer.parameters():` guard was a no-op: .parameters()
    # returns a generator, which is always truthy. Iterating it directly
    # handles parameter-free layers naturally (the loop body never runs).
    return [p.detach().numpy() for p in layer.parameters()]
def print_parameter_count(net):
    """Print a per-layer table of learnable-parameter counts for a network.

    Args:
        net (torch.nn.Sequential): network whose layers are iterated in order.

    Returns:
        Nothing; the table and total go to stdout.
    """
    total = 0
    # One row per layer: index, parameter count, module repr.
    for idx, module in enumerate(net):
        layer_total = sum(w.size for w in get_layer_weights(module))
        total += layer_total
        print(f'{idx}\t {layer_total}\t {module}')
    print(f'\nTotal:\t {total}')
def eval_mse(y_pred, y_true):
    """Compute the mean squared error between prediction and ground truth.

    Args:
        y_pred (torch.Tensor): prediction samples.
        y_true (torch.Tensor): ground-truth samples.

    Returns:
        float: MSE(y_pred, y_true).
    """
    # No gradients needed for evaluation.
    with torch.no_grad():
        return float(nn.MSELoss()(y_pred, y_true))
def eval_bce(y_pred, y_true):
    """Compute the binary cross-entropy between prediction and ground truth.

    Args:
        y_pred (torch.Tensor): prediction samples (values in [0, 1]).
        y_true (torch.Tensor): ground-truth samples.

    Returns:
        float: BCE(y_pred, y_true).
    """
    # No gradients needed for evaluation.
    with torch.no_grad():
        return float(nn.BCELoss()(y_pred, y_true))
def plot_row(images, show_n=10, image_shape=None):
    """
    Plots rows of images from list of iterables (iterables: list, numpy array
    or torch.Tensor). Also accepts single iterable.
    Randomly selects images in each list element if item count > show_n.

    Args:
        images (iterable or list of iterables)
            single iterable with images, or list of iterables
        show_n (integer)
            maximum number of images per row
        image_shape (tuple or list)
            original shape of image if vectorized form

    Returns:
        Nothing.
    """
    # Normalize a single iterable to a one-element list so both call forms
    # share one code path below (one figure per list element).
    if not isinstance(images, (list, tuple)):
        images = [images]
    for items_idx, items in enumerate(images):
        items = np.array(items)
        # A single vectorized image becomes a one-row batch.
        if items.ndim == 1:
            items = np.expand_dims(items, axis=0)
        # Subsample without replacement when there are more images than shown.
        if len(items) > show_n:
            selected = np.random.choice(len(items), show_n, replace=False)
            items = items[selected]
        # Un-vectorize: restore the original 2D image shape if provided.
        if image_shape is not None:
            items = items.reshape([-1]+list(image_shape))
        plt.figure(figsize=(len(items) * 1.5, 2))
        for image_idx, image in enumerate(items):
            plt.subplot(1, len(items), image_idx + 1)
            # Per-image vmin/vmax so each image uses the full gray range.
            plt.imshow(image, cmap='gray', vmin=image.min(), vmax=image.max())
            plt.axis('off')
        plt.tight_layout()
def to_s2(u):
    """
    Project 3D coordinates onto spherical angles (theta, phi) on the surface
    of the unit sphere S2. theta lies in [0, pi], phi in [-pi, pi].

    Args:
        u (list, numpy array or torch.Tensor of floats)
            3D coordinates, one row per point.

    Returns:
        numpy array of (theta, phi) pairs, one row per point.
    """
    x, y, z = u[:, 0], u[:, 1], u[:, 2]
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # Note the (x, y) argument order for arctan2: phi is measured to match
    # to_u3's x = sin(theta)sin(phi), y = sin(theta)cos(phi) convention.
    theta = np.arccos(z / radius)
    phi = np.arctan2(x, y)
    return np.stack([theta, phi], axis=1)
def to_u3(s):
    """
    Lift spherical angles (theta, phi) back to 3D coordinates on the surface
    of the unit sphere S2, i.e. (theta, phi) ---> (x, y, z) with radius 1.

    Args:
        s (list, numpy array or torch.Tensor of floats)
            2D (theta, phi) coordinates on unit sphere S2.

    Returns:
        numpy array of 3D coordinates on the surface of S2.
    """
    theta, phi = s[:, 0], s[:, 1]
    # Mirror of to_s2: x pairs with sin(phi), y with cos(phi).
    sin_theta = np.sin(theta)
    return np.stack([sin_theta * np.sin(phi),
                     sin_theta * np.cos(phi),
                     np.cos(theta)], axis=1)
def xy_lim(x):
    """
    Compute padded axis limits for plt.xlim / plt.ylim from the extrema of x.

    Args:
        x (list, numpy array or torch.Tensor of floats)
            data to be plotted, one (x, y) row per point.

    Returns:
        ([xmin, xmax], [ymin, ymax]) limits padded by ~5% of the data range.
    """
    eps = np.finfo(float).eps
    lo = np.min(x, axis=0)
    hi = np.max(x, axis=0)
    # Pad the lower bound first; the upper-bound padding is then measured
    # against the already-padded lower bound (behavior kept from the original).
    lo = lo - np.abs(hi - lo) * 0.05 - eps
    hi = hi + np.abs(hi - lo) * 0.05 + eps
    return [lo[0], hi[0]], [lo[1], hi[1]]
def plot_generative(x, decoder_fn, image_shape, n_row=16, s2=False):
    """
    Plots images reconstructed by decoder_fn from a 2D grid in
    latent space that is determined by minimum and maximum values in x.

    Args:
        x (list, numpy array or torch.Tensor of floats)
            2D or 3D coordinates in latent space
        decoder_fn (callable)
            function returning vectorized images from 2D latent space coordinates
        image_shape (tuple or list)
            original shape of image
        n_row (integer)
            number of rows in grid
        s2 (boolean)
            convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)

    Returns:
        Nothing.
    """
    if s2:
        x = to_s2(np.array(x))
    xlim, ylim = xy_lim(np.array(x))
    # Square grid cells: the step dx derived from the x-range is shared by
    # both axes.
    dx = (xlim[1] - xlim[0]) / n_row
    # Cell-centered sample points (offset dx/2 from each limit).
    grid = [np.linspace(ylim[0] + dx / 2, ylim[1] - dx / 2, n_row),
            np.linspace(xlim[0] + dx / 2, xlim[1] - dx / 2, n_row)]
    canvas = np.zeros((image_shape[0] * n_row, image_shape[1] * n_row))
    cmap = plt.get_cmap('gray')
    # Fill the canvas cell by cell; the y-grid is reversed so larger latent
    # values appear at the top of the plot.
    for j, latent_y in enumerate(grid[0][::-1]):
        for i, latent_x in enumerate(grid[1]):
            latent = np.array([[latent_x, latent_y]], dtype=np.float32)
            if s2:
                # Map the (theta, phi) grid point back onto the sphere, since
                # the decoder expects 3D latent coordinates in this mode.
                latent = to_u3(latent)
            with torch.no_grad():
                x_decoded = decoder_fn(torch.from_numpy(latent))
            x_decoded = x_decoded.reshape(image_shape)
            canvas[j * image_shape[0]: (j + 1) * image_shape[0],
                   i * image_shape[1]: (i + 1) * image_shape[1]] = x_decoded
    plt.imshow(canvas, cmap=cmap, vmin=canvas.min(), vmax=canvas.max())
    plt.axis('off')
def plot_latent(x, y, show_n=500, s2=False, fontdict=None, xy_labels=None):
    """
    Plots digit class of each sample in 2D latent space coordinates.

    Args:
        x (list, numpy array or torch.Tensor of floats)
            2D coordinates in latent space
        y (list, numpy array or torch.Tensor of floats)
            digit class of each sample
        show_n (integer)
            maximum number of samples drawn
        s2 (boolean)
            convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
        fontdict (dictionary)
            style option for plt.text
        xy_labels (list)
            optional list with [xlabel, ylabel]

    Returns:
        Nothing.
    """
    if fontdict is None:
        fontdict = {'weight': 'bold', 'size': 12}
    if s2:
        x = to_s2(np.array(x))
    cmap = plt.get_cmap('tab10')
    # Subsample without replacement for readability.
    if len(x) > show_n:
        selected = np.random.choice(len(x), show_n, replace=False)
        x = x[selected]
        y = y[selected]
    # Draw each sample as its digit text, colored by class (tab10 has 10
    # colors, hence the /10 normalization).
    for my_x, my_y in zip(x, y):
        plt.text(my_x[0], my_x[1], str(int(my_y)),
                 color=cmap(int(my_y) / 10.),
                 fontdict=fontdict,
                 horizontalalignment='center',
                 verticalalignment='center',
                 alpha=0.8)
    xlim, ylim = xy_lim(np.array(x))
    plt.xlim(xlim)
    plt.ylim(ylim)
    if s2:
        if xy_labels is None:
            xy_labels = [r'$\varphi$', r'$\theta$']
        # Angular tick marks: x spans [0, pi] (theta), y spans [-pi, pi] (phi).
        plt.xticks(np.arange(0, np.pi + np.pi / 6, np.pi / 6),
                   ['0', '$\pi/6$', '$\pi/3$', '$\pi/2$',
                    '$2\pi/3$', '$5\pi/6$', '$\pi$'])
        plt.yticks(np.arange(-np.pi, np.pi + np.pi / 3, np.pi / 3),
                   ['$-\pi$', '$-2\pi/3$', '$-\pi/3$', '0',
                    '$\pi/3$', '$2\pi/3$', '$\pi$'])
    if xy_labels is None:
        xy_labels = ['$Z_1$', '$Z_2$']
    plt.xlabel(xy_labels[0])
    plt.ylabel(xy_labels[1])
def plot_latent_generative(x, y, decoder_fn, image_shape, s2=False,
                           title=None, xy_labels=None):
    """
    Draw two side-by-side panels: the encoder map (latent-space scatter of
    digit classes) and the decoder grid (images generated across the latent
    range).

    Args:
        x (list, numpy array or torch.Tensor of floats)
            2D coordinates in latent space
        y (list, numpy array or torch.Tensor of floats)
            digit class of each sample
        decoder_fn (callable)
            function returning vectorized images from 2D latent space coordinates
        image_shape (tuple or list)
            original shape of image
        s2 (boolean)
            convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
        title (string)
            plot title
        xy_labels (list)
            optional list with [xlabel, ylabel]

    Returns:
        Nothing.
    """
    figure = plt.figure(figsize=(12, 6))
    if title is not None:
        figure.suptitle(title, y=1.05)
    # Left panel: latent scatter produced by the encoder.
    left = figure.add_subplot(121)
    left.set_title('Encoder map', y=1.05)
    plot_latent(x, y, s2=s2, xy_labels=xy_labels)
    # Right panel: decoded image grid over the latent range.
    right = figure.add_subplot(122)
    right.set_title('Decoder grid', y=1.05)
    plot_generative(x, decoder_fn, image_shape, s2=s2)
    plt.tight_layout()
    plt.show()
def plot_latent_3d(my_x, my_y, show_text=True, show_n=500):
    """
    Plot digit class (as text) or markers in 3D latent space coordinates,
    using an interactive plotly figure.

    Args:
        my_x (list, numpy array or torch.Tensor of floats)
            3D coordinates in latent space
        my_y (list, numpy array or torch.Tensor of floats)
            digit class of each sample
        show_text (boolean)
            draw the digit text at each point if True, plain markers otherwise
        show_n (integer)
            number of randomly selected samples drawn

    Returns:
        Nothing.
    """
    layout = {'margin': {'l': 0, 'r': 0, 'b': 0, 't': 0},
              'scene': {'xaxis': {'showspikes': False,
                                  'title': 'z1'},
                        'yaxis': {'showspikes': False,
                                  'title': 'z2'},
                        'zaxis': {'showspikes': False,
                                  'title': 'z3'}}
              }
    # Subsample for readability; color each point by its digit class using
    # plotly's 10-color T10 palette.
    selected_idx = np.random.choice(len(my_x), show_n, replace=False)
    colors = [qualitative.T10[idx] for idx in my_y[selected_idx]]
    x = my_x[selected_idx, 0]
    y = my_x[selected_idx, 1]
    z = my_x[selected_idx, 2]
    text = my_y[selected_idx]
    if show_text:
        trace = go.Scatter3d(x=x, y=y, z=z, text=text,
                             mode='text',
                             textfont={'color': colors, 'size': 12}
                             )
        # Hover tooltips are redundant when the digits themselves are drawn.
        layout['hovermode'] = False
    else:
        trace = go.Scatter3d(x=x, y=y, z=z, text=text,
                             hoverinfo='text', mode='markers',
                             marker={'size': 5, 'color': colors, 'opacity': 0.8}
                             )
    fig = go.Figure(data=trace, layout=layout)
    fig.show()
def runSGD(net, input_train, input_test, criterion='bce',
           n_epochs=10, batch_size=32, verbose=False):
    """
    Trains autoencoder network with stochastic gradient descent with Adam
    optimizer and loss criterion. Train samples are shuffled, and loss is
    displayed at the end of each epoch for both MSE and BCE. Plots training loss
    at each minibatch (maximum of 500 randomly selected values).

    Args:
        net (torch network)
            ANN object (nn.Module)
        input_train (torch.Tensor)
            vectorized input images from train set
        input_test (torch.Tensor)
            vectorized input images from test set
        criterion (string)
            train loss: 'bce' or 'mse'
        n_epochs (integer)
            number of full iterations of training data
        batch_size (integer)
            number of elements in mini-batches
        verbose (boolean)
            print final loss

    Returns:
        Nothing.
    """
    # Initialize loss function
    if criterion == 'mse':
        loss_fn = nn.MSELoss()
    elif criterion == 'bce':
        loss_fn = nn.BCELoss()
    else:
        # NOTE(review): execution continues after this message, leaving
        # loss_fn unbound — the first loss_fn(...) call then raises NameError.
        print('Please specify either "mse" or "bce" for loss criterion')
    # Initialize SGD optimizer (Adam with default hyperparameters)
    optimizer = optim.Adam(net.parameters())
    # Placeholder for per-minibatch loss values
    track_loss = []
    print('Epoch', '\t', 'Loss train', '\t', 'Loss test')
    for i in range(n_epochs):
        # Fresh shuffle each epoch; the autoencoder target is the input itself.
        shuffle_idx = np.random.permutation(len(input_train))
        batches = torch.split(input_train[shuffle_idx], batch_size)
        for batch in batches:
            output_train = net(batch)
            loss = loss_fn(output_train, batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Keep track of loss at each minibatch
            track_loss += [float(loss)]
        loss_epoch = f'{i+1}/{n_epochs}'
        with torch.no_grad():
            # Full-dataset train/test losses reported once per epoch.
            output_train = net(input_train)
            loss_train = loss_fn(output_train, input_train)
            loss_epoch += f'\t {loss_train:.4f}'
            output_test = net(input_test)
            loss_test = loss_fn(output_test, input_test)
            loss_epoch += f'\t\t {loss_test:.4f}'
        print(loss_epoch)
    if verbose:
        # Print final loss under both criteria for comparison
        loss_mse = f'\nMSE\t {eval_mse(output_train, input_train):0.4f}'
        loss_mse += f'\t\t {eval_mse(output_test, input_test):0.4f}'
        print(loss_mse)
        loss_bce = f'BCE\t {eval_bce(output_train, input_train):0.4f}'
        loss_bce += f'\t\t {eval_bce(output_test, input_test):0.4f}'
        print(loss_bce)
    # Plot loss, downsampled to at most ~500 points
    step = int(np.ceil(len(track_loss) / 500))
    x_range = np.arange(0, len(track_loss), step)
    plt.figure()
    plt.plot(x_range, track_loss[::step], 'C0')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.xlim([0, None])
    plt.ylim([0, None])
    plt.show()
class NormalizeLayer(nn.Module):
    """
    PyTorch layer (nn.Module) that projects activations onto the unit sphere
    by dividing each row by its L2 norm.

    Args:
        None.

    Returns:
        Object inherited from nn.Module class.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Normalize along the feature dimension (dim=1); F.normalize guards
        # against division by zero internally.
        return nn.functional.normalize(x, p=2, dim=1)
```
---
# Section 1: Download and prepare MNIST dataset
We use the helper function `downloadMNIST` to download the dataset and transform it into `torch.Tensor` and assign train and test sets to (`x_train`, `y_train`) and (`x_test`, `y_test`).
The variable `input_size` stores the length of *vectorized* versions of the images `input_train` and `input_test` for training and test images.
**Instructions:**
* Please execute the cell below
```
# Download MNIST
x_train, y_train, x_test, y_test = downloadMNIST()
# Scale pixel intensities from [0, 255] to [0, 1] (required for BCE loss).
x_train = x_train / 255
x_test = x_test / 255
image_shape = x_train.shape[1:]
input_size = np.prod(image_shape)
# Vectorize images: (N, 28, 28) -> (N, 784).
input_train = x_train.reshape([-1, input_size])
input_test = x_test.reshape([-1, input_size])
# Fixed random sample indices reused by the later visualization cells.
test_selected_idx = np.random.choice(len(x_test), 10, replace=False)
train_selected_idx = np.random.choice(len(x_train), 10, replace=False)
print(f'shape image \t \t {image_shape}')
print(f'shape input_train \t {input_train.shape}')
print(f'shape input_test \t {input_test.shape}')
# Load the project's audio/visual data and build sliding windows over the
# audio stream.
import sys
sys.path.append("../src/data/")
from file import read_file
import more_itertools as mit

PATH_AUD = '../data/raw/aud'
PATH_VIS = '../data/raw/vis'
PATH_INFO = '../data/raw/info_'
data_aud, data_vis, CHANNEL_NAMES = read_file(PATH_AUD, PATH_VIS)
# Rearrange axes then concatenate along the first axis.
# NOTE(review): exact axis semantics depend on read_file's output shape —
# confirm against ../src/data/file.py.
aud = np.concatenate(data_aud.transpose([1, 3, 0, 2]), 0)
np.concatenate(data_aud.transpose([1, 3, 0, 2]), 0).shape
# Sliding windows of 32 consecutive samples.
aud_32 = np.array(list(mit.windowed(aud, n=32)))
# .astype(np.float64)
# One float32 tensor per channel.
por_canal = [torch.from_numpy(aud_.astype(np.float32)) for aud_ in aud_32.T]
# Shape bookkeeping note — the next line was a bare "a -> b" annotation that
# made the whole cell a SyntaxError ("->" is invalid outside a function
# signature), so it is kept as a comment:
# 240*539*20, 64 -> (240*539*20)-31, 32, 64
[(240*539*20)-31, 32], 64
(240*539*20)-31
```
---
# Section 2: Deeper autoencoder (2D)
The internal representation of shallow autoencoder with 2D latent space is similar to PCA, which shows that the autoencoder is not fully leveraging non-linear capabilities to model data. Adding capacity in terms of learnable parameters takes advantage of non-linear operations in encoding/decoding to capture non-linear patterns in data.
Adding hidden layers enables us to introduce additional parameters, either layerwise or depthwise. The same amount $N$ of additional parameters can be added in a single layer or distributed among several layers. Adding several hidden layers reduces the compression/decompression ratio of each layer.
## Exercise 1: Build deeper autoencoder (2D)
Implement this deeper version of the ANN autoencoder by adding four hidden layers. The number of units per layer in the encoder is the following:
```
784 -> 392 -> 64 -> 2
```
The shallow autoencoder has a compression ratio of **784:2 = 392:1**. The first additional hidden layer has a compression ratio of **2:1**, followed by a hidden layer that sets the bottleneck compression ratio of **32:1**.
The choice of hidden layer size aims to reduce the compression rate in the bottleneck layer while increasing the count of trainable parameters. For example, if the compression rate of the first hidden layer doubles from **2:1** to **4:1**, the count of trainable parameters halves from 667K to 333K.
This deep autoencoder's performance may be further improved by adding additional hidden layers and by increasing the count of trainable parameters in each layer. These improvements have a diminishing return due to challenges associated with training under high parameter count and depth. One option explored in the *Bonus* section is to add a first hidden layer with 2x - 3x the input size. This size increase results in millions of parameters at the cost of longer training time.
Weight initialization is particularly important in deep networks. The availability of large datasets and weight initialization likely drove the deep learning revolution of 2010. We'll implement Kaiming normal as follows:
```
model[:-2].apply(init_weights_kaiming_normal)
```
**Instructions:**
* Add four additional layers and activation functions to the network
* Adjust the definitions of `encoder` and `decoder`
* Check learnable parameter count for this autoencoder by executing the last cell
```
encoding_size = 2  # bottleneck width (alternatives tried: [2,4,8,16])
input_size = 32
# Symmetric autoencoder MLP: 32 -> 16 -> 8 -> 4 -> 2 -> 4 -> 8 -> 16 -> 32
model_32 = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),  # 32 -> 16
    nn.PReLU(),
    nn.Linear(int(input_size / 2), int(input_size / 4)),  # 16 -> 8
    nn.PReLU(),
    nn.Linear(int(input_size / 4), int(input_size / 8)),  # 8 -> 4
    nn.PReLU(),
    nn.Linear(int(input_size / 8), encoding_size),  # 4 -> 2 (bottleneck)
    nn.PReLU(),
    nn.Linear(encoding_size, int(input_size / 8)),  # 2 -> 4
    nn.PReLU(),
    nn.Linear(int(input_size / 8), int(input_size / 4)),  # 4 -> 8
    nn.PReLU(),
    nn.Linear(int(input_size / 4), int(input_size / 2)),  # 8 -> 16
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),  # 16 -> 32
    # Sigmoid keeps reconstructions in [0, 1], matching the BCE criterion.
    nn.Sigmoid()
)
# Kaiming-normal init for every module except the final Linear + Sigmoid.
model_32[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model_32}\n')
```
**Helper function:** `print_parameter_count`
Please uncomment the line below to inspect this function.
```
# Inspect per-layer parameter counts of the 32-unit autoencoder.
print_parameter_count(model_32)
```
## Train the autoencoder
Train the network for `n_epochs=10` epochs with `batch_size=128`, and observe how the internal representation successfully captures additional digit classes.
The encoder map shows well-separated clusters that correspond to the associated digits in the decoder grid. The decoder grid also shows that the network is robust to digit skewness, i.e., digits leaning to the left or the right are recognized in the same digit class.
**Instructions:**
* Please execute the cells below
```
n_epochs = 10
batch_size = 128
# Train the 32-input autoencoder on the first audio channel (the input doubles
# as its own reconstruction target).
runSGD(model_32, por_canal[0], por_canal[0], n_epochs=n_epochs,
       batch_size=batch_size)
with torch.no_grad():
    # NOTE(review): `model`, `encoder` and `decoder` are not defined at this
    # point in the notebook (this cell trains `model_32`); the lines below
    # appear left over from the MNIST tutorial — confirm intended variables.
    output_test = model(input_test)
    latent_test = encoder(input_test)
plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
         image_shape=image_shape)
plot_latent_generative(latent_test, y_test, decoder, image_shape=image_shape)
```
---
# Section 3: Spherical latent space
The previous architecture generates representations that typically spread in different directions from coordinate $(z_1, z_2)=(0,0)$. This effect is due to the initialization of weights distributed randomly around `0`.
Adding a third unit to the bottleneck layer defines a coordinate $(z_1, z_2, z_3)$ in 3D space. The latent space from such a network will still spread out from $(z_1, z_2, z_3)=(0, 0, 0)$.
Collapsing the latent space on the surface of a sphere removes the possibility of spreading indefinitely from the origin $(0, 0, 0)$ in any direction since this will eventually lead back to the origin. This constraint generates a representation that fills the surface of the sphere.

Projecting to the surface of the sphere is implemented by dividing the coordinates $(z_1, z_2, z_3)$ by their $L_2$ norm.
$(z_1, z_2, z_3)\longmapsto (s_1, s_2, s_3)=(z_1, z_2, z_3)/\|(z_1, z_2, z_3)\|_2=(z_1, z_2, z_3)/ \sqrt{z_1^2+z_2^2+z_3^2}$
This mapping projects to the surface of the [$S_2$ sphere](https://en.wikipedia.org/wiki/N-sphere) with unit radius. (Why?)
## Section 3.1: Build and train autoencoder (3D)
We start by adding one unit to the bottleneck layer and visualize the latent space in 3D.
Please execute the cell below.
```
encoding_size = 3
# Autoencoder with a 3D bottleneck:
# input -> input/2 -> 96 -> 3 -> 96 -> input/2 -> input
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, encoding_size),
    nn.PReLU(),
    nn.Linear(encoding_size, encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),
    nn.Sigmoid()
)
# Kaiming-normal init for all but the final Linear + Sigmoid.
model[:-2].apply(init_weights_kaiming_normal)
# Split at module index 6: modules 0-5 encode, the rest decode.
encoder = model[:6]
decoder = model[6:]
print(f'Autoencoder \n\n {model}')
```
## Section 3.2: Train the autoencoder
Train the network for `n_epochs=10` epochs with `batch_size=128`. Observe how the internal representation spreads from the origin and reaches much lower loss due to the additional degree of freedom in the bottleneck layer.
**Instructions:**
* Please execute the cell below
```
# Train the 3D-bottleneck autoencoder on MNIST.
n_epochs = 10
batch_size = 128
runSGD(model, input_train, input_test, n_epochs=n_epochs,
       batch_size=batch_size)
```
## Section 3.3: Visualize the latent space in 3D
**Helper function**: `plot_latent_3d`
Please uncomment the line below to inspect this function.
```
# help(plot_latent_3d)
# Encode the test set (no gradients needed) and show the 3D latent space.
with torch.no_grad():
    latent_test = encoder(input_test)
plot_latent_3d(latent_test, y_test)
```
### Exercise 2: Build deep autoencoder (2D) with latent spherical space
We now constrain the latent space to the surface of a sphere $S_2$.
**Instructions:**
* Add the custom layer `NormalizeLayer` after the bottleneck layer
* Adjust the definitions of `encoder` and `decoder`
* Experiment with keyword `show_text=False` for `plot_latent_3d`
**Helper function**: `NormalizeLayer`
Please uncomment the line below to inspect this function.
```
# help(NormalizeLayer)
# Exercise scaffold: same 3D-bottleneck autoencoder; students are expected to
# insert NormalizeLayer right after the bottleneck.
encoding_size = 3
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, encoding_size),
    nn.PReLU(),
    #################################################
    ## TODO for students: add custom normalize layer
    #################################################
    # add the normalization layer
    # ...,
    nn.Linear(encoding_size, encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),
    nn.Sigmoid()
)
model[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model}\n')
# Adjust the value n_l to split your model correctly
# n_l = ...
# uncomment when you fill the code
# encoder = model[:n_l]
# decoder = model[n_l:]
# print(f'Encoder \n\n {encoder}\n')
# print(f'Decoder \n\n {decoder}')
# to_remove solution
encoding_size = 3
model = nn.Sequential(
    nn.Linear(input_size, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, encoding_size),
    nn.PReLU(),
    # add the normalization layer: projects the 3D code onto the unit sphere
    NormalizeLayer(),
    nn.Linear(encoding_size, encoding_size * 32),
    nn.PReLU(),
    nn.Linear(encoding_size * 32, int(input_size / 2)),
    nn.PReLU(),
    nn.Linear(int(input_size / 2), input_size),
    nn.Sigmoid()
)
model[:-2].apply(init_weights_kaiming_normal)
print(f'Autoencoder \n\n {model}\n')
# Adjust the value n_l to split your model correctly: the encoder ends with
# NormalizeLayer at module index 6, so the split point is 7.
n_l = 7
# uncomment when you fill the code
encoder = model[:n_l]
decoder = model[n_l:]
print(f'Encoder \n\n {encoder}\n')
print(f'Decoder \n\n {decoder}')
```
## Section 3.4: Train the autoencoder
Train the network for `n_epochs=10` epochs with `batch_size=128` and observe how the loss rises again and is comparable to the model with 2D latent space.
**Instructions:**
* Please execute the cell below
```
# Train the sphere-constrained autoencoder, then visualize its latent space.
n_epochs = 10
batch_size = 128
runSGD(model, input_train, input_test, n_epochs=n_epochs,
       batch_size=batch_size)
with torch.no_grad():
    latent_test = encoder(input_test)
plot_latent_3d(latent_test, y_test)
```
## Section 3.5: Visualize latent space on surface of $S_2$
The 3D coordinates $(s_1, s_2, s_3)$ on the surface of the unit sphere $S_2$ can be mapped to [spherical coordinates](https://en.wikipedia.org/wiki/Spherical_coordinate_system) $(r, \theta, \phi)$, as follows:
$$
\begin{aligned}
r &= \sqrt{s_1^2 + s_2^2 + s_3^2} \\
\phi &= \arctan \frac{s_2}{s_1} \\
\theta &= \arccos\frac{s_3}{r}
\end{aligned}
$$

What is the domain (numerical range) spanned by ($\theta, \phi)$?
We return to a 2D representation since the angles $(\theta, \phi)$ are the only degrees of freedom on the surface of the sphere. Add the keyword `s2=True` to `plot_latent_generative` to un-wrap the sphere's surface similar to a world map.
Task: Check the numerical range of the plot axis to help identify $\theta$ and $\phi$, and visualize the unfolding of the 3D plot from the previous exercise.
**Instructions:**
* Please execute the cells below
```
# Reconstructions plus the unwrapped (theta, phi) latent map of the sphere.
with torch.no_grad():
    output_test = model(input_test)
plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
         image_shape=image_shape)
plot_latent_generative(latent_test, y_test, decoder,
                       image_shape=image_shape, s2=True)
```
---
# Summary
We learned two techniques to improve representation capacity: adding a few hidden layers and projecting latent space on the sphere $S_2$.
The expressive power of autoencoder improves with additional hidden layers. Projecting latent space on the surface of $S_2$ spreads out digits classes in a more visually pleasing way but may not always produce a lower loss.
**Deep autoencoder architectures have rich internal representations to deal with sophisticated tasks such as the MNIST cognitive task.**
We now have powerful tools to explore how simple algorithms build robust models of the world by capturing relevant data patterns.
```
# @title Video 2: Wrap-up
# Embed the wrap-up video (Colab/Jupyter display helper).
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="GnkmzCqEK3E", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
---
# Bonus
## Deep and thick autoencoder
In this exercise, we first expand the first hidden layer to double the input size, followed by compression to half the input size leading to 3.8M parameters. Please **do not train this network during tutorial** due to long training time.
**Instructions:**
* Please uncomment and execute the cells below
```
# encoding_size = 3
# model = nn.Sequential(
# nn.Linear(input_size, int(input_size * 2)),
# nn.PReLU(),
# nn.Linear(int(input_size * 2), int(input_size / 2)),
# nn.PReLU(),
# nn.Linear(int(input_size / 2), encoding_size * 32),
# nn.PReLU(),
# nn.Linear(encoding_size * 32, encoding_size),
# nn.PReLU(),
# NormalizeLayer(),
# nn.Linear(encoding_size, encoding_size * 32),
# nn.PReLU(),
# nn.Linear(encoding_size * 32, int(input_size / 2)),
# nn.PReLU(),
# nn.Linear(int(input_size / 2), int(input_size * 2)),
# nn.PReLU(),
# nn.Linear(int(input_size * 2), input_size),
# nn.Sigmoid()
# )
# model[:-2].apply(init_weights_kaiming_normal)
# encoder = model[:9]
# decoder = model[9:]
# print_parameter_count(model)
# n_epochs = 5
# batch_size = 128
# runSGD(model, input_train, input_test, n_epochs=n_epochs,
# batch_size=batch_size)
# Visualization
# with torch.no_grad():
# output_test = model(input_test)
# plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
# image_shape=image_shape)
# plot_latent_generative(latent_test, y_test, decoder,
# image_shape=image_shape, s2=True)
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
"""
EVCのためのEV-GMMを構築します. そして, 適応学習する.
詳細 : https://pdfs.semanticscholar.org/cbfe/71798ded05fb8bf8674580aabf534c4dbb8bc.pdf
This program make EV-GMM for EVC. Then, it make adaptation learning.
Check detail : https://pdfs.semanticscholar.org/cbfe/71798ded05fb8bf8674580abf534c4dbb8bc.pdf
"""
from __future__ import division, print_function
import os
from shutil import rmtree
import argparse
import glob
import pickle
import time
import numpy as np
from numpy.linalg import norm
from sklearn.decomposition import PCA
from sklearn.mixture import GMM # sklearn 0.20.0から使えない
from sklearn.preprocessing import StandardScaler
import scipy.signal
import scipy.sparse
%matplotlib inline
import matplotlib.pyplot as plt
import IPython
from IPython.display import Audio
import soundfile as sf
import wave
import pyworld as pw
import librosa.display
from dtw import dtw
import warnings
warnings.filterwarnings('ignore')
"""
Parameters
__Mixtured : GMM混合数
__versions : 実験セット
__convert_source : 変換元話者のパス
__convert_target : 変換先話者のパス
"""
# parameters
__Mixtured = 40                    # number of GMM mixture components
__versions = 'pre-stored0.1.3'     # experiment-set tag (used in all file names)
__convert_source = 'input/EJM10/V01/T01/TIMIT/000/*.wav'      # conversion-source speaker glob
__convert_target = 'adaptation/EJM05/V01/T01/ATR503/A/*.wav'  # adaptation-target speaker glob

# settings -- every path below is derived from the experiment tag
__same_path = './utterance/' + __versions + '/'
__output_path = __same_path + 'output/EJM05/' # EJF01, EJF07, EJM04, EJM05
Mixtured = __Mixtured
pre_stored_pickle = __same_path + __versions + '.pickle'
pre_stored_source_list = __same_path + 'pre-source/**/V01/T01/**/*.wav'
pre_stored_list = __same_path + "pre/**/V01/T01/**/*.wav"
#pre_stored_target_list = "" (not yet)
pre_stored_gmm_init_pickle = __same_path + __versions + '_init-gmm.pickle'
pre_stored_sv_npy = __same_path + __versions + '_sv.npy'
save_for_evgmm_covarXX = __output_path + __versions + '_covarXX.npy'
save_for_evgmm_covarYX = __output_path + __versions + '_covarYX.npy'
save_for_evgmm_fitted_source = __output_path + __versions + '_fitted_source.npy'
save_for_evgmm_fitted_target = __output_path + __versions + '_fitted_target.npy'
save_for_evgmm_weights = __output_path + __versions + '_weights.npy'
save_for_evgmm_source_means = __output_path + __versions + '_source_means.npy'
for_convert_source = __same_path + __convert_source
for_convert_target = __same_path + __convert_target
# Output artifacts (NOTE(review): converted_voice_npy and converted_voice_wav
# share the same prefix -- presumably extensions are appended later; confirm.)
converted_voice_npy = __output_path + 'sp_converted_' + __versions
converted_voice_wav = __output_path + 'sp_converted_' + __versions
mfcc_save_fig_png = __output_path + 'mfcc3dim_' + __versions
f0_save_fig_png = __output_path + 'f0_converted' + __versions
converted_voice_with_f0_wav = __output_path + 'sp_f0_converted' + __versions
EPSILON = 1e-8
class MFCC:
    """
    Compute Mel-frequency cepstral coefficients (MFCC) from a spectrum,
    and convert MFCCs back into a spectrum.
    The dynamic-feature (delta) computation is still a work in progress.
    ref : http://aidiary.hatenablog.com/entry/20120225/1330179868
    """

    def __init__(self, frequency, nfft=1026, dimension=24, channels=24):
        """
        Set the analysis parameters.

        nfft      : number of FFT sample points
                    (NOTE(review): 1026 is unusual -- FFT sizes are normally
                    powers of two such as 1024; confirm it matches the WORLD
                    spectrum size actually passed to mfcc().)
        frequency : sampling frequency [Hz]
        dimension : number of MFCC dimensions
        channels  : number of mel filter bank channels (tied to dimension)
        fscale    : linear frequency axis for the first nfft/2 bins
        filterbank, fcenters : filter bank matrix and each filter's
                    center frequency
        """
        self.nfft = nfft
        self.frequency = frequency
        self.dimension = dimension
        self.channels = channels
        self.fscale = np.fft.fftfreq(self.nfft, d = 1.0 / self.frequency)[: int(self.nfft / 2)]
        self.filterbank, self.fcenters = self.melFilterBank()

    def hz2mel(self, f):
        """
        Convert frequency [Hz] to mel scale.
        """
        return 1127.01048 * np.log(f / 700.0 + 1.0)

    def mel2hz(self, m):
        """
        Convert mel scale to frequency [Hz].
        """
        return 700.0 * (np.exp(m / 1127.01048) - 1.0)

    def melFilterBank(self):
        """
        Build the triangular mel filter bank matrix (channels x nfft/2).
        """
        fmax = self.frequency / 2
        melmax = self.hz2mel(fmax)
        nmax = int(self.nfft / 2)
        df = self.frequency / self.nfft
        # Filter centers are evenly spaced on the mel axis.
        dmel = melmax / (self.channels + 1)
        melcenters = np.arange(1, self.channels + 1) * dmel
        fcenters = self.mel2hz(melcenters)
        indexcenter = np.round(fcenters / df)
        indexstart = np.hstack(([0], indexcenter[0:self.channels - 1]))
        indexstop = np.hstack((indexcenter[1:self.channels], [nmax]))
        filterbank = np.zeros((self.channels, nmax))
        for c in np.arange(0, self.channels):
            # Rising slope of the triangle.
            increment = 1.0 / (indexcenter[c] - indexstart[c])
            # np.int_ turns np.arange's floats ([0. 1. 2. ...]) into ints.
            for i in np.int_(np.arange(indexstart[c], indexcenter[c])):
                filterbank[c, i] = (i - indexstart[c]) * increment
            # Falling slope of the triangle.
            decrement = 1.0 / (indexstop[c] - indexcenter[c])
            # np.int_ turns np.arange's floats ([0. 1. 2. ...]) into ints.
            for i in np.int_(np.arange(indexcenter[c], indexstop[c])):
                filterbank[c, i] = 1.0 - ((i - indexcenter[c]) * decrement)
        return filterbank, fcenters

    def mfcc(self, spectrum):
        """
        Compute the MFCCs of a spectrum: mel filter bank -> log10 -> DCT-II.
        NOTE(review): uses scipy.fftpack, but only scipy.signal/scipy.sparse
        are imported at the top of this file -- confirm scipy.fftpack is
        importable in this environment.
        """
        mspec = []
        mspec = np.log10(np.dot(spectrum, self.filterbank.T))
        mspec = np.array(mspec)
        return scipy.fftpack.realtransforms.dct(mspec, type=2, norm="ortho", axis=-1)

    def delta(self, mfcc):
        """
        Compute dynamic features (deltas) from an MFCC sequence.
        Currently frame t's delta is the slope between frames t-1 and t+1.
        """
        # Repeat the first frame at the start and the last frame at the end
        # so every interior frame has two neighbors.
        mfcc = np.concatenate([
            [mfcc[0]],
            mfcc,
            [mfcc[-1]]
        ])
        delta = None
        for i in range(1, mfcc.shape[0] - 1):
            slope = (mfcc[i+1] - mfcc[i-1]) / 2
            if delta is None:
                delta = slope
            else:
                delta = np.vstack([delta, slope])
        return delta

    def imfcc(self, mfcc, spectrogram):
        """
        Reconstruct a spectrum from MFCCs: inverse DCT, then spline
        interpolation from the filter-bank centers back onto the linear
        frequency axis.
        NOTE(review): uses scipy.interpolate, which is not explicitly
        imported at the top of this file -- confirm it is importable.
        """
        im_sp = np.array([])
        for i in range(mfcc.shape[0]):
            # Zero-pad the truncated cepstrum back to `channels` coefficients.
            mfcc_s = np.hstack([mfcc[i], [0] * (self.channels - self.dimension)])
            mspectrum = scipy.fftpack.idct(mfcc_s, norm='ortho')
            # splrep builds a spline interpolator over the filter centers.
            tck = scipy.interpolate.splrep(self.fcenters, np.power(10, mspectrum))
            # splev evaluates the spline at the requested frequencies.
            im_spectrogram = scipy.interpolate.splev(self.fscale, tck)
            im_sp = np.concatenate((im_sp, im_spectrogram), axis=0)
        return im_sp.reshape(spectrogram.shape)
def trim_zeros_frames(x, eps=1e-7):
    """
    Remove (near-)silent frames from a feature matrix.

    Parameters
    ----------
    x : np.ndarray, shape (T, D)
        Sequence of T feature frames.
    eps : float
        Frames whose L1 norm is <= eps are treated as silence.

    Returns
    -------
    np.ndarray
        `x` with silent frames removed (frame order preserved).
    """
    energies = np.sum(np.abs(x), axis=1)
    # Bug fix: the original zeroed energies below a hard-coded 1e-7,
    # silently ignoring any user-supplied `eps` smaller than that.
    return x[energies > eps]
def analyse_by_world_with_harverst(x, fs):
    """
    Decompose a waveform with the WORLD vocoder into fundamental frequency
    (F0), spectral envelope, and aperiodicity.
    F0 is estimated with Harvest and refined with StoneMask for better
    accuracy.

    x  : waveform (float array)
    fs : sampling frequency [Hz]
    Returns (f0, spectral envelope, aperiodicity), one row per 5 ms frame.
    """
    # Harvest with F0 refinement (using StoneMask)
    frame_period = 5
    _f0_h, t_h = pw.harvest(x, fs, frame_period=frame_period)
    f0_h = pw.stonemask(x, _f0_h, t_h, fs)
    sp_h = pw.cheaptrick(x, f0_h, t_h, fs)
    ap_h = pw.d4c(x, f0_h, t_h, fs)
    return f0_h, sp_h, ap_h
def wavread(file):
    """
    Read a 16-bit PCM wav file.

    Returns the samples scaled into [-1.0, 1.0) as a float array, together
    with the sampling rate as a float.
    """
    with wave.open(file, "r") as handle:
        rate = handle.getframerate()
        raw = handle.readframes(handle.getnframes())
    samples = np.frombuffer(raw, dtype="int16") / 32768.0
    return samples, float(rate)
def preEmphasis(signal, p=0.97):
    """
    Pre-emphasis (high-frequency boosting) FIR filter for MFCC extraction:
    y[n] = x[n] - p * x[n-1].
    """
    numerator = [1.0, -p]
    denominator = 1
    return scipy.signal.lfilter(numerator, denominator, signal)
def alignment(source, target, path):
    """
    Time-align `target` to `source` using a DTW warping path.
    Each time the source-side index in the path advances, the matching
    target frame is appended, yielding one target frame per source frame.

    source : (T, D) source feature sequence
    target : target feature sequence to be warped
    path   : DTW path as a pair (source_indices, target_indices)
    """
    # Align to the source length.  NOTE(review): the final reshape assumes
    # the path visits every source frame exactly once -- confirm this holds
    # for the dtw() implementation in use.
    # p_p = 0 if source.shape[0] > target.shape[0] else 1
    #shapes = source.shape if source.shape[0] > target.shape[0] else target.shape
    shapes = source.shape
    align = np.array([])
    for (i, p) in enumerate(path[0]):
        if i != 0:
            # Append only when the source index moved forward.
            if j != p:
                temp = np.array(target[path[1][i]])
                align = np.concatenate((align, temp), axis=0)
        else:
            # First path step: always take the first matched target frame.
            temp = np.array(target[path[1][i]])
            align = np.concatenate((align, temp), axis=0)
        # Remember the previous source index (`j` is defined from step 0 on).
        j = p
    return align.reshape(shapes)
"""
pre-stored学習のためのパラレル学習データを作る。
時間がかかるため、利用できるlearn-data.pickleがある場合はそれを利用する。
それがない場合は一から作り直す。
"""
# Build (or load from cache) the parallel training data for the pre-stored
# speakers.  Building is slow, so the result is cached as a pickle.
timer_start = time.time()
if os.path.exists(pre_stored_pickle):
    # Cached parallel data exists -- just load it.
    print("exist, ", pre_stored_pickle)
    with open(pre_stored_pickle, mode='rb') as f:
        total_data = pickle.load(f)
    print("open, ", pre_stored_pickle)
    print("Load pre-stored time = ", time.time() - timer_start , "[sec]")
else:
    # 1) Extract an MFCC sequence for every source-speaker utterance.
    source_mfcc = []
    #source_data_sets = []
    for name in sorted(glob.iglob(pre_stored_source_list, recursive=True)):
        print(name)
        x, fs = sf.read(name)
        f0, sp, ap = analyse_by_world_with_harverst(x, fs)
        mfcc = MFCC(fs)
        source_mfcc_temp = mfcc.mfcc(sp)
        #source_data = np.hstack([source_mfcc_temp, mfcc.delta(source_mfcc_temp)]) # static & dynamic featuers
        source_mfcc.append(source_mfcc_temp)
        #source_data_sets.append(source_data)
    # 2) For every pre-stored output-speaker utterance, DTW-align its MFCCs
    #    to the matching source utterance and stack [source | aligned target].
    total_data = []
    i = 0
    _s_len = len(source_mfcc)
    for name in sorted(glob.iglob(pre_stored_list, recursive=True)):
        print(name, len(total_data))
        x, fs = sf.read(name)
        f0, sp, ap = analyse_by_world_with_harverst(x, fs)
        mfcc = MFCC(fs)
        target_mfcc = mfcc.mfcc(sp)
        # DTW with an L1 frame distance; `path` maps source frames to target frames.
        dist, cost, acc, path = dtw(source_mfcc[i%_s_len], target_mfcc, dist=lambda x, y: norm(x - y, ord=1))
        #print('Normalized distance between the two sounds:' + str(dist))
        #print("target_mfcc = {0}".format(target_mfcc.shape))
        aligned = alignment(source_mfcc[i%_s_len], target_mfcc, path)
        #target_data_sets = np.hstack([aligned, mfcc.delta(aligned)]) # static & dynamic features
        #learn_data = np.hstack((source_data_sets[i], target_data_sets))
        learn_data = np.hstack([source_mfcc[i%_s_len], aligned])
        total_data.append(learn_data)
        i += 1
    # 3) Cache the result for future runs.
    with open(pre_stored_pickle, 'wb') as output:
        pickle.dump(total_data, output)
    print("Make, ", pre_stored_pickle)
    print("Make pre-stored time = ", time.time() - timer_start , "[sec]")
"""
全事前学習出力話者からラムダを推定する.
ラムダは適応学習で変容する.
"""
# S: number of parallel utterances; D: feature dimension per speaker
# (each row of total_data is [source features (D) | target features (D)]).
S = len(total_data)
D = int(total_data[0].shape[1] / 2)
print("total_data[0].shape = ", total_data[0].shape)
print("S = ", S)
print("D = ", D)
# Fit (or load from cache) the initial joint GMM on all stacked parallel data.
timer_start = time.time()
if os.path.exists(pre_stored_gmm_init_pickle):
    print("exist, ", pre_stored_gmm_init_pickle)
    with open(pre_stored_gmm_init_pickle, mode='rb') as f:
        initial_gmm = pickle.load(f)
    print("open, ", pre_stored_gmm_init_pickle)
    print("Load initial_gmm time = ", time.time() - timer_start , "[sec]")
else:
    initial_gmm = GMM(n_components = Mixtured, covariance_type = 'full')
    initial_gmm.fit(np.vstack(total_data))
    with open(pre_stored_gmm_init_pickle, 'wb') as output:
        pickle.dump(initial_gmm, output)
    print("Make, ", initial_gmm)
    print("Make initial_gmm time = ", time.time() - timer_start , "[sec]")
# Split the joint-space GMM parameters into source (X) / target (Y) blocks.
weights = initial_gmm.weights_
source_means = initial_gmm.means_[:, :D]
target_means = initial_gmm.means_[:, D:]
covarXX = initial_gmm.covars_[:, :D, :D]
covarXY = initial_gmm.covars_[:, :D, D:]
covarYX = initial_gmm.covars_[:, D:, :D]
covarYY = initial_gmm.covars_[:, D:, D:]
fitted_source = source_means
fitted_target = target_means
"""
SVはGMMスーパーベクトルで、各pre-stored学習における出力話者について平均ベクトルを推定する。
GMMの学習を見てみる必要があるか?
"""
# Build (or load) the GMM supervectors: for each pre-stored speaker,
# re-estimate only the means (params='m') starting from the shared
# initial GMM, and collect the resulting mean vectors.
timer_start = time.time()
if os.path.exists(pre_stored_sv_npy):
    print("exist, ", pre_stored_sv_npy)
    sv = np.load(pre_stored_sv_npy)
    print("open, ", pre_stored_sv_npy)
    print("Load pre_stored_sv time = ", time.time() - timer_start , "[sec]")
else:
    sv = []
    for i in range(S):
        # init_params='' keeps the copied weights/means/covars as the start point.
        gmm = GMM(n_components = Mixtured, params = 'm', init_params = '', covariance_type = 'full')
        gmm.weights_ = initial_gmm.weights_
        gmm.means_ = initial_gmm.means_
        gmm.covars_ = initial_gmm.covars_
        gmm.fit(total_data[i])
        sv.append(gmm.means_)
    sv = np.array(sv)
    np.save(pre_stored_sv_npy, sv)
    print("Make pre_stored_sv time = ", time.time() - timer_start , "[sec]")
"""
各事前学習出力話者のGMM平均ベクトルに対して主成分分析(PCA)を行う.
PCAで求めた固有値と固有ベクトルからeigenvectorsとbiasvectorsを作る.
"""
# PCA on the per-speaker GMM mean supervectors; the principal directions
# (eigenvectors) and the mean (bias) vectors define the eigenvoice space.
timer_start = time.time()
#source_pca
source_n_component, source_n_features = sv[:, :, :D].reshape(S, Mixtured*D).shape
# Standardize (zero mean, unit variance).
source_stdsc = StandardScaler()
source_X_std = source_stdsc.fit_transform(sv[:, :, :D].reshape(S, Mixtured*D))
# PCA via eigendecomposition of the covariance matrix.
source_cov = source_X_std.T @ source_X_std / (source_n_component - 1)
source_W, source_V_pca = np.linalg.eig(source_cov)
print(source_W.shape)
print(source_V_pca.shape)
# Project the data onto the principal components.
source_X_pca = source_X_std @ source_V_pca
print(source_X_pca.shape)
#target_pca
target_n_component, target_n_features = sv[:, :, D:].reshape(S, Mixtured*D).shape
# Standardize (zero mean, unit variance).
target_stdsc = StandardScaler()
target_X_std = target_stdsc.fit_transform(sv[:, :, D:].reshape(S, Mixtured*D))
# PCA via eigendecomposition of the covariance matrix.
target_cov = target_X_std.T @ target_X_std / (target_n_component - 1)
target_W, target_V_pca = np.linalg.eig(target_cov)
print(target_W.shape)
print(target_V_pca.shape)
# Project the data onto the principal components.
target_X_pca = target_X_std @ target_V_pca
print(target_X_pca.shape)
# Eigenvoice bases (per mixture, per dimension, per speaker) and bias
# vectors (mean supervector over speakers).
eigenvectors = source_X_pca.reshape((Mixtured, D, S)), target_X_pca.reshape((Mixtured, D, S))
source_bias = np.mean(sv[:, :, :D], axis=0)
target_bias = np.mean(sv[:, :, D:], axis=0)
biasvectors = source_bias.reshape((Mixtured, D)), target_bias.reshape((Mixtured, D))
print("Do PCA time = ", time.time() - timer_start , "[sec]")
"""
声質変換に用いる変換元音声と目標音声を読み込む.
"""
# Load the conversion-source and adaptation-target utterances, and extract
# WORLD features (F0, spectral envelope, aperiodicity) plus MFCCs.
timer_start = time.time()
source_mfcc_for_convert = []
source_sp_for_convert = []
source_f0_for_convert = []
source_ap_for_convert = []
fs_source = None
for name in sorted(glob.iglob(for_convert_source, recursive=True)):
    print("source = ", name)
    x_source, fs_source = sf.read(name)
    f0_source, sp_source, ap_source = analyse_by_world_with_harverst(x_source, fs_source)
    mfcc_source = MFCC(fs_source)
    #mfcc_s_tmp = mfcc_s.mfcc(sp)
    #source_mfcc_for_convert = np.hstack([mfcc_s_tmp, mfcc_s.delta(mfcc_s_tmp)])
    source_mfcc_for_convert.append(mfcc_source.mfcc(sp_source))
    source_sp_for_convert.append(sp_source)
    source_f0_for_convert.append(f0_source)
    source_ap_for_convert.append(ap_source)
target_mfcc_for_fit = []
target_f0_for_fit = []
target_ap_for_fit = []
for name in sorted(glob.iglob(for_convert_target, recursive=True)):
    print("target = ", name)
    x_target, fs_target = sf.read(name)
    f0_target, sp_target, ap_target = analyse_by_world_with_harverst(x_target, fs_target)
    mfcc_target = MFCC(fs_target)
    #mfcc_target_tmp = mfcc_target.mfcc(sp_target)
    #target_mfcc_for_fit = np.hstack([mfcc_t_tmp, mfcc_t.delta(mfcc_t_tmp)])
    target_mfcc_for_fit.append(mfcc_target.mfcc(sp_target))
    target_f0_for_fit.append(f0_target)
    target_ap_for_fit.append(ap_target)
# Convert everything to numpy arrays (object arrays when utterance lengths differ).
source_data_mfcc = np.array(source_mfcc_for_convert)
source_data_sp = np.array(source_sp_for_convert)
source_data_f0 = np.array(source_f0_for_convert)
source_data_ap = np.array(source_ap_for_convert)
target_mfcc = np.array(target_mfcc_for_fit)
target_f0 = np.array(target_f0_for_fit)
target_ap = np.array(target_ap_for_fit)
print("Load Input and Target Voice time = ", time.time() - timer_start , "[sec]")
"""
適応話者学習を行う.
つまり,事前学習出力話者から目標話者の空間を作りだす.
適応話者文数ごとにfitted_targetを集めるのは未実装.
"""
# Speaker adaptation: estimate the eigenvoice weights from the adaptation
# utterances and iteratively update the target-side GMM means.
timer_start = time.time()
epoch=1000
py = GMM(n_components = Mixtured, covariance_type = 'full')
py.weights_ = weights
py.means_ = target_means
py.covars_ = covarYY
fitted_target = None
for i in range(len(target_mfcc)):
    print("adaptation = ", i+1, "/", len(target_mfcc))
    target = target_mfcc[i]
    for x in range(epoch):
        print("epoch = ", x)
        # Posterior responsibility of each mixture for each frame.
        predict = py.predict_proba(np.atleast_2d(target))
        # NOTE(review): the comprehension variable `i` below shadows the
        # outer utterance index (harmless in Python 3 comprehension scope).
        y = np.sum([predict[:, i: i + 1] * (target - biasvectors[1][i])
                    for i in range(Mixtured)], axis = 1)
        gamma = np.sum(predict, axis = 0)
        # Solve the weighted least-squares system for the eigenvoice weights.
        left = np.sum([gamma[i] * np.dot(eigenvectors[1][i].T,
                                         np.linalg.solve(py.covars_, eigenvectors[1])[i])
                       for i in range(Mixtured)], axis=0)
        right = np.sum([np.dot(eigenvectors[1][i].T,
                               np.linalg.solve(py.covars_, y)[i])
                        for i in range(Mixtured)], axis = 0)
        weight = np.linalg.solve(left, right)
        # New target means = eigenvoice reconstruction + bias.
        fitted_target = np.dot(eigenvectors[1], weight) + biasvectors[1]
        py.means_ = fitted_target
print("Load Input and Target Voice time = ", time.time() - timer_start , "[sec]")
"""
変換に必要なものを残しておく.
"""
# Persist everything the conversion stage needs.
np.save(save_for_evgmm_covarXX, covarXX)
np.save(save_for_evgmm_covarYX, covarYX)
np.save(save_for_evgmm_fitted_source, fitted_source)
np.save(save_for_evgmm_fitted_target, fitted_target)
np.save(save_for_evgmm_weights, weights)
np.save(save_for_evgmm_source_means, source_means)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_08_5_kaggle_project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 8: Kaggle Data Sets**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 8 Material
* Part 8.1: Introduction to Kaggle [[Video]](https://www.youtube.com/watch?v=v4lJBhdCuCU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_1_kaggle_intro.ipynb)
* Part 8.2: Building Ensembles with Scikit-Learn and Keras [[Video]](https://www.youtube.com/watch?v=LQ-9ZRBLasw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_2_keras_ensembles.ipynb)
* Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters [[Video]](https://www.youtube.com/watch?v=1q9klwSoUQw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_3_keras_hyperparameters.ipynb)
* Part 8.4: Bayesian Hyperparameter Optimization for Keras [[Video]](https://www.youtube.com/watch?v=sXdxyUCCm8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb)
* **Part 8.5: Current Semester's Kaggle** [[Video]](https://www.youtube.com/watch?v=PHQt0aUasRg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_5_kaggle_project.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
# Start CoLab
# %tensorflow_version is a Colab-only notebook magic; on a local Jupyter
# install the line raises, which is how non-Colab environments are detected.
try:
    %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format a duration in seconds as H:MM:SS.ss."""
    hours = int(sec_elapsed / 3600)
    remainder = sec_elapsed % 3600
    minutes = int(remainder / 60)
    seconds = sec_elapsed % 60
    return f"{hours}:{minutes:>02}:{seconds:>05.2f}"
```
# Part 8.5: Current Semester's Kaggle
Kaggle competition site for the current semester (Spring 2021):
* [Spring 2021 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learning-wustl-spring-2021b)
Previous Kaggle competition sites for this class (NOT this semester's assignment, feel free to use code):
* [Fall 2020 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learning-wustl-fall-2020)
* [Spring 2020 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learningwustl-spring-2020)
* [Fall 2019 Kaggle Assignment](https://kaggle.com/c/applications-of-deep-learningwustl-fall-2019)
* [Spring 2019 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learningwustl-spring-2019)
* [Fall 2018 Kaggle Assignment](https://www.kaggle.com/c/wustl-t81-558-washu-deep-learning-fall-2018)
* [Spring 2018 Kaggle Assignment](https://www.kaggle.com/c/wustl-t81-558-washu-deep-learning-spring-2018)
* [Fall 2017 Kaggle Assignment](https://www.kaggle.com/c/wustl-t81-558-washu-deep-learning-fall-2017)
* [Spring 2017 Kaggle Assignment](https://inclass.kaggle.com/c/applications-of-deep-learning-wustl-spring-2017)
* [Fall 2016 Kaggle Assignment](https://inclass.kaggle.com/c/wustl-t81-558-washu-deep-learning-fall-2016)
# Iris as a Kaggle Competition
If the Iris data were used as a Kaggle, you would be given the following three files:
* [kaggle_iris_test.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_iris_test.csv) - The data that Kaggle will evaluate you on. Contains only input, you must provide answers. (contains x)
* [kaggle_iris_train.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_iris_train.csv) - The data that you will use to train. (contains x and y)
* [kaggle_iris_sample.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_iris_sample.csv) - A sample submission for Kaggle. (contains x and y)
Important features of the Kaggle iris files (that differ from how we've previously seen files):
* The iris species is already index encoded.
* Your training data is in a separate file.
* You will load the test data to generate a submission file.
The following program generates a submission file for "Iris Kaggle". You can use it as a starting point for assignment 3.
```
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping
# Download the Kaggle-style iris training data.
df_train = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/datasets/"+\
    "kaggle_iris_train.csv", na_values=['NA','?'])

# Encode feature vector: the id column carries no signal.
df_train.drop('id', axis=1, inplace=True)

num_classes = len(df_train.groupby('species').species.nunique())
print("Number of classes: {}".format(num_classes))

# Convert to numpy - Classification (one-hot encode the target).
x = df_train[['sepal_l', 'sepal_w', 'petal_l', 'petal_w']].values
dummies = pd.get_dummies(df_train['species']) # Classification
species = dummies.columns
y = dummies.values

# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=45)

# Train, with early stopping
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu'))
# NOTE(review): this hidden layer has no activation (i.e. linear) --
# presumably 'relu' was intended; confirm.
model.add(Dense(25))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
                        patience=5, verbose=1, mode='auto',
                        restore_best_weights=True)
model.fit(x_train,y_train,validation_data=(x_test,y_test),
          callbacks=[monitor],verbose=0,epochs=1000)
```
Now that we've trained the neural network, we can check its log loss.
```
from sklearn import metrics

# Calculate multiclass log loss on the held-out split (lower is better).
pred = model.predict(x_test)
score = metrics.log_loss(y_test, pred)
print("Log loss score: {}".format(score))
```
Now we are ready to generate the Kaggle submission file. We will use the iris test data that does not contain a $y$ target value. It is our job to predict this value and submit to Kaggle.
```
# Generate Kaggle submit file
# Encode feature vector
df_test = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/datasets/"+\
    "kaggle_iris_test.csv", na_values=['NA','?'])

# Convert to numpy - Classification
ids = df_test['id']
df_test.drop('id', axis=1, inplace=True)
x = df_test[['sepal_l', 'sepal_w', 'petal_l', 'petal_w']].values
# NOTE(review): this re-uses the *training* dummies and is not used
# below -- looks like copy/paste leftover.
y = dummies.values

# Generate predictions
pred = model.predict(x)
#pred

# Create submission data set: one probability column per class.
df_submit = pd.DataFrame(pred)
df_submit.insert(0,'id',ids)
df_submit.columns = ['id','species-0','species-1','species-2']

# Write submit file locally
df_submit.to_csv("iris_submit.csv", index=False)
print(df_submit)
```
### MPG as a Kaggle Competition (Regression)
If the Auto MPG data were used as a Kaggle, you would be given the following three files:
* [kaggle_mpg_test.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_auto_test.csv) - The data that Kaggle will evaluate you on. Contains only input, you must provide answers. (contains x)
* [kaggle_mpg_train.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_auto_test.csv) - The data that you will use to train. (contains x and y)
* [kaggle_mpg_sample.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_auto_sample.csv) - A sample submission for Kaggle. (contains x and y)
Important features of the Kaggle MPG files (that differ from how we've previously seen files):
The following program generates a submission file for "MPG Kaggle".
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
import pandas as pd
import io
import os
import requests
import numpy as np
from sklearn import metrics
save_path = "."

# Download the Kaggle-style Auto MPG training data.
df = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/datasets/"+\
    "kaggle_auto_train.csv",
    na_values=['NA', '?'])

cars = df['name']

# Handle missing value: impute horsepower with the training median.
df['horsepower'] = df['horsepower'].fillna(df['horsepower'].median())

# Pandas to Numpy
x = df[['cylinders', 'displacement', 'horsepower', 'weight',
        'acceleration', 'year', 'origin']].values
y = df['mpg'].values # regression

# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=42)

# Build the neural network
model = Sequential()
model.add(Dense(25, input_dim=x.shape[1], activation='relu')) # Hidden 1
model.add(Dense(10, activation='relu')) # Hidden 2
model.add(Dense(1)) # Output (single linear unit for regression)
model.compile(loss='mean_squared_error', optimizer='adam')

monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,
                        verbose=1, mode='auto', restore_best_weights=True)
model.fit(x_train,y_train,validation_data=(x_test,y_test),
          verbose=2,callbacks=[monitor],epochs=1000)

# Predict
pred = model.predict(x_test)
```
Now that we've trained the neural network, we can check its RMSE error.
```
import numpy as np

# Measure RMSE error. RMSE is common for regression.
# (Argument order is harmless here: squared error is symmetric.)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Final score (RMSE): {}".format(score))
```
Now we are ready to generate the Kaggle submission file. We will use the MPG test data that does not contain a $y$ target value. It is our job to predict this value and submit to Kaggle.
```
import pandas as pd

# Generate Kaggle submit file
# Encode feature vector
df_test = pd.read_csv(
    "https://data.heatonresearch.com/data/t81-558/datasets/"+\
    "kaggle_auto_test.csv", na_values=['NA','?'])

# Convert to numpy - regression
ids = df_test['id']
df_test.drop('id', axis=1, inplace=True)

# Handle missing value (impute with the *training* median, matching the
# preprocessing used at fit time).
df_test['horsepower'] = df_test['horsepower'].\
    fillna(df['horsepower'].median())

x = df_test[['cylinders', 'displacement', 'horsepower', 'weight',
             'acceleration', 'year', 'origin']].values

# Generate predictions
pred = model.predict(x)
#pred

# Create submission data set
df_submit = pd.DataFrame(pred)
df_submit.insert(0,'id',ids)
df_submit.columns = ['id','mpg']

# Write submit file locally
df_submit.to_csv("auto_submit.csv", index=False)
print(df_submit)
```
# Module 8 Assignment
You can find the first assignment here: [assignment 8](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class8.ipynb)
| github_jupyter |
```
# import packages
%matplotlib inline
import os
import sys
from multiprocessing import Process, Queue
import pandas as pd
import optuna
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
sys.path.append('/opt/conda/GSASII/')
# Configurations
### Change here ###
STUDY_NAME = 'YOUR_MATERIAL'    # name of the Optuna study / material under refinement
RANDOM_SEED = 1024              # seed for the TPE sampler (reproducibility)
DATA_DIR = '/bbo_rietveld/data/' + STUDY_NAME

# all output files include GSAS project file (*.gpx) will be saved in WORK_DIR
WORK_DIR = '/bbo_rietveld/work/' + STUDY_NAME

# make directories (shell escapes: clear previous outputs, then create WORK_DIR)
! rm -f $WORK_DIR/$STUDY_NAME*
! mkdir -p $WORK_DIR
class ProjectBBO:
    """Wrapper around one GSAS-II project instance for a single Optuna trial."""

    def __init__(self, trial_number):
        # Imported lazily: GSASIIscriptable is only on sys.path inside the container.
        import GSASIIscriptable as G2sc
        import shutil
        # Create a project with a default project name
        ### Change here ###
        shutil.copyfile(DATA_DIR+'/'+'YOUR_PROJECT_FILE.gpx',
                        WORK_DIR+'/'+'BBO_seed{0}_trial_{1}.gpx'.format(RANDOM_SEED, trial_number))
        self.gpx = G2sc.G2Project(gpxfile=os.path.join(WORK_DIR, 'BBO_seed{0}_trial_{1}.gpx'.format(RANDOM_SEED, trial_number)))
        # Add two histograms to the project
        self.hist1 = self.gpx.histograms()[0]   # first powder histogram
        self.phase0 = self.gpx.phases()[0]      # first phase
        self.hist1.data['Instrument Parameters'][0]['I(L2)/I(L1)'] = [0.5, 0.5, 0]
        # Set every atom to use isotropic ('I') displacement parameters.
        for val in self.phase0.data['Atoms']:
            val[9] = 'I'

    def refine_and_calc_Rwp(self, param_dict):
        """Run one refinement with `param_dict` and return the weighted
        profile R-factor (Rwp) of the last histogram."""
        self.gpx.do_refinements([param_dict])
        for hist in self.gpx.histograms():
            _, Rwp = hist.name, hist.get_wR()
        return Rwp
def objective(trial):
    """
    Optuna objective: define the refinement search space, run the Rietveld
    refinement, and return the resulting Rwp (minimized by the study).

    Parameters
    ----------
    trial : optuna.trial object

    Returns
    -------
    Rwp : float
    """
    # Here, you should define search space and perform the refinement.
    # Please see other notebook.
    # Copy and paste from other notebook and some modifications would be enough.
    # NOTE(review): as shipped this is a template -- `Rwp` is undefined
    # until the refinement code is filled in above.
    return Rwp
# Create Optuna study (TPE sampler; the first 20 trials are random start-up).
study = optuna.create_study(study_name=STUDY_NAME + '_seed%s' % (RANDOM_SEED),
                            sampler=optuna.samplers.TPESampler(n_startup_trials=20, seed=RANDOM_SEED))
```
Run 200 refinements to find the best configuration. It may take about an hour to complete.
```
# Optimize
study.optimize(objective, n_trials=200, n_jobs=1)

# Results: flatten the multi-level trial columns and sort by Rwp (best first).
df = study.trials_dataframe()
df.columns = [' '.join(col).replace('params', '').strip() for col in df.columns.values]
df.rename(columns={'value':'Rwp', 'number':'trial'}, inplace=True)
df.drop(columns=['state', 'system_attrs _number'], inplace=True)
df.sort_values('Rwp')

# Best configuration
study.best_params

# Best Rwp
study.best_value
# Rwp plot
def rwp_plot():
    """Plot the best (minimum) Rwp found so far versus trial count."""
    running_min = []
    best = df.iloc[0]['Rwp']
    for value in df['Rwp']:
        best = min(best, value)
        running_min.append(best)
    pd.DataFrame(running_min).plot(legend=None)
    # plt.ylim([6, 16])
    plt.grid(color='#cccccc')
    plt.ylabel('$R_{wp}$')
    plt.xlabel('Number of trials')
    plt.show()
rwp_plot()
# Rietveld plot
def rietveld_plot():
    """Rietveld plot for the best trial: observed/calculated XRD pattern,
    background, and residual panel."""
    import GSASIIscriptable as G2sc
    # Re-open the project file written by the best trial.
    gpx = G2sc.G2Project(
        '%s/%s_seed%s_trial_%s.gpx' % (WORK_DIR, STUDY_NAME, RANDOM_SEED, study.best_trial.number))
    hist1 = gpx.histograms()[0]
    phase0 = gpx.phases()[0]
    hist = hist1
    # Plot every 5th point to keep the figure light.
    i = 5
    two_theta = hist.getdata("X")[::i]
    Yobs = hist.getdata("Yobs")[::i]
    Ycalc = hist.getdata("Ycalc")[::i]
    bg = hist.getdata("Background")[::i]
    residual = hist.getdata("Residual")[::i]
    # Main pattern on top (4/5 of the height), residual below, no gap.
    fig = plt.figure()
    gs = GridSpec(5, 1, figure=fig)
    ax1 = fig.add_subplot(gs[:4, :])
    ax2 = fig.add_subplot(gs[4, :])
    fig.subplots_adjust(hspace=0)
    ax1.grid(color='#cccccc')
    ax1.scatter(two_theta, Yobs, marker='P', lw=0.0001, c='Black', label='XRD (Obs)')
    ax1.plot(two_theta, Ycalc, label='XRD (Calc)')
    ax1.plot(two_theta, bg, color='red', label='Background (Calc)')
    ax1.set_ylabel('Intensity')
    ax1.legend()
    ax2.plot(two_theta, residual, color='blue')
    plt.setp(ax1.get_xticklabels(), visible=False);
    # ax2.set_ylim(-6600, 6600)
    plt.xlabel(r'$2\theta$ (deg.)')
    ax2.set_ylabel('Residual')
    # change 2theta range according to your data
    ax1.set_xlim(15, 150)
    ax2.set_xlim(15, 150)
    plt.show()
rietveld_plot()
```
| github_jupyter |
[[source]](../api/alibi.explainers.counterfactual.rst)
# Counterfactual Instances
## Overview
A counterfactual explanation of an outcome or a situation $Y$ takes the form "If $X$ had not occurred, $Y$ would not have occurred" ([Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/counterfactual.html)). In the context of a machine learning classifier $X$ would be an instance of interest and $Y$ would be the label predicted by the model. The task of finding a counterfactual explanation is then to find some $X^\prime$ that is in some way related to the original instance $X$ but leading to a different prediction $Y^\prime$. Reasoning in counterfactual terms is very natural for humans, e.g. asking what should have been done differently to achieve a different result. As a consequence, generating counterfactual instances for machine learning predictions is a promising method for human-interpretable explanations.
The counterfactual method described here is the most basic way of defining the problem of finding such $X^\prime$. Our algorithm loosely follows Wachter et al. (2017): [Counterfactual Explanations without Opening the Black Box: Automated Decisions and the GDPR](https://arxiv.org/abs/1711.00399). For an extension to the basic method which provides ways of finding higher quality counterfactual instances $X^\prime$ in a quicker time, please refer to [Counterfactuals Guided by Prototypes](CFProto.ipynb).
We can reason that the most basic requirements for a counterfactual $X^\prime$ are as follows:
- The predicted class of $X^\prime$ is different from the predicted class of $X$
- The difference between $X$ and $X^\prime$ should be human-interpretable.
While the first condition is straight-forward, the second condition does not immediately lend itself to a condition as we need to first define "interpretability" in a mathematical sense. For this method we restrict ourselves to a particular definition by asserting that $X^\prime$ should be as close as possible to $X$ without violating the first condition. The main issue with this definition of "interpretability" is that the difference between $X^\prime$ and $X$ required to change the model prediction might be so small as to be un-interpretable to the human eye in which case [we need a more sophisticated approach](CFProto.ipynb).
That being said, we can now cast the search for $X^\prime$ as a simple optimization problem with the following loss:
$$L = L_{\text{pred}} + \lambda L_{\text{dist}},$$
where the first loss term $L_{\text{pred}}$ guides the search towards points $X^\prime$ which would change the model prediction and the second term $\lambda L_{\text{dist}}$ ensures that $X^\prime$ is close to $X$. This form of loss has a single hyperparameter $\lambda$ weighing the contributions of the two competing terms.
The specific loss in our implementation is as follows:
$$L(X^\prime\vert X) = (f_t(X^\prime) - p_t)^2 + \lambda L_1(X^\prime, X).$$
Here $t$ is the desired target class for $X^\prime$ which can either be specified in advance or left up to the optimization algorithm to find, $p_t$ is the target probability of this class (typically $p_t=1$), $f_t$ is the model prediction on class $t$ and $L_1$ is the distance between the proposed counterfactual instance $X^\prime$ and the instance to be explained $X$. The use of the $L_1$ distance should ensure that the $X^\prime$ is a sparse counterfactual - minimizing the number of features to be changed in order to change the prediction.
The optimal value of the hyperparameter $\lambda$ will vary from dataset to dataset and even within a dataset for each instance to be explained and the desired target class. As such it is difficult to set and we learn it as part of the optimization algorithm, i.e. we want to optimize
$$\min_{X^{\prime}}\max_{\lambda}L(X^\prime\vert X)$$
subject to
$$\vert f_t(X^\prime)-p_t\vert\leq\epsilon \text{ (counterfactual constraint)},$$
where $\epsilon$ is a tolerance parameter. In practice this is done in two steps, on the first pass we sweep a broad range of $\lambda$, e.g. $\lambda\in(10^{-1},\dots,10^{-10}$) to find lower and upper bounds $\lambda_{\text{lb}}, \lambda_{\text{ub}}$ where counterfactuals exist. Then we use bisection to find the maximum $\lambda\in[\lambda_{\text{lb}}, \lambda_{\text{ub}}]$ such that the counterfactual constraint still holds. The result is a set of counterfactual instances $X^\prime$ with varying distance from the test instance $X$.
## Usage
### Initialization
The counterfactual (CF) explainer method works on fully black-box models, meaning they can work with arbitrary functions that take arrays and return arrays. However, if the user has access to a full TensorFlow (TF) or Keras model, this can be passed in as well to take advantage of the automatic differentiation in TF to speed up the search. This section describes the initialization for a TF/Keras model, for fully black-box models refer to [numerical gradients](#Numerical-Gradients).
First we load the TF/Keras model:
```python
model = load_model('my_model.h5')
```
Then we can initialize the counterfactual object:
```python
shape = (1,) + x_train.shape[1:]
cf = Counterfactual(model, shape, distance_fn='l1', target_proba=1.0,
target_class='other', max_iter=1000, early_stop=50, lam_init=1e-1,
max_lam_steps=10, tol=0.05, learning_rate_init=0.1,
feature_range=(-1e10, 1e10), eps=0.01, init='identity',
decay=True, write_dir=None, debug=False)
```
Besides passing the model, we set a number of **hyperparameters** ...
... **general**:
* `shape`: shape of the instance to be explained, starting with batch dimension. Currently only single explanations are supported, so the batch dimension should be equal to 1.
* `feature_range`: global or feature-wise min and max values for the perturbed instance.
* `write_dir`: write directory for Tensorboard logging of the loss terms. It can be helpful when tuning the hyperparameters for your use case. It makes it easy to verify that e.g. not 1 loss term dominates the optimization, that the number of iterations is OK etc. You can access Tensorboard by running `tensorboard --logdir {write_dir}` in the terminal.
* `debug`: flag to enable/disable writing to Tensorboard.
... related to the **optimizer**:
* `max_iter`: number of loss optimization steps for each value of $\lambda$; the multiplier of the distance loss term.
* `learning_rate_init`: initial learning rate, follows linear decay.
* `decay`: flag to disable learning rate decay if desired
* `early_stop`: early stopping criterion for the search. If no counterfactuals are found for this many steps or if this many counterfactuals are found in a row we change $\lambda$ accordingly and continue the search.
* `init`: how to initialize the search, currently only `"identity"` is supported meaning the search starts from the original instance.
... related to the **objective function**:
* `distance_fn`: distance function between the test instance $X$ and the proposed counterfactual $X^\prime$, currently only `"l1"` is supported.
* `target_proba`: desired target probability for the returned counterfactual instance. Defaults to `1.0`, but it could be useful to reduce it to allow a looser definition of a counterfactual instance.
* `tol`: the tolerance within the `target_proba`, this works in tandem with `target_proba` to specify a range of acceptable predicted probability values for the counterfactual.
* `target_class`: desired target class for the returned counterfactual instance. Can be either an integer denoting the specific class membership or the string `other` which will find a counterfactual instance whose predicted class is anything other than the class of the test instance.
* `lam_init`: initial value of the hyperparameter $\lambda$. This is set to a high value $\lambda = 10^{-1}$ and annealed during the search to find good bounds for $\lambda$ and for most applications should be fine to leave as default.
* `max_lam_steps`: the number of steps (outer loops) to search for with a different value of $\lambda$.
While the default values for the loss term coefficients worked well for the simple examples provided in the notebooks, it is recommended to test their robustness for your own applications.
<div class="alert alert-warning">
Warning
Once a `Counterfactual` instance is initialized, the parameters of it are frozen even if creating a new instance. This is due to TensorFlow behaviour which holds on to some global state. In order to change parameters of the explainer in the same session (e.g. for explaining different models), you will need to reset the TensorFlow graph manually:
```python
import tensorflow as tf
tf.keras.backend.clear_session()
```
You may need to reload your model after this. Then you can create a new `Counterfactual` instance with new parameters.
</div>
### Fit
The method is purely unsupervised so no fit method is necessary.
### Explanation
We can now explain the instance $X$:
```python
explanation = cf.explain(X)
```
The ```explain``` method returns an `Explanation` object with the following attributes:
* *cf*: dictionary containing the counterfactual instance found with the smallest distance to the test instance, it has the following keys:
* *X*: the counterfactual instance
* *distance*: distance to the original instance
* *lambda*: value of $\lambda$ corresponding to the counterfactual
* *index*: the step in the search procedure when the counterfactual was found
* *class*: predicted class of the counterfactual
* *proba*: predicted class probabilities of the counterfactual
* *loss*: counterfactual loss
* *orig_class*: predicted class of original instance
* *orig_proba*: predicted class probabilities of the original instance
* *all*: dictionary of all instances encountered during the search that satisfy the counterfactual constraint but have higher distance to the original instance than the returned counterfactual. This is organized by levels of $\lambda$, i.e. ```explanation['all'][0]``` will be a list of dictionaries corresponding to instances satisfying the counterfactual condition found in the first iteration over $\lambda$ during bisection.
### Numerical Gradients
So far, the whole optimization problem could be defined within the TF graph, making automatic differentiation possible. It is however possible that we do not have access to the model architecture and weights, and are only provided with a ```predict``` function returning probabilities for each class. The counterfactual can then be initialized in the same way as before, but using a prediction function:
```python
# define model
model = load_model('mnist_cnn.h5')
predict_fn = lambda x: model.predict(x)
# initialize explainer
shape = (1,) + x_train.shape[1:]
cf = Counterfactual(predict_fn, shape, distance_fn='l1', target_proba=1.0,
target_class='other', max_iter=1000, early_stop=50, lam_init=1e-1,
max_lam_steps=10, tol=0.05, learning_rate_init=0.1,
                    feature_range=(-1e10, 1e10), eps=0.01, init='identity')
```
In this case, we need to evaluate the gradients of the loss function with respect to the input features $X$ numerically:
$$\frac{\partial L_{\text{pred}}}{\partial X} = \frac{\partial L_\text{pred}}{\partial p} \frac{\partial p}{\partial X}
$$
where $L_\text{pred}$ is the predict function loss term, $p$ the predict function and $x$ the input features to optimize. There is now an additional hyperparameter to consider:
* `eps`: a float or an array of floats to define the perturbation size used to compute the numerical gradients of $^{\delta p}/_{\delta X}$. If a single float, the same perturbation size is used for all features, if the array dimension is *(1 x nb of features)*, then a separate perturbation value can be used for each feature. For the Iris dataset, `eps` could look as follows:
```python
eps = np.array([[1e-2, 1e-2, 1e-2, 1e-2]]) # 4 features, also equivalent to eps=1e-2
```
## Examples
[Counterfactual instances on MNIST](../examples/cf_mnist.ipynb)
| github_jupyter |
# Combining Data With Joins
## Overview
Teaching: 15
Exercises: 10
### Questions
- "How do I bring data together from separate tables?"
### Objectives
- "Employ joins to combine data from two tables."
- "Apply functions to manipulate individual values."
- "Employ aliases to assign new names to tables and columns in a query."
### Key points
- "Use the `JOIN` command to combine data from two tables---the `ON` or `USING` keywords specify which columns link the tables."
- "Regular `JOIN` returns only matching rows. Other join commands provide different behavior, e.g., `LEFT JOIN` retains all rows of the table on the left side of the command."
- "`COALESCE` allows you to specify a value to use in place of `NULL`, which can help in joins"
- "`NULLIF` can be used to replace certain values with `NULL` in results"
- "Many other functions like `COALESCE` and `NULLIF` can operate on individual values."
## Setup
### Copy the original dataset to a folder `notebooks/data`
```
! mkdir -p data; cp -R ../files/original-data/* ./data
```
### Configure SQL Magic extension and connect to the database
First enable the SQL magic extension, configure extension to return Pandas data frames (in order for the notebook to handle large result sets better), and then connect to our database again.
```
%load_ext sql
%config SqlMagic.autopandas=True
%sql sqlite:///data/portal_mammals.sqlite
```
## Joins
To combine data from two tables we use the SQL `JOIN` command, which comes after
the `FROM` command.
Database tables are used to organize and group data by common characteristics or principles.
Often, we need to combine elements from separate tables into a single table or query for analysis and visualization.
A JOIN is a means for combining columns from multiple tables by using values common to each.
The JOIN command combined with ON is used to combine fields from separate tables.
The `JOIN` command on its own will result in a cross product, where each row in
the first table is paired with each row in the second table. Usually this is not
what is desired when combining two tables with data that is related in some way.
For that, we need to tell the computer which columns provide the link between the two
tables using the word `ON`. What we want is to join the data with the same
species id.
```
%%sql
SELECT *
FROM surveys
JOIN species
ON surveys.species_id = species.species_id;
```
`ON` is like `WHERE`. It filters things out according to a test condition. We use
the `table.colname` format to tell the manager what column in which table we are
referring to.
The output of the `JOIN` command will have columns from the first table plus the
columns from the second table. For the above command, the output will be a table
that has the following column names:
| record_id | month | day | year | plot_id | species_id | sex | hindfoot_length | weight | species_id | genus | species | taxa |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ... |||||||||||||
| 96 | 8 | 20 | 1997 | 12 | **DM** | M | 36 | 41 | **DM** | Dipodomys | merriami | Rodent |
| ... ||||||||||||||
Alternatively, we can use the word `USING`, as a short-hand. `USING` only
works on columns which share the same name. In this case we are
telling the manager that we want to combine `surveys` with `species` and that
the common column is `species_id`.
```
%%sql
SELECT *
FROM surveys
JOIN species
USING (species_id);
```
The output will only have one **species_id** column
| record_id | month | day | year | plot_id | species_id | sex | hindfoot_length | weight | genus | species | taxa |
|---|---|---|---|---|---|---|---|---|---|---|---|
| ... ||||||||||||
| 96 | 8 | 20 | 1997 | 12 | DM | M | 36 | 41 | Dipodomys | merriami | Rodent |
| ... |||||||||||||
We often won't want all of the fields from both tables, so anywhere we would
have used a field name in a non-join query, we can use `table.colname`.
For example, what if we wanted information on when individuals of each
species were captured, but instead of their species ID we wanted their
actual species names.
```
%%sql
SELECT surveys.year, surveys.month, surveys.day, species.genus, species.species
FROM surveys
JOIN species
ON surveys.species_id = species.species_id;
```
| year | month | day | genus | species |
|---|---|---|---|---|
| ... |||||
| 1977 | 7 | 16 | Neotoma | albigula|
| 1977 | 7 | 16 | Dipodomys | merriami|
|...||||||
Many databases, including SQLite, also support a join through the `WHERE` clause of a query.
For example, you may see the query above written without an explicit JOIN.
```
%%sql
SELECT surveys.year, surveys.month, surveys.day, species.genus, species.species
FROM surveys, species
WHERE surveys.species_id = species.species_id;
```
For the remainder of this lesson, we'll stick with the explicit use of the `JOIN` keyword for
joining tables in SQL.
### Challenge
> - Write a query that returns the genus, the species name, and the weight
> of every individual captured at the site
#### Solution
```
%%sql
SELECT species.genus, species.species, surveys.weight
FROM surveys
JOIN species
ON surveys.species_id = species.species_id;
```
## Different join types
We can count the number of records returned by our original join query.
```
%%sql
SELECT COUNT(*)
FROM surveys
JOIN species
USING (species_id);
```
Notice that this number is smaller than the number of records present in the
survey data.
```
%%sql
SELECT COUNT(*) FROM surveys;
```
This is because, by default, SQL only returns records where the joining value
is present in the joined columns of both tables (i.e. it takes the _intersection_
of the two join columns). This joining behaviour is known as an `INNER JOIN`.
In fact the `JOIN` command is simply shorthand for `INNER JOIN` and the two
terms can be used interchangeably as they will produce the same result.
We can also tell the computer that we wish to keep all the records in the first
table by using the command `LEFT OUTER JOIN`, or `LEFT JOIN` for short.
### Challenge
> - Re-write the original query to keep all the entries present in the `surveys`
> table. How many records are returned by this query?
#### Solution
```
%%sql
SELECT * FROM surveys
LEFT JOIN species
USING (species_id);
```
### Challenge
> - Count the number of records in the `surveys` table that have a `NULL` value
> in the `species_id` column.
Remember: In SQL a `NULL` value in one table can never be joined to a `NULL` value in a
second table because `NULL` is not equal to anything, not even itself.
#### Solution
```
%%sql
SELECT COUNT(*)
FROM surveys
WHERE species_id IS NULL;
```
### Combining joins with sorting and aggregation
Joins can be combined with sorting, filtering, and aggregation. So, if we
wanted average mass of the individuals on each different type of treatment, we
could do something like
```
%%sql
SELECT plots.plot_type, AVG(surveys.weight)
FROM surveys
JOIN plots
ON surveys.plot_id = plots.plot_id
GROUP BY plots.plot_type;
```
### Challenge
> - Write a query that returns the number of animals caught of each genus in each plot.
> Order the results by plot number (ascending) and by descending number of individuals in each plot.
#### Solution
```
%%sql
SELECT surveys.plot_id, species.genus, COUNT(*) AS number_indiv
FROM surveys
JOIN species
ON surveys.species_id = species.species_id
GROUP BY species.genus, surveys.plot_id
ORDER BY surveys.plot_id ASC, number_indiv DESC;
```
### Challenge
> - Write a query that finds the average weight of each rodent species (i.e., only include species with Rodent in the taxa field).
#### Solution
```
%%sql
SELECT surveys.species_id, AVG(surveys.weight)
FROM surveys
JOIN species
ON surveys.species_id = species.species_id
WHERE species.taxa = 'Rodent'
GROUP BY surveys.species_id;
```
## Functions `COALESCE` and `NULLIF` and more
SQL includes numerous functions for manipulating data. You've already seen some
of these being used for aggregation (`SUM` and `COUNT`) but there are functions
that operate on individual values as well. Probably the most important of these
are `COALESCE` and `NULLIF`. `COALESCE` allows us to specify a value to use in
place of `NULL`.
We can represent unknown sexes with `'U'` instead of `NULL`:
```
%%sql
SELECT species_id, sex, COALESCE(sex, 'U')
FROM surveys;
```
The lone "sex" column is only included in the query above to illustrate where
`COALESCE` has changed values; this isn't a usage requirement.
### Challenge
> - Write a query that returns 30 instead of `NULL` for values in the
> `hindfoot_length` column.
#### Solution
```
%%sql
SELECT hindfoot_length, COALESCE(hindfoot_length, 30)
FROM surveys;
```
### Challenge
> - Write a query that calculates the average hind-foot length of each species,
> assuming that unknown lengths are 30 (as above).
#### Solution
```
%%sql
SELECT species_id, AVG(COALESCE(hindfoot_length, 30))
FROM surveys
GROUP BY species_id;
```
### COALESCE combined with JOIN
`COALESCE` can be particularly useful in `JOIN`. When joining the `species` and
`surveys` tables earlier, some results were excluded because the `species_id`
was `NULL` in the surveys table. We can use `COALESCE` to include them again, re-writing the `NULL` to
a valid joining value:
```
%%sql
SELECT surveys.year, surveys.month, surveys.day, species.genus, species.species
FROM surveys
JOIN species
ON COALESCE(surveys.species_id, 'AB') = species.species_id;
```
### Challenge
> - Write a query that returns the number of animals caught of each genus in each
> plot, assuming that unknown species are all of the genus "Rodent".
#### Solution
```
%%sql
SELECT plot_id, COALESCE(genus, 'Rodent') AS genus2, COUNT(*)
FROM surveys
LEFT JOIN species
ON surveys.species_id=species.species_id
GROUP BY plot_id, genus2;
```
### NULLIF
The inverse of `COALESCE` is `NULLIF`. This returns `NULL` if the first argument
is equal to the second argument. If the two are not equal, the first argument
is returned. This is useful for "nulling out" specific values.
We can "null out" plot 7:
```
%%sql
SELECT species_id, plot_id, NULLIF(plot_id, 7)
FROM surveys;
```
### Other functions
Some more functions which are common to SQL databases are listed in the table
below:
| Function | Description |
|------------------------------|-------------------------------------------------------------------------------------------------|
| `ABS(n)` | Returns the absolute (positive) value of the numeric expression *n* |
| `COALESCE(x1, ..., xN)` | Returns the first of its parameters that is not NULL |
| `LENGTH(s)` | Returns the length of the string expression *s* |
| `LOWER(s)` | Returns the string expression *s* converted to lowercase |
| `NULLIF(x, y)` | Returns NULL if *x* is equal to *y*, otherwise returns *x* |
| `ROUND(n)` or `ROUND(n, x)` | Returns the numeric expression *n* rounded to *x* digits after the decimal point (0 by default) |
| `TRIM(s)` | Returns the string expression *s* without leading and trailing whitespace characters |
| `UPPER(s)` | Returns the string expression *s* converted to uppercase |
Finally, some useful functions which are particular to SQLite are listed in the
table below:
| Function | Description |
|-------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `RANDOM()` | Returns a random integer between -9223372036854775808 and +9223372036854775807. |
| `REPLACE(s, f, r)` | Returns the string expression *s* in which every occurrence of *f* has been replaced with *r* |
| `SUBSTR(s, x, y)` or `SUBSTR(s, x)` | Returns the portion of the string expression *s* starting at the character position *x* (leftmost position is 1), *y* characters long (or to the end of *s* if *y* is omitted) |
### Challenge
> Write a query that returns genus names (no repeats), sorted from longest genus name down
> to shortest.
#### Solution
```
%%sql
SELECT DISTINCT genus
FROM species
ORDER BY LENGTH(genus) DESC;
```
## Aliases when joining
As we saw before, aliases make things clearer, and are especially useful when joining tables.
```
%%sql
SELECT surv.year AS yr, surv.month AS mo, surv.day AS day, sp.genus AS gen, sp.species AS sp
FROM surveys AS surv
JOIN species AS sp
ON surv.species_id = sp.species_id;
```
To practice we have some optional challenges for you.
### Challenge (optional)
> SQL queries help us *ask* specific *questions* which we want to answer about our data. The real skill with SQL is to know how to translate our scientific questions into a sensible SQL query (and subsequently visualize and interpret our results).
>
> Have a look at the following questions; these questions are written in plain English. Can you translate them to *SQL queries* and give a suitable answer?
>
> 1. How many plots from each type are there?
>
> 2. How many specimens of each sex are there for each year, including those whose sex is unknown?
>
> 3. How many specimens of each species were captured in each type of plot, excluding specimens of unknown species?
>
> 4. What is the average weight of each taxa?
>
> 5. What are the minimum, maximum and average weight for each species of Rodent?
>
> 6. What is the average hindfoot length for male and female rodent of each species? Is there a Male / Female difference?
>
> 7. What is the average weight of each rodent species over the course of the years? Is there any noticeable trend for any of the species?
#### Proposed solutions
##### 1. Solution:
```
%%sql
SELECT plot_type, COUNT(*) AS num_plots
FROM plots
GROUP BY plot_type;
```
##### 2. Solution:
```
%%sql
SELECT year, sex, COUNT(*) AS num_animal
FROM surveys
GROUP BY sex, year;
```
##### 3. Solution:
```
%%sql
SELECT species_id, plot_type, COUNT(*)
FROM surveys
JOIN plots USING(plot_id)
WHERE species_id IS NOT NULL
GROUP BY species_id, plot_type;
```
##### 4. Solution:
```
%%sql
SELECT taxa, AVG(weight)
FROM surveys
JOIN species ON species.species_id = surveys.species_id
GROUP BY taxa;
```
##### 5. Solution:
```
%%sql
SELECT surveys.species_id, MIN(weight), MAX(weight), AVG(weight) FROM surveys
JOIN species ON surveys.species_id = species.species_id
WHERE taxa = 'Rodent'
GROUP BY surveys.species_id;
```
##### 6. Solution:
```
%%sql
SELECT surveys.species_id, sex, AVG(hindfoot_length)
FROM surveys JOIN species ON surveys.species_id = species.species_id
WHERE (taxa = 'Rodent') AND (sex IS NOT NULL)
GROUP BY surveys.species_id, sex;
```
##### 7. Solution:
```
%%sql
SELECT surveys.species_id, year, AVG(weight) as mean_weight
FROM surveys
JOIN species ON surveys.species_id = species.species_id
WHERE taxa = 'Rodent' GROUP BY surveys.species_id, year;
```
## End of episode
| github_jupyter |
```
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Rescale pixel values from [0, 255] to [0, 1]; no other augmentation is used.
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
# 48x48 grayscale images in batches of 64; one-hot labels are inferred from
# the directory layout (one sub-directory per emotion class).
train_generator = train_datagen.flow_from_directory('./dataset/train',
                                                    target_size=(48,48),
                                                    batch_size=64,
                                                    color_mode='grayscale',
                                                    class_mode='categorical')
validation_generator = val_datagen.flow_from_directory('./dataset/test',
                                                       target_size=(48,48),
                                                       batch_size=64,
                                                       color_mode='grayscale',
                                                       class_mode='categorical')
# CNN for 7-class facial-emotion classification on 48x48 grayscale images.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(48,48,1)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2,2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(128, kernel_size=(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2,2)))
model.add(tf.keras.layers.Conv2D(128, kernel_size=(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2,2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=1024, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(units=7, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Model.fit accepts generators directly; fit_generator is deprecated and has
# been removed from recent TensorFlow releases.
# NOTE(review): 28709 / 7178 are presumably the train / test image counts
# (they match FER2013's published sizes) -- confirm against the dataset.
model.fit(train_generator,
          steps_per_epoch=28709//64,
          epochs=30,
          validation_data=validation_generator,
          validation_steps=7178//64)
model.save('model.h5')
# Mapping from class name to integer label as inferred from directory names.
train_generator.class_indices
# Use tensorflow.keras consistently for loading and preprocessing; the
# original mixed standalone `keras.preprocessing` with `tensorflow.keras`,
# which can fail when the two package versions diverge.
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
model = load_model('model.h5')
# Integer class index -> emotion label; ordering must match the
# train_generator.class_indices mapping produced during training.
result = {0:'angry',
          1:'disgust',
          2:'fear',
          3:'happy',
          4:'neutral',
          5:'sad',
          6:'surprise'}
test_image = './dataset/test_data/happy.jpg'
# Load at 48x48, convert to single-channel grayscale ('L'), scale to [0, 1],
# and add the batch dimension expected by the model.
img = image.load_img(test_image, target_size=(48,48))
img = img.convert('L')
x = image.img_to_array(img)
x /= 255
x = x.reshape(1, 48, 48, 1)
predict = model.predict(x)
print(predict)
final_prediction = result[np.argmax(predict[0])]
print(final_prediction)
```
| github_jupyter |
# Scikit Learn and the K-nearest Neighbor Algorithm
In this notebook we'll introduce the `sklearn` package and a few important concepts in machine learning:
* Splitting data into test, train, and validation sets.
* Fitting models to a dataset.
* And using "Hyperparameters" to tune models.
Lets revisit the example we saw in the first class:
```
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# Load the data
heart_dataset = pd.read_csv('../../datasets/uci-heart-disease/heart.csv')
# Split the data into input and labels
labels = heart_dataset['target']
input_data = heart_dataset.drop(columns=['target'])
# Split the data into training (80%) and test (20%).  NOTE(review): no
# random_state is set, so the split -- and the reported accuracy --
# changes on every run.
training_data, test_data, training_labels, test_labels = train_test_split(
    input_data,
    labels,
    test_size=0.20
)
# Build the model (k-nearest neighbors with default hyperparameters)
model = KNeighborsClassifier()
model.fit(training_data, training_labels)
# See how it did: accuracy on the held-out test set.
print("Test accuracy: ", model.score(test_data, test_labels))
```
# Test/Train/Validation Splits:
In machine learning, it's important to avoid something called "overfitting" our models. This happens when a model more or less "memorizes" the training data, but performs poorly on data that is "held out" of the training process. A model that is "overfit" won't "generalize" to unseen data — making it useless in the real world!
To avoid and detect overfitting we will commonly split our data into 3 buckets:
* Training data: the data that the model actually learns from.
* Validation data: data that the model doesn't learn from, but that we use to validate the results throughout the process of building our models.
* Test data: data that is held out entirely during the model building process in order to give an unbiased measure of the final model's performance.
    * If we use the test data, and then change our model, the test data is no longer "unbiased" as we will have incorporated information from the test data (i.e. our model's poor "test performance") into the next version of the model. In this case we have turned the test data into validation data, and we should get new test data.
In the above example we've only made two buckets of data, which we called training and test... In theory we COULD do this:
```
# Split the data into training+validation and test
training_and_validation_data, test_data, training_and_validation_labels, test_labels = train_test_split(
input_data,
labels,
test_size=0.20
)
# Split the training+validation data into training and validation
training_data, validation_data, training_labels, validation_labels = train_test_split(
training_and_validation_data,
training_and_validation_labels,
test_size=0.20
)
# Then use only training and validation to evaluate our model and make changes to the model's performance...
```
While the above CAN be done, it's much more common to make the test/train split much earlier in the data pipeline. Many datasets for ML tasks come with a test set already prepared and separate from the training set. ML practitioners then perform a training/validation split with all of the training data. The training and test data will be saved separately, in their own folders or CSV files or labeled differently in the database/data warehouse software.
If you've collected your own data and want to do ML with it, I strongly suggest you split out 10%-20% of that data, set it aside, and don't look at it until the very end of your ML pipeline to get an unbiased evaluation once you've built a model you like.
# SKLearn's API
Scikit learn has a wonderfully unified API that always follows this pattern:
* Create a model from a class.
* This is where you set the "hyperparameters" of the model.
* Call that model's `.fit` method using the training data to train the model.
* Call that model's `.score` method to evaluate the model against the validation/test data.
For example:
```
# Lets build multiple models using a few different "hyperparameters"
# Lets build multiple models using a few different "hyperparameters"
model_one = KNeighborsClassifier()                                      # defaults
model_two = KNeighborsClassifier(weights='distance')                    # distance-weighted votes
model_three = KNeighborsClassifier(n_neighbors=10, weights='distance')  # k=10, distance-weighted

# Fit each model on the same training split and compare validation accuracy
for i, model in enumerate([model_one, model_two, model_three]):
    model.fit(training_data, training_labels)
    print(f' {i+1} validation accuracy: ', model.score(validation_data, validation_labels))
```
# The K-Nearest Neighbor's Model
So what is the actual difference between these three models? How does KNN actually work?
KNN is a relatively straightforward model. When you want to make a prediction with KNN you simply compare the item you're making a prediction about to the training dataset using a distance function and based on the class of the "nearest" neighbors the model makes a prediction.
K is how many neighbors to look at, if k is 5 the model looks at the 5 nearest neighbors and whichever class is most common among those 5 neighbors is the one selected. Lets look at some pictures from the pre-reading (https://towardsdatascience.com/laymans-introduction-to-knn-c793ed392bc2):




These examples are all in 2-dimensional space, but the algorithm generalizes to n-dimensions (based on the number of features in our training data).
K is controlled in `sklearn` by the `n_neighbors` parameter.
Another hyperparameter in KNN is the `weights` parameter, which has 3 possible values, from the docs (https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html):
* ‘uniform’ : uniform weights. All points in each neighborhood are weighted equally.
* ‘distance’ : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away.
* [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights.
Similarly, the distance metric can be provided:
> metric: str or callable, default=’minkowski’
> the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of DistanceMetric for a list of available metrics. If metric is “precomputed”, X is assumed to be a distance matrix and must be square during fit. X may be a sparse graph, in which case only “nonzero” elements may be considered neighbors.
| github_jupyter |
# Clean Data
### Imports
```
import numpy as np
import pandas as pd
# visualization
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# personal module scripts
import clean_data
```
## Clean NFL Combine Data
```
combine_file = r'data\nfl_combine_1987_2020.csv'
df_raw_combine = pd.read_csv(combine_file)
df_raw_combine.head()

# Keep raw data import for reference - build an independent copy to modify.
# A plain assignment would only alias df_raw_combine, so the inplace drops
# below would silently mutate the "raw" frame as well.
df_combine = df_raw_combine.copy()

# Drop dummy '0' column and Wonderlic scores data
combine_cols_to_drop = ['Unnamed: 0', 'Wonderlic']
df_combine.drop(columns=combine_cols_to_drop, inplace=True)

# Clean column headers
df_combine.columns = df_combine.columns.str.lower()
df_combine.rename(columns={'college':'school'}, inplace=True)

# Clean school names
df_combine['school'] = df_combine['school'].str.strip()

# Drop years prior to 2000 (no draft data)
print(df_combine.shape)
df_combine.drop(df_combine[df_combine['year']<2000].index, inplace=True)
print('Cleaned combine size: ', df_combine.shape)
df_combine.head()
```
## Import position mapping data
The "combine" dataset maps players to very specific positions (ie, "Free Safety" or "Outside Linebacker").
Map these granular positions to more standard positions. Also classify each position as "Offense" or "Defense", and indicate if the position is a "Skill" position or on the line of scrimmage.
```
# Position map: one row per granular 'pos' code with its broader group
# (used in the left-join below)
df_positions = pd.read_csv('data/position_mapping.csv')
df_positions.head()
```
### Merge the draft and position mapping datasets
```
print('Granular position counts in combine dataset:')
print(df_combine['pos'].value_counts())

# Left-join the position map onto the combine data on the granular 'pos'
# code; rows with an unmapped 'pos' get NaN in the mapped columns
df_combine = df_combine.merge(df_positions,
                              how='left',
                              on='pos')

print('\nPosition Group counts after merging with position map:')
print(df_combine['pos_group'].value_counts())
df_combine.head()
```
#### Visualize combine performance distributions by position
```
positions = df_combine['pos_group'].unique()
positions_to_drop = ['SN', 'K'] # Long snappers and kickers/punters
positions = [pos for pos in positions if pos not in positions_to_drop]
print(positions)
print('Unique Positions: ', len(positions))
print(df_combine.columns)

# Measurement columns to histogram for each position group
stat_columns = ['height (in)', 'weight (lbs)',
                'hand size (in)', 'arm length (in)', '40 yard', 'bench press',
                'vert leap (in)', 'broad jump (in)', 'shuttle', '3cone', '60yd shuttle']
num_stats = len(stat_columns)

# One subplot per (position, stat); rows share a y-scale
fig, axes = plt.subplots(len(positions), num_stats,
                         sharex=False,
                         sharey=True,
                         figsize=(25,25))
fig.suptitle('NFL Combine Statistics - Distribution by Position (2000-2020)', fontsize=30)
fig.supxlabel('Measurement', fontsize=30)
fig.supylabel('Position', fontsize=30)
fig.tight_layout(rect=[0.03, 0.03, 1, .95])

# Loop over axes and data
for row, pos in enumerate(positions):
    x_positions = df_combine[df_combine['pos_group']==pos]
    for col, stat in enumerate(stat_columns):
        # Get axis
        ax = axes[row,col]
        x = x_positions[stat]
        # Fixed, dataset-wide x-range per stat so histograms down a column
        # are directly comparable
        ax.hist(x,
                range=[df_combine[stat].min(),df_combine[stat].max()],
                alpha=.5, bins=10)
        # Set Y label once per row
        if col==0:
            ax.set_ylabel(pos, fontsize='xx-large')
        # Set X label above first row and below last row
        if row == 0:
            ax.set_title(stat, fontsize='xx-large')
        if row == len(positions) - 1:
            ax.set_xlabel(stat, fontsize='xx-large')
fig.show()
fig.savefig('images/stats_by_position.png', format='png')
```
## Import and clean NFL Draft Data
```
draft_file = r'data\espn_draft_history_2000_2021_cleaned.csv'
df_raw_draft = pd.read_csv(draft_file)

# Keep raw data import for reference - build an independent copy to modify.
# .copy() ensures the in-place edits below don't alias and mutate the raw frame.
df_draft = df_raw_draft.copy()

# Clean column headers
df_draft.columns = df_draft.columns.str.lower()

# Clean school names
df_draft['school'] = df_draft['school'].str.strip()
```
### Are there duplicated names?
```
df_combine['name'].value_counts(sort='descending').head(10)
```
### Answer: Yes
So we cannot simply join the 2 datasets on player 'name' columns. Need to also join on
college and year.
## Do college names match in both datasets?
```
# Unique school names from each dataset, tagged with their source
draft_school = pd.DataFrame(df_draft['school'].unique()).rename(columns={0:'school'})
draft_school['source'] = 'draft'
combine_school = pd.DataFrame(df_combine['school'].unique()).rename(columns={0:'school'})
combine_school['source'] = 'combine'
print(type(combine_school))
print(combine_school.head())

# Outer merge keeps schools that appear in only one source (NaN in the other)
schools = draft_school.merge(combine_school, on='school', how='outer',
                             suffixes=['_draft', '_combine']).sort_values(by='school')

# List all cases with mismatches (rows where either source column is NaN)
na_mask = schools.isna().any(axis=1)
schools[na_mask].head(10)
```
### So we see that the 'combine' dataset frequently has the state appended to the school name;
Ex: "Abilene Christian (TX)". Remove these from school names, with the exception of "Miami (OH)".
```
# Literal replacement: 'Miami (OH)' contains regex metacharacters, so
# regex=False is required for the parentheses to match literally (under the
# default regex interpretation '(OH)' is a capture group and never matches).
df_combine['school'] = df_combine['school'].str.replace('Miami (OH)', 'Miami - OH',
                                                        regex=False)
print(df_combine['school'].head())

# Strip any parenthesized state tag, e.g. 'Abilene Christian (TX)', then trim
# the whitespace the removal leaves behind ('Abilene Christian ') so the
# later merge on 'school' matches the draft dataset's stripped names.
regex_replace_parens = r'\([^)]*[a-zA-Z][^)]*\)'
df_combine['school'] = df_combine['school'].str.replace(regex_replace_parens,
                                                        '', regex=True).str.strip()
df_combine['school'].head()
```
## Standardize player names between datasets
Player names in the "Draft" dataset include suffixes including "Jr., II, III, IV", but these are NOT included in the "combine" dataset.
Standardize player names between datasets by removing these values from the "Draft" dataset.
```
# Remove generational suffixes so draft names match the combine dataset.
# Fixes vs. the original pattern: 'II' was not handled at all; the anchored
# alternation is ordered longest-first so 'III' can never be truncated to
# 'I'; and the preceding comma/space is consumed so no trailing whitespace
# is left behind (a trailing space would break the exact-match merge on name).
regex_suffixes_to_remove = r'\s*,?\s*(Jr\.?|IIII|IV|III|II)$'
df_draft['name'] = df_draft['name'].str.replace(regex_suffixes_to_remove,
                                                '', regex=True)
```
### Merge the Draft and NFL Combine datasets
```
# Left-join draft results onto combine rows; players with no draft record
# get NaN in the draft columns (filled with 8 = undrafted further below)
df_merged = df_combine.merge(df_draft, how='left',
                             on=['name', 'school', 'year'])
df_merged.head()
# df_merged.to_clipboard()
```
## Investigate merged data
```
df_merged.info()
```
## Data Cleaning:
* Very few 60-yard shuttle records; drop column
* Overwrite blank "Round" column values with '8' (will indicate undrafted)
* Drop kickers, long snappers, QBs and Fullbacks (too few, draft status not driven by stats)
```
# Drop 60yd shuttle (too few data points), duplicative columns related to
# player position, and things like year and team name
merged_cols_to_drop = ['year', 'name', 'school', 'pos',
                       '60yd shuttle',
                       'pk(ovr)', 'team', 'position']
try:
    df_merged.drop(columns=merged_cols_to_drop, inplace=True)
except KeyError as err:
    # Narrowed from a bare except: the only expected failure is missing
    # columns, e.g. when this cell is re-run after they were already dropped
    print('Issue dropping columns:', err)

# overwrite blank 'round' values with '8' (will indicate undrafted)
df_merged['round'].fillna(8, inplace=True)
#df_merged.dropna(inplace=True)
print('\n Remaining Columns')
print(df_merged.columns)

# Drop positions whose draft status is not stat-driven (see markdown above)
positions_to_drop = ['SN', 'K', 'QB', 'FB']
positions_mask = \
    df_merged[df_merged['pos_group'].isin(positions_to_drop)].index
print(positions_mask)
print(df_merged.shape)
df_merged.drop(positions_mask, inplace=True)
print(df_merged.head())
```
## Drop all but Cornerbacks
```
# Restrict the analysis to cornerbacks only
df_merged.drop(df_merged[df_merged['pos_group']!='CB'].index, inplace=True)
df_merged['pos_group'].value_counts()
```
### How many players don't have much combine data? Drop them
```
metrics_cols = ['height (in)', 'weight (lbs)', 'hand size (in)', 'arm length (in)',
                '40 yard', 'bench press', 'vert leap (in)', 'broad jump (in)',
                'shuttle', '3cone']

# See count of records by number of missing metrics values.
# 10 total metrics columns: thresh=7 keeps only rows with at least 7
# non-missing metrics, i.e. drops rows missing 4 or more.
print('\n Missing metrics per row')
print(df_merged[metrics_cols].isna().sum(axis=1).value_counts())

df_merged.dropna(axis=0, thresh=7,
                 subset=metrics_cols, inplace=True)

print('\nRemaining missing metrics by row')
print(df_merged[metrics_cols].isna().sum(axis=1).value_counts())
df_merged.head(10)
df_merged.columns
```
## Impute missing values based on average of players with same position
```
print(df_merged.head(10))

# Fill remaining NaN metrics per position group using the project-local
# clean_data.group_imputer helper (per the markdown above it imputes the
# group average -- confirm in clean_data.py)
df_merged = clean_data.group_imputer(
    df=df_merged,
    grouping_col='pos_group',
    cols_to_impute=metrics_cols)
print('\n')
print(df_merged.head(10))
```
## Begin Modeling
### Imports for modeling
```
from sklearn.model_selection import train_test_split
# encoders
from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
#metrics
from sklearn.metrics import accuracy_score, precision_score
from sklearn.metrics import classification_report, roc_auc_score, confusion_matrix, plot_confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
#pipeline
from sklearn.pipeline import make_pipeline
# machine learning
from sklearn.linear_model import LogisticRegression, RidgeClassifierCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
# Boosted Models
# Use this one if you have an M1 chip.
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
# Permutation Importance
from sklearn.inspection import permutation_importance
# for displaying images and html
from IPython.display import Image
from IPython.core.display import HTML
```
### Split data
```
# Prediction target is the draft round (8 marks undrafted, per the fill above)
target = 'round'
X = df_merged.drop(columns=target)
y = df_merged[target]

# 80/20 train/test split; fixed random_state for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=.2,
                                                    random_state=42)
print(f'X_train shape: {X_train.shape} X_test shape: {X_test.shape}')
```
## Baseline Accuracy
```
# Majority-class share: the accuracy a constant predictor would achieve
baseline = y.value_counts(normalize=True).max()
baseline
```
## Build Random Forest Model/Pipeline
```
# Base Model
model_rf = make_pipeline(
OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(random_state=42)
)
# Parameter distributions for hyperparameter tuning
# Note double underscores __ in keys below
param_distributions = {
'simpleimputer__strategy': ['mean', 'most_frequent'],
'randomforestclassifier__max_depth': range(3,50,5),
'randomforestclassifier__n_estimators': range(10,2000, 10),
'randomforestclassifier__bootstrap': [True, False],
'randomforestclassifier__warm_start': [True, False]
}
tuned_rf = RandomizedSearchCV(
model_rf,
param_distributions=param_distributions,
n_iter=25,
cv=5,
verbose=1,
n_jobs=8
)
model_rf.fit(X_train, y_train)
tuned_rf.fit(X_train, y_train)
```
### Evaluate Random Forest Model/Pipeline
```
print('Tuned RF training best score: ', tuned_rf.best_score_)
print('Tuned RF best parameters: ', tuned_rf.best_params_)
print('Random Forest Training Accuracy', model_rf.score(X_train, y_train))
print('Random Forest Validation Accuracy', model_rf.score(X_test, y_test))
X_train
#y_pred = tuned_rf.predict()

# Confusion matrix of the tuned model's test-set predictions
conf_matrix = confusion_matrix(y_true=y_test, y_pred=tuned_rf.predict(X_test))
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)
display.plot()
```
### Get Gini (impurity-based) feature importances for the model
```
# NOTE(review): despite the section heading, these are impurity-based (Gini)
# feature importances from the fitted forest, not permutation importances
importances = model_rf.named_steps['randomforestclassifier'].feature_importances_
gini_imp = pd.DataFrame(data=importances, index=X_test.columns, columns=['gini_impurity']).sort_values(by='gini_impurity')
gini_imp.tail(10).plot(kind='barh');
```
## Build Gradient Boosting Model/Pipeline (sklearn's `GradientBoostingClassifier`)
```
# Gradient-boosting pipeline (sklearn's GradientBoostingClassifier; the
# 'xgb' variable name is historical -- see the import comment above).
model_xgb = make_pipeline(
    OrdinalEncoder(),
    SimpleImputer(),
    GradientBoostingClassifier(
        # loss left at its default (log-loss, formerly named 'deviance'):
        # the 'deviance' alias was deprecated in sklearn 1.1 and removed
        # in 1.3, and the default is the same objective anyway.
        # learning_rate=0.1,
        n_estimators=500,
        subsample=1,
        max_depth=4,
        random_state=42
    )
)
model_xgb.fit(X_train, y_train)
```
### Evaluate Gradient Boosting Model/Pipeline
```
# NOTE(review): 'XGBoost' in these labels is a misnomer -- model_xgb is
# sklearn's GradientBoostingClassifier (see its pipeline definition)
print('XGBoost Forest Training Accuracy', model_xgb.score(X_train, y_train))
print('XGBoost Forest Validation Accuracy', model_xgb.score(X_test, y_test))

# Confusion matrix of test-set predictions, labeled with the model's classes
conf_matrix = confusion_matrix(y_true=y_test, y_pred=model_xgb.predict(X_test))
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix,
                                 display_labels=model_xgb.classes_)
display.plot()
```
## Build Ridge Regression Model/Pipeline
```
# very similar to RandomForest
model_ridge = make_pipeline(
OneHotEncoder(),
SimpleImputer(),
RidgeClassifierCV(cv=5)
)
model_ridge.fit(X_train, y_train)
```
### Evaluate Ridge Regression Model/Pipeline
```
# Train vs. test accuracy for the ridge pipeline
print('Ridge Regression Training Accuracy', model_ridge.score(X_train, y_train))
print('Ridge Regression Validation Accuracy', model_ridge.score(X_test, y_test))
# model_ridge.named_steps.ridgeclassifiercv.coef_
```
# Module 4 Work
(10/14/2021)
* Make at least 1 partial dependence plot to explain your model.
* Make at least 1 Shapley force plot to explain an individual prediction.
* Share at least 1 visualization (of any type) on Slack!
### imports
| github_jupyter |
# 11.3 Date Ranges, Frequencies, and Shifting(日期范围,频度,和位移)
普通的时间序列通常是不规律的,但我们希望能有一个固定的频度,比如每天,每月,或每15分钟,即使有一些缺失值也没关系。幸运的是,pandas中有一套方法和工具来进行重采样,推断频度,并生成固定频度的日期范围。例如,我们可以把样本时间序列变为固定按日的频度,需要调用resample:
```
import pandas as pd
import numpy as np
from datetime import datetime

# Irregularly spaced observations (dates with gaps between them)
dates = [datetime(2011, 1, 2), datetime(2011, 1, 5),
         datetime(2011, 1, 7), datetime(2011, 1, 8),
         datetime(2011, 1, 10), datetime(2011, 1, 12)]
ts = pd.Series(np.random.randn(6), index=dates)
ts

# Group into fixed daily bins ('D'); resample() returns a lazy Resampler --
# an aggregation (e.g. .mean()) is still needed to produce values
resampler = ts.resample('D')
```
这里的'D'表示按日的频度(daily frequency)。
关于频度(frequency)和重采样(resampling)的转换,会在11.6进行具体介绍,这里我们展示一些基本的用法。
# 1 Generating Date Ranges(生成日期范围)
之前虽然用过,但没有做解释,其实pandas.date_range是用来生成DatetimeIndex的,使用时要根据频度来指明长度:
```
# Daily (the default frequency) DatetimeIndex covering the closed interval
# [2012-04-01, 2012-06-01] -- both endpoints are included.
index = pd.date_range(start='2012-04-01', end='2012-06-01')
index
```
默认,date_range会生成按日频度的时间戳。如果我们只传入一个开始或一个结束时间,还必须传入一个数字来表示时期:
```
pd.date_range(start='2012-04-01', periods=20)
pd.date_range(end='2012-06-01', periods=20)
```
开始和结束的日期,严格指定了用于生成日期索引(date index)的边界。例如,如果我们希望日期索引包含每个月的最后一个工作日,我们要设定频度为'BM'(business end of month,每个月的最后一个工作日,更多频度可以看下面的表格),而且只有在这个日期范围内的日期会被包含进去:
```
pd.date_range('2000-01-01', '2000-12-01', freq='BM')
```
时间序列频度:

date_range会默认保留开始或结束的时间戳:
```
pd.date_range('2012-05-02 12:56:31', periods=5)
```
有些时候我们的时间序列数据带有小时,分,秒这样的信息,但我们想要让这些时间戳全部归一化到午夜(normalized to midnight, 即晚上0点),这个时候要用到normalize选项:
```
# normalize=True snaps every generated timestamp to midnight
nor_date = pd.date_range('2012-05-02 12:56:31', periods=5, normalize=True)
nor_date
nor_date[0]
```
可以看到小时,分,秒全部变为0
# 2 Frequencies and Date Offsets(频度和日期偏移)
pandas中的频度由一个基本频度(base frequency)和一个乘法器(multiplier)组成。基本频度通常用一个字符串别名(string alias)来代表,比如'M'表示月,'H'表示小时。对每一个基本频度,还有一个被称之为日期偏移(date offset)的对象。例如,小时频度能用Hour类来表示:
```
from pandas.tseries.offsets import Hour, Minute

# Hour() is the date-offset object behind the 'H' frequency alias
hour = Hour()
hour
```
通过传入一个整数,我们可以定义一个乘以偏移的乘法(a multiple of an offset):
```
four_hours = Hour(4)
four_hours
```
在很多情况下,我们不需要创建这些对象,而是使用字符串别名,比如'H'或'4H'。在频度前加一个整数,就能作为一个乘法器:
```
pd.date_range('2000-01-01', '2000-01-03 23:59', freq='4H')
```
很多偏移(offset)还能和加法结合:
```
Hour(2) + Minute(30)
```
同样的,我们可以传入频度字符串,比如'1h30min',这种表达也能被解析:
```
pd.date_range('2000-01-01', periods=10, freq='1h30min')
```
### Week of month dates(月中的第几周日期)
一个有用的类(class)是月中的第几周(Week of month),用WOM表示。例如我们想得到每个月的第三个星期五:
```
# 'WOM-3FRI': the third Friday of each month within the given bounds
rng = pd.date_range('2012-01-01', '2012-09-01', freq='WOM-3FRI')
rng
list(rng)
```
# 3 Shifting (Leading and Lagging) Data (偏移(提前与推后)数据)
偏移(shifting)表示按照时间把数据向前或向后推移。Series和DataFrame都有一个shift方法实现偏移,索引(index)不会被更改:
```
# Month-end ('M') series of 4 random values
ts = pd.Series(np.random.randn(4),
               index=pd.date_range('1/1/2000', periods=4, freq='M'))
ts

# shift() moves the values, not the index; vacated positions become NaN
ts.shift(2)
ts.shift(-2)
```
当我们进行位移的时候,就像上面这样会引入缺失值。
shift的一个普通的用法是计算时间序列的百分比变化,可以表示为:
```
ts / ts.shift(1) - 1
```
因为普通的shift不会对index进行修改,一些数据会被丢弃。因此如果频度是已知的,可以把频度传递给shift,这样的话时间戳会自动变化:
```
ts
ts.shift(2)
# Passing freq shifts the timestamps themselves instead of the values
ts.shift(2, freq='M')
```
其他一些频度也可以导入,能让我们前后移动数据:
```
ts.shift(3, freq='D')
ts.shift(1, freq='90T')
```
T表示分钟。
### Shifting dates with offsets(用偏移量来移动日期)
pandas的日期偏移(date offset)能被用于datetime或Timestamp对象:
```
from pandas.tseries.offsets import Day, MonthEnd

now = datetime(2011, 11, 17)
# Offsets combine with plain datetime/Timestamp objects via arithmetic
now + 3 * Day()
```
如果我们添加一个像MonthEnd这样的anchored offset(依附偏移;锚点位置),日期会根据频度规则进行递增:
```
now + MonthEnd()
now + MonthEnd(2)
```
依附偏移可以让日期向前或向后滚动,利用rollforward和rollback方法:
```
offset = MonthEnd()
# Explicitly roll a date forward/backward to the nearest anchor (month end)
offset.rollforward(now)
offset.rollback(now)
```
一个比较创造性的日期偏移(date offset)用法是配合groupby一起用:
```
# 20 values at a 4-day frequency
ts = pd.Series(np.random.randn(20),
               index=pd.date_range('1/15/2000', periods=20, freq='4d'))
ts
# Group each timestamp by the month-end it rolls forward to, then average
ts.groupby(offset.rollforward).mean()
```
一个简单且快捷的方式是用resample(11.6会进行更详细的介绍):
```
ts.resample('M').mean()
```
| github_jupyter |
```
import matplotlib
from matplotlib.pylab import *
%matplotlib inline
matplotlib.rcParams['font.size'] = 16
import json

# Load one JSON object per line; the first line is sliced off below
# (assumed to be a header/placeholder record -- confirm against the file)
repos = []
with open('data/repos_with_annotation_infos.json') as input_file:
    for line in input_file:
        repos.append(json.loads(line))
repos = repos[1:]

import math
# Side length of the smallest NxN grid holding one cell per repo
N = math.ceil(sqrt(len(repos)))

import numpy as np
# Grids are initialized to 1 so LogNorm() below stays valid for empty cells
all_annotations = np.zeros((N, N)) + 1
inline_annotations = np.zeros((N, N)) + 1
comment_annotations = np.zeros((N, N)) + 1
pyi_annotations = np.zeros((N, N)) + 1

# Fill the grids column-major (row i%N, column i//N) with 1 + per-repo totals
for i, repo in enumerate(repos):
    all_annotations[i%N,i//N] = 1 + sum([file['annotation_hints']+file['return_hints']+file['type_comments']+file['typing_imports']+(1 if file['pyi_file'] else 0) for file in repo['files']])
    inline_annotations[i%N,i//N] = 1 + sum([file['annotation_hints']+file['return_hints'] for file in repo['files']])
    comment_annotations[i%N,i//N] = 1 + sum([file['type_comments'] for file in repo['files']])
    pyi_annotations[i%N,i//N] = 1 + sum([(1 if file['pyi_file'] else 0) for file in repo['files']])

# Repos with more than 0 / 10 / 100 annotations (grids carry a +1 offset)
print("All annotations:",np.sum(all_annotations > 1))
print("Inline annotations:",np.sum(inline_annotations > 1))
print("Comment annotations:",np.sum(comment_annotations > 1))
print("Pyi annotations:",np.sum(pyi_annotations > 1))
print("All annotations:",np.sum(all_annotations > 11))
print("Inline annotations:",np.sum(inline_annotations > 11))
print("Comment annotations:",np.sum(comment_annotations > 11))
print("Pyi annotations:",np.sum(pyi_annotations > 11))
print("All annotations:",np.sum(all_annotations > 101))
print("Inline annotations:",np.sum(inline_annotations > 101))
print("Comment annotations:",np.sum(comment_annotations > 101))
print("Pyi annotations:",np.sum(pyi_annotations > 101))

from matplotlib.colors import LogNorm
from matplotlib.ticker import LogFormatterMathtext

# Heatmap of per-repo .pyi counts on a log color scale.
# NOTE(review): interpolation=None means "use the rcParam default"; if no
# interpolation was intended, the string 'none' is required -- confirm.
figure(figsize=(14,14))
ax = gca()
imshow(pyi_annotations, interpolation=None, aspect='equal', norm=LogNorm())

# Major ticks
ax.set_xticks(np.arange(0, N, 1));
ax.set_yticks(np.arange(0, N, 1));

# Labels for major ticks
ax.set_xticklabels([]);
ax.set_yticklabels([]);

# Minor ticks (offset by 0.5 so the grid lines fall between cells)
ax.set_xticks(np.arange(-.5, N, 1), minor=True);
ax.set_yticks(np.arange(-.5, N, 1), minor=True);
ax.grid(which='minor', color='black', linestyle='-', linewidth=2)
colorbar(orientation='horizontal',format=LogFormatterMathtext(), fraction=0.0465, pad=0.03)
title("Pyi files")

# Total annotation count per repo, sorted descending
repos_by_count = {}
for repo in repos:
    repos_by_count[repo['repo']['full_name']] = sum([file['annotation_hints']+file['return_hints']+file['type_comments']+file['typing_imports']+(1 if file['pyi_file'] else 0) for file in repo['files']])
sorted_repos = sorted(repos_by_count.items(), key = lambda x:-x[1])
print("\n".join(["{}: {}".format(x[0],x[1]) for x in sorted_repos[:20]]))

# Bar chart of the top-20 repos.
# NOTE(review): N is reused here as the bar count, shadowing the grid side
# length computed above.
figure(figsize=(18,4))
N = 20
x = list(range(N))
bar(x, [repo[1] for repo in sorted_repos[:N]])
ax = gca()
ax.set_xticks(x)
ax.set_xticklabels([repo[0] for repo in sorted_repos[:N]], rotation=90);
xlim(left=-0.5, right=N-0.5);
title("Python projects with the most type hints")
sorted_repos[:20]
```
| github_jupyter |
# DSE Course 1, Session 4: Visualization
**Instructor**: Wesley Beckner
**Contact**: wesleybeckner@gmail.com
<br>
---
<br>
In this session we'll be discussing visualization strategies. And, more specifically, how we can manipulate our `pandas dataframes` to give us the visualizations we desire. Before we get there, however, we're going to start by introducing a python module called Matplotlib.
<br>
---
## 4.1 Visualization with Matplotlib
Lets start by importing our `matplotlib` module.
Pyplot is a module of Matplotlib that provides functions to add plot elements like text, lines, and images. typically we import this module like so
```
import matplotlib.pyplot as plt
```
where `plt` is shorthand for the `matplotlib.pyplot` library
```
import matplotlib.pyplot as plt
```
### 4.1.1 The Basics
Matplotlib is strongly object oriented and its principal objects are the **_figure_** and the **_axes_**. But before we get into that I want us to explore the _most basic_ use case. In this basic use case, we don't declare the `figure` and `axes` objects explicitly, but rather work directly in the `pyplot` namespace.
I'm going to create a list of x and y values and plot them with `pyplot`
```
# Fibonacci values plotted directly in the implicit pyplot namespace
x = [1,2,3,4,5,6,7]
y = [1,1,2,3,5,8,13]
plt.plot(x,y)
```
```
x = [1,2,3,4,5,6,7]
y = [1,1,2,3,5,8,13]
plt.plot(x, y)
```
We can label the axes of our figure using the `xlabel` and `ylabel` attributes, and label our title using the `title` attribute.
```
plt.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13])
# Title/axis-label helpers from the pyplot namespace act on the current axes
plt.title('The Fibonacci Sequence')
plt.xlabel('Order in Sequence')
plt.ylabel('Value')
```
```
plt.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13])
plt.title('The Fibonacci Sequence')
plt.xlabel('Order in Sequence')
plt.ylabel('Value')
```
> What do we think about the out-of-the-box formatting of `pyplot`? What are some things we can do to make it better? Could we make it bigger? Perhaps different dimensions? Does anyone recognize that default line color?
Before we make any changes, let's become acquainted with the more appropriate way to work in `matplotlib.pyplot`. In this formality, we explicitly create our `figure` and `axes` objects.
```
# This is one way to do it...
# plt.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13])

# but we can also do it this way, concretely declaring the figure, axes
# objects directly

# We declare the figure
fig = plt.figure()
# and then add axes (111 = 1x1 grid, first subplot)
ax = fig.add_subplot(111)
```
You can think of the `figure` as a canvas, where you specify dimensions and possibly unifying attributes of its contents, like, background color, border, etc. You use the canvas, the `figure`, to containerize your other objects, primarily your `axes`, and to save its contents with `savefig`.
You can think of an `axes` as the actual graphs or plots themselves. And when we declare these objects, we have access to all the methods of `matplotlib.pyplot` (e.g. `.plot`, `.scatter`, `.hist` etc.) You can place many of these `axes` into the `figure` container in a variety of ways.
The last component of a `pyplot` figure are the `axis`, the graphical axis we typically think of.
```
# This is one way to do it...
# plt.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13])
# but we can also do it this way, concretely declaring the figure, axes
# objects directly
# We declare the figure
fig = plt.figure()
# and then add axes
ax = fig.add_subplot(111)
```
`plt.subplots` returns a `figure` and `axes` object(s) together:
```
### We can also do it in 1 line
fig, ax = plt.subplots(1, 1, figsize=(8,8))
```
and we'll go ahead and adjust the figure size with the parameter `figsize` and set it equal to a tuple containing the `x` and `y` dimensions of the figure in inches.
```
### We can also do it in 1 line
fig, ax = plt.subplots(1, 1, figsize=(10,5))
```
To recap, by convention we typically separate our plots into three components: a Figure, its Axes, and their Axis:
* **_Figure_**: It is a whole `figure` which may contain one or more than one `axes` (plots). You can think of a `figure` as a canvas which contains plots.
* **_Axes_**: It is what we generally think of as a plot. A `figure` can contain many `axes`. It contains two or three (in the case of 3D) `axis` objects. Each `axes` has a title, an x-label and a y-label.
* **_Axis_**: They are the traditional `axis` we think of in a graph and take care of generating the graph limits.
<br>
**Example:**
> `fig, ax = plt.subplots(1, 1, figsize=(8,8))` is creating the figure (`fig`) and axes (`ax`) explicitly, and depending on whether we create 2D or 3D plots, the axes will contain 2-3 `axis`.
#### Exercise 1: Adjust Figure Size
<ol type="a">
<li>create a <code>figure</code> and <code>axes</code> using <code>plt.subplots()</code>. adjust the figure size to be 6 inches (width) by 3 inches (height). Plot the values of the fibonacci sequence we defined earlier</li>
<li>(Bonus) Repeat, this time inverting the y-values using list splicing</li>
<li>(Bonus) Explore other <code>plt.plot()</code> attributes using the built in Colab tooltip</li>
</ol>
Plotting building blocks for Exercise 1:
* `plt.subplots()`
* `ax.plot()`
* slicing `[::]`
```
x = [1,2,3,4,5,6,7]
y = [1,1,2,3,5,8,13]
```
```
# Cell for Exercise 1
x = [1,2,3,4,5,6,7]
y = [1,1,2,3,5,8,13]
```
### 4.1.2 Manipulating Plot Attributes
We can manipulate many parameters of a `figure`'s `axes`: `marker`, `linestyle`, and `color`, to name a few. Each of these parameters takes string values.
```
fig, ax = plt.subplots(1,1, figsize=(5,5))
# Two series on the same axes, distinguished by marker/linestyle/color
ax.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13], marker='^', linestyle='--',
        color='tab:blue')
ax.plot([1,2,3,4,5,6,7],[0,3,8,6,5,4,1], marker='.', linestyle='-.',
        color='#59A41F')    # colors can also be hex strings
ax.set_title('My Random Values')
ax.set_xlabel('Order in Sequence')
ax.set_ylabel('Value')
```
List of marker styles
```
{'': 'nothing',
' ': 'nothing',
'*': 'star',
'+': 'plus',
',': 'pixel',
'.': 'point',
0: 'tickleft',
'1': 'tri_down',
1: 'tickright',
10: 'caretupbase',
11: 'caretdownbase',
'2': 'tri_up',
2: 'tickup',
'3': 'tri_left',
3: 'tickdown',
'4': 'tri_right',
4: 'caretleft',
5: 'caretright',
6: 'caretup',
7: 'caretdown',
'8': 'octagon',
8: 'caretleftbase',
9: 'caretrightbase',
'<': 'triangle_left',
'>': 'triangle_right',
'D': 'diamond',
'H': 'hexagon2',
'None': 'nothing',
None: 'nothing',
'P': 'plus_filled',
'X': 'x_filled',
'^': 'triangle_up',
'_': 'hline',
'd': 'thin_diamond',
'h': 'hexagon1',
'o': 'circle',
'p': 'pentagon',
's': 'square',
'v': 'triangle_down',
'x': 'x',
'|': 'vline'}
```
List of line styles
```
{'': '_draw_nothing',
' ': '_draw_nothing',
'-': '_draw_solid',
'--': '_draw_dashed',
'-.': '_draw_dash_dot',
':': '_draw_dotted',
'None': '_draw_nothing'}
```
List of base colors
```
{'b': (0, 0, 1),
'c': (0, 0.75, 0.75),
'g': (0, 0.5, 0),
'k': (0, 0, 0),
'm': (0.75, 0, 0.75),
'r': (1, 0, 0),
'w': (1, 1, 1),
'y': (0.75, 0.75, 0)}
```
list access
```
import matplotlib as mp
mp.markers.MarkerStyle.markers
mp.lines.lineStyles
mp.colors.BASE_COLORS
```
Taking these long lists of available parameters, I'm going to play around with a few and see how they appear in our plot.
```
import matplotlib as mp
mp.markers.MarkerStyle.markers
# mp.lines.lineStyles
# mp.colors.BASE_COLORS
plt.plot([1,2,3], [4,5,6])
plt.title('title')
ax.title
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13],
marker='s',
linestyle=':',
color='tab:blue')
ax.plot([1,2,3,4,5,6,7],[0,3,8,6,5,4,1], marker='.',
linestyle='-.', color='#3E1515')
ax.set_title('My Random Values')
ax.set_xlabel('Order in Sequence')
ax.set_ylabel('Value')
```
If we want to make a *scatter plot* without any lines at all, we set the `linestyle` to an empty string
```
fig, ax = plt.subplots(1,1, figsize=(5,5))
plt.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13], marker='*', linestyle='', color='tab:green')
ax.set_title('The Fibonacci Sequence')
ax.set_xlabel('Order in Sequence')
ax.set_ylabel('Value')
```
```
fig, ax = plt.subplots(1,1, figsize=(5,5))
plt.plot([1,2,3,4,5,6,7],[1,1,2,3,5,8,13], marker='h', linestyle='', ms=10,
color='tab:green')
ax.set_title('The Fibonacci Sequence')
ax.set_xlabel('Order in Sequence')
ax.set_ylabel('Value')
```
#### Exercise 2: Choose Lines, Colors, and Markers
<ol type="a">
<li>Recreate the "My Random Values" plot with a variety of markers, linestyles, and colors.</li>
<li>(Bonus) Can you set the markers and lines to colors belonging to the Tableau 20? Try it with and without the hex values!</li>
</ol>
Plotting building blocks for Exercise 2:
* `marker=''`
* `linestyle=''`
* `color=''`
<img src="https://jrnold.github.io/ggthemes/reference/tableau_color_pal-2.png" width=800>
```
# Cell for Exercise 2
### DO NOT CHANGE BELOW ###
x = [1,2,3,4,5,6,7]
y1 = [1,1,2,3,5,8,13]
y2 = [0,3,8,6,5,4,1]
y3 = [10,15,12,9,3,2,1]
y4 = [2,4,2,1,2,4,5]
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.set_title('My Random Values')
ax.set_xlabel('Order in Sequence')
ax.set_ylabel('Value')
### END OF DO NOT CHANGE ###
### change these lines w/ marker, linestyle, color attributes
ax.plot(x,y1)
ax.plot(x,y2)
ax.plot(x,y3)
ax.plot(x,y4)
```
```
# Cell for Exercise 2
### DO NOT CHANGE BELOW ###
x = [1,2,3,4,5,6,7]
y1 = [1,1,2,3,5,8,13]
y2 = [0,3,8,6,5,4,1]
y3 = [10,15,12,9,3,2,1]
y4 = [2,4,2,1,2,4,5]
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.set_title('My Random Values')
ax.set_xlabel('Order in Sequence')
ax.set_ylabel('Value')
### END OF DO NOT CHANGE ###
### change these lines w/ marker, linestyle, color attributes
ax.plot(x,y1)
ax.plot(x,y2)
ax.plot(x,y3)
ax.plot(x,y4)
```
### 4.1.3 Subplots
Remember that `fig, ax = plt.subplots()` statement we used earlier? We're now going to use that same approach, but this time the second variable that is returned (what we call `ax` in the cell below) is no longer an `axes` object! Instead, it is an `array` of `axes` objects.
I'm also going to introduce another module, **_random_**, to generate some random values
```
import random
fig, ax = plt.subplots(2, 2, figsize=(10,10))
ax[0,1].plot(range(10), [random.random() for i in range(10)],
c='tab:orange')
ax[1,0].plot(range(10), [random.random() for i in range(10)],
c='tab:green')
ax[1,1].plot(range(10), [random.random() for i in range(10)],
c='tab:red')
ax[0,0].plot(range(10), [random.random() for i in range(10)],
c='tab:blue')
```
**_quick note:_** In the above cell we use something called **_list comprehension_** to quickly populate a list of objects (in this case those objects are floats). We won't dive too deeply into that now, but you can think of list comprehension as a more concise way of writing a `for()` loop. In future cases where list comprehension appears in this notebook I will include code snippets of the corresponding for loop.
```
import random
# this list comprehension
print([random.random() for i in range(10)])
# produces the same output as this for loop
ls = []
for i in range(10):
ls.append(random.random())
print(ls)
```
```
import random
random.seed(42)
# this list comprehension
print([random.random() for i in range(10)])
random.seed(42)
# produces the same output as this for loop
ls = []
for i in range(10):
ls.append(random.random())
print(ls)
```
The second thing we'll need to talk about is the grid of the ax object
```
fig, ax = plt.subplots(2,2)
ax.shape
ax
```
This is exactly like accessing a matrix:
`matrix[row,column] = element`
we have the pandas equivalent:
`df.iloc[0,1] = element`
```
import random
fig, ax = plt.subplots(2, 2, figsize=(10,10))
ax[0,0].plot(range(10), [random.random() for i in range(10)],
c='tab:blue')
ax[0,1].plot(range(10), [random.random() for i in range(10)],
c='tab:orange')
ax[1,0].plot(range(10), [random.random() for i in range(10)],
c='tab:green')
ax[1,1].plot(range(10), [random.random() for i in range(10)],
c='tab:red')
```
#### Exercise 3: Subplots
<ol type="a">
<li>Create a 2x1 <code>figure</code> where the first <code>axes</code> is a plot of the fibonacci sequence up to the 10th sequence value and the second <code>axes</code> is a plot of 10 random integers with values between 10 and 20 (exclusive). Use different markers, colors, and lines for each plot.</li>
<li>Since the focus of this tutorial is on visualization, I'll go ahead and provide my own code for generating random integers between 10 and 20 (exclusive). If you have extra time, prove to yourself that this code works!</li>
<li>(remember docstrings are your friend!)</li>
</ol>
```
import random
[round(random.random() * 8) + 11 for i in range(10)]
```
---
```
# Cell for Exercise 3
### DO NOT CHANGE ###
import random
# create the fig, ax objects
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
# generate x, y1, and y2
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y1 = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
y2 = [round(random.random() * 8) + 11 for i in range(10)]
### END OF DO NOT CHANGE ###
# Note: no skeleton code here is given for the figure, I want you to write this
# code out yourself. Here is pseudo-code to get you started:
# plot the left axes, set the title and axes labels
# title: Fibonacci Sequence; xlabel: x values; ylabel: y values
### YOUR CODE HERE ###
# plot the right axes, set the title and axes labels
# title: My Random Values; xlabel: x values; ylabel: y values
### YOUR CODE HERE ###
```
```
# Cell for Exercise 3
### DO NOT CHANGE ###
import random
# create the fig, ax objects
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
# generate x, y1, and y2
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y1 = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
y2 = [round(random.random() * 8) + 11 for i in range(10)]
### END OF DO NOT CHANGE ###
# Note: no skeleton code here is given for the figure, I want you to write this
# code out yourself. Here is pseudo-code to get you started:
# plot the left axes, set the title and axes labels
# title: Fibonacci Sequence; xlabel: x values; ylabel: y values
### YOUR CODE HERE ###
# plot the right axes, set the title and axes labels
# title: My Random Values; xlabel: x values; ylabel: y values
### YOUR CODE HERE ###
```
## 4.2 Visualization with Pandas
Now let's discover the power of `pandas` plots! While the objective of the exercises may be to make certain visualizations, throughout our experience we'll be using `pandas` tricks to create the data slices we need, so the following is a mix of new plotting material and pandas data selection/slicing material.
We're also going to import a new module called `seaborn`. It is another plotting library based off `matplotlib`. We're going to use it to pull some stylistic features.
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_boston
```
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ipywidgets import interact
```
The following few cells should look familiar from last tutorial session, we're going to use some essential `pandas` methods to get a general sense of what our dataset looks like
> There are many ways to construct a dataframe; as an exercise, you might think of other ways to perform that task here.
```
df = pd.read_csv("https://raw.githubusercontent.com/wesleybeckner/ds_for_engineers/main/data/wine_quality/winequalityN.csv")
df.describe()
```
```
# In your subsequent time with pandas you'll discover that there are a host of
# ways to populate a dataframe. In the following, I can create a dataframe
# simply by using read_csv because the data is formated in a way that
# pandas can easily intuit.
df = pd.read_csv("https://raw.githubusercontent.com/wesleybeckner/"\
"ds_for_engineers/main/data/wine_quality/winequalityN.csv")
# we check the shape of our data to see if its as we expect
df.shape
# we check the column names
df.columns
```
Lets start by looking at basic description of our data. This gives us a sense of what visualizations we can employ to begin understanding our dataset.
```
df.describe()
```
The first thing we notice is that all the data is numerical that we can pull standard statistical information from (mean, std, max, etc.)
What kind of visualizations do you think of with data like this?
I tend to think of scatter, box, and histogram plots for numerical data and bar or pie charts for categorical data.
### 4.2.1 Scatter Plots
The way to generate a plot in the fewest keystrokes is to simply call the `plot()` method within the `dataframe` object
```
df.plot()
```
```
# the simplest plot we can make is the following so let's start here.
# We can generate a figure simply by using the plot() method of our dataframe
# object.
df.plot()
```
This gives us a raw view of the data, but here I'd like to introduce some standard plotting steps: recall the `fig`, `ax` format we used previously.
```
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df.plot(ax=ax)
```
```
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df.plot(ax=ax)
```
To make this into a scatter plot, we set the linestyle (or ls) to an empty string, and select a marker type.
```
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df.plot(ax=ax, linestyle='', marker='.')
```
```
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df.plot(ax=ax, ls='', marker='.', ms=2)
```
#### Exercise 4: Scatter Plots with Pandas
We have some values that range in the single digits and some that are 10+. Make two separate scatter plots within the same figure, stacked on top of each other:
* Top plot: scatter of all feature data that ranges from 0-10
* Bottom plot: scatter of all feature data 10+ range
Logic building blocks for Exercise 4:
* `df.columns`
* `all()`
* `any()`
* `for`
* `if`
* `in`
* `>`, `>=`
In the following code block, use these two sets of column names to create your figure:
```
low_features = [col for col in df.columns if all(df[col] < 10)]
high_features = [col for col in df.columns if any(df[col] >= 10)]
```
```
# Cell for Exercise 4
low_features = [col for col in df.select_dtypes('number').columns if all(df[col] < 10)]
high_features = [col for col in df.select_dtypes('number').columns if any(df[col] >= 10)]
### YOUR CODE HERE ###
```
### 4.2.2 Bar Plots
One of the more common methods of depicting aggregate data is bar plots. We almost always see these kinds of plots used to display and compare between averages, but sometimes between singular data values as well.
```
fig, ax = plt.subplots(1, 1, figsize=(5,5))
df.apply(pd.Series.mean).plot(kind='barh', ax=ax)
```
```
fig, ax = plt.subplots(1, 1, figsize=(5,5))
df.select_dtypes('number').apply(pd.Series.mean).plot(kind='barh', ax=ax)
fig, ax = plt.subplots(1, 1, figsize=(5,5))
df.groupby('type').apply(pd.Series.mean).T.plot(kind='barh', ax=ax)
```
### 4.2.3 Box Plots
Maybe we thought it was useful to see the feature data in the scatter plots (we can visually scan for correlations between feature sets, check outliers, etc.), but perhaps more instructive is a boxplot. A box plot or boxplot is a statistical method for graphically depicting aggregate data through their quartiles. It will be useful to inspect the [boxplot API](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html) to see the default behavior for representing the quartiles and outliers.
```
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df.plot(kind='box', ax=ax)
```
```
# we call the boxplot method by setting the kind parameter to 'box' within
# dataframe.plot()
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df.plot(kind='box', ax=ax, rot=90)
```
### 4.2.4 Histograms
What are some other kinds of plots we can make? A good one to be aware of is the histogram.
```
# Histogram of the wine 'density' feature.
# NOTE: the original cell plotted df['RAD'], a column from the Boston housing
# dataset (an earlier version of this tutorial) that does not exist in the
# wine dataframe and raises a KeyError. 'density' matches the solution cell.
plt.title('Demo Hist')
plt.xlabel('Density')
plt.ylabel('Frequency')
plt.hist(df['density'])
```
```
plt.title('Demo Hist')
plt.xlabel('Quality')
plt.ylabel('Frequency')
plt.hist(df['density'])
df['quality'].plot(kind='hist')
```
### 4.2.5 Kernel Density Estimates
Another useful plot type for data analysis is the kernel density estimate. You can think of this plot as exactly like a histogram, except instead of creating bins in which to accrue datapoints, you deposit a Gaussian distribution around every datapoint in your dataset. By this mechanism, you avoid the bias you would otherwise introduce when predefining bin sizes and locations in a histogram.
```
fig, ax = plt.subplots(1, 1, figsize = (10, 5))
df['quality'].plot(kind='kde', ax=ax)
```
#### 4.2.5.1 Skew and Tailedness
While we're on the topic of KDEs/histograms and other statistical plots, this is a convenient time to talk about skew and tailedness or, otherwise known as kurtosis
* `df.skew()` indicates the skewdness of the data
* `df.kurtosis()` indicates the tailedness of the data
```
# from scipy.stats import skewnorm
from ipywidgets import FloatSlider
slider = FloatSlider(
value=0.5,
min=0.5,
max=5,
step=0.5,
description='Shape:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f'
)
import numpy as np
def inspect_kurt_skew(a=slider):
    """Draw a KDE of 1000 Weibull samples and print their skew and kurtosis.

    The Weibull distribution also has a scale (lambda) hyperparameter; here
    we only vary the shape parameter, which is sometimes called a or k.
    """
    fig, axis = plt.subplots(1, 1, figsize=(5, 5))
    samples = pd.DataFrame(np.random.weibull(a, 1000))
    samples[0].plot(kind='kde', ax=axis)
    print("skew: {:.2f}".format(samples[0].skew()))
    print("kurtosis: {:.2f}".format(samples[0].kurt()))
interact(inspect_kurt_skew)
```
### 4.2.6 Correlation Plots
Often, we'll want to quantify the strength of a relationship between input variables. We can do this by calculating correlations.
We won't go into great detail here about how Pearson's correlation is calculated, but the StatQuest videos on this subject are here for reference (and are really good... if you can stomach Starmer's
humor)
* [StatQuest: Correlation](https://youtu.be/xZ_z8KWkhXE?t=822)
* [StatQuest: Covariance](https://www.youtube.com/watch?v=qtaqvPAeEJY&ab_channel=StatQuestwithJoshStarmer)
The main takeaway is that pearson's correlation ranges from -1 to 1 and indicates how positively or negatively correlated the variables in question are. For our purposes, this can give insight into what variables will be important in our machine learning model.
We can get the [pearson's correlation](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) between all the input features using the `dataframe.corr()` method.

Fig: pearson's correlation value and corresponding scatter plot of feature-x and feature-y
```
df.corr()
```
So we have this raw table of pearsons correlations between each of our input features, how do we and how should we turn this into a plot?
Typically we'd use a heat map on a feature vs feature grid to view this kind of data. In the following I'm going to use some `numpy` methods you may not have seen before. Links to the documentation for these methods are at the end of the notebook.
```
import numpy as np
fig, ax = plt.subplots(1, 1, figsize = (10,10))
# create a mask to white-out the upper triangle
mask = np.triu(np.ones_like(df.corr(), dtype=bool))
# we'll want a divergent colormap for this so our eye
# is not attracted to the values close to 0
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(df.corr(), mask=mask, cmap=cmap, ax=ax)
```
```
sns.heatmap(df.corr())
import numpy as np
fig, ax = plt.subplots(1, 1, figsize = (10,10))
# create a mask to white-out the upper triangle
mask = np.triu(np.ones_like(df.corr(), dtype=bool))
# we'll want a divergent colormap for this so our eye
# is not attracted to the values close to 0
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(df.corr(), mask=mask, cmap=cmap, ax=ax)
```
What do we notice?
density and alcohol content are negatively correlated, no surprise there. What about the quality field? If we were to try to predict quality, what other chemical features would we pay close attention to?
Alcohol content has a strong positive correlation with quality; chlorides and volatile acidity a somewhat positive correlation. Let's take a look at alcohol vs quality
```
# what can we conclude about this data view? Outliers?
fig, ax = plt.subplots(1,1,figsize=(7,7))
ax.plot(df['alcohol'], df['quality'], ls='', marker='.')
ax.set_title('alcohol vs quality')
ax.set_xlabel('% Alcohol')
ax.set_ylabel('quality')
```
What do we notice about this? The quality is binned, so it's really hard to see any trend here! A boxplot would be better!
```
fig, ax = plt.subplots(1,1,figsize=(10,5))
df.boxplot(column='alcohol', by='quality', ax=ax)
```
We can continue to change our code by hand to iterate through whatever pairwise combinations of input variables we are interested in... but there is a better way to do this! Introducing... IpyWidgets!
## 4.3 Visualization with IpyWidgets
### 4.3.1 Interact
Here we're going to introduce a very basic use case of IPython's widgets using `interact`. The `interact` method (`ipywidgets.interact`) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython’s widgets.
```
from ipywidgets import interact
def my_plot(col=df.select_dtypes('number').columns):
    """Draw a boxplot of the chosen numeric feature, grouped by wine quality.

    interact() turns the *col* default (a list-like of column names) into a
    dropdown whose first entry is pre-selected.
    """
    figure, axis = plt.subplots(1, 1, figsize=(10, 5))
    df.boxplot(column=col, by='quality', ax=axis)
```
```
def my_plot(col=df.select_dtypes('number').columns):
    """Boxplot of one numeric column of df, split out by the 'quality' field."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
    df.boxplot(column=col, by='quality', ax=ax)
```
After defining our function that returns our plot, and defining input parameters for the fields we would like to interact with, we call our function with `interact`
```
interact(my_plot)
```
```
interact(my_plot)
```
Let's break this down. Normally, I would just set my y-variable to a value, so that when I call my function, my figure is generated with the corresponding data field:
```
def my_plot(col='alcohol'):
    """Boxplot of *col* (default 'alcohol') grouped by wine quality."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
    df.boxplot(column=col, by='quality', ax=ax)

my_plot()
```
Instead, we want to give `interact()` a list of values for the user to select from, this is the difference between a regular function, and one we might feed into interact.
```
y = ['fixed acidity',
'volatile acidity',
'citric acid',
'residual sugar',
'chlorides',
'free sulfur dioxide',
'total sulfur dioxide',
'density',
'pH',
'sulphates',
'alcohol',
'quality']
```
```
list(df.select_dtypes('number').columns)
```
#### Exercise 5: IpyWidgets and Figures in Functions
In the previous section we created a single dropdown menu to select our y variable for our plot. Here, we would like to expand this functionality to the x variable.
* Return a figure from your function and update the xlabel, ylabel, and title of your plot based on the input fields (x and y)
* add a docstring to your function
* checkout sklearn for inspiration on [docstrings](https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/linear_model/_logistic.py)
* Bonus: what are some other input parameters than we can provide our figure function?
Building blocks for Exercise 5:
* `ax.set_title()`
* `ax.set_xlabel()`
* `ax.set_ylabel()`
```
# Code block for Exercise 5
from ipywidgets import interact
def my_plot(col=df.select_dtypes('number').columns,
            by=df.select_dtypes('number').columns[::-1]):
    '''
    Interactive plot of one numeric wine feature against another.

    Parameters
    ----------
    col: list-like of str
        numeric column names offered in the x-axis dropdown; interact()
        pre-selects the first entry
    by: list-like of str
        the same numeric columns in reversed order, for the y-axis dropdown

    Returns
    -------
    fig: matplotlib figure
        (exercise) return the figure and set its title/labels in the
        "YOUR CODE HERE" section below
    '''
    ### DO NOT CHANGE BELOW ###
    fig, ax = plt.subplots(1,1,figsize=(10,5))
    if by == 'quality':
        df.boxplot(column=col, by=by, ax=ax)
    else:
        df.plot(x=col, y=by, ls='', marker='.', ax=ax)
    ### END OF DO NOT CHANGE ###
    ### YOUR CODE HERE ###
```
```
# Code block for Exercise 5
from ipywidgets import interact
def my_plot(col=df.select_dtypes('number').columns,
            by=df.select_dtypes('number').columns[::-1]):
    '''
    Interactive feature-vs-feature plot for the wine dataframe.

    Parameters
    ----------
    col: list-like of str
        numeric columns for the x-axis; interact() renders this as a
        dropdown whose default is the first item
    by: list-like of str
        numeric columns (reversed) for the y-axis dropdown

    Returns
    -------
    fig: matplotlib figure
        a boxplot when 'quality' is chosen for *by*, a scatter otherwise
    '''
    ### DO NOT CHANGE BELOW ###
    fig, ax = plt.subplots(1,1,figsize=(10,5))
    if by == 'quality':
        df.boxplot(column=col, by=by, ax=ax)
    else:
        df.plot(x=col, y=by, ls='', marker='.', ax=ax)
    ### END OF DO NOT CHANGE ###
    ### YOUR CODE HERE ###
interact(my_plot)
```
## 4.4 Lab for Visualization
> **_If you would like a more difficult breakout activity, skip forward to the advanced users cell_**
We're going to continue with this idea of interactive plotting with IpyWidgets. The objective is to provide our users with a little more intuitive interface than what we've covered so far. Rather than selecting ad hoc through x, y values, the user will select from a list of x, y pairs that are sorted by the absolute value of their Pearson's correlation. For example, the first five selection options will be:
* free sulfur dioxide, total sulfur dioxide, 0.721
* density, alcohol, -0.687
* residual sugar, density, 0.552
* residual sugar, total sulfur dioxide, 0.496
* fixed acidity, density, 0.459
When the dropdown is selected, it should update the plotted data, along with the x and y axes labels and plot title.
Example output:
<img src="https://raw.githubusercontent.com/wesleybeckner/ds_for_engineers/main/assets/C1_S4_breakout_solution.JPG" width=500>
**Bonus**
If you finish the lab, format your output so that the left side of your image contains the raw feature 1 vs feature 2 plot and the right side contains two subplots of the KDEs of the data.
To get you started, I've created the DataFrame needed to make the dropdown menu.
```
# create a list of lists. The sublist contains feature 1 v feature 2 and their
# pearsons correlation. Think of this: you would like to loop through df.corr()
# and store those values in a [[feature 1, feature 2, corr],...[...]] list
corrs = []
for i in range(df.corr().shape[0]):
for j in range(i+1,df.corr().shape[0]):
corrs.append([df.corr().index[i],df.corr().columns[j],df.corr().iloc[i,j]])
# Now create a new dataframe from this list of lists
# sort this dataframe by the absolute value of the pearsons correlation.
# you will need to reset the index by these sorted values.
dff = pd.DataFrame(corrs, columns=['Feature 1', 'Feature 2', 'Pearsons'])
dff = dff.reindex(dff.Pearsons.abs().sort_values(ascending=False).index)
dff = dff.reset_index(drop=True)
# turn the rows of the new dataframe into list items for the drop down menu
# i.e. create a nicely formatted list of labels
ls = []
for index in dff.index:
txt = ''
for item in dff.iloc[index].values:
if type(item) != str:
txt += '{:.3f}'.format(item)
else:
txt += str(item) + ', '
ls.append(txt)
# set this list as the index of the dataframe to appear in the dropdown menu
dff.index = ls
```
Use this DataFrame to create the dropdown menu for your figure. Good luck!
```
dff.head()
# Code block for breakout
### YOUR CODE ###
def my_plot():
    # Skeleton for the lab: instead of separate x and y fields (like
    # exercise 5), parameterize with a single "x vs y" field sorted by
    # abs(pearsons) from the provided DataFrame, dff.
    # Set the values x and y based on the dropdown selection:
    # x =
    # y =
    # NOTE(review): x and y are intentionally left undefined in this
    # skeleton — calling my_plot() as-is raises NameError until they are set.
    corr = df.corr()
    pearson = corr[x][y]
    fig, ax = plt.subplots(1,1,figsize=(7,7))
    ax.plot(df[x], df[y], ls='', marker='.')
    ax.set_title('{} vs {} ({:.2f} corr)'.format(x, y, pearson))
    ax.set_xlabel('{}'.format(x))
    ax.set_ylabel('{}'.format(y))
from ipywidgets import SelectMultiple
wine_types = SelectMultiple(
options=['red', 'white'],
value=['red', 'white']
)
def my_plot(row=dff.index, types=wine_types):
    """Scatter of the feature pair encoded by *row*, split by wine type.

    The *row* dropdown options come from dff, whose index labels are
    pre-sorted by the magnitude of the pairwise Pearson correlation.
    *types* is a multi-select controlling which wine types are drawn.
    """
    feat_x = dff.loc[row]['Feature 1']
    feat_y = dff.loc[row]['Feature 2']
    pearson = df.corr()[feat_x][feat_y]
    fig, ax = plt.subplots(1, 1, figsize=(7, 7))
    ax.set_title('{} vs {} ({:.2f} corr)'.format(feat_x, feat_y, pearson))
    ax.set_xlabel('{}'.format(feat_x))
    ax.set_ylabel('{}'.format(feat_y))
    # fix the limits so toggling wine types doesn't rescale the axes
    ax.set_xlim(min(df[feat_x]), max(df[feat_x]))
    ax.set_ylim(min(df[feat_y]), max(df[feat_y]))
    whites = df.loc[df['type'] == 'white']
    reds = df.loc[df['type'] == 'red']
    if 'red' in types:
        ax.plot(reds[feat_x], reds[feat_y], ls='', marker='.', c='tab:red', alpha=.5)
    if 'white' in types:
        ax.plot(whites[feat_x], whites[feat_y], ls='', marker='.', c='tab:green', alpha=.5)

interact(my_plot)
```
## Advanced Users
Starting with the visualization we created using IpyWidgets, improve the visualization by listing x vs y pairs in the dropdown menu (instead of separate dropdowns for x and y). Sort these fields programmatically from highest to lowest **absolute** Pearson's correlation. For example, the first 5 dropdown selections should be the following:
* free sulfur dioxide, total sulfur dioxide, 0.721
* density, alcohol, -0.687
* residual sugar, density, 0.552
* residual sugar, total sulfur dioxide, 0.496
* fixed acidity, density, 0.459
When the dropdown is selected, it should update the plotted data, along with the x and y axes labels and plot title.
Example output:
<img src="https://raw.githubusercontent.com/wesleybeckner/ds_for_engineers/main/assets/C1_S4_breakout_solution.JPG" width=500>
**Bonus**
If you finish the breakout room, format your output so that the left side of your image contains the raw feature 1 vs feature 2 plot and the right side contains two subplots of the KDEs of the data.
```
# create a list of lists. The sublist contains feature 1 v feature 2 and their
# pearsons correlation. Think of this: you would like to loop through df.corr()
# and store those values in a [[feature 1, feature 2, corr],...[...]] list
### YOUR CODE ###
# Now create a new dataframe from this list of lists
# sort this dataframe by the absolute value of the pearsons correlation.
# you will need to reset the index by these sorted values.
# hint:
# 1) <mydf>.reindex(<mydf>.<mycolumn>.abs().sort_values(ascending=False).index)
# 2) <mydf>.reset_index(drop=True)
### YOUR CODE ###
# turn the rows of the new dataframe into list items for the drop down menu
# i.e. create a nicely formatted list of labels
### YOUR CODE ###
# set this list as the index of the dataframe to appear in the dropdown menu
### YOUR CODE ###
def my_plot():
    # Skeleton for the advanced breakout: instead of separate x and y fields
    # (like exercise 5), parameterize with a single "x vs y" field sorted by
    # abs(pearsons).
    # Set the values x and y based on the dropdown selection.
    # NOTE(review): x and y are intentionally undefined here — calling
    # my_plot() as-is raises NameError until the student assigns them.
    corr = df.corr()
    pearson = corr[x][y]
    fig, ax = plt.subplots(1,1,figsize=(7,7))
    ax.plot(df[x], df[y], ls='', marker='.')
    ax.set_title('{} vs {} ({:.2f} corr)'.format(x, y, pearson))
    ax.set_xlabel('{}'.format(x))
    ax.set_ylabel('{}'.format(y))
```
# References
* [`numpy.triu`](https://numpy.org/doc/stable/reference/generated/numpy.triu.html)
* [`numpy.ones_like`](https://numpy.org/doc/stable/reference/generated/numpy.ones_like.html)
* [`ipywidgets`](https://ipywidgets.readthedocs.io/en/latest/)
* [StatQuest: Correlation](https://youtu.be/xZ_z8KWkhXE?t=822)
* [StatQuest: Covariance](https://www.youtube.com/watch?v=qtaqvPAeEJY&ab_channel=StatQuestwithJoshStarmer)
# Enrichment Topics
## 4.4 Other Plot Types
### 4.4.1 Bar Plots (Advanced)
Similar to how we created bar plots with pandas, we can use matplotlib to make barplots
```
pokemon = ['Charmander', 'Pikachu', 'Squirtle', 'Bulbasaur']
hp = [10, 12, 8, 16]
plt.bar(pokemon, hp, color='tab:blue')
plt.title('Pokemon HP')
plt.xlabel('Pokemon')
plt.ylabel('HP')
```
```
pokemon = ['Charmander', 'Pikachu', 'Squirtle', 'Bulbasaur']
hp = [10, 12, 8, 16]
plt.bar(pokemon, hp, color='tab:blue')
plt.title('Pokemon HP')
plt.xlabel('Pokemon')
plt.ylabel('HP')
```
Doing the same but with horizontally oriented bars
```
pokemon = ['Charmander', 'Pikachu', 'Squirtle', 'Bulbasaur']
hp = [10, 12, 8, 16]
plt.barh(pokemon, hp, color='tab:blue')
plt.title('Pokemon HP')
plt.ylabel('Pokemon')
plt.xlabel('HP')
```
```
pokemon = ['Charmander', 'Pikachu', 'Squirtle', 'Bulbasaur']
hp = [10, 12, 8, 16]
plt.barh(pokemon, hp, color='tab:blue')
plt.title('Pokemon HP')
plt.ylabel('Pokemon')
plt.xlabel('HP')
```
We can also add error bars
```
pokemon = ['Charmander', 'Pikachu', 'Squirtle', 'Bulbasaur']
hp = [10, 12, 8, 16]
variance = [i * random.random()*.25 for i in hp]
plt.barh(pokemon, hp, xerr=variance, color='tab:blue')
plt.title('Pokemon HP')
plt.ylabel('Pokemon')
plt.xlabel('HP')
```
for loop version of list comprehension
```
hp = [10, 12, 8, 16]
variance = []
for i in hp:
variance.append(i * random.random()*.25)
print(variance)
```
```
pokemon = ['Charmander', 'Pikachu', 'Squirtle', 'Bulbasaur']
hp = [10, 12, 8, 16]
variance = [i * random.random()*.25 for i in hp]
plt.barh(pokemon, hp, xerr=variance, color='tab:blue')
plt.title('Pokemon HP')
plt.ylabel('Pokemon')
plt.xlabel('HP')
```
### 4.4.2 3D Plots
You can also create 3D plots in matplotlib using `ax.scatter3D`
```
ax = plt.axes(projection='3d')
ax.scatter3D(range(10),[i*random.random()*.25 for i in range(10)])
```
for loop version of list comprehension:
```
ls = []
for i in range(10):
ls.append(i*random.random()*.25)
print(ls)
```
```
ax = plt.axes(projection='3d')
ax.scatter3D(range(10),[i*random.random()*.25 for i in range(10)])
```
## 4.5 Visualization with Plotly
Another great plotting library, that is gaining in popularity (especially in enterprise settings) is plotly. As an added exercise, if you have additional time, explore some of the [plotly examples](https://plotly.com/python/) then recreate the breakout room assignment using plotly instead of matplotlib.
### 4.5.1 Scatter Plot with Size and Color
```
import plotly.express as px
x = 'quality'
y = 'alcohol'
color = 'quality'
size = 'alcohol'
corr = df.corr()
pearson = corr[x][y]
fig = px.scatter(df, x=x, y=y, color=color, size=size,
title='{} vs {} ({:.2f} corr)'.format(x, y, pearson),
width=800, height=800)
fig.show()
```
### 4.5.2 Plotly with IpyWidgets
```
def my_plot(x=df.select_dtypes('number').columns,
            y=df.select_dtypes('number').columns,
            color=df.select_dtypes('number').columns,
            size=df.select_dtypes('number').columns):
    """Interactive plotly scatter of any two numeric wine features.

    Dropdowns are restricted to numeric columns (consistent with the
    earlier ipywidgets cells): the non-numeric 'type' column is absent
    from df.corr() (KeyError on lookup) and is not a valid plotly `size`
    channel, so offering it in the dropdowns would crash the widget.
    """
    corr = df.corr()
    pearson = corr[x][y]
    fig = px.scatter(df, x=x, y=y, color=color, size=size,
                     title='{} vs {} ({:.2f} corr)'.format(x, y, pearson),
                     width=800, height=800)
    fig.show()
interact(my_plot)
```
| github_jupyter |
# Submitting and Managing Jobs
Launch this tutorial in a Jupyter Notebook on Binder:
[](https://mybinder.org/v2/gh/htcondor/htcondor-python-bindings-tutorials/master?urlpath=lab/tree/Submitting-and-Managing-Jobs.ipynb)
## What is HTCondor?
An HTCondor pool provides a way for you (as a user) to submit units of work, called **jobs**, to be executed on a distributed network of computing resources.
HTCondor provides tools to monitor your jobs as they run, and make certain kinds of changes to them after submission, which we call "managing" jobs.
In this tutorial, we will learn how to submit and manage jobs *from Python*.
We will see how to submit jobs with various toy executables, how to ask HTCondor for information about them, and how to tell HTCondor to do things with them.
All of these things are possible from the command line as well, using tools like `condor_submit`, `condor_qedit`, and `condor_hold`.
However, working from Python instead of the command line gives us access to the full power of Python to do things like generate jobs programmatically based on user input, pass information consistently from submission to management, or even expose an HTCondor pool to a web application.
We start by importing the HTCondor Python bindings modules, which provide the functions we will need to talk to HTCondor.
```
import htcondor # for submitting jobs, querying HTCondor daemons, etc.
import classad # for interacting with ClassAds, HTCondor's internal data format
```
## Submitting a Simple Job
To submit a job, we must first describe it.
A submit description is held in a `Submit` object.
`Submit` objects consist of key-value pairs, and generally behave like Python dictionaries.
If you're familiar with HTCondor's submit file syntax, you should think of each line in the submit file as a single key-value pair in the `Submit` object.
Let's start by writing a `Submit` object that describes a job that executes the `hostname` command on an execute node, which prints out the "name" of the node.
Since `hostname` prints its results to standard output (stdout), we will capture stdout and bring it back to the submit machine so we can see the name.
```
hostname_job = htcondor.Submit({
"executable": "/bin/hostname", # the program to run on the execute node
"output": "hostname.out", # anything the job prints to standard output will end up in this file
"error": "hostname.err", # anything the job prints to standard error will end up in this file
"log": "hostname.log", # this file will contain a record of what happened to the job
"request_cpus": "1", # how many CPU cores we want
"request_memory": "128MB", # how much memory we want
"request_disk": "128MB", # how much disk space we want
})
print(hostname_job)
```
The available descriptors are documented in the [`condor_submit` manual page](https://htcondor.readthedocs.io/en/latest/man-pages/condor_submit.html).
The keys of the Python dictionary you pass to `htcondor.Submit` should be the same as for the submit descriptors, and the values should be **strings containing exactly what would go on the right-hand side**.
Note that we gave the `Submit` object several relative filepaths.
These paths are relative to the directory containing this Jupyter notebook (or, more generally, the current working directory).
When we run the job, you should see those files appear in the file browser on the left as HTCondor creates them.
Now that we have a job description, let's submit a job.
The `htcondor.Schedd.submit` method returns a `SubmitResult` object that contains information about the job, such as its `ClusterId`.
```
schedd = htcondor.Schedd() # get the Python representation of the scheduler
submit_result = schedd.submit(hostname_job) # submit the job
print(submit_result.cluster()) # print the job's ClusterId
```
The job's `ClusterId` uniquely identifies this submission.
Later in this module, we will use it to ask the HTCondor scheduler for information about our jobs.
For now, our job will hopefully have finished running.
You should be able to see the files in the file browser on the left.
Try opening one of them and seeing what's inside.
We can also look at the output from inside Python:
```
import os
import time
output_path = "hostname.out"
# this is a crude way to wait for the job to finish
# see the Advanced tutorial "Scalable Job Tracking" for better methods!
while not os.path.exists(output_path):
print("Output file doesn't exist yet; sleeping for one second")
time.sleep(1)
with open(output_path, mode = "r") as f:
print(f.read())
```
If you got some text, it worked!
If the file never shows up, it means your job didn't run.
You might try looking at the `log` or `error` files specified in the submit description to see if there is any useful information in them about why the job failed.
## Submitting Multiple Jobs
By default, each `submit` will submit a single job.
A more common use case is to submit many jobs at once, often sharing some base submit description.
Let's write a new submit description which runs `sleep`.
When we have multiple **jobs** in a single **cluster**, each job will be identified not just by its **ClusterId** but also by a **ProcID**.
We can use the ProcID to separate the output and error files for each individual job.
Anything that looks like `$(...)` in a submit description is a **macro**, a placeholder which will be "expanded" later by HTCondor into a real value for that particular job.
The ProcID expands to a series of incrementing integers, starting at 0.
So the first job in a cluster will have ProcID 0, the next will have ProcID 1, etc.
```
sleep_job = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "10s", # sleep for 10 seconds
"output": "sleep-$(ProcId).out", # output and error for each job, using the $(ProcId) macro
"error": "sleep-$(ProcId).err",
"log": "sleep.log", # we still send all of the HTCondor logs for every job to the same file (not split up!)
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
print(sleep_job)
```
We will submit 10 of these jobs.
All we need to change from our previous `submit` call is to add the `count` keyword argument.
```
schedd = htcondor.Schedd()
submit_result = schedd.submit(sleep_job, count=10) # submit 10 jobs
print(submit_result.cluster())
```
Now that we have a bunch of jobs in flight, we might want to check how they're doing.
We can ask the HTCondor scheduler about jobs by using its `query` method.
We give it a **constraint**, which tells it which jobs to look for, and a **projection**, which tells it what information to return.
```
schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "Out"],
)
```
There are a few things to notice here:
- Depending on how long it took you to run the cell, you may only get a few of your 10 jobs in the query. Jobs that have finished **leave the queue**, and will no longer show up in queries. To see those jobs, you must use the `history` method instead, which behaves like `query`, but **only** looks at jobs that have left the queue.
- The results may not have come back in ProcID-sorted order. If you want to guarantee the order of the results, you must do so yourself.
- Attributes are often renamed between the submit description and the actual job description in the queue. See [the manual](https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html) for a description of the job attribute names.
- The objects returned by the query are instances of `ClassAd`. ClassAds are the common data exchange format used by HTCondor. In Python, they mostly behave like dictionaries.
## Using Itemdata to Vary Over Parameters
By varying some part of the submit description using the ProcID, we can change how each individual job behaves.
Perhaps it will use a different input file, or a different argument.
However, we often want more flexibility than that.
Perhaps our input files are named after different cities, or by timestamp, or some other naming scheme that already exists.
To use such information in the submit description, we need to use **itemdata**.
Itemdata lets us pass arbitrary extra information when we queue, which we can reference with macros inside the submit description.
This lets us use the full power of Python to generate the submit descriptions for our jobs.
Let's mock this situation out by generating some files with randomly-chosen names.
We'll also switch to using `pathlib.Path`, Python's more modern file path manipulation library.
```
from pathlib import Path
import random
import string
import shutil
def random_string(length):
    """Return a random string of lowercase ASCII letters of the given length."""
    chars = random.choices(string.ascii_lowercase, k=length)
    return "".join(chars)
# make a directory to hold the input files, clearing away any existing directory
input_dir = Path.cwd() / "inputs"
shutil.rmtree(input_dir, ignore_errors = True)
input_dir.mkdir()
# make 5 input files
for idx in range(5):
rs = random_string(5)
input_file = input_dir / "{}.txt".format(rs)
input_file.write_text("Hello from job {}".format(rs))
```
Now we'll get a list of all the files we just created in the input directory.
This is precisely the kind of situation where Python affords us a great deal of flexibility over a submit file: we can use Python instead of the HTCondor submit language to generate and inspect the information we're going to put into the submit description.
```
input_files = list(input_dir.glob("*.txt"))
for path in input_files:
print(path)
```
Now we'll make our submit description.
Our goal is just to print out the text held in each file, which we can do using `cat`.
We will tell HTCondor to transfer the input file to the execute location by including it in `transfer_input_files`.
We also need to call `cat` on the right file via `arguments`.
Keep in mind that HTCondor will move the files in `transfer_input_files` directly to the scratch directory on the execute machine, so instead of the full path, we just need the file's "name", the last component of its path.
`pathlib` will make it easy to extract this information.
```
cat_job = htcondor.Submit({
"executable": "/bin/cat",
"arguments": "$(input_file_name)", # we will pass in the value for this macro via itemdata
"transfer_input_files": "$(input_file)", # we also need HTCondor to move the file to the execute node
"should_transfer_files": "yes", # force HTCondor to transfer files even though we're running entirely inside a container (and it normally wouldn't need to)
"output": "cat-$(ProcId).out",
"error": "cat-$(ProcId).err",
"log": "cat.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
print(cat_job)
```
The itemdata should be passed as a list of dictionaries, where the keys are the macro names to replace in the submit description.
In our case, the keys are `input_file` and `input_file_name`, so we should have a list of 5 dictionaries (one per input file), each with two entries.
HTCondor expects the input file list to be a comma-separated list of POSIX-style paths, so we explicitly convert our `Path` to a POSIX string.
```
itemdata = [{"input_file": path.as_posix(), "input_file_name": path.name} for path in input_files]
for item in itemdata:
print(item)
```
Now we'll submit the jobs, adding the `itemdata` parameter to the `submit` call:
```
schedd = htcondor.Schedd()
submit_result = schedd.submit(cat_job, itemdata = iter(itemdata)) # submit one job for each item in the itemdata
print(submit_result.cluster())
```
Let's do a query to make sure we got the itemdata right (these jobs run fast, so you might need to re-run the jobs if your first run has already left the queue):
```
schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "Out", "Args", "TransferInput"],
)
```
And let's take a look at all the output:
```
# again, this is very crude - see the advanced tutorials!
while not len(list(Path.cwd().glob("cat-*.out"))) == len(itemdata):
print("Not all output files exist yet; sleeping for one second")
time.sleep(1)
for output_file in Path.cwd().glob("cat-*.out"):
print(output_file, "->", output_file.read_text())
```
## Managing Jobs
Once a job is in queue, the scheduler will try its best to execute it to completion.
There are several cases where you may want to interrupt the normal flow of jobs.
Perhaps the results are no longer needed; perhaps the job needs to be edited to correct a submission error.
These actions fall under the purview of **job management**.
There are two `Schedd` methods dedicated to job management:
* `edit()`: Change an attribute for a set of jobs.
* `act()`: Change the state of a job (remove it from the queue, hold it, suspend it, etc.).
The `act` method takes an argument from the `JobAction` enum.
Commonly-used values include:
* `Hold`: put a job on hold, vacating a running job if necessary. A job will stay in the hold
state until told otherwise.
* `Release`: Release a job from the hold state, returning it to Idle.
* `Remove`: Remove a job from the queue. If it is running, it will stop running.
This requires the execute node to acknowledge it has successfully vacated the job, so ``Remove`` may
not be instantaneous.
* `Vacate`: Cause a running job to be killed on the remote resource and return to the Idle state. With
`Vacate`, jobs may be given significant time to cleanly shut down.
To play with this, let's bring back our sleep submit description, but increase the sleep time significantly so that we have time to interact with the jobs.
```
long_sleep_job = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "10m", # sleep for 10 minutes
"output": "sleep-$(ProcId).out",
"error": "sleep-$(ProcId).err",
"log": "sleep.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
print(long_sleep_job)
schedd = htcondor.Schedd()
submit_result = schedd.submit(long_sleep_job, count=5)
```
As an experiment, let's set an arbitrary attribute on the jobs and check that it worked.
When we're really working, we could do things like change the amount of memory a job has requested by editing its `RequestMemory` attribute.
The job attributes that are built-in to HTCondor are described [here](https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html), but your site may specify additional, custom attributes as well.
```
# sets attribute foo to the string "bar" for all of our jobs
# note the nested quotes around bar! The outer "" make it a Python string; the inner "" make it a ClassAd string.
schedd.edit(f"ClusterId == {submit_result.cluster()}", "foo", "\"bar\"")
# do a query to check the value of attribute foo
schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "JobStatus", "foo"],
)
```
Although the job status appears to be an attribute, we cannot `edit` it directly.
As mentioned above, we must instead `act` on the job.
Let's hold the first two jobs so that they stop running, but leave the others going.
```
# hold the first two jobs
schedd.act(htcondor.JobAction.Hold, f"ClusterId == {submit_result.cluster()} && ProcID <= 1")
# check the status of the jobs
ads = schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "JobStatus"],
)
for ad in ads:
# the ClassAd objects returned by the query act like dictionaries, so we can extract individual values out of them using []
print(f"ProcID = {ad['ProcID']} has JobStatus = {ad['JobStatus']}")
```
The various job statuses are represented by numbers. `1` means `Idle`, `2` means `Running`, and `5` means `Held`. If you see `JobStatus = 5` above for `ProcID = 0` and `ProcID = 1`, then we succeeded!
The opposite of `JobAction.Hold` is `JobAction.Release`.
Let's release those jobs and let them go back to `Idle`.
```
schedd.act(htcondor.JobAction.Release, f"ClusterId == {submit_result.cluster()}")
ads = schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "JobStatus"],
)
for ad in ads:
# the ClassAd objects returned by the query act like dictionaries, so we can extract individual values out of them using []
print(f"ProcID = {ad['ProcID']} has JobStatus = {ad['JobStatus']}")
```
Note that we simply released all the jobs in the cluster.
Releasing a job that is not held doesn't do anything, so we don't have to be extremely careful.
Finally, let's clean up after ourselves:
```
schedd.act(htcondor.JobAction.Remove, f"ClusterId == {submit_result.cluster()}")
```
## Exercises
Now let's practice what we've learned.
- In each exercise, you will be given a piece of code and a test that does not yet pass.
- The exercises are vaguely in order of increasing difficulty.
- Modify the code, or add new code to it, to pass the test. Do whatever it takes!
- You can run the test by running the block it is in.
- Feel free to look at the test for clues as to how to modify the code.
- Many of the exercises can be solved either by using Python to generate inputs, or by using advanced features of the [ClassAd language](https://htcondor.readthedocs.io/en/latest/misc-concepts/classad-mechanism.html#htcondor-s-classad-mechanism). Either way is valid!
- Don't modify the test. That's cheating!
### Exercise 1: Incrementing Sleeps
Submit five jobs which sleep for `5`, `6`, `7`, `8`, and `9` seconds, respectively.
```
# MODIFY OR ADD TO THIS BLOCK...
incrementing_sleep = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "1",
"output": "ex1-$(ProcId).out",
"error": "ex1-$(ProcId).err",
"log": "ex1.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
schedd = htcondor.Schedd()
submit_result = schedd.submit(incrementing_sleep)
# ... TO MAKE THIS TEST PASS
expected = [str(i) for i in range(5, 10)]
print("Expected ", expected)
ads = schedd.query(f"ClusterId == {submit_result.cluster()}", projection = ["Args"])
arguments = sorted(ad["Args"] for ad in ads)
print("Got ", arguments)
assert arguments == expected, "Arguments were not what we expected!"
print("The test passed. Good job!")
```
### Exercise 2: Echo to Target
Run a job that makes the text `Echo to Target` appear in a file named `ex3.txt`.
```
# MODIFY OR ADD TO THIS BLOCK...
echo = htcondor.Submit({
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
schedd = htcondor.Schedd()
submit_result = schedd.submit(echo)
# ... TO MAKE THIS TEST PASS
does_file_exist = os.path.exists("ex3.txt")
assert does_file_exist, "ex3.txt does not exist!"
expected = "Echo to Target"
print("Expected ", expected)
contents = open("ex3.txt", mode = "r").read().strip()
print("Got ", contents)
assert expected in contents, "Contents were not what we expected!"
print("The test passed. Good job!")
```
### Exercise 3: Holding Odds
Hold all of the odd-numbered jobs in this large cluster.
- Note that the test block **removes all of the jobs you own** when it runs, to prevent these long-running jobs from corrupting other tests!
```
# MODIFY OR ADD TO THIS BLOCK...
long_sleep = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "10m",
"output": "ex2-$(ProcId).out",
"error": "ex2-$(ProcId).err",
"log": "ex2.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
schedd = htcondor.Schedd()
submit_result = schedd.submit(long_sleep, count=100)
# ... TO MAKE THIS TEST PASS
import getpass
try:
ads = schedd.query(f"ClusterId == {submit_result.cluster()}", projection = ["ProcID", "JobStatus"])
proc_to_status = {int(ad["ProcID"]): ad["JobStatus"] for ad in sorted(ads, key = lambda ad: ad["ProcID"])}
for proc, status in proc_to_status.items():
print("Proc {} has status {}".format(proc, status))
assert len(proc_to_status) == 100, "Wrong number of jobs (perhaps you need to resubmit them?)."
assert all(status == 5 for proc, status in proc_to_status.items() if proc % 2 != 0), "Not all odd jobs were held."
assert all(status != 5 for proc, status in proc_to_status.items() if proc % 2 == 0), "An even job was held."
print("The test passed. Good job!")
finally:
schedd.act(htcondor.JobAction.Remove, f'Owner=="{getpass.getuser()}"')
```
| github_jupyter |
# What is the Requests Resource?
Requests is an Apache2 Licensed HTTP library, written in Python. It is designed so that humans can interact with web services in a simple, natural way. This means you don't have to manually add query strings to URLs, or form-encode your POST data. Don't worry if that made no sense to you. It will in due time.
### What can Requests do?
Requests will allow you to send HTTP/1.1 requests using Python. With it, you can add content like headers, form data, multipart files, and parameters via simple Python libraries. It also allows you to access the response data of Python in the same way.
In programming, a library is a collection or pre-configured selection of routines, functions, and operations that a program can use. These elements are often referred to as modules, and stored in object format.
Libraries are important, because you load a module and take advantage of everything it offers without explicitly linking to every program that relies on them. They are truly standalone, so you can build your own programs with them and yet they remain separate from other programs.
Think of modules as a sort of code template.
To reiterate, Requests is a Python library.
<h3>Step 1: Import the requests library</h3>
```
import requests
```
<h3>Step 2: Send an HTTP request, get the response, and save in a variable</h3>
```
response = requests.get("http://www.epicurious.com/search/Tofu+Chili")
type(response)
```
<h3>Step 3: Check the response status code to see if everything went as planned</h3>
<li>status code 200: the request response cycle was successful
<li>any other status code: it didn't work (e.g., 404 = page not found)
```
print(response.status_code)
```
<h3>Step 4: Get the content of the response</h3>
<li>Convert to utf-8 if necessary
```
response.content.decode('utf-8')
```
<h4>Problem: Get the contents of Wikipedia's main page and look for the string "Did you know" in it</h4>
```
url = "https://en.wikipedia.org/wiki/main_page"
#The rest of your code should go below this line
response1 = requests.get(url)
response1.status_code
response1.content
```
Here we can see letter **b** at the first position. It means that this is the representation called **byte string**. Let's transform it to Unicode. And to decode it into Unicode, we use the function **decode**, and we give it the coding scheme.
The **coding scheme** can vary.
There are lots of coding schemes.
But **UTF8** or **UTF16** are the most common.
So generally, if you're going to an English language web page,
you can expect that the result is going
to come back in UTF8 format.
It's going to come back.
It needs to be decoded using UTF8 as your decoder.
```
response1.content.decode('utf-8')
```
Let's find the subtitle "Did you know..." on the main page and get the byte offset where it is situated.
```
response1.content.decode('utf-8').find("Did_you_know...")
```
<h2>JSON</h2>
<li>The python library - json - deals with converting text to and from JSON
```
import json
data_string = '[{"b": [2, 4], "c": 3.0, "a": "A"}]'
python_data = json.loads(data_string)
print(python_data)
```
<h3>json.loads recursively decodes a string in JSON format into equivalent python objects</h3>
<li>data_string's outermost element is converted into a python list
<li>the first element of that list is converted into a dictionary
<li>the key of that dictionary is converted into a string
<li>the value of that dictionary is converted into a list of two integer elements
```
print(type(data_string),type(python_data))
print(type(python_data[0]),python_data[0])
print(type(python_data[0]['b']),python_data[0]['b'])
```
<h3>json.loads will throw an exception if the format is incorrect</h3>
```
#Correct
json.loads('"Hello"')
```
The next code is wrong. And the reason I get that exception
is because here I have a string, but it
doesn't contain a JSON object.
**To be a valid JSON value, the string must itself contain a quoted string (as in `'"Hello"'`) — the bare text `Hello` is not valid JSON.**
```
#Wrong
json.loads("Hello")
import json
data_string = json.dumps(python_data)
print(type(data_string))
print(data_string)
```
<h2>requests library and JSON</h2>
Luckily for us, we don't even have to do this.
The **Request Library** has a function
that automatically loads a JSON string into Python.
So for example, if we go to the API
that we saw earlier for Google APIs to get geocoding
and we send our request, instead of having to do response.content.decode and all that kind of stuff,
when we get the request back we can just call the JSON function
on it and it'll automatically load it,
assuming of course that it is a proper JSON string.
```
address="Columbia University, New York, NY"
url="https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address)
response = requests.get(url).json()
print(type(response))
```
<h3>Exception checking!</h3>
So you should always be ready to face the fact
that your code may not work.
You may be expecting a JSON object back,
but the server instead sends you a malformed JSON object.
Be ready for that, too.
So always check for exceptions.
And that's what we're going to do now.
We're going to make sure that we have everything properly
checked over here.
```
address="Columbia University, New York, NY"
url="https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address)
try:
response = requests.get(url)
if not response.status_code == 200:
print("HTTP error",response.status_code)
else:
try:
response_data = response.json()
except:
print("Response not in valid JSON format")
except:
print("Something went wrong with requests.get")
print(type(response_data))
```
So let's see what the URL looks like.
```
url
```
We get the response data.
And we notice that it's of type Dictionary.
So let's take a look at what this dictionary looks like.
```
response_data
```
We've got a JSON but it doesn't mean that Google actually
gave us the data we wanted.
Because if Google, if the Google part doesn't work,
they're going to send back a JSON object with the result
of an error inside it.
So the `status` field here will hold an error value (for example `"ZERO_RESULTS"` or `"REQUEST_DENIED"`) instead of **"OK"**.
<h2>Problem 1: Write a function that takes an address as an argument and returns a (latitude, longitude) tuple</h2>
```
response_data['results']
response_data['results'][0]
for thing in response_data['results'][0]:
print(thing)
response_data['results'][0]['geometry']
response_data['results'][0]['geometry']['location']
def get_lat_lng(address):
    """Geocode *address* with the Google Maps geocoding API.

    Returns a (latitude, longitude) tuple on success, or None when the
    HTTP request, the JSON decoding, or the result lookup fails.
    (The previous version never bound `lat`/`lng` on failure, so the
    final `return (lat, lng)` raised NameError instead.)
    """
    import requests, time
    url = "https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address)
    # Fetch the response; catch only network-level failures.
    try:
        response = requests.get(url)
    except requests.RequestException:
        print("Something went wrong with requests.get")
        return None
    if not response.status_code == 200:
        print("HTTP error", response.status_code)
        return None
    # Decode the body; the server may return malformed JSON on errors.
    try:
        response_data = response.json()
    except ValueError:
        print("Response not in valid JSON format")
        return None
    # Throttle successive calls so we stay friendly to the API.
    time.sleep(1)
    try:
        location = response_data['results'][0]['geometry']['location']
        return (location['lat'], location['lng'])
    except (KeyError, IndexError):
        # No usable results (API error status or unknown address).
        print("Try another one.")
        return None
get_lat_lng("Columbia University, New York, NY")
get_lat_lng("Maidan Nezalezhnosti, Kyiv, Ukraine")
```
<h2>Problem 2: Extend the function so that it takes a possibly incomplete address as an argument and returns a list of tuples of the form (complete address, latitude, longitude)</h2>
```
get_lat_lng("London")
address="Lon"
url="https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address)
try:
response = requests.get(url)
if not response.status_code == 200:
print("HTTP error",response.status_code)
else:
try:
response_data = response.json()
except:
print("Response not in valid JSON format")
except:
print("Something went wrong with requests.get")
print(type(response_data))
response_data['results'][1]['address_components'][0]['long_name']
propos_adr = []
for i in range(len(response_data['results'])):
adr = response_data['results'][i]['address_components'][0]['long_name']
lat = response_data['results'][i]['geometry']['location']['lat']
lng = response_data['results'][i]['geometry']['location']['lng']
propos_adr.append((adr,lat,lng))
propos_adr
def get_lat_lng_incompl(address):
    """Geocode a possibly-incomplete *address*.

    Returns a list of (complete address, latitude, longitude) tuples,
    one per candidate match, or an empty list on any failure.
    (The previous version could raise NameError on failure because
    `response_data`/`propos_adr` were never bound.)
    """
    import requests, time
    url = "https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address)
    # Fetch the response; catch only network-level failures.
    try:
        response = requests.get(url)
    except requests.RequestException:
        print("Something went wrong with requests.get")
        return []
    if not response.status_code == 200:
        print("HTTP error", response.status_code)
        return []
    # Decode the body; the server may return malformed JSON on errors.
    try:
        response_data = response.json()
    except ValueError:
        print("Response not in valid JSON format")
        return []
    # Throttle successive calls so we stay friendly to the API.
    time.sleep(1)
    propos_adr = []
    try:
        for result in response_data['results']:
            adr = result['address_components'][0]['long_name']
            location = result['geometry']['location']
            propos_adr.append((adr, location['lat'], location['lng']))
    except (KeyError, IndexError):
        # Unexpected result shape (e.g. API error payload).
        print("Try another one.")
    return propos_adr
get_lat_lng_incompl("Chi")
```
<h1>XML</h1>
<li>The python library - lxml - deals with converting an xml string to python objects and vice versa</li>
```
data_string = """
<Bookstore>
<Book ISBN="ISBN-13:978-1599620787" Price="15.23" Weight="1.5">
<Title>New York Deco</Title>
<Authors>
<Author Residence="New York City">
<First_Name>Richard</First_Name>
<Last_Name>Berenholtz</Last_Name>
</Author>
</Authors>
</Book>
<Book ISBN="ISBN-13:978-1579128562" Price="15.80">
<Remark>
Five Hundred Buildings of New York and over one million other books are available for Amazon Kindle.
</Remark>
<Title>Five Hundred Buildings of New York</Title>
<Authors>
<Author Residence="Beijing">
<First_Name>Bill</First_Name>
<Last_Name>Harris</Last_Name>
</Author>
<Author Residence="New York City">
<First_Name>Jorg</First_Name>
<Last_Name>Brockmann</Last_Name>
</Author>
</Authors>
</Book>
</Bookstore>
"""
from lxml import etree
root = etree.XML(data_string)
print(root.tag,type(root.tag))
print(etree.tostring(root, pretty_print=True).decode("utf-8"))
```
<h3>Iterating over an XML tree</h3>
<li>Use an iterator.
<li>The iterator will generate every tree element for a given subtree
```
for element in root.iter():
print(element)
```
<h4>Or just use the child in subtree construction
```
for child in root:
print(child)
```
<h4>Accessing the tag</h4>
```
for child in root:
print(child.tag)
```
<h4>Using the iterator to get specific tags<h4>
<li>In the below example, only the author tags are accessed
<li>For each author tag, the .find function accesses the First_Name and Last_Name tags
<li>The .find function only looks at the children, not other descendants, so be careful!
<li>The .text attribute prints the text in a leaf node
```
for element in root.iter("Author"):
print(element.find('First_Name').text,element.find('Last_Name').text)
```
<h4>Problem: Find the last names of all authors in the tree “root” using xpath</h4>
```
for element in root.findall("Book/Title"):
print(element.text)
for element in root.findall("Book/Authors/Author/Last_Name"):
print(element.text)
```
<h4>Using values of attributes as filters</h4>
<li>Example: Find the first name of the author of a book that weighs 1.5 oz
```
root.find('Book[@Weight="1.5"]/Authors/Author/First_Name').text
```
<h4>Problem: Print first and last names of all authors who live in New York City</h4>
```
books = root.findall("Book")
for i in range(len(books)):
print(root.findall('Book/Authors/Author[@Residence="New York City"]/First_Name')[i].text,
root.findall('Book/Authors/Author[@Residence="New York City"]/Last_Name')[i].text)
```
| github_jupyter |
```
pip install ta==0.4.7
import glob
import os
import pickle
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import datetime as dt
from ta import add_all_ta_features
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
```
## Get CSVs Filename Into a Variable
```
csv_paths = glob.glob('arb_data/*.csv') # modify to your filepath for data
model_paths = glob.glob('arb_pickles/*.pkl')
len(csv_paths), len(model_paths)
csv_paths[:]
def resample_ohlcv(df, period='5T'):
    """Change the time period on cryptocurrency ohlcv data.

    Period is a string denoted by 'time_in_minutesT' (ex: '1T', '5T', '60T').
    Expects a `date` datetime column; returns a dataframe indexed by the
    resampled period with open/high/low/close/base_volume aggregated.
    """
    # Set date as the index. This is needed for the function to run.
    df = df.set_index(['date'])
    # Aggregation function per column.
    ohlc_dict = {'open': 'first',
                 'high': 'max',
                 'low': 'min',
                 'close': 'last',
                 'base_volume': 'sum'}
    # Apply resampling. The `how=` keyword was removed from pandas in 0.25;
    # calling .agg() on the resampler is the supported equivalent.
    df = df.resample(period, closed='left', label='left').agg(ohlc_dict)
    return df
# function to handle nans in the data introduced by resampling
def fill_nan(df):
    """Patch the NaN rows that time-resampling introduces.

    Forward-fills the closing price, then back-fills each row across
    columns so open/high/low pick up that filled close. Columns with no
    later value in the row (e.g. base_volume) are left as NaN.
    """
    # Carry the last known closing price forward through the gaps.
    df['close'] = df['close'].ffill()
    # Sweep across each row (left to right) filling from the next valid column.
    return df.bfill(axis=1)
def engineer_features(df, period='5T'):
    """Resample raw ohlcv candles, flag/fill the gaps and attach the full
    `ta` technical-analysis feature set.

    Returns the enriched dataframe with a 'closing_time' column.
    """
    # unix epoch seconds -> datetime, needed as the resampling index
    df['date'] = pd.to_datetime(df['closing_time'], unit='s')
    df = resample_ohlcv(df, period)
    df = df.reset_index()
    # stash the resampled timestamps; they are re-attached after `ta`
    # (which only wants the numeric columns) has run
    timestamps = df.date.values
    df.drop(columns='date', inplace=True)
    # 1 marks candles that were missing before the gaps get filled
    df['nan_ohlcv'] = df['close'].apply(lambda x: 1 if pd.isnull(x) else 0)
    df = fill_nan(df)
    # bolt on every technical-analysis indicator the ta library offers
    df = add_all_ta_features(df, 'open', 'high', 'low', 'close', 'base_volume', fillna=True)
    df['closing_time'] = timestamps
    return df
def get_higher_closing_price(df):
    """Row-wise feature: which exchange closed higher.

    Returns 1 if exchange 1 is higher, 2 if exchange 2 is higher,
    0 if the closes are equal.
    """
    gap = df['close_exchange_1'] - df['close_exchange_2']
    if gap > 0:
        return 1
    if gap < 0:
        return 2
    return 0
def get_pct_higher(df):
    """Row-wise feature: percentage by which the higher close exceeds the
    lower one; 0 when the closes are equal."""
    side = df['higher_closing_price']
    if side == 1:
        # exchange 1 closed above exchange 2
        return (df['close_exchange_1'] / df['close_exchange_2'] - 1) * 100
    if side == 2:
        # exchange 2 closed above exchange 1
        return (df['close_exchange_2'] / df['close_exchange_1'] - 1) * 100
    return 0
def get_arbitrage_opportunity(df):
    """Row-wise label of the available arbitrage, assuming 0.55% total fees.

    Returns 0 for no arbitrage, -1 for exchange 2 -> exchange 1,
    1 for exchange 1 -> exchange 2.
    """
    # a gap smaller than the round-trip fee is not worth trading
    if df['pct_higher'] < .55:
        return 0
    side = df['higher_closing_price']
    if side == 1:
        # buy cheap on exchange 2, sell dear on exchange 1
        return -1
    if side == 2:
        # buy cheap on exchange 1, sell dear on exchange 2
        return 1
def get_window_length(df):
    """Add a 'window_length' column: how many minutes the current
    arbitrage / no-arbitrage state has persisted (candles are 5 minutes).

    Fix: the original compared ``target_list[i - 1]`` which, at i == 0,
    wraps around to the *last* element, so the very first row could start
    at 10 minutes instead of 5 whenever first and last values coincided.
    The first row always starts a fresh 5-minute window.
    """
    targets = df['arbitrage_opportunity'].to_list()
    window_lengths = []
    window_length = 5
    for i, value in enumerate(targets):
        # A window continues only when this row matches the previous row;
        # the first row has no previous row, so it starts a new window.
        if i > 0 and value == targets[i - 1]:
            window_length += 5
        else:
            window_length = 5
        window_lengths.append(window_length)
    # column showing how long the arbitrage / no-arbitrage window has lasted
    df['window_length'] = window_lengths
    return df
def merge_dfs(df1, df2):
    """Merge two engineered per-exchange frames on closing_time and build
    the cross-exchange arbitrage features on the joined frame."""
    print('from merge func:', df1.shape, df2.shape)
    df = pd.merge(df1, df2, on='closing_time',
                  suffixes=('_exchange_1', '_exchange_2'))
    print('from merge func:', df.shape)
    df['closing_time'] = pd.to_datetime(df['closing_time'])
    print(type(df.closing_time[0]))
    # calendar features pulled off the merged timestamp
    for part in ('year', 'month', 'day'):
        df[part] = getattr(df['closing_time'].dt, part)
    # row-wise engineered features; each one feeds the next
    df['higher_closing_price'] = df.apply(get_higher_closing_price, axis=1)
    df['pct_higher'] = df.apply(get_pct_higher, axis=1)
    df['arbitrage_opportunity'] = df.apply(get_arbitrage_opportunity, axis=1)
    return get_window_length(df)
def get_target_value(df, interval=30):
    """Row-wise target: 1 = trade ex1 -> ex2, -1 = trade ex2 -> ex1,
    0 = no arbitrage.

    Only signals a trade when the upcoming arbitrage window lasts at
    least `interval` minutes.
    """
    if df['window_length_shift'] >= interval:
        # map the shifted opportunity straight through to the label
        return {1: 1, -1: -1, 0: 0}.get(df['arbitrage_opportunity_shift'])
    # window too short (or shift is NaN): no actionable arbitrage
    return 0
def get_target(df, interval=30):
    """Build the 'target' column: will an arbitrage opportunity lasting at
    least `interval` minutes be open shortly?

    Candles are 5 minutes long; one extra row of shift makes the label
    refer to ten minutes ahead rather than five.
    """
    shift_by = int(-1 * (interval / 5)) - 1
    # future state of the opportunity / window columns
    df['arbitrage_opportunity_shift'] = df['arbitrage_opportunity'].shift(shift_by)
    df['window_length_shift'] = df['window_length'].shift(shift_by)
    df['target'] = df.apply(get_target_value, axis=1)
    # trailing rows have no future to label; drop them
    return df[:shift_by]
def get_close_shift(df, interval=30):
    """Attach future closing prices (interval plus two extra candles ahead)
    for both exchanges, used later to evaluate realized profit."""
    offset = int(-1 * (interval / 5)) - 2
    for col in ('close_exchange_1', 'close_exchange_2'):
        df[col + '_shift'] = df[col].shift(offset)
    return df
def get_profit(df):
    """Realized arbitrage return (%) net of the assumed 0.55% total fees:
    buy on the cheaper exchange now, sell on the dearer one at the
    shifted (future) close. Returns 0 when prices are equal (no trade)."""
    side = df['higher_closing_price']
    if side == 1:
        # bought on exchange 2, sold later on exchange 1
        return (df['close_exchange_1_shift'] / df['close_exchange_2'] - 1) * 100 - .55
    if side == 2:
        # bought on exchange 1, sold later on exchange 2
        return (df['close_exchange_2_shift'] / df['close_exchange_1'] - 1) * 100 - .55
    return 0
def get_exchange_trading_pair(ex_tp):
    """Split a combined 'exchange_tradingpair' name into its two parts.

    Five underscore-separated tokens mean a two-word exchange name
    (e.g. 'coinbase_pro'); otherwise the first token is the exchange.
    """
    parts = ex_tp.split('_')
    if len(parts) == 5:
        return '_'.join(parts[:2]), '_'.join(parts[2:4])
    return parts[0], '_'.join(parts[1:3])
# Re-point at the freshly engineered csvs before scoring the pickled models.
csv_paths = glob.glob('new_arb_csv/*.csv') # modify to your filepath for data
model_paths = glob.glob('arb_pickles/*.pkl')
len(csv_paths), len(model_paths)
def arb(csv_paths, model_paths):
    """Score each pickled model against the csv of the same basename.

    For every matching (csv, model) pair: reload the data, rebuild the
    70/30 time-based train/test split with a two-week leakage buffer,
    predict on the test rows and derive percentage-difference and
    arbitrage columns from the predictions.

    Returns a dict mapping model name -> annotated prediction dataframe.
    """
    df_dict = {}
    for csv_path in csv_paths:
        csv_name = csv_path.split('/')[1][:-4]
        for model_path in model_paths:
            model_name = model_path.split('/')[1][:-4]
            if csv_name == model_name:
                print("\n \n", csv_name, "==", model_name)
                # read csv
                df = pd.read_csv("new_arb_csv/" + csv_name + ".csv", index_col=0)
                # convert str closing_time to datetime
                df['closing_time'] = pd.to_datetime(df['closing_time'])
                # 70/30 train/test split (row index of the split point)
                test_train_split_row = round(len(df) * .7)
                # get closing_time for t/t split
                test_train_split_time = df['closing_time'][test_train_split_row]
                # remove 1 week from each end of the t/t datasets to create a
                # two week gap between the data - prevents data leakage
                train_cutoff_time = test_train_split_time - dt.timedelta(days=7)
                test_cutoff_time = test_train_split_time + dt.timedelta(days=7)
                print('cutoff time:', train_cutoff_time, test_cutoff_time)
                # train and test subsets
                # NOTE(review): `train` is built but never used below --
                # presumably kept for parity with the training notebook.
                train = df[df['closing_time'] < train_cutoff_time]
                test = df[df['closing_time'] > test_cutoff_time]
                # printing shapes to track progress
                print('train and test shape: ', train.shape, test.shape)
                # specifying features for model to use; not using open, high, or
                # low, which are highly correlated with close and do not improve
                # model performance
                features = ['close_exchange_1', 'base_volume_exchange_1',
                            'nan_ohlcv_exchange_1', 'volume_adi_exchange_1', 'volume_obv_exchange_1',
                            'volume_cmf_exchange_1', 'volume_fi_exchange_1', 'volume_em_exchange_1',
                            'volume_vpt_exchange_1', 'volume_nvi_exchange_1', 'volatility_atr_exchange_1',
                            'volatility_bbhi_exchange_1', 'volatility_bbli_exchange_1',
                            'volatility_kchi_exchange_1', 'volatility_kcli_exchange_1',
                            'volatility_dchi_exchange_1', 'volatility_dcli_exchange_1',
                            'trend_macd_signal_exchange_1', 'trend_macd_diff_exchange_1', 'trend_adx_exchange_1',
                            'trend_adx_pos_exchange_1', 'trend_adx_neg_exchange_1',
                            'trend_vortex_ind_pos_exchange_1', 'trend_vortex_ind_neg_exchange_1',
                            'trend_vortex_diff_exchange_1', 'trend_trix_exchange_1',
                            'trend_mass_index_exchange_1', 'trend_cci_exchange_1',
                            'trend_dpo_exchange_1', 'trend_kst_sig_exchange_1',
                            'trend_kst_diff_exchange_1', 'trend_aroon_up_exchange_1',
                            'trend_aroon_down_exchange_1',
                            'trend_aroon_ind_exchange_1',
                            'momentum_rsi_exchange_1', 'momentum_mfi_exchange_1',
                            'momentum_tsi_exchange_1', 'momentum_uo_exchange_1',
                            'momentum_stoch_signal_exchange_1',
                            'momentum_wr_exchange_1', 'momentum_ao_exchange_1',
                            'others_dr_exchange_1', 'close_exchange_2',
                            'base_volume_exchange_2', 'nan_ohlcv_exchange_2',
                            'volume_adi_exchange_2', 'volume_obv_exchange_2',
                            'volume_cmf_exchange_2', 'volume_fi_exchange_2',
                            'volume_em_exchange_2', 'volume_vpt_exchange_2',
                            'volume_nvi_exchange_2', 'volatility_atr_exchange_2',
                            'volatility_bbhi_exchange_2',
                            'volatility_bbli_exchange_2',
                            'volatility_kchi_exchange_2',
                            'volatility_kcli_exchange_2',
                            'volatility_dchi_exchange_2',
                            'volatility_dcli_exchange_2',
                            'trend_macd_signal_exchange_2',
                            'trend_macd_diff_exchange_2', 'trend_adx_exchange_2',
                            'trend_adx_pos_exchange_2', 'trend_adx_neg_exchange_2',
                            'trend_vortex_ind_pos_exchange_2',
                            'trend_vortex_ind_neg_exchange_2',
                            'trend_vortex_diff_exchange_2', 'trend_trix_exchange_2',
                            'trend_mass_index_exchange_2', 'trend_cci_exchange_2',
                            'trend_dpo_exchange_2', 'trend_kst_sig_exchange_2',
                            'trend_kst_diff_exchange_2', 'trend_aroon_up_exchange_2',
                            'trend_aroon_down_exchange_2',
                            'trend_aroon_ind_exchange_2',
                            'momentum_rsi_exchange_2', 'momentum_mfi_exchange_2',
                            'momentum_tsi_exchange_2', 'momentum_uo_exchange_2',
                            'momentum_stoch_signal_exchange_2',
                            'momentum_wr_exchange_2', 'momentum_ao_exchange_2',
                            'others_dr_exchange_2', 'year', 'month', 'day',
                            'higher_closing_price', 'pct_higher',
                            'arbitrage_opportunity', 'window_length']
                # specifying name of target column
                target = 'target'
                # separating features from target; keep the timestamps aside
                # so they can be re-attached to the prediction frame
                ct = test['closing_time']
                X_test = test[features]
                y_test = test[target]
                print('train test shapes:', X_test.shape, y_test.shape)
                model = pickle.load(open(model_path, 'rb'))
                print('model loaded')
                # make predictions
                y_preds = model.predict(X_test)
                print('predictions made!')
                X_test['pred'] = y_preds
                df = X_test
                df = pd.concat([ct, df], axis=1)
                # signed relative gap between the two exchanges' closes
                df['pct_diff'] = (df['close_exchange_1'] - df['close_exchange_2']) / df['close_exchange_1']
                df['zero'] = 0
                # 'arb_ex1_ex2' # 1 = ex1 -> ex2, -1 = ex2 -> ex1, 0 = no arb
                df['arb_ex1_ex2'] = df['pred'].apply(lambda x: 1 if x == 1 else np.NaN)
                df['arb_ex2_ex1'] = df['pred'].apply(lambda x: 1 if x == -1 else np.NaN)
                # scale the 0/1 arbitrage flags by the price gap for plotting
                df['arb_ex1_ex2'] = (df['arb_ex1_ex2'] * df['pct_diff'])
                df['arb_ex2_ex1'] = (df['arb_ex2_ex1'] * df['pct_diff'])
                df_dict[model_name] = df
    return df_dict
# Run the scoring pipeline over every (csv, model) pair.
df_dict = arb(csv_paths, model_paths)
import numpy as np
from scipy.signal import savgol_filter
# df_dict['bitfinex_coinbase_pro_ltc_usd'][['closing_time', 'close_exchange_1', 'close_exchange_2', 'pred', 'pct_diff']]
# NOTE(review): this cell plots `df`, the last frame left over from the arb()
# loop, and reads '2_minus_1' / 'markers' columns that are never created in
# this notebook -- confirm where those columns are added.
fig, ax = plt.subplots(figsize=(40, 8))
sns.set(style="ticks", context="talk")
plt.style.use("dark_background")
flatui = ['#63e4a7', '#4eb9ff', '#4bc1f6', '#4fc9e5', '#57d5c7', '#5dddb9', '#63e4a7']
myFmt = mdates.DateFormatter("%b %d '%y ")
sns.set_palette(flatui)
# Y2 = savgol_filter(df['2_minus_1'], 101, 3)
flatui1 = ['#63e4a7']
sns.set_palette(flatui)
# price-difference line plus red markers where the model flags arbitrage
ax1 = sns.lineplot(x='closing_time', y='2_minus_1', data=df[6000:20000], linewidth=.5, label='Price Difference %');
ax3 = sns.regplot(x='closing_time', y='markers', data=df[6000:20000], marker="o", fit_reg=False, scatter_kws={"zorder": 10, "color": "red", "alpha": .5, "s": 10}, label='Arb Ex.1 to Ex.2');
sns.despine(left=True)
# minimalist grey axis styling
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.set_ylabel('');
ax.set_xlabel('');
plt.setp(ax.get_xticklabels(), rotation=45)
ax.xaxis.set_major_formatter(myFmt)
ax.tick_params(axis='x', colors='grey')
ax.tick_params(axis='y', colors='grey')
ax.spines['bottom'].set_color('grey')
ax.spines['bottom'].set_linewidth(0.5)
for tick in ax.get_xticklabels():
    tick.set_fontname("avenir")
for tick in ax.get_yticklabels():
    tick.set_fontname("avenir")
lines, labels = ax1.get_legend_handles_labels()
leg = plt.legend(lines, labels, loc='upper left')
leg.get_frame().set_linewidth(0.0)
sns.set_context("poster", font_scale=1, rc={"grid.linewidth": 8})
ax.set_title("bitfinex_coinbase_pro_ltc_usd", fontsize=12);
```
# Arbitrage Visualization Function
csv has 3 columns ['closing_time', '2_minus_1', 'pred']
```
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# looks thru dir for csv
# For each simple csv ('closing_time', '2_minus_1'-style pct diff, 'pred'),
# render a dark-themed price-difference chart with arbitrage markers and
# save it as a png.
for filename in os.listdir('simple_csvs'):
    if filename.endswith('.csv'):
        print(filename)
        # create df from csv
        df = pd.read_csv('simple_csvs/' + filename, index_col=0)
        # cutting df for smaller df (more recent time)
        # 1571356800 is a unix timestamp -- presumably mid-October 2019; confirm
        df = df[df['closing_time'] >= 1571356800]
        # change closetime to datetime format
        df['closing_time'] = pd.to_datetime(df['closing_time'], unit='s')
        print("plotting...")
        # creating figure and setting size
        fig, ax = plt.subplots(figsize=(18, 10))
        # NOTE(review): clearing the freshly created figure looks unintentional -- confirm
        fig.clf()
        sns.set(style="ticks", context="talk")
        # setting dark background
        plt.style.use("dark_background")
        # using these color palette
        flatui = ['#4eb9ff', '#4bc1f6', '#4fc9e5',
                  '#57d5c7', '#5dddb9', '#63e4a7']
        # seperate df into 6
        sixth_of_length = (len(df)//6)
        # create 6 lineplots to create a gradient look
        sns.set_palette(flatui)
        ax = sns.lineplot(x='closing_time', y='2_minus_1_over_1',
                          data=df[:sixth_of_length])
        for i in range(1, 6):
            sns.set_palette(flatui)
            # only the middle segment carries the legend label
            if i != 3:
                ax = sns.lineplot(x='closing_time', y='2_minus_1_over_1',
                                  data=df[(sixth_of_length*i):
                                          (sixth_of_length*(i+1))])
            else:
                ax = sns.lineplot(x='closing_time', y='2_minus_1_over_1',
                                  data=df[(sixth_of_length*i):
                                          (sixth_of_length*(i+1))],
                                  label='Percentage Price Difference')
        # creating 2 df seperated by prediction direction
        df_1_to_2 = df[df['pred'] == 1]
        df_2_to_1 = df[df['pred'] == -1]
        # creating 2 scatter plot with new df
        # arbitrage indicator
        dots_1_to_2 = sns.scatterplot(x='closing_time', y='2_minus_1_over_1',
                                      data=df_1_to_2, color='indigo',
                                      zorder=10, label='Arbitrage from '
                                      'Exchange 1 to 2')
        dots_2_to_1 = sns.scatterplot(x='closing_time', y='2_minus_1_over_1',
                                      data=df_2_to_1, color='violet',
                                      zorder=10, label='Arbitrage from '
                                      'Exchange 2 to 1')
        sns.despine(left=True, right=True, top=True)
        ax.set_xlabel('')
        ax.set_ylabel('')
        # setting y tick labels as percentages
        vals = ax.get_yticks()
        ax.set_yticklabels(['{:,.2%}'.format(x) for x in vals])
        # get rid of '-' on tick label
        ax.tick_params(axis=u'both', which=u'both', length=0)
        # change x and y tick color grey
        ax.tick_params(axis='x', colors='grey')
        ax.tick_params(axis='y', colors='grey')
        # set bottom spine grey and change linewidth
        ax.spines['bottom'].set_color('grey')
        ax.spines['bottom'].set_linewidth(0.5)
        # change font style
        for tick in ax.get_xticklabels():
            tick.set_fontname("avenir")
        for tick in ax.get_yticklabels():
            tick.set_fontname("avenir")
        # format datetime for x-axis tick label
        myFmt = mdates.DateFormatter("%b. %d ")
        ax.xaxis.set_major_formatter(myFmt)
        # rotate x-tick labels at a 45 degree angle
        plt.setp(ax.get_xticklabels(), rotation=45)
        # creates Legend
        lines, labels = ax.get_legend_handles_labels()
        leg = plt.legend(lines, labels, loc='upper left')
        leg.get_frame().set_linewidth(0.0)
        # lookup tables used to turn the filename into a human-readable title
        currency_pairs = {'bch_btc': 'bch/btc'.upper(),
                          'bch_usd': 'bch/usd'.upper(),
                          'bch_usdt': 'bch/usdt'.upper(),
                          'eos_usdt': 'eos/usdt'.upper(),
                          'etc_usd': 'etc/usd'.upper(),
                          'eth_btc': 'eth/btc'.upper(),
                          'eth_usdc': 'eth/usdc'.upper(),
                          'ltc_btc': 'ltc/btc'.upper(),
                          'ltc_usd': 'ltc/usd'.upper(),
                          'ltc_usdt': 'ltc/usdt'.upper()}
        exchange_pairs = {'bitfinex_coinbase_pro_': 'Bitfinex/Coinbase Pro ',
                          'bitfinex_gemini_': 'Bitfinex/Gemini ',
                          'bitfinex_hitbtc_': 'Bitfinex/Hitbtc ',
                          'coinbase_pro_gemini_': 'Coinbase Pro/Gemini ',
                          'coinbase_pro_hitbtc_': 'Coinbase Pro/Hitbtc ',
                          'gemini_hitbtc_': 'Gemini/Hitbtc ',
                          'kraken_gemini_': 'Kraken/Gemini '}
        name = filename
        for currency_pair in currency_pairs:
            if currency_pair in name:
                name = name.replace(currency_pair,
                                    currency_pairs[currency_pair])
        for exchange_pair in exchange_pairs:
            if exchange_pair in name:
                name = name.replace(exchange_pair,
                                    exchange_pairs[exchange_pair])
        # set titles
        ax.set_title(name.replace('.csv', ''), fontsize=12);
        # save figures into png folder
        fig = ax.get_figure()
        fig.savefig('pngs/' + filename.replace('.csv', '.png'))
print("done!")
```
| github_jupyter |
### Coupling GIPL and ECSimpleSnow models
Before you begin, install:
```conda install -c conda-forge pymt pymt_gipl pymt_ecsimplesnow seaborn```
```
import pymt.models
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import matplotlib.colors as mcolors
from matplotlib.colors import LinearSegmentedColormap
sns.set(style='whitegrid', font_scale= 1.2)
```
#### Load ECSimpleSnow module from PyMT
```
# Instantiate the ECSimpleSnow BMI component and show its exchange items.
ec = pymt.models.ECSimpleSnow()
print(ec.name)
# List input and output variable names.
print(ec.output_var_names)
print(ec.input_var_names)
```
#### Load GIPL module from PyMT
```
# Instantiate the GIPL permafrost BMI component and show its exchange items.
gipl = pymt.models.GIPL()
print(gipl.name)
# List input and output variable names.
print(gipl.output_var_names)
print(gipl.input_var_names)
```
Call the setup method on both ECSimpleSnow and GIPL to get default configuration files and data.
```
# Fetch default configuration files and data into the working directory.
ec_defaults = ec.setup('.')
print(ec_defaults)
gipl_defaults = gipl.setup('.')
print(gipl_defaults)
ec.initialize('snow_model.cfg')
gipl.initialize('gipl_config.cfg')
# Get soil depth: [unit: m]
depth = gipl.get_grid_z(2)
n_depth = int(len(depth))
# Get the length of forcing data:
ntime = int(gipl.end_time)
# Define a variable to store soil temperature through the time period
tsoil = np.zeros((n_depth, ntime)) * np.nan
print('Final soil temperatures will be ', tsoil.shape)
# Dashboard layout: four small input/output panels plus one tall
# depth-profile axis spanning the right column.
fig = plt.figure(figsize=[12, 6])
ax2 = fig.add_subplot(2, 3, 1)
ax2.set_title('Air Temperature (Input)')
ax3 = fig.add_subplot(2, 3, 2)
ax3.set_title('Precipition (Input)')
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_title('Snow Depth (EC Output)')
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_title('Snow Density (EC Output)')
ax1 = fig.add_subplot(2, 3, (3, 6))
ax1.set_ylim([15, 0])  # depth axis: surface at top
ax1.set_xlim([-20, 20])
ax1.set_xlabel('Soil Temperature ($^oC$)')
ax1.set_ylabel('Depth (m)')
ax1.plot([0, 0], [15, 0], 'k--')  # 0 degC reference line
# One-way daily coupling: snow-model outputs feed the GIPL inputs.
for i in np.arange(365):
    ec.update()  # Update Snow Model Once
    # Get output from snow model
    tair = ec.get_value('land_surface_air__temperature')
    prec = ec.get_value('precipitation_mass_flux')
    snd = ec.get_value('snowpack__depth', units='m')
    rsn = ec.get_value('snowpack__mass-per-volume_density', units='g cm-3')
    # Pass value to GIPL model
    gipl.set_value('land_surface_air__temperature', tair)
    gipl.set_value('snowpack__depth', snd)
    # NOTE(review): conductivity derived as k = 2.846 * rho^2 -- presumably
    # an empirical snow-conductivity relation; confirm the source.
    gipl.set_value('snow__thermal_conductivity', rsn * rsn * 2.846)
    gipl.update()  # Update GIPL model Once
    tsoil[:, i] = gipl.get_value('soil__temperature')  # Save results to a matrix
    # faint grey profile per day; inputs/outputs as daily scatter points
    ax1.plot(tsoil[depth >= 0, i], depth[depth >= 0], color=[0.7, 0.7, 0.7], alpha=0.1)
    ax2.scatter(i, tair, c='k')
    ax3.scatter(i, prec, c='k')
    ax4.scatter(i, snd, c='k')
    ax5.scatter(i, rsn, c='k')
# Annual envelope (max/min/mean profiles) over the daily traces.
ax1.plot(tsoil[depth >= 0, :].max(axis=1), depth[depth >= 0], 'r', linewidth=2, label='Max')
ax1.plot(tsoil[depth >= 0, :].min(axis=1), depth[depth >= 0], 'b', linewidth=2, label='Min')
ax1.plot(tsoil[depth >= 0, :].mean(axis=1), depth[depth >= 0], 'k', linewidth=2, label='Mean')
ax1.legend()
ax1.set_title('Ground Temperatures (GIPL output)')
ax2.set_xticks([])
ax3.set_xticks([])
# Contour plot of soil temperature through time (x) and depth (y).
fig = plt.figure(figsize=[9, 4])
# DivergingNorm was renamed TwoSlopeNorm in matplotlib 3.2 and removed in
# 3.4; TwoSlopeNorm keeps 0 degC at the center of the diverging colormap.
divnorm = mcolors.TwoSlopeNorm(vmin=-25., vcenter=0., vmax=10)
plt.contourf(np.arange(ntime), depth, tsoil, np.linspace(-25, 10, 15),
             norm=divnorm,
             cmap="RdBu_r", extend='both')
plt.ylim([5, 0])  # depth axis: surface on top
cb = plt.colorbar()
plt.xlabel('Day')
plt.ylabel('Depth (m)')
cb.ax.set_ylabel('Soil Temperature ($^oC$)')
plt.contour(np.arange(ntime), depth, tsoil, [0])  # 0 degC isotherm
```
| github_jupyter |
## Questionário 22 (Q22)
**Orientações para submissão:**
- Registre suas respostas no questionário de mesmo nome no SIGAA.
- O tempo de registro das respostas no questionário será de 10 minutos. Portanto, resolva primeiro as questões e depois registre-as.
- Haverá apenas 1 (uma) tentativa de resposta.
- Submeta seu arquivo-fonte (utilizado para resolver as questões) em formato _.ipynb_ pelo SIGAA anexando-o à Tarefa denominada "Envio de arquivo" correspondente ao questionário.
*Nota:* o arquivo-fonte será utilizado apenas como prova de execução da tarefa. Nenhuma avaliação será feita quanto ao estilo de programação.
<hr>
**Questão 1**. O texto abaixo é uma mensagem encriptada. Cada grupo de 4 caracteres corresponde a um número hexadecimal.
```
0x45 0x6d 0x20 0x61 0x6c 0x67 0x75 0x6d 0x20 0x6c 0x75 0x67 0x61 0x72 0x2c 0x20 0x61 0x6c 0x67 0x6f 0x20 0x69 0x6e 0x63 0x72 0xed 0x76 0x65 0x6c 0x20 0x65 0x73 0x74 0xe1 0x20 0x65 0x73 0x70 0x65 0x72 0x61 0x6e 0x64 0x6f 0x20 0x70 0x61 0x72 0x61 0x20 0x73 0x65 0x72 0x20 0x64 0x65 0x73 0x63 0x6f 0x62 0x65 0x72 0x74 0x6f 0x2e
```
Use seus conhecimentos de funções _built_in_ para decodificar a mensagem, que é inteligível na Língua Portuguesa. Em seguida, determine quais são os caracteres da mensagem que correspondem, respectivamente, ao maior e menor valor binário entre os elementos da sequência. Assinale a alternativa que melhor descreve a mensagem decodificada, o caracter associado ao maior valor binário e o caracter associado ao menor valor binário, nesta sequência.
A. `'Em nenhum lugar, todo possível está aguardando para ser manifesto'`, `'ê'` e `' '`.
B. `'Em algum lugar, tudo incrível está esperando para ser incompleto.'`, `'s` e `'a'`.
C. `'Em nenhum lugar, algo possível deve aguardar para ser descoberto'`, `'ê'` e `'í'`.
D. `'Em algum lugar, algo incrível está esperando para ser descoberto.'`, `'í` e `' '`.
_Obs.:_ Considere que os espaços na mensagem original não devem ser considerados como caracteres na mensagem decodificada e que ali servem apenas para separar os quartetos hexadecimais.
**Questão 2**. Jonas é um jovem promissor que conquistou um excelente emprego, mas sofre com a indisciplina financeira. Ele paga o aluguel da casa onde mora sempre com atraso de alguns dias e extrapola o limite do cartão de crédito com frequência. Neste mês, Jonas pagou seu aluguel de <span> R$</span> 6.500,00 com 12 dias de atraso e hoje faz 6 dias que a fatura de seu cartão, fechada em <span> R$</span> 7.234,77, venceu.
A imobiliária que administra a casa de Jonas usa a seguinte regra para calcular o valor adicional devido em caso de atraso no pagamento do aluguel:
- mora de 6,25% sobre o valor do aluguel + juro simples de 0,025% ao dia
A administradora de seu cartão de crédito, por outro lado, usa a seguinte regra para calcular o valor adicional devido em caso de atraso no pagamento da fatura do cartão:
- juro composto de 1,44% ao dia.
Crie uma função para calcular o valor total atualizado $V_T$ que Jonas deverá desembolsar, em reais, para quitar as despesas citadas. Então, marque a alternativa correta.
A. <span> R$</span> 19.048,09
B. <span> R$</span> 19.396,08
C. <span> R$</span> 14.808,54
D. <span> R$</span> 16.396,77
**Questão 3**. O Ministério da Saúde disponibiliza uma lista de remédios através do programa _Farmácia Popular_. Clicando [aqui](https://antigo.saude.gov.br/images/pdf/2019/janeiro/07/Lista-Medicamentos.pdf), você será redirecionado a uma dessas listas. Crie um _dict_ em Python com as informações relevantes contidas na tabela do arquivo PDF. Em seguida, crie uma função regular que recebe o seu _dict_ como argumento e retorne 3 objetos: um _str_, um _tuple_ e um _int_, os quais nesta, ordem, responderão às seguintes perguntas:
- Para que doença a maior quantidade de remédios na gratuidade é indicada?
- Qual é a quantidade de remédios nas classes _gratuidade_ e _copagamento_?
- Quantos remédios têm a letra C como inicial de seu nome?
Assinale a alternativa correta:
A. `'HIPERTENSÃO', (20, 15), 3`
B. `'ASMA', (20, 15), 7`
C. `'DIABETES', (10, 20), 8`
D. `'ASMA', (18, 17), 6`
_Obs.:_ tente usar funções anônimas sempre que possível.
| github_jupyter |
## Individual Variable Data Exploration Notebook
```
import numpy as np
import pandas as pd
import missingno as msno
import matplotlib.pyplot as plt
import seaborn as sns
# Load the training claims data; `sample` gives a quick sanity peek.
data_train = pd.read_csv('claim_data_v2_train.csv')
data_train.sample(3)
def visualize_cat(attr, df=data_train):
    """Bar chart of category frequencies for `attr`, overlaid with the
    per-category fraud rate on a secondary axis (0..1)."""
    stats = df[['Fraudulent_Claim', attr]].groupby([attr])['Fraudulent_Claim'].agg(['count', 'sum', 'mean'])
    rate_col = 'Pct Fraud'
    stats = stats.rename({'count': 'frequency', 'sum': 'Survivers', 'mean': rate_col}, axis='columns')
    print(stats)
    fig, freq_ax = plt.subplots()
    rate_ax = freq_ax.twinx()
    stats['frequency'].plot.bar(ax=freq_ax, alpha=.8)
    stats[rate_col].plot.line(ax=rate_ax, color='k')
    freq_ax.set_ylabel('frequency')
    rate_ax.set_ylabel('Pct Fraud')
    rate_ax.set_ylim(bottom=0, top=1, auto=False)
    rate_ax.legend()
data_train["Fraudulent_Claim"].describe()
#Convert Fraud Claim data from Y/N to 1/0
data_train.Fraudulent_Claim.replace(('Y', 'N'), (1, 0), inplace=True)
#Test to see if fraud claim data converted correctly
data_train.head()
data_train.columns
data_train.describe(include='all')
# missing data
msno.matrix(data_train)
corrmat= data_train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
```
# Describe Each Variable - Find Outliers
#### Claim Amount
```
data_train['Claim_Amount'].hist()
```
Log transformation
```
data_train['Claim_Amount'].apply(np.log).hist()
```
#### Income
Income has a major outlier of $10M
```
data_train['Income'].describe()
```
Top 10 Earners
```
data_train['Income'].sort_values(ascending=False).head(10)
data_train['Income'].plot.box()
```
Drop values above the boxplot (greater than Q3+1.5*IQR)
```
# Outlier cutoff per the boxplot rule: Q3 + 1.5 * IQR.
# (The original computed Q3 + IQR, dropping the 1.5 factor the markdown
# above describes.)
q1 = data_train['Income'].quantile(.25)
q3 = data_train['Income'].quantile(.75)
cutoff = q3 + 1.5 * (q3 - q1)
print("Cutoff value:", cutoff)
data_train['Income'][data_train['Income'] < cutoff].plot.box()
```
Graph histogram without 0 income and without outliers
```
data_train['Income'][(data_train['Income']<cutoff) & (data_train['Income']>0)].hist()
```
#### Premium
Validate that Annual Premium is 12 times Monthly Premium
```
data_train[data_train['Monthly_Premium'] * 12 != data_train['Annual_Premium']]
data_train['Monthly_Premium'].describe()
```
Premium is approximately uniformly distributed between 50 and 140
```
data_train['Monthly_Premium'].hist()
# Months-since-last-claim: summary stats, outlier boxplot, 6-month bins.
data_train['Months_Since_Last_Claim'].describe()
data_train['Months_Since_Last_Claim'].plot.box()
data_train['Months_Since_Last_Claim'].hist(bins=range(0, 72, 6))
data_train['Months_Since_Policy_Inception'].hist()
data_train['Outstanding_Balance'].hist()
def get_categorical_dist(attr):
    """Return per-category row counts of column `attr`, largest first."""
    counts = data_train.groupby(attr).size()
    return counts.sort_values(ascending=False)
# Category distributions for every categorical column.
get_categorical_dist('State_Code')
get_categorical_dist('Education')
get_categorical_dist('Employment_Status')
get_categorical_dist('Gender')
get_categorical_dist('Marital_Status')
get_categorical_dist('Location')
get_categorical_dist('Claim_Cause')
get_categorical_dist('Claim_Report_Type')
get_categorical_dist('Vehicle_Class')
get_categorical_dist('Vehicle_Model')
get_categorical_dist('Fraudulent_Claim')
get_categorical_dist('Claim_Date')
# missing data counts per column
data_train.isna().sum()
# Violin plots: claim amount and income distributions split by fraud label.
sns.catplot(x="Fraudulent_Claim", y="Claim_Amount", kind="violin", data=data_train);
sns.catplot(x="Fraudulent_Claim", y="Income", kind="violin", data=data_train);
```
| github_jupyter |
```
import gust
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from numpy import matrix
import scipy
import scipy.sparse as sp
import torch.distributions as dist
from time import time
from sklearn.model_selection import StratifiedShuffleSplit
from scipy.spatial.distance import squareform
# Load the dataset using `gust` library.
# graph.standardize() makes the graph unweighted, undirected and selects
# the largest connected component; graph.unpack() returns the adjacency
# matrix A, feature matrix X and labels y.
A, X, _, y = gust.load_dataset('cora').standardize().unpack()
sss = StratifiedShuffleSplit(n_splits=5, test_size=0.8, random_state=0)
# StratifiedShuffleSplit.split yields (train_index, test_index) pairs; the
# original call referenced undefined names (`self`, `emb`, `Y`) and tried
# to unpack the generator itself -- take the first split instead.
train_index, test_index = next(sss.split(A, y))
train_X = A[train_index]
test_X = A[test_index]
train_y = y[train_index]
test_y = y[test_index]
# All new tensors default onto the GPU from here on.
torch.set_default_tensor_type('torch.cuda.FloatTensor')
A, X, _, z = gust.load_dataset('cora_ml').standardize().unpack()
sss = StratifiedShuffleSplit(n_splits=5, test_size=0.8, random_state=0)
#adj = torch.FloatTensor(A.toarray()).cuda()
#A = A[0:4,0:4]
def laplacian(A):
    """Combinatorial graph Laplacian L = D - A for a sparse adjacency A."""
    degrees = np.ravel(A.sum(1))
    return sp.diags(degrees) - A
def sym_normalized_laplacian(A):
    """Symmetrically normalized adjacency D^{-1/2} A D^{-1/2}.

    (Despite the name, this is the normalized adjacency, not I minus it.)
    Zero-degree rows map to zero instead of dividing by zero.
    """
    degrees = np.ravel(A.sum(1))
    roots = np.sqrt(degrees)
    ones = np.ones(roots.shape[0])
    inv_roots = np.divide(ones, roots, out=np.zeros_like(ones), where=degrees != 0)
    return sp.diags(inv_roots) * A * sp.diags(inv_roots)
def Transition(A):
    """Random-walk transition matrix P = D^{-1} A.

    Zero-degree rows map to zero instead of dividing by zero.
    """
    degrees = np.ravel(A.sum(1))
    ones = np.ones(degrees.shape[0])
    inv_degrees = np.divide(ones, degrees, out=np.zeros_like(ones), where=degrees != 0)
    return sp.diags(inv_degrees) * A
def PPR(A):
    """Personalized PageRank matrix, as in https://openreview.net/pdf?id=H1gL-2A9Ym
    with the hyperparameter alpha = 0.1 used there:

        P = alpha * (I - (1 - alpha) * D^{-1/2} (A + I) D^{-1/2})^{-1}

    Debug prints from the original were removed. Returns a dense ndarray.
    """
    alpha = 0.1
    degrees = np.ravel(A.sum(1))
    roots = np.sqrt(degrees)
    ones = np.ones(roots.shape[0])
    # zero-degree rows map to zero instead of dividing by zero
    root_inv = np.divide(ones, roots, out=np.zeros_like(ones), where=degrees != 0)
    # symmetrically normalized adjacency with self-loops added
    A_tilde = sp.diags(root_inv) * (A + sp.identity(A.shape[0])) * sp.diags(root_inv)
    to_invert = sp.identity(A.shape[0]) - (1 - alpha) * A_tilde
    # pinv handles the (rare) singular case gracefully
    return alpha * np.linalg.pinv(to_invert.toarray())
def NetMF(A):
    """NetMF-style embedding: factorize the closed-form matrix that
    DeepWalk implicitly factorizes (Qiu et al., WSDM 2018).

    A : scipy.sparse adjacency matrix.
    Returns L = U * diag(sqrt(s + eps)) from the SVD of log(max(M, 1)).

    NOTE(review): A.todense() yields np.matrix, so `*` below performs
    MATRIX multiplication, not elementwise products — this function
    relies on np.matrix semantics; confirm before porting to ndarrays.
    """
    eps=1e-5
    #volume of the graph, usually for weighted graphs, here weight 1
    vol = A.sum()
    #b is the number of negative samples, hyperparameter
    b = 3
    #T is the window size, as a small window size algorithm is used, set T=10, which showed the best results in the paper
    T=10
    #Transition Matrix P=D^-1A
    num_nodes = A.shape[0]
    D = np.ravel(A.sum(1))
    #D[D == 0] = 1 # avoid division by 0 error
    a=np.ones(D.shape[0])
    # safe elementwise 1/d: isolated nodes get 0 instead of inf
    D_inv = np.divide(a, D, out=np.zeros_like(a), where=D!=0)
    P = np.diag(D_inv) * A.todense()
    #Compute M = vol(G)/bT (sum_r=1^T P^r)D^-1
    sum_np=0
    for r in range(1,T+1):
        sum_np+=np.linalg.matrix_power(P,r)
    M = sum_np * np.diag(D_inv) * vol / (b*T)
    # clip at 1 so the entrywise log below is >= 0 ("truncated logarithm")
    M_max = np.maximum(M,np.ones(M.shape[0]))
    #Compute SVD of M
    u, s, vh = np.linalg.svd(np.log(M_max), full_matrices=True)
    #Compute L
    # u is np.matrix here, so `*` is a matrix product with diag(sqrt(s+eps))
    L = u*np.diag(np.sqrt(s+eps))
    print(L.sum(axis=1))  # debug: row sums of the embedding
    return L
def simrank_quick(A, C = 0.8, acc = 0.1):
    #https://link.springer.com/chapter/10.1007/978-3-642-14246-8_29
    #Algorithm 2: PAUG-SimRank: Parallel Accelerative SimRank for Undirected Graphs
    # NOTE(review): unfinished stub — only the spectral decomposition
    # (step 1) runs, then the function returns None; step 2 is commented
    # out. Also, torch.eig is deprecated/removed in recent PyTorch
    # (torch.linalg.eig is the replacement).
    #Step 1: Spectral Predecomposition
    A = A.todense()
    print(torch.tensor(A))  # debug: dense adjacency
    eigvalues, eigvectors = torch.eig(torch.tensor(A), eigenvectors=True)
    # torch.eig returns an (n, 2) tensor of [real, imag]; keep real parts
    eigvalues = eigvalues[:,0]
    #Step 2: Iterative Elementwise Matrix Multiplication
    #for i in range(eigvalues.shape[0]):
    return
def simrank(A, C = 0.8, acc = 1e-10):
    #https://link.springer.com/chapter/10.1007/978-3-642-14246-8_29
    #Algorithm 1: AUG-SimRank: Accelerative SimRank for Undirected Graphs
    # Computes SimRank twice — once in NumPy and once in PyTorch — and
    # returns both results (useful for cross-checking the two backends).
    # NOTE(review): torch.eig is deprecated/removed in recent PyTorch.
    A_torch = torch.tensor(A.todense())
    #Calculate Transition Probability Q
    Q_torch = A_torch / A_torch.sum(1, keepdims=True)
    Q = np.squeeze(np.asarray((A / np.sum(A,axis = 1))))
    #Decompose Q
    eigvalues_t, eigvectors_t = torch.eig(Q_torch, eigenvectors=True)
    eigvalues_np, eigvectors_np = np.linalg.eig(Q)
    #for undirected graphs all eigenvalues are real
    eigvectors_np=np.real(eigvectors_np)
    eigvalues_np=np.real(eigvalues_np)
    # torch.eig returns (n, 2) [real, imag] pairs; keep the real parts
    eigvalues_t_real = eigvalues_t[:,0]
    #Initialize
    #S_old = torch.eye(Q.shape[0])
    S_old_np = np.identity(Q.shape[0])
    S_old_t = torch.eye(Q_torch.shape[0])
    # M = C * Lambda Lambda^T, the elementwise iteration kernel
    M_np = C * np.diag(eigvalues_np) @ np.transpose(np.diag(eigvalues_np))
    M_t = C * torch.diag(eigvalues_t_real) @ torch.diag(eigvalues_t_real).T
    #Converge
    # fixed-point iteration until the largest update falls below `acc`
    while True:
        S_new_np = np.maximum(np.multiply(M_np, S_old_np), np.identity(M_np.shape[0]))
        if (np.absolute(S_new_np-S_old_np)).max()<acc:
            break
        S_old_np = S_new_np
    #L = eigvectors @ S_new @ np.linalg.inv(eigvectors)
    print('S_new_np: ', S_new_np)  # debug
    L_np = np.dot(eigvectors_np, np.dot(S_new_np, np.linalg.inv(eigvectors_np)))
    #Converge
    # same fixed-point iteration on the torch path
    while True:
        S_new_t = torch.max(M_t*S_old_t,torch.eye(M_t.shape[0]))
        if torch.max(torch.abs(S_new_t-S_old_t))<acc:
            break
        S_old_t = S_new_t
    print('S_new_t: ', S_new_t)  # debug
    L_t = eigvectors_t @ S_new_t @ torch.inverse(eigvectors_t)
    return L_np, L_t
# Build the Laplacian and initialize a trainable node embedding Z
# (N nodes x D dimensions); only Z is passed to the optimizer below.
L = laplacian(A)
N = A.shape[0]
D = 32
Z = nn.Parameter(torch.empty(N, D).normal_(std=0.1))
x = nn.Parameter(torch.empty(N, D).normal_(std=0.1))  # NOTE(review): unused here
opt = torch.optim.Adam([Z], lr=1e-2)
# Row/column indices of the non-zero adjacency entries (the edges).
e1, e2 = A.nonzero()
def sig(Z, b=0.1, eps=1e-8):
    """Sigmoid/Bernoulli embedding loss over all node pairs.

    Uses the module-level globals e1, e2 (edge endpoint indices) and N
    (node count). Returns the negative log-likelihood normalized by N^2.

    NOTE(review): 1/(1+exp(x)) is sigmoid(-x), so a larger dot product
    yields a LOWER edge probability here — confirm the sign is intended.
    """
    dist = torch.matmul(Z,Z.T) +b               # pairwise dot products + bias
    sigdist = 1/(1+torch.exp(dist+eps)+eps)     # "edge probability" per pair
    logsigdist = torch.log(sigdist+eps)
    pos_term = logsigdist[e1,e2]                # log-prob at observed edges
    neg_term = torch.log(1-sigdist)             # log-prob of non-edges
    neg_term[np.diag_indices(N)] = 0.0          # ignore self-pairs
    return -(pos_term.sum() + neg_term.sum()) / Z.shape[0]**2
def dist(Z, eps=1e-5):
    """Euclidean-distance (Bernoulli–Poisson style) embedding loss.

    Uses the module-level globals e1, e2 (edge indices) and N (node
    count). Edges are pushed to small pairwise distance, non-edges to
    large distance. Returns the loss normalized by N^2.
    """
    gamma = 0.1
    # pairwise Euclidean distances; eps keeps sqrt differentiable at 0
    dist = ((Z[:, None] - Z[None, :]).pow(2.0).sum(-1) + eps).sqrt()
    # non-edge term: log(1 - exp(-dist)), scaled by gamma
    neg_term = torch.log(-torch.expm1(-dist)*gamma + eps)
    neg_term[np.diag_indices(N)] = 0.0  # ignore self-pairs
    pos_term = -dist[e1, e2]            # edge term: negative distance
    neg_term[e1, e2] = 0.0              # edges excluded from the non-edge sum
    return -(pos_term.sum() + neg_term.sum()) / Z.shape[0]**2
def exp(Z, eps=1e-8):
    """Exponential-kernel embedding loss over |Z| |Z|^T similarities.

    Uses the module-level globals e1, e2 (edge indices). Returns the
    loss normalized by N^2.

    NOTE(review): `size` below is computed but never used, and this
    function shadows the common name `exp`.
    """
    #e1, e2 = similarity_measure.nonzero()
    emb_abs = torch.FloatTensor.abs(Z)              # elementwise |Z|
    dist = -torch.matmul(emb_abs, emb_abs.T)        # -|z_i|·|z_j| per pair
    neg_term = dist
    neg_term[np.diag_indices(Z.shape[0])] = 0.0     # ignore self-pairs
    expdist = torch.exp(dist)
    embedding = 1 - expdist                         # edge prob 1 - exp(-|z_i|·|z_j|)
    logdist = torch.log(embedding + eps)
    pos_term = logdist[e1, e2]                      # log-prob at observed edges
    size=Z.shape[0]
    return -(pos_term.sum() + neg_term.sum()) / Z.shape[0]**2
def kl(L, Z, eps=1e-8):
    """KL-style loss stub.

    NOTE(review): `T` is not defined at module scope (it is only a local
    inside NetMF), so calling this raises NameError; the parameter `L`
    is unused. Looks like work in progress — confirm before use.
    """
    #P=softmax(ZZ^T)
    dist=torch.matmul(Z,Z.T)
    sigdist = 1/(1+torch.exp(dist+eps)+eps)
    logsigdist = torch.log(sigdist+eps)
    losses = T*logsigdist
    return losses.sum()
# Optimize the embedding Z with Adam on the distance-based loss.
for epoch in range(500):
    opt.zero_grad()
    loss = dist(Z)
    loss.backward()
    opt.step()
    print(loss.item())  # monitor convergence per epoch
# Stratified train/test splits over features X and labels y.
# NOTE(review): X and y here come from the first ('cora') unpack at the
# top of the cell — confirm the intended dataset.
for train_index, test_index in sss.split(X, y):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
```
| github_jupyter |
# Backtesting: EW vs CW
```
import numpy as np
import pandas as pd
import edhec_risk_kit_204 as erk
%load_ext autoreload
%autoreload 2
ind49_rets = erk.get_ind_returns(weighting="vw", n_inds=49)["1974":]
ind49_mcap = erk.get_ind_market_caps(49, weights=True)["1974":]
```
In this section we'll develop a basic infrastructure to backtest various portfolio construction techniques and compare them. Here we'll start with something quite simple and straightforward - we'll write trivially simple weight-optimizers for Equally Weighted and Cap Weighted portfolios and compare them. Obviously, there are no Covariance estimates required for either one of these weighting schemes. In following sessions we'll enhance these optimizers to use more complex weighting schemes that will rely on covariance estimates.
```
def weight_ew(r):
    """
    Equal-weight portfolio: assign 1/n to each of the n assets (columns)
    of the returns DataFrame "r". Returns a pd.Series keyed by asset.
    """
    assets = r.columns
    return pd.Series(1 / len(assets), index=assets)
def backtest_ws(r, estimation_window=60, weighting=weight_ew):
    """
    Backtest a weighting scheme over rolling estimation windows.

    r : asset returns used to build the portfolio
    estimation_window : number of periods per estimation window
    weighting : function of a window's returns producing portfolio weights
    Returns the resulting portfolio return series.
    """
    n_periods = r.shape[0]
    # integer (start, stop) bounds of each rolling window; stop is
    # exclusive, matching .iloc slicing
    spans = [(s, s + estimation_window) for s in range(n_periods - estimation_window + 1)]
    rows = [weighting(r.iloc[lo:hi]) for lo, hi in spans]
    weights = pd.DataFrame(rows,
                           index=r.iloc[estimation_window - 1:].index,
                           columns=r.columns)
    # min_count=1 keeps rows whose inputs are all NA as NA instead of 0
    return (weights * r).sum(axis="columns", min_count=1)
# Backtest the equal-weight scheme and plot cumulative wealth.
ewr = backtest_ws(ind49_rets, weighting=weight_ew)
ewi = (1+ewr).cumprod()
ewi.plot(figsize=(12,6), title="49 Industries - Equally Weighted");
```
Now, let's add capweighting. We'll need to compute capweights, which we've already been provided through the marketcap file. We can refactor the code we've developed in the past to add a convenience function to our toolkit. Note the use of `**kwargs` to be able to take a variable number of keyword arguments to the function so that we can call any weighting function and let that weighting function take care of whatever arguments it needs. We'll have to refactor `weight_ew` with this new signature, but thats the only change (for now) for `weight_ew`.
```
def weight_ew(r, **kwargs):
    """
    Equal-weight portfolio weights (1/n per asset) for the columns of "r".
    Extra keyword arguments are accepted (and ignored) so this allocator
    shares a common signature with other weighting schemes.
    """
    n_assets = r.shape[1]
    return pd.Series([1 / n_assets] * n_assets, index=r.columns)
def weight_cw(r, cap_weights, **kwargs):
    """
    Cap-weighted portfolio: return the capweights row at the first date
    of the estimation window "r" (weights as provided, not renormalized).
    """
    window_start = r.index[0]
    return cap_weights.loc[window_start]
def backtest_ws(r, estimation_window=60, weighting=weight_ew, **kwargs):
    """
    Backtest a weighting scheme over rolling estimation windows.

    r : asset returns DataFrame
    estimation_window : number of periods used to estimate parameters
    weighting : function of (r, **kwargs) returning portfolio weights
    Extra keyword arguments are forwarded to the weighting function.
    Returns the portfolio return series.
    """
    n_periods = r.shape[0]
    weight_rows = []
    for start in range(n_periods - estimation_window + 1):
        window_returns = r.iloc[start:start + estimation_window]
        weight_rows.append(weighting(window_returns, **kwargs))
    weights = pd.DataFrame(weight_rows,
                           index=r.iloc[estimation_window - 1:].index,
                           columns=r.columns)
    # min_count=1: rows whose inputs are all NA stay NA rather than 0
    return (weights * r).sum(axis="columns", min_count=1)
# Compare EW vs CW: cumulative wealth plot plus summary statistics.
ewr = backtest_ws(ind49_rets)
cwr = backtest_ws(ind49_rets, weighting=weight_cw, cap_weights=ind49_mcap)
btr = pd.DataFrame({"EW": ewr, "CW": cwr})
(1+btr).cumprod().plot(figsize=(12,5), title="49 Industries - CapWeighted vs Equally Weighted")
erk.summary_stats(btr.dropna())
```
# Improving EW with CapWeight Tethering
Often in practice, we'll want to implement some sort of a modification of a pure strategy. For instance, although Equal Weight portfolios are popular, they'll be constrained in some way - for instance to match the sector weights of the cap-weighted benchmark or to make sure that microcap stocks are not overweighted. The motivation for doing so could be to make a portfolio more tradeable (e.g. some microcaps may not have the liquidity) or to improve the tracking error to the Cap-Weighted index.
As an illustration of how that can be achieved, we enhance our simple `weight_ew` allocator to (i) drop microcap stocks beyond a particular threshold, and (ii) impose a constraint that ensures that the maximum weight assigned to any stock is no more than some multiple of the weight it would be in a cap-weighted portfolio.
```
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
    """
    Equal-weight portfolio weights for the assets (columns) of "r".

    If a capweight time series is supplied, the EW weights are tethered
    to it: names whose cap weight is below `microcap_threshold` are
    dropped, and no name may exceed `max_cw_mult` times its cap weight.
    Weights are renormalized to sum to 1 after each adjustment.
    """
    weights = pd.Series(1 / len(r.columns), index=r.columns)
    if cap_weights is None:
        return weights
    cw = cap_weights.loc[r.index[0]]  # cap weights at the window start
    # 1) zero out microcaps, then renormalize
    if microcap_threshold is not None and microcap_threshold > 0:
        weights[cw < microcap_threshold] = 0
        weights = weights / weights.sum()
    # 2) cap each weight at a multiple of its cap weight, then renormalize
    if max_cw_mult is not None and max_cw_mult > 0:
        weights = np.minimum(weights, cw * max_cw_mult)
        weights = weights / weights.sum()
    return weights
def weight_cw(r, cap_weights, **kwargs):
    """
    Cap-weighted portfolio weights at the start of the estimation window,
    renormalized so they sum to one.
    """
    raw = cap_weights.loc[r.index[0]]
    return raw.div(raw.sum())
def backtest_ws(r, estimation_window=60, weighting=weight_ew, **kwargs):
    """
    Backtest a weighting scheme over rolling windows of `r`.

    r : asset returns DataFrame
    estimation_window : periods per estimation window
    weighting : callable taking (r, **kwargs) and returning weights
    Any extra keyword arguments are passed through to `weighting`.
    Returns the portfolio return series (NA where all inputs are NA).
    """
    total, win = r.shape[0], estimation_window
    # one weight row per rolling window [s, s + win)
    rows = [weighting(r.iloc[s:s + win], **kwargs) for s in range(total - win + 1)]
    weights = pd.DataFrame(rows, index=r.iloc[win - 1:].index, columns=r.columns)
    # min_count=1 -> all-NA rows stay NA instead of summing to 0
    portfolio_returns = (weights * r).sum(axis="columns", min_count=1)
    return portfolio_returns
# Compare plain EW, cap-weight-tethered EW, and CW portfolios.
ewr = backtest_ws(ind49_rets)
ewtr = backtest_ws(ind49_rets, cap_weights=ind49_mcap, max_cw_mult=5, microcap_threshold=.005)
cwr = backtest_ws(ind49_rets, weighting=weight_cw, cap_weights=ind49_mcap)
btr = pd.DataFrame({"EW": ewr, "EW-Tethered": ewtr, "CW": cwr})
(1+btr).cumprod().plot(figsize=(12,5))
erk.summary_stats(btr.dropna())
```
Don't forget to add the code we've just developed to the toolkit, we're going to use it in future sessions!
## A Final Note ...
One of the motivations of adding the tethering constraint is to improve tracking error to the cap-weighted portfolio. Let's see if we did manage to achieve that:
```
# Tracking error vs the cap-weighted benchmark: untethered EW vs
# tethered EW (the tethered version should track CW more closely).
erk.tracking_error(ewr, cwr),erk.tracking_error(ewtr, cwr)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.offline as po
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import matplotlib.pyplot as plt
import dash
import plotly.express as px
import random
import plotly.figure_factory as ff
```
# Loading Datasets
```
# Load the demo datasets used throughout this notebook.
# NOTE(review): all paths are absolute Windows paths on the author's
# machine — adjust before running elsewhere.
pokemon = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/pokemon_updated.csv")
pokemon.head(10)
stdperf = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/studentp.csv")
stdperf.head(10)
# COVID case counts, indexed and parsed by date.
corona = pd.read_csv('C:/Users/DELL/Documents/GitHub/Public/COVID-19/covid/data/countries-aggregated.csv' ,
                     index_col='Date' , parse_dates=True)
corona.head(10)
spotify = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/spotify.csv" , index_col="Date")
spotify.head(10)
housing = pd.read_csv('C:/Users/DELL/Documents/GitHub/Data-Visualization/housing.csv')
housing.tail()
insurance = pd.read_csv('C:/Users/DELL/Documents/GitHub/Data-Visualization/insurance.csv')
insurance.head(10)
employment = pd.read_excel("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/unemployment.xlsx")
employment.head(10)
helpdesk = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/helpdesk.csv")
helpdesk.head(10)
fish= pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/Fish.csv")
fish.head(10)
exercise = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/exercise.csv")
exercise.head(10)
suicide = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/suicide.csv")
suicide.head(10)
# Canada immigration data: drop metadata columns, rename the country
# column, index by country, then transpose so rows = years.
canada = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/canada.csv")
canada.head()
canada.columns
canada.drop(columns=['AREA' , 'DEV', 'DevName' , 'REG', 'Type', 'Coverage' , 'AreaName', 'RegName' ], inplace=True)
canada.head()
canada.rename(columns={'OdName':'Country'} , inplace=True)
canada.set_index(canada.Country,inplace=True)
canada.head()
canada2 = canada.copy()  # keep an untransposed copy
canada2.head()
canada.index.name=None
canada.head()
del canada['Country']
canada.head()
canada = canada.transpose()  # rows = years, columns = countries
canada.head()
```
# Sankey Diagram
```
#Simple Sankey Diagram
# Nodes are labelled countries; each link is a (source, target, value)
# triple of indices into the label list.
fig = go.Figure(
    go.Sankey(
        node = {
            "label": ["India", "USA", "China", "Pakistan", "Bangladesh", "Mexico"],
        },
        link = {
            "source": [0, 1, 2, 3, 4, 0, 2, 5],
            "target": [1, 2, 3, 4, 5, 3, 5, 3],
            "value": [300, 400, 200, 450, 700, 200,150, 200]
        }
    )
)
fig.show()
#Simple Sankey Diagram
# Same diagram with styled nodes (thickness, fill and border colors).
fig = go.Figure(
    go.Sankey(
        node = dict(
            thickness = 40, # Changing thickness of nodes
            color = "lightgreen", # Changing color of the node
            line = dict(color = "red", width = 0.5), # Changing line color
            label = ["India", "USA", "China", "Pakistan", "Bangladesh", "Mexico"],
        ),
        link = {
            "source": [0, 1, 2, 3, 4, 0, 2, 5],
            "target": [1, 2, 3, 4, 5, 3, 5, 3],
            "value": [300, 400, 200, 450, 550, 200,150, 200]
        }
    )
)
fig.show()
#Simple Sankey Diagram
# Qualitative color scales applied to nodes and links.
# NOTE(review): "source"/"target" have 8 entries but "value" has only
# 7 — the last link gets no value; confirm the intended list lengths.
fig = go.Figure(
    go.Sankey(
        node = {
            "label": ["Married: NO", "Married: Yes",
                      "Pet: No", "Pet: Yes",
                      "Happy: Yes", "Happy: No"],
            "color" : px.colors.qualitative.Set3 # Node color
        },
        link = dict(
            source = [0, 0, 1, 1, 2, 2, 3, 5],
            target = [2, 3, 2, 3, 5, 4, 4, 3],
            value = [200, 300, 400, 600, 150, 350,700],
            color = px.colors.qualitative.Set2 # Color of links
        )
    )
)
fig.show()
```
# END
| github_jupyter |
<table align="left">
<td>
<a href="https://colab.research.google.com/github/nyandwi/machine_learning_complete/blob/main/6_classical_machine_learning_with_scikit-learn/10_intro_to_unsupervised_learning_with_kmeans_clustering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
</td>
</table>
*This notebook was created by [Jean de Dieu Nyandwi](https://twitter.com/jeande_d) for the love of machine learning community. For any feedback, errors or suggestion, he can be reached on email (johnjw7084 at gmail dot com), [Twitter](https://twitter.com/jeande_d), or [LinkedIn](https://linkedin.com/in/nyandwi).*
<a name='0'></a>
# Intro to Unsupervised Learning - K-Means Clustering
K-Means clustering is a type of unsupervised learning algorithm. In unsupervised learning, the machine learning model does not get the labels during training. It instead has to figure out the labels itself. It's like learning without instructions. It's like a teacher telling you, "hey, here are 1000 exercises to use while preparing for a test, the test will be only 5 questions from all of those exercises." That can feel like a struggle, and you will do all you can to narrow down these 1000 exercises to 5. Some questions may be similar, or may be solved by one method, etc. The goal will merely be to narrow down the exercises, while maximizing the chance of passing the test.
That type of example can be compared to clustering. The model is given bunch of data (lacking labels) and the job of the model is to find the labels that can be present according to the supplied data.
K-Means Clustering require the number of clusters to be specified before training. The way this type of algorithm works is beyond the scope of this notebook but here are 3 main steps of how such algorithm work:
* K-Means will randomly assigns samples of data to initial centroids of all clusters. This step is called initialization. A centroid is also referred to as a cluster center and it is the mean of all the sample of data in a cluster.
* It then reassigns the samples to the nearest centroids.
* It also find the new centroids of all clusters by taking the mean value of all of the samples assigned to each previous centroids.
The last two steps are repeated until the stopping criterion is fulfilled or when difference between the old and new centroids is constant.
Unspervised learning has got its applications in areas such as grouping web search results, customer segmentation, news aggregation and more.
## KMeans Clustering
### Contents
* [1 - Imports](#1)
* [2 - Loading the data](#2)
* [3 - Exploratory Analysis](#3)
* [4 - Preprocessing the data](#4)
* [5 - Training K-Means Clustering to Find Clusters](#5)
* [6 - Evaluating K-Means Clustering](#6)
* [7 - Final Notes](#7)
<a name='1'></a>
## 1 - Imports
```
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import matplotlib.pyplot as plt
%matplotlib inline
```
<a name='2'></a>
## 2 - Loading the data
In this notebook, we will use a different dataset. Up to this point creating these notebooks, my goal has been to look on the other side, to try something new, to try new a dataset. If you have went through some notebooks about other algorithms, no doubt that you have learned something new or perhaps nothing new but you experienced a new dataset.
In this notebook, we will use a mushroom dataset. The dataset describes mushrooms in terms of their physical characteristics and they are classified into: poisonous or edible.
The dataset also includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like `leaflets three, let it be for Poisonous Oak and Ivy.`
The dataset contains the labels (edibility) but for the purpose of doing clustering, we will remove the labels.
```
# Let's first hide warnings just in case
import warnings
warnings.filterwarnings('ignore')
# Fetch the mushroom dataset from OpenML (needs network on first run).
from sklearn.datasets import fetch_openml
mushroom_data = fetch_openml(name='mushroom', version=1)
mushroom_data.data.shape
```
As you can see above, there are 8124 examples and 22 features.
```
# Description of the data
print(mushroom_data.DESCR)
# Displaying feature names
mushroom_data.feature_names
# Displaying target name
mushroom_data.target_names
# Getting the whole dataframe (features + 'class' target in one frame)
mushroom_data = mushroom_data.frame
```
<a name='3'></a>
## 3 - Exploratory Data Analysis
### Taking a quick look into the dataset
```
# Quick look at the first/last rows and the column dtypes.
mushroom_data.head()
# Displaying the last rows
mushroom_data.tail()
mushroom_data.info()
```
All features are categorical. So we will make sure to handle them.
### Checking Summary Statistics
```
# Summary stats (counts/uniques — all features are categorical)
mushroom_data.describe()
```
### Checking Missing Values
```
# Checking missing values (per the text, only 'stalk-root' has any)
mushroom_data.isnull().sum()
```
It seems that we have missing values in the feature `stalk-root`.
Usually there are three things to do with if them if they are present:
* We can remove all missing values completely
* We can leave them as they are or
* We can fill them with a given strategy such as mean, media or most frequent value. Either `Sklearn` or Pandas provides a quick ways to fill these kind of values.
We will handle that during the data preprocessing.
### More Data Exploration
Before preprocessing the data, let's take a look into specific features.
I want to also make note that I do not know alot about mushrooms. I thought that it would be interesting to use this real world datasets, and perhaps some people who will come across this may some of mushroom samples and their characteristics.
```
# Cap shape counts, split by edible/poisonous class.
plt.figure(figsize=(12,7))
sns.countplot(data=mushroom_data, x='cap-shape', hue='class')
```
In cap_shape, the letters stand for: `bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s`. It seems that the convex type is dominant and most of those are edible.
```
# Cap color counts, split by edible/poisonous class.
plt.figure(figsize=(12,7))
sns.countplot(data=mushroom_data, x='cap-color', hue='class')
```
The above is the cap color. The alphabets stands for `brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y `.
Also it seems that most caps are brown(n), whether edible or poisonous.
```
# Distribution of the population feature.
plt.figure(figsize=(12,7))
sns.countplot(data=mushroom_data, x='population')
```
The most common population type is 'several' (v). Here is what the letters stand for: `abundant=a,clustered=c,numerous=n, scattered=s,several=v,solitary=y`.
```
# Habitat and stalk-root distributions (stalk-root is the feature with
# the missing values).
plt.figure(figsize=(12,7))
sns.countplot(data=mushroom_data, x='habitat')
plt.figure(figsize=(12,7))
sns.countplot(data=mushroom_data, x='stalk-root')
```
Above is the feature that is missing values. We will remove all missing values. Since the missing values are of one category, we will drop it to avoid adding noise in the dataset.
And finally, we can look in the class feature. There are two categories, `e(edible)` and `p(poisonous)`.
```
# Class balance: e (edible) vs p (poisonous).
plt.figure(figsize=(12,7))
sns.countplot(data=mushroom_data, x='class')
```
<a name='4'></a>
## 4 - Data Preprocessing
Let's remove the missing values first.
```
# Drop the rows with missing 'stalk-root' values.
mushroom_df = mushroom_data.dropna()
```
For the purpose of performing clustering, we will remove the labels.
```
# Separate features from labels; labels are held out so the clustering
# step is genuinely unsupervised.
mushroom = mushroom_df.drop('class', axis=1)
mushroom_labels = mushroom_df['class']
```
Let's now convert all categorical features into the numerics.
```
# Encode every categorical feature as ordinal integers.
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder()
mushroom_prepared = encoder.fit_transform(mushroom)
mushroom_prepared
```
As you can see above, `mushroom_prepared` is a NumPy array. We can convert it back to the Pandas Dataframe although KMeans algorithm can accept both as input.
```
# Back to a DataFrame for readability (KMeans accepts either form).
mushroom_prep_df = pd.DataFrame(mushroom_prepared, columns=mushroom.columns)
mushroom_prep_df.head()
```
No alphabets anymore. They were perfectly encoded or converted to numerics representation.
We are now ready to find the labels with KMeans Clustering. Again, this is for the assumption that we do not have labels, or to make it simple, we have a data about the characteristics of different plants, but we do not know if they are edible or not. We want to use unsupervised learning to figure that out.
<a name='5'></a>
## 5 - Training K-Means Clustering to Find Clusters
We are going to create a KMeans model from `sklearn.cluster`. We will remember to provide the number of the clusters, which is 2 in our case.
```
# Fit KMeans with 2 clusters (edible vs poisonous); fixed seed makes the
# cluster assignments reproducible.
from sklearn.cluster import KMeans
k_clust = KMeans(n_clusters=2, random_state=42)
k_clust.fit(mushroom_prep_df)
```
We can access the cluster centers by `model.cluster_centers_`.
```
# Coordinates of the two cluster centers in feature space.
k_clust.cluster_centers_
```
Also, we can get the labels that the KMeans provided for each data point.
```
# Cluster assignment (0/1) for every sample.
k_labels = k_clust.labels_
k_labels
```
<a name='6'></a>
### 6 -Evaluating K-Means Clustering
In real world, evaluating the performance of KMeans is not an easy thing, because there are not true labels to compare with the clustered labels. In our case since we have them, we can find things like accuracy score, or even find the confusion matrix to display the actual and predicted classes. Not to mention classification report to find things like Recall, Precision, or F1 Score.
But again since we are merely comparing the labels(true and clustered), we do not need that extra metrics.
Before finding the accuracy score, I will first convert the true labels into the numbers or encode them. For simplicity, I will use a map function.
```
# Encode the true labels to integers and compare with the cluster labels.
# NOTE(review): KMeans cluster ids are arbitrary — under the opposite
# labeling the score would be 1 - accuracy; confirm this mapping matches
# the fitted clusters.
map_dict = {
    'p':0,
    'e':1
}
mushroom_labels_prep = mushroom_labels.map(map_dict)
mushroom_labels_prep
from sklearn.metrics import accuracy_score
accuracy_score(mushroom_labels_prep, k_labels)
```
This is not perfect, but it's still impressive. Why? Well, KMeans never saw the labels: it was only fed the data describing different characteristics of poisonous and edible mushrooms, and its job was to find patterns in the data so as to learn whether a given mushroom species is poisonous or edible.
KMeans algorithm is very useful in areas where you have a bunch of unlabeled data. Take an example in customer segmentation. You may want to provide different promotions to some groups of your customers but you have no clue of who would benefit from that particular promotion. So, you can try to find the group of customers using this algorithm. It will try to group similar customers according to their interests, and will likely appreciate the promotion.
The same concept can be applied to grouping the equipments that has similar defects in an industry. That was just mentioning few, there are more applications of KMeans clustering.
<a name='7'></a>
### 7 - Final Notes
In this notebook, we learned the idea behind unsupervised learning and KMeans clustering. We also practiced that on mushroom dataset where we were interested in grouping the species that can be poisonous or edible.
If you like mushrooms and you know some of their characteristics, no doubt that you enjoyed this notebook. Maybe pick one edible sample and make it your next meal :)
## [BACK TO TOP](#0)
| github_jupyter |
# Overnight returns
[Overnight Returns and Firm-Specific Investor Sentiment](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2554010)
> **Abtract**: We explore the possibility that overnight returns can serve as a measure of firm-specific investor sentiment by analyzing whether they exhibit characteristics expected of a sentiment measure. First, we document short-term persistence in overnight returns, consistent with existing evidence of short-term persistence in share demand of sentiment-influenced retail investors. Second, we find that short-term persistence is stronger for harder-to-value firms, consistent with evidence that sentiment plays a larger role when there is less objective data available for valuation. Third, we show that stocks with high (low) overnight returns underperform (outperform) over the longer-term, consistent with evidence of temporary sentiment-driven mispricing.
> **p 2, I**: The recent work of Berkman, Koch, Tuttle, and Zhang (2012) suggests that a stock’s
overnight (close-to-open) return can serve as a measure of firm-level sentiment.
> **p 3, I**: Specifically, Berkman et al. (2012) find that attention-generating events (high absolute returns or
strong net buying by retail investors) on one day lead to higher demand by individual investors,
concentrated near the open of the next trading day...This creates temporary price pressure at the
open, resulting in elevated overnight returns that are reversed during the trading day.
> **p 3, I**: We conduct three sets of analyses. **In the first
we test for short-run persistence in overnight returns.** The basis for expecting this from a
measure of sentiment is the evidence in Barber et al. (2009) that the order imbalances of retail
investors, who are the investors most likely to exhibit sentiment, persist for periods extending
over several weeks...In the third analysis we
examine whether stocks with high overnight returns underperform those with low overnight
returns over the long term.
## Install packages
```
# Install the project requirements into this kernel's interpreter.
import sys
!{sys.executable} -m pip install -r requirements.txt
import cvxpy as cvx
import numpy as np
import pandas as pd
import time
import os
import quiz_helper
import matplotlib.pyplot as plt
# Notebook plotting defaults ('!' and '%' lines are IPython-only syntax).
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (14, 8)
```
### data bundle
```
import os
import quiz_helper
from zipline.data import bundles
# Point zipline at the course's EOD data directory and register the
# csvdir bundle so it can be loaded below.
os.environ['ZIPLINE_ROOT'] = os.path.join(os.getcwd(), '..', '..','data','module_4_quizzes_eod')
ingest_func = bundles.csvdir.csvdir_equities(['daily'], quiz_helper.EOD_BUNDLE_NAME)
bundles.register(quiz_helper.EOD_BUNDLE_NAME, ingest_func)
print('Data Registered')
```
### Build pipeline engine
```
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import AverageDollarVolume
from zipline.utils.calendars import get_calendar
# Universe: top 500 names by 120-day average dollar volume.
universe = AverageDollarVolume(window_length=120).top(500)
trading_calendar = get_calendar('NYSE')
bundle_data = bundles.load(quiz_helper.EOD_BUNDLE_NAME)
engine = quiz_helper.build_pipeline_engine(bundle_data, trading_calendar)
```
### View Data¶
With the pipeline engine built, let's get the stocks at the end of the period in the universe we're using. We'll use these tickers to generate the returns data for the our risk model.
```
# Run the screen for a single day and pull the assets (level 1 of the
# (date, asset) MultiIndex) as a plain list of tickers.
universe_end_date = pd.Timestamp('2016-01-05', tz='UTC')
universe_tickers = engine\
    .run_pipeline(
        Pipeline(screen=universe),
        universe_end_date,
        universe_end_date)\
    .index.get_level_values(1)\
    .values.tolist()
universe_tickers
```
# Get Returns data
```
from zipline.data.data_portal import DataPortal
# DataPortal gives unified access to the bundle's daily pricing data
# (no minute data here, hence equity_minute_reader=None).
data_portal = DataPortal(
    bundle_data.asset_finder,
    trading_calendar=trading_calendar,
    first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day,
    equity_minute_reader=None,
    equity_daily_reader=bundle_data.equity_daily_bar_reader,
    adjustment_reader=bundle_data.adjustment_reader)
```
## Get pricing data helper function
```
from quiz_helper import get_pricing
```
## get pricing data into a dataframe
```
# Five years of daily prices for the universe, converted to simple
# returns; the first (NaN) row from pct_change is dropped.
returns_df = \
    get_pricing(
        data_portal,
        trading_calendar,
        universe_tickers,
        universe_end_date - pd.DateOffset(years=5),
        universe_end_date)\
    .pct_change()[1:].fillna(0) #convert prices into returns
returns_df
```
## Sector data helper function
We'll create an object for you, which defines a sector for each stock. The sectors are represented by integers. We inherit from the Classifier class. [Documentation for Classifier](https://www.quantopian.com/posts/pipeline-classifiers-are-here), and the [source code for Classifier](https://github.com/quantopian/zipline/blob/master/zipline/pipeline/classifiers/classifier.py)
```
from zipline.pipeline.classifiers import Classifier
from zipline.utils.numpy_utils import int64_dtype
class Sector(Classifier):
    """Pipeline classifier mapping each asset to an integer sector code.

    Sector codes are loaded once from a pre-computed numpy array on disk;
    positions excluded by the mask get ``missing_value``.
    """
    dtype = int64_dtype
    window_length = 0
    inputs = ()
    missing_value = -1

    def __init__(self):
        # Pre-computed sector code per asset (indexed by asset sid).
        self.data = np.load('../../data/project_4_sector/data.npy')

    def _compute(self, arrays, dates, assets, mask):
        # Look up a sector for every requested asset; masked-out entries
        # fall back to the missing value.
        sector_codes = self.data[assets]
        return np.where(mask, sector_codes, self.missing_value)
sector = Sector()  # instantiate the sector classifier defined above
```
## We'll use 2 years of data to calculate the factor
**Note:** Going back 2 years falls on a day when the market is closed. Pipeline package doesn't handle start or end dates that don't fall on days when the market is open. To fix this, we went back 2 extra days to fall on the next day when the market is open.
```
# Two years back lands on a market holiday; go back 2 extra days so the
# start date falls on an open session (pipeline requires open-market dates).
factor_start_date = universe_end_date - pd.DateOffset(years=2, days=2)
factor_start_date
```
## Walk through "Returns" class
We'll walk through how the `Returns` class works, because we'll create a new class that inherits from `Returns` in order to calculate a customized return.
### Returns inherits from CustomFactor
The zipline package has a class [zipline.pipeline.factors.Returns](https://www.zipline.io/appendix.html?highlight=returns#zipline.pipeline.factors.Returns) which inherits from class [zipline.pipeline.CustomFactor](https://www.zipline.io/appendix.html?highlight=custom%20factor#zipline.pipeline.CustomFactor). The [source code for Returns is here](https://www.zipline.io/_modules/zipline/pipeline/factors/basic.html#Returns), and the [source code for CustomFactor is here.](https://www.zipline.io/_modules/zipline/pipeline/factors/factor.html#CustomFactor)
**Please open the links to the documentation and source code and follow along with our notes about the code**
### Inputs variable
The CustomFactor class takes the `inputs` as a parameter of the constructor for the class, otherwise it looks for a class-level variable named `inputs`. `inputs` takes a list of BoundColumn instances. These help us choose what kind of price-volume data to use as input. The `Returns` class sets this to
```
inputs = [USEquityPricing.close]
```
### USEquityPricing class
The class [USEquityPricing](https://www.zipline.io/appendix.html?highlight=usequitypricing#zipline.pipeline.data.USEquityPricing) has a couple BoundColumn instances that we can choose from.
close = USEquityPricing.close
high = USEquityPricing.high
low = USEquityPricing.low
open = USEquityPricing.open
volume = USEquityPricing.volume
## Quiz 1
If we wish to calculate close to open returns, which columns from USEquityPricing do you think we'll want to put into the list and set as `inputs`?
## Quiz 1 Answer
`USEquityPricing.open` and `USEquityPricing.close`
### window_length variable
The CustomFactor class takes `window_length` (an integer) as a constructor parameter, otherwise it looks for a class-level variable named `window_length`. If we chose a `window_length = 2` then this means that we'll be passing two days' worth of data (two rows) into the `compute` function.
## Quiz 2
What window length would you choose if you were calculating daily close to open returns? Assume we have daily data.
## Answer 2
window length of 2 to have 2 days of data.
### Compute function
The function definition of the `Returns` class includes the `compute` function
```
def compute(self, today, assets, out, close):
    # One-day simple return from the close-price window:
    # newest close (index -1) vs. oldest close (index 0).
    out[:] = (close[-1] - close[0]) / close[0]
```
* `today`: this is handled by parent classes; it has the datetime for the "today" row for the given subset of data. We won't use it for this function implementation.
* `assets`: this is handled by parent classes: it has the column header names for the "out" and "close". We won't use it for this function implementation.
* `out`: this points to a numpy array that will store the result of our compute. It stores our "return" value of the `compute` function instead of explicitly returning a variable.
* `*input`: a tuple of numpy arrays that contain input data that we'll use to compute a signal. In the `Returns` definition of `compute`, the input is a single value `close`, but we can list more if we need additional columns of data to compute a return.
If we set the `window_length=2`, then the `compute` function gets two rows worth of data from `close`. The index 1 value is the most recent value, and the index 0 value is the earliest in time. Recall that in Python, the -1 index is the same as getting the highest indexed value, so with a numpy array of just length two, -1 gives us the value at index 1.
So the line of code is calculating the one-day return using the close price, and storing that into the `out` variable.
$ Return = \frac{close_1 - close_0}{close_0} $
## Quiz 3
Given a numpy array for open prices called `open` and a numpy array for close prices called `close`, what code would you write to get the most recent open price? Assume that you have 2 days of data.
## Answer 3
open[1] or open[-1] (because python lets us do reverse indexing) are valid answers. Using the -1 index allows us to get the most recent price (the very last index of the numpy array) regardless of the window length, so you may prefer to use -1 to make your code easier to maintain or modify.
## Close To Open Returns (Overnight Returns)
The close-to-open return is the change in price between when the market closed on one day and when it opened on the next. So it's
$ CloseToOpen = \frac{open_1 - close_0}{close_0}$
We'll now create a class `CTO` that inherits from `Return`, and override the `compute` function.
## Quiz 4
Create a customized class `CloseToOpenReturns` that inherits from the Returns class. Define the compute function to calculate overnight returns.
```
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import Returns
class CloseToOpenReturns(Returns):
    """Overnight (close-to-open) return factor.

    For each asset: (today's open - yesterday's close) / yesterday's close.
    """
    # Two rows of data: yesterday (index 0) and today (index -1).
    window_length = 2

    # Order matters: compute() receives these as positional arrays.
    inputs = [USEquityPricing.open, USEquityPricing.close]

    def compute(self, today, assets, out, opens, closes):
        # Overnight move relative to the prior session's close.
        prior_close = closes[0]
        latest_open = opens[-1]
        out[:] = (latest_open - prior_close) / prior_close
```
## Trailing overnight returns
The cumulative overnight returns over a week may be predictive of future returns; hence it's a kind of momentum signal.
$ TrailingOvernightReturns = \sum_{t=1}^{Days}CloseToOpen_t$
Where $Days$ could be 5 if we are looking at a weekly window.
So we want to take the `CloseToOpenReturns` as our input into another class, `TrailingOvernightReturns`, which also inherits from `Returns`.
### mask
Note that we're going to create another class that inherits from `Returns`. Recall that `Returns` inherits from [CustomFactor](https://www.zipline.io/appendix.html?highlight=factor#zipline.pipeline.CustomFactor), which has a `mask` parameter for its constructor. The `mask` parameter takes in a `Filter` object, which determines which stock series get passed to the `compute` function. Note that when we used `AverageDollarVolume` and stored its output in the variable `universe`, this `universe` variable is of type `Filter`.
## Quiz 5
If you wanted to create an object of type CloseToOpen, and also define the object so that it only computes returns on the set of stocks in universe that we selected earlier in this notebook, what code would you write?
## Answer 5
We could instantiate a CloseToOpen object with `CloseToOpenReturns(mask=universe)`, and this would only calculate close to open returns for the stocks defined in our `universe` variable.
## numpy.nansum
Numpy has a `nansum` function that treats NaN (not a number) as zeros. Note that by default, if we give numpy.nansum a 2D numpy array, it will calculate a single sum across all rows and columns. For our purposes, we want to compute a sum over 5 days (5 rows), and each column has daily close to open returns for a single stock. It helps to think of a matrix (2D numpy array) as a nested list of lists. This makes it easier to decide whether to set `axis=0` or `axis=1`.
```
tmp =
[
[stock1day1, stock2day1 ]
[stock1day2, stock2day2 ]
...
]
```
If we look at the outermost list, each element is a list that represents one day's worth of data. If we used `np.nansum(tmp,axis=0)`, this would sum across the days for each stock. If we think of this as a 2D matrix, setting `axis=0` is like calculating a sum for each column.
If we set `axis=0`, this applies `nansum` to the outermost list (axis 0), so that we end up with:
```
[
sum_of_stock_1, sum_of_stock_2
]
```
Alternatively, if we set `axis=1`, this applies `nansum` to the lists nested inside the outermost list. Each of these nested lists represent data for a single day, for all stocks, so that we get:
```
[
sum_of_day_1,
sum_of_day_2,
]
```
## Example using numpy.nansum
```
# nansum treats NaN entries as zero: no axis sums everything, axis=0 sums
# down each column, axis=1 sums across each row.
tmp = np.array([
    [1, 2, 3],
    [np.nan, np.nan, np.nan],
    [1, 1, 1]
])
print(f"Sum across rows and columns: numpy.nansum(tmp) \n{np.nansum(tmp)}")
print(f"Sum for each column: numpy.nansum(tmp,axis=0) \n{np.nansum(tmp,axis=0)}")
print(f"Sum for each row: numpy.nansum(tmp,axis=1) \n{np.nansum(tmp,axis=1)}")
```
## Quiz 6
For our purposes, we want a sum for each stock series. Which axis do you think we should choose?
## Answer 6
We want to set axis = 0 so that we have a sum for each stock (each column).
## Quiz 7
Create a class TrailingOvernightReturns that inherits from Returns and takes the cumulative weekly sum of overnight returns.
```
class TrailingOvernightReturns(Returns):
    """Trailing sum of close-to-open (overnight) returns.

    A 5-day window (one trading week) captures short-horizon persistence
    in overnight sentiment.
    """
    # One trading week of daily observations.
    window_length = 5

    # Daily overnight returns, restricted to our liquidity-screened universe.
    inputs = [CloseToOpenReturns(mask=universe)]

    def compute(self, today, assets, out, close_to_open):
        # Per-asset (column-wise) sum over the window; NaNs count as zero.
        out[:] = np.nansum(close_to_open, axis=0)
```
## Quiz 8
Create a factor by instantiating the TrailingOvernightReturns class that you just defined. Demean by sector, rank, and convert to a z-score.
```
# Build the sentiment factor: demean within each sector (sector-neutral),
# then rank cross-sectionally and convert the ranks to z-scores.
overnight_returns_factor = (
    TrailingOvernightReturns().
    demean(groupby=Sector()).
    rank().
    zscore()
)
# Pipeline restricted to our liquidity-screened universe.
p = Pipeline(screen=universe)
p.add(overnight_returns_factor, 'Overnight_Sentiment')
```
## Visualize pipeline
```
p.show_graph(format='png')  # render the pipeline's computation graph
```
## run pipeline and view the factor data
```
# Compute the factor over the 2-year window; result is indexed by (date, asset).
df = engine.run_pipeline(p, factor_start_date, universe_end_date)
df.head()
```
## Visualize factor returns
These are returns that a theoretical portfolio would have if its stock weights were determined by a single alpha factor's values.
```
from quiz_helper import make_factor_plot
# Plot theoretical factor returns (portfolio weights set by factor values).
make_factor_plot(df, data_portal, trading_calendar, factor_start_date, universe_end_date);
```
## Solutions
Check out the [solution notebook here.](./overnight_returns_solution.ipynb)
| github_jupyter |
<a href="https://colab.research.google.com/github/rajdeepd/tensorflow_2.0_book_code/blob/master/ch04/inception_v3_all_images_25_epochs_colab_modelfit.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import sys
# Detect whether this notebook is running inside Google Colab.
IN_COLAB = 'google.colab' in sys.modules
print('Google Colab? ' + str(IN_COLAB))
if not IN_COLAB:
    # Local run: show which interpreter and TensorFlow install are active.
    #!python -m pip show tensorflow
    !which python
    !python -m pip show tensorflow
    !pwd
# NOTE(review): this import and mount run even when IN_COLAB is False and
# would fail outside Colab -- consider moving them under an `if IN_COLAB:`.
from google.colab import drive
drive.mount("/content/gdrive")
!ls "/content/gdrive/My Drive/cancer_detection/metastatic_cancer"
%matplotlib inline
import sys
sys.executable
```
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/applications/vgg19
```
# Imports
import numpy as np
import pandas as pd
from glob import glob
from skimage.io import imread
import os
import shutil
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.model_selection import train_test_split
import tensorflow
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Average, Input, Concatenate, GlobalMaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
#!pip show tensorflow
TF_VERSION = tensorflow.__version__
TF_VERSION
# Base path for data and outputs: Google Drive in Colab, cwd locally.
if IN_COLAB:
    BASE = '/content/gdrive/My Drive/cancer_detection/metastatic_cancer'
else:
    BASE = '.'
# Output files
model_type='inceptionv3'
no_of_images = 'all'
EPOCHS = 25
# Plot/log folders are tagged with the TF version so runs on different
# versions do not overwrite each other.
if IN_COLAB:
    PLOTS = 'plots_'+ TF_VERSION + '_google_collab'
    VERSION = TF_VERSION
else:
    PLOTS = 'plots_' + TF_VERSION
    VERSION = TF_VERSION
# Common filename suffix: _<model>_<sample count>_<epochs>
_APPEND = '_' + model_type + '_' + str(no_of_images) + '_' + str(EPOCHS)
APPEND = _APPEND + ".png"
if IN_COLAB:
    # Create the output folder tree on Drive if it is missing.
    if not os.path.exists(BASE + "/training_logs_" + VERSION):
        os.mkdir(BASE + "/training_logs_" + VERSION)
    if not os.path.exists(BASE + "/model_summary/"):
        os.mkdir(BASE + "/model_summary/")
    if not os.path.exists(BASE + "/model_summary/" + "model_summary_" + VERSION):
        os.mkdir(BASE + "/model_summary/" + "model_summary_" + VERSION)
    if not os.path.exists(BASE + '/' + PLOTS):
        os.mkdir(BASE + '/' + PLOTS)
# Full paths for logs, model summary, plots and the saved model weights.
if IN_COLAB:
    TRAINING_LOGS_FILE = BASE + "/training_logs_" + VERSION + '/training_logs' + _APPEND + '.csv'
    MODEL_SUMMARY_FILE = BASE + "/model_summary/" "model_summary_" + VERSION + "/model_summary" + _APPEND + ".txt"
    MODEL_PLOT_FILE = BASE + '/' + PLOTS + "/model_plot_" + APPEND
    MODEL_FILE = "model_" + VERSION + "/model_" + model_type + "_all_collab.h5"
    TRAINING_PLOT_FILE = BASE + '/'+ PLOTS + "/training" + APPEND
    VALIDATION_PLOT_FILE = BASE + '/'+ PLOTS + "/validation" + APPEND
    ROC_PLOT_FILE = BASE + '/'+ PLOTS + "/roc" + APPEND
else:
    TRAINING_LOGS_FILE = "training_logs_" + VERSION + '/training_logs' + _APPEND + '.csv'
    MODEL_SUMMARY_FILE = "model_summary_" + VERSION + "/model_summary" + _APPEND + ".txt"
    MODEL_PLOT_FILE = PLOTS + "/model_plot_" + APPEND
    MODEL_FILE = "model_" + VERSION + "/model_" + model_type + "_all_collab.h5"
    TRAINING_PLOT_FILE = PLOTS + "/training" + APPEND
    VALIDATION_PLOT_FILE = PLOTS + "/validation" + APPEND
    ROC_PLOT_FILE = PLOTS + "/roc" + APPEND
# Hyperparams
SAMPLE_COUNT = 85000
#TRAINING_RATIO = 0.9
IMAGE_SIZE = 96  # NOTE(review): not referenced below -- IMAGE_SIZE2 is used instead
IMAGE_SIZE2 = 224  # input resolution fed to the InceptionV3 backbone
BATCH_SIZE = 192
VERBOSITY = 1
TESTING_BATCH_SIZE = 5000
import pathlib
data_dir = pathlib.Path(BASE)
data_dir
# Count the training images (class subfolders '0'/'1' hold .tif tiles).
data_dir_training = pathlib.Path(BASE + '/training')
image_count = len(list(data_dir_training.glob('*/*.tif')))
print(image_count)
import PIL
import PIL.Image
# Peek at one example from class folder '0'...
zeros = list(data_dir_training.glob('0/*'))
PIL.Image.open(str(zeros[0]))
import PIL
import PIL.Image
# ...and one from class folder '1'.
zeros = list(data_dir_training.glob('1/*'))
PIL.Image.open(str(zeros[0]))
training_path= BASE + '/training'
validation_path = BASE + '/validation'
# Data augmentation
# Heavy augmentation on the training set only; validation/testing images are
# just rescaled to [0, 1].
training_data_generator = ImageDataGenerator(rescale=1./255,
                                             horizontal_flip=True,
                                             vertical_flip=True,
                                             rotation_range=180,
                                             zoom_range=0.4,
                                             width_shift_range=0.3,
                                             height_shift_range=0.3,
                                             shear_range=0.3,
                                             channel_shift_range=0.3)
# Data generation
training_generator = training_data_generator.flow_from_directory(training_path,
                                                                 target_size=(IMAGE_SIZE2,IMAGE_SIZE2),
                                                                 batch_size=BATCH_SIZE,
                                                                 class_mode='binary')
validation_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(validation_path,
                                                                              target_size=(IMAGE_SIZE2,
                                                                                           IMAGE_SIZE2),
                                                                              batch_size=BATCH_SIZE,
                                                                              class_mode='binary')
# Testing reuses the validation directory. shuffle=False keeps prediction
# order aligned with testing_generator.classes for the ROC curve later.
testing_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(validation_path,
                                                                           target_size=(IMAGE_SIZE2,IMAGE_SIZE2),
                                                                           batch_size=BATCH_SIZE,
                                                                           class_mode='binary',
                                                                           shuffle=False)
# Model
# InceptionV3 backbone (library-default weights) without its classification
# head, followed by global average pooling, dropout and a sigmoid output
# for binary classification.
input_shape = (IMAGE_SIZE2, IMAGE_SIZE2, 3)
inputs = Input(input_shape)
inceptionv3 = InceptionV3(include_top=False, input_shape=(224, 224, 3))(inputs)
outputs = GlobalAveragePooling2D()(inceptionv3)
outputs = Dropout(0.5)(outputs)
outputs = Dense(1, activation='sigmoid')(outputs)
model = Model(inputs, outputs)
# NOTE(review): `lr` and `decay` are legacy Adam argument names; newer Keras
# releases expect `learning_rate` -- confirm against the TF version in use.
model.compile(optimizer=Adam(lr=0.0001, decay=0.00001),
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
plot_model(model,
           to_file=MODEL_PLOT_FILE,
           show_shapes=True,
           show_layer_names=True)
import os

# Make sure the local training-log directory exists (no-op if already there).
os.makedirs("training_logs_" + VERSION, exist_ok=True)

# Create/truncate the CSV log file, then close the handle immediately.
# The original kept it open (`file = open(...)`), leaking a file descriptor
# while CSVLogger below re-opens the same path in append mode.
with open(TRAINING_LOGS_FILE, 'w+'):
    pass
# Train; checkpoint the best weights by validation accuracy and append
# per-epoch metrics to the CSV log (semicolon-separated).
history = model.fit(training_generator,
                    steps_per_epoch=len(training_generator),
                    validation_data=validation_generator,
                    validation_steps=len(validation_generator),
                    epochs=EPOCHS,
                    verbose=VERBOSITY,
                    callbacks=[#PlotLossesKeras(),
                               ModelCheckpoint(MODEL_FILE,
                                               monitor='val_accuracy',
                                               verbose=VERBOSITY,
                                               save_best_only=True,
                                               mode='max'),
                               CSVLogger(TRAINING_LOGS_FILE,
                                         append=True,
                                         separator=';')
                               ])
history.history
# Training plots
common_title = model_type + ' with ' + str(no_of_images) + ' samples'
epochs = [i for i in range(1, len(history.history['loss'])+1)]
# Loss curves (training vs. validation), saved to disk.
plt.plot(epochs, history.history['loss'], color='blue', label="training_loss")
plt.plot(epochs, history.history['val_loss'], color='red', label="validation_loss")
plt.legend(loc='best')
#plt.title('training: ' + common_title)
plt.xlabel('epoch')
plt.savefig(TRAINING_PLOT_FILE, bbox_inches='tight')
plt.close()
# Accuracy curves (training vs. validation).
plt.plot(epochs, history.history['accuracy'], color='blue', label="training_accuracy")
plt.plot(epochs, history.history['val_accuracy'], color='red',label="validation_accuracy")
plt.legend(loc='best')
plt.title('Validation (TF '+ VERSION + '): ' + common_title )
plt.xlabel('epoch')
plt.savefig(VALIDATION_PLOT_FILE, bbox_inches='tight')
plt.show()
plt.close()
# ROC testing plot
#model.load_weights(MODEL_FILE)
# `predict_generator` is deprecated in TF 2.x; `model.predict` accepts the
# generator directly (the notebook already uses the unified `model.fit` API).
# testing_generator was created with shuffle=False, so `classes` stays
# aligned with the prediction order.
predictions = model.predict(testing_generator, steps=len(testing_generator), verbose=VERBOSITY)
false_positive_rate, true_positive_rate, threshold = roc_curve(testing_generator.classes, predictions)
area_under_curve = auc(false_positive_rate, true_positive_rate)
plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal for reference
plt.plot(false_positive_rate, true_positive_rate, label='AUC = {:.3f}'.format(area_under_curve))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve: ' + common_title + ' TF ' + VERSION)
plt.legend(loc='best')
plt.savefig(ROC_PLOT_FILE, bbox_inches='tight')
plt.show()
plt.close()
```
| github_jupyter |
# # Duramat Webinar: US NREL Electric Futures 2021
This journal simulates the Reference and High Electrification scenarios from Electrification Futures, comparing them to a glass baseline with a high-bifacial future projection.
Installed Capacity considerations from bifacial installations are not considered here.
Results from this journal were presented during Duramat's webinar April 2021 – “The Impacts of Module Reliability and Lifetime on PV in the Circular Economy" presented by Teresa Barnes, Silvana Ayala, and Heather Mirletz, NREL.
```
import os
from pathlib import Path
# All simulation output goes under PV_ICE/TEMP/DURAMAT relative to this notebook.
testfolder = str(Path().resolve().parent.parent / 'PV_ICE' / 'TEMP' / 'DURAMAT')
# Another option using relative address; for some operative systems you might need '/' instead of '\'
# testfolder = os.path.abspath(r'..\..\PV_DEMICE\TEMP')
print ("Your simulation will be stored in %s" % testfolder)
# Module materials tracked by the mass-flow simulation.
MATERIALS = ['glass','silver','silicon', 'copper','aluminium_frames']
MATERIAL = MATERIALS[0]
# Electrification Futures 2021 deployment baselines: reference case and
# low-RE/high-electrification case.
MODULEBASELINE = r'..\..\baselines\ElectrificationFutures_2021\baseline_modules_US_NREL_Electrification_Futures_2021_basecase.csv'
MODULEBASELINE_High = r'..\..\baselines\ElectrificationFutures_2021\baseline_modules_US_NREL_Electrification_Futures_2021_LowREHighElec.csv'
import PV_ICE
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
PV_ICE.__version__
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 5)
pwd
# Simulation 1: standard material baselines for both deployment scenarios.
r1 = PV_ICE.Simulation(name='Simulation1', path=testfolder)
r1.createScenario(name='base', file=MODULEBASELINE)
r1.scenario['base'].addMaterials(MATERIALS, r'..\..\baselines')
r1.createScenario(name='high', file=MODULEBASELINE_High)
r1.scenario['high'].addMaterials(MATERIALS, r'..\..\baselines')
# Simulation 2: same scenarios, but glass follows the bifacial-trend baseline.
r2 = PV_ICE.Simulation(name='bifacialTrend', path=testfolder)
r2.createScenario(name='base', file=MODULEBASELINE)
r2.scenario['base'].addMaterials(MATERIALS, r'..\..\baselines')
MATERIALBASELINE = r'..\..\baselines\PVSC_2021\baseline_material_glass_bifacialTrend.csv'
r2.scenario['base'].addMaterial('glass', file=MATERIALBASELINE)
r2.createScenario(name='high', file=MODULEBASELINE_High)
r2.scenario['high'].addMaterials(MATERIALS, r'..\..\baselines')
MATERIALBASELINE = r'..\..\baselines\PVSC_2021\baseline_material_glass_bifacialTrend.csv'
r2.scenario['high'].addMaterial('glass', file=MATERIALBASELINE)
# Optionally swap in IRENA lifetime assumptions (EL/RL select the IRENA loss
# scenario -- see PV_ICE docs); title_Method tags the output figure names.
IRENA= False
ELorRL = 'EL'
if IRENA:
    r1.scenMod_IRENIFY(scenarios=['base', 'high'], ELorRL = ELorRL )
    r2.scenMod_IRENIFY(scenarios=['base', 'high'], ELorRL = ELorRL )
    title_Method = 'Irena_'+ELorRL
else:
    title_Method = 'PVICE'
# Run the mass-flow calculation and aggregate yearly/cumulative US results
# from both simulations into single dataframes.
r1.calculateMassFlow()
r2.calculateMassFlow()
objects = [r1, r2]
scenarios = ['base', 'high']
pvice_Usyearly1, pvice_Uscum1 = r1.aggregateResults()
pvice_Usyearly2, pvice_Uscum2 = r2.aggregateResults()
UScum = pd.concat([pvice_Uscum1, pvice_Uscum2], axis=1)
USyearly = pd.concat([pvice_Usyearly1, pvice_Usyearly2], axis=1)
UScum.to_csv('pvice_USCum.csv')
USyearly.to_csv('pvice_USYearly.csv')
# OLD METHOD
# NOTE(review): the triple-quoted block below is retired aggregation code kept
# only for reference; it is never executed (the string is built and discarded).
'''
USyearly=pd.DataFrame()
keyword='mat_Total_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
# Loop over objects
for kk in range(0, len(objects)):
obj = objects[kk]
# Loop over Scenarios
for jj in range(0, len(scenarios)):
case = scenarios[jj]
for ii in range (0, len(materials)):
material = materials[ii]
foo = obj.scenario[case].material[material].materialdata[keyword].copy()
foo = foo.to_frame(name=material)
USyearly["Waste_"+material+'_'+obj.name+'_'+case] = foo[material]
filter_col = [col for col in USyearly if (col.startswith('Waste_') and col.endswith(obj.name+'_'+case)) ]
USyearly['Waste_Module_'+obj.name+'_'+case] = USyearly[filter_col].sum(axis=1)
# Converting to grams to Tons.
USyearly.head(20)
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
# Loop over objects
for kk in range(0, len(objects)):
obj = objects[kk]
# Loop over Scenarios
for jj in range(0, len(scenarios)):
case = scenarios[jj]
for ii in range (0, len(materials)):
material = materials[ii]
foo = obj.scenario[case].material[material].materialdata[keyword].copy()
foo = foo.to_frame(name=material)
USyearly["Waste_EOL_"+material+'_'+obj.name+'_'+case] = foo[material]
filter_col = [col for col in USyearly if (col.startswith('Waste_EOL_') and col.endswith(obj.name+'_'+case)) ]
USyearly['Waste_EOL_Module_'+obj.name+'_'+case] = USyearly[filter_col].sum(axis=1)
# Converting to grams to Tons.
USyearly.head(20)
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
# Loop over objects
for kk in range(0, len(objects)):
obj = objects[kk]
# Loop over Scenarios
for jj in range(0, len(scenarios)):
case = scenarios[jj]
for ii in range (0, len(materials)):
material = materials[ii]
foo = obj.scenario[case].material[material].materialdata[keyword].copy()
foo = foo.to_frame(name=material)
USyearly["VirginStock_"+material+'_'+obj.name+'_'+case] = foo[material]
filter_col = [col for col in USyearly if (col.startswith('VirginStock_') and col.endswith(obj.name+'_'+case)) ]
USyearly['VirginStock_Module_'+obj.name+'_'+case] = USyearly[filter_col].sum(axis=1)
# ### Converting to grams to METRIC Tons.
USyearly = USyearly/1000000 # This is the ratio for Metric tonnes
#907185 -- this is for US tons
UScum = USyearly.copy()
UScum = UScum.cumsum()
keyword='Installed_Capacity_[W]'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
# Loop over SF Scenarios
for kk in range(0, len(objects)):
obj = objects[kk]
# Loop over Scenarios
for jj in range(0, len(scenarios)):
case = scenarios[jj]
foo = obj.scenario[case].data[keyword]
foo = foo.to_frame(name=keyword)
UScum["Capacity_"+obj.name+'_'+case] = foo[keyword]
USyearly.index = r1.scenario['base'].data['year']
UScum.index = r1.scenario['base'].data['year']
USyearly.to_csv('USyearly_Oldmethod.csv')
UScum.to_csv('UScum_Oldmethod.csv')
''';
```
# ## Mining Capacity
```
# 2020 global mining production per material -- presumably metric tonnes/year
# to match the '[Tonnes]' result columns (TODO confirm units/sources).
# Used to express PV virgin-material demand as a share of mining output.
mining2020_aluminum = 65267000
mining2020_silver = 22260
mining2020_copper = 20000000
mining2020_silicon = 8000000
objects = [r1, r2]
scenarios = ['base', 'high']
plt.rcParams.update({'font.size': 10})
plt.rcParams['figure.figsize'] = (12, 8)
keyw='VirginStock_'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
fig, axs = plt.subplots(1,1, figsize=(4, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .3, wspace=.2)
# Loop over CASES
name2 = 'Simulation1_high_[Tonnes]'
name0 = 'Simulation1_base_[Tonnes]'
# ROW 2, Aluminum and Silicon: g- 4 aluminum k - 1 silicon orange - 3 copper gray - 2 silver
# For each material: line = high scenario; shaded band spans base-to-high,
# both as a percentage of that material's 2020 mining production.
axs.plot(USyearly[keyw+materials[2]+'_'+name2]*100/mining2020_silver,
         color = 'gray', linewidth=2.0, label='Silver')
axs.fill_between(USyearly.index, USyearly[keyw+materials[2]+'_'+name0]*100/mining2020_silver, USyearly[keyw+materials[2]+'_'+name2]*100/mining2020_silver,
                 color='gray', lw=3, alpha=.3)
axs.plot(USyearly[keyw+materials[1]+'_'+name2]*100/mining2020_silicon,
         color = 'k', linewidth=2.0, label='Silicon')
axs.fill_between(USyearly.index, USyearly[keyw+materials[1]+'_'+name0]*100/mining2020_silicon,
                 USyearly[keyw+materials[1]+'_'+name2]*100/mining2020_silicon,
                 color='k', lw=3, alpha=.5)
axs.plot(USyearly[keyw+materials[4]+'_'+name2]*100/mining2020_aluminum,
         color = 'g', linewidth=2.0, label='Aluminum')
axs.fill_between(USyearly.index, USyearly[keyw+materials[4]+'_'+name0]*100/mining2020_aluminum,
                 USyearly[keyw+materials[4]+'_'+name2]*100/mining2020_aluminum,
                 color='g', lw=3, alpha=.3)
axs.plot(USyearly[keyw+materials[3]+'_'+name2]*100/mining2020_copper,
         color = 'orange', linewidth=2.0, label='Copper')
axs.fill_between(USyearly.index, USyearly[keyw+materials[3]+'_'+name0]*100/mining2020_copper,
                 USyearly[keyw+materials[3]+'_'+name2]*100/mining2020_copper,
                 color='orange', lw=3, alpha=.3)
axs.set_xlim([2020,2050])
axs.legend()
#axs.set_yscale('log')
#axs.set_ylabel('Virgin material needs as a percentage of 2020 global mining production capacity [%]')
fig.savefig(title_Method+' Fig_1x1_MaterialNeeds Ratio to Production_NREL2018.png', dpi=600)
plt.rcParams.update({'font.size': 15})
plt.rcParams['figure.figsize'] = (15, 8)
keyw='VirginStock_'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
# Left panel (a0): yearly virgin-material needs; right panel (a1): stacked
# cumulative 2020-2050 totals per scenario.
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [3, 1]})
########################
# SUBPLOT 1
########################
#######################
# loop plotting over scenarios
name2 = 'Simulation1_high_[Tonnes]'
name0 = 'Simulation1_base_[Tonnes]'
# SCENARIO 1 ***************
# Total module mass = sum over all five materials; glass plotted separately.
modulemat = (USyearly[keyw+materials[0]+'_'+name0]+USyearly[keyw+materials[1]+'_'+name0]+
             USyearly[keyw+materials[2]+'_'+name0]+USyearly[keyw+materials[3]+'_'+name0]+
             USyearly[keyw+materials[4]+'_'+name0])
glassmat = (USyearly[keyw+materials[0]+'_'+name0])
modulemat = modulemat/1000000
glassmat = glassmat/1000000
a0.plot(USyearly.index, modulemat, 'k.', linewidth=5, label='S1: '+name0+' module mass')
a0.plot(USyearly.index, glassmat, 'k', linewidth=5, label='S1: '+name0+' glass mass only')
a0.fill_between(USyearly.index, glassmat, modulemat, color='k', alpha=0.3,
                interpolate=True)
# SCENARIO 2 ***************
modulemat = (USyearly[keyw+materials[0]+'_'+name2]+USyearly[keyw+materials[1]+'_'+name2]+
             USyearly[keyw+materials[2]+'_'+name2]+USyearly[keyw+materials[3]+'_'+name2]+
             USyearly[keyw+materials[4]+'_'+name2])
glassmat = (USyearly[keyw+materials[0]+'_'+name2])
modulemat = modulemat/1000000
glassmat = glassmat/1000000
a0.plot(USyearly.index, modulemat, 'c.', linewidth=5, label='S2: '+name2+' module mass')
a0.plot(USyearly.index, glassmat, 'c', linewidth=5, label='S2: '+name2+' glass mass only')
a0.fill_between(USyearly.index, glassmat, modulemat, color='c', alpha=0.3,
                interpolate=True)
a0.legend()
a0.set_title('Yearly Virgin Material Needs by Scenario')
a0.set_ylabel('Mass [Million Tonnes]')
a0.set_xlim([2020, 2050])
a0.set_xlabel('Years')
########################
# SUBPLOT 2
########################
#######################
# Calculate
# Cumulative totals at 2050 for each material: [base, high] per entry.
cumulations2050 = {}
for ii in range(0, len(materials)):
    matcum = []
    matcum.append(UScum[keyw+materials[ii]+'_'+name0].loc[2050])
    matcum.append(UScum[keyw+materials[ii]+'_'+name2].loc[2050])
    cumulations2050[materials[ii]] = matcum
dfcumulations2050 = pd.DataFrame.from_dict(cumulations2050)
dfcumulations2050 = dfcumulations2050/1000000 # in Million Tonnes
# Running offsets for the stacked bars (glass at the bottom, silver on top).
dfcumulations2050['bottom1'] = dfcumulations2050['glass']
dfcumulations2050['bottom2'] = dfcumulations2050['bottom1']+dfcumulations2050['aluminium_frames']
dfcumulations2050['bottom3'] = dfcumulations2050['bottom2']+dfcumulations2050['silicon']
dfcumulations2050['bottom4'] = dfcumulations2050['bottom3']+dfcumulations2050['copper']
## Plot BARS Stuff
ind=np.arange(2)
width=0.35 # width of the bars.
p0 = a1.bar(ind, dfcumulations2050['glass'], width, color='c')
p1 = a1.bar(ind, dfcumulations2050['aluminium_frames'], width,
            bottom=dfcumulations2050['bottom1'])
p2 = a1.bar(ind, dfcumulations2050['silicon'], width,
            bottom=dfcumulations2050['bottom2'])
p3 = a1.bar(ind, dfcumulations2050['copper'], width,
            bottom=dfcumulations2050['bottom3'])
p4 = a1.bar(ind, dfcumulations2050['silver'], width,
            bottom=dfcumulations2050['bottom4'])
a1.yaxis.set_label_position("right")
a1.yaxis.tick_right()
a1.set_ylabel('Virgin Material Cumulative Needs 2020-2050 [Million Tonnes]')
a1.set_xlabel('Scenario')
# NOTE(review): set_xticks(ticks, labels) requires matplotlib >= 3.5 -- confirm.
a1.set_xticks(ind, ('S1', 'S2'))
#plt.yticks(np.arange(0, 81, 10))
a1.legend((p0[0], p1[0], p2[0], p3[0], p4[0] ), ('Glass', 'aluminium_frames', 'Silicon','Copper','Silver'))
f.tight_layout()
f.savefig(title_Method+' Fig_2x1_Yearly Virgin Material Needs by Scenario and Cumulatives_NREL2018.png', dpi=600)
print("Cumulative Virgin Needs by 2050 Million Tones by Scenario")
dfcumulations2050[['glass','silicon','silver','copper','aluminium_frames']].sum(axis=1)
```
# ### Bonus: Bifacial Trend Cumulative Virgin Needs (not plotted, just values)
```
# Cumulative 2050 virgin-material needs for the bifacial-trend simulation.
name2 = 'bifacialTrend_high_[Tonnes]'
name0 = 'bifacialTrend_base_[Tonnes]'

# For each material, collect the [base, high] cumulative totals at 2050.
cumulations2050 = {
    mat: [UScum[keyw + mat + '_' + name0].loc[2050],
          UScum[keyw + mat + '_' + name2].loc[2050]]
    for mat in materials
}

dfcumulations2050 = pd.DataFrame.from_dict(cumulations2050)
dfcumulations2050 = dfcumulations2050/1000000 # in Million Tonnes
print("Cumulative Virgin Needs by 2050 Million Tones by Scenario for Bifacial Trend")
dfcumulations2050[['glass','silicon','silver','copper','aluminium_frames']].sum(axis=1)
```
# ### Waste by year
```
plt.rcParams.update({'font.size': 15})
plt.rcParams['figure.figsize'] = (15, 8)
# Same 2x1 layout as the virgin-needs figure, but for total waste
# ('WasteAll_' columns): yearly curves on the left, stacked cumulative
# 2050 totals on the right.
keyw='WasteAll_'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [3, 1]})
########################
# SUBPLOT 1
########################
#######################
# loop plotting over scenarios
name2 = 'Simulation1_high_[Tonnes]'
name0 = 'Simulation1_base_[Tonnes]'
# SCENARIO 1 ***************
# Total module waste = sum over all five materials; glass plotted separately.
modulemat = (USyearly[keyw+materials[0]+'_'+name0]+USyearly[keyw+materials[1]+'_'+name0]+
             USyearly[keyw+materials[2]+'_'+name0]+USyearly[keyw+materials[3]+'_'+name0]+
             USyearly[keyw+materials[4]+'_'+name0])
glassmat = (USyearly[keyw+materials[0]+'_'+name0])
modulemat = modulemat/1000000
glassmat = glassmat/1000000
a0.plot(USyearly.index, modulemat, 'k.', linewidth=5, label='S1: '+name0+' module mass')
a0.plot(USyearly.index, glassmat, 'k', linewidth=5, label='S1: '+name0+' glass mass only')
a0.fill_between(USyearly.index, glassmat, modulemat, color='k', alpha=0.3,
                interpolate=True)
# SCENARIO 2 ***************
modulemat = (USyearly[keyw+materials[0]+'_'+name2]+USyearly[keyw+materials[1]+'_'+name2]+
             USyearly[keyw+materials[2]+'_'+name2]+USyearly[keyw+materials[3]+'_'+name2]+
             USyearly[keyw+materials[4]+'_'+name2])
glassmat = (USyearly[keyw+materials[0]+'_'+name2])
modulemat = modulemat/1000000
glassmat = glassmat/1000000
a0.plot(USyearly.index, modulemat, 'c.', linewidth=5, label='S2: '+name2+' module mass')
a0.plot(USyearly.index, glassmat, 'c', linewidth=5, label='S2: '+name2+' glass mass only')
a0.fill_between(USyearly.index, glassmat, modulemat, color='c', alpha=0.3,
                interpolate=True)
a0.legend()
a0.set_title('Yearly Material Waste by Scenario')
a0.set_ylabel('Mass [Million Tonnes]')
a0.set_xlim([2020, 2050])
a0.set_xlabel('Years')
########################
# SUBPLOT 2
########################
#######################
# Calculate
# Cumulative waste at 2050 for each material: [base, high] per entry.
cumulations2050 = {}
for ii in range(0, len(materials)):
    matcum = []
    matcum.append(UScum[keyw+materials[ii]+'_'+name0].loc[2050])
    matcum.append(UScum[keyw+materials[ii]+'_'+name2].loc[2050])
    cumulations2050[materials[ii]] = matcum
dfcumulations2050 = pd.DataFrame.from_dict(cumulations2050)
dfcumulations2050 = dfcumulations2050/1000000 # in Million Tonnes
# Running offsets for the stacked bars (glass at the bottom, silver on top).
dfcumulations2050['bottom1'] = dfcumulations2050['glass']
dfcumulations2050['bottom2'] = dfcumulations2050['bottom1']+dfcumulations2050['aluminium_frames']
dfcumulations2050['bottom3'] = dfcumulations2050['bottom2']+dfcumulations2050['silicon']
dfcumulations2050['bottom4'] = dfcumulations2050['bottom3']+dfcumulations2050['copper']
## Plot BARS Stuff
ind=np.arange(2)
width=0.35 # width of the bars.
p0 = a1.bar(ind, dfcumulations2050['glass'], width, color='c')
p1 = a1.bar(ind, dfcumulations2050['aluminium_frames'], width,
            bottom=dfcumulations2050['bottom1'])
p2 = a1.bar(ind, dfcumulations2050['silicon'], width,
            bottom=dfcumulations2050['bottom2'])
p3 = a1.bar(ind, dfcumulations2050['copper'], width,
            bottom=dfcumulations2050['bottom3'])
p4 = a1.bar(ind, dfcumulations2050['silver'], width,
            bottom=dfcumulations2050['bottom4'])
a1.yaxis.set_label_position("right")
a1.yaxis.tick_right()
a1.set_ylabel('Cumulative Waste by 2050 [Million Tonnes]')
a1.set_xlabel('Scenario')
a1.set_xticks(ind, ('S1', 'S2'))
#plt.yticks(np.arange(0, 81, 10))
a1.legend((p0[0], p1[0], p2[0], p3[0], p4[0] ), ('Glass', 'aluminium_frames', 'Silicon','Copper','Silver'))
f.tight_layout()
f.savefig(title_Method+' Fig_2x1_Yearly WASTE by Scenario and Cumulatives_NREL2018.png', dpi=600)
print("Cumulative Waste by 2050 Million Tones by case")
dfcumulations2050[['glass','silicon','silver','copper','aluminium_frames']].sum(axis=1)
plt.rcParams.update({'font.size': 15})
plt.rcParams['figure.figsize'] = (15, 8)
keyw='WasteEOL_'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium_frames']
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [3, 1]})
########################
# SUBPLOT 1
########################
#######################
# loop plotting over scenarios
name2 = 'Simulation1_high_[Tonnes]'
name0 = 'Simulation1_base_[Tonnes]'
# SCENARIO 1 ***************
modulemat = (USyearly[keyw+materials[0]+'_'+name0]+USyearly[keyw+materials[1]+'_'+name0]+
USyearly[keyw+materials[2]+'_'+name0]+USyearly[keyw+materials[3]+'_'+name0]+
USyearly[keyw+materials[4]+'_'+name0])
glassmat = (USyearly[keyw+materials[0]+'_'+name0])
modulemat = modulemat/1000000
glassmat = glassmat/1000000
a0.plot(USyearly.index, modulemat, 'k.', linewidth=5, label='S1: '+name0+' module mass')
a0.plot(USyearly.index, glassmat, 'k', linewidth=5, label='S1: '+name0+' glass mass only')
a0.fill_between(USyearly.index, glassmat, modulemat, color='k', alpha=0.3,
interpolate=True)
# SCENARIO 2 ***************
modulemat = (USyearly[keyw+materials[0]+'_'+name2]+USyearly[keyw+materials[1]+'_'+name2]+
USyearly[keyw+materials[2]+'_'+name2]+USyearly[keyw+materials[3]+'_'+name2]+
USyearly[keyw+materials[4]+'_'+name2])
glassmat = (USyearly[keyw+materials[0]+'_'+name2])
modulemat = modulemat/1000000
glassmat = glassmat/1000000
a0.plot(USyearly.index, modulemat, 'c.', linewidth=5, label='S2: '+name2+' module mass')
a0.plot(USyearly.index, glassmat, 'c', linewidth=5, label='S2: '+name2+' glass mass only')
a0.fill_between(USyearly.index, glassmat, modulemat, color='c', alpha=0.3,
interpolate=True)
a0.legend()
a0.set_title('Yearly Material Waste by Scenario')
a0.set_ylabel('Mass [Million Tonnes]')
a0.set_xlim([2020, 2050])
a0.set_xlabel('Years')
########################
# SUBPLOT 2
########################
#######################
# Calculate
cumulations2050 = {}
for ii in range(0, len(materials)):
matcum = []
matcum.append(UScum[keyw+materials[ii]+'_'+name0].loc[2050])
matcum.append(UScum[keyw+materials[ii]+'_'+name2].loc[2050])
cumulations2050[materials[ii]] = matcum
dfcumulations2050 = pd.DataFrame.from_dict(cumulations2050)
dfcumulations2050 = dfcumulations2050/1000000 # in Million Tonnes
dfcumulations2050['bottom1'] = dfcumulations2050['glass']
dfcumulations2050['bottom2'] = dfcumulations2050['bottom1']+dfcumulations2050['aluminium_frames']
dfcumulations2050['bottom3'] = dfcumulations2050['bottom2']+dfcumulations2050['silicon']
dfcumulations2050['bottom4'] = dfcumulations2050['bottom3']+dfcumulations2050['copper']
## Plot BARS Stuff
ind=np.arange(2)
width=0.35 # width of the bars.
p0 = a1.bar(ind, dfcumulations2050['glass'], width, color='c')
p1 = a1.bar(ind, dfcumulations2050['aluminium_frames'], width,
bottom=dfcumulations2050['bottom1'])
p2 = a1.bar(ind, dfcumulations2050['silicon'], width,
bottom=dfcumulations2050['bottom2'])
p3 = a1.bar(ind, dfcumulations2050['copper'], width,
bottom=dfcumulations2050['bottom3'])
p4 = a1.bar(ind, dfcumulations2050['silver'], width,
bottom=dfcumulations2050['bottom4'])
a1.yaxis.set_label_position("right")
a1.yaxis.tick_right()
a1.set_ylabel('Cumulative EOL Only Waste by 2050 [Million Tonnes]')
a1.set_xlabel('Scenario')
a1.set_xticks(ind, ('S1', 'S2'))
#plt.yticks(np.arange(0, 81, 10))
a1.legend((p0[0], p1[0], p2[0], p3[0], p4[0] ), ('Glass', 'aluminium_frames', 'Silicon','Copper','Silver'))
f.tight_layout()
f.savefig(title_Method+' Fig_2x1_Yearly EOL Only WASTE by Scenario and Cumulatives_NREL2018.png', dpi=600)
print("Cumulative Eol Only Waste by 2050 Million Tones by case")
dfcumulations2050[['glass','silicon','silver','copper','aluminium_frames']].sum(axis=1)
```
| github_jupyter |
# Fun with FFT and sound files
Based on: https://realpython.com/python-scipy-fft/
Define a function for generating pure sine wave tones
```
import numpy as np
import matplotlib.pyplot as plt
SAMPLE_RATE = 44100 # Hertz
DURATION = 5 # Seconds
def generate_sine_wave(freq, sample_rate, duration):
    """Synthesize a pure sine tone.

    Returns a pair (t, samples): `t` holds sample_rate * duration evenly
    spaced time points covering [0, duration), and `samples` is the sine
    wave of `freq` Hz evaluated at those points.
    """
    num_samples = sample_rate * duration
    t = np.linspace(0, duration, num_samples, endpoint=False)
    # np.sin works in radians, hence the 2*pi factor
    samples = np.sin((2 * np.pi) * (t * freq))
    return t, samples
# Generate a 2 hertz sine wave that lasts for 5 seconds
x, y = generate_sine_wave(2, SAMPLE_RATE, DURATION)
plt.plot(x, y)
plt.show()
```
Produce two tones, e.g. 400 Hz signal and a 4 kHz high-pitch noise
```
_, nice_tone = generate_sine_wave(400, SAMPLE_RATE, DURATION)
_, noise_tone = generate_sine_wave(4000, SAMPLE_RATE, DURATION)
noise_tone = noise_tone * 0.3
mixed_tone = nice_tone + noise_tone
#mixed_tone = noise_tone
```
For the purposes of storing the tones in an audio file, the amplitude needs to be normalized to the range of a 16-bit integer
```
normalized_tone = np.int16((mixed_tone / mixed_tone.max()) * 32767)
plt.plot(normalized_tone[:1000])
plt.show()
```
Store the sound for playback
```
from scipy.io import wavfile as wf
# Remember SAMPLE_RATE = 44100 Hz is our playback rate
wf.write("mysinewave.wav", SAMPLE_RATE, normalized_tone)
```
Can also try to record the sound (NB: won't work on datahub !)
```
# import required libraries
%pip install sounddevice
import sounddevice as sd
print("Recording...")
# Start recorder with the given values
# of duration and sample frequency
recording = sd.rec(int(DURATION * SAMPLE_RATE), samplerate=SAMPLE_RATE, channels=1)
# Record audio for the given number of seconds
sd.wait()
print("Done")
# This will convert the NumPy array to an audio
# file with the given sampling frequency
wf.write("recording0.wav", 400, recording)
```
### Fourier transforms
Now try to transform the time stream into frequency space using FFT
```
from scipy.fft import fft, fftfreq
# Number of samples in normalized_tone
N = SAMPLE_RATE * DURATION
yf = fft(normalized_tone)
xf = fftfreq(N, 1 / SAMPLE_RATE)
print('Type of the output array: ',type(yf[0]))
print('Size of the input array: ',N)
print('Size of the Fourier transform: ',len(xf))
df = xf[1]-xf[0]
print(f'Width of the frequency bins: {df} Hz')
plt.plot(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
plt.figure()
plt.yscale('log')
plt.plot(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.xlim(350,4050)
plt.show()
```
You notice that fft returns data for both positive and negative frequencies, produces the output array of the same size as input, and the output is a set of *complex* numbers. However, the information is redundant: only half of the output values are unique. The magnitudes of the Fourier coefficients at negative frequencies are the same as at the corresponding positive frequencies. This is the property of the *real* Fourier transform, i.e. the transform applied to real-valued signals. More precisely, $\mathrm{fft}(f)=\mathrm{fft}^*(-f)$
```
print(xf[1],xf[-1])
print(yf[1],yf[-1])
```
We can use this fact to save computational time and storage by computing only half of the Fourier coefficients:
```
from scipy.fft import rfft, rfftfreq
# Note the extra 'r' at the front
yf = rfft(normalized_tone)
xf = rfftfreq(N, 1 / SAMPLE_RATE)
print('Type of the output array: ',type(yf[0]))
print('Size of the input array: ',N)
print('Size of the Fourier transform: ',len(xf))
df = xf[1]-xf[0]
print(f'Width of the frequency bins: {df} Hz')
plt.plot(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
```
Now let's look at the Fourier transform of the sound of a guitar string:
```
rate, data = wf.read("recording0.wav")
N=len(data)
print(rate, N)
time=np.arange(0, N)/rate
plt.plot(time, data)
plt.xlabel('time (sec)')
plt.ylabel('Sound a.u.)')
plt.show()
yf = rfft(data)
xf = rfftfreq(len(data), 1 / rate)
print('Type of the output array: ',type(yf[0]))
print('Size of the input array: ',len(data))
print('Size of the Fourier transform: ',len(xf))
df = xf[1]-xf[0]
print(f'Width of the frequency bins: {df} Hz')
plt.figure()
plt.loglog(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
plt.figure()
plt.plot(xf, np.abs(yf))
plt.yscale('log')
plt.xlim(100,2000)
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
```
| github_jupyter |
## PSO - Particle Swarm Optimisation
**About PSO -**
PSO is a biologically inspired meta-heuristic optimisation algorithm. It takes its inspiration from bird flocking or fish schooling. It works pretty well in practice. So let us code it up and optimise a function.
```
#dependencies
import random
import math
import copy # for array copying
import sys
```
### COST Function
So basically the function we are trying to optimise will become our cost function.
What cost functions we will see:
1. Sum of squares
2. Rastrigin's function
### Rastrigins function:
Rastrigin's equation:

3-D Rendering

As you can see, it's a non-convex function with many local minima (i.e. multi-modal: many optimal solutions). It is a fairly difficult problem for testing, and we will test this out.
```
# lets code the rastrigins function
def error(position):
    """Rastrigin's function: a multi-modal benchmark whose global minimum
    is 0 at the origin. `position` is a sequence of coordinates."""
    total = sum(x * x - (10 * math.cos(2 * math.pi * x)) for x in position)
    return 10 * len(position) + total
```
### Particle
A particle basically maintains the following params:
1. particle position
2. particle velocity
3. best position individual
4. best error individual
5. error individual
The action it can take when traversing over its search space looks like -
```
Update velocity -
w1*towards_current_direction(intertia) + w2*towards_self_best + w3*towards_swarm_best
Update position -
Add current_velocity to previous_postion to obtain new_velocity
```
Now suppose the particle finds some minima/maxima which is better than the global best it has to update the global value. So we have its fitness evaluation function -
```
evaluate fitness -
plug in current_position into test function to get where exactly you are that will give you the minima/maxima value
check against the global minima/maxima whether yours is better
assign value to global accordingly
```
```
# let us construct the class Particle
class Particle:
    """One member of the swarm: a candidate solution with a velocity and a
    memory of the best position it has personally visited."""

    def __init__(self, x0):
        """Start at position x0 with a random velocity in [-1, 1] per dimension."""
        self.position_i = list(x0)                              # particle position
        self.velocity_i = [random.uniform(-1, 1) for _ in x0]   # particle velocity
        self.pos_best_i = []    # best position this particle has found
        self.err_best_i = -1    # error at that best position (-1 = not set yet)
        self.err_i = -1         # error at the current position (-1 = not evaluated)

    def evaluate(self, costFunc):
        """Evaluate fitness at the current position and update the personal best."""
        self.err_i = costFunc(self.position_i)
        # -1 is the "no best recorded yet" sentinel
        if self.err_i < self.err_best_i or self.err_best_i == -1:
            # BUG FIX: copy the list. The original assigned the list object
            # itself, so the stored "best" position silently changed every
            # time the particle moved.
            self.pos_best_i = list(self.position_i)
            self.err_best_i = self.err_i

    def update_velocity(self, pos_best_g):
        """Blend inertia, a pull toward the personal best, and a pull toward
        the swarm's global best position pos_best_g."""
        w = 0.5   # constant inertia weight (how much to weigh the previous velocity)
        c1 = 1    # cognitive constant (pull toward own best)
        c2 = 2    # social constant (pull toward swarm best)
        # NOTE(review): classic PSO draws r1, r2 from [0, 1]; this code uses
        # [-1, 1] -- confirm that is intentional before changing it.
        for i in range(len(self.position_i)):
            r1 = random.uniform(-1, 1)
            r2 = random.uniform(-1, 1)
            vel_cognitive = c1 * r1 * (self.pos_best_i[i] - self.position_i[i])
            vel_social = c2 * r2 * (pos_best_g[i] - self.position_i[i])
            self.velocity_i[i] = w * self.velocity_i[i] + vel_cognitive + vel_social

    def update_position(self, bounds):
        """Step by the current velocity, clamping each coordinate to
        bounds[i] = (min_i, max_i)."""
        for i in range(len(self.position_i)):
            self.position_i[i] = self.position_i[i] + self.velocity_i[i]
            # adjust maximum position if necessary
            if self.position_i[i] > bounds[i][1]:
                self.position_i[i] = bounds[i][1]
            # adjust minimum position if necessary
            if self.position_i[i] < bounds[i][0]:
                self.position_i[i] = bounds[i][0]
```
### __PSO__ (Particle Swarm Optimisation)
In particle swarm optimisation we
1. Initialise a swarm of particles to go on random exploration
2. for each particle we find whether the have discovered any new minima/maxima
3. The overall groups orientation or their velocities is guided to the global minimas
```
# Now let us define a class PSO
class PSO():
    """Minimise costFunc with a particle swarm started at x0.

    The whole optimisation runs inside __init__ and the best position and
    error found are printed at the end.
    """

    def __init__(self, costFunc, x0, bounds, num_particles, maxiter):
        # Particle's constructor reads this module-level value.
        global num_dimensions
        num_dimensions = len(x0)

        err_best_g = -1   # best error seen by any particle (-1 = not set yet)
        pos_best_g = []   # position where that best error was found

        # establish the swarm: every particle starts from the same guess
        swarm = [Particle(x0) for _ in range(num_particles)]

        # optimization loop
        for _ in range(maxiter):
            # fitness pass: evaluate each particle, track the global best
            for particle in swarm:
                particle.evaluate(costFunc)
                if particle.err_i < err_best_g or err_best_g == -1:
                    pos_best_g = list(particle.position_i)
                    err_best_g = float(particle.err_i)
            # movement pass: steer every particle, then move it
            for particle in swarm:
                particle.update_velocity(pos_best_g)
                particle.update_position(bounds)

        # print final results
        print ('\nFINAL:')
        print (pos_best_g)
        print (err_best_g)
%time
initial=[5,5] # initial starting location [x1,x2...]
bounds=[(-10,10),(-10,10)] # input bounds [(x1_min,x1_max),(x2_min,x2_max)...]
PSO(error,initial,bounds,num_particles=15,maxiter=30)
```
Now further on we will try to parallelize PSO algorithm
| github_jupyter |
```
import sys, os
if 'google.colab' in sys.modules:
# https://github.com/yandexdataschool/Practical_RL/issues/256
!pip install tensorflow-gpu==1.13.1
if not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/week07_seq2seq/basic_model_tf.py
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/week07_seq2seq/he-pron-wiktionary.txt
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/week07_seq2seq/main_dataset.txt
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/week07_seq2seq/voc.py
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
```
## Reinforcement Learning for seq2seq
This time we'll solve a problem of transcribing Hebrew words in English, also known as g2p (grapheme2phoneme)
* word (sequence of letters in source language) -> translation (sequence of letters in target language)
Unlike what most deep learning practitioners do, we won't only train it to maximize likelihood of correct translation, but also employ reinforcement learning to actually teach it to translate with as few errors as possible.
### About the task
One notable property of Hebrew is that it's a consonant language. That is, there are no vowels in the written language. One could represent vowels with diacritics above consonants, but you don't expect people to do that in everyday life.
Therefore, some hebrew characters will correspond to several english letters and others - to none, so we should use encoder-decoder architecture to figure that out.

_(img: esciencegroup.files.wordpress.com)_
Encoder-decoder architectures are about converting anything to anything, including
* Machine translation and spoken dialogue systems
* [Image captioning](http://mscoco.org/dataset/#captions-challenge2015) and [image2latex](https://openai.com/requests-for-research/#im2latex) (convolutional encoder, recurrent decoder)
* Generating [images by captions](https://arxiv.org/abs/1511.02793) (recurrent encoder, convolutional decoder)
* Grapheme2phoneme - convert words to transcripts
We chose simplified __Hebrew->English__ machine translation for words and short phrases (character-level), as it is relatively quick to train even without a gpu cluster.
```
# If True, only translates phrases shorter than 20 characters (way easier).
EASY_MODE = True
# Useful for initial coding.
# If false, works with all phrases (please switch to this mode for homework assignment)
MODE = "he-to-en" # way we translate. Either "he-to-en" or "en-to-he"
# maximal length of _generated_ output, does not affect training
MAX_OUTPUT_LENGTH = 50 if not EASY_MODE else 20
REPORT_FREQ = 100 # how often to evaluate validation score
```
### Step 1: preprocessing
We shall store dataset as a dictionary
`{ word1:[translation1,translation2,...], word2:[...],...}`.
This is mostly due to the fact that many words have several correct translations.
We have implemented this thing for you so that you can focus on more interesting parts.
__Attention python2 users!__ You may want to cast everything to unicode later during homework phase, just make sure you do it _everywhere_.
```
import numpy as np
from collections import defaultdict
word_to_translation = defaultdict(list) # our dictionary
bos = '_'
eos = ';'
with open("main_dataset.txt") as fin:
for line in fin:
en, he = line[:-1].lower().replace(bos, ' ').replace(eos,
' ').split('\t')
word, trans = (he, en) if MODE == 'he-to-en' else (en, he)
if len(word) < 3:
continue
if EASY_MODE:
if max(len(word), len(trans)) > 20:
continue
word_to_translation[word].append(trans)
print("size = ", len(word_to_translation))
# get all unique lines in source language
all_words = np.array(list(word_to_translation.keys()))
# get all unique lines in translation language
all_translations = np.array(
[ts for all_ts in word_to_translation.values() for ts in all_ts])
```
### split the dataset
We hold out 10% of all words to be used for validation.
```
from sklearn.model_selection import train_test_split
train_words, test_words = train_test_split(
all_words, test_size=0.1, random_state=42)
```
### Building vocabularies
We now need to build vocabularies that map strings to token ids and vice versa. We're gonna need these fellas when we feed training data into model or convert output matrices into english words.
```
from voc import Vocab
inp_voc = Vocab.from_lines(''.join(all_words), bos=bos, eos=eos, sep='')
out_voc = Vocab.from_lines(''.join(all_translations), bos=bos, eos=eos, sep='')
# Here's how you cast lines into ids and backwards.
batch_lines = all_words[:5]
batch_ids = inp_voc.to_matrix(batch_lines)
batch_lines_restored = inp_voc.to_lines(batch_ids)
print("lines")
print(batch_lines)
print("\nwords to ids (0 = bos, 1 = eos):")
print(batch_ids)
print("\nback to words")
print(batch_lines_restored)
```
Draw word/translation length distributions to estimate the scope of the task.
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=[8, 4])
plt.subplot(1, 2, 1)
plt.title("words")
plt.hist(list(map(len, all_words)), bins=20)
plt.subplot(1, 2, 2)
plt.title('translations')
plt.hist(list(map(len, all_translations)), bins=20)
```
### Step 3: deploy encoder-decoder (1 point)
__assignment starts here__
Our architecture consists of two main blocks:
* Encoder reads words character by character and outputs code vector (usually a function of last RNN state)
* Decoder takes that code vector and produces translations character by character
Then it gets fed into a model that follows this simple interface:
* __`model.symbolic_translate(inp, **flags) -> out, logp`__ - takes symbolic int32 matrix of hebrew words, produces output tokens sampled from the model and output log-probabilities for all possible tokens at each tick.
* if given flag __`greedy=True`__, takes most likely next token at each iteration. Otherwise samples with next token probabilities predicted by model.
* __`model.symbolic_score(inp, out, **flags) -> logp`__ - takes symbolic int32 matrices of hebrew words and their english translations. Computes the log-probabilities of all possible english characters given english prefices and hebrew word.
* __`model.weights`__ - weights from all model layers [a list of variables]
That's all! It's as hard as it gets. With those two methods alone you can implement all kinds of prediction and training.
```
import tensorflow as tf
tf.reset_default_graph()
s = tf.InteractiveSession()
# ^^^ if you get "variable *** already exists": re-run this cell again
from basic_model_tf import BasicTranslationModel
model = BasicTranslationModel('model', inp_voc, out_voc,
emb_size=64, hid_size=128)
s.run(tf.global_variables_initializer())
# Play around with symbolic_translate and symbolic_score
inp = tf.placeholder_with_default(np.random.randint(
0, 10, [3, 5], dtype='int32'), [None, None])
out = tf.placeholder_with_default(np.random.randint(
0, 10, [3, 5], dtype='int32'), [None, None])
# translate inp (with untrained model)
sampled_out, logp = model.symbolic_translate(inp, greedy=False)
print("\nSymbolic_translate output:\n", sampled_out, logp)
print("\nSample translations:\n", s.run(sampled_out))
# score logp(out | inp) with untrained input
logp = model.symbolic_score(inp, out)
print("\nSymbolic_score output:\n", logp)
print("\nLog-probabilities (clipped):\n", s.run(logp)[:, :2, :5])
# Prepare any operations you want here
input_sequence = tf.placeholder('int32', [None, None])
greedy_translations, logp = <YOUR CODE: build symbolic translations with greedy = True>
def translate(lines):
    """
    You are given a list of input lines.
    Make your neural network translate them.
    :param lines: list of source-language strings
    :return: a list of output lines (translated strings, same order)
    """
    # Convert lines to a matrix of indices
    lines_ix = <YOUR CODE>
    # Compute translations in form of indices
    trans_ix = s.run(greedy_translations, { <YOUR CODE: feed_dict> })
    # Convert translations back into strings
    return out_voc.to_lines(trans_ix)
print("Sample inputs:", all_words[:3])
print("Dummy translations:", translate(all_words[:3]))
assert isinstance(greedy_translations,
tf.Tensor) and greedy_translations.dtype.is_integer, "trans must be a tensor of integers (token ids)"
assert translate(all_words[:3]) == translate(
all_words[:3]), "make sure translation is deterministic (use greedy=True and disable any noise layers)"
assert type(translate(all_words[:3])) is list and (type(translate(all_words[:1])[0]) is str or type(
translate(all_words[:1])[0]) is unicode), "translate(lines) must return a sequence of strings!"
print("Tests passed!")
```
### Scoring function
LogLikelihood is a poor estimator of model performance.
* If we predict zero probability once, it shouldn't ruin entire model.
* It is enough to learn just one translation if there are several correct ones.
* What matters is how many mistakes model's gonna make when it translates!
Therefore, we will use minimal Levenshtein distance. It measures how many characters do we need to add/remove/replace from model translation to make it perfect. Alternatively, one could use character-level BLEU/RougeL or other similar metrics.
The catch here is that Levenshtein distance is not differentiable: it isn't even continuous. We can't train our neural network to maximize it by gradient descent.
```
import editdistance # !pip install editdistance
def get_distance(word, trans):
    """
    Levenshtein edit distance from the predicted translation `trans`
    to the *closest* correct translation of `word` (a word may have
    several accepted translations).
    """
    candidates = word_to_translation[word]
    assert len(candidates) != 0, "wrong/unknown word"
    # distance against every accepted reference; keep the smallest
    distances = [editdistance.eval(trans, ref) for ref in candidates]
    return min(distances)
def score(words, bsize=100):
    """Levenshtein distances of model translations for `bsize` random words,
    returned as a float32 numpy array."""
    assert isinstance(words, np.ndarray)
    sample = np.random.choice(words, size=bsize, replace=False)
    translations = translate(sample)
    distances = [get_distance(w, t) for w, t in zip(sample, translations)]
    return np.array(distances, dtype='float32')
# should be around 5-50 and decrease rapidly after training :)
[score(test_words, 10).mean() for _ in range(5)]
```
## Step 2: Supervised pre-training
Here we define a function that trains our model through maximizing log-likelihood a.k.a. minimizing crossentropy.
```
# import utility functions
from basic_model_tf import initialize_uninitialized, infer_length, infer_mask, select_values_over_last_axis
class supervised_training:
    """Namespace holding the graph ops for supervised (crossentropy) training."""
    # placeholders for inputs and correct answers (token-id matrices, [batch, time])
    input_sequence = tf.placeholder('int32', [None, None])
    reference_answers = tf.placeholder('int32', [None, None])
    # Compute log-probabilities of all possible tokens at each step. Use model interface.
    logprobs_seq = <YOUR CODE>
    # compute mean crossentropy: negative log-prob of each reference token
    crossentropy = - select_values_over_last_axis(logprobs_seq, reference_answers)
    # mask out positions after the end-of-sequence token so padding is ignored
    mask = infer_mask(reference_answers, out_voc.eos_ix)
    loss = tf.reduce_sum(crossentropy * mask)/tf.reduce_sum(mask)
    # Build weights optimizer. Use model.weights to get all trainable params.
    train_step = <YOUR CODE>
    # initialize optimizer params while keeping model intact
    initialize_uninitialized(s)
```
Actually run training on minibatches
```
import random
def sample_batch(words, word_to_translation, batch_size):
    """
    Sample a random batch of words and one random correct translation
    for each word.
    example usage:
    batch_x,batch_y = sample_batch(train_words, word_to_translations,10)
    :return: (input id matrix, output id matrix)
    """
    # pick the source words
    chosen_words = np.random.choice(words, size=batch_size)
    # pick one accepted translation per word at random
    chosen_trans = [random.choice(word_to_translation.get(w)) for w in chosen_words]
    return inp_voc.to_matrix(chosen_words), out_voc.to_matrix(chosen_trans)
bx, by = sample_batch(train_words, word_to_translation, batch_size=3)
print("Source:")
print(bx)
print("Target:")
print(by)
from IPython.display import clear_output
from tqdm import tqdm, trange # or use tqdm_notebook,tnrange
loss_history = []
editdist_history = []
for i in trange(25000):
bx, by = sample_batch(train_words, word_to_translation, 32)
feed_dict = {
supervised_training.input_sequence: bx,
supervised_training.reference_answers: by
}
loss, _ = s.run([supervised_training.loss,
supervised_training.train_step], feed_dict)
loss_history.append(loss)
if (i+1) % REPORT_FREQ == 0:
clear_output(True)
current_scores = score(test_words)
editdist_history.append(current_scores.mean())
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.title('train loss / traning time')
plt.plot(loss_history)
plt.grid()
plt.subplot(132)
plt.title('val score distribution')
plt.hist(current_scores, bins=20)
plt.subplot(133)
plt.title('val score / traning time')
plt.plot(editdist_history)
plt.grid()
plt.show()
print("llh=%.3f, mean score=%.3f" %
(np.mean(loss_history[-10:]), np.mean(editdist_history[-10:])))
# Note: it's okay if loss oscillates up and down as long as it gets better on average over long term (e.g. 5k batches)
for word in train_words[:10]:
print("%s -> %s" % (word, translate([word])[0]))
test_scores = []
for start_i in trange(0, len(test_words), 32):
batch_words = test_words[start_i:start_i+32]
batch_trans = translate(batch_words)
distances = list(map(get_distance, batch_words, batch_trans))
test_scores.extend(distances)
print("Supervised test score:", np.mean(test_scores))
```
## Preparing for reinforcement learning (2 points)
First we need to define loss function as a custom tf operation.
The simple way to do so is through `tensorflow.py_func` wrapper.
```
def my_func(x):
    """Elementwise hyperbolic sine; tf.py_func hands `x` in as a numpy array."""
    return np.sinh(x)
inp = tf.placeholder(tf.float32)
y = tf.py_func(my_func, [inp], tf.float32)
```
__Your task__ is to implement `_compute_levenshtein` function that takes matrices of words and translations, along with input masks, then converts those to actual words and phonemes and computes min-levenshtein via __get_distance__ function above.
```
def _compute_levenshtein(words_ix, trans_ix):
    """
    A custom tensorflow operation that computes levenshtein loss for predicted trans.
    Params:
    - words_ix - a matrix of input letter indices, shape=[batch_size, word_length]
    - trans_ix - a matrix of output letter indices, shape=[batch_size, translation_length]
    Returns a float32 vector of per-sample min-levenshtein distances, shape=[batch_size].
    Please implement the function and make sure it passes tests from the next cell.
    """
    # convert words to strings
    words = <YOUR CODE: restore words (a list of strings) from words_ix. Use vocab>
    assert type(words) is list and type(
        words[0]) is str and len(words) == len(words_ix)
    # convert translations to lists
    translations = <YOUR CODE: restore trans (a list of lists of phonemes) from trans_ix
    assert type(translations) is list and type(
        translations[0]) is str and len(translations) == len(trans_ix)
    # computes levenstein distances. can be arbitrary python code.
    distances = <YOUR CODE: apply get_distance to each pair of [words, translations]>
    assert type(distances) in (list, tuple, np.ndarray) and len(
        distances) == len(words_ix)
    distances = np.array(list(distances), dtype='float32')
    return distances
def compute_levenshtein(words_ix, trans_ix):
    """Graph-side wrapper: run _compute_levenshtein as a tensorflow op on
    two int index matrices; returns a float32 vector of shape [batch_size]."""
    # py_func erases static shape information, so restore it by hand
    out = tf.py_func(_compute_levenshtein, [words_ix, trans_ix, ], tf.float32)
    out.set_shape([None])
    # edit distance is not differentiable -- block gradients through it
    return tf.stop_gradient(out)
```
Simple test suite to make sure your implementation is correct. Hint: if you run into any bugs, feel free to use print from inside _compute_levenshtein.
```
# test suite
# sample random batch of (words, correct trans, wrong trans)
batch_words = np.random.choice(train_words, size=100)
batch_trans = list(map(random.choice, map(
word_to_translation.get, batch_words)))
batch_trans_wrong = np.random.choice(all_translations, size=100)
batch_words_ix = tf.constant(inp_voc.to_matrix(batch_words))
batch_trans_ix = tf.constant(out_voc.to_matrix(batch_trans))
batch_trans_wrong_ix = tf.constant(out_voc.to_matrix(batch_trans_wrong))
# assert compute_levenshtein is zero for ideal translations
correct_answers_score = compute_levenshtein(
batch_words_ix, batch_trans_ix).eval()
assert np.all(correct_answers_score ==
0), "a perfect translation got nonzero levenshtein score!"
print("Everything seems alright!")
# assert compute_levenshtein matches actual scoring function
wrong_answers_score = compute_levenshtein(
batch_words_ix, batch_trans_wrong_ix).eval()
true_wrong_answers_score = np.array(
list(map(get_distance, batch_words, batch_trans_wrong)))
assert np.all(wrong_answers_score ==
true_wrong_answers_score), "for some word symbolic levenshtein is different from actual levenshtein distance"
print("Everything seems alright!")
```
Once you got it working...
* You may now want to __remove/comment asserts__ from function code for a slight speed-up.
* There's a more detailed tutorial on custom tensorflow ops: [`py_func`](https://www.tensorflow.org/api_docs/python/tf/py_func), [`low-level`](https://www.tensorflow.org/api_docs/python/tf/py_func).
## 3. Self-critical policy gradient (2 points)
In this section you'll implement algorithm called self-critical sequence training (here's an [article](https://arxiv.org/abs/1612.00563)).
The algorithm is a vanilla policy gradient with a special baseline.
$$ \nabla J = E_{x \sim p(s)} E_{y \sim \pi(y|x)} \nabla log \pi(y|x) \cdot (R(x,y) - b(x)) $$
Here reward R(x,y) is a __negative levenshtein distance__ (since we minimize it). The baseline __b(x)__ represents how well model fares on word __x__.
In practice, this means that we compute baseline as a score of greedy translation, $b(x) = R(x,y_{greedy}(x)) $.

Luckily, we already obtained the required outputs: `model.greedy_translations, model.greedy_mask` and we only need to compute levenshtein using `compute_levenshtein` function.
```
# Fill-in-the-blank template: the <YOUR CODE> markers must be replaced before
# this cell can run. The class is used as a plain namespace for graph tensors.
class trainer:
    # token ids of input words, shape [batch, time]
    input_sequence = tf.placeholder('int32', [None, None])

    # use model to __sample__ symbolic translations given input_sequence
    sample_translations, sample_logp = <YOUR CODE>
    # use model to __greedy__ symbolic translations given input_sequence
    greedy_translations, greedy_logp = <YOUR CODE>

    # reward for sampled translations: negative Levenshtein distance
    rewards = - compute_levenshtein(input_sequence, sample_translations)

    # compute __negative__ levenshtein for greedy mode
    baseline = <YOUR CODE>

    # compute advantage using rewards and baseline
    advantage = <YOUR CODE: compute advantage>
    assert advantage.shape.ndims == 1, "advantage must be of shape [batch_size]"

    # compute log_pi(a_t|s_t), shape = [batch, seq_length]
    logprobs_phoneme = <YOUR CODE>
    # ^-- hint: look at how crossentropy is implemented in supervised learning loss above
    # mind the sign - this one should not be multiplied by -1 :)

    # Compute policy gradient
    # or rather a surrogate function whose gradient is the policy gradient
    J = logprobs_phoneme*advantage[:, None]

    # mask out padding after the EOS token so it does not contribute to loss
    mask = infer_mask(sample_translations, out_voc.eos_ix)
    loss = - tf.reduce_sum(J*mask) / tf.reduce_sum(mask)

    # regularize with negative entropy. Don't forget the sign!
    # note: for entropy you need probabilities for all tokens (sample_logp), not just phoneme_logprobs
    entropy = <compute entropy matrix of shape[batch, seq_length], H = -sum(p*log_p), don't forget the sign!>
    # hint: you can get sample probabilities from sample_logp using math :)
    assert entropy.shape.ndims == 2, "please make sure elementwise entropy is of shape [batch,time]"

    # entropy bonus with weight 0.01 encourages exploration
    loss -= 0.01*tf.reduce_sum(entropy*mask) / tf.reduce_sum(mask)

    # compute weight updates, clip by global norm to stabilize training
    grads = tf.gradients(loss, model.weights)
    grads = tf.clip_by_global_norm(grads, 50)[0]
    train_step = tf.train.AdamOptimizer(
        learning_rate=1e-5).apply_gradients(zip(grads, model.weights,))

initialize_uninitialized()
```
# Policy gradient training
```
# Policy-gradient training loop: sample a batch, take one SCST step, and
# periodically report validation edit-distance.
for i in trange(100000):
    bx = sample_batch(train_words, word_to_translation, 32)[0]
    pseudo_loss, _ = s.run([trainer.loss, trainer.train_step], {
                           trainer.input_sequence: bx})
    loss_history.append(
        pseudo_loss
    )

    if (i+1) % REPORT_FREQ == 0:
        clear_output(True)
        current_scores = score(test_words)
        editdist_history.append(current_scores.mean())
        plt.figure(figsize=(8, 4))
        plt.subplot(121)
        plt.title('val score distribution')
        plt.hist(current_scores, bins=20)
        plt.subplot(122)
        # Fixed typo in the displayed title: 'traning' -> 'training'.
        plt.title('val score / training time')
        plt.plot(editdist_history)
        plt.grid()
        plt.show()
        print("J=%.3f, mean score=%.3f" %
              (np.mean(loss_history[-10:]), np.mean(editdist_history[-10:])))
```
### Results
```
# Qualitative check: print translations for a few training words.
for word in train_words[:10]:
    print("%s -> %s" % (word, translate([word])[0]))

# Quantitative check: mean edit distance over the test set, in batches of 32.
test_scores = []
for start_i in trange(0, len(test_words), 32):
    batch_words = test_words[start_i:start_i+32]
    batch_trans = translate(batch_words)
    distances = list(map(get_distance, batch_words, batch_trans))
    test_scores.extend(distances)
print("Supervised test score:", np.mean(test_scores))
# ^^ If you get Out Of Memory, please replace this with batched computation
```
## Step 6: Make it actually work (5++ pts)
<img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/do_something_scst.png width=400>
In this section we want you to finally __restart with EASY_MODE=False__ and experiment to find a good model/curriculum for that task.
We recommend you to start with the following architecture
```
encoder---decoder
P(y|h)
^
LSTM -> LSTM
^ ^
biLSTM -> LSTM
^ ^
input y_prev
```
__Note:__ you can fit all 4 state tensors of both LSTMs into a single state - just assume that it contains, for example, [h0, c0, h1, c1] - pack it in encode and update in decode.
Here are some cool ideas on what you can do then.
__General tips & tricks:__
* In some tensorflow versions and for some layers, it is required that each rnn/gru/lstm cell gets it's own `tf.variable_scope(unique_name, reuse=False)`.
* Otherwise it will complain about wrong tensor sizes because it tries to reuse weights from one rnn to the other.
* You will likely need to adjust pre-training time for such a network.
* Supervised pre-training may benefit from clipping gradients somehow.
* SCST may indulge a higher learning rate in some cases and changing entropy regularizer over time.
* It's often useful to save pre-trained model parameters to not re-train it every time you want new policy gradient parameters.
* When leaving training for nighttime, try setting REPORT_FREQ to a larger value (e.g. 500) not to waste time on it.
__Formal criteria:__
To get 5 points we want you to build an architecture that:
* _doesn't consist of single GRU_
* _works better_ than single GRU baseline.
* We also want you to provide either learning curve or trained model, preferably both
* ... and write a brief report or experiment log describing what you did and how it fared.
### Attention
There's more than one way to connect decoder to encoder
* __Vanilla:__ layer_i of encoder last state goes to layer_i of decoder initial state
* __Every tick:__ feed encoder last state _on every iteration_ of decoder.
* __Attention:__ allow decoder to "peek" at one (or several) positions of encoded sequence on every tick.
The most effective (and cool) of those is, of course, attention.
You can read more about attention [in this nice blog post](https://distill.pub/2016/augmented-rnns/). The easiest way to begin is to use "soft" attention with "additive" or "dot-product" intermediate layers.
__Tips__
* Model usually generalizes better if you no longer allow decoder to see final encoder state
* Once your model made it through several epochs, it is a good idea to visualize attention maps to understand what your model has actually learned
* There's more stuff [here](https://github.com/yandexdataschool/Practical_RL/blob/master/week8_scst/bonus.ipynb)
* If you opted for hard attention, we recommend [gumbel-softmax](https://blog.evjang.com/2016/11/tutorial-categorical-variational.html) instead of sampling. Also please make sure soft attention works fine before you switch to hard.
### UREX
* This is a way to improve exploration in policy-based settings. The main idea is that you find and upweight under-appreciated actions.
* Here's [video](https://www.youtube.com/watch?v=fZNyHoXgV7M&feature=youtu.be&t=3444)
and an [article](https://arxiv.org/abs/1611.09321).
* You may want to reduce batch size 'cuz UREX requires you to sample multiple times per source sentence.
* Once you got it working, try using experience replay with importance sampling instead of (in addition to) basic UREX.
### Some additional ideas:
* (advanced deep learning) It may be a good idea to first train on small phrases and then adapt to larger ones (a.k.a. training curriculum).
* (advanced nlp) You may want to switch from raw utf8 to something like unicode or even syllables to make task easier.
* (advanced nlp) Since hebrew words are written __with vowels omitted__, you may want to use a small Hebrew vowel markup dataset at `he-pron-wiktionary.txt`.
### Bonus hints: [here](https://github.com/yandexdataschool/Practical_RL/blob/master/week8_scst/bonus.ipynb)
```
assert not EASY_MODE, "make sure you set EASY_MODE = False at the top of the notebook."
```
`[your report/log here or anywhere you please]`
__Contributions:__ This notebook is brought to you by
* Yandex [MT team](https://tech.yandex.com/translate/)
* Denis Mazur ([DeniskaMazur](https://github.com/DeniskaMazur)), Oleg Vasilev ([Omrigan](https://github.com/Omrigan/)), Dmitry Emelyanenko ([TixFeniks](https://github.com/tixfeniks)) and Fedor Ratnikov ([justheuristic](https://github.com/justheuristic/))
* Dataset is parsed from [Wiktionary](https://en.wiktionary.org), which is under CC-BY-SA and GFDL licenses.
| github_jupyter |
```
#@title Copyright 2022 The Cirq Developers
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Gate Zoo
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
## Setup
Note: this notebook relies on unreleased Cirq features. If you want to try these features, make sure you install cirq via `pip install cirq --pre`
```
# Install Cirq on first run (e.g. in Colab); --pre picks up pre-release builds.
# Note: `!pip` is IPython shell magic, valid only inside a notebook.
try:
    import cirq
except ImportError:
    print("installing cirq...")
    !pip install --quiet --pre cirq
    print("installed cirq.")
import IPython.display as ipd
import cirq
import inspect
def display_gates(*gates):
    """Render a markdown section (separator, heading, docstring) for each
    named Cirq gate, followed by one closing separator."""
    def show_markdown(text):
        ipd.display(ipd.Markdown(text))

    for gate_name in gates:
        show_markdown("---")
        doc = inspect.cleandoc(getattr(cirq, gate_name).__doc__ or "")
        show_markdown(f"#### cirq.{gate_name}")
        show_markdown(doc)
    # The original used for/else; with no `break` the else-branch always runs,
    # so an unconditional trailing separator is equivalent.
    show_markdown("---")
```
Cirq comes with many gates that are standard across quantum computing. This notebook serves as a reference sheet for these gates.
## Single Qubit Gates
### Gate Constants
Cirq defines constants which are gate instances for particular important single qubit gates.
```
display_gates("X", "Y", "Z", "H", "S", "T")
```
### Traditional Pauli Rotation Gates
Cirq defines traditional single qubit rotations that are rotations in radians about different Pauli axes.
```
display_gates("Rx", "Ry", "Rz")
```
### Pauli PowGates
If you think of the `cirq.Z` gate as phasing the state $|1\rangle$ by $-1$, then you might think that the square root of this gate phases the state $|1\rangle$ by $i=\sqrt{-1}$. The `XPowGate`, `YPowGate` and `ZPowGate`s all act in this manner, phasing the state corresponding to their $-1$ eigenvalue by a prescribed amount. This ends up being the same as the `Rx`, `Ry`, and `Rz` up to a global phase.
```
display_gates("XPowGate", "YPowGate", "ZPowGate")
```
### More Single Qubit Gates
Many quantum computing implementations use qubits whose energy eigenstate are the computational basis states. In these cases it is often useful to move `cirq.ZPowGate`'s through other single qubit gates, "phasing" the other gates. For these scenarios, the following phased gates are useful.
```
display_gates("PhasedXPowGate", "PhasedXZGate", "HPowGate")
```
## Two Qubit Gates
### Gate Constants
Cirq defines convenient constants for common two qubit gates.
```
display_gates("CX", "CZ", "SWAP", "ISWAP", "SQRT_ISWAP", "SQRT_ISWAP_INV")
```
### Parity Gates
If $P$ is a non-identity Pauli matrix, then it has eigenvalues $\pm 1$. $P \otimes P$ similarly has eigenvalues $\pm 1$ which are the product of the eigenvalues of the single $P$ eigenvalues. In this sense, $P \otimes P$ has an eigenvalue which encodes the parity of the eigenvalues of the two qubits. If you think of $P \otimes P$ as phasing its $-1$ eigenvectors by $-1$, then you could consider $(P \otimes P)^{\frac{1}{2}}$ as the gate that phases the $-1$ eigenvectors by $\sqrt{-1} =i$. The Parity gates are exactly these gates for the three different non-identity Paulis.
```
display_gates("XXPowGate", "YYPowGate", "ZZPowGate")
```
There are also constants that one can use to define the parity gates via exponentiating them.
```
display_gates("XX", "YY", "ZZ")
```
### Fermionic Gates
If we think of $|1\rangle$ as an excitation, then the gates that preserve the number of excitations are the fermionic gates. There are two implementations, with differing phase conventions.
```
display_gates("FSimGate", "PhasedFSimGate")
```
### Two qubit PowGates
Just as `cirq.XPowGate` represents a powering of `cirq.X`, our two qubit gate constants also have corresponding "Pow" versions.
```
display_gates("SwapPowGate", "ISwapPowGate", "CZPowGate", "CXPowGate", "PhasedISwapPowGate")
```
## Three Qubit Gates
### Gate Constants
Cirq provides constants for common three qubit gates.
```
display_gates("CCX", "CCZ", "CSWAP")
```
### Three Qubit Pow Gates
Corresponding to some of the above gate constants are the corresponding PowGates.
```
display_gates("CCXPowGate", "CCZPowGate")
```
## N Qubit Gates
### Do Nothing Gates
Sometimes you just want a gate to represent doing nothing.
```
display_gates("IdentityGate", "WaitGate")
```
### Measurement Gates
Measurement gates are gates that represent a measurement and can operate on any number of qubits.
```
display_gates("MeasurementGate")
```
### Matrix Gates
If one has a specific unitary matrix in mind, then one can construct it using matrix gates, or, if the unitary is diagonal, the diagonal gates.
```
display_gates("MatrixGate", "DiagonalGate", "TwoQubitDiagonalGate", "ThreeQubitDiagonalGate")
```
### Pauli String Gates
Pauli strings are expressions like "XXZ" representing the Pauli operator X operating on the first two qubits, and Z on the last qubit, along with a numeric (or symbolic) coefficient. When the coefficient is a unit complex number, then this is a valid unitary gate. Similarly one can construct gates which phases the $\pm 1$ eigenvalues of such a Pauli string.
```
display_gates("DensePauliString", "MutableDensePauliString", "PauliStringPhasorGate")
```
### Algorithm Based Gates
It is useful to define composite gates which correspond to algorithmic primitives, i.e. one can think of the fourier transform as a single unitary gate.
```
display_gates("BooleanHamiltonianGate", "QuantumFourierTransformGate", "PhaseGradientGate")
```
### Classical Permutation Gates
Sometimes you want to represent shuffling of qubits.
```
display_gates("QubitPermutationGate")
```
| github_jupyter |
# Breakpoint analysis for damaging winds or rain
Here, we explore the idea that wind/rain damage occurs above some threshold of wind speed, rain rate or rain accumulation.
The damage survey results are classified into damaged/not damaged, and the rate of damaged buildings for a given wind speed/rain rate/rain accumulation is determined by binning the number of damaged buildings per wind speed interval.
We then attempt to determine the threshold at which the damage rate increases significantly, using a Bayesian approach.
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_context("poster")
sns.set_style("whitegrid")
sns.set_palette("hls")
```
Read in the damage dataset
```
#filename = "//nas/gemd/georisk/HaRIA_B_Wind/projects/impact_forecasting/data/exposure/NSW/April_2015_Impact_Assessment/Property_Damage_cleaned.csv"
filename = "C:/Workspace/data/derived/exposure/NSW/Property_Damage_cleaned.csv"
df = pd.read_csv(filename)
```
There are a number of blank fields throughout the data where a value was not entered into the dataset by the assessor. We need to keep track of the missing data, as well as the entered data, so we will find all 'NaN' values in the dataset, and change these to 'Not given' so we can include them in subsequent analyses.
```
df = df.fillna('Not given')
```
Now we add a column that indicates whether the building was damaged or not. Any building which is flagged as 'Minor', 'Major', 'Severe' or 'Destroyed' is tagged as damaged
```
# Flag a building as damaged (1.0) when any degree of damage was recorded;
# the boolean isin() mask is cast to float to match the original 0.0/1.0 column.
df['Damaged'] = df['EICU_Degdamage'].isin(['Destroyed - 76-100%',
                                           'Severe Impact - 51-75%',
                                           'Major Impact - 26-50%',
                                           'Minor Impact - 1-25%']).astype(float)
```
Determine the maximum wind speed for all data points, and set up bins to determine the rate of damage.
```
# Maximum forecast wind speed among damaged buildings, and 0.5 m/s bin edges.
vmax = df[df['Damaged']>0]["combined_alltimes_maxwind_stage4_ens12"].max()
bins = np.arange(0, vmax, 0.5)
# NOTE(review): passing bins=len(bins) makes np.histogram pick its own edges
# over the data's min..max range, which generally differ from the `bins`
# array used as bar positions below -- confirm this mismatch is intended.
# (Downstream cells rely on len(hist) == len(bins), so it is not changed here.)
hist, edges = np.histogram(df[df['Damaged']==1]["combined_alltimes_maxwind_stage4_ens12"].values,
                           bins=len(bins),
                           density=False)
plt.bar(bins, hist, width=0.5)
_ = plt.xlabel("Model forecast wind speed (m/s)")
_ = plt.ylabel("Number of damaged buildings")
```
Now we will explore the onset of damage as a function of wind speed.
```
import pymc

# Changepoint model (PyMC2): damage counts per wind-speed bin are Poisson
# with a mean that switches from `early_mean` to `late_mean` at an unknown
# `switchpoint`.
switchpoint = pymc.DiscreteUniform('switchpoint',lower=0, upper=vmax)
early_mean = pymc.Exponential('early_mean', beta=1)
late_mean = pymc.Exponential('late_mean', beta=1)

@pymc.deterministic(plot=False)
def rate(s=switchpoint, e=early_mean, l=late_mean):
    # Piecewise-constant Poisson rate: `e` below the switchpoint, `l` above.
    out = np.empty(len(bins))
    out[:s] = e
    out[s:] = l
    return out

# NOTE(review): `switchpoint` is sampled on 0..vmax (wind-speed units) but
# used as an integer index into the bin array in rate(); those coincide only
# up to the 0.5 m/s bin width -- confirm this is intended.
damage = pymc.Poisson('damage', mu=rate, value=hist, observed=True)

model = pymc.Model([switchpoint, early_mean, late_mean, rate, damage])
mcmc = pymc.MCMC(model)
# 10000 iterations, first 1000 discarded as burn-in, keep every 10th sample.
mcmc.sample(iter=10000, burn=1000, thin=10)

# Trace plots: visual check of MCMC mixing for each parameter.
plt.figure(figsize=(12,12))
plt.subplot(311);
plt.plot(mcmc.trace('switchpoint')[:]);
plt.ylabel("Switch point");
plt.subplot(312);
plt.plot(mcmc.trace('early_mean')[:]);
plt.ylabel("Early mean");
plt.subplot(313);
plt.plot(mcmc.trace('late_mean')[:]);
plt.xlabel("Iteration");
plt.ylabel("Late mean");
plt.tight_layout()

# Posterior histograms for the three parameters.
plt.figure(figsize=(14,3))
plt.subplot(131);
plt.hist(mcmc.trace('switchpoint')[:], 15,);
plt.xlabel("Switch point")
plt.ylabel("Distribution")
plt.subplot(132);
plt.hist(mcmc.trace('early_mean')[:], 15);
plt.xlabel("Early mean");
plt.subplot(133);
plt.hist(mcmc.trace('late_mean')[:], 15);
plt.xlabel("Late mean");
plt.tight_layout()

# Posterior means/stds; yp is the mean switchpoint rounded to an integer,
# then used as a bin index below.
yp = np.round(mcmc.trace('switchpoint')[:].mean(), 0)
em = mcmc.trace('early_mean')[:].mean()
es = mcmc.trace('early_mean')[:].std()
lm = mcmc.trace('late_mean')[:].mean()
ls = mcmc.trace('late_mean')[:].std()
print((bins[int(yp)], em, es, lm, ls))

# Damage histogram with the fitted breakpoint and the two mean damage levels.
plt.figure(figsize=(12,6));
plt.bar(bins, hist, width=0.5);
plt.axvline(bins[int(yp)], color='k', ls='--', label="Mean breakpoint");
plt.plot([0, bins[int(yp)]], [em, em], '-b', lw=3, label="Average damage count below threshold");
plt.plot([bins[int(yp)], len(bins)], [lm, lm], '-r', lw=3, label="Average damage count above threshold");
plt.legend(loc=10, bbox_to_anchor=(0.5, -0.2), ncol=3)
plt.xlim(0, vmax);
plt.xlabel("Model forecast wind speed (m/s)");
plt.ylabel("Number damaged buildings");
```
Repeat this process, using rainfall rate as the predictor.
```
# Repeat the breakpoint analysis with accumulated large-scale rain rate as
# the predictor instead of wind speed.
rmax = df[df['Damaged']>0]["combined_alltimes_accum_ls_rainrate_stage4_ens00"].max()
bins = np.linspace(0, rmax, 100)
# NOTE(review): as with the wind-speed cell, bins=len(bins) lets np.histogram
# choose its own edges over the data range, which differ from the `bins`
# positions used by plt.bar -- confirm intended.
hist, edges = np.histogram(df[df['Damaged']==1]["combined_alltimes_accum_ls_rainrate_stage4_ens00"].values,
                           bins=len(bins),
                           density=False)
plt.bar(bins, hist,width=(bins[1]-bins[0]))
_ = plt.xlabel("Modelled precipitation rate (kg/m^2/s)")
_ = plt.ylabel("Number of damaged buildings")

# Same two-rate Poisson changepoint model as for wind speed.
switchpoint = pymc.DiscreteUniform('switchpoint',lower=0, upper=rmax)
early_mean = pymc.Exponential('early_mean', beta=1)
late_mean = pymc.Exponential('late_mean', beta=1)

@pymc.deterministic(plot=False)
def rate(s=switchpoint, e=early_mean, l=late_mean):
    # Piecewise-constant Poisson rate: `e` below the switchpoint, `l` above.
    out = np.empty(len(bins))
    out[:s] = e
    out[s:] = l
    return out

# NOTE(review): `switchpoint` is sampled on 0..rmax (rain-rate units) yet
# used as an integer bin index in rate() -- verify this behaves as intended.
damage = pymc.Poisson('damage', mu=rate, value=hist, observed=True)

model = pymc.Model([switchpoint, early_mean, late_mean, rate, damage])
mcmc = pymc.MCMC(model)
mcmc.sample(iter=10000, burn=1000, thin=10)

# Trace plots for mixing diagnostics.
plt.figure(figsize=(12,12))
plt.subplot(311);
plt.plot(mcmc.trace('switchpoint')[:]);
plt.ylabel("Switch point");
plt.subplot(312);
plt.plot(mcmc.trace('early_mean')[:]);
plt.ylabel("Early mean");
plt.subplot(313);
plt.plot(mcmc.trace('late_mean')[:]);
plt.xlabel("Iteration");
plt.ylabel("Late mean");
plt.tight_layout()

# Posterior histograms for the three parameters.
plt.figure(figsize=(14,3))
plt.subplot(131);
plt.hist(mcmc.trace('switchpoint')[:], 15,);
plt.xlabel("Switch point")
plt.ylabel("Distribution")
plt.subplot(132);
plt.hist(mcmc.trace('early_mean')[:], 15);
plt.xlabel("Early mean");
plt.subplot(133);
plt.hist(mcmc.trace('late_mean')[:], 15);
plt.xlabel("Late mean");
plt.tight_layout()

# Posterior summaries; yp is the mean switchpoint rounded to an integer index.
yp = np.round(mcmc.trace('switchpoint')[:].mean(), 0)
em = mcmc.trace('early_mean')[:].mean()
es = mcmc.trace('early_mean')[:].std()
lm = mcmc.trace('late_mean')[:].mean()
ls = mcmc.trace('late_mean')[:].std()
print((bins[int(yp)], em, es, lm, ls))

# Damage histogram with the fitted breakpoint and the two mean damage levels.
plt.figure(figsize=(12,6));
plt.bar(bins, hist, width=bins[1]-bins[0]);
plt.axvline(bins[int(yp)], color='k', ls='--', label="Mean breakpoint");
plt.plot([0, bins[int(yp)]], [em, em], '-b', lw=3, label="Average damage count below threshold");
plt.plot([bins[int(yp)], len(bins)], [lm, lm], '-r', lw=3, label="Average damage count above threshold");
plt.legend(loc=10, bbox_to_anchor=(0.5, -0.2), ncol=3)
plt.xlim(0, rmax);
plt.xlabel("Rainfall rate (kg/m^2/s)");
plt.ylabel("Number damaged buildings");
```
TODO:
* Compare to NEXIS building points per bin (wind speed/rainfall rate) for the region
| github_jupyter |
## Module 2.2: Working with CNNs in Keras (A Review)
We turn to implementing a CNN in the Keras functional API. In this module we will pay attention to:
1. Using the Keras functional API for defining models.
2. Implementing dropout regularization.
Those students who are comfortable with all these matters might consider skipping ahead.
Note that we will not spend time tuning hyper-parameters: The purpose is to show how different techniques can be implemented in Keras, not to solve particular data science problems as optimally as possible. Obviously, most techniques include hyper-parameters that need to be tuned for optimal performance.
We start by importing required libraries.
```
import numpy as np
from sklearn.metrics import confusion_matrix,classification_report
from keras.datasets import cifar10
from keras.models import Sequential
from keras import Model
from keras.layers import Dense,Dropout,Flatten,Activation,Input
from keras.optimizers import Adam
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
import matplotlib.pyplot as plt
```
We will use the CIFAR10 dataset. This consists of small (32 x 32 pixel) color images of 10 different types of objects. It is included in the keras.datasets library.
We load the images. These are already split into training and test cases. We need to normalize the pixel values to be between 0 and 1, and turn our integer labels into one-hot vectors - these are 1d-arrays of length the same as the number of classes, with zeros everywhere except the label specified, which is a 1. They are the probability that the image is of different classes.
We also make a vector of class/label names for display purposes, as the label arrays contain only integers.
```
# Load images
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# Make versions of the labels that are one-hot vectors
train_labels_array=np_utils.to_categorical(train_labels, 10)
test_labels_array=np_utils.to_categorical(test_labels, 10)
# Make vector of classnames
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
train_labels_array.shape
```
Let's make a function to have a look at the images.
```
def show_images(images, labels, class_names, random=True):
    """Display a 5x5 grid of images with their class-name labels.

    images: array of images, shape (N, H, W, C).
    labels: per-image labels; each entry is a length-1 array (the CIFAR
            convention), hence the trailing [0] index below.
    class_names: list mapping label integers to display names.
    random: if True show 25 randomly chosen images, else the first 25.
    """
    plt.figure(figsize=(10, 10))
    if random:
        indices = np.random.randint(0, images.shape[0], 25)
    else:
        # np.arange replaces the original list-comprehension-to-array idiom.
        indices = np.arange(25)
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(images[indices[i]], cmap=plt.cm.binary)
        plt.xlabel(class_names[labels[indices[i]][0]])
    plt.show()
```
Now we run it. We will see 25 random images from the dataset that we pass. If you set random=False you will see the first 25 images, the variety of which reassures us that the data is in a random order. (If this was a real world problem, such re-assurances would be insufficient, and we would shuffle the data.)
```
show_images(train_images,train_labels,class_names,False)
```
Now we create a function that will define the network architecture. Note that we introduce dropout layers for regularization purposes. We discussed these in the last module.
For comparison, the code to specify the same network using the sequential approach is provided in a second function.
```
def get_model():
    """Build the CNN with the Keras functional API.

    Architecture: three 3x3 conv blocks (32/64/64 filters, ReLU), with
    2x2 max-pooling and 0.5 dropout after the first two, then a 64-unit
    dense layer and a 10-way softmax output.
    """
    inputs = Input(shape=(32, 32, 3), name="Input")
    x = Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))(inputs)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(0.5)(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(0.5)(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Flatten()(x)
    x = Dense(64, activation='relu')(x)
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)
# For comparison: the same architecture built with the Sequential API.
def get_model_seqential():
    """Build the identical CNN using the Sequential API instead of the
    functional one (shown for comparison only)."""
    layers = [
        Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
        MaxPooling2D((2, 2)),
        Dropout(0.5),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Dropout(0.5),
        Conv2D(64, (3, 3), activation='relu'),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
```
We will get our model.
```
model=get_model()
```
Now we will define an optimizer and compile it. If you are unfamiliar with the different types of optimizers available in keras, I suggest you read the keras documentation [here](https://keras.io/optimizers/) and play around training the model with different alternatives.
```
opt=Adam()
```
And we compile our model with the optimizer ready for training. We use categorical crossentropy as our loss function as this is a good default choice for working with a multi-class categorical target variable (i.e. the image labels).
```
model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
```
Now we fit (train) the model. We will set the training to continue for 100 epochs, but use an early stopping callback which means it should terminate much quicker than this.
```
# Before calling fit, we create the Early Stopping callback.
# We set it up to stop if improvement in the validation loss
# does not occur over 10 epochs. When stopping occurs, the
# weights associated with the best validation loss are restored.
earlyStopping = EarlyStopping(monitor="val_loss",
patience=10,
verbose=1,
restore_best_weights=True)
# We need to use the one-hot vector version of the labels
# This shouldn't go through all 100 epoches, because of the
# early stopping, but can take some time.
history = model.fit(train_images,
train_labels_array,
epochs=100,
shuffle=True,
callbacks=[earlyStopping],
validation_split=.2)
```
We will plot the training history to see a graphical representation of the training.
```
def plot_training_history(history):
    """Plot training/validation accuracy and loss curves from a Keras History.

    Robust to both metric-key spellings: older Keras records 'acc'/'val_acc',
    newer versions record 'accuracy'/'val_accuracy'. Falls back to the new
    keys only when the old ones are absent, so existing behavior is unchanged.
    """
    hist = history.history
    acc_key = 'acc' if 'acc' in hist else 'accuracy'
    val_acc_key = 'val_acc' if 'val_acc' in hist else 'val_accuracy'
    plt.plot(hist[acc_key])
    plt.plot(hist[val_acc_key])
    plt.plot(hist['loss'])
    plt.plot(hist['val_loss'])
    plt.title('Model accuracy and loss')
    plt.xlabel('Epoch')
    plt.legend(['Accuracy','Validation Accuracy', 'Loss',
                'Validation Loss'], loc='upper right')
    plt.show()

plot_training_history(history)
```
Finally, for fun lets see how our improved model performs on our test data. But remember that we have not spent any time or effort optimizing this model - for a real problem we would determine good values for the dropout regularization, as well as tune the architecture and optimizer.
We make a function that will show the confusion matrix, and then run it.
```
def test_model(model, x, y):
    """Print a confusion matrix and classification report for `model` on (x, y)."""
    predictions = np.argmax(model.predict(x), axis=1)
    print("Confusion Matrix:")
    print(confusion_matrix(y, predictions))
    print("Classification report:")
    print(classification_report(y, predictions))

test_model(model, test_images, test_labels)
```
| github_jupyter |
## Dependencies
```
import os
import sys
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import multiprocessing as mp
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler
def seed_everything(seed=0):
    """Seed Python, NumPy and TensorFlow RNGs (plus the hash seed) for
    reproducible runs."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    # Bug fix: the TF seed was hard-coded to 0, silently ignoring the
    # `seed` argument for any non-zero value.
    set_random_seed(seed)

seed = 0
seed_everything(seed)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import *
```
## Load data
```
hold_out_set = pd.read_csv('../input/aptos-split-oldnew/hold-out_5.csv')
X_train = hold_out_set[hold_out_set['set'] == 'train']
X_val = hold_out_set[hold_out_set['set'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
display(X_train.head())
```
# Model parameters
```
# Model parameters
FACTOR = 4
BATCH_SIZE = 8 * FACTOR
EPOCHS = 20
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-4 * FACTOR
WARMUP_LEARNING_RATE = 1e-3 * FACTOR
HEIGHT = 224
WIDTH = 224
CHANNELS = 3
TTA_STEPS = 5
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
LR_WARMUP_EPOCHS_1st = 2
LR_WARMUP_EPOCHS_2nd = 5
STEP_SIZE = len(X_train) // BATCH_SIZE
TOTAL_STEPS_1st = WARMUP_EPOCHS * STEP_SIZE
TOTAL_STEPS_2nd = EPOCHS * STEP_SIZE
WARMUP_STEPS_1st = LR_WARMUP_EPOCHS_1st * STEP_SIZE
WARMUP_STEPS_2nd = LR_WARMUP_EPOCHS_2nd * STEP_SIZE
```
# Pre-process images
```
# Source directories for the 'old' and 'new' datasets referenced by the
# split CSV, plus the test-image directory.
old_data_base_path = '../input/diabetic-retinopathy-resized/resized_train/resized_train/'
new_data_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
# Destination directories for the preprocessed copies.
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'

# Remove any leftover output from a previous run so makedirs cannot fail.
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
    shutil.rmtree(test_dest_path)

# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
    """Crop away near-black borders from an image.

    Keeps only rows/columns in which at least one pixel exceeds the
    intensity threshold `tol` (RGB images are thresholded on a grayscale
    copy). Handles both 2-D (grayscale) and 3-D (RGB) arrays.
    """
    if img.ndim ==2:
        mask = img>tol
        return img[np.ix_(mask.any(1),mask.any(0))]
    elif img.ndim==3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img>tol
        # Number of rows that would survive the crop; zero means every pixel
        # fell below `tol` and the crop would remove the whole image.
        check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
        if (check_shape == 0): # image is too dark so that we crop out everything,
            return img # return original image
        else:
            # Apply the same row/column mask to each channel, then restack.
            img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
            img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
            img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
            img = np.stack([img1,img2,img3],axis=-1)
        return img
def circle_crop(img):
    """Crop to the circular retina region.

    Trims the dark border, pads the result to a square, masks out everything
    outside the inscribed circle, then trims the border once more.
    """
    img = crop_image(img)
    h, w, _ = img.shape
    side = np.max((h, w))
    img = cv2.resize(img, (side, side))
    h, w, _ = img.shape
    center_x = w // 2
    center_y = h // 2
    radius = np.amin((center_x, center_y))
    # Filled circle of ones used as a bitwise mask.
    circle_mask = np.zeros((h, w), np.uint8)
    cv2.circle(circle_mask, (center_x, center_y), int(radius), 1, thickness=-1)
    img = cv2.bitwise_and(img, img, mask=circle_mask)
    return crop_image(img)
def preprocess_image(image_id, base_path, save_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
    """Load one image, circle-crop it, resize to (HEIGHT, WIDTH) and write
    the result to `save_path` under the same file name."""
    raw = cv2.imread(base_path + image_id)
    rgb = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
    cropped = circle_crop(rgb)
    resized = cv2.resize(cropped, (HEIGHT, WIDTH))
    # Ben Graham style contrast enhancement, currently disabled:
    # image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4 , 128)
    cv2.imwrite(save_path + image_id, resized)
def preprocess_data(df, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
    """Pre-process every train/validation image listed in `df`.

    Each row chooses its source folder from the 'data' column (new vs. old
    competition data) and its destination from the 'set' column. Rows whose
    (set, data) pair matches none of the known routes are skipped, exactly
    as in the original nested-if version.
    """
    df = df.reset_index()
    # (set, data) -> (source folder, destination folder)
    routes = {
        ('train', 'new'): (new_data_base_path, train_dest_path),
        ('train', 'old'): (old_data_base_path, train_dest_path),
        ('validation', 'new'): (new_data_base_path, validation_dest_path),
        ('validation', 'old'): (old_data_base_path, validation_dest_path),
    }
    for _, item in df.iterrows():
        route = routes.get((item['set'], item['data']))
        if route is not None:
            base, dest = route
            preprocess_image(item['id_code'], base, dest)
def preprocess_test(df, base_path=test_base_path, save_path=test_dest_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
    """Pre-process every test image listed in `df` into `save_path`."""
    for image_id in df.reset_index()['id_code']:
        preprocess_image(image_id, base_path, save_path)
# Split each dataframe into one chunk per CPU and pre-process in parallel.
n_cpu = mp.cpu_count()
train_n_cnt = X_train.shape[0] // n_cpu
val_n_cnt = X_val.shape[0] // n_cpu
test_n_cnt = test.shape[0] // n_cpu
# Pre-process train set; the last chunk also absorbs the remainder rows.
pool = mp.Pool(n_cpu)
dfs = [X_train.iloc[train_n_cnt*i:train_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_train.iloc[train_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])  # workers only write files; return value unused
pool.close()
# Pre-process validation set
pool = mp.Pool(n_cpu)
dfs = [X_val.iloc[val_n_cnt*i:val_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_val.iloc[val_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()
# Pre-process test set
pool = mp.Pool(n_cpu)
dfs = [test.iloc[test_n_cnt*i:test_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = test.iloc[test_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_test, [x_df for x_df in dfs])
pool.close()
```
# Data generator
```
# Shared augmenting generator: rescale to [0, 1], random rotation and flips.
# NOTE(review): the same augmenting datagen is reused for validation and test;
# a rescale-only generator is more typical there — confirm intent.
datagen=ImageDataGenerator(rescale=1./255,
                           rotation_range=360,
                           horizontal_flip=True,
                           vertical_flip=True)
# class_mode="raw" feeds the integer diagnosis directly (regression target).
train_generator=datagen.flow_from_dataframe(
    dataframe=X_train,
    directory=train_dest_path,
    x_col="id_code",
    y_col="diagnosis",
    class_mode="raw",
    batch_size=BATCH_SIZE,
    target_size=(HEIGHT, WIDTH),
    seed=seed)
valid_generator=datagen.flow_from_dataframe(
    dataframe=X_val,
    directory=validation_dest_path,
    x_col="id_code",
    y_col="diagnosis",
    class_mode="raw",
    batch_size=BATCH_SIZE,
    target_size=(HEIGHT, WIDTH),
    seed=seed)
# Test generator: batch_size=1, unshuffled, no labels, so predictions keep
# the dataframe's row order.
test_generator=datagen.flow_from_dataframe(
    dataframe=test,
    directory=test_dest_path,
    x_col="id_code",
    batch_size=1,
    class_mode=None,
    shuffle=False,
    target_size=(HEIGHT, WIDTH),
    seed=seed)
def cosine_decay_with_warmup(global_step,
                             learning_rate_base,
                             total_steps,
                             warmup_learning_rate=0.0,
                             warmup_steps=0,
                             hold_base_rate_steps=0):
    """
    Cosine decay schedule with warm up period.
    In this schedule, the learning rate grows linearly from warmup_learning_rate
    to learning_rate_base for warmup_steps, then transitions to a cosine decay
    schedule.
    :param global_step {int}: global step.
    :param learning_rate_base {float}: base learning rate.
    :param total_steps {int}: total number of training steps.
    :param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
    :param warmup_steps {int}: number of warmup steps. (default: {0}).
    :param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
    :Returns : a float representing learning rate.
    :Raises ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps.
    """
    # Validate both arguments up front instead of after computing the cosine term.
    if total_steps < warmup_steps:
        raise ValueError('total_steps must be larger or equal to warmup_steps.')
    if warmup_steps > 0 and learning_rate_base < warmup_learning_rate:
        raise ValueError('learning_rate_base must be larger or equal to warmup_learning_rate.')
    decay_steps = float(total_steps - warmup_steps - hold_base_rate_steps)
    if decay_steps <= 0:
        # No room left for cosine decay (all steps are warm-up/hold); avoids
        # the ZeroDivisionError the unguarded division would raise.
        learning_rate = 0.0
    else:
        # Cosine decay from learning_rate_base down to 0 over the remaining steps.
        learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
            np.pi *
            (global_step - warmup_steps - hold_base_rate_steps) / decay_steps))
    if hold_base_rate_steps > 0:
        # Hold the base rate flat for hold_base_rate_steps after warm-up.
        learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
                                 learning_rate, learning_rate_base)
    if warmup_steps > 0:
        # Linear ramp from warmup_learning_rate up to learning_rate_base.
        slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
        warmup_rate = slope * global_step + warmup_learning_rate
        learning_rate = np.where(global_step < warmup_steps, warmup_rate,
                                 learning_rate)
    # Past the schedule, the learning rate is pinned at zero.
    return np.where(global_step > total_steps, 0.0, learning_rate)
class WarmUpCosineDecayScheduler(Callback):
    """Keras callback applying cosine decay with a linear warm-up.

    Before every batch the learning rate for the current global step is
    recomputed via ``cosine_decay_with_warmup`` and installed on the
    optimizer; after every batch the rate that was applied is appended to
    ``self.learning_rates`` for later inspection/plotting.
    """

    def __init__(self,
                 learning_rate_base,
                 total_steps,
                 global_step_init=0,
                 warmup_learning_rate=0.0,
                 warmup_steps=0,
                 hold_base_rate_steps=0,
                 verbose=0):
        """Create the scheduler.

        :param learning_rate_base {float}: base (peak) learning rate.
        :param total_steps {int}: total number of training steps.
        :param global_step_init {int}: initial global step, e.g. when resuming from a checkpoint.
        :param warmup_learning_rate {float}: learning rate the warm-up starts from. (default: {0.0}).
        :param warmup_steps {int}: number of linear warm-up steps. (default: {0}).
        :param hold_base_rate_steps {int}: optional number of steps to hold the base rate before decaying. (default: {0}).
        :param verbose {int}: 0: quiet, 1: print the rate set for each batch. (default: {0}).
        """
        super(WarmUpCosineDecayScheduler, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.total_steps = total_steps
        self.global_step = global_step_init
        self.warmup_learning_rate = warmup_learning_rate
        self.warmup_steps = warmup_steps
        self.hold_base_rate_steps = hold_base_rate_steps
        self.verbose = verbose
        # One entry per batch: the learning rate that was actually applied.
        self.learning_rates = []

    def on_batch_begin(self, batch, logs=None):
        # Compute the scheduled rate for the current step and install it.
        lr = cosine_decay_with_warmup(global_step=self.global_step,
                                      learning_rate_base=self.learning_rate_base,
                                      total_steps=self.total_steps,
                                      warmup_learning_rate=self.warmup_learning_rate,
                                      warmup_steps=self.warmup_steps,
                                      hold_base_rate_steps=self.hold_base_rate_steps)
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nBatch %02d: setting learning rate to %s.' % (self.global_step + 1, lr))

    def on_batch_end(self, batch, logs=None):
        # Advance the step counter and record the rate that was just used.
        self.global_step = self.global_step + 1
        lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(lr)
```
# Model
```
def create_model(input_shape):
    """Build an EfficientNetB3 regressor.

    Loads the no-top ImageNet weights from the local weights dataset and
    stacks global average pooling plus a single linear unit on top, so the
    network outputs one continuous score.
    """
    input_tensor = Input(shape=input_shape)
    backbone = EfficientNetB3(weights=None,
                              include_top=False,
                              input_tensor=input_tensor)
    backbone.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b3_imagenet_1000_notop.h5')
    pooled = GlobalAveragePooling2D()(backbone.output)
    final_output = Dense(1, activation='linear', name='final_output')(pooled)
    return Model(input_tensor, final_output)
```
# Train top layers
```
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))
# Freeze the backbone; train only the last two layers (pooling + head).
for layer in model.layers:
    layer.trainable = False
for i in range(-2, 0):
    model.layers[i].trainable = True
cosine_lr_1st = WarmUpCosineDecayScheduler(learning_rate_base=WARMUP_LEARNING_RATE,
                                           total_steps=TOTAL_STEPS_1st,
                                           warmup_learning_rate=0.0,
                                           warmup_steps=WARMUP_STEPS_1st,
                                           hold_base_rate_steps=(2 * STEP_SIZE))
metric_list = ["accuracy"]
callback_list = [cosine_lr_1st]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
# MSE loss: the diagnosis grade is treated as a continuous regression target.
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
                                     steps_per_epoch=STEP_SIZE_TRAIN,
                                     validation_data=valid_generator,
                                     validation_steps=STEP_SIZE_VALID,
                                     epochs=WARMUP_EPOCHS,
                                     callbacks=callback_list,
                                     verbose=2).history
```
# Fine-tune the complete model
```
# Unfreeze the whole network for fine-tuning.
for layer in model.layers:
    layer.trainable = True
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
cosine_lr_2nd = WarmUpCosineDecayScheduler(learning_rate_base=LEARNING_RATE,
                                           total_steps=TOTAL_STEPS_2nd,
                                           warmup_learning_rate=0.0,
                                           warmup_steps=WARMUP_STEPS_2nd,
                                           hold_base_rate_steps=(3 * STEP_SIZE))
callback_list = [es, cosine_lr_2nd]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=STEP_SIZE_TRAIN,
                              validation_data=valid_generator,
                              validation_steps=STEP_SIZE_VALID,
                              epochs=EPOCHS,
                              callbacks=callback_list,
                              verbose=2).history
# Plot the learning-rate schedules actually applied in both phases.
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 6))
ax1.plot(cosine_lr_1st.learning_rates)
ax1.set_title('Warm up learning rates')
ax2.plot(cosine_lr_2nd.learning_rates)
ax2.set_title('Fine-tune learning rates')
plt.xlabel('Steps')
plt.ylabel('Learning rate')
sns.despine()
plt.show()
```
# Model loss graph
```
# Loss/accuracy curves for the fine-tuning phase.
# NOTE(review): keys 'acc'/'val_acc' assume an older Keras; newer versions
# emit 'accuracy'/'val_accuracy' — confirm against the installed version.
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 14))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
# Create empty arrays to keep the predictions and labels
df_preds = pd.DataFrame(columns=['label', 'pred', 'set'])
train_generator.reset()
valid_generator.reset()
# Add train predictions and labels (one extra step to cover the partial batch)
for i in range(STEP_SIZE_TRAIN + 1):
    im, lbl = next(train_generator)
    preds = model.predict(im, batch_size=train_generator.batch_size)
    for index in range(len(preds)):
        df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID + 1):
    im, lbl = next(valid_generator)
    preds = model.predict(im, batch_size=valid_generator.batch_size)
    for index in range(len(preds)):
        df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']
df_preds['label'] = df_preds['label'].astype('int')
def classify(x):
    """Map a continuous prediction to a discrete grade in {0, ..., 4}.

    Thresholds sit halfway between consecutive integer labels, so this is
    rounding clamped to the valid label range.
    """
    for grade, upper_bound in enumerate((0.5, 1.5, 2.5, 3.5)):
        if x < upper_bound:
            return grade
    return 4
# Classify the continuous predictions into grades, then split by data set.
df_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))
train_preds = df_preds[df_preds['set'] == 'train']
validation_preds = df_preds[df_preds['set'] == 'validation']
```
# Model Evaluation
## Confusion Matrix
### Original thresholds
```
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
    """Plot row-normalized confusion matrices for train and validation.

    `train` and `validation` are each a (labels, predictions) pair.
    """
    def normalized_cm(y_true, y_pred):
        # Confusion matrix normalized per true class, wrapped as a DataFrame
        # so the heatmap axes carry the class names.
        cm = confusion_matrix(y_true, y_pred)
        cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        return pd.DataFrame(cm_norm, index=labels, columns=labels)

    fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
    sns.heatmap(normalized_cm(*train), annot=True, fmt='.2f', cmap="Blues", ax=ax1).set_title('Train')
    sns.heatmap(normalized_cm(*validation), annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8), ax=ax2).set_title('Validation')
    plt.show()
plot_confusion_matrix((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
```
## Quadratic Weighted Kappa
```
def evaluate_model(train, validation):
    """Print the quadratic-weighted Cohen kappa for train, validation and both combined."""
    train_labels, train_preds = train
    validation_labels, validation_preds = validation
    combined_preds = np.append(train_preds, validation_preds)
    combined_labels = np.append(train_labels, validation_labels)
    for set_name, set_preds, set_labels in (("Train", train_preds, train_labels),
                                            ("Validation", validation_preds, validation_labels),
                                            ("Complete set", combined_preds, combined_labels)):
        print("%s Cohen Kappa score: %.3f" % (set_name, cohen_kappa_score(set_preds, set_labels, weights='quadratic')))
evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
```
## Apply model to test set and output predictions
```
def apply_tta(model, generator, steps=10):
    """Test-time augmentation: average `steps` full prediction passes.

    The generator re-augments the images on each pass, so averaging the
    passes smooths out the augmentation noise.
    """
    num_batches = generator.n // generator.batch_size

    def one_pass():
        generator.reset()
        return model.predict_generator(generator, steps=num_batches)

    return np.mean([one_pass() for _ in range(steps)], axis=0)
# Predict the test set with TTA, discretize, and strip the '.png' suffix
# from the ids for the submission file.
preds = apply_tta(model, test_generator, TTA_STEPS)
predictions = [classify(x) for x in preds]
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
    shutil.rmtree(test_dest_path)
```
# Predictions class distribution
```
# Distribution of the predicted grades, then write the submission file.
fig = plt.subplots(sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d").set_title('Test')
sns.despine()
plt.show()
results.to_csv('submission.csv', index=False)
display(results.head())
```
## Save model
```
model.save_weights('../working/effNetB3_img224.h5')
```
| github_jupyter |
# Dependent density regression
In another [example](dp_mix.ipynb), we showed how to use Dirichlet processes to perform Bayesian nonparametric density estimation. This example expands on the previous one, illustrating dependent density regression.
Just as Dirichlet process mixtures can be thought of as infinite mixture models that select the number of active components as part of inference, dependent density regression can be thought of as infinite [mixtures of experts](https://en.wikipedia.org/wiki/Committee_machine) that select the active experts as part of inference. Their flexibility and modularity make them powerful tools for performing nonparametric Bayesian Data analysis.
```
import arviz as az
import numpy as np
import pandas as pd
import pymc3 as pm
import seaborn as sns
from IPython.display import HTML
from matplotlib import animation as ani
from matplotlib import pyplot as plt
from theano import tensor as tt
print(f"Running on PyMC3 v{pm.__version__}")
%config InlineBackend.figure_format = 'retina'
plt.rc("animation", writer="ffmpeg")
blue, *_ = sns.color_palette()
az.style.use("arviz-darkgrid")
SEED = 972915 # from random.org; for reproducibility
np.random.seed(SEED)
```
We will use the LIDAR data set from Larry Wasserman's excellent book, [_All of Nonparametric Statistics_](http://www.stat.cmu.edu/~larry/all-of-nonpar/). We standardize the data set to improve the rate of convergence of our samples.
```
DATA_URI = "http://www.stat.cmu.edu/~larry/all-of-nonpar/=data/lidar.dat"
def standardize(x):
    """Center *x* on zero mean and scale it to unit standard deviation."""
    centered = x - x.mean()
    return centered / x.std()
# The file is delimited by 1-3 spaces; add standardized copies of both columns.
df = pd.read_csv(DATA_URI, sep=r"\s{1,3}", engine="python").assign(
    std_range=lambda df: standardize(df.range), std_logratio=lambda df: standardize(df.logratio)
)
df.head()
```
We plot the LIDAR data below.
```
# Scatter plot of the standardized LIDAR data.
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(df.std_range, df.std_logratio, color=blue)
ax.set_xticklabels([])
ax.set_xlabel("Standardized range")
ax.set_yticklabels([])
ax.set_ylabel("Standardized log ratio");
```
This data set has two interesting properties that make it useful for illustrating dependent density regression.
1. The relationship between range and log ratio is nonlinear, but has locally linear components.
2. The observation noise is [heteroskedastic](https://en.wikipedia.org/wiki/Heteroscedasticity); that is, the magnitude of the variance varies with the range.
The intuitive idea behind dependent density regression is to reduce the problem to many (related) density estimates, conditioned on fixed values of the predictors. The following animation illustrates this intuition.
```
# Animate a window sliding along the x-axis (left) while the histogram of the
# y-values inside the window updates (right).
fig, (scatter_ax, hist_ax) = plt.subplots(ncols=2, figsize=(16, 6))
scatter_ax.scatter(df.std_range, df.std_logratio, color=blue, zorder=2)
scatter_ax.set_xticklabels([])
scatter_ax.set_xlabel("Standardized range")
scatter_ax.set_yticklabels([])
scatter_ax.set_ylabel("Standardized log ratio")
bins = np.linspace(df.std_range.min(), df.std_range.max(), 25)
hist_ax.hist(df.std_logratio, bins=bins, color="k", lw=0, alpha=0.25, label="All data")
hist_ax.set_xticklabels([])
hist_ax.set_xlabel("Standardized log ratio")
hist_ax.set_yticklabels([])
hist_ax.set_ylabel("Frequency")
hist_ax.legend(loc=2)
# Overlapping windows: each frame spans two consecutive endpoint intervals.
endpoints = np.linspace(1.05 * df.std_range.min(), 1.05 * df.std_range.max(), 15)
frame_artists = []
for low, high in zip(endpoints[:-1], endpoints[2:]):
    interval = scatter_ax.axvspan(low, high, color="k", alpha=0.5, lw=0, zorder=1)
    *_, bars = hist_ax.hist(
        df[df.std_range.between(low, high)].std_logratio, bins=bins, color="k", lw=0, alpha=0.5
    )
    frame_artists.append((interval,) + tuple(bars))
animation = ani.ArtistAnimation(fig, frame_artists, interval=500, repeat_delay=3000, blit=True)
plt.close()
# prevent the intermediate figure from showing
HTML(animation.to_html5_video())
```
As we slice the data with a window sliding along the x-axis in the left plot, the empirical distribution of the y-values of the points in the window varies in the right plot. An important aspect of this approach is that the density estimates that correspond to close values of the predictor are similar.
In the previous example, we saw that a Dirichlet process estimates a probability density as a mixture model with infinitely many components. In the case of normal component distributions,
$$y \sim \sum_{i = 1}^{\infty} w_i \cdot N(\mu_i, \tau_i^{-1}),$$
where the mixture weights, $w_1, w_2, \ldots$, are generated by a [stick-breaking process](https://en.wikipedia.org/wiki/Dirichlet_process#The_stick-breaking_process).
Dependent density regression generalizes this representation of the Dirichlet process mixture model by allowing the mixture weights and component means to vary conditioned on the value of the predictor, $x$. That is,
$$y\ |\ x \sim \sum_{i = 1}^{\infty} w_i\ |\ x \cdot N(\mu_i\ |\ x, \tau_i^{-1}).$$
In this example, we will follow Chapter 23 of [_Bayesian Data Analysis_](http://www.stat.columbia.edu/~gelman/book/) and use a probit stick-breaking process to determine the conditional mixture weights, $w_i\ |\ x$. The probit stick-breaking process starts by defining
$$v_i\ |\ x = \Phi(\alpha_i + \beta_i x),$$
where $\Phi$ is the cumulative distribution function of the standard normal distribution. We then obtain $w_i\ |\ x$ by applying the stick breaking process to $v_i\ |\ x$. That is,
$$w_i\ |\ x = v_i\ |\ x \cdot \prod_{j = 1}^{i - 1} (1 - v_j\ |\ x).$$
For the LIDAR data set, we use independent normal priors $\alpha_i \sim N(0, 5^2)$ and $\beta_i \sim N(0, 5^2)$. We now express this model for the conditional mixture weights using `PyMC3`.
```
def norm_cdf(z):
    """Standard normal CDF, Phi(z) = (1 + erf(z / sqrt(2))) / 2, as a symbolic Theano expression."""
    scaled = z / np.sqrt(2)
    return 0.5 * (1 + tt.erf(scaled))
def stick_breaking(v):
    """Row-wise stick-breaking: w_i = v_i * prod_{j < i} (1 - v_j).

    `v` holds the break proportions along axis 1; the first component keeps
    the whole stick (hence the leading column of ones).
    """
    leading_ones = tt.ones_like(v[:, :1])
    remaining_stick = tt.extra_ops.cumprod(1 - v, axis=1)[:, :-1]
    return v * tt.concatenate([leading_ones, remaining_stick], axis=1)
N = len(df)
K = 20  # truncation level: number of mixture components kept
std_range = df.std_range.values[:, np.newaxis]
std_logratio = df.std_logratio.values
with pm.Model(coords={"N": np.arange(N), "K": np.arange(K) + 1, "one": [1]}) as model:
    # Probit stick-breaking weights: v_i | x = Phi(alpha_i + beta_i * x).
    alpha = pm.Normal("alpha", 0.0, 5.0, dims="K")
    beta = pm.Normal("beta", 0.0, 5.0, dims=("one", "K"))
    x = pm.Data("x", std_range)  # mutable, so a prediction grid can be swapped in later
    v = norm_cdf(alpha + pm.math.dot(x, beta))
    w = pm.Deterministic("w", stick_breaking(v), dims=["N", "K"])
```
We have defined `x` as a `pm.Data` container in order to use `PyMC3`'s posterior prediction capabilities later.
While the dependent density regression model theoretically has infinitely many components, we must truncate the model to finitely many components (in this case, twenty) in order to express it using `PyMC3`. After sampling from the model, we will verify that truncation did not unduly influence our results.
Since the LIDAR data seems to have several linear components, we use the linear models
$$
\begin{align*}
\mu_i\ |\ x
& \sim \gamma_i + \delta_i x \\
\gamma_i
& \sim N(0, 10^2) \\
\delta_i
& \sim N(0, 10^2)
\end{align*}
$$
for the conditional component means.
```
with model:
    # Per-component linear mean: mu_i | x = gamma_i + delta_i * x.
    gamma = pm.Normal("gamma", 0.0, 10.0, dims="K")
    delta = pm.Normal("delta", 0.0, 10.0, dims=("one", "K"))
    mu = pm.Deterministic("mu", gamma + pm.math.dot(x, delta))
```
Finally, we place the prior $\tau_i \sim \textrm{Gamma}(1, 1)$ on the component precisions.
```
with model:
    # Gamma(1, 1) prior on each component precision tau_i, then the
    # observation likelihood as a normal mixture with weights w and means mu.
    tau = pm.Gamma("tau", 1.0, 1.0, dims="K")
    y = pm.Data("y", std_logratio)
    obs = pm.NormalMixture("obs", w, mu, tau=tau, observed=y)
pm.model_to_graphviz(model)
```
We now sample from the dependent density regression model.
```
# Metropolis sampling: 20k draws after 10k tuning steps.
SAMPLES = 20000
BURN = 10000
with model:
    step = pm.Metropolis()
    trace = pm.sample(SAMPLES, tune=BURN, step=step, random_seed=SEED, return_inferencedata=True)
```
To verify that truncation did not unduly influence our results, we plot the largest posterior expected mixture weight for each component. (In this model, each point has a mixture weight for each component, so we plot the maximum mixture weight for each component across all data points in order to judge if the component exerts any influence on the posterior.)
```
# For each component, the largest posterior-expected weight over all data
# points; components near zero everywhere were effectively unused, so a few
# tall bars indicate the truncation at K did not bias the fit.
fig, ax = plt.subplots(figsize=(8, 6))
max_mixture_weights = trace.posterior["w"].mean(("chain", "draw")).max("N")
ax.bar(max_mixture_weights.coords.to_index(), max_mixture_weights)
ax.set_xlim(1 - 0.5, K + 0.5)
ax.set_xticks(np.arange(0, K, 2) + 1)
ax.set_xlabel("Mixture component")
ax.set_ylabel("Largest posterior expected\nmixture weight");
```
Since only three mixture components have appreciable posterior expected weight for any data point, we can be fairly certain that truncation did not unduly influence our results. (If most components had appreciable posterior expected weight, truncation may have influenced the results, and we would have increased the number of components and sampled again.)
Visually, it is reasonable that the LIDAR data has three linear components, so these posterior expected weights seem to have identified the structure of the data well. We now sample from the posterior predictive distribution to get a better understand the model's performance.
```
# Posterior predictive over a dense grid slightly wider than the data range.
PP_SAMPLES = 5000
lidar_pp_x = np.linspace(std_range.min() - 0.05, std_range.max() + 0.05, 100)
with model:
    pm.set_data({"x": lidar_pp_x[:, np.newaxis]})
    pp_trace = pm.sample_posterior_predictive(trace, PP_SAMPLES, random_seed=SEED)
```
Below we plot the posterior expected value and the 95% posterior credible interval.
```
# Posterior expected value and central 95% credible band against the raw data.
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(df.std_range, df.std_logratio, color=blue, zorder=10, label=None)
low, high = np.percentile(pp_trace["obs"], [2.5, 97.5], axis=0)
ax.fill_between(
    lidar_pp_x, low, high, color="k", alpha=0.35, zorder=5, label="95% posterior credible interval"
)
ax.plot(lidar_pp_x, pp_trace["obs"].mean(axis=0), c="k", zorder=6, label="Posterior expected value")
ax.set_xticklabels([])
ax.set_xlabel("Standardized range")
ax.set_yticklabels([])
ax.set_ylabel("Standardized log ratio")
ax.legend(loc=1)
ax.set_title("LIDAR Data");
```
The model has fit the linear components of the data well, and also accommodated its heteroskedasticity. This flexibility, along with the ability to modularly specify the conditional mixture weights and conditional component densities, makes dependent density regression an extremely useful nonparametric Bayesian model.
To learn more about dependent density regression and related models, consult [_Bayesian Data Analysis_](http://www.stat.columbia.edu/~gelman/book/), [_Bayesian Nonparametric Data Analysis_](http://www.springer.com/us/book/9783319189673), or [_Bayesian Nonparametrics_](https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=bayesian+nonparametrics+book).
This example first appeared [here](http://austinrochford.com/posts/2017-01-18-ddp-pymc3.html).
Author: [Austin Rochford](https://github.com/AustinRochford/)
```
%load_ext watermark
%watermark -n -u -v -iv -w
```
| github_jupyter |
# MLflow end-to-end example
In this example we are going to build a model using `mlflow`, pack and deploy locally using `tempo` (in docker and local kubernetes cluster).
We are going to follow the MNIST pytorch example from `mlflow`; check this [link](https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST) for more information.
In this example we will:
* [Train MNIST Model using mlflow and pytorch](#Train-model)
* [Create tempo artifacts](#Save-model-environment)
* [Deploy Locally to Docker](#Deploy-to-Docker)
* [Deploy Locally to Kubernetes](#Deploy-to-Kubernetes)
## Prerequisites
This notebooks needs to be run in the `tempo-examples` conda environment defined below. Create from project root folder:
```bash
conda env create --name tempo-examples --file conda/tempo-examples.yaml
```
## Train model
We train MNIST model below:
### Install prerequisites
```
!pip install mlflow 'torchvision>=0.9.1' torch==1.9.0 pytorch-lightning==1.4.0
!rm -fr /tmp/mlflow
%cd /tmp
!git clone https://github.com/mlflow/mlflow.git
```
### Train model using `mlflow`
```
%cd mlflow/examples/pytorch/MNIST
!mlflow run . --no-conda
!tree -L 1 mlruns/0
```
### Choose test image
```
from torchvision import datasets
# Download the MNIST test split and grab one (image, label) example.
mnist_test = datasets.MNIST('/tmp/data', train=False, download=True)
# change the index below to get a different image for testing
mnist_test = list(mnist_test)[0]
img, category = mnist_test
display(img)
print(category)
```
### Transform test image to numpy
```
import numpy as np
# Flatten the 28x28 PIL image to the (1, 784) float32 layout the model expects.
img_np = np.asarray(img).reshape((1, 28*28)).astype(np.float32)
```
## Save model environment
```
import glob
import os
# Pick the most recently modified mlflow run and locate its model artifacts.
files = glob.glob("mlruns/0/*/")
files.sort(key=os.path.getmtime)
ARTIFACTS_FOLDER = os.path.join(
    os.getcwd(),
    files[-1],
    "artifacts",
    "model"
)
assert os.path.exists(ARTIFACTS_FOLDER)
print(ARTIFACTS_FOLDER)
```
### Define `tempo` model
```
from tempo.serve.metadata import ModelFramework
from tempo.serve.model import Model
mlflow_tag = "mlflow"
# Tempo model wrapper pointing at the local mlflow artifacts.
pytorch_mnist_model = Model(
    name="test-pytorch-mnist",
    platform=ModelFramework.MLFlow,
    local_folder=ARTIFACTS_FOLDER,
    # if we deploy to kube, this defines where the model artifacts are stored
    uri="s3://tempo/basic/mnist",
    description="A pytorch MNIST model",
)
```
### Save model (environment) using `tempo`
Tempo hides many details required to save the model environment for `mlserver`:
- Add required runtime dependencies
- Create a conda pack `environment.tar.gz`
```
from tempo.serve.loader import save
# Packages the model environment (runtime deps + conda pack) for serving.
save(pytorch_mnist_model)
```
## Deploy to Docker
```
from tempo import deploy_local
# Deploy in a local docker container, smoke-test one prediction, then tear down.
local_deployed_model = deploy_local(pytorch_mnist_model)
local_prediction = local_deployed_model.predict(img_np)
print(np.nonzero(local_prediction.flatten() == 0))
local_deployed_model.undeploy()
```
## Deploy to Kubernetes
### Prerequisites
Create a Kind Kubernetes cluster with Minio and Seldon Core installed using Ansible as described [here](https://tempo.readthedocs.io/en/latest/overview/quickstart.html#kubernetes-cluster-with-seldon-core).
```
%cd -0
!kubectl apply -f k8s/rbac -n production
```
### Upload artifacts to minio
```
from tempo.examples.minio import create_minio_rclone
import os
# Write an rclone config for the local minio, then upload the model artifacts.
create_minio_rclone(os.getcwd()+"/rclone.conf")
from tempo.serve.loader import upload
upload(pytorch_mnist_model)
```
### Deploy to `kind`
```
from tempo.serve.metadata import SeldonCoreOptions
# Remote deployment options: target namespace plus the minio credentials secret.
runtime_options = SeldonCoreOptions(**{
    "remote_options": {
        "namespace": "production",
        "authSecretName": "minio-secret"
    }
})
from tempo import deploy_remote
# Deploy to the kind cluster, smoke-test one prediction, then tear down.
remote_deployed_model = deploy_remote(pytorch_mnist_model, options=runtime_options)
remote_prediction = remote_deployed_model.predict(img_np)
print(np.nonzero(remote_prediction.flatten() == 0))
remote_deployed_model.undeploy()
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Distributed training with TensorFlow
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/distributed_training"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/distributed_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/distributed_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/distributed_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Overview
`tf.distribute.Strategy` is a TensorFlow API to distribute training
across multiple GPUs, multiple machines or TPUs. Using this API, you can distribute your existing models and training code with minimal code changes.
`tf.distribute.Strategy` has been designed with these key goals in mind:
* Easy to use and support multiple user segments, including researchers, ML engineers, etc.
* Provide good performance out of the box.
* Easy switching between strategies.
`tf.distribute.Strategy` can be used with a high-level API like [Keras](https://www.tensorflow.org/guide/keras), and can also be used to distribute custom training loops (and, in general, any computation using TensorFlow).
In TensorFlow 2.0, you can execute your programs eagerly, or in a graph using [`tf.function`](function.ipynb). `tf.distribute.Strategy` intends to support both these modes of execution. Although we discuss training most of the time in this guide, this API can also be used for distributing evaluation and prediction on different platforms.
You can use `tf.distribute.Strategy` with very few changes to your code, because we have changed the underlying components of TensorFlow to become strategy-aware. This includes variables, layers, models, optimizers, metrics, summaries, and checkpoints.
In this guide, we explain various types of strategies and how you can use them in different situations.
Note: For a deeper understanding of the concepts, please watch [this deep-dive presentation](https://youtu.be/jKV53r9-H14). This is especially recommended if you plan to write your own training loop.
```
# Import TensorFlow
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
```
## Types of strategies
`tf.distribute.Strategy` intends to cover a number of use cases along different axes. Some of these combinations are currently supported and others will be added in the future. Some of these axes are:
* *Synchronous vs asynchronous training:* These are two common ways of distributing training with data parallelism. In sync training, all workers train over different slices of input data in sync, aggregating gradients at each step. In async training, all workers independently train over the input data and update variables asynchronously. Typically sync training is supported via all-reduce and async through parameter server architecture.
* *Hardware platform:* You may want to scale your training onto multiple GPUs on one machine, or multiple machines in a network (with 0 or more GPUs each), or on Cloud TPUs.
In order to support these use cases, there are six strategies available. In the next section we explain which of these are supported in which scenarios in TF 2.0 at this time. Here is a quick overview:
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy | OneDeviceStrategy |
|:----------------------- |:------------------- |:--------------------- |:--------------------------------- |:--------------------------------- |:-------------------------- | :-------------------------------------- |
| **Keras API** | Supported | Experimental support | Experimental support | Experimental support | Supported planned post 2.0 | Supported |
| **Custom training loop** | Experimental support | Experimental support | Support planned post 2.0 | Support planned post 2.0 | No support yet | Supported |
| **Estimator API** | Limited Support | Not supported | Limited Support | Limited Support | Limited Support | Limited Support |
Note: Estimator support is limited. Basic training and evaluation are experimental, and advanced features—such as scaffold—are not implemented. We recommend using Keras or custom training loops if a use case is not covered.
### MirroredStrategy
`tf.distribute.MirroredStrategy` supports synchronous distributed training on multiple GPUs on one machine. It creates one replica per GPU device. Each variable in the model is mirrored across all the replicas. Together, these variables form a single conceptual variable called `MirroredVariable`. These variables are kept in sync with each other by applying identical updates.
Efficient all-reduce algorithms are used to communicate the variable updates across the devices.
All-reduce aggregates tensors across all the devices by adding them up, and makes them available on each device.
It’s a fused algorithm that is very efficient and can reduce the overhead of synchronization significantly. There are many all-reduce algorithms and implementations available, depending on the type of communication available between devices. By default, it uses NVIDIA NCCL as the all-reduce implementation. You can choose from a few other options we provide, or write your own.
Here is the simplest way of creating `MirroredStrategy`:
```
mirrored_strategy = tf.distribute.MirroredStrategy()
```
This will create a `MirroredStrategy` instance which will use all the GPUs that are visible to TensorFlow, and use NCCL as the cross device communication.
If you wish to use only some of the GPUs on your machine, you can do so like this:
```
mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
```
If you wish to override the cross device communication, you can do so using the `cross_device_ops` argument by supplying an instance of `tf.distribute.CrossDeviceOps`. Currently, `tf.distribute.HierarchicalCopyAllReduce` and `tf.distribute.ReductionToOneDevice` are two options other than `tf.distribute.NcclAllReduce` which is the default.
```
mirrored_strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
```
### CentralStorageStrategy
`tf.distribute.experimental.CentralStorageStrategy` does synchronous training as well. Variables are not mirrored, instead they are placed on the CPU and operations are replicated across all local GPUs. If there is only one GPU, all variables and operations will be placed on that GPU.
Create an instance of `CentralStorageStrategy` by:
```
central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()
```
This will create a `CentralStorageStrategy` instance which will use all visible GPUs and CPU. Updates to variables on replicas will be aggregated before being applied to variables.
Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/versions#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future.
### MultiWorkerMirroredStrategy
`tf.distribute.experimental.MultiWorkerMirroredStrategy` is very similar to `MirroredStrategy`. It implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to `MirroredStrategy`, it creates copies of all variables in the model on each device across all workers.
It uses [CollectiveOps](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/collective_ops.py) as the multi-worker all-reduce communication method used to keep variables in sync. A collective op is a single op in the TensorFlow graph which can automatically choose an all-reduce algorithm in the TensorFlow runtime according to hardware, network topology and tensor sizes.
It also implements additional performance optimizations. For example, it includes a static optimization that converts multiple all-reductions on small tensors into fewer all-reductions on larger tensors. In addition, we are designing it to have a plugin architecture - so that in the future, you will be able to plugin algorithms that are better tuned for your hardware. Note that collective ops also implement other collective operations such as broadcast and all-gather.
Here is the simplest way of creating `MultiWorkerMirroredStrategy`:
```
multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
```
`MultiWorkerMirroredStrategy` currently allows you to choose between two different implementations of collective ops. `CollectiveCommunication.RING` implements ring-based collectives using gRPC as the communication layer. `CollectiveCommunication.NCCL` uses [Nvidia's NCCL](https://developer.nvidia.com/nccl) to implement collectives. `CollectiveCommunication.AUTO` defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. You can specify them in the following way:
```
multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
tf.distribute.experimental.CollectiveCommunication.NCCL)
```
One of the key differences to get multi worker training going, as compared to multi-GPU training, is the multi-worker setup. The `TF_CONFIG` environment variable is the standard way in TensorFlow to specify the cluster configuration to each worker that is part of the cluster. Learn more about [setting up TF_CONFIG](#TF_CONFIG).
Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/versions#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future.
### TPUStrategy
`tf.distribute.experimental.TPUStrategy` lets you run your TensorFlow training on Tensor Processing Units (TPUs). TPUs are Google's specialized ASICs designed to dramatically accelerate machine learning workloads. They are available on Google Colab, the [TensorFlow Research Cloud](https://www.tensorflow.org/tfrc) and [Cloud TPU](https://cloud.google.com/tpu).
In terms of distributed training architecture, `TPUStrategy` is the same as `MirroredStrategy` - it implements synchronous distributed training. TPUs provide their own implementation of efficient all-reduce and other collective operations across multiple TPU cores, which are used in `TPUStrategy`.
Here is how you would instantiate `TPUStrategy`:
Note: To run this code in Colab, you should select TPU as the Colab runtime. We will have a tutorial soon that will demonstrate how you can use TPUStrategy.
```
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu_address)
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
tpu_strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
```
The `TPUClusterResolver` instance helps locate the TPUs. In Colab, you don't need to specify any arguments to it.
If you want to use this for Cloud TPUs:
- You must specify the name of your TPU resource in the `tpu` argument.
- You must initialize the tpu system explicitly at the *start* of the program. This is required before TPUs can be used for computation. Initializing the tpu system also wipes out the TPU memory, so it's important to complete this step first in order to avoid losing state.
Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/versions#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future.
### ParameterServerStrategy
`tf.distribute.experimental.ParameterServerStrategy` supports parameter server training on multiple machines. In this setup, some machines are designated as workers and some as parameter servers. Each variable of the model is placed on one parameter server. Computation is replicated across all GPUs of all the workers.
In terms of code, it looks similar to other strategies:
```
ps_strategy = tf.distribute.experimental.ParameterServerStrategy()
```
For multi worker training, `TF_CONFIG` needs to specify the configuration of parameter servers and workers in your cluster, which you can read more about in [TF_CONFIG](#TF_CONFIG) below.
### OneDeviceStrategy
`tf.distribute.OneDeviceStrategy` runs on a single device. This strategy will place any variables created in its scope on the specified device. Input distributed through this strategy will be prefetched to the specified device. Moreover, any functions called via `strategy.experimental_run_v2` will also be placed on the specified device.
You can use this strategy to test your code before switching to other strategies which actually distribute to multiple devices/machines.
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
```
So far we've talked about what are the different strategies available and how you can instantiate them. In the next few sections, we will talk about the different ways in which you can use them to distribute your training. We will show short code snippets in this guide and link off to full tutorials which you can run end to end.
## Using `tf.distribute.Strategy` with Keras
We've integrated `tf.distribute.Strategy` into `tf.keras` which is TensorFlow's implementation of the
[Keras API specification](https://keras.io). `tf.keras` is a high-level API to build and train models. By integrating into `tf.keras` backend, we've made it seamless for you to distribute your training written in the Keras training framework.
Here's what you need to change in your code:
1. Create an instance of the appropriate `tf.distribute.Strategy`
2. Move the creation and compiling of Keras model inside `strategy.scope`.
We support all types of Keras models - sequential, functional and subclassed.
Here is a snippet of code to do this for a very simple Keras model with one dense layer:
```
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.compile(loss='mse', optimizer='sgd')
```
In this example we used `MirroredStrategy` so we can run this on a machine with multiple GPUs. `strategy.scope()` indicated which parts of the code to run distributed. Creating a model inside this scope allows us to create mirrored variables instead of regular variables. Compiling under the scope allows us to know that the user intends to train this model using this strategy. Once this is set up, you can fit your model like you would normally. `MirroredStrategy` takes care of replicating the model's training on the available GPUs, aggregating gradients, and more.
```
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(10)
model.fit(dataset, epochs=2)
model.evaluate(dataset)
```
Here we used a `tf.data.Dataset` to provide the training and eval input. You can also use numpy arrays:
```
import numpy as np
inputs, targets = np.ones((100, 1)), np.ones((100, 1))
model.fit(inputs, targets, epochs=2, batch_size=10)
```
In both cases (dataset or numpy), each batch of the given input is divided equally among the multiple replicas. For instance, if using `MirroredStrategy` with 2 GPUs, each batch of size 10 will get divided among the 2 GPUs, with each receiving 5 input examples in each step. Each epoch will then train faster as you add more GPUs. Typically, you would want to increase your batch size as you add more accelerators so as to make effective use of the extra computing power. You will also need to re-tune your learning rate, depending on the model. You can use `strategy.num_replicas_in_sync` to get the number of replicas.
```
# Compute global batch size using number of replicas.
BATCH_SIZE_PER_REPLICA = 5
global_batch_size = (BATCH_SIZE_PER_REPLICA *
mirrored_strategy.num_replicas_in_sync)
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)
dataset = dataset.batch(global_batch_size)
LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}
learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]
```
### What's supported now?
In TF 2.0 release, `MirroredStrategy`, `TPUStrategy`, `CentralStorageStrategy` and `MultiWorkerMirroredStrategy` are supported in Keras. Except `MirroredStrategy`, others are currently experimental and are subject to change.
Support for other strategies will be coming soon. The API and usage will be exactly the same as above.
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy | OneDeviceStrategy |
|---------------- |--------------------- |----------------------- |----------------------------------- |----------------------------------- |--------------------------- | --------------------------------------- |
| Keras APIs | Supported | Experimental support | Experimental support | Experimental support | Support planned post 2.0 | Supported |
### Examples and Tutorials
Here is a list of tutorials and examples that illustrate the above integration end to end with Keras:
1. Tutorial to train [MNIST](../tutorials/distribute/keras.ipynb) with `MirroredStrategy`.
2. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/vision/image_classification/resnet_imagenet_main.py) training with ImageNet data using `MirroredStrategy`.
3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/resnet50_keras/resnet50_tf2.py) trained with Imagenet data on Cloud TPUs with `TPUStrategy`.
4. [Tutorial](../tutorials/distribute/multi_worker_with_keras.ipynb) to train MNIST using `MultiWorkerMirroredStrategy`.
5. [NCF](https://github.com/tensorflow/models/blob/master/official/recommendation/ncf_keras_main.py) trained using `MirroredStrategy`.
2. [Transformer]( https://github.com/tensorflow/models/blob/master/official/transformer/v2/transformer_main.py) trained using `MirroredStrategy`.
## Using `tf.distribute.Strategy` with custom training loops
As you've seen, using `tf.distribute.Strategy` with high-level APIs (Estimator and Keras) requires changing only a couple lines of your code. With a little more effort, you can also use `tf.distribute.Strategy` with custom training loops.
If you need more flexibility and control over your training loops than is possible with Estimator or Keras, you can write custom training loops. For instance, when using a GAN, you may want to take a different number of generator or discriminator steps each round. Similarly, the high level frameworks are not very suitable for Reinforcement Learning training.
To support custom training loops, we provide a core set of methods through the `tf.distribute.Strategy` classes. Using these may require minor restructuring of the code initially, but once that is done, you should be able to switch between GPUs, TPUs, and multiple machines simply by changing the strategy instance.
Here we will show a brief snippet illustrating this use case for a simple training example using the same Keras model as before.
First, we create the model and optimizer inside the strategy's scope. This ensures that any variables created with the model and optimizer are mirrored variables.
```
with mirrored_strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
optimizer = tf.keras.optimizers.SGD()
```
Next, we create the input dataset and call `tf.distribute.Strategy.experimental_distribute_dataset` to distribute the dataset based on the strategy.
```
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(1000).batch(
global_batch_size)
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
```
Then, we define one step of the training. We will use `tf.GradientTape` to compute gradients and optimizer to apply those gradients to update our model's variables. To distribute this training step, we put it in a function `step_fn` and pass it to `tf.distribute.Strategy.experimental_run_v2` along with the dataset inputs that we get from `dist_dataset` created before:
```
@tf.function
def train_step(dist_inputs):
def step_fn(inputs):
features, labels = inputs
with tf.GradientTape() as tape:
logits = model(features)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
return cross_entropy
per_example_losses = mirrored_strategy.experimental_run_v2(
step_fn, args=(dist_inputs,))
mean_loss = mirrored_strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_example_losses, axis=0)
return mean_loss
```
A few other things to note in the code above:
1. We used `tf.nn.softmax_cross_entropy_with_logits` to compute the loss. And then we scaled the total loss by the global batch size. This is important because all the replicas are training in sync and number of examples in each step of training is the global batch. So the loss needs to be divided by the global batch size and not by the replica (local) batch size.
2. We used the `tf.distribute.Strategy.reduce` API to aggregate the results returned by `tf.distribute.Strategy.experimental_run_v2`. `tf.distribute.Strategy.experimental_run_v2` returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can `reduce` them to get an aggregated value. You can also do `tf.distribute.Strategy.experimental_local_results` to get the list of values contained in the result, one per local replica.
3. When `apply_gradients` is called within a distribution strategy scope, its behavior is modified. Specifically, before applying gradients on each parallel instance during synchronous training, it performs a sum-over-all-replicas of the gradients.
Finally, once we have defined the training step, we can iterate over `dist_dataset` and run the training in a loop:
```
with mirrored_strategy.scope():
for inputs in dist_dataset:
print(train_step(inputs))
```
In the example above, we iterated over the `dist_dataset` to provide input to your training. We also provide the `tf.distribute.Strategy.make_experimental_numpy_dataset` to support numpy inputs. You can use this API to create a dataset before calling `tf.distribute.Strategy.experimental_distribute_dataset`.
Another way of iterating over your data is to explicitly use iterators. You may want to do this when you want to run for a given number of steps as opposed to iterating over the entire dataset.
The above iteration would now be modified to first create an iterator and then explicitly call `next` on it to get the input data.
```
with mirrored_strategy.scope():
iterator = iter(dist_dataset)
for _ in range(10):
print(train_step(next(iterator)))
```
This covers the simplest case of using `tf.distribute.Strategy` API to distribute custom training loops. We are in the process of improving these APIs. Since this use case requires more work to adapt your code, we will be publishing a separate detailed guide in the future.
### What's supported now?
In TF 2.0 release, training with custom training loops is supported using `MirroredStrategy` as shown above and `TPUStrategy`.
`MultiWorkerMirroredStrategy` support will be coming in the future.
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy | OneDeviceStrategy |
|:----------------------- |:------------------- |:------------------- |:----------------------------- |:------------------------ |:------------------------- | :-------------------------- |
| Custom Training Loop | Experimental support | Experimental support | Support planned post 2.0 | Support planned post 2.0 | No support yet | Supported |
### Examples and Tutorials
Here are some examples for using distribution strategy with custom training loops:
1. [Tutorial](../tutorials/distribute/custom_training.ipynb) to train MNIST using `MirroredStrategy`.
2. [DenseNet](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/densenet/distributed_train.py) example using `MirroredStrategy`.
1. [BERT](https://github.com/tensorflow/models/blob/master/official/nlp/bert/run_classifier.py) example trained using `MirroredStrategy` and `TPUStrategy`.
This example is particularly helpful for understanding how to load from a checkpoint and generate periodic checkpoints during distributed training etc.
2. [NCF](https://github.com/tensorflow/models/blob/master/official/recommendation/ncf_keras_main.py) example trained using `MirroredStrategy` and `TPUStrategy` that can be enabled using the `keras_use_ctl` flag.
3. [NMT](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/nmt_with_attention/distributed_train.py) example trained using `MirroredStrategy`.
## Using `tf.distribute.Strategy` with Estimator (Limited support)
`tf.estimator` is a distributed training TensorFlow API that originally supported the async parameter server approach. Like with Keras, we've integrated `tf.distribute.Strategy` into `tf.Estimator`. If you're using Estimator for your training, you can easily change to distributed training with very few changes to your code. With this, Estimator users can now do synchronous distributed training on multiple GPUs and multiple workers, as well as use TPUs. This support in Estimator is, however, limited. See [What's supported now](#estimator_support) section below for more details.
The usage of `tf.distribute.Strategy` with Estimator is slightly different than the Keras case. Instead of using `strategy.scope`, now we pass the strategy object into the [`RunConfig`](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig) for the Estimator.
Here is a snippet of code that shows this with a premade Estimator `LinearRegressor` and `MirroredStrategy`:
```
mirrored_strategy = tf.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(
train_distribute=mirrored_strategy, eval_distribute=mirrored_strategy)
regressor = tf.estimator.LinearRegressor(
feature_columns=[tf.feature_column.numeric_column('feats')],
optimizer='SGD',
config=config)
```
We use a premade Estimator here, but the same code works with a custom Estimator as well. `train_distribute` determines how training will be distributed, and `eval_distribute` determines how evaluation will be distributed. This is another difference from Keras where we use the same strategy for both training and eval.
Now we can train and evaluate this Estimator with an input function:
```
def input_fn():
dataset = tf.data.Dataset.from_tensors(({"feats":[1.]}, [1.]))
return dataset.repeat(1000).batch(10)
regressor.train(input_fn=input_fn, steps=10)
regressor.evaluate(input_fn=input_fn, steps=10)
```
Another difference to highlight here between Estimator and Keras is the input handling. In Keras, we mentioned that each batch of the dataset is split automatically across the multiple replicas. In Estimator, however, we do not do automatic splitting of batch, nor automatically shard the data across different workers. You have full control over how you want your data to be distributed across workers and devices, and you must provide an `input_fn` to specify how to distribute your data.
Your `input_fn` is called once per worker, thus giving one dataset per worker. Then one batch from that dataset is fed to one replica on that worker, thereby consuming N batches for N replicas on 1 worker. In other words, the dataset returned by the `input_fn` should provide batches of size `PER_REPLICA_BATCH_SIZE`. And the global batch size for a step can be obtained as `PER_REPLICA_BATCH_SIZE * strategy.num_replicas_in_sync`.
When doing multi worker training, you should either split your data across the workers, or shuffle with a random seed on each. You can see an example of how to do this in the [Multi-worker Training with Estimator](../tutorials/distribute/multi_worker_with_estimator.ipynb).
We showed an example of using `MirroredStrategy` with Estimator. You can also use `TPUStrategy` with Estimator as well, in the exact same way:
```
config = tf.estimator.RunConfig(
train_distribute=tpu_strategy, eval_distribute=tpu_strategy)
```
And similarly, you can use multi worker and parameter server strategies as well. The code remains the same, but you need to use `tf.estimator.train_and_evaluate`, and set `TF_CONFIG` environment variables for each binary running in your cluster.
<a name="estimator_support"></a>
### What's supported now?
In TF 2.0 release, there is limited support for training with Estimator using all strategies except `TPUStrategy`. Basic training and evaluation should work, but a number of advanced features such as scaffold do not yet work. There may also be a number of bugs in this integration. At this time, we do not plan to actively improve this support, and instead are focused on Keras and custom training loop support. If at all possible, you should prefer to use `tf.distribute` with those APIs instead.
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy | OneDeviceStrategy |
|:--------------- |:------------------ |:------------- |:----------------------------- |:------------------------ |:------------------------- | :-------------------------- |
| Estimator API | Limited Support | Not supported | Limited Support | Limited Support | Limited Support | Limited Support |
### Examples and Tutorials
Here are some examples that show end to end usage of various strategies with Estimator:
1. [Multi-worker Training with Estimator](../tutorials/distribute/multi_worker_with_estimator.ipynb) to train MNIST with multiple workers using `MultiWorkerMirroredStrategy`.
2. [End to end example](https://github.com/tensorflow/ecosystem/tree/master/distribution_strategy) for multi worker training in tensorflow/ecosystem using Kubernetes templates. This example starts with a Keras model and converts it to an Estimator using the `tf.keras.estimator.model_to_estimator` API.
3. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/vision/image_classification/resnet_imagenet_main.py) model, which can be trained using either `MirroredStrategy` or `MultiWorkerMirroredStrategy`.
## Other topics
In this section, we will cover some topics that are relevant to multiple use cases.
<a name="TF_CONFIG"></a>
### Setting up TF\_CONFIG environment variable
For multi-worker training, as mentioned before, you need to set `TF_CONFIG` environment variable for each
binary running in your cluster. The `TF_CONFIG` environment variable is a JSON string which specifies what
tasks constitute a cluster, their addresses and each task's role in the cluster. We provide a Kubernetes template in the
[tensorflow/ecosystem](https://github.com/tensorflow/ecosystem) repo which sets
`TF_CONFIG` for your training tasks.
One example of `TF_CONFIG` is:
```
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["host1:port", "host2:port", "host3:port"],
"ps": ["host4:port", "host5:port"]
},
"task": {"type": "worker", "index": 1}
})
```
This `TF_CONFIG` specifies that there are three workers and two ps tasks in the
cluster along with their hosts and ports. The "task" part specifies the
role of the current task in the cluster: worker 1 (the second worker). Valid roles in a cluster are
"chief", "worker", "ps", and "evaluator". There should be no "ps" job except when using `tf.distribute.experimental.ParameterServerStrategy`.
## What's next?
`tf.distribute.Strategy` is actively under development. We welcome you to try it out and provide your feedback using [GitHub issues](https://github.com/tensorflow/tensorflow/issues/new).
| github_jupyter |
## How-to guide for Real-Time Forecasting use-case on Abacus.AI platform
This notebook provides you with a hands on environment to build a real-time forecasting model using the Abacus.AI Python Client Library.
We'll be using the [Household Electricity Usage Dataset](https://s3.amazonaws.com/realityengines.exampledatasets/rtforecasting/household_electricity_usage.csv), which contains data about electricity usage in a specified household.
1. Install the Abacus.AI library.
```
!pip install abacusai
```
We'll also import pandas and pprint tools for neat visualization in this notebook.
```
import pandas as pd # A tool we'll use to download and preview CSV files
import pprint # A tool to pretty print dictionary outputs
pp = pprint.PrettyPrinter(indent=2)
```
2. Add your Abacus.AI [API Key](https://abacus.ai/app/profile/apikey) generated using the API dashboard as follows:
```
#@title Abacus.AI API Key
api_key = '' #@param {type: "string"}
```
3. Import the Abacus.AI library and instantiate a client.
```
from abacusai import ApiClient
client = ApiClient(api_key)
```
## 1. Create a Project
Abacus.AI projects are containers that have datasets and trained models. By specifying a business **Use Case**, Abacus.AI tailors the deep learning algorithms to produce the best performing model possible for your data.
We'll call the `list_use_cases` method to retrieve a list of the available Use Cases currently available on the Abacus.AI platform.
```
client.list_use_cases()
```
In this notebook, we're going to create a real-time forecasting model using the Household Electricity Usage dataset. The 'ENERGY' use case is best tailored for this situation.
```
#@title Abacus.AI Use Case
use_case = 'ENERGY' #@param {type: "string"}
```
By calling the `describe_use_case_requirements` method we can view what datasets are required for this use_case.
```
for requirement in client.describe_use_case_requirements(use_case):
pp.pprint(requirement.to_dict())
```
Finally, let's create the project.
```
real_time_project = client.create_project(name='Electricity Usage Forecasting', use_case=use_case)
real_time_project.to_dict()
```
**Note: When feature_groups_enabled is True then the use case supports feature groups (collection of ML features). Feature groups are created at the organization level and can be tied to a project to further use it for training ML models**
## 2. Add Datasets to your Project
Abacus.AI can read datasets directly from `AWS S3` or `Google Cloud Storage` buckets, otherwise you can also directly upload and store your datasets with Abacus.AI. For this notebook, we will have Abacus.AI read the datasets directly from a public S3 bucket's location.
We are using one dataset for this notebook. We'll tell Abacus.AI how the dataset should be used when creating it by tagging the dataset with a special Abacus.AI **Dataset Type**.
- [Household Electricity Usage Dataset](https://s3.amazonaws.com/realityengines.exampledatasets/rtforecasting/household_electricity_usage.csv) (**TIMESERIES**):
This dataset contains information about electricity usage in specified households over a period of time.
### Add the dataset to Abacus.AI
First we'll use Pandas to preview the file, then add it to Abacus.AI.
```
pd.read_csv('https://s3.amazonaws.com/realityengines.exampledatasets/rtforecasting/household_electricity_usage.csv')
```
Using the Create Dataset API, we can tell Abacus.AI the public S3 URI of where to find the datasets. We will also give each dataset a Refresh Schedule, which tells Abacus.AI when it should refresh the dataset (take an updated/latest copy of the dataset).
If you're unfamiliar with Cron Syntax, Crontab Guru can help translate the syntax back into natural language: [https://crontab.guru/#0_12_\*_\*_\*](https://crontab.guru/#0_12_*_*_*)
**Note: This cron string will be evaluated in UTC time zone**
```
real_time_dataset = client.create_dataset_from_file_connector(name='Household Electricity Usage',table_name='Household_Electricity_Usage',
location='s3://realityengines.exampledatasets/rtforecasting/household_electricity_usage.csv',
refresh_schedule='0 12 * * *')
datasets = [real_time_dataset]
for dataset in datasets:
dataset.wait_for_inspection()
```
## 3. Create Feature Groups and add them to your Project
Datasets are created at the organization level and can be used to create feature groups as follows:
```
feature_group = client.create_feature_group(table_name='real_time_forecasting',sql='SELECT * FROM Household_Electricity_Usage')
```
Adding Feature Group to the project:
```
client.add_feature_group_to_project(feature_group_id=feature_group.feature_group_id,project_id = real_time_project.project_id)
```
Setting the Feature Group type according to the use case requirements:
```
client.set_feature_group_type(feature_group_id=feature_group.feature_group_id, project_id = real_time_project.project_id, feature_group_type= "TIMESERIES")
```
Check current Feature Group schema:
```
client.get_feature_group_schema(feature_group_id=feature_group.feature_group_id)
```
#### For each **Use Case**, there are special **Column Mappings** that must be applied to a column to fulfill use case requirements. We can find the list of available **Column Mappings** by calling the *Describe Use Case Requirements* API:
```
client.describe_use_case_requirements(use_case)[0].allowed_feature_mappings
client.set_feature_mapping(project_id = real_time_project.project_id,feature_group_id= feature_group.feature_group_id, feature_name='value',feature_mapping='TARGET')
client.set_feature_mapping(project_id = real_time_project.project_id,feature_group_id= feature_group.feature_group_id, feature_name='time',feature_mapping='DATE')
client.set_feature_mapping(project_id = real_time_project.project_id,feature_group_id= feature_group.feature_group_id, feature_name='id',feature_mapping='ITEM_ID')
```
For each required Feature Group Type within the use case, you must assign the Feature group to be used for training the model:
```
client.use_feature_group_for_training(project_id=real_time_project.project_id, feature_group_id=feature_group.feature_group_id)
```
Now that we have our feature groups assigned, we're almost ready to train a model!
To be sure that our project is ready to go, let's call project.validate to confirm that all the project requirements have been met:
```
real_time_project.validate()
```
## 4. Train a Model
For each **Use Case**, Abacus.AI has a bunch of options for training. We can call the *Get Training Config Options* API to see the available options.
```
real_time_project.get_training_config_options()
```
In this notebook, we'll just train with the default options, but definitely feel free to experiment, especially if you have familiarity with Machine Learning.
```
real_time_model = real_time_project.train_model(training_config={})
real_time_model.to_dict()
```
After we start training the model, we can call this blocking call that routinely checks the status of the model until it is trained and evaluated.
```
real_time_model.wait_for_evaluation()
```
**Note that model training might take some minutes to some hours depending upon the size of datasets, complexity of the models being trained and a variety of other factors**
## **Checkpoint** [Optional]
As model training can take hours to complete, your page could time out or you might accidentally hit the refresh button; this section helps you restore your progress:
```
!pip install abacusai
import pandas as pd
import pprint
pp = pprint.PrettyPrinter(indent=2)
api_key = '' #@param {type: "string"}
from abacusai import ApiClient
client = ApiClient(api_key)
real_time_project = next(project for project in client.list_projects() if project.name == 'Electricity Usage Forecasting')
real_time_model = real_time_project.list_models()[-1]
real_time_model.wait_for_evaluation()
```
## Evaluate your Model Metrics
After your model is done training you can inspect the model's quality by reviewing the model's metrics
```
pp.pprint(real_time_model.get_metrics().to_dict())
```
To get a better understanding on what these metrics mean, visit our [documentation](https://abacus.ai/app/help/useCases/ENERGY/training) page.
## 5. Deploy Model
After the model has been trained, we need to deploy the model to be able to start making predictions. Deploying a model will reserve cloud resources to host the model for Realtime and/or batch predictions.
```
real_time_deployment = client.create_deployment(name='Electricity Usage Deployment',description='Electricity Usage Deployment',model_id=real_time_model.model_id)
real_time_deployment.wait_for_deployment()
```
After the model is deployed, we need to create a deployment token for authenticating prediction requests. This token is only authorized to predict on deployments in this project, so it's safe to embed this token inside of a user-facing application or website.
```
deployment_token = real_time_project.create_deployment_token().deployment_token
deployment_token
```
## 6. Predict
Now that you have an active deployment and a deployment token to authenticate requests, you can make the `get_forecast` API call below.
This command will return a forecast under each percentile for the specified ITEM_ID. The forecast will be performed based on attributes specified in the dataset.
```
ApiClient().get_forecast(deployment_token=deployment_token,
deployment_id=real_time_deployment.deployment_id,
query_data={"id":"MT_001"})
```
| github_jupyter |
```
import os
import requests
import calendar
# Scrape monthly crime-report spreadsheets, per aldermanic district, from the
# City of Milwaukee query/download web application.  For each year 2005-2016
# (2015 deliberately skipped), each month, and each district 1-15, the script
# POSTs the query form and saves the returned Excel file under
# ./data_<year>/<month>/ald<district>.xls.

# JSF form fields; the district menu and the crime date range are rewritten
# inside the loop below before every request.
data = {'formQuery:menuAldId': 1,
        'formQuery:selectRad': 'incidentLevel',
        'dateFromCrime': '01/01/2005',
        'dateToCrime': '07/10/2017',
        'dateFromAcci': 'MM/DD/YYYY',
        'dateToAcci': 'MM/DD/YYYY',
        'formQuery:radioFormat': 'excel',
        'formQuery:buttonQueryId': 'Submit',
        'formQuery:buttonResetId': 'Clear',
        'formQuery_SUBMIT': 1,
        'javax.faces.ViewState': 'j_id6:j_id7'}

# Browser-like headers captured from a real session.
# BUG FIX: the original also sent a hard-coded 'Content-Length: 289' header;
# that value is wrong whenever the encoded form payload differs in size and
# can corrupt the request.  It is removed here -- requests computes the
# correct Content-Length automatically from the body.
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
           'Accept-Encoding': 'gzip, deflate',
           'Accept-Language': 'en-US,en;q=0.8',
           'Cache-Control': 'max-age=0',
           'Connection': 'keep-alive',
           'Content-Type': 'application/x-www-form-urlencoded',
           'Cookie': '__utma=116865050.46345168.1496938709.1498690358.1498763206.4; __utmz=116865050.1498763206.4.4.utmccn=(referral)|utmcsr=city.milwaukee.gov|utmcct=/DownloadMapData3497.htm|utmcmd=referral; JSESSIONID=0001kTUljano4n8x8_sv0IhnhPS:166pun9c5; _gat_gacity=1; _gat_gaitmd=1; _ga=GA1.2.1203834187.1496847807; _gid=GA1.2.1853685424.1499888152',
           'Host': 'itmdapps.milwaukee.gov',
           'Origin': 'http://itmdapps.milwaukee.gov',
           'Referer': 'http://itmdapps.milwaukee.gov/publicApplication_QD/queryDownload/aldermanicDistfm.faces',
           'Upgrade-Insecure-Requests': '1',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
           }

url = 'http://itmdapps.milwaukee.gov/publicApplication_QD/queryDownload/aldermanicDistfm.faces'

month_list = ['jan', 'feb', 'mar', 'april', 'may', 'june', 'july', 'aug',
              'sep', 'oct', 'nov', 'dec']

for year in range(2005, 2017):
    if year == 2015:
        # 2015 intentionally excluded (presumably already downloaded --
        # TODO confirm)
        continue
    for index, month in enumerate(month_list):
        m_num = index + 1
        # Last calendar day of this month/year (handles leap years).
        month_end = calendar.monthrange(year, m_num)[1]
        # makedirs creates data_<year>/<month> in one call and does not
        # raise when the directories already exist -- replaces the original
        # bare try/except: pass around os.mkdir.
        os.makedirs(f'./data_{year}/{month}', exist_ok=True)
        for ald in range(1, 16):
            data['formQuery:menuAldId'] = ald
            # :02d zero-pads the month so dates are always MM/DD/YYYY,
            # replacing the original manual len(str(...)) branch.
            data['dateFromCrime'] = f'{m_num:02d}/01/{year}'
            data['dateToCrime'] = f'{m_num:02d}/{month_end}/{year}'
            r = requests.post(url, data=data, headers=headers)
            if r.status_code == 200:
                print(f'Okay:./data_{year}/{month}/ald{ald}.xls')
            else:
                print(f'Not Okay: ./data_{year}/{month}/ald{ald}.xls')
            # The response body is saved regardless of status so failures
            # can be inspected afterwards (original behavior preserved).
            with open(f'./data_{year}/{month}/ald{ald}.xls', 'wb') as f:
                f.write(r.content)

# Leftover manual checks from the original notebook session, kept for
# parity: probe monthrange and save one ad-hoc response to test.xls.
calendar.monthrange(2002, 1)
r = requests.post(url, data=data, headers=headers)
with open('test.xls', 'wb') as f:
    f.write(r.content)
```
| github_jupyter |
```
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
<img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
## Overview
In this notebook, we want to provide an overview what HugeCTR framework is, its features and benefits. We will use HugeCTR to train a basic neural network architecture and deploy the saved model to Triton Inference Server.
<b>Learning Objectives</b>:
* Adopt NVTabular workflow to provide input files to HugeCTR
* Define HugeCTR neural network architecture
* Train a deep learning model with HugeCTR
* Deploy HugeCTR to Triton Inference Server
### Why using HugeCTR?
HugeCTR is a GPU-accelerated recommender framework designed to distribute training across multiple GPUs and nodes and estimate Click-Through Rates (CTRs).<br>
HugeCTR offers multiple advantages to train deep learning recommender systems:
1. **Speed**: HugeCTR is a highly efficient framework written in C++. We experienced speed-ups of up to 10x. HugeCTR on a NVIDIA DGX A100 system proved to be the fastest commercially available solution for training the architecture Deep Learning Recommender Model (DLRM) developed by Facebook.
2. **Scale**: HugeCTR supports model parallel scaling. It distributes the large embedding tables over multiple GPUs or multiple nodes.
3. **Easy-to-use**: Easy-to-use Python API similar to Keras. Examples for popular deep learning recommender systems architectures (Wide&Deep, DLRM, DCN, DeepFM) are available.
### Other Features of HugeCTR
HugeCTR is designed to scale deep learning models for recommender systems. It provides a list of other important features:
* Proficiency in oversubscribing models to train embedding tables with single nodes that don’t fit within the GPU or CPU memory (only required embeddings are prefetched from a parameter server per batch)
* Asynchronous and multithreaded data pipelines
* A highly optimized data loader.
* Supported data formats such as parquet and binary
* Integration with Triton Inference Server for deployment to production
### Getting Started
In this example, we will train a neural network with HugeCTR. We will use NVTabular for preprocessing.
#### Preprocessing and Feature Engineering with NVTabular
We use NVTabular to `Categorify` our categorical input columns.
```
# External dependencies
import os
import shutil
import gc
import nvtabular as nvt
import cudf
import numpy as np
from os import path
from sklearn.model_selection import train_test_split
from nvtabular.utils import download_file
```
We define our base directory, containing the data.
```
# path to store raw and preprocessed data
BASE_DIR = "/model/data/"
```
If the data is not available in the base directory, we will download and unzip the data.
```
download_file(
"http://files.grouplens.org/datasets/movielens/ml-25m.zip", os.path.join(BASE_DIR, "ml-25m.zip")
)
```
## Preparing the dataset with NVTabular
First, we take a look at the movie metadata.
Let's load the movie ratings.
```
ratings = cudf.read_csv(os.path.join(BASE_DIR, "ml-25m", "ratings.csv"))
ratings.head()
```
We drop the timestamp column and split the ratings into training and test dataset. We use a simple random split.
```
ratings = ratings.drop("timestamp", axis=1)
train, valid = train_test_split(ratings, test_size=0.2, random_state=42)
train.head()
```
We save our train and valid datasets as parquet files on disk, and below we will read them in while initializing the Dataset objects.
```
train.to_parquet(BASE_DIR + "train.parquet")
valid.to_parquet(BASE_DIR + "valid.parquet")
del train
del valid
gc.collect()
```
Let's define our categorical and label columns. Note that in that example we do not have numerical columns.
```
CATEGORICAL_COLUMNS = ["userId", "movieId"]
LABEL_COLUMNS = ["rating"]
```
Let's add Categorify op for our categorical features, userId, movieId.
```
cat_features = CATEGORICAL_COLUMNS >> nvt.ops.Categorify(cat_cache="device")
```
The ratings are on a scale between 1-5. We want to predict a binary target with 1 are all ratings >=4 and 0 are all ratings <=3. We use the LambdaOp for it.
```
ratings = nvt.ColumnGroup(["rating"]) >> (lambda col: (col > 3).astype("int8"))
```
We can visualize our calculation graph.
```
output = cat_features + ratings
(output).graph
```
We initialize our NVTabular workflow.
```
workflow = nvt.Workflow(output)
```
We initialize NVTabular Datasets, and use the part_size parameter, which defines the size read into GPU-memory at once, in nvt.Dataset.
```
train_dataset = nvt.Dataset(BASE_DIR + "train.parquet", part_size="100MB")
valid_dataset = nvt.Dataset(BASE_DIR + "valid.parquet", part_size="100MB")
```
First, we collect the training dataset statistics.
```
%%time
workflow.fit(train_dataset)
```
This step is slightly different for HugeCTR. HugeCTR expect the categorical input columns as `int64` and continuous/label columns as `float32` We can define output datatypes for our NVTabular workflow.
```
# HugeCTR expects categorical inputs as int64 and continuous/label columns
# as float32; build the per-column output dtype mapping accordingly.
dict_dtypes = {
    **{column: np.int64 for column in CATEGORICAL_COLUMNS},
    **{column: np.float32 for column in LABEL_COLUMNS},
}
```
Note: We do not have numerical output columns
```
# Remove any train/valid output directories left over from a previous run
# so the freshly transformed parquet files start from a clean slate.
train_dir = os.path.join(BASE_DIR, "train")
valid_dir = os.path.join(BASE_DIR, "valid")
for out_dir in (train_dir, valid_dir):
    if path.exists(out_dir):
        shutil.rmtree(out_dir)
```
In addition, we need to provide the data schema to the output calls. We need to define which output columns are `categorical`, `continuous` and which is the `label` columns. NVTabular will write metadata files, which HugeCTR requires to load the data and optimize training.
```
workflow.transform(train_dataset).to_parquet(
output_path=BASE_DIR + "train/",
shuffle=nvt.io.Shuffle.PER_PARTITION,
cats=CATEGORICAL_COLUMNS,
labels=LABEL_COLUMNS,
dtypes=dict_dtypes,
)
workflow.transform(valid_dataset).to_parquet(
output_path=BASE_DIR + "valid/",
shuffle=False,
cats=CATEGORICAL_COLUMNS,
labels=LABEL_COLUMNS,
dtypes=dict_dtypes,
)
```
## Scaling Accelerated training with HugeCTR
HugeCTR is a deep learning framework dedicated to recommendation systems. It is written in CUDA C++. As HugeCTR optimizes the training in CUDA++, we need to define the training pipeline and model architecture and execute it via the commandline. We will use the Python API, which is similar to Keras models.
HugeCTR has three main components:
* Solver: Specifies various details such as active GPU list, batchsize, and model_file
* Optimizer: Specifies the type of optimizer and its hyperparameters
* Model: Specifies training/evaluation data (and their paths), embeddings, and dense layers. Note that embeddings must precede the dense layers
**Solver**
Let's take a look on the parameter for the `Solver`. We should be familiar from other frameworks for the hyperparameter.
```
solver = hugectr.solver_parser_helper(
- vvgpu: GPU indices used in the training process, which has two levels. For example: [[0,1],[1,2]] indicates that two nodes are used in the first node. GPUs 0 and 1 are used while GPUs 1 and 2 are used for the second node. It is also possible to specify non-continuous GPU indices such as [0, 2, 4, 7]
- max_iter: Total number of training iterations
- batchsize: Minibatch size used in training
- display: Intervals to print loss on the screen
- eval_interval: Evaluation interval in the unit of training iteration
- max_eval_batches: Maximum number of batches used in evaluation. It is recommended that the number is equal to or bigger than the actual number of batches in the evaluation dataset.
If max_iter is used, the evaluation happens for max_eval_batches by repeating the evaluation dataset infinitely.
On the other hand, with num_epochs, HugeCTR stops the evaluation if all the evaluation data is consumed
- batchsize_eval: Maximum number of batches used in evaluation. It is recommended that the number is equal to or
bigger than the actual number of batches in the evaluation dataset
- mixed_precision: Enables mixed precision training with the scaler specified here. Only 128,256, 512, and 1024 scalers are supported
)
```
**Optimizer**
The optimizer is the algorithm to update the model parameters. HugeCTR supports the common algorithms.
```
optimizer = CreateOptimizer(
- optimizer_type: Optimizer algorithm - Adam, MomentumSGD, Nesterov, and SGD
- learning_rate: Learning Rate for optimizer
)
```
**Model**
We initialize the model with the solver and optimizer:
```
model = hugectr.Model(solver, optimizer)
```
We can add multiple layers to the model with `model.add` function. We will focus on:
- `Input` defines the input data
- `SparseEmbedding` defines the embedding layer
- `DenseLayer` defines dense layers, such as fully connected, ReLU, BatchNorm, etc.
**HugeCTR organizes the layers by names. For each layer, we define the input and output names.**
Input layer:
This layer is required to define the input data.
```
hugectr.Input(
data_reader_type: Data format to read
source: The training dataset file list.
eval_source: The evaluation dataset file list.
    check_type: The data error detection mechanism (Sum: Checksum, None: no detection).
label_dim: Number of label columns
label_name: Name of label columns in network architecture
    dense_dim: Number of continuous columns
    dense_name: Name of continuous columns in network architecture
slot_size_array: The list of categorical feature cardinalities
data_reader_sparse_param_array: Configuration how to read sparse data
sparse_names: Name of sparse/categorical columns in network architecture
)
```
SparseEmbedding:
This layer defines embedding table
```
hugectr.SparseEmbedding(
embedding_type: Different embedding options to distribute embedding tables
max_vocabulary_size_per_gpu: Maximum vocabulary size or cardinality across all the input features
embedding_vec_size: Embedding vector size
combiner: Intra-slot reduction op (0=sum, 1=average)
sparse_embedding_name: Layer name
bottom_name: Input layer names
)
```
DenseLayer:
This layer is copied to each GPU and is normally used for the MLP tower.
```
hugectr.DenseLayer(
layer_type: Layer type, such as FullyConnected, Reshape, Concat, Loss, BatchNorm, etc.
bottom_names: Input layer names
top_names: Layer name
...: Depending on the layer type additional parameter can be defined
)
```
## Let's define our model
We walked through the documentation, but it is useful to understand the API. Finally, we can define our model. We will write the model to `./model.py` and execute it afterwards.
We need the cardinalities of each categorical feature to assign as `slot_size_array` in the model below.
```
from nvtabular.ops import get_embedding_sizes
embeddings = get_embedding_sizes(workflow)
print(embeddings)
```
In addition, we need the total cardinalities to be assigned as `max_vocabulary_size_per_gpu` parameter.
```
total_cardinality = embeddings["userId"][0] + embeddings["movieId"][0]
total_cardinality
%%writefile './model.py'
import hugectr
from mpi4py import MPI # noqa
solver = hugectr.solver_parser_helper(
vvgpu=[[0]],
max_iter=2000,
batchsize=2048,
display=100,
eval_interval=200,
batchsize_eval=2048,
max_eval_batches=160,
i64_input_key=True,
use_mixed_precision=False,
repeat_dataset=True,
snapshot=1900,
)
optimizer = hugectr.optimizer.CreateOptimizer(
optimizer_type=hugectr.Optimizer_t.Adam, use_mixed_precision=False
)
model = hugectr.Model(solver, optimizer)
model.add(
hugectr.Input(
data_reader_type=hugectr.DataReaderType_t.Parquet,
source="/model/data/train/_file_list.txt",
eval_source="/model/data/valid/_file_list.txt",
check_type=hugectr.Check_t.Non,
label_dim=1,
label_name="label",
dense_dim=0,
dense_name="dense",
slot_size_array=[162542, 56586],
data_reader_sparse_param_array=[
hugectr.DataReaderSparseParam(hugectr.DataReaderSparse_t.Distributed, 3, 1, 2)
],
sparse_names=["data1"],
)
)
model.add(
hugectr.SparseEmbedding(
embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
max_vocabulary_size_per_gpu=219128,
embedding_vec_size=16,
combiner=0,
sparse_embedding_name="sparse_embedding1",
bottom_name="data1",
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.Reshape,
bottom_names=["sparse_embedding1"],
top_names=["reshape1"],
leading_dim=32,
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.InnerProduct,
bottom_names=["reshape1"],
top_names=["fc1"],
num_output=128,
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.ReLU,
bottom_names=["fc1"],
top_names=["relu1"],
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.InnerProduct,
bottom_names=["relu1"],
top_names=["fc2"],
num_output=128,
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.ReLU,
bottom_names=["fc2"],
top_names=["relu2"],
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.InnerProduct,
bottom_names=["relu2"],
top_names=["fc3"],
num_output=1,
)
)
model.add(
hugectr.DenseLayer(
layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names=["fc3", "label"],
top_names=["loss"],
)
)
model.compile()
model.summary()
model.fit()
!python model.py
```
We trained our model.
After training terminates, we can see that two `.model` files are generated. We need to move them inside `1` folder under the `movielens_hugectr` folder. Let's create these folders first.
```
!mkdir -p /model/movielens_hugectr/1
```
Now we move our saved `.model` files inside `1` folder.
```
!mv *.model /model/movielens_hugectr/1/
```
Note that these stored `.model` files will be used in the inference. Now we have to create a JSON file for inference which has a similar configuration as our training file. We should remove the solver and optimizer clauses and add the inference clause in the JSON file. The paths of the stored dense model and sparse model(s) should be specified at dense_model_file and sparse_model_file within the inference clause. We need to make some modifications to data in the layers clause. Besides, we need to change the last layer from BinaryCrossEntropyLoss to Sigmoid. The rest of "layers" should be exactly the same as that in the training model.py file.
Now let's create a `movielens.json` file inside the `movielens/1` folder. We have already retrieved the cardinality of each categorical column using `get_embedding_sizes` function above. We will use these cardinalities below in the `movielens.json` file as well.
```
%%writefile '/model/movielens_hugectr/1/movielens.json'
{
"inference": {
"max_batchsize": 64,
"hit_rate_threshold": 0.6,
"dense_model_file": "/model/models/movielens/1/_dense_1900.model",
"sparse_model_file": "/model/models/movielens/1/0_sparse_1900.model",
"label": 1,
"input_key_type": "I64"
},
"layers": [
{
"name": "data",
"type": "Data",
"format": "Parquet",
"slot_size_array": [162542, 56586],
"source": "/model/data/train/_file_list.txt",
"eval_source": "/model/data/valid/_file_list.txt",
"check": "Sum",
"label": {"top": "label", "label_dim": 1},
"dense": {"top": "dense", "dense_dim": 0},
"sparse": [
{
"top": "data1",
"type": "DistributedSlot",
"max_feature_num_per_sample": 3,
"slot_num": 2
}
]
},
{
"name": "sparse_embedding1",
"type": "DistributedSlotSparseEmbeddingHash",
"bottom": "data1",
"top": "sparse_embedding1",
"sparse_embedding_hparam": {
"max_vocabulary_size_per_gpu": 219128,
"embedding_vec_size": 16,
"combiner": 0
}
},
{
"name": "reshape1",
"type": "Reshape",
"bottom": "sparse_embedding1",
"top": "reshape1",
"leading_dim": 32
},
{
"name": "fc1",
"type": "InnerProduct",
"bottom": "reshape1",
"top": "fc1",
"fc_param": {"num_output": 128}
},
{"name": "relu1", "type": "ReLU", "bottom": "fc1", "top": "relu1"},
{
"name": "fc2",
"type": "InnerProduct",
"bottom": "relu1",
"top": "fc2",
"fc_param": {"num_output": 128}
},
{"name": "relu2", "type": "ReLU", "bottom": "fc2", "top": "relu2"},
{
"name": "fc3",
"type": "InnerProduct",
"bottom": "relu2",
"top": "fc3",
"fc_param": {"num_output": 1}
},
{"name": "sigmoid", "type": "Sigmoid", "bottom": "fc3", "top": "sigmoid"}
]
}
```
Now we can save our models to be deployed at the inference stage. To do so we will use `export_hugectr_ensemble` method below. With this method, we can generate the `config.pbtxt` files automatically for each model. In doing so, we should also create a `hugectr_params` dictionary, and define the parameters like where the `movielens.json` file will be read, `slots` which corresponds to number of categorical features, `embedding_vector_size`, `max_nnz`, and `n_outputs` which is number of outputs.
The script below creates an ensemble triton server model where
- `workflow` is the nvtabular workflow used in preprocessing,
- `hugectr_model_path` is the HugeCTR model that should be served. This path includes the `.model` files.
- `name` is the base name of the various triton models
- `output_path` is the path where the model will be saved.
```
from nvtabular.inference.triton import export_hugectr_ensemble
hugectr_params = dict()
hugectr_params["config"] = "/model/models/movielens/1/movielens.json"
hugectr_params["slots"] = 2
hugectr_params["max_nnz"] = 2
hugectr_params["embedding_vector_size"] = 16
hugectr_params["n_outputs"] = 1
export_hugectr_ensemble(
workflow=workflow,
hugectr_model_path="/model/movielens_hugectr/1/",
hugectr_params=hugectr_params,
name="movielens",
output_path="/model/models/",
label_columns=["rating"],
cats=CATEGORICAL_COLUMNS,
max_batch_size=64,
)
```
After we run the script above, we will have three model folders saved as `movielens_nvt`, `movielens` and `movielens_ens`. Now we can move to the next notebook, `movielens-HugeCTR-inference`, to send request to the Triton Inference Server using the saved ensemble model.
| github_jupyter |

<div class="alert alert-block alert-info">
<b>Important:</b> This notebook uses <code>ipywidgets</code> that take advantage of the javascript interface in a web browser. The downside is the functionality does not render well on saved notebooks. Run this notebook locally to see the widgets in action.
</div>
# Qiskit Jupyter Tools
Qiskit was designed to be used inside of the Jupyter notebook interface. As such it includes many useful routines that take advantage of this platform, and make performing tasks like exploring devices and tracking job progress effortless.
Loading all the qiskit Jupyter goodness is done via:
```
from qiskit import *
import qiskit.providers.ibmq.jupyter # This is the where the magic happens (literally).
```
## Table of contents
1) [IQX Dashboard](#dashboard)
2) [Backend Details](#details)
To start, load your IQX account information and select a provider:
```
IBMQ.load_account();
provider = IBMQ.get_provider(group='open')
```
## IQX Dashboard <a name="dashboard"></a>
Perhaps the most useful Jupyter tool is the `iqx_dashboard`. This widget consists of a `Devices` tab and a `Jobs` tab. The `Devices` tab provides an overview of all the devices you have access to. The `Jobs` tab automatically tracks and displays information for the jobs submitted in this session.
To start the dashboard you run the Jupyter magic:
```
%iqx_dashboard
```
You should now see a small window titled "IQX Dashboard" in the upper left corner of the notebook. Click on the drop down symbol to see the two tabs. The `Devices` tab may take a few seconds to load as it needs to communicate with the server for device information. The `Jobs` tab should contain no job information as none has been submitted yet.
### Getting an Overview of Backends
The `Devices` tab provides an overview of all the backends you have access to. You can use it to compare, for example, the average CNOT error rates. In addition, the number of pending jobs on the devices is continuously being updated along with the operational status.
### Automatic Job Tracking
The `Jobs` tab automatically tracks and displays information for the jobs submitted in this session.
Now, let's submit a job to a device to see this in action:
```
backend = provider.get_backend('ibmq_essex')
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0,1], [0,1])
job = execute(qc, backend)
```
Click on the `Jobs` tab and you will see that the job has been added to the list of jobs. Its status, queue position (if any), and estimated start time are being automatically tracked and updated. If the job is running, the scheduling mode for the job is also displayed. For example, if the job status is `RUNNING[F]`, that means the job is actively running and was scheduled using a fair-share algorithm. The button to the left of a job ID allows you to cancel the job.
If you want to kill the dashboard you can do so by calling:
```
%disable_ibmq_dashboard
```
Although the dashboard itself is killed, the underlying framework is still tracking jobs for you and will show this information if loaded once again.
## Viewing Backend Details <a name="details"></a>
The IBM Quantum devices contain a large amount of configuration data and properties. This information can be retrieved by calling:
```
config = backend.configuration()
params = backend.properties()
```
However, parsing through this information quickly becomes tedious. Instead, all the information for a single backend can be displayed graphically by just calling the backend instance itself:
```
backend
```
This widget displays all the information about a backend in a single tabbed-window.
```
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
| github_jupyter |
# Millikan Oil Drop
**Measurement of the electron charge**
```
rho=886 # kg/m^3
dV = .5 #volts
dd = .000005 # meters
dP = 5 # pascals
g=9.8 # m/s^2
eta= 1.8330*10**(-5) # N*s/m^2
b=8.20*10**(-3) # Pa*m
p=101325 #Pa
V=500 #V
e=1.6*10**(-19)
d_array=10**(-3)*np.array([7.55,7.59,7.60,7.60,7.60,7.61]) # unit: m
d=d_array.mean()
d_std=d_array.std()
print("d_mean: ",d_mean)
print("d_std: ",d_std)
def reject_outliers(data, m=2):
    """Drop entries farther than m standard deviations from the array mean."""
    center = np.mean(data)
    spread = np.std(data)
    keep = abs(data - center) < m * spread
    return data[keep]
```
**Load data from files**
```
data_path = "/Users/Angel/Documents/MilikanData/"
statistics=[]
# One CSV per droplet: accumulate per-droplet statistics (mean rise/fall
# velocities, radius, mass, charge, propagated error) into `statistics`.
for file_name in os.listdir(data_path):
    name=file_name[:3]  # first three chars of the file name identify the droplet
    obj_drop=pd.read_csv(data_path+file_name).dropna()
    # separate rising and falling velocities, remove anomalous velocities at switching field direction
    v_y=obj_drop["v_{y}"].values
    y = obj_drop["y"] # y positions (pandas Series; its integer labels are used by .pop below)
    n_points=len(v_y)
    v_r=reject_outliers(v_y[v_y>0])  # rising (field on)
    v_f=reject_outliers(v_y[v_y<0])  # falling (field off)
    # calculate mean and deviation
    (v_r_mean,v_r_std)=(v_r.mean(),v_r.std())
    (v_f_mean,v_f_std)=(np.abs(v_f.mean()),v_f.std())
    # calculate other properties
    a=np.sqrt((b/2/p)**2+9*eta*v_f_mean/2/rho/g)-b/(2*p) #droplet radius (Stokes drag with slip correction)
    m=4*np.pi/3*a**3*rho # droplet mass
    # BUG FIX: the charge formula referenced `d_mean`, which is never defined
    # anywhere in this notebook; the plate separation is stored in `d`.
    q=m*g*d*(v_f_mean+v_r_mean)/V/v_f_mean #droplet charge
    # error propagation
    dely = np.roll(y, -2)-y  # displacement over two frames (pandas Series)
    delt = .4                # elapsed time over two frames, s
    error_y = 2e-6           # position reading uncertainty, m
    error_t = .1             # timing uncertainty, s
    error_v = np.sqrt((2*error_y/dely)**2+(2*error_t/delt)**2)
    # error_v is a pandas Series, so Series.pop(label) drops the two
    # wrap-around entries that np.roll produced at the end of the record.
    error_v.pop(n_points-1)
    error_v.pop(n_points-2)
    error_v = np.append([0.5],error_v)   # pad ends with a nominal 50% relative error
    error_v = np.append(error_v, [0.5])
    error_v = np.abs(v_y)*error_v        # convert relative to absolute velocity error
    meanerror_v = error_v[~np.isinf(error_v)].mean()
    # partial derivatives of q for the propagated uncertainty
    dqdvf = 2*np.pi*(((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))**(-.5))*((np.sqrt(9*eta*v_f_mean/(2*rho*g)+(b/(2*p))**2)-b/(2*p))**2)*9*eta/(2*rho*g)*rho*g*d*(v_f_mean+v_r_mean)/(V*v_f_mean) + 4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*(V*v_f_mean*rho*g*d*v_r_mean-rho*g*d*(v_f_mean+v_r_mean)*V)/((V*v_f_mean)**2)
    dqdvr = 4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*(rho*g*d/V)
    dqdV = -4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*(v_f_mean*rho*g*d*(v_f_mean+v_r_mean)/((V*v_f_mean)**2))
    dqdd = 4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*rho*g*(v_f_mean+v_r_mean)/(V*v_f_mean)
    dqdP1 = 2*np.pi*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**2)*rho*g*d*(v_f_mean+v_r_mean)/(V*v_f_mean)
    dqdP2 = -(((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))**(-.5))*(b**2)/(2*p**3)+b/(4*p**2)
    # quadrature sum of all uncertainty contributions
    error_func = np.sqrt(((dqdvf)*(meanerror_v))**2+((dqdvr)*(meanerror_v))**2+((dqdV)*(dV))**2+((dqdd)*(dd))**2+((dqdP1*dqdP2)*(dP))**2)
    statistics.append(np.array((name,n_points,v_r_mean,v_r_std,v_f_mean,v_f_std, meanerror_v, a,m,q, error_func)))
```
Calculation of the attached charge
```
# Collect the per-droplet statistics into a DataFrame for inspection.
labels = ["name","n_points","v_r_mean","v_r_std","v_f_mean","v_f_std","meanerror_v","a","m","q","q_error"]
overall = pd.DataFrame(statistics,columns=labels,dtype="float64")
overall
import matplotlib.pylab as plt
plt.figure().dpi=100
plt.xlabel("Charge attached")
plt.ylabel("Number of droplets")
plt.title("Histogram of charge carried by droplets")
# Histogram of droplet charge expressed in units of the electron charge e.
(overall.q/e).hist(bins=21)
def clustering(arr, x):
    """Scale arr by 1/x and group the values into unit-width open bins (i, i+1)."""
    scaled = list(arr / x)
    top_bin = int(max(scaled))
    # One list per bin; values exactly on a bin edge are excluded (strict <).
    return [[v for v in scaled if i < v < i + 1] for i in range(top_bin + 1)]
from scipy.optimize import minimize
def obj_error(x):
    # Objective for estimating e: cluster the measured charges into bins of
    # width x, take each cluster's mean charge, and measure the average gap
    # between adjacent cluster means.
    # NOTE(review): the final `estimate_e*e` multiplies the gap estimate by
    # the accepted electron charge; dimensionally this looks like it may have
    # been intended as `estimate_e - e` — confirm before trusting the minimum.
    test = list(map(np.mean,clustering(overall.q,x)))
    estimate_delta_q = np.array(test[:-1])-np.array(test[1:])
    estimate_e = estimate_delta_q[~np.isnan(estimate_delta_q)]  # empty bins give NaN means
    estimate_e = estimate_e*e
    return abs(estimate_e.mean())
# Evaluate the objective at the accepted value of e.
obj_error(e)
#valuee = minimize(obj_error,.8e-19)
#print(valuee.x)
```
| github_jupyter |
# 3.6 Refinements with federated learning
## Data loading and preprocessing
```
# read more: https://www.tensorflow.org/federated/tutorials/federated_learning_for_text_generation
import nest_asyncio # pip install nest_asyncio
import tensorflow_federated as tff # pip install tensorflow_federated
import collections
import functools
import os
import time
import numpy as np
import tensorflow as tf
#nest_asyncio.apply()
tf.compat.v1.enable_v2_behavior()  # opt in to TF2 behavior under TF1-compat environments
np.random.seed(0)  # reproducible dummy-batch sampling below
# Test the TFF is working:
tff.federated_computation(lambda: 'Hello, World!')()
import numpy as np
# A fixed vocabularly of ASCII chars that occur in the works of Shakespeare and Dickens:
vocab = list('dhlptx@DHLPTX $(,048cgkoswCGKOSW[_#\'/37;?bfjnrvzBFJNRVZ"&*.26:\naeimquyAEIMQUY]!%)-159\r')
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)  # inverse mapping: char id -> character
```
## Data
```
# Download the federated Shakespeare dataset (one client per play character).
train_data, test_data = tff.simulation.datasets.shakespeare.load_data()
# Here the play is "The Tragedy of King Lear" and the character is "King".
raw_example_dataset = train_data.create_tf_dataset_for_client(
    'THE_TRAGEDY_OF_KING_LEAR_KING')
# To allow for future extensions, each entry x
# is an OrderedDict with a single key 'snippets' which contains the text.
for x in raw_example_dataset.take(2):
  print(x['snippets'])
# Input pre-processing parameters
SEQ_LENGTH = 100  # characters per training sequence
BATCH_SIZE = 8  # sequences per batch
BUFFER_SIZE = 10000  # For dataset shuffling
```
## Text generation
```
import tensorflow as tf
# Construct a lookup table to map string chars to indexes,
# using the vocab loaded above:
# Static char -> integer-id lookup table built from the vocab; unknown
# characters map to the default id 0.
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=vocab, values=tf.constant(list(range(len(vocab))),
                                       dtype=tf.int64)),
    default_value=0)
def to_ids(x):
  """Convert an example's 'snippets' string into a 1-D tensor of char ids."""
  snippet = tf.reshape(x['snippets'], shape=[1])
  chars = tf.strings.bytes_split(snippet).values
  return table.lookup(chars)
def split_input_target(chunk):
  """Return (inputs, targets): each sequence minus its last / first char."""
  inputs = tf.map_fn(lambda seq: seq[:-1], chunk)
  targets = tf.map_fn(lambda seq: seq[1:], chunk)
  return (inputs, targets)
def preprocess(dataset):
  """Turn raw snippet examples into shuffled (input, target) char-id batches."""
  # One int64 char id per element after mapping and unbatching.
  char_ids = dataset.map(to_ids).unbatch()
  # Fixed-length windows of SEQ_LENGTH+1 chars (one extra for the target shift).
  windows = char_ids.batch(SEQ_LENGTH + 1, drop_remainder=True)
  # Shuffle, group into minibatches, then split each window into
  # (input, target) pairs of length SEQ_LENGTH.
  batches = windows.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
  return batches.map(split_input_target)
# Preprocess the King Lear client dataset and show the resulting element spec.
example_dataset = preprocess(raw_example_dataset)
print(example_dataset.element_spec)
import os
def load_model(batch_size):
  """Download (with local caching) and deserialize the pretrained Dickens RNN
  for the requested batch size (only 1 and 8 are published)."""
  urls = {
      1: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch1.kerasmodel',
      8: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch8.kerasmodel'}
  assert batch_size in urls, 'batch_size must be in ' + str(urls.keys())
  model_url = urls[batch_size]
  cached_path = tf.keras.utils.get_file(os.path.basename(model_url), origin=model_url)
  return tf.keras.models.load_model(cached_path, compile=False)
def generate_text(model, start_string):
  """Autoregressively sample 1000 characters from `model`, seeded with start_string."""
  num_generate = 1000  # length of the generated continuation
  # Vectorize the seed string into a [1, len] batch of char ids.
  input_eval = tf.expand_dims([char2idx[s] for s in start_string], 0)
  text_generated = []
  # Low temperatures give more predictable text, high ones more surprising
  # text; 1.0 uses the model's raw distribution.
  temperature = 1.0
  model.reset_states()  # batch size == 1 here
  for _ in range(num_generate):
    # Drop the batch dimension from the model output.
    predictions = tf.squeeze(model(input_eval), 0)
    # Sample the next char id from the temperature-scaled categorical distribution.
    scaled_logits = predictions / temperature
    predicted_id = tf.random.categorical(scaled_logits, num_samples=1)[-1,0].numpy()
    # Feed the sampled char back in as the next input (hidden state persists).
    input_eval = tf.expand_dims([predicted_id], 0)
    text_generated.append(idx2char[predicted_id])
  return (start_string + ''.join(text_generated))
# Text generation requires a batch_size=1 model.
keras_model_batch1 = load_model(batch_size=1)
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
BATCH_SIZE = 8 # The training and eval batch size for the rest of this tutorial.
keras_model = load_model(batch_size=BATCH_SIZE)
# Compile only to attach the loss; training happens through TFF below.
keras_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
```
## Federated learning
```
import collections
# Clone the keras_model inside `create_tff_model()`, which TFF will
# call to produce a new copy of the model inside the graph that it will
# serialize. Note: we want to construct all the necessary objects we'll need
# _inside_ this method.
def create_tff_model():
  """Build a fresh TFF-wrapped clone of the global Keras model.

  TFF serializes the model into a graph, so every object it needs must be
  constructed inside this function."""
  # A dummy batch tells TFF the dtypes and shapes the model expects.
  sample_ids = np.random.randint(1, len(vocab), size=[BATCH_SIZE, SEQ_LENGTH])
  example_batch = collections.OrderedDict(x=sample_ids, y=sample_ids)
  cloned_model = tf.keras.models.clone_model(keras_model)
  return tff.learning.from_keras_model(
      cloned_model,
      dummy_batch=example_batch,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
# Federated averaging: each round, clients train locally with SGD and the
# server averages their model updates.
# NOTE(review): `lr` is a legacy alias of `learning_rate` in tf.keras
# optimizers — confirm it is still accepted by the installed TF version.
fed_avg = tff.learning.build_federated_averaging_process(
    model_fn=create_tff_model,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(lr=0.5))
nest_asyncio.apply()  # let TFF's event loop run inside the notebook's loop
NUM_ROUNDS = 5
state = fed_avg.initialize()
# Single simulated client: five batches of the King Lear dataset per round.
for _ in range(NUM_ROUNDS):
  state, metrics = fed_avg.next(state, [example_dataset.take(5)])
  print(f'loss={metrics.loss}')
```
| github_jupyter |
# 0.前言
这个文档主要是用来入门下XGBOOST,主要就是参考的https://blog.csdn.net/qq_24519677/article/details/81869196
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import cross_validation
from sklearn.preprocessing import LabelEncoder
import sklearn
import warnings
warnings.filterwarnings('ignore')
```
# 1.数据特征处理
```
# Load the Titanic train/test splits and summarize dtypes and null counts.
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
train.info() # print a summary of the training data
test.info()
```
对数据的缺失值进行处理,这里采用的方法是对连续值用该列的平均值进行填充,非连续值用该列的众数进行填充,还可以使用机器学习的模型对缺失值进行预测,用预测的值来填充缺失值,该方法这里不做介绍:
```
def handle_na(train, test):
    """Impute missing values in-place: Fare and Age with training-set means,
    Embarked with the training-set mode. Returns the two (mutated) frames."""
    # The test set has missing Fare values; fill them with the training mean.
    test.loc[test['Fare'].isnull(), 'Fare'] = train['Fare'].mean()
    # Embarked is categorical, so use the most frequent training value.
    most_common_port = train['Embarked'].mode()[0]
    train.loc[train['Embarked'].isnull(), 'Embarked'] = most_common_port
    # Age gaps in both sets use the training mean (the mean is unchanged by
    # mean-imputation, so computing it once up front is equivalent).
    age_mean = train['Age'].mean()
    train.loc[train['Age'].isnull(), 'Age'] = age_mean
    test.loc[test['Age'].isnull(), 'Age'] = age_mean
    return train, test
new_train, new_test = handle_na(train, test) # 填充缺失值
```
由于Embarked,Sex,Pclass特征是离散特征,所以对其进行one-hot/get_dummies编码
```
# One-hot encode the categorical Embarked, Sex and Pclass features.
new_train = pd.get_dummies(new_train, columns=['Embarked', 'Sex', 'Pclass'])
new_test = pd.get_dummies(new_test, columns=['Embarked', 'Sex', 'Pclass'])
```
然后再去除掉PassengerId,Name,Ticket,Cabin, Survived列,这里不使用这些特征做预测
```
target = new_train['Survived'].values
# Drop the PassengerId, Name, Ticket, Cabin and Survived columns — these are
# not used as model features.
df_train = new_train.drop(['PassengerId','Name','Ticket','Cabin','Survived'], axis=1).values
df_test = new_test.drop(['PassengerId','Name','Ticket','Cabin'], axis=1).values
```
# 2.XGBoost模型
## 2.1使用XGBoost原生版本模型
```
X_train,X_test,y_train,y_test = train_test_split(df_train,target,test_size = 0.3,random_state = 1) # split the data into train and test sets
data_train = xgb.DMatrix(X_train, y_train) # the native XGBoost API requires DMatrix inputs
data_test = xgb.DMatrix(X_test, y_test)
param = {'max_depth': 5, 'eta': 1, 'objective': 'binary:logistic'}
watchlist = [(data_test, 'test'), (data_train, 'train')]
n_round = 3 # train for 3 boosting rounds
booster = xgb.train(param, data_train, num_boost_round=n_round, evals=watchlist)
# compute the accuracy rate
y_predicted = booster.predict(data_test)
y = data_test.get_label()
# threshold predicted probabilities at 0.5 and count matches with the labels
accuracy = sum(y == (y_predicted > 0.5))
accuracy_rate = float(accuracy) / len(y_predicted)
print ('样本总数:{0}'.format(len(y_predicted)))
print ('正确数目:{0}'.format(accuracy) )
print ('正确率:{0:.3f}'.format((accuracy_rate)))
```
## 2.2XGBoost的sklearn接口版本
```
# Train and evaluate with the sklearn-compatible XGBoost API.
X_train,X_test,y_train,y_test = train_test_split(df_train,target,test_size = 0.3,random_state = 1)
# BUG FIX: the keyword was misspelled `learn_rate`, which XGBClassifier does
# not recognize — the intended 0.01 learning rate was never applied.
model = xgb.XGBClassifier(max_depth=3, n_estimators=200, learning_rate=0.01)
model.fit(X_train, y_train)
test_score = model.score(X_test, y_test)
print('test_score: {0}'.format(test_score))
```
利用xgboost做一次预测。
```
# Sanity check: predict on the first two test rows and compare with labels.
try_pred = X_test[[0,1],:]
try_pred
try_pred_y = y_test[0:2]
try_pred_y
pred = model.predict(try_pred)
pred
```
# 3.使用其他模型于XGBoost进行对比
```
# Compare logistic regression, random forest and XGBoost using 3-fold
# shuffle-split cross-validation.
# BUG FIX (modern sklearn): the `sklearn.cross_validation` module was
# removed; use `sklearn.model_selection.ShuffleSplit`, whose index pairs are
# produced by `cv.split(X)` rather than by iterating the CV object itself.
from sklearn.model_selection import ShuffleSplit
model_lr = LogisticRegression()
model_rf = RandomForestClassifier(n_estimators=200)
# BUG FIX: `learn_rate` is an unrecognized misspelling of `learning_rate`.
model_xgb = xgb.XGBClassifier(max_depth=5, n_estimators=200, learning_rate=0.01)
models = [model_lr, model_rf, model_xgb]
model_name = ['LogisticRegression', '随机森林', 'XGBoost']
cv = ShuffleSplit(n_splits=3, test_size=0.3, random_state=1)
for i in range(3):
    print(model_name[i] + ":")
    model = models[i]
    # Index arrays — renamed so they no longer shadow the `train`/`test`
    # DataFrames defined earlier in the notebook.
    for train_idx, test_idx in cv.split(df_train):
        model.fit(df_train[train_idx], target[train_idx])
        train_score = model.score(df_train[train_idx], target[train_idx])
        test_score = model.score(df_train[test_idx], target[test_idx])
        # BUG FIX: the format string used index {0} twice, so the train
        # score was printed in both positions and the test score never shown.
        print('train score: {0:.5f} \t test score: {1:.5f}'.format(train_score, test_score))
```
| github_jupyter |
###### Background
- As you know, non-zero target values are very rare in this competition. It is similar to the class-imbalance problem in classification.
- To deal with imbalance in classification problems, we commonly use "stratified sampling".
- For this competition, we can simply apply stratified sampling to get a better-distributed split for the continuous target.
- To compare the effect of this strategy, I forked the good kernel(https://www.kaggle.com/prashantkikani/rstudio-lgb-single-model-lb1-6607) and used same parameters, same random seeds.
- I just change the sampling strategy. Ok, Let's see whether it works.
```
import os
import json
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import time
from datetime import datetime
import gc
import psutil
from sklearn.preprocessing import LabelEncoder
PATH="../input/"  # Kaggle input directory
NUM_ROUNDS = 20000  # max LightGBM boosting rounds
VERBOSE_EVAL = 500  # training-log frequency
STOP_ROUNDS = 100  # early-stopping patience
N_SPLITS = 10  # declared fold count (the StratifiedKFold below uses 5)
#the columns that will be parsed to extract the fields from the jsons
cols_to_parse = ['device', 'geoNetwork', 'totals', 'trafficSource']
def read_parse_dataframe(file_name):
    """Read a GA CSV from PATH and flatten its JSON-typed columns.

    Relies on the module-level `PATH`, `cols_to_parse` and `json_normalize`.
    """
    full_path = PATH + file_name
    # json.loads converts each JSON column while reading; fullVisitorId is
    # kept as a string so long ids are not mangled as numbers.
    data_df = pd.read_csv(full_path,
        converters={column: json.loads for column in cols_to_parse},
        dtype={'fullVisitorId': 'str'})
    for col in cols_to_parse:
        # Expand the dict column into its own frame, prefix each new column
        # with the parent name, drop the original and merge the expansion in.
        expanded = json_normalize(data_df[col])
        expanded.columns = [f"{col}_{sub_col}" for sub_col in expanded.columns]
        data_df = data_df.drop(col, axis=1).merge(expanded, right_index=True, left_index=True)
    return data_df
def process_date_time(data_df):
    """Parse the yyyymmdd `date` column and derive calendar features plus
    per-month/day/weekday unique-visitor counts.

    FIX: `Series.dt.weekofyear` was deprecated and removed in modern pandas;
    `dt.isocalendar().week` yields the same ISO week numbers.
    """
    print("process date time ...")
    data_df['date'] = data_df['date'].astype(str)
    data_df["date"] = data_df["date"].apply(lambda x : x[:4] + "-" + x[4:6] + "-" + x[6:])
    data_df["date"] = pd.to_datetime(data_df["date"])
    data_df["year"] = data_df['date'].dt.year
    data_df["month"] = data_df['date'].dt.month
    data_df["day"] = data_df['date'].dt.day
    data_df["weekday"] = data_df['date'].dt.weekday
    # ISO week number; replacement for the removed `dt.weekofyear` accessor.
    data_df['weekofyear'] = data_df['date'].dt.isocalendar().week.astype(int)
    # Distinct visitors sharing the same month / day-of-month / weekday,
    # broadcast back onto each row.
    data_df['month_unique_user_count'] = data_df.groupby('month')['fullVisitorId'].transform('nunique')
    data_df['day_unique_user_count'] = data_df.groupby('day')['fullVisitorId'].transform('nunique')
    data_df['weekday_unique_user_count'] = data_df.groupby('weekday')['fullVisitorId'].transform('nunique')
    return data_df
def process_format(data_df):
    """Coerce the count columns to float and default the two boolean ad flags."""
    print("process format ...")
    numeric_cols = ('visitNumber', 'totals_hits', 'totals_pageviews')
    for name in numeric_cols:
        data_df[name] = data_df[name].astype(float)
    # Missing flags default to "is a video ad" / "not a direct visit".
    data_df['trafficSource_adwordsClickInfo.isVideoAd'].fillna(True, inplace=True)
    data_df['trafficSource_isTrueDirect'].fillna(False, inplace=True)
    return data_df
def process_device(data_df):
    """Add browser x device-category and browser x OS interaction features."""
    print("process device ...")
    browser = data_df['device_browser']
    data_df['browser_category'] = browser + '_' + data_df['device_deviceCategory']
    data_df['browser_os'] = browser + '_' + data_df['device_operatingSystem']
    return data_df
def process_totals(data_df):
    """Log-compress the volume columns and add per-day hit/pageview aggregates."""
    print("process totals ...")
    # Heavy-tailed counts are compressed with log1p; missing pageviews count as 0.
    data_df['visitNumber'] = np.log1p(data_df['visitNumber'])
    data_df['totals_hits'] = np.log1p(data_df['totals_hits'])
    data_df['totals_pageviews'] = np.log1p(data_df['totals_pageviews'].fillna(0))
    # Daily aggregates, broadcast back onto every row of that day.
    daily_hits = data_df.groupby(['day'])['totals_hits']
    for stat in ('mean', 'sum', 'max', 'min', 'var'):
        data_df[f'{stat}_hits_per_day'] = daily_hits.transform(stat)
    daily_views = data_df.groupby(['day'])['totals_pageviews']
    for stat in ('mean', 'sum', 'max', 'min'):
        data_df[f'{stat}_pageviews_per_day'] = daily_views.transform(stat)
    return data_df
def process_geo_network(data_df):
    """Add per-network-domain aggregates of pageviews and hits."""
    print("process geo network ...")
    by_domain = data_df.groupby('geoNetwork_networkDomain')
    for agg in ('sum', 'count', 'mean'):
        data_df[f'{agg}_pageviews_per_network_domain'] = by_domain['totals_pageviews'].transform(agg)
    for agg in ('sum', 'count', 'mean'):
        data_df[f'{agg}_hits_per_network_domain'] = by_domain['totals_hits'].transform(agg)
    return data_df
def process_traffic_source(data_df):
    """Add source/campaign interaction features and per-medium hit aggregates."""
    print("process traffic source ...")
    data_df['source_country'] = data_df['trafficSource_source'] + '_' + data_df['geoNetwork_country']
    data_df['campaign_medium'] = data_df['trafficSource_campaign'] + '_' + data_df['trafficSource_medium']
    per_medium = data_df.groupby(['trafficSource_medium'])['totals_hits']
    for agg in ('mean', 'max', 'min', 'sum'):
        data_df[f'medium_hits_{agg}'] = per_medium.transform(agg)
    return data_df
#Feature processing
## Load data
print('reading train')
train_df = read_parse_dataframe('train.csv')
trn_len = train_df.shape[0]  # remember the split point so train/test can be re-separated after concat
train_df = process_date_time(train_df)
print('reading test')
test_df = read_parse_dataframe('test.csv')
test_df = process_date_time(test_df)
## Drop columns
# constant columns (a single distinct value, NaN included) carry no signal
cols_to_drop = [col for col in train_df.columns if train_df[col].nunique(dropna=False) == 1]
train_df.drop(cols_to_drop, axis=1, inplace=True)
test_df.drop([col for col in cols_to_drop if col in test_df.columns], axis=1, inplace=True)
###only one not null value
train_df.drop(['trafficSource_campaignCode'], axis=1, inplace=True)
###converting columns format
train_df['totals_transactionRevenue'] = train_df['totals_transactionRevenue'].astype(float)
train_df['totals_transactionRevenue'] = train_df['totals_transactionRevenue'].fillna(0)
# train_df['totals_transactionRevenue'] = np.log1p(train_df['totals_transactionRevenue'])
## Features engineering
train_df = process_format(train_df)
train_df = process_device(train_df)
train_df = process_totals(train_df)
train_df = process_geo_network(train_df)
train_df = process_traffic_source(train_df)
test_df = process_format(test_df)
test_df = process_device(test_df)
test_df = process_totals(test_df)
test_df = process_geo_network(test_df)
test_df = process_traffic_source(test_df)
## Categorical columns
print("process categorical columns ...")
num_cols = ['month_unique_user_count', 'day_unique_user_count', 'weekday_unique_user_count',
            'visitNumber', 'totals_hits', 'totals_pageviews',
            'mean_hits_per_day', 'sum_hits_per_day', 'min_hits_per_day', 'max_hits_per_day', 'var_hits_per_day',
            'mean_pageviews_per_day', 'sum_pageviews_per_day', 'min_pageviews_per_day', 'max_pageviews_per_day',
            'sum_pageviews_per_network_domain', 'count_pageviews_per_network_domain', 'mean_pageviews_per_network_domain',
            'sum_hits_per_network_domain', 'count_hits_per_network_domain', 'mean_hits_per_network_domain',
            'medium_hits_mean','medium_hits_min','medium_hits_max','medium_hits_sum']
not_used_cols = ["visitNumber", "date", "fullVisitorId", "sessionId",
        "visitId", "visitStartTime", 'totals_transactionRevenue', 'trafficSource_referralPath']
# everything that is neither numeric nor excluded is treated as categorical
cat_cols = [col for col in train_df.columns if col not in num_cols and col not in not_used_cols]
# concatenate so train and test share one consistent encoding
merged_df = pd.concat([train_df, test_df])
print('Cat columns : ', len(cat_cols))
ohe_cols = []
# low-cardinality (< 100 distinct values) categoricals get one-hot encoded
for i in cat_cols:
    if len(set(merged_df[i].values)) < 100:
        ohe_cols.append(i)
print('ohe_cols : ', ohe_cols)
print(len(ohe_cols))
merged_df = pd.get_dummies(merged_df, columns = ohe_cols)
# split the merged frame back by the remembered train row count
train_df = merged_df[:trn_len]
test_df = merged_df[trn_len:]
del merged_df
gc.collect()
# high-cardinality categoricals get integer label encoding instead, with the
# encoder fitted on train+test so both share one mapping
for col in cat_cols:
    if col in ohe_cols:
        continue
    #print(col)
    lbl = LabelEncoder()
    lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
    train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
    test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
print('FINAL train shape : ', train_df.shape, ' test shape : ', test_df.shape)
#print(train_df.columns)
train_df = train_df.sort_values('date')
X = train_df.drop(not_used_cols, axis=1)
y = train_df['totals_transactionRevenue']
X_test = test_df.drop([col for col in not_used_cols if col in test_df.columns], axis=1)
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn import model_selection, preprocessing, metrics
import matplotlib.pyplot as plt
import seaborn as sns
# LightGBM hyper-parameters for the revenue regression (RMSE metric).
lgb_params1 = {"objective" : "regression", "metric" : "rmse",
               "max_depth": 8, "min_child_samples": 20,
               "reg_alpha": 1, "reg_lambda": 1,
               "num_leaves" : 257, "learning_rate" : 0.01,
               "subsample" : 0.8, "colsample_bytree" : 0.8,
               "verbosity": -1}
```
# Stratified sampling
- Before stratified sampling, we need to pseudo-label the continuous target.
- In this case, I categorize the continuous target into 12 classes using a bin width of 2.
```
# def categorize_target(x):
# if x < 2:
# return 0
# elif x < 4:
# return 1
# elif x < 6:
# return 2
# elif x < 8:
# return 3
# elif x < 10:
# return 4
# elif x < 12:
# return 5
# elif x < 14:
# return 6
# elif x < 16:
# return 7
# elif x < 18:
# return 8
# elif x < 20:
# return 9
# elif x < 22:
# return 10
# else:
# return 11
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
```
## Target, prediction process
- 1st log1p to target
- 2nd expm1 predictions
- 3rd sum predictions
- 4th log1p to sum
```
# y_categorized = y.apply(categorize_target)
# Pseudo-label the continuous log-revenue target into coarse bins so
# StratifiedKFold can balance the (rare) non-zero targets across folds.
y_log = np.log1p(y)
y_categorized= pd.cut(y_log, bins=range(0,25,3), include_lowest=True,right=False, labels=range(0,24,3)) # Thanks to Vitaly Portnoy
FOLDs = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
oof_lgb = np.zeros(len(train_df))        # out-of-fold predictions for CV scoring
predictions_lgb = np.zeros(len(test_df)) # averaged test predictions across folds
features_lgb = list(X.columns)
feature_importance_df_lgb = pd.DataFrame()
# NOTE(review): `verbose_eval`/`early_stopping_rounds` keyword arguments were
# removed from lgb.train in LightGBM 4 (replaced by callbacks) — confirm the
# installed LightGBM version before running.
for fold_, (trn_idx, val_idx) in enumerate(FOLDs.split(X, y_categorized)):
    trn_data = lgb.Dataset(X.iloc[trn_idx], label=y_log.iloc[trn_idx])
    val_data = lgb.Dataset(X.iloc[val_idx], label=y_log.iloc[val_idx])
    print("LGB " + str(fold_) + "-" * 50)
    num_round = 20000
    clf = lgb.train(lgb_params1, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=1000, early_stopping_rounds = 100)
    oof_lgb[val_idx] = clf.predict(X.iloc[val_idx], num_iteration=clf.best_iteration)
    fold_importance_df_lgb = pd.DataFrame()
    fold_importance_df_lgb["feature"] = features_lgb
    fold_importance_df_lgb["importance"] = clf.feature_importance()
    fold_importance_df_lgb["fold"] = fold_ + 1
    feature_importance_df_lgb = pd.concat([feature_importance_df_lgb, fold_importance_df_lgb], axis=0)
    predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / FOLDs.n_splits
#lgb.plot_importance(clf, max_num_features=30)
# Plot the top-50 features by mean importance across folds.
cols = feature_importance_df_lgb[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:50].index
best_features_lgb = feature_importance_df_lgb.loc[feature_importance_df_lgb.feature.isin(cols)]
plt.figure(figsize=(14,10))
sns.barplot(x="importance", y="feature", data=best_features_lgb.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances.png')
# Clip negative out-of-fold predictions to zero before computing CV RMSE.
x = []
for i in oof_lgb:
    if i < 0:
        x.append(0.0)
    else:
        x.append(i)
cv_lgb = mean_squared_error(x, y_log)**0.5
cv_lgb = str(cv_lgb)
cv_lgb = cv_lgb[:10]
pd.DataFrame({'preds': x}).to_csv('lgb_oof_' + cv_lgb + '.csv', index = False)
print("CV_LGB : ", cv_lgb)
# Per-session predictions: undo log1p, sum per visitor, re-apply log1p.
sub_df = test_df[['fullVisitorId']].copy()
predictions_lgb[predictions_lgb<0] = 0
sub_df["PredictedLogRevenue"] = np.expm1(predictions_lgb)
sub_df = sub_df.groupby("fullVisitorId")["PredictedLogRevenue"].sum().reset_index()
sub_df.columns = ["fullVisitorId", "PredictedLogRevenue"]
sub_df["PredictedLogRevenue"] = np.log1p(sub_df["PredictedLogRevenue"])
sub_df.to_csv("submission.csv", index=False)
```
* - My result is LB : 1.4627
# Conclusion
- The improvement seems to be small, but you know that the small result can change the medal winner.
- This strategy would be improved using more category, etc.
- How about using it?
| github_jupyter |
# FIDO - the unified downloader tool for SunPy
### NOTE: Internet access is required in order to use this tutorial
FIDO is a new feature as part of the 0.8 SunPy release. It provides a unified interface to search for and download data from multiple sources and clients. For example, it can be used to search for images via the VSO, or timeseries data via an instrument-specific client, or both simultaneously.
In this tutorial, we will show some examples of how FIDO may be used to both search for and download data of various types.
### Importing modules - getting started
Python is modular, so first we have to import some modules we will need.
```
import sunpy # import sunpy itself
from sunpy.net import Fido #import the Fido module from sunpy
from sunpy.net import attrs as a #these are the attributes that are used to construct searches with the FIDO client
import matplotlib.pyplot as plt # for plotting
import astropy.units as u # Much of SunPy uses astropy units and quantities
```
### Prelude - a quick note on AstroPy Quantities
Some Fido search queries require the use of AstroPy quantities. We are not going to cover these in detail here, but a brief explanation is needed.
In short, an Astropy Quantity is just a value attached so some units or dimensions. In the first cell, you will see that we already imported astropy.units and gave it the name u.
#### Simple quantity example
Here is a very quick example showing how quantities can be used. Here we construct a velocity in km/s and a distance in km. When we calculate the time to travel the distance at the given velocity, the result is unit aware. More details of Units and Quantities can be found in other notebooks.
```
# Astropy Quantities carry units through arithmetic: km / (km/s) -> s.
velocity = 100*u.km/u.s
distance = 150000*u.km
time_to_travel = distance/velocity
print(time_to_travel)
print(time_to_travel.unit)
```
### Example 1 - A simple search for AIA data
First, let's construct a simple search for AIA images using the Fido client. To do this, we have to construct a search query using valid *attributes*. Above, we imported the attributes module and gave it the short name *a*.
Let's search for all AIA files between 06:20 and 06:30 on 2011 June 7, during which there was an M-class solar flare.
```
# Search for all AIA files in a 10-minute window on 2011 June 7.
example1_search = Fido.search(a.Time('2011-06-07 06:20','2011-06-07 06:30'), a.Instrument('AIA'))
print(example1_search)
```
From this summary of the search we can understand a few things. First, the search returned 402 files. We can see the properties of these files, such as the wavelength and time interval. Secondly, we can see that these entries were sourced from the Virtual Solar Observatory (VSO) client.
Let's refine this search to return only files with the 171A filter.
```
# Refine to the 171 angstrom channel; the wavelength must be an Astropy Quantity.
example1_search = Fido.search(a.Time('2011-06-07 06:20','2011-06-07 06:30'),
                              a.Instrument('AIA'), a.Wavelength(171*u.angstrom))
print(example1_search)
```
Now we can see that only 51 results were returned, all for files with the 171A filter. Notice also that we had to specify the wavelength using the astropy unit u.angstroms). Many functions in SunPy use these Units, which remove ambiguity in functions. *HINT*: see what happens if you try to carry out the search using just '171' as the wavelength.
#### Example 1 - downloading the result
The Fido module allows us to easily download the search results using the Fido.fetch function.
Let's download just the first file in our search results:
```
Fido.fetch(example1_search[:,0], path='.')
```
Now check that the file was correctly downloaded
```
ls *.fits
```
### Example 2 - querying multiple instruments simultaneously
We often want to retrieve data from different instruments of missions simultaneously, for example we may want SDO/AIA and STEREO/EUV images together. FIDO allows us to easily construct such searches.
Let's search for AIA and STEREO/EUVI data for the same time interval as before, between 06:20 - 06:30 on 2011 June 7.
```
# '|' combines attribute values as a logical OR (AIA or EUVI).
example2_search = Fido.search(a.Time('2011-06-07 06:20','2011-06-07 06:30'),
                              a.Instrument('AIA') | a.Instrument('EUVI'))
```
Using the '|' symbol we can construct queries with multiple options for a given attribute, in this case 'Instrument'.
```
print(example2_search)
```
Looking at these results, we can see that we have search result in two blocks: the first block contains all the SDO/AIA search results, and the second block contains the STEREO/EUVI search results. As before, there were 402 AIA files found, and 22 STEREO/EUVI files.
These blocks can be indexed and retrieved separately, for example:
```
print(example2_search[1])
```
...returns only the STEREO search results block.
As before, we can download these files using the Fido.fetch command. Let's download just the first result from the STEREO results block.
```
# Download the first STEREO/EUVI result; `ls` (IPython magic) confirms it.
Fido.fetch(example2_search[1,0],path='.')
ls
```
### Example 3 - querying multiple clients for different data types simultaneously
In both examples above, Fido returned results exclusively from the VSO client. Crucially though, Fido supports searching for data files from multiple different clients simultaneously.
As an example, let's construct a query where we want to obtain the AIA data, STEREO/EUVI data, and the GOES/XRS time series data from the 2011 June 7 event simultaneously.
```
# Query three instruments at once; the GOES results come from a different
# client than the VSO-backed AIA/EUVI results.
example3_search = Fido.search(a.Time('2011-06-07 06:20','2011-06-07 06:30'),
                              a.Instrument('AIA') | a.Instrument('EUVI') | a.Instrument('GOES'))
print(example3_search)
```
We can see that this time, we have three blocks of responses. The third block contains results from another client, the GOESClient, which is for GOES/XRS time series data. FIDO automatically communicates with all of the different SunPy clients as required to find the requested data.
```
example3_search[2]
```
Once again, we can download just the GOES/XRS timeseries data using Fido.fetch.
```
# Download the complete GOES/XRS block and list the working directory.
Fido.fetch(example3_search[2,:],path='.')
ls
```
### Summary
By the end of this notebook you should have a basic understanding of how to use FIDO, the unified downloader client in SunPy. Here are some key things to remember:
- The function Fido.search() allows you to search online for data files from a large suite of solar missions.
- The Fido.fetch() command allows you to download the results of a search.
- Searches are constructed using *attributes*. Sometimes these attributes need to be in the form of an *AstroPy Quantity*.
- Complex queries can be constructed via combinations of attributes - multiple options for an attribute may be used simultaneously.
- Fido can search for different data types from different sources simultaneously, e.g. timeseries, images and spectra.
| github_jupyter |
## Day Agenda
- Reading the different formats of data sets
- statistical information about data sets
- Concatination of data frames
- grouping of dataframes
- merging data frames
- filtering data from data frames
-
## Reading the different formats of data sets
- csv(comma separated values)
- json
- xlsx
- tsv(tab separated values)
```
import pandas as pd
dir(pd)
# Loading the dataset from browser
df=pd.read_json("https://raw.githubusercontent.com/AP-State-Skill-Development-Corporation/Data-Science-Using-Python-Internship-EB1/main/Notebooks/Day23-Pandas%20continue/cafes.json")
df
pd.read_json("cafes.json")# loading dataset from local system
# convert json format into csv data set
df.to_csv("cafes.csv")
df1=pd.read_csv("https://raw.githubusercontent.com/AP-State-Skill-Development-Corporation/Data-Science-Using-Python-Internship-EB1/main/Notebooks/Day23-Pandas%20continue/chipotle.tsv.txt",
sep='\t')
df1.head()
df2=pd.read_csv("https://raw.githubusercontent.com/AP-State-Skill-Development-Corporation/Data-Science-Using-Python-Internship-EB1/main/Notebooks/Day23-Pandas%20continue/weather.csv")
df2.head()
```
## statistical information about data sets
- describe()
- info()
- corr()
- varies from +1 (highly positively correlated) to -1 (highly negatively correlated); values near 0 mean little or no linear correlation
```
df1.head()
df1.describe()
df1.info()
df1.corr()
```
## Concatenation of data frames
- pandas.concat([df1, df2]) — note that concat takes a sequence of DataFrames, not separate arguments
```
d1=pd.DataFrame({'clg':['spmvv','Gist','aec','Alts'],
'temp':[35,47,68,33],
'hum':[45,23,78,89]})
d1
d2=pd.DataFrame({'clg':['vvit','kits','nec','Anu'],
'temp':[36,50,60,30],
'hum':[47,77,88,90]})
d2
import pandas as pd
d3=pd.concat((d1,d2))
d3
import pandas as pd
d3=pd.concat((d1,d2),ignore_index=True)
d3
import pandas as pd
d3=pd.concat((d1,d2),keys=['k1','k2'])
d3
```
## Grouping of dataframes
- df.groupby
```
help(df1.groupby)
d1
g1=d1.groupby(by='temp')# spliting a specific object
g1
d1=g1.max()# applying a function to groupby object and dispay a result
d1
d1
g2=d1.groupby(by='clg')
d1=g2
d1
```
## merging data frames
df.merge()
```
a=pd.DataFrame({'x':[11,22,33,44],'y':[33,44,35,16]})
b=pd.DataFrame({'x':[11,21,33,44],'y':[3,4,5,6]})
a
b
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]})
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]})
df1
df2
df3=pd.merge(df1,df2,indicator=True)
df3
df3=pd.merge(df1,df2,how='right',indicator=True)
df3
m1=pd.DataFrame({"city":['x','y','z'],
'temp':[11,22,33]})
m1
m2=pd.DataFrame({"city":['x','y','a'],
'hum':[44,55,66]})
m2
m3=pd.merge(m1,m2)
m3
m3=pd.merge(m1,m2,on='city',how='left',indicator=True)
m3
```
| github_jupyter |
# Potentiostats and Galvanostats
## References
---
Adams, Scott D., et al. "MiniStat: Development and evaluation of a mini-potentiostat for electrochemical measurements." Ieee Access 7 (2019): 31903-31912. https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8657694
---
Ainla, Alar, et al. "Open-source potentiostat for wireless electrochemical detection with smartphones." Analytical chemistry 90.10 (2018): 6240-6246. https://gmwgroup.harvard.edu/files/gmwgroup/files/1308.pdf
---
Bianchi, Valentina, et al. "A Wi-Fi cloud-based portable potentiostat for electrochemical biosensors." IEEE Transactions on Instrumentation and Measurement 69.6 (2019): 3232-3240.
---
Dobbelaere, Thomas, Philippe M. Vereecken, and Christophe Detavernier. "A USB-controlled potentiostat/galvanostat for thin-film battery characterization." HardwareX 2 (2017): 34-49. https://doi.org/10.1016/j.ohx.2017.08.001
---
Hoilett, Orlando S., et al. "KickStat: A coin-sized potentiostat for high-resolution electrochemical analysis." Sensors 20.8 (2020): 2407. https://www.mdpi.com/1424-8220/20/8/2407/htm
---
Irving, P., R. Cecil, and M. Z. Yates. "MYSTAT: A compact potentiostat/galvanostat for general electrochemistry measurements." HardwareX 9 (2021): e00163. https://www.sciencedirect.com/science/article/pii/S2468067220300729
> 2, 3, and 4 wire cell configurations with +/- 12 volts at 200ma.
---
Lopin, Prattana, and Kyle V. Lopin. "PSoC-Stat: A single chip open source potentiostat based on a Programmable System on a Chip." PloS one 13.7 (2018): e0201353. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0201353
---
Matsubara, Yasuo. "A Small yet Complete Framework for a Potentiostat, Galvanostat, and Electrochemical Impedance Spectrometer." (2021): 3362-3370. https://pubs.acs.org/doi/full/10.1021/acs.jchemed.1c00228
> Elegant 2 op-amp current source for a galvanostat.
---
## Application to Electrical Impedence Spectroscopy
---
Wang, Shangshang, et al. "Electrochemical impedance spectroscopy." Nature Reviews Methods Primers 1.1 (2021): 1-21. https://www.nature.com/articles/s43586-021-00039-w.pdf
> Tutorial presentation of EIS, including instrumentation and data analysis.
---
Magar, Hend S., Rabeay YA Hassan, and Ashok Mulchandani. "Electrochemical Impedance Spectroscopy (EIS): Principles, Construction, and Biosensing Applications." Sensors 21.19 (2021): 6578. https://www.mdpi.com/1424-8220/21/19/6578/pdf
> Tutorial introduction with descriptions of application to solutions and reactions at surfaces.
---
Instruments, Gamry. "Basics of electrochemical impedance spectroscopy." G. Instruments, Complex impedance in Corrosion (2007): 1-30. https://www.c3-analysentechnik.eu/downloads/applikationsberichte/gamry/5657-Application-Note-EIS.pdf
> Tutorial introduction to EIS with extensive modeling discussion.
---
| github_jupyter |
# <font color="blue">Failure Cases in Tesseract OCR</font>
In this notebook, we will see some instances where Tesseract does not work as expected and provide logical reasons for them. We will see some examples from scanned documents as well as natural camera images.
We will discuss how to improve the OCR output in the next notebook.
# <font color="blue">Install Tesseract</font>
```
!apt install libtesseract-dev tesseract-ocr > /dev/null
!pip install pytesseract > /dev/null
```
# <font color="blue">Import Libraries </font>
```
import pytesseract
import cv2
import glob
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import Image
```
# <font color="blue">Failure Cases Categories </font>
Even though failure cannot always be attributed to one specific reason, we have listed a few major reasons for failure of OCR using Tesseract and in general. They are:
1. **Cluttered Background** - The text might not be visibly clear or it might appear camouflaged with the background.
1. **Small Text** - The text might be too small to be detected.
1. **Rotation or Perspective Distortion** - The text might be rotated in the image or the image itself might be distorted.
# <font color="blue">Test Image 1: Image of a Book</font>
```
!wget https://www.dropbox.com/s/uwrdek4jjac4ysz/book2.jpg?dl=1 -O book2.jpg --quiet
```
### <font color="green">Downloaded Image</font>
<img src="https://www.dropbox.com/s/uwrdek4jjac4ysz/book2.jpg?dl=1" width=500>
### <font color="green">Output</font>
```
book2_text = pytesseract.image_to_string('book2.jpg')
print(book2_text)
```
### <font color="green">Observation </font>
We saw this example in the last notebook. The major reason is the relatively cluttered background and low contrast of the white text on orange background. Another issue with these kind of images is the variability of text size. **"Black"** is written in extra-large size whereas **"The impact of"** is normal.
# <font color="blue">Test Image 2: Driving License </font>
```
!wget https://www.dropbox.com/s/rdaha84n8jo3bmw/dl.jpg?dl=0 -O dl.jpg --quiet
```
### <font color="green">Downloaded Image</font>

### <font color="green">Output</font>
```
dl_text = pytesseract.image_to_string('dl.jpg')
print(dl_text)
Image("dl.jpg")
```
### <font color="green">Observation </font>
- It is unable to detect small text( Date of Expiry, Address, etc.)
- Same issue with cluttered background ( New York on Top Left )
# <font color="blue">Test Image 3: License Plate</font>
```
!wget "https://www.dropbox.com/s/xz24vxrp4uvvnri/license_plate.jpg?dl=0" -O lp1.jpg --quiet
```
### <font color="green">Downloaded Image</font>
<img src="https://www.dropbox.com/s/xz24vxrp4uvvnri/license_plate.jpg?dl=1" width=500>
### <font color="green">Output</font>
```
lp_text = pytesseract.image_to_string('lp1.jpg')
print(lp_text)
```
### <font color="green">Observation </font>
Even though the text is very clear to us, Tesseract finds it difficult to recognize. The major issue is with contrast.
# <font color="blue">Street Signs</font>
Street signs are one of the most difficult ones to recognize. Let us see some examples.
# <font color="blue">Test Image 4</font>
```
!wget https://www.dropbox.com/s/uwlnxiihqgni57o/streetsign1.jpg?dl=0 -O streetsign1.jpg --quiet
```
### <font color="green">Downloaded Image</font>
<img src="https://www.dropbox.com/s/uwlnxiihqgni57o/streetsign1.jpg?dl=1" width=500>
### <font color="green">Output</font>
```
ss1_text = pytesseract.image_to_string('streetsign1.jpg')
print(ss1_text)
```
# <font color="blue">Test Image 5</font>
```
!wget https://www.dropbox.com/s/dbkag5gsicxqoqg/streetsign2.jpg?dl=0 -O streetsign2.jpg --quiet
```
### <font color="green">Downloaded Image</font>
<img src="https://www.dropbox.com/s/dbkag5gsicxqoqg/streetsign2.jpg?dl=1" width=500>
### <font color="green">Output</font>
```
ss2_text = pytesseract.image_to_string('streetsign2.jpg')
print(ss2_text)
```
# <font color="blue">Test Image 6</font>
```
!wget https://www.dropbox.com/s/cgni28zl1k9sesk/streetsign3.jpg?dl=0 -O streetsign3.jpg --quiet
```
### <font color="green">Downloaded Image</font>
<img src="https://www.dropbox.com/s/cgni28zl1k9sesk/streetsign3.jpg?dl=1" width=500>
### <font color="green">Output</font>
```
ss3_text = pytesseract.image_to_string('streetsign3.jpg')
print(ss3_text)
```
### <font color="green">Observation</font>
In all the above 3 images, there are multiple issues:
- text is not aligned. It is either rotated within the plane or into the plane.
- Large variation in Text size
- Background clutter
Next, We will discuss how to overcome some of the limitations of Tesseract by using different techniques.
| github_jupyter |
**Note that the name of the callback `AccumulateStepper` has been changed into `AccumulateScheduler`**
https://forums.fast.ai/t/accumulating-gradients/33219/90?u=hwasiti
https://github.com/fastai/fastai/blob/fbbc6f91e8e8e91ba0e3cc98ac148f6b26b9e041/fastai/train.py#L99-L134
```
import fastai
from fastai.vision import *
gpu_device = 0
defaults.device = torch.device(f'cuda:{gpu_device}')
torch.cuda.set_device(gpu_device)
def seed_everything(seed):
    """Make runs reproducible by seeding every RNG in use.

    Seeds Python's `random`, NumPy, and PyTorch (CPU + current CUDA device),
    sets PYTHONHASHSEED, and forces deterministic cuDNN kernels.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    # deterministic cuDNN algorithms (slower, but reproducible)
    torch.backends.cudnn.deterministic = True
seed_everything(42)
path = untar_data(URLs.PETS)
path_anno = path/'annotations'
path_img = path/'images'
fnames = get_image_files(path_img)
pat = re.compile(r'/([^/]+)_\d+.jpg$')
# Simplified RunningBatchNorm
# 07_batchnorm.ipynb (fastai course v3 part2 2019)
class RunningBatchNorm2d(nn.Module):
    """Batch norm over 4-d activations that normalises with *running*
    (exponentially smoothed) statistics rather than raw per-batch statistics.

    Simplified from 07_batchnorm.ipynb (fastai course v3 part 2, 2019).
    Per-channel sums and sums-of-squares are accumulated with `lerp_`
    (an exponential moving average) and turned into a precomputed
    `factor`/`offset` pair applied in forward.

    Args:
        nf: number of feature channels.
        mom: smoothing momentum for the running statistics.
        eps: small constant added to the variance for numerical stability.
    """
    def __init__(self, nf, mom=0.1, eps=1e-5):
        super().__init__()
        # I have added self.nf so that it can be represented when
        # printing the model in the extra_repr method below
        self.nf = nf
        self.mom, self.eps = mom, eps
        # learnable per-channel affine scale/shift (broadcast over (bs,nf,h,w))
        self.mults = nn.Parameter(torch.ones (nf,1,1))
        self.adds = nn.Parameter(torch.zeros(nf,1,1))
        # running statistics live in buffers so they move with .to()/state_dict
        self.register_buffer('sums', torch.zeros(1,nf,1,1))
        self.register_buffer('sqrs', torch.zeros(1,nf,1,1))
        self.register_buffer('count', tensor(0.))
        self.register_buffer('factor', tensor(0.))
        self.register_buffer('offset', tensor(0.))
        self.batch = 0  # total samples seen so far (not a buffer: not saved)
    def update_stats(self, x):
        """Fold batch `x` into the running stats and refresh factor/offset."""
        bs,nc,*_ = x.shape
        # cut the accumulators out of the autograd graph before mutating them
        self.sums.detach_()
        self.sqrs.detach_()
        dims = (0,2,3)  # reduce over batch and spatial dims, keep channels
        s = x .sum(dims, keepdim=True)
        ss = (x*x).sum(dims, keepdim=True)
        c = s.new_tensor(x.numel()/nc)  # elements contributing per channel
        # batch-size-scaled momentum; NOTE(review): sqrt(bs-1) assumes
        # bs >= 2 — a batch of 1 divides by zero. Confirm callers guarantee it.
        mom1 = s.new_tensor(1 - (1-self.mom)/math.sqrt(bs-1))
        self.sums .lerp_(s , mom1)
        self.sqrs .lerp_(ss, mom1)
        self.count.lerp_(c , mom1)
        self.batch += bs
        means = self.sums/self.count
        varns = (self.sqrs/self.count).sub_(means*means)
        # early in training the running variance is unreliable; floor it
        if bool(self.batch < 20): varns.clamp_min_(0.01)
        # precompute so forward is a single multiply-add
        self.factor = self.mults / (varns+self.eps).sqrt()
        self.offset = self.adds - means*self.factor
    def forward(self, x):
        # stats are only updated in training mode; eval reuses the last pair
        if self.training: self.update_stats(x)
        return x*self.factor + self.offset
    def extra_repr(self):
        """Shown when the module is printed, e.g. RunningBatchNorm2d(64, ...)."""
        return '{nf}, mom={mom}, eps={eps}'.format(**self.__dict__)
class RunningBatchNorm1d(nn.Module):
    """Running-statistics batch norm for 3-d activations of shape (bs, nf, len).

    Fix vs. the original: the parameters/buffers were copy-pasted from the 2d
    version with shapes (nf,1,1) / (1,nf,1,1). Summing a 3-d input over
    dims (0, 2) yields (1, nf, 1) statistics, which cannot broadcast into
    those 4-d buffers, so `lerp_` (and hence forward) raised at the first
    training step. Shapes are now (nf, 1) / (1, nf, 1). Also guards the
    batch-size-scaled momentum against division by zero when bs == 1.

    Args:
        nf: number of feature channels.
        mom: smoothing momentum for the running statistics.
        eps: small constant added to the variance for numerical stability.
    """
    def __init__(self, nf, mom=0.1, eps=1e-5):
        super().__init__()
        # kept so extra_repr can show the channel count when the model prints
        self.nf = nf
        self.mom, self.eps = mom, eps
        # per-channel affine scale/shift, broadcastable over (bs, nf, len)
        self.mults = nn.Parameter(torch.ones(nf, 1))
        self.adds = nn.Parameter(torch.zeros(nf, 1))
        # running statistics live in buffers so they move with .to()/state_dict
        self.register_buffer('sums', torch.zeros(1, nf, 1))
        self.register_buffer('sqrs', torch.zeros(1, nf, 1))
        self.register_buffer('count', tensor(0.))
        self.register_buffer('factor', tensor(0.))
        self.register_buffer('offset', tensor(0.))
        self.batch = 0  # total samples seen so far
    def update_stats(self, x):
        """Fold batch `x` into the running stats and refresh factor/offset."""
        bs, nc, *_ = x.shape
        # cut the accumulators out of the autograd graph before mutating them
        self.sums.detach_()
        self.sqrs.detach_()
        dims = (0, 2)  # reduce over batch and length, keep the channel dim
        s = x.sum(dims, keepdim=True)
        ss = (x * x).sum(dims, keepdim=True)
        c = s.new_tensor(x.numel() / nc)  # elements contributing per channel
        # batch-size-scaled momentum; max(1, bs-1) avoids sqrt(0) at bs == 1
        mom1 = s.new_tensor(1 - (1 - self.mom) / math.sqrt(max(1, bs - 1)))
        self.sums.lerp_(s, mom1)
        self.sqrs.lerp_(ss, mom1)
        self.count.lerp_(c, mom1)
        self.batch += bs
        means = self.sums / self.count
        varns = (self.sqrs / self.count).sub_(means * means)
        # early in training the running variance is unreliable; floor it
        if bool(self.batch < 20): varns.clamp_min_(0.01)
        # precompute so forward is a single multiply-add
        self.factor = self.mults / (varns + self.eps).sqrt()
        self.offset = self.adds - means * self.factor
    def forward(self, x):
        # stats are only updated in training mode; eval reuses the last pair
        if self.training: self.update_stats(x)
        return x * self.factor + self.offset
    def extra_repr(self):
        """Shown when the module is printed, e.g. RunningBatchNorm1d(64, ...)."""
        return '{nf}, mom={mom}, eps={eps}'.format(**self.__dict__)
```
### No Grad Acc (BS 64), No running BN
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=64
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
learn.fit(1)
data.batch_size
```
### No Grad Acc (BS 2), No running BN
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
learn.fit(1)
```
### Naive Grad Acc (BS 2) x 32 steps, No running BN
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy,
callback_fns=[partial(AccumulateScheduler, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.fit(1)
```
### No Grad Acc (BS 2), Running BN
```
def bn2rbn(bn):
    """Build a RunningBatchNorm layer equivalent to BatchNorm layer `bn`.

    Fix vs. the original: RunningBatchNorm keeps its affine parameters in
    `mults`/`adds` (those are what forward uses), not `weight`/`bias`.
    Assigning `rbn.weight = bn.weight` only registered extra, unused
    parameters and silently discarded the trained affine transform. We now
    copy the BatchNorm weight/bias into `mults`/`adds`, reshaped per channel.

    Raises:
        TypeError: if `bn` is neither BatchNorm1d nor BatchNorm2d (the
            original died with UnboundLocalError in that case).
    """
    if isinstance(bn, nn.BatchNorm1d):
        rbn = RunningBatchNorm1d(bn.num_features, eps=bn.eps, mom=bn.momentum)
    elif isinstance(bn, nn.BatchNorm2d):
        rbn = RunningBatchNorm2d(bn.num_features, eps=bn.eps, mom=bn.momentum)
    else:
        raise TypeError(f"expected BatchNorm1d/BatchNorm2d, got {type(bn).__name__}")
    with torch.no_grad():
        rbn.mults.copy_(bn.weight.view(rbn.mults.shape))
        rbn.adds.copy_(bn.bias.view(rbn.adds.shape))
    return rbn.to(bn.weight.device)
def convert_bn(list_mods, func=bn2rbn):
    """Recursively replace BatchNorm layers in a list of modules, in place.

    BatchNorm layers (any type in fastai's `bn_types`) are swapped via
    `func`; "Sequential"/"BasicBlock" containers are rebuilt as
    nn.Sequential with their children converted recursively.
    Returns the mutated list.
    """
    for idx, mod in enumerate(list_mods):
        if isinstance(mod, bn_types):
            list_mods[idx] = func(mod)
        elif mod.__class__.__name__ in ("Sequential", "BasicBlock"):
            converted_children = convert_bn(list(mod.children()), func)
            list_mods[idx] = nn.Sequential(*converted_children)
    return list_mods
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
# learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.model
learn.summary()
learn.model = nn.Sequential(*convert_bn(list(learn.model.children()), bn2rbn))
learn.model
learn.summary()
%debug
learn.fit(1)
```
### GroupNorm
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy,
callback_fns=[partial(AccumulateScheduler, n_step=32)])
# learn.loss_func = CrossEntropyFlat(reduction='sum')
groups = 64
def bn2group(bn, num_groups=None):
    """Convert a BatchNorm layer into a GroupNorm layer with the same
    channel count and affine parameters (weight/bias are shared, not cloned).

    Args:
        bn: the BatchNorm layer to convert.
        num_groups: number of GroupNorm groups. Defaults to the module-level
            `groups` constant (backward compatible with the original, which
            hard-wired that global).

    Returns:
        An nn.GroupNorm on the same device as `bn`'s weight.
    """
    g = num_groups if num_groups is not None else groups
    groupnorm = nn.GroupNorm(g, bn.num_features, affine=True)
    groupnorm.weight = bn.weight
    groupnorm.bias = bn.bias
    groupnorm.eps = bn.eps
    return groupnorm.to(bn.weight.device)
def convert_bn(list_mods, func=bn2group):
    """Recursively replace BatchNorm layers in a list of modules, in place.

    BatchNorm layers (any type in fastai's `bn_types`) are swapped via
    `func`; "Sequential"/"BasicBlock" containers are rebuilt as nn.Sequential
    with their children converted recursively. Returns the mutated list.

    NOTE(review): containers are matched by class *name*, so any other
    container type is left untouched — confirm that is intended.
    """
    for i in range(len(list_mods)):
        if isinstance(list_mods[i], bn_types):
            list_mods[i] = func(list_mods[i])
        elif list_mods[i].__class__.__name__ in ("Sequential", "BasicBlock"):
            # rebuilding as nn.Sequential flattens BasicBlock into Sequential
            list_mods[i] = nn.Sequential(*convert_bn(list(list_mods[i].children()), func))
    return list_mods
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
                                   ).normalize(imagenet_stats)
# Fix: the callback was renamed AccumulateStepper -> AccumulateScheduler in
# fastai (see the note at the top of this notebook); the old name raises a
# NameError. Also use cnn_learner, the current name for the deprecated
# create_cnn alias, matching the earlier cells in this notebook.
learn = cnn_learner(data, models.vgg16_bn, metrics=accuracy,
                    callback_fns=[partial(AccumulateScheduler, n_step=32)])
# sum-reduction so losses accumulated over 32 steps of bs=2 match one bs=64 step
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.model = nn.Sequential(*convert_bn(list(learn.model.children()), bn2group))
learn.freeze()
learn.fit(1)
```
### Resnet + GroupNorm
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
                                   ).normalize(imagenet_stats)
# Fix: AccumulateStepper was renamed AccumulateScheduler in fastai (see the
# note at the top of this notebook); the old name raises a NameError. Also
# use cnn_learner, the current name for the deprecated create_cnn alias,
# matching the earlier cells in this notebook.
learn = cnn_learner(data, models.resnet18, metrics=accuracy,
                    callback_fns=[partial(AccumulateScheduler, n_step=32)])
# sum-reduction so losses accumulated over 32 steps of bs=2 match one bs=64 step
learn.loss_func = CrossEntropyFlat(reduction='sum')
def change_all_BN(module):
    """Replace the `bn0`..`bn4` attributes of `module` (if present) with
    GroupNorm equivalents via bn2group. Attributes that do not exist are
    skipped; the module is mutated in place."""
    for idx in range(5):
        name = f'bn{idx}'
        if hasattr(module, name):
            setattr(module, name, bn2group(getattr(module, name)))
def wrap_BN(model):
    """Walk a two-level fastai cnn model and swap BatchNorm layers for
    GroupNorm (via bn2group / change_all_BN), mutating `model` in place.

    Assumes the layout produced by create_cnn for a resnet: `model[i][j]`
    may be a BatchNorm layer, a Sequential of resnet layers, or (one level
    deeper) a BasicBlock with bn1/bn2 attributes and an optional
    `downsample` Sequential containing its own BatchNorm.
    NOTE(review): only handles exactly this nesting depth — any BatchNorm
    nested differently is left unconverted; confirm for other archs.
    """
    for i in range(len(model)):
        for j in range(len(model[i])):
            if isinstance(model[i][j], bn_types):
                # BatchNorm sitting directly in the head/body
                model[i][j] = bn2group(model[i][j])
            elif model[i][j].__class__.__name__ == "Sequential":
                for k in range(len(model[i][j])):
                    if isinstance(model[i][j][k], bn_types):
                        model[i][j][k] = bn2group(model[i][j][k])
                    elif model[i][j][k].__class__.__name__ == "BasicBlock":
                        # converts the block's bn0..bn4 attributes in place
                        change_all_BN(model[i][j][k])
                        # the residual shortcut can carry its own BatchNorm
                        if hasattr(model[i][j][k],'downsample'):
                            if model[i][j][k].downsample is not None:
                                for l in range(len(model[i][j][k].downsample)):
                                    if isinstance(model[i][j][k].downsample[l], bn_types):
                                        model[i][j][k].downsample[l] = bn2group(model[i][j][k].downsample[l])
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
```
### Resnet + GroupNorm (No Acc)
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
```
### Resnet + GroupNorm (No Acc) bs = 1
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=1
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
```
| github_jupyter |
# Introduction
## Why the FLUX pipeline?
The aim of the FLUX pipeline is to provide a standard implemented as a tutorial for using some of most common toolboxes to analyze full MEG datasets. The pipeline will be used for education and as well as provide a common framework for MEG data analysis.
This section will focus on [MNE-Python](https://mne.tools/stable/index.html). The core arguments for using MNE-Python are:
- The MNE-Python toolbox is supported by an active and dynamic group of developers ensuring that the latest analysis tools are available.
- The toolbox and pipeline are Open Source and henceforth compatible with Open Science approaches.
- The toolbox is based on Python which is free (as opposed to Matlab, for instance).
- Python is becoming the preferred programming language for novel data analyses approaches including a wealth of machine learning tools. Python modules are under constant development by a huge open sources community ensuring the availability of state-of-the-art data science tools.
## Target users
The target users are researchers new to MNE-Python as well as more advanced users seeking to standardize their analyses. The FLUX pipeline is particularly geared towards cognitive neuroscientists with an interest in task-based paradigms. The developers of the pipeline are cognitive neuroscientists with a strong interest in brain oscillations and multivariate approaches.
## Using the pipeline in self-studies and education
The MNE Python toolbox has an excellent selection of [Tutorials](https://mne.tools/stable/auto_tutorials/index.html)
and [Examples](https://mne.tools/dev/auto_examples/index.html). Nevertheless, the options are so many that the learning curve for new users is very steep. The FLUX pipeline provides a set of procedures for what we consider best practice at this moment in time. Consistent with an Open Science approach, the FLUX pipeline provides a validated and documented approach for MEG data analysis. Each analysis step comes with explanations and illustrations. Furthermore, questions are embedded in the tutorials which will be useful for self-study or they can be used in educational settings.
We will not link back to sections on the MNE-Python webpage as they will change over time; nevertheless, users are strongly encouraged to consult the website as they develop their skills and insight.
To use the FLUX toolbox the following installations and downloads are required:
1. Install the Python environment; we recommend Anaconda3 which provide Jupyter notebooks and Spyder; see [anaconda.com](https://www.anaconda.com/)
2. Install MNE-Python: [instructions](https://mne.tools/dev/install/index.html). To make 3D rendering possible we recommend
> conda create --name=mne --channel=conda-forge mne
> conda install --name base nb_conda_kernels
3. Download the example datasets (see [FLUX website](https://neuosc.com/flux/))
4. Get the Jupyter notebooks from GitHub (see [FLUX website](https://neuosc.com/flux/)); alternatively copy/paste the code from GitHub and execute it using e.g. Spyder.
To create forward models based on T1 MRIs for source modeling [Freesurfer](https://surfer.nmr.mgh.harvard.edu/) is required.
## Preregistration and publication
Publication, example:
"The data were analyzed using the open source toolbox MNE Python v0.24.0 (Gramfort et al., 2013) following the standards defined in the FLUX Pipeline (Ferrrante et al., 2022)"
## References
Alexandre Gramfort, Martin Luessi, Eric Larson, Denis A. Engemann, Daniel Strohmeier, Christian Brodbeck, Roman Goj, Mainak Jas, Teon Brooks, Lauri Parkkonen, and Matti S. Hämäläinen. MEG and EEG data analysis with MNE-Python. Frontiers in Neuroscience, 7(267):1–13, 2013. doi:10.3389/fnins.2013.00267.
| github_jupyter |
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# Gmail - Schedule mailbox cleaning
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Gmail/Gmail_Schedule_mailbox_cleaning.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/></a>
**Tags:** #gmail #productivity
## Input
### Import libraries
```
import naas
from naas_drivers import email
import pandas as pd
import numpy as np
import plotly.express as px
```
### Account credentials
```
username = "naas.sanjay22@gmail.com"
password = "atsuwkylwfhucugw"
smtp_server = "imap.gmail.com"
box = "INBOX"
```
Note: You need to create an application password following this procedure - https://support.google.com/mail/answer/185833?hl=en
## Model
### Setting the scheduler
```
naas.scheduler.add(recurrence="0 9 * * *") # Scheduler set for 9 am
```
### Connect to email box
```
emails = naas_drivers.email.connect(username,
password,
username,
smtp_server)
```
### Get email list
```
dataframe = emails.get(criteria="seen")
dataframe
```
### Creating dataframe and inserting values
```
sender_name = []
sender_email = []
for df in dataframe["from"]:
sender_name.append(df['name'])
sender_email.append(df['email'])
result = pd.DataFrame(columns = ['SENDER_NAME','SENDER_EMAIL','COUNT','PERCENTAGE'])
name_unique = np.unique(sender_name)
email_unique = np.unique(sender_email)
total_email = len(emails.get(criteria="seen")) + len(emails.get(criteria="unseen"))
c = 0
for i in np.unique(sender_name):
new_row = {'SENDER_NAME':i,'SENDER_EMAIL':sender_email[c],'COUNT':sender_name.count(i),'PERCENTAGE':round(((sender_name.count(i))/total_email)*100)}
result = result.append(new_row, ignore_index=True)
c+=1
result
```
### Email graph plot
```
fig = px.bar(x=result['COUNT'], y=result['SENDER_NAME'], orientation='h')
fig.show()
```
## Output
### Deleting using email id
```
d_email = "notifications@naas.ai" # email id to be deleted
data_from = dataframe['from']
data_uid = dataframe['uid']
uid = []
```
### Updating the uid values
```
for i in range(len(dataframe)):
if data_from[i]['email'] == d_email:
uid.append(data_uid[i])
print(uid)
```
### Deleting the emails
```
for i in uid:
attachments = emails.set_flag(i, "DELETED", True)
```
### Showing the updated email list
```
dataframe = emails.get(criteria="seen")
dataframe
```
| github_jupyter |
# k-Nearest Neighbor (kNN) exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
The kNN classifier consists of two stages:
- During training, the classifier takes the training data and simply remembers it
- During testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples
- The value of k is cross-validated
In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.
```
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print X_train.shape, X_test.shape
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
```
We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
1. First we must compute the distances between all test examples and all train examples.
2. Given these distances, for each test example we find the k nearest examples and have them vote for the label
Lets begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.
First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
```
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print dists.shape
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
```
**Inline Question #1:** Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
- What in the data is the cause behind the distinctly bright rows?
- What causes the columns?
**Your Answer**: *fill this in.*
```
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
```
You should expect to see approximately `27%` accuracy. Now lets try out a larger `k`, say `k = 5`:
```
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
```
You should expect to see a slightly better performance than with `k = 1`.
```
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Let's compare how fast the implementations are
def time_function(f, *args):
    """
    Call a function f with args and return the time (in seconds) that it took to execute.
    """
    import time
    start = time.time()
    f(*args)
    end = time.time()
    return end - start
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print 'Two loop version took %f seconds' % two_loop_time
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print 'One loop version took %f seconds' % one_loop_time
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print 'No loop version took %f seconds' % no_loop_time
# you should see significantly faster performance with the fully vectorized implementation
```
### Cross-validation
We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
```
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
################################################################################
# TODO: #
# Split up the training data into folds. After splitting, X_train_folds and #
# y_train_folds should each be lists of length num_folds, where #
# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #
# Hint: Look up the numpy array_split function. #
################################################################################
pass
# split self.X_train to 5 folds
avg_size = int(X_train.shape[0] / num_folds) # will abandon the rest if not divided evenly.
for i in range(num_folds):
X_train_folds.append(X_train[i * avg_size : (i+1) * avg_size])
y_train_folds.append(y_train[i * avg_size : (i+1) * avg_size])
################################################################################
# END OF YOUR CODE #
################################################################################
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
################################################################################
# TODO: #
# Perform k-fold cross validation to find the best value of k. For each #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #
# where in each case you use all but one of the folds as training data and the #
# last fold as a validation set. Store the accuracies for all fold and all #
# values of k in the k_to_accuracies dictionary. #
################################################################################
pass
for k in k_choices:
accuracies = []
print k
for i in range(num_folds):
X_train_cv = np.vstack(X_train_folds[0:i] + X_train_folds[i+1:])
y_train_cv = np.hstack(y_train_folds[0:i] + y_train_folds[i+1:])
X_valid_cv = X_train_folds[i]
y_valid_cv = y_train_folds[i]
classifier.train(X_train_cv, y_train_cv)
dists = classifier.compute_distances_no_loops(X_valid_cv)
accuracy = float(np.sum(classifier.predict_labels(dists, k) == y_valid_cv)) / y_valid_cv.shape[0]
accuracies.append(accuracy)
k_to_accuracies[k] = accuracies
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
for accuracy in k_to_accuracies[k]:
print 'k = %d, accuracy = %f' % (k, accuracy)
# plot the raw observations
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 10
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
```
| github_jupyter |
<img src="./pictures/DroneApp_logo.png" style="float:right; max-width: 180px; display: inline" alt="INSA" /></a>
<img src="./pictures/logo_sizinglab.png" style="float:right; max-width: 100px; display: inline" alt="INSA" /></a>
# Application of First Monotonicity Principle to the optimization of MRAV
*Created by Aitor Ochotorena (INSA Toulouse), Toulouse, France.*
Based on the differentiability of continuous mathematical functions, in this Notebook we present a guide to **reduce the excess of constraints** in optimization problems.
The standard expression of an optimization problem has the following form:
<math>\begin{align}
&\underset{\mathbf{x}}{\operatorname{minimize}}& & f(\mathbf{x}) \\
&\operatorname{subject\ to}
& &g_i(\mathbf{x}) \leq 0, \quad i = 1, \dots, m \\
&&&h_i(\mathbf{x}) = 0, \quad i = 1, \dots, p,
\end{align}</math>
where $x \in \mathbb{R}^n$ is the optimization variable, the functions $f, g_1, \ldots, g_m$ are convex, and the functions $h_1, \ldots, h_p$ are equality constraints.
In this notation, the function $f$ is the objective function of the problem, and the functions $g_i$ and $h_i$are referred to as the constraint functions.
**Find out which set of inequality constraints can be turned to equality ones has an enormous importance to reduce the complexity of the problem and the calculation time.**
The application of the 'First Monotonicity Principle',
> P. Y. Papalambros, D. J. Wilde, Principles of Optimal Design, Cambridge University Press, 2018.
makes it possible to evaluate whether the objective attains its minimum value w.r.t. the considered variable when the constraint acts as an equality. We refer to such constraints as *active constraints*; they are identified by studying the monotonicity behaviour of both the objective and the constraint. In a well-constrained objective function, every (strictly) increasing (decreasing) variable is bounded below (above) by at least one active constraint. For a well-constrained minimization problem, there exists at least one $x$ that satisfies the optimality conditions:
<math>\begin{align}
$\displaystyle (\frac{\partial{ f}}{\partial{ x_i}})_*+\sum_{j}\mu_j(\frac{\partial{ g_j}}{\partial{ x_i}})_*=0$
\end{align}</math>
where $\mu_j \geq 0$.
When the sign of a variable in the objective function is opposite to the
sign of the same variable in exactly one constraint, that constraint can be
declared active and turned into an equality. If the sign of the variable
is opposite to that of the objective function in several constraints, no
evident statement can be made, and in that case the constraints are left
as inequalities.

*Representative plot of an optimization problem with three constraints. Two of them: $g_1$ and $g_2$ are acting as active constraints, since they bound the function objective as equality.*
## 1. Import Sizing Code from a .py code
Here Python reads the sizing code with all equations used for the drone saved in the folder :
``` '.\SizingCode'```. Our file is called: ``` DroneSystem.py```.
Design variables are defined as symbols using the symbolic calculation of Sympy:
**(This part is specific for every sizing code)**
```
import sys
sys.path.insert(0, 'SizingCode')
from DroneSystem import *
```
## 2. Problem definition
Once the equations are imported from the .py file, we define here the main parameters for the optimization problem: objective ```Obj```, design variables ```Vars```, constraints ```Const``` and bounds ```bounds```. From here onwards we work with symbolic mathematics (SymPy):
**(This part is specific for every sizing code)**
- Objective:
```
Obj=Mtotal_final
```
- Design Variables:
```
Vars=[ Mtotal,ND,Tmot,Ktmot,P_esc,V_bat,C_bat,beta, J, D_ratio, Lbra,Dout]
```
- Constraints:
```
Const=[
-Tmot_max+Qpro_max ,
-Tmot_max+Qpro_cl,
-Tmot+Qpro_hover,
-V_bat+Umot_max,
-V_bat+Umot_cl,
-V_bat+Vesc,
-V_bat*Imax+Umot_max*Imot_max*Npro/0.95,
-V_bat*Imax+Umot_cl*Imot_cl*Npro/0.95,
-P_esc+P_esc_max,
-P_esc+P_esc_cl,
-J*n_pro_cl*Dpro+V_cl,
+J*n_pro_cl*Dpro-V_cl-0.05,
-NDmax+ND,
-NDmax+n_pro_cl*Dpro,
-Lbra+Dpro/2/(math.sin(pi/Narm)),
(-Sigma_max+Tpro_max*Lbra/(pi*(Dout**4-(D_ratio*Dout)**4)/(32*Dout)))
]
```
- Bounds:
```
bounds=[(0,100),#M_total
(0,105000/60*.0254),#ND
(0.01,10),#Tmot
(0,1),#Ktmot
(0,1500),#P_esc
(0,150),#V_bat
(0,20*3600),#C_bat
(0.3,0.6),#beta
(0,0.5),#J
(0,0.99),#D_ratio
(0.01,1),#Lb
(0.001,0.1),#Dout
(1,15),#Nred
]
```
## 3. Monotonicity algorithm
The next step is to evaluate the monotonicity of the functions. This will be done through the study of the differentiability of the functions. We will follow this procedure: A constraint is passed to ```is_increasing()``` or ```is_decreasing()```, which use a predicate of ```lambda x: x > 0``` or ```lambda x: x < 0``` respectively. This method calls ```compute_min_and_max```, which differentiates the constraint with respect to the desired variable, creates a series of random points within the bounds and substitutes those values into the derivative of the constraint. If the predicate matches the output, the method returns True.
To run the design of experiments satisfactorily, update pyDOE: `pip install --upgrade pyDOE`:
This algorithm is saved under the file `Monotonicity.ipynb` :
**(This part is reusable)**
```
# Note the python import here
import reuse, sys
# This is the Ipython hook
sys.meta_path.append(reuse.NotebookFinder())
import Monotonicity
```
## 3. Construction of table of Monotonicity
For each constraint and variable we will study the monotonicity behaviour calling the previous methods defined. If the constraint has an increasing behaviour, a ```+``` will be printed, in case where it is decreasing ```-```, and in the case where both increases and decreases, a ```?``` is displayed. The objective will be studied as well.
**(This part is reusable)**
```
import pandas as pd

# Monotonicity table M: one row per constraint, one column per design variable.
# Cell symbols: '+' increasing, '-' decreasing, '?' undetermined, ' ' no
# dependence ('ZERO'). A final row for the objective is appended at the end.
M = [["" for x in Vars] for y in Const]
ObjVector = ["" for x in Vars]

def _monotonicity_symbol(expr, var):
    """Classify the monotonicity of `expr` w.r.t. `var` as '+', '-', '?' or ' '.

    The increasing test is evaluated once and reused, so that the 'ZERO'
    check and the truth check cannot disagree when the underlying random
    sampling in Monotonicity gives different answers on repeated calls.
    """
    increasing = Monotonicity.is_increasing(expr, var, bounds, Vars)
    if increasing == 'ZERO':
        return ' '
    if increasing:
        print('* %s is increasing' % var)
        return '+'
    if Monotonicity.is_decreasing(expr, var, bounds, Vars):
        print('* %s is decreasing' % var)
        return '-'
    return '?'

print('Monotonicity Analysis for the constraints w.r.t. the following variables:')
for Cnumber, C in enumerate(Const):  # loop over constraints
    # Message fixed: these are constraints, not constants.
    print('Constraint %d out of %d' % (Cnumber + 1, len(Const)))
    for Anumber, A in enumerate(Vars):  # loop over design variables
        M[Cnumber][Anumber] = _monotonicity_symbol(C, A)
print('\n')
print('Monotonicity Analysis for the objective w.r.t. the following variables:')
for Anumber, A in enumerate(Vars):
    ObjVector[Anumber] = _monotonicity_symbol(Obj, A)
M.append(ObjVector)
```
Create from np.array a DataFrame:
```
import pandas as pd
indexcol=[i for i in range(len(Const))]
indexcol.append('Objective')
pd.DataFrame(M, columns=Vars, index=indexcol)
```
The last step is to compare each variable's monotonicity in every constraint against its monotonicity in the objective (the active-constraint decision):
```
# Active-constraint decision (First Monotonicity Principle): for each design
# variable, count the constraints whose monotonicity sign is opposite to the
# objective's. If exactly one constraint opposes the objective, that
# constraint must be active and can be treated as an equality.
for index,objvalue in enumerate(ObjVector):
    counter=0;
    # Column `index` of M holds the monotonicity symbol of every constraint
    # (and, in its last row, the objective itself) w.r.t. Vars[index]; the
    # objective row can never have the opposite sign to itself, so it is
    # harmless to include it in the scan.
    for j,constvalue in enumerate([i[index] for i in M]):
        # print(objvalue);
        # print(constvalue)
        if objvalue=='+' and constvalue=='-':
            counter+=1;
            x=j #to save the constraint
        elif objvalue=='-' and constvalue=='+':
            counter+=1;
            x=j; #to save the constraint
    # Exactly one opposing constraint => it bounds the objective as an
    # equality, so the inequality can be eliminated.
    if counter==1:
        print('Const %d w.r.t. %s can be eliminated'%(x,Vars[index]))
```
<a id='section_3'></a>
| github_jupyter |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="fig/cover-small.jpg">
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
*The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
<!--NAVIGATION-->
< [Defining and Using Functions](08-Defining-Functions.ipynb) | [Contents](Index.ipynb) | [Iterators](10-Iterators.ipynb) >
# Errors and Exceptions
No matter your skill as a programmer, you will eventually make a coding mistake.
Such mistakes come in three basic flavors:
- *Syntax errors:* Errors where the code is not valid Python (generally easy to fix)
- *Runtime errors:* Errors where syntactically valid code fails to execute, perhaps due to invalid user input (sometimes easy to fix)
- *Semantic errors:* Errors in logic: code executes without a problem, but the result is not what you expect (often very difficult to track-down and fix)
Here we're going to focus on how to deal cleanly with *runtime errors*.
As we'll see, Python handles runtime errors via its *exception handling* framework.
## Runtime Errors
If you've done any coding in Python, you've likely come across runtime errors.
They can happen in a lot of ways.
For example, if you try to reference an undefined variable:
```
print(Q)
```
Or if you try an operation that's not defined:
```
1 + 'abc'
```
Or you might be trying to compute a mathematically ill-defined result:
```
2 / 0
```
Or maybe you're trying to access a sequence element that doesn't exist:
```
L = [1, 2, 3]
L[1000]
```
Note that in each case, Python is kind enough to not simply indicate that an error happened, but to spit out a *meaningful* exception that includes information about what exactly went wrong, along with the exact line of code where the error happened.
Having access to meaningful errors like this is immensely useful when trying to trace the root of problems in your code.
## Catching Exceptions: ``try`` and ``except``
The main tool Python gives you for handling runtime exceptions is the ``try``...``except`` clause.
Its basic structure is this:
```
try:
print("this gets executed first")
except:
print("this gets executed only if there is an error")
```
Note that the second block here did not get executed: this is because the first block did not return an error.
Let's put a problematic statement in the ``try`` block and see what happens:
```
try:
print("let's try something:")
x = 1 / 0 # ZeroDivisionError
except:
print("something bad happened!")
```
Here we see that when the error was raised in the ``try`` statement (in this case, a ``ZeroDivisionError``), the error was caught, and the ``except`` statement was executed.
One way this is often used is to check user input within a function or another piece of code.
For example, we might wish to have a function that catches zero-division and returns some other value, perhaps a suitably large number like $10^{100}$:
```
def safe_divide(a, b):
    """Divide a by b, substituting a huge number (1E100) on any failure."""
    try:
        quotient = a / b
    except:
        # Deliberately broad: this version swallows every error, which the
        # surrounding text uses to motivate catching exceptions explicitly.
        quotient = 1E100
    return quotient
safe_divide(1, 2)
safe_divide(2, 0)
```
There is a subtle problem with this code, though: what happens when another type of exception comes up? For example, this is probably not what we intended:
```
safe_divide (1, '2')
```
Dividing an integer and a string raises a ``TypeError``, which our over-zealous code caught and assumed was a ``ZeroDivisionError``!
For this reason, it's nearly always a better idea to catch exceptions *explicitly*:
```
def safe_divide(a, b):
    """Divide a by b; map division-by-zero to 1E100, let other errors propagate."""
    try:
        quotient = a / b
    except ZeroDivisionError:
        return 1E100
    return quotient
safe_divide(1, 0)
safe_divide(1, '2')
```
We're now catching zero-division errors only, and letting all other errors pass through un-modified.
## Raising Exceptions: ``raise``
We've seen how valuable it is to have informative exceptions when using parts of the Python language.
It's equally valuable to make use of informative exceptions within the code you write, so that users of your code (foremost yourself!) can figure out what caused their errors.
The way you raise your own exceptions is with the ``raise`` statement. For example:
```
raise RuntimeError("my error message")
```
As an example of where this might be useful, let's return to our ``fibonacci`` function that we defined previously:
```
def fibonacci(N):
    """Return a list of the first N Fibonacci numbers (1, 1, 2, 3, 5, ...)."""
    sequence = []
    previous, current = 0, 1
    while len(sequence) < N:
        sequence.append(current)
        previous, current = current, previous + current
    return sequence
```
One potential problem here is that the input value could be negative.
This will not currently cause any error in our function, but we might want to let the user know that a negative ``N`` is not supported.
Errors stemming from invalid parameter values, by convention, lead to a ``ValueError`` being raised:
```
def fibonacci(N):
    """Return the first N Fibonacci numbers.

    Raises ValueError when N is negative, so callers get an informative
    error instead of a silently empty result.
    """
    if N < 0:
        raise ValueError("N must be non-negative")
    sequence = []
    previous, current = 0, 1
    while len(sequence) < N:
        sequence.append(current)
        previous, current = current, previous + current
    return sequence
fibonacci(10)
fibonacci(-10)
```
Now the user knows exactly why the input is invalid, and could even use a ``try``...``except`` block to handle it!
```
N = -10
try:
print("trying this...")
print(fibonacci(N))
except ValueError:
print("Bad value: need to do something else")
```
### Exercise
1. Let's come back to one of our previous exercise contexts. Create a function out of one of them and raise an error in this function.
2. Call the above function for an input, which will raise an exception, catch the error that you raised in your function and redefine the reaction to it.
## Diving Deeper into Exceptions
Briefly, I want to mention here some other concepts you might run into.
I'll not go into detail on these concepts and how and why to use them, but instead simply show you the syntax so you can explore more on your own.
### Accessing the error message
Sometimes in a ``try``...``except`` statement, you would like to be able to work with the error message itself.
This can be done with the ``as`` keyword:
```
try:
x = 1 / 0
except ZeroDivisionError as err:
print("Error class is: ", type(err))
print("Error message is:", err)
```
With this pattern, you can further customize the exception handling of your function.
### Defining custom exceptions
In addition to built-in exceptions, it is possible to define custom exceptions through *class inheritance*.
For instance, if you want a special kind of ``ValueError``, you can do this:
```
class MySpecialError(ValueError):
pass
raise MySpecialError("here's the message")
```
This would allow you to use a ``try``...``except`` block that only catches this type of error:
```
try:
print("do something")
raise MySpecialError("[informative error message here]")
except MySpecialError:
print("do something else")
```
You might find this useful as you develop more customized code.
## ``try``...``except``...``else``...``finally``
In addition to ``try`` and ``except``, you can use the ``else`` and ``finally`` keywords to further tune your code's handling of exceptions.
The basic structure is this:
```
try:
print("try something here")
except:
print("this happens only if it fails")
else:
print("this happens only if it succeeds")
finally:
print("this happens no matter what")
```
The utility of ``else`` here is clear, but what's the point of ``finally``?
Well, the ``finally`` clause really is executed *no matter what*: I usually see it used to do some sort of cleanup after an operation completes.
<!--NAVIGATION-->
< [Defining and Using Functions](08-Defining-Functions.ipynb) | [Contents](Index.ipynb) | [Iterators](10-Iterators.ipynb) >
| github_jupyter |
# Automated Gradual Pruning Schedule
Michael Zhu and Suyog Gupta, ["To prune, or not to prune: exploring the efficacy of pruning for model compression"](https://arxiv.org/pdf/1710.01878), 2017 NIPS Workshop on Machine Learning of Phones and other Consumer Devices<br>
<br>
After completing sensitivity analysis, decide on your pruning schedule.
## Table of Contents
1. [Implementation of the gradual sparsity function](#Implementation-of-the-gradual-sparsity-function)
2. [Visualize pruning schedule](#Visualize-pruning-schedule)
3. [References](#References)
```
import numpy
import matplotlib.pyplot as plt
from functools import partial
import torch
from torch.autograd import Variable
from ipywidgets import widgets, interact
```
## Implementation of the gradual sparsity function
The function ```sparsity_target``` implements the gradual sparsity schedule from [[1]](#zhu-gupta):<br><br>
<b><i>"We introduce a new automated gradual pruning algorithm in which the sparsity is increased from an initial sparsity value $s_i$ (usually 0) to a final sparsity value $s_f$ over a span of $n$ pruning steps, starting at training step $t_0$ and with pruning frequency $\Delta t$."</i></b><br>
<br>
<div id="eq:zhu_gupta_schedule"></div>
<center>
$\large
\begin{align}
s_t = s_f + (s_i - s_f) \left(1- \frac{t-t_0}{n\Delta t}\right)^3
\end{align}
\ \ for
\large \ \ t \in \{t_0, t_0+\Delta t, ..., t_0+n\Delta t\}
$
</center>
<br>
Pruning happens once at the beginning of each epoch, until the duration of the pruning (the number of epochs to prune) is exceeded. After pruning ends, the training continues without pruning, but the pruned weights are kept at zero.
```
def sparsity_target(starting_epoch, ending_epoch, initial_sparsity, final_sparsity, current_epoch):
    """Return the target sparsity at `current_epoch` per Zhu & Gupta (2017).

    Sparsity rises from `initial_sparsity` (s_i) to `final_sparsity` (s_f)
    following the cubic schedule
        s_t = s_f + (s_i - s_f) * (1 - (t - t0) / span) ** 3
    over the interval [starting_epoch, ending_epoch].

    `current_epoch` may be a plain number or an autograd tensor (so the
    schedule's derivative can be obtained via backward(), as the plotting
    cell below does).

    Raises ValueError if final_sparsity < initial_sparsity.
    """
    if final_sparsity < initial_sparsity:
        # Originally this returned `current_epoch`, i.e. an epoch count where
        # a sparsity value is expected; an explicit error is clearer.
        raise ValueError("final_sparsity must be >= initial_sparsity")
    if current_epoch < starting_epoch:
        # Pruning has not started yet.
        return initial_sparsity
    if current_epoch >= ending_epoch:
        # Schedule finished: clamp to the final sparsity instead of letting
        # the cubic overshoot past s_f.
        return final_sparsity
    span = ending_epoch - starting_epoch
    target_sparsity = (final_sparsity +
                       (initial_sparsity - final_sparsity) *
                       (1.0 - ((current_epoch - starting_epoch) / span)) ** 3)
    return target_sparsity
```
## Visualize pruning schedule
When using the Automated Gradual Pruning (AGP) schedule, you may want to visualize how the pruning schedule will look as a function of the epoch number. This is called the *sparsity function*. The widget below will help you do this.<br>
There are three knobs you can use to change the schedule:
- ```duration```: this is the number of epochs over which to use the AGP schedule ($n\Delta t$).
- ```initial_sparsity```: $s_i$
- ```final_sparsity```: $s_f$
- ```frequency```: this is the pruning frequency ($\Delta t$).
```
def draw_pruning(duration, initial_sparsity, final_sparsity, frequency):
    """Plot the AGP sparsity schedule and its rate of change over 40 epochs.

    duration: number of epochs over which pruning is applied (n * delta_t)
    initial_sparsity, final_sparsity: s_i and s_f, in percent
    frequency: pruning frequency delta_t; the widget supplies it as a string
    """
    epochs = []
    sparsity_levels = []
    # The derivative of the sparsity (i.e. sparsity rate of change)
    d_sparsity = []
    # The frequency widget is a text box, so the value arrives as a string;
    # an empty box defaults to pruning every epoch.
    if frequency=='':
        frequency = 1
    else:
        frequency = int(frequency)
    for epoch in range(0,40):
        epochs.append(epoch)
        # Wrap the epoch in an autograd Variable so the schedule's derivative
        # can be obtained with backward().
        current_epoch=Variable(torch.FloatTensor([epoch]), requires_grad=True)
        # Prune only inside the schedule window and on multiples of `frequency`.
        if epoch<duration and epoch%frequency == 0:
            sparsity = sparsity_target(
                starting_epoch=0,
                ending_epoch=duration,
                initial_sparsity=initial_sparsity,
                final_sparsity=final_sparsity,
                current_epoch=current_epoch
            )
            sparsity_levels.append(sparsity)
            # d(sparsity)/d(epoch) via autograd; zero the grad for the next pass.
            sparsity.backward()
            d_sparsity.append(current_epoch.grad.item())
            current_epoch.grad.data.zero_()
        else:
            # Between pruning steps the last computed sparsity level is held.
            # NOTE(review): `sparsity` is undefined here when duration == 0
            # (no pruning step ever runs), which would raise a NameError --
            # confirm whether duration=0 should be supported.
            sparsity_levels.append(sparsity)
            d_sparsity.append(0)
    plt.plot(epochs, sparsity_levels, epochs, d_sparsity)
    plt.ylabel('sparsity (%)')
    plt.xlabel('epoch')
    plt.title('Pruning Rate')
    plt.ylim(0, 100)
    plt.draw()
duration_widget = widgets.IntSlider(min=0, max=100, step=1, value=28)
si_widget = widgets.IntSlider(min=0, max=100, step=1, value=0)
interact(draw_pruning,
duration=duration_widget,
initial_sparsity=si_widget,
final_sparsity=(0,100,1),
frequency='2');
```
<div id="toc"></div>
## References
1. <div id="zhu-gupta"></div> **Michael Zhu and Suyog Gupta**.
[*To prune, or not to prune: exploring the efficacy of pruning for model compression*](https://arxiv.org/pdf/1710.01878),
NIPS Workshop on Machine Learning of Phones and other Consumer Devices,
2017.
| github_jupyter |
# Coursework 2: Neural Networks
This coursework covers the topics covered in class regarding neural networks for image classification.
This coursework includes both coding questions as well as written ones. Please upload the notebook, which contains your code, results and answers as a pdf file onto Cate.
Dependencies: If you work on a college computer in the Computing Lab, where Ubuntu 18.04 is installed by default, you can use the following virtual environment for your work, where relevant Python packages are already installed.
`source /vol/bitbucket/wbai/virt/computer_vision_ubuntu18.04/bin/activate`
Alternatively, you can use pip, pip3 or anaconda etc to install Python packages.
**Note 1:** please read the both the text and code comment in this notebook to get an idea what you are supposed to implement.
**Note 2:** If you are using the virtual environment in the Computing Lab, please run the following command in the command line before opening jupyter-notebook and importing tensorflow. This will tell tensorflow where the Nvidia CUDA libariries are.
`export LD_LIBRARY_PATH=/vol/cuda/9.0.176/lib64/:"${LD_LIBRARY_PATH}}"`
```
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
```
## Question 1 (20 points)
Throughout this coursework you will be working with the Fashion-MNIST dataset. If you are interested, you may find relevant information regarding the dataset in this paper.
[1] Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms. Han Xiao, Kashif Rasul, Roland Vollgraf. [arXiv:1708.07747](https://arxiv.org/abs/1708.07747)
Be sure that you have the following files in your working directory: data.tar.gz and reader.py. Loading the data can be done as follows:
`from reader import get_images
(x_train, y_train), (x_test, y_test) = get_images()`
The dataset is already split into a set of 60,000 training images and a set of 10,000 test images. The images are of size 28x28 pixels and stored as 784-D vector. So if you would like to visualise the images, you need to reshape the array.
There are in total 10 label classes, which are:
* 0: T-shirt/top
* 1: Trousers
* 2: Pullover
* 3: Dress
* 4: Coat
* 5: Sandal
* 6: Shirt
* 7: Sneaker
* 8: Bag
* 9: Ankle boot
### 1.1 Load data (6 points)
Load the dataset and print the dimensions of the training set and the test set.
```
from reader import get_images
(x_train, y_train), (x_test, y_test) = get_images()
print('dimensions of the training set:',x_train.shape,y_train.shape)
print('dimensions of the test set:',x_test.shape,y_test.shape)
```
### 1.2 Visualize data (6 points)
Visualise 3 training images (T-shirt, trousers and pullover) and 3 test images (dress, coat and sandal).
```
num=0
class_name=['T-shirt','Trousers','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle B']
image=[np.reshape(x_train[1],(28,28))]*6
for index in range(np.random.randint(9000),10000):
if num<3 and y_train[index]==num:
image[num]=np.reshape(x_train[index],(28,28))
num+=1
if num>=3 and y_test[index]==num:
image[num]=np.reshape(x_test[index],(28,28))
num+=1
if num==6: break
plt.figure
for i in range(6):
plt.subplot(2,3,i+1)
plt.imshow(image[i],cmap='gray')
plt.title(class_name[i])
```
### 1.3 Data balance (4 points)
Print out the number of training samples for each class.
```
# Count training samples per class. Renamed from `dict`/`dictlist`: the
# original shadowed the builtin `dict`, breaking any later `dict(...)` call
# in the notebook session.
class_counts = {}
for class_ in y_train:
    class_counts[class_] = class_counts.get(class_, 0) + 1
# Sort by class index so the printout lines up with `class_name`.
sorted_counts = sorted(class_counts.items(), key=lambda item: item[0])
for i in range(10):
    print('Sample Number of No.', sorted_counts[i][0], ' ', class_name[i], '=', sorted_counts[i][1], sep='')
```
### 1.4 Discussion (4 points)
Is the dataset balanced? What would happen if the dataset is not balanced in the context of image classification?
From the output above we can see that every class has the same number of training samples (6000), so the dataset is balanced. Traditional classification algorithms, which optimise overall accuracy, tend to focus on the majority classes; as a result, degraded performance on the minority classes is unavoidable when the dataset is imbalanced.
## Question 2 (40 points)
Build a neural network and train it with the Fashion-MNIST dataset. Here, we use the keras library, which is a high-level neural network library built upon tensorflow.
```
# Convert the label class into a one-hot representation
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# normalization from 0-255 to 0-1
x_train=x_train.astype('float32')/255
x_test=x_test.astype('float32')/255
```
### 2.1 Build a multi-layer perceptron, also known as multi-layer fully connected network. You need to define the layers, the loss function, the optimiser and evaluation metric. (30 points)
```
model = keras.models.Sequential()
# as input layer in a sequential model:
model.add(Dense(512,activation='relu',input_shape=(784,)))
model.add(Dropout(0.25))
#as hidden layer in the model
model.add(Dense(144,activation='relu'))
model.add(Dropout(0.20))
#as output layer in model
model.add(Dense(num_classes,activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=["accuracy"])
print(model.summary())
```
### 2.2 Define the optimisation parameters including the batch size and the number of epochs and then run the optimiser. (10 points)
We have tested that for an appropriate network architecture, on a personal laptop and with only CPU, it takes about a few seconds per epoch to train the network. For 100 epochs, it takes about a coffee break's time to finish the training. If you run it on a powerful GPU, it would be even much faster.
```
batch_size = 32
epochs = 20
model.fit(x_train, y_train,epochs=epochs,batch_size=batch_size)
```
## Question 3 (20 points)
Evaluate the performance of your network with the test data.
Visualize the performance using appropriate metrics and graphs (eg. confusion matrix).
Comment on your per class performance and how it could be better.
```
# This function is provided for you to display the confusion matrix.
# For more information about the confusion matrix, you can read at
# https://en.wikipedia.org/wiki/Confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """Print the confusion matrix to stdout and render it as a matplotlib image.

    cm: confusion matrix, default to be np.int32 data type
    classes: a list of the class labels or class names
    normalize: normalize the matrix so that each row amounts to one
    cmap: color map
    """
    if normalize:
        # Row-wise normalisation: divide each row by its total sample count.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Overlay each cell's value, switching the text colour on dark cells
    # so the numbers stay readable.
    fmt = '.2f' if normalize else 'd'
    threshold = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > threshold else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
```
### 3.1 Evaluate the classification accuracy on the test set (10 points)
```
score = model.evaluate(x_test, y_test)
print('Test Loss','%.4f' %score[0])
print('Test Accuracy',score[1])
```
### 3.2 Calculate and plot the confusion matrix (10 points)
```
from sklearn.metrics import confusion_matrix

# Collapse one-hot / probability vectors to class indices for predictions
# and ground truth, then build and plot the confusion matrix.
y_pred = np.argmax(model.predict(x_test), axis=1)
y_test = np.argmax(y_test, axis=1)
cm = confusion_matrix(y_test, y_pred)
plot_confusion_matrix(cm, class_name)
```
## Question 4 (20 points)
Take two photos, one of your clothes or shoes that belongs to one of 10 classes, the other that does not belong to any class.
Use either Python or other software (Photoshop, GIMP, or any image editor) to convert the photos into grayscale, crop the region of interest and reshape into the size of 28x28.
### 4.1 Load and visualise your own images (6 points)
```
import matplotlib.image
def rgb2gray(rgb):
    """Convert an (H, W, 3) RGB array to greyscale using ITU-R BT.601 luma weights."""
    weights = (0.2989, 0.5870, 0.1140)
    # Weighted sum over the channel axis, accumulated in the same order
    # as the classic r*0.2989 + g*0.5870 + b*0.1140 expression.
    return sum(w * rgb[:, :, c] for c, w in enumerate(weights))
image_name = ["Queen's_Tower", "T-shirt"]
image_reshape = []
# Load each photo, convert to greyscale, preview it, and flatten to a 1x784 row.
for subplot_idx, name in enumerate(image_name, start=1):
    grey = rgb2gray(matplotlib.image.imread(name + '.png'))
    plt.subplot(1, 2, subplot_idx)
    plt.imshow(grey, cmap='gray')
    plt.title(name)
    image_reshape.append(np.reshape(grey, (1, 784)))
```
### 4.2 Test your network on the two images and show the classification results (10 points)
```
# Classify each prepared image and report the predicted class label.
for name, flat_img in zip(image_name, image_reshape):
    class_index = model.predict(flat_img).argmax(axis=1)[0]
    print('Prediction of', name + ':', class_name[class_index])
```
### 4.3 Discuss the classification results and provide one method to improve real life performance of the network (4 points)
Well, this classification algorithm identified the T-shirt successfully but classified the Queen's Tower as a bag, which is unsurprisingly wrong. According to the result of this test, we can say that
* This algorithm is good enough to classify these 10 classes related to clothing and wearing.
* It cannot identify other unlabelled classes given the structure of a multi-layer fully connected network and the limited training data.
There are some things we can do to bring it into real-life application:
1. A large number of samples and a wider variety of label classes are needed to adapt to real life. Affine transformations can be used to increase the amount of training data.
2. Combining this with more complex neural network architectures is a good way to deal with more data using fewer parameters.
3. Adding a regularisation term is another method to improve the accuracy of classification.
## 5. Survey
How long did the coursework take you to solve?
The whole afternoon of lovely Thursday
| github_jupyter |
```
# Score the test comments with six fine-tuned rankers; each run writes a
# per-comment score CSV that is blended into the final submission below.
# Run 30: 10-fold DeBERTa-large.
!python ../input/jigsawsrc/inference.py \
    --num_folds 10 \
    --base_model ../input/deberta/deberta-large \
    --base_model_name microsoft/deberta-large \
    --weights_dir ../input/ranking-30-deberta-large \
    --data_path ../input/jigsaw-toxic-severity-rating/comments_to_score.csv \
    --save_path preds_30.csv
# Run 52: 5-fold DeBERTa-large (weights directory suffix "e2").
!python ../input/jigsawsrc/inference.py \
    --num_folds 5 \
    --base_model ../input/deberta/deberta-large \
    --base_model_name microsoft/deberta-large \
    --weights_dir ../input/ranking-52-deberta-large-e2 \
    --data_path ../input/jigsaw-toxic-severity-rating/comments_to_score.csv \
    --save_path preds_52.csv
# Run 58: 5-fold RemBERT.
!python ../input/jigsawsrc/inference.py \
    --num_folds 5 \
    --base_model ../input/rembert/rembert \
    --base_model_name google/rembert \
    --weights_dir ../input/ranking-58-rembert \
    --data_path ../input/jigsaw-toxic-severity-rating/comments_to_score.csv \
    --save_path preds_58.csv
# Run 61: 5-fold DeBERTa-v3-base (dataset includes OOF predictions).
!python ../input/jigsawsrc/inference.py \
    --num_folds 5 \
    --base_model ../input/deberta-v3/deberta-v3-base \
    --base_model_name microsoft/deberta-v3-base \
    --weights_dir ../input/ranking-61-deberta-v3-base-with-oof/ranking_61_deberta_v3_base \
    --data_path ../input/jigsaw-toxic-severity-rating/comments_to_score.csv \
    --save_path preds_61.csv
# Run 63: 5-fold DeBERTa-v3-large.
!python ../input/jigsawsrc/inference.py \
    --num_folds 5 \
    --base_model ../input/deberta-v3/deberta-v3-large \
    --base_model_name microsoft/deberta-v3-large \
    --weights_dir ../input/ranking-63-deberta-v3-large \
    --data_path ../input/jigsaw-toxic-severity-rating/comments_to_score.csv \
    --save_path preds_63.csv
# Run 64: 5-fold DistilRoBERTa-base (dataset includes OOF predictions).
!python ../input/jigsawsrc/inference.py \
    --num_folds 5 \
    --base_model ../input/k/amontgomerie/roberta/distilroberta-base \
    --base_model_name distilroberta-base \
    --weights_dir ../input/ranking-64-distilroberta-base-with-oof \
    --data_path ../input/jigsaw-toxic-severity-rating/comments_to_score.csv \
    --save_path preds_64.csv
import pandas as pd
# Blend weights per run id (values found offline; order of addition preserved).
weights = {
    'weight_30': 0.4024099870709701,
    'weight_52': 0.9011229226395379,
    'weight_58': 0.755155406930562,
    'weight_61': 0.27330107235558226,
    'weight_63': 0.02807554189147518,
    'weight_64': 0.029575587654601887,
}
run_ids = ("30", "52", "58", "61", "63", "64")
# Per-run score columns produced by the inference commands above.
model_preds = {f"preds_{rid}": pd.read_csv(f"preds_{rid}.csv")["score"] for rid in run_ids}
# Weighted linear blend of the six score vectors.
mean_preds = sum(model_preds[f"preds_{rid}"] * weights[f"weight_{rid}"] for rid in run_ids)
test_data = pd.read_csv("../input/jigsaw-toxic-severity-rating/comments_to_score.csv")
submission = pd.DataFrame({"comment_id": test_data.comment_id, "score": mean_preds})
# Only the relative ordering of comments matters, so rank-transform the scores.
submission["score"] = submission.score.rank()
submission.to_csv("./submission.csv", index=False)
submission
```
| github_jupyter |
## 1. Where are the old left-handed people?
<p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_479/img/Obama_signs_health_care-20100323.jpg" alt="Barack Obama signs the Patient Protection and Affordable Care Act at the White House, March 23, 2010"></p>
<p>Barack Obama is left-handed. So are Bill Gates and Oprah Winfrey; so were Babe Ruth and Marie Curie. A <a href="https://www.nejm.org/doi/full/10.1056/NEJM199104043241418">1991 study</a> reported that left-handed people die on average nine years earlier than right-handed people. Nine years! Could this really be true? </p>
<p>In this notebook, we will explore this phenomenon using age distribution data to see if we can reproduce a difference in average age at death purely from the changing rates of left-handedness over time, refuting the claim of early death for left-handers. This notebook uses <code>pandas</code> and Bayesian statistics to analyze the probability of being a certain age at death given that you are reported as left-handed or right-handed.</p>
<p>A National Geographic survey in 1986 resulted in over a million responses that included age, sex, and hand preference for throwing and writing. Researchers Avery Gilbert and Charles Wysocki analyzed this data and noticed that rates of left-handedness were around 13% for people younger than 40 but decreased with age to about 5% by the age of 80. They concluded based on analysis of a subgroup of people who throw left-handed but write right-handed that this age-dependence was primarily due to changing social acceptability of left-handedness. This means that the rates aren't a factor of <em>age</em> specifically but rather of the <em>year you were born</em>, and if the same study was done today, we should expect a shifted version of the same distribution as a function of age. Ultimately, we'll see what effect this changing rate has on the apparent mean age of death of left-handed people, but let's start by plotting the rates of left-handedness as a function of age.</p>
<p>This notebook uses two datasets: <a href="https://www.cdc.gov/nchs/data/statab/vs00199_table310.pdf">death distribution data</a> for the United States from the year 1999 (source website <a href="https://www.cdc.gov/nchs/nvss/mortality_tables.htm">here</a>) and rates of left-handedness digitized from a figure in this <a href="https://www.ncbi.nlm.nih.gov/pubmed/1528408">1992 paper by Gilbert and Wysocki</a>. </p>
```
# import libraries
# ... YOUR CODE FOR TASK 1 ...
import pandas as pd
import matplotlib.pyplot as plt
# load the data
data_url_1 = "https://gist.githubusercontent.com/mbonsma/8da0990b71ba9a09f7de395574e54df1/raw/aec88b30af87fad8d45da7e774223f91dad09e88/lh_data.csv"
lefthanded_data = pd.read_csv(data_url_1)
# plot male and female left-handedness rates vs. age
%matplotlib inline
fig, ax = plt.subplots() # create figure and axis objects
ax.plot("Age", "Female", data=lefthanded_data, marker = 'o') # plot "Female" vs. "Age"
ax.plot("Age", "Male", data=lefthanded_data, marker = 'x') # plot "Male" vs. "Age"
ax.legend() # add a legend
ax.set_xlabel("Sex")
ax.set_ylabel("Age")
```
## 2. Rates of left-handedness over time
<p>Let's convert this data into a plot of the rates of left-handedness as a function of the year of birth, and average over male and female to get a single rate for both sexes. </p>
<p>Since the study was done in 1986, the data after this conversion will be the percentage of people alive in 1986 who are left-handed as a function of the year they were born. </p>
```
# create a new column for birth year of each age (survey was done in 1986)
lefthanded_data["Birth_year"] = 1986 - lefthanded_data["Age"]
# create a new column for the average of male and female rates
lefthanded_data["Mean_lh"] = lefthanded_data[["Female","Male"]].mean(axis=1)
# create a plot of the 'Mean_lh' column vs. 'Birth_year'
fig, ax = plt.subplots()
ax.plot("Birth_year", "Mean_lh", data=lefthanded_data) # plot 'Mean_lh' vs. 'Birth_year'
# BUG FIX: x/y labels were swapped — x is the birth year, y is the mean rate.
ax.set_xlabel("Birth_year") # set the x label for the plot
ax.set_ylabel("Mean_lh") # set the y label for the plot
```
## 3. Applying Bayes' rule
<p><strong>Bayes' rule</strong> or <strong>Bayes' theorem</strong> is a statement about conditional probability which allows us to update our beliefs after seeing evidence. The probability of outcome or event A, given that outcome or event B has happened (or is true) is not the same as the probability of outcome B given that outcome A has happened. We need to take into account the <strong>prior</strong> probability that A has happened (the probability that A has happened is written P(A)). Bayes' rule can be written as follows:</p>
<p>$$P(A | B) = \frac{P(B|A) P(A)}{P(B)}$$</p>
<p>The quantity we ultimately want to calculate is the probability of dying at a particular age A, <em>given that</em> your family thinks you are left-handed. Let's write this in shorthand as P(A | LH). We also want the same quantity for right-handers: P(A | RH). As we go, we will figure out or approximate the other three quantities to find out what difference in age of death we might expect purely from the changing rates of left-handedness plotted above.</p>
<p>Here's Bayes' rule in the context of our discussion:</p>
<p>$$P(A | LH) = \frac{P(LH|A) P(A)}{P(LH)}$$</p>
<p>P(LH | A) is the probability that you are left-handed <em>given that</em> you died at age A. P(A) is the overall probability of dying at age A, and P(LH) is the overall probability of being left-handed. We will now calculate each of these three quantities, beginning with P(LH | A).</p>
<p>To calculate P(LH | A) for ages that might fall outside the original data, we will need to extrapolate the data to earlier and later years. Since the rates flatten out in the early 1900s and late 1900s, we'll use a few points at each end and take the mean to extrapolate the rates on each end. The number of points used for this is arbitrary, but we'll pick 10 since the data looks flat-ish until about 1910. </p>
```
# import library
# ... YOUR CODE FOR TASK 3 ...
import numpy as np
# create a function for P(LH | A)
def P_lh_given_A(ages_of_death, study_year = 1990):
    """ P(Left-handed | ages of death), calculated based on the reported rates of left-handedness.
    Inputs: numpy array of ages of death, study_year
    Returns: probability of left-handedness given that subjects died in `study_year` at ages `ages_of_death` """
    # NOTE(review): reads the module-level `lefthanded_data` frame and assumes it is
    # sorted by Age ascending, so the last 10 rows are the oldest respondents
    # (earliest birth years) and the first 10 rows the youngest — confirm if the data changes.
    # Use the mean of the 10 last and 10 first points for left-handedness rates before and after the start
    early_1900s_rate = lefthanded_data["Mean_lh"][-10:].mean()
    late_1900s_rate = lefthanded_data["Mean_lh"][:10].mean()
    # Rates for ages whose birth year (study_year - age) falls inside the surveyed range.
    middle_rates = lefthanded_data.loc[lefthanded_data['Birth_year'].isin(study_year - ages_of_death)]['Mean_lh']
    youngest_age = study_year - 1986 + 10 # the youngest age is 10
    oldest_age = study_year - 1986 + 86 # the oldest age is 86
    P_return = np.zeros(ages_of_death.shape) # create an empty array to store the results
    # extract rate of left-handedness for people of ages 'ages_of_death'
    # (source rates are percentages, hence the division by 100)
    P_return[ages_of_death > oldest_age] = early_1900s_rate/100
    P_return[ages_of_death < youngest_age] = late_1900s_rate/100
    # NOTE(review): assignment assumes `middle_rates` aligns one-to-one, in order,
    # with the in-range ages — holds when ages are consecutive and unique.
    P_return[np.logical_and((ages_of_death <= oldest_age), (ages_of_death >= youngest_age))] = middle_rates/100
    return P_return
```
## 4. When do people normally die?
<p>To estimate the probability of living to an age A, we can use data that gives the number of people who died in a given year and how old they were to create a distribution of ages of death. If we normalize the numbers to the total number of people who died, we can think of this data as a probability distribution that gives the probability of dying at age A. The data we'll use for this is from the entire US for the year 1999 - the closest I could find for the time range we're interested in. </p>
<p>In this block, we'll load in the death distribution data and plot it. The first column is the age, and the other columns are the number of people who died at that age. </p>
```
# Death distribution data for the United States in 1999
data_url_2 = "https://gist.githubusercontent.com/mbonsma/2f4076aab6820ca1807f4e29f75f18ec/raw/62f3ec07514c7e31f5979beeca86f19991540796/cdc_vs00199_table310.tsv"
# load death distribution data (tab-separated; skiprows drops the units row)
death_distribution_data = pd.read_csv(data_url_2, sep = "\t", skiprows=[1])
# drop NaN values from the `Both Sexes` column
death_distribution_data = death_distribution_data.dropna(subset = ["Both Sexes"])
# plot number of people who died as a function of age
fig, ax = plt.subplots()
ax.plot("Age", "Both Sexes", data = death_distribution_data, marker='o') # plot 'Both Sexes' vs. 'Age'
# BUG FIX: x/y labels were swapped — x is age, y is the number of deaths.
ax.set_xlabel("Age")
ax.set_ylabel("Both Sexes")
```
## 5. The overall probability of left-handedness
<p>In the previous code block we loaded data to give us P(A), and now we need P(LH). P(LH) is the probability that a person who died in our particular study year is left-handed, assuming we know nothing else about them. This is the average left-handedness in the population of deceased people, and we can calculate it by summing up all of the left-handedness probabilities for each age, weighted with the number of deceased people at each age, then divided by the total number of deceased people to get a probability. In equation form, this is what we're calculating, where N(A) is the number of people who died at age A (given by the dataframe <code>death_distribution_data</code>):</p>
<p><img src="https://i.imgur.com/gBIWykY.png" alt="equation" width="220"></p>
<!--- $$P(LH) = \frac{\sum_{\text{A}} P(LH | A) N(A)}{\sum_{\text{A}} N(A)}$$ -->
```
def P_lh(death_distribution_data, study_year = 1990): # sum over P_lh for each age group
    """ Overall probability of being left-handed if you died in the study year
    Input: dataframe of death distribution data, study year
    Output: P(LH), a single floating point number """
    deaths = death_distribution_data["Both Sexes"]
    # Weight each age's left-handedness rate by its death count, then normalise
    # by the total number of deceased people.
    weighted = deaths * P_lh_given_A(death_distribution_data["Age"], study_year)
    return np.sum(weighted) / np.sum(deaths)

print(P_lh(death_distribution_data, 1990))
```
## 6. Putting it all together: dying while left-handed (i)
<p>Now we have the means of calculating all three quantities we need: P(A), P(LH), and P(LH | A). We can combine all three using Bayes' rule to get P(A | LH), the probability of being age A at death (in the study year) given that you're left-handed. To make this answer meaningful, though, we also want to compare it to P(A | RH), the probability of being age A at death given that you're right-handed. </p>
<p>We're calculating the following quantity twice, once for left-handers and once for right-handers.</p>
<p>$$P(A | LH) = \frac{P(LH|A) P(A)}{P(LH)}$$</p>
<p>First, for left-handers.</p>
<!--Notice that I was careful not to call these "probability of dying at age A", since that's not actually what we're calculating: we use the exact same death distribution data for each. -->
```
def P_A_given_lh(ages_of_death, death_distribution_data, study_year = 1990):
    """ The overall probability of being a particular `age_of_death` given that you're left-handed """
    deaths = death_distribution_data["Both Sexes"]
    prior_age = deaths[ages_of_death] / np.sum(deaths)        # P(A)
    overall_lh = P_lh(death_distribution_data, study_year)    # P(LH)
    lh_given_age = P_lh_given_A(ages_of_death, study_year)    # P(LH | A)
    # Bayes' rule: P(A | LH) = P(LH | A) * P(A) / P(LH)
    return lh_given_age * prior_age / overall_lh
```
## 7. Putting it all together: dying while left-handed (ii)
<p>And now for right-handers.</p>
```
def P_A_given_rh(ages_of_death, death_distribution_data, study_year = 1990):
    """ The overall probability of being a particular `age_of_death` given that you're right-handed """
    deaths = death_distribution_data["Both Sexes"]
    prior_age = deaths[ages_of_death] / np.sum(deaths)           # P(A)
    overall_rh = 1 - P_lh(death_distribution_data, study_year)   # P(RH) = 1 - P(LH)
    rh_given_age = 1 - P_lh_given_A(ages_of_death, study_year)   # P(RH | A) = 1 - P(LH | A)
    # Bayes' rule: P(A | RH) = P(RH | A) * P(A) / P(RH)
    return rh_given_age * prior_age / overall_rh
```
## 8. Plotting the distributions of conditional probabilities
<p>Now that we have functions to calculate the probability of being age A at death given that you're left-handed or right-handed, let's plot these probabilities for a range of ages of death from 6 to 120. </p>
<p>Notice that the left-handed distribution has a bump below age 70: of the pool of deceased people, left-handed people are more likely to be younger. </p>
```
ages = np.arange(6, 120) # ages of death to evaluate
# Conditional age-at-death distributions for each handedness group.
left_handed_probability = P_A_given_lh(ages, death_distribution_data)
right_handed_probability = P_A_given_rh(ages, death_distribution_data)
# Plot both distributions against age on one pair of axes.
fig, ax = plt.subplots() # create figure and axis objects
for curve, curve_label in ((left_handed_probability, "Left-handed"),
                           (right_handed_probability, "Right-handed")):
    ax.plot(ages, curve, label = curve_label)
ax.legend() # add a legend
ax.set_xlabel("Age at death")
ax.set_ylabel(r"Probability of being age A at death")
```
## 9. Moment of truth: age of left and right-handers at death
<p>Finally, let's compare our results with the original study that found that left-handed people were nine years younger at death on average. We can do this by calculating the mean of these probability distributions in the same way we calculated P(LH) earlier, weighting the probability distribution by age and summing over the result.</p>
<p>$$\text{Average age of left-handed people at death} = \sum_A A P(A | LH)$$</p>
<p>$$\text{Average age of right-handed people at death} = \sum_A A P(A | RH)$$</p>
```
# calculate average ages for left-handed and right-handed groups
# use np.array so that two arrays can be multiplied
average_lh_age = np.nansum(ages*np.array(left_handed_probability))
average_rh_age = np.nansum(ages*np.array(right_handed_probability))
# print the average ages for each group
# BUG FIX: the label previously ran straight into the number
# ("...lefthanded67.2"); add a separator so the output is readable.
print("Average age of lefthanded: " + str(average_lh_age))
print("Average age of righthanded: " + str(average_rh_age))
# print the difference between the average ages
print("The difference in average ages is " + str(round(average_lh_age - average_rh_age, 1)) + " years.")
```
## 10. Final comments
<p>We got a pretty big age gap between left-handed and right-handed people purely as a result of the changing rates of left-handedness in the population, which is good news for left-handers: you probably won't die young because of your sinisterness. The reported rates of left-handedness have increased from just 3% in the early 1900s to about 11% today, which means that older people are much more likely to be reported as right-handed than left-handed, and so looking at a sample of recently deceased people will have more old right-handers.</p>
<p>Our number is still less than the 9-year gap measured in the study. It's possible that some of the approximations we made are the cause: </p>
<ol>
<li>We used death distribution data from almost ten years after the study (1999 instead of 1991), and we used death data from the entire United States instead of California alone (which was the original study). </li>
<li>We extrapolated the left-handedness survey results to older and younger age groups, but it's possible our extrapolation wasn't close enough to the true rates for those ages. </li>
</ol>
<p>One thing we could do next is figure out how much variability we would expect to encounter in the age difference purely because of random sampling: if you take a smaller sample of recently deceased people and assign handedness with the probabilities of the survey, what does that distribution look like? How often would we encounter an age gap of nine years using the same data and assumptions? We won't do that here, but it's possible with this data and the tools of random sampling. </p>
<!-- I did do this if we want to add more tasks - it would probably take three more blocks.-->
<p>To finish off, let's calculate the age gap we'd expect if we did the study in 2018 instead of in 1990. The gap turns out to be much smaller since rates of left-handedness haven't increased for people born after about 1960. Both the National Geographic study and the 1990 study happened at a unique time - the rates of left-handedness had been changing across the lifetimes of most people alive, and the difference in handedness between old and young was at its most striking. </p>
```
# Calculate the probability of being left- or right-handed for all ages,
# as if the study had been run in 2018.
left_handed_probability_2018 = P_A_given_lh(ages, death_distribution_data, 2018)
right_handed_probability_2018 = P_A_given_rh(ages, death_distribution_data, 2018)
# calculate average ages for left-handed and right-handed groups
average_lh_age_2018 = np.nansum(ages*np.array(left_handed_probability_2018))
average_rh_age_2018 = np.nansum(ages*np.array(right_handed_probability_2018))
# print the average ages for each group
# BUG FIX: add a separator so the label does not run into the number.
print("Average age of lefthanded: " + str(average_lh_age_2018))
print("Average age of righthanded: " + str(average_rh_age_2018))
print("The difference in average ages is " +
      str(round(average_lh_age_2018 - average_rh_age_2018, 1)) + " years.")
```
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Challenge Notebook
## Problem: Given a list of stock prices on each consecutive day, determine the max profits with k transactions.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
* [Solution Notebook](#Solution-Notebook)
## Constraints
* Is k the number of sell transactions?
* Yes
* Can we assume the prices input is an array of ints?
* Yes
* Can we assume the inputs are valid?
* No
* If the prices are all decreasing and there is no opportunity to make a profit, do we just return 0?
* Yes
* Should the output be the max profit and days to buy and sell?
* Yes
* Can we assume this fits memory?
* Yes
## Test Cases
<pre>
* Prices: None or k: None -> None
* Prices: [] or k <= 0 -> []
* Prices: [0, -1, -2, -3, -4, -5]
* (max profit, list of transactions)
* (0, [])
* Prices: [2, 5, 7, 1, 4, 3, 1, 3] k: 3
* (max profit, list of transactions)
* (10, [Type.SELL day: 7 price: 3,
Type.BUY day: 6 price: 1,
Type.SELL day: 4 price: 4,
Type.BUY day: 3 price: 1,
Type.SELL day: 2 price: 7,
Type.BUY day: 0 price: 2])
</pre>
## Algorithm
Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
## Code
```
from enum import Enum # Python 2 users: Run pip install enum34
class Type(Enum):
    # Transaction direction: SELL closes a position, BUY opens one.
    SELL = 0
    BUY = 1
class Transaction(object):
    """A single buy or sell of one share on a given day.

    Attributes:
        type: Type.BUY or Type.SELL.
        day: index of the day the transaction occurs on.
        price: stock price on that day.
    """

    def __init__(self, type, day, price):
        self.type = type
        self.day = day
        self.price = price

    def __eq__(self, other):
        # Robustness fix: comparing against a non-Transaction previously raised
        # AttributeError; returning NotImplemented lets Python fall back to its
        # default handling (e.g. `txn == "x"` is simply False).
        if not isinstance(other, Transaction):
            return NotImplemented
        return self.type == other.type and \
            self.day == other.day and \
            self.price == other.price

    def __repr__(self):
        return str(self.type) + ' day: ' + \
            str(self.day) + ' price: ' + \
            str(self.price)
class StockTrader(object):
    def find_max_profit(self, prices, k):
        """Return the max profit achievable with at most `k` sell transactions.

        prices: list of ints, the stock price on each consecutive day.
        k: maximum number of sell transactions.
        Returns: (max_profit, list of Transaction), per the test cases above.
        Raises: TypeError for None inputs (see unit test below).
        """
        # TODO: Implement me
        pass
```
## Unit Test
**The following unit test is expected to fail until you solve the challenge.**
```
# %load test_max_profit.py
import unittest
class TestMaxProfit(unittest.TestCase):
    """Exercises StockTrader.find_max_profit against the documented test cases."""

    def test_max_profit(self):
        stock_trader = StockTrader()
        # None inputs are invalid and must raise.
        self.assertRaises(TypeError, stock_trader.find_max_profit, None, None)
        # Empty prices / k == 0 -> empty result.
        self.assertEqual(stock_trader.find_max_profit(prices=[], k=0), [])
        # Strictly decreasing prices -> zero profit, no transactions.
        prices = [5, 4, 3, 2, 1]
        k = 3
        self.assertEqual(stock_trader.find_max_profit(prices, k), (0, []))
        # General case (k reused from above): expect profit of 10 via the
        # three buy/sell pairs asserted below.
        prices = [2, 5, 7, 1, 4, 3, 1, 3]
        profit, transactions = stock_trader.find_max_profit(prices, k)
        self.assertEqual(profit, 10)
        self.assertTrue(Transaction(Type.SELL,
                                    day=7,
                                    price=3) in transactions)
        self.assertTrue(Transaction(Type.BUY,
                                    day=6,
                                    price=1) in transactions)
        self.assertTrue(Transaction(Type.SELL,
                                    day=4,
                                    price=4) in transactions)
        self.assertTrue(Transaction(Type.BUY,
                                    day=3,
                                    price=1) in transactions)
        self.assertTrue(Transaction(Type.SELL,
                                    day=2,
                                    price=7) in transactions)
        self.assertTrue(Transaction(Type.BUY,
                                    day=0,
                                    price=2) in transactions)
        print('Success: test_max_profit')
def main():
    """Run the challenge test directly (notebook context; no unittest runner)."""
    test = TestMaxProfit()
    test.test_max_profit()

if __name__ == '__main__':
    main()
```
## Solution Notebook
Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| github_jupyter |
# Address Segmentation
Conversion of address points into segmented address ranges along a road network.
**Notes:** The following guide assumes data has already been preprocessed including data scrubbing and filtering.
```
import contextily as ctx
import geopandas as gpd
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import shapely
from bisect import bisect
from collections import OrderedDict
from IPython.display import display_html
from matplotlib_scalebar.scalebar import ScaleBar
from operator import itemgetter
from shapely.geometry import LineString, Point
# Define index of example roadseg segment.
ex_idx = 264  # roadseg row used for the zoomed-in example plots below
ex_place = "City of Yellowknife"
# Define join fields.
join_roadseg = "roadname"  # street-name column in the roadseg layer
join_addresses = "street"  # street-name column in the addresses layer
# Define helper functions.
def groupby_to_list(df, group_field, list_field):
    """
    Helper function: faster alternative to pandas groupby.apply/agg(list).
    Groups records by one or more fields and compiles an output field into a list for each group.

    df: input (Geo)DataFrame.
    group_field: a column name, or a list of column names, to group by.
    list_field: the column whose values are collected into a list per group.
    Returns: pd.Series of lists, indexed by the unique group key(s).
    """
    if isinstance(group_field, list):
        # Cast non-geometry key columns to unicode so np.unique can compare rows.
        for field in group_field:
            if df[field].dtype.name != "geometry":
                df[field] = df[field].astype("U")
        transpose = df.sort_values(group_field)[[*group_field, list_field]].values.T
        keys, vals = np.column_stack(transpose[:-1]), transpose[-1]
        # FIX: `isinstance(keys, np.object)` was always True (np.object is just the
        # builtin `object`) and the np.object alias was removed in NumPy 1.24;
        # check the array's dtype instead.
        keys_unique, keys_indexes = np.unique(keys.astype("U") if keys.dtype == object else keys,
                                              axis=0, return_index=True)
    else:
        keys, vals = df.sort_values(group_field)[[group_field, list_field]].values.T
        keys_unique, keys_indexes = np.unique(keys, return_index=True)
    # Rows are sorted by key, so splitting at each key's first-occurrence index
    # yields one contiguous array of values per group.
    vals_arrays = np.split(vals, keys_indexes[1:])
    return pd.Series([list(vals_array) for vals_array in vals_arrays], index=keys_unique).copy(deep=True)
```
## Step 1. Load dataframes and configure attributes
Loads dataframes into geopandas and separates address numbers and suffixes, if required.
```
# Load dataframes.
addresses = gpd.read_file("C:/scratch/City_Of_Yellowknife.gpkg", layer="addresses")
roadseg = gpd.read_file("C:/scratch/City_Of_Yellowknife.gpkg", layer="roads")
# Configure attributes - number and suffix.
# Split civic numbers such as "101A" into a suffix ("A") and a numeric part (101).
addresses["suffix"] = addresses["number"].map(lambda val: re.sub(pattern="\\d+", repl="", string=val, flags=re.I))
addresses["number"] = addresses["number"].map(lambda val: re.sub(pattern="[^\\d]", repl="", string=val, flags=re.I)).map(int)
addresses.head()
roadseg.head()
```
## Preview data
**Note:** this code block is for visual purposes only.
```
# Fetch basemaps.
# Note: basemaps are retrieved in EPSG:3857 and, therefore, dataframes should also use this crs.
basemaps = list()
basemaps.append(ctx.bounds2img(*roadseg.total_bounds, ll=False, source=ctx.providers.Esri.WorldImagery))
basemaps.append(ctx.bounds2img(*roadseg.loc[roadseg.index==ex_idx].total_bounds, ll=False,
                               source=ctx.providers.Esri.WorldImagery))
# Configure local positional distortion (for scalebar dx parameter).
# cos(latitude) at the example segment's mid-latitude — presumably corrects the
# Web Mercator distance stretch for the scalebar; confirm against ScaleBar docs.
ymin, ymax = itemgetter(1, 3)(roadseg[roadseg.index==ex_idx].to_crs("EPSG:4617").total_bounds)
lat = ymin + ((ymax - ymin) / 2)
dx = math.cos(math.radians(lat))
# Create data for viewing: the first vertex of the example road segment.
starting_pt = gpd.GeoDataFrame(geometry=[Point(roadseg.loc[roadseg.index==ex_idx]["geometry"].iloc[0].coords[0])],
                               crs=addresses.crs)
# Configure plots: left panel shows everything, right panel zooms on ex_idx.
fig, ax = plt.subplots(1, 2, figsize=(12, 7), tight_layout=True)
for plt_idx, title in enumerate(["All Data", f"roadseg={ex_idx}"]):
    ax[plt_idx].imshow(basemaps[plt_idx][0], extent=basemaps[plt_idx][1])
    if plt_idx == 0:
        addresses.plot(ax=ax[plt_idx], color="red", label="addresses", markersize=2)
        roadseg.plot(ax=ax[plt_idx], color="cyan", label="roadseg", linewidth=1)
    else:
        addresses.plot(ax=ax[plt_idx], color="red", label="addresses", linewidth=2)
        starting_pt.plot(ax=ax[plt_idx], color="gold", label=f"roadseg={ex_idx}, 1st point", linewidth=2)
        roadseg.loc[roadseg.index==ex_idx].plot(ax=ax[plt_idx], color="yellow", label=f"roadseg={ex_idx}", linewidth=2)
        roadseg.loc[roadseg.index!=ex_idx].plot(ax=ax[plt_idx], color="cyan", label="roadseg", linewidth=1)
    ax[plt_idx].add_artist(ScaleBar(dx=dx, units="m", location="lower left", pad=0.5, color="black"))
    # Hide axis ticks; constrain the view to the fetched basemap extent.
    ax[plt_idx].axes.xaxis.set_visible(False)
    ax[plt_idx].axes.yaxis.set_visible(False)
    ax[plt_idx].set_title(title, fontsize=12)
    ax[plt_idx].set_xlim(itemgetter(0, 1)(basemaps[plt_idx][1]))
    ax[plt_idx].set_ylim(itemgetter(2, 3)(basemaps[plt_idx][1]))
plt.suptitle(ex_place, fontsize=12)
plt.legend(loc="center left", bbox_to_anchor=(1.0, 0.5), fontsize=12)
plt.show()
```
## Step 2. Configure address to roadseg linkages
Links addresses to the nearest, matching road segment.
```
# Link addresses and roadseg on join fields.
addresses["addresses_index"] = addresses.index
roadseg["roadseg_index"] = roadseg.index
# Left-join on street name; each address may match several road segments.
merge = addresses.merge(roadseg[[join_roadseg, "roadseg_index"]], how="left", left_on=join_addresses, right_on=join_roadseg)
addresses["roadseg_index"] = groupby_to_list(merge, "addresses_index", "roadseg_index")
addresses.drop(columns=["addresses_index"], inplace=True)
roadseg.drop(columns=["roadseg_index"], inplace=True)
# Discard non-linked addresses.
# (an unmatched left join leaves NaN as the only element of the linkage list)
addresses.drop(addresses[addresses["roadseg_index"].map(itemgetter(0)).isna()].index, axis=0, inplace=True)
# Convert linkages to integer tuples, if possible.
def as_int(val):
    """Coerce `val` to int when possible; otherwise return it unchanged."""
    try:
        result = int(val)
    except ValueError:
        result = val
    return result
# De-duplicate each address's linkages into a tuple of (ideally integer) indexes.
# NOTE(review): set() does not preserve linkage order — downstream code must not
# rely on ordering; confirm.
addresses["roadseg_index"] = addresses["roadseg_index"].map(lambda vals: tuple(set(map(as_int, vals))))
addresses.head()
# Reduce linkages to one roadseg index per address.
# Configure roadseg geometry lookup dictionary.
roadseg_geom_lookup = roadseg["geometry"].to_dict()
def get_nearest_linkage(pt, roadseg_indexes):
    """Returns the roadseg index whose geometry lies closest to the given address point."""
    # Resolve the candidate geometries from the module-level lookup dictionary.
    geometries = itemgetter(*roadseg_indexes)(roadseg_geom_lookup)
    # Distance from the address point to each candidate road segment.
    distances = tuple(pt.distance(geometry) for geometry in geometries)
    # Keep the index associated with the minimum distance.
    return roadseg_indexes[distances.index(min(distances))]
# Flag plural linkages.
flag_plural = addresses["roadseg_index"].map(len) > 1
# Reduce plural linkages to the road segment with the lowest (nearest) geometric distance.
addresses.loc[flag_plural, "roadseg_index"] = addresses[flag_plural][["geometry", "roadseg_index"]].apply(
lambda row: get_nearest_linkage(*row), axis=1)
# Unpack first tuple element for singular linkages.
addresses.loc[~flag_plural, "roadseg_index"] = addresses[~flag_plural]["roadseg_index"].map(itemgetter(0))
# Compile linked roadseg geometry for each address.
addresses["roadseg_geometry"] = addresses.merge(
roadseg["geometry"], how="left", left_on="roadseg_index", right_index=True)["geometry_y"]
addresses.head()
```
## Step 3. Configure address parity
Computes address-roadseg parity (left / right side).
```
def get_parity(pt, vector):
    """
    Returns "l" or "r" for the side of the roadseg vector on which the address point lies.

    The side comes from the 2D cross product (determinant) of the segment
    direction and the segment-start-to-point direction: a positive determinant
    means the point is left of the vector; anything else (negative, or exactly
    on the line) is treated as the right side.
    """
    # Only the x/y components are used; coordinates may carry extra dimensions.
    x0, y0 = vector[0][0], vector[0][1]
    x1, y1 = vector[1][0], vector[1][1]
    det = (x1 - x0) * (pt.y - y0) - (y1 - y0) * (pt.x - x0)
    return "l" if np.sign(det) == 1 else "r"
def get_road_vector(pt, segment):
    """
    Returns the following:
    a) the distance of the address intersection along the road segment.
    b) the vector comprised of the road segment coordinates immediately before and after the address
    intersection point.

    :param pt: shapely Point - the address-roadseg intersection point (assumed to lie on `segment`).
    :param segment: shapely LineString - the road segment geometry linked to the address.
    :return: tuple of (intersection distance along segment, 2-tuple of coordinate tuples).
    """
    # For all road segment points and the intersection point, calculate the distance along the road segment.
    # Note: always use the length as the distance for the last point to avoid distance=0 for looped roads.
    node_distance = (*map(lambda coord: segment.project(Point(coord)), segment.coords[:-1]), segment.length)
    intersection_distance = segment.project(pt)
    # Compute the index of the intersection point within the road segment points, based on distances.
    # bisect returns the insertion position keeping node_distance sorted, i.e. the count of node
    # distances <= intersection_distance, so it is 1-based relative to segment.coords.
    intersection_index = bisect(node_distance, intersection_distance)
    # Conditionally compile the road segment points, as a vector, immediately bounding the intersection point.
    # Intersection matches a pre-existing road segment point.
    if intersection_distance in node_distance:
        # Intersection matches the first road segment point: use the first edge of the segment.
        if intersection_index == 1:
            vector = itemgetter(intersection_index - 1, intersection_index)(segment.coords)
        # Intersection matches the last road segment point: use the last edge of the segment.
        elif intersection_index == len(node_distance):
            vector = itemgetter(intersection_index - 2, intersection_index - 1)(segment.coords)
        # Intersection matches an interior road segment point: span the two adjacent edges,
        # skipping the matched point itself.
        else:
            vector = itemgetter(intersection_index - 2, intersection_index)(segment.coords)
    # Intersection matches no pre-existing road segment point: use the edge it falls within.
    else:
        vector = itemgetter(intersection_index - 1, intersection_index)(segment.coords)
    return intersection_distance, vector
# Get point of intersection between each address and the linked road segment.
addresses["intersection"] = addresses[["geometry", "roadseg_geometry"]].apply(
lambda row: itemgetter(-1)(shapely.ops.nearest_points(*row)), axis=1)
# Get the following:
# a) the distance of the intersection point along the linked road segment.
# b) the road segment vector which bounds the intersection point.
# i.e. vector formed by the coordinates immediately before and after the intersection point.
results = addresses[["intersection", "roadseg_geometry"]].apply(lambda row: get_road_vector(*row), axis=1)
addresses["distance"] = results.map(itemgetter(0))
addresses["roadseg_vector"] = results.map(itemgetter(1))
# Get address parity.
addresses["parity"] = addresses[["geometry", "roadseg_vector"]].apply(
lambda row: get_parity(*row), axis=1)
addresses[["geometry", "roadseg_geometry", "intersection", "distance", "roadseg_vector", "parity"]].head()
```
## View relationship between parity variables
View the relationship between address points, bounding roadseg vectors, address-roadseg intersection points, and the computed parity.
**Note:** this code block is for visual purposes only.
```
# Create geometries for viewing.
bounding_vectors = gpd.GeoDataFrame(geometry=addresses["roadseg_vector"].map(LineString), crs=addresses.crs)
intersection = gpd.GeoDataFrame(addresses["parity"], geometry=addresses[["geometry", "intersection"]].apply(
lambda row: LineString([pt.coords[0][:2] for pt in row]), axis=1), crs=addresses.crs)
# Configure plots.
fig, ax = plt.subplots(1, 2, figsize=(14.5, 7), tight_layout=True)
for plt_idx, title in enumerate(["Parity Input", "Parity Output"]):
ax[plt_idx].imshow(basemaps[1][0], extent=basemaps[1][1])
addresses.plot(ax=ax[plt_idx], color="red", label="addresses", linewidth=2)
starting_pt.plot(ax=ax[plt_idx], color="gold", label=f"roadseg={ex_idx}, 1st point", linewidth=2)
roadseg.loc[roadseg.index==ex_idx].plot(ax=ax[plt_idx], color="yellow", label=f"roadseg={ex_idx}", linewidth=2)
roadseg.loc[roadseg.index!=ex_idx].plot(ax=ax[plt_idx], color="cyan", label="roadseg", linewidth=1)
if plt_idx == 0:
intersection.plot(ax=ax[plt_idx], color="orange", label="address-roadseg intersection", linewidth=2)
bounding_vectors.plot(ax=ax[plt_idx], color="magenta", label="bounding roadseg vectors", linewidth=2)
else:
intersection.loc[intersection["parity"]=="l"].plot(
ax=ax[plt_idx], color="blue", label="address-roadseg intersection (left)", linewidth=2)
intersection.loc[intersection["parity"]=="r"].plot(
ax=ax[plt_idx], color="lime", label="address-roadseg intersection (right)", linewidth=2)
ax[plt_idx].add_artist(ScaleBar(dx=dx, units="m", location="lower left", pad=0.5, color="black"))
ax[plt_idx].axes.xaxis.set_visible(False)
ax[plt_idx].axes.yaxis.set_visible(False)
ax[plt_idx].set_title(title, fontsize=12)
ax[plt_idx].set_xlim(itemgetter(0, 1)(basemaps[1][1]))
ax[plt_idx].set_ylim(itemgetter(2, 3)(basemaps[1][1]))
plt.suptitle(ex_place, fontsize=12)
legend_icons = list()
legend_labels = list()
for axis in ax:
legend_items = list(zip(*[items for items in zip(*axis.get_legend_handles_labels()) if items[1] not in legend_labels]))
legend_icons.extend(legend_items[0])
legend_labels.extend(legend_items[1])
plt.legend(legend_icons, legend_labels, loc="center left", bbox_to_anchor=(1.0, 0.5), fontsize=12)
plt.show()
```
## Step 4. Configure address ranges (addrange) and attributes
Groups addresses into ranges then computes the addrange attributes.
```
def get_digdirfg(sequence):
    """Derives the digdirfg (digitizing direction) attribute from an address number sequence."""
    seq = list(sequence)
    # A single address has no meaningful direction.
    if len(seq) == 1:
        return "Not Applicable"
    # Ascending order mirrors the digitizing direction of the road segment.
    return "Same Direction" if seq == sorted(seq) else "Opposite Direction"
def get_hnumstr(sequence):
    """Derives the hnumstr (house number structure) attribute from an address number sequence."""
    seq = list(sequence)
    # A single address: structure is just its own parity.
    if len(seq) == 1:
        return "Even" if seq[0] % 2 == 0 else "Odd"
    # Sequences that are neither ascending nor descending are irregular.
    if seq != sorted(seq) and seq != sorted(seq, reverse=True):
        return "Irregular"
    # Classify a sorted range by the parity of its members.
    evens = [number % 2 == 0 for number in seq]
    if all(evens):
        return "Even"
    if not any(evens):
        return "Odd"
    return "Mixed"
def get_number_sequence(addresses):
    """Returns the ordered, de-duplicated number sequence for the given address tuples."""
    # Separate (number, suffix, distance) address tuples into parallel components.
    numbers, suffixes, distances = tuple(zip(*addresses))
    # Keep only the first address found at any duplicated intersection distance.
    if len(set(distances)) == len(distances):
        sequence = numbers
    else:
        frame = pd.DataFrame({"number": numbers, "suffix": suffixes, "distance": distances})
        sequence = frame.drop_duplicates(subset="distance", keep="first")["number"].to_list()
    # Drop repeated numbers while preserving first-seen order.
    return list(dict.fromkeys(sequence))
def sort_addresses(numbers, suffixes, distances):
    """
    Sorts the addresses successively by:
    1) distance - the distance of the intersection point along the road segment.
    2) number
    3) suffix
    Taking into account the directionality of the addresses relative to the road segment.

    :param numbers: iterable of address numbers.
    :param suffixes: iterable of address number suffixes (parallel to numbers).
    :param distances: iterable of intersection distances along the road segment (parallel to numbers).
    :return: tuple of sorted (number, suffix, distance) address tuples.
    """
    # Create individual address tuples from separated address components.
    addresses = tuple(zip(numbers, suffixes, distances))
    # Apply initial sorting, by distance, to identify address directionality:
    # house numbers decreasing along the segment indicate opposite directionality.
    addresses_sorted = sorted(addresses, key=itemgetter(2))
    directionality = -1 if addresses_sorted[0][0] > addresses_sorted[-1][0] else 1
    # Sort addresses - same direction.
    # Fix: key is (distance, number, suffix) = itemgetter(2, 0, 1), matching the documented
    # precedence and the opposite-direction branch below; the previous key (2, 1, 0)
    # incorrectly ranked suffix ahead of number.
    if directionality == 1:
        return tuple(sorted(addresses, key=itemgetter(2, 0, 1)))
    # Sort addresses - opposite direction: distance ascending remains primary, with
    # number then suffix descending within equal distances.
    else:
        return tuple(sorted(sorted(sorted(
            addresses, key=itemgetter(1), reverse=True),
            key=itemgetter(0), reverse=True),
            key=itemgetter(2)))
```
### Step 4.1. Group and sort addresses
Groups addresses by roadseg index and parity and sorts each grouping prior to configuring addrange attributes.
```
# Split address dataframe by parity.
addresses_l = addresses[addresses["parity"] == "l"].copy(deep=True)
addresses_r = addresses[addresses["parity"] == "r"].copy(deep=True)
# Create dataframes from grouped addresses.
cols = ("number", "suffix", "distance")
addresses_l = pd.DataFrame({col: groupby_to_list(addresses_l, "roadseg_index", col) for col in cols})
addresses_r = pd.DataFrame({col: groupby_to_list(addresses_r, "roadseg_index", col) for col in cols})
# Sort addresses.
addresses_l = addresses_l.apply(lambda row: sort_addresses(*row), axis=1)
addresses_r = addresses_r.apply(lambda row: sort_addresses(*row), axis=1)
```
### View example address grouping
**Note:** this code block is for visual purposes only.
```
# View data.
vals_l = list(zip(*addresses_l.loc[addresses_l.index==ex_idx].iloc[0]))
vals_r = list(zip(*addresses_r.loc[addresses_r.index==ex_idx].iloc[0]))
cols = ("number", "suffix", "distance")
left = pd.DataFrame({("Left Parity", cols[idx]): vals for idx, vals in enumerate(vals_l)})
right = pd.DataFrame({("Right Parity", cols[idx]): vals for idx, vals in enumerate(vals_r)})
display_html(left.style.set_table_attributes("style='display:inline'")._repr_html_()+
"<pre style='display:inline'> </pre>"+
right.style.set_table_attributes("style='display:inline'")._repr_html_(), raw=True)
```
### Step 4.2. Configure addrange attributes
```
# Configure addrange attributes.
addrange = pd.DataFrame(index=map(int, {*addresses_l.index, *addresses_r.index}))
# Configure addrange attributes - hnumf, hnuml.
addrange.loc[addresses_l.index, "l_hnumf"] = addresses_l.map(lambda addresses: addresses[0][0])
addrange.loc[addresses_l.index, "l_hnuml"] = addresses_l.map(lambda addresses: addresses[-1][0])
addrange.loc[addresses_r.index, "r_hnumf"] = addresses_r.map(lambda addresses: addresses[0][0])
addrange.loc[addresses_r.index, "r_hnuml"] = addresses_r.map(lambda addresses: addresses[-1][0])
# Configuring addrange attributes - hnumsuff, hnumsufl.
addrange.loc[addresses_l.index, "l_hnumsuff"] = addresses_l.map(lambda addresses: addresses[0][1])
addrange.loc[addresses_l.index, "l_hnumsufl"] = addresses_l.map(lambda addresses: addresses[-1][1])
addrange.loc[addresses_r.index, "r_hnumsuff"] = addresses_r.map(lambda addresses: addresses[0][1])
addrange.loc[addresses_r.index, "r_hnumsufl"] = addresses_r.map(lambda addresses: addresses[-1][1])
# Configuring addrange attributes - hnumtypf, hnumtypl.
addrange.loc[addresses_l.index, "l_hnumtypf"] = addresses_l.map(lambda addresses: "Actual Located")
addrange.loc[addresses_l.index, "l_hnumtypl"] = addresses_l.map(lambda addresses: "Actual Located")
addrange.loc[addresses_r.index, "r_hnumtypf"] = addresses_r.map(lambda addresses: "Actual Located")
addrange.loc[addresses_r.index, "r_hnumtypl"] = addresses_r.map(lambda addresses: "Actual Located")
# Get address number sequence.
address_sequence_l = addresses_l.map(get_number_sequence)
address_sequence_r = addresses_r.map(get_number_sequence)
# Configure addrange attributes - hnumstr.
addrange.loc[addresses_l.index, "l_hnumstr"] = address_sequence_l.map(get_hnumstr)
addrange.loc[addresses_r.index, "r_hnumstr"] = address_sequence_r.map(get_hnumstr)
# Configure addrange attributes - digdirfg.
addrange.loc[addresses_l.index, "l_digdirfg"] = address_sequence_l.map(get_digdirfg)
addrange.loc[addresses_r.index, "r_digdirfg"] = address_sequence_r.map(get_digdirfg)
```
## Step 5. Merge addrange attributes with roadseg
```
# Merge addrange attributes with roadseg.
roadseg = roadseg.merge(addrange, how="left", left_index=True, right_index=True)
```
## View Results
**Note:** this code block is for visual purposes only.
```
# Create data for viewing.
addresses_filtered = addresses.loc[addresses["roadseg_index"]==ex_idx]
labels = addresses_filtered[["number", "suffix", "geometry", "parity"]].apply(
lambda row: (f"{row[0]}{row[1]}", row[2].x, row[2].y, row[3]), axis=1)
# Configure plots.
fig, ax = plt.subplots(1, 1, figsize=(6, 7), tight_layout=False)
ax.imshow(basemaps[1][0], extent=basemaps[1][1])
addresses_filtered.loc[addresses_filtered["parity"]=="l"].plot(ax=ax, color="blue", label="addresses (left)", linewidth=2)
addresses_filtered.loc[addresses_filtered["parity"]=="r"].plot(ax=ax, color="lime", label="addresses(right)", linewidth=2)
starting_pt.plot(ax=ax, color="gold", label=f"roadseg={ex_idx}, 1st point", linewidth=2)
roadseg.loc[roadseg.index==ex_idx].plot(ax=ax, color="yellow", label=f"roadseg={ex_idx}", linewidth=2)
roadseg.loc[roadseg.index!=ex_idx].plot(ax=ax, color="cyan", label="roadseg", linewidth=1)
ax.add_artist(ScaleBar(dx=dx, units="m", location="lower left", pad=0.5, color="black"))
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
ax.set_title("Parity Output", fontsize=12)
ax.set_xlim(itemgetter(0, 1)(basemaps[1][1]))
ax.set_ylim(itemgetter(2, 3)(basemaps[1][1]))
for label_params in labels:
label, x, y, parity = label_params
if parity == "l":
kwargs = {"xytext": (x-10, y+10), "ha": "right"}
else:
kwargs = {"xytext": (x+10, y+10), "ha": "left"}
plt.annotate(label, xy=(x, y), textcoords="data", va="bottom", fontsize=10, color="red", fontweight="bold",
bbox=dict(pad=0.3, fc="black"), **kwargs)
plt.legend(loc="center left", bbox_to_anchor=(1.0, 0.5), fontsize=12)
plt.savefig("temp.png", bbox_inches='tight', pad_inches=0)
plt.close()
display_html(f"""
<div class=\"container\" style=\"width:100%;\">
<img src=\"temp.png\" style=\"float:left;max-width:59%;\">
{pd.DataFrame(roadseg.loc[roadseg.index==ex_idx].iloc[0]).style.set_table_styles([
{'selector': '', 'props': [('float', 'right'), ('width', '40%')]},
{'selector': 'td', 'props': [('overflow', 'hidden'), ('text-overflow', 'ellipsis'), ('white-space', 'nowrap')]}
])._repr_html_()}
</div>
""", raw=True)
```
| github_jupyter |
<img src="https://raw.githubusercontent.com/dask/dask/main/docs/source/images/dask_horizontal_no_pad.svg"
width="30%"
alt="Dask logo" />
# Parallel and Distributed Machine Learning
The material in this notebook was based on the open-source content from [Dask's tutorial repository](https://github.com/dask/dask-tutorial) and the [Machine learning notebook](https://github.com/coiled/data-science-at-scale/blob/master/3-machine-learning.ipynb) from data science at scale from coiled
So far we have seen how Dask makes data analysis scalable with parallelization via Dask DataFrames. Let's now see how [Dask-ML](https://ml.dask.org/) allows us to do machine learning in a parallel and distributed manner. Note, machine learning is really just a special case of data analysis (one that automates analytical model building), so the 💪 Dask gains 💪 we've seen will apply here as well!
(If you'd like a refresher on the difference between parallel and distributed computing, [here's a good discussion on StackExchange](https://cs.stackexchange.com/questions/1580/distributed-vs-parallel-computing).)
## Types of scaling problems in machine learning
There are two main types of scaling challenges you can run into in your machine learning workflow: scaling the **size of your data** and scaling the **size of your model**. That is:
1. **CPU-bound problems**: Data fits in RAM, but training takes too long. Many hyperparameter combinations, a large ensemble of many models, etc.
2. **Memory-bound problems**: Data is larger than RAM, and sampling isn't an option.
Here's a handy diagram for visualizing these problems:
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/dimensions_of_scale.svg"
width="60%"
alt="scaling problems" />
In the bottom-left quadrant, your datasets are not too large (they fit comfortably in RAM) and your model is not too large either. When these conditions are met, you are much better off using something like scikit-learn, XGBoost, and similar libraries. You don't need to leverage multiple machines in a distributed manner with a library like Dask-ML. However, if you are in any of the other quadrants, distributed machine learning is the way to go.
Summarizing:
* For in-memory problems, just use scikit-learn (or your favorite ML library).
* For large models, use `dask_ml.joblib` and your favorite scikit-learn estimator.
* For large datasets, use `dask_ml` estimators.
## Scikit-learn in five minutes
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/scikit_learn_logo_small.svg"
width="30%"
alt="sklearn logo" />
In this section, we'll quickly run through a typical scikit-learn workflow:
* Load some data (in this case, we'll generate it)
* Import the scikit-learn module for our chosen ML algorithm
* Create an estimator for that algorithm and fit it with our data
* Inspect the learned attributes
* Check the accuracy of our model
Scikit-learn has a nice, consistent API:
* You instantiate an `Estimator` (e.g. `LinearRegression`, `RandomForestClassifier`, etc.). All of the models *hyperparameters* (user-specified parameters, not the ones learned by the estimator) are passed to the estimator when it's created.
* You call `estimator.fit(X, y)` to train the estimator.
* Use `estimator` to inspect attributes, make predictions, etc.
Here `X` is an array of *feature variables* (what you're using to predict) and `y` is an array of *target variables* (what we're trying to predict).
### Generate some random data
```
from sklearn.datasets import make_classification
# Generate data
X, y = make_classification(n_samples=10000, n_features=4, random_state=0)
```
**Refreshing some ML concepts**
- `X` is the samples matrix (or design matrix). The size of `X` is typically (`n_samples`, `n_features`), which means that samples are represented as rows and features are represented as columns.
- A "feature" (also called an "attribute") is a measurable property of the phenomenon we're trying to analyze. A feature for a dataset of employees might be their hire date, for example.
- `y` are the target values, which are real numbers for regression tasks, or integers for classification (or any other discrete set of values). For unsupervised learning tasks, `y` does not need to be specified. `y` is usually a 1d array where the `i`th entry corresponds to the target of the `i`th sample (row) of `X`.
```
# Let's take a look at X
X[:8]
# Let's take a look at y
y[:8]
```
### Fitting an SVC
For this example, we will fit a [Support Vector Classifier](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html).
```
from sklearn.svm import SVC
estimator = SVC(random_state=0)
estimator.fit(X, y)
```
We can inspect the learned features by taking a look a the `support_vectors_`:
```
estimator.support_vectors_[:4]
```
And we check the accuracy:
```
estimator.score(X, y)
```
There are [3 different approaches](https://scikit-learn.org/0.15/modules/model_evaluation.html) to evaluate the quality of predictions of a model. One of them is the **estimator score method**. Estimators have a score method providing a default evaluation criterion for the problem they are designed to solve, which is discussed in each estimator's documentation.
### Hyperparameter Optimization
There are a few ways to learn the best *hyper*parameters while training. One is `GridSearchCV`.
As the name implies, this does a brute-force search over a grid of hyperparameter combinations. scikit-learn provides tools to automatically find the best parameter combinations via cross-validation (which is the "CV" in `GridSearchCV`).
```
from sklearn.model_selection import GridSearchCV
%%time
estimator = SVC(gamma='auto', random_state=0, probability=True)
param_grid = {
'C': [0.001, 10.0],
'kernel': ['rbf', 'poly'],
}
# Brute-force search over a grid of hyperparameter combinations
grid_search = GridSearchCV(estimator, param_grid, verbose=2, cv=2)
grid_search.fit(X, y)
grid_search.best_params_, grid_search.best_score_
```
## Compute Bound: Single-machine parallelism with Joblib
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/joblib_logo.svg"
alt="Joblib logo"
width="50%"/>
In this section we'll see how [Joblib](https://joblib.readthedocs.io/en/latest/) ("*a set of tools to provide lightweight pipelining in Python*") gives us parallelism on our laptop. Here's what our grid search graph would look like if we set up six training "jobs" in parallel:
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/unmerged_grid_search_graph.svg"
alt="grid search graph"
width="100%"/>
With Joblib, we can say that scikit-learn has *single-machine* parallelism.
Any scikit-learn estimator that can operate in parallel exposes an `n_jobs` keyword, which tells you how many tasks to run in parallel. Specifying `n_jobs=-1` jobs means running the maximum possible number of tasks in parallel.
```
%%time
grid_search = GridSearchCV(estimator, param_grid, verbose=2, cv=2, n_jobs=-1)
grid_search.fit(X, y)
```
Notice that the computation above is faster than before. If you are running this computation on binder, you might not see a speed-up; the reason is that binder instances tend to have only one core with no threads, so you can't see any parallelism.
## Compute Bound: Multi-machine parallelism with Dask
In this section we'll see how Dask (plus Joblib and scikit-learn) gives us multi-machine parallelism. Here's what our grid search graph would look like if we allowed Dask to schedule our training "jobs" over multiple machines in our cluster:
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/merged_grid_search_graph.svg"
alt="merged grid search graph"
width="100%"/>
We can say that Dask can talk to scikit-learn (via Joblib) so that our *cluster* is used to train a model.
If we run this on a laptop, it will take quite some time, but the CPU usage will be satisfyingly near 100% for the duration. To run faster, we would need a distributed cluster. For details on how to create a LocalCluster you can check the Dask documentation on [Single Machine: dask.distributed](https://docs.dask.org/en/latest/setup/single-distributed.html).
Let's instantiate a Client with `n_workers=4`, which will give us a `LocalCluster`.
```
import dask.distributed
client = dask.distributed.Client(n_workers=4)
client
```
**Note:** Click on Cluster Info, to see more details about the cluster. You can see the configuration of the cluster and some other specs.
We can expand our problem by specifying more hyperparameters before training, and see how using `dask` as backend can help us.
```
param_grid = {
'C': [0.001, 0.1, 1.0, 2.5, 5, 10.0],
'kernel': ['rbf', 'poly', 'linear'],
'shrinking': [True, False],
}
grid_search = GridSearchCV(estimator, param_grid, verbose=2, cv=2, n_jobs=-1)
```
### Dask parallel backend
We can fit our estimator with multi-machine parallelism by quickly *switching to a Dask parallel backend* when using joblib.
```
import joblib
%%time
with joblib.parallel_backend("dask", scatter=[X, y]):
grid_search.fit(X, y)
```
**What did just happen?**
Dask-ML developers worked with the scikit-learn and Joblib developers to implement a Dask parallel backend. So internally, scikit-learn now talks to Joblib, and Joblib talks to Dask, and Dask is what handles scheduling all of those tasks on multiple machines.
The best parameters and best score:
```
grid_search.best_params_, grid_search.best_score_
```
## Memory Bound: Single/Multi machine parallelism with Dask-ML
We have seen how to work with larger models, but sometimes you'll want to train on a larger than memory dataset. `dask-ml` has implemented estimators that work well on Dask `Arrays` and `DataFrames` that may be larger than your machine's RAM.
```
import dask.array as da
import dask.delayed
from sklearn.datasets import make_blobs
import numpy as np
```
We'll make a small (random) dataset locally using scikit-learn.
```
n_centers = 12
n_features = 20
X_small, y_small = make_blobs(n_samples=1000, centers=n_centers, n_features=n_features, random_state=0)
centers = np.zeros((n_centers, n_features))
for i in range(n_centers):
centers[i] = X_small[y_small == i].mean(0)
centers[:4]
```
**Note**: The small dataset will be the template for our large random dataset.
We'll use `dask.delayed` to adapt `sklearn.datasets.make_blobs`, so that the actual dataset is being generated on our workers.
If you are not in binder and your machine has 16GB of RAM, you can set `n_samples_per_block=200_000` and the computation takes around 10 min. If you are in binder, the resources are limited and the problem below is big enough.
```
n_samples_per_block = 60_000 #on binder replace this for 15_000
n_blocks = 500
delayeds = [dask.delayed(make_blobs)(n_samples=n_samples_per_block,
centers=centers,
n_features=n_features,
random_state=i)[0]
for i in range(n_blocks)]
arrays = [da.from_delayed(obj, shape=(n_samples_per_block, n_features), dtype=X.dtype)
for obj in delayeds]
X = da.concatenate(arrays)
X
```
### KMeans from Dask-ml
The algorithms implemented in Dask-ML are scalable. They handle larger-than-memory datasets just fine.
They follow the scikit-learn API, so if you're familiar with scikit-learn, you'll feel at home with Dask-ML.
```
from dask_ml.cluster import KMeans
clf = KMeans(init_max_iter=3, oversampling_factor=10)
%time clf.fit(X)
clf.labels_
clf.labels_[:10].compute()
client.close()
```
## Multi-machine parallelism in the cloud with Coiled
<br>
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/Coiled-Logo_Horizontal_RGB_Black.png"
alt="Coiled logo"
width=25%/>
<br>
In this section we'll see how Coiled allows us to solve machine learning problems with multi-machine parallelism in the cloud.
Coiled, [among other things](https://coiled.io/product/), provides hosted and scalable Dask clusters. The biggest barriers to entry for doing machine learning at scale are "Do you have access to a cluster?" and "Do you know how to manage it?" Coiled solves both of those problems.
We'll spin up a Coiled cluster (with 10 workers in this case), then instantiate a Dask Client to use with that cluster.
If you are running on your local machine and not in binder, and you want to give Coiled a try, you can signup [here](https://cloud.coiled.io/login?redirect_uri=/) and you will get some free credits. If you installed the environment by following the steps on the repository's [README](https://github.com/coiled/dask-mini-tutorial/blob/main/README.md) you will have `coiled` installed. You will just need to login, by following the steps on the [setup page](https://docs.coiled.io/user_guide/getting_started.html), and you will be ready to go.
To learn more about how to set up an environment you can visit the Coiled documentation on [Creating software environments](https://docs.coiled.io/user_guide/software_environment_creation.html). But for now you can use the environment we set up for this tutorial.
```
import coiled
from dask.distributed import Client
# Spin up a Coiled cluster, instantiate a Client
cluster = coiled.Cluster(n_workers=10, software="ncclementi/dask-mini-tutorial",)
client = Client(cluster)
client
```
### Memory bound: Dask-ML
We can use Dask-ML estimators on the cloud to work with larger datasets.
```
n_centers = 12
n_features = 20
X_small, y_small = make_blobs(n_samples=1000, centers=n_centers, n_features=n_features, random_state=0)
centers = np.zeros((n_centers, n_features))
for i in range(n_centers):
centers[i] = X_small[y_small == i].mean(0)
n_samples_per_block = 200_000
n_blocks = 500
delayeds = [dask.delayed(make_blobs)(n_samples=n_samples_per_block,
centers=centers,
n_features=n_features,
random_state=i)[0]
for i in range(n_blocks)]
arrays = [da.from_delayed(obj, shape=(n_samples_per_block, n_features), dtype=X.dtype)
for obj in delayeds]
X = da.concatenate(arrays)
X = X.persist()
from dask_ml.cluster import KMeans
clf = KMeans(init_max_iter=3, oversampling_factor=10)
%time clf.fit(X)
```
Computing the labels:
```
clf.labels_[:10].compute()
client.close()
```
## Extra resources:
- [Dask-ML documentation](https://ml.dask.org/)
- [Getting started with Coiled](https://docs.coiled.io/user_guide/getting_started.html)
| github_jupyter |
[](https://colab.research.google.com/github/mahdimplus/DeepRetroMoco/blob/main/functions.ipynb)
```
pip install voxelmorph
```
### Requirement libraries.
```
import nibabel as nib
import os
import numpy as np
import random
from nibabel.affines import apply_affine
import time
import voxelmorph as vxm
import pandas as pd
import matplotlib.pyplot as plt
```
### Loading data in our interested shape (64*64)
```
"""
laod_m ==== load data and reshape it
load_with_head ==== loading the data, reshaping it, and creating an appropriate header file
"""
def load_m(file_path):
    """
    Loads a NIfTI volume and crops the in-plane dimensions to 64x64.

    :param file_path: str - path to a .nii or .nii.gz file.
    :return: numpy array of the (possibly cropped) 4D image data.
    :raises ValueError: if file_path does not end with .nii or .nii.gz.
    """
    # Fix: validate the extension BEFORE loading; the original checked after
    # nib.load had already succeeded, so the guard could never fire usefully.
    if not (file_path.endswith(".nii") or file_path.endswith(".nii.gz")):
        raise ValueError(
            f"Nifti file path must end with .nii or .nii.gz, got {file_path}."
        )
    img = nib.load(file_path)
    img_data = img.get_fdata()
    # Center-crop the in-plane axes down to the 64x64 window used by the model.
    if img.shape[0:2] != (64, 64):
        img_data = img_data[23:87, 23:87, :, :]
    return img_data
def load_with_head(file_path: str):
    """
    Loads a NIfTI volume, crops the in-plane dimensions to 64x64, and keeps the
    header's shape fields consistent with the cropped data.

    :param file_path: str - path to a .nii or .nii.gz file.
    :return: tuple of (cropped 4D image data, the nibabel image object).
    :raises ValueError: if file_path does not end with .nii or .nii.gz.
    """
    # Fix: validate the extension BEFORE loading; the original checked after
    # nib.load had already succeeded, so the guard could never fire usefully.
    if not (file_path.endswith(".nii") or file_path.endswith(".nii.gz")):
        raise ValueError(
            f"Nifti file path must end with .nii or .nii.gz, got {file_path}."
        )
    img = nib.load(file_path)
    img_data = img.get_fdata()
    # Center-crop the in-plane axes down to the 64x64 window used by the model.
    if img.shape[0:2] != (64, 64):
        img_data = img_data[23:87, 23:87, :, :]
    header = img.header
    # Edit the header so its recorded dimensions match the cropped data shape.
    header['dim'][1:5] = img_data.shape
    return img_data, img
```
### Listing data
```
"""
count ==== Listing data:
n= number of data
train_data_num= ID of the data
"""
def count(data_dir):
    """
    Lists the data files in data_dir.

    :param data_dir: str - directory containing the dataset files.
    :return: (n, train_data_num) where n is the number of files and
             train_data_num is an (n, 1) numpy array of file names.
    """
    listing = [[entry] for entry in os.listdir(os.path.join(data_dir))]
    train_data_num = np.array(listing)
    return train_data_num.shape[0], train_data_num
```
### Maximum intensity
```
"""
maxx ==== finding maximum intensity among all data
"""
def maxx(data_dir):
    """Returns the maximum voxel intensity found across every volume in data_dir."""
    n, train_data_num = count(data_dir)
    highest = 0
    # Scan each subject volume, keeping a running maximum
    # (local renamed from `maxx`, which shadowed the function name).
    for i in range(n):
        volume = load_m(data_dir + '/' + str(train_data_num[i][0]))
        highest = max(highest, volume.max())
    return highest
```
### Prepare input (moved , fix) and ground truth (ref , deformation map) for training network
```
"""
Generator that takes in data of size [N, H, W], and yields data for
our model. Note that we need to provide numpy data for each
input, and each output.
inputs: moving [bs, H, W, 1], fixed image [bs, H, W, 1]
outputs: moved image [bs, H, W, 1], zero-gradient [bs, H, W, 2]
m= maximum intensity between all subject
split= percent of validation data
batch_size= number of data that take from subject.
### first selects a random subject and then random slice and following it finding random volume based on the number of batch_size ###
"""
def data_generator(data_dir, batch_size,m,split):
    """Endless generator of training pairs for the registration network.

    One random training subject and one random slice are fixed up front;
    each iteration then samples ``batch_size`` random volumes for both the
    moving and the fixed images.

    Parameters
    ----------
    m : float
        Global maximum intensity; images are divided by it (range [0, 1]).
    split : float
        Fraction of subjects held out for validation (excluded here).

    Yields
    ------
    ([moving, fixed], [fixed, zero_phi]) with image batches shaped
    [batch_size, H, W, 1] and ``zero_phi`` shaped [batch_size, H, W, 2].
    """
    n, names = count(data_dir)
    n_train = n - int(split * n)
    subject = random.randint(0, n_train - 1)
    data = load_m(data_dir + '/' + str(names[subject][0]))
    slice_id = random.randint(0, data.shape[2] - 1)
    n_volumes = data.shape[3]
    vol_shape = data.shape[:2]
    ndims = len(vol_shape)
    # keep a single slice and move the volume axis to the front: [V, H, W]
    data = np.einsum('jki->ijk', data[:, :, slice_id, :])
    # the "true" deformation target is identically zero (smoothness penalty)
    zero_phi = np.zeros([batch_size, *vol_shape, ndims])
    while True:
        moving = data[np.random.randint(0, n_volumes, size=batch_size), ..., np.newaxis] / m
        fixed = data[np.random.randint(0, n_volumes, size=batch_size), ..., np.newaxis] / m
        # the fixed batch doubles as the "true moved image" target
        yield ([moving, fixed], [fixed, zero_phi])
```
### prepare data for validation
```
"""
Generator that takes in data of size [N, H, W], and yields data for
our model. Note that we need to provide numpy data for each
input, and each output.
inputs: moving [bs, H, W, 1], fixed image [bs, H, W, 1]
outputs: moved image [bs, H, W, 1], zero-gradient [bs, H, W, 2]
m= maximum intensity between all subject
split= percent of validation data
batch_size= number of data that take from subject.
### first selects a random subject and then random slice and following it finding random volume based on the number of batch_size ###
"""
def val_generator(data_dir, batch_size,m,split):
    """Build a single validation batch for the registration network.

    Same sampling scheme as ``data_generator`` but the subject is drawn
    from the held-out tail of the listing, and one batch is returned
    instead of yielded forever.
    """
    n, names = count(data_dir)
    n_train = n - int(split * n)
    # validation subjects live at the tail of the directory listing
    subject = random.randint(n_train, n - 1)
    data = load_m(data_dir + '/' + str(names[subject][0]))
    slice_id = random.randint(0, data.shape[2] - 1)
    n_volumes = data.shape[3]
    vol_shape = data.shape[:2]
    ndims = len(vol_shape)
    # keep one slice, volume axis first: [V, H, W]
    data = np.einsum('jki->ijk', data[:, :, slice_id, :])
    zero_phi = np.zeros([batch_size, *vol_shape, ndims])
    moving = data[np.random.randint(0, n_volumes, size=batch_size), ..., np.newaxis] / m
    fixed = data[np.random.randint(0, n_volumes, size=batch_size), ..., np.newaxis] / m
    return ([moving, fixed], [fixed, zero_phi])
```
### Deformation matrix DOF=4
```
"""
This part creates the deformation matrix based on the:
12mm freedom through the y-direction,
6mm through the x-direction,
and 5 degrees to the left and right direction.
"""
def a(teta):
    """Convert an angle *teta* from degrees to radians."""
    # numpy's dedicated helper instead of spelling out * pi / 180
    return np.deg2rad(teta)
def nearest_neighbors(i, j, M, T_inv):
    """Nearest-neighbour sample of image *M* for output pixel (i, j).

    ``T_inv`` (the inverted affine) maps the output pixel back into the
    source image; the value of the closest integer source pixel is
    returned. Coordinates that land outside the image fall back to pixel
    (0, 0). NOTE(review): ``apply_affine`` is presumably nibabel's
    ``nibabel.affines.apply_affine`` — confirm the import.
    """
    x_max, y_max = M.shape[0] - 1, M.shape[1] - 1
    # k is the unused homogeneous third coordinate
    x, y, k = apply_affine(T_inv, np.array([i, j, 1]))
    # out-of-bounds source coordinates are clamped to the corner pixel (0, 0)
    if x<0 or y<0:
        x=0
        y=0
    if x>=x_max+1 or y>=y_max+1:
        x=0
        y=0
    # exact integer hit: return the pixel directly
    if np.floor(x) == x and np.floor(y) == y:
        x, y = int(x), int(y)
        return M[x, y]
    # otherwise round each coordinate to the nearer integer; exact .5 ties
    # go to ceil (the strict '<' below)
    if np.abs(np.floor(x) - x) < np.abs(np.ceil(x) - x):
        x = int(np.floor(x))
    else:
        x = int(np.ceil(x))
    if np.abs(np.floor(y) - y) < np.abs(np.ceil(y) - y):
        y = int(np.floor(y))
    else:
        y = int(np.ceil(y))
    # final guard in case rounding pushed past the upper edge
    if x > x_max:
        x = x_max
    if y > y_max:
        y = y_max
    return M[x, y]
def affine_matrix():
    """Sample a random 4x4 rigid-motion matrix.

    Rotation is drawn uniformly from +/-5 degrees; translation from
    +/-3 px along x and +/-6 px along y. The z row is the identity.
    """
    theta = random.randint(-5, 5)
    c = np.cos(a(theta))
    s = np.sin(a(theta))
    tx = random.randint(-3, 3)
    ty = random.randint(-6, 6)
    return np.array([
        [c, -s, 0, tx],
        [s,  c, 0, ty],
        [0,  0, 1, 0],
        [0,  0, 0, 1],
    ])
```
### Augmentation
```
"""
warps the images based on the deformation matrix
"""
def augsb(ref,volume,affine_matrix):
    """Warp one image of a batch with an affine matrix.

    A forward pass scatters the pixels through ``affine_matrix``; the holes
    are then filled by nearest-neighbour resampling through the inverse
    transform.

    Parameters
    ----------
    ref : ndarray, shape [batch, H, W, 1]
        Batch of images; only ``ref[volume]`` is warped.
    volume : int
        Index of the image within the batch.
    affine_matrix : (4, 4) ndarray
        Rigid-motion matrix (see ``affine_matrix()``).

    Returns
    -------
    (H, W) float64 ndarray with the warped image.
    """
    # BUG FIX: the original unpacked the shape as ``tdim, xdim, ydim, tdim``,
    # silently overwriting the first binding with the channel count; use
    # throwaway names for the dimensions that are not needed.
    _, xdim, ydim, _ = ref.shape
    img_transformed = np.zeros((xdim, ydim), dtype=np.float64)
    for i in range(xdim):
        for j in range(ydim):
            pixel_data = ref[volume, i, j, 0]
            # homogeneous coordinates; the third output is unused
            i_out, j_out, k = apply_affine(affine_matrix, np.array([i, j, 1]))
            # out-of-bounds targets are dumped into pixel (0, 0)
            if i_out < 0 or j_out < 0 or i_out >= xdim or j_out >= ydim:
                i_out = 0
                j_out = 0
            img_transformed[int(i_out), int(j_out)] = pixel_data
    # fill the scatter holes via inverse-mapped nearest-neighbour lookup
    T_inv = np.linalg.inv(affine_matrix)
    img_nn = np.ones((xdim, ydim), dtype=np.float64)
    for i in range(xdim):
        for j in range(ydim):
            img_nn[i, j] = nearest_neighbors(i, j, ref[volume, :, :, 0], T_inv)
    return img_nn
```
### Prepare data for augmentation
```
def affine_generator(data_dir,batch_size,m,split):
    """Endless generator of affine-augmented training pairs.

    The fixed images are randomly drawn (normalised) volumes of one random
    training subject/slice; the moving images are the same batch warped by
    a per-position random rigid motion (sampled once, reused forever).
    """
    n, names = count(data_dir)
    n_train = n - int(split * n)
    subject = random.randint(0, n_train - 1)
    data = load_m(data_dir + '/' + str(names[subject][0]))
    slice_id = random.randint(0, data.shape[2] - 1)
    n_volumes = data.shape[3]
    vol_shape = data.shape[:2]
    ndims = len(vol_shape)
    # keep one slice, volume axis first: [V, H, W]
    data = np.einsum('jki->ijk', data[:, :, slice_id, :])
    # one fixed random motion per batch position
    transforms = np.array([affine_matrix() for _ in range(batch_size)])
    # zero deformation target for the smoothness loss
    zero_phi = np.zeros([batch_size, *vol_shape, ndims])
    while True:
        idx = np.random.randint(0, n_volumes, size=batch_size)
        fixed = data[idx, ..., np.newaxis] / m
        warped = [augsb(fixed, b, transforms[b]) for b in range(batch_size)]
        moving = np.array(warped)[..., np.newaxis]
        yield ([moving, fixed], [fixed, zero_phi])
```
### Prepare data for validation (based on augmentation data)
```
def label_generator(data_dir,batch_size,m,split):
    """Validation generator for the affine-augmentation setup.

    Yields ``([stacked images], [affine matrices])`` so that a model can be
    evaluated on predicting the motion parameters directly. Subjects are
    drawn from the held-out tail of the listing.
    """
    n,train_data_num=count(data_dir)
    n_train=n-int(split*n)
    a=n_train
    # validation subjects are the tail of the directory listing
    subject_ID=random.randint(a,n-1)
    d=load_m(data_dir+'/'+str(train_data_num[subject_ID][0]))
    s=d.shape[2]
    slice_ID =random.randint(0,s-1)
    v=d.shape[3]
    # preliminary sizing
    vol_shape = d.shape[:2] # extract data shape
    ndims = len(vol_shape)
    d=d[:,:,slice_ID,:]
    # volume axis first: [V, H, W]
    d = np.einsum('jki->ijk', d)
    # one random rigid transform per batch position
    y=[]
    for i in range(batch_size):
        y.append(affine_matrix())
    y=np.array(y)
    # prepare a zero array the size of the deformation
    # we'll explain this below
    # prepare inputs:
    # images need to be of the size [batch_size, H, W, 1]
    while True:
        idx2 = np.random.randint(0, v, size=batch_size)
        # NOTE(review): unlike the other generators no trailing channel axis
        # is added, so ``fixed_images`` is [bs, H, W]; ``augsb`` unpacks a
        # 4-D shape from its first argument — confirm this path actually
        # runs, or whether ``[..., np.newaxis]`` was intended here.
        fixed_images = d[idx2, ...]
        fixed_images=fixed_images/m
        moving_images=[]
        for i in range(batch_size):
            moving_images.append(augsb(fixed_images,i,y[i]))
        moving_images=np.array(moving_images)
        #moving_images=moving_images[... ]
        # pack the moving/fixed pair along a new axis so each sample carries
        # both images
        c=np.stack([moving_images,fixed_images], axis=2)
        inputs = [c]
        #inputs=[[moving_images,fixed_images]]
        # the targets are the affine matrices used for the warps
        outputs = [y]
        yield (inputs, outputs)
```
#### **PREDICTION PHASE**
### Separating and gathering data based on the reference volume
```
"""
Generator that takes in data of size [N, H, W], and yields data for
our model. Note that we need to provide numpy data for each
input, and each output.
inputs: moving [bs, H, W, 1], fixed image [bs, H, W, 1]
outputs: moved image [bs, H, W, 1], zero-gradient [bs, H, W, 2]
m= maximum intensity between all subject
split= percent of validation data
batch_size= number of data that take from subject.
### It works with Slice and volume of data to box the data. ###
"""
def ref(data_dir,m,slice_ID,reference):
    """Build the full prediction stack for one slice of a 4-D NIfTI file.

    Every volume becomes a moving image; the fixed image is either one of
    the volumes (``reference`` is a digit string giving its index) or an
    external NIfTI file (``reference`` is a path), repeated for the whole
    stack.

    Returns
    -------
    ([moving, fixed], [fixed, zero_phi]) with images shaped [V, H, W, 1].
    """
    data = load_m(data_dir)
    n_volumes = data.shape[3]
    vol_shape = data.shape[:2]
    ndims = len(vol_shape)
    # keep the requested slice, volume axis first: [V, H, W]
    data = np.einsum('jki->ijk', data[:, :, slice_ID, :])
    zero_phi = np.zeros([n_volumes, *vol_shape, ndims])
    # every volume is a moving image, normalised by the global maximum
    moving_images = data[np.arange(n_volumes), ..., np.newaxis] / m
    if reference.strip().isdigit():
        # numeric reference: repeat that volume across the whole stack
        repeat_idx = np.full(n_volumes, int(reference), dtype=int)
        fixed_images = data[repeat_idx, ..., np.newaxis] / m
    else:
        # path reference: load the external image and repeat its slice
        img = nib.load(reference)
        img_data = img.get_fdata()
        if img.shape[0:2] != (64, 64):
            img_data = img_data[23:87, 23:87, :]
        img_data = img_data[np.newaxis, :, :, slice_ID]
        repeat_idx = np.zeros(n_volumes, dtype=int)
        fixed_images = img_data[repeat_idx, ..., np.newaxis] / m
    return ([moving_images, fixed_images], [fixed_images, zero_phi])
```
### Predict function
```
"""
arg*
input_direction: input Raw data
reference: reference volume
output_direction: output motion corrected data
maximum_intensity: for normalization
loadable_model: loading trained model
"""
def main (input_direction,reference,output_direction,maximum_intensity,loadable_model):
    """Motion-correct a 4-D NIfTI file slice by slice with a VoxelMorph model.

    input_direction: path of the raw 4-D NIfTI file.
    reference: digit string (volume index) or path of a reference image.
    output_direction: path to write the corrected NIfTI file to.
    maximum_intensity: global maximum used to normalise / de-normalise.
    loadable_model: path of the trained VoxelMorph weights.
    """
    start_time = time.time()
    img_data,img=load_with_head(input_direction)
    slice_number = img_data.shape[2]
    header=img.header
    img_mask_affine = img.affine
    # configure unet input shape (concatenation of moving and fixed images)
    ndim = 2
    unet_input_features = 2
    # data shape 64*64
    s=(64,64)
    inshape = (*s, unet_input_features)
    # configure unet features
    nb_features =[
        [64, 64, 64, 64], # encoder features
        [64, 64, 64, 64, 64, 32,16] # decoder features
    ]
    # build model using VxmDense (spatial input shape only)
    inshape =s
    vxm_model = vxm.networks.VxmDense(inshape, nb_features, int_steps=0)
    # voxelmorph has a variety of custom loss classes
    losses = [vxm.losses.MSE().loss, vxm.losses.Grad('l2').loss]
    # usually, we have to balance the two losses by a hyper-parameter
    lambda_param = 0.05
    loss_weights = [1, lambda_param]
    vxm_model.compile(optimizer='Adam', loss=losses, loss_weights=loss_weights, metrics=['accuracy'])
    vxm_model.load_weights(loadable_model)
    o=np.zeros((img_data.shape[0],img_data.shape[1],img_data.shape[2],img_data.shape[3]))
    # register every slice independently and reassemble the 4-D volume
    for i in range(slice_number):
        prepare_data=ref(input_direction,maximum_intensity,i,reference)
        val_input, _ = prepare_data
        val_pred = vxm_model.predict(val_input)
        # predictions come back [V, H, W, 1]; reorder axes to [H, W, V]
        change_order= np.einsum('jki->kij',val_pred[0][:,:,:,0])
        o[:, :, i,:] = change_order
    # undo the normalisation before saving
    img_reg = nib.Nifti1Image(o*maximum_intensity, affine=img_mask_affine, header=header)
    nib.save(img_reg,output_direction)
    print("--- %s second ---" % (time.time() - start_time))
```
### Calculate SNR parameter
```
def snr (direction):
    """Per-slice signal-to-noise ratio of a NIfTI image.

    Returns
    -------
    tuple
        ``(snr, mean, std)`` arrays with one entry per slice (axis 2),
        where ``snr = mean / std``.
    """
    data = nib.load(direction).get_fdata()
    n_slices = data.shape[2]
    slice_means = np.array([np.mean(data[:, :, k]) for k in range(n_slices)])
    slice_stds = np.array([np.std(data[:, :, k]) for k in range(n_slices)])
    return slice_means / slice_stds, slice_means, slice_stds
```
### Mean across slice
```
def mean(direction):
    """Per-slice means of a NIfTI image, with the overall mean appended.

    Returns an array of length ``n_slices + 1``: the mean of each slice
    along axis 2, followed by the mean of those slice means.
    """
    img = nib.load(direction)
    img = img.get_fdata()
    means = []
    # BUG FIX: the original called bare ``isnan`` (not defined in this
    # namespace); use the numpy spelling, as ``mean_all`` already does.
    where_are_NaNs = np.isnan(img)
    img[where_are_NaNs] = 0
    for i in range(img.shape[2]):
        means.append(np.mean(img[:, :, i]))
    # append the grand mean of the per-slice means as the last element
    means.append(np.mean(means))
    return np.array(means)
```
### Mean across a specific region
```
"""
extract mean of segmented part
img[:,:,i][img[:,:,i] != 0
"""
def seg_mean(img):
    """Mean intensity of the non-zero (segmented) region of a 2-D image.

    Zero-valued pixels are treated as background and excluded from the
    denominator.

    Parameters
    ----------
    img : 2-D ndarray

    Returns
    -------
    float
        Sum of all pixels divided by the number of non-zero pixels.
    """
    # count background pixels with numpy instead of a double Python loop
    background = int(np.count_nonzero(img == 0))
    total = np.sum(img)
    # generalised from the hard-coded ``64*64`` to ``img.size`` so images of
    # any shape work; identical for the 64x64 inputs this project uses
    return total / (img.size - background)
"""
This function calculate the mean of the ROI mask
note: it remove the non interested area.
"""
def mean_all(direction):
    """Mean of the ROI (non-zero region) across all slices of a NIfTI mask.

    NaNs are zeroed first so they count as background instead of poisoning
    the averages; the per-slice ``seg_mean`` values are then averaged.
    """
    volume = nib.load(direction).get_fdata()
    volume[np.isnan(volume)] = 0
    per_slice = [seg_mean(volume[:, :, k]) for k in range(volume.shape[2])]
    return np.mean(per_slice)
```
### Shifting images
```
"""
Shifts images across dx and dy
X: source image
"""
def shift_image(X, dx, dy):
    """Translate a 2-D image by (dx, dy) pixels, zero-filling the gap.

    ``dy`` shifts along axis 0 (rows), ``dx`` along axis 1 (columns); the
    pixels that ``np.roll`` wraps in from the opposite edge are blanked.
    Returns a new array; *X* itself is not modified.
    """
    shifted = np.roll(np.roll(X, dy, axis=0), dx, axis=1)
    if dy > 0:
        shifted[:dy, :] = 0
    elif dy < 0:
        shifted[dy:, :] = 0
    if dx > 0:
        shifted[:, :dx] = 0
    elif dx < 0:
        shifted[:, dx:] = 0
    return shifted
```
## DeepRetroMoCO
```
"""
arg*
source_centerline_directory: reference centerline
centerlines_directory: data centerline
main_data_directory: Raw data
center_fix_directory: directory of FC(fix centerline) data------------------ID_c.nifti
final_cplus_directory: final motion corrected data directory----------------ID_cplus.nifti
maximum_intensity: normalization
model: trained model
reference: fix image volume
******
# if reference=0 means reference=first volume
# if reference=-1 means reference=mid volume
# if reference=-2 means reference=mean volume
# if reference>0 means reference=any volume
******
mean_directory: directory of mean image ** if reference is mean**
"""
def cplus(source_centerline_directory,centerlines_directory,main_data_directory,
          center_fix_directory,final_cplus_directory,
          maximum_intensity,model,reference,mean_directory
          ):
    """DeepRetroMoCo pipeline: centerline-based shift correction followed by
    model-based motion correction via ``main``.

    See the module-level docstring above for the argument descriptions.
    ``reference`` selects the fixed volume: >0 any volume, 0 the first,
    -1 the middle, -2 the mean image at ``mean_directory``.
    """
    #############################################
    # if reference=0 means reference=first volume
    # if reference=-1 means reference=mid volume
    # if reference=-2 means reference=mean volume
    # if reference>0 means reference=any volume
    Xs=[]
    Ys=[]
    # reference centerline: keep only the (x, y) columns
    source = pd.read_csv(source_centerline_directory, header=None)
    source.columns=['x','y','delete']
    source = source[['x','y']]
    # only the y coordinates are collected; the x branch is commented out
    for s in range(source.shape[0]):
        c=source.loc[s]
        #xs=int(c['x'])
        ys=int(c['y'])
        #Xs.append(xs)
        Ys.append(ys)
    n2,name2=count_endwith(centerlines_directory,'.csv')
    dx=[]
    dy=[]
    # per-slice, per-volume y-offset of each centerline from the reference
    for s in range(0,source.shape[0]):
        for j in range(n2):
            df = pd.read_csv(centerlines_directory+name2[j][0], header=None)
            df.columns=['x','y','delete']
            df=df[['x','y']]
            c=df.loc[s]
            #x=int(c['x'])
            y=int(c['y'])
            #dx.append(Xs[s]-x)
            dy.append(Ys[s]-y)
    input_direction=main_data_directory
    img = nib.load(input_direction)
    img_data=img.get_fdata()
    img_mask_affine = img.affine
    header = img.header
    nb_img = header.get_data_shape()
    o=np.zeros((nb_img[0],nb_img[1],nb_img[2],nb_img[3]))
    # the x shifts are all zero (x correction disabled above)
    DX=np.zeros(len(dy))
    start=0
    for s in range(0,source.shape[0]):
        for v in range(n2):
            # NOTE(review): shift_image's signature is (X, dx, dy); the
            # centerline y-offset is passed in the dx slot and the all-zero
            # DX in the dy slot — confirm the intended shift axis.
            a= shift_image(img_data[:,:,s,v],dy[v+start],DX[v+start])
            o[:,:,s, v] = a
        # advance into the next slice's run of offsets
        start=start + n2
    input_direction=center_fix_directory
    # save the centerline-corrected intermediate (the "_c" file)
    img_reg = nib.Nifti1Image(o, affine=img_mask_affine, header=header)
    nib.save(img_reg,input_direction)
    # normalise the reference selector into the string/path form ``main``
    # expects
    if reference>0:
        reference=str(reference)
    if reference==0:
        reference='0'
    if reference==-1:
        y=int(n2/2)
        reference=str(y)
    if reference==-2:
        reference=mean_directory
    main(input_direction,reference,final_cplus_directory,maximum_intensity,model)
```
### Tool for finding data
```
"""
arg*
data_dir: input directory
prefix: finding data by this prefix
"""
def count_startwith (data_dir,prefix):
    """Count and list the files of *data_dir* whose names begin with *prefix*.

    Returns
    -------
    tuple
        ``(n, names)`` — *names* is sorted and each entry is a 1-element
        array holding the file name.
    """
    matches = np.array([[entry]
                        for entry in os.listdir(os.path.join(data_dir))
                        if entry.startswith(prefix)])
    return matches.shape[0], sorted(matches)
def count_endwith (data_dir,prefix):
    """Count and list the files of *data_dir* whose names end with *prefix*
    (typically an extension such as ``'.csv'``).

    Returns
    -------
    tuple
        ``(n, names)`` — *names* is sorted and each entry is a 1-element
        array holding the file name.
    """
    matches = np.array([[entry]
                        for entry in os.listdir(os.path.join(data_dir))
                        if entry.endswith(prefix)])
    return matches.shape[0], sorted(matches)
```
### Movement plots for one slice
```
"""
arg*
input_direction: "
reference: "
maximum_intensity: "
loadable_model: "
slice_num: Specific slice number to draw
mean_directory: "
title: title of figure
"""
def flow_one_slice(input_direction,reference,maximum_intensity,loadable_model,slice_num,mean_directory,title):
    """Plot the mean predicted x/y displacement per volume for one slice.

    input_direction / maximum_intensity / loadable_model: as in ``main``.
    reference: volume selector (>0 any, 0 first, -1 middle, -2 mean image).
    slice_num: slice index to analyse.
    mean_directory: path of the mean image (used when reference == -2).
    title: figure title.
    """
    img_data,img=load_with_head(input_direction)
    slice_number = img_data.shape[2]
    header=img.header
    img_mask_affine = img.affine
    # configure unet input shape (concatenation of moving and fixed images)
    ndim = 2
    unet_input_features = 2
    # data shape 64*64
    s=(64,64)
    inshape = (*s, unet_input_features)
    # configure unet features
    nb_features =[
        [64, 64, 64, 64], # encoder features
        [64, 64, 64, 64, 64, 32,16] # decoder features
    ]
    # build model using VxmDense
    inshape =s
    vxm_model = vxm.networks.VxmDense(inshape, nb_features, int_steps=0)
    # voxelmorph has a variety of custom loss classes
    losses = [vxm.losses.MSE().loss, vxm.losses.Grad('l2').loss]
    # usually, we have to balance the two losses by a hyper-parameter
    lambda_param = 0.05
    loss_weights = [1, lambda_param]
    vxm_model.compile(optimizer='Adam', loss=losses, loss_weights=loss_weights, metrics=['accuracy'])
    vxm_model.load_weights(loadable_model)
    # translate the reference selector into the form ``ref`` expects
    if reference>0:
        reference=str(reference)
    if reference==0:
        reference='0'
    if reference==-1:
        y=int(img_data.shape[3]/2)
        reference=str(y)
    if reference==-2:
        reference=mean_directory
    #for i in range(slice_number):
    #slice_number=5
    prepare_data=ref(input_direction,maximum_intensity,slice_num,reference)
    val_input, _ = prepare_data
    val_pred = vxm_model.predict(val_input)
    #val_pred=flow(input_direction,reference,maximum_intensity,loadable_model,slice_num)
    # val_pred[1] is the dense flow field [V, H, W, 2]; average each
    # component over the image to get one (x, y) displacement per volume
    x=[]
    y=[]
    for i in range(val_pred[1][:,0,0,0].shape[0]):
        x.append(np.mean(val_pred[1][i,...,0]))
        y.append(np.mean(val_pred[1][i,...,1]))
    x=np.array(x)
    y=np.array(y)
    volume=range(val_pred[1][:,0,0,0].shape[0])
    plt.figure(figsize=(20,5))
    plt.plot(volume,x,label = "x")
    plt.plot(volume,y,label = "y")
    # naming the x axis
    plt.xlabel('volumes')
    # naming the y axis
    plt.ylabel('movement')
    # giving a title to my graph
    plt.title(title)
    # show a legend on the plot
    plt.legend()
```
### Movement plots for all slices in one plot
```
"""
arg*
input_direction: "
reference: "
maximum_intensity: "
loadable_model: "
slice_num: Specific slice number to draw
mean_directory: "
title: title of figure
"""
def flow_all_slice(input_direction,reference,maximum_intensity,loadable_model,mean_directory,title):
    """Plot the slice-averaged predicted displacement per volume.

    Same pipeline as ``flow_one_slice`` but averaged over every slice, with
    the x and y components combined into a single curve.
    """
    img_data,img=load_with_head(input_direction)
    slice_number = img_data.shape[2]
    header=img.header
    img_mask_affine = img.affine
    # configure unet input shape (concatenation of moving and fixed images)
    ndim = 2
    unet_input_features = 2
    # data shape 64*64
    s=(64,64)
    inshape = (*s, unet_input_features)
    # configure unet features
    nb_features =[
        [64, 64, 64, 64], # encoder features
        [64, 64, 64, 64, 64, 32,16] # decoder features
    ]
    # build model using VxmDense
    inshape =s
    vxm_model = vxm.networks.VxmDense(inshape, nb_features, int_steps=0)
    # voxelmorph has a variety of custom loss classes
    losses = [vxm.losses.MSE().loss, vxm.losses.Grad('l2').loss]
    # usually, we have to balance the two losses by a hyper-parameter
    lambda_param = 0.05
    loss_weights = [1, lambda_param]
    vxm_model.compile(optimizer='Adam', loss=losses, loss_weights=loss_weights, metrics=['accuracy'])
    vxm_model.load_weights(loadable_model)
    # translate the reference selector into the form ``ref`` expects
    if reference>0:
        reference=str(reference)
    if reference==0:
        reference='0'
    if reference==-1:
        y=int(img_data.shape[3]/2)
        reference=str(y)
    if reference==-2:
        reference=mean_directory
    x_all_slice=[]
    y_all_slice=[]
    for i in range(slice_number):
        prepare_data=ref(input_direction,maximum_intensity,i,reference)
        val_input, _ = prepare_data
        val_pred = vxm_model.predict(val_input)
        #val_pred=flow(input_direction,reference,maximum_intensity,loadable_model,slice_num)
        # mean x/y flow per volume for this slice (the inner loop reuses the
        # name ``i``; harmless, the outer for continues from its iterator)
        x=[]
        y=[]
        for i in range(val_pred[1][:,0,0,0].shape[0]):
            x.append(np.mean(val_pred[1][i,...,0]))
            y.append(np.mean(val_pred[1][i,...,1]))
        x_all_slice.append(x)
        y_all_slice.append(y)
    x_all_slice=np.array(x_all_slice)
    y_all_slice=np.array(y_all_slice)
    mean_x=x_all_slice.mean(axis=0)
    mean_y=y_all_slice.mean(axis=0)
    # zero out the reference-to-reference entry (self-registration noise).
    # NOTE(review): int(reference) raises when reference was -2 (a file
    # path) — confirm that case is never plotted through here.
    mean_x[int(reference)]=0
    mean_y[int(reference)]=0
    overal=(mean_x+mean_y)/2
    volume=range(val_pred[1][:,0,0,0].shape[0])
    plt.figure(figsize=(20,5))
    plt.plot(volume,overal,label = "x")
    #plt.plot(volume,mean_y,label = "y")
    # naming the x axis
    plt.xlabel('volumes')
    # naming the y axis
    plt.ylabel('movement')
    # giving a title to my graph
    plt.title(title)
    # show a legend on the plot
    plt.legend()
```
### Showing Flow field map
```
def flow_between_two(input_direction0,input_direction1,reference,
                     maximum_intensity,loadable_model,mean_directory,
                     title,label1,label2):
    """Overlay the slice-averaged displacement curves of two datasets.

    Typically used to compare a raw acquisition (input_direction0) against
    its motion-corrected counterpart (input_direction1).
    label1 / label2: legend labels for the two curves.
    """
    img_data,img=load_with_head(input_direction0)
    slice_number = img_data.shape[2]
    header=img.header
    img_mask_affine = img.affine
    # configure unet input shape (concatenation of moving and fixed images)
    ndim = 2
    unet_input_features = 2
    # data shape 64*64
    s=(64,64)
    inshape = (*s, unet_input_features)
    # configure unet features
    nb_features =[
        [64, 64, 64, 64], # encoder features
        [64, 64, 64, 64, 64, 32,16] # decoder features
    ]
    # build model using VxmDense
    inshape =s
    vxm_model = vxm.networks.VxmDense(inshape, nb_features, int_steps=0)
    # voxelmorph has a variety of custom loss classes
    losses = [vxm.losses.MSE().loss, vxm.losses.Grad('l2').loss]
    # usually, we have to balance the two losses by a hyper-parameter
    lambda_param = 0.05
    loss_weights = [1, lambda_param]
    vxm_model.compile(optimizer='Adam', loss=losses, loss_weights=loss_weights, metrics=['accuracy'])
    vxm_model.load_weights(loadable_model)
    # translate the reference selector into the form ``ref`` expects
    if reference>0:
        reference=str(reference)
    if reference==0:
        reference='0'
    if reference==-1:
        y=int(img_data.shape[3]/2)
        reference=str(y)
    if reference==-2:
        reference=mean_directory
    # pass 1: displacement curve of the first dataset
    x_all_slice=[]
    y_all_slice=[]
    for i in range(slice_number):
        prepare_data=ref(input_direction0,maximum_intensity,i,reference)
        val_input, _ = prepare_data
        val_pred = vxm_model.predict(val_input)
        #val_pred=flow(input_direction,reference,maximum_intensity,loadable_model,slice_num)
        x=[]
        y=[]
        for i in range(val_pred[1][:,0,0,0].shape[0]):
            x.append(np.mean(val_pred[1][i,...,0]))
            y.append(np.mean(val_pred[1][i,...,1]))
        x_all_slice.append(x)
        y_all_slice.append(y)
    x_all_slice=np.array(x_all_slice)
    y_all_slice=np.array(y_all_slice)
    mean_x=x_all_slice.mean(axis=0)
    mean_y=y_all_slice.mean(axis=0)
    # zero out the reference-to-reference entry (self-registration noise);
    # NOTE(review): int(reference) raises if reference was a file path
    mean_x[int(reference)]=0
    mean_y[int(reference)]=0
    overal=(mean_x+mean_y)/2
    # pass 2: same computation for the second dataset
    x_all_slice=[]
    y_all_slice=[]
    for i in range(slice_number):
        prepare_data=ref(input_direction1,maximum_intensity,i,reference)
        val_input, _ = prepare_data
        val_pred = vxm_model.predict(val_input)
        #val_pred=flow(input_direction,reference,maximum_intensity,loadable_model,slice_num)
        x=[]
        y=[]
        for i in range(val_pred[1][:,0,0,0].shape[0]):
            x.append(np.mean(val_pred[1][i,...,0]))
            y.append(np.mean(val_pred[1][i,...,1]))
        x_all_slice.append(x)
        y_all_slice.append(y)
    x_all_slice=np.array(x_all_slice)
    y_all_slice=np.array(y_all_slice)
    mean_x=x_all_slice.mean(axis=0)
    mean_y=y_all_slice.mean(axis=0)
    mean_x[int(reference)]=0
    mean_y[int(reference)]=0
    overal1=(mean_x+mean_y)/2
    volume=range(val_pred[1][:,0,0,0].shape[0])
    plt.figure(figsize=(25,10))
    plt.plot(volume,overal,label = label1)
    plt.plot(volume,overal1,label = label2)
    # naming the x axis
    plt.xlabel('volumes',fontsize=18)
    # naming the y axis
    plt.ylabel('movement',fontsize=18)
    # giving a title to my graph
    plt.title(title,fontsize=20)
    # enlarge tick labels for readability
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    # show a legend on the plot
    plt.legend()
    plt.grid()
    plt.legend(fontsize=15)
```
| github_jupyter |
```
'''
A Convolutional Network implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_iters = 200000  # total number of training samples to consume
batch_size = 128
display_step = 10  # log loss/accuracy every N optimisation steps
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input (TF1-style placeholders fed via feed_dict)
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    """2-D convolution with bias and ReLU, SAME padding, stride *strides*."""
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(conv, b))
def maxpool2d(x, k=2):
    """k x k max-pooling with matching stride and SAME padding."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # fully connected, 7*7*64 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
    # 1024 inputs, 10 outputs (class prediction)
    'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer: softmax cross-entropy on logits, Adam
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model: fraction of samples whose arg-max matches the label
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph and train until ``training_iters`` samples are consumed
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy (dropout disabled)
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y,
                                                              keep_prob: 1.})
            # BUG FIX: the original used Python-2 print statements, which are
            # syntax errors under the Python 3 the rest of this file targets.
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 256 mnist test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                        y: mnist.test.labels[:256],
                                        keep_prob: 1.}))
```
| github_jupyter |
## Dependencies
```
import json, warnings, shutil
from jigsaw_utility_scripts import *
from transformers import TFXLMRobertaModel, XLMRobertaConfig
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
SEED = 0
# fix all RNGs up front for reproducible folds/training
seed_everything(SEED)
warnings.filterwarnings("ignore")
```
## TPU configuration
```
# TPU strategy when available, otherwise the default CPU/GPU strategy
strategy = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
# let tf.data tune pipeline parallelism automatically
AUTO = tf.data.experimental.AUTOTUNE
```
# Load data
```
database_base_path = '/kaggle/input/jigsaw-dataset-split-toxic-roberta-base-192/'
# fold-assignment table: one row per comment; pred_* columns are added later
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
display(k_fold.head())
# Unzip files (only the first N_FOLDS folds are trained below)
!tar -xvf /kaggle/input/jigsaw-dataset-split-toxic-roberta-base-192/fold_1.tar.gz
!tar -xvf /kaggle/input/jigsaw-dataset-split-toxic-roberta-base-192/fold_2.tar.gz
!tar -xvf /kaggle/input/jigsaw-dataset-split-toxic-roberta-base-192/fold_3.tar.gz
# !tar -xvf /kaggle/input/jigsaw-dataset-split-toxic-roberta-base-192/fold_4.tar.gz
# !tar -xvf /kaggle/input/jigsaw-dataset-split-toxic-roberta-base-192/fold_5.tar.gz
```
# Model parameters
```
base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'
# training configuration; persisted next to the model for reproducibility
config = {
    "MAX_LEN": 192,
    "BATCH_SIZE": 16 * strategy.num_replicas_in_sync,
    "EPOCHS": 3,
    "LEARNING_RATE": 1e-5,
    "ES_PATIENCE": 1,
    "N_FOLDS": 3,
    "base_model_path": base_path + 'tf-xlm-roberta-base-tf_model.h5',
    "config_path": base_path + 'xlm-roberta-base-config.json'
}
with open('config.json', 'w') as json_file:
    # FIX: dump the dict directly; the previous
    # ``json.loads(json.dumps(config))`` round-trip was a no-op
    json.dump(config, json_file)
```
# Model
```
# architecture config only; pretrained weights are loaded separately below
module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build a binary toxicity classifier on top of XLM-RoBERTa.

    A sigmoid dense head is applied to the first ([CLS]-position) token of
    the transformer's last hidden state.
    """
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)
    sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    last_state = sequence_output[0]
    # pooled representation: first token of the sequence
    cls_token = last_state[:, 0, :]
    output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)
    model = Model(inputs=[input_ids, attention_mask], outputs=output)
    model.compile(optimizers.Adam(lr=config['LEARNING_RATE']),
                  loss=losses.BinaryCrossentropy(),
                  metrics=[metrics.BinaryAccuracy(), metrics.AUC()])
    return model
```
# Train
```
history_list = []
for n_fold in range(config['N_FOLDS']):
    print('\nFOLD: %d' % (n_fold+1))
    # Load data (pre-tokenised arrays produced by the dataset-split kernel)
    base_data_path = 'fold_%d/' % (n_fold+1)
    x_train = np.load(base_data_path + 'x_train.npy')
    y_train = np.load(base_data_path + 'y_train.npy')
    x_valid = np.load(base_data_path + 'x_valid.npy')
    y_valid = np.load(base_data_path + 'y_valid.npy')
    step_size = x_train.shape[0] // config['BATCH_SIZE']
    ### Delete data dir (free disk before the next fold is unpacked)
    shutil.rmtree(base_data_path)
    # Train model
    model_path = 'model_fold_%d.h5' % (n_fold+1)
    es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'],
                       restore_best_weights=True, verbose=1)
    checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min',
                                 save_best_only=True, save_weights_only=True, verbose=1)
    # the model must be created inside the distribution strategy's scope
    with strategy.scope():
        model = model_fn(config['MAX_LEN'])
    history = model.fit(list(x_train), y_train,
                        validation_data=(list(x_valid), y_valid),
                        callbacks=[checkpoint, es],
                        epochs=config['EPOCHS'],
                        batch_size=config['BATCH_SIZE'],
                        steps_per_epoch=step_size,
                        verbose=2).history
    history_list.append(history)
    # Make predictions and store the rounded labels back on the fold table
    train_preds = model.predict(list(x_train))
    valid_preds = model.predict(list(x_valid))
    k_fold.loc[k_fold['fold_%d' % (n_fold+1)] == 'train', 'pred_%d' % (n_fold+1)] = np.round(train_preds)
    k_fold.loc[k_fold['fold_%d' % (n_fold+1)] == 'validation', 'pred_%d' % (n_fold+1)] = np.round(valid_preds)
```
## Model loss graph
```
sns.set(style="whitegrid")
# one loss/metric curve figure per fold
for n_fold in range(config['N_FOLDS']):
    print('Fold: %d' % (n_fold+1))
    plot_metrics(history_list[n_fold])
```
# Model evaluation
```
# per-fold metric table, colour-coded cell by cell
display(evaluate_model(k_fold, config['N_FOLDS']).style.applymap(color_map))
```
# Confusion matrix
```
# train/validation confusion matrices for each fold
for n_fold in range(config['N_FOLDS']):
    print('Fold: %d' % (n_fold+1))
    train_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'train']
    validation_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'validation']
    plot_confusion_matrix(train_set['toxic'], train_set['pred_%d' % (n_fold+1)],
                          validation_set['toxic'], validation_set['pred_%d' % (n_fold+1)])
```
# Visualize predictions
```
# first 15 comments with the true label and every fold's prediction
display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(15))
```
| github_jupyter |
## CIFAR 10
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
```
You can get the data via:
wget http://pjreddie.com/media/files/cifar.tgz
```
from fastai.conv_learner import *
from pathlib import Path
if os.name == 'nt':
PATH = str(Path.home()) + "\\data\\cifar10\\"
else:
PATH = "data/cifar10/"
os.makedirs(PATH, exist_ok=True)
def moveFilesToSubDirsFromFileName(path):
    """Sort files like ``1234_cat.png`` into per-label subdirectories of *path*.

    The label is obtained by stripping every non-alphabetic character from
    the file's stem (``1234_cat.png`` -> ``cat``); the file is then moved
    into ``path/<label>/``, creating that directory on demand.  Existing
    subdirectories of *path* are skipped.
    """
    # hoisted out of the loop: the pattern is loop-invariant
    non_alpha = re.compile('[^a-zA-Z]')
    for f in os.listdir(path):
        if os.path.isdir(os.path.join(path, f)):
            continue  # only plain files get sorted
        filename, _ = os.path.splitext(f)
        file_label = non_alpha.sub('', filename)
        target_folder = os.path.join(path, file_label)
        os.makedirs(target_folder, exist_ok=True)
        os.rename(os.path.join(path, f), os.path.join(target_folder, f))
moveFilesToSubDirsFromFileName(os.path.join(PATH, "train"))
moveFilesToSubDirsFromFileName(os.path.join(PATH, "test"))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))
def get_data(sz, bs):
    """Build a fastai (0.7) ImageClassifierData for CIFAR-10.

    sz: target image side length; bs: batch size.  Transforms are built
    from the dataset `stats` with random horizontal flips and padding of
    sz//8.  Validation data comes from the ``test`` folder under PATH.
    """
    tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)
    return ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)
bs=256
```
### Look at data
```
data = get_data(32, 4)
x,y=next(iter(data.trn_dl))
plt.imshow(data.trn_ds.denorm(x)[0]);
plt.imshow(data.trn_ds.denorm(x)[1]);
```
## Fully connected model
```
data = get_data(32,bs)
lr=1e-2
```
From [this notebook](https://github.com/KeremTurgutlu/deeplearning/blob/master/Exploring%20Optimizers.ipynb) by our student Kerem Turgutlu:
```
class SimpleNet(nn.Module):
    """Plain fully connected classifier.

    Flattens the input, applies each linear layer followed by ReLU, and
    returns the log-softmax of the *pre-activation* output of the final
    layer (the trailing ReLU is computed but intentionally discarded).
    """

    def __init__(self, layers):
        super().__init__()
        pairs = zip(layers[:-1], layers[1:])
        self.layers = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in pairs)

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        for lin in self.layers:
            pre_act = lin(flat)
            flat = F.relu(pre_act)
        return F.log_softmax(pre_act, dim=-1)
learn = ConvLearner.from_model_data(SimpleNet([32*32*3, 40,10]), data)
learn, [o.numel() for o in learn.model.parameters()]
learn.summary()
%time learn.lr_find()
learn.sched.plot()
%time learn.fit(lr, 2)
%time learn.fit(lr, 2, cycle_len=1)
```
## CNN
```
class ConvNet(nn.Module):
    """Stack of stride-2 3x3 convs with ReLU, global max-pool, linear head."""

    def __init__(self, layers, c):
        super().__init__()
        convs = [nn.Conv2d(n_in, n_out, kernel_size=3, stride=2)
                 for n_in, n_out in zip(layers, layers[1:])]
        self.layers = nn.ModuleList(convs)
        self.pool = nn.AdaptiveMaxPool2d(1)
        self.out = nn.Linear(layers[-1], c)

    def forward(self, x):
        for conv in self.layers:
            x = F.relu(conv(x))
        pooled = self.pool(x).view(x.size(0), -1)
        return F.log_softmax(self.out(pooled), dim=-1)
learn = ConvLearner.from_model_data(ConvNet([3, 20, 40, 80], 10), data)
learn.summary()
learn.lr_find(end_lr=100)
learn.sched.plot()
%time learn.fit(1e-1, 2)
%time learn.fit(1e-1, 4, cycle_len=1)
```
## Refactored
```
class ConvLayer(nn.Module):
    """3x3 stride-2 'same'-padded convolution followed by ReLU."""

    def __init__(self, ni, nf):
        super().__init__()
        self.conv = nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return F.relu(self.conv(x))


class ConvNet2(nn.Module):
    """ConvNet refactored around ConvLayer blocks; functional global pool."""

    def __init__(self, layers, c):
        super().__init__()
        self.layers = nn.ModuleList(
            ConvLayer(n_in, n_out) for n_in, n_out in zip(layers, layers[1:]))
        self.out = nn.Linear(layers[-1], c)

    def forward(self, x):
        for block in self.layers:
            x = block(x)
        flat = F.adaptive_max_pool2d(x, 1).view(x.size(0), -1)
        return F.log_softmax(self.out(flat), dim=-1)
learn = ConvLearner.from_model_data(ConvNet2([3, 20, 40, 80], 10), data)
learn.summary()
%time learn.fit(1e-1, 2)
%time learn.fit(1e-1, 2, cycle_len=1)
```
## BatchNorm
```
class BnLayer(nn.Module):
    """Conv + ReLU followed by a hand-rolled batch-norm-style rescaling.

    Unlike ``nn.BatchNorm2d`` there are no running statistics: the per-channel
    mean/std computed on the *last training batch* are reused verbatim at
    eval time.
    NOTE(review): calling this in eval mode before any training-mode forward
    pass raises AttributeError, since ``self.means``/``self.stds`` are only
    assigned when ``self.training`` is True.
    """
    def __init__(self, ni, nf, stride=2, kernel_size=3):
        super().__init__()
        # bias disabled: the learned shift `self.a` plays that role
        self.conv = nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride,
                              bias=False, padding=1)
        self.a = nn.Parameter(torch.zeros(nf, 1, 1))  # learned per-channel shift
        self.m = nn.Parameter(torch.ones(nf, 1, 1))   # learned per-channel scale

    def forward(self, x):
        x = F.relu(self.conv(x))
        # flatten everything but the channel dim to compute channel stats
        x_chan = x.transpose(0, 1).contiguous().view(x.size(1), -1)
        if self.training:
            self.means = x_chan.mean(1)[:, None, None]
            self.stds = x_chan.std(1)[:, None, None]
        return (x - self.means) / self.stds * self.m + self.a
class ConvBnNet(nn.Module):
    """5x5 stem conv, a stack of BnLayer blocks, then a linear classifier."""

    def __init__(self, layers, c):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
        blocks = [BnLayer(n_in, n_out)
                  for n_in, n_out in zip(layers, layers[1:])]
        self.layers = nn.ModuleList(blocks)
        self.out = nn.Linear(layers[-1], c)

    def forward(self, x):
        x = self.conv1(x)
        for block in self.layers:
            x = block(x)
        x = F.adaptive_max_pool2d(x, 1)
        return F.log_softmax(self.out(x.view(x.size(0), -1)), dim=-1)
learn = ConvLearner.from_model_data(ConvBnNet([10, 20, 40, 80, 160], 10), data)
learn.summary()
%time learn.fit(3e-2, 2)
%time learn.fit(1e-1, 4, cycle_len=1)
```
## Deep BatchNorm
```
class ConvBnNet2(nn.Module):
    """ConvBnNet variant: every downsampling BnLayer is followed by an extra
    stride-1 BnLayer of the same width (a deeper stack)."""

    def __init__(self, layers, c):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
        pairs = list(zip(layers, layers[1:]))
        self.layers = nn.ModuleList(
            BnLayer(n_in, n_out) for n_in, n_out in pairs)
        self.layers2 = nn.ModuleList(
            BnLayer(n_out, n_out, 1) for _, n_out in pairs)
        self.out = nn.Linear(layers[-1], c)

    def forward(self, x):
        x = self.conv1(x)
        for down, keep in zip(self.layers, self.layers2):
            x = keep(down(x))
        x = F.adaptive_max_pool2d(x, 1)
        return F.log_softmax(self.out(x.view(x.size(0), -1)), dim=-1)
learn = ConvLearner.from_model_data(ConvBnNet2([10, 20, 40, 80, 160], 10), data)
%time learn.fit(1e-2, 2)
%time learn.fit(1e-2, 2, cycle_len=1)
```
## Resnet
```
class ResnetLayer(BnLayer):
    """BnLayer with an identity skip connection (residual add).

    Input and output shapes must match, so callers construct it with
    stride=1 and ni == nf.
    """
    def forward(self, x): return x + super().forward(x)
class Resnet(nn.Module):
    """Downsampling BnLayer stack where each stage is followed by two
    stride-1 residual (ResnetLayer) blocks of the same width."""

    def __init__(self, layers, c):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
        pairs = list(zip(layers, layers[1:]))
        self.layers = nn.ModuleList(
            BnLayer(n_in, n_out) for n_in, n_out in pairs)
        self.layers2 = nn.ModuleList(
            ResnetLayer(n_out, n_out, 1) for _, n_out in pairs)
        self.layers3 = nn.ModuleList(
            ResnetLayer(n_out, n_out, 1) for _, n_out in pairs)
        self.out = nn.Linear(layers[-1], c)

    def forward(self, x):
        x = self.conv1(x)
        for down, res_a, res_b in zip(self.layers, self.layers2, self.layers3):
            x = res_b(res_a(down(x)))
        x = F.adaptive_max_pool2d(x, 1)
        return F.log_softmax(self.out(x.view(x.size(0), -1)), dim=-1)
learn = ConvLearner.from_model_data(Resnet([10, 20, 40, 80, 160], 10), data)
wd=1e-5
%time learn.fit(1e-2, 2, wds=wd)
%time learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)
%time learn.fit(1e-2, 8, cycle_len=4, wds=wd)
```
## Resnet 2
```
class Resnet2(nn.Module):
    """Resnet variant: BnLayer stem (7x7, stride 1) and dropout before the
    linear head."""

    def __init__(self, layers, c, p=0.5):
        super().__init__()
        self.conv1 = BnLayer(3, 16, stride=1, kernel_size=7)
        pairs = list(zip(layers, layers[1:]))
        self.layers = nn.ModuleList(
            BnLayer(n_in, n_out) for n_in, n_out in pairs)
        self.layers2 = nn.ModuleList(
            ResnetLayer(n_out, n_out, 1) for _, n_out in pairs)
        self.layers3 = nn.ModuleList(
            ResnetLayer(n_out, n_out, 1) for _, n_out in pairs)
        self.out = nn.Linear(layers[-1], c)
        self.drop = nn.Dropout(p)

    def forward(self, x):
        x = self.conv1(x)
        for down, res_a, res_b in zip(self.layers, self.layers2, self.layers3):
            x = res_b(res_a(down(x)))
        x = F.adaptive_max_pool2d(x, 1)
        x = self.drop(x.view(x.size(0), -1))
        return F.log_softmax(self.out(x), dim=-1)
learn = ConvLearner.from_model_data(Resnet2([16, 32, 64, 128, 256], 10, 0.2), data)
wd=1e-6
%time learn.fit(1e-2, 2, wds=wd)
%time learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)
%time learn.fit(1e-2, 8, cycle_len=4, wds=wd)
learn.save('tmp3')
log_preds,y = learn.TTA()
preds = np.mean(np.exp(log_preds),0)
metrics.log_loss(y,preds), accuracy(preds,y)
```
### End
| github_jupyter |
# Lesson 1
In the screencast for this lesson I go through a few scenarios for time series. This notebook contains the code for that with a few little extras! :)
# Setup
```
# !pip install -U tf-nightly-2.0-preview
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
    """Plot series[start:end] against time[start:end] on the current axes,
    with axis labels, a grid, and an optional legend entry."""
    window = slice(start, end)
    plt.plot(time[window], series[window], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        plt.legend(fontsize=14)
    plt.grid(True)
```
# Trend and Seasonality
```
def trend(time, slope=0):
    """Straight-line trend through the origin: ``slope * time``.

    Works elementwise when `time` is a numpy array.
    """
    return slope * time
```
Let's create a time series that just trends upward:
```
time = np.arange(4 * 365 + 1)
baseline = 10
series = trend(time, 0.1)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
Now let's generate a time series with a seasonal pattern:
```
def seasonal_pattern(season_time):
    """Arbitrary repeating shape on [0, 1): a cosine arc for the first 40%
    of the cycle, then an exponential decay."""
    rising = np.cos(season_time * 2 * np.pi)
    falling = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, rising, falling)


def seasonality(time, period, amplitude=1, phase=0):
    """Repeat `seasonal_pattern` every `period` steps, scaled by `amplitude`
    and shifted by `phase`."""
    position_in_cycle = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position_in_cycle)
baseline = 10
amplitude = 40
series = seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
Now let's create a time series with both trend and seasonality:
```
slope = 0.05
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
# Noise
In practice few real-life time series have such a smooth signal. They usually have some noise, and the signal-to-noise ratio can sometimes be very low. Let's generate some white noise:
```
def white_noise(time, noise_level=1, seed=None):
    """Gaussian noise with standard deviation `noise_level`, one sample per
    entry of `time`; reproducible when `seed` is given."""
    generator = np.random.RandomState(seed)
    return generator.randn(len(time)) * noise_level
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(time, noise)
plt.show()
```
Now let's add this white noise to the time series:
```
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000.
```
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
def autocorrelation(time, amplitude, seed=None):
    """Noise series with autoregressive feedback at lags 50 and 33.

    The first 50 samples are a burn-in region pinned to 100 and dropped
    from the returned array; the result is scaled by `amplitude`.
    """
    rnd = np.random.RandomState(seed)
    phi_lag50, phi_lag33 = 0.5, -0.1
    n_burn = 50
    ar = rnd.randn(len(time) + n_burn)
    ar[:n_burn] = 100
    for step in range(n_burn, len(time) + n_burn):
        # two separate updates, matching the original accumulation order
        ar[step] += phi_lag50 * ar[step - 50]
        ar[step] += phi_lag33 * ar[step - 33]
    return ar[n_burn:] * amplitude
series = autocorrelation(time, 10, seed=42)
plot_series(time[:200], series[:200])
plt.show()
def autocorrelation(time, amplitude, seed=None):
    """AR(1) series: each sample keeps 0.8 of its predecessor plus fresh
    Gaussian noise, scaled by `amplitude`.  One warm-up sample is dropped."""
    phi = 0.8
    noise = np.random.RandomState(seed).randn(len(time) + 1)
    for step in range(1, len(noise)):
        noise[step] += phi * noise[step - 1]
    return noise[1:] * amplitude
series = autocorrelation(time, 10, seed=42)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
series2 = autocorrelation(time, 5, seed=42) + seasonality(time, period=50, amplitude=2) + trend(time, -1) + 550
series[200:] = series2[200:]
#series += noise(time, 30)
plot_series(time[:300], series[:300])
plt.show()
def impulses(time, num_impulses, amplitude=1, seed=None):
    """Sparse series with `num_impulses` random positive spikes.

    Bug fix: the index draw previously used a hard-coded ``size=10`` and
    silently ignored ``num_impulses``; it now honours the parameter (every
    call in this notebook passes 10, so existing results are unchanged).

    Indices are drawn with replacement, so coinciding impulses accumulate.
    """
    rnd = np.random.RandomState(seed)
    impulse_indices = rnd.randint(len(time), size=num_impulses)
    series = np.zeros(len(time))
    for index in impulse_indices:
        series[index] += rnd.rand() * amplitude
    return series
series = impulses(time, 10, seed=42)
plot_series(time, series)
plt.show()
def autocorrelation(source, φs):
    """Apply autoregressive feedback to a copy of `source`.

    `φs` maps lag -> coefficient; each output sample accumulates
    ``φ * ar[step - lag]`` for every lag that fits.  The input array is
    not modified.

    Removed: an unused ``max_lag`` local and the unused ``value`` binding
    from ``enumerate``.

    NOTE(review): the guard is ``step - lag > 0`` (strict), so the sample at
    ``step == lag`` receives no feedback from index 0 — preserved as-is,
    though ``>= 0`` may have been intended.
    """
    ar = source.copy()
    for step in range(len(source)):
        for lag, φ in φs.items():
            if step - lag > 0:
                ar[step] += φ * ar[step - lag]
    return ar
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.99})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.70, 50: 0.2})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
series_diff1 = series[1:] - series[:-1]
plot_series(time[1:], series_diff1)
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(series, order=(5, 1, 0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
df = pd.read_csv("sunspots.csv", parse_dates=["Date"], index_col="Date")
series = df["Monthly Mean Total Sunspot Number"].asfreq("1M")
series.head()
series.plot(figsize=(12, 5))
series["1995-01-01":].plot()
series.diff(1).plot()
plt.axis([0, 100, -50, 50])
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
autocorrelation_plot(series.diff(1)[1:])
autocorrelation_plot(series.diff(1)[1:].diff(11 * 12)[11*12+1:])
plt.axis([0, 500, -0.1, 0.1])
autocorrelation_plot(series.diff(1)[1:])
plt.axis([0, 50, -0.1, 0.1])
116.7 - 104.3
[series.autocorr(lag) for lag in range(1, 50)]
# NOTE(review): the two lines below were a pasted excerpt of the
# `pandas.read_csv` signature and its one-line summary — not executable code
# (`filepath_or_buffer` is undefined and the bare sentence is a syntax
# error inside a code cell).  Kept for reference as comments:
# pd.read_csv(filepath_or_buffer, sep=',', delimiter=None, header='infer', ...)
# "Read a comma-separated values (csv) file into DataFrame."
from pandas.plotting import autocorrelation_plot
series_diff = series
for lag in range(50):
series_diff = series_diff[1:] - series_diff[:-1]
autocorrelation_plot(series_diff)
import pandas as pd
series_diff1 = pd.Series(series[1:] - series[:-1])
autocorrs = [series_diff1.autocorr(lag) for lag in range(1, 60)]
plt.plot(autocorrs)
plt.show()
```
| github_jupyter |
# `rlplay`-ing around with Policy Gradients
```
import torch
import numpy
import matplotlib.pyplot as plt
%matplotlib inline
import gym
# hotfix for gym's unresponsive viz (spawns gl threads!)
import rlplay.utils.integration.gym
```
See example.ipynb for the overview of `rlplay`
<br>
## Sophisticated CartPole with PG
### The environment
The environment factory
```
from rlplay.zoo.env import NarrowPath
class FP32Observation(gym.ObservationWrapper):
    """Cast observations to float32 and zero out the first entry
    (the cart position), hiding it from the agent."""

    def observation(self, observation):
        masked = observation.astype(numpy.float32)
        masked[0] = 0.  # mask the position info
        return masked

    # commented-out reward shaping retained from the notebook:
    # def step(self, action):
    #     obs, reward, done, info = super().step(action)
    #     reward -= abs(obs[1]) / 10  # punish for non-zero speed
    #     return obs, reward, done, info
class OneHotObservation(gym.ObservationWrapper):
    """Encode a discrete observation as a one-hot float32 vector."""

    def observation(self, observation):
        n_states = self.env.observation_space.n
        # row 0 of a 1 x n matrix whose single 1 sits in column `observation`
        return numpy.eye(1, n_states, k=observation, dtype=numpy.float32)[0]
def base_factory(seed=None):
    """Create the training environment: an unwrapped CartPole-v0 with
    observations cast to float32 and the position entry masked.

    NOTE(review): `seed` is currently ignored — confirm whether callers
    expect it to seed the environment.
    """
    # return gym.make("LunarLander-v2")
    return FP32Observation(gym.make("CartPole-v0").unwrapped)
    # return OneHotObservation(NarrowPath())
```
<br>
### the Actor
A procedure and a layer that convert the input integer data into its
little-endian binary representation as float $\{0, 1\}^m$ vectors.
```
def onehotbits(input, n_bits=63, dtype=torch.float):
    """Expand integer tensors into little-endian {0, 1} bit vectors.

    Adds one trailing dimension of size `n_bits`; slot ``j`` holds bit ``j``
    (least-significant first) of the corresponding input value.
    """
    assert not input.dtype.is_floating_point
    assert 0 < n_bits < 64  # torch.int64 is signed, so at most 63 usable bits

    # mask each value against the powers of two to pick out the set bits
    powers = torch.tensor([1 << j for j in range(n_bits)]).to(input.device)
    bits = input.unsqueeze(-1).bitwise_and(powers).to(bool)

    # bool -> requested floating dtype gives the one-hot encoding
    return bits.to(dtype)


class OneHotBits(torch.nn.Module):
    """Module wrapper around `onehotbits` with a fixed width and dtype."""

    def __init__(self, n_bits=63, dtype=torch.float):
        assert 1 <= n_bits < 64
        super().__init__()
        self.n_bits, self.dtype = n_bits, dtype

    def forward(self, input):
        return onehotbits(input, n_bits=self.n_bits, dtype=self.dtype)
```
A special module dictionary, which applies itself to the input dict of tensors
```
from typing import Optional, Mapping
from torch.nn import Module, ModuleDict as BaseModuleDict
class ModuleDict(BaseModuleDict):
    """A ModuleDict that can be called directly on a dict of tensors.

    Each submodule is applied to ``input[key]`` and the results are
    concatenated along ``dim``, in the order the modules were declared.
    """

    def __init__(
        self,
        modules: Optional[Mapping[str, Module]] = None,
        dim: Optional[int] = -1,
    ) -> None:
        super().__init__(modules)
        self.dim = dim

    def forward(self, input):
        # `self.items()` iterates in declaration order, which fixes the
        # concatenation layout
        pieces = [module(input[key]) for key, module in self.items()]
        return torch.cat(pieces, dim=self.dim)
```
An $\ell_2$ normalization layer.
```
from torch.nn.functional import normalize
class Normalize(torch.nn.Module):
    """l2-normalize the input along `dim` (thin wrapper over
    ``torch.nn.functional.normalize`` with its default p=2)."""

    def __init__(self, dim=-1):
        super().__init__()
        self.dim = dim

    def forward(self, input):
        return normalize(input, dim=self.dim)
```
A more sophisticated policy learner
```
from rlplay.engine import BaseActorModule
from rlplay.utils.common import multinomial
from torch.nn import Sequential, Linear, ReLU, LogSoftmax
class CartPoleActor(BaseActorModule):
    """Feed-forward actor-critic for CartPole.

    Two independent MLPs: `policy` emits log-probabilities over the two
    actions, `baseline` emits a scalar state-value estimate.  `forward`
    returns ``(actions, (), dict(value=..., logits=...))`` — the empty tuple
    is the (absent) recurrent state.

    NOTE(review): the `lstm` argument is validated but otherwise unused
    here — `use_lstm`/`use_cudnn` are hard-wired to False.
    """
    def __init__(self, lstm='none'):
        assert lstm in ('none', 'loop', 'cudnn')
        super().__init__()
        self.use_lstm = self.use_cudnn = False
        # blend the policy with a uniform distribution, determined by
        # the exploration epsilon. We update it in the actor clones via a buffer
        # self.register_buffer('epsilon', torch.tensor(epsilon))
        # XXX isn't the stochastic policy random enough by itself?
        self.baseline = Sequential(
            Linear(4, 20),
            ReLU(),
            Linear(20, 1),
        )
        self.policy = Sequential(
            Linear(4, 20),
            ReLU(),
            Linear(20, 2),
            LogSoftmax(dim=-1),
        )

    def forward(self, obs, act, rew, fin, *, hx=None, stepno=None, virtual=False):
        # value must not have any trailing dims, i.e. T x B
        logits = self.policy(obs)
        value = self.baseline(obs).squeeze(-1)
        if not self.training:
            # greedy action at evaluation time
            actions = logits.argmax(dim=-1)
        else:
            # sample from the current stochastic policy
            actions = multinomial(logits.detach().exp())
        return actions, (), dict(value=value, logits=logits)
```
<br>
### PPO/GAE A2C and V-trace A2C algos
Service functions for the algorithms
```
from plyr import apply, suply, xgetitem
def timeshift(state, *, shift=1):
    """Split a nested rollout fragment into aligned current/next views.

    Applies ``[:-shift]`` and ``[shift:]`` slices to every leaf of the
    nested `state` container via `suply`, so ``curr[t]`` and ``next[t]``
    line up `shift` steps apart.

    NOTE(review): the local `next` shadows the builtin; harmless here.
    """
    # use `xgetitem` to let None through
    # XXX `curr[t]` = (x_t, a_{t-1}, r_t, d_t), t=0..T-H
    curr = suply(xgetitem, state, index=slice(None, -shift))
    # XXX `next[t]` = (x_{t+H}, a_{t+H-1}, r_{t+H}, d_{t+H}), t=0..T-H
    next = suply(xgetitem, state, index=slice(shift, None))
    return curr, next
```
The Advantage Actor-Critic algo
```
import torch.nn.functional as F
from rlplay.algo.returns import pyt_gae, pyt_returns, pyt_multistep
# @torch.enable_grad()
def a2c(
    fragment, module, *, gamma=0.99, gae=1., ppo=0.,
    C_entropy=1e-2, C_value=0.5, c_rho=1.0, multistep=0,
):
    r"""The Advantage Actor-Critic algorithm (importance-weighted off-policy).

    Close to REINFORCE, but uses separate baseline value estimate to compute
    advantages in the policy gradient:
    $$
    \nabla_\theta J(s_t)
        = \mathbb{E}_{a \sim \beta(a\mid s_t)}
            \frac{\pi(a\mid s_t)}{\beta(a\mid s_t)}
            \bigl( r_{t+1} + \gamma G_{t+1} - v(s_t) \bigr)
            \nabla_\theta \log \pi(a\mid s_t)
        \,, $$
    where the critic estimates the state's value under the current policy
    $$
    v(s_t)
        \approx \mathbb{E}_{\pi_{\geq t}}
            G_t(a_t, s_{t+1}, a_{t+1}, ... \mid s_t)
        \,. $$

    Returns ``(objective, stats)``: `objective` is the scalar to minimize
    (entropy and value terms weighted by `C_entropy`/`C_value`, minus the
    policy surrogate), and `stats` is a dict of floats
    (``entropy``, ``policy_score``, ``value_loss``) for logging.
    """
    state, state_next = timeshift(fragment.state)

    # REACT: (state[t], h_t) \to (\hat{a}_t, h_{t+1}, \hat{A}_t)
    _, _, info = module(
        state.obs, state.act, state.rew, state.fin,
        hx=fragment.hx, stepno=state.stepno)
    # info['value'] = V(`.state[t]`)
    #   <<-->> v(x_t)
    #          \approx \mathbb{E}( G_t \mid x_t)
    #          \approx \mathbb{E}( r_{t+1} + \gamma r_{t+2} + ... \mid x_t)
    #   <<-->> npv(`.state[t+1:]`)
    # info['logits'] = \log \pi(... | .state[t] )
    #   <<-->> \log \pi( \cdot \mid x_t)

    # `.actor[t]` is actor's extra info in reaction to `.state[t]`, t=0..T
    bootstrap = fragment.actor['value'][-1]
    # `bootstrap` <<-->> `.value[-1]` = V(`.state[-1]`)

    # XXX post-mul by `1 - \gamma` fails to train, but seems appropriate
    # for the continuation/survival interpretation of the discount factor.
    # <<-- but who says this is a good interpretation?
    # ret.mul_(1 - gamma)

    # \pi is the target policy, \mu is the behaviour policy
    # NOTE(review): `log_pi` spans the T clipped steps while
    # `fragment.actor['logits']` covers the whole fragment — confirm both
    # gather cleanly against `act` below.
    log_pi, log_mu = info['logits'], fragment.actor['logits']

    # Future rewards after `.state[t]` are recorded in `.state[t+1:]`
    # G_t <<-->> ret[t] = rew[t] + gamma * (1 - fin[t]) * (ret[t+1] or bootstrap)
    if multistep > 0:
        ret = pyt_multistep(state_next.rew, state_next.fin,
                            info['value'].detach(),
                            gamma=gamma, n_lookahead=multistep,
                            bootstrap=bootstrap.unsqueeze(0))
    else:
        ret = pyt_returns(state_next.rew, state_next.fin,
                          gamma=gamma, bootstrap=bootstrap)

    # the critic's mse score (min)
    #  \frac1{2 T} \sum_t (G_t - v(s_t))^2
    value = info['value']
    critic_mse = F.mse_loss(value, ret, reduction='mean') / 2
    # v(x_t) \approx \mathbb{E}( G_t \mid x_t )
    #        \approx G_t (one-point estimate)
    #   <<-->> ret[t]

    # compute the advantages $G_t - v(s_t)$
    # or GAE [Schulman et al. (2016)](http://arxiv.org/abs/1506.02438)
    # XXX sec 6.1 in the GAE paper uses V from the `current` value
    #  network, not the one used during the rollout.
    # value = fragment.actor['value'][:-1]
    if gae < 1.:
        # the positional arguments are $r_{t+1}$, $d_{t+1}$, and $v(s_t)$,
        #  respectively, for $t=0..T-1$. The bootstrap is $v(S_T)$ from
        #  the rollout.
        adv = pyt_gae(state_next.rew, state_next.fin, value.detach(),
                      gamma=gamma, C=gae, bootstrap=bootstrap)
    else:
        adv = ret.sub(value.detach())
    # adv.sub_(adv.mean())
    # adv.div_(adv.std(dim=0))

    # Assume `.act` is unstructured: `act[t]` = a_{t+1} -->> T x B x 1
    act = state_next.act.unsqueeze(-1)  # actions taken during the rollout

    # the importance weights: log-probs of the taken actions under both policies
    log_pi_a = log_pi.gather(-1, act).squeeze(-1)
    log_mu_a = log_mu.gather(-1, act).squeeze(-1)

    # the policy surrogate score (max)
    if ppo > 0:
        # the PPO loss is the properly clipped rho times the advantage
        ratio = log_pi_a.sub(log_mu_a).exp()
        a2c_score = torch.min(
            ratio * adv,
            ratio.clamp(1. - ppo, 1. + ppo) * adv
        ).mean()
    else:
        # \exp{- ( \log \mu - \log \pi )}, evaluated at $a_t \mid z_t$
        # (in-place ops mutate only the gathered copy `log_mu_a`)
        rho = log_mu_a.sub_(log_pi_a.detach()).neg_().exp_().clamp_(max=c_rho)
        # \frac1T \sum_t \rho_t (G_t - v_t) \log \pi(a_t \mid z_t)
        a2c_score = log_pi_a.mul(adv.mul_(rho)).mean()

    # the policy's neg-entropy score (min)
    #  - H(\pi(•\mid s)) = - (-1) \sum_a \pi(a\mid s) \log \pi(a\mid s)
    # clamp keeps 0 * log(0) from producing NaNs for zero-probability actions
    f_min = torch.finfo(log_pi.dtype).min
    negentropy = log_pi.exp().mul(log_pi.clamp(min=f_min)).sum(dim=-1).mean()

    # breakpoint()
    # maximize the entropy and the reinforce score, minimize the critic loss
    objective = C_entropy * negentropy + C_value * critic_mse - a2c_score
    return objective.mean(), dict(
        entropy=-float(negentropy),
        policy_score=float(a2c_score),
        value_loss=float(critic_mse),
    )
```
A few things to note:
* a2c is on-policy and no importance weight could change this!
* L72-80: [stable_baselines3](./common/on_policy_algorithm.py#L183-192)
and [rlpyt](./algos/pg/base.py#L49-58) use rollout data, when computing the GAE
* L61-62: [stable_baselines3](./stable_baselines3/a2c/a2c.py#L147-156) uses `vf_coef=0.5`,
and **unhalved** `F.mse-loss`, while [rlpyt](./rlpyt/rlpyt/algos/pg/a2c.py#L93-94)
uses `value_loss_coeff=0.5`, and **halved** $\ell_2$ loss!
The off-policy actor-critic algorithm for the learner, called V-trace,
from [Espeholt et al. (2018)](http://proceedings.mlr.press/v80/espeholt18a.html).
```
from rlplay.algo.returns import pyt_vtrace
# @torch.enable_grad()
def vtrace(fragment, module, *, gamma=0.99, C_entropy=1e-2, C_value=0.5):
    """Off-policy actor-critic with V-trace targets (Espeholt et al., 2018).

    Returns ``(objective, stats)`` in the same format as `a2c`: a scalar to
    minimize, and a dict of floats (``entropy``, ``policy_score``,
    ``value_loss``) for logging.
    """
    # REACT: (state[t], h_t) \to (\hat{a}_t, h_{t+1}, \hat{A}_t)
    # NOTE: unlike `a2c`, the module is run over the FULL fragment here.
    _, _, info = module(
        fragment.state.obs, fragment.state.act,
        fragment.state.rew, fragment.state.fin,
        hx=fragment.hx, stepno=fragment.state.stepno)

    # Assume `.act` is unstructured: `act[t]` = a_{t+1} -->> T x B x 1
    state, state_next = timeshift(fragment.state)
    act = state_next.act.unsqueeze(-1)  # actions taken during the rollout

    # \pi is the target policy, \mu is the behaviour policy (T+1 x B x ...)
    # NOTE(review): `act` spans T steps while the logits span T+1 — confirm
    # the gather below aligns as intended against the rollout shapes.
    log_pi, log_mu = info['logits'], fragment.actor['logits']

    # the importance weights (in-place ops mutate only the gathered copies)
    log_pi_a = log_pi.gather(-1, act).squeeze(-1)
    log_mu_a = log_mu.gather(-1, act).squeeze(-1)
    log_rho = log_mu_a.sub_(log_pi_a.detach()).neg_()

    # `.actor[t]` is actor's extra info in reaction to `.state[t]`, t=0..T
    val = fragment.actor['value']  # info['value'].detach()
    # XXX Although Espeholt et al. (2018, sec.~4.2) use the value estimate of
    # the rollout policy for the V-trace target in eq. (1), it makes more sense
    # to use the estimates of the current policy, as has been done in monobeast.
    # https://hackernoon.com/intuitive-rl-intro-to-advantage-actor-critic-a2c-4ff545978752
    val, bootstrap = val[:-1], val[-1]
    target = pyt_vtrace(state_next.rew, state_next.fin, val,
                        gamma=gamma, bootstrap=bootstrap,
                        omega=log_rho, r_bar=1., c_bar=1.)

    # the critic's mse score against v-trace targets (min)
    critic_mse = F.mse_loss(info['value'][:-1], target, reduction='mean') / 2

    # \delta_t = r_{t+1} + \gamma \nu(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
    adv = torch.empty_like(state_next.rew).copy_(bootstrap)
    adv[:-1].copy_(target[1:])  # copy the v-trace targets \nu(s_{t+1})
    adv.masked_fill_(state_next.fin, 0.).mul_(gamma)
    adv.add_(state_next.rew).sub_(val)
    # XXX note `val` here, not `target`! see sec.~4.2 in (Espeholt et al.; 2018)

    # the policy surrogate score (max)
    # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
    rho = log_rho.exp_().clamp_(max=1.)
    vtrace_score = log_pi_a.mul(adv.mul_(rho)).mean()

    # the policy's neg-entropy score (min); clamp avoids 0 * log(0) NaNs
    f_min = torch.finfo(log_pi.dtype).min
    negentropy = log_pi.exp().mul(log_pi.clamp(min=f_min)).sum(dim=-1).mean()

    # maximize the entropy and the reinforce score, minimize the critic loss
    objective = C_entropy * negentropy + C_value * critic_mse - vtrace_score
    return objective.mean(), dict(
        entropy=-float(negentropy),
        policy_score=float(vtrace_score),
        value_loss=float(critic_mse),
    )
```
<br>
### Run!
Initialize the learner and the environment factories
```
from functools import partial
factory_eval = partial(base_factory)
factory = partial(base_factory)
learner = CartPoleActor(lstm='none')
learner.train()
device_ = torch.device('cpu') # torch.device('cuda:0')
learner.to(device=device_)
optim = torch.optim.Adam(learner.parameters(), lr=1e-3)
```
Initialize the sampler
```
T, B = 25, 4
sticky = learner.use_cudnn
```
```
from rlplay.engine.rollout import multi
batchit = multi.rollout(
factory,
learner,
n_steps=T,
n_actors=6,
n_per_actor=B,
n_buffers=15,
n_per_batch=2,
sticky=sticky,
pinned=False,
clone=True,
close=False,
device=device_,
start_method='fork', # fork in notebook for macos, spawn in linux
)
```
A generator of evaluation rewards
```
from rlplay.engine.rollout.evaluate import evaluate
test_it = evaluate(factory_eval, learner, n_envs=4, n_steps=500,
clone=False, device=device_, start_method='fork')
```
Implement your favourite training method
```
n_epochs = 100
use_vtrace = True
# gamma, gae, ppo = 0.99, 0.92, 0.2
gamma, gae, ppo, multistep = 0.99, 1., 0.2, 0
import tqdm
from torch.nn.utils import clip_grad_norm_
torch.set_num_threads(1)
losses, rewards = [], []
for epoch in tqdm.tqdm(range(n_epochs)):
for j, batch in zip(range(100), batchit):
if use_vtrace:
loss, info = vtrace(batch, learner, gamma=gamma)
else:
loss, info = a2c(batch, learner, gamma=gamma, gae=gae, ppo=ppo, multistep=multistep)
optim.zero_grad()
loss.backward()
grad = clip_grad_norm_(learner.parameters(), max_norm=1.0)
optim.step()
losses.append(dict(
loss=float(loss), grad=float(grad), **info
))
# fetch the evaluation results lagged by one inner loop!
rewards.append(next(test_it))
# close the generators
batchit.close()
test_it.close()
```
<br>
```
def collate(records):
    """Merge an iterable of dicts into one dict of lists.

    Values sharing a key are appended in encounter order; records need not
    have identical key sets.  Removed: an unused ``n_records`` counter.
    """
    out = {}
    for record in records:
        for key, value in record.items():
            out.setdefault(key, []).append(value)
    return out
data = {k: numpy.array(v) for k, v in collate(losses).items()}
if 'value_loss' in data:
plt.semilogy(data['value_loss'])
if 'entropy' in data:
plt.plot(data['entropy'])
if 'policy_score' in data:
plt.plot(data['policy_score'])
plt.semilogy(data['grad'])
rewards = numpy.stack(rewards, axis=0)
rewards
m, s = numpy.median(rewards, axis=-1), rewards.std(axis=-1)
fi, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(numpy.mean(rewards, axis=-1))
ax.plot(numpy.median(rewards, axis=-1))
ax.plot(numpy.min(rewards, axis=-1))
ax.plot(numpy.std(rewards, axis=-1))
# ax.plot(m+s * 1.96)
# ax.plot(m-s * 1.96)
plt.show()
```
<br>
The ultimate evaluation run
```
from rlplay.engine import core
with factory_eval() as env:
learner.eval()
eval_rewards, info = core.evaluate([
env
], learner, render=True, n_steps=1e4, device=device_)
print(sum(eval_rewards))
```
<br>
Let's analyze the performance
```
from rlplay.algo.returns import npy_returns, npy_deltas
td_target = eval_rewards + gamma * info['value'][1:]
td_error = td_target - info['value'][:-1]
# td_error = npy_deltas(
# eval_rewards, numpy.zeros_like(eval_rewards, dtype=bool), info['value'][:-1],
# gamma=gamma, bootstrap=info['value'][-1])
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.semilogy(abs(td_error) / abs(td_target))
ax.set_title('relative td(1)-error');
from rlplay.algo.returns import npy_returns, npy_deltas
# plt.plot(
# npy_returns(eval_rewards, numpy.zeros_like(eval_rewards, dtype=bool),
# gamma=gamma, bootstrap=info['value'][-1]))
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(info['value'])
ax.axhline(1 / (1 - gamma), c='k', alpha=0.5, lw=1);
import math
from scipy.special import softmax, expit, entr
*head, n_actions = info['logits'].shape
proba = softmax(info['logits'], axis=-1)
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(entr(proba).sum(-1)[:, 0])
ax.axhline(math.log(n_actions), c='k', alpha=0.5, lw=1);
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.hist(info['logits'][..., 1] - info['logits'][..., 0], bins=51); # log-ratio
```
<br>
```
assert False
```
<br>
### Other agents
An agent that uses other inputs, beside `obs`.
```
class CartPoleActor(BaseActorModule):
    """Actor-critic that consumes obs, previous action and step counter
    (routed through the dict-applying `ModuleDict`), with an optional LSTM
    core ('none' | 'loop' for a manual per-step loop | 'cudnn' for the
    packed-sequence fast path).

    NOTE(review): `Embedding`, `LSTM`, `pack_padded_sequence` and
    `pad_packed_sequence` are used below but their imports are not visible
    in this excerpt — confirm they are imported elsewhere in the notebook.
    """
    def __init__(self, epsilon=0.1, lstm='none'):
        assert lstm in ('none', 'loop', 'cudnn')
        super().__init__()
        self.use_lstm = (lstm != 'none')
        self.use_cudnn = (lstm == 'cudnn')
        # for updating the exploration epsilon in the actor clones
        self.register_buffer('epsilon', torch.tensor(epsilon))
        # the features
        # NOTE(review): stepno=0 yields a Linear(32, 0) branch contributing
        # nothing to the concatenation — confirm this is intentional.
        n_output_dim = dict(obs=64, act=8, stepno=0)
        self.features = torch.nn.Sequential(
            ModuleDict(dict(
                obs=Linear(4, n_output_dim['obs']),
                act=Embedding(2, n_output_dim['act']),
                stepno=Sequential(
                    OneHotBits(32),
                    Linear(32, n_output_dim['stepno']),
                ),
            )),
            ReLU(),
        )
        # the core
        n_features = sum(n_output_dim.values())
        if self.use_lstm:
            self.core = LSTM(n_features, 64, 1)
        else:
            self.core = Sequential(
                Linear(n_features, 64, bias=True),
                ReLU(),
            )
        # the rest of the actor's model
        self.baseline = Linear(64, 1)
        self.policy = Sequential(
            Linear(64, 2),
            LogSoftmax(dim=-1),
        )

    def forward(self, obs, act, rew, fin, *, hx=None, stepno=None, virtual=False):
        # Everything is [T x B x ...]
        # `locals()` hands the named inputs (obs/act/stepno, ...) to the
        # dict-applying ModuleDict, which picks the keys it was built with
        input = self.features(locals())
        # `input` is T x B x F, `hx` is either `None`, or a proper recurrent state
        n_steps, n_envs, *_ = fin.shape
        if not self.use_lstm:
            # update `hx` into an empty container
            out, hx = self.core(input), ()
        elif not self.use_cudnn:
            # manual time loop: reset the recurrent state at episode ends
            outputs = []
            for x, m in zip(input.unsqueeze(1), ~fin.unsqueeze(-1)):
                # `m` indicates if NO reset took place, otherwise
                # multiply by zero to stop the grads
                if hx is not None:
                    hx = suply(m.mul, hx)
                # one LSTM step [1 x B x ...]
                output, hx = self.core(x, hx)
                outputs.append(output)
            # compile the output
            out = torch.cat(outputs, dim=0)
        else:
            # sequence padding (MUST have sampling with `sticky=True`)
            if n_steps > 1:
                lengths = 1 + (~fin[1:]).sum(0).cpu()
                input = pack_padded_sequence(input, lengths, enforce_sorted=False)
            out, hx = self.core(input, hx)
            if n_steps > 1:
                out, lens = pad_packed_sequence(
                    out, batch_first=False, total_length=n_steps)
        # apply relu after the core and get the policy
        logits = self.policy(out)
        # value must not have any trailing dims, i.e. T x B
        value = self.baseline(out).squeeze(-1)
        if not self.training:
            actions = logits.argmax(dim=-1)
        else:
            # blend the policy with a uniform distribution
            prob = logits.detach().exp().mul_(1 - self.epsilon)
            prob.add_(self.epsilon / logits.shape[-1])
            actions = multinomial(prob)
        return actions, hx, dict(value=value, logits=logits)
```
A non-recurrent actor with features shared between the policy and the baseline.
```
class CartPoleActor(BaseActorModule):
    """Feed-forward CartPole actor whose policy and baseline heads share a
    common feature trunk.

    During training the greedy policy is blended with a uniform distribution
    controlled by `epsilon`; in eval mode actions are taken greedily.
    """

    def __init__(self, epsilon=0.1, lstm='none'):
        assert lstm in ('none', 'loop', 'cudnn')
        super().__init__()

        # this variant is always non-recurrent, regardless of `lstm`
        self.use_lstm = self.use_cudnn = False

        # a buffer, so actor clones can receive epsilon updates in-place
        self.register_buffer('epsilon', torch.tensor(epsilon))

        # shared trunk feeding both heads
        self.features = Sequential(
            Linear(4, 20),
            ReLU(),
        )
        self.baseline = Linear(20, 1)
        self.policy = Sequential(
            Linear(20, 2),
            LogSoftmax(dim=-1),
        )

    def forward(self, obs, act, rew, fin, *, hx=None, stepno=None, virtual=False):
        hidden = self.features(obs)

        # log-probs over actions, and a T x B baseline (no trailing dims)
        logits = self.policy(hidden)
        value = self.baseline(hidden).squeeze(-1)

        if self.training:
            # epsilon-blend: (1 - eps) * pi + eps * uniform, then sample
            prob = logits.detach().exp().mul_(1 - self.epsilon)
            prob.add_(self.epsilon / logits.shape[-1])
            actions = multinomial(prob)
        else:
            actions = logits.argmax(dim=-1)

        return actions, (), dict(value=value, logits=logits)
```
<br>
```
# stepno = batch.state.stepno
# Probe what the learner's `stepno` feature pathway computes for the first
# 256 step counters (no gradients needed for inspection).
stepno = torch.arange(256)
with torch.no_grad():
    out = learner.features[0]['stepno'](stepno)
    # project through the slice of the core that acts on the last 8
    # (stepno-derived) features
    out = F.linear(F.relu(out), learner.core[1].weight[:, -8:],
                   bias=learner.core[1].bias)
#     out = F.linear(F.relu(out), learner.core.weight_ih_l0[:, -8:],
#                    bias=learner.core.bias_ih_l0)
#     out = F.relu(out)

fig, axes = plt.subplots(3, 3, figsize=(8, 8), dpi=200,
                         sharex=True, sharey=True)
for j, ax in zip(range(out.shape[1]), axes.flat):
    ax.plot(out[:, j], lw=1)

fig.tight_layout(pad=0, h_pad=0, w_pad=0)

# magnitude of the core weights acting on the stepno features
with torch.no_grad():
    plt.imshow(abs(learner.core[1].weight[:, -8:]).T)

# NOTE(review): accessed as attribute here but as ['stepno'] above —
# presumably both resolve to the same ModuleDict entry; confirm.
lin = learner.features.stepno[1]
with torch.no_grad():
    plt.imshow(abs(lin.weight))
```
| github_jupyter |
```
import numpy as np
from LSTM_Learning_Lib import Model
from FeatureSetCalculation_Lib import ComputeMultiLevelLogsig1dBM
import matplotlib.pyplot as plt
import time
from sklearn.model_selection import ParameterGrid
from sklearn import preprocessing
import random
from GetSeqMnistData import GetSeqPenandCalLogSig, GetSeqPenDigit,GetSeqPenNormCalLogSig,GetSeqPenNorm
```
## Hyperparameters
```
# Parameters grid
# deg_of_sig selects the feature set in the loop below: 0 = raw padded
# data, 1 = folded raw data, >= 2 = log-signature of that degree.
param_grid = {'deg_of_sig': [2,3,4,5,6], 'number_of_segment': [8],
              'learning_rate': [0.001]}
Param = list(ParameterGrid(param_grid))

# Parameters
training_iters = 60   # training epochs per configuration
batch_size = 200

# accumulators, one entry per configuration
sig_comp_time = []    # feature-set computation wall time
test_result = []      # final testing loss
test_time = []        # training/testing wall time
```
## train RNN with different feature sets
```
# Train one RNN model per hyperparameter configuration, timing the
# feature-set construction separately from the training itself.
# np.size on a list of dicts equals len(Param).
for i in range(np.size(Param)):
    # Raw data feature set generator
    if Param[i]['deg_of_sig']==0:
        start = time.time()
        train_X, train_Y = GetSeqPenDigit('pendigits-orig.tra.txt')
        test_X, test_Y = GetSeqPenDigit('pendigits-orig.tes.txt')
        trainsampleClip = len(train_Y)
        testsampleClip = len(test_Y)
        # longest sequence across both splits determines the padded length
        max_interval = 0
        for j in range(trainsampleClip):
            if max_interval < len(train_X[j]):
                max_interval = len(train_X[j])
        for j in range(testsampleClip):
            if max_interval < len(test_X[j]):
                max_interval = len(test_X[j])
        # per-segment input width, rounded up to an even number
        n_input = int(max_interval/Param[i]['number_of_segment'])+1
        if n_input % 2 != 0:
            n_input += 1
        # zero-pad every sample to the common length, then reshape into
        # [samples, segments, n_input]
        train_data = np.zeros((trainsampleClip, n_input*Param[i]['number_of_segment']))
        test_data = np.zeros((testsampleClip, n_input*Param[i]['number_of_segment']))
        for sn in range(trainsampleClip):
            tmplen = len(train_X[sn])
            train_data[sn, :tmplen] = train_X[sn][:]
        for sn in range(testsampleClip):
            tmplen = len(test_X[sn])
            test_data[sn, :tmplen] = test_X[sn][:]
        train_data = train_data.reshape(trainsampleClip, Param[i]['number_of_segment'], n_input)
        test_data = test_data.reshape(testsampleClip, Param[i]['number_of_segment'], n_input)
        elapsed = time.time()-start
        sig_comp_time.append(elapsed)
        model3 = Model( Param[i]['learning_rate'], training_iters, batch_size, n_input, Param[i]['number_of_segment'], Param[i]['deg_of_sig'], train_data, train_Y, test_data, test_Y)
    # Folded raw data feature set generator
    elif Param[i]['deg_of_sig']==1:
        start = time.time()
        train_X, train_Y = GetSeqPenDigit('pendigits-orig.tra.txt')
        test_X, test_Y = GetSeqPenDigit('pendigits-orig.tes.txt')
        trainsampleClip = len(train_Y)
        testsampleClip = len(test_Y)
        max_interval = 0
        # NOTE(review): train_increment / test_increment are built but
        # never used below — confirm whether this is leftover code.
        train_increment = [[] for k in range(trainsampleClip)]
        test_increment = [[] for k in range(testsampleClip)]
        for x in train_X:
            if max_interval < len(x):
                max_interval = len(x)
        for x in test_X:
            if max_interval < len(x):
                max_interval = len(x)
        print(max_interval)
        train_data = np.zeros((trainsampleClip, max_interval))
        test_data = np.zeros((testsampleClip, max_interval))
        for sn in range(trainsampleClip):
            tmplen = len(train_X[sn])
            train_data[sn, :tmplen] = train_X[sn][:]
        for sn in range(testsampleClip):
            tmplen = len(test_X[sn])
            test_data[sn, :tmplen] = test_X[sn][:]
        # fold the flat sequence into (x, y) coordinate pairs
        n_input = 2
        train_data = train_data.reshape(trainsampleClip, int(max_interval/2), 2)
        test_data = test_data.reshape(testsampleClip, int(max_interval/2), 2)
        print(train_data[0])
        elapsed = time.time()-start
        sig_comp_time.append(elapsed)
        model3 = Model( Param[i]['learning_rate'], training_iters, batch_size, n_input, int(max_interval/2), Param[i]['deg_of_sig'], train_data, train_Y, test_data, test_Y)
    # Logsig feature set generator
    else:
        start = time.time()
        X_logsig_start, Y = GetSeqPenandCalLogSig(Param[i]['deg_of_sig'], Param[i]['number_of_segment'],'pendigits-orig.tra.txt')
        test_X_logsig_start, test_Y = GetSeqPenandCalLogSig(Param[i]['deg_of_sig'], Param[i]['number_of_segment'],'pendigits-orig.tes.txt')
        print(X_logsig_start.shape)
        print(test_X_logsig_start.shape)
        n_input = np.shape(X_logsig_start)[2]
        # number_of_samples = np.shape(X_logsig_start)[0]
        elapsed = time.time()-start
        sig_comp_time.append(elapsed)
        model3 = Model( Param[i]['learning_rate'], training_iters, batch_size, n_input, Param[i]['number_of_segment'], Param[i]['deg_of_sig'], X_logsig_start, Y, test_X_logsig_start, test_Y)
    # Model built and train
    fixed_error_result_model3 = model3.BuildModelKerasMn()
    print("Time = " + str(time.time()-start))
    print("Testing loss = " + str(fixed_error_result_model3['Loss']))
    # model3.KerasPredict()
    test_result.append(fixed_error_result_model3 ['Loss'])
    test_time.append(fixed_error_result_model3 ['Time'])
    # results save
    # NOTE(review): `error_tol` is not defined anywhere in this notebook —
    # these np.save calls raise NameError unless it comes from an earlier
    # cell; also the last call saves only the latest `elapsed`, not the
    # accumulated `sig_comp_time` list — confirm intent.
    np.save('error_tol'+str(error_tol)+'deg_logsig'+str(Param[i]['deg_of_sig'])+'_test_result', test_result)
    np.save('error_tol'+str(error_tol)+'deg_logsig'+str(Param[i]['deg_of_sig'])+'_test_time', test_time)
    np.save('error_tol'+str(error_tol)+'deg_logsig'+str(Param[i]['deg_of_sig'])+'_sig_comp_time', elapsed)

print(test_time)
print(test_result)
print(sig_comp_time)
```
| github_jupyter |
```
# !wget https://raw.githubusercontent.com/huseinzol05/Malaya-Dataset/master/dependency/gsd-ud-train.conllu.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malaya-Dataset/master/dependency/gsd-ud-test.conllu.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malaya-Dataset/master/dependency/gsd-ud-dev.conllu.txt
# !wget https://raw.githubusercontent.com/huseinzol05/Malaya-Dataset/master/dependency/augmented-dependency.json
# Concatenate the train, test and dev splits of the Malay GSD UD corpus
# into a single list of CoNLL-U lines.
with open('gsd-ud-train.conllu.txt') as fopen:
    corpus = fopen.read().split('\n')

with open('gsd-ud-test.conllu.txt') as fopen:
    corpus.extend(fopen.read().split('\n'))

with open('gsd-ud-dev.conllu.txt') as fopen:
    corpus.extend(fopen.read().split('\n'))

import malaya
import re
from malaya.texts._text_functions import split_into_sentences
from malaya.texts import _regex
import numpy as np
import itertools
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = malaya.preprocessing._tokenizer
splitter = split_into_sentences
def is_number_regex(s):
    """Return True if *s* looks numeric: a plain digit string or a decimal
    of the form ``<digits>.<digits>`` (e.g. ``'3.14'``).

    Used below to normalise numeric tokens to the ``<NUM>`` placeholder.
    """
    # BUGFIX: use a raw string — ``"\d"`` in a plain literal is an invalid
    # escape sequence (a warning in modern Python, slated to become an
    # error). The regex itself is unchanged.
    if re.match(r"^\d+?\.\d+?$", s) is None:
        return s.isdigit()
    return True
def preprocessing(w):
    """Normalise token *w*: map numbers / money / dates / emails / URLs to
    their placeholder tokens, otherwise squash any run of repeated
    characters down to at most two (e.g. 'betuuuul' -> 'betuul')."""
    if is_number_regex(w):
        return '<NUM>'

    # placeholder substitutions, tried in the original priority order
    substitutions = (
        (_regex._money, '<MONEY>'),
        (_regex._date, '<DATE>'),
        (_regex._expressions['email'], '<EMAIL>'),
        (_regex._expressions['url'], '<URL>'),
    )
    for pattern, token in substitutions:
        if re.match(pattern, w):
            return token

    # keep at most 2 characters of each consecutive run
    return ''.join(''.join(run)[:2] for _, run in itertools.groupby(w))
def process_string(string):
    """Whitespace-tokenise *string* and normalise every token through
    `preprocessing` (placeholder substitution / repeat squashing)."""
    tokens = string.split()
    return list(map(preprocessing, tokens))
# Vocabulary tables: index 0 is reserved for padding, 1 for unknown
# tokens, 2 for the artificial root token.
word2idx = {'PAD': 0,'UNK':1, '_ROOT': 2}
tag2idx = {'PAD': 0, '_<ROOT>': 1}
char2idx = {'PAD': 0,'UNK':1, '_ROOT': 2}
word_idx = 3   # next free word id
tag_idx = 2    # next free dependency-tag id
char_idx = 3   # next free character id

# placeholder tokens produced by `preprocessing` get fixed ids up front
special_tokens = ['<NUM>', '<MONEY>', '<DATE>', '<URL>', '<EMAIL>']
for t in special_tokens:
    word2idx[t] = word_idx
    word_idx += 1
    char2idx[t] = char_idx
    char_idx += 1

word2idx, char2idx

# sentinel token names (only some of these are actually used below)
PAD = "_PAD"
PAD_POS = "_PAD_POS"
PAD_TYPE = "_<PAD>"
PAD_CHAR = "_PAD_CHAR"
ROOT = "_ROOT"
ROOT_POS = "_ROOT_POS"
ROOT_TYPE = "_<ROOT>"
ROOT_CHAR = "_ROOT_CHAR"
END = "_END"
END_POS = "_END_POS"
END_TYPE = "_<END>"
END_CHAR = "_END_CHAR"
def process_corpus(corpus, until = None):
    """Parse CoNLL-U lines into parallel lists: tokenised sentences, word
    ids, head indices, label ids, POS tags and per-word character ids.

    Mutates the global vocabularies (word2idx / tag2idx / char2idx and
    their counters) as new tokens appear.  A blank line ends a sentence;
    sentences with fewer than 2 tokens are dropped.  The trailing
    (possibly incomplete) sentence is discarded via the [:-1] slices.

    NOTE(review): the `until` parameter is unused.
    """
    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
    sentences, words, depends, labels, pos, chars = [], [], [], [], [], []
    temp_sentence, temp_word, temp_depend, temp_label, temp_pos = [], [], [], [], []
    first_time = True
    for sentence in corpus:
        try:
            if len(sentence):
                # skip CoNLL-U comment lines
                if sentence[0] == '#':
                    continue
                if first_time:
                    print(sentence)
                    first_time = False
                # columns: 1 = form, 3 = UPOS, 6 = head, 7 = deprel
                sentence = sentence.split('\t')
                # characters are indexed from the RAW form, before
                # `preprocessing` possibly replaces it with a placeholder
                for c in sentence[1]:
                    if c not in char2idx:
                        char2idx[c] = char_idx
                        char_idx += 1
                if sentence[7] not in tag2idx:
                    tag2idx[sentence[7]] = tag_idx
                    tag_idx += 1
                sentence[1] = preprocessing(sentence[1])
                if sentence[1] not in word2idx:
                    word2idx[sentence[1]] = word_idx
                    word_idx += 1
                temp_word.append(word2idx[sentence[1]])
                temp_depend.append(int(sentence[6]))
                temp_label.append(tag2idx[sentence[7]])
                temp_sentence.append(sentence[1])
                temp_pos.append(sentence[3])
            else:
                # blank line: flush the accumulated sentence
                if len(temp_sentence) < 2 or len(temp_word) != len(temp_label):
                    temp_word = []
                    temp_depend = []
                    temp_label = []
                    temp_sentence = []
                    temp_pos = []
                    continue
                words.append(temp_word)
                depends.append(temp_depend)
                labels.append(temp_label)
                sentences.append( temp_sentence)
                pos.append(temp_pos)
                # special placeholder tokens live in char2idx as whole
                # words, so they map to a single char id
                char_ = []
                for w in temp_sentence:
                    if w in char2idx:
                        char_.append([char2idx[w]])
                    else:
                        char_.append([char2idx[c] for c in w])
                chars.append(char_)
                temp_word = []
                temp_depend = []
                temp_label = []
                temp_sentence = []
                temp_pos = []
        except Exception as e:
            # malformed lines are reported and skipped
            print(e, sentence)
    return sentences[:-1], words[:-1], depends[:-1], labels[:-1], pos[:-1], chars[:-1]
sentences, words, depends, labels, _, _ = process_corpus(corpus)

import json

with open('augmented-dependency.json') as fopen:
    augmented = json.load(fopen)

# Merge the augmented data: a[0] = token texts, a[1] = head indices,
# a[2] = label ids.
# NOTE(review): heads are shifted down by 1 and labels up by 1 —
# presumably to align the JSON's indexing with process_corpus's scheme;
# confirm against the file format.
text_augmented = []
for a in augmented:
    text_augmented.extend(a[0])
    depends.extend((np.array(a[1]) - 1).tolist())
    labels.extend((np.array(a[2]) + 1).tolist())
def parse_XY(texts):
    """Convert raw augmented texts into word-id sequences, growing the
    global word/char vocabularies for any unseen tokens/characters.

    Returns (word_id_sequences, tokenised_sentences).
    """
    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
    outside, sentences = [], []
    for no, text in enumerate(texts):
        s = process_string(text)
        sentences.append(s)
        inside = []
        for w in s:
            for c in w:
                if c not in char2idx:
                    char2idx[c] = char_idx
                    char_idx += 1
            if w not in word2idx:
                word2idx[w] = word_idx
                word_idx += 1
            inside.append(word2idx[w])
        outside.append(inside)
    return outside, sentences
# fold the augmented data into the corpus-derived lists
outside, new_sentences = parse_XY(text_augmented)
words.extend(outside)
sentences.extend(new_sentences)

# inverse lookup tables for decoding
idx2word = {v:k for k, v in word2idx.items()}
idx2tag = {v:k for k, v in tag2idx.items()}
len(idx2word)

from sklearn.model_selection import train_test_split

sentences_train, sentences_test, words_train, words_test, depends_train, depends_test, labels_train, labels_test \
= train_test_split(sentences, words, depends, labels, test_size = 0.2)

len(sentences_train), len(sentences_test)
def generate_char_seq(batch, UNK = 2):
    """Encode a batch of tokenised sentences as a character-id tensor of
    shape [batch, max_sentence_len, max_word_len] (int32).

    The j-th character of each word is written at position -1-j of the
    last axis (i.e. reversed and right-aligned), with zeros (PAD) filling
    the front; characters missing from `char2idx` map to `UNK`.
    """
    max_words = max(len(sent) for sent in batch)
    max_chars = max(len(word) for sent in batch for word in sent)

    encoded = np.zeros((len(batch), max_words, max_chars), dtype=np.int32)
    for i, sent in enumerate(batch):
        for k, word in enumerate(sent):
            for no, ch in enumerate(word):
                encoded[i, k, -1 - no] = char2idx.get(ch, UNK)
    return encoded
# sanity check on the character tensor shape
generate_char_seq(sentences_train[:5]).shape

# aliases used by the training / evaluation loops below
train_X = words_train
train_Y = labels_train
train_depends = depends_train
train_char = sentences_train

test_X = words_test
test_Y = labels_test
test_depends = depends_test
test_char = sentences_test
class BiAAttention:
    """Bi-affine attention scorer between decoder and encoder states
    (TF1 graph mode), producing [batch, num_labels, len_dec, len_enc]
    arc scores: bilinear term d^T U e plus linear terms W_d d and W_e e.
    """

    def __init__(self, input_size_encoder, input_size_decoder, num_labels):
        self.input_size_encoder = input_size_encoder
        self.input_size_decoder = input_size_decoder
        self.num_labels = num_labels
        # linear terms for decoder / encoder states
        self.W_d = tf.get_variable("W_d", shape=[self.num_labels, self.input_size_decoder],
                                   initializer=tf.contrib.layers.xavier_initializer())
        self.W_e = tf.get_variable("W_e", shape=[self.num_labels, self.input_size_encoder],
                                   initializer=tf.contrib.layers.xavier_initializer())
        # bilinear interaction tensor
        self.U = tf.get_variable("U", shape=[self.num_labels, self.input_size_decoder, self.input_size_encoder],
                                 initializer=tf.contrib.layers.xavier_initializer())

    def forward(self, input_d, input_e, mask_d=None, mask_e=None):
        """Score every decoder position against every encoder position.

        input_d: [batch, len_dec, dim_dec]; input_e: [batch, len_enc, dim_enc].
        Optional float masks zero out padded positions in the output.
        """
        batch = tf.shape(input_d)[0]
        # NOTE(review): length_decoder / length_encoder are computed but
        # never used.
        length_decoder = tf.shape(input_d)[1]
        length_encoder = tf.shape(input_e)[1]
        # linear parts, broadcast over the opposite sequence axis
        out_d = tf.expand_dims(tf.matmul(self.W_d, tf.transpose(input_d, [0, 2, 1])), 3)
        out_e = tf.expand_dims(tf.matmul(self.W_e, tf.transpose(input_e, [0, 2, 1])), 2)
        # bilinear part: (input_d U) input_e^T
        output = tf.matmul(tf.expand_dims(input_d, 1), self.U)
        output = tf.matmul(output, tf.transpose(tf.expand_dims(input_e, 1), [0, 1, 3, 2]))
        output = output + out_d + out_e
        if mask_d is not None:
            d = tf.expand_dims(tf.expand_dims(mask_d, 1), 3)
            e = tf.expand_dims(tf.expand_dims(mask_e, 1), 2)
            output = output * d * e
        return output
class Model:
    """Joint dependency parser and tagger (TF1 graph mode).

    Char-BiLSTM word summaries are concatenated with word embeddings,
    fed through a word-BiLSTM, and drive two heads: a CRF over dependency
    tags and a bi-affine attention over head positions.  The training
    cost is the sum of the CRF negative log-likelihood and the arc loss.
    """

    def __init__(
        self,
        dim_word,
        dim_char,
        dropout,
        learning_rate,
        hidden_size_char,
        hidden_size_word,
        num_layers
    ):
        def cells(size, reuse = False):
            # dropout-wrapped orthogonally-initialised LSTM cell factory
            return tf.contrib.rnn.DropoutWrapper(
                tf.nn.rnn_cell.LSTMCell(
                    size,
                    initializer = tf.orthogonal_initializer(),
                    reuse = reuse,
                ),
                output_keep_prob = dropout,
            )

        # placeholders: [batch, time] word ids / tag labels / gold heads,
        # [batch, time, chars] character ids; id 0 is padding
        self.word_ids = tf.placeholder(tf.int32, shape = [None, None])
        self.char_ids = tf.placeholder(tf.int32, shape = [None, None, None])
        self.labels = tf.placeholder(tf.int32, shape = [None, None])
        self.depends = tf.placeholder(tf.int32, shape = [None, None])
        self.maxlen = tf.shape(self.word_ids)[1]
        # sequence lengths and padding mask derived from non-zero word ids
        self.lengths = tf.count_nonzero(self.word_ids, 1)
        self.mask = tf.math.not_equal(self.word_ids, 0)
        float_mask = tf.cast(self.mask, tf.float32)
        # arc head/child projections and the bi-affine scorer (1 label)
        self.arc_h = tf.layers.Dense(hidden_size_word)
        self.arc_c = tf.layers.Dense(hidden_size_word)
        self.attention = BiAAttention(hidden_size_word, hidden_size_word, 1)
        self.word_embeddings = tf.Variable(
            tf.truncated_normal(
                [len(word2idx), dim_word], stddev = 1.0 / np.sqrt(dim_word)
            )
        )
        self.char_embeddings = tf.Variable(
            tf.truncated_normal(
                [len(char2idx), dim_char], stddev = 1.0 / np.sqrt(dim_char)
            )
        )
        word_embedded = tf.nn.embedding_lookup(
            self.word_embeddings, self.word_ids
        )
        char_embedded = tf.nn.embedding_lookup(
            self.char_embeddings, self.char_ids
        )
        # flatten [batch, words, chars, dim] -> [batch*words, chars, dim]
        # so the char BiLSTM runs over every word independently
        s = tf.shape(char_embedded)
        char_embedded = tf.reshape(
            char_embedded, shape = [s[0] * s[1], s[-2], dim_char]
        )
        for n in range(num_layers):
            (out_fw, out_bw), (
                state_fw,
                state_bw,
            ) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = cells(hidden_size_char),
                cell_bw = cells(hidden_size_char),
                inputs = char_embedded,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_char_%d' % (n),
            )
            char_embedded = tf.concat((out_fw, out_bw), 2)
        # last timestep of the char BiLSTM summarises each word
        output = tf.reshape(
            char_embedded[:, -1], shape = [s[0], s[1], 2 * hidden_size_char]
        )
        word_embedded = tf.concat([word_embedded, output], axis = -1)
        # word-level BiLSTM over [word embedding ; char summary]
        for n in range(num_layers):
            (out_fw, out_bw), (
                state_fw,
                state_bw,
            ) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = cells(hidden_size_word),
                cell_bw = cells(hidden_size_word),
                inputs = word_embedded,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_word_%d' % (n),
            )
            word_embedded = tf.concat((out_fw, out_bw), 2)
        # CRF tagging head
        logits = tf.layers.dense(word_embedded, len(idx2tag))
        log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
            logits, self.labels, self.lengths
        )
        # bi-affine arc scores (self-attention over head positions)
        arc_h = tf.nn.elu(self.arc_h(word_embedded))
        arc_c = tf.nn.elu(self.arc_c(word_embedded))
        out_arc = tf.squeeze(self.attention.forward(arc_h, arc_h, mask_d=float_mask, mask_e=float_mask), axis = 1)
        batch = tf.shape(out_arc)[0]
        batch_index = tf.range(0, batch)
        max_len = tf.shape(out_arc)[1]
        sec_max_len = tf.shape(out_arc)[2]
        # push padded positions towards -inf before the softmax
        minus_inf = -1e8
        minus_mask = (1 - float_mask) * minus_inf
        out_arc = out_arc + tf.expand_dims(minus_mask, axis = 2) + tf.expand_dims(minus_mask, axis = 1)
        # arc loss: negative log-softmax over heads, gathered at the gold
        # (batch, head, child) index triples; root position is excluded
        # via the [1:] slice and the `num` normaliser subtracts one token
        # per sentence
        loss_arc = tf.nn.log_softmax(out_arc, dim=1)
        loss_arc = loss_arc * tf.expand_dims(float_mask, axis = 2) * tf.expand_dims(float_mask, axis = 1)
        num = tf.reduce_sum(float_mask) - tf.cast(batch, tf.float32)
        child_index = tf.tile(tf.expand_dims(tf.range(0, max_len), 1), [1, batch])
        t = tf.transpose(self.depends)
        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))
        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),
                                               tf.expand_dims(t, axis = 0),
                                               tf.expand_dims(child_index, axis = 0)], axis = 0))
        loss_arc = tf.gather_nd(loss_arc, concatenated)
        loss_arc = tf.transpose(loss_arc, [1, 0])[1:]
        loss_arc = tf.reduce_sum(-loss_arc) / num
        # total cost = CRF tag loss + arc loss
        self.cost = tf.reduce_mean(-log_likelihood) + loss_arc
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate = learning_rate
        ).minimize(self.cost)
        mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)
        # Viterbi decode of the tag sequence
        self.tags_seq, _ = tf.contrib.crf.crf_decode(
            logits, transition_params, self.lengths
        )
        # head decoding: forbid self-loops (diagonal) and padded rows,
        # then take the argmax over candidate heads
        out_arc = out_arc + tf.linalg.diag(tf.fill([max_len], -np.inf))
        minus_mask = tf.expand_dims(tf.cast(1.0 - float_mask, tf.bool), axis = 2)
        minus_mask = tf.tile(minus_mask, [1, 1, sec_max_len])
        out_arc = tf.where(minus_mask, tf.fill(tf.shape(out_arc), -np.inf), out_arc)
        self.heads = tf.argmax(out_arc, axis = 1)
        # tag accuracy over non-padded positions
        self.prediction = tf.boolean_mask(self.tags_seq, mask)
        mask_label = tf.boolean_mask(self.labels, mask)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # head accuracy over non-padded positions (note: self.prediction
        # is re-bound here, ending up as the head predictions)
        self.prediction = tf.cast(tf.boolean_mask(self.heads, mask), tf.int32)
        mask_label = tf.boolean_mask(self.depends, mask)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy_depends = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.reset_default_graph()
sess = tf.InteractiveSession()

# model hyper-parameters
dim_word = 128
dim_char = 256
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2

model = Model(dim_word,dim_char,dropout,learning_rate,hidden_size_char,hidden_size_word,num_layers)
sess.run(tf.global_variables_initializer())

# smoke-test the graph on a tiny batch before training
batch_x = train_X[:5]
batch_x = pad_sequences(batch_x,padding='post')
batch_char = train_char[:5]
batch_char = generate_char_seq(batch_char)
batch_y = train_Y[:5]
batch_y = pad_sequences(batch_y,padding='post')
batch_depends = train_depends[:5]
batch_depends = pad_sequences(batch_depends,padding='post')

sess.run([model.accuracy, model.accuracy_depends, model.cost],
         feed_dict = {model.word_ids: batch_x,
                      model.char_ids: batch_char,
                      model.labels: batch_y,
                      model.depends: batch_depends})
from tqdm import tqdm

batch_size = 64
epoch = 10

# Train/eval loop: each epoch does a full minibatch pass over the train
# split (with optimizer step) and over the test split (metrics only).
for e in range(epoch):
    train_acc, train_loss = [], []
    test_acc, test_loss = [], []
    train_acc_depends, test_acc_depends = [], []
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        batch_x = train_X[i: index]
        batch_x = pad_sequences(batch_x,padding='post')
        batch_char = train_char[i: index]
        batch_char = generate_char_seq(batch_char)
        batch_y = train_Y[i: index]
        batch_y = pad_sequences(batch_y,padding='post')
        batch_depends = train_depends[i: index]
        batch_depends = pad_sequences(batch_depends,padding='post')
        acc_depends, acc, cost, _ = sess.run(
            [model.accuracy_depends, model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.word_ids: batch_x,
                model.char_ids: batch_char,
                model.labels: batch_y,
                model.depends: batch_depends
            },
        )
        train_loss.append(cost)
        train_acc.append(acc)
        train_acc_depends.append(acc_depends)
        pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)

    # evaluation pass (no optimizer in the fetch list)
    pbar = tqdm(
        range(0, len(test_X), batch_size), desc = 'test minibatch loop'
    )
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = test_X[i: index]
        batch_x = pad_sequences(batch_x,padding='post')
        batch_char = test_char[i: index]
        batch_char = generate_char_seq(batch_char)
        batch_y = test_Y[i: index]
        batch_y = pad_sequences(batch_y,padding='post')
        batch_depends = test_depends[i: index]
        batch_depends = pad_sequences(batch_depends,padding='post')
        acc_depends, acc, cost = sess.run(
            [model.accuracy_depends, model.accuracy, model.cost],
            feed_dict = {
                model.word_ids: batch_x,
                model.char_ids: batch_char,
                model.labels: batch_y,
                model.depends: batch_depends
            },
        )
        test_loss.append(cost)
        test_acc.append(acc)
        test_acc_depends.append(acc_depends)
        pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)

    print(
        'epoch: %d, training loss: %f, training acc: %f, training depends: %f, valid loss: %f, valid acc: %f, valid depends: %f\n'
        % (e, np.mean(train_loss),
           np.mean(train_acc),
           np.mean(train_acc_depends),
           np.mean(test_loss),
           np.mean(test_acc),
           np.mean(test_acc_depends)
          ))

# decode a sample batch (the last test batch left over from the loop)
tags_seq, heads = sess.run(
    [model.tags_seq, model.heads],
    feed_dict = {
        model.word_ids: batch_x,
        model.char_ids: batch_char
    },
)

tags_seq[0], heads[0], batch_depends[0]
def evaluate(heads_pred, types_pred, heads, types, lengths,
             symbolic_root=False, symbolic_end=False):
    """Compute attachment scores for a batch of dependency predictions.

    Returns (UAS, LAS, root accuracy): the fraction of tokens whose
    predicted head is correct, whose head AND label are both correct, and
    the fraction of gold roots (head == 0) predicted as roots.
    `symbolic_root` / `symbolic_end` skip the first / last position of
    every sentence.
    """
    n_sentences, _ = heads_pred.shape
    start = 1 if symbolic_root else 0
    end = 1 if symbolic_end else 0

    unlabeled_correct = 0.
    labeled_correct = 0.
    total = 0.
    whole_unlabeled = 0.   # sentences with all heads correct (unused in return)
    whole_labeled = 0.     # sentences with all heads + labels correct (unused)
    root_correct = 0.
    root_total = 0.

    for i in range(n_sentences):
        sent_u = 1.
        sent_l = 1.
        for j in range(start, lengths[i] - end):
            total += 1
            if heads[i, j] == heads_pred[i, j]:
                unlabeled_correct += 1
                if types[i, j] == types_pred[i, j]:
                    labeled_correct += 1
                else:
                    sent_l = 0
            else:
                sent_u = 0
                sent_l = 0
            if heads[i, j] == 0:
                root_total += 1
                if heads_pred[i, j] == 0:
                    root_correct += 1
        whole_unlabeled += sent_u
        whole_labeled += sent_l

    return (unlabeled_correct / total, labeled_correct / total,
            root_correct / root_total)
# Argument order: predictions first (heads, tags_seq), gold second
# (batch_depends, batch_y); lengths come from the non-padded word ids.
arc_accuracy, type_accuracy, root_accuracy = evaluate(heads, tags_seq, batch_depends, batch_y,
                                                      np.count_nonzero(batch_x, axis = 1))
arc_accuracy, type_accuracy, root_accuracy

# full test-set evaluation, aggregating per-batch scores
arcs, types, roots = [], [], []
pbar = tqdm(
    range(0, len(test_X), batch_size), desc = 'test minibatch loop'
)
for i in pbar:
    index = min(i + batch_size, len(test_X))
    batch_x = test_X[i: index]
    batch_x = pad_sequences(batch_x,padding='post')
    batch_char = test_char[i: index]
    batch_char = generate_char_seq(batch_char)
    batch_y = test_Y[i: index]
    batch_y = pad_sequences(batch_y,padding='post')
    batch_depends = test_depends[i: index]
    batch_depends = pad_sequences(batch_depends,padding='post')
    tags_seq, heads = sess.run(
        [model.tags_seq, model.heads],
        feed_dict = {
            model.word_ids: batch_x,
            model.char_ids: batch_char
        },
    )
    arc_accuracy, type_accuracy, root_accuracy = evaluate(heads, tags_seq, batch_depends, batch_y,
                                                          np.count_nonzero(batch_x, axis = 1))
    pbar.set_postfix(arc_accuracy = arc_accuracy, type_accuracy = type_accuracy,
                     root_accuracy = root_accuracy)
    arcs.append(arc_accuracy)
    types.append(type_accuracy)
    roots.append(root_accuracy)

# NOTE: a simple mean over batches (last batch may be smaller, so this is
# not exactly the token-weighted average)
print('arc accuracy:', np.mean(arcs))
print('types accuracy:', np.mean(types))
print('root accuracy:', np.mean(roots))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/AI4Finance-Foundation/FinRL/blob/master/FinRL_Raytune_for_Hyperparameter_Optimization_RLlib%20Models.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#Installing FinRL
# Notebook-only cells: %%capture suppresses the pip install output.
%%capture
!pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git
%%capture
!pip install "ray[tune]" optuna
%%capture
!pip install int_date==0.1.8
```
#Importing libraries
```
#Importing the libraries
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
import optuna
%matplotlib inline
from finrl import config
from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader
from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.finrl_meta.env_stock_trading.env_stocktrading_np import StockTradingEnv as StockTradingEnv_numpy
from finrl.agents.rllib.models import DRLAgent as DRLAgent_rllib
from stable_baselines3.common.vec_env import DummyVecEnv
from finrl.finrl_meta.data_processor import DataProcessor
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
import ray
from pprint import pprint
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.ddpg import DDPGTrainer
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.agents.a3c import a2c
from ray.rllib.agents.ddpg import ddpg, td3
from ray.rllib.agents.ppo import ppo
from ray.rllib.agents.sac import sac
import sys
sys.path.append("../FinRL-Library")
import os
import itertools
from ray import tune
from ray.tune.suggest import ConcurrencyLimiter
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.optuna import OptunaSearch
from ray.tune.registry import register_env
import time
import psutil

# Work around Ray mis-detecting available memory (e.g. on Colab): make it
# report the machine's total virtual memory instead.
psutil_memory_in_bytes = psutil.virtual_memory().total
ray._private.utils.get_system_memory = lambda: psutil_memory_in_bytes

from typing import Dict, Optional, Any
import os

# create the working directories expected by FinRL, if missing
if not os.path.exists("./" + config.DATA_SAVE_DIR):
    os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
    os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
    os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
    os.makedirs("./" + config.RESULTS_DIR)
# if not os.path.exists("./" + "tuned_models"):
#     os.makedirs("./" + "tuned_models")
```
##Defining the hyperparameter search space
1. You can look up [here](https://docs.ray.io/en/latest/tune/key-concepts.html#search-spaces) to learn how to define hyperparameter search space
2. Jump over to this [link](https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/utils/hyperparams_opt.py) to find the ranges of different hyperparameters
3. To learn about different hyperparameters for different algorithms for RLlib models, jump over to this [link](https://docs.ray.io/en/latest/rllib-algorithms.html)
```
def sample_ddpg_params():
    """Ray Tune search space for DDPG: replay buffer size, learning rate,
    training batch size."""
    return {
        "buffer_size": tune.choice([int(1e4), int(1e5), int(1e6)]),
        "lr": tune.loguniform(1e-5, 1),
        "train_batch_size": tune.choice([32, 64, 128, 256, 512])
    }

def sample_a2c_params():
    """Ray Tune search space for A2C: GAE lambda, entropy coefficient,
    learning rate."""
    return{
        "lambda": tune.choice([0.1,0.3,0.5,0.7,0.9,1.0]),
        "entropy_coeff": tune.loguniform(0.00000001, 0.1),
        "lr": tune.loguniform(1e-5, 1)
    }

def sample_ppo_params():
    """Ray Tune search space for PPO: entropy coefficient, learning rate,
    SGD minibatch size, GAE lambda."""
    return {
        "entropy_coeff": tune.loguniform(0.00000001, 0.1),
        "lr": tune.loguniform(5e-5, 1),
        "sgd_minibatch_size": tune.choice([ 32, 64, 128, 256, 512]),
        "lambda": tune.choice([0.1,0.3,0.5,0.7,0.9,1.0])
    }

# RLlib algorithm modules keyed by the short model name
MODELS = {"a2c": a2c, "ddpg": ddpg, "td3": td3, "sac": sac, "ppo": ppo}
```
## Getting the training and testing environment
```
def get_train_env(start_date, end_date, ticker_list, data_source, time_interval,
                  technical_indicator_list, env, model_name, if_vix = True,
                  **kwargs):
    """Download and preprocess market data, returning the config dict for
    a training StockTradingEnv: price / technical-indicator / turbulence
    arrays plus `if_train=True`.

    NOTE(review): `env` and `model_name` are accepted for interface
    symmetry with the other helpers but are unused here.
    """
    #fetch data
    DP = DataProcessor(data_source, **kwargs)
    data = DP.download_data(ticker_list, start_date, end_date, time_interval)
    data = DP.clean_data(data)
    data = DP.add_technical_indicator(data, technical_indicator_list)
    if if_vix:
        data = DP.add_vix(data)

    price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix)
    train_env_config = {'price_array':price_array,
                        'tech_array':tech_array,
                        'turbulence_array':turbulence_array,
                        'if_train':True}
    return train_env_config
#Function to calculate the sharpe ratio from the list of total_episode_reward
#Function to calculate the sharpe ratio from the list of total_episode_reward
def calculate_sharpe(episode_reward: list):
    """Annualised Sharpe ratio of a series of cumulative episode rewards.

    Rewards are converted to period returns via pct_change; the ratio is
    sqrt(252) * mean(return) / std(return).

    Returns 0 when the ratio is undefined: constant rewards (zero std) or
    too few data points (std is NaN).
    """
    perf_data = pd.DataFrame(data=episode_reward, columns=['reward'])
    perf_data['daily_return'] = perf_data['reward'].pct_change(1)

    std = perf_data['daily_return'].std()
    # BUGFIX: the original tested only `std != 0`, which is True for NaN
    # (fewer than two valid returns), so NaN leaked out to callers that
    # compare Sharpe ratios. Treat NaN the same as zero std.
    if pd.isna(std) or std == 0:
        return 0
    return (252 ** 0.5) * perf_data['daily_return'].mean() / std
def get_test_config(start_date, end_date, ticker_list, data_source, time_interval,
                    technical_indicator_list, env, model_name, if_vix = True,
                    **kwargs):
    """Same pipeline as `get_train_env` but returns an evaluation env
    config (`if_train=False`).

    NOTE(review): near-duplicate of `get_train_env`; `env` and
    `model_name` are unused here as well.
    """
    DP = DataProcessor(data_source, **kwargs)
    data = DP.download_data(ticker_list, start_date, end_date, time_interval)
    data = DP.clean_data(data)
    data = DP.add_technical_indicator(data, technical_indicator_list)
    if if_vix:
        data = DP.add_vix(data)

    price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix)
    test_env_config = {'price_array':price_array,
                       'tech_array':tech_array,
                       'turbulence_array':turbulence_array,'if_train':False}
    return test_env_config
def val_or_test(test_env_config,agent_path,model_name,env):
    """Run a trained agent on `test_env_config` and return its Sharpe
    ratio together with the episode reward series.

    NOTE(review): relies on a global `DRL_prediction` that is not defined
    anywhere in this notebook — confirm it is provided by an earlier cell
    or an import before running.
    """
    episode_total_reward = DRL_prediction(model_name,test_env_config,
                                          env = env,
                                          agent_path=agent_path)
    return calculate_sharpe(episode_total_reward),episode_total_reward
# train / validation / test date ranges
TRAIN_START_DATE = '2014-01-01'
TRAIN_END_DATE = '2019-07-30'

VAL_START_DATE = '2019-08-01'
VAL_END_DATE = '2020-07-30'

TEST_START_DATE = '2020-08-01'
TEST_END_DATE = '2021-10-01'

technical_indicator_list =config.INDICATORS

model_name = 'a2c'
env = StockTradingEnv_numpy
ticker_list = ['TSLA']
data_source = 'yahoofinance'
time_interval = '1D'

# NOTE: the tuning environment deliberately spans train + validation
# periods (TRAIN_START_DATE .. VAL_END_DATE)
train_env_config = get_train_env(TRAIN_START_DATE, VAL_END_DATE,
                                 ticker_list, data_source, time_interval,
                                 technical_indicator_list, env, model_name)
```
## Registering the environment
```
from ray.tune.registry import register_env

env_name = 'StockTrading_train_env'
# Register the environment under a string id that RLlib configs can
# reference; the `config` Ray passes in is ignored in favour of the
# captured `train_env_config`.
register_env(env_name, lambda config: env(train_env_config))
```
## Running tune
```
# RLlib trainer classes keyed by short model name
MODEL_TRAINER = {'a2c':A2CTrainer,'ppo':PPOTrainer,'ddpg':DDPGTrainer}

# pick the hyperparameter search space matching the chosen algorithm
if model_name == "ddpg":
    sample_hyperparameters = sample_ddpg_params()
elif model_name == "ppo":
    sample_hyperparameters = sample_ppo_params()
elif model_name == "a2c":
    sample_hyperparameters = sample_a2c_params()
def run_optuna_tune():
    """Run a Ray Tune sweep with Optuna (TPE) search and ASHA pruning."""
    algo = OptunaSearch()
    algo = ConcurrencyLimiter(algo, max_concurrent=4)  # cap parallel trials
    scheduler = AsyncHyperBandScheduler()
    num_samples = 10  # number of hyperparameter sets to try
    training_iterations = 100
    analysis = tune.run(
        MODEL_TRAINER[model_name],
        metric="episode_reward_mean",  # the metric to optimize for tuning
        mode="max",  # maximize the metric
        search_alg=algo,  # OptunaSearch samples hyperparameters via the Tree Parzen estimator
        scheduler=scheduler,  # to prune bad trials early
        config={**sample_hyperparameters,
                'env': 'StockTrading_train_env', 'num_workers': 1,
                'num_gpus': 1, 'framework': 'torch'},
        num_samples=num_samples,  # number of hyperparameter samples to test out
        stop={'training_iteration': training_iterations},  # per-trial stopping criterion
        verbose=1, local_dir="./tuned_models",  # where tensorboard logs are saved
        # resources_per_trial={'gpu':1,'cpu':1},
        max_failures=1,  # retry budget for failed trials
        raise_on_failed_trial=False,  # don't raise even if some trials errored
        keep_checkpoints_num=num_samples - 5,
        checkpoint_score_attr='episode_reward_mean',  # keep only the best checkpoints by this score
        checkpoint_freq=training_iterations  # checkpointing all the trials
    )
    print("Best hyperparameter: ", analysis.best_config)
    return analysis


analysis = run_optuna_tune()
```
## Best config, directory and checkpoint for hyperparameters
```
# Retrieve the tuned configuration, log dir and checkpoint from the sweep.
best_config = analysis.get_best_config(metric='episode_reward_mean', mode='max')
best_config
best_logdir = analysis.get_best_logdir(metric='episode_reward_mean', mode='max')
best_logdir
best_checkpoint = analysis.best_checkpoint
best_checkpoint
test_env_config = get_test_config(TEST_START_DATE, TEST_END_DATE, ticker_list, data_source, time_interval,
                                  technical_indicator_list, env, model_name)
# Fixes vs. original: `agent_path` was never defined (NameError) — the
# restored checkpoint is best_checkpoint — and val_or_test returns a
# 2-tuple, so unpacking three names raised ValueError.
sharpe, episode_assets = val_or_test(test_env_config, best_checkpoint, model_name, env)
def DRL_prediction(
        model_name,
        test_env_config,
        env,
        model_config,
        agent_path,
        env_name_test='StockTrading_test_env'
):
    """Restore an RLlib trainer from a checkpoint and roll it out on the test env.

    Parameters
    ----------
    model_name : one of 'ppo' / 'a2c' / 'ddpg' / 'td3' / 'sac'.
    test_env_config : env-config dict (price/tech/turbulence arrays).
    env : environment class to instantiate.
    model_config : RLlib trainer config; its 'env' key is overwritten here.
    agent_path : checkpoint path passed to trainer.restore().
    env_name_test : string id under which the test env is registered.

    Returns
    -------
    List of total account values, one per step, preceded by the initial value.

    Raises
    ------
    ValueError if model_name is unsupported or the checkpoint fails to load.
    """
    env_instance = env(test_env_config)
    register_env(env_name_test, lambda config: env(test_env_config))
    model_config['env'] = env_name_test
    # ray.init() # Other Ray APIs will not work until `ray.init()` is called.
    if model_name == "ppo":
        trainer = MODELS[model_name].PPOTrainer(config=model_config)
    elif model_name == "a2c":
        trainer = MODELS[model_name].A2CTrainer(config=model_config)
    elif model_name == "ddpg":
        trainer = MODELS[model_name].DDPGTrainer(config=model_config)
    elif model_name == "td3":
        trainer = MODELS[model_name].TD3Trainer(config=model_config)
    elif model_name == "sac":
        trainer = MODELS[model_name].SACTrainer(config=model_config)
    else:
        # Previously an unknown name fell through to an UnboundLocalError below.
        raise ValueError("Unsupported model_name: " + str(model_name))
    try:
        trainer.restore(agent_path)
        print("Restoring from checkpoint path", agent_path)
    except BaseException as err:
        # Chain the original failure so the root cause is not swallowed.
        raise ValueError("Fail to load agent!") from err
    # Roll the restored policy out on the test environment.
    state = env_instance.reset()
    episode_returns = list()  # the cumulative_return / initial_account
    episode_total_assets = list()
    episode_total_assets.append(env_instance.initial_total_asset)
    done = False
    while not done:
        action = trainer.compute_single_action(state)
        state, reward, done, _ = env_instance.step(action)
        # Mark-to-market account value: cash + holdings at today's prices.
        total_asset = (
            env_instance.amount
            + (env_instance.price_ary[env_instance.day] * env_instance.stocks).sum()
        )
        episode_total_assets.append(total_asset)
        episode_return = total_asset / env_instance.initial_total_asset
        episode_returns.append(episode_return)
    ray.shutdown()
    print("episode return: " + str(episode_return))
    print("Test Finished!")
    return episode_total_assets
# Roll the best checkpoint out on the test env and report performance.
episode_total_assets = DRL_prediction(
    model_name,
    test_env_config,
    env,
    best_config,
    best_checkpoint,
    env_name_test='StockTrading_test_env')
print('The test sharpe ratio is: ', calculate_sharpe(episode_total_assets))
df_account_test = pd.DataFrame(data=episode_total_assets, columns=['account_value'])
```
| github_jupyter |
```
# Set membership basics.
A = {1, 2, 3, 8}
B = {3, 4}
print(1 in A)   # True
print(4 in A)   # False
flag = 4 in A
type(flag)      # membership tests yield a bool
print(B.issubset(A))  # False: 4 is not in A
def f_issubset(A, B):
    """Return True iff every element of A is also in B (i.e. A is a subset of B).

    Hand-rolled equivalent of set.issubset, kept for the exercise.
    """
    # all() short-circuits on the first missing element, matching the
    # original loop's early `return False`.
    return all(e in B for e in A)
print(f_issubset(B, A))
print(f_issubset({2, 3, 4}, {1, 2, 3, 4, 5, 6}))
import numpy as np
# Build a sample space and two events as Python sets.
Omg = set(np.arange(10))       # sample space {0..9}
type(Omg)
Omg
A = set(np.arange(0, 9, 2))    # evens {0,2,4,6,8}
A
B = set(np.arange(1, 9, 3))    # {1,4,7}
B
A.union(B)
A.intersection(B)
B.add(6)  # mutates B in place
B
A.intersection(B)
A
B
A.difference(B)
# Complements are taken relative to the sample space Omg.
A_complement = Omg.difference(A)
A_complement
B_complement = Omg.difference(B)
# De Morgan's laws: (A ∪ B)' = A' ∩ B' and (A ∩ B)' = A' ∪ B'
Omg.difference(A.union(B))
A_complement.intersection(B_complement)
Omg.difference(A.intersection(B))
A_complement.union(B_complement)
import numpy as np
def BernoulliTrail(p=0.5):
    """One Bernoulli trial: return 1 with probability p, otherwise 0."""
    success = np.random.rand() <= p
    return int(success)
print(BernoulliTrail(0.7))
# Monte-Carlo estimate of the success probability: the fraction of ones
# over n trials should approach p.
countOnes = 0
countZeros = 0
p = 0.611334
n = 1000000
for i in range(n):
    x = BernoulliTrail(p)
    if x == 1:
        countOnes += 1
    else:
        countZeros += 1
print("Success Fraction is : ", countOnes/n)
def GT(p):
    """Geometric draw: count of Bernoulli(p) trials up to and including
    the first success."""
    trials = 1
    # Keep sampling until a success; each failure adds one more trial.
    while BernoulliTrail(p) != 1:
        trials += 1
    return trials
p = 0.2
print(GT(p))
# Draw n geometric samples.
n = 1000
p = 0.6
G = np.zeros(n)
for i in range(n):
    G[i] = GT(p)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# With a small p the draws are larger and the histogram shows a long right tail.
n = 100000
p = 0.02
G = np.zeros(n)
for i in range(n):
    G[i] = GT(p)
plt.hist(G, density=True, bins=100)
sns.kdeplot(G, shade=True)
def BT(n, p):
    """Binomial draw: number of successes among n Bernoulli(p) trials."""
    draws = np.random.rand(n)
    return (draws <= p).sum()
print(BT(100, 0.2))
# Draw N binomial samples and inspect their distribution.
N = 10000
n = 1000
p = 0.1
B = np.zeros(N)
for i in range(N):
    B[i] = BT(n, p)
plt.hist(B, density=True, bins=20)
sns.kdeplot(B, shade=True)
import pandas as pd
# Peek at a couple of seaborn's built-in datasets.
iris = sns.load_dataset('iris')
iris.head()
iris.species.unique()
T = sns.load_dataset('titanic')
T.head()
T.survived.unique()
T.pclass.unique()
# Local CSV — path is machine-specific.
B = pd.read_csv(r'C:\Users\DeLL\Desktop\ML\fullcourse\PS\Data\Bias_correction_ucl.csv')
B.shape
B.head()
# Uniform samples on [20, 120).
X = 100*np.random.rand(100000)+20
plt.hist(X, density=True, bins=50)
sns.kdeplot(X)
# Exponential pdfs f(x) = λ·exp(-λx) for several rates λ.
x = np.linspace(0, 10, 1000)
lmdaz = np.linspace(1, 2, 4)
for i in lmdaz:
    fx = i*np.exp(-i*x)
    plt.plot(x, fx, label=str(i))
plt.legend()
# Normal pdfs with fixed mean and several standard deviations.
x = np.linspace(-20, 20, 10000)
#muz = np.arange(-20,20,10)
mu = 0
sgmaz = np.arange(1, 10, 2)
for sgma in sgmaz:
    #sgma = 1
    fx = np.exp((-(x-mu)**2)/(2*(sgma**2)))/((2*np.pi*sgma**2)**0.5)
    plt.plot(x, fx, label=str(sgma))
plt.legend()
# Overlay histograms of samples from several distributions.
X = np.random.normal(0, 1, 1000)
Y = np.random.normal(10, 10, 1000)
Z = np.random.normal(-10, 4, 1000)
W = np.random.exponential(scale=1, size=1000)
plt.hist(X, density=True, alpha=0.5, bins=30)
#plt.hist(Y,density=True,alpha=0.5,bins = 30)
plt.hist(Z, density=True, alpha=0.5, bins=30)
plt.hist(W, density=True, alpha=0.5, bins=30)
iris = sns.load_dataset('iris')
X = iris.sepal_length
plt.hist(X, density=True, bins=30)
sns.pairplot(iris, hue='species')
# Sample means vs. theoretical expectations.
p = 0.73
N = 1000000
(np.random.rand(N) <= p).mean()  # ≈ p
X = np.random.geometric(p, N)
X.mean()  # ≈ 1/p
n = 330
B = np.random.binomial(n, p, N)
B.mean()  # ≈ n*p
B[0]
B[1]
B[3]
```
# Bernoulli
```
# Law of large numbers: sample mean of Bernoulli(p) converges to E[X] = p.
p = 0.812
sMeanX = []
for N in range(1, 10000, 100):
    sMeanX.append((np.random.rand(N) <= p).mean())
y = np.array(sMeanX)
x = np.arange(y.size)
plt.plot(x, y, linestyle='--')
```
# Geometric(p), $E[X]=1/p$
```
# Geometric(p): sample mean converges to E[X] = 1/p.
p = 0.312
print(1/p)
sMeanX = []
for N in range(1, 10000, 100):
    sMeanX.append((np.random.geometric(p, N).mean()))
y = np.array(sMeanX)
x = np.arange(y.size)
plt.plot(x, y, linestyle='--')
y[-1]
np.random.geometric(p, 1000000).mean()
```
# Binomial(n,p) $E[X]=np$
```
# Binomial(n, p): sample mean converges to E[X] = n*p.
n = 350
p = 0.312
print(n*p)
sMeanX = []
for N in range(1, 10000, 100):
    sMeanX.append((np.random.binomial(n, p, N).mean()))
y = np.array(sMeanX)
x = np.arange(y.size)
plt.plot(x, y, linestyle='--')
y[-1]
```
# Normal($\mu,\sigma$), $E[X]=\mu$
```
# Normal(mu, sigma): sample mean converges to E[X] = mu.
mu = 2.4
sgma = 3.1
sMeanX = []
for N in range(1, 10000, 100):
    sMeanX.append((np.random.normal(mu, sgma, N).mean()))
y = np.array(sMeanX)
x = np.arange(y.size)
plt.plot(x, y, linestyle='--')
#import seaborn as sns
# Hand-rolled one-feature naive Bayes on iris petal length.
iris = sns.load_dataset('iris')
iris.head()
iris.species.unique()
sns.pairplot(iris, hue='species')
X = iris.petal_length  # single feature
X.shape
y = iris.species
y.unique()
# Encode the species names as string labels '0'/'1'/'2'.
y[y=='setosa'] = '0'
y[y=='versicolor'] = '1'
# NOTE(review): 'verginica' is a typo ('virginica'), so this line matches
# nothing; the L[-1] relabeling below is the workaround.
y[y=='verginica'] = '2'
y
L = y.unique()
L[-1]
y[y==L[-1]] = '2'  # relabel the remaining (virginica) class
y.unique()
X
y
# Random train/test split via an index permutation.
idx = np.random.permutation(np.arange(y.size))
nTrain = 120
nTest = idx.size-nTrain
Xtrain = X[idx[:nTrain]]
ytrain = y[idx[:nTrain]]
Xtest = X[idx[nTrain:]]
ytest = y[idx[nTrain:]]
ytrain.size
ytest.size
# Class priors P(y=k) estimated from training frequencies.
Py = np.zeros(3)
Py[0] = (ytrain=='0').sum()/ytrain.size
Py[1] = (ytrain=='1').sum()/ytrain.size
Py[2] = (ytrain=='2').sum()/ytrain.size
Py
# Per-class Gaussian likelihood parameters stored as [mu, sigma] per class.
fx_given_y = [[0, 0], [0, 0], [0, 0]]
X_0 = Xtrain[ytrain=='0']
X_1 = Xtrain[ytrain=='1']
X_2 = Xtrain[ytrain=='2']
mu_0 = X_0.mean()
mu_1 = X_1.mean()
mu_2 = X_2.mean()
sgma_0 = X_0.std()
sgma_1 = X_1.std()
sgma_2 = X_2.std()
fx_given_y[0][0] = mu_0
fx_given_y[0][1] = sgma_0
fx_given_y[1][0] = mu_1
fx_given_y[1][1] = sgma_1
fx_given_y[2][0] = mu_2
fx_given_y[2][1] = sgma_2
# Plot the three fitted class-conditional densities.
x = np.linspace(-2, 10, 10000)
for i in range(3):
    mu = fx_given_y[i][0]
    sgma = fx_given_y[i][1]
    fx = np.exp((-(x-mu)**2)/(2*(sgma**2)))/((2*np.pi*sgma**2)**0.5)
    plt.plot(x, fx, label=str(i))
plt.legend()
# Score one held-out sample under each class:
# unnormalized posterior ∝ prior * Gaussian likelihood.
type(Xtest)
Xtest = np.asarray(Xtest)
type(Xtest)
xt = Xtest[20]
print(xt)
i = 0
mu = fx_given_y[i][0]
sgma = fx_given_y[i][1]
Py_0_given_x = Py[0]*np.exp((-(xt-mu)**2)/(2*(sgma**2)))/((2*np.pi*sgma**2)**0.5)
Py_0_given_x
i = 1
mu = fx_given_y[i][0]
sgma = fx_given_y[i][1]
# Fix: original multiplied by Py[0]; the prior must match class 1.
Py_1_given_x = Py[1]*np.exp((-(xt-mu)**2)/(2*(sgma**2)))/((2*np.pi*sgma**2)**0.5)
i = 2
mu = fx_given_y[i][0]
sgma = fx_given_y[i][1]
# Fix: original multiplied by Py[0]; the prior must match class 2.
Py_2_given_x = Py[2]*np.exp((-(xt-mu)**2)/(2*(sgma**2)))/((2*np.pi*sgma**2)**0.5)
print(Py_0_given_x, Py_1_given_x, Py_2_given_x)
ytest = np.asarray(ytest)
ytest[20]  # true label for comparison
iris.head()
# Histogram + kernel density of petal length over the whole dataset.
X = np.array(iris.petal_length)
X
plt.hist(X, bins=30, density=True)
sns.kdeplot(X)
X = np.random.exponential(1, 10)
X
```
| github_jupyter |
## Welcome to Aequitas
The Aequitas toolkit is a flexible bias-audit utility for algorithmic decision-making models, accessible via Python API, command line interface (CLI), and through our [web application](http://aequitas.dssg.io/).
Use Aequitas to evaluate model performance across several bias and fairness metrics, and utilize the [most relevant metrics](https://dsapp.uchicago.edu/wp-content/uploads/2018/05/metrictree-1200x750.png) to your process in model selection.
Aequitas will help you:
- Understand where biases exist in your model(s)
- Compare the level of bias between groups in your sample population (bias disparity)
- Visualize absolute bias metrics and their related disparities for rapid comprehension and decision-making
Our goal is to support informed and equitable action for both machine learning practitioners and the decision-makers who rely on them.
Aequitas is compatible with: **Python 3.6+**
<a id='getting_started'></a>
# Getting started
You can audit your risk assessment system for two types of biases:
- Biased actions or interventions that are not allocated in a way that’s representative of the population.
- Biased outcomes through actions or interventions that are a result of your system being wrong about certain groups of people.
For both audits, you need the following data:
- Data about the specific attributes (race, gender, age, income, etc.) you want to audit for the the overall population considered for interventions
- The set of individuals in the above population that your risk assessment system recommended/ selected for intervention or action. _It’s important that this set comes from the assessments made after the system has been built, and not from the data the machine learning system was “trained” on, if you're using the audit as a factor in model selection._
If you want to audit for biases due to model or system errors, you also need to include actual outcomes (label values) for all individuals in the overall population.
Input data has slightly different requirements depending on whether you are using Aequitas via the webapp, CLI or Python package. In general, input data is a single table with the following columns:
- `score`
- `label_value` (for error-based metrics only)
- at least one attribute e.g. `race`, `sex` and `age_cat` (attribute categories defined by user)
## Bias measures tailored to your problem
### Input machine learning predictions
After [installing on your computer](./installation.html)
Run `aequitas-report` on [COMPAS data](https://github.com/dssg/aequitas/tree/master/examples):
`compas_for_aequitas.csv` excerpt:
| score | label_value| race | sex | age_cat |
| --------- |------------| -----| --- | ------- |
| 0 | 1 | African-American | Male | 25 - 45 |
| 1 | 1 | Native American | Female | Less than 25 |
```
aequitas-report --input compas_for_aequitas.csv
```
**Note:** Disparities are always defined in relation to a reference group. By default, Aequitas uses the majority group within each attribute as the reference group. [Defining a reference group](./config.html)
### The Bias Report output
The Bias Report produces a pdf that returns descriptive interpretation of the results along with three sets of tables.
* Fairness Measures Results
* Bias Metrics Results
* Group Metrics Results
Additionally, a csv is produced that contains the relevant data. More information about output [here](./output_data.html).
### Command Line output
In the command line you will see The Bias Report, which returns counts for each attribute by group and then computes various fairness metrics. This is the same information that is captured in the csv output.
```
___ _ __
/ | ___ ____ ___ __(_) /_____ ______
/ /| |/ _ \/ __ `/ / / / / __/ __ `/ ___/
/ ___ / __/ /_/ / /_/ / / /_/ /_/ (__ )
/_/ |_\___/\__, /\__,_/_/\__/\__,_/____/
/_/
____________________________________________________________________________
Bias and Fairness Audit Tool
____________________________________________________________________________
Welcome to Aequitas-Audit
Fairness measures requested: Statistical Parity,Impact Parity,FDR Parity,FPR Parity,FNR Parity,FOR Parity
model_id, score_thresholds 1 {'rank_abs': [3317]}
COUNTS::: race
African-American 3696
Asian 32
Caucasian 2454
Hispanic 637
Native American 18
Other 377
dtype: int64
COUNTS::: sex
Female 1395
Male 5819
dtype: int64
COUNTS::: age_cat
25 - 45 4109
Greater than 45 1576
Less than 25 1529
dtype: int64
audit: df shape from the crosstabs: (11, 26)
get_disparity_major_group()
number of rows after bias majority ref group: 11
Any NaN?: False
bias_df shape: (11, 38)
Fairness Threshold: 0.8
Fairness Measures: ['Statistical Parity', 'Impact Parity', 'FDR Parity', 'FPR Parity', 'FNR Parity', 'FOR Parity']
...
```
| github_jupyter |
# 定义目标
# 数据获取
## 训练数据
```
import pandas as pd
import numpy as np
import seaborn as sb
from matplotlib import pyplot as plt
%matplotlib inline
# Load the Titanic training data.
data_train = pd.read_csv("./data/train.csv")
data_train.head()
```
## 测试数据
```
# Load the Titanic test data.
data_test = pd.read_csv("./data/test.csv")
data_test.head()
```
# 数据理解
## 数据集名称
PassengerId 乘客ID<br>
Survived 获救与否(0死亡,1生存)<br>
Pclass 乘客等级(1/2/3等舱位)<br>
Name 乘客姓名<br>
Sex 性别<br>
Age 年龄<br>
SibSp 堂兄弟/妹个数<br>
Parch 父母与小孩个数<br>
Ticket 船票信息<br>
Fare 票价<br>
Cabin 客舱<br>
Embarked 登船港口
## 缺失值分析
```
# Column dtypes and non-null counts — reveals the missing Age/Cabin/Embarked values.
data_train.info()
```
* <font color=red>Age和Cabin都有大幅度缺失值,Embarked有2个缺失值<font>
## 数值型特征分布
```
# Summary statistics for the numeric columns.
data_train.describe()
```
* <font color=red>Survived中值(mean)表明仅有0.383838的人获救<font>
* <font color=red>平均年龄在29.7岁(缺失值不计入)<font>
* <font color=red>乘客普遍为2\3等级,1等乘客数量少<font>
* <font color=red>绝大多数乘客为独生子女<font>
## 部分特征与标签的关系
```
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly

fig = plt.figure(figsize=(18, 8))
fig.set(alpha=20)  # figure alpha (transparency) parameter

plt.subplot2grid((2, 3), (0, 0))  # split one large figure into sub-plots
data_train.Survived.value_counts().plot(kind='bar')  # bar chart of survival counts
plt.title(u"获救情况 (1为获救)")
plt.ylabel(u"人数")
plt.grid(True)

plt.subplot2grid((2, 3), (0, 1))
data_train.Pclass.value_counts().plot(kind="bar")  # passenger-class counts
plt.ylabel(u"人数")
plt.title(u"乘客等级分布")
plt.grid(True)

plt.subplot2grid((2, 3), (0, 2))
plt.scatter(data_train.Survived, data_train.Age)  # survival scatter by age
plt.ylabel(u"年龄")
plt.grid(b=True, which='major', axis='y')
plt.title(u"按年龄看获救分布 (1为获救)")

plt.subplot2grid((2, 3), (1, 0), colspan=2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')  # age density per class
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
plt.xlabel(u"年龄")  # plots an axis label
plt.ylabel(u"密度")
plt.title(u"各等级的乘客年龄分布")
plt.legend((u'头等舱', u'2等舱', u'3等舱'), loc='best')  # set the legend
plt.grid(True)

plt.subplot2grid((2, 3), (1, 2))
data_train.Embarked.value_counts().plot(kind='bar')  # boarding counts per port
plt.title(u"各登船口岸上船人数")
plt.ylabel(u"人数")
plt.grid(True)
plt.show()
```
* <font color=red>获救人数刚刚300多一点<font>
* <font color=red>3等乘客人数最多,其次是1等<font>
* <font color=red>头等舱乘客年龄普遍年轻,其次是2等舱,最后是3等舱<font>
* <font color=red>明显口岸上岸人数不均等,可能跟地位有关<font>
## 查看按乘客等级分布的获救情况
```
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly
# Survival counts stacked by passenger class.
fig = plt.figure(figsize=(20, 10))
fig.set(alpha=0.2)  # figure alpha (transparency) parameter
Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
df = pd.DataFrame({u'获救': Survived_1, u'未获救': Survived_0})
df.plot(kind='bar', stacked=True)
plt.title(u"各乘客等级的获救情况")
plt.xlabel(u"乘客等级")
plt.ylabel(u"人数")
plt.show()
```
* <font color=red>有钱就是希望大<font>
## 查看按性别分布的获救情况
```
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly
# Survival counts stacked by passenger sex.
fig = plt.figure(figsize=(20, 10))
fig.set(alpha=0.2)
Survived_0 = data_train.Sex[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Sex[data_train.Survived == 1].value_counts()
df = pd.DataFrame({u'获救': Survived_1, u'未获救': Survived_0})
df.plot(kind='bar', stacked=True, rot=0)
plt.title(u"各乘客性别的获救情况")
plt.xlabel(u"乘客性别")
plt.ylabel(u"人数")
plt.show()
```
* <font color=red>听船长的话,妇女先走<font>
## 登录口岸划分的获救情况
```
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly
# Survival counts stacked by port of embarkation.
fig = plt.figure(figsize=(20, 10))
fig.set(alpha=0.2)
Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
df = pd.DataFrame({u'获救': Survived_1, u'未获救': Survived_0})
df.plot(kind='bar', stacked=True, rot=0)
plt.title(u"登录口岸划分的获救情况")
plt.xlabel(u"口岸")
plt.ylabel(u"人数")
plt.show()
```
* <font color=red>C口岸可能是给头等舱提供的入口<font>
## 查看按乘客等级分布的获救情况
```
# Survival broken down by cabin class and sex: four panels sharing one y axis.
fig = plt.figure(figsize=(16, 10))
fig.set(alpha=0.5)  # figure transparency; cosmetic only
plt.title(u"根据舱等级和性别的获救情况")

ax1 = fig.add_subplot(141)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar', label="female highclass", color='#FA2479')
ax1.set_xticklabels([u"获救", u"未获救"], rotation=0)
ax1.legend([u"女性/高级舱"], loc='best')

ax2 = fig.add_subplot(142, sharey=ax1)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='female, low class', color='pink')
ax2.set_xticklabels([u"未获救", u"获救"], rotation=0)
plt.legend([u"女性/低级舱"], loc='best')

ax3 = fig.add_subplot(143, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar', label='male, high class', color='lightblue')
ax3.set_xticklabels([u"未获救", u"获救"], rotation=0)
plt.legend([u"男性/高级舱"], loc='best')

ax4 = fig.add_subplot(144, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='male low class', color='steelblue')
ax4.set_xticklabels([u"未获救", u"获救"], rotation=0)
plt.legend([u"男性/低级舱"], loc='best')

plt.show()
```
* <font color=red>有钱就是希望大<font>
## 家庭成员与获救的关系
```
# Survival counts grouped by number of parents/children aboard.
g = data_train.groupby(['Parch', 'Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
```
* <font color=red>没有太大的关联性<font>
## 查看兄弟姐妹人数和获救的关系
```
# Survival counts grouped by number of siblings/spouses aboard.
g = data_train.groupby(['SibSp', 'Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
```
* <font color=red>家里兄弟人数少的,获救希望更大<font>
# 特征工程
## 对于缺失数据补全
```
from sklearn.ensemble import RandomForestRegressor
### 使用 RandomForestClassifier 填补缺失的年龄属性
def set_missing_ages(df):
    """Impute missing Age values with a RandomForestRegressor fit on the
    numeric columns.

    Returns (df, fitted_regressor) so the same model can be reapplied to
    the test frame.
    """
    # Numeric features used to predict Age.
    age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    # Split rows into known / unknown age. `.values` replaces
    # DataFrame.as_matrix(), which was removed in pandas 1.0.
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values
    # Target is the first column (Age) ...
    train_know_age_y = known_age[:, 0]
    # ... predictors are the remaining columns.
    train_know_age_X = known_age[:, 1:]
    # Fit the random forest on rows with a known age.
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(train_know_age_X, train_know_age_y)
    # Predict ages for the rows missing one.
    predictedAges = rfr.predict(unknown_age[:, 1:])
    # Write the predictions back into the missing slots.
    df.loc[(df.Age.isnull()), 'Age'] = predictedAges
    return df, rfr
def set_Cabin_type(df):
    """Collapse Cabin to a binary flag: "Yes" when a cabin was recorded,
    "No" otherwise. Mutates `df` in place and returns it."""
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Cabin'] = "Yes"
    df.loc[~has_cabin, 'Cabin'] = "No"
    return df
# Apply both imputations to the training frame; keep the fitted regressor
# so the test frame can reuse it later.
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
data_train
```
## 数据离散化
* <font color=red>因为逻辑回归建模时,需要输入的特征都是数值型特征,我们通常会先对类目型的特征因子化/one-hot编码<font>
```
# 因为逻辑回归建模时,需要输入的特征都是数值型特征
# 我们先对类目型的特征离散/因子化
# 以Cabin为例,原本一个属性维度,因为其取值可以是['yes','no'],而将其平展开为'Cabin_yes','Cabin_no'两个属性
# 原本Cabin取值为yes的,在此处的'Cabin_yes'下取值为1,在'Cabin_no'下取值为0
# 原本Cabin取值为no的,在此处的'Cabin_yes'下取值为0,在'Cabin_no'下取值为1
# 我们使用pandas的get_dummies来完成这个工作,并拼接在原来的data_train之上,如下所示
# Logistic regression needs numeric inputs: one-hot encode the categorical
# columns with pd.get_dummies, append them, then drop the originals.
dummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix='Cabin')
dummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix='Embarked')
dummies_Sex = pd.get_dummies(data_train['Sex'], prefix='Sex')
dummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix='Pclass')
df = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
df
```
## 数据归一化
* <font color=red>我们还得做一些处理,仔细看看Age和Fare两个属性,乘客的数值幅度变化,也忒大了吧!!如果大家了解逻辑回归与梯度下降的话,会知道,各属性值之间scale差距太大,将对收敛速度造成几万点伤害值!甚至不收敛!所以我们先用scikit-learn里面的preprocessing模块对这俩货做一个scaling,所谓scaling,其实就是将一些变化幅度较大的特征化到[-1,1]之内。<font>
```
# 接下来我们要接着做一些数据预处理的工作,比如scaling,将一些变化幅度较大的特征化到[-1,1]之内
# 这样可以加速logistic regression的收敛
# Scale Age and Fare to zero mean / unit variance so their large magnitudes
# don't dominate the logistic-regression fit.
import sklearn.preprocessing as preprocessing
scaler = preprocessing.StandardScaler()
age_scale_param = scaler.fit(df['Age'].values.reshape(-1, 1))
# NOTE(review): fit_transform's second positional argument is the ignored
# `y`; passing the fitted param object here has no effect — confirm intent.
df['Age_scaled'] = scaler.fit_transform(df['Age'].values.reshape(-1, 1), age_scale_param)
fare_scale_param = scaler.fit(df['Fare'].values.reshape(-1, 1))
df['Fare_scaled'] = scaler.fit_transform(df['Fare'].values.reshape(-1, 1), fare_scale_param)
df.head()
```
## 特征选择
```
# Select the label + engineered feature columns and convert to a numpy matrix.
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
# Fix: `.values` replaces DataFrame.as_matrix(), removed in pandas 1.0.
train_np = train_df.values
train_np
```
# 数据保存
# 模型选择
## 选择模型库训练获取模型
```
# 使用scikit-learn中的LogisticRegression建模
from sklearn import linear_model
# First column is the Survived label ...
train_y = train_np[:, 0]
# ... remaining columns are the feature matrix.
train_X = train_np[:, 1:]
# Fit the baseline logistic regression.
# NOTE(review): penalty='l1' requires an l1-capable solver
# (liblinear/saga) in modern scikit-learn — confirm the installed version.
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
clf.fit(train_X, train_y)
clf
```
## 对训练集做相同操作
```
from sklearn.ensemble import RandomForestRegressor
# Apply to the test frame the same transforms used on the training frame.
data_test.loc[(data_test.Fare.isnull()), 'Fare'] = 0
# First fill missing ages with the RandomForestRegressor fitted on train data.
tmp_df = data_test[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
# Fix: `.values` replaces DataFrame.as_matrix(), removed in pandas 1.0.
null_age = tmp_df[data_test.Age.isnull()].values
# Predict Age from the remaining feature columns and write it back.
test_age_null_X = null_age[:, 1:]
predictedAges = rfr.predict(test_age_null_X)
data_test.loc[(data_test.Age.isnull()), 'Age'] = predictedAges
data_test = set_Cabin_type(data_test)
# One-hot encode the categorical columns, mirroring the training pipeline.
dummies_Cabin = pd.get_dummies(data_test['Cabin'], prefix='Cabin')
dummies_Embarked = pd.get_dummies(data_test['Embarked'], prefix='Embarked')
dummies_Sex = pd.get_dummies(data_test['Sex'], prefix='Sex')
dummies_Pclass = pd.get_dummies(data_test['Pclass'], prefix='Pclass')
df_test = pd.concat([data_test, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
df_test.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
# Scale Age/Fare with the scaler; the extra positional argument is
# StandardScaler's ignored `y` (kept for parity with the train cell).
df_test['Age_scaled'] = scaler.fit_transform(df_test['Age'].values.reshape(-1, 1), age_scale_param)
df_test['Fare_scaled'] = scaler.fit_transform(df_test['Fare'].values.reshape(-1, 1), fare_scale_param)
df_test
```
**注意训练一次可以用,第二次会对"rfr.predict(X)"报错.其次源代码中应该修改**
* 特征选择
```
# Select the same feature columns (test data has no Survived) and predict.
test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions = clf.predict(test)
# Fix: `.values` replaces Series.as_matrix(), removed in pandas 1.0.
result = pd.DataFrame({'PassengerId': data_test['PassengerId'].values, 'Survived': predictions.astype(np.int32)})
result.to_csv("logistic_regression_predictions.csv", index=False)
pd.read_csv("logistic_regression_predictions.csv")
```
<font color=red>提交后结果为0.76555,恩,结果还不错。毕竟,这只是我们简单分析过后出的一个baseline系统嘛</font>
# 性能评估
## 画出学习曲线
性能可视化
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.learning_curve import learning_curve
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly


# Compute train/CV scores with sklearn's learning_curve and plot both curves.
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
                        train_sizes=np.linspace(.05, 1., 20), verbose=0, plot=True):
    """
    Plot the learning curve of `estimator` on the given data.

    Parameters
    ----------
    estimator : the classifier to evaluate.
    title : figure title.
    X : input feature matrix (numpy array).
    y : target vector.
    ylim : optional (ymin, ymax) tuple bounding the y axis.
    cv : number of cross-validation folds; one fold is the CV set, the
         remaining n-1 are used for training (default 3).
    n_jobs : number of parallel jobs (default 1).
    train_sizes : fractions of the data used as increasing training sizes.
    plot : when True, draw the curves; always returns (midpoint, diff).
    """
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, verbose=verbose)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    if plot:
        plt.figure(figsize=(12, 8))
        plt.title(title)
        if ylim is not None:
            plt.ylim(*ylim)
        plt.xlabel(u"训练样本数")
        plt.ylabel(u"得分")
        plt.gca().invert_yaxis()  # flip the y axis
        plt.grid()
        # Shaded bands: mean ± one standard deviation for each curve.
        plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                         alpha=0.1, color="b")
        plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                         alpha=0.1, color="r")
        plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label=u"训练集上得分")
        plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label=u"交叉验证集上得分")
        plt.legend(loc="best")
        plt.draw()
        plt.gca().invert_yaxis()
        plt.show()
    # Midpoint between the final train/CV bands and the gap between them
    # (a large gap suggests overfitting).
    midpoint = ((train_scores_mean[-1] + train_scores_std[-1]) + (test_scores_mean[-1] - test_scores_std[-1])) / 2
    diff = (train_scores_mean[-1] + train_scores_std[-1]) - (test_scores_mean[-1] - test_scores_std[-1])
    return midpoint, diff


plot_learning_curve(clf, u"学习曲线", train_X, train_y)
```
<font color=red>在实际数据上看,我们得到的learning curve没有理论推导的那么光滑哈,但是可以大致看出来,训练集和交叉验证集上的得分曲线走势还是符合预期的。<font><br>
<font color=red>目前的曲线看来,我们的model并不处于overfitting的状态(overfitting的表现一般是训练集上得分高,而交叉验证集上要低很多,中间的gap比较大)。因此我们可以再做些feature engineering的工作,添加一些新产出的特征或者组合特征到模型中。<font><br>
将变量传递给另一个页面
```
# IPython magics: persist these variables so another notebook can
# retrieve them with `%store -r`.
%store train_X
%store train_y
%store clf
```
## 其他模型构建
```
import numpy as np
import pandas as pd
from pandas import DataFrame
from patsy import dmatrices
import string
from operator import itemgetter
import json
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split,StratifiedShuffleSplit,StratifiedKFold
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.externals import joblib
## Read configuration parameters: data paths and the RNG seed.
train_file = "./data/train.csv"
MODEL_PATH = "./"
test_file = "./data/test.csv"
SUBMISSION_PATH = "./"
seed = 0
print(train_file, seed)
# Print grid-search scores.
def report(grid_scores, n_top=3):
    """Print the n_top grid-search entries ranked by mean validation score."""
    ranked = sorted(grid_scores, key=itemgetter(1), reverse=True)
    for rank, entry in enumerate(ranked[:n_top], start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
            entry.mean_validation_score,
            np.std(entry.cv_validation_scores)))
        print("Parameters: {0}".format(entry.parameters))
        print("")
#清理和处理数据
def substrings_in_string(big_string, substrings):
    """Return the first entry of `substrings` contained in `big_string`.

    Prints the unmatched string and returns np.nan when nothing matches
    (used to spot passenger names with no recognized title).
    """
    for substring in substrings:
        # `in` is the idiomatic (and faster) form of str.find(...) != -1
        if substring in big_string:
            return substring
    print(big_string)
    return np.nan
# Shared encoders reused by clean_and_munge_data below.
le = preprocessing.LabelEncoder()
enc = preprocessing.OneHotEncoder()
def clean_and_munge_data(df):
    """Clean the raw Titanic frame and engineer model features.

    Fills missing fares/ages, derives Title / family-size / age-category /
    fare-ratio features, label-encodes the categorical columns, and drops
    identifier columns. Returns the transformed frame.
    """
    # Treat a fare of 0 as missing.
    df.Fare = df.Fare.map(lambda x: np.nan if x == 0 else x)
    # Extract a Title field from the passenger name.
    title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev',
                  'Dr', 'Ms', 'Mlle', 'Col', 'Capt', 'Mme', 'Countess',
                  'Don', 'Jonkheer']
    df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list))

    # Collapse the rare titles into Mr / Mrs / Miss / Master.
    def replace_titles(x):
        title = x['Title']
        if title in ['Mr', 'Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
            return 'Mr'
        elif title in ['Master']:
            return 'Master'
        elif title in ['Countess', 'Mme', 'Mrs']:
            return 'Mrs'
        elif title in ['Mlle', 'Ms', 'Miss']:
            return 'Miss'
        elif title == 'Dr':
            # Fix: Sex values are lowercase ('male'/'female' — see the Gender
            # mapping below); the original compared against 'Male', which never
            # matched and mapped every 'Dr' to 'Mrs'.
            if x['Sex'] == 'male':
                return 'Mr'
            else:
                return 'Mrs'
        elif title == '':
            if x['Sex'] == 'male':
                return 'Master'
            else:
                return 'Miss'
        else:
            return title
    df['Title'] = df.apply(replace_titles, axis=1)

    # Family-size features.
    df['Family_Size'] = df['SibSp'] + df['Parch']
    df['Family'] = df['SibSp'] * df['Parch']

    # Fill missing fares with the per-class median.
    df.loc[(df.Fare.isnull()) & (df.Pclass == 1), 'Fare'] = np.median(df[df['Pclass'] == 1]['Fare'].dropna())
    df.loc[(df.Fare.isnull()) & (df.Pclass == 2), 'Fare'] = np.median(df[df['Pclass'] == 2]['Fare'].dropna())
    df.loc[(df.Fare.isnull()) & (df.Pclass == 3), 'Fare'] = np.median(df[df['Pclass'] == 3]['Fare'].dropna())

    df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)

    # Fill missing ages with the per-title mean.
    df['AgeFill'] = df['Age']
    mean_ages = np.zeros(4)
    mean_ages[0] = np.average(df[df['Title'] == 'Miss']['Age'].dropna())
    mean_ages[1] = np.average(df[df['Title'] == 'Mrs']['Age'].dropna())
    mean_ages[2] = np.average(df[df['Title'] == 'Mr']['Age'].dropna())
    mean_ages[3] = np.average(df[df['Title'] == 'Master']['Age'].dropna())
    df.loc[(df.Age.isnull()) & (df.Title == 'Miss'), 'AgeFill'] = mean_ages[0]
    df.loc[(df.Age.isnull()) & (df.Title == 'Mrs'), 'AgeFill'] = mean_ages[1]
    df.loc[(df.Age.isnull()) & (df.Title == 'Mr'), 'AgeFill'] = mean_ages[2]
    df.loc[(df.Age.isnull()) & (df.Title == 'Master'), 'AgeFill'] = mean_ages[3]

    # Bucket ages into coarse categories.
    df['AgeCat'] = df['AgeFill']
    df.loc[(df.AgeFill <= 10), 'AgeCat'] = 'child'
    df.loc[(df.AgeFill > 60), 'AgeCat'] = 'aged'
    df.loc[(df.AgeFill > 10) & (df.AgeFill <= 30), 'AgeCat'] = 'adult'
    df.loc[(df.AgeFill > 30) & (df.AgeFill <= 60), 'AgeCat'] = 'senior'

    df.Embarked = df.Embarked.fillna('S')

    # Encode cabin presence as a numeric flag.
    df.loc[df.Cabin.isnull() == True, 'Cabin'] = 0.5
    df.loc[df.Cabin.isnull() == False, 'Cabin'] = 1.5

    # Fare per family member and interaction features.
    df['Fare_Per_Person'] = df['Fare'] / (df['Family_Size'] + 1)
    # Age times class
    df['AgeClass'] = df['AgeFill'] * df['Pclass']
    df['ClassFare'] = df['Pclass'] * df['Fare_Per_Person']

    df['HighLow'] = df['Pclass']
    df.loc[(df.Fare_Per_Person < 8), 'HighLow'] = 'Low'
    df.loc[(df.Fare_Per_Person >= 8), 'HighLow'] = 'High'

    # Label-encode the remaining categorical columns.
    # Fix: np.float was removed in numpy 1.24 — plain `float` is equivalent.
    le.fit(df['Sex'])
    x_sex = le.transform(df['Sex'])
    df['Sex'] = x_sex.astype(float)

    le.fit(df['Ticket'])
    x_Ticket = le.transform(df['Ticket'])
    df['Ticket'] = x_Ticket.astype(float)

    le.fit(df['Title'])
    x_title = le.transform(df['Title'])
    df['Title'] = x_title.astype(float)

    le.fit(df['HighLow'])
    x_hl = le.transform(df['HighLow'])
    df['HighLow'] = x_hl.astype(float)

    le.fit(df['AgeCat'])
    x_age = le.transform(df['AgeCat'])
    df['AgeCat'] = x_age.astype(float)

    le.fit(df['Embarked'])
    x_emb = le.transform(df['Embarked'])
    df['Embarked'] = x_emb.astype(float)

    # Remove identifiers and superseded columns.
    df = df.drop(['PassengerId', 'Name', 'Age', 'Cabin'], axis=1)
    return df
# Read the data
traindf=pd.read_csv(train_file)
## Clean and feature-engineer the data
df=clean_and_munge_data(traindf)
########################################formula################################
# Patsy formula: C(...) marks categorical terms that dmatrices one-hot expands.
formula_ml='Survived~Pclass+C(Title)+Sex+C(AgeCat)+Fare_Per_Person+Fare+Family_Size'
y_train, x_train = dmatrices(formula_ml, data=df, return_type='dataframe')
y_train = np.asarray(y_train).ravel()
print(y_train.shape,x_train.shape)
## Hold out 20% of the training data as a test set
X_train, X_test, Y_train, Y_test = train_test_split(x_train, y_train, test_size=0.2,random_state=seed)
# Initialize the classifier
clf=RandomForestClassifier(n_estimators=500, criterion='entropy', max_depth=5, min_samples_split=2,
min_samples_leaf=1, max_features='auto', bootstrap=False, oob_score=False, n_jobs=1, random_state=seed,
verbose=0)
# Grid search for the best parameters (empty grid: effectively just fits the pipeline)
param_grid = dict( )
## Build the classification pipeline
pipeline=Pipeline([('clf',clf)])
# NOTE(review): StratifiedShuffleSplit(Y_train, n_iter=...) and grid_scores_ below use the
# pre-0.18 scikit-learn API; modern versions use n_splits / cv_results_ — confirm the
# installed sklearn version before running.
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=3,scoring='accuracy',\
cv=StratifiedShuffleSplit(Y_train, n_iter=10, test_size=0.2, train_size=None, \
random_state=seed)).fit(X_train, Y_train)
# Score the results
print("Best score: %0.3f" % grid_search.best_score_)
print(grid_search.best_estimator_)
report(grid_search.grid_scores_)
print('-----grid search end------------')
print('on all train set')
scores = cross_val_score(grid_search.best_estimator_, x_train, y_train,cv=3,scoring='accuracy')
print(scores.mean(),scores)
print ('on test set')
scores = cross_val_score(grid_search.best_estimator_, X_test, Y_test,cv=3,scoring='accuracy')
print(scores.mean(),scores)
# Score the final predictions on train and test splits
print(classification_report(Y_train, grid_search.best_estimator_.predict(X_train) ))
print('test data')
print(classification_report(Y_test, grid_search.best_estimator_.predict(X_test) ))
# Persist the best model to disk
model_file=MODEL_PATH+'model-rf.pkl'
joblib.dump(grid_search.best_estimator_, model_file)
```
# 模型优化
## 根据模型的系数调优
<font color=red>接下来,我们就该看看如何优化baseline系统了<br>
我们还有些特征可以再挖掘挖掘<br><br>
1. 比如说Name和Ticket两个属性被我们完整舍弃了(好吧,其实是一开始我们对于这种,每一条记录都是一个完全不同的值的属性,并没有很直接的处理方式)<br>
2. 比如说,我们想想,年龄的拟合本身也未必是一件非常靠谱的事情<br>
3. 另外,以我们的日常经验,小盆友和老人可能得到的照顾会多一些,这样看的话,年龄作为一个连续值,给一个固定的系数,似乎体现不出两头受照顾的实际情况,所以,说不定我们把年龄离散化,按区段分作类别属性会更合适一些<br>
那怎么样才知道,哪些地方可以优化,哪些优化的方法是promising的呢?<br>
是的<br><br>
要做交叉验证(cross validation)!<br>
要做交叉验证(cross validation)!<br>
要做交叉验证(cross validation)!<br><br>
重要的事情说3遍!!!<br><br>
因为test.csv里面并没有Survived这个字段(好吧,这是废话,这明明就是我们要预测的结果),我们无法在这份数据上评定我们算法在该场景下的效果。。。<br>
我们通常情况下,这么做cross validation:把train.csv分成两部分,一部分用于训练我们需要的模型,另外一部分数据上看我们预测算法的效果。<br>
我们可以用scikit-learn的cross_validation来完成这个工作</font>
<font color=red>在此之前,咱们可以看看现在得到的模型的系数,因为系数和它们最终的判定能力强弱是正相关的</font>
```
%store train_df
%store clf
%store df
%store origin_data_train
%store data_train
pd.DataFrame({"columns":list(train_df.columns)[1:], "coef":list(clf.coef_.T)})
```
<font color=red>上面的系数和最后的结果是一个正相关的关系<br>
我们先看看那些权重绝对值非常大的feature,在我们的模型上:<br>
* Sex属性,如果是female会极大提高最后获救的概率,而male会很大程度拉低这个概率。
* Pclass属性,1等舱乘客最后获救的概率会上升,而乘客等级为3会极大地拉低这个概率。
* 有Cabin值会很大程度拉升最后获救概率(这里似乎能看到了一点端倪,事实上从最上面的有无Cabin记录的Survived分布图上看出,即使有Cabin记录的乘客也有一部分遇难了,估计这个属性上我们挖掘还不够)
* Age是一个负相关,意味着在我们的模型里,年龄越小,越有获救的优先权(还得回原数据看看这个是否合理)
* 有一个登船港口S会很大程度拉低获救的概率,另外俩港口压根就没啥作用(这个实际上非常奇怪,因为我们从之前的统计图上并没有看到S港口的获救率非常低,所以也许可以考虑把登船港口这个feature去掉试试)。
* 船票Fare有小幅度的正相关(并不意味着这个feature作用不大,有可能是我们细化的程度还不够,举个例子,说不定我们得对它离散化,再分至各个乘客等级上?)
噢啦,观察完了,我们现在有一些想法了,但是怎么样才知道,哪些优化的方法是promising的呢?<br>
恩,要靠交叉验证
```
from sklearn import cross_validation
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports these helpers from sklearn.model_selection.
# Quick look at the cross-validated scores
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
all_data = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
X = all_data.as_matrix()[:,1:]
y = all_data.as_matrix()[:,0]
print(cross_validation.cross_val_score(clf, X, y, cv=5))
# Split the data
split_train, split_cv = cross_validation.train_test_split(df, test_size=0.3, random_state=0) # random_state fixes the shuffle so later runs reproduce the exact same split
train_df = split_train.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
# Fit the model
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
clf.fit(train_df.as_matrix()[:,1:], train_df.as_matrix()[:,0])
# Predict on the held-out cross-validation split
cv_df = split_cv.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions = clf.predict(cv_df.as_matrix()[:,1:])
split_cv[predictions != cv_df.as_matrix()[:,0]]
# Pull out the mispredicted cases to inspect them against the original dataframe
#split_cv['PredictResult'] = predictions
origin_data_train = pd.read_csv("./data/train.csv")
bad_cases = origin_data_train.loc[origin_data_train['PassengerId'].isin(split_cv[predictions != cv_df.as_matrix()[:,0]]['PassengerId'].values)]
bad_cases
```
<font color=red>对比bad case,我们仔细看看我们预测错的样本,到底是哪些特征有问题,咱们处理得还不够细?<br>
我们随便列一些可能可以做的优化操作:<br>
* Age属性不使用现在的拟合方式,而是根据名称中的『Mr』『Mrs』『Miss』等的平均值进行填充。
* Age不做成一个连续值属性,而是使用一个步长进行离散化,变成离散的类目feature。
* Cabin再细化一些,对于有记录的Cabin属性,我们将其分为前面的字母部分(我猜是位置和船层之类的信息) 和 后面的数字部分(应该是房间号,有意思的事情是,如果你仔细看看原始数据,你会发现,这个值大的情况下,似乎获救的可能性高一些)。
* **Pclass和Sex俩太重要了,我们试着用它们去组出一个组合属性来试试,这也是另外一种程度的细化。**
* 单加一个Child字段,Age<=12的,设为1,其余为0(你去看看数据,确实小盆友优先程度很高啊)
* 如果名字里面有『Mrs』,而Parch>1的,我们猜测她可能是一个母亲,应该获救的概率也会提高,因此可以多加一个Mother字段,此种情况下设为1,其余情况下设为0
* 登船港口可以考虑先去掉试试(Q和C本来就没权重,S有点诡异)
* 把堂兄弟/兄妹 和 Parch 还有自己 个数加在一起组一个Family_size字段(考虑到大家族可能对最后的结果有影响)
* Name是一个我们一直没有触碰的属性,我们可以做一些简单的处理,比如说男性中带某些字眼的(‘Capt’, ‘Don’, ‘Major’, ‘Sir’)可以统一到一个Title,女性也一样。
大家接着往下挖掘,可能还可以想到更多可以细挖的部分。我这里先列这些了,然后我们可以使用手头上的”train_df”和”cv_df”开始试验这些feature engineering的tricks是否有效了。
```
# Inspect rows whose Name contains "Major" (relies on data_train from an earlier cell)
data_train[data_train['Name'].str.contains("Major")]
# Re-read the raw training data and engineer a Sex x Pclass combination feature
data_train = pd.read_csv("./data/train.csv")
data_train['Sex_Pclass'] = data_train.Sex + "_" + data_train.Pclass.map(str) # map(str) converts each Pclass number to a string before concatenation
from sklearn.ensemble import RandomForestRegressor
# Use RandomForestRegressor to fill in the missing Age values
def set_missing_ages(df):
    """Fill missing Age values by regressing Age on Fare/Parch/SibSp/Pclass.

    Trains a RandomForestRegressor on rows where Age is known and predicts
    Age for the rows where it is missing, filling them in place.

    Returns:
        (df, rfr): the mutated DataFrame and the fitted regressor, so the
        same model can later be applied to the test set.
    """
    # Numeric features used to predict Age.
    age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
    # Split rows into known and unknown Age.
    # .values replaces DataFrame.as_matrix(), which was removed in pandas >= 1.0.
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values
    # Target is the Age column; the remaining columns are the features.
    y = known_age[:, 0]
    X = known_age[:, 1:]
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(X, y)
    # Predict and fill only when there is actually something missing
    # (calling predict on an empty array would raise).
    if len(unknown_age):
        predictedAges = rfr.predict(unknown_age[:, 1:])
        df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges
    return df, rfr
def set_Cabin_type(df):
    """Collapse Cabin to a binary categorical: "Yes" if a cabin was recorded, else "No"."""
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Cabin'] = "Yes"
    df.loc[~has_cabin, 'Cabin'] = "No"
    return df
# Fill missing ages and binarize Cabin using the helpers defined above
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
# One-hot encode the categorical columns (including the engineered Sex_Pclass combo)
dummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix= 'Cabin')
dummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data_train['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix= 'Pclass')
dummies_Sex_Pclass = pd.get_dummies(data_train['Sex_Pclass'], prefix= 'Sex_Pclass')
df = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass, dummies_Sex_Pclass], axis=1)
df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'Sex_Pclass'], axis=1, inplace=True)
import sklearn.preprocessing as preprocessing
# Standardize the wide-ranged numeric columns so they don't dominate the linear model
scaler = preprocessing.StandardScaler()
age_scale_param = scaler.fit(df['Age'].values.reshape(-1,1))
df['Age_scaled'] = scaler.fit_transform(df['Age'].values.reshape(-1,1), age_scale_param)
fare_scale_param = scaler.fit(df['Fare'].values.reshape(-1,1))
df['Fare_scaled'] = scaler.fit_transform(df['Fare'].values.reshape(-1,1), fare_scale_param)
from sklearn import linear_model
# Keep only the model features (Survived target ends up in column 0)
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*')
train_np = train_df.as_matrix()
# y is the Survived outcome
y = train_np[:, 0]
# X holds the feature values
X = train_np[:, 1:]
# Fit an L1-regularized logistic regression
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
clf.fit(X, y)
clf
```
对test也做同样的变换
```
data_test = pd.read_csv("./data/test.csv")
# The test set has one missing Fare; fill it with 0
data_test.loc[ (data_test.Fare.isnull()), 'Fare' ] = 0
data_test['Sex_Pclass'] = data_test.Sex + "_" + data_test.Pclass.map(str)
# Apply the same feature transformations to test_data as were applied to train_data
# First, fill the missing ages using the RandomForestRegressor fitted on the training set
tmp_df = data_test[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
null_age = tmp_df[data_test.Age.isnull()].as_matrix()
# Predict Age from the feature columns and fill it in
X = null_age[:, 1:]
predictedAges = rfr.predict(X)
data_test.loc[ (data_test.Age.isnull()), 'Age' ] = predictedAges
data_test = set_Cabin_type(data_test)
# One-hot encode the categorical columns, mirroring the training pipeline
dummies_Cabin = pd.get_dummies(data_test['Cabin'], prefix= 'Cabin')
dummies_Embarked = pd.get_dummies(data_test['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data_test['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data_test['Pclass'], prefix= 'Pclass')
dummies_Sex_Pclass = pd.get_dummies(data_test['Sex_Pclass'], prefix= 'Sex_Pclass')
df_test = pd.concat([data_test, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass, dummies_Sex_Pclass], axis=1)
df_test.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'Sex_Pclass'], axis=1, inplace=True)
# NOTE(review): fit_transform below re-fits the scaler on the TEST data; transforming
# with the train-fitted parameters is the conventional choice — confirm intent.
df_test['Age_scaled'] = scaler.fit_transform(df_test['Age'].values.reshape(-1,1), age_scale_param)
df_test['Fare_scaled'] = scaler.fit_transform(df_test['Fare'].values.reshape(-1,1), fare_scale_param)
df_test
# Predict and write the submission file
test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*')
predictions = clf.predict(test)
result = pd.DataFrame({'PassengerId':data_test['PassengerId'].as_matrix(), 'Survived':predictions.astype(np.int32)})
result.to_csv("logistic_regression_predictions2.csv", index=False)
pd.read_csv("logistic_regression_predictions2.csv")
```
<font color=red>一般做到后期,咱们要进行模型优化的方法就是模型融合啦<br>
先解释解释啥叫模型融合哈,我们还是举几个例子直观理解一下好了。<br><br>
大家都看过知识问答的综艺节目中,求助现场观众时候,让观众投票,最高的答案作为自己的答案的形式吧,每个人都有一个判定结果,最后我们相信答案在大多数人手里。<br>
再通俗一点举个例子。你和你班某数学大神关系好,每次作业都『模仿』他的,于是绝大多数情况下,他做对了,你也对了。突然某一天大神脑子犯糊涂,手一抖,写错了一个数,于是…恩,你也只能跟着错了。 <br>
我们再来看看另外一个场景,你和你班5个数学大神关系都很好,每次都把他们作业拿过来,对比一下,再『自己做』,那你想想,如果哪天某大神犯糊涂了,写错了,but另外四个写对了啊,那你肯定相信另外4人的是正确答案吧?<br>
最简单的模型融合大概就是这么个意思,比如分类问题,当我们手头上有一堆在同一份数据集上训练得到的分类器(比如logistic regression,SVM,KNN,random forest,神经网络),那我们让他们都分别去做判定,然后对结果做投票统计,取票数最多的结果为最后结果。<br>
bingo,问题就这么完美的解决了。<br>
模型融合可以比较好地缓解,训练过程中产生的过拟合问题,从而对于结果的准确度提升有一定的帮助。<br>
话说回来,回到我们现在的问题。你看,我们现在只讲了logistic regression,如果我们还想用这个融合思想去提高我们的结果,我们该怎么做呢?<br>
既然这个时候模型没得选,那咱们就在数据上动动手脚咯。大家想想,如果模型出现过拟合现象,一定是在我们的训练上出现拟合过度造成的对吧。<br>
那我们干脆就不要用全部的训练集,每次取训练集的一个subset,做训练,这样,我们虽然用的是同一个机器学习算法,但是得到的模型却是不一样的;同时,因为我们没有任何一份子数据集是全的,因此即使出现过拟合,也是在子训练集上出现过拟合,而不是全体数据上,这样做一个融合,可能对最后的结果有一定的帮助。对,这就是常用的Bagging。<br>
我们用scikit-learn里面的Bagging来完成上面的思路,过程非常简单。代码如下:<br><br></font>
## 使用模型融合进行优化
```
from sklearn.ensemble import BaggingRegressor
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*|Mother|Child|Family|Title')
# .values replaces DataFrame.as_matrix(), which was removed in pandas >= 1.0
train_np = train_df.values
# y is the Survived outcome
y = train_np[:, 0]
# X holds the feature values
X = train_np[:, 1:]
# Bag 10 L1 logistic regressions, each fit on a random 80% subsample,
# to reduce overfitting via model averaging
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
bagging_clf = BaggingRegressor(clf, n_estimators=10, max_samples=0.8, max_features=1.0, bootstrap=True, bootstrap_features=False, n_jobs=-1)
bagging_clf.fit(X, y)
test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*|Mother|Child|Family|Title')
predictions = bagging_clf.predict(test)
result = pd.DataFrame({'PassengerId':data_test['PassengerId'].values, 'Survived':predictions.astype(np.int32)})
result.to_csv("logistic_regression_predictions3.csv", index=False)
# Bug fix: display the file we just wrote (previously re-read predictions2 by mistake)
pd.read_csv("logistic_regression_predictions3.csv")
```
# 模型使用
## 模型保存输出
```
from sklearn.externals import joblib
model_file=MODEL_PATH+'model-rf.pkl'
joblib.dump(grid_search.best_estimator_, model_file)
```
| github_jupyter |
```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from bert import modeling
import numpy as np
import tensorflow as tf
import pandas as pd
from tqdm import tqdm
# !wget https://github.com/huseinzol05/Malaya/raw/master/pretrained-model/preprocess/sp10m.cased.bert.model
# !wget https://github.com/huseinzol05/Malaya/raw/master/pretrained-model/preprocess/sp10m.cased.bert.vocab
from prepro_utils import preprocess_text, encode_ids, encode_pieces
import sentencepiece as spm
sp_model = spm.SentencePieceProcessor()
sp_model.Load('sp10m.cased.bert.model')
with open('sp10m.cased.bert.vocab') as fopen:
v = fopen.read().split('\n')[:-1]
v = [i.split('\t') for i in v]
v = {i[0]: i[1] for i in v}
class Tokenizer:
    """BERT-style tokenizer interface backed by the module-level SentencePiece
    model (sp_model)."""

    def __init__(self, v):
        # Keep the vocab mapping around for compatibility; tokenization itself
        # goes through the global sp_model.
        self.vocab = v

    def tokenize(self, string):
        """Split a raw string into sentence pieces."""
        return encode_pieces(sp_model, string, return_unicode=False, sample=False)

    def convert_tokens_to_ids(self, tokens):
        """Map each sentence piece to its integer id."""
        return [sp_model.PieceToId(p) for p in tokens]

    def convert_ids_to_tokens(self, ids):
        """Map integer ids back to their sentence pieces."""
        return [sp_model.IdToPiece(idx) for idx in ids]
tokenizer = Tokenizer(v)
from glob import glob
import json
left, right, label = [], [], []
for file in glob('../text-similarity/*k.json'):
with open(file) as fopen:
x = json.load(fopen)
for i in x:
splitted = i[0].split(' <> ')
if len(splitted) != 2:
continue
left.append(splitted[0])
right.append(splitted[1])
label.append(i[1])
l = {'contradiction': 0, 'entailment': 1}
snli = glob('../text-similarity/part*.json')
for file in snli:
with open(file) as fopen:
x = json.load(fopen)
for i in x:
splitted = i[1].split(' <> ')
if len(splitted) != 2:
continue
if i[0] not in l:
continue
left.append(splitted[0])
right.append(splitted[1])
try:
label.append(l[i[0]])
except Exception as e:
print(e)
print(splitted, i[0])
mnli = glob('../text-similarity/translated-*.json')
mnli
for file in mnli:
    with open(file) as fopen:
        x = json.load(fopen)
    for i in x:
        # Each record is expected to carry 3 fields; field 1 is the label,
        # field 2 the '<>'-joined sentence pair.
        if len(i) != 3:
            continue
        splitted = i[2].split(' <> ')
        # NOTE(review): this keeps only 3-part splits, while the SNLI loop above
        # requires exactly 2 parts; only splitted[0] and splitted[1] are used, so
        # '!= 3' may be a typo for '!= 2' — confirm against the data format.
        if len(splitted) != 3:
            continue
        if i[1] not in l:
            continue
        left.append(splitted[0])
        right.append(splitted[1])
        try:
            label.append(l[i[1]])
        except Exception as e:
            print(e)
            print(splitted, i)
BERT_INIT_CHKPNT = 'bert-base-2020-03-19/model.ckpt-2000002'
BERT_CONFIG = 'bert-base-2020-03-19/bert_config.json'
from tqdm import tqdm
MAX_SEQ_LENGTH = 200
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def get_inputs(left, right):
    """Convert paired sentences into BERT model inputs.

    For every (left[i], right[i]) pair, builds the standard
    [CLS] A [SEP] B [SEP] token layout, converts it to ids, and zero-pads
    all outputs to MAX_SEQ_LENGTH.

    Returns:
        (input_ids, input_masks, segment_ids): three parallel lists of int
        lists, each inner list exactly MAX_SEQ_LENGTH long.
    """
    input_ids, input_masks, segment_ids = [], [], []
    for idx in tqdm(range(len(left))):
        tokens_a = tokenizer.tokenize(left[idx])
        tokens_b = tokenizer.tokenize(right[idx])
        # Reserve 3 slots for [CLS] and the two [SEP] markers.
        _truncate_seq_pair(tokens_a, tokens_b, MAX_SEQ_LENGTH - 3)
        tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
        # Segment 0 covers [CLS] + sentence A + first [SEP]; segment 1 the rest.
        segment_id = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
        input_id = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_id)
        # Zero-pad ids, mask and segments up to the fixed sequence length.
        pad = MAX_SEQ_LENGTH - len(input_id)
        input_id = input_id + [0] * pad
        input_mask = input_mask + [0] * pad
        segment_id = segment_id + [0] * pad
        input_ids.append(input_id)
        input_masks.append(input_mask)
        segment_ids.append(segment_id)
    return input_ids, input_masks, segment_ids
input_ids, input_masks, segment_ids = get_inputs(left, right)
bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)
epoch = 20
batch_size = 60
warmup_proportion = 0.1
num_train_steps = int(len(left) / batch_size * epoch)
num_warmup_steps = int(num_train_steps * warmup_proportion)
class Model:
    """BERT sentence-pair classifier (TensorFlow 1.x graph mode).

    Feeds token ids, attention masks and segment ids through a pretrained
    BertModel, takes the pooled [CLS] output and projects it to
    `dimension_output` logits, trained with sparse softmax cross-entropy and
    BERT's warmup optimizer. Relies on module-level `bert_config`,
    `num_train_steps` and `num_warmup_steps`.
    """
    def __init__(
        self,
        dimension_output,
        learning_rate = 2e-5,
        training = True
    ):
        # Placeholders: [batch, seq] token ids / segment ids / attention masks,
        # and [batch] integer class labels.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.segment_ids = tf.placeholder(tf.int32, [None, None])
        self.input_masks = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None])
        model = modeling.BertModel(
            config=bert_config,
            is_training=training,  # enables dropout inside BERT when True
            input_ids=self.X,
            input_mask=self.input_masks,
            token_type_ids=self.segment_ids,
            use_one_hot_embeddings=False)
        # Pooled [CLS] representation -> dense classification head.
        output_layer = model.get_pooled_output()
        self.logits = tf.layers.dense(output_layer, dimension_output)
        # Named node so the graph can be frozen/exported by output name later.
        self.logits = tf.identity(self.logits, name = 'logits')
        self.cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits = self.logits, labels = self.Y
            )
        )
        # BERT's AdamW-style optimizer with linear warmup and decay.
        self.optimizer = optimization.create_optimizer(self.cost, learning_rate,
                         num_train_steps, num_warmup_steps, False)
        correct_pred = tf.equal(
            tf.argmax(self.logits, 1, output_type = tf.int32), self.Y
        )
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
dimension_output = 2
learning_rate = 2e-5
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
dimension_output,
learning_rate
)
sess.run(tf.global_variables_initializer())
var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'bert')
saver = tf.train.Saver(var_list = var_lists)
saver.restore(sess, BERT_INIT_CHKPNT)
from sklearn.model_selection import train_test_split
train_input_ids, test_input_ids, train_input_masks, test_input_masks, train_segment_ids, test_segment_ids, train_Y, test_Y = train_test_split(
input_ids, input_masks, segment_ids, label, test_size = 0.2)
from tqdm import tqdm
import time
# Early-stopping state: stop after EARLY_STOPPING consecutive epochs without
# improvement in validation accuracy.
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 1, 0, 0, 0
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n' % (EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = [], [], [], []
    pbar = tqdm(
        range(0, len(train_input_ids), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        # Slice out one minibatch (the final batch may be shorter).
        index = min(i + batch_size, len(train_input_ids))
        batch_x = train_input_ids[i: index]
        batch_masks = train_input_masks[i: index]
        batch_segment = train_segment_ids[i: index]
        batch_y = train_Y[i: index]
        # One optimization step; accuracy and loss fetched for logging.
        acc, cost, _ = sess.run(
            [model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
                model.segment_ids: batch_segment,
                model.input_masks: batch_masks
            },
        )
        assert not np.isnan(cost)  # abort early if training diverges
        train_loss.append(cost)
        train_acc.append(acc)
        pbar.set_postfix(cost = cost, accuracy = acc)
    # Validation pass: forward-only (no optimizer in the fetch list).
    pbar = tqdm(range(0, len(test_input_ids), batch_size), desc = 'test minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_input_ids))
        batch_x = test_input_ids[i: index]
        batch_masks = test_input_masks[i: index]
        batch_segment = test_segment_ids[i: index]
        batch_y = test_Y[i: index]
        acc, cost = sess.run(
            [model.accuracy, model.cost],
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
                model.segment_ids: batch_segment,
                model.input_masks: batch_masks
            },
        )
        test_loss.append(cost)
        test_acc.append(acc)
        pbar.set_postfix(cost = cost, accuracy = acc)
    # Epoch-level averages (simple mean over batches; the short final batch
    # is weighted the same as full batches).
    train_loss = np.mean(train_loss)
    train_acc = np.mean(train_acc)
    test_loss = np.mean(test_loss)
    test_acc = np.mean(test_acc)
    # Early-stopping bookkeeping: reset the patience counter on improvement.
    if test_acc > CURRENT_ACC:
        print(
            'epoch: %d, pass acc: %f, current acc: %f'
            % (EPOCH, CURRENT_ACC, test_acc)
        )
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time() - lasttime)
    print(
        'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
        % (EPOCH, train_loss, train_acc, test_loss, test_acc)
    )
    EPOCH += 1
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'bert-base-similarity/model.ckpt')
dimension_output = 2
learning_rate = 2e-5
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
dimension_output,
learning_rate,
training = False
)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(sess, 'bert-base-similarity/model.ckpt')
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'Placeholder' in n.name
or 'logits' in n.name
or 'alphas' in n.name
or 'self/Softmax' in n.name)
and 'adam' not in n.name
and 'beta' not in n.name
and 'global_step' not in n.name
]
)
strings.split(',')
real_Y, predict_Y = [], []
pbar = tqdm(
range(0, len(test_input_ids), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
index = min(i + batch_size, len(test_input_ids))
batch_x = test_input_ids[i: index]
batch_masks = test_input_masks[i: index]
batch_segment = test_segment_ids[i: index]
batch_y = test_Y[i: index]
predict_Y += np.argmax(sess.run(model.logits,
feed_dict = {
model.Y: batch_y,
model.X: batch_x,
model.segment_ids: batch_segment,
model.input_masks: batch_masks
},
), 1, ).tolist()
real_Y += batch_y
from sklearn import metrics
print(
metrics.classification_report(
real_Y, predict_Y, target_names = ['not similar', 'similar'],
digits = 5
)
)
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in `model_dir` into a single frozen_model.pb.

    Loads the graph from the checkpoint's .meta file, converts all variables
    reachable from the comma-separated `output_node_names` into constants,
    and writes the resulting GraphDef next to the checkpoint.

    Raises:
        AssertionError: if `model_dir` does not exist.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    # Pick the most recent checkpoint recorded in the directory.
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    # Strip device placements so the frozen graph loads on any machine.
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        # Replace variables with constants, keeping only nodes needed to
        # compute the requested outputs.
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('bert-base-similarity', strings)
```
| github_jupyter |
# Data union
4 archives are presented with data from year 2017 to 2019 for each hive (names: Wurzburg and Schwartau)
- flow(nameofthehive).csv : For a date it contains the number of departures and arrivals from/to the beehive. A positive number indicates the number of arrivals and a negative number of departures. Note that this 2 values are in the data set with the same timestamp.
- humidity(nameofthehive).csv : Level of humidity through time of the beehive expressed in %
- temperature(nameofthehive).csv : Temperature of the beehive through time in °C
- weight(nameofthehive).csv : Weight of the beehive through time in Kg.
Resample daily information and join everything into a single dataframe.
```
# Pandas for data loading and processing
import pandas as pd
#visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Reading data from schwartau
df_flow_schwartau = pd.read_csv('data/flow_schwartau.csv')
df_humidity_schwartau = pd.read_csv('data/humidity_schwartau.csv')
df_temperature_schwartau = pd.read_csv('data/temperature_schwartau.csv')
df_weight_schwartau = pd.read_csv('data/weight_schwartau.csv')
# Reading data from wurzburg
df_flow_wurzburg = pd.read_csv('data/flow_wurzburg.csv')
df_humidity_wurzburg = pd.read_csv('data/humidity_wurzburg.csv')
df_temperature_wurzburg = pd.read_csv('data/temperature_wurzburg.csv')
df_weight_wurzburg = pd.read_csv('data/weight_wurzburg.csv')
# Changing data type to timestamp from schwartau
df_flow_schwartau['timestamp'] = pd.to_datetime(df_flow_schwartau['timestamp'], format='%Y-%m-%d %H:%M:%S')
df_humidity_schwartau['timestamp'] = pd.to_datetime(df_humidity_schwartau['timestamp'], format='%Y-%m-%d %H:%M:%S')
df_temperature_schwartau['timestamp'] = pd.to_datetime(df_temperature_schwartau['timestamp'], format='%Y-%m-%d %H:%M:%S')
df_weight_schwartau['timestamp'] = pd.to_datetime(df_weight_schwartau['timestamp'], format='%Y-%m-%d %H:%M:%S')
# Changing data type to timestamp from wurzburg
df_flow_wurzburg['timestamp'] = pd.to_datetime(df_flow_wurzburg['timestamp'], format='%Y-%m-%d %H:%M:%S')
df_humidity_wurzburg['timestamp'] = pd.to_datetime(df_humidity_wurzburg['timestamp'], format='%Y-%m-%d %H:%M:%S')
df_temperature_wurzburg['timestamp'] = pd.to_datetime(df_temperature_wurzburg['timestamp'], format='%Y-%m-%d %H:%M:%S')
df_weight_wurzburg['timestamp'] = pd.to_datetime(df_weight_wurzburg['timestamp'], format='%Y-%m-%d %H:%M:%S')
print(df_flow_schwartau.head(5))
print(df_humidity_schwartau.head(5))
print(df_temperature_schwartau.head(5))
print(df_weight_schwartau.head(5))
print(df_flow_wurzburg.head(5))
print(df_humidity_wurzburg.head(5))
print(df_temperature_wurzburg.head(5))
print(df_weight_wurzburg.head(5))
# Resample to daily granularity:
# - flow is a signed count of departures/arrivals, so daily values are summed
# - humidity, temperature and weight are state readings, so daily values are averaged
flow_schwartau_daily = df_flow_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='D')).sum()
humidity_schwartau_daily = df_humidity_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='D')).mean()
temperature_schwartau_daily = df_temperature_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='D')).mean()
weight_schwartau_daily = df_weight_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='D')).mean()
# Same daily resampling for the Wurzburg hive
flow_wurzburg_daily = df_flow_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='D')).sum()
humidity_wurzburg_daily = df_humidity_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='D')).mean()
temperature_wurzburg_daily = df_temperature_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='D')).mean()
weight_wurzburg_daily = df_weight_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='D')).mean()
print(flow_schwartau_daily.head(5))
print(humidity_schwartau_daily.head(5))
print(temperature_schwartau_daily.head(5))
print(weight_schwartau_daily.head(5))
print(flow_wurzburg_daily.head(5))
print(humidity_wurzburg_daily.head(5))
print(temperature_wurzburg_daily.head(5))
print(weight_wurzburg_daily.head(5))
schwartau_daily=flow_schwartau_daily.merge(humidity_schwartau_daily,on='timestamp').merge(temperature_schwartau_daily,on='timestamp').merge(weight_schwartau_daily,on='timestamp')
wurzburg_daily=flow_wurzburg_daily.merge(humidity_wurzburg_daily,on='timestamp').merge(temperature_wurzburg_daily,on='timestamp').merge(weight_wurzburg_daily,on='timestamp')
schwartau_daily.head(10)
wurzburg_daily.head(10)
schwartau_daily.to_csv('data/summary/schwartau_daily.csv', index = True, header=True)
wurzburg_daily.to_csv('data/summary/wurzburg_daily.csv', index = True, header=True)
#describe our data
schwartau_daily[schwartau_daily.select_dtypes(exclude='object').columns].describe().\
style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True))
#describe our data
wurzburg_daily[wurzburg_daily.select_dtypes(exclude='object').columns].describe().\
style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True))
# Resampling data hourly
flow_schwartau_hourly = df_flow_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='H')).sum()
humidity_schwartau_hourly = df_humidity_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='H')).mean()
temperature_schwartau_hourly = df_temperature_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='H')).mean()
weight_schwartau_hourly = df_weight_schwartau.groupby(pd.Grouper(key = 'timestamp',freq='H')).mean()
# Resampling data hourly
flow_wurzburg_hourly = df_flow_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='H')).sum()
humidity_wurzburg_hourly = df_humidity_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='H')).mean()
temperature_wurzburg_hourly = df_temperature_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='H')).mean()
weight_wurzburg_hourly = df_weight_wurzburg.groupby(pd.Grouper(key = 'timestamp',freq='H')).mean()
schwartau_hourly=flow_schwartau_hourly.merge(humidity_schwartau_hourly,on='timestamp').merge(temperature_schwartau_hourly,on='timestamp').merge(weight_schwartau_hourly,on='timestamp')
wurzburg_hourly=flow_wurzburg_hourly.merge(humidity_wurzburg_hourly,on='timestamp').merge(temperature_wurzburg_hourly,on='timestamp').merge(weight_wurzburg_hourly,on='timestamp')
schwartau_hourly.to_csv('data/summary/schwartau_hourly.csv', index = True, header=True)
wurzburg_hourly.to_csv('data/summary/wurzburg_hourly.csv', index = True, header=True)
#describe our data
schwartau_hourly[schwartau_hourly.select_dtypes(exclude='object').columns].describe().\
style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True))
#describe our data
wurzburg_hourly[wurzburg_hourly.select_dtypes(exclude='object').columns].describe().\
style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True))
```
| github_jupyter |
# Matrix Factorization for Recommender Systems - Part 2
As seen in [Part 1](/examples/matrix-factorization-for-recommender-systems-part-1), strength of [Matrix Factorization (MF)](https://en.wikipedia.org/wiki/Matrix_factorization_(recommender_systems)) lies in its ability to deal with sparse and high cardinality categorical variables. In this second tutorial we will have a look at Factorization Machines (FM) algorithm and study how it generalizes the power of MF.
**Table of contents of this tutorial series on matrix factorization for recommender systems:**
- [Part 1 - Traditional Matrix Factorization methods for Recommender Systems](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-1)
- [Part 2 - Factorization Machines and Field-aware Factorization Machines](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-2)
- [Part 3 - Large scale learning and better predictive power with multiple pass learning](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-3)
## Factorization Machines
Steffen Rendel came up in 2010 with [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf), an algorithm able to handle any real valued feature vector, combining the advantages of general predictors with factorization models. It became quite popular in the field of online advertising, notably after winning several Kaggle competitions. The modeling technique starts with a linear regression to capture the effects of each variable individually:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j}
$$
Then are added interaction terms to learn features relations. Instead of learning a single and specific weight per interaction (as in [polynomial regression](https://en.wikipedia.org/wiki/Polynomial_regression)), a set of latent factors is learnt per feature (as in MF). An interaction is calculated by multiplying involved features product with their latent vectors dot product. The degree of factorization — or model order — represents the maximum number of features per interaction considered. The model equation for a factorization machine of degree $d$ = 2 is defined as:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{j=1}^{p} \sum_{j'=j+1}^{p} \langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle x_{j} x_{j'}
$$
Where $\normalsize \langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle$ is the dot product of $j$ and $j'$ latent vectors:
$$
\normalsize
\langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle = \sum_{f=1}^{k} \mathbf{v}_{j, f} \cdot \mathbf{v}_{j', f}
$$
Higher-order FM will be covered in a following section, just note that factorization models express their power in sparse settings, which is also where higher-order interactions are hard to estimate.
Strong emphasis must be placed on feature engineering as it allows FM to mimic most factorization models and significantly impact its performance. High cardinality categorical variables one hot encoding is the most frequent step before feeding the model with data. For more efficiency, `river` FM implementation considers string values as categorical variables and automatically one hot encode them. FM models have their own module [river.facto](/api/overview/#facto).
## Mimic Biased Matrix Factorization (BiasedMF)
Let's start with a simple example where we want to reproduce the Biased Matrix Factorization model we trained in the previous tutorial. For a fair comparison with [Part 1 example](/examples/matrix-factorization-for-recommender-systems-part-1/#biased-matrix-factorization-biasedmf), let's set the same evaluation framework:
```
from river import datasets
from river import metrics
from river.evaluate import progressive_val_score
def evaluate(model):
    """Run progressive validation of *model* on MovieLens 100K, reporting MAE and RMSE."""
    dataset = datasets.MovieLens100K()
    metric = metrics.MAE() + metrics.RMSE()
    _ = progressive_val_score(dataset, model, metric, print_every=25_000, show_time=True, show_memory=True)
```
In order to build an equivalent model we need to use the same hyper-parameters. As we can't replace FM intercept by the global running mean we won't be able to build the exact same model:
```
from river import compose
from river import facto
from river import meta
from river import optim
from river import stats
fm_params = {
'n_factors': 10,
'weight_optimizer': optim.SGD(0.025),
'latent_optimizer': optim.SGD(0.05),
'sample_normalization': False,
'l1_weight': 0.,
'l2_weight': 0.,
'l1_latent': 0.,
'l2_latent': 0.,
'intercept': 3,
'intercept_lr': .01,
'weight_initializer': optim.initializers.Zeros(),
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),
}
regressor = compose.Select('user', 'item')
regressor |= facto.FMRegressor(**fm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
Both MAE are very close to each other (0.7486 vs 0.7485) showing that we almost reproduced [reco.BiasedMF](/api/reco/BiasedMF/) algorithm. The cost is a naturally slower running time as FM implementation offers more flexibility.
## Feature engineering for FM models
Let's study the basics of how to properly encode data for FM models. We are going to keep using MovieLens 100K as it provides various feature types:
```
import json
for x, y in datasets.MovieLens100K():
print(f'x = {json.dumps(x, indent=4)}\ny = {y}')
break
```
The features we are going to add to our model don't improve its predictive power. Nevertheless, they are useful to illustrate different methods of data encoding:
1. Set-categorical variables
We have seen that categorical variables are one-hot encoded automatically if set to strings; on the other hand, set-categorical variables must be encoded explicitly by the user. A good way of doing so is to assign them a value of $1/m$, where $m$ is the number of elements of the sample set. This gives the feature a constant "weight" across all samples, preserving the model's stability. Let's create a routine to encode movie genres this way:
```
def split_genres(x):
    """Encode the comma-separated ``genres`` field as weighted one-hot features.

    Each genre gets weight ``1/m`` where ``m`` is the number of genres in the
    sample, so the field contributes a constant total "mass" per sample.
    """
    genre_list = x['genres'].split(', ')
    weight = 1 / len(genre_list)
    encoded = {}
    for genre in genre_list:
        encoded[f'genre_{genre}'] = weight
    return encoded
```
2. Numerical variables
In practice, transforming numerical features into categorical ones works better in most cases. Feature binning is the natural way, but finding good bins is sometimes more an art than a science. Let's encode users age with something simple:
```
def bin_age(x):
    """Bucket the numeric ``age`` field into one of four one-hot age-range features.

    Bins (inclusive): 0-18, 19-32, 33-54, 55-100.
    """
    age = x['age']
    if age > 54:
        return {'age_55-100': 1}
    if age > 32:
        return {'age_33-54': 1}
    if age > 18:
        return {'age_19-32': 1}
    return {'age_0-18': 1}
```
Let's put everything together:
```
fm_params = {
'n_factors': 14,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.FMRegressor(**fm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
Note that using more variables involves factorizing a larger latent space, then increasing the number of latent factors $k$ often helps capturing more information.
Some other feature engineering tips from [3 idiots' winning solution](https://www.kaggle.com/c/criteo-display-ad-challenge/discussion/10555) for Kaggle [Criteo display ads](https://www.kaggle.com/c/criteo-display-ad-challenge) competition in 2014:
- Infrequent modalities often bring noise and little information, transforming them into a special tag can help
- In some cases, sample-wise normalization seems to make the optimization problem easier to be solved
## Higher-Order Factorization Machines (HOFM)
The model equation generalized to any order $d \geq 2$ is defined as:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{l=2}^{d} \sum_{j_1=1}^{p} \cdots \sum_{j_l=j_{l-1}+1}^{p} \left(\prod_{j'=1}^{l} x_{j_{j'}} \right) \left(\sum_{f=1}^{k_l} \prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \right)
$$
```
hofm_params = {
'degree': 3,
'n_factors': 12,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.HOFMRegressor(**hofm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
As said previously, high-order interactions are often hard to estimate due to too much sparsity, that's why we won't spend too much time here.
## Field-aware Factorization Machines (FFM)
[Field-aware variant of FM (FFM)](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) improved the original method by adding the notion of "*fields*". A "*field*" is a group of features that belong to a specific domain (e.g. the "*users*" field, the "*items*" field, or the "*movie genres*" field).
FFM restricts itself to pairwise interactions and factorizes separated latent spaces — one per combination of fields (e.g. users/items, users/movie genres, or items/movie genres) — instead of a common one shared by all fields. Therefore, each feature has one latent vector per field it can interact with — so that it can learn the specific effect with each different field.
The model equation is defined by:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{j=1}^{p} \sum_{j'=j+1}^{p} \langle \mathbf{v}_{j, f_{j'}}, \mathbf{v}_{j', f_{j}} \rangle x_{j} x_{j'}
$$
Where $f_j$ and $f_{j'}$ are the fields corresponding to $j$ and $j'$ features, respectively.
```
ffm_params = {
'n_factors': 8,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.FFMRegressor(**ffm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
Note that FFM usually needs to learn a smaller number of latent factors $k$ than FM, as each latent vector only deals with one field.
## Field-weighted Factorization Machines (FwFM)
[Field-weighted Factorization Machines (FwFM)](https://arxiv.org/abs/1806.03514) address FFM memory issues caused by its large number of parameters, which is in the order of *feature number* times *field number*. As FFM, FwFM is an extension of FM restricted to pairwise interactions, but instead of factorizing separated latent spaces, it learns a specific weight $r_{f_j, f_{j'}}$ for each field combination modelling the interaction strength.
The model equation is defined as:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{j=1}^{p} \sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle x_{j} x_{j'}
$$
```
fwfm_params = {
'n_factors': 10,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'seed': 73,
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.FwFMRegressor(**fwfm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Dariush-Mehdiaraghi/bachelor_project/blob/main/ssdlite_mobiledet_transfer_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**Retrain SSDLite Mobiledet for Coral's EdgeTpu**
This is a slightly modified version of the [notebook](https://github.com/Namburger/edgetpu-ssdlite-mobiledet-retrain) by [Nam Vu ](https://github.com/Namburger)
```
# Import tensorflow 1.x and install tf_slim.
%tensorflow_version 1.x
!pip install tf_slim
!pip show tensorflow
# Install protobuf-compiler and the tensorflow's object detection API.
!apt-get install protobuf-compiler
!git clone https://github.com/tensorflow/models.git
import os
os.environ['PYTHONPATH'] += ':/content/models/research/'
os.environ['PYTHONPATH'] += ':/content/models/research/slim/'
os.environ['PYTHONPATH'] += ':/content/models/research/object_detection/utils/'
os.environ['PYTHONPATH'] += ':/content/models/research/object_detection'
%cd models/research
# Compile all the protobuf dependencies.
!protoc object_detection/protos/*.proto --python_out=.
# Set up and install the object detection API.
!cp object_detection/packages/tf1/setup.py .
!python -m pip install .
# Run a test to make sure setup is correct.
!python object_detection/builders/model_builder_test.py
# Now let's download our training dataset.
#%rm -r /content/dataset
%cd /content
%rm -r /content/dataset
%mkdir /content/dataset
%cd /content/dataset
!curl -L "https://app.roboflow.com/ds/HaWC1lznR0?key=pJugxNUTU4" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip
# IF YOU DONT HAVE TFRECORD FILES Now we can create the tfrecord files.
%cd /content/models/research
!cp object_detection/data/pet_label_map.pbtxt /content/dataset
!python3 object_detection/dataset_tools/create_pet_tf_record.py \
--label_map_path="/content/dataset/pet_label_map.pbtxt" \
--data_dir="/content/dataset" \
--output_dir="/content/dataset"
# Now let's download our ssdlite mobiledet pretrained model from tensorflow's model zoo.
!mkdir /content/pretrained_model
%cd /content/pretrained_model
!wget http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19.tar.gz
!tar xvf ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19.tar.gz
#to save checkpoints to google drive
from google.colab import drive
drive.mount('/content/gdrive')
gdrive_ckpt_dir = "/content/gdrive/MyDrive/colabCheckpoints4SecondRun"
def get_num_classes(pbtxt_fname):
    """Return the number of classes declared in a label-map ``.pbtxt`` file.

    Also prints the parsed category list as a side effect, for inspection.
    """
    from object_detection.utils import label_map_util
    parsed_map = label_map_util.load_labelmap(pbtxt_fname)
    category_list = label_map_util.convert_label_map_to_categories(
        parsed_map, max_num_classes=90, use_display_name=True)
    index = label_map_util.create_category_index(category_list)
    print(category_list)
    return len(index)
get_num_classes('/content/dataset/train/bottles_label_map.pbtxt')
# Edit Pipeline config to load in our new tfrecord that we just created and add quantization aware training.
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2
# Hack to find out if you have colab pro or not :)
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
print(gpu_info)
gpu_name = !nvidia-smi --query-gpu=gpu_name --format=csv
# You get Tesla T4 with free colab and Tesla P100-PCIe with colab pro.
colab_pro = False if 'T4' in gpu_name else True
pipeline = pipeline_pb2.TrainEvalPipelineConfig()
config_path = '/content/models/research/object_detection/samples/configs/ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config'
with tf.gfile.GFile(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline)
pipeline.train_input_reader.tf_record_input_reader.input_path[:] = ['/content/dataset/train/bottles.tfrecord']
pipeline.train_input_reader.label_map_path = '/content/dataset/train/bottles_label_map.pbtxt'
pipeline.eval_input_reader[0].tf_record_input_reader.input_path[:] = ['/content/dataset/valid/bottles.tfrecord']
pipeline.eval_input_reader[0].label_map_path = '/content/dataset/valid/bottles_label_map.pbtxt'
#pipeline.train_config.fine_tune_checkpoint = '/content/pretrained_model/ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19/fp32/model.ckpt'
pipeline.train_config.fine_tune_checkpoint = '/content/gdrive/MyDrive/colabCheckpoints3/model.ckpt-25000'
pipeline.train_config.batch_size = 64 if colab_pro else 32 # Smaller batch size on free gpu to avoid OOM Killer normaly 64
pipeline.train_config.num_steps = 25000 if colab_pro else 10000 # Less steps with free gpu but 10k should be good enough
#CHANGE NUM OF CLASSES
pipeline.model.ssd.num_classes = get_num_classes('/content/dataset/train/bottles_label_map.pbtxt')
# Enable ssdlite, this should already be enabled in the config we downloaded, but this is just to make sure.
pipeline.model.ssd.box_predictor.convolutional_box_predictor.kernel_size = 3
pipeline.model.ssd.box_predictor.convolutional_box_predictor.use_depthwise = True
pipeline.model.ssd.feature_extractor.use_depthwise = True
# Quantization Aware Training
pipeline.graph_rewriter.quantization.delay = 0
pipeline.graph_rewriter.quantization.weight_bits = 8
pipeline.graph_rewriter.quantization.activation_bits = 8
config_text = text_format.MessageToString(pipeline)
with tf.gfile.Open(config_path, "wb") as f:
f.write(config_text)
# This is our config after modification.
!cat /content/models/research/object_detection/samples/configs/ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config
# Before we start training, let's start tensorboard so we can track the progress.
# More info on tensorflow can be found here: https://www.tensorflow.org/tutorials
%cd /content
!wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
!unzip -o ngrok-stable-linux-amd64.zip
# Starts tensorboard, so we can monitor the training process.
get_ipython().system_raw(
'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
.format(gdrive_ckpt_dir + '/eval_0')
)
get_ipython().system_raw('./ngrok http 6006 &')
print('Click on link below to track progress:')
import time
time.sleep(1)
!curl -s http://localhost:4040/api/tunnels | python3 -c \
"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
# Let's begin training, expects to take a few hours, time for a good stretch :)
%cd /content/models/research/
!python3 object_detection/model_main.py \
--logtostderr=true \
--model_dir={gdrive_ckpt_dir} \
--pipeline_config_path=/content/models/research/object_detection/samples/configs/ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config
# Make inference graph.
best_ckpt_path = gdrive_ckpt_dir + "/model.ckpt-24082"# Make sure to change this checkpoint to the corresponding num step you set from above.
print("best checkpoint is:", best_ckpt_path)
!python3 /content/models/research/object_detection/export_inference_graph.py \
--input_type=image_tensor \
--pipeline_config_path=/content/models/research/object_detection/samples/configs/ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config \
--output_directory=/content/inference_graph \
--trained_checkpoint_prefix={best_ckpt_path}
# Let's download some test data from flickr.
!mkdir /content/test
!cd /content/test
#!wget https://live.staticflickr.com/7921/46683787864_86c9501c24_c_d.jpg -O /content/test/image1.jpg
#!wget https://live.staticflickr.com/4/8451898_8bedb2ae53_c_d.jpg -O /content/test/image2.jpg
# Do a Quick Evaluation on the inference graph model.
import numpy as np
import os
import sys
import tensorflow as tf
from collections import defaultdict
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
%matplotlib inline
# Initialize tf.Graph()
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile('/content/inference_graph/frozen_inference_graph.pb', 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loads labels
label_map = label_map_util.load_labelmap('/content/dataset/train/bottles_label_map.pbtxt')
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=8, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Run Inference and populates results in a dict.
def run_inference(graph, image):
    """Run single-image detection on a frozen TF1 graph.

    *image* is an HWC uint8 array; a batch dimension is added before feeding.
    Returns a dict with 'num_detections' (int) plus per-detection boxes,
    scores and classes for the single batch item (when those tensors exist
    in the graph).
    """
    wanted_keys = ['num_detections', 'detection_boxes',
                   'detection_scores', 'detection_classes']
    with graph.as_default():
        with tf.Session() as sess:
            default_graph = tf.get_default_graph()
            available = {out.name
                         for op in default_graph.get_operations()
                         for out in op.outputs}
            fetches = {}
            for key in wanted_keys:
                tensor_name = key + ':0'
                if tensor_name in available:
                    fetches[key] = default_graph.get_tensor_by_name(tensor_name)
            # Feed the image with an added batch dimension and run the graph.
            image_placeholder = default_graph.get_tensor_by_name('image_tensor:0')
            results = sess.run(
                fetches, feed_dict={image_placeholder: np.expand_dims(image, 0)})
            # Strip the batch dimension and coerce dtypes for downstream use.
            results['num_detections'] = int(results['num_detections'][0])
            results['detection_classes'] = results['detection_classes'][0].astype(np.uint8)
            results['detection_boxes'] = results['detection_boxes'][0]
            results['detection_scores'] = results['detection_scores'][0]
            return results
test_image_path = [os.path.join('/content/test', 'image{}.jpg'.format(i)) for i in range(1, 6)]
for image_path in test_image_path:
print('Evaluating:', image_path)
image = Image.open(image_path)
img_width, img_height = image.size
image_np = np.array(image.getdata()).reshape((img_height, img_width, 3)).astype(np.uint8)
# Run inference.
output_dict = run_inference(detection_graph, image_np)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=(48, 32))
plt.imshow(image_np)
# Now we export this model to tflite_graph format.
%cd /content/models/research
!mkdir /content/output_model
!python3 object_detection/export_tflite_ssd_graph.py \
--pipeline_config_path=/content/models/research/object_detection/samples/configs/ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config \
--trained_checkpoint_prefix={best_ckpt_path} \
--output_directory=/content/output_model \
--add_postprocessing_op=true
# Make sure to change the model-ckpt-# to match the checkpoint number you used.
# Now we can convert this custom trained model to a CPU tflite model
!tflite_convert \
--output_file="/content/output_model/ssdlite_mobiledet_own.tflite" \
--graph_def_file="/content/output_model/tflite_graph.pb" \
--inference_type=QUANTIZED_UINT8 \
--input_arrays="normalized_input_image_tensor" \
--output_arrays="TFLite_Detection_PostProcess,TFLite_Detection_PostProcess:1,TFLite_Detection_PostProcess:2,TFLite_Detection_PostProcess:3" \
--mean_values=128 \
--std_dev_values=128 \
--input_shapes=1,320,320,3 \
--allow_custom_ops
# Install tflite_runtime package to evaluate the model.
!pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp36-cp36m-linux_x86_64.whl
# Now we do evaluation on the tflite model.
import os
import numpy as np
from tflite_runtime.interpreter import Interpreter
from tflite_runtime.interpreter import load_delegate
from PIL import Image
from PIL import ImageDraw
%matplotlib inline
# Creates tflite interpreter
interpreter = Interpreter('/content/output_model/ssdlite_mobiledet_own.tflite')
# This exact code can be used to run inference on the edgetpu by simply
# instantiating the interpreter with libedgetpu delegates:
# interpreter = Interpreter(args.model, experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()
interpreter.invoke() # warmup
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
width = input_details[0]['shape'][2]
height = input_details[0]['shape'][1]
def run_inference(interpreter, image):
    """Run the TFLite detector on a single uint8 input tensor of shape (1, H, W, 3).

    NOTE(review): reads the module-level ``input_details`` / ``output_details``
    rather than querying *interpreter* itself — assumes they were produced from
    this same interpreter.  Returns (boxes, classes, scores) for the batch item.
    """
    interpreter.set_tensor(input_details[0]['index'], image)
    interpreter.invoke()
    detection_boxes = interpreter.get_tensor(output_details[0]['index'])[0]
    detection_classes = interpreter.get_tensor(output_details[1]['index'])[0]
    detection_scores = interpreter.get_tensor(output_details[2]['index'])[0]
    # num_detections (output_details[3]) is available but unused here.
    return detection_boxes, detection_classes, detection_scores
test_image_paths = [os.path.join('/content/test', 'image{}.jpg'.format(i)) for i in range(1, 6)]
for image_path in test_image_paths:
print('Evaluating:', image_path)
image = Image.open(image_path)
image_width, image_height = image.size
draw = ImageDraw.Draw(image)
resized_image = image.resize((width, height))
np_image = np.asarray(resized_image)
input_tensor = np.expand_dims(np_image, axis=0)
# Run inference
boxes, classes, scores = run_inference(interpreter, input_tensor)
# Draw results on image
colors = {0:(128, 255, 102), 1:(102, 255, 255)}
labels = {0:'abyssian cat', 1:'american bulldog'}
for i in range(len(boxes)):
if scores[i] > .7:
ymin = int(max(1, (boxes[i][0] * image_height)))
xmin = int(max(1, (boxes[i][1] * image_width)))
ymax = int(min(image_height, (boxes[i][2] * image_height)))
xmax = int(min(image_width, (boxes[i][3] * image_width)))
draw.rectangle((xmin, ymin, xmax, ymax), width=7, outline=colors[int(classes[i])])
draw.rectangle((xmin, ymin, xmax, ymin-10), fill=colors[int(classes[i])])
text = labels[int(classes[i])] + ' ' + str(scores[i]*100) + '%'
draw.text((xmin+2, ymin-10), text, fill=(0,0,0), width=2)
display(image)
# Install the edgetpu compiler.
!curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
!echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
!sudo apt-get update
!sudo apt-get install edgetpu-compiler
output_model_dir = "/content/output_model/"
# Compile our model and make a tarball of the finished trained model.
%cd {output_model_dir}
!edgetpu_compiler -s ssdlite_mobiledet_own.tflite
#%cd /content/
# Copy the checkpoints, inference graph, pipeline config, and the tflite models.
#!cp -r /content/gdrive/MyDrive/colabCheckpoints2* {output_model_dir}
!cp -r /content/inference_graph/* {output_model_dir}
!tar cvf ssdlite_mobiledet_own.tar.gz {output_model_dir}
!cp ssdlite_mobiledet_own.tar.gz {gdrive_ckpt_dir}
!pip uninstall tensorflow-datasets
!pip install tensorflow-datasets==1.0.1
!pip install tflite-model-maker
from tflite_model_maker import image_classifier
from tflite_model_maker.image_classifier import DataLoader
data = DataLoader.from_folder('content/dataset')
train_data, test_data = data.split(0.8)
model = image_classifier.create(train_data)
model.export('image_classifier.tflite', 'imageLabels.txt')
# Download model and you're done!
from google.colab import files
files.download('/content/ssdlite_mobiledet_dog_vs_cat.tar.gz')
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import numpy as np
import torch, torch.optim
import torch.nn.functional as F
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor
import os, sys
sys.path.append('utils/*')
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import models as md
import utils.common_utils as cu
import utils.diffuser_utils as df
import utils.utils_hyperspectral as helper
```
# Single-shot Imaging Demo
Load in the PSF, 2D measurement and rolling shutter mask.
```
simulated = True # True: Use a simulated measurement or False: use an experimental measurement
downsampling_factor = 2
meas_np, mask_np, psf_np, gt_np = helper.load_data(simulated = simulated)
plt.figure(figsize=(20,10))
plt.subplot(1,3,1);plt.title('PSF');plt.imshow(psf_np)
plt.subplot(1,3,2);plt.title('Measurement');plt.imshow(meas_np)
plt.subplot(1,3,3);plt.title('Rolling shutter mask');plt.imshow(mask_np[:,:,20])
```
Initialize the lensless forward model
```
DIMS0 = meas_np.shape[0] # Image Dimensions
DIMS1 = meas_np.shape[1] # Image Dimensions
py = int((DIMS0)//2) # Pad size
px = int((DIMS1)//2) # Pad size
def pad(x):
    """Zero-pad the first two (spatial) dimensions of *x* by (py, py) and (px, px).

    Any trailing dimensions (e.g. spectral channels, frames) are left unpadded.
    Relies on the module-level pad sizes ``py`` and ``px`` defined above.

    Raises:
        ValueError: if *x* is not 2-, 3- or 4-dimensional.  (The original
        fell through all branches and raised UnboundLocalError instead.)
    """
    if x.ndim not in (2, 3, 4):
        raise ValueError(f'pad expects a 2-, 3- or 4-D array, got shape {x.shape}')
    # Pad only the leading two axes; keep every remaining axis untouched.
    pad_width = [(py, py), (px, px)] + [(0, 0)] * (x.ndim - 2)
    return np.pad(x, pad_width, mode='constant')
#meas_np = pad(meas_np)
psf_pad = pad(psf_np)
h_full = np.fft.fft2(np.fft.ifftshift(psf_pad))
forward = df.Forward_Model_combined(h_full,
shutter = mask_np,
imaging_type = 'spectral')
if simulated == True:
meas_torch = forward(cu.np_to_torch(gt_np.transpose(2,0,1)).type(dtype).unsqueeze(0))
meas_np = cu.torch_to_np(meas_torch)[0]
plt.imshow(meas_np)
```
Set up parameters and network
```
# Define network hyperparameters:
input_depth = 32
INPUT = 'noise'
pad = 'reflection'
LR = 1e-3
tv_weight = 0
reg_noise_std = 0.05
if simulated == True:
num_iter = 100000
net_input = cu.get_noise(input_depth, INPUT, (meas_np.shape[0], meas_np.shape[1])).type(dtype).detach()
else:
num_iter = 4600
input_depth = 1
net_input = cu.get_noise(input_depth, INPUT, (mask_np.shape[-1], meas_np.shape[0], meas_np.shape[1])).type(dtype).detach()
# Initialize network input
net_input_saved = net_input.detach().clone()
noise = net_input.detach().clone()
# reinitialize netowrk and optimizer
if simulated == True:
NET_TYPE = 'skip'
net = md.get_net(input_depth, NET_TYPE, pad, n_channels=32, skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5,upsample_mode='bilinear').type(dtype)
else:
print('experimental')
NET_TYPE = 'skip3D'
input_depth = 1
net = md.get_net(input_depth, NET_TYPE, pad, n_channels=1, skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=4,upsample_mode='trilinear').type(dtype)
#NET_TYPE = 'skip'
#net = md.get_net(input_depth, NET_TYPE, pad, n_channels=32, skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5,upsample_mode='bilinear').type(dtype)
p = [x for x in net.parameters()]
optimizer = torch.optim.Adam(p, lr=LR)
# Losses
mse = torch.nn.MSELoss().type(dtype)
# Deep-image-prior style reconstruction loop: optimizes the network so that
# the lensless forward model applied to its output matches the single-shot
# measurement.  Depends on the module-level `net`, `optimizer`, `forward`,
# `meas_np`, `net_input_saved`, `noise`, `reg_noise_std`, `tv_weight`,
# `num_iter`, `mse`, `dtype`, and the `cu`/`df`/`helper` util modules
# defined in earlier cells.
def main():
    """Run the iterative reconstruction; return the final pre-plotted volume."""
    global recons
    full_recons = []
    # Move the measurement to the GPU once, outside the optimization loop.
    meas_ts = cu.np_to_ts(meas_np)
    meas_ts = meas_ts.detach().clone().type(dtype).cuda()
    for i in range(num_iter):
        optimizer.zero_grad()
        # Perturb the fixed network input with fresh noise each iteration
        # (standard deep-image-prior regularization against overfitting).
        net_input = net_input_saved + (noise.normal_() * reg_noise_std)
        recons = net(net_input)
        # Simulate the measurement the current reconstruction would produce.
        gen_meas = forward.forward(recons)
        gen_meas = F.normalize(gen_meas, dim=[1,2], p=2)
        # Data-fidelity term plus optional total-variation regularization.
        loss = mse(gen_meas, meas_ts)
        loss += tv_weight * df.tv_loss(recons)
        loss.backward()
        print('Iteration %05d, loss %.8f '%(i, loss.item()), '\r', end='')
        if i % 100 == 0:
            # Periodically visualize and log progress.
            helper.plot(recons)
            print('Iteration {}, loss {:.8f}'.format(i, loss.item()))
        optimizer.step()
        full_recons = helper.preplot(recons)
    return full_recons
```
### Run the reconstruction
```
full_recons = main()
full_recons = helper.preplot2(recons)
```
Reconstructed video
```
def plot_slider(x):
    """Display frame *x* of the reconstructed volume (used with ipywidgets' interactive).

    NOTE(review): reads the module-level `full_recons` produced by main() —
    assumes its last axis indexes frames.
    """
    plt.title('Reconstruction: frame %d'%(x))
    plt.axis('off')
    plt.imshow(full_recons[...,x])
    return x
interactive(plot_slider,x=(0,full_recons.shape[-1]-1,1))
```
| github_jupyter |
# Renumbering Test
Demonstrate creating a graph with renumbering.
Most cugraph algorithms operate on a CSR representation of a graph. A CSR representation requires an indices array that is as long as the number of edges and an offsets array that is as 1 more than the largest vertex id. This makes the memory utilization entirely dependent on the size of the largest vertex id. For data sets that have a sparse range of vertex ids, the size of the CSR can be unnecessarily large. It is easy to construct an example where the amount of memory required for the offsets array will exceed the amount of memory in the GPU (not to mention the performance cost of having a large number of offsets that are empty but still have to be read to be skipped).
The cugraph renumbering feature allows us to take two columns of any integer type and translate them into a densely packed contiguous array numbered from 0 to (num_unique_values - 1). These renumbered vertices can be used to create a graph much more efficiently.
Another of the features of the renumbering function is that it can take vertex ids that are 64-bit values and map them down into a range that fits into 32-bit integers. The current cugraph algorithms are limited to 32-bit signed integers as vertex ids, and the renumbering feature will allow the caller to translate ids that are 64-bit into a densely packed 32-bit array of ids that can be used in cugraph algorithms. Note that if there are more than 2^31 - 1 unique vertex ids then the renumber method will fail with an error indicating that there are too many vertices to renumber into a 32-bit signed integer.
Note that this version (0.7) is limited to integer types. The intention is to extend the renumbering function to be able to handle strings and other types.
First step is to import the needed libraries
```
import cugraph
import cudf
import socket
import struct
import pandas as pd
import numpy as np
import networkx as nx
```
# Create some test data
This creates a small circle using some ipv4 addresses, storing the columns in a GPU data frame.
The current version of renumbering operates only on integer types, so we translate the ipv4 strings into 64 bit integers.
```
source_list = [ '192.168.1.1', '172.217.5.238', '216.228.121.209', '192.16.31.23' ]
dest_list = [ '172.217.5.238', '216.228.121.209', '192.16.31.23', '192.168.1.1' ]
source_as_int = [ struct.unpack('!L', socket.inet_aton(x))[0] for x in source_list ]
dest_as_int = [ struct.unpack('!L', socket.inet_aton(x))[0] for x in dest_list ]
print("sources came from: " + str([ socket.inet_ntoa(struct.pack('!L', x)) for x in source_as_int ]))
print(" sources as int = " + str(source_as_int))
print("destinations came from: " + str([ socket.inet_ntoa(struct.pack('!L', x)) for x in dest_as_int ]))
print(" destinations as int = " + str(dest_as_int))
```
# Create our GPU data frame
```
df = pd.DataFrame({
'source_list': source_list,
'dest_list': dest_list,
'source_as_int': source_as_int,
'dest_as_int': dest_as_int
})
gdf = cudf.DataFrame.from_pandas(df[['source_as_int', 'dest_as_int']])
gdf.to_pandas()
```
# Run renumbering
The current version of renumbering takes a column of source vertex ids and a column of dest vertex ids. As mentioned above, these must be integer columns.
Output from renumbering is 3 cudf.Series structures representing the renumbered sources, the renumbered destinations and the numbering map which maps the new ids back to the original ids.
In this case,
* gdf['source_as_int'] is a column of type int64
* gdf['dest_as_int'] is a column of type int64
* src_r will be a series of type int32 (we translate back to 32-bit integers)
* dst_r will be a series of type int32
* numbering will be a series of type int64 that translates the elements of src and dst back to their original 64-bit values
Note that because the renumbering translates us to 32-bit integers, if there are more than 2^31 - 1 unique 64-bit values in the source/dest passed into renumbering this would exceed the size of the 32-bit integers so you will get an error from the renumber call.
```
src_r, dst_r, numbering = cugraph.renumber(gdf['source_as_int'], gdf['dest_as_int'])
gdf.add_column("original id", numbering)
gdf.add_column("src_renumbered", src_r)
gdf.add_column("dst_renumbered", dst_r)
gdf.to_pandas()
```
# Data types
Just to confirm, the data types of the renumbered columns should be int32, the original data should be int64, the numbering map needs to be int64 since the values it contains map to the original int64 types.
```
gdf.dtypes
```
# Quick verification
To understand the renumbering, here's a block of verification logic. In the renumbered series we created a new id for each unique value in the original series. The numbering map identifies that mapping. For any vertex id X in the new numbering, numbering[X] should refer to the original value.
```
for i in range(len(src_r)):
print(" " + str(i) +
": (" + str(source_as_int[i]) + "," + str(dest_as_int[i]) +")"
", renumbered: (" + str(src_r[i]) + "," + str(dst_r[i]) +")"
", translate back: (" + str(numbering[src_r[i]]) + "," + str(numbering[dst_r[i]]) +")"
)
```
# Now let's do some graph things...
To start, let's run page rank. Not particularly interesting on our circle, since everything should have an equal rank.
```
G = cugraph.Graph()
G.add_edge_list(src_r, dst_r)
pr = cugraph.pagerank(G)
pr.add_column("original id", numbering)
pr.to_pandas()
```
# Try to run jaccard
Not at all an interesting result, but it demonstrates a more complicated case. Jaccard returns a coefficient for each edge. In order to show the original ids we need to add columns to the data frame for each column that contains one of renumbered vertices. In this case, the columns source and destination contain renumbered vertex ids.
```
jac = cugraph.jaccard(G)
jac.add_column("original_source",
[ socket.inet_ntoa(struct.pack('!L', numbering[x])) for x in jac['source'] ])
jac.add_column("original_destination",
[ socket.inet_ntoa(struct.pack('!L', numbering[x])) for x in jac['destination'] ])
jac.to_pandas()
```
___
Copyright (c) 2019, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
___
| github_jupyter |
```
# Load the sklearn wine dataset and explore it as a pandas DataFrame.
# Bare expressions (shape, df, describe(), ...) display in the notebook output.
from sklearn.datasets import load_wine
wine_data = load_wine()
dir(wine_data)
print(wine_data.DESCR)
inputs = wine_data.data
output = wine_data.target
inputs.shape
output.shape
wine_data.feature_names
import pandas as pd
df = pd.DataFrame(inputs, columns=wine_data.feature_names)
# Append the target as an extra (unnamed) column for joint inspection.
df = pd.concat([df, pd.DataFrame(output)], axis=1)
df
df.describe()
df.describe().style.format("{:.5f}")
import matplotlib.pyplot as plt
# Feature-correlation heatmap with feature names on both axes.
plt.matshow(df.corr())
plt.xticks(range(len(df.columns)), df.columns)
plt.yticks(range(len(df.columns)), df.columns)
plt.colorbar()
plt.show()
```
Chapter Break
```
# Compare plain linear regression against three regularized variants on the
# wine data (treating the class label as a regression target).
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(inputs, output, test_size=0.33, random_state=42)
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.pipeline import make_pipeline
# Baseline: unregularized linear regression on the raw features.
pipe = make_pipeline(LinearRegression())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
from sklearn.linear_model import Ridge
# tactic 1: minimize weights, smaller the better, higher penalty on large weights
# = ridge regression
pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), Ridge())
pipe.fit(X_train, y_train)
# steps[2][1] is the fitted Ridge estimator inside the pipeline.
pipe.steps[2][1].coef_
pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
from sklearn.linear_model import Lasso
# tactic 2: minimize number of non-zero weights
# = Lasso
pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), Lasso())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
pipe.steps[2][1].coef_
pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()
from sklearn.linear_model import ElasticNet
# tactic 3: mix lasso and ridge!
# = elasticnet
pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), ElasticNet())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
pipe.steps[2][1].coef_
pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()
```
----
# Implementing a model to classify wines
```
# Compare three classifiers (Naive Bayes, SVM, logistic regression) on the
# wine dataset; report train/test accuracy and the confusion matrix for each.
from sklearn.datasets import load_wine
wine_data = load_wine()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
wine_data.data, wine_data.target, test_size=0.5, random_state=42)
import numpy as np
import pandas as pd
df_x_train = pd.DataFrame(X_train, columns=wine_data.feature_names)
df_x_train.describe()
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn import linear_model
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
# Gaussian Naive Bayes baseline.
pipe = make_pipeline(GaussianNB())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train), pipe.score(X_test, y_test)
confusion_matrix(y_test, pipe.predict(X_test))
# Support-vector classifier (default RBF kernel).
pipe = make_pipeline(svm.SVC())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train), pipe.score(X_test, y_test)
confusion_matrix(y_test, pipe.predict(X_test))
# Logistic regression.
pipe = make_pipeline(linear_model.LogisticRegression())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train), pipe.score(X_test, y_test)
confusion_matrix(y_test, pipe.predict(X_test))
```
| github_jupyter |
```
# Load a pickled placerg simulation result and set up shared plotting style.
# NOTE(review): indentation was lost when this notebook was exported; the
# statements following each `if` below are that branch's body — restore the
# indentation before executing this cell.
envname = 'variables/loop_stim10e-16.0et6.0ph1.0pvaryt0.1plNonebp0.5.pkl'
# import stuff
from placerg.funcs import *
from placerg.objects import*
from placerg.funcsrg import *
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
# set up notebook display
np.set_printoptions(threshold=5)
# Shared style constants used by all figure cells below.
alpha=0.4
color='black'
cmap='Greys'
colorline='black'
linethick=3.
colorfit='grey'
plt.style.use('seaborn-paper')
fontsize=20
ticksize=20
fontsizesmall=25
ticksizesmall=20
legendsize=20
alpha=.3
colorfit='gray'
linecolor='black'
palit=['black','firebrick', 'crimson', 'orangered', 'darkorange', 'goldenrod', 'gold', 'khaki']
mycmap = cm.gnuplot
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# load in objects
allo=load_object(envname)
orderplot(allo)
# Pick the color-scale range (minn..maxx) and the per-run label values (cc)
# according to which simulation parameter was swept in this run.
if allo.labeltype[0]=='eta':
maxx=np.max(np.array(allo.eta).flatten())
minn=np.min(np.array(allo.eta).flatten())
cc=allo.eta
if allo.labeltype[0]=='epsilon':
maxx=np.max(np.array(allo.epsilon).flatten())
minn=np.min(np.array(allo.epsilon).flatten())
cc=allo.epsilon
if allo.labeltype[0]=='time constant':
maxx=np.max(np.array(allo.timeconst).flatten())
minn=np.min(np.array(allo.timeconst).flatten())
cc=np.array(allo.timeconst)[:,0]
if allo.labeltype[0]=='# of stimuli':
maxx=np.max(np.array(allo.stim).flatten())
minn=np.min(np.array(allo.stim).flatten())
cc=allo.stim
if allo.labeltype[0]=='p':
maxx=np.max(np.array(allo.percell).flatten())
minn=np.min(np.array(allo.percell).flatten())
cc=allo.percell
# Relabel every run as 'q' for the figure annotations below.
for i in range(len(allo.labeltype)):
allo.labeltype[i]='q'
mrange=maxx-minn
allo.label
"""
Here plot the eigenvalues from each sucessive RG step, averaged over all clusters and
normalized by cluster size.
"""
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ylabel= 'eigenvalue'
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel('rank$/K$', fontsize=fontsize)
ax[1,1].set_xlabel('rank$/K$', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
errplot=allo.eigspecerr[h]
xplot,plot=(allo.eigspecx[h], allo.eigspec[h])
for m in range(len(xplot)):
ax[n,l].errorbar(xplot[m], plot[m], yerr=errplot[m], \
label= r'$K=$'+str(2**(m+4)),\
color=palit[m+2], marker='o', \
markersize=5, linestyle='None', linewidth=2)
popt=allo.mu[h]
ax[n,l].plot(xplot[m],linfunc(xplot[m], \
popt[0], popt[1]), '--', color=colorfit, linewidth=2)
ax[n,l].tick_params(labelsize=ticksize)
ax[n,l].text(.005, .015, r'$\mu$='+ str(np.round(popt[1],3))+r'$\pm$'\
+str(np.round(allo.muerr[h]\
[0], 3)), fontsize=ticksize)
ax[n,l].text(.005, .0055, r'$q=$'+str(np.round(allo.label[h],2)), \
fontsize=ticksize)
ax[n,l].set_yscale('log')
ax[n,l].set_xscale('log')
ax[n,l].set_ylim(top=1)
c+=1
for n in range(2):
for l in range(2):
ax[n,l].set_yticks([.1, .01,.001,.0001])
ax[n,l].tick_params(length=6, width=1, which='major')
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].text(.0015,0.7,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(.0015,0.7,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(.0015,0.7,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(.0015,0.7,r'(D)', fontsize=ticksize, weight='bold')
lines_labels = [ax.get_legend_handles_labels() for ax in fig.axes[:1]]
lines, labels = [sum(z, []) for z in zip(*lines_labels)]
fig.legend(lines, labels, fontsize=fontsize-5, loc=(.2,.6))
plt.tight_layout()
name=str(envname)+'eigs.pdf'
plt.savefig(name)
```
# variance of activity at each RG step over clusters
```
"""
plot coarse grained variance vs. cluster size
"""
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ylabel= 'activity variance'
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel(r'cluster size $K$', fontsize=fontsize)
ax[1,1].set_xlabel(r'cluster size $K$', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
ax[n,l].errorbar(allo.varx[h],allo.var[h], allo.varerr[h], \
color='black', marker='o', markersize=5, linewidth=2, linestyle='None')
popt = allo.alpha[h]
ax[n,l].plot(allo.varx[h],linfunc(allo.varx[h], \
popt[0], popt[1]), '--', color=colorfit, linewidth=2)
ax[n,l].plot(allo.varx[h], linfunc(allo.varx[h], popt[0], 1.), \
color=colorfit, linewidth=2, alpha=alpha)
ax[n,l].text(2, 5, r'$q=$'+str(np.round(allo.label[h],2)), fontsize=ticksize)
ax[n,l].tick_params(labelsize=ticksize)
ax[n,l].text(2, 20, r'${\alpha}$='+ str(np.format_float_positional(popt[1],unique=False, precision=3))+r'$\pm$'+\
str(np.format_float_positional(allo.alphaerr[h][0], unique=False, precision=3)), fontsize=fontsize)
ax[n,l].set_yscale('log')
ax[n,l].set_xscale('log')
ax[n,l].set_ylim(top=260, bottom=.01)
c+=1
for n in range(2):
for l in range(2):
#ax[n,l].set_yticks([.1, .01,.001,.0001])
ax[n,l].tick_params(length=6, width=1, which='major')
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].text(.35,155,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(.35,155,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(.35,155,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(.35,155,r'(D)', fontsize=ticksize, weight='bold')
plt.tight_layout()
name=str(envname)+'var.pdf'
plt.savefig(name)
"""
Plot log probability of complete cluster silence vs cluster size
"""
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ylabel= r'$F$'
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel(r'cluster size $K$', fontsize=fontsize)
ax[1,1].set_xlabel(r'cluster size $K$', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
x=allo.psilx[h]
y=allo.psil[h]
popt= allo.beta[h]
ax[n,l].errorbar(allo.psilx[h], allo.psil[h],allo.psilerr[h], \
color='black', marker='o', linestyle='None', markersize=5)
ax[n,l].plot(np.arange(np.min(allo.psilx[h]),np.max(allo.psilx[h]), .01),\
(probfunc(np.arange(np.min(allo.psilx[h]),np.max(allo.psilx[h]), .01), \
popt[0], popt[1])), '--', color=colorfit, linewidth=2)
ax[n,l].text(2, -1.0, r'$q=$'+str(np.round(allo.label[h],2)),\
fontsize=ticksize)
ax[n,l].text(2, -.75, r'$\tilde{\beta}=$'+str(np.format_float_positional(popt[1], unique=False, precision=3))+r'$\pm$'+\
str(np.format_float_positional(allo.alphaerr[h][0], unique=False, precision=3)),fontsize=ticksize)
ax[n,l].tick_params(labelsize=ticksize)
ax[n,l].set_xscale('log')
ax[n,l].set_ylim(top=0.4)
c+=1
for n in range(2):
for l in range(2):
#ax[n,l].set_yticks([.1, .01,.001,.0001])
ax[n,l].tick_params(length=6, width=1, which='major')
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].text(.4,.25,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(.4,.25,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(.4, .25,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(.4,.25,r'(D)', fontsize=ticksize, weight='bold')
plt.tight_layout()
name=str(envname)+'freeenergy.pdf'
plt.savefig(name)
# Distribution of normalized coarse-grained activity for several RG steps.
# The i==3 curve additionally gets a Gaussian fit (dashed). Saves <envname>momdist.pdf.
minnm=16
maxxm=128
mrangem=np.abs(minnm-maxxm)
x=allo.actmomx
plott=allo.actmom
plterr=allo.actmomerr
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ylabel= r'density'
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel('normalized activity', fontsize=fontsize)
ax[1,1].set_xlabel('normalized activity', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
for i in (np.arange(len(allo.actmomx[0]))):
if i==3:
ax[n,l].errorbar(x[h][i],plott[h][i], plterr[h][i], \
label='N/'+str(2**(i+4)), \
color=palit[i+2], linewidth=2, errorevery=3, alpha=.7)
popt, pcov = curve_fit(gaussian,x[h][i], plott[h][i])
ax[n,l].plot(np.arange(-4, 4,.1), \
gaussian(np.arange(-4, 4, .1),\
popt[0], popt[1]), '--', color=colorfit, linewidth=2)
else:
ax[n,l].plot(x[h][i],plott[h][i], \
label='N/'+str(2**(i+4)), \
color=palit[i+2], linewidth=2)
ax[n,l].text(-8, 4, r'$q=$'+str(np.round(allo.label[h],2)), \
fontsize=ticksize)
ax[n,l].tick_params(labelsize=ticksize)
ax[n,l].set_yscale('log')
ax[n,l].set_ylim(bottom=10**-6, top=9)
c+=1
for n in range(2):
for l in range(2):
#ax[n,l].set_yticks([.1, .01,.001,.0001])
ax[n,l].tick_params(length=6, width=1, which='major')
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].legend(fontsize=fontsize)
ax[0,0].text(-14,4,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(-14,4,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(-14,4,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(-14,4,r'(D)', fontsize=ticksize, weight='bold')
plt.tight_layout()
name=str(envname)+'momdist.pdf'
plt.savefig(name)
# Autocorrelation C(t) of coarse-grained activity, one curve per cluster size K,
# restricted to a +/-20-sample window around the center lag. Saves <envname>dynamic.pdf.
minnm=2
maxxm=256
mrangem=np.abs(minnm-maxxm)
x=allo.autocorrx
plterr=allo.autocorrerr
result=allo.autocorr
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ylabel= r'$C(t)$'
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel(r'time $t$', fontsize=fontsize)
ax[1,1].set_xlabel(r'time $t$', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
for i in range(result[h].shape[0]):
#print(result[l][i, int(result[l].shape[1]/2)-50:int(result[l].shape[1]/2)+50])
ax[n,l].errorbar((x[h][int(result[h].shape[1]/2)-\
20:int(result[h].shape[1]/2)+20]), \
(result[h][i, int(result[h].shape[1]/2)-20:int(result[h].\
shape[1]/2)+20]),\
(plterr[h][i][int(result[h].shape[1]/2)-20:int(result[h]\
.shape[1]/2)+20]), \
label=r'$K$ ='+str(2**(i+2)),color=palit[i],\
linewidth=2)
ax[n,l].text(-10, 1.0, r'$q=$'+str(np.round(allo.label[h],2)), \
fontsize=fontsize)
ax[n,l].tick_params(labelsize=ticksize)
ax[n,l].set_ylim(top=1.15)
ax[n,l].set_xlim(-15,15)
c+=1
for n in range(2):
for l in range(2):
#ax[n,l].set_yticks([.1, .01,.001,.0001])
ax[n,l].tick_params(length=6, width=1, which='major')
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].legend(fontsize=fontsize-5)
ax[0,0].text(-19,1.1,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(-19,1.1,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(-19,1.1,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(-19,1.1,r'(D)', fontsize=ticksize, weight='bold')
plt.tight_layout()
name=str(envname)+'dynamic.pdf'
plt.savefig(name)
"""
plot exponents
"""
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ylabel= r'$\tau_c$'
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel(r'cluster size $K$', fontsize=fontsize)
ax[1,1].set_xlabel(r'cluster size $K$', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
ax[n,l].errorbar(2**np.arange(1,8),allo.tau[h],allo.tauerr[h], color=colorline, \
label='taus', marker='o', markersize=5, linestyle='None')
popt= allo.z[h]
ax[n,l].plot(2**np.arange(1,8), linfunc(2**np.arange(1,8), \
popt[0], popt[1]), '--', label='fit', \
color=colorfit, linewidth=2)
ax[n,l].text(2, 3, r'$\tilde{z}=$'+str(np.format_float_positional(popt[1],unique=False, precision=3))+r'$\pm$'+\
str(np.format_float_positional(allo.zerr[h][0], unique=False, precision=3)), fontsize=ticksize)
ax[n,l].set_yscale('log')
ax[n,l].set_xscale('log')
ax[n,l].text(2, 2.5, r'$q=$'+str(np.round(allo.label[h],2)), \
fontsize=fontsize)
ax[n,l].set_ylim(top=3.8, bottom=0.8)
c+=1
for n in range(2):
for l in range(2):
ax[n,l].set_yticks([1,2,3])
ax[n,l].tick_params(length=6, width=1, which='major', labelsize=ticksize)
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].text(1,3.5,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(1,3.5,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(1,3.5,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(1,3.5,r'(D)', fontsize=ticksize, weight='bold')
plt.tight_layout()
name=str(envname)+'dynamicexps.pdf'
plt.savefig(name)
# Same autocorrelation plot as above, but with each curve's time axis rescaled
# by its own correlation time tau_c (dynamic-scaling collapse). Saves
# <envname>dynamicrescale.pdf, then the exponent-summary figure varvspercell.pdf.
minnm=2
maxxm=256
mrangem=np.abs(minnm-maxxm)
x=allo.autocorrx
plterr=allo.autocorrerr
result=allo.autocorr
ylabel= r'$C(t)$'
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(10,10))
ax[0,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_ylabel(ylabel, fontsize=fontsize)
ax[1,0].set_xlabel(r'time $t/\tau_c$', fontsize=fontsize)
ax[1,1].set_xlabel(r'time $t/\tau_c$', fontsize=fontsize)
c=0
for h in np.array([0,5,10,19]):
n=int(c/2)
l=c-2*n
for i in range(result[h].shape[0]):
#print(result[l][i, int(result[l].shape[1]/2)-50:int(result[l].shape[1]/2)+50])
ax[n,l].errorbar((x[h][int(result[h].shape[1]/2)-\
20:int(result[h].shape[1]/2)+20])/allo.tau[h][i], \
(result[h][i, int(result[h].shape[1]/2)-20:int(result[h].\
shape[1]/2)+20]),\
(plterr[h][i][int(result[h].shape[1]/2)-20:int(result[h]\
.shape[1]/2)+20]), \
label=r'$K$ ='+str(2**(i+2)), color=palit[i],\
linewidth=2)
ax[n,l].text(-10, 1.0, r'$q=$'+str(np.round(allo.label[h],2)), \
fontsize=fontsize)
ax[n,l].tick_params(labelsize=ticksize)
ax[n,l].set_ylim(top=1.15)
ax[n,l].set_xlim(-15,15)
c+=1
for n in range(2):
for l in range(2):
ax[n,l].tick_params(length=6, width=1, which='major', labelsize=ticksize)
ax[n,l].tick_params(length=3, width=1, which='minor')
#a.grid(True, linewidth=1)
ax[0,0].legend(fontsize=fontsize-5)
ax[0,0].text(-19,1.1,r'(A)', fontsize=ticksize, weight='bold')
ax[0,1].text(-19,1.1,r'(B)', fontsize=ticksize, weight='bold')
ax[1,0].text(-19,1.1,r'(C)', fontsize=ticksize, weight='bold')
ax[1,1].text(-19,1.1,r'(D)', fontsize=ticksize, weight='bold')
plt.tight_layout()
name=str(envname)+'dynamicrescale.pdf'
plt.savefig(name)
# Summary figure: fitted exponents vs. the swept parameter, for runs `inds`.
inds=[5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]
plotexps(allo, 'percell', inds, fontsize, ticksize, 1.89, 1.25, 0.92, 0.775, 0.49, 0.05, -0.52, -0.95, 0.12, 1.86, \
0.12, 0.91, 0.12, 0.47, 0.12, -0.54)
name=str(envname)+'varvspercell.pdf'
plt.savefig(name)
```
| github_jupyter |
```
# Load the parallel English/Russian ParaCrawl corpus; the two files are
# line-aligned (line i of each file is the same sentence pair).
with open('/mnt/pmldl/paracrawl-release1.en-ru.zipporah0-dedup-clean.en') as f:
eng_lines = f.readlines()
with open('/mnt/pmldl/paracrawl-release1.en-ru.zipporah0-dedup-clean.ru') as f:
ru_lines = f.readlines()
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
```
# Use pretrained model and tokenizer
```
# Pretrained Marian RU->EN translation model and its matching tokenizer.
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-ru-en")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-ru-en")
```
# Freeze encoder weights
```
# Freeze every weight of the base (encoder-decoder) model in one call;
# only parameters outside base_model (e.g. the LM head) stay trainable.
model.base_model.requires_grad_(False)
```
# Split data
```
# Split the parallel corpus 90/10, then subsample to 10k train / 1k validation
# pairs and tokenize them into padded seq2seq batches (max 100 tokens).
from sklearn.model_selection import train_test_split
ru_train, ru_val, eng_train, eng_val = train_test_split(ru_lines, eng_lines, test_size=.1)
ru_train, ru_val, eng_train, eng_val = ru_train[:10000], ru_val[:1000],\
eng_train[:10000], eng_val[:1000]
train_encodings = tokenizer.prepare_seq2seq_batch(ru_train, eng_train,
truncation=True,
padding=True,
max_length=100)
val_encodings = tokenizer.prepare_seq2seq_batch(ru_val, eng_val,
truncation=True,
padding=True,
max_length=100)
import torch
from torch.utils.data import Dataset
class Seq2seqDataset(Dataset):
    """Thin torch Dataset wrapper over a tokenizer batch-encoding mapping."""

    def __init__(self, encodings):
        # encodings: mapping of field name -> per-example sequences
        # (e.g. 'input_ids', 'attention_mask', 'labels').
        self.encodings = encodings

    def __getitem__(self, idx):
        # Materialize one example as a dict of field -> tensor.
        return {field: torch.tensor(values[idx])
                for field, values in self.encodings.items()}

    def __len__(self):
        # One label row per example.
        return len(self.encodings["labels"])
# Wrap the tokenized splits as torch Datasets for the DataLoader below.
train_dataset = Seq2seqDataset(train_encodings)
eval_dataset = Seq2seqDataset(val_encodings)
# Fine-tune only the LM head (base model is frozen above) for 3 epochs,
# then save the result. NOTE(review): indentation inside the epoch/batch
# loops was lost in the notebook export; restore it before running.
from torch.utils.data import DataLoader
from transformers import DistilBertForSequenceClassification, AdamW
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
# Only the LM head's parameters are passed to the optimizer.
optim = AdamW(model.lm_head.parameters(), lr=5e-5)
import numpy as np
from tqdm.notebook import tqdm
for epoch in range(3):
epoch_loss = []
for batch in tqdm(train_loader):
optim.zero_grad()
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
labels = batch['labels'].to(device)
# embeddings = model.base_model(input_ids,\
# decoder_input_ids=labels,\
# attention_mask=attention_mask)/
# .requires_grad(True)
# outputs = model.lm_head(embeddings)
# Passing labels makes the model return the LM loss as outputs[0].
outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
loss = outputs[0]
epoch_loss.append(loss.item())
loss.backward()
optim.step()
print(f"Epoch {epoch} finished; Loss : {np.mean(epoch_loss)}")
model.eval()
import os
experiment_name = "marian_model_3_epochs_10k_samples_no_max_length"
model.save_pretrained(os.path.join("models", experiment_name))
!nvidia-smi
```
| github_jupyter |
```
# Prepare the SMT-solver-selection dataset: merge IDL and LIA feature sets,
# encode the best-solver label, split train/val/test, and upload CSVs to S3.
import pandas as pd
import numpy as np
# Read in feature sets and corresponding outputs
# Some values of a_max were too large for a 64-bit number,
# so a 128-bit float had to be specified in order for the
# column to be parsed correctly (otherwise Pandas defaulted
# to parsing them as strings)
X1 = pd.read_csv("features_idl.csv")
X2 = pd.read_csv("features_lia.csv",dtype={'a_max':np.float128})
y1 = pd.read_csv("best_solver_idl.csv")
y2 = pd.read_csv("best_solver_lia.csv")
# Convert output values to 0 for cvc4, 1 for z3, or 2 for sat
y1 = y1.values
y1 = pd.DataFrame(np.where(y1 == "sat", 2, np.where(y1 == "z3", 1, np.where(y1 == "cvc4", 0, -1))))
y2 = y2.values
y2 = pd.DataFrame(np.where(y2 == "sat", 2, np.where(y2 == "z3", 1, np.where(y2 == "cvc4", 0, -1))))
# Verifies that there were no values in the data other than "cvc4", "z3", or "sat"
assert(not -1 in y1.values)
assert(not -1 in y2.values)
# Combine data from IDL and LIA datasets
X = pd.concat([X1,X2])
y = pd.concat([y1,y2])
from sklearn.model_selection import train_test_split
# Split datasets into
# training (60%)
# validation (20%)
# testing (20%)
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.25, random_state=1)
# Combine output and features
# (label first: SageMaker's XGBoost CSV format expects the target in column 0)
train = pd.concat([y_train, X_train], axis=1)
val = pd.concat([y_val, X_val], axis=1)
test = pd.concat([y_test, X_test], axis=1)
train.to_csv('train.csv', index=False, header=False)
val.to_csv('validation.csv', index=False, header=False)
test.to_csv('test.csv', index=False, header=False)
import sagemaker, boto3, os
bucket = sagemaker.Session().default_bucket()
prefix = "smt-eager-vs-lazy"
# Upload datasets to S3
boto3.Session().resource('s3').Bucket(bucket).Object(
os.path.join(prefix, 'data/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(
os.path.join(prefix, 'data/validation.csv')).upload_file('validation.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(
os.path.join(prefix, 'data/test.csv')).upload_file('test.csv')
# Configure, train, and deploy a 3-class SageMaker XGBoost model.
region = sagemaker.Session().boto_region_name
role = sagemaker.get_execution_role()
from sagemaker.debugger import Rule, rule_configs
from sagemaker.session import TrainingInput
# Configure model
s3_output_location='s3://{}/{}/{}'.format(bucket, prefix, 'xgboost_model')
container=sagemaker.image_uris.retrieve("xgboost", region, "1.2-1")
print(container)
xgb_model=sagemaker.estimator.Estimator(
image_uri=container,
role=role,
instance_count=1,
instance_type='ml.m4.xlarge',
volume_size=5,
output_path=s3_output_location,
sagemaker_session=sagemaker.Session(),
rules=[Rule.sagemaker(rule_configs.create_xgboost_report())]
)
# multi:softprob returns a probability per class (3 classes: cvc4/z3/sat).
xgb_model.set_hyperparameters(
objective = 'multi:softprob',
num_class = 3,
num_round = 100,
subsample = 0.7,
colsample_bytree = 0.8
)
from sagemaker.session import TrainingInput
train_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "data/train.csv"), content_type="csv"
)
validation_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "data/validation.csv"), content_type="csv"
)
# Run the training job to fit the model
xgb_model.fit({"train": train_input, "validation": validation_input}, wait=True)
# Get the auto-generated analytics
rule_output_path = xgb_model.output_path + "/" + xgb_model.latest_training_job.name + "/rule-output"
! aws s3 ls {rule_output_path} --recursive
# Download the auto-generated analytics
! aws s3 cp {rule_output_path} ./ --recursive
# When done training/tuning the model, deploy an endpoint to SageMaker
import sagemaker
from sagemaker.serializers import CSVSerializer
xgb_predictor=xgb_model.deploy(
initial_instance_count=1,
instance_type='ml.t2.medium',
serializer=CSVSerializer()
)
import numpy as np
# This function calls the endpoint to get predictions
# from the model and processes the returned data
def predict_multi_class(data, num_class, rows=1000):
    """Query the deployed XGBoost endpoint in batches and return predictions.

    Parameters:
        data: 2D numpy array of feature rows (no label column).
        num_class: number of classes the model was trained with (>= 2).
        rows: maximum rows per endpoint request (payload-size limit).

    Returns:
        For num_class == 2, a 1D array of positive-class probabilities;
        otherwise a (num_examples, num_class) array of class probabilities.

    Raises:
        ValueError: if the endpoint returned an unexpected number of values
            (usually a wrong num_class).
    """
    assert(num_class >= 2)
    num_examples = data.shape[0]
    # Split into batches so each request stays under the endpoint payload limit.
    split_array = np.array_split(data, int(num_examples / float(rows) + 1))
    predictions = ''
    for array in split_array:
        # Responses are comma-separated text; the leading ',' from the first
        # join is stripped below with predictions[1:].
        predictions = ','.join([predictions, xgb_predictor.predict(array).decode('utf-8')])
    # For binary classifiers, predict() returns a single float:
    # the probability of a positive outcome
    # formally, this means the model returns 1
    if num_class == 2:
        # np.fromstring is deprecated for text parsing; split and convert instead.
        return np.array(predictions[1:].split(','), dtype=float)
    # Convert string version of 2D array to Python list of strings
    pred_list = predictions[1:].replace('[','').replace(']','').strip().split(',')
    if len(pred_list) != num_examples * num_class:
        # Raise instead of print+exit() so callers (and notebooks) can recover.
        raise ValueError("Unexpected prediction count. Verify that the value of num_class is correct.")
    # Convert Python list to Numpy array of floats, and reshape to 2D
    return np.array(pred_list, dtype=float).reshape([num_examples, num_class])
# Evaluate the deployed model on the held-out test split.
import sklearn
# Output the accuracy of the model on the test set
# (column 0 of `test` is the label; the rest are features).
log_predictions = predict_multi_class(test.to_numpy()[:,1:], 3)
predictions = np.argmax(log_predictions, axis=1)
sklearn.metrics.accuracy_score(test.iloc[:,0], predictions)
# Output the confusion matrix for the test set
cm = sklearn.metrics.confusion_matrix(test.iloc[:,0], predictions)
cm
# Class balance of the training labels (0=cvc4, 1=z3, 2=sat).
np.count_nonzero(np.where(y_train == 0))
np.count_nonzero(np.where(y_train == 1))
np.count_nonzero(np.where(y_train == 2))
```
| github_jupyter |
# Imports
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from glob import glob
from hypnospy import Wearable
from hypnospy.data import ActiwatchSleepData
from hypnospy.analysis import SleepWakeAnalysis
from hypnospy.analysis import NonWearingDetector
from hypnospy.analysis import SleepBoudaryDetector
from hypnospy.analysis import CircadianAnalysis
from hypnospy.analysis import PhysicalActivity
from hypnospy import Experiment
from hypnospy.analysis import Viewer
from tqdm import trange, tqdm
import pickle
```
# Read dataset - HCHS
```
# Configure an Experiment
exp = Experiment()
file_path = "HypnosPy-master/data/small_collection_hchs/*"
# Iterates over a set of files in a directory.
# Unfortunately, we have to do it manually with RawProcessing because we are modifying the annotations
for file in glob(file_path):
pp = ActiwatchSleepData(file, col_for_datetime="time", col_for_pid="pid")
w = Wearable(pp) # Creates a wearable from a pp object
exp.add_wearable(w)
print(w.pid)
# Resample every wearable in the experiment to a common 30-second frequency.
freq = 30
exp.set_freq_in_secs(freq)
# The non-wearing / validity checks below are kept for reference but disabled.
# tsp = NonWearingDetector(exp)
# tsp.fill_no_activity(-0.0001)
# tsp.detect_non_wear(strategy="choi")
# tsp.detect_non_wear(strategy="choi2011")
# tsp.check_consecutive_days(5)
# # print("Valid days:", tsp.get_valid_days())
# # print("Invalid days:", tsp.get_invalid_days())
# # strategy: "annotation", "hr", "angle"
# # sbd = SleepBoudaryDetector(exp)
# # sbd.detect_sleep_boundaries(strategy="annotation",
# # annotation_hour_to_start_search=0,
# # annotation_col='hyp_annotation',
# # output_col='hyp_sleep_period',
# # annotation_only_largest_sleep_period=False)
# tsp.invalidate_day_if_no_sleep(sleep_period_col='hyp_sleep_period')
# print("Valid days:", tsp.get_valid_days())
# tsp.check_valid_days(max_non_wear_minutes_per_day=180, min_activity_threshold=0)
# print("Valid days:", tsp.get_valid_days())
# print("Invalid days:", tsp.get_invalid_days())
```
# Use PhysicalActivity
```
# Classify activity counts into LPA / MVPA / VPA using two cutoff thresholds
# (<=399, 400-1404, >1404) and add the derived columns to each wearable.
pa = PhysicalActivity(exp, cutoffs=[399, 1404], names=['lpa', 'mvpa', 'vpa'])
pa.generate_pa_columns(based_on='activity')
print('cutoffs:', exp.get_all_wearables()[0].pa_cutoffs)
print('cutoffs region names:', exp.get_all_wearables()[0].pa_names)
```
# Find count of bouts per day per wearable
```
# Count MVPA bouts of at least 10 consecutive minutes, per day per wearable.
pa.get_bouts(pa_col='mvpa', length_in_minutes=10, decomposite_bouts=True)
```
# Find stats of activity per hour per day per wearable
```
# Hourly activity statistics, per day per wearable.
pa.get_stats_pa_representation()
```
# draw physical activity within using Viewer module
```
# Plot the raw activity signal (resampled to 5-minute bins) with MVPA
# periods shaded as an orange area overlay.
Viewer(exp).view_signals(signal_categories=['activity'], signal_as_area=['mvpa'],
colors ={"area": ["orange"]}, alphas ={"area": 0.8},
resample_to='5T')
```
## draw MVPA boxplot
Each row represents the wearable MVPA boxplot
<br/>X-axis = 24 hour
<br/>Y-axis = MVPA box plot for 7 days
```
# One row of boxplots per wearable (pid): hourly MVPA distribution over the
# recording days, hour of day on the x-axis.
data = pa.get_binned_pa_representation()
data = data.reset_index()
# data = data[data['pid'] == '29881087']
g = sns.catplot(x="hyp_time_col", y="MVPA", data=data,
kind="box", color='white',
row='pid',
height=4, aspect=2.5)
```
| github_jupyter |
<table> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by <a href="http://abu.lu.lv" target="_blank">Abuzer Yakaryilmaz</a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
<h2> Probabilistic States </h2>
[Watch Lecture](https://youtu.be/tJjrF7WgT1g)
Suppose that Asja tosses a fair coin secretly.
Because we do not see the result, our information about the outcome will be probabilistic:
$\rightarrow$ The outcome will be heads with probability $0.5$ and the outcome will be tails with probability $0.5$.
If the coin has a bias $ \dfrac{Pr(Head)}{Pr(Tail)} = \dfrac{3}{1}$, then our information about the outcome will be as follows:
$\rightarrow$ The outcome will be heads with probability $ 0.75 $ and the outcome will be tails with probability $ 0.25 $.
<i><u>Explanation</u>: The probability of getting heads is three times the probability of getting tails.
<ul>
<li>The total probability is 1. </li>
<li> We divide 1 into four parts (three parts are for heads and one part is for tails),
<li> one part is $ \dfrac{1}{4} = 0.25$,</li>
<li> and then give three parts for heads ($0.75$) and one part for tails ($0.25$).</li>
</ul></i>
<h3> Listing probabilities as a column </h3>
We have two different outcomes: heads (0) and tails (1).
Then, we can use a column of size 2 to show the probabilities of getting heads and getting tails.
For the fair coin, our information after the coin-flip will be $ \myvector{0.5 \\ 0.5} $.
For the biased coin, it will be $ \myvector{0.75 \\ 0.25} $.
The first entry shows the probability of getting heads, and the second entry shows the probability of getting tails.
$ \myvector{0.5 \\ 0.5} $ and $ \myvector{0.75 \\ 0.25} $ are two examples of 2-dimensional (column) vectors.
<h3> Task 1 </h3>
Suppose that Balvis secretly flips a coin having the bias $ \dfrac{Pr(Heads)}{Pr(Tails)} = \dfrac{1}{4}$.
Represent your information about the outcome as a column vector.
<h3> Task 2 </h3>
Suppose that Fyodor secretly rolls a loaded (tricky) dice with the bias
$$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$
Represent your information about the result as a column vector. Remark that the size of your column should be 6.
You may use python for your calculations.
```
#
# your code is here
#
```
<a href="B12_Probabilistic_States_Solutions.ipynb#task2">click for our solution</a>
<h3> Vector representation </h3>
Suppose that we have a system with 4 distinguishable states: $ s_1 $, $s_2 $, $s_3$, and $s_4$.
We expect the system to be in one of them at any moment.
By speaking with probabilities, we say that the system is in one of the states with probability 1, and in any other state with probability 0.
Then, by using our column representation, we can show each state as a column vector (by using the vectors in standard basis of $ \mathbb{R}^4 $):
$$
e_1 = \myvector{1\\ 0 \\ 0 \\ 0}, e_2 = \myvector{0 \\ 1 \\ 0 \\ 0}, e_3 = \myvector{0 \\ 0 \\ 1 \\ 0},
\mbox{ and } e_4 = \myvector{0 \\ 0 \\ 0 \\ 1}.
$$
This representation helps us to represent our knowledge on a system when it is in more than one state with certain probabilities.
Remember the case in which the coins are tossed secretly.
For example, suppose that the system is in states $ s_1 $, $ s_2 $, $ s_3 $, and $ s_4 $ with probabilities $ 0.20 $, $ 0.25 $, $ 0.40 $, and $ 0.15 $, respectively.
(<i>The total probability should be 1, i.e., $ 0.20+0.25+0.40+0.15 = 1.00 $</i>)
Then, we can say that the system is in the following probabilistic state:
$$ 0.20 \cdot e_1 + 0.25 \cdot e_2 + 0.40 \cdot e_3 + 0.15 \cdot e_4 $$
$$ = 0.20 \cdot \myvector{1\\ 0 \\ 0 \\ 0} + 0.25 \cdot \myvector{0\\ 1 \\ 0 \\ 0} + 0.40 \cdot \myvector{0\\ 0 \\ 1 \\ 0} + 0.15 \cdot \myvector{0\\ 0 \\ 0 \\ 1}
$$
$$ = \myvector{0.20\\ 0 \\ 0 \\ 0} + \myvector{0\\ 0.25 \\ 0 \\ 0} + \myvector{0\\ 0 \\0.40 \\ 0} + \myvector{0\\ 0 \\ 0 \\ 0.15 } = \myvector{ 0.20 \\ 0.25 \\ 0.40 \\ 0.15 }, $$
where the summation of entries must be 1.
<h3> Probabilistic state </h3>
A probabilistic state is a linear combination of the vectors in the standard basis.
Here coefficients (scalars) must satisfy certain properties:
<ol>
<li> Each coefficient is non-negative </li>
<li> The summation of coefficients is 1 </li>
</ol>
Alternatively, we can say that a probabilistic state is a probability distribution over deterministic states.
We can show all information as a single mathematical object, which is called a stochastic vector.
<i> Remark that the state of any linear system is a linear combination of the vectors in the basis. </i>
<h3> Task 3 </h3>
For a system with 4 states, randomly create a probabilistic state, and print its entries, e.g., $ 0.16~~0.17~~0.02~~0.65 $.
<i>Hint: You may pick your random numbers between 0 and 100 (or 1000), and then normalize each value by dividing the summation of all numbers.</i>
```
#
# your solution is here
#
```
<a href="B12_Probabilistic_States_Solutions.ipynb#task3">click for our solution</a>
<h3> Task 4 [extra] </h3>
As given in the hint for Task 3, you may pick your random numbers between 0 and $ 10^k $. For better precision, you may take bigger values of $ k $.
Write a function that randomly creates a probabilistic state of size $ n $ with a precision up to $ k $ digits.
Test your function.
```
#
# your solution is here
#
```
| github_jupyter |
**[Deep Learning Course Home Page](https://www.kaggle.com/learn/deep-learning)**
---
# Exercise Introduction
We will return to the automatic rotation problem you worked on in the previous exercise. But we'll add data augmentation to improve your model.
The model specification and compilation steps don't change when you start using data augmentation. The code you've already worked with for specifying and compiling a model is in the cell below. Run it so you'll be ready to work on data augmentation.
```
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D
num_classes = 2
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
my_new_model = Sequential()
my_new_model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))
my_new_model.add(Dense(num_classes, activation='softmax'))
my_new_model.layers[0].trainable = False
my_new_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning.exercise_5 import *
print("Setup Complete")
```
# 1) Fit the Model Using Data Augmentation
Here is some code to set up some ImageDataGenerators. Run it, and then answer the questions below about it.
```
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_size = 224
# Specify the values for all arguments to data_generator_with_aug.
data_generator_with_aug = ImageDataGenerator(preprocessing_function=preprocess_input,
horizontal_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1)
data_generator_no_aug = ImageDataGenerator(preprocessing_function=preprocess_input)
```
Why do we need both a generator with augmentation and a generator without augmentation? After thinking about it, check out the solution below.
```
q_1.solution()
```
# 2) Choosing Augmentation Types
ImageDataGenerator offers many types of data augmentation. For example, one argument is `rotation_range`. This rotates each image by a random amount that can be up to whatever value you specify.
Would it be sensible to use automatic rotation for this problem? Why or why not?
```
q_2.solution()
```
# 3) Code
Fill in the missing pieces in the following code. We've supplied some boilerplate. You need to think about what ImageDataGenerator is used for each data source.
```
# Specify which type of ImageDataGenerator above is to load in training data
train_generator = data_generator_with_aug.flow_from_directory(
directory = '../input/dogs-gone-sideways/images/train',
target_size=(image_size, image_size),
batch_size=12,
class_mode='categorical')
# Specify which type of ImageDataGenerator above is to load in validation data
validation_generator = data_generator_no_aug.flow_from_directory(
directory = '../input/dogs-gone-sideways/images/val',
target_size=(image_size, image_size),
class_mode='categorical')
my_new_model.fit_generator(
train_generator, # if you don't know what argument goes first, try the hint
epochs = 3,
steps_per_epoch=19,
validation_data=validation_generator)
q_3.check()
# q_3.hint()
# q_3.solution()
```
# 4) Did Data Augmentation Help?
How could you test whether data augmentation improved your model accuracy?
```
q_4.solution()
```
# Keep Going
You are ready for **[a deeper understanding of deep learning](https://www.kaggle.com/dansbecker/a-deeper-understanding-of-deep-learning/)**.
---
**[Deep Learning Course Home Page](https://www.kaggle.com/learn/deep-learning)**
| github_jupyter |
# Interactive Plotting with Jupyter
There are several ways to interactively plot. In this tutorial I will show how to interact with 2D and 1D data. There are other ways to interact with large tables of data using either [Bokeh](https://docs.bokeh.org/en/latest/index.html) (shown in the Skyfit notebook) or [Glue](http://docs.glueviz.org/en/stable). A non-Python-based solution that also works with large tables of data is Topcat.
Most of the methods here will work on the command line. In order to make this work within Jupyter you will need the following modules.
```
conda install -c conda-forge ipympl
conda install -c conda-forge ipywidgets
```
https://ipywidgets.readthedocs.io/
```
import sys
import astropy
import astroquery
import ipywidgets
import matplotlib
print('\n Python version: ', sys.version)
print('\n Astropy version: ', astropy.__version__)
print('\n Matplotlib version: ', matplotlib.__version__)
print('\n Astroquery version: ', astroquery.__version__)
print('\n ipywidgets version: ', ipywidgets.__version__)
import glob,os,sys
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
import astropy.units as u
from astroquery.skyview import SkyView
import ipywidgets as widgets
```
Here we need an image to play with, we can either download it via SkyView or load one from our machine.
```
ext = 0
# download an image
pflist = SkyView.get_images(position='M82', survey=['SDSSr'], radius=10 * u.arcmin)
pf = pflist[0] # first element of the list, might need a loop if multiple images
# or load an image
#pf = pyfits.open('m82.fits')
image = pf[ext].data
```
Next we need to turn on the interactive plotting.
```
# turn-on interactive plots
%matplotlib widget
```
# Display an image (2D data)
We plot a 2D image using imshow, we can set the scale of the image as well as the colormap.
```
#plt.ioff()
fig = plt.figure(figsize=[6,6])
plt.ion()
p = fig.add_subplot(111)
p.imshow(image, interpolation='Nearest', origin='lower', vmin=-10, vmax=20, cmap='viridis')
plt.show()
```
# Add an event to the display
There are several types of matplotlib events that you can use to interact with a figure.
A few useful events are the following:
`button_press_event`
`button_release_event`
`key_press_event`
`key_release_event`
For more information on event handling and examples check out the following website:
https://matplotlib.org/stable/users/event_handling.html
Here we add a Python function linked to the `key_press_event`. The function checks which key is being pressed and, if the condition is met, runs its code — in this case plotting a red point on the image. We can easily add more keys, adding more functionality to our interactive figure.
```
#plt.ioff()
fig = plt.figure(figsize=[6,6])
plt.ion()
p = fig.add_subplot(111)
p.imshow(image, interpolation='Nearest', origin='lower', vmin=-10, vmax=20, cmap='viridis')
def on_key_press(event):
    """Key-press callback: mark the cursor position with a red dot on 'm'.

    `event.xdata`/`event.ydata` are the cursor's data coordinates (None if the
    cursor is outside the axes). Relies on the notebook-level `p` (axes) and
    `fig` (figure) objects.
    """
    xc, yc = event.xdata, event.ydata
    if event.key == 'm':
        p.plot(xc, yc, 'ro', markersize=5)
        # Bug fix: draw_idle is a method and must be *called*; the bare
        # attribute reference was a no-op, so the dot never appeared until
        # some other event forced a redraw.
        fig.canvas.draw_idle()
fig.canvas.mpl_connect('key_press_event', on_key_press)
plt.show()
```
# Add output to the display with the event
If we want to display the coordinate of the points we mark, we need to use the Output widget.
```
#plt.ioff()
fig = plt.figure(figsize=[6,6])
plt.ion()
p = fig.add_subplot(111)
p.imshow(image, interpolation='Nearest', origin='lower', vmin=-10, vmax=20, cmap='viridis')
out = widgets.Output()
@out.capture()
def on_key_press(event):
    """Key-press callback: on 'm', mark the cursor position and print its pixel value.

    Output is captured into the `out` widget so `print` shows up below the
    figure. Relies on the notebook-level `p`, `fig`, `image` and `out` objects.
    """
    xc, yc = event.xdata, event.ydata
    if event.key == 'm':
        p.plot(xc, yc, 'ro', markersize=5)
        # Bug fix: draw_idle must be called, not merely referenced.
        fig.canvas.draw_idle()
        print("[%.1f, %.1f] = %.4f" % (xc, yc, image[int(yc), int(xc)]))
fig.canvas.mpl_connect('key_press_event', on_key_press)
display(out)
```
We can also write a Python class; this makes it more convenient to deal with multiple interactive events (e.g. key presses, mouse clicking, dragging, etc.).
```
class GUI_inter:
    """Minimal interactive image viewer.

    Displays a 2-D image on a matplotlib figure and, via `on_key_press`,
    marks and prints the cursor position whenever the 'm' key is pressed.
    Connect with: fig.canvas.mpl_connect('key_press_event', G.on_key_press).
    """

    def __init__(self, fig, img):
        self.fig = fig
        self.p = self.fig.gca()
        self.img = img
        self.display()

    def display(self, sigma=20.0):
        """Render the image, scaling the colormap to mean +/- sigma*std."""
        plt.clf()
        self.v0 = np.mean(self.img) - sigma * np.std(self.img)
        self.v1 = np.mean(self.img) + sigma * np.std(self.img)
        self.p = self.fig.add_subplot(111)
        self.p.imshow(self.img, interpolation='Nearest', origin='lower',
                      vmin=self.v0, vmax=self.v1, cmap='viridis')
        plt.draw()

    def on_key_press(self, event):
        """Key-press callback: on 'm', mark the cursor position and print it."""
        xc, yc = event.xdata, event.ydata
        if event.key == 'm':
            self.p.plot(xc, yc, 'ro', markersize=5)
            # Bug fixes: draw_idle must be called (the bare reference was a
            # no-op), and we use the figure stored on the instance instead of
            # a module-level `fig`, so the class works for any figure.
            self.fig.canvas.draw_idle()
            print("[%.2f, %.2f]" % (xc, yc))
fig = plt.figure(figsize=[6,6])
G = GUI_inter(fig, image)
fig.canvas.mpl_connect('key_press_event', G.on_key_press)
#display(fig)
```
# Interactive 1D data
```
slice = image[150,:]
fig = plt.figure(figsize=[6,6])
p = fig.add_subplot(111)
p.plot(slice)
plt.show()
zl,xl = image.shape
fig = plt.figure(figsize=[6,6])
p = fig.add_subplot(111)
#p.set_yscale('log')
slice = image[150,:]
line, = p.plot(slice)
def update(change):
    """Slider callback: plot the image row selected by the slider.

    `change.new` is the slider's new integer value, used as a row index
    into the notebook-level `image` array; `line` and `fig` are the
    previously created matplotlib line and figure.
    """
    line.set_ydata(image[change.new,:])
    fig.canvas.draw()
int_slider = widgets.IntSlider(
value=150,
min=0,
max=zl,
step=1,
description='Z-axis:',
continuous_update=False
)
int_slider.observe(update, 'value')
int_slider
from astroquery.sdss import SDSS
from astropy import coordinates
ra, dec = 148.969687, 69.679383
co = coordinates.SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg), frame='fk5')
xid = SDSS.query_region(co, radius=20 * u.arcmin, spectro=True)
sp = SDSS.get_spectra(matches=xid)
print("N =",len(sp))
pf = sp[0]
ext = 1
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.plot(wave,spec)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
ext = 1
n_max = len(sp)-1 # total number of spectra - 1
pf = sp[0]
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
line, = ax.plot(wave,spec)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
def new_spec(change):
    """Slider callback: replace the plotted spectrum with `sp[change.new]`.

    Uses the notebook-level `sp` (list of SDSS spectra), `ext`, `line`
    and `fig` objects.
    """
    pf = sp[change.new]
    pf[ext].header  # NOTE(review): bare expression — has no effect; remove?
    tab = pf[ext].data
    spec = tab['flux']
    wave = 10**tab['loglam']  # 'loglam' holds log10(wavelength)
    line.set_xdata(wave)
    line.set_ydata(spec)
    fig.canvas.draw()
int_slider = widgets.IntSlider(
value=0,
min=0,
max=n_max,
step=1,
description='Spectrum:',
continuous_update=False
)
int_slider.observe(new_spec, 'value')
int_slider
ext = 1
n_max = len(sp)-1 # total number of spectra - 1
pf = sp[0]
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
line, = ax.plot(wave,spec)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
line2, = ax.plot([6563,6563],[0,20],"--",c="r")
line2.set_visible(False)
def new_spec(change):
    """Slider callback: replace the plotted spectrum with `sp[change.new]`.

    Uses the notebook-level `sp` (list of SDSS spectra), `ext`, `line`
    and `fig` objects.
    """
    pf = sp[change.new]
    pf[ext].header  # NOTE(review): bare expression — has no effect; remove?
    tab = pf[ext].data
    spec = tab['flux']
    wave = 10**tab['loglam']  # 'loglam' holds log10(wavelength)
    line.set_xdata(wave)
    line.set_ydata(spec)
    fig.canvas.draw()
def display_lines(change):
    """Checkbox callback: toggle visibility of the reference line (`line2`)."""
    visible = bool(change.new)
    line2.set_visible(visible)
    fig.canvas.draw()
int_slider = widgets.IntSlider(
value=0,
min=0,
max=n_max,
step=1,
description='Spectrum:',
continuous_update=False
)
int_slider.observe(new_spec, 'value')
display(int_slider)
chk_box = widgets.Checkbox(
value=False,
description='Line list',
)
chk_box.observe(display_lines, 'value')
display(chk_box)
# turn-off interactive plots
%matplotlib inline
```
# Resources
https://ipywidgets.readthedocs.io/
https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html
https://ipywidgets.readthedocs.io/en/latest/examples/Output%20Widget.html
https://kapernikov.com/ipywidgets-with-matplotlib/
https://matplotlib.org/stable/users/event_handling.html
https://docs.bokeh.org/en/latest/index.html
http://docs.glueviz.org/en/stable
| github_jupyter |
# LetsGrowMore
## ***Virtual Internship Program***
***Data Science Tasks***
### ***Author: SARAVANAVEL***
# ***ADVANCED LEVEL TASK***
### Task 9 -Handwritten equation solver using CNN
Simple mathematical equation solver using character and symbol recognition via image processing and a CNN
## 1. Import Libraries/Packages
```
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from imutils.contours import sort_contours
import imutils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
```
## Data preprocessing
```
print(os.listdir("./input")) #without extracting the data.rar file
```
## Data Augmentation
```
train_datagen = ImageDataGenerator(
rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
validation_split = 0.25
)
data_path='./input/extracted_images'
train_set = train_datagen.flow_from_directory(
data_path,
target_size = (40, 40),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
shuffle = True,
subset='training',
seed = 123
)
valid_set = train_datagen.flow_from_directory(
data_path,
target_size = (40, 40),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
shuffle = True,
subset='validation',
seed = 123
)
```
## Model Building
```
# CNN classifier: two conv/max-pool stages on 40x40 grayscale inputs,
# then a flattened dense head.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(40, 40, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
# 18 output classes — NOTE(review): this must equal the number of class
# folders found by flow_from_directory; confirm against train_set.num_classes.
model.add(Dense(18, activation='softmax'))
# compile model
adam = tf.keras.optimizers.Adam(learning_rate = 5e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
```
## Model Training
```
history=model.fit(train_set,
validation_data=valid_set,
epochs=1,
verbose=1)
```
## Model evaluation
```
val_loss, val_accuracy = model.evaluate(valid_set)
print(val_loss,val_accuracy)
train_set.class_indices
```
print('\n',train_set.class_indices, sep = "\n")
```
label_map = (train_set.class_indices)
label_map
def prediction(img):
    """Classify one grayscale character image with the trained CNN.

    Parameters
    ----------
    img : np.ndarray
        2-D grayscale image of a single character (any size; resized to 40x40).

    Returns
    -------
    tuple
        (label string, predicted class-index array of length 1).

    Uses the notebook-level `model` and `train_set` objects.
    """
    plt.imshow(img, cmap='gray')
    img = cv2.resize(img, (40, 40))
    # Scale pixels to [0, 1] to match the generator's rescale=1./255
    norm_image = cv2.normalize(img, None, alpha=0, beta=1,
                               norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    norm_image = norm_image.reshape((norm_image.shape[0], norm_image.shape[1], 1))
    case = np.asarray([norm_image])
    # Bug fix: Sequential.predict_classes was removed in TensorFlow 2.6;
    # take the argmax of the softmax probabilities instead.
    pred = np.argmax(model.predict(case), axis=-1)
    # Invert the class_indices mapping (label -> index) to recover the label.
    label = [i for i in train_set.class_indices
             if train_set.class_indices[i] == pred[0]][0]
    return (label, pred)
image = cv2.imread('./input/data-eqns/test_image1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method="left-to-right")[0]
chars=[]
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# filter out bounding boxes, ensuring they are neither too small
# nor too large
if w*h>1200:
# extract the character and threshold it to make the character
# appear as *white* (foreground) on a *black* background, then
# grab the width and height of the thresholded image
roi = gray[y:y + h, x:x + w]
chars.append(prediction(roi))
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
plt.figure(figsize=(20,20))
plt.imshow(image)
chars
# Ordered class labels as learned by the generator (directory order).
labels=[i for i in train_set.class_indices]
print(labels)
# Assemble the recognized characters (left-to-right) into an equation.
# `chars` holds (label, pred) tuples from the contour loop above.
eq=[]    # equation characters: digits plus at most one operator per split
pos=[]   # 1-based position in `eq` just *after* each operator appended
for i in ((chars)):
    # First character must not be an operator.
    # NOTE(review): assumes operators are labels[:4] and digits labels[4:14]
    # — confirm against the printed `labels` list above.
    if len(eq)==0 and i[0][0] in labels[3:]:
        eq.append(i[0][0])
    elif len(eq)>0 and i[0][0] in labels[4:14]:
        eq.append(i[0][0])
    elif len(eq)>0 and i[0][0] in labels[:4]:
        eq.append(i[0][0])
        pos.append(len(eq))
    else:
        pass
# Evaluate as <left operand> <operator> <right operand>.
# NOTE(review): the split point is always pos[0], so only the first operator
# is handled even if `pos` has several entries — verify this is intentional.
for i in pos:
    if eq[i-1]=='+':
        print(int(''.join(eq[:pos[0]-1]))+int(''.join(eq[pos[0]:])))
    elif eq[i-1]=='%':
        # '%' is this dataset's symbol for division
        print(int(''.join(eq[:pos[0]-1]))/int(''.join(eq[pos[0]:])))
    elif eq[i-1]=='*':
        print(int(''.join(eq[:pos[0]-1]))*int(''.join(eq[pos[0]:])))
    else:
        print(int(''.join(eq[:pos[0]-1]))-int(''.join(eq[pos[0]:])))
image = cv2.imread('./input/data-eqns/test0.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method="left-to-right")[0]
chars=[]
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# filter out bounding boxes, ensuring they are neither too small
# nor too large
if w*h>1200:
# extract the character and threshold it to make the character
# appear as *white* (foreground) on a *black* background, then
# grab the width and height of the thresholded image
roi = gray[y:y + h, x:x + w]
chars.append(prediction(roi))
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
plt.figure(figsize=(20,20))
plt.imshow(image)
chars
# Ordered class labels as learned by the generator (directory order).
labels=[i for i in train_set.class_indices]
print(labels)
# Assemble the recognized characters (left-to-right) into an equation.
# `chars` holds (label, pred) tuples from the contour loop above.
eq=[]    # equation characters: digits plus at most one operator per split
pos=[]   # 1-based position in `eq` just *after* each operator appended
for i in ((chars)):
    # First character must not be an operator.
    # NOTE(review): assumes operators are labels[:4] and digits labels[4:14]
    # — confirm against the printed `labels` list above.
    if len(eq)==0 and i[0][0] in labels[3:]:
        eq.append(i[0][0])
    elif len(eq)>0 and i[0][0] in labels[4:14]:
        eq.append(i[0][0])
    elif len(eq)>0 and i[0][0] in labels[:4]:
        eq.append(i[0][0])
        pos.append(len(eq))
    else:
        pass
# Evaluate as <left operand> <operator> <right operand>.
# NOTE(review): the split point is always pos[0], so only the first operator
# is handled even if `pos` has several entries — verify this is intentional.
for i in pos:
    if eq[i-1]=='+':
        print(int(''.join(eq[:pos[0]-1]))+int(''.join(eq[pos[0]:])))
    elif eq[i-1]=='%':
        # '%' is this dataset's symbol for division
        print(int(''.join(eq[:pos[0]-1]))/int(''.join(eq[pos[0]:])))
    elif eq[i-1]=='*':
        print(int(''.join(eq[:pos[0]-1]))*int(''.join(eq[pos[0]:])))
    else:
        print(int(''.join(eq[:pos[0]-1]))-int(''.join(eq[pos[0]:])))
```
# THANK YOU!!
| github_jupyter |
<i>Copyright (c) Microsoft Corporation. All rights reserved.<br>
Licensed under the MIT License.</i>
<br>
# Recommender Hyperparameter Tuning w/ AzureML
This notebook shows how to auto-tune hyperparameters of a recommender model by utilizing **Azure Machine Learning service** ([AzureML](https://azure.microsoft.com/en-us/services/machine-learning-service/))<sup><a href="#azureml-search">a</a>, <a href="#azure-subscription">b</a></sup>.
We present an overall process of utilizing AzureML, specifically [**Hyperdrive**](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive?view=azure-ml-py) component, for the hyperparameter tuning by demonstrating key steps:
1. Configure AzureML Workspace
2. Create Remote Compute Target (GPU cluster)
3. Prepare Data
4. Prepare Training Scripts
5. Setup and Run Hyperdrive Experiment
6. Model Import, Re-train and Test
In this notebook, we use [**Wide-and-Deep model**](https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) from **TensorFlow high-level Estimator API (v1.12)** on the movie recommendation scenario. Wide-and-Deep learning jointly trains wide linear model and deep neural networks (DNN) to combine the benefits of memorization and generalization for recommender systems.
For more details about the **Wide-and-Deep** model:
* [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb)
* [Original paper](https://arxiv.org/abs/1606.07792)
* [TensorFlow API doc](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNLinearCombinedRegressor)
Regarding **AuzreML**, please refer:
* [Quickstart notebook](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
* [Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters)
* [Tensorflow model tuning with Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-train-tensorflow)
---
<sub><span id="azureml-search">a. To use AzureML, you will need an Azure subscription.</span><br>
<span id="azure-subscription">b. When you web-search "Azure Machine Learning", you will most likely to see mixed results of Azure Machine Learning (AzureML) and Azure Machine Learning **Studio**. Please note they are different services where AzureML's focuses are on ML model management, tracking and hyperparameter tuning, while the [ML Studio](https://studio.azureml.net/)'s is to provide a high-level tool for 'easy-to-use' experience of ML designing and experimentation based on GUI.</span></sub>
```
import sys
sys.path.append("../../")
import itertools
import os
import shutil
from tempfile import TemporaryDirectory
import time
from IPython.display import clear_output
import numpy as np
import papermill as pm
import pandas as pd
import sklearn.preprocessing
import tensorflow as tf
import azureml as aml
import azureml.widgets as widgets
import azureml.train.hyperdrive as hd
from reco_utils.dataset.pandas_df_utils import user_item_pairs
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
import reco_utils.evaluation.python_evaluation
print("Azure ML SDK Version:", aml.core.VERSION)
print("Tensorflow Version:", tf.__version__)
tmp_dir = TemporaryDirectory()
```
### 1. Configure AzureML Workspace
**AzureML workspace** is a foundational block in the cloud that you use to experiment, train, and deploy machine learning models via AzureML service. In this notebook, we 1) create a workspace from [**Azure portal**](https://portal.azure.com) and 2) configure from this notebook.
You can find more details about the setup and configure processes from the following links:
* [Quickstart with Azure portal](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started)
* [Quickstart with Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
#### 1.1 Create a workspace
1. Sign in to the [Azure portal](https://portal.azure.com) by using the credentials for the Azure subscription you use.
2. Select **Create a resource** menu, search for **Machine Learning service workspace** select **Create** button.
3. In the **ML service workspace** pane, configure your workspace with entering the *workspace name* and *resource group* (or **create new** resource group if you don't have one already), and select **Create**. It can take a few moments to create the workspace.
#### 1.2 Configure
To configure this notebook to communicate with the workspace, type in your Azure subscription id, the resource group name and workspace name to `<subscription-id>`, `<resource-group>`, `<workspace-name>` in the above notebook cell. Alternatively, you can create a *.\aml_config\config.json* file with the following contents:
```
{
"subscription_id": "<subscription-id>",
"resource_group": "<resource-group>",
"workspace_name": "<workspace-name>"
}
```
```
# AzureML workspace info. Note, will look up "aml_config\config.json" first, then fall back to use this
SUBSCRIPTION_ID = '<subscription-id>'
RESOURCE_GROUP = '<resource-group>'
WORKSPACE_NAME = '<workspace-name>'
# Remote compute (cluster) configuration. If you want to save the cost more, set these to small.
VM_SIZE = 'STANDARD_NC6'
VM_PRIORITY = 'lowpriority'
# Cluster nodes
MIN_NODES = 4
MAX_NODES = 8
# Hyperdrive experimentation configuration
MAX_TOTAL_RUNS = 100 # Number of runs (training-and-evaluation) to search the best hyperparameters.
MAX_CONCURRENT_RUNS = 8
# Recommend top k items
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
EPOCHS = 50
# Metrics to track
RANKING_METRICS = ['ndcg_at_k', 'precision_at_k']
RATING_METRICS = ['rmse', 'mae']
PRIMARY_METRIC = 'rmse'
# Data column names
USER_COL = 'UserId'
ITEM_COL = 'MovieId'
RATING_COL = 'Rating'
ITEM_FEAT_COL = 'Genres'
```
Now let's see if everything is ready!
```
# Connect to a workspace
try:
ws = aml.core.Workspace.from_config()
except aml.exceptions.UserErrorException:
try:
ws = aml.core.Workspace(
subscription_id=SUBSCRIPTION_ID,
resource_group=RESOURCE_GROUP,
workspace_name=WORKSPACE_NAME
)
ws.write_config()
except aml.exceptions.AuthenticationException:
ws = None
if ws is None:
raise ValueError(
"""Cannot access the AzureML workspace w/ the config info provided.
Please check if you entered the correct id, group name and workspace name"""
)
else:
print("AzureML workspace name: ", ws.name)
clear_output() # Comment out this if you want to see your workspace info.
```
### 2. Create Remote Compute Target
We create a GPU cluster as our **remote compute target**. If a cluster with the same name is already exist in your workspace, the script will load it instead. You can see [this document](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets) to learn more about setting up a compute target on different locations.
This notebook selects **STANDARD_NC6** virtual machine (VM) and sets it's priority as *lowpriority* to save the cost.
Size | vCPU | Memory (GiB) | Temp storage (SSD, GiB) | GPU | GPU memory (GiB) | Max data disks | Max NICs
---|---|---|---|---|---|---|---
Standard_NC6 | <div align="center">6</div> | <div align="center">56</div> | <div align="center">340</div> | <div align="center">1</div> | <div align="center">8</div> | <div align="center">24</div> | <div align="center">1</div>
For more information about Azure virtual machine sizes, see [here](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu).
```
CLUSTER_NAME = 'gpu-cluster-nc6'
try:
compute_target = aml.core.compute.ComputeTarget(workspace=ws, name=CLUSTER_NAME)
print("Found existing compute target")
except aml.core.compute_target.ComputeTargetException:
print("Creating a new compute target...")
compute_config = aml.core.compute.AmlCompute.provisioning_configuration(
vm_size=VM_SIZE,
vm_priority=VM_PRIORITY,
min_nodes=MIN_NODES,
max_nodes=MAX_NODES
)
# create the cluster
compute_target = aml.core.compute.ComputeTarget.create(ws, CLUSTER_NAME, compute_config)
compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# Use the 'status' property to get a detailed status for the current cluster.
print(compute_target.status.serialize())
```
### 3. Prepare Data
For demonstration purpose, we use 100k MovieLens dataset. First, download the data and convert the format (multi-hot encode *genres*) to make it work for our model. More details about this step is described in our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb).
```
data = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=[USER_COL, ITEM_COL, RATING_COL],
genres_col='Genres_string'
)
# Encode 'genres' into int array (multi-hot representation) to use as item features
genres_encoder = sklearn.preprocessing.MultiLabelBinarizer()
data[ITEM_FEAT_COL] = genres_encoder.fit_transform(
data['Genres_string'].apply(lambda s: s.split("|"))
).tolist()
data.drop('Genres_string', axis=1, inplace=True)
data.head()
```
The dataset is split into train, validation, and test sets. The train and validation sets will be used for hyperparameter tuning, and the test set will be used for the final evaluation of the model after we import the best model from AzureML workspace.
Here, we don't use multiple-split directly by passing `ratio=[0.56, 0.19, 0.25]`. Instead, we first split the data into train and test sets with the same `seed` we've been using in other notebooks to make the train set identical across them. Then, we further split the train set into train and validation sets.
```
# Use the same seed to make the train and test sets identical across other notebooks in the repo.
train, test = python_random_split(data, ratio=0.75, seed=42)
# Further split the train set into train and validation set.
train, valid = python_random_split(train)
print(len(train), len(valid), len(test))
```
Now, upload the train and validation sets to the AzureML workspace. Our Hyperdrive experiment will use them.
```
DATA_DIR = os.path.join(tmp_dir.name, 'aml_data')
os.makedirs(DATA_DIR, exist_ok=True)
TRAIN_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_train.pkl"
train.to_pickle(os.path.join(DATA_DIR, TRAIN_FILE_NAME))
VALID_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_valid.pkl"
valid.to_pickle(os.path.join(DATA_DIR, VALID_FILE_NAME))
# Note, all the files under DATA_DIR will be uploaded to the data store
ds = ws.get_default_datastore()
ds.upload(
src_dir=DATA_DIR,
target_path='data',
overwrite=True,
show_progress=True
)
```
### 4. Prepare Training Scripts
Next step is to prepare scripts that AzureML Hyperdrive will use to train and evaluate models with selected hyperparameters. We re-use our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb) for that. To run the model notebook from the Hyperdrive Run, all we need is to prepare an [entry script](../../reco_utils/azureml/wide_deep.py) which parses the hyperparameter arguments, passes them to the notebook, and records the results of the notebook to AzureML Run logs by using `papermill`. Hyperdrive uses the logs to track the performance of each hyperparameter-set and finds the best performed one.
Here is a code snippet from the entry script:
```
...
from azureml.core import Run
run = Run.get_context()
...
NOTEBOOK_NAME = os.path.join(
"notebooks",
"00_quick_start",
"wide_deep_movielens.ipynb"
)
...
parser = argparse.ArgumentParser()
...
parser.add_argument('--dnn-optimizer', type=str, dest='dnn_optimizer', ...
parser.add_argument('--dnn-optimizer-lr', type=float, dest='dnn_optimizer_lr', ...
...
pm.execute_notebook(
NOTEBOOK_NAME,
OUTPUT_NOTEBOOK,
parameters=params,
kernel_name='python3',
)
...
```
```
# Prepare all the necessary scripts which will be loaded to our Hyperdrive Experiment Run
SCRIPT_DIR = os.path.join(tmp_dir.name, 'aml_script')
# Copy scripts to SCRIPT_DIR temporarly
shutil.copytree(os.path.join('..', '..', 'reco_utils'), os.path.join(SCRIPT_DIR, 'reco_utils'))
# We re-use our model notebook for training and testing models.
model_notebook_dir = os.path.join('notebooks', '00_quick_start')
dest_model_notebook_dir = os.path.join(SCRIPT_DIR, model_notebook_dir)
os.makedirs(dest_model_notebook_dir , exist_ok=True)
shutil.copy(
os.path.join('..', '..', model_notebook_dir, 'wide_deep_movielens.ipynb'),
dest_model_notebook_dir
)
# This is our entry script for Hyperdrive Run
ENTRY_SCRIPT_NAME = 'reco_utils/azureml/wide_deep.py'
```
### 5. Setup and Run Hyperdrive Experiment
[Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) creates a machine learning Experiment [Run](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.run?view=azure-ml-py) on the workspace and utilizes child-runs to search for the best set of hyperparameters.
#### 5.1 Create Experiment
[Experiment](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment(class)?view=azure-ml-py) is the main entry point into experimenting with AzureML. To create new Experiment or get the existing one, we pass our experimentation name.
```
# Create an experiment to track the runs in the workspace
EXP_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_wide_deep_model"
exp = aml.core.Experiment(workspace=ws, name=EXP_NAME)
```
#### 5.2 Define Search Space
Now we define the search space of hyperparameters. For example, if you want to test different batch sizes of {64, 128, 256}, you can use `azureml.train.hyperdrive.choice(64, 128, 256)`. To search from a continuous space, use `uniform(start, end)`. For more options, see [Hyperdrive parameter expressions](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.parameter_expressions?view=azure-ml-py).
In this notebook, we fix model type as `wide_deep` and the number of epochs to 50.
In the search space, we set different linear and DNN optimizers, structures, learning rates and regularization rates. Details about the hyperparameters can be found from our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb).
Hyperdrive provides three different parameter sampling methods: `RandomParameterSampling`, `GridParameterSampling`, and `BayesianParameterSampling`. Details about each method can be found from [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters). Here, we use the Bayesian sampling.
```
# Fixed parameters
script_params = {
'--datastore': ds.as_mount(),
'--train-datapath': "data/" + TRAIN_FILE_NAME,
'--test-datapath': "data/" + VALID_FILE_NAME,
'--top-k': TOP_K,
'--user-col': USER_COL,
'--item-col': ITEM_COL,
'--item-feat-col': ITEM_FEAT_COL,
'--rating-col': RATING_COL,
'--ranking-metrics': RANKING_METRICS,
'--rating-metrics': RATING_METRICS,
'--epochs': EPOCHS,
'--model-type': 'wide_deep'
}
# Hyperparameter search space
params = {
'--batch-size': hd.choice(64, 128, 256),
# Linear model hyperparameters
'--linear-optimizer': hd.choice('Ftrl'), # 'SGD' and 'Momentum' easily got exploded loss in regression problems.
'--linear-optimizer-lr': hd.uniform(0.0001, 0.1),
'--linear-l1-reg': hd.uniform(0.0, 0.1),
# Deep model hyperparameters
'--dnn-optimizer': hd.choice('Adagrad', 'Adam'),
'--dnn-optimizer-lr': hd.uniform(0.0001, 0.1),
'--dnn-user-embedding-dim': hd.choice(4, 8, 16, 32, 64),
'--dnn-item-embedding-dim': hd.choice(4, 8, 16, 32, 64),
'--dnn-hidden-layer-1': hd.choice(0, 32, 64, 128, 256, 512, 1024), # 0: not using this layer
'--dnn-hidden-layer-2': hd.choice(0, 32, 64, 128, 256, 512, 1024),
'--dnn-hidden-layer-3': hd.choice(0, 32, 64, 128, 256, 512, 1024),
'--dnn-hidden-layer-4': hd.choice(32, 64, 128, 256, 512, 1024),
'--dnn-batch-norm': hd.choice(0, 1),
'--dnn-dropout': hd.choice(0.0, 0.1, 0.2, 0.3, 0.4)
}
```
**AzureML Estimator** is the building block for training. An Estimator encapsulates the training code and parameters, the compute resources and runtime environment for a particular training scenario (Note, this is not TensorFlow's Estimator)
We create one for our experimentation with the dependencies our model requires as follows:
```
conda_packages=['pandas', 'scikit-learn'],
pip_packages=['ipykernel', 'papermill', 'tensorflow-gpu==1.12']
```
To the Hyperdrive Run Config, we set our primary metric name and the goal (our hyperparameter search criteria), hyperparameter sampling method, and number of total child-runs. The bigger the search space, the more number of runs we will need for better results.
```
est = aml.train.estimator.Estimator(
source_directory=SCRIPT_DIR,
entry_script=ENTRY_SCRIPT_NAME,
script_params=script_params,
compute_target=compute_target,
use_gpu=True,
conda_packages=['pandas', 'scikit-learn'],
pip_packages=['ipykernel', 'papermill', 'tensorflow-gpu==1.12']
)
hd_run_config = hd.HyperDriveRunConfig(
estimator=est,
hyperparameter_sampling=hd.BayesianParameterSampling(params),
primary_metric_name=PRIMARY_METRIC,
primary_metric_goal=hd.PrimaryMetricGoal.MINIMIZE,
max_total_runs=MAX_TOTAL_RUNS,
max_concurrent_runs=MAX_CONCURRENT_RUNS
)
```
#### 5.3 Run Experiment
Now we submit the Run to our experiment. You can see the experiment progress from this notebook by using `azureml.widgets.RunDetails(hd_run).show()` or check from the Azure portal with the url link you can get by running `hd_run.get_portal_url()`.
<img src="https://recodatasets.blob.core.windows.net/images/aml_0.png?sanitize=true" width="600"/>
<img src="https://recodatasets.blob.core.windows.net/images/aml_1.png?sanitize=true" width="600"/>
<center><i>AzureML Hyperdrive Widget</i></center>
To load an existing Hyperdrive Run instead of starting a new one, use `hd_run = hd.HyperDriveRun(exp, <user-run-id>, hyperdrive_run_config=hd_run_config)`. You can also cancel the Run with `hd_run.cancel()`.
```
hd_run = exp.submit(config=hd_run_config)
widgets.RunDetails(hd_run).show()
```
Once all the child-runs are finished, we can get the best run and the metrics.
> Note, if you run Hyperdrive experiment again, you will see the best metrics and corresponding hyperparameters are not the same. It is because of 1) the random initialization of the model and 2) Hyperdrive sampling (when you use RandomSampling). You will get different results as well if you use different training and validation sets.
```
# Get best run and printout metrics
best_run = hd_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print("* Best Run Id:", best_run.id)
print("\n* Best hyperparameters:")
print("Model type =", best_run_metrics['MODEL_TYPE'])
print("Batch size =", best_run_metrics['BATCH_SIZE'])
print("Linear optimizer =", best_run_metrics['LINEAR_OPTIMIZER'])
print("\tLearning rate = {0:.4f}".format(best_run_metrics['LINEAR_OPTIMIZER_LR']))
print("\tL1 regularization = {0:.4f}".format(best_run_metrics['LINEAR_L1_REG']))
print("DNN optimizer =", best_run_metrics['DNN_OPTIMIZER'])
print("\tUser embedding dimension =", best_run_metrics['DNN_USER_DIM'])
print("\tItem embedding dimension =", best_run_metrics['DNN_ITEM_DIM'])
hidden_units = []
for i in range(1, 5):
hidden_nodes = best_run_metrics['DNN_HIDDEN_LAYER_{}'.format(i)]
if hidden_nodes > 0:
hidden_units.append(hidden_nodes)
print("\tHidden units =", hidden_units)
print("\tLearning rate = {0:.4f}".format(best_run_metrics['DNN_OPTIMIZER_LR']))
print("\tDropout rate = {0:.4f}".format(best_run_metrics['DNN_DROPOUT']))
print("\tBatch normalization =", best_run_metrics['DNN_BATCH_NORM'])
# Metrics evaluated on validation set
print("\n* Performance metrics:")
print("Top", TOP_K)
for m in RANKING_METRICS:
print("\t{0} = {1:.4f}".format(m, best_run_metrics[m]))
for m in RATING_METRICS:
print("\t{0} = {1:.4f}".format(m, best_run_metrics[m]))
```
### 6. Model Import and Test
[Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb), which we've used in our Hyperdrive Experiment, exports the trained model to the output folder (the output path is recorded at `best_run_metrics['saved_model_dir']`). We can download a model from the best run and test it.
```
MODEL_DIR = os.path.join(tmp_dir.name, 'aml_model')
os.makedirs(MODEL_DIR, exist_ok=True)
model_file_dir = os.path.normpath(best_run_metrics['saved_model_dir'][2:-1]) + '/'
print(model_file_dir)
for f in best_run.get_file_names():
if f.startswith(model_file_dir):
output_file_path = os.path.join(MODEL_DIR, f[len(model_file_dir):])
print("Downloading {}..".format(f))
best_run.download_file(name=f, output_file_path=output_file_path)
saved_model = tf.contrib.estimator.SavedModelEstimator(MODEL_DIR)
cols = {
'col_user': USER_COL,
'col_item': ITEM_COL,
'col_rating': RATING_COL,
'col_prediction': 'prediction'
}
tf.logging.set_verbosity(tf.logging.ERROR)
# Prediction input function for TensorFlow SavedModel
def predict_input_fn(df):
    """Build a TensorFlow SavedModel prediction input function from a DataFrame.

    Each row of `df` is serialized into a `tf.train.Example` carrying the
    user-id, item-id, and item-feature columns (column names come from the
    notebook-level USER_COL / ITEM_COL / ITEM_FEAT_COL constants).

    Args:
        df: DataFrame of prediction samples. Rows are serialized in iteration
            order, so the index no longer needs to be reset to 0..n-1 (the
            original implementation indexed into a pre-allocated list and
            crashed or misordered rows on a non-contiguous index).

    Returns:
        A zero-argument `input_fn` producing ``{'inputs': tf.constant([...])}``
        of serialized Example protos, as expected by SavedModelEstimator.
    """
    def input_fn():
        examples = []
        for _, test_sample in df.iterrows():
            example = tf.train.Example()
            example.features.feature[USER_COL].int64_list.value.extend([test_sample[USER_COL]])
            example.features.feature[ITEM_COL].int64_list.value.extend([test_sample[ITEM_COL]])
            # Item features are a sequence (e.g. multi-hot genres), hence float_list.
            example.features.feature[ITEM_FEAT_COL].float_list.value.extend(test_sample[ITEM_FEAT_COL])
            examples.append(example.SerializeToString())
        return {'inputs': tf.constant(examples)}
    return input_fn
# Rating prediction set
X_test = test.drop(RATING_COL, axis=1)
X_test.reset_index(drop=True, inplace=True)
# Rating prediction
predictions = list(itertools.islice(
saved_model.predict(predict_input_fn(X_test)),
len(X_test)
))
prediction_df = X_test.copy()
prediction_df['prediction'] = [p['outputs'][0] for p in predictions]
print(prediction_df['prediction'].describe(), "\n")
for m in RATING_METRICS:
fn = getattr(reco_utils.evaluation.python_evaluation, m)
result = fn(test, prediction_df, **cols)
print(m, "=", result)
# Unique items
if ITEM_FEAT_COL is None:
items = data.drop_duplicates(ITEM_COL)[[ITEM_COL]].reset_index(drop=True)
else:
items = data.drop_duplicates(ITEM_COL)[[ITEM_COL, ITEM_FEAT_COL]].reset_index(drop=True)
# Unique users
users = data.drop_duplicates(USER_COL)[[USER_COL]].reset_index(drop=True)
# Ranking prediction set
ranking_pool = user_item_pairs(
user_df=users,
item_df=items,
user_col=USER_COL,
item_col=ITEM_COL,
user_item_filter_df=pd.concat([train, valid]), # remove seen items
shuffle=True
)
predictions = []
# To prevent creating a tensor proto whose content is larger than 2GB (which will raise an error),
# divide ranking_pool into 10 chunks, predict each, and concat back.
for pool in np.array_split(ranking_pool, 10):
pool.reset_index(drop=True, inplace=True)
# Rating prediction
pred = list(itertools.islice(
saved_model.predict(predict_input_fn(pool)),
len(pool)
))
predictions.extend([p['outputs'][0] for p in pred])
ranking_pool['prediction'] = predictions
for m in RANKING_METRICS:
fn = getattr(reco_utils.evaluation.python_evaluation, m)
result = fn(test, ranking_pool, **{**cols, 'k': TOP_K})
print(m, "=", result)
```
#### Wide-and-Deep Baseline Comparison
To see if Hyperdrive found good hyperparameters, we simply compare with the model with known hyperparameters from [TensorFlow's wide-deep learning example](https://github.com/tensorflow/models/blob/master/official/wide_deep/movielens_main.py) which uses only the DNN part from the wide-and-deep model for MovieLens data.
> Note, this is not an 'apples to apples' comparison. For example, TensorFlow's movielens example uses *rating-timestamp* as a numeric feature, but we did not use that here because we think the timestamps are not relevant to the movies' ratings. This comparison is meant more to show how Hyperdrive can help to find comparable hyperparameters without requiring exhaustive effort in going over a huge search-space.
```
OUTPUT_NOTEBOOK = os.path.join(tmp_dir.name, "output.ipynb")
OUTPUT_MODEL_DIR = os.path.join(tmp_dir.name, "known_hyperparam_model_checkpoints")
params = {
'MOVIELENS_DATA_SIZE': MOVIELENS_DATA_SIZE,
'TOP_K': TOP_K,
'MODEL_TYPE': 'deep',
'EPOCHS': EPOCHS,
'BATCH_SIZE': 256,
'DNN_OPTIMIZER': 'Adam',
'DNN_OPTIMIZER_LR': 0.001,
'DNN_HIDDEN_LAYER_1': 256,
'DNN_HIDDEN_LAYER_2': 256,
'DNN_HIDDEN_LAYER_3': 256,
'DNN_HIDDEN_LAYER_4': 128,
'DNN_USER_DIM': 16,
'DNN_ITEM_DIM': 64,
'DNN_DROPOUT': 0.3,
'DNN_BATCH_NORM': 0,
'MODEL_DIR': OUTPUT_MODEL_DIR,
'EVALUATE_WHILE_TRAINING': False,
'EXPORT_DIR_BASE': OUTPUT_MODEL_DIR,
'RANKING_METRICS': RANKING_METRICS,
'RATING_METRICS': RATING_METRICS,
}
start_time = time.time()
pm.execute_notebook(
"../00_quick_start/wide_deep_movielens.ipynb",
OUTPUT_NOTEBOOK,
parameters=params,
kernel_name='python3'
)
end_time = time.time()
print("Training and evaluation of Wide-and-Deep model took", end_time-start_time, "secs.")
nb = pm.read_notebook(OUTPUT_NOTEBOOK)
for m in RANKING_METRICS:
print(m, "=", nb.data[m])
for m in RATING_METRICS:
print(m, "=", nb.data[m])
```
### Concluding Remark
We showed how to tune hyperparameters by utilizing the Azure Machine Learning service. Complex and powerful models like the Wide-and-Deep model often have a large number of hyperparameters that affect the recommendation accuracy, and it is not practical to tune the model without using a GPU cluster. For example, training and evaluating a model took around 3 minutes on the 100k MovieLens data on a single *Standard NC6* VM as we tested in the [above cell](#Wide-and-Deep-Baseline-Comparison). When we used 1M MovieLens, it took about 47 minutes. If we want to investigate 100 different combinations of hyperparameters **manually**, it would take **78 hours** on the VM and we may still wonder if we had tested good candidates of hyperparameters. With AzureML, as we showed in this notebook, we can easily set up a GPU cluster sized to fit our problem, utilize Bayesian sampling to navigate through the huge search space efficiently, and tweak the experiment with different criteria and algorithms for further research.
#### Cleanup
```
tmp_dir.cleanup()
```
| github_jupyter |
# Show iterative steps of preprocessing
```
import data_utils
import numpy as np
import matplotlib.pyplot as plt
from preprocessing import binarize_per_slice, all_slice_analysis, fill_hole, two_lung_only, process_mask
# Show iterative steps of computing lung mask
first_patient_pixels, spacing, _ = data_utils.load_dicom_slices("../../data/LIDC-IDRI-DCM/LIDC-IDRI-0001/01-01-2000-30178/3000566-03192/")
print(first_patient_pixels.shape)
import matplotlib.pyplot as plt
plt.hist(first_patient_pixels.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
# Show some slice in the middle
h = 80
plt.imshow(first_patient_pixels[h], cmap=plt.cm.gray)
plt.show()
bw = binarize_per_slice(first_patient_pixels, spacing)
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
Parallelized, but very slow on Power (on the order of 2 minutes).
```
flag = 0
cut_num = 0
while flag == 0:
bw, flag = all_slice_analysis(bw, spacing, cut_num=cut_num)
cut_num = cut_num + 1
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
Pas de multiprocessing.
```
bw = fill_hole(bw)
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
Pas de multiprocessing.
```
bw1, bw2, bw = two_lung_only(bw, spacing)
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
Pas de multiprocessing. Plutôt long.
```
plt.imshow(bw1[h], cmap=plt.cm.gray)
plt.show()
plt.imshow(bw2[h], cmap=plt.cm.gray)
plt.show()
dm1 = process_mask(bw1)
dm2 = process_mask(bw2)
plt.imshow(dm1[h]+dm2[h], cmap=plt.cm.gray)
plt.show()
dm = process_mask(bw)
plt.imshow(dm[h], cmap=plt.cm.gray)
plt.show()
x = np.load("/wmlce/data/projects/lung_segmentation/output/preprocessing/2_128_256_256/456.npy")
plt.imshow(x[h], cmap=plt.cm.gray)
plt.show()
x_mask = np.load("/wmlce/data/projects/lung_segmentation/output/preprocessing/2_128_256_256/456_mask.npy")
plt.imshow(x_mask[h], cmap=plt.cm.gray)
plt.show()
```
# Using U-Net Lungs Segmentation
```
import os
import sys
import time
import torch
import mlflow
import mlflow.pytorch
import numpy as np
import SimpleITK as sitk
from pathlib import Path
import matplotlib.pyplot as plt
os.environ['MDT_DATASETS_DIR'] = '/wmlce/data/medical-datasets'
from preprocessing import binarize_per_slice, all_slice_analysis, fill_hole, two_lung_only, process_mask, resample_array, lumTrans
LS_PATH = os.path.join('.', 'lung-segmentation')
sys.path.append(LS_PATH)
import predict
from data import utils as data_utils
start_time = time.time()
pid = 'LIDC-IDRI-0489'
path = f'/wmlce/data/medical-datasets/MDT-LIDC-IDRI/NRRDs/{pid}'
target_spacing = (0.7, 0.7, 1.25)
remote_server_uri = "http://mlflow.10.7.13.202.nip.io/"
mlflow.set_tracking_uri(remote_server_uri)
h = 150
# Load scan
img = sitk.ReadImage(os.path.join(path, '{}_CT.nrrd'.format(pid)))
original_spacing = np.array(img.GetSpacing())
img_arr = sitk.GetArrayFromImage(img)
ls_img_arr = np.copy(img_arr)
load_time = time.time()
print(f'{pid}: loaded in {load_time - start_time} s')
# Resample and Normalize
img_arr = resample_array(img_arr, img.GetSpacing(), target_spacing)
lum_img_arr = np.copy(img_arr)
img_arr = np.clip(img_arr, -1200, 600)
img_arr = img_arr.astype(np.float32)
img_arr = (img_arr - np.mean(img_arr)) / np.std(img_arr).astype(np.float16)
norm_time = time.time()
print(f'{pid}: Resampled in {norm_time - load_time} s')
print(f'{pid}: {img_arr.shape}, {target_spacing}')
plt.imshow(img_arr[h], cmap=plt.cm.gray)
plt.show()
# Compute lungs mask
model_name = "2-lungs-segmentation"
unet = mlflow.pytorch.load_model("models:/{}/production".format(model_name))
print(ls_img_arr.shape, original_spacing)
ls_img_arr, spacing = data_utils.prep_img_arr(ls_img_arr, original_spacing)
print(ls_img_arr.shape, spacing)
mask = predict.predict(ls_img_arr, 1, unet, threshold=True, erosion=True)
print(mask.shape, spacing)
mask, spacing = data_utils.prep_img_arr(mask[0][0], spacing, target_shape=img_arr.shape)
mask = mask[0]
mask[mask>0.5] = 1
mask[mask!=1] = 0
print(mask.shape, target_spacing)
ls_time = time.time()
print(f'{pid}: Lung segmentation took {ls_time - norm_time} s')
plt.imshow(mask[h], cmap=plt.cm.gray)
plt.show()
dilatedMask = process_mask(mask)
Mask = mask
extramask = dilatedMask.astype(np.uint8) - Mask.astype(np.uint8)
bone_thresh = 210
pad_value = 1 #170
img_arr[np.isnan(img_arr)]=-2000
sliceim = lumTrans(lum_img_arr)
#sliceim = sliceim*dilatedMask+pad_value*(1-dilatedMask).astype('uint8')
bones = sliceim*extramask>bone_thresh
#sliceim[bones] = pad_value
img_arr = img_arr*dilatedMask+pad_value*(1-dilatedMask).astype('uint8')
img_arr[bones] = pad_value
bones_mask = np.zeros(sliceim.shape)
bones_mask[bones] = 1
print(f'{pid}: Cleaning took {time.time() - ls_time} s')
print(f'{pid}: Ellapsed {time.time() - start_time} s')
# Plot image
plt.subplot(2, 3, 1).imshow(img_arr[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 2).imshow(Mask[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 3).imshow(dilatedMask[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 4).imshow(img_arr[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 5).imshow(bones_mask[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 6).imshow(extramask[h], cmap=plt.cm.gray)
plt.show()
dir_path = "/wmlce/data/medical-datasets/MDT-PP"
imgs = glob.glob(os.path.join(dir_path, "*_rois.npy"))
print(np.min(img_arr), np.max(img_arr))
plt.imshow(img_arr[h], cmap=plt.cm.gray)
plt.show()
```
## Load some images generated by such preprocessing
```
import os, glob
import numpy as np
import matplotlib.pyplot as plt
dir_path = "/wmlce/data/medical-datasets/MDT-PP"
imgs = glob.glob(os.path.join(dir_path, "*_rois.npy"))
h = 150
n = 10
for ix, img in enumerate(np.random.choice(imgs, n), 1):
img_arr = np.load(img.replace("_rois", "_img")).astype(np.float32)
rois_arr = np.load(img)
print(f"Image {os.path.splitext(os.path.basename(img))[0]} {img_arr.shape}, rois {rois_arr.shape}")
plt.subplot(2, n/2, ix).imshow(img_arr[h], cmap=plt.cm.gray)
plt.show()
dir_path = "/wmlce/data/medical-datasets/MDT-PP"
img = "LIDC-IDRI-0338_img.npy"
img = "LIDC-IDRI-0479_img.npy"
img = "LIDC-IDRI-0489_img.npy"
img = "LIDC-IDRI-0015_img.npy"
img = "LIDC-IDRI-0509_img.npy" # This image seems to have been swapped (axes issues / flipped ?)
img_arr = np.load(os.path.join(dir_path, img))
print(img_arr.shape, img_arr.dtype)
plt.imshow(img_arr[:,250,:], cmap=plt.cm.gray)
plt.show()
```
# Crap image analysis
### Resample to original size and save to nrrd
```
from preprocessing import resample_array_to_shape
itkimg = sitk.ReadImage("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-0015/1.3.6.1.4.1.14519.5.2.1.6279.6001.231462296937187240061810311146/1.3.6.1.4.1.14519.5.2.1.6279.6001.227962600322799211676960828223/LIDC-IDRI-0015_CT.nrrd")
seg_mask, seg_spacing = resample_array_to_shape(img_arr, [0.7,0.7,1.25], target_shape=sitk.GetArrayFromImage(itkimg).shape)
new_itk = sitk.GetImageFromArray(seg_mask)
new_itk.SetOrigin(itkimg.GetOrigin())
new_itk.SetSpacing(itkimg.GetSpacing())
sitk.WriteImage(new_itk, 'test.nrrd')
```
# Get list of all images with high spacing / flipped images
```
import nrrd
import os, glob
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
def load_itk(filename):
    """Load an ITK image and report whether its orientation is flipped.

    The raw header text is scanned for the ``TransformMatrix`` line; its
    (rounded) values are compared against the 3x3 identity matrix, and any
    deviation marks the scan as flipped.

    Args:
        filename: path to the image file (e.g. NRRD/MHD with a text header).

    Returns:
        Tuple of (voxel array, origin, spacing, isflip), with origin and
        spacing reversed from ITK's (x, y, z) into numpy's (z, y, x) order.
    """
    # Pull the transform matrix straight out of the text header.
    with open(filename) as header_file:
        header_lines = header_file.readlines()
    matrix_line = [ln for ln in header_lines if ln.startswith('TransformMatrix')][0]
    transform = np.round(np.array(matrix_line.split(' = ')[1].split(' ')).astype('float'))
    identity = np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])
    isflip = not np.array_equal(transform, identity)

    itkimage = sitk.ReadImage(filename)
    voxels = sitk.GetArrayFromImage(itkimage)
    # ITK reports metadata in (x, y, z); reverse to match the array's axes.
    origin = np.array(list(reversed(itkimage.GetOrigin())))
    spacing = np.array(list(reversed(itkimage.GetSpacing())))
    return voxels, origin, spacing, isflip
list_path = glob.glob("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-*/*/*/*_CT.nrrd")
spaces = set([])
kinds = set([])
dimensions = set([])
high_spacing = set([])
for path in list_path:
h = nrrd.read_header(path)
spaces.add(h['space'])
for k in h['kinds']:
kinds.add(k)
dimensions.add(h['dimension'])
if np.max(h['space directions']) > 2.5:
high_spacing.add(path)
print(spaces)
print(kinds)
print(dimensions)
print(len(high_spacing))
```
# Check scans manually
```
import nrrd
import os, glob
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from ipywidgets import interact, interact_manual
from IPython.display import display
import matplotlib.pyplot as plt
import ipywidgets as widgets
ix = 0
#list_path = glob.glob("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-*/*/*/*_CT.nrrd")
list_path = glob.glob("/wmlce/data/medical-datasets/MDT-PP/*_img.npy")
import csv
#csv_path = 'scans_checkup.csv'
csv_path = 'pp_scans_checkup.csv'
header = ['pid', 'flip', 'crap', 'poor', 'warn', 'note']
if not os.path.exists(csv_path):
f = open(csv_path, 'w+')
writer = csv.DictWriter(f, fieldnames=header)
writer.writeheader()
f.close()
def f(flip, crap, poor, warn, note):
    """Interactive review callback: log flags for the previous image, show the next.

    Invoked once per click of the `interact_manual` button. The checkbox flags
    entered now describe the image displayed on the *previous* invocation
    (index ``ix - 1``); if any flag is set, a row is appended to the check-up
    CSV. Then the middle slice of the current image is displayed and the
    global index is advanced.

    Args:
        flip, crap, poor, warn: boolean quality flags for the previous image.
        note: free-text note for the previous image.
    """
    global ix, list_path, csv_path, header
    # Record the previously shown image if it was flagged as abnormal.
    if ix >= 1 and (flip or crap or poor or warn):
        #pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_CT')[0]
        pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_img')[0]
        print(f'Adding abnormal img {pid} to csv')
        # Context manager guarantees the file is closed; also avoid the
        # original's shadowing of this function's own name with the handle.
        with open(csv_path, 'a') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=header)
            writer.writerow({header[0]: pid, header[1]: int(flip), header[2]: int(crap),
                             header[3]: int(poor), header[4]: int(warn), header[5]: note})
    if ix >= 0 and ix < len(list_path):
        print(f'Showing img: {ix}: {list_path[ix]}')
        ## Show ITK
        #itkimage = sitk.ReadImage(list_path[ix])
        #numpyImage = sitk.GetArrayFromImage(itkimage)
        numpyImage = np.load(list_path[ix])
        # Display the middle axial slice of the volume.
        plt.imshow(numpyImage[int(len(numpyImage)/2)].astype(np.uint8), cmap=plt.cm.gray)
        plt.show()
    elif ix >= 0:
        print('Done')
    ix = ix + 1
_ = interact_manual(f, flip=False, crap=False, poor=False, warn=False, note='')
```
# Check ROI labels
```
import os, glob
import numpy as np
list_paths = glob.glob("/wmlce/data/medical-datasets/MDT-PP/*_rois.npy")
for path in list_paths[:10]:
arr = np.load(path)
pid = os.path.splitext(os.path.basename(path))[0].split('_')[0]
print(pid, np.unique(arr))
```
## Upsample ROIs to original scan size for visualization
```
os.environ["MDT_DATASETS_DIR"] = "/wmlce/data/medical-datasets"
from preprocessing import resample_array_to_shape
import numpy as np
import os, glob
itkimg = sitk.ReadImage(glob.glob("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-0806/*/*/*_CT.nrrd")[0])
rois_path = "/wmlce/data/medical-datasets/MDT-PP/LIDC-IDRI-0806_rois.npy"
pid = os.path.splitext(os.path.basename(rois_path))[0].split('_')[0]
rois_arr = np.load(rois_path)
rois_arr[rois_arr != 0] = 1
seg_mask, seg_spacing = resample_array_to_shape(rois_arr, [0.7,0.7,1.25], target_shape=sitk.GetArrayFromImage(itkimg).shape)
seg_mask[seg_mask >= 0.5] = 1
seg_mask[seg_mask < 0.5] = 0
seg_mask = seg_mask.astype(np.uint8)
new_itk = sitk.GetImageFromArray(seg_mask)
new_itk.SetOrigin(itkimg.GetOrigin())
new_itk.SetSpacing(itkimg.GetSpacing())
sitk.WriteImage(new_itk, f'{pid}_rois.nrrd')
```
## GAN generated scans
```
import os, glob
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from ipywidgets import interact, interact_manual
from IPython.display import display
import matplotlib.pyplot as plt
import ipywidgets as widgets
list_path = glob.glob("/wmlce/data/medical-datasets/MDT-PP/*-AUG_img.npy")
for path in list_path:
pid = os.path.basename(path).replace("_img.npy", "")
n_nods = len(glob.glob(f"/wmlce/data/medical-datasets/MDT-LIDC-IDRI/NRRDs/{pid}/*nod*"))
print(pid, n_nods, np.unique(np.load(path.replace("_img", "_rois"))))
import csv
#csv_path = 'scans_checkup.csv'
csv_path = 'pp_aug_scans_checkup.csv'
header = ['pid', 'flip', 'crap', 'poor', 'warn', 'note']
if not os.path.exists(csv_path):
f = open(csv_path, 'w+')
writer = csv.DictWriter(f, fieldnames=header)
writer.writeheader()
f.close()
def f(flip, crap, poor, warn, note):
    """Interactive review callback for GAN-augmented scans.

    Invoked once per click of the `interact_manual` button. The checkbox flags
    entered now describe the image displayed on the *previous* invocation
    (index ``ix - 1``); if any flag is set, a row is appended to the check-up
    CSV. Then the middle slice of the current image is displayed and the
    global index is advanced.

    Args:
        flip, crap, poor, warn: boolean quality flags for the previous image.
        note: free-text note for the previous image.
    """
    global ix, list_path, csv_path, header
    # Record the previously shown image if it was flagged as abnormal.
    if ix >= 1 and (flip or crap or poor or warn):
        #pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_CT')[0]
        pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_img')[0]
        print(f'Adding abnormal img {pid} to csv')
        # Context manager guarantees the file is closed; also avoid the
        # original's shadowing of this function's own name with the handle.
        with open(csv_path, 'a') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=header)
            writer.writerow({header[0]: pid, header[1]: int(flip), header[2]: int(crap),
                             header[3]: int(poor), header[4]: int(warn), header[5]: note})
    if ix >= 0 and ix < len(list_path):
        print(f'Showing img: {ix}: {list_path[ix]}')
        ## Show ITK
        #itkimage = sitk.ReadImage(list_path[ix])
        #numpyImage = sitk.GetArrayFromImage(itkimage)
        numpyImage = np.load(list_path[ix])
        # Display the middle axial slice of the volume.
        plt.imshow(numpyImage[int(len(numpyImage)/2)].astype(np.uint8), cmap=plt.cm.gray)
        plt.show()
    elif ix >= 0:
        print('Done')
    ix = ix + 1
_ = interact_manual(f, flip=False, crap=False, poor=False, warn=False, note='')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_BiologicalNeuronModels/student/W2D3_Intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Intro
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
## Overview
Today you will learn about a few interesting properties of biological neurons and synapses. In his intro lecture Upi Bhalla will start with an overview of the complexity of the neurons and synapses in the brain. He will also introduce a mathematical description of action potential generation and propagation by which neurons communicate with each other. Then, in a series of short tutorials Richard Naud will introduce simple neuron and synapse models. These tutorials will give you insights about how neurons may generate irregular spike patterns and synchronize their activity. In the first tutorial you will learn about the input-output transfer function of the leaky integrate and fire neuron model. In the second tutorial you will use this model to understand how statistics of inputs affects transfer of synchrony. In the third tutorial you will explore the short-term dynamics of synapses which means that synaptic weight is dependent on the recent history of spiking activity of the pre-synaptic neurons. In the fourth tutorial, you will learn about spike timing dependent plasticity and explore how synchrony in the input may shape the synaptic weight distribution. Finally, in the outro lecture Yiota Poirazi will explain how the simplified description of neurons can be expanded to include more biological complexity. She will provide evidence of how dendritic morphology may expand the computational repertoire of individual neurons.
The models we use in today’s lecture fall in the category of how models (W1D1). You will use several concepts from linear systems (W2D2). The insights developed in these tutorials will be useful to understand the dynamics of neural networks (W3D4). Moreover, you will learn about the origin of statistics of neuronal activity which will be useful for several tutorials. For example, the understanding of synchrony will be very useful in appreciating the problem of causality (W3D5).
Neuron and synapse models are essential building blocks of mechanistic models of brain function and dysfunction. One of the common questions in neuroscience is to identify the causes of changes in the statistics of spiking activity patterns. Whether these changes are caused by changes in neuron/synapse properties or by a change in the input or by a combination of both? With the contents of this tutorial, you should have a framework to think about which changes in spike patterns are due to neuron/synapse or input changes.
## Video
```
# @markdown
from ipywidgets import widgets

# Bilibili tab: mirror for viewers without YouTube access.
out2 = widgets.Output()
with out2:
    from IPython.display import IFrame

    class BiliVideo(IFrame):
        # Thin IFrame wrapper that builds the Bilibili embedded-player URL
        # from a video id and page number.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV18A411v7Yy", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

# YouTube tab.
out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"MAOOPv3whZ0", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

# Present both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
## Slides
```
# @markdown
# Render the tutorial slides (hosted on OSF) inline in the notebook.
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/gyfr2/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/bereml/iap/blob/master/libretas/1f_fashion_fcn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Clasificación de Fashion-MNIST con una red densa
Curso: [Introducción al Aprendizaje Profundo](http://turing.iimas.unam.mx/~ricardoml/course/iap/). Profesores: [Bere](https://turing.iimas.unam.mx/~bereml/) y [Ricardo](https://turing.iimas.unam.mx/~ricardoml/) Montalvo Lezama.
---
---
En esta libreta debes entrenar dos clasificadores para el conjunto Fashion-MNIST.
1. El primero usando la misma arquitectura e hiperparámetros que en el ejemplo de MNIST.
2. En un segundo clasificador modifica la arquitectura intentando obtener un mejor desempeño.
Para resolver este ejercicio emplea la clase [`FashionMNIST`](https://pytorch.org/vision/0.8/datasets.html#fashion-mnist) proporcionada por PyTorch.
[Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) es un conjunto para reemplazar MNIST. Fue recolectado con la intención de proveer un conjunto un poco más difícil que MNIST.
<img src="https://miro.medium.com/max/800/1*RNBs0OsymwAzDyYMk3_0Aw.jpeg" width="600"/>
Conjunto Fashion-MNIST. Imagen tomada de https://medium.com/@sankarchanna2k18/fashion-mnist-data-image-classification-in-tensorflow-bd22f9e680bc.
## 1 Preparación
```
# biblioteca para inspeccionar arquitecturas
# https://github.com/tyleryep/torchinfo
!pip install torchinfo
```
### 1.1 Bibliotecas
```
# funciones aleatorias
import random
# tomar n elementos de una secuencia
from itertools import islice as take
# gráficas
import matplotlib.pyplot as plt
# arreglos multidimensionales
import numpy as np
# redes neuronales
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
# procesamiento de imágenes
from skimage import io
# redes neuronales
from torch.utils.data import DataLoader
from torchvision.datasets import FashionMNIST
# inspección de arquitectura
from torchinfo import summary
# barras de progreso
from tqdm import trange
```
### 1.2 Auxiliares
```
# data directory
DATA_DIR = '../data'
# batch size
BATCH_SIZE = 32
# rows and columns of the image display grid
ROWS, COLS = 4, 8
# Fashion-MNIST class index -> human-readable label
CLASSES = {
    0: "T-shirt/top",
    1: "Trouser",
    2: "Pullover",
    3: "Dress",
    4: "Coat",
    5: "Sandal",
    6: "Shirt",
    7: "Sneaker",
    8: "Bag",
    9: "Ankle boot",
}
def display_grid(xs, titles, rows, cols, figsize=(12, 6)):
    """Show `rows * cols` grayscale images in a grid with per-image titles.

    `xs` and `titles` are indexed row-major, matching the grid layout.
    """
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    for idx in range(rows * cols):
        axis = axes[idx // cols, idx % cols]
        axis.imshow(xs[idx], cmap='gray')
        axis.set_title(titles[idx])
        # Hide tick labels so only the images and titles are visible.
        axis.set_xticklabels([])
        axis.set_yticklabels([])
    fig.tight_layout()
    plt.show()
def display_batch(x, titles, rows, cols, figsize=(12, 6)):
    """Display a batch of normalized (N, C, H, W) tensors as a titled grid.

    Note: scales `x` in place from [0, 1] to [0, 255] before plotting,
    mirroring the original behaviour.
    """
    x *= 255  # de-normalize in place: [0, 1] => [0, 255]
    hwc = x.permute(0, 2, 3, 1)            # (N, C, H, W) => (N, H, W, C)
    pixels = hwc.numpy().astype(np.uint8)  # to integer pixel values
    pixels = pixels.reshape(*pixels.shape[:3])  # drop the single channel
    display_grid(pixels, titles, rows, cols, figsize)
def set_seed(seed=0):
    """Seed Python's, NumPy's and PyTorch's PRNGs for reproducibility.

    Parameters
    ----------
    seed : int, default 0
        Value fed to each generator.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
# reproducibilidad
set_seed()
```
## 2 Datos
### 2.1 Tuberias de datos con PyTorch
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/mnist_pipeline.png"/>
Tuberia de datos para MNIST.
### 2.2 Exploración
### 2.3 Cargadores de datos
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/eval_trn_tst.svg" width="450"/>
Entrenamiento con una partición de entrenamiento y prueba.
#### Entrenamiento
#### Prueba
## 3 Modelo
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/fcn_arch.png"/>
Arquitectura de la red completamente conectada.
### 3.1 Definición de la arquitectura
### 3.2 Instancia de la arquitectura
### 3.3 Inspección de la arquitectura
## 4 Entrenamiento
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/supervisado.svg" width="700"/>
Ciclo de entrenamiento supervisado.
### 4.1 Ciclo de entrenamiento
Entrenamos un modelo:
### 4.2 Gráficas de pérdidas y exactitud
## 5 Evaluación
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/eval_trn_tst.svg" width="450"/>
Entrenamiento con una partición de entrenamiento y prueba.
### 5.1 Evaluación final
### 5.2 Inferencia
| github_jupyter |
```
from collections import defaultdict, OrderedDict
import warnings
import gffutils
import pybedtools
import pandas as pd
import copy
import os
import re
from gffutils.pybedtools_integration import tsses
from copy import deepcopy
from collections import OrderedDict, Callable
import errno
def mkdir_p(path):
    """Create directory *path* like ``mkdir -p``.

    Parents are created as needed and an already-existing directory is
    not an error; any other failure (permissions, a regular file at
    *path*, ...) still raises ``OSError``.  ``exist_ok=True`` replaces
    the old errno-inspection idiom with the same semantics.
    """
    os.makedirs(path, exist_ok=True)
class DefaultOrderedDict(OrderedDict):
    """An OrderedDict with ``collections.defaultdict`` semantics.

    Missing keys are materialised on access via ``default_factory``
    while insertion order is preserved.
    Source: http://stackoverflow.com/a/6190500/562769
    """

    def __init__(self, default_factory=None, *a, **kw):
        # Use the builtin callable(): isinstance(x, Callable) with
        # Callable imported from `collections` breaks on Python >= 3.10
        # (the ABCs moved to collections.abc).
        if default_factory is not None and not callable(default_factory):
            raise TypeError('first argument must be callable')
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory

    def __getitem__(self, key):
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        # Store the freshly built default so later reads see the same object.
        self[key] = value = self.default_factory()
        return value

    def __reduce__(self):
        # Pickle support: rebuild with (default_factory,) then replay items.
        if self.default_factory is None:
            args = tuple()
        else:
            args = self.default_factory,
        # The pickle protocol requires an *iterator* (not a view) as the
        # fifth element of the reduce tuple.
        return type(self), args, None, None, iter(self.items())

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        # Shallow copy: same factory, same (shared) values.
        return type(self)(self.default_factory, self)

    def __deepcopy__(self, memo):
        # list() the items first: copy.deepcopy() cannot handle the live
        # odict_items view object on Python 3 (it is not picklable).
        return type(self)(self.default_factory,
                          copy.deepcopy(list(self.items()), memo))

    def __repr__(self):
        return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
                                               OrderedDict.__repr__(self))
# Cluster-specific absolute paths -- edit these before reusing elsewhere.
gtf = '/panfs/qcb-panasas/skchoudh/genomes/hg38/annotation/Homo_sapiens.GRCh38.96.gtf'  # Ensembl GRCh38 release-96 GTF
gtf_db = '/panfs/qcb-panasas/skchoudh/genomes/hg38/annotation/Homo_sapiens.GRCh38.96.gtf.db'  # gffutils sqlite db built from it
prefix = '/panfs/qcb-panasas/skchoudh/github_projects/riboraptor/riboraptor/annotation/hg38/v96/'  # output dir for BED files
chrsizes = '/panfs/qcb-panasas/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'  # chromosome sizes (needed for promoter slop)
mkdir_p(prefix)
def create_gene_dict(db):
    '''
    Index every feature from db.all_features() into a nested dict:

        gene_id -> 'gene'         -> gene Feature
        gene_id -> transcript_id  -> featuretype -> [Feature, ...]

    Gene lines with multiple gene_ids abort the scan with a warning.
    '''
    gene_dict = DefaultOrderedDict(lambda: DefaultOrderedDict(lambda: DefaultOrderedDict(list)))
    for line_no, feature in enumerate(db.all_features()):
        gene_ids = feature.attributes['gene_id']
        feature_type = feature.featuretype
        if feature_type == 'gene':
            if len(gene_ids) != 1:
                # Fix: the original called logging.warning() but `logging`
                # is never imported in this file, so hitting this branch
                # raised NameError.  `warnings` IS imported at the top.
                warnings.warn('Found multiple gene_ids on line {} in gtf'.format(line_no))
                break
            else:
                gene_id = gene_ids[0]
                gene_dict[gene_id]['gene'] = feature
        else:
            # Non-gene features are filed under every (gene, transcript)
            # pair they are annotated with.
            transcript_ids = feature.attributes['transcript_id']
            for gene_id in gene_ids:
                for transcript_id in transcript_ids:
                    gene_dict[gene_id][transcript_id][feature_type].append(feature)
    return gene_dict
# Build the gffutils database from the GTF.  force=True overwrites any
# existing db; transcript/gene inference is disabled because the Ensembl
# GTF already contains explicit gene and transcript records.
db = gffutils.create_db(gtf, dbfn=gtf_db,
                        merge_strategy='merge',
                        force=True,
                        disable_infer_transcripts=True,
                        disable_infer_genes=True)
# Reopen read-only with stable feature ordering, then index all
# features by gene/transcript for the per-gene loops below.
db = gffutils.FeatureDB(gtf_db, keep_order=True)
gene_dict = create_gene_dict(db)
def get_gene_list(gene_dict):
    """Return the gene ids present in *gene_dict* as a list.

    Dict keys are already unique; the set() pass is kept for parity with
    the original de-duplication intent (order is unspecified).
    """
    unique_ids = set(gene_dict.keys())
    return list(unique_ids)
def get_UTR_regions(utrs, cds):
    """Split UTR features into 5' and 3' lists relative to the CDS span.

    A UTR lying upstream of the first CDS base is 5' on the '+' strand
    (3' on '-'); one lying downstream of the last CDS base is 3' on '+'
    (5' on '-').  UTRs that straddle a CDS boundary are trimmed to abut
    it; UTRs entirely inside the CDS span are dropped, matching the
    original behaviour.  Input features are never mutated (deepcopy).
    """
    if not cds:
        return [], []
    ordered_cds = sorted(cds, key=lambda region: region.start)
    cds_first_start = ordered_cds[0].start
    cds_last_stop = ordered_cds[-1].stop
    five_prime, three_prime = [], []
    for source_utr in utrs:
        utr = deepcopy(source_utr)  # do not mutate the caller's features
        if utr.start < cds_first_start:
            # Upstream of the CDS; clip any overlap into the CDS.
            if utr.stop >= cds_first_start:
                utr.stop = cds_first_start - 1
            (five_prime if utr.strand == '+' else three_prime).append(utr)
        elif utr.stop > cds_last_stop:
            # Downstream of the CDS; clip any overlap into the CDS.
            if utr.start <= cds_last_stop:
                utr.start = cds_last_stop + 1
            (three_prime if utr.strand == '+' else five_prime).append(utr)
    return five_prime, three_prime
def create_bed(regions, bedtype='0'):
    '''Serialise features to BED6 text.

    Parameters
    ----------
    regions : iterable of features exposing .chrom/.start/.stop/.strand
        and a single-valued 'gene_id' attribute list
    bedtype : '0' or '1'
        '0' converts the 1-based GTF start to 0-based BED; '1' keeps it.

    Returns
    -------
    str
        One tab-separated line per region: chrom, start, stop,
        gene_id (trailing ".<version>" stripped), '.', strand.
    '''
    lines = []
    for region in regions:
        assert len(region.attributes['gene_id']) == 1
        ## GTF start is 1-based, so shift by one while writing
        ## to 0-based BED format
        if bedtype == '0':
            start = region.start - 1
        else:
            start = region.start
        # Fix: raw string for the regex -- '\.\d+' without r'' relies on
        # deprecated invalid-escape-sequence behaviour.
        name = re.sub(r'\.\d+', '', region.attributes['gene_id'][0])
        lines.append('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
            region.chrom, start, region.stop, name, '.', region.strand))
    # join() instead of repeated += avoids quadratic string building.
    return ''.join(lines)
def rename_regions(regions, gene_id):
    """Stamp *gene_id* onto every region's 'gene_id' attribute.

    Mutates the regions in place and returns them as a list; an empty
    input yields [].
    """
    region_list = list(regions)
    if not region_list:
        return []
    for region in region_list:
        region.attributes['gene_id'] = gene_id
    return region_list
def merge_regions(db, regions):
    """Merge overlapping features via the gffutils db, sorted by start.

    Returns [] for empty input instead of calling db.merge.
    """
    region_list = list(regions)
    if not region_list:
        return []
    region_list.sort(key=lambda region: region.start)
    return db.merge(region_list)
def merge_regions_nostrand(db, regions):
    """Like merge_regions(), but merges across strands.

    Returns [] for empty input instead of calling db.merge.
    """
    region_list = list(regions)
    if not region_list:
        return []
    region_list.sort(key=lambda region: region.start)
    return db.merge(region_list, ignore_strand=True)
# Accumulate BED text per annotation class across all genes, then write
# sorted, gzip-compressed BED files under `prefix`.
utr5_bed = ''
utr3_bed = ''
gene_bed = ''
exon_bed = ''
intron_bed = ''
start_codon_bed = ''
stop_codon_bed = ''
cds_bed = ''
gene_list = []
for gene_id in get_gene_list(gene_dict):
    gene_list.append(gene_dict[gene_id]['gene'])
    utr5_regions, utr3_regions = [], []
    exon_regions, intron_regions = [], []
    # NOTE(review): 'star_codon_regions' (typo for "start_") is assigned
    # but never used below; codons are handled in a later cell.
    star_codon_regions, stop_codon_regions = [], []
    cds_regions = []
    utr_regions = []
    # Pool CDS/exon/UTR features over all transcripts of this gene.
    for feature in gene_dict[gene_id].keys():
        if feature == 'gene':
            continue
        cds = list(gene_dict[gene_id][feature]['CDS'])
        exons = list(gene_dict[gene_id][feature]['exon'])
        utrs = list(gene_dict[gene_id][feature]['UTR'])
        cds = sorted(list(cds), key=lambda x: x.start)
        exons = sorted(list(exons), key=lambda x: x.start)
        utrs = sorted(list(utrs), key=lambda x: x.start)
        # Introns are the gaps between this transcript's merged exons.
        merged_exons = merge_regions(db, exons)
        introns = db.interfeatures(merged_exons)
        exon_regions += exons
        intron_regions += introns
        cds_regions += cds
        utr_regions += utrs
    cds_regions = sorted(list(cds_regions), key=lambda x: x.start)
    utr_regions = sorted(list(utr_regions), key=lambda x: x.start)
    # Classify UTRs as 5'/3' relative to the gene-level CDS span.
    utr5_regions, utr3_regions = get_UTR_regions(utr_regions, cds_regions)
    # Merge overlapping regions and re-label everything with the gene id.
    merged_utr5 = merge_regions(db, utr5_regions)
    renamed_utr5 = rename_regions(merged_utr5, gene_id)
    merged_utr3 = merge_regions(db, utr3_regions)
    renamed_utr3 = rename_regions(merged_utr3, gene_id)
    merged_exons = merge_regions(db, exon_regions)
    renamed_exons = rename_regions(merged_exons, gene_id)
    merged_introns = merge_regions(db, intron_regions)
    renamed_introns = rename_regions(merged_introns, gene_id)
    merged_cds = merge_regions(db, cds_regions)
    renamed_cds = rename_regions(merged_cds, gene_id)
    utr3_bed += create_bed(renamed_utr3)
    utr5_bed += create_bed(renamed_utr5)
    exon_bed += create_bed(renamed_exons)
    intron_bed += create_bed(renamed_introns)
    cds_bed += create_bed(renamed_cds)
gene_bed = create_bed(gene_list)
# Turn the accumulated BED text into BedTool objects ...
gene_bedtool = pybedtools.BedTool(gene_bed, from_string=True)
utr5_bedtool = pybedtools.BedTool(utr5_bed, from_string=True)
utr3_bedtool = pybedtools.BedTool(utr3_bed, from_string=True)
exon_bedtool = pybedtools.BedTool(exon_bed, from_string=True)
intron_bedtool = pybedtools.BedTool(intron_bed, from_string=True)
cds_bedtool = pybedtools.BedTool(cds_bed, from_string=True)
# ... subtract any residual CDS overlap from the UTRs ...
utr5_cds_subtracted = utr5_bedtool.subtract(cds_bedtool)
utr3_cds_subtracted = utr3_bedtool.subtract(cds_bedtool)
# ... and write everything out, sorted and gzip-compressed.
utr5_cds_subtracted.remove_invalid().sort().saveas(os.path.join(prefix, 'utr5.bed.gz'))
utr3_cds_subtracted.remove_invalid().sort().saveas(os.path.join(prefix, 'utr3.bed.gz'))
gene_bedtool.remove_invalid().sort().saveas(os.path.join(prefix, 'gene.bed.gz'))
exon_bedtool.remove_invalid().sort().saveas(os.path.join(prefix, 'exon.bed.gz'))
intron_bedtool.remove_invalid().sort().saveas(os.path.join(prefix, 'intron.bed.gz'))
cds_bedtool.remove_invalid().sort().saveas(os.path.join(prefix, 'cds.bed.gz'))
# Derive 1-bp start/stop codon anchor points per gene and write them,
# then build TSS and +/-1500 bp promoter BED files.
for gene_id in get_gene_list(gene_dict):
    start_codons = []
    stop_codons = []
    for start_codon in db.children(gene_id, featuretype='start_codon'):
        ## 1 -based stop
        ## 0-based start handled while converting to bed
        # Collapse the codon to its first base (stop := start).
        start_codon.stop = start_codon.start
        start_codons.append(start_codon)
    for stop_codon in db.children(gene_id, featuretype='stop_codon'):
        # Collapse the codon to the base just after its last position.
        stop_codon.start = stop_codon.stop
        stop_codon.stop = stop_codon.stop+1
        stop_codons.append(stop_codon)
    merged_start_codons = merge_regions(db, start_codons)
    renamed_start_codons = rename_regions(merged_start_codons, gene_id)
    merged_stop_codons = merge_regions(db, stop_codons)
    renamed_stop_codons = rename_regions(merged_stop_codons, gene_id)
    start_codon_bed += create_bed(renamed_start_codons)
    stop_codon_bed += create_bed(renamed_stop_codons)
start_codon_bedtool = pybedtools.BedTool(start_codon_bed, from_string=True)
stop_codon_bedtool = pybedtools.BedTool(stop_codon_bed, from_string=True)
start_codon_bedtool.remove_invalid().sort().saveas(os.path.join(prefix, 'start_codon.bed.gz'))
stop_codon_bedtool.remove_invalid().sort().saveas(os.path.join(prefix, 'stop_codon.bed.gz'))
stop_codon_bedtool
# Transcription start sites (merged where overlapping), then promoters
# as strand-aware +/-1500 bp windows around each TSS.
tss = tsses(db, as_bed6=True, merge_overlapping=True)
tss.remove_invalid().sort().saveas(os.path.join(prefix, 'tss.bed'))
promoter = tss.slop(l=1500, r=1500, s=True, g=chrsizes)
promoter.remove_invalid().sort().saveas(os.path.join(prefix, 'promoter.1500.bed.gz'))
promoter.to_dataframe().head()
```
| github_jupyter |
# Variance Component Analysis
This notebook illustrates variance components analysis for two-level
nested and crossed designs.
```
import numpy as np
import statsmodels.api as sm
from statsmodels.regression.mixed_linear_model import VCSpec
import pandas as pd
```
Make the notebook reproducible
```
np.random.seed(3123)
```
## Nested analysis
In our discussion below, "Group 2" is nested within "Group 1". As a
concrete example, "Group 1" might be school districts, with "Group
2" being individual schools. The function below generates data from
such a population. In a nested analysis, the group 2 labels that
are nested within different group 1 labels are treated as
independent groups, even if they have the same label. For example,
two schools labeled "school 1" that are in two different school
districts are treated as independent schools, even though they have
the same label.
```
def generate_nested(n_group1=200, n_group2=20, n_rep=10, group1_sd=2,
                    group2_sd=3, unexplained_sd=4):
    """Simulate a two-level nested design.

    Each of the ``n_group1`` top-level groups contains ``n_group2``
    subgroups with ``n_rep`` replicate observations each, so
    y = group1 effect + nested group2 effect + noise.  Returns a
    DataFrame with columns "y", "group1", "group2" (labels as floats).
    """
    block = n_group2 * n_rep
    # Group 1 labels: each label repeated once per (subgroup, replicate).
    group1 = np.repeat(np.arange(n_group1), block).astype(np.float64)
    # Group 1 random effects, broadcast over each group's block.
    effects1 = np.repeat(group1_sd * np.random.normal(size=n_group1), block)
    # Group 2 labels: the 0..n_group2-1 pattern tiled across all groups.
    group2 = np.tile(np.repeat(np.arange(n_group2), n_rep),
                     n_group1).astype(np.float64)
    # Nested group 2 effects: an independent draw per (group1, group2) cell.
    effects2 = np.repeat(group2_sd * np.random.normal(size=n_group1 * n_group2),
                         n_rep)
    # Unexplained residual noise.
    noise = unexplained_sd * np.random.normal(size=n_group1 * n_group2 * n_rep)
    return pd.DataFrame({"y": effects1 + effects2 + noise,
                         "group1": group1, "group2": group2})
```
Generate a data set to analyze.
```
df = generate_nested()
```
Using all the default arguments for `generate_nested`, the population
values of "group 1 Var" and "group 2 Var" are 2^2=4 and 3^2=9,
respectively. The unexplained variance, listed as "scale" at the
top of the summary table, has population value 4^2=16.
```
# Nested fit via formulas: group1 is the grouping variable, with a
# random intercept per group1 and a variance component for group2
# nested within group1.
model1 = sm.MixedLM.from_formula("y ~ 1", re_formula="1", vc_formula={"group2": "0 + C(group2)"},
                                 groups="group1", data=df)
result1 = model1.fit()
print(result1.summary())
```
If we wish to avoid the formula interface, we can fit the same model
by building the design matrices manually.
```
def f(x):
    """One-hot encode the group2 levels of data frame *x*.

    Returns (mat, colnames): ``mat`` is n x k with a 1 marking each
    row's group2 level; ``colnames`` are the sorted levels as strings.
    """
    g2 = x.group2
    levels = np.sort(g2.unique())
    col_of = {level: j for j, level in enumerate(levels)}
    n_rows = x.shape[0]
    mat = np.zeros((n_rows, len(levels)))
    # NOTE(review): g2[i] is a pandas *label* lookup -- this assumes x
    # carries the default RangeIndex 0..n-1.
    for i in range(n_rows):
        mat[i, col_of[g2[i]]] = 1
    colnames = ["%d" % level for level in levels]
    return mat, colnames
```
Then we set up the variance components using the VCSpec class.
```
# Build the per-group1 design matrices and wrap them in a VCSpec
# (one variance-component name, one matrix/column-name list per group).
vcm = df.groupby("group1").apply(f).to_list()
mats = [x[0] for x in vcm]
colnames = [x[1] for x in vcm]
names = ["group2"]
vcs = VCSpec(names, [colnames], [mats])
```
Finally we fit the model. It can be seen that the results of the
two fits are identical.
```
# Same nested model specified without formulas; results match model1.
oo = np.ones(df.shape[0])  # intercept-only fixed and random effects
model2 = sm.MixedLM(df.y, oo, exog_re=oo, groups=df.group1, exog_vc=vcs)
result2 = model2.fit()
print(result2.summary())
```
## Crossed analysis
In a crossed analysis, the levels of one group can occur in any
combination with the levels of the other group. The groups in
Statsmodels MixedLM are always nested, but it is possible to fit a
crossed model by having only one group, and specifying all random
effects as variance components. Many, but not all crossed models
can be fit in this way. The function below generates a crossed data
set with two levels of random structure.
```
def generate_crossed(n_group1=100, n_group2=100, n_rep=4, group1_sd=2,
                     group2_sd=3, unexplained_sd=4):
    """Simulate a crossed two-way random effects design.

    Every group1 level can co-occur with every group2 level.  Returns a
    DataFrame with columns "y", "group1", "group2" and
    ``n_group1 * n_group2 * n_rep`` rows.
    """
    n_obs = n_group1 * n_group2 * n_rep
    # Group 1 indicators.  (Fix: ``np.int`` was removed in NumPy 1.24;
    # use the builtin int.)
    group1 = np.kron(np.arange(n_group1, dtype=int),
                     np.ones(n_group2 * n_rep, dtype=int))
    group1 = group1[np.random.permutation(len(group1))]
    # Group 1 effects
    u = group1_sd * np.random.normal(size=n_group1)
    effects1 = u[group1]
    # Group 2 indicators.  (Fix: the repeat count must be
    # n_group1 * n_rep so len(group2) == n_obs; the original used
    # n_group2 * n_rep, which only worked when n_group1 == n_group2.)
    group2 = np.kron(np.arange(n_group2, dtype=int),
                     np.ones(n_group1 * n_rep, dtype=int))
    group2 = group2[np.random.permutation(len(group2))]
    # Group 2 effects
    u = group2_sd * np.random.normal(size=n_group2)
    effects2 = u[group2]
    # Unexplained noise and response.
    e = unexplained_sd * np.random.normal(size=n_obs)
    y = effects1 + effects2 + e
    df = pd.DataFrame({"y": y, "group1": group1, "group2": group2})
    return df
```
Generate a data set to analyze.
```
df = generate_crossed()
```
Next we fit the model, note that the `groups` vector is constant.
Using the default parameters for `generate_crossed`, the level 1
variance should be 2^2=4, the level 2 variance should be 3^2=9, and
the unexplained variance should be 4^2=16.
```
# Crossed fit: a single constant group (`groups=oo`) with both factors
# supplied as variance components.
vc = {"g1": "0 + C(group1)", "g2": "0 + C(group2)"}
oo = np.ones(df.shape[0])
model3 = sm.MixedLM.from_formula("y ~ 1", groups=oo, vc_formula=vc, data=df)
result3 = model3.fit()
print(result3.summary())
```
If we wish to avoid the formula interface, we can fit the same model
by building the design matrices manually.
```
def f(g):
    """One-hot encode the levels of series *g*.

    Returns ([mat], [colnames]): single-element lists, the layout
    VCSpec expects for one variance component.
    """
    levels = np.sort(g.unique())
    col_of = {level: j for j, level in enumerate(levels)}
    mat = np.zeros((len(g), len(levels)))
    # NOTE(review): g[i] is a pandas *label* lookup -- this assumes g
    # carries the default RangeIndex 0..n-1.
    for i in range(len(g)):
        mat[i, col_of[g[i]]] = 1
    colnames = ["%d" % level for level in levels]
    return [mat], [colnames]
# One VCSpec entry per crossed factor (both defined over the single
# constant group used in the crossed fit).
vcm = [f(df.group1), f(df.group2)]
mats = [x[0] for x in vcm]
colnames = [x[1] for x in vcm]
names = ["group1", "group2"]
vcs = VCSpec(names, colnames, mats)
```
Here we fit the model without using formulas, it is simple to check
that the results for models 3 and 4 are identical.
```
# Crossed model without formulas; results match model3.
oo = np.ones(df.shape[0])  # intercept column / single constant group
model4 = sm.MixedLM(df.y, oo[:, None], exog_re=None, groups=oo, exog_vc=vcs)
result4 = model4.fit()
print(result4.summary())
```
| github_jupyter |
### Import libraries and modify notebook settings
```
# Import libraries
import os
import sys
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import BatchNormalization
from keras.utils import np_utils
from keras.utils import HDF5Matrix
from keras.callbacks import Callback
from keras.callbacks import ModelCheckpoint
from keras import regularizers
# Modify notebook settings
%matplotlib inline
```
### Create paths to data folders and files
```
# Project layout: all paths are built relative to the project root
# (the notebook is assumed to run from a subdirectory of it).
# Create a variable for the project root directory
proj_root = os.path.join(os.pardir)
# Save the path to the folder that contains
# the interim data sets for modeling:
# /data/interim
interim_data_dir = os.path.join(proj_root,
                                "data",
                                "interim")
# Save path to the folder for the
# spectrogram arrays
spectrogram_arrays_path = os.path.join(interim_data_dir,
                                       "spectrogram_arrays")
# Full path for test_hdf5_path
test_hdf5_path = os.path.join(spectrogram_arrays_path,
                              "spectrogram_arrays_test.hdf5")
# Full path for train_hdf5_path
train_hdf5_path = os.path.join(spectrogram_arrays_path,
                               "spectrogram_arrays_train.hdf5")
# Save the path to the models folder
models_dir = os.path.join(proj_root,
                          "models")
# Full path for my_model.hdf5
model_path = os.path.join(models_dir,
                          "my_model.hdf5")
# Save the path to the models/log folder
models_log_dir = os.path.join(models_dir,
                              "log")
# log_figure file_name
fig_file_name = "log_figure"
# Save the path to the log_figure
log_fig_path = os.path.join(models_log_dir,
                            fig_file_name)
# log_dataframe.csv file_name
log_file_name = "log_dataframe.csv"
# Save the path to the log dataframe csv
log_file_path = os.path.join(models_log_dir,
                             log_file_name)
# Save the path to the models/checkpoints folder
models_checkpoints_dir = os.path.join(models_dir,
                                      "checkpoints")
# add the 'src' directory as one where we can import modules
src_dir = os.path.join(proj_root, "src")
sys.path.append(src_dir)
```
## Create objects for X_train, y_train, X_test, & y_test
```
# Lazily reference the train/test arrays stored in HDF5; HDF5Matrix
# keeps the data on disk and reads batches on demand.
X_train = HDF5Matrix(train_hdf5_path,
                     'spectrogram_arrays_X_train')
y_train = HDF5Matrix(train_hdf5_path,
                     'spectrogram_arrays_y_train')
X_test = HDF5Matrix(test_hdf5_path,
                    'spectrogram_arrays_X_test')
y_test = HDF5Matrix(test_hdf5_path,
                    'spectrogram_arrays_y_test')
# 6. Preprocess class labels
# One-hot encode the integer class labels for categorical crossentropy.
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print(Y_train.shape)
print(Y_test.shape)
```
# ...
```
# Scan the whole training set once to find the global min/max pixel
# values; these feed the in-model min-max normalization Lambda below.
with h5py.File(train_hdf5_path, "r") as f:
    dset = f['spectrogram_arrays_X_train']
    # Seed the running extrema with the first example ...
    min_val = np.min(dset[0,:,:,:])
    max_val = np.max(dset[0,:,:,:])
    # ... then fold in every example (index 0 is re-visited; harmless).
    for i in range(dset.len()):
        min_val_new = np.min(dset[i,:,:,:])
        min_val_old = min_val
        min_val = np.minimum(min_val_new, min_val_old)
        max_val_new = np.max(dset[i,:,:,:])
        max_val_old = max_val
        max_val = np.maximum(max_val_new, max_val_old)
min_val
max_val
```
# Model...
```
#log_fig_path = log_fig_path
class User_Defined_Callback(Callback):
    """Keras callback that appends per-epoch metrics to a CSV log and
    redraws a live loss/accuracy figure after every epoch.

    Relies on the module-level `log_file_path` and `log_fig_path`.
    The 'acc'/'val_acc' metric keys match older Keras versions
    (newer Keras reports 'accuracy'/'val_accuracy').
    """
    def on_train_begin(self, logs={}):
        # NOTE: mutable default `logs={}` kept to match the Keras
        # callback signature convention; it is never mutated here.
        self.i = 1                # 1-based epoch counter
        self.x = []               # epoch numbers plotted on the x-axis
        self.accuracy = []
        self.val_accuracy = []
        self.losses = []
        self.val_losses = []
        self.fig = plt.figure()
        self.logs = []            # raw per-epoch logs dicts
        # Start with an empty CSV so the file exists from epoch 0.
        self.df_log = pd.DataFrame()
        self.df_log.to_csv(log_file_path)
    def on_epoch_end(self, epoch, logs={}):
        # Update lists
        self.logs.append(logs)
        self.x.append(self.i)
        self.accuracy.append(logs.get('acc'))
        self.val_accuracy.append(logs.get('val_acc'))
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.i += 1
        # Create log dataframe (rebuilt from scratch each epoch)
        self.df_log = pd.DataFrame({'epoch_x': self.x,
                                    'accuracy' : self.accuracy,
                                    'val_accuracy' : self.val_accuracy,
                                    'losses' : self.losses,
                                    'val_losses' : self.val_losses,
                                    'logs' : self.logs})
        # Reorder dataframe columns
        self.df_log = self.df_log[['epoch_x', 'accuracy', 'val_accuracy', 'losses', 'val_losses', 'logs']]
        # Save log dataframe to csv (overwrites the previous snapshot)
        self.df_log.to_csv(log_file_path)
        # Create summary plots of Loss vs Epoch and Accuracy vs Epoch,
        # replacing the previous figure in the notebook output.
        clear_output(wait=True)
        fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8,4))
        ax1.plot(self.x, self.losses, label="loss")
        ax1.plot(self.x, self.val_losses, label="val_loss")
        ax1.axes.set_xlabel('Epoch')
        ax1.axes.set_ylabel('Loss Function')
        ax1.axes.set_title('Loss vs Epoch')
        ax1.legend()
        ax2.plot(self.x, self.accuracy, label="acc")
        ax2.plot(self.x, self.val_accuracy, label="val_acc")
        ax2.axes.set_xlabel('Epoch')
        ax2.axes.set_ylabel('Accuracy')
        ax2.axes.set_title('Accuracy vs Epoch')
        ax2.legend()
        plt.tight_layout()
        # Persist the latest figure alongside the CSV log.
        plt.savefig(log_fig_path, dpi=300)
        plt.show()
user_defined_callback = User_Defined_Callback()
# Checkpoint file names (epoch number + validation accuracy baked in)
checkpoint_file_name="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
# Save the path to the models/checkpoints folder
models_checkpoints_path = os.path.join(models_checkpoints_dir,
                                       checkpoint_file_name)
# Keep only checkpoints that improve validation accuracy.
checkpoint = ModelCheckpoint(models_checkpoints_path,
                             monitor='val_acc', verbose=0,
                             save_best_only=True, mode='max')
# Define the callbacks_list used by model.fit below
callbacks_list = [checkpoint, user_defined_callback]
# For reproducibility
np.random.seed(42)
```
# Add BatchNormalization() layers
```
def minmax_norm(x, min_val, max_val):
    """Linearly rescale *x* from [min_val, max_val] onto [0, 1].

    Equivalent to (x - min_val) / (max_val - min_val), written with
    NumPy ufuncs so it also works on Keras/NumPy tensors.
    """
    shifted = np.subtract(x, min_val)
    span = max_val - min_val
    return np.true_divide(shifted, span)
from keras.layers import Lambda
# Define model architecture: in-model normalization followed by three
# conv blocks and a small regularized dense head (10-way softmax).
model = Sequential()
# Input Layer (identity activation just to fix the input shape)
model.add(Activation(None, input_shape=(96, 173, 1)))
#Min_max_scale: rescale pixels to [0, 1] using the global extrema
# computed from the training set above.
model.add(Lambda(minmax_norm, arguments={"min_val": min_val, "max_val": max_val}))
model.add(BatchNormalization())
# Input Layer
#model.add(Activation(None, input_shape=(96, 173, 1)))
#model.add(BatchNormalization())
# Convolution Layer 1
model.add(Convolution2D(24, (5, 5), activation='relu',
                        input_shape=(96, 173, 1)))
model.add(MaxPooling2D(pool_size=(4,2)))
model.add(BatchNormalization())
# Convolution Layer 2
model.add(Convolution2D(48, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(4,2)))
model.add(BatchNormalization())
# Convolution Layer 3 ('same' padding keeps the spatial size)
model.add(Convolution2D(48, (5, 5),
                        padding='same',
                        activation='relu'))
model.add(BatchNormalization())
# Dense Layer with L2 weight and activity regularization
model.add(Flatten())
model.add(Dense(64, activation='relu',
                kernel_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001)))
# Softmax Layer (10 classes)
#model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax',
                kernel_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001)))
# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Fit model on training data.  shuffle="batch" is required when
# training directly from HDF5Matrix data.
model.fit(X_train, Y_train,
          batch_size=100,
          epochs=50,
          verbose=1,
          callbacks=callbacks_list,
          validation_data=(X_test, Y_test),
          shuffle="batch")
# Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
# score[0] is the loss; score[1] is the accuracy metric.
print('test score:', score[1])
model.save(model_path) # creates a HDF5 file 'my_model.hdf5'
```
from keras.layers import Convolution1D
# Experimental variant: a single Conv1D layer replaces the Conv2D
# stack (the 2D blocks are left commented out for comparison).
# Define model architecture
model = Sequential()
# Input Layer (identity activation just to fix the input shape)
model.add(Activation(None, input_shape=(96, 173, 1)))
#Min_max_scale: rescale pixels to [0, 1] using the global extrema
model.add(Lambda(minmax_norm, arguments={"min_val": min_val, "max_val": max_val}))
model.add(BatchNormalization())
# Input Layer
#model.add(Activation(None, input_shape=(96, 173, 1)))
#model.add(BatchNormalization())
# Convolution Layer 1 (kernel spans the full 173-sample time axis)
model.add(Convolution1D(24, 173, activation='relu',
                        input_shape=(96, 173, 1)))
#model.add(MaxPooling2D(pool_size=(4,2)))
model.add(BatchNormalization())
# # Convolution Layer 2
# model.add(Convolution2D(48, (5, 5), activation='relu'))
# model.add(MaxPooling2D(pool_size=(4,2)))
# model.add(BatchNormalization())
# # Convolution Layer 3
# model.add(Convolution2D(48, (5, 5),
#                         padding='same',
#                         activation='relu'))
# model.add(BatchNormalization())
# Dense Layer with L2 weight and activity regularization
model.add(Flatten())
model.add(Dense(64, activation='relu',
                kernel_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001)))
# Softmax Layer (10 classes)
#model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax',
                kernel_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001)))
# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Fit model on training data (shuffle="batch" required for HDF5Matrix)
model.fit(X_train, Y_train,
          batch_size=100,
          epochs=50,
          verbose=1,
          callbacks=callbacks_list,
          validation_data=(X_test, Y_test),
          shuffle="batch")
# Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
# score[0] is the loss; score[1] is the accuracy metric.
print('test score:', score[1])
| github_jupyter |
# ism3d.uvhelper: visibility imaging
## Setup
We first import essential API functions / modules from `ism3d` and other libraries
**Used ISM3D Functions:**
* `ism3d.logger.logger_config`
* `ism3d.logger.logger_status`
```
# Notebook setup: move into the output directory, load the shared
# helpers, configure IPython extensions and the ism3d logger.
nb_dir=_dh[0]  # _dh: IPython's directory history; [0] = start dir
os.chdir(nb_dir+'/../output/mockup')
sys.path.append(nb_dir)
from notebook_setup import *
%matplotlib inline
%config InlineBackend.figure_format = "png" # ‘png’, ‘retina’, ‘jpeg’, ‘svg’, ‘pdf’.
%reload_ext wurlitzer
%reload_ext memory_profiler
%reload_ext line_profiler
# Log INFO-level messages to ism3d.log; keep the terminal quiet.
ism3d.logger_config(logfile='ism3d.log',loglevel='INFO',logfilelevel='INFO',log2term=False)
print(''+ism3d.__version__)
print('working dir: {}\n'.format(os.getcwd()))
```
## Data Import
We import the visibility data from CASA measurement sets into the internal `uvdata` variable (essentially a simple nested dict, not a class yet) and also save them into compressed HDF5 for compact storage and easy retrieval.
Here three ALMA/VLA datasets are used:
* mockup1: based on the VLA GN20 observation 1-channel
* mockup2: based on the ALMA G09 observation 1-channel
* mockup3: based on the ALMA G09 observation 240-channel, only 2mins integration
* mockup4: based on the ALMA G09 observation 240-channel, all on-source data
**Used ISM3D Functions:**
* `ism3d.uvhelper.io.to_hdf5`
* `ism3d.uvhelper.ms.rmPointing`
* `ism3d.uvhelper.ms.read_ms`
```
# Prepare the four mockup measurement sets, then convert each to HDF5.
# mockup1: one channel extracted from the VLA GN20 observation.
os.system('rm -rf '+'mockup1_basis.ms')
mstransform(vis='../data/gn20/vla/AC974.100409.ms',outputvis='mockup1_basis.ms',
            spw='0:60',datacolumn='data')
# mockup2: one channel from the ALMA G09 observation, pointing
# sub-tables stripped.
os.system('rm -rf '+'mockup2_basis.ms')
mstransform(vis='../data/g09/alma/bb4.ms',outputvis='mockup2_basis.ms',
            spw='*:60',datacolumn='data')
rmPointing('mockup2_basis.ms',verbose=False)
# mockup3: all 240 channels but only a 2-minute time range.
os.system('rm -rf '+'mockup3_basis.ms')
mstransform(vis='../../data/g09/alma/bb4.ms',outputvis='mockup3_basis.ms',
            spw='',timerange='06:08:00~06:10:00',datacolumn='data')
rmPointing('mockup3_basis.ms',verbose=False)
# mockup4: the full on-source data set (symlinked, not copied).
os.system('rm -rf '+'mockup4_basis.ms')
os.system('ln -s ../../data/g09/alma/bb4.ms '+'mockup4_basis.ms')
for model_name in ['mockup1','mockup2','mockup3','mockup4']:
    # option 1
    #uvdata={}
    #read_ms(vis=model_name+'_basis.ms',dataset=uvdata,keyrule='basename')
    # option 2
    uvdata=read_ms(vis=model_name+'_basis.ms')
    # save to / retrieve from .h5 (gzip-compressed for compact storage)
    to_hdf5(uvdata,outname=model_name+'_basis.h5',checkname=False,compression='gzip')
```
## Imaging
We image the visibility using two different approaches implemented in `ism3d`:
* `ism3d.uvhelper.invert`: a function wrapping around casa.tclean etc. to create dirty maps in an organized fashion
* `ism3d.uvhelper.invert_ft`: the same purpose as above, but based on FINUFFT
**Used ISM3D Functions:**
* `ism3d.uvhelper.imager.invert`
* `ism3d.uvhelper.io.from_hdf5`
* `ism3d.uvhelper.invert`
* `ism3d.uvhelper.invert_ft`
* `ism3d.uvhelper.make_psf`
* `ism3d.uvhelper.ft.advise_header`
* `ism3d.xyhelper.cube.hextract`
```
# Image one mockup data set twice -- via CASA tclean (invert) and via
# FINUFFT (invert_ft/make_psf) -- and write per-pixel difference cubes.
model_name='mockup4'
uvdata=from_hdf5(model_name+'_basis.h5')
# Derive an imaging header (cell size / image size) from the uv data.
header=advise_header(uvdata['uvw'],
                     uvdata['phasecenter'],
                     uvdata['chanfreq'],
                     uvdata['chanwidth'],
                     antsize=12*u.m,sortbyfreq=True)
cell=header['CDELT2']<<u.deg
imsize=header['NAXIS1']
print(imsize,cell.to(u.arcsec))
# Approach 1: CASA-based dirty imaging, timed.
tic= time.time()
invert(vis=model_name+'_basis.ms',
       imagename=model_name+'_basis.images/casa',
       weighting='natural',specmode='cubedata',width='',start='',nchan=-1, # width=-1,start=239,nchan=-1,
       cell=cell.to_value(u.arcsec),imsize=[imsize,imsize],onlydm=False,dropstokes=True)
toc= time.time()
print("Elapsed Time: {:>8.2f} seconds # {} \n".format(toc-tic,'ism3d.uvhelper.imager.invert'))
# Approach 2: FINUFFT-based dirty image + PSF, timed and memory-profiled.
tic= time.time()
%memit cube=invert_ft(uvdata=uvdata,header=header,sortbyfreq=True).astype(np.float32)
%memit psf=(make_psf(uvdata=uvdata,header=header,sortbyfreq=True)).astype(np.float32)
toc= time.time()
print("Elapsed Time: {:>8.2f} seconds # {} \n".format(toc-tic,'ism3d.uvhelper.ft.invert_ft/.make_psf'))
fits.writeto(model_name+'_basis.images/nufft.image.fits',cube.T,header,overwrite=True)
fits.writeto(model_name+'_basis.images/nufft.psf.fits',psf.T,header,overwrite=True)
# Difference cubes: nufft minus casa, for image and psf.
for version in ['image','psf']:
    tcube,thdr=fits.getdata(model_name+'_basis.images/casa.'+version+'.fits',header=True)
    cube,hdr=fits.getdata(model_name+'_basis.images/nufft.'+version+'.fits',header=True)
    cube_diff=cube-tcube
    fits.writeto(model_name+'_basis.images/diff.'+version+'.fits',cube_diff,thdr,overwrite=True)
if model_name=='mockup4' or model_name=='mockup3':
    # Drop the first plane of mockup3/4: it is partially flagged and
    # therefore has a different PSF from the rest of the cube.
    for version in ['nufft.image','nufft.psf','casa.image','casa.psf','casa.pb','diff.image','diff.psf']:
        data,header=fits.getdata(model_name+'_basis.images/'+version+'.fits',header=True)
        data_sub,header_sub=hextract(data, header, np.s_[1:,:,:])
        fits.writeto(model_name+'_basis.images/'+version+'.fits',data_sub,header_sub,overwrite=True)
```
## Visualize
Here we demonstrate the visualization capability of ism3d. Specifically, we plot the results from the two imaging approaches and compare their precision.
**Used ISM3D Functions:**
* `ism3d.visualize.nb.make_gif`
* `ism3d.visualize.nb.show_gif`
* `ism3d.visualize.plts.im_grid`
```
# Collect the image/psf/pb/diff cubes (None = empty panel in the grid),
# then render one channel-map figure per sampled channel and combine
# them into an animated GIF.
units=[] ; images=[] ; titles=[]; vmaxs=[]; vmins=[]
for version in ['casa.image','casa.psf','casa.pb','nufft.image','nufft.psf',None,'diff.image','diff.psf']:
    if version is not None:
        data,hdr=fits.getdata(model_name+'_basis.images/'+version+'.fits',header=True)
        titles.append(version)
        # PSFs stay in Jy/beam; everything else is scaled to mJy/beam.
        if 'psf' in titles[-1]:
            images.append(data); units.append("Jy/beam")
        else:
            images.append(data*1e3); units.append("mJy/beam")
        vmaxs.append(np.nanmax(images[-1]))
        vmins.append(np.nanmin(images[-1]))
    else:
        titles.append(None); images.append(None); units.append(None); vmaxs.append(None); vmins.append(None)
# WCS / offset coordinates from the last header read above.
w = WCS(hdr).celestial
coord = SkyCoord(hdr['CRVAL1'], hdr['CRVAL2'], unit="deg")
offset_w=linear_offset_coords(w,coord)
nchan=hdr['NAXIS3']
# Sample roughly five channels across the cube (at least step 1).
stepchan= int(np.maximum(np.floor(int(nchan/5)),1))
fignames=[]
for ichan in range(0,nchan,stepchan):
    #clear_output(wait=True)
    figname=model_name+'_basis.images/chmap/ch{:03d}'.format(ichan)+'.pdf'
    images0=[None if image is None else image[ichan,:,:] for image in images]
    titles0=[None if title is None else title+'['+'{}'.format(ichan)+']' for title in titles ]
    im_grid(images0,offset_w,units=units,titles=titles0,nxy=(3,3),figsize=(9,9),figname=figname,vmins=vmins,vmaxs=vmaxs) ;
    fignames.append(figname)
make_gif(fignames,model_name+'_basis.images/chmap.gif')
show_gif(model_name+'_basis.images/chmap.gif')
```
| github_jupyter |
# Relevancy Analysis
<div class="alert alert-info">
This tutorial is available as an IPython notebook at [Malaya/example/relevancy](https://github.com/huseinzol05/Malaya/tree/master/example/relevancy).
</div>
<div class="alert alert-warning">
This module was only trained on standard language structure, so it is not safe to use it for local (non-standard) language structure.
</div>
```
%%time
import malaya
```
### Models accuracy
We use `sklearn.metrics.classification_report` for accuracy reporting, check at https://malaya.readthedocs.io/en/latest/models-accuracy.html#relevancy-analysis
### labels supported
Default labels for relevancy module.
```
malaya.relevancy.label
```
### Explanation
Positive relevancy: The article or piece of text is relevant, tendency is high to become not a fake news. Can be a positive or negative sentiment.
Negative relevancy: The article or piece of text is not relevant, tendency is high to become a fake news. Can be a positive or negative sentiment.
**Right now relevancy module only support deep learning model**.
```
# Example inputs (in Malay): a known fake-news chain message (expected negative
# relevancy) and a JAKIM clarification article (expected positive relevancy).
negative_text = 'Roti Massimo Mengandungi DNA Babi. Roti produk Massimo keluaran Syarikat The Italian Baker mengandungi DNA babi. Para pengguna dinasihatkan supaya tidak memakan produk massimo. Terdapat pelbagai produk roti keluaran syarikat lain yang boleh dimakan dan halal. Mari kita sebarkan berita ini supaya semua rakyat Malaysia sedar dengan apa yang mereka makna setiap hari. Roti tidak halal ada DNA babi jangan makan ok.'
positive_text = 'Jabatan Kemajuan Islam Malaysia memperjelaskan dakwaan sebuah mesej yang dikitar semula, yang mendakwa kononnya kod E dikaitkan dengan kandungan lemak babi sepertimana yang tular di media sosial. . Tular: November 2017 . Tular: Mei 2014 JAKIM ingin memaklumkan kepada masyarakat berhubung maklumat yang telah disebarkan secara meluas khasnya melalui media sosial berhubung kod E yang dikaitkan mempunyai lemak babi. Untuk makluman, KOD E ialah kod untuk bahan tambah (aditif) dan ianya selalu digunakan pada label makanan di negara Kesatuan Eropah. Menurut JAKIM, tidak semua nombor E yang digunakan untuk membuat sesuatu produk makanan berasaskan dari sumber yang haram. Sehubungan itu, sekiranya sesuatu produk merupakan produk tempatan dan mendapat sijil Pengesahan Halal Malaysia, maka ia boleh digunakan tanpa was-was sekalipun mempunyai kod E-kod. Tetapi sekiranya produk tersebut bukan produk tempatan serta tidak mendapat sijil pengesahan halal Malaysia walaupun menggunakan e-kod yang sama, pengguna dinasihatkan agar berhati-hati dalam memilih produk tersebut.'
```
### List available Transformer models
```
malaya.relevancy.available_transformer()
```
### Load Transformer model
```python
def transformer(model: str = 'xlnet', quantized: bool = False, **kwargs):
"""
Load Transformer relevancy model.
Parameters
----------
model : str, optional (default='bert')
Model architecture supported. Allowed values:
* ``'bert'`` - Google BERT BASE parameters.
* ``'tiny-bert'`` - Google BERT TINY parameters.
* ``'albert'`` - Google ALBERT BASE parameters.
* ``'tiny-albert'`` - Google ALBERT TINY parameters.
* ``'xlnet'`` - Google XLNET BASE parameters.
* ``'alxlnet'`` - Malaya ALXLNET BASE parameters.
* ``'bigbird'`` - Google BigBird BASE parameters.
* ``'tiny-bigbird'`` - Malaya BigBird BASE parameters.
* ``'fastformer'`` - FastFormer BASE parameters.
* ``'tiny-fastformer'`` - FastFormer TINY parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
Quantized model not necessary faster, totally depends on the machine.
Returns
-------
result: model
List of model classes:
* if `bert` in model, will return `malaya.model.bert.MulticlassBERT`.
* if `xlnet` in model, will return `malaya.model.xlnet.MulticlassXLNET`.
* if `bigbird` in model, will return `malaya.model.xlnet.MulticlassBigBird`.
* if `fastformer` in model, will return `malaya.model.fastformer.MulticlassFastFormer`.
"""
```
```
model = malaya.relevancy.transformer(model = 'tiny-bigbird')
```
### Load Quantized model
To load 8-bit quantized model, simply pass `quantized = True`, default is `False`.
We can expect slightly accuracy drop from quantized model, and not necessary faster than normal 32-bit float model, totally depends on machine.
```
quantized_model = malaya.relevancy.transformer(model = 'alxlnet', quantized = True)
```
#### Predict batch of strings
```python
def predict(self, strings: List[str]):
"""
classify list of strings.
Parameters
----------
strings: List[str]
Returns
-------
result: List[str]
"""
```
```
%%time
model.predict([negative_text, positive_text])
%%time
quantized_model.predict([negative_text, positive_text])
```
#### Predict batch of strings with probability
```python
def predict_proba(self, strings: List[str]):
"""
classify list of strings and return probability.
Parameters
----------
strings : List[str]
Returns
-------
result: List[dict[str, float]]
"""
```
```
%%time
model.predict_proba([negative_text, positive_text])
%%time
quantized_model.predict_proba([negative_text, positive_text])
```
#### Open relevancy visualization dashboard
Default when you call `predict_words` it will open a browser with visualization dashboard, you can disable by `visualization=False`.
```python
def predict_words(
self,
string: str,
method: str = 'last',
bins_size: float = 0.05,
visualization: bool = True,
):
"""
classify words.
Parameters
----------
string : str
method : str, optional (default='last')
Attention layer supported. Allowed values:
* ``'last'`` - attention from last layer.
* ``'first'`` - attention from first layer.
* ``'mean'`` - average attentions from all layers.
bins_size: float, optional (default=0.05)
default bins size for word distribution histogram.
visualization: bool, optional (default=True)
If True, it will open the visualization dashboard.
Returns
-------
dictionary: results
"""
```
**This method not available for BigBird models**.
```
quantized_model.predict_words(negative_text)
```
### Vectorize
Let say you want to visualize sentence / word level in lower dimension, you can use `model.vectorize`,
```python
def vectorize(self, strings: List[str], method: str = 'first'):
"""
vectorize list of strings.
Parameters
----------
strings: List[str]
method : str, optional (default='first')
Vectorization layer supported. Allowed values:
* ``'last'`` - vector from last sequence.
* ``'first'`` - vector from first sequence.
* ``'mean'`` - average vectors from all sequences.
* ``'word'`` - average vectors based on tokens.
Returns
-------
result: np.array
"""
```
#### Sentence level
```
# Sentence-level visualization: embed both example texts with the model,
# reduce the vectors to 2-D with t-SNE, and annotate each point with its text.
texts = [negative_text, positive_text]
r = model.vectorize(texts, method = 'first')

from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

embedded = TSNE().fit_transform(r)
embedded.shape

plt.figure(figsize = (7, 7))
plt.scatter(embedded[:, 0], embedded[:, 1])
for point_label, px, py in zip(texts, embedded[:, 0], embedded[:, 1]):
    # A [word, score] pair is rendered as "word, score"; plain strings pass through.
    if isinstance(point_label, list):
        point_label = '%s, %.3f' % (point_label[0], point_label[1])
    plt.annotate(
        point_label,
        xy = (px, py),
        xytext = (0, 0),
        textcoords = 'offset points',
    )
```
#### Word level
```
# Word-level visualization: each row of r is a sequence of (token, vector)
# pairs — TODO confirm against malaya's vectorize(method='word') output.
r = quantized_model.vectorize(texts, method = 'word')
x, y = [], []
for row in r:
    x.extend([i[0] for i in row])  # tokens
    y.extend([i[1] for i in row])  # token vectors
tsne = TSNE().fit_transform(y)
tsne.shape
plt.figure(figsize = (7, 7))
plt.scatter(tsne[:, 0], tsne[:, 1])
labels = x
# NOTE(review): the loop variables below shadow the x/y lists built above;
# harmless here since those lists are not reused afterwards.
for label, x, y in zip(
    labels, tsne[:, 0], tsne[:, 1]
):
    label = (
        '%s, %.3f' % (label[0], label[1])
        if isinstance(label, list)
        else label
    )
    plt.annotate(
        label,
        xy = (x, y),
        xytext = (0, 0),
        textcoords = 'offset points',
    )
```
Pretty good — the model is able to cluster the bottom-left group as positive relevancy.
### Stacking models
More information, you can read at [https://malaya.readthedocs.io/en/latest/Stack.html](https://malaya.readthedocs.io/en/latest/Stack.html)
```
albert = malaya.relevancy.transformer(model = 'albert')
malaya.stack.predict_stack([albert, model], [positive_text, negative_text])
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from gs_quant.markets.portfolio import Portfolio
from gs_quant.risk import MarketDataShockBasedScenario, MarketDataPattern, MarketDataShock, MarketDataShockType, PnlExplain
from gs_quant.markets import PricingContext
plt.rcParams['figure.figsize'] = (16, 8)
from gs_quant.session import GsSession
# external users should substitute their client id and secret; please skip this step if using internal jupyterhub
GsSession.use(client_id=None, client_secret=None, scopes=('run_analytics', 'read_product_data'))
```
In this notebook, we'll load a portfolio and run analytics to understand its risk and pnl. We'll also run a number of vol and spot scenarios which we'll use to compute VaR.
The content of this notebook is split into the following parts:
* [1: First, the portfolio](#1:-First,-the-portfolio)
* [2: Run risk and PnL explain](#2:-Run-risk-and-PnL-explain)
* [3: Scenarios grid and VaR](#3:-Scenarios-grid-and-VaR)
### 1: First, the portfolio
Let’s first load a portfolio from csv, mapping each column, and look at it as a dataframe. Internal users can load these books directly using `from_book`.
```
# Map the CSV columns onto gs-quant instrument fields; the premium column is
# forced to 0 for every row.
mappers = {
    'pair': lambda row: row['foreign ccy'] + 'USD',
    'notional_amount': 'notional',
    'expiration_date': 'expiry',
    'option_type': lambda row: 'Call' if row['C/P'] == 'C' else 'Put',
    'strike_price': 'strike',
    'premium': lambda row: 0
}
portfolio = Portfolio.from_csv(r'FXBook.csv', mappings=mappers)
portfolio.resolve()
frame = portfolio.to_frame()
frame.index = frame.index.droplevel(0)  # drop the outer portfolio index level
frame.head(1).transpose()
# for internal users:
# portfolio = Portfolio.from_book('my_book_id')
```
### 2: Run risk and PnL explain
With the portfolio in hand, let’s use gs-quant to understand risk and pnl over the last business day.
```
from gs_quant.datetime.date import business_day_offset
from gs_quant.markets import CloseMarket, PricingContext, close_market_date
from gs_quant.risk import Price, DollarPrice, PnlExplain, Theta, FXDelta, FXGamma, FXVega
# Latest close date, then the business day before it.
to_date = business_day_offset(close_market_date(), -1)
# Previous business day
from_date = business_day_offset(to_date, -1)
# A risk measure for calculating PnlExplain from that date
explain = PnlExplain(CloseMarket(date=to_date))
# Calculate PnlExplain and dollar price from 1 day ago
with PricingContext(pricing_date=from_date):
    result = portfolio.calc((FXDelta, FXGamma, FXVega, DollarPrice, Theta, explain))
# Calculate dollar price with today's market
# NOTE(review): (DollarPrice) is NOT a one-element tuple — the parentheses are
# redundant and this passes DollarPrice alone; use (DollarPrice,) if a tuple
# is intended. Same on the two calc calls below.
with PricingContext(pricing_date=to_date):
    target_price = portfolio.calc((DollarPrice))
# Yesterday's pricing date with today's market: PV change excluding theta.
with PricingContext(pricing_date=from_date, market=CloseMarket(date=to_date)):
    target_price_ex_theta = portfolio.calc((DollarPrice))
print('Risk and 1day Pnl as of '+str(from_date))
print(f'Book PV (in mUSD): {target_price[DollarPrice].aggregate()/1e6:.1f}')
print(f'Book Delta (in mUSD): {result[FXDelta].aggregate().value.sum()/1e6:.0f}')
print(f'Book Vega (in kUSD): {result[FXVega].aggregate().value.sum():.0f}')
print(f'Dollar price day on day change (in kUSD): {target_price[DollarPrice].aggregate()/1e3 - result[DollarPrice].aggregate()/1e3:.0f}')
print(f'Dollar price day on day change(ex theta) (in kUSD): {target_price_ex_theta[DollarPrice].aggregate()/1e3 - result[DollarPrice].aggregate()/1e3:.0f}')
print(f'Pnl explain total (in kUSD): {result[explain].aggregate().value.sum()/1e3:.0f}')
print(f'Theta total (in kUSD): {result[Theta].aggregate().value.sum()/1e3:.0f}')
print(f'Theta + Pnl explain total (in kUSD): {result[Theta].aggregate().value.sum()/1e3 + result[explain].aggregate().value.sum()/1e3:.0f}')
# Show the PnlExplain breakdown
explain_all = result[explain].aggregate()
explain_all[explain_all.value.abs() > 1.0].round(0)
```
### 3: Scenarios grid and VaR
We can also better understand risk by applying a number of market shocks – in this case we’ll look at a grid of vol and spot shocks, which we can also use to calculate VaR by looking at the 95th-percentile price change.
```
# Grid of proportional spot shocks (%) and absolute vol shocks (vol points).
shocks_fx = [-10, -7.5, -5, -2,-1, 0, 1, 2, 5, 7.5, 10]
shocks_vols = [-5, -2, -1, -0.5, 0, 0.5, 1, 2, 5, 7.5, 10]
shocked_prices = pd.DataFrame(index=shocks_vols, columns=shocks_fx)
# FX underlier taken from the PnlExplain breakdown computed earlier.
cross = explain_all[explain_all['mkt_type'] == 'FX']['mkt_asset'][0]
# Price every (spot, vol) shock combination; is_async batches the requests.
with PricingContext(is_async=True):
    for fx in shocks_fx:
        for vol in shocks_vols:
            with MarketDataShockBasedScenario({
                MarketDataPattern('FX', cross): MarketDataShock(MarketDataShockType.Proportional, fx / 1e2),
                MarketDataPattern('FX Vol', cross, 'ATM Vol'): MarketDataShock(MarketDataShockType.Absolute, vol / 1e2),
            }):
                # NOTE(review): chained indexing df[col][row]; df.loc[row, col]
                # is the safer pandas idiom.
                shocked_prices[fx][vol] = portfolio.calc((DollarPrice))
# Aggregate and compute pnl by subtracting the 0-shock PV; results in $k.
shocked_prices_res = pd.DataFrame(index=shocks_vols, columns=shocks_fx, dtype='float')
for fx in shocks_fx:
    for vol in shocks_vols:
        shocked_prices_res[fx][vol] = shocked_prices[fx][vol].aggregate()
shocked_prices_res -= shocked_prices[0][0].aggregate()
shocked_prices_res /= 1e3
ax = sns.heatmap(shocked_prices_res, cmap='coolwarm', annot=True, fmt='.1f')
ax.set(ylabel='absolute vol point moves', xlabel='% spot change', title='PV changes ($k)')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
Compute VaR
```
# 5th percentile of the PV-change grid = 95% scenario-based VaR.
# shocked_prices_res is in $k, so p/1e3 converts the VaR to $m.
p = np.percentile(shocked_prices_res, 5) # return 95th percentile
print('Portfolio base price ($m): {:,.1f}'.format(portfolio.price().aggregate()/1e6))
print('Scenario Based VaR with spot/vol grid ($m): {:,.1f}'.format(p/1e3))
```
### Disclaimers
Scenarios/predictions: Simulated results are for illustrative purposes only. GS provides no assurance or guarantee that the strategy will operate or would have operated in the past in a manner consistent with the above analysis. Past performance figures are not a reliable indicator of future results.
Indicative Terms/Pricing Levels: This material may contain indicative terms only, including but not limited to pricing levels. There is no representation that any transaction can or could have been effected at such terms or prices. Proposed terms and conditions are for discussion purposes only. Finalized terms and conditions are subject to further discussion and negotiation.
www.goldmansachs.com/disclaimer/sales-and-trading-invest-rec-disclosures.html If you are not accessing this material via Marquee ContentStream, a list of the author's investment recommendations disseminated during the preceding 12 months and the proportion of the author's recommendations that are 'buy', 'hold', 'sell' or other over the previous 12 months is available by logging into Marquee ContentStream using the link below. Alternatively, if you do not have access to Marquee ContentStream, please contact your usual GS representative who will be able to provide this information to you.
Backtesting, Simulated Results, Sensitivity/Scenario Analysis or Spreadsheet Calculator or Model: There may be data presented herein that is solely for illustrative purposes and which may include among other things back testing, simulated results and scenario analyses. The information is based upon certain factors, assumptions and historical information that Goldman Sachs may in its discretion have considered appropriate, however, Goldman Sachs provides no assurance or guarantee that this product will operate or would have operated in the past in a manner consistent with these assumptions. In the event any of the assumptions used do not prove to be true, results are likely to vary materially from the examples shown herein. Additionally, the results may not reflect material economic and market factors, such as liquidity, transaction costs and other expenses which could reduce potential return.
OTC Derivatives Risk Disclosures:
Terms of the Transaction: To understand clearly the terms and conditions of any OTC derivative transaction you may enter into, you should carefully review the Master Agreement, including any related schedules, credit support documents, addenda and exhibits. You should not enter into OTC derivative transactions unless you understand the terms of the transaction you are entering into as well as the nature and extent of your risk exposure. You should also be satisfied that the OTC derivative transaction is appropriate for you in light of your circumstances and financial condition. You may be requested to post margin or collateral to support written OTC derivatives at levels consistent with the internal policies of Goldman Sachs.
Liquidity Risk: There is no public market for OTC derivative transactions and, therefore, it may be difficult or impossible to liquidate an existing position on favorable terms. Transfer Restrictions: OTC derivative transactions entered into with one or more affiliates of The Goldman Sachs Group, Inc. (Goldman Sachs) cannot be assigned or otherwise transferred without its prior written consent and, therefore, it may be impossible for you to transfer any OTC derivative transaction to a third party.
Conflict of Interests: Goldman Sachs may from time to time be an active participant on both sides of the market for the underlying securities, commodities, futures, options or any other derivative or instrument identical or related to those mentioned herein (together, "the Product"). Goldman Sachs at any time may have long or short positions in, or buy and sell Products (on a principal basis or otherwise) identical or related to those mentioned herein. Goldman Sachs hedging and trading activities may affect the value of the Products.
Counterparty Credit Risk: Because Goldman Sachs, may be obligated to make substantial payments to you as a condition of an OTC derivative transaction, you must evaluate the credit risk of doing business with Goldman Sachs or its affiliates.
Pricing and Valuation: The price of each OTC derivative transaction is individually negotiated between Goldman Sachs and each counterparty and Goldman Sachs does not represent or warrant that the prices for which it offers OTC derivative transactions are the best prices available, possibly making it difficult for you to establish what is a fair price for a particular OTC derivative transaction; The value or quoted price of the Product at any time, however, will reflect many factors and cannot be predicted. If Goldman Sachs makes a market in the offered Product, the price quoted by Goldman Sachs would reflect any changes in market conditions and other relevant factors, and the quoted price (and the value of the Product that Goldman Sachs will use for account statements or otherwise) could be higher or lower than the original price, and may be higher or lower than the value of the Product as determined by reference to pricing models used by Goldman Sachs. If at any time a third party dealer quotes a price to purchase the Product or otherwise values the Product, that price may be significantly different (higher or lower) than any price quoted by Goldman Sachs. Furthermore, if you sell the Product, you will likely be charged a commission for secondary market transactions, or the price will likely reflect a dealer discount. Goldman Sachs may conduct market making activities in the Product. To the extent Goldman Sachs makes a market, any price quoted for the OTC derivative transactions, Goldman Sachs may differ significantly from (i) their value determined by reference to Goldman Sachs pricing models and (ii) any price quoted by a third party. The market price of the OTC derivative transaction may be influenced by many unpredictable factors, including economic conditions, the creditworthiness of Goldman Sachs, the value of any underlyers, and certain actions taken by Goldman Sachs.
Market Making, Investing and Lending: Goldman Sachs engages in market making, investing and lending businesses for its own account and the accounts of its affiliates in the same or similar instruments underlying OTC derivative transactions (including such trading as Goldman Sachs deems appropriate in its sole discretion to hedge its market risk in any OTC derivative transaction whether between Goldman Sachs and you or with third parties) and such trading may affect the value of an OTC derivative transaction.
Early Termination Payments: The provisions of an OTC Derivative Transaction may allow for early termination and, in such cases, either you or Goldman Sachs may be required to make a potentially significant termination payment depending upon whether the OTC Derivative Transaction is in-the-money to Goldman Sachs or you at the time of termination. Indexes: Goldman Sachs does not warrant, and takes no responsibility for, the structure, method of computation or publication of any currency exchange rates, interest rates, indexes of such rates, or credit, equity or other indexes, unless Goldman Sachs specifically advises you otherwise.
Risk Disclosure Regarding futures, options, equity swaps, and other derivatives as well as non-investment-grade securities and ADRs: Please ensure that you have read and understood the current options, futures and security futures disclosure document before entering into any such transactions. Current United States listed options, futures and security futures disclosure documents are available from our sales representatives or at http://www.theocc.com/components/docs/riskstoc.pdf, http://www.goldmansachs.com/disclosures/risk-disclosure-for-futures.pdf and https://www.nfa.futures.org/investors/investor-resources/files/security-futures-disclosure.pdf, respectively. Certain transactions - including those involving futures, options, equity swaps, and other derivatives as well as non-investment-grade securities - give rise to substantial risk and are not available to nor suitable for all investors. If you have any questions about whether you are eligible to enter into these transactions with Goldman Sachs, please contact your sales representative. Foreign-currency-denominated securities are subject to fluctuations in exchange rates that could have an adverse effect on the value or price of, or income derived from, the investment. In addition, investors in securities such as ADRs, the values of which are influenced by foreign currencies, effectively assume currency risk.
Options Risk Disclosures: Options may trade at a value other than that which may be inferred from the current levels of interest rates, dividends (if applicable) and the underlier due to other factors including, but not limited to, expectations of future levels of interest rates, future levels of dividends and the volatility of the underlier at any time prior to maturity. Note: Options involve risk and are not suitable for all investors. Please ensure that you have read and understood the current options disclosure document before entering into any standardized options transactions. United States listed options disclosure documents are available from our sales representatives or at http://theocc.com/publications/risks/riskstoc.pdf. A secondary market may not be available for all options. Transaction costs may be a significant factor in option strategies calling for multiple purchases and sales of options, such as spreads. When purchasing long options an investor may lose their entire investment and when selling uncovered options the risk is potentially unlimited. Supporting documentation for any comparisons, recommendations, statistics, technical data, or other similar information will be supplied upon request.
This material is for the private information of the recipient only. This material is not sponsored, endorsed, sold or promoted by any sponsor or provider of an index referred herein (each, an "Index Provider"). GS does not have any affiliation with or control over the Index Providers or any control over the computation, composition or dissemination of the indices. While GS will obtain information from publicly available sources it believes reliable, it will not independently verify this information. Accordingly, GS shall have no liability, contingent or otherwise, to the user or to third parties, for the quality, accuracy, timeliness, continued availability or completeness of the data nor for any special, indirect, incidental or consequential damages which may be incurred or experienced because of the use of the data made available herein, even if GS has been advised of the possibility of such damages.
Standard & Poor's ® and S&P ® are registered trademarks of The McGraw-Hill Companies, Inc. and S&P GSCI™ is a trademark of The McGraw-Hill Companies, Inc. and have been licensed for use by the Issuer. This Product (the "Product") is not sponsored, endorsed, sold or promoted by S&P and S&P makes no representation, warranty or condition regarding the advisability of investing in the Product.
Notice to Brazilian Investors
Marquee is not meant for the general public in Brazil. The services or products provided by or through Marquee, at any time, may not be offered or sold to the general public in Brazil. You have received a password granting access to Marquee exclusively due to your existing relationship with a GS business located in Brazil. The selection and engagement with any of the offered services or products through Marquee, at any time, will be carried out directly by you. Before acting to implement any chosen service or products, provided by or through Marquee you should consider, at your sole discretion, whether it is suitable for your particular circumstances and, if necessary, seek professional advice. Any steps necessary in order to implement the chosen service or product, including but not limited to remittance of funds, shall be carried out at your discretion. Accordingly, such services and products have not been and will not be publicly issued, placed, distributed, offered or negotiated in the Brazilian capital markets and, as a result, they have not been and will not be registered with the Brazilian Securities and Exchange Commission (Comissão de Valores Mobiliários), nor have they been submitted to the foregoing agency for approval. Documents relating to such services or products, as well as the information contained therein, may not be supplied to the general public in Brazil, as the offering of such services or products is not a public offering in Brazil, nor used in connection with any offer for subscription or sale of securities to the general public in Brazil.
The offer of any securities mentioned in this message may not be made to the general public in Brazil. Accordingly, any such securities have not been nor will they be registered with the Brazilian Securities and Exchange Commission (Comissão de Valores Mobiliários) nor has any offer been submitted to the foregoing agency for approval. Documents relating to the offer, as well as the information contained therein, may not be supplied to the public in Brazil, as the offer is not a public offering of securities in Brazil. These terms will apply on every access to Marquee.
Ouvidoria Goldman Sachs Brasil: 0800 727 5764 e/ou ouvidoriagoldmansachs@gs.com
Horário de funcionamento: segunda-feira à sexta-feira (exceto feriados), das 9hs às 18hs.
Ombudsman Goldman Sachs Brazil: 0800 727 5764 and / or ouvidoriagoldmansachs@gs.com
Available Weekdays (except holidays), from 9 am to 6 pm.
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/MyDrive/AGGLIO/github_upload
import numpy as np
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from agglio_lib import *
# Dimension ablation: for each d, tune AGGLIO-GD and AGGLIO-SGD by grid
# search, refit with the best hyperparameters, and record the final
# parameter-recovery error ||w^t - w^*||_2.
n = 1000
w_radius = 10
dim_list=[10, 20, 30, 40, 50]
# NOTE(review): val_ind is computed but never used below — confirm whether a
# validation split was intended.
val_ind=np.random.randint(n, size=int(0.20*n))
#AGGLIO-GD
l2_agd=[]
l2_agsgd=[]
for d in dim_list:
    # Ground-truth weights, 1/sqrt(d)-scaled data, random init, and
    # sigmoid-link responses y = sigmoid(X w*).
    wAst = np.random.randn(d,1)
    X = getData(0, 1, n, d)/np.sqrt(d)
    w0 =w_radius*np.random.randn(d,1)/np.sqrt(d)
    y = sigmoid(np.matmul(X, wAst))
    #AGGLIO-GD: grid-search alpha / B_init / B_step, then refit with the best.
    alpha_range = np.linspace(start=1, stop=d*5, num=10).tolist()
    B_init_range= np.power(10.0, [-1, -2, -3, -4]).tolist()
    B_step_range =np.linspace(start=1.01, stop=2, num=5).tolist()
    parameters = dict(alpha = alpha_range, B_init=B_init_range, B_step=B_step_range )
    cv = ShuffleSplit( n_splits = 1, test_size = 0.3, random_state = 42 )
    grid = GridSearchCV( AG_GD( ), param_grid=parameters, refit = False, cv=cv) # verbose=3
    grid.fit( X, y.ravel(), w_init=w0.ravel(), w_star=wAst.ravel())
    best = grid.best_params_
    print("The best parameters for AGGILIO_GD are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_))
    agd = AG_GD(alpha= best["alpha"], B_init=best['B_init'], B_step=best['B_step'] )
    agd.fit( X, y.ravel(), w_init = w0.ravel(), w_star = wAst.ravel(), max_iter=600 )
    # distVals tracks ||w^t - w^*||_2 per iteration; keep the final value.
    l2_agd.append(agd.distVals[-1])
    #AGGLIO-SGD: same procedure with minibatches of 200 and a coarser alpha grid.
    alpha_range = np.linspace(start=1, stop=d*5, num=5).tolist()
    B_init_range= np.power(10.0, [-1, -2, -3, -4]).tolist()
    B_step_range =np.linspace(start=1.01, stop=2, num=5).tolist()
    parameters = dict(alpha = alpha_range, B_init=B_init_range, B_step=B_step_range )
    cv = ShuffleSplit( n_splits = 1, test_size = 0.3, random_state = 42 )
    grid = GridSearchCV( AG_SGD( ), param_grid=parameters, refit = False, cv=cv) # verbose=3
    grid.fit( X, y.ravel(), w_init=w0.ravel(), w_star=wAst.ravel(), minibatch_size=200)
    best = grid.best_params_
    print("The best parameters for AGGILIO_SGD are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_))
    agsgd = AG_SGD(alpha= best["alpha"], B_init=best['B_init'], B_step=best['B_step'] )
    agsgd.fit( X, y.ravel(), w_init = w0.ravel(), w_star = wAst.ravel(), max_iter=600, minibatch_size=200 )
    l2_agsgd.append(agsgd.distVals[-1])
# Plot final error vs. dimension for both variants and save to PDF.
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
y_fmt = ticker.FormatStrFormatter('%2.0e')
fig, ax = plt.subplots()
ax.yaxis.set_major_formatter(y_fmt)
plt.plot(dim_list, l2_agd, label='AGGLIO-GD', color='#1b9e77', linewidth=3)
plt.plot(dim_list, l2_agsgd, label='AGGLIO-SGD', color='#5e3c99', linewidth=3)
plt.legend()
plt.ylabel("$||w^t-w^*||_2$",fontsize=12)
plt.xlabel("dimension",fontsize=12)
plt.grid()
plt.title(r"n=1000" )
plt.savefig('dimension_abliation.pdf', dpi=300, bbox_inches = 'tight')
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
data = pd.read_csv("healthcare-dataset-stroke-data.csv")
data.head().T
print(f"Data shape: {data.shape}")
# missing values
missing_values = data.isnull().sum()
missing_values
#dropping missing value rows
train_data = data.dropna(axis=0, how="any")
print('Train Data Shape: {}'.format(train_data.shape))
train_data['stroke'].unique()
train_data['smoking_status'].unique()
train_data['ever_married'].unique()
train_data['stroke'].value_counts()
sns.countplot(x=train_data['stroke'])
plt.title('Number of patients affected by stroke',fontsize = 12)
train_data['gender'].value_counts()
train_data.groupby(['gender'])['stroke'].value_counts()
sns.countplot(x=train_data['gender'], hue = train_data['stroke'])
plt.title('Gender Stroke Rate',fontsize = 15)
plt.show()
drop_unknown = train_data[train_data['smoking_status'] == "Unknown"].index
train_data.drop(drop_unknown, inplace = True, axis = 0)
# rows and columns after dropped the unknown values
# from smoking status column
print(f"Data shape after dropped Unknown: {train_data.shape}")
train_data['smoking_status'].value_counts()
train_data.groupby(['smoking_status'])['stroke'].value_counts()
sns.countplot(x = train_data['smoking_status'], hue = train_data['stroke'])
plt.show()
# object datas and numeric data seperation
str_data = train_data.select_dtypes(include=['object'])
int_data = train_data.select_dtypes(include = ['integer','float'])
label = LabelEncoder()
features = str_data.apply(label.fit_transform)
features = features.join(int_data)
features.head()
# Build the feature matrix / target, split into train and test, then fit a
# Gaussian Naive Bayes model and a decision tree and report their scores.
# dropping 'id' and 'stroke' column to make x_train model
xtrain = features.drop(['stroke','id'], axis = 1)
xtrain.head()
# taking last column for y_train model
ytrain = features['stroke']
ytrain.head()
# Making training and testing models
from sklearn.model_selection import train_test_split
x_train,x_test, y_train, y_test = train_test_split(xtrain,ytrain)
# x_train model as a data frame
x_train.head().T
# GaussianNB model making
model = GaussianNB()
model.fit(x_train, y_train)
test_score = model.score(x_test, y_test)
print("NBtest_score:", test_score)
train_score = model.score(x_train, y_train)
print("NBtrain_score:",train_score)
# Decision tree model making.
# Bug fix: the tree was previously fit on the FULL dataset (xtrain, ytrain),
# so the rows held out in x_test leaked into training and the test score
# below was inflated. Fit on the training split only, like the NB model.
desicion_tree_mod = DecisionTreeClassifier(criterion = 'entropy', max_depth = 8)
desicion_tree_mod.fit(x_train, y_train)
desicion_tree_test_score = desicion_tree_mod.score(x_test, y_test)
print("Desicion Tree test_score:", desicion_tree_test_score)
desicion_tree_train_score = desicion_tree_mod.score(x_train, y_train)
print("Desicion tree train score: ", desicion_tree_train_score)
y_pred = desicion_tree_mod.predict(x_test)
y_pred
# Support Vector Classifier Algorithm
from sklearn.svm import SVC
svc = SVC(kernel = 'linear', random_state = 42)
svc.fit(x_train, y_train)
y_pred_svc = svc.predict(x_test)
y_pred_svc
svc_test_score = svc.score(x_test, y_test)
print("svc test score: ",svc_test_score)
svc_train_score = svc.score(x_train, y_train)
print("svc train score: ", svc_train_score)
from sklearn.metrics import accuracy_score
accuracy_decision_tree = accuracy_score(y_test, y_pred)
print("Decision tree Accuracy: " + str(accuracy_decision_tree * 100))
# Fit a MinMax scaler on the full feature matrix; dataset_scaled and y are
# not used again below except via sc.transform in the final prediction.
sc = MinMaxScaler(feature_range=(0, 1))
dataset_scaled = sc.fit_transform(xtrain.values)
y = ytrain.values
y_pred = desicion_tree_mod.predict(x_test)
y_pred
# Persist the decision-tree model and reload it for a spot prediction.
import joblib as jb
jb.dump(desicion_tree_mod, 'stroke.joblib')
model = jb.load('stroke.joblib')
# NOTE(review): the tree was trained on UNSCALED features, but this input is
# MinMax-scaled before prediction — preprocessing should match training;
# confirm which was intended.
print(model.predict(sc.transform(np.array([[1.0, 0, 2.0, 1.0, 0, 25.0, 0, 0, 79.20, 38.5]]))))
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.