code
stringlengths 2.5k
150k
| kind
stringclasses 1
value |
|---|---|
# Uncertainty Sampling on the Radio Galaxy Zoo
```
import sys
import h5py, numpy, sklearn.neighbors
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
sys.path.insert(1, '..')
import crowdastro.train, crowdastro.test
# File locations of the preprocessed data and serialised model artefacts.
TRAINING_H5_PATH = '../training.h5'
CROWDASTRO_H5_PATH = '../crowdastro.h5'
NORRIS_DAT_PATH = '../data/norris_2006_atlas_classifications_ra_dec_only.dat'
CLASSIFIER_OUT_PATH = '../classifier.pkl'
ASTRO_TRANSFORMER_OUT_PATH = '../astro_transformer.pkl'
IMAGE_TRANSFORMER_OUT_PATH = '../image_transformer.pkl'
IMAGE_SIZE = 200 * 200  # flattened pixel count of each radio image patch
ARCMIN = 1 / 60  # one arcminute, in degrees
N_JOBS = 8  # parallel jobs for scikit-learn
%matplotlib inline
# Load the crowdsourced (Radio Galaxy Zoo) labels and the WISE IR catalogue.
with h5py.File(TRAINING_H5_PATH, 'r') as training_h5:
    crowdsourced_labels = training_h5['labels'].value

with h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5:
    ir_names = crowdastro_h5['/wise/cdfs/string'].value
    # First two numeric columns are taken as (RA, Dec) positions.
    ir_positions = crowdastro_h5['/wise/cdfs/numeric'].value[:, :2]
ir_tree = sklearn.neighbors.KDTree(ir_positions)

# Build a per-IR-object label vector from the Norris et al. (2006)
# cross-identifications: 1 where a Norris host matches a nearby IR object.
with open(NORRIS_DAT_PATH, 'r') as norris_dat:
    norris_coords = [r.strip().split('|') for r in norris_dat]

norris_labels = numpy.zeros((len(ir_positions)))
for ra, dec in norris_coords:
    # Find a neighbour: convert the Norris position to decimal degrees and
    # query the KD-tree for the single nearest IR object.
    skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
    ra = skycoord.ra.degree
    dec = skycoord.dec.degree
    ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
    # Accept matches closer than 0.1 (Euclidean distance in degrees, ignoring
    # spherical geometry) -- NOTE(review): confirm this tolerance is intended.
    if dist < 0.1:
        norris_labels[ir] = 1
def softmax(x):
    """Return the softmax of `x`, computed in a numerically stable way.

    Subtracting the maximum before exponentiating prevents overflow without
    changing the result.
    """
    shifted = x - numpy.max(x)
    weights = numpy.exp(shifted)
    return weights / weights.sum()
def train_and_test(hidden_atlas_training_indices):
    """Train a logistic-regression host classifier and evaluate it.

    Parameters
    ----------
    hidden_atlas_training_indices:
        Positions (within the ATLAS training set) of subjects whose labels
        are hidden; IR objects near them are removed from the training set.

    Returns
    -------
    tuple
        (norris_accuracy, crowdsourced_accuracy, both_agree_accuracy,
        either_agree_accuracy, uncertainties, n_ir), where `uncertainties`
        holds one entropy value per ATLAS training subject and `n_ir` is the
        number of IR training examples actually used.
    """
    # The module header only imports sklearn.neighbors, which does not
    # guarantee that these submodules are reachable as attributes of
    # `sklearn`; import them explicitly so this function is self-contained.
    import sklearn.linear_model
    import sklearn.pipeline
    import sklearn.preprocessing

    with h5py.File(TRAINING_H5_PATH, 'r') as training_h5, \
            h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5:
        # Number of non-image ("astro") features depends on the IR survey.
        n_static = 5 if training_h5.attrs['ir_survey'] == 'wise' else 6
        train_indices = training_h5['is_ir_train'].value

        # Remove all IR objects near hidden ATLAS objects.
        for atlas_index in hidden_atlas_training_indices:
            # Trailing columns hold distances to IR objects -- assumed layout.
            # NOTE(review): the offset here is n_static + IMAGE_SIZE but the
            # test loop below uses 2 + IMAGE_SIZE; confirm which is correct.
            ir = crowdastro_h5['/atlas/cdfs/numeric'][atlas_index,
                                                     n_static + IMAGE_SIZE:]
            nearby = (ir < ARCMIN).nonzero()[0]
            for ir_index in nearby:
                train_indices[ir_index] = 0

        n_ir = train_indices.sum()

        # We can now proceed as usual with training/testing.
        outputs = training_h5['labels'].value[train_indices]
        # Clip the astro features at 1500 to cap outliers.
        astro_inputs = numpy.minimum(
            training_h5['features'][train_indices, :n_static], 1500)
        image_inputs = training_h5['features'].value[train_indices, n_static:]

        astro_transformer = sklearn.pipeline.Pipeline([
            ('normalise', sklearn.preprocessing.Normalizer()),
            ('scale', sklearn.preprocessing.StandardScaler()),
        ])
        image_transformer = sklearn.pipeline.Pipeline([
            ('normalise', sklearn.preprocessing.Normalizer()),
        ])

        features = []
        features.append(astro_transformer.fit_transform(astro_inputs))
        features.append(image_transformer.fit_transform(image_inputs))
        inputs = numpy.hstack(features)

        classifier = sklearn.linear_model.LogisticRegression(
            class_weight='balanced', n_jobs=N_JOBS)
        classifier.fit(inputs, outputs)

        # Test the classifier against the Norris and crowdsourced labels.
        test_indices = training_h5['is_atlas_test'].value
        numeric_subjects = crowdastro_h5['/atlas/cdfs/numeric'][test_indices, :]

        n_norris_agree = 0
        n_crowdsourced_agree = 0
        n_all_agree = 0
        n_either_agree = 0
        n_no_host = 0
        n_total = 0
        for subject in numeric_subjects:
            swire = subject[2 + IMAGE_SIZE:]
            nearby = swire < ARCMIN
            astro_inputs = numpy.minimum(
                training_h5['features'][nearby, :n_static], 1500)
            image_inputs = training_h5['features'][nearby, n_static:]
            features = []
            features.append(astro_transformer.transform(astro_inputs))
            features.append(image_transformer.transform(image_inputs))
            inputs = numpy.hstack(features)
            crowdsourced_outputs = crowdsourced_labels[nearby]
            norris_outputs = norris_labels[nearby]
            if sum(crowdsourced_outputs) < 1 or sum(norris_outputs) < 1:
                # No hosts!
                n_no_host += 1
                continue
            # The predicted host is the most probable nearby IR object.
            selection = classifier.predict_proba(inputs)[:, 1].argmax()
            n_norris_agree += norris_outputs[selection]
            n_crowdsourced_agree += crowdsourced_outputs[selection]
            n_all_agree += (norris_outputs[selection] *
                            crowdsourced_outputs[selection])
            n_either_agree += (norris_outputs[selection] or
                               crowdsourced_outputs[selection])
            n_total += 1

        # Compute the uncertainties (predictive entropy) of the pool.
        pool_indices = training_h5['is_atlas_train'].value
        numeric_subjects = crowdastro_h5['/atlas/cdfs/numeric'][pool_indices, :]
        uncertainties = []
        for subject in numeric_subjects:
            swire = subject[2 + IMAGE_SIZE:]
            nearby = swire < ARCMIN
            astro_inputs = numpy.minimum(
                training_h5['features'][nearby, :n_static], 1500)
            image_inputs = training_h5['features'][nearby, n_static:]
            features = []
            features.append(astro_transformer.transform(astro_inputs))
            features.append(image_transformer.transform(image_inputs))
            inputs = numpy.hstack(features)
            # Entropy of the softmax-normalised per-candidate probabilities.
            probs = softmax(classifier.predict_proba(inputs)[:, 1])
            entropy = -numpy.sum(numpy.log(probs) * probs)
            uncertainties.append(entropy)

    return (n_norris_agree / n_total, n_crowdsourced_agree / n_total,
            n_all_agree / n_total, n_either_agree / n_total, uncertainties, n_ir)
# Randomly hide 90% of labels.
with h5py.File(TRAINING_H5_PATH, 'r') as training_h5:
    atlas_train_indices = training_h5['is_atlas_train'].value

# Positions (0..n_train-1) into the ATLAS training set; shuffle and keep 90%.
initial_hidden_atlas_training_indices = numpy.arange(atlas_train_indices.sum())
numpy.random.shuffle(initial_hidden_atlas_training_indices)
initial_hidden_atlas_training_indices = initial_hidden_atlas_training_indices[
    :9 * len(initial_hidden_atlas_training_indices) // 10]
initial_hidden_atlas_training_indices.sort()

# Testing random label selection (the passive-learning baseline).
norris_accuracies_random = []
rgz_accuracies_random = []
all_accuracies_random = []
any_accuracies_random = []
n_ir_random = []  # number of IR training examples available each epoch
n_batch = 100  # labels revealed per epoch
n_epochs = 25
numpy.random.seed(0)
# NOTE(review): `[:]` on a NumPy array returns a *view*, not a copy, so the
# in-place shuffle below also reorders initial_hidden_atlas_training_indices.
# The hidden *set* is unchanged, but use .copy() if the order matters.
hidden_atlas_training_indices = initial_hidden_atlas_training_indices[:]
for epoch in range(n_epochs):
    print('Epoch {}/{}'.format(epoch + 1, n_epochs))
    # Train, test, and generate uncertainties.
    results = train_and_test(hidden_atlas_training_indices)
    norris_accuracies_random.append(results[0])
    rgz_accuracies_random.append(results[1])
    all_accuracies_random.append(results[2])
    any_accuracies_random.append(results[3])
    n_ir_random.append(results[5])
    # Reveal n_batch new labels at random (drop them from the hidden set).
    if len(hidden_atlas_training_indices) < n_batch:
        break
    else:
        numpy.random.shuffle(hidden_atlas_training_indices)
        hidden_atlas_training_indices = hidden_atlas_training_indices[:-n_batch]
        hidden_atlas_training_indices.sort()
# Testing uncertainty sampling label selection.
norris_accuracies_uncsample = []
rgz_accuracies_uncsample = []
all_accuracies_uncsample = []
any_accuracies_uncsample = []
n_ir_uncsample = []
hidden_atlas_training_indices = initial_hidden_atlas_training_indices[:]
for epoch in range(n_epochs):
    print('Epoch {}/{}'.format(epoch + 1, n_epochs))
    # Train, test, and generate uncertainties.
    results = train_and_test(hidden_atlas_training_indices)
    uncertainties = results[4]  # one entropy value per ATLAS training subject
    norris_accuracies_uncsample.append(results[0])
    rgz_accuracies_uncsample.append(results[1])
    all_accuracies_uncsample.append(results[2])
    any_accuracies_uncsample.append(results[3])
    n_ir_uncsample.append(results[5])
    # Reveal the n_batch *most* uncertain subjects: sort the hidden indices
    # ascending by entropy and drop the high-entropy tail from the hidden set.
    if len(hidden_atlas_training_indices) < n_batch:
        break
    else:
        hidden_atlas_training_indices = numpy.array(
            sorted(hidden_atlas_training_indices, key=lambda z: uncertainties[z]))[:-n_batch]
        hidden_atlas_training_indices.sort()
# Compare passive (random) and uncertainty-sampling label selection.
plt.figure(figsize=(15, 10))

# Top left: agreement with both/either label source, both strategies.
plt.subplot(2, 2, 1)
plt.plot(all_accuracies_random, c='pink')
plt.plot(any_accuracies_random, c='darkred')
plt.plot(all_accuracies_uncsample, c='lightgreen')
plt.plot(any_accuracies_uncsample, c='darkgreen')
plt.xlabel('{}-batch epochs'.format(n_batch))
plt.ylabel('Classification accuracy')
plt.legend(['Norris & RGZ (passive)', 'Norris | RGZ (passive)',
            'Norris & RGZ (unc)', 'Norris | RGZ (unc)'], loc='lower right')

# Top right: agreement with the Norris labels only.
plt.subplot(2, 2, 2)
plt.plot(norris_accuracies_random, c='red')
plt.plot(norris_accuracies_uncsample, c='green')
plt.legend(['Norris (passive)', 'Norris (unc)'], loc='lower right')
plt.xlabel('{}-batch epochs'.format(n_batch))
plt.ylabel('Classification accuracy')

# Bottom left: agreement with the crowdsourced (RGZ) labels only.
plt.subplot(2, 2, 3)
plt.plot(rgz_accuracies_random, c='red')
plt.plot(rgz_accuracies_uncsample, c='green')
plt.legend(['RGZ (passive)', 'RGZ (unc)'], loc='lower right')
plt.xlabel('{}-batch epochs'.format(n_batch))
plt.ylabel('Classification accuracy')

# Bottom right: how many more IR examples the random strategy retains.
plt.subplot(2, 2, 4)
plt.plot(numpy.array(n_ir_random) - numpy.array(n_ir_uncsample))
plt.xlabel('{}-batch epochs'.format(n_batch))
plt.ylabel('Difference in number of IR examples')

plt.show()
```
Conclusion: Uncertainty sampling with entropy doesn't work very well.
|
github_jupyter
|
<a href="https://colab.research.google.com/github/lasyaistla/Ai.fellowship/blob/main/Copy_of_Style_Transfer_PyTorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Please complete the missing parts in the code below. Moreover, please correct the mistakes in the code if the performance is not satisfactory.
# Implementation of Neural Style Transfer with PyTorch
```
# importing libraries to implement style-transfer
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
# Run on GPU when available; all tensors and the model are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# desired size of the output image
imsize = 512 if torch.cuda.is_available() else 128  # use small size if no gpu

loader = transforms.Compose([
    transforms.Resize(imsize),  # scale imported image
    transforms.ToTensor()])  # transform it into a torch tensor
def image_loader(image_name):
    """Load an image from disk as a 1xCxHxW float tensor on `device`."""
    pil_image = Image.open(image_name)
    # The network expects a batch dimension, so add a fake one of size 1.
    tensor = loader(pil_image).unsqueeze(0)
    return tensor.to(device, torch.float)
# Load the style and content images; both must have identical dimensions
# because they are fed through the same network side by side.
style_img = image_loader("/dancing.jpg")
content_img = image_loader("/picasso.jpg")

assert style_img.size() == content_img.size(), \
    "we need to import style and content images of the same size"

unloader = transforms.ToPILImage()  # reconvert into PIL image
plt.ion()  # interactive mode so figures update without blocking
def imshow(tensor, title=None):
    """Display a 1xCxHxW image tensor with matplotlib."""
    # Work on a CPU copy so the displayed tensor is never modified.
    img = tensor.cpu().clone()
    img = img.squeeze(0)  # remove the fake batch dimension
    plt.imshow(unloader(img))
    if title is not None:
        plt.title(title)
    # Give the GUI event loop a moment to redraw the figure.
    plt.pause(0.001)
# Preview both source images before starting the transfer.
plt.figure()
imshow(style_img, title='Style Image')

plt.figure()
imshow(content_img, title='Content Image')
class ContentLoss(nn.Module):
    """Transparent layer that records the content loss as a side effect.

    It measures the MSE between its input and a fixed content target, stores
    it on `self.loss`, and passes the input through unchanged so it can be
    inserted anywhere in a Sequential model.
    """

    def __init__(self, target,):
        super(ContentLoss, self).__init__()
        # Detach the target from the autograd graph: it is a fixed reference
        # value, not a variable to be optimised. Without the detach the
        # criterion's forward pass would raise an error.
        self.target = target.detach()

    def forward(self, input):
        loss = F.mse_loss(input, self.target)
        self.loss = loss
        return input
def gram_matrix(input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
features = input.view(a * b, c * d) # resise F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
# by dividing by the number of element in each feature maps.
return G.div(a * b * c * d)
class StyleLoss(nn.Module):
    """Transparent layer that records the style loss as a side effect.

    The style loss is the MSE between the Gram matrix of the input and the
    Gram matrix of a fixed style target; the input passes through unchanged.
    """

    def __init__(self, target_feature):
        super(StyleLoss, self).__init__()
        # Precompute the target Gram matrix once; detach so it is a constant.
        self.target = gram_matrix(target_feature).detach()

    def forward(self, input):
        self.loss = F.mse_loss(gram_matrix(input), self.target)
        return input
# Import the pre-trained VGG-19 network (note: vgg19, not vgg16) and keep
# only its convolutional feature extractor, frozen in eval mode.
cnn = models.vgg19(pretrained=True).features.to(device).eval()

# Per-channel ImageNet statistics used to normalise inputs to VGG.
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
# Normalisation as a module, so it can be the first layer of an nn.Sequential.
class Normalization(nn.Module):
    """Normalise an image batch with fixed per-channel mean and std."""

    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        # Reshape the statistics to [C x 1 x 1] so they broadcast across a
        # [B x C x H x W] image tensor.
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        # Standard channel-wise (x - mean) / std normalisation.
        return (img - self.mean) / self.std
# Desired depth layers at which to compute the style/content losses:
# content is matched at a single mid-depth layer, style at several depths.
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    """Rebuild `cnn` as a Sequential with loss layers spliced in.

    Walks the pre-trained feature extractor layer by layer, inserting a
    ContentLoss after each layer named in `content_layers` and a StyleLoss
    after each layer named in `style_layers`.

    Returns (model, style_losses, content_losses), where the loss lists give
    iterable access to the inserted loss modules.
    """
    # normalization module
    normalization = Normalization(normalization_mean, normalization_std).to(device)

    content_losses = []
    style_losses = []

    # Assuming cnn is an nn.Sequential, build a new one so modules are
    # activated strictly in order.
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the
            # ContentLoss and StyleLoss we insert below, so replace with an
            # out-of-place one.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # The content image's activations at this depth become the target.
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # The style image's activations at this depth become the target.
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # Trim off the layers after the last content/style loss -- they would
    # cost compute without contributing to any loss.
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
# Start the optimisation from a copy of the content image.
input_img = content_img.clone()
# if you want to use white noise instead uncomment the below line:
# input_img = torch.randn(content_img.data.size(), device=device)

# add the original input image to the figure:
plt.figure()
imshow(input_img, title='Input Image')
def get_input_optimizer(input_img):
    """Create an L-BFGS optimizer over the image itself.

    Passing the image as the parameter list signals that the input pixels --
    not any network weights -- are what gets optimised.
    """
    return optim.LBFGS([input_img])
def run_style_transfer(cnn, normalization_mean, normalization_std,
                       content_img, style_img, input_img, num_steps=300,
                       style_weight=1000000, content_weight=1):
    """Run the style transfer and return the optimised image tensor."""
    print('Building the style transfer model..')
    model, style_losses, content_losses = get_style_model_and_losses(cnn,
        normalization_mean, normalization_std, style_img, content_img)

    # We want to optimize the input and not the model parameters, so we
    # update all the requires_grad fields accordingly.
    input_img.requires_grad_(True)
    model.requires_grad_(False)

    optimizer = get_input_optimizer(input_img)

    print('Optimizing..')
    # Mutable one-element list so the closure below can update the counter.
    run = [0]
    while run[0] <= num_steps:

        def closure():
            # L-BFGS may step outside the valid image range; clamp to [0, 1].
            with torch.no_grad():
                input_img.clamp_(0, 1)

            optimizer.zero_grad()
            # The forward pass populates every loss module's .loss attribute.
            model(input_img)
            style_score = 0
            content_score = 0

            for sl in style_losses:
                style_score += sl.loss
            for cl in content_losses:
                content_score += cl.loss

            style_score *= style_weight
            content_score *= content_weight

            loss = style_score + content_score
            loss.backward()

            run[0] += 1
            if run[0] % 50 == 0:
                print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f}'.format(
                    style_score.item(), content_score.item()))
                print()

            return style_score + content_score

        optimizer.step(closure)

    # a last correction...
    with torch.no_grad():
        input_img.clamp_(0, 1)

    return input_img
```
### Show your output image
```
# Run the optimisation and display the stylised result.
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
                            content_img, style_img, input_img)

plt.figure()
imshow(output, title='Output Image')

# sphinx_gallery_thumbnail_number = 4
plt.ioff()  # turn interactive mode back off before the final blocking show
plt.show()
```
|
github_jupyter
|
# Assignment 1: Numpy RNN
Implement an RNN and run backpropagation through time (BPTT)
```
from typing import Dict, Tuple
import numpy as np
class RNN(object):
    """Numpy implementation of a sequence-to-one recurrent neural network
    for regression tasks.

    The hidden state evolves as a(t) = tanh(W x(t) + R a(t-1) + bs) with a
    zero initial state, and the prediction is read from the final step:
    y_hat = V a(T-1) + by.
    """

    def __init__(self, input_size: int, hidden_size: int, output_size: int):
        """Initialization

        Parameters
        ----------
        input_size : int
            Number of input features per time step
        hidden_size : int
            Number of hidden units in the RNN
        output_size : int
            Number of output units.
        """
        super(RNN, self).__init__()
        self.input_size = input_size    # D in literature
        self.hidden_size = hidden_size  # I in literature
        self.output_size = output_size  # K in literature

        # Create and initialize the weights with small uniform noise.
        # Stored so that W.T, R.T, V.T match the scriptum's notation.
        init = lambda shape: np.random.uniform(-0.2, 0.2, shape)
        self.W = init((hidden_size, input_size))   # I x D
        self.R = init((hidden_size, hidden_size))  # I x I
        self.bs = np.zeros((hidden_size))
        self.V = init((output_size, hidden_size))  # K x I
        self.by = np.zeros((output_size))

        # Placeholders for intermediates needed by backprop.
        self.a = None      # hidden activations, one row per time step
        self.y_hat = None  # last forward prediction
        self.grads = None  # gradient dict from the last backward pass
        self.x = None      # last input sequence

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Forward pass through the RNN.

        Parameters
        ----------
        x : np.ndarray
            Input sequence(s) of shape [sequence length, number of features]

        Returns
        -------
        NumPy array containing the network prediction for the input sample.
        """
        self.x = x
        # BUG FIX: the activation buffer must have one row per *time step*.
        # The original sized it by input_size, which only worked when the
        # sequence length happened to equal the number of features.
        # a[-1] is still all-zero when t == 0, giving a zero initial state.
        self.a = np.zeros((len(x), self.hidden_size))
        for t in range(len(x)):
            self.a[t] = np.tanh(self.W @ x[t] + self.R @ self.a[t - 1] + self.bs)
        self.y_hat = self.V @ self.a[-1] + self.by
        return self.y_hat  # sequence-to-one model: only the last output

    def forward_fast(self, x: np.ndarray) -> np.ndarray:
        """Optimized forward pass that does not store intermediates."""
        a = np.tanh(self.W @ x[0] + self.bs)
        for t in range(1, len(x)):
            a = np.tanh(self.W @ x[t] + self.R @ a + self.bs)
        return self.V @ a + self.by

    def backward(self, d_loss: np.ndarray) -> Dict:
        """Calculate the backward pass (BPTT) through the RNN.

        Parameters
        ----------
        d_loss : np.ndarray
            The gradient of the loss w.r.t the network output in the shape
            [output_size, 1]

        Returns
        -------
        Dictionary containing the gradients for each network weight as
        key-value pair (keys 'W', 'R', 'V', 'bs', 'by').
        """
        # Views with an explicit middle axis of 1 so the outer products
        # below can be written as plain matrix products.
        a = self.a.reshape(self.a.shape[0], 1, self.a.shape[1])
        x = self.x.reshape(self.x.shape[0], 1, self.x.shape[1])

        # Output-layer gradients only involve the final hidden state.
        d_V = d_loss @ a[-1]
        # BUG FIX: reshape so the bias gradient matches self.by's shape.
        # The original kept shape [output_size, 1], which broke update()
        # and the comparison against the numerical gradient.
        d_by = d_loss.reshape(self.by.shape)

        # Recurrent gradients are accumulated over all time steps.
        d_W = np.zeros_like(self.W)
        d_R = np.zeros_like(self.R)
        d_bs = np.zeros_like(self.bs)

        # delta = dL/ds(t); elementwise multiply by (1 - a^2) replaces the
        # "* diag(1 - tanh^2)" of the scriptum.
        delta = d_loss.T @ self.V * (1 - a[-1] ** 2)
        # BUG FIX: iterate over the sequence length, not input_size.
        for t in reversed(range(self.x.shape[0])):
            d_bs += delta.reshape(self.bs.shape)
            d_W += delta.T @ x[t]
            if t > 0:
                d_R += delta.T @ a[t - 1]
                # Propagate delta one step back in time, reusing the stored
                # tanh activations for the derivative 1 - tanh^2.
                delta = delta @ self.R * (1 - a[t - 1] ** 2)

        self.grads = {'W': d_W, 'R': d_R, 'V': d_V, 'bs': d_bs, 'by': d_by}
        return self.grads

    def update(self, lr: float):
        """Apply one SGD step using the gradients from the last backward()."""
        # Aggregation over time was already done in backward().
        w = self.get_weights()
        for name in w.keys():
            w[name] -= lr * self.grads[name]
        # Reset internal class attributes.
        self.grads = {}
        self.y_hat, self.a = None, None

    def get_weights(self) -> Dict:
        """Return the live weight arrays keyed by name."""
        return {'W': self.W, 'R': self.R, 'V': self.V, 'bs': self.bs, 'by': self.by}

    def set_weights(self, weights: Dict):
        """Overwrite network weights from a name -> array dictionary."""
        if not all(name in weights.keys() for name in ['W', 'R', 'V']):
            raise ValueError("Missing one of 'W', 'R', 'V' keys in the weight dictionary")
        for name, w in weights.items():
            # BUG FIX: the original wrote self.__dir__["name"], which never
            # updated any attribute; assign the attributes properly.
            setattr(self, name, w)
```
<h2 style="color:rgb(0,120,170)">Numerical gradient check</h2>
To validate your implementation, especially the backward pass, use the two-sided gradient approximation given by the equation below.
```
def get_numerical_gradient(model: RNN, x: np.ndarray, eps: float=1e-7) -> Dict:
    """Two-sided numerical gradient approximation.

    Parameters
    ----------
    model : RNN
        The RNN model object
    x : np.ndarray
        Input sequence(s) of shape [sequence length, number of features]
    eps : float
        The epsilon used for numerical gradient approximation

    Returns
    -------
    A dictionary with one entry per network weight, keyed by the same names
    as the model's weight dictionary, holding the approximated gradients.
    """
    gradients = {}
    # Iterate every weight matrix and every position in it.
    for name, weight in model.get_weights().items():
        approx = np.zeros_like(weight)
        # np.ndenumerate handles both 1-D vectors and 2-D matrices, and
        # yields the original entry value so it can be restored afterwards.
        for idx, original_value in np.ndenumerate(weight):
            # Evaluate at w + eps ...
            weight[idx] = original_value + eps
            f_plus = model.forward_fast(x)
            # ... and at w - eps.
            weight[idx] = original_value - eps
            f_minus = model.forward_fast(x)
            weight[idx] = original_value  # restore the weight
            # Central difference of the summed outputs.
            approx[idx] = np.sum(f_plus - f_minus) / (2 * eps)
        gradients[name] = approx
    return gradients
def get_analytical_gradient(model: RNN, x: np.ndarray) -> Dict:
    """Helper function to get the analytical gradient via BPTT.

    Parameters
    ----------
    model : RNN
        The RNN model object
    x : np.ndarray
        Input sequence(s) of shape [sequence length, number of features]

    Returns
    -------
    A dictionary containing the analytical gradients for each weight of
    the RNN.
    """
    # The forward pass populates the intermediates that backward() needs.
    model.forward(x)
    # Backpropagate a gradient of ones, matching the summed-output loss used
    # by the numerical approximation.
    return model.backward(np.ones((model.output_size, 1)))
def gradient_check(model: RNN, x: np.ndarray, treshold: float = 1e-7):
    """Compare numerical and analytical gradients and report per weight.

    Parameters
    ----------
    model : RNN
        The RNN model object
    x : np.ndarray
        Input sequence(s) of shape [sequence length, number of features]
    treshold : float
        Maximum allowed norm of the gradient difference
    """
    numerical = get_numerical_gradient(model, x)
    analytical = get_analytical_gradient(model, x)
    for key in numerical:
        # Frobenius/Euclidean norm of the elementwise difference.
        difference = np.linalg.norm(numerical[key] - analytical[key])
        verdict = "passed" if difference < treshold else "failed"
        print(f"Gradient check for {key} {verdict} (difference {difference:.3e})")
```
<h2 style="color:rgb(0,120,170)">Compare the time for gradient computation</h2>
Finally, use the code below to investigate the benefit of being able to calculate the exact analytical gradient.
```
# Sanity-check the gradients for single- and multi-output networks.
print("Gradient check with a single output neuron:")
model = RNN(input_size=5, hidden_size=10, output_size=1)
x = np.random.rand(5, 5)
gradient_check(model, x)

print("\nGradient check with multiple output neurons:")
model = RNN(input_size=5, hidden_size=10, output_size=5)
x = np.random.rand(5, 5)
gradient_check(model, x)

# Compare the wall-clock cost of exact BPTT vs. numerical differentiation.
analytical_time = %timeit -o get_analytical_gradient(model, x)
numerical_time = %timeit -o get_numerical_gradient(model, x)
if analytical_time.average < numerical_time.average:
    fraction = numerical_time.average / analytical_time.average
    print(f"The analytical gradient computation was {fraction:.0f} times faster")
else:
    fraction = analytical_time.average / numerical_time.average
    print(f"The numerical gradient computation was {fraction:.0f} times faster")
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/marixko/Supervised_Learning_Tutorial/blob/master/The_Basics_of_Supervised_Learning_For_Astronomers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### **About Google's Colaboratory**
This is a free Jupyter environment that runs in Google's cloud, which means you can run codes in your computer without having to install anything. You can create a copy of this tutorial in your own Google's Drive and make your own changes. Colaboratory also allows you to easily share your code with others! [Read more](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)
---
# Introduction
> **Author**: Lilianne M. I. Nakazono (email: lilianne.nakazono@usp.br)
> PhD student at Instituto de Astronomia, Geofísica e Ciências Atmosféricas -- Universidade de São Paulo (IAG-USP). Bachelor's degree in Statistics (IME-USP) and in Astronomy (IAG-USP).
> **April 2019**
---
###**What is Machine Learning?**
From SAS:
>> *"Machine learning is a method of data analysis that automates analytical model building. It is a branch of artificial intelligence based on the idea that systems can learn from data, identify patterns and make decisions with minimal human intervention."*
### **What is Supervised Learning?**
From S.B. Kotsiantis (2007):
>> *"Every instance in any dataset used by machine learning algorithms is represented using the same set of features. The features may be continuous, categorical or binary. If instances are given with known labels (the corresponding correct outputs) then the learning is called *supervised*, in contrast to *unsupervised learning*, where instances are unlabeled."*
---
### **STAR/GALAXY separation**
In this tutorial we will perform a STAR/GALAXY separation using a real dataset from [S-PLUS](http://www.splus.iag.usp.br/). These data were already matched with [SDSS](https://www.sdss.org/) (DR15) spectroscopic data and will be used to train and test the supervised classifiers. The final step (not included in this tutorial) is to use the trained model to predict the classification of your unknown objects.
This tutorial will be entirely in Python 3 and we will go through the following topics:
- Introduction to `Pandas` ([Documentation](https://pandas.pydata.org/))
- Data visualization with `seaborn` ([Documentation](https://seaborn.pydata.org/))
- Classification methods with `sklearn` ([Documentation](https://scikit-learn.org/stable/index.html))
---
**Additional information about the data**
ID - Object ID Number
RA - Right Ascension in decimal degrees [J2000]
Dec - Declination in decimal degrees [J2000]
FWHM_n - Normalized Full width at half maximum to detection image seeing (pixels)
A - Profile RMS along major axis (pixels)
B - Profile RMS along minor axis (pixels)
KrRadDet - Kron apertures in units of A or B (pixels)
uJAVA_auto, F378_auto, F395_auto, F410_auto, F430_auto, g_auto, F515_auto, r_auto, F660_auto, i_auto, F861_auto, z_auto - Total-restricted magnitudes (AB) in corresponding filters
class - Spectroscopic classification from SDSS
#**1. Libraries and Functions**
```
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
import itertools
from mlxtend.plotting import plot_decision_regions
import matplotlib as mpl
import matplotlib.gridspec as gridspec
from sklearn import metrics
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
# Modified from: https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Divide each row by its total so cells become per-class fractions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate every cell; switch text colour on dark backgrounds.
    cell_fmt = '.3f' if normalize else 'd'
    threshold = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, format(cm[row, col], cell_fmt),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > threshold else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
```
#**2. Read Data**
For statistical/machine learning purposes it is **always** better to read the data in a dataframe (data structured in labels for rows and columns) format.
```
# Reading the dataset from GitHub and saving it as a dataframe.
url = 'https://raw.githubusercontent.com/marixko/'
file = 'tutorial_classifiers/master/tutorial_data.txt'
df = pd.read_csv(url+file, delim_whitespace=True, low_memory=False)

# Run this cell to quickly check your dataset.
df

# Check the header (column names).
list(df)
```
#**3. Pre-analysis**
Before applying any kind of analysis, you need to be aware of any problem in your dataset that can affect your training (e.g. missing values and outliers). Sometimes it will require pre-processing your dataset beforehand (e.g. for missing values, interpolating values or removing them from data may be necessary).
```
# You can check your dataset by using describe().
# It will return the total count, mean, standard deviation,
# minimum, Q1, Q2 (median), Q3 and maximum of each numeric column.
df.describe()
# If you want to check a specific feature use for instance:
# df.FWHM_n.describe()
```
Another good practice is to check high correlations in your dataset, which can allow you to identify which features are redundant. Thus, you can also be able to reduce dimensionality of your dataset.
>> *"The fact that many features depend on one another often unduly influences the accuracy of supervised ML classification models. This problem can be addressed by construction new features from the basic feature set."* -- S.B. Kotsiantis (2007)
(One way to deal with multicollinearity -- when 2 or more features are moderately or highly correlated -- is creating a new feature set using [Principal Component Analysis](https://en.wikipedia.org/wiki/Principal_component_analysis).)
```
# Plot the correlation matrix of the photometric/morphological features to
# spot redundant (highly correlated) features.
plt.close()
f, ax = plt.subplots(figsize=(8, 8))
var = ['FWHM_n', 'A', 'B', 'KrRadDet', 'uJAVA_auto',
       'F378_auto', 'F395_auto', 'F410_auto', 'g_auto', 'F515_auto',
       'r_auto', 'F660_auto', 'i_auto', 'F861_auto', 'z_auto']
corr = df[var].corr()
# NOTE: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the correct dtype for the (all-False) mask.
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),
            cmap=sns.diverging_palette(220, 10, as_cmap=True),
            square=True, ax=ax, center=0, vmin=-1, vmax=1)
plt.title('Correlation Matrix')
plt.show()
# It would also be interesting to check the correlation plot for each class.
```
Qualitative variables can also be included. In this case, however, there are no qualitative features that came from S-PLUS observations.
But let's check the classification label counts:
```
# For qualitative variables, use value_counts()
# (returns the number of rows per class label, sorted in descending order;
# used here to check whether the STAR/GALAXY classes are balanced).
df['class'].value_counts()
```
Note that for this example the classes are balanced. It represents a best case scenario, which rarely happens in the real world.
Be very careful with imbalanced datasets! Some methods and metrics are not good for imbalanced cases, some manipulation in your sampling method (e.g. over/under-sampling) or in your algorithm (e.g. penalized classification) may be necessary.
> **Note:** Supervised Learning is not suitable for problems like "I want to find very rare objects that we have never found before!". The learning process is based on your ground-truth samples, so you need to ask yourself "Is my ground-truth sample representative of what I want to find?"
#** 4. Feature Selection**
A very important step of the analysis is choosing your input features. Sometimes you already know which features you need to use to achieve your goals, which comes from your previous knowledge about the topic. However, you can also evaluate which features will give you the best performance. We will discuss more about it on the following sections.
For didactic purposes, let's consider two feature spaces:
> `dim15` = {all useful information from the catalog}
> `dim2` = {normalized FWHM, Profile RMS along major axis}
```
# Feature sets used throughout the tutorial:
# dim15 -- all 15 numeric catalog features (morphology + photometry).
dim15 = ['FWHM_n', 'A', 'B', 'KrRadDet', 'uJAVA_auto',
       'F378_auto', 'F395_auto', 'F410_auto', 'g_auto', 'F515_auto',
       'r_auto', 'F660_auto', 'i_auto', 'F861_auto', 'z_auto']
# dim2 -- a 2-D subset so the decision boundary can be visualised.
dim2 = ['FWHM_n','A']
```
#** 5. Sampling training and testing sets **
Regardless of the classification method you choose, you will want to estimate how accurately your predictive model will perform. This is called **cross-validation** and there are several ways to do it. Some examples are:
* **Holdout method**: randomly separate your original dataset into the training and the testing set. It's very common to adopt 1:3 ratio for the size of test/training sets, although you can choose another ratio. Very simple and fast computationally, but you need to be cautious as it is a single run method. Thus, it may be subject to large variabilities
* **Leave-p-out cross-validation**:
Uses p observations as the testing set and the remaining observations as the training set. Repeat to cover any sampling possibility
* **k-fold cross-validation**: the original dataset is randomly partitioned into k equal sized subsamples. One subsample will be used as testing set and the other k-1 as training set. Repeat k times, until each subsample is used exactly once as the testing set.
I strongly recommend that you also check the other methods before choosing one. For this tutorial we will use the **Holdout method**, for simplicity.
```
# Build the target vector: 0 = STAR, 1 = GALAXY (kept as a DataFrame so it
# can be passed directly to train_test_split and used for stratification).
label = pd.DataFrame(df['class'])
# Transform strings into numbered labels
label.loc[label['class'] == 'STAR', 'class'] = 0
label.loc[label['class'] == 'GALAXY', 'class'] = 1
# Use train_test_split() to sample your training and testing sets
# Let's fix a random_state=42 in order to have the same sets
# on each run. Stratify parameter guarantees that the original
# proportion of the classes is maintained
X_train, X_test, y_train, y_test = train_test_split(df[dim15], label,
                                                    test_size=0.3,
                                                    random_state=42,
                                                    stratify = label)
```
#** 6. Classification method: Support Vector Machine (SVM)**
We finally reached the point where we are going to run a classification algorithm. It is common to think, at first, that this would be the most complicated part, but a well-done job will require you to spend most of your time on the other steps.
There are several classification methods you can use, each of them has its own pros and cons, depending on your science goals and on your dataset. I will give you an example using Support Vector Machine (SVM) with linear kernel, but I recommend you to also check other methods (e.g. Random Forest, Logistic Regression, K-NN, ...)
**DON'T FORGET TO:**
- Learn the basic idea of the method. You don't need to know all the math behind it, but you need to know how it works intuitively
- Check what are the assumptions of the method and if your dataset is in agreement with it
- Learn what the parameters of your model (a.k.a. hyperparameters) do. Choosing them wisely can be crucial to have good results in the end. Note: the hyperparameters space can also be part of your validation tests
## 6.1. Basic idea
The SVM finds the hyperplane that best separates your data, based on maximizing the margin between each class. For instance, in one dimension SVM will find a point. For two dimensions, it will be a line. For three dimensions, it will be a plane.
To use a linear kernel, we assume that the data is linearly separable. Otherwise, we should use another kernel (e.g. polynomial).
Read more about SVM [here](https://scikit-learn.org/stable/modules/svm.html#scores-probabilities)
## 6.2. Feature space: dim2
```
# Fit a linear-kernel SVM on the 2-D feature space and evaluate it.
# Train your model:
clf2 = SVC(kernel= 'linear')
clf2.fit(X_train[dim2], y_train.values.ravel())
# Make the predictions:
y_pred2 = clf2.predict(X_test[dim2])
# Plot confusion matrix:
# (rows = true class, columns = predicted class; 0 = STAR, 1 = GALAXY)
matrix = confusion_matrix(y_test['class'], y_pred2)
fig = plot_confusion_matrix(matrix, classes=['STAR','GALAXY'])
plt.show()
```
From the confusion matrix above we can already see how good the results are: most of our stars (galaxies) are being assigned as stars (galaxies) and just a few percent were misclassified.
Now let's check the plot and how the separation looks like:
```
# Visualise the training and testing sets in the 2-D feature space together
# with the SVM decision boundary (solid) and its maximal margin (dashed).
plt.style.use('seaborn-pastel')
fig = plt.figure(figsize=(18,6))
gs = gridspec.GridSpec(1, 2)
ax = plt.subplot(gs[0,0])
sns.scatterplot(x=X_train.FWHM_n, y=X_train.A,
                hue=y_train['class'])
#Calculate margin (from https://scikit-learn.org/stable/auto_examples/svm/plot_svm_margin.html)
# The separating line is w[0]*x + w[1]*y + b = 0, rewritten as y = a*x - b/w[1].
w = clf2.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf2.intercept_[0]) / w[1]
# Margin half-width is 1/||w||; shift the line perpendicularly by +/- margin.
margin = 1 / np.sqrt(np.sum(clf2.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
#Plot margin
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.xlabel('FWHM_n')
plt.ylabel('A')
plt.xlim(0,8)
plt.ylim(0.8, 10)
plt.title('Training set')
# Right panel: the same boundary drawn over the held-out testing set.
ax = plt.subplot(gs[0,1])
sns.scatterplot(x=X_test.FWHM_n , y=X_test.A, hue=y_test['class'])
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.xlim(0,8)
plt.ylim(0.8, 10)
plt.title('Testing set')
plt.show()
```
The solid line corresponds to the optimal threshold found by SVM. The dashed lines in the plots above correspond to the maximized margin that I mentioned in Section 6.1.
These are calculated using only a small part of the data: the objects around where the separation may occur, they are called the Support Vectors. Let's check which ones were considered for this classification:
```
# Highlight the support vectors (red '+') -- the points that define the
# margin; reuses xx/yy/yy_down/yy_up computed in the previous cell.
fig = plt.figure(figsize=(9,7))
sns.scatterplot(x=X_train[dim2].FWHM_n, y=X_train[dim2].A,
                hue=y_train['class'])
plt.scatter(clf2.support_vectors_[:, 0],
            clf2.support_vectors_[:, 1], s=8,
            zorder=10,color='red', marker='+')
plt.xlim(0.9,2)
plt.ylim(0.8,5)
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.title('Support vectors (Training set)')
```
## 6.3. Feature space: dim15
In the last section we saw how SVM works in a 2D space. In that case, it is possible to visually check the separation. However, we have much more information available, and analysing all of it together can improve our results. It is then impossible to visually check the results, so we need to rely on the performance metrics that we will discuss in the next section.
```
# Fit a linear SVM on the full 15-feature space and evaluate it.
# Train your model:
clf15 = SVC(kernel= 'linear')
clf15.fit(X_train, y_train.values.ravel())
# Make predictions:
y_pred = clf15.predict(X_test)
# Plot confusion matrix:
matrix = confusion_matrix(y_test['class'], y_pred)
fig = plot_confusion_matrix(matrix, classes=['STAR','GALAXY'])
plt.show()
# Yeah, as simple as that! :)
```
#** 7. Validation and Model Selection**
How can we choose between two (or more) different models?
For that, we have several performance metrics that we can consider when selecting the best model and I will show a few of them.
The way you are going to analyze the metrics depends on your science goals. For instance:
* In a STAR/GALAXY separation you are probably not interested in a specific class, but in the overall classification. You can evaluate your model using, for example, Accuracy or F-measure
* Suppose you had a STAR/QSO problem instead, where your main goal is to find new QSOs. You can evaluate your model using, for example, Precision, Recall or F-measure.
## 7.1 Accuracy
Defined as the fraction of correct predictions.
(Note: accuracy will be biased towards the class with higher frequency, don't rely on this measurement if you have an imbalanced dataset)
```
# Overall accuracy (fraction of correct predictions) for both models.
print("Accuracy")
print(" First model (dim2):",
np.round(100*metrics.accuracy_score(y_test, y_pred2),2), '%')
print(" Second model (dim15):",
np.round(100*metrics.accuracy_score(y_test, y_pred),2), '%')
```
## 7.2. Precision
Defined as:
> Precision $\equiv \frac{TP}{(TP+FP)}$
TP - True Positive ; FP - False Positive
Note that you need to define which class will be your "positive". For example:
| STAR (predicted) | GALAXY (predicted)
--- | ---
**STAR** (true label) | True Negative | False Positive
**GALAXY** (true label)| False Negative | True Positive
In Astronomy, it's called **purity**.
```
# Precision ("purity") for the GALAXY class (pos_label=1) for both models.
P2 = metrics.precision_score(y_test, y_pred2, pos_label=1)
P = metrics.precision_score(y_test, y_pred, pos_label=1)
print("Galaxy Precision")
print(" First model (dim2):", np.round(100*P2,2), '%')
print(" Second model (dim15):", np.round(100*P,2), '%')
# Exercise: Calculate star precision for each model
```
## 7.3. Recall
Defined as:
> Recall $\equiv \frac{TP}{(TP+FN)}$
TP - True Positive ; FN - False Negative
In Astronomy, it's called **completeness**.
```
# Recall ("completeness") for the GALAXY class (pos_label=1) for both models.
R2 = metrics.recall_score(y_test, y_pred2, pos_label=1)
R = metrics.recall_score(y_test, y_pred, pos_label=1)
print("Galaxy Recall")
print(" First model (dim2):", np.round(100*R2,2), '%')
print(" Second model (dim15):", np.round(100*R,2), '%')
# Exercise: Calculate star recall for each model
```
## 7.4. F-measure
It's the harmonic mean of Precision and Recall:
$F = \Big(\frac{P_i^{-1}+R_i^{-1}}{2}\Big)^{-1} = 2 \times \frac{P_iR_i}{P_i+R_i}, \quad F \in [0,1]$
```
# F1 score (harmonic mean of precision and recall) for both models.
print("F-measure")
print(" First model (dim2):", np.round(metrics.f1_score(y_test, y_pred2),3))
print(" Second model (dim15):", np.round(metrics.f1_score(y_test, y_pred),3))
```
## Final message
We came to the end of this tutorial, yay! :)
Although it is called "Machine Learning", you are still the one who is going to make crucial decisions. And that is hard work! I hope I was able to give you at least a brief idea of all the steps involved in the process.
Now, play around with the code:
* Try other algorithms with the same feature selection and compare your results using the performance metrics
* Test changing the parameters of your model
* Try it with your own dataset!
## Read more:
[Supervised Machine Learning: A Review of Classification Techniques](https://books.google.com/books?hl=en&lr=&id=vLiTXDHr_sYC&oi=fnd&pg=PA3&dq=review+supervised+learning&ots=CYpwxt2Bnn&sig=Y79PK3w3Q8CefKaTh03keRFEwyg#v=onepage&q=review%20supervised%20learning&f=false) (S.B. Kotsiantis, 2007)
An Empirical Comparison of Supervised Learning Algorithms (Rich Caruana and Alexandru Niculescu-Mizil, 2006)
Classification of Imbalanced Data: a Review (Yanmin Sun, Andrew K. C. Wong and Mohamed S. Kamel, 2009)
[Cross-validation](https://en.wikipedia.org/wiki/Cross-validation_(statistics))
[A Practical Guide to Support Vector Classification](https://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf) (Chih-Wei Hsu, Chih-Chung Chang, and Chih-Jen Lin, 2016)
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import xarray as xr
import xesmf as xe
import json
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def make_regridder(ds, ds_base, variable, algorithm='bilinear'):
    """Build an xESMF regridder mapping *ds* onto a 0.01-degree grid.

    The target grid spans the lat/lon extent of *ds_base*, rounded outward
    to the nearest 0.1 degree. Datasets whose *variable* uses
    'latitude'/'longitude' dimension names are renamed to 'lat'/'lon' first,
    as xESMF expects. The on-disk weight file is removed before returning.
    """
    def _edges(coord):
        # Round the extent outward to 0.1 degrees, then step by 0.01.
        lo = np.floor(ds_base[coord].min().values * 10) / 10
        hi = np.ceil(ds_base[coord].max().values * 10) / 10
        return np.arange(lo, hi, 0.01)

    if 'latitude' in ds[variable].dims:
        ds = ds.rename({'latitude': 'lat', 'longitude': 'lon'}).set_coords(['lon', 'lat'])
    target_grid = xr.Dataset({'lat': (['lat'], _edges('lat')),
                              'lon': (['lon'], _edges('lon'))})
    regridder = xe.Regridder(ds, target_grid, algorithm)
    regridder.clean_weight_file()
    return regridder
```
### Create base grid using NO2 dataset
```
# Load one year of Sentinel-5P NO2 data and regrid it onto the 0.01-degree
# base grid; zeros are treated as missing and masked to NaN.
ds_s5p = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/starter_pack/s5p_no2/no2_1year.nc')
ds_no2_clouds = ds_s5p[['NO2_column_number_density', 'cloud_fraction']]
no2_regridder = make_regridder(ds_no2_clouds, ds_no2_clouds, 'NO2_column_number_density')
ds_base_regrid = no2_regridder(ds_no2_clouds)
ds_base_regrid = ds_base_regrid.where(ds_base_regrid['NO2_column_number_density']!=0.)
# Quick-look map of one time slice over Puerto Rico.
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base_regrid.NO2_column_number_density.isel(time=20).plot(ax=ax, transform=ccrs.PlateCarree());
ax.coastlines()
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
def find_boundaries(ds):
    """Print the latitude/longitude bounding box of dataset *ds*."""
    for axis, tag in (('lat', 'Lat'), ('lon', 'Lon')):
        coord = getattr(ds, axis)
        print('Min ' + tag, coord.min().values)
        print('Max ' + tag, coord.max().values)
```
### Create land mask for base grid
```
#Download Super High Resolution SST file (0.01 degree grid)
#https://coastwatch.pfeg.noaa.gov/erddap/griddap/jplG1SST.nc?SST[(2017-09-13T00:00:00Z):1:(2017-09-13T00:00:00Z)][(17.005):1:(19.005)][(-69.995):1:(-64.005)],mask[(2017-09-13T00:00:00Z):1:(2017-09-13T00:00:00Z)][(17.005):1:(19.005)][(-69.995):1:(-64.005)],analysis_error[(2017-09-13T00:00:00Z):1:(2017-09-13T00:00:00Z)][(17.005):1:(19.005)][(-69.995):1:(-64.005)]
# Sea-surface temperature is only defined over water, so cells with missing
# SST are taken to be land; this builds the land mask used by later cells.
ds_sea = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/jplG1SST_e435_8209_9395.nc')
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_sea.SST.isel(time=0).plot(ax=ax, transform=ccrs.PlateCarree());
ax.coastlines()
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
# Regrid SST onto the base grid; zeros treated as missing.
sea_regridder = make_regridder(ds_sea, ds_base_regrid, 'SST')
ds_sea_regrid = sea_regridder(ds_sea)
ds_sea_regrid = ds_sea_regrid.where(ds_sea_regrid['SST']!=0.)
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_sea_regrid.SST.isel(time=0).plot(ax=ax, transform=ccrs.PlateCarree());
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
# NaN SST (land) becomes 1; water keeps its SST value and is dropped by the
# where(==1) step, leaving a land-only mask of ones.
# NOTE(review): a water cell with SST exactly 1.0 would leak into the mask --
# presumably impossible for this region's temperatures, but confirm units.
land_ones = ds_sea_regrid.SST.isel(time=0).fillna(1)
land_mask = land_ones.where(land_ones ==1.)
# Trim everything north of 18.5N from the mask.
land_mask = land_mask.where(land_mask.lat<18.5)
land_mask = land_mask.drop('time')
# Attach as a coordinate so .where(land_mask == 1) works on the base dataset.
ds_base_regrid.coords['land_mask'] = land_mask
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
land_mask.plot(ax=ax, transform=ccrs.PlateCarree());
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base_regrid['NO2_column_number_density'].isel(time=103).where(ds_base_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
```
### Compute daily averages
```
# Collapse multiple overpasses per day into daily means; ds_base is the
# working dataset all subsequent features are attached to.
ds_base = ds_base_regrid.resample(time='1D').mean()
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base['NO2_column_number_density'].isel(time=26).where(ds_base.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
```
### Add wind speed
```
# Load one year of GFS forecasts, regrid to the base grid, derive wind speed,
# and attach daily aggregates to ds_base as features.
ds_gfs = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/starter_pack/gfs/gfs_1year.nc')
ds_gfs = ds_gfs.drop('crs')
gfs_regridder = make_regridder(ds_gfs, ds_base_regrid, 'temperature_2m_above_ground')
ds_gfs_regrid = gfs_regridder(ds_gfs)
ds_gfs_regrid = ds_gfs_regrid.where(ds_gfs_regrid['temperature_2m_above_ground']!=0.)
ds_gfs_regrid.coords['land_mask'] = land_mask
# Wind speed = magnitude of the (u, v) 10 m wind components.
ds_gfs_regrid['wind_speed'] = np.sqrt(np.square(ds_gfs_regrid.u_component_of_wind_10m_above_ground) + np.square(ds_gfs_regrid.v_component_of_wind_10m_above_ground))
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_gfs_regrid['wind_speed'].isel(time=6).where(ds_gfs_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
# Daily mean/max/min aggregates.
ds_gfs_daily_mean = ds_gfs_regrid.resample(time='1D').mean()
ds_gfs_daily_max = ds_gfs_regrid.resample(time='1D').max()
ds_gfs_daily_min = ds_gfs_regrid.resample(time='1D').min()
ds_gfs_regrid
ds_base['wind_speed_mean'] = ds_gfs_daily_mean['wind_speed']
ds_base['gfs_temp_mean'] = ds_gfs_daily_mean['temperature_2m_above_ground']
ds_base['gfs_temp_max'] = ds_gfs_daily_max['temperature_2m_above_ground']
ds_base['gfs_temp_min'] = ds_gfs_daily_min['temperature_2m_above_ground']
ds_base['gfs_humidity_mean'] = ds_gfs_daily_mean['specific_humidity_2m_above_ground']
ds_base['gfs_rain_max'] = ds_gfs_daily_max['precipitable_water_entire_atmosphere']
```
### Add in weather
```
# Load one year of GLDAS land-surface data, regrid (nearest-neighbour, since
# GLDAS is coarse), fill coastal gaps, and attach daily aggregates to ds_base.
ds_gldas = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/starter_pack/gldas/gldas_1year.nc')
ds_gldas = ds_gldas.drop('crs')
gldas_regridder = make_regridder(ds_gldas, ds_base_regrid, 'Tair_f_inst', 'nearest_s2d')
ds_gldas_regrid = gldas_regridder(ds_gldas)
ds_gldas_regrid = ds_gldas_regrid.where(ds_gldas_regrid['Tair_f_inst']!=0.)
ds_gldas_regrid.coords['land_mask'] = land_mask
# Forward/backward fill along both axes to cover cells the coarse GLDAS grid
# leaves empty near the coastline.
ds_gldas_regrid_fill = ds_gldas_regrid.ffill(dim='lat')
ds_gldas_regrid_fill = ds_gldas_regrid_fill.bfill(dim='lat')
ds_gldas_regrid_fill = ds_gldas_regrid_fill.ffill(dim='lon')
ds_gldas_regrid_fill = ds_gldas_regrid_fill.bfill(dim='lon')
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_gldas_regrid['Tair_f_inst'].isel(time=4).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
# Daily aggregates of the gap-filled fields.
ds_gldas_daily_mean = ds_gldas_regrid_fill.resample(time='1D').mean()
ds_gldas_daily_max = ds_gldas_regrid_fill.resample(time='1D').max()
ds_gldas_daily_min = ds_gldas_regrid_fill.resample(time='1D').min()
ds_base['gldas_wind_mean'] = ds_gldas_daily_mean['Wind_f_inst']
ds_base['gldas_airT_mean'] = ds_gldas_daily_mean['Tair_f_inst']
ds_base['gldas_airT_max'] = ds_gldas_daily_max['Tair_f_inst']
ds_base['gldas_airT_min'] = ds_gldas_daily_min['Tair_f_inst']
ds_base['gldas_lwdown_mean'] = ds_gldas_daily_mean['LWdown_f_tavg']
ds_base['gldas_pres_mean'] = ds_gldas_daily_mean['Psurf_f_inst']
ds_base['gldas_humidity_mean'] = ds_gldas_daily_mean['Qair_f_inst']
ds_base['gldas_heatflux_mean'] = ds_gldas_daily_mean['Qg_tavg']
ds_base['gldas_rain_max'] = ds_gldas_daily_max['Rainf_f_tavg']
ds_base['gldas_SWdown_max'] = ds_gldas_daily_max['SWdown_f_tavg']
```
### Add in night time lights
```
# Regrid VIIRS night-time lights (a time-invariant layer) onto the base grid.
ds_nightlights = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/supplementary_data/nc/VIIRS_nighttime_lights.nc')
ds_nightlights2 = ds_nightlights.drop('crs')
nl_regridder = make_regridder(ds_nightlights2, ds_base_regrid, 'avg_rad')
ds_nl_regrid = nl_regridder(ds_nightlights2)
ds_nl_regrid = ds_nl_regrid.where(ds_nl_regrid['avg_rad']!=0.)
ds_nl_regrid.coords['land_mask'] = land_mask
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_nl_regrid['avg_rad'].where(ds_nl_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ds_base['night_avg_rad'] = ds_nl_regrid['avg_rad']
ds_base
```
### Add in population
```
# Regrid GPWv4 population density onto the base grid and attach as a feature.
ds_population = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/supplementary_data/nc/GPWv411_populationdensity.nc')
ds_population = ds_population.drop('crs')
pop_regridder = make_regridder(ds_population, ds_base_regrid, 'population_density')
ds_pop_regrid = pop_regridder(ds_population)
ds_pop_regrid = ds_pop_regrid.where(ds_pop_regrid['population_density']!=0.)
ds_pop_regrid.coords['land_mask'] = land_mask
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_pop_regrid['population_density'].where(ds_pop_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ds_base['population_density'] = ds_pop_regrid['population_density']
```
### Add in landcover
```
# Regrid the GFSAD1000 landcover categories; nearest-neighbour keeps the
# categorical values intact (bilinear would blend category codes).
ds_landcover = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/supplementary_data/nc/GFSAD1000_landcover.nc')
ds_landcover = ds_landcover.drop('crs')
land_regridder = make_regridder(ds_landcover, ds_base_regrid, 'landcover_category', 'nearest_s2d')
ds_land_regrid = land_regridder(ds_landcover)
ds_land_regrid.coords['land_mask'] = land_mask
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_land_regrid['landcover_category'].where(ds_land_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ds_base['landcover_category'] = ds_land_regrid['landcover_category']
```
### Add power plants layer
```
# Load the global power plant database for Puerto Rico, snap fossil-fuel
# plants onto the base grid, and build plant/no-plant masks on ds_base.
plants = pd.read_csv('../data/starter_pack/gppd/gppd_120_pr.csv')
plants = plants[['capacity_mw', 'estimated_generation_gwh', 'primary_fuel', '.geo']]
# '.geo' holds a GeoJSON string; extract its [lon, lat] coordinate pair.
coordinates = pd.json_normalize(plants['.geo'].apply(json.loads))['coordinates']
plants[['longitude', 'latitude']] = pd.DataFrame(coordinates.values.tolist(), index= coordinates.index)
plants.drop('.geo', axis=1, inplace=True)
# Keep only fossil-fuel plants (the NO2 sources of interest).
plants_fossil = plants[plants['primary_fuel'].isin(['Oil', 'Gas', 'Coal'])].copy()
plants_fossil.reset_index(drop=True, inplace=True)
plants_fossil['grid_lon'] = np.nan
# BUG FIX: the original initialised the position columns with `np.ones` --
# the function object itself, not an array -- accidentally creating
# object-dtype columns. Use an integer placeholder instead; every row is
# overwritten by the loops below, and an integer dtype keeps the positional
# slicing further down valid.
plants_fossil['position_lon'] = 0
plants_fossil['grid_lat'] = np.nan
plants_fossil['position_lat'] = 0
# Snap each plant to the nearest base-grid longitude / latitude cell.
lons = ds_base.lon.values
a=0
for lon in plants_fossil.longitude:
    lon_diff = abs(lon-lons)
    plants_fossil.at[a,'grid_lon'] = lons[np.argmin(lon_diff)]
    plants_fossil.at[a,'position_lon'] = np.argmin(lon_diff)
    a=a+1
lats = ds_base.lat.values
a=0
for lat in plants_fossil.latitude:
    lat_diff = abs(lat-lats)
    plants_fossil.at[a,'grid_lat'] = lats[np.argmin(lat_diff)]
    plants_fossil.at[a,'position_lat'] = np.argmin(lat_diff)
    a=a+1
# Count plants per grid cell.
plants_fossil['num_plants'] = 1
plants_fossil_grid = plants_fossil[['grid_lon', 'grid_lat', 'position_lat', 'position_lon', 'num_plants']].groupby(['grid_lon', 'grid_lat', 'position_lat', 'position_lon'], as_index=False).sum()
plants_fossil.to_csv('plants_fossil.csv', index=False)
# Zero-valued DataArrays on the base grid: multiplying by the isnan field is
# only a trick to inherit the lat/lon coordinates from ds_base.
plants_mask = 0 * np.ones((ds_base.dims['lat'], ds_base.dims['lon'])) * np.isnan(ds_base.NO2_column_number_density.isel(time=0))
position_lat_id = 0 * np.ones((ds_base.dims['lat'], ds_base.dims['lon'])) * np.isnan(ds_base.NO2_column_number_density.isel(time=0))
position_lon_id = 0 * np.ones((ds_base.dims['lat'], ds_base.dims['lon'])) * np.isnan(ds_base.NO2_column_number_density.isel(time=0))
plants_mask = plants_mask.drop('time')
count=0
# Mark a window around every plant-containing cell.
# NOTE(review): slice ends are exclusive, so pos-2:pos+2 flags a 4x4 window
# (2 cells below, 1 above), not a symmetric +/-2 -- confirm this is intended.
for x in plants_fossil_grid.index:
    plants_mask[(plants_fossil_grid.at[x,'position_lat']-2):(plants_fossil_grid.at[x,'position_lat']+2),(plants_fossil_grid.at[x,'position_lon']-2):(plants_fossil_grid.at[x,'position_lon']+2)]=1
    position_lat_id[(plants_fossil_grid.at[x,'position_lat']-2):(plants_fossil_grid.at[x,'position_lat']+2),(plants_fossil_grid.at[x,'position_lon']-2):(plants_fossil_grid.at[x,'position_lon']+2)]=plants_fossil_grid.at[x,'position_lat']
    position_lon_id[(plants_fossil_grid.at[x,'position_lat']-2):(plants_fossil_grid.at[x,'position_lat']+2),(plants_fossil_grid.at[x,'position_lon']-2):(plants_fossil_grid.at[x,'position_lon']+2)]=plants_fossil_grid.at[x,'position_lon']
# Keep only the flagged cells; everything else becomes NaN.
plants_mask = plants_mask.where(plants_mask == 1.)
position_lat_id = position_lat_id.where(position_lat_id >= 1.)
position_lon_id = position_lon_id.where(position_lon_id >= 1.)
ds_base.coords['plants_mask'] = (('lat', 'lon'), plants_mask)
#ds_base.coords['plants_mask'] = ds_base.plants_mask.where(ds_base.land_mask == 1)
# Complementary mask: land cells with no plant nearby.
ds_base.coords['no_plants_mask'] = ds_base.plants_mask.fillna(0).where((ds_base.plants_mask != 1) & (ds_base.land_mask == 1))
ds_base.coords['no_plants_mask'] = ds_base.no_plants_mask + 1
ds_base.coords['position_lat_id'] = (('lat', 'lon'), position_lat_id)
ds_base.coords['position_lat_id'] = ds_base.position_lat_id.where(ds_base.position_lat_id >= 1)
ds_base.coords['position_lon_id'] = (('lat', 'lon'), position_lon_id)
ds_base.coords['position_lon_id'] = ds_base.position_lon_id.where(ds_base.position_lon_id >= 1)
# Sanity-check maps: NO2 restricted to plant cells, then to no-plant cells.
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base['NO2_column_number_density'].isel(time=0).where((land_mask==1) & (plants_mask==1)).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base['NO2_column_number_density'].isel(time=0).where(ds_base.no_plants_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
```
### Calculate Annual Average NO2
```
# Annual mean NO2 restricted to calm days (daily mean wind <= 2 m/s), so the
# signal stays near its source; also record the per-cell sample count n.
ds_base_annual = ds_base.where((ds_base.wind_speed_mean <= 2)).mean(dim=['time'])
ds_base_annual_n = ds_base.where((ds_base.wind_speed_mean <= 2)).count(dim=['time'])
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base_annual['NO2_column_number_density'].where((ds_base_annual.land_mask == 1) & (ds_base_annual.no_plants_mask ==1)).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ds_base_annual['n'] = ds_base_annual_n['NO2_column_number_density']
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base_annual_n['NO2_column_number_density'].where((ds_base_annual.land_mask == 1) & (ds_base_annual.no_plants_mask ==1)).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
# Unfiltered (all-wind) annual mean for comparison.
ds_base_annual_allwind = ds_base.mean(dim=['time'])
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base_annual_allwind['NO2_column_number_density'].where(ds_base_annual_allwind.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
# Persist both annual products for downstream analysis.
ds_base_annual.to_netcdf('annual_low_wind.nc')
ds_base_annual_allwind.to_netcdf('annual_all_wind.nc')
```
### Calculate monthly average NO2
```
# Monthly mean NO2 for days with daily mean wind <= 5 m/s.
# NOTE(review): the output filename says 'all_wind' although a wind filter is
# applied here -- confirm whether the filter or the name is the intent.
ds_base_monthly = ds_base.where((ds_base.wind_speed_mean <= 5)).resample(time='1M').mean()
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))
ds_base_monthly['NO2_column_number_density'].isel(time=6).where(ds_base_monthly.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())
ax.set_extent([-67.5, -65, 17.5, 19])
ax.set_aspect("equal")
ds_base_monthly.to_netcdf('monthly_all_wind.nc')
```
|
github_jupyter
|
# Peform statistical analyses of GNSS station locations and tropospheric zenith delays
**Author**: Simran Sangha, David Bekaert - Jet Propulsion Laboratory
This notebook provides an overview of the functionality included in the **`raiderStats.py`** program. Specifically, we outline examples on how to perform basic statistical analyses of GNSS station location and tropospheric zenith delay information over a user defined area of interest, span of time, and seasonal interval. In this notebook, we query GNSS stations spanning northern California between 2016 and 2020.
We will outline the following statistical analysis and filtering options:
- Restrict analyses to range of years
- Restrict analyses to range of months (i.e. seasonal interval)
- Illustrate station distribution and tropospheric zenith delay mean/standard deviation
- Illustrate gridded distribution and tropospheric zenith delay mean/standard deviation
- Generate variogram plots across specified time periods
- Perform basic seasonal amplitude/phase analyses
- Examine residuals between weather-models and collocated GNSS stations
<div class="alert alert-info">
<b>Terminology:</b>
- *GNSS*: Stands for Global Navigation Satellite System. Describes any satellite constellation providing global or regional positioning, navigation, and timing services.
- *tropospheric zenith delay*: The precise atmospheric delay satellite signals experience when propagating through the troposphere.
- *variogram*: Characterization of the difference between field values at two locations.
- *empirical variogram*: Provides a description of how the data are correlated with distance.
- *experimental variogram*: A discrete function calculated using a measure of variability between pairs of points at various distances
- *sill*: Limit of the variogram as the lag distance tends to infinity.
- *range*: The distance at which the difference of the variogram from the sill becomes negligible, such that the data are no longer autocorrelated.
</div>
## Table of Contents:
<a id='example_TOC'></a>
[**Overview of the raiderStats.py program**](#overview)
- [1. Basic user input options](#overview_1)
- [2. Run parameters](#overview_2)
- [3. Optional controls for spatiotemporal subsetting](#overview_3)
- [4. Supported types of individual station scatter-plots](#overview_4)
- [5. Supported types of gridded station plots](#overview_5)
- [6. Supported types of variogram plots](#overview_6)
- [7. Optional controls for plotting](#overview_7)
[**Download prerequisite GNSS station location and tropospheric zenith delay information with the raiderDownloadGNSS.py program**](#downloads)
[**Examples of the raiderStats.py program**](#examples)
- [Example 1. Generate all individual station scatter-plots, as listed under section #4](#example_1)
- [Example 2. Generate all basic gridded station plots, as listed under section #5](#example_2)
- [Example 2a. Redo plots efficiently using generated grid raster files](#example_2a)
- [Example 3. Generate gridded mean tropospheric zenith delay plot, with stations superimposed](#example_3)
- [Example 4. Generate variogram plots](#example_4)
- [Example 5. Generate seasonal phase/amplitude plots](#example_5)
- [Example 6. Generate weather model/GNSS residual plots](#example_6)
## Prep: Initial setup of the notebook
Below we set up the directory structure for this notebook exercise. In addition, we load the required modules into our python environment using the **`import`** command.
```
import os
import numpy as np
import matplotlib.pyplot as plt
## Defining the home and data directories
tutorial_home_dir = os.path.abspath(os.getcwd())
work_dir = os.path.abspath(os.getcwd())
print("Tutorial directory: ", tutorial_home_dir)
print("Work directory: ", work_dir)
# Verifying if RAiDER is installed correctly
try:
    from RAiDER import statsPlot
except ImportError as exc:
    # FIX: catch only ImportError -- a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit and mask unrelated failures. Chaining
    # with `from exc` preserves the original import error for debugging.
    raise Exception('RAiDER is missing from your PYTHONPATH') from exc
os.chdir(work_dir)
```
## Overview of the raiderStats.py program
<a id='overview'></a>
The **`raiderStats.py`** program provides a suite of convenient statistical analyses of GNSS station locations and tropospheric zenith delays.
Running **`raiderStats.py`** with the **`-h`** option will show the parameter options and outline several basic, practical examples.
Let us explore these options:
```
!raiderStats.py -h
```
### 1. Basic user input options
<a id='overview_1'></a>
#### Input CSV file (**`--file FNAME`**)
**REQUIRED** argument. Provide a valid CSV file as input through **`--file`** which lists the GNSS station IDs (ID), lat/lon coordinates (Lat,Lon), dates in YYYY-MM-DD format (Date), and the desired data field in units of meters.
Note that the complementary **`raiderDownloadGNSS.py`** format generates such a primary CSV file named **`UNRcombinedGPS_ztd.csv`** that contains all such fields and is already formatted as expected by **`raiderStats.py`**. Please refer to the accompanying **`raiderDownloadGNSS/raiderDownloadGNSS_tutorial.ipynb `** for more details and practical examples.
#### Data column name (**`--column_name COL_NAME`**)
Specify name of data column in input CSV file through **`--column_name `** that you wish to perform statistical analyses on. Input assumed to be in units of meters.
Default input column name set to **`ZTD`**, the name assigned to tropospheric zenith delays populated under the **`CombinedGPS_ztd.csv`** file generated through **`raiderDownloadGNSS.py`**
The column name is always prepended to output products (e.g. `ZTD_grid_heatmap.tif` and `ZTD_grid_heatmap.png`)
#### Data column unit (**`--unit UNIT`**)
Specify unit for output rasters/graphics through **`--unit`**. Again, input assumed to be in units of meters so it will be converted into meters if not already in meters.
### 2. Run parameters
<a id='overview_2'></a>
#### Output directory (**`--workdir WORKDIR`**)
Specify directory to deposit all outputs into with **`--workdir`**. Absolute and relative paths are both supported.
By default, outputs will be deposited into the current working directory where the program is launched.
#### Number of CPUs to be used (**`--cpus NUMCPUS`**)
Specify number of cpus to be used for multiprocessing with **`--cpus`**. For most cases, multiprocessing is essential in order to access data and perform statistical analyses within a reasonable amount of time.
May specify **`--cpus all`** at your own discretion in order to leverage all available CPUs on your system.
By default 8 CPUs will be used.
#### Verbose mode (**`--verbose`**)
Specify **`--verbose`** to print all statements through entire routine.
### 3. Optional controls for spatiotemporal subsetting
<a id='overview_3'></a>
#### Geographic bounding box (**`--bounding_box BOUNDING_BOX`**)
An area of interest may be specified as `SNWE` coordinates using the **`--bounding_box`** option. Coordinates should be specified as a space delimited string surrounded by quotes. The common intersection between the user-specified spatial bounds and the spatial bounds computed from the station locations in the input file is then passed. This example below would restrict the analysis to stations over northern California:
**`--bounding_box '36 40 -124 -119'`**
If no area of interest is specified, by default the spatial bounds computed from the station locations in the input file are passed.
#### Gridcell spacing (**`--spacing SPACING`**)
Specify degree spacing of grid-cells for statistical analyses through **`--spacing`**
By default grid-cell spacing is set to 1°. If the specified grid-cell spacing is not a multiple of the spatial bounds of the dataset, the grid-cell spacing again defaults back to 1°.
#### Subset in time (**`--timeinterval TIMEINTERVAL`**)
Define temporal bounds with **`--timeinterval TIMEINTERVAL`** by specifying earliest YYYY-MM-DD date followed by latest date YYYY-MM-DD. For example: **`--timeinterval 2018-01-01 2019-01-01`**
By default, bounds set to earliest and latest time found in input file.
#### Seasonal interval (**`--seasonalinterval SEASONALINTERVAL`**)
Define subset in time by a specific interval for each year (i.e. seasonal interval) with **`--seasonalinterval SEASONALINTERVAL`** by specifying earliest MM-DD time followed by latest MM-DD time. For example: **`--seasonalinterval '03-21 06-21'`**
### 4. Supported types of individual station scatter-plots
<a id='overview_4'></a>
#### Plot station distribution (**`--station_distribution`**)
Illustrate each individual station with black markers.
#### Plot mean tropospheric zenith delay by station (**`--station_delay_mean`**)
Illustrate the tropospheric zenith delay mean for each station with a **`hot`** colorbar.
#### Plot standard deviation of tropospheric zenith delay by station (**`--station_delay_stdev`**)
Illustrate the tropospheric zenith delay standard deviation for each station with a **`hot`** colorbar.
#### Plot phase/amplitude of tropospheric zenith delay by station (**`--station_seasonal_phase`**)
Illustrate the phase/amplitude of tropospheric zenith delay for each station with a **`hot`** colorbar.
### 5. Supported types of gridded station plots
<a id='overview_5'></a>
#### Plot gridded station heatmap (**`--grid_heatmap`**)
Illustrate heatmap of gridded station array with a **`hot`** colorbar.
#### Plot gridded mean tropospheric zenith delay (**`--grid_delay_mean`**)
Illustrate gridded tropospheric zenith delay mean with a **`hot`** colorbar.
Alternatively plot absolute gridded delay mean with the option `--grid_delay_absolute_mean`
#### Plot gridded median tropospheric zenith delay (**`--grid_delay_median`**)
Illustrate gridded tropospheric zenith delay median with a **`hot`** colorbar.
Alternatively plot absolute gridded delay median with the option `--grid_delay_absolute_median`
#### Plot gridded standard deviation of tropospheric zenith delay (**`--grid_delay_stdev`**)
Illustrate gridded tropospheric zenith delay standard deviation with a **`hot`** colorbar.
Alternatively plot absolute gridded delay standard deviation with the option `--grid_delay_absolute_stdev`
#### Plot gridded station-wise delay phase/amplitude (**`--grid_seasonal_phase`**)
Illustrate gridded station-wise zenith delay phase/amplitude with a **`hot`** colorbar.
Alternatively plot absolute gridded delay phase/amplitude with the option `--grid_seasonal_absolute_phase`
### 6. Supported types of variogram plots
<a id='overview_6'></a>
#### Plot variogram (**`--variogramplot`**)
Passing **`--variogramplot`** toggles plotting of gridded station variogram, where gridded sill and range values for the experimental variogram fits are illustrated.
#### Apply experimental fit to binned variogram (**`--binnedvariogram`**)
Pass **`--binnedvariogram`** to apply experimental variogram fit to total binned empirical variograms for each time slice.
Default is to pass the total unbinned empirical variogram.
#### Save variogram figures per time-slice (**`--variogram_per_timeslice`**)
Specify **`--variogram_per_timeslice`** to generate variogram plots per gridded station AND time-slice.
If option not toggled, then variogram plots are only generated per gridded station and spanning entire time-span.
### 7. Optional controls for plotting
<a id='overview_7'></a>
#### Save gridded array(s) as raster(s) (**`--grid_to_raster`**)
Save specified gridded array(s) as raster(s).
May directly load/plot in successive script call by passing output grid as argument for **`--file`**.
E.g. if specified with **`--grid_delay_mean`**, then a raster file named `ZTD_grid_delay_mean.tif` containing the gridded mean delay will be generated.
#### Save debug figures of station-wise seasonal fit (**`--phaseamp_per_station`**)
Save debug figures of curve-fit vs data per station for seasonal amplitude/phase analysis options (i.e. **`--grid_seasonal_phase`** and/or **`--station_seasonal_phase`**).
#### Minimum TS span and minimum fractional observations for seasonal amplitude/phase analyses (**`--min_span`**)
Minimum TS span (years) and minimum fractional observations in span (fraction) imposed for seasonal amplitude/phase analyses to be performed for a given station.
By default set to 2 years and 0.6 respectively (i.e. **`--min_span 2 0.6`**)
#### Period limit for seasonal amplitude/phase analyses (**`--period_limit`**)
Period limit (years) imposed for seasonal amplitude/phase analyses to be performed for a given station.
#### Variogram density threshold (**`--densitythreshold DENSITYTHRESHOLD`**)
For variogram plots, a given grid-cell is only valid if it contains this specified threshold of stations.
By default set to 10 stations.
#### Figure DPI (**`--figdpi FIGDPI`**)
DPI to use for saving figures.
#### Plot title (**`--user_title USER_TITLE`**)
Specify custom title for plots.
#### Plot format (**`--plot_format PLOT_FMT`**)
File format for saving plots. Default is PNG.
#### Colorbar bounds (**`--color_bounds CBOUNDS`**)
Set lower and upper-bounds for plot colorbars. For example: **`--color_bounds '0 100'`**
By default set to the dynamic range of the data.
#### Colorbar percentile limits (**`--colorpercentile COLORPERCENTILE COLORPERCENTILE`**)
Set lower and upper percentile for plot colorbars. For example: **`--colorpercentile 30 100`**
By default set to 25% and 95%, respectively.
#### Superimpose individual stations over gridded array (**`--stationsongrids`**)
In gridded plots, superimpose your gridded array with a scatterplot of station locations.
#### Draw gridlines (**`--drawgridlines`**)
In gridded plots, draw gridlines.
#### Generate all supported plots (**`--plotall`**)
Generate all supported plots, as outlined under sections #4, #5, and #6 above.
## Download prerequisite GNSS station location and tropospheric zenith delay information with the **`raiderDownloadGNSS.py`** program
<a id='downloads'></a>
Virtually access GNSS station location and zenith delay information for the years '2016,2019', for every day, at a UTC time of day 'HH:MM:SS' of '00:00:00', and across a geographic bounding box '36 40 -124 -119' spanning over Northern California.
The footprint of the specified geographic bounding box is again depicted in **Fig. 1**.
In addition to querying for multiple years, we will also experiment with using the maximum number of allowed CPUs to save some time! Recall again that the default number of CPUs used for parallelization is 8.
Note these features and similar examples are outlined in more detail in the companion notebook **`raiderDownloadGNSS/raiderDownloadGNSS_tutorial.ipynb`**
```
!raiderDownloadGNSS.py --out products --date 20160101 20191231 --returntime '00:00:00' --bounding_box '36 40 -124 -119' --cpus all
```
All of the extracted tropospheric zenith delay information stored under **`GPS_delays`** is concatenated with the GNSS station location information stored under **`gnssStationList_overbbox.csv`** into a primary comprehensive file **`UNRcombinedGPS_ztd.csv`**
**`UNRcombinedGPS_ztd.csv`** may in turn be directly used to perform basic statistical analyses using **`raiderStats.py`**.
<img src="support_docs/bbox_footprint.png" alt="footprint" width="700">
<center><b>Fig. 1</b> Footprint of geographic bounding box used in examples 1 and 2. </center>
## Examples of the **`raiderStats.py`** program
<a id='examples'></a>
### Example 1. Generate all individual station scatter-plots, as listed under [section #4](#overview_4) <a id='example_1'></a>
Using the file **`UNRcombinedGPS_ztd.csv`** generated by **`raiderDownloadGNSS.py`** as input, produce plots illustrating station distribution, mean tropospheric zenith delay by station, and standard deviation of tropospheric zenith delay by station.
Restrict the temporal span of the analyses to all data acquired between 2016-01-01 and 2020-12-31, and restrict the spatial extent to a geographic bounding box '36 40 -124 -119' spanning over Northern California.
The footprint of the specified geographic bounding box is depicted in **Fig. 1**.
These basic spatiotemporal constraints will be inherited by all successive examples.
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_distribution --station_delay_mean --station_delay_stdev
```
Now we can take a look at the generated products:
```
!ls maps_ex1/figures
```
Here we visualize the spatial distribution of stations (*ZTD_station_distribution.png*) as black markers.
<img src="support_docs/maps/maps_ex1/figures/ZTD_station_distribution.png" alt="ZTD_station_distribution" width="700">
To generate this figure alone, run:
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_distribution
```
Here we visualize the mean tropospheric zenith delay by station (*ZTD_station_delay_mean.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex1/figures/ZTD_station_delay_mean.png" alt="ZTD_station_delay_mean" width="700">
To generate this figure alone, run:
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_delay_mean
```
Here we visualize the standard deviation of tropospheric zenith delay by station (*ZTD_station_delay_stdev.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex1/figures/ZTD_station_delay_stdev.png" alt="ZTD_station_delay_stdev" width="700">
To generate this figure alone, run:
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_delay_stdev
```
### Example 2. Generate all basic gridded station plots, as listed under [section #5](#overview_5) <a id='example_2'></a>
Produce plots illustrating gridded station distribution, gridded mean tropospheric zenith delay, and gridded standard deviation of tropospheric zenith delay.
Also save gridded arrays as raster files with **`--grid_to_raster`** so as to more conveniently replot in successive script calls (recommended).
Finally, use the maximum number of allowed CPUs to save some time.
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_heatmap --grid_delay_mean --grid_delay_stdev --grid_to_raster --cpus all
```
Now we can take a look at the generated rasters (i.e. the TIF files in the specified output directory):
```
!ls maps_ex2
```
Now we can take a look at the generated plots:
```
!ls maps_ex2/figures
```
Here we visualize the heatmap of gridded station array (*ZTD_grid_heatmap.png*) with a **`hot`** colorbar.
Note that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**
<img src="support_docs/maps/maps_ex2/figures/ZTD_grid_heatmap.png" alt="ZTD_grid_heatmap" width="700">
To generate this figure alone, run:
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_heatmap --grid_to_raster
```
Here we visualize the gridded mean tropospheric zenith delay (*ZTD_grid_delay_mean.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex2/figures/ZTD_grid_delay_mean.png" alt="ZTD_grid_delay_mean" width="700">
To generate this figure alone, run:
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_delay_mean --grid_to_raster
```
Here we visualize the gridded standard deviation of tropospheric zenith delay (*ZTD_grid_delay_stdev.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex2/figures/ZTD_grid_delay_stdev.png" alt="ZTD_grid_delay_stdev" width="700">
To generate this figure alone, run:
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_delay_stdev --grid_to_raster
```
#### Example 2a. Redo plots efficiently using generated grid raster files <a id='example_2a'></a>
Since we have saved the gridded arrays as raster files by specifying the **`--grid_to_raster`** option, we may directly replot these graphics by passing a given output raster file as input (e.g. `--file ZTD_grid_heatmap.tif`).
This is a practical, efficient means to adjust/replot graphics and save a great deal of time by avoiding the gridding/prep steps involved with processing the initial input CSV file, especially for cases which span continental/global scales.
Note though that since the output rasters are static with respect to the original specified spatiotemporal constraints (e.g. `--bounding_box` and `--timeinterval`), you cannot adjust such options with the rasters as input arguments. These rasters must be re-computed for any adjusted spatiotemporal parameters (if necessary) before replotting.
For this replotting command, let us also adjust the colorbar-bounds (using the `--color_bounds` option).
```
!raiderStats.py --file maps_ex2/ZTD_grid_heatmap.tif --workdir maps_ex2a --color_bounds '10 40'
```
Here we visualize the replotted heatmap of gridded station array (*ZTD_grid_heatmap.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex2a/figures/ZTD_grid_heatmap.png" alt="ZTD_grid_heatmap" width="700">
### Example 3. Generate gridded mean tropospheric zenith delay plot, with stations superimposed <a id='example_3'></a>
Produce plot illustrating gridded mean tropospheric zenith delay, superimposed with individual station locations (`--stationsongrids`).
Additionally, subset data in time for spring. I.e. **`'03-21 06-21'`**
Finally, use the maximum number of allowed CPUs to save some time.
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex3 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --seasonalinterval '03-21 06-21' --grid_delay_mean --stationsongrids --cpus all
```
Now we can take a look at the generated plot:
```
!ls maps_ex3/figures
```
Here we visualize the gridded mean tropospheric zenith delay (*ZTD_grid_delay_mean.png*) with a **`hot`** colorbar, with superimposed station locations denoted by blue markers.
<img src="support_docs/maps/maps_ex3/figures/ZTD_grid_delay_mean.png" alt="ZTD_grid_delay_mean" width="700">
### Example 4. Generate variogram plots <a id='example_4'></a>
Produce plots illustrating empirical/experimental variogram fits per gridded station and time-slice (**`--variogram_per_timeslice`**) and also spanning the entire time-span. Plots of gridded station experimental variogram-derived sill and range values also generated.
Additionally, subset data in time for spring. I.e. **`'03-21 06-21'`**
Also save gridded arrays as raster files with **`--grid_to_raster`** so as to more conveniently replot in successive script calls (recommended).
Finally, use the maximum number of allowed CPUs to save some time.
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex4 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --seasonalinterval '03-21 06-21' --variogramplot --variogram_per_timeslice --grid_to_raster --cpus all
```
Now we can take a look at the generated variograms:
```
!ls maps_ex4/variograms
```
There are several subdirectories corresponding to each grid-cell that each contain empirical and experimental variograms generated for each time-slice (e.g. **`grid6_timeslice20160321_justEMPvariogram.eps `** and **`grid6_timeslice20160321_justEXPvariogram.eps `**, respectively) and across the entire sampled time period (**`grid6_timeslice20160321–20200621_justEMPvariogram.eps `** and **`grid6_timeslice20160321–20200621_justEXPvariogram.eps `**, respectively).
Recall that the former pair of empirical/experimental variograms per time-slice are generated only if the **`--variogram_per_timeslice`** option is toggled. By default only the latter pair of empirical/experimental variograms spanning the entire time-span are generated.
Here we visualize the total empirical variogram corresponding to the entire sampled time period for grid-cell 6 in the array (*grid6_timeslice20160321–20200621_justEMPvariogram.eps*).
<img src="support_docs/maps/maps_ex4/variograms/grid6/grid6_timeslice20160321–20190621_justEMPvariogram.png" alt="justEMPvariogram" width="700">
Here we visualize the total experimental variogram corresponding to the entire sampled time period for grid-cell 6 in the array (*grid6_timeslice20160321–20200621_justEXPvariogram.eps*).
<img src="support_docs/maps/maps_ex4/variograms/grid6/grid6_timeslice20160321–20190621_justEXPvariogram.png" alt="justEXPvariogram" width="700">
The central coordinates for all grid-nodes that satisfy the specified station density threshold (**`--densitythreshold`**, by default 10 stations per grid-cell) for variogram plots are stored in a lookup table:
```
!head maps_ex4/variograms/gridlocation_lookup.txt
```
Now we can take a look at the other generated figures:
```
!ls maps_ex4/figures
```
Here we visualize the gridded experimental variogram range (*ZTD_grid_range.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex4/figures/ZTD_grid_range.png" alt="ZTD_grid_range" width="700">
Here we visualize the gridded experimental variogram variance (*ZTD_grid_variance.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex4/figures/ZTD_grid_variance.png" alt="ZTD_grid_variance" width="700">
### Example 5. Generate seasonal phase/amplitude plots <a id='example_5'></a>
Produce plots illustrating seasonal phase/amplitude/period fits for each individual station (**`--station_seasonal_phase`**) and averaged across each grid-cell (**`--grid_seasonal_phase`**). The standard deviation is also plotted across each grid-cell.
Control the prerequisite minimum time-series span (years) a given station TS must span, and the prerequisite minimum fractional observations in span (fraction across specified `--timeinterval TIMEINTERVAL`, by default the entire span of input dataset). Here, we will specify a minimum time-series span of 3 years, and minimum fraction observation of 0.6 (i.e. **`--min_span 3 0.6`**).
Save figures of curve-fits vs data per station with **`--phaseamp_per_station`** for debugging purposes. Not recommended for large-scale runs in the interest of practical speed.
Also save gridded arrays as raster files with **`--grid_to_raster`** so as to more conveniently replot in successive script calls (recommended).
Finally, use the maximum number of allowed CPUs to save some time.
```
!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex5 --bounding_box '36 40 -124 -119' --grid_seasonal_phase --station_seasonal_phase --min_span 3 0.6 --phaseamp_per_station --grid_to_raster --cpus all
```
Now we can take a look at the generated rasters (i.e. the TIF files in the specified output directory):
```
!ls maps_ex5
```
Now we can take a look at the generated debug figures illustrating the curve-fits vs data (**`--phaseamp_per_station`**):
```
!ls maps_ex5/phaseamp_per_station
```
Here we visualize the time-series and curve-fit corresponding for one of the stations.
<img src="maps_ex5/phaseamp_per_station/stationP335.png" alt="stationCACH" width="700">
```
!ls maps_ex5/figures
```
Now we can take a look at the generated plots:
Here we visualize the seasonal phase of tropospheric zenith delay by station (*ZTD_station_seasonal_phase.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex5/figures/ZTD_station_seasonal_phase.png" alt="ZTD_station_seasonal_phase" width="700">
Here we visualize the gridded mean of the station-wise seasonal phase of tropospheric zenith delay (*ZTD_grid_seasonal_phase.png*) with a **`hot`** colorbar.
Note that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**
<img src="support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_phase.png" alt="ZTD_grid_seasonal_phase" width="700">
Here we visualize the seasonal amplitude of tropospheric zenith delay by station (*ZTD_station_seasonal_amplitude.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex5/figures/ZTD_station_seasonal_amplitude.png" alt="ZTD_station_seasonal_amplitude" width="700">
Here we visualize the gridded mean of the station-wise seasonal amplitude of tropospheric zenith delay (*ZTD_grid_seasonal_amplitude.png*) with a **`hot`** colorbar.
Note that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**
<img src="support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_amplitude.png" alt="ZTD_grid_seasonal_amplitude" width="700">
Here we visualize the seasonal period of tropospheric zenith delay by station (*ZTD_station_delay_period.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex5/figures/ZTD_station_delay_period.png" alt="ZTD_station_delay_period" width="700">
Here we visualize the gridded period of the station-wise seasonal period of tropospheric zenith delay (*ZTD_grid_seasonal_period.png*) with a **`hot`** colorbar.
Note that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**
<img src="support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_period.png" alt="ZTD_grid_seasonal_period" width="700">
Here we visualize the gridded period standard deviation of the station-wise seasonal phase of tropospheric zenith delay (*ZTD_grid_seasonal_period_stdev.png*) with a **`hot`** colorbar.
Note that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**
<img src="support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_period_stdev.png" alt="ZTD_grid_seasonal_period_stdev" width="700">
### Example 6. Generate weather model/GNSS residual plots <a id='example_6'></a>
Produce plots illustrating the residual between tropospheric zenith delay at specified GNSS stations and collocated weather-model delay nodes.
GNSS data will again be downloaded with **`raiderDownloadGNSS.py`**, and GMAO weather-model derived delay will be computed with **`raiderDelay.py`**
Virtually access GNSS station location and zenith delay information for the year '2019', for every 12 days, at a UTC time of day 'HH:MM:SS' of '00:00:00', and across a geographic bounding box '36 40 -124 -119' spanning over Northern California.
The footprint of the specified geographic bounding box is again depicted in **Fig. 1**.
In addition to querying for multiple years, we will also experiment with using the maximum number of allowed CPUs to save some time! Recall again that the default number of CPUs used for parallelization is 8.
Note these features and similar examples are outlined in more detail in the companion notebook **`raiderDownloadGNSS/raiderDownloadGNSS_tutorial.ipynb`**
```
!raiderDownloadGNSS.py --out GNSS_2019 --date 20190101 20191231 12 --returntime '00:00:00' --bounding_box '36 40 -124 -119' --cpus 12
```
Compute tropospheric zenith delay from the GMAO weather-model for the year '2019', for every 12 days, at a UTC time of day 'HH:MM:SS' of '00:00:00', at stations located across a geographic bounding box '36 40 -124 -119' spanning over Northern California and captured in the `GNSS_2019/gnssStationList_overbbox.csv` list generated by the `raiderDownloadGNSS.py` above (i.e. `--station_file GNSS_2019/gnssStationList_overbbox.csv`), and with an integration height limit `--zref` of 30,000 m.
The footprint of the specified geographic bounding box is again depicted in **Fig. 1**.
```
!mkdir GMAO_2019
!cd GMAO_2019
!raiderDelay.py --model GMAO --date 20190101 20191231 12 --time 00:00 --station_file ../GNSS_2019/gnssStationList_overbbox.csv --zref 30000 -v
!cd ../
```
Combine delay files derived above from the GMAO weather-model (`--raider 'GMAO_2019/GMAO_Delay_*.csv' --raiderDir GMAO_2019 --raider_column totalDelay`) and GNSS stations (`GNSS_2019/UNRcombinedGPS_ztd.csv --column ZTD`) respectively, passing only data which are collocated in space and time.
```
!raiderCombine.py --gnss GNSS_2019/UNRcombinedGPS_ztd.csv --column ZTD --raider 'GMAO_2019/GMAO_Delay_*.csv' --raiderDir GMAO_2019 --raider_column totalDelay --out Combined_delays_GNSSandGMAO_2019.csv
```
Using the file **`Combined_delays_GNSSandGMAO_2019.csv`** generated by **`raiderCombine.py`** as input and passing the weather-model/GNSS residual values (`--column_name ZTD_minus_RAiDER`), produce plots illustrating mean tropospheric zenith delay by station + across each grid-cell, and standard deviation of tropospheric zenith delay by station + across each grid-cell.
```
!raiderStats.py --file Combined_delays_GNSSandGMAO_2019.csv --column_name ZTD_minus_RAiDER --workdir maps_ex6 --bounding_box '36 40 -124 -119' --station_delay_mean --station_delay_stdev --grid_delay_mean --grid_delay_stdev --grid_to_raster --cpus all
```
Here we visualize the mean tropospheric zenith delay by station (*ZTD_station_delay_mean.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_station_delay_mean.png" alt="ZTD_minus_RAiDER_station_delay_mean" width="700">
Here we visualize the standard deviation of tropospheric zenith delay by station (*ZTD_minus_RAiDER_station_delay_stdev.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_station_delay_stdev.png" alt="ZTD_minus_RAiDER_station_delay_stdev" width="700">
Here we visualize the gridded mean tropospheric zenith delay (*ZTD_minus_RAiDER_grid_delay_mean.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_grid_delay_mean.png" alt="ZTD_minus_RAiDER_grid_delay_mean" width="700">
Here we visualize the gridded standard deviation of tropospheric zenith delay (*ZTD_minus_RAiDER_grid_delay_.png*) with a **`hot`** colorbar.
<img src="support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_grid_delay_stdev.png" alt="ZTD_minus_RAiDER_grid_delay_stdev" width="700">
|
github_jupyter
|
<a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/smc_logreg_tempering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#SMC for logistic regression
We compare data tempering (IBIS) with temperature tempering.
Code is from
https://github.com/nchopin/particles/blob/master/book/smc_samplers/logistic_reg.py
```
!git clone https://github.com/nchopin/particles.git
%cd /content/particles
!pip install --user .
import particles
import particles.state_space_models as ssm
import particles.distributions as dists
"""
Numerical experiment of Chapter 17 (SMC samplers).
Compare IBIS and SMC tempering for approximating:
* the normalising constant (marginal likelihood)
* the posterior expectation of the p coefficients
for a logistic regression model.
See below for how to select the data-set.
Note: the SMC samplers implemented in module smc_samplers are now "waste-free"
by default, see Dau & Chopin (2021), and the documentation of `smc_samplers`
(plus the corresponding jupyter notebook). This script still performs exactly
the same numerical experiments as in the book, based on standard (non
waste-free) SMC samplers. To do so, we added ``wastefree=False`` to the
definition of the corresponding `Feynman-Kac` object. Again, see the
documentation of `smc_samplers` for more details.
"""
from matplotlib import pyplot as plt
import numpy as np
from numpy import random
import seaborn as sb
import particles
from particles import datasets as dts
from particles import distributions as dists
from particles import resampling as rs
from particles import smc_samplers as ssps
from particles.collectors import Moments
datasets = {'pima': dts.Pima, 'eeg': dts.Eeg, 'sonar': dts.Sonar}
dataset_name = 'eeg' # choose one of the three
data = datasets[dataset_name]().data
T, p = data.shape
# for each dataset, we adapt:
# * N: number of particles
# * Ks = list of Ks (nr MCMC steps)
# * typK: value of M used for plots on "typical" run
if dataset_name == 'sonar':
N = 10 ** 4
Ks = [10, 20, 30, 40, 50, 60]
typK = 50
elif dataset_name == 'pima':
N = 10 ** 3
Ks = [1, 3, 5]
typK = 3
elif dataset_name == 'eeg':
N = 10 ** 3
#Ks = [1, 3, 5, 7, 10, 15, 20]
Ks = [1, 3, 5]
typK = 5
# prior & model
prior = dists.StructDist({'beta':dists.MvNormal(scale=5.,
cov=np.eye(p))})
class LogisticRegression(ssps.StaticModel):
    """Static model whose t-th likelihood factor is the logistic term for
    observation t of the module-level ``data`` matrix."""
    def logpyt(self, theta, t):
        # Linear predictor beta . x_t for every particle in theta.
        scores = np.dot(theta['beta'], data[t, :])
        # log sigmoid(scores), computed stably via logaddexp.
        return -np.logaddexp(0., -scores)
# algorithms
# N and values of K set above according to dataset
ESSrmin = 0.5
nruns = 2 # 16
results = []
# runs
print('Dataset: %s' % dataset_name)
for K in Ks:
for i in range(nruns):
# need to shuffle the data for IBIS
random.shuffle(data)
model = LogisticRegression(data=data, prior=prior)
for alg_type in ['tempering', 'ibis']:
if alg_type=='ibis':
fk = ssps.IBIS(model=model, wastefree=False, len_chain=K + 1)
pf = particles.SMC(N=N, fk=fk, ESSrmin=ESSrmin,
collect=[Moments], verbose=False)
else:
fk = ssps.AdaptiveTempering(model=model, ESSrmin=ESSrmin,
wastefree=False, len_chain = K + 1)
pf = particles.SMC(N=N, fk=fk, ESSrmin=1., collect=[Moments],
verbose=True)
# must resample at every time step when doing adaptive
# tempering
print('%s, K=%i, run %i' % (alg_type, K, i))
pf.run()
print('CPU time (min): %.2f' % (pf.cpu_time / 60))
print('loglik: %f' % pf.logLt)
res = {'K': K, 'type': alg_type, 'out': pf.summaries,
'cpu': pf.cpu_time}
if alg_type=='ibis':
n_eval = N * (T + K * sum([t for t in range(T) if
pf.summaries.rs_flags[t]]))
else:
n_eval = N * T * (1. + K * (len(pf.summaries.ESSs) - 1))
res['path_sampling'] = pf.X.shared['path_sampling'][-1]
res['exponents'] = pf.X.shared['exponents']
res['n_eval'] = n_eval
results.append(res)
# plots
#######
savefigs = True # do you want to save figures as pdfs
plt.style.use('ggplot')
pal = sb.dark_palette('white', n_colors=2)
# Compare standard and path sampling estimates of the log-normalising cst
plt.figure()
diff_est = [(r['out'].logLts[-1] - r['path_sampling'])
for r in results if r['type']=='tempering']
sb.histplot(diff_est)
# Figure 17.1: typical behaviour of IBIS
typ_ibis = [r for r in results if r['type']=='ibis' and r['K'] == typK][0]
typ_ess = typ_ibis['out'].ESSs
typ_rs_times = np.nonzero(typ_ibis['out'].rs_flags)[0]
# Left panel: evolution of ESS
fig, ax = plt.subplots()
ax.plot(typ_ess, 'k')
ax.set(xlabel=r'$t$', ylabel='ESS')
if savefigs:
plt.savefig(dataset_name + '_typical_ibis_ess.pdf')
plt.savefig(dataset_name + '_typical_ibis_ess.png')
# Right panel: evolution of resampling times
fig, ax = plt.subplots()
ax.plot(typ_rs_times[:-1], np.diff(typ_rs_times), 'ko-')
ax.set(xlabel=r'$t$', ylabel='duration between successive rs')
if savefigs:
plt.savefig(dataset_name + '_typical_ibis_rs_times.pdf')
plt.savefig(dataset_name + '_typical_ibis_rs_times.png')
# Figure 17.2: evolution of temperature in a typical tempering run
typ_temp = [r for r in results if r['type']=='tempering' and r['K'] == typK][0]
expnts = typ_temp['exponents']
plt.figure()
plt.plot(expnts, 'k')
plt.xlabel(r'$t$')
plt.ylabel('tempering exponent')
if savefigs:
plt.savefig(dataset_name + '_typical_tempering_temperatures.pdf')
plt.savefig(dataset_name + '_typical_tempering_temperatures.png')
# nr evals vs K for both algorithms
plt.figure()
sb.boxplot(x=[r['K'] for r in results],
y=[r['n_eval'] for r in results],
hue=[r['type'] for r in results])
plt.xlabel('number MCMC steps')
plt.ylabel('number likelihood evaluations')
if savefigs:
plt.savefig(dataset_name + '_boxplots_nevals_vs_K.pdf')
plt.savefig(dataset_name + '_boxplots_nevals_vs_K.png')
print(type(results))
print(results[0])
for r in results:
print(r['type'], 'K=', r['K'], 'time=', r['cpu'])
# Figure 17.3: Box-plots estimate versus number of MCMC steps
# Left panel: marginal likelihood
plt.figure()
sb.boxplot(x=[r['K'] for r in results],
y=[r['out'].logLts[-1] for r in results],
hue=[r['type'] for r in results])
plt.xlabel('number MCMC steps')
plt.ylabel('marginal likelihood')
if savefigs:
plt.savefig(dataset_name + '_boxplots_marglik_vs_K.pdf')
plt.savefig(dataset_name + '_boxplots_marglik_vs_K.png')
# Right panel: post expectation 1st pred
plt.figure()
sb.boxplot(x=[r['K'] for r in results],
y=[r['out'].moments[-1]['mean']['beta'][1] for r in results],
hue=[r['type'] for r in results])
plt.xlabel('number MCMC steps')
plt.ylabel('posterior expectation first predictor')
if savefigs:
plt.savefig(dataset_name + '_boxplots_postexp1_vs_K.pdf')
plt.savefig(dataset_name + '_boxplots_postexp1_vs_K.png')
# Figure 17.4: variance vs CPU trade-off
# variance times K, as a function of K
plt.figure()
#cols = {'ibis': 'gray', 'tempering':'black'}
cols = {'ibis': 'blue', 'tempering':'red'}
lsts = {'ibis': '--', 'tempering': '-'}
for i in range(p):
for alg_type in ['ibis', 'tempering']:
adj_var = []
for K in Ks:
mts = [r['out'].moments[-1]
for r in results if r['K']==K and r['type']==alg_type]
av = (K * np.var([m['mean']['beta'][i] for m in mts]) /
np.mean([m['var']['beta'][i] for m in mts]))
adj_var.append(av)
if i==0:
plt.plot(Ks, adj_var, color=cols[alg_type], label=alg_type,
alpha=.8, linewidth=2, linestyle=lsts[alg_type])
else:
plt.plot(Ks, adj_var, color=cols[alg_type], alpha=.8, linewidth=2,
linestyle=lsts[alg_type])
plt.legend()
plt.xticks(Ks, ['%i' % K for K in Ks]) # force int ticks
plt.xlabel('number MCMC steps')
plt.ylabel(r'variance times number MCMC steps')
if savefigs:
plt.savefig(dataset_name + '_postexp_var_vs_K.pdf')
plt.savefig(dataset_name + '_postexp_var_vs_K.png')
!ls *.png
!mkdir figures
!mv *.png figures
!mv *.pdf figures
!ls
!zip -r figures figures
```
|
github_jupyter
|
* basic roberta ft: 0.6589791487657798 (thr 0.3)
* basic roberta ft (head first): 0.6768011808573329 (thr 0.42)
* fine tune roberta on weird clf, then only head on spans, then whole: 0.6853127403287083 (thr 0.32)
*
```
from transformers import RobertaTokenizer, RobertaForTokenClassification
from transformers import BertTokenizer, BertForTokenClassification
from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
import numpy as np
import pandas as pd
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
device = torch.device('cuda:0')
model_name = 'roberta-base' #roberta-base
tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForTokenClassification.from_pretrained(model_name)
```
```
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
outputs = model(**inputs, labels=labels)
```
# Create labels for tagging
```
import os
import numpy as np
import pandas as pd
from ast import literal_eval
import re
import nltk
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
path = 'data/'
trial = pd.read_csv(path + 'tsd_trial.csv')
train = pd.read_csv(path + 'tsd_train.csv')
# final_test = pd.read_csv(path + 'tsd_test.csv')
final_test = pd.read_csv(path + 'tsd_test_gt.csv')
train['spans'] = train.spans.apply(literal_eval)
trial['spans'] = trial.spans.apply(literal_eval)
final_test['spans'] = final_test.spans.apply(literal_eval)
trial.shape, train.shape, final_test.shape
print(len(set(trial.text).intersection(set(train.text))))
print(len(set(final_test.text).intersection(set(train.text))))
print((train.spans.apply(len) == 0).mean())
print((trial.spans.apply(len) == 0).mean())
import spans_utils
from importlib import reload
reload(spans_utils)
from spans_utils import display_spans, spans2labels, labels2spans
display_spans(trial.spans[0], trial.text[0])
display_spans(trial.spans[0], trial.text[0])
from tqdm.auto import tqdm, trange
n = 0
for row in tqdm([row for i, row in trial.iterrows()]):
break
labels = spans2labels(row.text, row.spans, tokenizer)
spans2 = labels2spans(row.text, labels, tokenizer)
if row.spans != spans2:
t = row.text.replace(' ', '+')
display_spans(row.spans, t)
display_spans(spans2, t)
n += 1
print(n)
train_labels = [spans2labels(row.text, row.spans, tokenizer) for i, row in tqdm(train.iterrows())]
trial_labels = [spans2labels(row.text, row.spans, tokenizer) for i, row in tqdm(trial.iterrows())]
train['labels'] = train_labels
trial['labels'] = trial_labels
class SpansDataset(torch.utils.data.Dataset):
    """Wrap tokenizer output (and optional per-example labels) as a torch Dataset."""

    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        sample = {name: values[idx] for name, values in self.encodings.items()}
        if self.labels is not None:
            sample['labels'] = self.labels[idx]
        return sample

    def __len__(self):
        return len(self.encodings['input_ids'])
train_dataset = SpansDataset(tokenizer(train.text.tolist()), train_labels)
eval_dataset = SpansDataset(tokenizer(trial.text.tolist()), trial_labels)
final_test_dataset = SpansDataset(tokenizer(final_test.text.tolist()))
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer, padding=True)
import numpy as np
from semeval2021 import f1
```
### Dataset for classification
```
import pandas as pd
df1 = pd.read_csv('../data/train/train.1.tsv', sep='\t')
df0 = pd.read_csv('../data/train/train_small.0.tsv', sep='\t')
df01 = pd.concat([df1, df0], ignore_index=True)
df01.label = df01.label.astype(int)
print(df01.shape)
df01.sample(3)
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df01, test_size=0.1, random_state=1)
df_train.head(10)
class SpansDataset(torch.utils.data.Dataset):
    """Torch dataset over pre-tokenized encodings, with optional labels."""

    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = dict((key, seq[idx]) for key, seq in self.encodings.items())
        if self.labels is None:
            return item
        item['labels'] = self.labels[idx]
        return item

    def __len__(self):
        return len(self.encodings['input_ids'])
clf_train_dataset = SpansDataset(
tokenizer(df_train.comment_text.tolist(), truncation=True),
df_train.label.tolist()
)
clf_test_dataset = SpansDataset(
tokenizer(df_test.comment_text.tolist(), truncation=True),
df_test.label.tolist()
)
clf_test_small_dataset = SpansDataset(
tokenizer(df_test.comment_text.iloc[:3000].tolist(), truncation=True),
df_test.label[:3000].tolist()
)
```
# Train a single-task model
https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb
https://huggingface.co/transformers/custom_datasets.html
```
from transformers import Trainer, TrainingArguments, EarlyStoppingCallback
from transformers.file_utils import cached_property
from typing import Tuple
class TrAr(TrainingArguments):
    # TrainingArguments normally probes the hardware itself; this subclass
    # pins computation to the module-level `device` instead.
    @cached_property
    def _setup_devices(self):
        # NOTE(review): assumes `device` is defined at module level — confirm
        # before reusing this class outside the notebook.
        return device
torch.cuda.set_device(device)
model = AutoModelForTokenClassification.from_pretrained(model_name)
model.to(device);
for param in model.roberta.parameters():
param.requires_grad = False
training_args = TrAr(
output_dir='./models2/roberta_single', # output directory
overwrite_output_dir=True,
num_train_epochs=10, # total # of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=8, # batch size for evaluation
warmup_steps=3000, # number of warmup steps for learning rate scheduler
weight_decay=1e-8, # strength of weight decay
learning_rate=1e-3,
logging_dir='./logs', # directory for storing logs
logging_steps=100,
eval_steps=100,
evaluation_strategy='steps',
save_total_limit=1,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
data_collator=data_collator,
tokenizer=tokenizer,
callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]
)
trainer.train()
for param in model.parameters():
param.requires_grad = True
training_args = TrAr(
output_dir='./models2/roberta_single', # output directory
overwrite_output_dir=True,
num_train_epochs=10, # total # of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=8, # batch size for evaluation
warmup_steps=3000, # number of warmup steps for learning rate scheduler
weight_decay=1e-8, # strength of weight decay
learning_rate=1e-5,
logging_dir='./logs', # directory for storing logs
logging_steps=500,
eval_steps=500,
evaluation_strategy='steps',
save_total_limit=1,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
data_collator=data_collator,
tokenizer=tokenizer,
callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]
)
```
* The minimal loss of a single-task model (full) was about 28% on validation with 0.04 on train.
* If we first train only head (batch 8, lr 1e-3 with 3K warmup and 1e-8 decline), we get minimal loss of 0.185 on validation with 0.23 on train
* Training then the whole model (batch 8, lr 1e-5 with 3K warmup and 1e-8 decline) we get minimal loss of 0.175 on validation with 0.21 on train
```
trainer.train()
model.save_pretrained('./models2/roberta_single')
trainer.evaluate()
```
### evaluate
```
pred = trainer.predict(eval_dataset)
for threshold in [0, 0.01, 0.03, 0.1, 0.3, 0.4, 0.5, 0.6, 0.7, 1]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))
for threshold in [0.3, 0.32, 0.35, 0.38, 0.4, 0.42, 0.45, 0.5, 0.55, 0.6]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))
```
## Prepare a submission
```
pred = trainer.predict(final_test_dataset)
threshold = 0.4
preds = []
for text, pr in zip(final_test.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
row = final_test.sample(1).iloc[0]
display_spans(preds[row.name], row.text)
```
65.31%
```
print(np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)]))
```
# WM Classifier + tagging
```
from transformers import RobertaTokenizer, RobertaForTokenClassification, RobertaForSequenceClassification
from transformers import BertTokenizer, BertForTokenClassification
from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
from transformers.models.roberta.modeling_roberta import RobertaModel
from transformers.modeling_outputs import SequenceClassifierOutput
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
class WMean(nn.Module):
    """Learnable power-mean pooling head.

    Pools values along ``dim`` via ``coef[1] * log(mean(x ** pow)) + coef[0]``,
    where ``pow`` and ``coef`` are trainable scalars.  With the default
    ``dim=-2`` and input of shape (batch, seq, classes) this averages over the
    token axis, yielding one value per class.
    """
    def __init__(self, dim=-2):
        super(WMean, self).__init__()
        # Exponent of the power mean (1.0 = plain arithmetic mean).
        self.pow = torch.nn.Parameter(data=torch.Tensor([1.0]), requires_grad=True)
        # Affine transform of the log-pooled value: coef[1] * log(mp) + coef[0].
        self.coef = torch.nn.Parameter(data=torch.Tensor([0.0, 1.0]), requires_grad=True)
        # Axis to pool over.
        self.dim = dim
    def forward(self, x, mask=None):
        result = x ** self.pow[0]
        if mask is None:
            # Bug fix: pool over the configured axis.  The original used
            # ``dim=-1`` here, averaging over the class axis, while the masked
            # branch below pools over ``self.dim`` — the two paths disagreed.
            mp = result.mean(dim=self.dim)
        else:
            # Masked mean over the configured axis (mask must broadcast to x).
            mp = (result * mask).sum(dim=self.dim) / mask.sum(dim=self.dim)
        return torch.log(mp) * self.coef[1] + self.coef[0]
class RobertaTaggerClassifier(RobertaForTokenClassification):
    """RoBERTa token classifier pooled into a sequence classifier.

    Per-token logits are softmaxed and aggregated over the token axis by a
    trainable WMean head, producing sequence-level "logits" trained with an
    ordinary classification (or regression) loss.
    """
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # add_pooling_layer=False: pooling is done by the WMean head below.
        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.wmean = WMean()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the tagger body, pool token probabilities with WMean, and
        optionally compute a sequence-level loss against ``labels``."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        # Per-token logits of shape (batch, seq, num_labels).
        token_logits = self.classifier(sequence_output)
        if attention_mask is not None:
            # Replicate the mask across the label axis so WMean ignores padding.
            # NOTE(review): repeat(1, 1, 2) hard-codes num_labels == 2.
            masks = attention_mask.unsqueeze(-1).repeat(1, 1, 2)
        else:
            masks = None
        # Pool token-level class probabilities into sequence-level logits.
        logits = self.wmean(torch.softmax(token_logits, dim=-1), mask=masks)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            # Tuple output convention: (loss?, logits, extras...).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
model = RobertaTaggerClassifier.from_pretrained('roberta-base')
tokenizer = AutoTokenizer.from_pretrained('roberta-base')
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
o = model(**inputs)
o
#device = torch.device('cuda:3')
from transformers import Trainer, TrainingArguments
from transformers.file_utils import cached_property
from typing import Tuple
class TrAr(TrainingArguments):
    # Pin Trainer computation to the module-level `device` instead of letting
    # TrainingArguments probe the hardware itself.
    @cached_property
    def _setup_devices(self):
        # NOTE(review): assumes `device` is defined at module level — confirm.
        return device
```
The strategy: first tune the head only with large batches and LR, then tune the whole model.
Head-only stops at loss 0.4185, full model - at loss 0.302685
```
for param in model.roberta.parameters():
param.requires_grad = False
NEW_MODEL_NAME = './models2/roberta_clf_wm'
training_args = TrAr(
output_dir=NEW_MODEL_NAME, # output directory
overwrite_output_dir=True,
num_train_epochs=10, # total # of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=64, # batch size for evaluation
warmup_steps=3000, # number of warmup steps for learning rate scheduler
weight_decay=1e-8, # strength of weight decay
learning_rate=1e-3,
logging_dir='./logs', # directory for storing logs
logging_steps=100,
eval_steps=500,
evaluation_strategy='steps',
save_total_limit=1,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=clf_train_dataset, # training dataset
eval_dataset=clf_test_small_dataset, # evaluation dataset
#data_collator=data_collator,
tokenizer=tokenizer,
callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]
)
trainer.train();
for param in model.parameters():
param.requires_grad = True
training_args = TrAr(
output_dir=NEW_MODEL_NAME, # output directory
overwrite_output_dir=True,
num_train_epochs=10, # total # of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=64, # batch size for evaluation
warmup_steps=3000, # number of warmup steps for learning rate scheduler
weight_decay=1e-8, # strength of weight decay
learning_rate=1e-5,
logging_dir='./logs', # directory for storing logs
logging_steps=500,
eval_steps=500,
evaluation_strategy='steps',
save_total_limit=1,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=clf_train_dataset, # training dataset
eval_dataset=clf_test_small_dataset, # evaluation dataset
#data_collator=data_collator,
tokenizer=tokenizer,
callbacks=[EarlyStoppingCallback(early_stopping_patience=10, early_stopping_threshold=0)]
)
import gc
gc.collect()
torch.cuda.empty_cache()
trainer.train()
print(model.wmean.pow)
print(model.wmean.coef)
model.save_pretrained(NEW_MODEL_NAME)
```
# Fine tune the averager classifier
```
model = AutoModelForTokenClassification.from_pretrained('./models2/roberta_clf_wm')
NEW_MODEL_NAME = './models2/roberta_clf_wm_ft'
for param in model.roberta.parameters():
param.requires_grad = False
training_args = TrAr(
output_dir=NEW_MODEL_NAME, # output directory
overwrite_output_dir=True,
num_train_epochs=10, # total # of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=64, # batch size for evaluation
warmup_steps=3000, # number of warmup steps for learning rate scheduler
weight_decay=1e-8, # strength of weight decay
learning_rate=1e-3,
logging_dir='./logs', # directory for storing logs
logging_steps=100,
eval_steps=500,
evaluation_strategy='steps',
save_total_limit=1,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
data_collator=data_collator,
tokenizer=tokenizer,
callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]
)
trainer.train()
```
* the raw quasi-classifier: no use in the model at all
* fine tuned head: still no use, the best score is 0.2138
* fine tune whole model: 0.3 0.6849391042415774
```
for param in model.parameters():
param.requires_grad = True
training_args = TrAr(
output_dir=NEW_MODEL_NAME, # output directory
overwrite_output_dir=True,
num_train_epochs=10, # total # of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=8, # batch size for evaluation
warmup_steps=3000, # number of warmup steps for learning rate scheduler
weight_decay=1e-8, # strength of weight decay
learning_rate=1e-5,
logging_dir='./logs', # directory for storing logs
logging_steps=500,
eval_steps=500,
evaluation_strategy='steps',
save_total_limit=1,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
data_collator=data_collator,
tokenizer=tokenizer,
callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]
)
trainer.train()
```
* The minimal loss of a single-task model (full) was about 28% on validation with 0.04 on train.
* If we first train only head (batch 8, lr 1e-3 with 3K warmup and 1e-8 decline), we get minimal loss of 0.185 on validation with 0.23 on train
* Training then the whole model (batch 8, lr 1e-5 with 3K warmup and 1e-8 decline) we get minimal loss of 0.175 on validation with 0.21 on train
```
trainer.train()
NEW_MODEL_NAME
model.save_pretrained(NEW_MODEL_NAME)
pred = trainer.predict(eval_dataset)
for threshold in [0, 0.01, 0.03, 0.1, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7, 1]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))
for threshold in [ 0.25, 0.28, 0.3, 0.32, 0.35]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))
pred = trainer.predict(final_test_dataset)
threshold = 0.4
preds = []
for text, pr in zip(final_test.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
print(len(preds))
print(np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)]))
```
# Try to reproduce the score of an ordinary classifier fine tuned as tagger
```
* roberta_clf_proba - roberta classifier with wm head
* roberta_clf_ft_plus_pseudolabels - roberta_clf_ft + pseudolabels fine-tuning on data/train/train.1.tsv
* roberta_clf - preliminary form of roberta_clf_proba
* roberta_clf_ft - roberta_clf_proba + tagger fine-tuning
* roberta_selflabel - preliminary form of roberta_clf_ft_plus_pseudolabels
* roberta_selflabel_final - preliminary form of roberta_clf_ft_plus_pseudolabels
* roberta_single_v2 - just roberta tagger
* roberta_single - just roberta tagger, first version
* roberta_clf_2 - roberta classic classifier
* roberta_ft_v2 - roberta_clf_2 + tagger fine-tuning
```
#### roberta_ft_v2
```
model = RobertaForTokenClassification.from_pretrained('models/roberta_ft_v2')
model.to(device);
training_args = TrAr(
output_dir='tmp',
per_device_eval_batch_size=8,
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
tokenizer=tokenizer,
)
pred = trainer.predict(eval_dataset)
for threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])
print(threshold, score)
pred = trainer.predict(final_test_dataset)
scores = []
for threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(final_test.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])
print(threshold, score)
scores.append(score)
scores_standard_clf = scores
```
#### roberta_clf_ft
```
model = RobertaForTokenClassification.from_pretrained('models/roberta_clf_ft')
model.to(device);
training_args = TrAr(
output_dir='tmp',
per_device_eval_batch_size=8,
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
tokenizer=tokenizer,
)
pred = trainer.predict(eval_dataset)
for threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])
print(threshold, score)
pred = trainer.predict(final_test_dataset)
scores = []
for threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(final_test.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])
print(threshold, score)
scores.append(score)
scores_tagging_clf = scores
```
#### roberta_clf_ft_plus_pseudolabels
```
model = RobertaForTokenClassification.from_pretrained('models/roberta_clf_ft_plus_pseudolabels')
model.to(device);
training_args = TrAr(
output_dir='tmp',
per_device_eval_batch_size=8,
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
tokenizer=tokenizer,
)
pred = trainer.predict(eval_dataset)
for threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])
print(threshold, score)
pred = trainer.predict(final_test_dataset)
scores = []
for threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(final_test.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])
print(threshold, score)
scores.append(score)
scores_pseudolabel = scores
```
#### roberta_single_v2
```
model = RobertaForTokenClassification.from_pretrained('models/roberta_single_v2')
model.to(device);
training_args = TrAr(
output_dir='tmp',
per_device_eval_batch_size=8,
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
tokenizer=tokenizer,
)
pred = trainer.predict(eval_dataset)
for threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(trial.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])
print(threshold, score)
pred = trainer.predict(final_test_dataset)
scores = []
for threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:
preds = []
for text, pr in zip(final_test.text, pred.predictions):
proba = np.exp(pr[pr[:, 0]!=-100])
proba /= proba.sum(axis=1, keepdims=True)
labels = (proba[:, 1] >= threshold).astype(int).tolist()
preds.append(labels2spans(text, labels, tokenizer))
score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])
print(threshold, score)
scores.append(score)
scores_standard = scores
xx = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]
plt.plot(xx, scores_standard)
plt.plot(xx, scores_standard_clf)
plt.plot(xx, scores_tagging_clf)
plt.plot(xx, scores_pseudolabel)
plt.legend(['standard', 'clf', 'tagging clf', 'pseudo labels'])
ss = [scores_standard, scores_pseudolabel, scores_standard_clf, scores_tagging_clf]
for sss in ss:
print(f'{np.max(sss):.3f}, {xx[np.argmax(sss)]}, {sss[8]:.3f}, {sss[10]:.3f}')
```
#### standard deviation of score
```
threshold = 0.5
preds = []
for text, pr in zip(final_test.text, pred.predictions):
    # Keep only rows whose first logit is not the -100 padding marker.
    proba = np.exp(pr[pr[:, 0]!=-100])
    proba /= proba.sum(axis=1, keepdims=True)  # normalize to class probabilities
    labels = (proba[:, 1] >= threshold).astype(int).tolist()
    preds.append(labels2spans(text, labels, tokenizer))
# Per-example F1 scores against the ground-truth spans.
ff = [f1(p, y) for p, y in zip(preds, final_test.spans)]
score = np.mean(ff)
print(score)
# 95% normal-approximation confidence half-width for the mean F1.
se = np.std(ff) / np.sqrt(len(ff)) * 1.96
print(score - se, score + se)
np.std(ff) / np.sqrt(len(ff))
```
|
github_jupyter
|
# Predicting Credit Card Default with Neural Networks
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
%matplotlib inline
```
### Back with the credit card default dataset
```
# Load the credit card default dataset, indexed by client ID.
DATA_DIR = '../data'
FILE_NAME = 'credit_card_default.csv'
data_path = os.path.join(DATA_DIR, FILE_NAME)
ccd = pd.read_csv(data_path, index_col="ID")

# Normalize column names to lower case and shorten the target's name.
ccd.rename(columns=lambda x: x.lower(), inplace=True)
ccd.rename(columns={'default payment next month':'default'}, inplace=True)

# Column groups used throughout the notebook.
bill_amt_features = [f'bill_amt{i}' for i in range(1, 7)]
pay_amt_features = [f'pay_amt{i}' for i in range(1, 7)]
numerical_features = ['limit_bal', 'age'] + bill_amt_features + pay_amt_features

# Binary (0/1) indicator features.
ccd['male'] = (ccd['sex'] == 1).astype('int')
ccd['grad_school'] = (ccd['education'] == 1).astype('int')
ccd['university'] = (ccd['education'] == 2).astype('int')
# NOTE: a high_school indicator (education == 3) is deliberately not created.
ccd['married'] = (ccd['marriage'] == 1).astype('int')

# Collapse non-positive pay_* status codes (no delay) to 0.
pay_features = [f'pay_{i}' for i in range(1, 7)]
for col in pay_features:
    ccd.loc[ccd[col] <= 0, col] = 0

# One indicator per month marking whether that month's payment was delayed.
delayed_features = [f'delayed_{i}' for i in range(1, 7)]
for pay_col, delayed_col in zip(pay_features, delayed_features):
    ccd[delayed_col] = (ccd[pay_col] > 0).astype(int)

# New feature: number of months (out of six) with a delayed payment.
ccd['months_delayed'] = ccd[delayed_features].sum(axis=1)
```
## Split and standardize the dataset
```
## Assemble the final feature matrix and the binary target.
numerical_features = numerical_features + ['months_delayed']
binary_features = ['male','married','grad_school','university']
X = ccd[numerical_features + binary_features]
y = ccd['default'].astype(int)
## Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=5/30, random_state=101)
# Fix: train_test_split returns slices of X, and writing through .loc on a
# slice triggers pandas' SettingWithCopyWarning and may fail to modify the
# underlying data depending on the pandas version. Take explicit copies
# before assigning scaled values back.
X_train = X_train.copy()
X_test = X_test.copy()
## Standardize: fit the scaler on the training set only (so no test-set
## statistics leak into training), then apply the same transform to both.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train[numerical_features])
X_train.loc[:, numerical_features] = scaler.transform(X_train[numerical_features])
X_test.loc[:, numerical_features] = scaler.transform(X_test[numerical_features])
```
### Building the neural network for classification
```
from keras.models import Sequential
from keras.layers import Dense

# Fully-connected binary classifier: five hidden ReLU layers of 64 units
# each, followed by a single sigmoid output unit.
n_input = X_train.shape[1]
n_units_hidden = 64

nn_classifier = Sequential()
# The first hidden layer also declares the input dimensionality.
nn_classifier.add(Dense(units=n_units_hidden, activation='relu', input_shape=(n_input,)))
# Hidden layers 2-5 are identical, so add them in a loop.
for _ in range(4):
    nn_classifier.add(Dense(units=n_units_hidden, activation='relu'))
# Output layer: a single probability.
nn_classifier.add(Dense(1, activation='sigmoid'))
```
### Training the network
```
## Compile: binary cross-entropy loss (standard for a single sigmoid
## output) with the Adam optimizer.
nn_classifier.compile(loss='binary_crossentropy', optimizer='adam')
nn_classifier.summary()
# Snapshot the freshly-initialized weights so a later cell can re-train
# from the exact same starting point with a different epoch budget.
nn_classifier.save_weights('class_initial_w.h5')
batch_size = 64
n_epochs = 150
nn_classifier.fit(X_train, y_train, epochs=n_epochs, batch_size=batch_size)
```
## Evaluating predictions
```
## Predicted probabilities (sigmoid outputs) for both splits.
y_pred_train_prob = nn_classifier.predict(X_train)
y_pred_test_prob = nn_classifier.predict(X_test)
## Hard class labels: threshold the probabilities at 0.5.
y_pred_train = (y_pred_train_prob > 0.5).astype(int)
y_pred_test = (y_pred_test_prob > 0.5).astype(int)
from sklearn.metrics import accuracy_score
# A large train/test accuracy gap here would indicate overfitting.
train_acc = accuracy_score(y_true=y_train, y_pred=y_pred_train)
test_acc = accuracy_score(y_true=y_test, y_pred=y_pred_test)
print("Train Accuracy: {:0.3f} \nTest Accuracy: {:0.3f}".format(train_acc, test_acc))
```
## Re-training the network with fewer epochs
```
## Restore the untrained weights saved earlier, so this is a fresh
## re-training run rather than a continuation of the previous one.
nn_classifier.load_weights('class_initial_w.h5')
batch_size = 64
n_epochs = 50
# Re-compile, which recreates the optimizer state as well.
nn_classifier.compile(loss='binary_crossentropy', optimizer='adam')
nn_classifier.fit(X_train, y_train, epochs=n_epochs, batch_size=batch_size)
## Predicted probabilities for both splits.
y_pred_train_prob = nn_classifier.predict(X_train)
y_pred_test_prob = nn_classifier.predict(X_test)
## Hard class labels: threshold the probabilities at 0.5.
y_pred_train = (y_pred_train_prob > 0.5).astype(int)
y_pred_test = (y_pred_test_prob > 0.5).astype(int)
## Accuracy on both splits (accuracy_score was imported in a previous cell).
train_acc = accuracy_score(y_true=y_train, y_pred=y_pred_train)
test_acc = accuracy_score(y_true=y_test, y_pred=y_pred_test)
print("Train Accuracy: {:0.3f} \nTest Accuracy: {:0.3f}".format(train_acc, test_acc))
```
|
github_jupyter
|
```
import pandas as pd

# A small dictionary of column -> values; position i across the lists
# describes one person (one row of the future DataFrame).
people = {
    "First": ["Me", "Myself", "I"],
    "Last": ["He", "She", "It"],
    "Email": ["mehe@email.com", "myselfshe@email.com", "iit@email.com"],
}
df_example1 = pd.DataFrame(people)

# `filter` is a Python builtin, so we avoid that name for the mask.
# Boolean masks are combined with & and |, not the `and`/`or` keywords.
filt = (df_example1['Last'] == 'He') & (df_example1['First'] == 'Me')

# Emails of the rows matching the mask.
df_example1.loc[filt, 'Email']
# Element-wise comparison: True where the last name matches, False elsewhere.
df_example1['Last'] == 'She'
# Indexing with a boolean Series keeps only the matching rows.
df_example1[filt]
# .loc with the same boolean Series selects the same rows.
df_example1.loc[filt]
# ~ inverts the mask: emails of everyone who does NOT match.
df_example1.loc[~filt, 'Email']
```
# Now we will load our Stack Overflow survey data
```
# Load the Stack Overflow survey: responses indexed by respondent id,
# and the schema (column descriptions) indexed by column name.
df = pd.read_csv('data/survey_results_public.csv', index_col='Respondent')
schema_df = pd.read_csv('data/survey_results_schema.csv', index_col='Column')

# Respondents whose converted annual compensation exceeds 70,000.
# (schema_df describes each column if you are unsure which one is salary.)
high_salary = (df['ConvertedComp'] > 70000)
df.loc[high_salary]
# Restrict the view to a few columns of interest.
df.loc[high_salary, ['Country', 'LanguageWorkedWith', 'ConvertedComp']]

# Keep only respondents from a chosen set of countries via isin().
countries = ['United States', 'India', 'United Kingdom', 'Germany', 'Canada']
filtc = df['Country'].isin(countries)
df.loc[filtc, 'Country']

# Respondents who report working with Python. The column holds delimited
# language lists, so use a substring match; na=False makes missing
# answers count as "no match" instead of raising an error.
df['LanguageWorkedWith']
filtl = df['LanguageWorkedWith'].str.contains('Python', na=False)
df.loc[filtl, 'LanguageWorkedWith']
```
|
github_jupyter
|
```
## Basic stuff
# Notebook conveniences: auto-reload edited modules on each execution.
%load_ext autoreload
%autoreload
from IPython.core.display import display, HTML
# Widen cells to the full browser width and make long outputs scrollable.
display(HTML("<style>.container { width:100% !important; }</style>"))
display(HTML("""<style>div.output_area{max-height:10000px;overflow:scroll;}</style>"""))
#IPython.Cell.options_default.cm_config.lineNumbers = true;
################################################################################
## Python Version
################################################################################
import sys
from io import StringIO
from pandas import DataFrame, read_csv
import urllib
from time import sleep
# Project-local helpers (not part of the standard library).
from fsUtils import isFile
from ioUtils import getFile, saveFile
from webUtils import getHTML
def downloadURL(url):
    """Download *url* and return (payload bytes, HTTP status code).

    Sends a browser-like User-Agent header because some servers refuse
    requests from the default urllib agent.
    """
    user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
    headers = {'User-Agent': user_agent}
    print("Now Downloading {0}".format(url))
    request = urllib.request.Request(url, None, headers)  # the assembled request
    # Use the response as a context manager so the connection is closed
    # deterministically (the original leaked the response object).
    with urllib.request.urlopen(request) as response:
        data = response.read()
        code = response.getcode()
    return data, code
txt="""Angola
Ngola Ritmos
Waldemar Bastos
Bonga
Teta Lando
Sam Mangwana
Lourdes Van-Dúnem
Matadidi Mario
Paulo Flores
Neide Van-Dúnem
Neblina
Titica
Don Kikas
Neide Van-Dúnem
Army Squad
Diamondog
KeyLiza
Anselmo Ralph
Neide Van-Dúnem
Don Kikas
Buraka Som Sistema
Titica
Dog Murras
Benin
Angelique Kidjo
Wally Badarou
Nigeria
Evelyn summer
Botswana
Banjo Mosele
Franco and Afro Musica
Matsieng
Zeus
Burkina Faso
Balaké
Cheikh Lô
Dramane Kone
Farafina
Burundi
Khadja Nin
Kebby Boy
Sat-B
Cameroon
Njacko Backo
Francis Bebey
Moni Bilé
Diboué Black
Richard Bona
Les Têtes Brulées
Manu Dibango
Charlotte Dipanda
Dyllann
Stanley Enow
Jovi
Michael Kiessou
Coco Mbassi
Yannick Noah
Kristo Numpuby
Sally Nyolo
Petit Pays
Sam Fan Thomas
Lady Ponce
Magasco
Wes Madiko
Daphné
Salatiel
Mr. Leo
Blanche Bailly
Reniss
Sublymme
King B Swag
Cape Verde
Cesaria Evora
Gil Semedo
Côte d'Ivoire
Alpha Blondy
Magic System
Ernesto Djédjé
Tiken Jah Fakoly
DJ Arafat
Serge Beynaud
Foliba trio
Republic of the Congo (Congo-Brazzaville)
Youlou Mabiala
Pierre Moutouari
Werrason
Papa Wemba
Ferre Gola
Fally Ipupa
Mbilia Bel
Abeti Masikini
Madilu System
Youlou Mabiala
Franco Luambo Makiadi
Franklin Boukaka
Koffi Olomide
Democratic Republic of the Congo (former Zaire)
Abeti Masikini
African Fiesta
Avelino
Awilo Longomba
Bimi Ombale
Bisso Na Bisso
Bouro Mpela
Bozi Boziana
Cindy Le Coeur
Dadju
Damso
Dany Engobo
Evoloko Jocker
Diblo Dibala
Dindo Yogo
Fabregas
Fally Ipupa
Ferré Gola
Gaz Mawete
Geo Bilongo
Gibson Butukondolo
Grand Kalle
Héritier Watanabe
Icha Kavons
INNOSS'B
Jean Bosco Mwenda
Jessy Matador
Jimmy Omonga
Josky Kiambukuta Londa
Kalash Criminel
Kanda Bongo Man
Kasai Allstars
Kaysha
Keblack
Kékélé
King Kester Emeneya
Koffi Olomide
Konono Nº1
Kasaloo Kyanga
LU KALA
Langa Langa Stars
Le Grand Kalle
Lokua Kanza
Madilu Système
Maître Gims
Marie Daulne
Marie Misamu
Mayaula Mayoni
Mbongwana Star
M'bilia Bel
Michel Boyibanda
Mohombi
Mose Fan Fan
M'Pongo Love
Naza
Ndombe Opetum
Nico Kasanda
Ninho
Papa Wemba
Pepe Kalle and Empire Bakuba
Ray Lema
Sam Mangwana
Singuila
Tabu Ley Rochereau
Werrason
Youlou Mabiala
Yxng Bane
Egypt
Amal Maher
Amira Selim
Amr Diab
Angham
Anoushka
Carmen Suleiman
Dina El Wedidi
Hisham Abbas
Leila Mourad
Mayam Mahmoud
Mohamed Mounir
Mohammed Abdel Wahab
Tamer Hosny
Ezz Eddin Hosni (1927-2013)
Mounira El Mahdeya
Nesma Mahgoub
Ratiba El-Hefny
Ruby
Sayed Darwish
Shadiya
Sherine
Umm Kulthum
Yasmine Niazy
Yousra
Zizi Adel
Eritrea
Abraham Afewerki
Ethiopia
Aminé
Mulugeta Abate
Teddy Afro
Alemu Aga
Mahmoud Ahmed
Tadesse Alemu
Mulatu Astatke
Aster Aweke
Abatte Barihun
Aragaw Bedaso
Eyasu Berhe
Girma Bèyènè
Ali Birra
Tamrat Desta
Alemayehu Eshete
Tilahun Gessesse
Gigi
Thomas Gobena
Hachalu Hundessa
Kenna
Getatchew Mekurya
Munit Mesfin
LoLa Monroe
Emilia Rydberg
Kuku Sebsebe
Kiros Alemayehu
Tigist Shibabaw
Shantam Shubissa
Abdu Kiar
Walias Band
Wayna
Asnaketch Worku
Dawit Yifru
Gildo Kassa
Yared Negu
Gabon
Oliver N'Goma
Patience Dabany
Annie-Flore Batchiellilys
Gambia
Sona Maya Jobarteh
Foday Musa Suso
Ghana
Guy Warren
Rebop Kwaku Baah
Becca
DopeNation
Fuse ODG
Jay Ghartey
Osibisa
Wendy Shay
Darkovibes
Mugeez
KiDi
Kuami Eugene
Ebony Reigns
Iwan
Kaakie
Samini
Shatta Wale
Stonebwoy
Bernice Ofei
Danny Nettey
Helen Yawson
Joe Beecham
Joe Mettle
Kofi Owusu Dua Anto
Nayaah
Nii Okai
Ohemaa Mercy
Preachers
QwameGaby
Stella Aba Seal
Tagoe Sisters
Diana Hamilton
Joyce Blessing
Efya
A. B. Crentsil
Alex Konadu
Amakye Dede
Ben Brako
Bisa Kdei
C.K. Mann
Daddy Lumba
E. T. Mensah
Ebo Taylor
K. Frimpong
King Bruce
Kojo Antwi
Koo Nimo
Kwabena Kwabena
Jerry Hansen
Ayesem
Ayigbe Edem
Ball J
Bice Osei Kuffour
Buk Bak
C-Real
Castro
Corp Sayvee
D-Black
Efya
EL
Eno Barony
Gasmilla
Kesse
M.anifest
Medikal
Nero X
Okyeame Kwame
Reggie Rockstone
Ruff n Smooth
Sarkodie
Sherifa Gunu
Sway
Tinny
Trigmatic
Joey B
Pappy Kojo
Gurunkz
R2Bees
Kofi Kinaata
Kwesi Arthur
KiDi
Kuami Eugene
Adam Ro
Bobo Shanti
Rascalimu
Rita Marley
Rocky Dawuni
Samini
Sheriff Ghale
Stonebwoy
Fancy Gadam
Abubakari Lunna
Ephraim Amu
Ken Kafui
Philip Gbeho
Guinea
Sona Tata Condé
Sekouba Bambino
Daddi Cool
Les Ballets Africains
Balla et ses Balladins
Bembeya Jazz
Djeli Moussa Diawara
Famoudou Konaté
Mory Kanté
Mamady Keita
Ballet Nimba
Guinea-Bissau
José Carlos Schwarz
Eneida Marta
Kenya
Akothee
Avril
Ayub Ogada
Cece Sagini
Daddy Owen
David Mathenge
Daudi Kabaka
DJ Fita
Eric Wainaina
E-Sir
Fadhili William
Fundi Konde
George Ramogi
Gloria Muliro
Harry Kimani
Jabali Afrika
Jason Dunford
Jua Cali
Kavirondo
King Kaka
Kleptomaniax
Mighty King Kong
Monski
Musa Juma
Naiboi
Necessary Noize
Okatch Biggy
Otile Brown
Princess Jully
Redsan
Roger Whittaker
Sanaipei Tande
Sauti Sol
Size 8
Stella Mwangi
Suzzana Owiyo
Tony Nyadundo
Wahu
Wanyika bands
Simba Wanyika
Willy Paul
Wyre
Liberia
Sundaygar Dearboy
Knero
Takun-J
Madagascar
AmbondronA
Vaiavy Chila
Mily Clément
Ninie Doniah
Rakoto Frah
D'Gary
Régis Gizavo
Eusèbe Jaojoby
Lego
Mahaleo
Erick Manana
Jerry Marcoss
Toto Mwandjani
Oladad
Rabaza
Naka Rabemanantsoa
Andrianary Ratianarivo
Olombelona Ricky
Rossy
Mama Sana
Senge
Madagascar Slim
Tarika
Tearano
Justin Vali
Nicolas Vatomanga
Mali
Boubacar Traoré
Mory Kanté
Salif Keita
Toumani Diabaté
Kandia Kouyaté
Habib Koité
Issa Bagayogo
Rokia Traoré
Tinariwen
Ali Farka Touré
Amadou et Mariam
Oumou Sangaré
Afel Bocoum
Lobi Traoré
Fatoumata Diawara
Djelimady Tounkara
Rail Band
Mauritania
Dimi Mint Abba
Malouma
Noura Mint Seymali
Morocco
Saad Lamjarred
Elam Jay
AnoGhan
Oussama Belhcen
Rajae El Mouhandiz
Mr Sufian
Manal
Two Tone
Muslim
Dizzy DROS
L7a9d
Cut Killer
Canardo
French Montana
ILY
Larbi Batma
Abdessadeq Cheqara
Mohamed Rouicha
World music
Driss El Maloumi
Henry Azra
Mozambique
Wazimbo
Ghorwane
Fany Pfumo
Stewart Sukuma
Moreira Chonguica
Lizha James
Neyma
Mingas
Al Bowlly
Wazimbo
340ml
Afric Simone
Niger
Mamar Kassey
Mdou Moctar
Nigeria
2face Idibia - hip hop and R&B singer
9ice - hip hop and afropop singer
A
A-Q - hip hop artist
Abiodun Koya (born 1980), gospel singer, opera singer
Adé Bantu - Nigerian-German musician, producer, front man of the 13 piece band BANTU
Adekunle Gold - singer, songwriter
Adewale Ayuba - fuji music singer
Afrikan Boy - rapper
Afro Candy - pop singer
Alamu Atatalo - sekere singer, a type of traditional Yoruba music
Ali Jita - Hausa singer and song writer
Amarachi - singer, dancer, violinist
Andre Blaze - rapper
Aramide - Afro-Jazz singer
Aṣa - R&B, country and pop singer-songwriter
Ayinde Bakare - Yoruba jùjú and highlife musician
Ayinla Kollington - Fuji musician
B
Babatunde Olatunji - drummer
Banky W - pop and R&B singer-songwriter
Blackface Naija - reggae musician
Blaqbonez - rapper
Brymo - singer
Burna Boy - reggae-dancehall musician
C
CDQ - rapper, songwriter
Celestine Ukwu - highlife musician
Chidinma - pop singer
Chike - singer, songwriter and actor
Chinko Ekun – rapper, songwriter
Cobhams Asuquo - soul singer
Cynthia Morgan - pop, hip hop and dancehall singer
D
D'banj - pop singer
Da Emperor - indigenous rapper
Da Grin - rapper
Dammy Krane - singer, songwriter
Darey - R&B singer-songwriter
Dauda Epo-Akara - Yoruba musician
Davido - pop singer
Dekumzy - R&B and highlife singer
Dele Ojo - juju music singer and performer
Dice Ailes - pop singer
Di'Ja - singer
Don Jazzy - recording artist and record producer
D'Prince - Afro-pop singer
Dr. Alban - Nigerian-Swedish recording artist and producer
Dr SID - pop singer
Duncan Mighty - reggae singer
E
Ebenezer Obey - jùjú musician
Echezonachukwu Nduka - pianist and musicologist
Eddy Wata - Eurodance singer
Eedris Abdulkareem
Ego Ogbaro
eLDee – rapper, singer, producer
Emeka Nwokedi – conductor and music director
Emma Nyra – R&B singer
Emmy Gee – rapper
Eva Alordiah-rapper and singer
Evi Edna Ogholi-Reggae singer
F
Falz - rapper, songwriter
Faze - R&B singer
Fela Kuti - afrobeat, jazz singer-songwriter and instrumentalist
Fela Sowande
Femi Kuti - afrobeat, jazz singer-songwriter and instrumentalist
Fireboy DML - singer
Flavour N'abania - highlife and hip hop singer
Frank Edwards – gospel singer
G
Genevieve Nnaji - pop singer
H
Helen Parker-Jayne Isibor - opera singer and composer
Harrysong - singer and songwriter
Haruna Ishola
Humblesmith - afropop singer
I
I.K. Dairo
Ice Prince - rapper
Idahams - Singer and song writer
Iyanya - pop singer
J
J. Martins - highlife singer-songwriter and record producer
Jesse Jagz - rapper
Jasën Blu - R&B singer-songwriter and record producer
Joeboy - singer
Johnny Drille - singer
K
Kcee
King Wadada - reggae singer
Kizz Daniel
Koker
Korede Bello
L
Ladipoe
Lagbaja
Lara George
Laycon
Lil Kesh
Lyta
M
M.I - rapper
M Trill - rapper
Majek Fashek - singer-songwriter
May7ven
Maud Meyer - jazz singer
Mike Ejeagha - Highlife musician
Mo'Cheddah - hip hop singer
Mode 9 - rapper
Monica Ogah - pop singer-songwriter
Mr 2Kay
Mr Eazi - singer-songwriter
Mr Raw
Mr Real - house singer
Muma Gee - pop singer-songwriter
Muna - rapper
N
Naeto C
Naira Marley – singer and songwriter
Niniola - Afro-house artist
Niyola - soul and jazz singer
Nneka - hip hop and soul singer
Nonso Amadi
Nosa - gospel artist
O
Obesere - fuji musician
Obiwon - R&B and gospel singer
Olamide - rapper and hip hop artist
Oliver De Coque
Omawumi - soul singer
Omotola Jalade Ekeinde – R&B and pop singer
Onyeka Onwenu - pop singer
Orezi - reggae singer
Oriental Brothers
Oritse Femi
Orlando Julius
Osita Osadebe
Orlando Owoh
P
Patience Ozokwor - highlife singer
Patoranking - reggae and dancehall singer
Pepenazi - rapper, hip hop artist and record producer
Pericoma Okoye
Peruzzi
Peter King
Phyno - rapper and record producer
Praiz - R&B singer and songwriter
Prince Nico Mbarga
R
Reekado Banks - hip hop artist
Rema - Afrobeats and Trap
Rex Lawson
Ric Hassani
Ruby Gyang
Ruggedman - rapper and hip hop artist
Runtown - songwriter and hip hop artist
S
Sade Adu
Safin De Coque - rapper and hip hop artist
Salawa Abeni - Waka singer
Samsong - gospel singer
Sasha P - rapper and singer
Sean Tizzle - Afropop
Seun Kuti - afrobeat, Jazz singer-songwriter and instrumentalist
Seyi Shay - pop singer and songwriter
Shina Peters - juju singer
Simi
Sinach - gospel singer
Skales - rapper and singer
Shola Allynson - Gospel Singer
Sonny Okosuns
Sound Sultan
Stella Damasus - R&B and soul singer
Sunny Ade - jùjú singer
Tamara Jones
Tekno Miles
Tems
Teni
Terry G
Timaya
Tiwa Savage
Timi Dakolo
Toby Foyeh
Tonto Dikeh
Tony Allen
Tony Tetuila
Tonye Garrick
Tope Alabi
Tunde King
Tunde Nightingale
TY Bello
Victor Olaiya
Victor Uwaifo
Waconzy
Waje
Wasiu Alabi Pasuma
Weird MC
William Onyeabor
Wizkid
Ycee
Yemi Alade
Yinka Ayefele
Yinka Davies
Yung6ix
Yusuf Olatunji
Zlatan
Zayn Africa
Zoro African
Rwanda
Alpha Rwirangira
Tom Close
Riderman
King James
Knolwess Butera
Benjami Mugisha
Urban Boyz
Kate Bashabe
Simon Bikindi
Corneille
Miss Jojo
Senegal
Akon
Baaba Maal
Étoile de Dakar
Ismaël Lô
Mansour Seck
Orchestra Baobab
Positive Black Soul
Thione Seck and Raam Daan
Star Band
Touré Kunda
Youssou N'Dour and Étoile de Dakar
Xalam (band)
Sierra Leone
Bai Kamara
S. E. Rogie
Steady Bongo
K-Man
Emmerson
Anis Halloway
Supa Laj
Somalia
Xiddigaha Geeska
Mohamed Mooge Liibaan
Abdullahi Qarshe
Waayaha Cusub
Ali Feiruz
Hasan Adan Samatar
Aar Maanta
Mohamed Sulayman Tubeec
Maryam Mursal
K'naan
Guduuda 'Arwo
Magool
South Africa
African Children's Choir
Afrotraction
AKA, hip-hop artist and record producer
Akustika Chamber Singers, chamber choir from Pretoria
aKing, South African acoustic rock band
Amanda Black, Multi-award winning and platinum-selling Afro-soul singer-songwriter
Amampondo, award-winning traditional Xhosa percussion group from Cape Town
Anatii (born 1993), hip-hop artist and record producer
A-Reece (born 1997), hip-hop artist and lyricist
Leigh Ashton (born 1956), singer-songwriter from Johannesburg
Assagai, Afro-rock band
The Awakening, gothic rock
B
Babes Wodumo, gqom musician
Ballyhoo, 1980s pop band best known for the hit "Man on the Moon"
The Bang
Leonel Bastos (born 1956), Mozambiquan adult contemporary musician and producer working in South Africa
Battery 9
BlackByrd
Busiswa, house musician
BLK JKS
Elvis Blue, musician and songwriter
Boo!
Bles Bridges (1947–2000), singer
Stef Bos
Cristina Boshoff
Jonathan Butler, singer-songwriter and guitarist
The Brother Moves On
Brasse Vannie Kaap
Bright Blue, 1980s pop band, best known for the hit song "Weeping"
Buckfever Underground
Beatenberg
Bongo Maffin, kwaito music group
Boom Shaka
Bucie (born 1987), R&B and soul singer
Guy Buttery
C
Adrienne Camp, singer-songwriter
Captain Stu, ska, funk, punk, and soul fusion band
Arno Carstens, former lead singer of Springbok Nude Girls
Cassette
Cassper Nyovest, rapper and record producer
Tony Cedras (born 1952), musician
Chad, (born 1993), rapper
Yvonne Chaka Chaka, singer
Chris Chameleon, solo artist, lead singer and bass guitarist for Boo
Blondie Chaplin, singer and guitarist
Jesse Clegg (born 1988)
Johnny Clegg (born 1953)
Clout, 1970s rock group
Basil Coetzee (1944–1998), saxophonist
Mimi Coertse (born 1932), musician
Tony Cox (born 1954), guitarist
Crashcarburn
Crossingpoint, Christian progressive hardcore band
Cutting Jade
Civil Twilight
Crow Black Sky
D
Da L.E.S (born 1985), hip-hop artist
Simphiwe Dana (born 1980)
Danny K (Daniel Koppel), R&B singer-songwriter
Kurt Darren, singer
Pierre de Charmoy
Steven De Groote (1953–1989), classical pianist and winner of the Van Cliburn International Piano Competition
Fanie de Jager (born 1949), operatic tenor
Die Antwoord
Die Heuwels Fantasties
Bonginkosi Dlamini (born 1977), poet, actor and singer, also known as Zola
Dollar Brand (born 1934)
Donald, singer
Dorp
Downfall
Dr Victor and the Rasta Rebels, reggae
Dreamteam, hip-hop group from Durban
Jabulani Dubazana, singer, Ladysmith Black Mambazo
Lucky Dube (1964–2007)
Duck and Cover, hard rock band
Ampie du Preez, singer and guitarist
Johnny Dyani (1945–1986), jazz double bassist
DJ Speedsta , Hip Hop Dj
E
Dennis East, singer
Shane Eagle (b. 1996), hip-hop artist
Alton Edwards, singer
Eden, pop band
Elaine, singer and songwriter
Endorphine
Emtee (b. 1992), hip-hop artist
Dawid Engela (1931–1967), composer and musicologist
éVoid, 1980s new wave
Erica Eloff, soprano
F
The Fake Leather Blues Band
Falling Mirror
Brenda Fassie (1964–2004)
Ricky Fataar (born 1952), drummer
Duncan Faure, singer-songwriter formerly with the band Rabbitt
Mongezi Feza (1945–1975), trumpet player and flautist
Anton Fig, drummer
Josh Fix
Fokofpolisiekar, Afrikaans rock band
Foto na Dans, Afrikaans rock band
Four Jacks and a Jill
Johnny Fourie (1937–2007), jazz guitarist
Freshlyground
Fuzigish
Fifi Cooper
G
Hotep Idris Galeta (born 1941), jazz pianist
Goldfish
Anton Goosen (born 1946), singer
Die Grafsteensangers
Goodluck
H
Half Price (band)
Paul Hanmer, composer, pianist, and jazz musician
The Helicopters
Ken E Henson (born 1947), musician
Henry Ate
Sonja Herholdt
Hog Hoggidy Hog
Steve Hofmeyr (born 1964), singer and actor
Die Heuwels Fantasties
I
Abdullah Ibrahim (born 1934)
iFANi
Isochronous
J
Jabu Khanyile (1957–2006)
Jack Parow
Robbie Jansen (1949–2010)
Jeremy Loops (born 1986), modern folk, singer
Jesse Jordan Band
Theuns Jordaan (born 1971), singer and songwriter
Claire Johnston (born 1967), lead singer of Mango Groove
Trevor Jones (born 1949), composer
Armand Joubert
Joy, a vocal group
John Edmond (born 1936), singer
John Ireland (born 1954), singer and songwriter
Julian Bahula, jazz drummer*Juluka
Just Jinjer (previously Just Jinger)
JR, rapper
Junkyard Lipstick
L-Tido (born 1982), hip-hop artist, aka 16V
K
Kabelo Mabalane (born 1976), kwaito artist, former member of TKZee
Kalahari Surfers
Wouter Kellerman, South African flautist
Johannes Kerkorrel (1960–2002)
Sibongile Khumalo (born 1957), singer
KOBUS!
Koos Kombuis (born 1954)
John Kongos (born 1945)
Kongos
Gé Korsten (1927–1999)
David Kramer (born 1951)
Kwesta, hip-hop artist and poet
K.O, hip-hop artist and record producer
Kabza De Small , King of Amapiano
L
Felix Laband, electronic musician
Riku Lätti, songwriter, composer, music producer
Ladysmith Black Mambazo (born 1960), isicathamiya group
Don Laka, jazz musician, pianist, producer
Robert Lange (born 1948), music producer
Lani Groves
Lark
Jack Lerole (c.1940–2003), tin whistle player; singer
Solomon Linda, songwriter
Lira
Locnville
Roger Lucey, singer and guitarist
Lucky Dube, singer and keyboard player
M
Mark Haze, Rock singer
Sipho Mabuse (born 1951), singer
Arthur Mafokate, kwaito singer and composer
Mahlathini and the Mahotella Queens, a mbaqanga band
Vusi Mahlasela (born 1965)
Makgona Tsohle Band (1964–1999), a mbaqanga instrumental band
Bongi Makeba (1950–1985), singer-songwriter
Miriam Makeba (1932–2008)
Malaika (group)
Petronel Malan (1974–), concert pianist
Man As Machine
Mandoza (born 1978), kwaito singer
Mango Groove
Mildred Mangxola (born 1944), singer in Mahlathini and the Mahotella Queens and member of the Mahotella Queens
Manfred Mann
MarcAlex, group known for the hit "Quick Quick"
Josef Marais (1905–1978)
Martin PK
Hugh Masekela (born 1939)
Dorothy Masuka (born 1935), jazz singer
Neels Mattheus (1935-2003), traditional musician
Dave Matthews (born 1967), lead singer and founding member of Dave Matthews Band
Irene Mawela (born 1940), veteran singer and composer
Illana May
Abednego Mazibuko, singer with Ladysmith Black Mambazo
Albert Mazibuko (born 1948), singer with Ladysmith Black Mambazo
Thandiswa Mazwai (born 1976)
Chris McGregor (1936–1990), jazz pianist and composer
Busi Mhlongo (1947–2010), singer, dancer and composer
Mind Assault
Moreira Chonguica (born 1077), jazz saxophonist and producer
Kippie Moeketsi (1925–1983), saxophonist
Pops Mohamed (born 1949), jazz musician
Louis Moholo (born 1940), drummer
Matthew Mole
Lebo Morake (aka Lebo M)
Shaun Morgan (born 1980), singer also known as Shaun Morgan Welgemoed
Ike Moriz (born 1972), singer, composer and lyricist
Jean Morrison
Mshengu White Mambazo (1976–2003), junior choir of Ladysmith Black Mambazo
Russel Mthembu, singer with Ladysmith Black Mambazo
Moozlie (born 1992), hip-hop artist and television presenter
Muzi (born 1991), electronic musician
Moonchild Sanelly Musician and Dancer
N
Nádine (born 1982), singer-songwriter
The Narrow
Nasty C (born 1997), hip-hop artist and record producer
Bongani Ndodana-Breen, composer
Jim Neversink, alternative country singer-songwriter and guitarist
New Academics
Steve Newman
Bernoldus Niemand (1959–1995)
Simon "Mahlathini" Nkabinde (1937–1999), Mbaqanga singer
West Nkosi (1940–1998), mbaqanga musician
No Friends of Harry
Nobesuthu Mbadu (born 1945), singer in Mahlathini and the Mahotella Queens and member of the Mahotella Queens
Siphiwo Ntshebe (1974–2010), operatic tenor from New Brighton, Port Elizabeth
Ashton Nyte, solo artist as well as lead singer and producer of The Awakening
Thys Nywerheid
Nadia Nakai (born 1990), hip-hop artist
O
Sarah Oates, violinist and associate leader Philharmonia orchestra
Wendy Oldfield, rock singer-songwriter
Oskido, record producer and songwriter
P
Jack Parow, hip-hop artist
The Parlotones
Al Paton, singer-songwriter, producer, and percussionist
Peter Toussaint
Petit Cheval
James Phillips, singer-songwriter also known as Bernoldus Niemand
Anke Pietrangeli (born 1982), winner of the second series of Idols
Dizu Plaatjies, founder and former lead singer of Amampondo
Plush
PJ Powers (born 1960)
Prime Circle
Professor (born 1978), Kwaito musician
Dudu Pukwana (1938–1990), saxophonist, pianist, and composer
Purified, Christian hip-hop artist
Patricia Majalisa, bubblegum artist
Q
Qkumba Zoo
R
Rabbitt
Rouge (rapper)
Trevor Rabin (born 1954), musician
Dolly Rathebe (1928–2004)
Laurika Rauch, Afrikaans singer
Riddare av Koden
Surendran Reddy (1962–2010) pianist and composer
Riky Rick (born 1987), hip-hop artist and record producer
Robin Auld
Ray Phiri (1947-2017), Jazz, jazz fusion, reggae and mbaqanga musician
S
Sandy B
Savuka
Robert Schneider of The Apples in Stereo
Leon Schuster
Seether, formerly called Saron Gas, hard rock and alternative metal band
Gerard Sekoto (1913–1993)
Judith Sephuma
Jockey Shabalala (1943–2006), singer with Ladysmith Black Mambazo
Joseph Shabalala (born 1941), lead singer and founder of Ladysmith Black Mambazo
Msizi Shabalala (born 1975), singer with Ladysmith Black Mambazo
Sibongiseni Shabalala (born 1973), singer with Ladysmith Black Mambazo
Troye Sivan (born 1995), South African-born
Thamsanqa Shabalala (born 1977), singer with Ladysmith Black Mambazo
Thulani Shabalala (born 1968), singer with Ladysmith Black Mambazo
Shane Eagle (born 1996), hip-hop artist and lyricist
Shiraz, band active between 1984 - 1984
Margaret Singana (1938–2000)
Robert Sithole, pennywhistle player
Skylight (band)
Kyla-Rose Smith (born 1982), violinist and dancer
Sonja Herholdt
Enoch Sontonga, teacher, lay-preacher and composer who wrote "Nkosi Sikelel' iAfrika"
South African National Youth Orchestra
Springbok Nude Girls
Zanne Stapelberg (born 1977), opera soprano
Dale Stewart (born 1979)
Sterling EQ
Stimela band formed in 1982
Straatligkinders
Sugardrive
Valiant Swart
Okmalumkoolkat (born 1983), hip-hop artist
Stogie T , Hip Hop Artist
T
Tananas
Taxi Violence
Peta Teanet, singer
TKZee, kwaito group
Hilda Tloubatla (born 1942), lead singer of Mahotella Queens, and singer in Mahlathini and the Mahotella Queens
Tokollo Tshabalala, kwaito singer also known as Magesh
Peter Toussaint, singer-songwriter and guitar player
Toya Delazy, pop singer and pianist
Tribe After Tribe
Tuks, hip-hop artist
Tumi and the Volume
Tweak
U
Uhuru— Kwaito and afropop music group
Urban Creep
V
Bobby van Jaarsveld (born 1987), singer-songwriter and actor
Bok van Blerk (born 1978)
Jason van Wyk (born 1990), composer, producer
Van Coke Kartel
Amor Vittone (born 1972)
Valiant Swart (born 1965)
W
Watershed
Wargrave
Shaun Welgemoed (born 1978)
Heinz Winckler (born 1978), singer who won the first series of Idols
Winston's Jive Mixup
Wonderboom
Markus Wormstorm, electronic musician and composer
Y
Pretty Yende (born 1985), operatic soprano from Piet Retief, Mpumalanga
Yorxe (born 1998), singer and songwriter
YoungstaCPT (born 1991), rapper and songwriter
Z
Zahara, singer-songwriter and poet
Zebra & Giraffe
Karen Zoid (born 1978)
Zola (born 1977)
Zonke (born 1979)
Auth3ntiC
South Sudan
Yaba Angelosi
Mary Boyoi
Emmanuel Jal
Silver X
Sudan
Abdel Aziz El Mubarak
Abdel Gadir Salim
AlKabli
Emmanuel Jal
Mohammed Wardi
Mohamed Gubara
Swaziland
Dusty & Stones
Kambi
Tendaness
Tanzania
Ali Kiba
Bill Nass
Joseph Lusungu
Mnenge Ramadhani
Muhiddin Maalim
Hassani Bitchuka
Saidi Mabera
Wilson Kinyonga
Remmy Ongala
Kasaloo Kyanga
Mr. Nice
Saida Karoli
Diamond Platnumz
Lady Jaydee
Professor Jay
TID
Rose Mhando
Vanessa Mdee
A.Y.
Ruby
Rayvanny
Bi Kidude
Carola Kinasha
Imani Sanga
Tudd Thomas
Harmonize
Joel lwaga
Paul Clement
Goodluck Gozbert
Bella Kombo
Sara Nyongole
Angel Benard
Zoravo
Kibonge Wa Yesu
Calvin John
Mirriam Mbepera
Derick Marton
Beda Andrew
Dr. Ipyana
Ashley Nassary
Jessica Honore
Christina Shusho
Walter Chilambo
Boaz Danken
Martha Mwaipaja
John Lisu
Togo
Bella Bellow
King Mensah
Uganda
Holy Keane Amooti
Aziz Azion
A Pass
Afrigo Band
Babaluku
Bataka Squad
Bebe Cool
Bobi Wine
Bosmic Otim
Fresh Kid Uganda
Jose Chameleone
Mac Elvis
Exodus
David Lutalo
Eddy Kenzo
Fik Fameica
Gabriel K
Goodlyfe Crew
Sam Gombya
Sophie Gombya
Giovanni Kiyingi
Jackie Akello
Jackie Chandiru
Janzi Band
Jemimah Sanyu
Jimmy Katumba
Judith Babirye
Juliana Kanyomozi
Paulo Kafeero
Michael Ross Kakooza
Angella Katatumba
Isaiah Katumwa
Joanita Kawalya
Leila Kayondo
Keko
Suzan Kerunen
Maurice Kirya
Klear Kut
Sylver Kyagulanyi
Philly Lutaaya
Levixone
Lydia Jazmine
Lumix Da Don
Mad Ice
Master Blaster
Rachael Magoola
Fred Masagazi
Moses Matovu
Mariam Ndagire
Lilian Mbabazi
Frank Mbalire
Milege
Peter Miles
Phina Mugerwa
Benon Mugumbya
Fille Mutoni
Grace Nakimera
Halima Namakula
Rema Namakula
Iryn Namubiru
Navio
Nick Nola
Irene Ntale
Gravity Omutujju
Geoffrey Oryema
Papa Cidy
Producer Hannz
Rabadaba
Rachel K
Ragga Dee
Radio and Weasle
Ruyonga
Saba Saba aka Krazy Native
Cinderella Sanyu
Ssewa Ssewa
Sera
Sheebah Karungi
Sister Charity
Spice Diana
Madoxx Ssemanda Sematimba
St. Nelly-Sade
The Mith
Henry Tigan
Allan Toniks
Tshila
Trix Lane
Undercover Brothers Ug
Vampino
Viboyo
Elly Wamala
Wilson Bugembe
Bobi Wine
GNL Zamba
Zambia
Alick Nkhata
B Flow
Ballad Zulu
Chef 187
Jordan Katembula
Just Slim
Larry Maluma
Lazarus Tembo
Leo "K'millian" Moyo
Lily Tembo
Macky 2
Maiko Zulu
Mampi
Moonga K.
Nashil Pichen
OC Osilliation
Paul Ngozi
Shom-C
Victor Kachaka
Yvonne Mwale
Petersen Zagaze
Bobby East
Amayenge
Distro Kuomboka
Mashome Blue Jeans
Witch
Zone Fam
Zimbabwe
Barura Express – band
Bhundu Boys – jit and chimurenga music band
Hohodza – band
Mbira dzeNharira – mbira band
Mechanic Manyeruke and the Puritans – gospel music group
R.U.N.N. family – mbira-inspired reggae and rhumba group
Siyaya – music and dance group
Flint Bedrock (born 1985) – pop singer-songwriter
Mkhululi Bhebhe (born 1984) - contemporary gospel vocalist
Charles Charamba (born 1971) – gospel singer[1]
Olivia Charamba (1999–1999) – gospel singer
Brian Chikwava (born 1971) – writer and musician
Simon Chimbetu (1955–2005) – singer-songwriter and guitarist[2]
James Chimombe (1951–1990) – singer and guitarist[2]
Musekiwa Chingodza (born 1970) – mbira and marimba player
Chirikure Chirikure (born 1962) – musician and songwriter
Stella Chiweshe (born 1946) – mbira player and singer-songwriter
Dizzy Dee (1999–1999) – Australia-based reggae artist
Leonard Dembo (1959–1996) – guitarist and singer-songwriter; member of the band Barura Express[2]
Tehn Diamond (born 1985) – Zimbabwean hip hop musician and rapper
Chartwell Dutiro (born 1957) – mbira player and singer-songwriter[3]
Mbuya Dyoko (1944–2013) – mbira player
John Edmond (born 1936) – Rhodesian folk singer
Tendayi Gahamadze (born 1959) – mbira player and singer-songwriter; member of Mbira dzeNharira
Michael Gibbs (born 1937) – England-based jazz composer
Derek Hudson (1934–2005) – English-born conductor and composer
Ngonidzashe Kambarami (born 1983) – urban grooves artist
Victor Kunonga (born 1974) – Afrojazz singer-songwriter
Forward Kwenda (born 1963) – mbira player
Jah Prayzah (born 1987) – Afropop and Afrojazz musician
Hope Masike mbira player and percussionist and singer
Ignatius Mabasa (born 1971) – writer and musician
Alick Macheso (born 1968) – singer-songwriter and guitarist
Safirio Madzikatire (1932–1996) – actor and musician[2]
Madzitatiguru (born 1989) – poet and musician
Takunda Mafika (1983–2011) – mbira player
Cosmas Magaya (born 1953) – mbira player
Tkay Maidza (born 1996) – Australia-based singer-songwriter and rapper
Lovemore Majaivana (born 1954) – Ndebele music singer-songwriter
Zeke Manyika (born 1955) – England-based rock and roll singer-songwriter and drummer
Leonard Mapfumo (born 1983) – urban grooves and hip hop artist
Thomas Mapfumo (born 1945) – chimurenga music artist
Chiwoniso Maraire (1976–2013) – mbira player and singer-songwriter[2]
Dumisani Maraire (1944–1999) – mbira payer and singer-songwriter
Mashasha (born 1982) – guitarist and singer-songwriter
Maskiri (born 1980) – hip hop artist and rapper
Dorothy Masuka (born 1935) – South Africa-based jazz singer
Paul Matavire (1961–2005) – blind jit musician[2]
Louis Mhlanga (born 1956) – South Africa-based Afrojazz singer-songwriter and guitarist
Obi Mhondera (born 1980) – England-based pop songwriter
Eric Moyo (born 1982) – singer
Tongai Moyo (1968–2011) – sungura singer-songwriter[2]
August Msarurgwa (1920–1968) – composer
Audius Mtawarira (born 1977) – Australia-based urban grooves artist
Oliver Mtukudzi (1952–2019) – Afrojazz singer-songwriter and guitarist
Sam Mtukudzi (1988–2010) – Afrojazz musician[2]
Anna Mudeka – England-based musician
Carol Mujokoro – gospel music artist
Ephat Mujuru (1950–2001) – mbira player[2]
Prince Kudakwashe Musarurwa (born 1988) – Afrojazz musician
Isaac Musekiwa – DR Congo-based soukous artist and saxophonist
Busi Ncube (born 1963) – mbira player and singer
Albert Nyathi (born 1962) – poet and singer-songwriter
Jah Prayzah, musician
Ramadu (born 1975) – singer-songwriter
Roki (born 1985) – Madagascar-born urban grooves artist
Kingsley Sambo (1936–1977) – jazz guitarist
Herbert Schwamborn (born 1973) – Germany-based hip hop and electronic music artist; member of the band Söhne Mannheims
Jonah Sithole (1952–1997) – chimurenga music artist and guitarist[2]
Solomon Skuza (1956–1995) – pop singer-songwriter[2]
Buffalo Souljah (born 1980) – Zimdancehall and reggae artist
Shingisai Suluma (born 1971) – gospel music artist
Takura (born 1991) – house music and hip hop artist
Tocky Vibes (born 1993) - Singer Lyricist Songwriter
System Tazvida (born 1968) – singer-songwriter
Biggie Tembo Jr. (born 1988) – jit musician
Clem Tholet (1948–2004) – Rhodesian guitarist and folk singer
Garikayi Tirikoti (born 1961) – mbira player
Diego Tryno (born 1998) - urban contemporary and hip-hop musician
Viomak – protest musician and activist
Patrick Mukwamba (born 1951) – pop singer
Tarisai Vushe (born 1987) – Australia-based singer who appeared on Australian Idol
Edith WeUtonga (born 1979) – Afrojazz singer-songwriter and bass guitarist
Winky D (born 1983) – dancehall and reggae artist
Jonathan Wutawunashe – gospel artist
Leonard Zhakata (born 1968) – sungura and adult contemporary music artist
Zinjaziyamluma- maskandi singer
Charity Zisengwe – contemporary Christian music artist
Soukous
Antoine Kolosoy, a.k.a. Papa Wendo
Aurlus Mabele
Awilo Longomba
Bozi Boziana
Diblo Dibala
Dindo Yogo
Dr Nico Kasanda
Empire Bakuba
Evoloko Jocker
Fally Ipupa
Ferre Gola
François Luambo Makiadi, band leader of OK Jazz
Grand Kalle, band leader of Grand Kalle et l'African Jazz
Kanda Bongo Man
Kasaloo Kyanga
King Kester Emeneya
Koffi Olomide
Les Quatre Étoiles 4 Etoiles
Loketo
M'bilia Bel
Meiway
Mose Fan Fan
Monique Séka
Nyboma
Oliver N'Goma
Papa Wemba
Pepe Kalle
Quartier Latin International
Les Quatre Étoiles
Remmy Ongala
Rigo Star
Sam Fan Thomas
Sam Mangwana
Samba Mapangala, band leader of Orchestra Virunga
Tabu Ley Rochereau, band leader of African Fiesta
Tshala Muana
Werrason
Yondo Sister
Zaiko Langa Langa""".split("\n")
from string import ascii_uppercase

# Parse the raw Wikipedia dump in `txt` into {country: [artist, ...]}.
# Blank lines end a country section; the first line after a blank (or the
# very first line) names the next country.
data = {}
country = None
for raw in txt:
    if country is None:
        country = raw
        data[country] = []
        continue
    if not raw:
        country = None
        continue
    name = raw
    # Strip trailing qualifiers: birth years, life spans, descriptions.
    for sep in (" (born", " (1", " (b", " (c", " (p", ", ", " – ", " - "):
        name = name.split(sep)[0]
    # Drop parenthetical disambiguators left mid-string.
    for noise in ("(band)", "(group)", "(rapper)"):
        name = name.replace(noise, "").strip()
    # Skip single-letter alphabetical section headers ("A", "B", ...).
    if name in ascii_uppercase:
        continue
    data[country].append(name)
# BUG FIX: the original imported only Series, so the bare `DataFrame` name
# below raised a NameError; import it from pandas explicitly.
from pandas import Series, DataFrame

# Wrap the per-country artist lists in a one-column DataFrame and persist it.
african_artists = DataFrame(Series(data))
african_artists.columns = ["Artists"]
african_artists.head()
saveFile(ifile="/Volumes/Piggy/Charts/data/africa/categories/Artists.p", idata=african_artists, debug=True)
%autoreload
from Africa import africaData
# First pass: parse the raw Africa chart pages into intermediate data.
africa = africaData()
africa.parse()
%autoreload
from Africa import africaData
# Second pass: rebuild chart/artist/album data and save it to disk.
africa = africaData()
#africa.setDBRenames(manDB)
#africa.setMultiDBRenames(multimanDB)
# Keep only the top four rank tiers of each chart.
africa.setChartUsage(rank=[0,1,2,3])
africa.setFullChartData()
africa.setArtistAlbumData()
africa.saveArtistAlbumData()
africa.saveFullChartData()
from searchUtils import findExt
from fileUtils import getBaseFilename

# Emit one attribute-definition line per pickled result file, ready to be
# pasted into a chart class (parentheses stripped, dashes -> underscores).
names = [getBaseFilename(ifile) for ifile in findExt("/Volumes/Piggy/Charts/data/africa/results/", ".p")]
for name in names:
    attr = name.replace("(", "").replace(")", "").replace("-", "_")
    print("self.{0: <50} = ['{1}']".format(attr.lower(), name))
#uDisc.setDBRenames(manDB)
#uDisc.setMultiDBRenames(multimanDB)
# Restrict uDisc charts to the top rank tier only, then rebuild and persist
# the chart and artist/album data (same pipeline shape as `africa` above).
uDisc.setChartUsage(rank=[0])
uDisc.setFullChartData()
uDisc.setArtistAlbumData()
uDisc.saveArtistAlbumData()
uDisc.saveFullChartData()
```
|
github_jupyter
|
# WELL NOTEBOOK
## Well logs visualization & petrophysics
Install the repository `reservoirpy` from GitHub and import the required packages
```
import os
path = os.path.join('/home/santiago/Documents/dev/reservoirpy')
import sys
sys.path.insert(0,path)
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from shapely.geometry import Point
import folium
from pyproj import Proj, transform, CRS, Transformer
import pyvista as pv
from reservoirpy.wellpy import path as ph
```
### Well attributes
Well attributes: name, rte, coordinates, survey
```
# Load the deviation survey (measured depth / inclination / azimuth) from CSV.
deviation = pd.read_csv('survey.csv', header=[0])
deviation.head()
# Formation tops: measured-depth top/bottom intervals for two formations.
tops1 = ph.tops({'formation':['fm1','fm2'],'md_top':[5000,5100],'md_bottom':[5099,5145]})
tops1
```
## Create some wells
```
#Create the well object
# well-1: deviated well defined by an explicit survey and list coordinates.
name1 = 'well-1'
rte1 = 1515.78 # Rotary table Elevation
surf_coord1 = [1000000,1000000]#Point(1000100,1000000,520)
crs1 = 'EPSG:3117'
tops1 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})
deviation1 = deviation.copy()
deviation1['azi'] = deviation1['azi'] + 0  # azimuth kept unchanged for well-1
w1 = ph.well(name=name1,
             rte=rte1,
             surf_coord=surf_coord1,
             survey = deviation1,
             tops=tops1,
             crs=crs1)
#Create the well object
# well-2: same survey shape as well-1 but a Point surface coordinate.
name2 = 'well-2'
rte2 = 515 # Rotary table Elevation
surf_coord2 = Point(1000100,1000000)
crs2 = 'EPSG:3117'
tops2 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})
deviation2 = deviation.copy()
# BUG FIX: offset well-2's own azimuth column. The original read
# deviation1['azi'] (a copy-paste slip); it happened to be numerically
# equal here, but would silently break if well-1's offset changed.
deviation2['azi'] = deviation2['azi'] + 0
w2 = ph.well(name=name2,
             rte=rte2,
             surf_coord=surf_coord2,
             survey = deviation2,
             tops=tops2,
             crs=crs2)
#Create the well object
# well-3: azimuth rotated by 30 degrees relative to the base survey.
name3 = 'well-3'
rte3 = 515 # Rotary table Elevation
surf_coord3 = Point(1000500,1000000)
crs3 = 'EPSG:3117'
tops3 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})
deviation3 = deviation.copy()
# BUG FIX: rotate well-3's own azimuth column. The original read
# deviation1['azi'] (a copy-paste slip); deviation1 equals the base survey
# today, but the intent is clearly to modify this well's own copy.
deviation3['azi'] = deviation3['azi'] + 30
w3 = ph.well(name=name3,
             rte=rte3,
             surf_coord=surf_coord3,
             survey = deviation3,
             tops=tops3,
             crs=crs3)
#Create the well object
# well-4: created without a survey — presumably treated as a vertical
# trajectory by ph.well (TODO confirm against reservoirpy docs).
name4 = 'well-4'
rte4 = 515 # Rotary table Elevation
surf_coord4 = Point(1100500,1200000)
crs4 = 'EPSG:3117'
tops4 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})
w4 = ph.well(name=name4,
             rte=rte4,
             surf_coord=surf_coord4,
             tops=tops4,
             crs=crs4)
#Create the well object
# well-5: no survey either, but with an explicit total depth (td).
name5 = 'well-5'
rte5 = 515 # Rotary table Elevation
surf_coord5 = Point(1170500,1200000)
crs5 = 'EPSG:3117'
tops5 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})
w5 = ph.well(name=name5,
             rte=rte5,
             surf_coord=surf_coord5,
             tops=tops5,
             crs=crs5,
             td=8452)
# Inspect the survey ph.well generated for well-4 (which had none supplied).
w4.survey
```
## Create an empty wells group
You can create a `wells_group` object either empty or not. It only accepts `well` objects.
```
g1 = ph.wells_group(w1)
```
To see the list of wells call the method `wells_group.wells`. It contains a dictionary with the name of each well as the key and the `well` object as the item
```
g1.wells
```
### Add more wells to existing list
by calling the method `wells_group.add_well()` you can add more wells to an existing group
```
g1.add_well(w2,w3)  # append two more wells to the existing group
g1.wells
```
### Get attributes from a `wells_group`
```
g1.wells['well-3'].surf_coord.wkt
```
### Describe each well with its attributes
```
g1.describe()
```
#### Wells tops
Get the wells formations tops. If no parameters passed, it returns all wells and formations. You can pass `wells` and `formations` parameter to get the selected wells and formations
```
g1.wells_tops()  # tops for every well and formation
g1.wells_tops(wells=['well-1','well-2'], formations=['fm1'])  # filtered subset
```
#### Wells survey
```
# Survey tables for the whole group, then a filtered subset.
g1.wells_surveys().head()
g1.wells_surveys(wells=['well-1','well-2'])
# Pairwise well distances along the requested dimensions.
g1.wells_distance(dims=['z'])
dist = g1.wells_distance(wells=['well-1','well-2'],dims=['y','z','x'])
dist
# Interactive folium map of the well locations.
m = g1.wells_map(zoom=13)
m
g1.wells_coordinates()
g1.wells_tops().head()
# Distance between wells measured at a formation top.
g1.formation_distance(formation='fm2')
g1.formation_distance(wells=['well-1','well-2','well-3'],formation='fm2', dims=['tvdss_top'])
# Plan view: tops as points, trajectories as lines (coords shifted near 0).
fig, ax = plt.subplots()
for i in g1.wells:
    _t = g1.wells[i].tops
    _s = g1.wells[i].survey
    ax.scatter(_t['easting']-1000000,_t['northing']-1000000)
    ax.plot(_s['easting']-1000000,_s['northing']-1000000)
# Project tops/surveys onto a 1-D line at the given azimuth.
df, c = g1.wells_tops(projection1d=True, azi=45)
print(c)
print(df)
surv,ce = g1.wells_surveys(projection1d=True, azi=45, center=c)
print(surv)
# Cross-section at azimuth 0: formation tops over well trajectories.
azi= 0
tops, center = g1.wells_tops(projection1d=True, azi=azi)
surv,ce = g1.wells_surveys(projection1d=True, azi=azi, center=center)
fig, ax = plt.subplots()
sns.lineplot(x='projection',y='tvdss_top', data=tops,
             hue='formation', style='formation',markers=True, ax=ax, palette='Set1')
sns.lineplot(x='projection',y='tvdss', data=surv,
             hue='well', style='well', ax=ax,palette='GnBu_d')
# Built-in structural cross-section helpers.
g1.structural_view(azi=45,ylims=[-4000,-12000],formations=['fm2'])
g1.structural_view(azi=45,formations=['fm1'], wells=['well-1','well-2'])
```
## Export wells survey to PyVista object vtk
```
# Export a single well trajectory as a PyVista/VTK object and render it.
w1_vtk = g1.wells['well-1'].get_vtk()
w1_vtk
w1_vtk.plot(notebook=False)
# All surveys at once; plot coloured by different scalar fields.
ss=g1.wells_surveys_vtk()
ss.plot(notebook=False)
p=pv.Plotter(notebook=False)
p.add_mesh(ss['well-1'], scalars='azi')
p.add_mesh(ss['well-2'], scalars='tvdss')
p.show()
# Formation tops and structural view as VTK meshes.
tops_vtk = g1.tops_vtk()
tops_vtk.plot(notebook=False)
str_vtk = g1.structural_view_vtk()
str_vtk.plot(notebook=False)
```
|
github_jupyter
|
```
import paddle
from paddle.nn import Linear
import paddle.nn.functional as F
import numpy as np
import os
import random
def load_data():
    """Load the housing data file, normalise it, and split train/test.

    Returns (training_data, test_data) as float32 arrays of shape [N, 14]
    (13 features + target). Also stores the normalisation statistics in the
    module globals max_values/min_values/avg_values for later de-normalising
    of predictions.
    """
    # Load the raw whitespace-separated data file.
    datafile = 'external-libraries/housing.data'
    data = np.fromfile(datafile, sep=' ', dtype=np.float32)
    # Each record holds 14 values: 13 predictive features, then the median
    # house price (MEDV) as the regression target.
    feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', \
                      'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]
    feature_num = len(feature_names)
    # Reshape the flat array into [N, 14].
    data = data.reshape([data.shape[0] // feature_num, feature_num])
    # Use 80% of the rows for training, 20% for testing; the two splits
    # must not overlap.
    ratio = 0.8
    offset = int(data.shape[0] * ratio)
    training_data = data[:offset]
    # Per-feature max / min / mean computed on the training split only,
    # so no test information leaks into the scaling.
    maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), \
                                 training_data.sum(axis=0) / training_data.shape[0]
    # Remember the normalisation parameters for de-normalising predictions.
    global max_values
    global min_values
    global avg_values
    max_values = maximums
    min_values = minimums
    avg_values = avgs
    # Normalise every column in place: (x - mean) / (max - min).
    for i in range(feature_num):
        data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
    # Re-slice after normalisation so both returned splits are normalised.
    training_data = data[:offset]
    test_data = data[offset:]
    return training_data, test_data
class Regressor(paddle.nn.Layer):
    """Two-layer fully connected regression network (13 -> 20 -> 1)."""

    def __init__(self):
        # Initialise the parent Layer before registering sublayers.
        super(Regressor, self).__init__()
        # Hidden layer maps the 13 housing features to 20 units; the output
        # layer maps those to a single predicted price. Attribute names fc1
        # and fc2 are kept as-is: they are the state_dict parameter keys.
        self.fc1 = Linear(in_features=13, out_features=20)
        self.fc2 = Linear(in_features=20, out_features=1)

    def forward(self, inputs):
        """Forward pass: linear -> ReLU -> linear."""
        hidden = F.relu(self.fc1(inputs))
        return self.fc2(hidden)
# Instantiate the linear-regression model defined above.
model = Regressor()
# Switch the model into training mode.
model.train()
# Load the data.
training_data, test_data = load_data()
# Optimiser: plain SGD with learning rate 0.01.
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
EPOCH_NUM = 10   # number of passes over the training set
BATCH_SIZE = 10  # samples per mini-batch
# Outer loop over epochs.
for epoch_id in range(EPOCH_NUM):
    # Shuffle the training rows before each epoch.
    np.random.shuffle(training_data)
    # Slice the shuffled data into mini-batches of BATCH_SIZE rows.
    mini_batches = [training_data[k:k+BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)]
    # Inner loop over mini-batches.
    for iter_id, mini_batch in enumerate(mini_batches):
        x = np.array(mini_batch[:, :-1]) # features for this batch
        y = np.array(mini_batch[:, -1:]) # labels (true prices) for this batch
        # Convert numpy arrays to paddle dynamic-graph tensors.
        house_features = paddle.to_tensor(x)
        prices = paddle.to_tensor(y)
        # Forward pass.
        predicts = model(house_features)
        # Squared-error loss, averaged over the batch.
        loss = F.square_error_cost(predicts, label=prices)
        avg_loss = paddle.mean(loss)
        if iter_id%20==0:
            print("epoch: {}, iter: {}, loss: {}".format(epoch_id, iter_id, avg_loss.numpy()))
        # Backward pass.
        avg_loss.backward()
        # Apply the gradient step.
        opt.step()
        # Reset gradients for the next batch.
        opt.clear_grad()
# Persist the trained parameters to LR_model.pdparams.
paddle.save(model.state_dict(), 'LR_model.pdparams')
def load_one_example():
    """Pick one test sample (features, label) for a spot-check prediction.

    Uses a fixed index — the 10th record from the end — so the check is
    reproducible. (The original also drew a random index that was
    immediately overwritten; that dead code is removed.)
    """
    idx = -10  # deterministic: inspect the 10th-from-last test record
    one_data, label = test_data[idx, :-1], test_data[idx, -1]
    # Reshape the features into a batch of one: [1, 13].
    one_data = one_data.reshape([1,-1])
    return one_data, label
# Load the saved parameter file back into the model and switch to eval mode.
model_dict = paddle.load('LR_model.pdparams')
model.load_dict(model_dict)
model.eval()
# Pick one (already normalised) test sample.
one_data, label = load_one_example()
# Convert to a paddle tensor for inference.
one_data = paddle.to_tensor(one_data)
predict = model(one_data)
# De-normalise the prediction back into a price.
predict = predict * (max_values[-1] - min_values[-1]) + avg_values[-1]
# De-normalise the ground-truth label the same way.
label = label * (max_values[-1] - min_values[-1]) + avg_values[-1]
print("predicted {}, real {}".format(predict.numpy(), label))
```
|
github_jupyter
|
# **<div align="center"> Dolby.io Developer Days Media APIs 101 - Getting Started </div>**
### **<div align="center"> Notebook #1: Getting Started</div>**
### Starting with a Raw Audio File
We can run code blocks like this in Binder by pressing "Control+Enter". Try it now after clicking the below code block!
```
import IPython # Helper library to play audio files in Python natively.
# Set this link to any publically accessible media file you would like!
original_audio_file = "https://dolbyio.s3-us-west-1.amazonaws.com/public/shelby/airplane.original.mp4"
# IPython.display.Audio streams the remote URL in an embedded player.
IPython.display.Audio(original_audio_file) # Display the audio embedded within python
```
This imported IPython into our workspace, to let us play media files natively within Python, and set a variable to this public media file we will use for the rest of this notebook.
### **Step #1:** Gathering Credentials
- Go to http://dashboard.dolby.io/signup/ to sign up for a Dolby.io account.
- At the bottom of the "Applications" widget on the dashboard, click "_my first app_"
- Scroll down to the box labeled **'Media APIs'**.
- Copy the key text under "API Key:" and replace the string below, then run the cell.
- Also enter in your name to customize the output URL later.
- _Press Control+Enter to run the cell._

```
# Enter your Dolby.io Media API Key here.
api_key = "<YOUR_API_KEY_HERE>"
# Enter your name here to customize the output URL later.
name = "<YOUR_NAME_HERE>"
print("API Key and Name set!")
```
Now we have two key variables set:
1. The link to the original media file we want to process.
2. Our API key so we can properly call the REST API endpoints.
As well as your name, just so we can differentiate output later on.
### **Step #2:** Calling the Enhance Job
> Note: all of the following code is adapted from the Enhance quickstart found here: https://docs.dolby.io/media-apis/docs/quick-start-to-enhancing-media
- Run the cell below to start the enhance job, this should output a JSON response with only a `job_id` in the body if no errors occur.
```
import requests # Python library to make HTTP requests
output_url = f"dlb://out/workshop-{name}.mp4" # Setting the output URL to have a different location based on your name!
# Building the body of the request
body = {
    "input" : original_audio_file,
    "output" : output_url,
}
# Building the headers and url of the request
url = "https://api.dolby.com/media/enhance"
headers = {
    "x-api-key": api_key,
    "Content-Type": "application/json",
    "Accept": "application/json"
}
# Call the API request!
# raise_for_status() turns HTTP error codes into exceptions so failures are loud.
response = requests.post(url, json=body, headers=headers)
response.raise_for_status()
print(response.json()) # Prints out the output of the request
```
### **Step #3:** Checking Job Status
- Now that we have created a job, we should check its status.
- Run the cell below to check the status, this file is small so it should take only a couple of seconds.
```
# Poll the same endpoint with GET plus the job_id to check progress.
url = "https://api.dolby.com/media/enhance"
headers = {
    "x-api-key": api_key,
    "Content-Type": "application/json",
    "Accept": "application/json"
}
params = {
    "job_id": response.json()["job_id"]
}
response = requests.get(url, params=params, headers=headers)
response.raise_for_status()
print(response.json())
```
This should look like the following when done:
```json
{'path': '/media/enhance', 'status': 'Success', 'progress': 100, 'api_version': 'v1.1.2', 'result': {}}
```
### **Step #4:** Download the Processed File
- Now we want to download the file!
- We can do this with another request.
```
import shutil
# The name of the file that will be downloaded locally!
output_path = f"workshop-{name}.mp4"
url = "https://api.dolby.com/media/output"
headers = {
    "x-api-key": api_key,
    "Content-Type": "application/json",
    "Accept": "application/json",
}
args = {
    "url": output_url
}
# Take the response and download it locally
# stream=True fetches the body in chunks instead of loading it into memory;
# copyfileobj then streams it straight to disk.
with requests.get(url, params=args, headers=headers, stream=True) as response:
    response.raise_for_status()
    response.raw.decode_content = True
    print("Downloading from {0} into {1}".format(response.url, output_path))
    with open(output_path, "wb") as output_file:
        shutil.copyfileobj(response.raw, output_file)
```
When it is done downloading, you'll see it pop up on the left side bar.
Now that the file is downloaded lets give it a listen. Does it sound better?
```
IPython.display.Audio(output_path)
```
### **Congratulations you made your first call with the Dolby.io Enhance API!**
We can now move onto Workshop Part 2 on the left sidebar!

References:
https://docs.python-requests.org/en/latest/
https://ipython.org/
|
github_jupyter
|
# Import
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
# Echo library versions for reproducibility of this notebook run.
matplotlib.__version__
np.__version__, pd.__version__
```
# Dataset:
```
# fetch_california_housing is the supported public API; the private
# sklearn.datasets.california_housing module was deprecated and removed
# (sklearn 0.24+), so importing it breaks on current scikit-learn.
from sklearn.datasets import fetch_california_housing

# Download (or load cached) California housing data as a Bunch.
data = fetch_california_housing()
X = data['data']
y = data['target']
columns = data['feature_names']
# Assemble a DataFrame with the features plus the regression target.
train_df = pd.DataFrame(X, index=np.arange(len(X)), columns=columns)
train_df['target'] = y
train_df.head()
```
# 1) Initialize:
```
import sys
sys.path.append('../SWMat/')
from SWMat import SWMat
from matplotlib.patches import Wedge, Polygon
from matplotlib.collections import PatchCollection

# Draw the "SWMat" logo: hide all axes chrome, then compose letter shapes
# from Wedge/Polygon patches.
fig = plt.figure(figsize=(10, 7))
ax = plt.gca()
for pos in ["right", "left", "top", "bottom"]:
    ax.spines[pos].set_visible(False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
patches = []
patches += [Wedge((.2, .6), .1, 45, 270, width=0.05),
            Wedge((.2, .45), .1, 225, 450, width=0.05),
            Polygon(np.array([[.22, .23], [.26, .23], [.33, .48], [.39, .40], [.43, .47], [.49, .22],
                              [.52, .22], [.45, .53], [.42, .54], [.39, .46], [.34, .53], [.32, .54]]) + np.array([0.25, 0.3])),
            Polygon(np.array([[.32, .70], [.27, .32], [.32, .31], [.36, .44], [.40, .44], [.43, .30], [.45, .30],
                              [.53, .66], [.50, .67], [.45, .39], [.43, .39], [.42, .48], [.38, .50], [.32, .37], [.29, .37], [.35, .70]]))
           ]
colors = 100*np.random.rand(len(patches))
p = PatchCollection(patches, alpha=0.85)
p.set_array(np.array(colors))
ax.add_collection(p);
plt.text(0.1, 0.09, "Storytelling With Matplotlib", fontsize=30, color="#3b5998")
plt.annotate("Cluttered Data...", xy=(.8, .5), xytext=(1.1, .75), color="black",
             arrowprops={'arrowstyle':'->', 'color': 'black',
                         "connectionstyle":"arc3,rad=-0.2"},
             bbox={'pad':6, 'edgecolor':'orange', 'facecolor':
                   'orange', 'alpha':0.4}, fontsize=17)
#plt.text(x=1.3, y=.1, s="Communicating Data\nEffectively.", fontsize=20, ha="center")
swm = SWMat(plt, ax=ax)
swm.text("\> Communicating <prop color='#3b5998' fontsize='30'>Data</prop>Effectively.", fontsize=20,
         position="out-lower-right");
# Simple Text
swm = SWMat(plt) # And... base beautifications will be added.
y = np.arange(500) + np.random.random(500)*50 + np.random.random(500)*40 + np.random.random(500)*50 + np.random.random(500)*10
x = np.arange(500)
plt.scatter(x, y)
swm.text("Here goes your text!\nAnother Text!!");
# Multi-series line plot; highlight=0 emphasises the first series ("A").
swm = SWMat(plt)
ls = swm.line_plot(np.array([[1, 2, 3, 4], [1, 2, 3, 4]]).T, np.array([[1, 4, 2, 6], [4, 2, 6, 5]]).T, line_labels=["A", "B"],
                   highlight=0, lw=3)
# Histogram of the target with one highlighted bin.
swm = SWMat(plt)
hist = swm.hist(train_df['target'], highlight=3, bins=[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5], ec='w', hide_y=True)
#t = swm.text("My first text!<prop>Possible Outliers</prop><prop>haleluya\nyo lib ipsum dipsum</prop>\nipsum",
#             fontsize=18)
# 100%-stacked bar chart with one highlighted (data, category) cell.
swm = SWMat(plt)
swm.bar(np.array([[1, 2, 3], [1, 2, 3]]), np.array([[2, 5, 3], [4, 1, 3]]), data_labels=["Alpha", "Beta"], highlight={"data":1, "cat":1},
        cat_labels=["One", "Two", "Three"], plot_type="stacked100%", width=0.8);
# Violin plot of the target, top half only, with highlighted value ranges.
swm = SWMat(plt)
v = swm.violinplot(train_df['target'], show="top", highlight={"0":[(0.7, 2.3), (4.7, 6)]})
# Grouped bar charts, with and without a highlighted data series.
swm = SWMat(plt)
swm.bar(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[2, 5, 3], [4, 3, 6], [2, 4, 2], [2, 4, 1]]), data_labels=["A", "B", "C", "D"], cat_labels=["One", "Two", "Three"], highlight={"data":1});
swm = SWMat(plt)
swm.bar(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[2, 5, 3], [4, 3, 6], [2, 4, 2]]), data_labels=["A", "B", "C"], cat_labels=["One", "Two", "Three"]);
```
|
github_jupyter
|
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('../docs')
from gen_doc.nbdoc import show_doc as sd
#export
from nb_001b import *
import sys, PIL, matplotlib.pyplot as plt, itertools, math, random, collections, torch
import scipy.stats, scipy.special
from enum import Enum, IntEnum
from torch import tensor, Tensor, FloatTensor, LongTensor, ByteTensor, DoubleTensor, HalfTensor, ShortTensor
from operator import itemgetter, attrgetter
from numpy import cos, sin, tan, tanh, log, exp
from dataclasses import field
from functools import reduce
from collections import defaultdict, abc, namedtuple, Iterable
from typing import Tuple, Hashable, Mapping, Dict
import mimetypes
import abc
from abc import abstractmethod, abstractproperty
```
# CIFAR subset data
First we want to view our data to check if everything is how we expect it to be.
## Setup
```
DATA_PATH = Path('data')
PATH = DATA_PATH/'cifar10_dog_air'
TRAIN_PATH = PATH/'train'
# Preview one dog image from the training folder...
dog_fn = list((TRAIN_PATH/'dog').iterdir())[0]
dog_image = PIL.Image.open(dog_fn)
dog_image.resize((256,256))
# ...and one airplane image, upscaled for display.
air_fn = list((TRAIN_PATH/'airplane').iterdir())[1]
air_image = PIL.Image.open(air_fn)
air_image.resize((256,256))
```
## Simple Dataset/Dataloader
We will build a Dataset class for our image files. A Dataset class needs to have two functions: `__len__` and `__getitem__`. Our `ImageDataset` class additionally gets image files from their respective directories and transforms them to tensors.
```
#export
def image2np(image:Tensor)->np.ndarray:
    "Convert a torch-style CHW `image` tensor to a numpy/matplotlib HWC array."
    arr = image.cpu().permute(1,2,0).numpy()
    # Collapse a trailing singleton channel so grayscale plots as 2-D.
    return arr[...,0] if arr.shape[2]==1 else arr
def show_image(img:Tensor, ax:plt.Axes=None, figsize:tuple=(3,3), hide_axis:bool=True,
               title:Optional[str]=None, cmap:str='binary', alpha:Optional[float]=None)->plt.Axes:
    "Plot tensor `img` on axis `ax` (created with `figsize` if None); `cmap`/`alpha` go to `imshow`."
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    ax.imshow(image2np(img), cmap=cmap, alpha=alpha)
    if title:
        ax.set_title(title)
    if hide_axis:
        ax.axis('off')
    return ax
class Image():
    "Thin wrapper holding a raw image tensor in `px`."
    def __init__(self, px):
        self.px = px

    @property
    def data(self):
        "The underlying pixel tensor."
        return self.px

    def show(self, ax=None, **kwargs):
        "Display via `show_image`, forwarding any extra `kwargs`."
        return show_image(self.px, ax=ax, **kwargs)
#export
FilePathList = Collection[Path]
TensorImage = Tensor
NPImage = np.ndarray

def find_classes(folder:Path)->FilePathList:
    "Return the class subdirectories of an imagenet-style train `folder`, sorted by name."
    subdirs = [entry for entry in folder.iterdir()
               if entry.is_dir() and not entry.name.startswith('.')]
    assert(len(subdirs)>0)
    return sorted(subdirs, key=lambda entry: entry.name)

# Every extension that mimetypes maps to an image/* MIME type.
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))

def get_image_files(c:Path, check_ext:bool=True)->FilePathList:
    "List image files in `c`; `check_ext` filters suffixes to `image_extensions`."
    files = []
    for entry in c.iterdir():
        # Skip hidden entries and subdirectories.
        if entry.name.startswith('.') or entry.is_dir():
            continue
        if check_ext and entry.suffix not in image_extensions:
            continue
        files.append(entry)
    return files
def pil2tensor(image:NPImage)->TensorImage:
    "Convert a PIL-style `image` into a torch CHW byte tensor."
    # Copy the raw bytes into a flat ByteTensor, reshape to HWC, then CHW.
    flat = torch.ByteTensor(torch.ByteStorage.from_buffer(image.tobytes()))
    hwc = flat.view(image.size[1], image.size[0], -1)
    return hwc.permute(2,0,1)

PathOrStr = Union[Path,str]

def open_image(fn:PathOrStr):
    "Open the image file `fn` as an RGB `Image` with float pixels in [0,1]."
    pil_img = PIL.Image.open(fn).convert('RGB')
    return Image(pil2tensor(pil_img).float().div_(255))
#export
NPArrayableList = Collection[Union[np.ndarray, list]]
NPArrayMask = np.ndarray
SplitArrayList = List[Tuple[np.ndarray,np.ndarray]]

def arrays_split(mask:NPArrayMask, *arrs:NPArrayableList)->SplitArrayList:
    "Split every array in `arrs` by boolean `mask`: returns [masked tuple, inverse-masked tuple]."
    mask = array(mask)
    pairs = [(a[mask], a[~mask]) for a in map(np.array, arrs)]
    # zip transposes per-array pairs into (all-masked, all-unmasked) tuples.
    return list(zip(*pairs))

def random_split(valid_pct:float, *arrs:NPArrayableList)->SplitArrayList:
    "Randomly split `arrs` with roughly `valid_pct` of rows in the validation part."
    train_mask = np.random.uniform(size=(len(arrs[0]),)) > valid_pct
    return arrays_split(train_mask, *arrs)
class DatasetBase(Dataset):
    "Shared behaviour for fastai datasets: length from `self.x`, class count from `self.y`."
    def __len__(self): return len(self.x)

    @property
    def c(self):
        "Number of classes implied by the shape of `self.y` (1 if y is 1-D)."
        shape = self.y.shape
        return shape[-1] if len(shape) > 1 else 1

    def __repr__(self): return f'{type(self).__name__} of len {len(self)}'

class LabelDataset(DatasetBase):
    "Classification dataset: the class count comes from `self.classes`."
    @property
    def c(self):
        "Number of distinct labels."
        return len(self.classes)
#export
ImgLabel = str
ImgLabels = Collection[ImgLabel]
Classes = Collection[Any]

class ImageDataset(LabelDataset):
    "Dataset over image folders laid out as {folder}/{class}/{images}."
    def __init__(self, fns:FilePathList, labels:ImgLabels, classes:Optional[Classes]=None):
        # Default the class list to the distinct labels when not supplied.
        self.classes = ifnone(classes, list(set(labels)))
        self.class2idx = {cls_name: idx for idx, cls_name in enumerate(self.classes)}
        self.x = np.array(fns)
        self.y = np.array([self.class2idx[lbl] for lbl in labels], dtype=np.int64)

    def __getitem__(self,i):
        # Lazily open the image at index `i`; return (Image, class index).
        return open_image(self.x[i]), self.y[i]

    @staticmethod
    def _folder_files(folder:Path, label:ImgLabel, check_ext=True)->Tuple[FilePathList,ImgLabels]:
        "Image files in `folder`, each paired with the same `label`."
        files = get_image_files(folder, check_ext=check_ext)
        return files, [label]*len(files)

    @classmethod
    def from_single_folder(cls, folder:PathOrStr, classes:Classes, check_ext=True):
        "Typically a test set: label every image in `folder` with `classes[0]`."
        files, labels = cls._folder_files(folder, classes[0], check_ext=check_ext)
        return cls(files, labels, classes=classes)

    @classmethod
    def from_folder(cls, folder:Path, classes:Optional[Classes]=None,
                    valid_pct:float=0., check_ext:bool=True) -> Union['ImageDataset', List['ImageDataset']]:
        """Dataset of `classes`-labelled images under `folder`; when
        `valid_pct` > 0, return a random [train, valid] pair instead."""
        if classes is None:
            classes = [d.name for d in find_classes(folder)]
        fns, labels = [], []
        for label in classes:
            cl_fns, cl_labels = cls._folder_files(folder/label, label, check_ext=check_ext)
            fns += cl_fns
            labels += cl_labels
        if valid_pct==0.:
            return cls(fns, labels, classes=classes)
        return [cls(*split, classes=classes) for split in random_split(valid_pct, fns, labels)]
sd(ImageDataset.from_folder)
```
# Data augmentation
We are going to augment our data to increase the size of our training set with artificial images. These new images are basically "free" data that we can use in our training to help our model generalize better (reduce overfitting).
## Lighting
We will start by changing the **brightness** and **contrast** of our images.
### Method
**Brightness**
Brightness refers to where does our image stand on the dark-light spectrum. Brightness is applied by adding a positive constant to each of the image's channels. This works because each of the channels in an image goes from 0 (darkest) to 255 (brightest) in a dark-light continuum. (0, 0, 0) is black (total absence of light) and (255, 255, 255) is white (pure light). You can check how this works by experimenting by yourself [here](https://www.w3schools.com/colors/colors_rgb.asp).
_Parameters_
1. **Change** How much brightness do we want to add to (or take from) the image.
Domain: Real numbers
**Contrast**
Contrast refers to how sharp a distinction there is between brighter and darker sections of our image. To increase contrast we need darker pixels to be darker and lighter pixels to be lighter. In other words, we would like channels with a value smaller than 128 to decrease and channels with a value of greater than 128 to increase.
_Parameters_
1. **Scale** How much contrast do we want to add to (or remove from) the image.
Domain: [0, +inf]
***On logit and sigmoid***
Notice that for both transformations we first apply the logit to our tensor, then apply the transformation and finally take the sigmoid. This is important for two reasons.
First, we don't want to overflow our tensor values. In other words, we need our final tensor values to be between [0,1]. Imagine, for instance, a tensor value at 0.99. We want to increase its brightness, but we can’t go over 1.0. By doing logit first, which first moves our space to -inf to +inf, this works fine. The same applies to contrast if we have a scale S > 1 (might make some of our tensor values greater than one).
Second, when we apply contrast, we need to affect the dispersion of values around the middle value. Say we want to increase contrast. Then we need the bright values (>0.5) to get brighter and dark values (<0.5) to get darker. We must first transform our tensor values so our values which were originally <0.5 are now negative and our values which were originally >0.5 are now positive. This way, when we multiply by a constant, the dispersion around 0 will increase. The logit function does exactly this and allows us to increase or decrease dispersion around a mid value.
### Implementation
```
#export
def logit(x:Tensor)->Tensor:
    "Inverse of the sigmoid: map values in (0,1) onto the whole real line."
    return -(1/x-1).log()
def logit_(x:Tensor)->Tensor:
    "In-place `logit`; mutates and returns `x`."
    return (x.reciprocal_().sub_(1)).log_().neg_()
def contrast(x:Tensor, scale:float)->Tensor:
    "Scale logit-space pixels in place by `scale` to change contrast."
    return x.mul_(scale)
#export
# Type aliases used throughout the transform pipeline.
FlowField = Tensor              # grid of normalized (x, y) sampling coordinates
LogitTensorImage = TensorImage  # image tensor whose values live in logit space
AffineMatrix = Tensor           # 3x3 affine transform matrix
KWArgs = Dict[str,Any]
ArgStar = Collection[Any]
CoordSize = Tuple[int,int,int]  # (ch, h, w)
LightingFunc = Callable[[LogitTensorImage, ArgStar, KWArgs], LogitTensorImage]
PixelFunc = Callable[[TensorImage, ArgStar, KWArgs], TensorImage]
CoordFunc = Callable[[FlowField, CoordSize, ArgStar, KWArgs], LogitTensorImage]
AffineFunc = Callable[[KWArgs], AffineMatrix]
class ItemBase():
    "All transformable dataset items use this type"
    # NOTE(review): without an ABCMeta metaclass these @abstractmethod markers
    # are not enforced -- subclasses that omit them can still be instantiated.
    @property
    @abstractmethod
    def device(self): pass
    @property
    @abstractmethod
    def data(self): pass
class ImageBase(ItemBase):
    "Base class for image-like `Dataset` items; subclasses implement the transform hooks."
    def lighting(self, func:LightingFunc, *args, **kwargs)->'ImageBase':
        "No-op hook; `Image` overrides this to run `func` in logit space."
        return self
    def pixel(self, func:PixelFunc, *args, **kwargs)->'ImageBase':
        "No-op hook for pixel-level transforms."
        return self
    def coord(self, func:CoordFunc, *args, **kwargs)->'ImageBase':
        "No-op hook for flow-field transforms."
        return self
    def affine(self, func:AffineFunc, *args, **kwargs)->'ImageBase':
        "No-op hook for affine transforms."
        return self
    def set_sample(self, **kwargs)->'ImageBase':
        "Remember keyword arguments controlling how `grid_sample` runs after transforms."
        self.sample_kwargs = kwargs
        return self
    def clone(self)->'ImageBase':
        "Copy this item by cloning its underlying `data`."
        return self.__class__(self.data.clone())
#export
class Image(ImageBase):
    "Supports applying transforms to image data."
    def __init__(self, px)->'Image':
        "create from raw tensor image data `px`"
        self._px = px
        # Lazily-populated state: queued logit-space pixels, flow field and
        # affine matrix are applied/collapsed on `refresh()`.
        self._logit_px=None
        self._flow=None
        self._affine_mat=None
        self.sample_kwargs = {}
    @property
    def shape(self)->Tuple[int,int,int]:
        "returns (ch, h, w) for this image"
        return self._px.shape
    @property
    def size(self)->Tuple[int,int,int]:
        "returns (h, w) for this image"
        return self.shape[-2:]
    @property
    def device(self)->torch.device: return self._px.device
    def __repr__(self): return f'{self.__class__.__name__} ({self.shape})'
    def refresh(self)->'Image':
        "Apply any queued logit or affine/flow transforms to the pixel buffer; returns self."
        if self._logit_px is not None:
            # Collapse logit-space edits back to pixel space.
            self._px = self._logit_px.sigmoid_()
            self._logit_px = None
        if self._affine_mat is not None or self._flow is not None:
            # `self.flow` folds the pending affine matrix into the flow field.
            self._px = grid_sample(self._px, self.flow, **self.sample_kwargs)
            self.sample_kwargs = {}
            self._flow = None
        return self
    @property
    def px(self)->TensorImage:
        "get the tensor pixel buffer"
        self.refresh()
        return self._px
    @px.setter
    def px(self,v:TensorImage)->None:
        "set the pixel buffer to `v`"
        self._px=v
    @property
    def flow(self)->FlowField:
        "access the flow-field grid after applying queued affine transforms"
        if self._flow is None:
            self._flow = affine_grid(self.shape)
        if self._affine_mat is not None:
            # Consume the accumulated affine matrix into the flow field.
            self._flow = affine_mult(self._flow,self._affine_mat)
            self._affine_mat = None
        return self._flow
    @flow.setter
    def flow(self,v:FlowField): self._flow=v
    def lighting(self, func:LightingFunc, *args:Any, **kwargs:Any)->'Image':
        "equivalent to `image = sigmoid(func(logit(image)))`"
        self.logit_px = func(self.logit_px, *args, **kwargs)
        return self
    def pixel(self, func:PixelFunc, *args, **kwargs)->'Image':
        "equivalent to `image.px = func(image.px)`"
        self.px = func(self.px, *args, **kwargs)
        return self
    def coord(self, func:CoordFunc, *args, **kwargs)->'Image':
        "equivalent to `image.flow = func(image.flow, image.size)`"
        self.flow = func(self.flow, self.shape, *args, **kwargs)
        return self
    def affine(self, func:AffineFunc, *args, **kwargs)->'Image':
        "equivalent to `image.affine_mat = image.affine_mat @ func()`"
        m = tensor(func(*args, **kwargs)).to(self.device)
        self.affine_mat = self.affine_mat @ m
        return self
    def resize(self, size:Union[int,CoordSize])->'Image':
        "resize the image to `size`, size can be a single int"
        # Resizing retargets the sampling grid, so no flow may be queued yet.
        assert self._flow is None
        if isinstance(size, int): size=(self.shape[0], size, size)
        self.flow = affine_grid(size)
        return self
    @property
    def affine_mat(self)->AffineMatrix:
        "get the affine matrix that will be applied by `refresh`"
        if self._affine_mat is None:
            self._affine_mat = torch.eye(3).to(self.device)
        return self._affine_mat
    @affine_mat.setter
    def affine_mat(self,v)->None: self._affine_mat=v
    @property
    def logit_px(self)->LogitTensorImage:
        "get logit(image.px)"
        if self._logit_px is None: self._logit_px = logit_(self.px)
        return self._logit_px
    @logit_px.setter
    def logit_px(self,v:LogitTensorImage)->None: self._logit_px=v
    def show(self, ax:plt.Axes=None, **kwargs:Any)->None:
        "plots the image into `ax`"
        show_image(self.px, ax=ax, **kwargs)
    @property
    def data(self)->TensorImage:
        "returns this image's pixels as a tensor"
        return self.px
# Build train/valid datasets from image folders.
train_ds = ImageDataset.from_folder(PATH/'train')
valid_ds = ImageDataset.from_folder(PATH/'test')
# `x` re-reads the same sample so each demo starts from a fresh Image.
x = lambda: train_ds[1][0]
img = x()
# Apply contrast directly in logit space, then display the result.
img.logit_px = contrast(img.logit_px, 0.5)
img.show()
# Same effect via the `lighting` wrapper (logit -> func -> sigmoid).
x().lighting(contrast, 0.5).show()
```
## Transform class
```
class Transform():
    "Wrap `func` as a callable; `_wrap` names a method on the target to delegate through."
    _wrap = None
    def __init__(self, func):
        self.func = func
    def __call__(self, x, *args, **kwargs):
        # With no wrapper, apply `func` directly; otherwise hand `func` to the
        # named method on `x` (e.g. `x.lighting(func, ...)`).
        if self._wrap is None: return self.func(x, *args, **kwargs)
        return getattr(x, self._wrap)(self.func, *args, **kwargs)
class TfmLighting(Transform): _wrap='lighting'
@TfmLighting
def brightness(x, change):
    "Shift logit-space pixels by logit(`change`); 0.5 is neutral (adds 0)."
    return x.add_(scipy.special.logit(change))
@TfmLighting
def contrast(x, scale):
    "Scale logit-space pixels by `scale`; 1.0 is neutral."
    return x.mul_(scale)
# Contrast sanity check: 1.0 leaves the image unchanged, <1 lowers, >1 raises.
_,axes = plt.subplots(1,4, figsize=(12,3))
x().show(axes[0])
contrast(x(), 1.0).show(axes[1])
contrast(x(), 0.5).show(axes[2])
contrast(x(), 2.0).show(axes[3])
# Brightness demo: 0.5 is neutral, >0.5 brightens, <0.5 darkens.
_,axes = plt.subplots(1,4, figsize=(12,3))
x().show(axes[0])
brightness(x(), 0.8).show(axes[1])
brightness(x(), 0.5).show(axes[2])
brightness(x(), 0.2).show(axes[3])
def brightness_contrast(x, scale_contrast, change_brightness):
    "Apply contrast then brightness to `x` in one call."
    adjusted = contrast(x, scale=scale_contrast)
    return brightness(adjusted, change=change_brightness)
# Combined contrast+brightness over a small grid of parameter values.
_,axes = plt.subplots(1,4, figsize=(12,3))
brightness_contrast(x(), 0.75, 0.7).show(axes[0])
brightness_contrast(x(), 2.0, 0.3).show(axes[1])
brightness_contrast(x(), 2.0, 0.7).show(axes[2])
brightness_contrast(x(), 0.75, 0.3).show(axes[3])
```
## Random lighting
Next, we will make our previous transforms random since we are interested in automating the pipeline. We will achieve this by making our parameters stochastic, each with a specific distribution.
We will use a <a href="https://en.wikipedia.org/wiki/Uniform_distribution_(continuous)">uniform</a> distribution for brightness change since its domain is the real numbers and the impact varies linearly with the scale. For contrast change we use [log_uniform](https://www.vosesoftware.com/riskwiki/LogUniformdistribution.php) for two reasons. First, contrast scale has a domain of [0, inf]. Second, the impact of the scale in the transformation is non-linear (i.e. 0.5 is as extreme as 2.0, 0.2 is as extreme as 5). The log_uniform function is appropriate because it has the same domain and correctly represents the non-linearity of the transform, P(0.5) = P(2).
```
#export
def uniform(low:Number, high:Number, size:List[int]=None)->float:
    "draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`"
    if size is None: return random.uniform(low, high)
    return torch.FloatTensor(*listify(size)).uniform_(low, high)
def log_uniform(low, high, size=None):
    "draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)"
    draws = uniform(log(low), log(high), size)
    # scalars are exponentiated directly; tensors in place
    return exp(draws) if size is None else draws.exp_()
def rand_bool(p:float, size=None):
    "draw 1 or shape=`size` random booleans (True occurring with probability `p`)"
    return uniform(0, 1, size) < p
# Sanity check: geometric mean of log-uniform(0.5, 2) draws should be ~1.
scipy.stats.gmean([log_uniform(0.5,2.0) for _ in range(1000)])
#export
import inspect
from copy import copy,deepcopy
def get_default_args(func):
    "Return a mapping of parameter name -> declared default value for `func`."
    empty = inspect.Parameter.empty
    return {name: param.default
            for name, param in inspect.signature(func).parameters.items()
            if param.default is not empty}
def listify(p=None, q=None):
    "Makes `p` same length as `q`"
    if p is None:
        p = []
    elif not isinstance(p, Iterable):
        p = [p]
    # Target length: `q` itself if it is an int, else len(q) (or len(p) if absent).
    if type(q) == int:
        n = q
    elif q is None:
        n = len(p)
    else:
        n = len(q)
    if len(p) == 1: p = p * n
    assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
    return list(p)
#export
class Transform():
    "Base class for (optionally random) transforms; subclasses set `_wrap` to delegate to an `Image` method."
    _wrap=None
    order=0    # sort key used by `apply_tfms` to sequence transforms
    def __init__(self, func, order=None):
        "Wrap `func`; `order` overrides the class-level sort order."
        if order is not None: self.order=order
        self.func=func
        # Parameter annotations double as per-parameter random samplers
        # (consumed by `RandTransform.resolve`).
        self.params = copy(func.__annotations__)
        self.def_args = get_default_args(func)
        # Also expose the transform as a method on `Image` (e.g. `img.brightness(...)`).
        setattr(Image, func.__name__,
                lambda x, *args, **kwargs: self.calc(x, *args, **kwargs))
    def __call__(self, *args, p=1., is_random=True, **kwargs):
        "Positional args apply immediately; kwargs-only returns a deferred `RandTransform`."
        if args: return self.calc(*args, **kwargs)
        else: return RandTransform(self, kwargs=kwargs, is_random=is_random, p=p)
    def calc(tfm, x, *args, **kwargs):
        "Apply the wrapped `func` to `x`, through `x.<_wrap>` when `_wrap` is set."
        if tfm._wrap: return getattr(x, tfm._wrap)(tfm.func, *args, **kwargs)
        else: return tfm.func(x, *args, **kwargs)
    @property
    def name(self): return self.__class__.__name__
    def __repr__(self): return f'{self.name} ({self.func.__name__})'
class TfmLighting(Transform): order,_wrap = 8,'lighting'
#export
@dataclass
class RandTransform():
    "Deferred application of `tfm`: parameters are sampled by `resolve()` and applied on call."
    tfm:Transform
    kwargs:dict
    p:float=1.0    # probability that the transform runs at all
    resolved:dict = field(default_factory=dict)
    do_run:bool = True
    is_random:bool = True
    def resolve(self):
        "Sample concrete values for every parameter of `tfm` and decide `do_run`."
        if not self.is_random:
            # Deterministic mode: defaults overridden by the explicit kwargs.
            self.resolved = {**self.tfm.def_args, **self.kwargs}
            return
        self.resolved = {}
        # for each param passed to tfm...
        for k,v in self.kwargs.items():
            # ...if it's annotated, call that fn...
            if k in self.tfm.params:
                rand_func = self.tfm.params[k]
                self.resolved[k] = rand_func(*listify(v))
            # ...otherwise use the value directly
            else: self.resolved[k] = v
        # use defaults for any args not filled in yet
        for k,v in self.tfm.def_args.items():
            if k not in self.resolved: self.resolved[k]=v
        # anything left over must be callable without params
        for k,v in self.tfm.params.items():
            if k not in self.resolved: self.resolved[k]=v()
        self.do_run = rand_bool(self.p)
    @property
    def order(self): return self.tfm.order
    def __call__(self, x, *args, **kwargs):
        "Apply the resolved transform to `x`, or pass `x` through when `do_run` is False."
        return self.tfm(x, *args, **{**self.resolved, **kwargs}) if self.do_run else x
#export
@TfmLighting
def brightness(x, change:uniform):
    "Shift logit-space pixels by logit(`change`); `change` is drawn via `uniform`."
    return x.add_(scipy.special.logit(change))
@TfmLighting
def contrast(x, scale:log_uniform):
    "Scale logit-space pixels by `scale`; `scale` is drawn via `log_uniform`."
    return x.mul_(scale)
# `Transform.__init__` monkey-patched these onto `Image`, so they chain fluently:
x().contrast(scale=2).show()
x().contrast(scale=2).brightness(0.8).show()
# A kwargs-only call builds a RandTransform; `resolve()` samples its params.
tfm = contrast(scale=(0.3,3))
tfm.resolve()
tfm,tfm.resolved,tfm.do_run
# all the same
tfm.resolve()
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: tfm(x()).show(ax)
tfm = contrast(scale=(0.3,3))
# different
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes:
    tfm.resolve()
    tfm(x()).show(ax)
# is_random=False takes `scale` literally instead of sampling from a range.
tfm = contrast(scale=2, is_random=False)
tfm.resolve()
tfm(x()).show()
```
## Composition
We are interested in composing the transform functions so as to apply them all at once. We will try to feed a list of transforms to our pipeline for it to apply all of them.
Applying a function to our transforms before calling them in Python is easiest if we use a decorator. You can find more about decorators [here](https://www.thecodeship.com/patterns/guide-to-python-function-decorators/).
```
#export
def resolve_tfms(tfms):
    "Re-sample the random parameters of every transform in `tfms`."
    for tfm in listify(tfms): tfm.resolve()
def apply_tfms(tfms, x, do_resolve=True):
    "Apply `tfms` to a clone of `x`; `do_resolve` re-samples their parameters first."
    if not tfms: return x
    pipeline = listify(tfms)
    if do_resolve: resolve_tfms(pipeline)
    result = x.clone()
    for tfm in pipeline: result = tfm(result)
    return result
# Compose several RandTransforms; each `apply_tfms` call re-resolves params.
x = train_ds[1][0]
tfms = [contrast(scale=(0.3,3.0), p=0.9),
        brightness(change=(0.35,0.65), p=0.9)]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfms,x).show(ax)
# do_resolve=False reuses the previous draw, so row 2 mirrors row 1.
_,axes = plt.subplots(2,4, figsize=(12,6))
for i in range(4):
    apply_tfms(tfms,x).show(axes[0,i])
    apply_tfms(tfms,x,do_resolve=False).show(axes[1,i])
apply_tfms([],x).show()
```
## DatasetTfm
```
#export
class DatasetTfm(Dataset):
    "Dataset wrapper that applies `tfms` to each input item on access."
    def __init__(self, ds:Dataset, tfms:Collection[Callable]=None, **kwargs):
        "Store the wrapped dataset, its transforms and extra `apply_tfms` kwargs."
        self.ds,self.tfms,self.kwargs = ds,tfms,kwargs
    def __len__(self): return len(self.ds)
    def __getitem__(self,idx):
        "Fetch item `idx`; return (transformed input, untouched target)."
        item,target = self.ds[idx]
        return apply_tfms(self.tfms, item, **self.kwargs), target
    def __getattr__(self,k):
        # Delegate unknown attributes (e.g. `.classes`, `.c`) to the wrapped dataset.
        return getattr(self.ds, k)
# Patch the wrapper into the nb_001b module so its DataBunch uses it.
import nb_001b
nb_001b.DatasetTfm = DatasetTfm
bs=64
#export
def to_data(b):
    "Recursively unwrap `ItemBase` objects (and lists of them) to their raw `data`."
    if is_listy(b): return [to_data(o) for o in b]
    return b.data if isinstance(b,ItemBase) else b
def data_collate(batch):
    "Collate like PyTorch's default, after stripping `ItemBase` wrappers."
    return torch.utils.data.dataloader.default_collate(to_data(batch))
@dataclass
class DeviceDataLoader():
    "Wrap `dl`, collating via `data_collate` and moving each batch to `device`."
    dl: DataLoader
    device: torch.device
    def __post_init__(self): self.dl.collate_fn=data_collate
    def __len__(self): return len(self.dl)
    def __getattr__(self,k): return getattr(self.dl, k)
    def proc_batch(self,b):
        "Move one batch to the target device."
        return to_device(b, self.device)
    def __iter__(self):
        # Lazily move batches as they are consumed.
        self.gen = map(self.proc_batch, self.dl)
        return iter(self.gen)
    @classmethod
    def create(cls, *args, device=default_device, **kwargs):
        "Build a DataLoader from `args`/`kwargs` and wrap it."
        return cls(DataLoader(*args, **kwargs), device=device)
nb_001b.DeviceDataLoader = DeviceDataLoader
data = DataBunch.create(train_ds, valid_ds, bs=bs, num_workers=4)
len(data.train_dl), len(data.valid_dl), data.train_dl.dataset.c
#export
def show_image_batch(dl, classes, rows=None, figsize=(12,15)):
    "Show the first batch of `dl` in a `rows` x `rows` grid (default ~sqrt(batch size))."
    x,y = next(iter(dl))
    if rows is None: rows = int(math.sqrt(len(x)))
    show_images(x[:rows*rows],y[:rows*rows],rows, classes)
def show_images(x,y,rows, classes, figsize=(9,9)):
    "Plot images `x` in a grid, each titled with its class name from labels `y`."
    fig, axs = plt.subplots(rows,rows,figsize=figsize)
    for i, ax in enumerate(axs.flatten()):
        show_image(x[i], ax)
        ax.set_title(classes[y[i]])
    plt.tight_layout()
show_image_batch(data.train_dl, train_ds.classes, 6)
# Rebuild the DataBunch with training-time transforms attached.
data = DataBunch.create(train_ds, valid_ds, bs=bs, train_tfm=tfms)
show_image_batch(data.train_dl, train_ds.classes, 6)
```
# Affine
We will now add affine transforms that operate on the coordinates instead of pixels like the lighting transforms we just saw. An [affine transformation](https://en.wikipedia.org/wiki/Affine_transformation) is a function "(...) between affine spaces which preserves points, straight lines and planes."
## Details
Our implementation first creates a grid of coordinates for the original image. The grid is normalized to a [-1, 1] range with (-1, -1) representing the top left corner, (1, 1) the bottom right corner and (0, 0) the center. Next, we build an affine matrix representing our desired transform and we multiply it by our original grid coordinates. The result will be a set of x, y coordinates which references where in the input image will each of the pixels in the output image be mapped. It has a size of w \* h \* 2 since it needs two coordinates for each of the h * w pixels of the output image.
This is clearest if we see it graphically. We will build an affine matrix of the following form:
`[[a, b, e],
[c, d, f]]`
with which we will transform each pair of x, y coordinates in our original grid into our transformation grid:
`[[a, b], [[x], [[e], [[x'],
[c, d]] x [y]] + [f]] = [y']]`
So after the transform we will get a new grid with which to map our input image into our output image. This will be our **map of where from exactly does our transformation source each pixel in the output image**.
**Enter problems**
Affine transforms face two problems that must be solved independently:
1. **The interpolation problem**: The result of our transformation gives us float coordinates, and we need to decide, for each (i,j), how to assign these coordinates to pixels in the input image.
2. **The missing pixel problem**: The result of our transformation may have coordinates which exceed the [-1, 1] range of our original grid and thus fall outside of our original grid.
**Solutions to problems**
1. **The interpolation problem**: We will perform a [bilinear interpolation](https://en.wikipedia.org/wiki/Bilinear_interpolation). This takes an average of the values of the pixels corresponding to the four points in the grid surrounding the result of our transformation, with weights depending on how close we are to each of those points.
2. **The missing pixel problem**: For these values we need padding, and we face a few options:
1. Adding zeros on the side (so the pixels that fall out will be black)
2. Replacing them by the value at the border
3. Mirroring the content of the picture on the other side (reflect padding).
### Transformation Method
**Zoom**
Zoom changes the focus of the image according to a scale. If a scale of >1 is applied, grid pixels will be mapped to coordinates that are more central than the pixel's coordinates (closer to 0,0) while if a scale of <1 is applied, grid pixels will be mapped to more peripheral coordinates (closer to the borders) in the input image.
We can also translate our transform to zoom into a non-centrical area of the image. For this we use $col_c$ which displaces the x axis and $row_c$ which displaces the y axis.
_Parameters_
1. **Scale** How much do we want to zoom in or out to our image.
Domain: Real numbers
2. **Col_pct** How much do we want to displace our zoom along the x axis.
Domain: Real numbers between 0 and 1
3. **Row_pct** How much do we want to displace our zoom along the y axis.
Domain: Real numbers between 0 and 1
<u>Affine matrix</u>
`[[1/scale, 0, col_c],
[0, 1/scale, row_c]]`
**Rotate**
Rotate shifts the image around its center in a given angle theta. The rotation is counterclockwise if theta is positive and clockwise if theta is negative. If you are curious about the derivation of the rotation matrix you can find it [here](https://matthew-brett.github.io/teaching/rotation_2d.html).
_Parameters_
1. **Degrees** By which angle do we want to rotate our image.
Domain: Real numbers
<u>Affine matrix</u>
`[[cos(theta), -sin(theta), 0],
[sin(theta), cos(theta), 0]]`
## Deterministic affine
```
#export
def grid_sample_nearest(input, coords, padding_mode='zeros'):
    "Nearest-neighbour sampling of `input` at normalized (-1,1) `coords`."
    # NOTE(review): clamp() here is NOT in-place; this line has no effect.
    # Likely intended clamp_() -- confirm.
    if padding_mode=='border': coords.clamp(-1,1)
    bs,ch,h,w = input.size()
    sz = tensor([w,h]).float()[None,None]
    # Map normalized coords to pixel indices: (-1,1) -> (0, w|h), then round.
    coords.add_(1).mul_(sz/2)
    coords = coords[0].round_().long()
    if padding_mode=='zeros':
        # Remember which lookups fell outside the image...
        mask = (coords[...,0] < 0) + (coords[...,1] < 0) + (coords[...,0] >= w) + (coords[...,1] >= h)
        mask.clamp_(0,1)
    # ...then clamp indices so the gather below is always in-bounds.
    coords[...,0].clamp_(0,w-1)
    coords[...,1].clamp_(0,h-1)
    result = input[...,coords[...,1],coords[...,0]]
    # Zero out the positions that were out of range.
    if padding_mode=='zeros': result[...,mask] = result[...,mask].zero_()
    return result
#export
def grid_sample(x, coords, mode='bilinear', padding_mode='reflect'):
    "Sample `x` at flow-field `coords`; maps 'reflect' to PyTorch's 'reflection' name."
    if padding_mode == 'reflect': padding_mode = 'reflection'
    if mode == 'nearest':
        return grid_sample_nearest(x[None], coords, padding_mode)[0]
    return F.grid_sample(x[None], coords, mode=mode, padding_mode=padding_mode)[0]
def affine_grid(size):
    "Build a normalized (-1,1) identity sampling grid of shape (1,H,W,2) for `size`=(C,H,W)."
    N, C, H, W = (1,) + size
    grid = FloatTensor(N, H, W, 2)
    # Channel 0 holds x coordinates (varying along W), channel 1 holds y
    # coordinates (varying along H); single-pixel axes sit at -1.
    xs = torch.linspace(-1, 1, W) if W > 1 else tensor([-1])
    ys = torch.linspace(-1, 1, H) if H > 1 else tensor([-1])
    grid[..., 0] = xs
    grid[..., 1] = ys[:, None]
    return grid
def affine_mult(c, m):
    "Apply 3x3 affine matrix `m` to every (x,y) pair of flow-field `c`; `None` means identity."
    if m is None: return c
    shape = c.size()
    flat = c.view(-1, 2)
    # y = bias + coords @ A^T, using the top-left 2x2 and the translation column.
    flat = torch.addmm(m[:2, 2], flat, m[:2, :2].t())
    return flat.view(shape)
def rotate(degrees):
    "3x3 affine matrix (nested lists) rotating by `degrees`, counter-clockwise for positive values."
    theta = degrees * math.pi / 180
    ca, sa = cos(theta), sin(theta)
    return [[ca, -sa, 0.],
            [sa,  ca, 0.],
            [0.,  0., 1.]]
def xi(): return train_ds[1][0]
x = xi().data
# Walk the affine machinery by hand: grid -> rotation matrix -> multiply -> sample.
c = affine_grid(x.shape)
m = rotate(30)
m = x.new_tensor(m)
m
c[0,...,0]
c[0,...,1]
m
c = affine_mult(c,m)
c[0,...,0]
c[0,...,1]
img2 = grid_sample(x, c, padding_mode='zeros')
show_image(img2);
# Same result through the Image API.
xi().affine(rotate, 30).show()
```
## Affine transform
```
#export
# Affine transforms run early (order 5); pixel transforms run late (order 10).
class TfmAffine(Transform): order,_wrap = 5,'affine'
class TfmPixel(Transform): order,_wrap = 10,'pixel'
@TfmAffine
def rotate(degrees:uniform):
    "Affine matrix rotating by `degrees` (counter-clockwise when positive)."
    angle = degrees * math.pi / 180
    return [[cos(angle), -sin(angle), 0.],
            [sin(angle),  cos(angle), 0.],
            [0. ,         0. ,        1.]]
def get_zoom_mat(sw, sh, c, r):
    "3x3 affine matrix scaling x by `sw` and y by `sh`, translated by (`c`, `r`)."
    x_row = [sw, 0, c]
    y_row = [0, sh, r]
    return [x_row, y_row, [0, 0, 1.]]
@TfmAffine
def zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Zoom by `scale`, keeping the point at (`row_pct`,`col_pct`) of the image fixed."
    s = 1-1/scale
    # Translation offsets (in the normalized -1..1 grid) that re-center the zoom.
    col_c = s * (2*col_pct - 1)
    row_c = s * (2*row_pct - 1)
    return get_zoom_mat(1/scale, 1/scale, col_c, row_c)
@TfmAffine
def squish(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Squish one axis by `scale`: the x axis when scale<=1, the y axis otherwise."
    if scale <= 1:
        col_c = (1-scale) * (2*col_pct - 1)
        return get_zoom_mat(scale, 1, col_c, 0.)
    else:
        row_c = (1-1/scale) * (2*row_pct - 1)
        return get_zoom_mat(1, 1/scale, 0., row_c)
# Direct calls (positional arg present) apply immediately.
rotate(xi(), 30).show()
zoom(xi(), 0.6).show()
zoom(xi(), 0.6).set_sample(padding_mode='zeros').show()
zoom(xi(), 2, 0.2, 0.2).show()
scales = [0.75,0.9,1.1,1.33]
_,axes = plt.subplots(1,4, figsize=(12,3))
for i, ax in enumerate(axes): squish(xi(), scales[i]).show(ax)
# Chained affines multiply into one matrix; refresh() forces sampling early.
_,axes=plt.subplots(1,3,figsize=(9,3))
xi().show(axes[0])
img2 = rotate(xi(), 30).refresh()
img2 = zoom(img2, 1.6)
img2.show(axes[1])
zoom(rotate(xi(), 30), 1.6).show(axes[2])
# resize() retargets the sampling grid, so transforms land on the new size.
xi().resize(48).show()
img2 = zoom(xi().resize(48), 1.6, 0.8, 0.2)
rotate(img2, 30).show()
img2 = zoom(xi().resize(24), 1.6, 0.8, 0.2)
rotate(img2, 30).show(hide_axis=False)
img2 = zoom(xi().resize(48), 1.6, 0.8, 0.2)
rotate(img2, 30).set_sample(mode='nearest').show()
```
## Random affine
As we did with the Lighting transform, we now want to build randomness into our pipeline so we can increase the automation of the transform process.
We will use a uniform distribution for both our transforms since their impact is linear and their domain is the real numbers.
**Apply all transforms**
We will make all transforms try to do as little calculations as possible.
We do only one affine transformation by multiplying all the affine matrices of the transforms, then we apply to the coords any non-affine transformation we might want (jitter, elastic distortion). Next, we crop the coordinates we want to keep and, by doing it before the interpolation, we don't need to compute pixel values that won't be used afterwards. Finally we perform the interpolation and we apply all the transforms that operate pixelwise (brightness, contrast).
```
# RandTransform repr before/after resolving its parameters.
tfm = rotate(degrees=(-45,45.), p=0.75); tfm
tfm.resolve(); tfm
x = xi()
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfm, x).show(ax)
tfms = [rotate(degrees=(-45,45.), p=0.75),
        zoom(scale=(0.5,2.0), p=0.75)]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfms,x).show(ax)
#export
def apply_tfms(tfms, x, do_resolve=True, xtra=None, size=None, **kwargs):
    "Apply `tfms` (sorted by order) to a clone of `x`, optionally resizing it first."
    if not (tfms or size): return x
    xtra = xtra or {}
    # Sort so affine/coord/lighting/pixel transforms run in their declared order.
    pipeline = sorted(listify(tfms), key=lambda t: t.tfm.order)
    if do_resolve: resolve_tfms(pipeline)
    out = x.clone()
    if kwargs: out.set_sample(**kwargs)
    if size: out.resize(size)
    for t in pipeline:
        extra_args = xtra.get(t.tfm)
        out = t(out, **extra_args) if extra_args else t(out)
    return out
# size/padding kwargs flow through resize/set_sample inside apply_tfms.
tfms = [rotate(degrees=(-45,45.), p=0.75),
        zoom(scale=(1.0,2.0), row_pct=(0,1.), col_pct=(0,1.))]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfms,x, padding_mode='zeros', size=64).show(ax)
tfms = [squish(scale=(0.5,2), row_pct=(0,1.), col_pct=(0,1.))]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfms,x).show(ax)
```
# Coord and pixel
## Jitter / flip
The last two transforms we will use are **jitter** and **flip**.
**Jitter**
Jitter is a transform which adds a random value to each of the pixels to make them somewhat different than the original ones. In our implementation we first get a random number between (-1, 1) and we multiply it by a constant M which scales it.
_Parameters_
1. **Magnitude** How much random noise do we want to add to each of the pixels in our image.
Domain: Real numbers between 0 and 1.
**Flip**
Flip is a transform that reflects the image on a given axis.
_Parameters_
1. **P** Probability of applying the transformation to an input.
Domain: Real numbers between 0 and 1.
```
#export
# Coord transforms run before affine ones (order 4 < 5).
class TfmCoord(Transform): order,_wrap = 4,'coord'
@TfmCoord
def jitter(c, size, magnitude:uniform):
    "Add per-coordinate uniform noise in (-`magnitude`, `magnitude`) to flow-field `c`."
    return c.add_((torch.rand_like(c)-0.5)*magnitude*2)
@TfmPixel
def flip_lr(x):
    "Mirror the image horizontally (flip along the width axis)."
    return x.flip(2)
# Jitter with a small random magnitude, then a 50% horizontal flip.
tfm = jitter(magnitude=(0,0.1))
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes:
    tfm.resolve()
    tfm(xi()).show(ax)
tfm = flip_lr(p=0.5)
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes:
    tfm.resolve()
    tfm(xi()).show(ax)
```
## Crop/pad
**Crop**
Crop is a transform that cuts a series of pixels from an image. It does this by removing rows and columns from the input image.
_Parameters_
1. **Size** What is the target size of each side in pixels. If only one number *s* is specified, image is made square with dimensions *s* \* *s*.
Domain: Positive integers.
2. **Row_pct** Determines where to cut our image vertically on the bottom and top (which rows are left out). If <0.5, more rows will be cut at the top than at the bottom and vice versa (varies linearly).
Domain: Real numbers between 0 and 1.
3. **Col_pct** Determines where to cut our image horizontally on the left and right (which columns are left out). If <0.5, more columns will be cut on the left than on the right and vice versa (varies linearly).
Domain: Real numbers between 0 and 1.
Our three parameters are related with the following equations:
1. output_rows = [**row_pct***(input_rows-**size**):**size**+**row_pct***(input_rows-**size**)]
2. output_cols = [**col_pct***(input_cols-**size**):**size**+**col_pct***(input_cols-**size**)]
**Pad**
Pads each of the four borders of our image with a certain amount of pixels. Can pad with reflection (reflects border pixels to fill new pixels) or zero (adds black pixels).
_Parameters_
1. **Padding** Amount of pixels to add to each border. [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad)
Domain: Positive integers.
2. **Mode** How to fill new pixels. For more detail see the Pytorch subfunctions for padding.
Domain:
- Reflect (default): reflects opposite pixels to fill new pixels. [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.ReflectionPad2d)
- Constant: adds pixels with specified value (default is 0, black pixels) [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.ConstantPad2d)
- Replicate: replicates border row or column pixels to fill new pixels [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.ReplicationPad2d)
***On using padding and crop***
A nice way to use these two functions is to combine them into one transform. We can add padding to the image and then crop some of it out. This way, we can create a new image to augment our training set without losing image information by cropping. Furthermore, this can be done in several ways (modifying the amount and type of padding and the crop style) so it gives us great flexibility to add images to our training set. You can find an example of this in the code below.
```
# Transform families listed in the order they are applied.
[(o.__name__,o.order) for o in
    sorted((Transform,TfmAffine,TfmCoord,TfmLighting,TfmPixel),key=attrgetter('order'))]
#export
@partial(TfmPixel, order=-10)
def pad(x, padding, mode='reflect'):
    "Pad all four borders by `padding` pixels; order -10 runs it before other pixel tfms."
    return F.pad(x[None], (padding,)*4, mode=mode)[0]
@TfmPixel
def crop(x, size, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Crop `x` to `size` pixels; `row_pct`/`col_pct` in [0,1] position the crop window."
    rows, cols = listify(size, 2)
    top = int((x.size(1) - rows + 1) * row_pct)
    left = int((x.size(2) - cols + 1) * col_pct)
    return x[:, top:top + rows, left:left + cols].contiguous()
# Pad-then-crop demos; 'constant' mode shows the added black border.
pad(xi(), 4, 'constant').show()
crop(pad(xi(), 4, 'constant'), 32, 0.25, 0.75).show(hide_axis=False)
crop(pad(xi(), 4), 32, 0.25, 0.75).show()
```
## Combine
```
# Classic augmentation recipe: flip, pad with black, random 32x32 crop.
tfms = [flip_lr(p=0.5),
        pad(padding=4, mode='constant'),
        crop(size=32, row_pct=(0,1.), col_pct=(0,1.))]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfms, x).show(ax)
tfms = [
    flip_lr(p=0.5),
    contrast(scale=(0.5,2.0)),
    brightness(change=(0.3,0.7)),
    rotate(degrees=(-45,45.), p=0.5),
    zoom(scale=(0.5,1.2), p=0.8)
]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes: apply_tfms(tfms, x).show(ax)
# Row 2 reuses row 1's draws (do_resolve=False) with nearest-neighbour sampling.
_,axes = plt.subplots(2,4, figsize=(12,6))
for i in range(4):
    apply_tfms(tfms, x, padding_mode='zeros', size=48).show(axes[0][i], hide_axis=False)
    apply_tfms(tfms, x, mode='nearest', do_resolve=False).show(axes[1][i], hide_axis=False)
```
## RandomResizedCrop (Torchvision version)
```
#export
def compute_zs_mat(sz, scale, squish, invert, row_pct, col_pct):
    "Try candidate zoom/squish draws in turn; return the first fitting matrix, else a center-crop fallback."
    orig_ratio = math.sqrt(sz[2]/sz[1])
    for s,r,i in zip(scale,squish, invert):
        s,r = math.sqrt(s),math.sqrt(r)
        if s * r <= 1 and s / r <= 1: #Test if we are completely inside the picture
            w,h = (s/r, s*r) if i else (s*r,s/r)
            w /= orig_ratio
            h *= orig_ratio
            col_c = (1-w) * (2*col_pct - 1)
            row_c = (1-h) * (2*row_pct - 1)
            return get_zoom_mat(w, h, col_c, row_c)
    #Fallback, hack to emulate a center crop without cropping anything yet.
    if orig_ratio > 1: return get_zoom_mat(1/orig_ratio**2, 1, 0, 0.)
    else:              return get_zoom_mat(1, orig_ratio**2, 0, 0.)
@TfmCoord
def zoom_squish(c, size, scale:uniform=1.0, squish:uniform=1.0, invert:rand_bool=False,
                row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Coord transform combining random zoom and squish, RandomResizedCrop-style."
    #This is intended for scale, squish and invert to be of size 10 (or whatever) so that the transform
    #can try a few zoom/squishes before falling back to center crop (like torchvision.RandomResizedCrop)
    m = compute_zs_mat(size, scale, squish, invert, row_pct, col_pct)
    return affine_mult(c, FloatTensor(m))
# RandomResizedCrop-style transform: 10 candidate draws per parameter.
rrc = zoom_squish(scale=(0.25,1.0,10), squish=(0.5,1.0,10), invert=(0.5,10),
                  row_pct=(0,1.), col_pct=(0,1.))
_,axes = plt.subplots(2,4, figsize=(12,6))
for i in range(4):
    apply_tfms(rrc, x, size=48).show(axes[0][i])
    apply_tfms(rrc, x, do_resolve=False, mode='nearest').show(axes[1][i])
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/JimKing100/nfl-test/blob/master/predictions/Prediction_Offense_Final.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Installs
!pip install pmdarima
# Imports
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
import pmdarima as pm
from sklearn import preprocessing
# Import data from the project's GitHub: per-game actuals plus 2019 rosters.
original_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-actuals/actuals_offense.csv')
kickers_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-revised/rookies_non_kicker.csv')
offense_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-revised/rookies_non_offense.csv')
player_df = pd.concat([kickers_df, offense_df], ignore_index=True)
# The dataframe of actual offensive points for each game from 2000-2019
original_df.head()
# The dataframe of all 2019 offensive players (kickers and offense)
player_df.head(50)
# Add a row to the final_df dataframe
# Each row holds one player's predicted vs. actual week-1 points
def add_row(df, p, f, l, n, pos, pred, act):
    """Return `df` with one appended result row.

    Parameters: `p` player id, `f`/`l` first/last name, `n` full name,
    `pos` position, `pred` predicted points, `act` actual points.
    """
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # building a one-row frame and concatenating gives the same result
    # through the same interface.
    row = pd.DataFrame([{'player': p,
                         'first': f,
                         'last': l,
                         'name': n,
                         'position': pos,
                         'week1-pred': pred,
                         'week1-act': act}])
    return pd.concat([df, row], ignore_index=True)
# The main code for iterating through the player(offense and kicker) list, calculating the points and adding the rows
# to the final_df dataframe.
column_names = ['player',
                'first',
                'last',
                'name',
                'position',
                'week1-pred',
                'week1-act'
               ]
player_list = offense_df['player'].tolist()
final_df = pd.DataFrame(columns = column_names)
for player in player_list:
    # Look up the player's roster metadata by id.
    first = player_df['first'].loc[(player_df['player']==player)].iloc[0]
    last = player_df['last'].loc[(player_df['player']==player)].iloc[0]
    name = player_df['name'].loc[(player_df['player']==player)].iloc[0]
    position1 = player_df['position1'].loc[(player_df['player']==player)].iloc[0]
    start_year = player_df['start'].loc[(player_df['player']==player)].iloc[0]
    row = original_df.index[(original_df['player']==player)][0]
    # Per-game data starts at the 2000 season; clamp earlier careers.
    if start_year < 2000:
        start_year = 2000
    # 16 games per season; presumably 5 metadata columns precede the
    # per-game columns -- TODO confirm against the CSV layout.
    col = ((start_year - 2000) * 16) + 5
    train_data = original_df.iloc[row, col:309]
    actuals = original_df.iloc[row, 309:325]
    act_points = actuals.sum()
    print(player)
    # NOTE(review): these player ids are hard-coded exclusions (predicted 0);
    # the reason for skipping them is not shown here.
    if (player != 'GG-0310') & (player != 'KA-0737') & (player != 'JM-6775') & \
       (player != 'AL-0387') & (player != 'JW-5475'):
        # ARIMA model
        model = pm.auto_arima(train_data, start_p=1, start_q=1,
                              test='adf',       # use adftest to find optimal 'd'
                              max_p=3, max_q=3, # maximum p and q
                              m=1,              # frequency of series
                              d=None,           # let model determine 'd'
                              seasonal=False,   # No Seasonality
                              start_P=0,
                              D=0,
                              trace=False,
                              error_action='ignore',
                              suppress_warnings=True,
                              stepwise=True)
        # Forecast the next 16 games and sum them into a season total.
        n_periods = 16
        fc = model.predict(n_periods=n_periods, return_conf_int=False)
        index_of_fc = np.arange(len(train_data), len(train_data)+n_periods)
        fc_series = pd.Series(fc, index=index_of_fc)
        pred_points = fc_series.sum()
    else:
        pred_points = 0
    final_df = add_row(final_df, player, first, last, name, position1, pred_points, act_points)
# The final_df dataframe
final_df['week1-diff'] = final_df['week1-pred'] - final_df['week1-act']
# NOTE(review): division yields inf for excluded players with pred 0 -- confirm intended.
final_df['week1-pct'] = final_df['week1-diff']/final_df['week1-pred']
# Calculate the metrics
pred_median_error = final_df['week1-pct'].median()
print('Median Error - %.4f%%' % (pred_median_error * 100))
final_df.head(50)
# Save the results to .csv file
final_df.to_csv('/content/week1-pred-offense-norookies.csv', index=False)
```
|
github_jupyter
|

### Instructions
1. Make sure you are using a version of notebook greater than v.3. If you installed Anaconda with python 3 - this is likely to be fine. The next piece of code will check if you have the right version.
2. The notebook has some open test cases that you can use to test the functionality of your code - however it will be run on another set of test cases that you can't see, from which marks will be awarded. So passing all the tests in this notebook is not a guarantee that you have done things correctly - though it is highly probable.
3. Also make sure you submit a notebook that doesn't return any errors. One way to ensure this is to run all the cells before you submit the notebook.
4. When you are done create a zip file of your notebook and upload that
5. For each cell where you see "YOUR CODE HERE" delete the return notImplemented statement when you write your code there - don't leave it in the notebook.
6. Once you are done, you are done.
# DSA 2018 Nyeri Preparatory Notebook
By Ciira Maina
In preparation for DSA 2018 Nyeri, we would like potential participants to complete a number of exercises in probability, machine learning and programming to ensure that they have the necessary prerequisite knowledge to attend the summer school. You will be required to submit notebooks with solutions to these exercises during the application process.
In this first exercise we will require you to download a dataset and perform computations on the data. These data are from a paper in 1966 by Cox and Lewis and report the time difference between nerve pulses on a nerve fibre. 799 observations are reported. These data are used for some examples in the text ["All of Statistics"](http://www.stat.cmu.edu/~larry/all-of-statistics/) by Larry Wasserman.
The data are available [here](http://www.stat.cmu.edu/~larry/all-of-statistics/=data/nerve.dat)
```
import matplotlib.pyplot as plt
%matplotlib inline
import urllib.request
import numpy as np
```
## Obtain the data
Write code to obtain the data from the website above and store it in a one dimensional array of floating point numbers.
```
nerve_data_url='http://www.stat.cmu.edu/~larry/all-of-statistics/=data/nerve.dat'
def read_data(url):
    """Download the nerve-pulse wait-time data and return it as a 1-D float array.

    The raw file is whitespace-delimited (tabs within a line, CRLF between
    lines, with trailing separators producing empty fields). ``str.split()``
    with no argument handles all of those cases in one pass and discards
    empty fields, so no manual per-line/per-tab filtering is needed.

    Parameters
    ----------
    url : str
        Location of the raw nerve.dat file.

    Returns
    -------
    numpy.ndarray
        One-dimensional array of floats, one entry per observation.
    """
    raw = urllib.request.urlopen(url).read().decode('utf-8')
    return np.array([float(token) for token in raw.split()])
nerve_data = read_data(nerve_data_url)
assert len(nerve_data) == 799
```
## Preliminary Visualisation
Plot a histogram of the data. Ensure you label your axes.
```
# Histogram of the 799 inter-pulse wait times (matplotlib's default 10 bins).
plt.hist(nerve_data)
# NOTE(review): the x-axis is the wait time *between* pulses; the label could
# be clearer. 'occurence' is a typo ('occurrence') in the rendered label; both
# strings are left unchanged here to preserve the output byte-for-byte.
plt.xlabel('Nerve Pulses')
plt.ylabel('Frequency of occurence')
plt.show()
```
## Preliminary analysis
The cumulative distribution function of a random variable $\mathbf{X}$ is given by
$
\begin{equation}
F_X(x)=P(\mathbf{X}\leq x)
\end{equation}$
If we obtain $n$ observations $X_1,\ldots,X_n$ from this distribution, the empirical distribution function is given by
$
\begin{equation}
\hat{F}_n(x)=\frac{\sum_{i=1}^n\mathbf{I}(X_i\leq x)}{n}
\end{equation}$
where
$
\begin{equation}
\mathbf{I}(X_i\leq x) =
\begin{cases}
1 & \text{if $X_i\leq x $} \\
0 & \text{if $X_i> x$}
\end{cases}
\end{equation}
$
* Plot the empirical distribution function of the nerve pulse data
* Estimate the probability that the wait time between nerve pulses is less than $0.3$ - We will call this P1
* Estimate the probability that the wait time between nerve pulses is between $0.1$ and $0.3$ - We will call this P2
Given a random variable X obtain the empirical distribution of a given set of data
```
def cdf(X, data):
    """Empirical distribution function of *data* evaluated at each point of *X*.

    Implements F_hat(x) = (1/n) * sum_i I(data_i <= x).

    Parameters
    ----------
    X : iterable of float
        Evaluation points.
    data : array-like of float
        Observed sample.

    Returns
    -------
    numpy.ndarray
        Vector the same length as X with values in [0, 1].
    """
    data = np.asarray(data, dtype=float)
    n = len(data)
    # count_nonzero on the boolean mask counts observations <= x.
    return np.array([np.count_nonzero(data <= x) for x in X]) / n
```
Plot the empirical distribution function of the nerve pulse data
```
X=np.linspace(0,np.max(nerve_data),100)
# Empirical CDF: at each grid point x, the fraction of observations <= x.
F_hat = np.array([np.count_nonzero(nerve_data <= x) for x in X]) / len(nerve_data)
# A step plot is the faithful rendering of an ECDF (it is a step function).
plt.step(X, F_hat, where='post')
plt.xlabel('Wait time between nerve pulses')
plt.ylabel('Empirical distribution function')
plt.show()
```
Estimate the probability that the wait time between nerve pulses is less than 0.3. Hint: refer to the previous formula for the cumulative distribution
```
def prob_x(x, data):
    """Plug-in estimate of P(X <= x) from the sample *data*.

    This is just the empirical distribution function evaluated at a single
    point: the fraction of observations that are <= x.

    Parameters
    ----------
    x : float
        Threshold.
    data : array-like of float
        Observed sample.

    Returns
    -------
    float
        Estimated probability in [0, 1].
    """
    data = np.asarray(data, dtype=float)
    return np.count_nonzero(data <= x) / len(data)
P1 = prob_x(0.3, nerve_data)
assert abs(P1-0.760951188986) < 1e-6
```
Estimate the probability that the wait time between nerve pulses is between 0.1 and 0.3
```
def prob_xy(x, y, data):
    """Plug-in estimate of P(x < X <= y), i.e. F_hat(y) - F_hat(x).

    Parameters
    ----------
    x, y : float
        Interval bounds with x < y; the interval is half-open (x, y].
    data : array-like of float
        Observed sample.

    Returns
    -------
    float
        Estimated probability in [0, 1].
    """
    data = np.asarray(data, dtype=float)
    inside = np.count_nonzero((data > x) & (data <= y))
    return inside / len(data)
P2 = prob_xy(0.1,0.3,nerve_data)
assert abs(P2-0.377972465582) < 1e-6
```
## Estimating properties of the distribution
We can estimate properties of the true distribution of the data $F_X(x)$ using the empirical distribution function $\hat{F}_n(x)$. To do this we can use "plug in" estimators. Here we will estimate the mean, variance and skewness. The expressions for the "plug in" estimators for these quantities are
* Mean: $\hat{\mu}=\frac{1}{n}\sum_{i=1}^nX_i$
* Variance: $\hat{\sigma}^2=\frac{1}{n}\sum_{i=1}^n(X_i-\hat{\mu})^2$
* Skewness: $\hat{\kappa}=\frac{\frac{1}{n}\sum_{i=1}^n(X_i-\hat{\mu})^3}{\hat{\sigma}^3}$
Compute the plug in estimators of the mean, variance and skewness for the nerve pulse wait time data.
```
def dist_properties(data):
    """Plug-in estimators of the mean, variance and skewness of *data*.

    Uses the formulas stated above:
      mean      mu_hat    = (1/n) * sum(X_i)
      variance  sigma2    = (1/n) * sum((X_i - mu_hat)^2)   (biased, 1/n)
      skewness  kappa_hat = (1/n) * sum((X_i - mu_hat)^3) / sigma_hat^3

    Parameters
    ----------
    data : array-like of float
        Observed sample.

    Returns
    -------
    tuple of float
        (mean, variance, skewness).
    """
    data = np.asarray(data, dtype=float)
    mu = data.mean()
    centered = data - mu
    var = np.mean(centered ** 2)
    kappa = np.mean(centered ** 3) / var ** 1.5
    return mu, var, kappa
mu, var, kappa = dist_properties(nerve_data)
assert np.round(mu,3) == 0.219
assert np.round(var,3) == 0.044
assert np.round(kappa,3) == 1.761
```
|
github_jupyter
|
```
# Itertools
# product --> Returns the Cartesian product of iterables such as lists
# (product returns a lazy iterator object; wrap it in list() to materialise it)
from itertools import product
lst_a=[2,4]
lst_b=[3,6]
print('List a -->',lst_a)
print('List b -->',lst_b)
ab=product(lst_a,lst_b)
print('Returns a product object -->',type(ab))
lst_ab=list(ab)
print('Returns a list -->',type(lst_ab))
print('Product of the two lists -->',lst_ab)
# Itertools
# product(repeat=x) --> Returns the product of an iterable with itself x times
from itertools import product
lst_a=['a']
lst_b=['b','c']
print('List a -->',lst_a)
print('List b -->',lst_b)
print('\r')
# repeat=1
ab=product(lst_a,lst_b,repeat=1)
lst_ab=list(ab)
print('Product (repeat=1) -->',lst_ab)
# repeat=2  (equivalent to product(a,b,a,b): yields tuples of length 4)
ab=product(lst_a,lst_b,repeat=2)
lst_ab=list(ab)
print('Product (repeat=2) -->',lst_ab)
#repeat=3
print('\r')
a=[0,1]
print('List a -->',a)
p=product(a,repeat=3)
lst_p=list(p)
print('Product (repeat=3) -->',lst_p)
# Itertools Infinite Iterators
# count(n) --> Returns consecutive values (start= number n, end = infinite if stop condition is not specified)
from itertools import count
for x in count(1):
    print(x,end=' ')
    # count() never terminates on its own: break explicitly once we reach 10
    if x==10:
        break
# Itertools Infinite Iterators
# cycle(iterable) --> Cycling through an iterable (i.e.list). Infinite loops if stop condition is not specified)
from itertools import cycle
c=[0,1,2,3,4]
lst=[]
for x in cycle(c):
    print(x,end=' ')
    lst.append(x)
    # stop after 12 yielded values (two full cycles of c plus two items)
    if len(lst)==12:
        break
# Itertools Infinite Iterators
# repeat (object/iterable,times) --> Repeats the elements of an iterable/object/value
# repeat() runs indefinitely unless a times argument value is selected
from itertools import repeat
c=[0,1,2,3,4]
# repeat yields the *whole* object each time (the list itself, not its elements)
for x in repeat(c,times=3):
    print(x)
# Itertools
# permutations(iterable,r) --> Returns all possible orderings (length=r) of the elements in an iterable
from itertools import permutations
lst=[0,1,2]
print('List -->',lst)
print('\r')
# length r=None (r defaults to the full length of the iterable)
res=list(permutations(lst))
print('List elements permutations (length r= None) -->',res)
# length r=2
res2=list(permutations(lst,r=2))
print('List elements permutations (length r= 2) -->',res2)
# Itertools
# combinations(iterable,r) --> Returns iterable elements subsequences of length r (sorted order based on index)
# where the iterable elements are not repeated in the subsequences
from itertools import combinations
lst=[0,1,2,3]
print('List -->',lst)
print('\r')
# length r=4
res=list(combinations(lst,4))
print('List elements combinations (length r= 4) -->',res)
# length r=3
res1=list(combinations(lst,3))
print('List elements combinations (length r= 3) -->',res1)
# length r=2
res2=list(combinations(lst,2))
print('List elements combinations (length r= 2) -->',res2)
print('\r')
# string - length r=2
string='car'
print('String -->',string)
print('\r')
# length r=2 (a string is an iterable of its characters)
res_s=tuple(combinations(string,2))
print('String elements combinations (length r= 2) -->',res_s)
# Itertools
# combinations_with_replacement(iterable,r) --> Returns iterable elements subsequences of length r (sorted
# order based on index) where the iterable elements may be repeated in the subsequences
from itertools import combinations,combinations_with_replacement
lst=[0,1,2]
print('List -->',lst)
print('\r')
# combinations
res=list(combinations(lst,2))
print('List elements combinations (length r= 2) -->',res)
# combinations with replacement
res1=list(combinations_with_replacement(lst,2))
print('List elements combinations with replacement (length r= 2) -->',res1)
# Itertools
# accumulate(iterable) --> Returns accumulated sums (running totals)
from itertools import accumulate
lst=[0,1,2,3]
print('List -->',lst)
print('\r')
# accumulate
res=list(accumulate(lst))
print('Accumulated Sums -->',res)
# Itertools
# accumulate(iterable,func=operator.x) -->where x =(add,sub,mul,truediv,floordiv etc.),default value = add
from itertools import accumulate
import operator
lst=[6,4,2,1]
print('List -->',lst)
print('\r')
# operator.add --> default
res=list(accumulate(lst,func=operator.add))
print('Operator.add -->',res)
# operator.sub
res1=list(accumulate(lst,func=operator.sub))
print('Operator.sub -->',res1)
# operator.mul
res2=list(accumulate(lst,func=operator.mul))
print('Operator.mul -->',res2)
# operator.truediv
res3=list(accumulate(lst,func=operator.truediv))
print('Operator.truediv -->',res3)
# operator.floordiv
res4=list(accumulate(lst,func=operator.floordiv))
print('Operator.floordiv -->',res4)
# Itertools
# accumulate(iterable,func=x) -->where x =(min,max)
# func can be any binary callable: min/max give the running minimum/maximum
from itertools import accumulate
import operator
lst=[2,5,1,8,4]
print('List -->',lst)
print('\r')
# func=min
res=list(accumulate(lst,func=min))
print('func=min -->',res)
# func=max
res1=list(accumulate(lst,func=max))
print('func=max -->',res1)
# Itertools
# groupby(iterable,key) --> Takes a) an iterable (list,dict) and b) a key that is a function that determines # the keys for each iterable element. Groupby returns consecutive keys & groups from the selected iterable.
# NOTE: groupby only groups *consecutive* elements with equal keys, so the
# input must already be ordered by the key (both examples below are).
from itertools import groupby
lst=[1,2,3,4,5,6,7,8,9,10]
print('List -->',lst)
print('\r')
# groupby --> 1st Example
print('List elements greater than 5:\n')
res=groupby(lst,key=lambda x:x>5)
for k,v in res:
    print(k,list(v))
# groupby -->2nd Example
print('\r')
cars=[{'make':'Chevrolet','year':2017},{'make':'Honda','year':2017},{'make':'GMC','year':2018}
,{'make':'Honda','year':2019},{'make':'BMW','year':2020},{'make':'Nissan','year':2020}]
print('---------------------------------------------------------------------------')
print('Car Make & Year:\n')
print(cars)
print('\r')
print('Group cars by year:\n')
res1=groupby(cars,key=lambda x:x['year'])
for k,v in res1:
    print(k,list(v))
```
|
github_jupyter
|
### As before, let's find the set of compounds for which both simulations and experimental measurements exist
Matt Robinson posted a `moonshot_initial_activity_data.csv` file of the initial activity data:
```
import numpy as np
import pandas as pd
# Initial activity data (per-compound IC50 measurements) from the data release.
df_activity = pd.read_csv('../data-release-2020-05-10/moonshot_initial_activity_data.csv')
# Find all that have IC50 data
IC50_measured = pd.notnull(df_activity["IC50 (µM)"])
df_activity[IC50_measured]
# Translate the new IDs back to the old IDs so we can find them in our results
## make a translation table
all_df = pd.read_csv("https://covid.postera.ai/covid/submissions.csv")
new_CID_list = list(all_df.CID)
old_CID_list = list(all_df.old_CID)
new2old_CID = {}
old2new_CID = {}
for i in range(len(new_CID_list)):
    new2old_CID[new_CID_list[i]] = old_CID_list[i]
    old2new_CID[old_CID_list[i]] = new_CID_list[i]
for s in df_activity[IC50_measured].CID:
    print(s, '-->', new2old_CID[s])
## Are THESE in the latest results pkl???
# df_results = pd.read_pickle('master_results_WL0.12_051820.pkl') # these have covalent warheads in them
df_results = pd.read_pickle('master_results_WL0.12_051920.pkl')
for s in df_activity[IC50_measured].CID:
    # substring match because identities embed the old CID plus extra text
    df_hits = df_results[df_results.identity.str.contains(new2old_CID[s])]
    if len(df_hits) > 0:
        print(s, '<--', new2old_CID[s])
        print(df_hits)
        print('\n##########\n\n')
# Let's look at our current ranking:
df_results
top10_indices = df_results.index[0:10]
for i in range(len(top10_indices)):
    index = top10_indices[i]
    oldID = df_results.loc[index].identity
    # 'ÁLV' -> 'ALV': presumably an encoding artifact in some identities — TODO confirm
    if oldID.count('ÁLV') > 0:
        oldID = oldID.replace('ÁLV','ALV')
    try:
        newID = old2new_CID[oldID]
    except:
        # NOTE(review): bare except — best-effort lookup; any failure
        # (normally a KeyError for an unmapped ID) leaves newID blank.
        newID = ''
    print('rank:', i+1, 'oldID:', oldID, 'newID:', newID, df_results.loc[index].dataset, df_results.loc[index].fah)
```
## Top 10 profiles
### \# 1 NIM-UNI-36e-3 NIM-UNI-36e12f95-3
https://covid.postera.ai/covid/submissions/36e12f95-0811-4857-8bc6-a4aee0788f1c/3
<img src="https://covid.postera.ai/synthesize/CC(=O)c1ccc(Br)c2%5BnH%5Dc(=O)n(-c3cccnc3)c12">
<img src="http://yabmtm.hopto.org:31415/MS0323/plots/MS0323_v3_1-500_p14822_127_19May2020.png">
### \# 2 JON-UIO-066-14 JON-UIO-066ce08b-14 MS0326_v3 PROJ14824/RUN2448
https://covid.postera.ai/covid/submissions/066ce08b-1104-439d-946f-d7c319de995c/14
<img src="https://covid.postera.ai/synthesize/C%5BC@H%5D(NC(=O)C(F)F)c1cccc(F)c1">
<img src="http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_3000-5538_p14824_2448_19May2020.png">
### \# 3 CHR-SOS-709-10 CHR-SOS-7098f804-10
https://covid.postera.ai/covid/submissions/7098f804-b66c-4fb6-89f4-8e4e0c78a7cb/10
<img src="https://covid.postera.ai/synthesize/O=C(Nc1cnccc1Cl)c1cc(Cl)ccc1O">
<img src="http://yabmtm.hopto.org:31415/MS0406-2/plots/MS0406-2_v3_0-2999_p14827_360_19May2020.png">
### \# 4 LIZ-THE-f11-1 newID: LIZ-THE-f118233e-1 MS0326_v2 PROJ14723/RUN404
https://covid.postera.ai/covid/submissions/7023c732-4bbd-4499-a930-9b1b18b131ec/1
<img src="https://covid.postera.ai/synthesize/CNc1ncc(C%23N)cc1Oc1ccccc1">
### \# 5 ALV-UNI-7ff-36 newID: MS0326_v2 PROJ14723/RUN2963
https://covid.postera.ai/covid/submissions/7ff1a6f9-745f-4b82-81e0-c1d353ea5dfe/36
<img src="https://covid.postera.ai/synthesize/Cc1cc(-c2c(-c3ccc(F)cc3)nn3nc(C)ccc23)%5BnH%5Dn1">
<img src="http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v2_1-3000_p14723_2963_19May2020.png">
### \# 6 TRY-UNI-714-16 newID: TRY-UNI-714a760b-16 MS0326_v3 PROJ14824/RUN189
https://covid.postera.ai/covid/submissions/714a760b-0e02-4b09-8736-f27f854f8c22/16
<img src="https://covid.postera.ai/synthesize/Cc1ccncc1NC(=O)C(C)C1CCCCC1">
<img src="http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_3000-5538_p14824_189_19May2020.png">
### \#7 ALV-UNI-7ff-43 newID: MS0326_v3 PROJ14824/RUN19
https://covid.postera.ai/covid/submissions/7ff1a6f9-745f-4b82-81e0-c1d353ea5dfe/43
<img src="https://covid.postera.ai/synthesize/Cc1cn2c(-c3cccnc3)c(-c3ccc(F)cc3)nc2s1">
<img src="http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_3000-5538_p14824_19_19May2020.png">
### \#8 BEN-VAN-d8f-12 BEN-VAN-d8fd1356-12 MS0326_v3 PROJ14823/RUN713
https://covid.postera.ai/covid/submissions/d8fd1356-48a3-47db-b12f-ee2f1a630081/12
<img src="https://covid.postera.ai/synthesize/CNc1c%5BnH%5Dc2c(Oc3cc(C)c(Br)cn3)c(Cl)c(F)cc12">
<img src="http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_1-3000_p14823_713_19May2020.png">
### \#9 ALE-HEI-f28-17 ALE-HEI-f28a35b5-17 MS0326_v3 PROJ14823/RUN403
https://covid.postera.ai/covid/submissions/f28a35b5-9f3e-4135-a6b4-7ce39ba4980a/17
<img src="https://covid.postera.ai/synthesize/Cc1ccncc1NC(=O)N1CCN(C)CC1">
<img src="http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_1-3000_p14823_403_19May2020.png">
### \#10 CHR-SOS-709-6 CHR-SOS-7098f804-6 MS0323_v3 PROJ14822/RUN454
https://covid.postera.ai/covid/submissions/7098f804-b66c-4fb6-89f4-8e4e0c78a7cb/6
<img src="https://covid.postera.ai/synthesize/O=C(Nc1ccc(%5BN+%5D(=O)%5BO-%5D)cc1)c1ccccc1">
<img src="http://yabmtm.hopto.org:31415/MS0323/plots/MS0323_v3_1-500_p14822_454_19May2020.png">
|
github_jupyter
|
## Model one policy variables
This notebook extracts the selected policy variables in the `indicator_list` from IMF and World Bank (wb) data sources, and writes them to a csv file.
```
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# Silence noisy library warnings for a cleaner notebook; re-enable when debugging.
warnings.filterwarnings('ignore')
# Display floats as e.g. '            1,234.56' (width 20, thousands separator,
# two decimals) in DataFrame output.
pd.options.display.float_format = '{:20,.2f}'.format
```
| variable | origin | source |granularity|countries| description | composition |
| --------------------------|-------------------|-------------|-----------|---------|-------------------------------------------------------------|-------------------------------------------------------------------|
| total debt service | - | wb econ | yearly | 217 | Total debt service (% of GNI) | - |
| interest payments | - | wb econ | yearly | 217 | Interest payments on external debt (% of GNI) | - |
| lending interest rate | - | wb econ | yearly | 217 | Lending interest rate (%) | - |
| firms using banks | - | wb econ | yearly | 217 | Firms using banks to finance investment (% of firms) | - |
| bank capital ratio | - | wb econ | yearly | 217 | Bank capital to assets ratio (%) | - |
| tax revenue gdp share | - | wb econ | yearly | 217 | Tax revenue (% of GDP) | - |
| short term debt | - | wb econ | yearly | 217 | Short-term debt (% of total external debt) | - |
| inflation | - | wb econ | yearly | 217 | Inflation, GDP deflator (annual %) | - |
| GDP growth | - | wb econ | yearly | 217 | GDP growth (annual %) | - |
| real interest rate | - | wb econ | yearly | 217 | Real interest rate (%) | - |
| firm market cap | - | wb econ | yearly | 217 | Market capitalization of listed domestic companies (% of GDP) | - |
| GDP per capita growth | - | wb econ | yearly | 217 | GDP per capita growth (annual %) | - |
| GDP | - | wb econ | yearly | 217 | GDP (constant 2010 USD) | - |
| GNI growth | - | wb econ | yearly | 217 | GNI growth (annual %) | - |
| interest payments | - | wb econ | yearly | 217 | Interest payments (% of expense) | - |
| nonperforming bank loans | - | wb econ | yearly | 217 | Bank nonperforming loans to total gross loans (%) | - |
| savings | - | wb econ | yearly | 217 | Gross domestic savings (% of GDP) | - |
| gross savings | - | wb econ | yearly | 217 | Gross savings (% of GNI) | - |
| GNI per capita growth | - | wb econ | yearly | 217 | GNI per capita growth (annual %) | - |
| employee compensation | - | wb econ | yearly | 217 | Compensation of employees (% of expense) | - |
| reserves | - | wb econ | yearly | 217 | Total reserves (% of total external debt) | - |
| broad money | - | wb econ | yearly | 217 | Broad money (% of GDP) | - |
| GNI | - | wb econ | yearly | 217 | GNI (constant 2010 USD) | - |
| government debt | - | wb econ | yearly | 217 | Central government debt, total (% of GDP) | - |
```
# Exact World Bank / IMF series titles selected as the model-one policy
# variables (see the table above for descriptions). These strings must match
# the 'Series Name' / 'Indicator Name' values in the source CSVs verbatim.
indicator_list = ['Total debt service (% of GNI)', 'Interest payments on external debt (% of GNI)',
                  'Lending interest rate (%)', 'Firms using banks to finance investment (% of firms)',
                  'Bank capital to assets ratio (%)', 'Tax revenue (% of GDP)', 'Short-term debt (% of total external debt)',
                  'Inflation, GDP deflator (annual %)', 'GDP growth (annual %)', 'Real interest rate (%)',
                  'Market capitalization of listed domestic companies (% of GDP)', 'GDP per capita growth (annual %)',
                  'GDP (constant 2010 US$)', 'GNI growth (annual %)', 'Interest payments (% of expense)',
                  'Bank nonperforming loans to total gross loans (%)', 'Gross domestic savings (% of GDP)',
                  'Gross savings (% of GNI)', 'GNI per capita growth (annual %)', 'Compensation of employees (% of expense)',
                  'Total reserves (% of total external debt)', 'Broad money (% of GDP)', 'GNI (constant 2010 US$)',
                  'Central government debt, total (% of GDP)']
len(indicator_list)
```
## Load imf monthly data
```
%%bash
wc -l imf/*.csv
# Grid of IMF monthly period labels. The month loop is the *outer* one, so the
# labels come out month-major: '1960M1', '1961M1', ..., then '1960M2', ...
# NOTE(review): the bash check above reads imf/*.csv but load_imf_monthly
# below reads from data/imf/ — confirm which path is correct.
time_values = [str('%sM%s' % (y, m)) for m in list(range(1, 13)) for y in list(range(1960, 2018))]
imf_columns = ['Country Name', 'Indicator Name'] + time_values
imf_country_aggregates = ['Euro Area']
def load_imf_monthly(file_name, indicators, imf_columns, country_aggregates):
    """Read one IMF monthly time-series CSV and return it in long format.

    Keeps only the 'Value' rows for the requested indicator names, melts the
    'YYYYMmm' period columns into (date, value) pairs, and drops rows for
    country aggregates so only individual countries remain.

    Returns a dataframe with columns ['country', 'indicator', 'date', 'value'].
    """
    raw = pd.read_csv('data/imf/%s' % file_name).fillna(0)
    # Each series carries several attribute rows; only the 'Value' rows hold data.
    values = raw[raw['Attribute'] == 'Value'].drop(columns=['Attribute'])
    selected = values[values['Indicator Name'].isin(indicators)][imf_columns].fillna(0)
    # Wide -> long: one row per (country, indicator, period).
    long_df = selected.melt(id_vars=['Country Name', 'Indicator Name'],
                            var_name='date', value_name='value')
    # Period labels look like '1960M1'.
    long_df['date'] = pd.to_datetime(long_df['date'], format='%YM%m')
    long_df.columns = ['country', 'indicator', 'date', 'value']
    keep = ~long_df['country'].isin(country_aggregates)
    return long_df.loc[keep]
# Producer-price and consumer-price IMF exports, loaded into long format.
imf_pplt_df = load_imf_monthly('PPLT_11-25-2018 19-25-01-32_timeSeries.csv', indicator_list, imf_columns, imf_country_aggregates)
imf_cpi_df = load_imf_monthly('CPI_11-25-2018 19-14-47-26_timeSeries.csv', indicator_list, imf_columns, imf_country_aggregates)
# Stack the two long-format frames; join='outer' keeps all columns from both.
imf_df = pd.concat([imf_cpi_df, imf_pplt_df], join='outer')
imf_df.size
imf_df.head(15)
len(imf_df['country'].unique())
imf_countries = sorted(list(imf_df['country'].unique()))
```
### Load world bank yearly data
```
%%bash
wc -l world_bank/*.csv
# Regional/income aggregates to exclude so only individual countries remain.
# The last few entries ('Last Updated: ...', 'Data from database: ...') are
# World Bank CSV export footer rows, not countries — they are filtered the
# same way.
wb_country_aggregates = ['nan', 'Lower middle income', 'Post-demographic dividend', 'High income',
                         'Pre-demographic dividend', 'East Asia & Pacific (IDA & IBRD countries)',
                         'Europe & Central Asia (excluding high income)', 'Heavily indebted poor countries (HIPC)',
                         'Caribbean small states', 'Pacific island small states', 'Middle income',
                         'Late-demographic dividend', 'OECD members', 'IDA & IBRD total', 'Not classified',
                         'East Asia & Pacific (excluding high income)',
                         'Latin America & the Caribbean (IDA & IBRD countries)', 'Low income', 'Low & middle income',
                         'IDA blend', 'IBRD only', 'Sub-Saharan Africa (excluding high income)',
                         'Fragile and conflict affected situations', 'Europe & Central Asia (IDA & IBRD countries)',
                         'Euro area', 'Other small states', 'Europe & Central Asia', 'Arab World',
                         'Latin America & Caribbean (excluding high income)',
                         'Sub-Saharan Africa (IDA & IBRD countries)', 'Early-demographic dividend', 'IDA only',
                         'Small states', 'Middle East & North Africa (excluding high income)', 'East Asia & Pacific',
                         'South Asia', 'European Union', 'Least developed countries: UN classification',
                         'Middle East & North Africa (IDA & IBRD countries)', 'Upper middle income',
                         'South Asia (IDA & IBRD)', 'Central Europe and the Baltics', 'Sub-Saharan Africa',
                         'Latin America & Caribbean', 'Middle East & North Africa', 'IDA total', 'North America',
                         'Last Updated: 11/14/2018', 'Data from database: World Development Indicators', 'World']
# World Bank year columns are labelled like '1960 [YR1960]'.
wb_cols = ['Country Name', 'Series Name'] + [str('%s [YR%s]' % (y, y)) for y in list(range(1960, 2018))]
def load_wb_yearly(file_name, indicators, wb_columns, country_aggregates):
    """Read one World Bank yearly CSV and return it in long format.

    Keeps only rows for the requested series names, melts the
    'YYYY [YRYYYY]' year columns into (date, value) pairs, and drops rows
    for country aggregates so only individual countries remain.

    Returns a dataframe with columns ['country', 'indicator', 'date', 'value'].
    """
    raw = pd.read_csv('world_bank/%s' % file_name).fillna(0)
    selected = raw[raw['Series Name'].isin(indicators)][wb_columns].fillna(0)
    # Wide -> long: one row per (country, series, year).
    long_df = selected.melt(id_vars=['Country Name', 'Series Name'],
                            var_name='date', value_name='value')
    # Year labels look like '1960 [YR1960]'; the leading token is the year.
    years = long_df['date'].map(lambda label: int(label.split(' ')[0]))
    long_df['date'] = pd.to_datetime(years, format='%Y')
    long_df.columns = ['country', 'indicator', 'date', 'value']
    keep = ~long_df['country'].isin(country_aggregates)
    return long_df.loc[keep]
# Economic, health/nutrition/population and population World Bank exports.
wb_econ_df = load_wb_yearly('ECON.csv', indicator_list, wb_cols, wb_country_aggregates)
wb_hnp_df = load_wb_yearly('HNP.csv', indicator_list, wb_cols, wb_country_aggregates)
wb_pop_df = load_wb_yearly('POP.csv', indicator_list, wb_cols, wb_country_aggregates)
# Stack the three long-format frames; join='outer' keeps all columns.
wb_df = pd.concat([wb_econ_df, wb_hnp_df, wb_pop_df], join='outer')
wb_df.size
wb_df.head(15)
len(wb_df['country'].unique())
wb_countries = sorted(list(wb_df['country'].unique()))
```
### Combine the two datasets
```
# Countries that appear in the IMF data but not (under the same name) in the
# World Bank data — these need a manual name mapping.
imf_specific = [country for country in imf_countries if country not in wb_countries]
len(imf_specific)
# Manual IMF -> World Bank country-name mapping. Venezuela appears twice on
# purpose: once with an ASCII 'u'/'i' spelling and once with the accented
# 'í', so both spellings in the raw data map to the same WB name.
imf_to_wb_country_map = {
    'Afghanistan, Islamic Republic of': 'Afghanistan',
    'Armenia, Republic of': 'Armenia',
    'Azerbaijan, Republic of': 'Azerbaijan',
    'Bahrain, Kingdom of': 'Bahrain',
    'China, P.R.: Hong Kong': 'Hong Kong SAR, China',
    'China, P.R.: Macao': 'Macao SAR, China',
    'China, P.R.: Mainland': 'China',
    'Congo, Democratic Republic of': 'Congo, Dem. Rep.',
    'Congo, Republic of': 'Congo, Rep.',
    'Egypt': 'Egypt, Arab Rep.',
    'French Territories: New Caledonia': 'New Caledonia',
    'Iran, Islamic Republic of': 'Iran',
    'Korea, Republic of': 'Korea, Rep.',
    'Kosovo, Republic of': 'Kosovo',
    "Lao People's Democratic Republic": 'Lao PDR',
    'Serbia, Republic of': 'Serbia',
    'Sint Maarten': 'Sint Maarten (Dutch part)',
    'Timor-Leste, Dem. Rep. of': 'Timor-Leste',
    'Venezuela, Republica Bolivariana de': 'Venezuela, RB',
    'Venezuela, República Bolivariana de': 'Venezuela, RB',
    'Yemen, Republic of': 'Yemen'
}
imf_df = imf_df.replace({'country': imf_to_wb_country_map})
policy_df = pd.concat([wb_df, imf_df], join='outer')
policy_df.size
policy_df.head(15)
indicators = sorted(list(policy_df['indicator'].unique()))
# Sanity check: every requested indicator must be present in the combined data.
assert len(indicators) == len(indicator_list), 'The number of retrieved variables (%s) does not match the number of specified variables (%s).\nThe following variables are missing:\n\n %s' % (len(indicators), len(indicator_list), [i for i in indicator_list if i not in indicators])
policy_df.to_csv('model_one/policy.csv', sep=';', index=False)
```
|
github_jupyter
|
```
# Exploratory simulation of an OT-2 qPCR-setup protocol using the opentrons
# simulate API (no robot required).
from opentrons import simulate
ctx = simulate.get_protocol_api('2.1')
NUM_SAMPLES = 48
VOLUME_MMIX = 20
ELUTION_LABWARE = '2ml tubes'
PREPARE_MASTERMIX = True
MM_TYPE = 'MM1'
# Maps the human-readable elution-labware choice to an opentrons labware name.
EL_LW_DICT = {
    'large strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',
    'short strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',
    '2ml tubes': 'opentrons_24_tuberack_generic_2ml_screwcap',
    '1.5ml tubes': 'opentrons_24_tuberack_nest_1.5ml_screwcap'
}
# Four source racks on deck slots 4, 5, 1, 2 (in that order).
source_racks = [
    ctx.load_labware(EL_LW_DICT[ELUTION_LABWARE], slot,
                     'RNA elution labware ' + str(i+1))
    for i, slot in enumerate(['4', '5', '1', '2'])
]
tips20 = [
    ctx.load_labware('opentrons_96_filtertiprack_20ul', slot)
    for slot in ['6', '9', '8', '7']
]
tips300 = [ctx.load_labware('opentrons_96_filtertiprack_200ul', '3')]
tempdeck = ctx.load_module('tempdeck', '10')
pcr_plate = tempdeck.load_labware(
    'biorad_96_wellplate_200ul_pcr', 'PCR plate')
tempdeck.set_temperature(4)
mm_rack = ctx.load_labware(
    'opentrons_24_tuberack_generic_2ml_screwcap', '11',
    '2ml screw tube aluminum block for mastermix')
# pipette
p20 = ctx.load_instrument('p20_single_gen2', 'right', tip_racks=tips20)
p300 = ctx.load_instrument('p300_single_gen2', 'left', tip_racks=tips300)
print(source_racks)
print(tips20)
print(tips300)
print(tempdeck)
print(pcr_plate)
print(mm_rack)
# Know which class the object comes from.
mm_rack.__class__
# Know which methods are available for the object.
dir(mm_rack)
# Example, access wells in rack object.
mm_rack.wells()
# First NUM_SAMPLES tubes across all source racks, as a comprehension...
sources = [
    tube
    for rack in source_racks for tube in rack.wells()][:NUM_SAMPLES]
print(sources)
# ...and the same selection written as explicit loops, for comparison.
sources=list()
for rack in source_racks:
    for tube in rack.wells():
        sources.append(tube)
print(sources[:NUM_SAMPLES])
# Destination wells, taken in 4-row by 6-column quadrants of the PCR plate
# (h_block selects the row half, v_block the column half), comprehension form...
dests = [
    well
    for h_block in range(2)
    for v_block in range(2)
    for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]
    for well in col[4*h_block:4*(h_block+1)]][:NUM_SAMPLES]
print(dests)
# ...and the equivalent explicit loops with tracing prints.
dests = list()
for h_block in range(2):
    print("hblock = " + str(h_block))
    for v_block in range(2):
        print("vblock = " + str(v_block))
        for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]:
            print("col = " + str(col))
            for well in col[4*h_block:4*(h_block+1)]:
                print(well)
                dests.append(well)
dests = dests[:NUM_SAMPLES]
# Chunk the destinations into groups of at most 8 transfers per aspiration.
max_trans_per_asp = 8
#print(max_trans_per_asp)
split_ind = [ind for ind in range(0, NUM_SAMPLES, max_trans_per_asp)]
dest_sets = [dests[split_ind[i]:split_ind[i+1]]
             for i in range(len(split_ind)-1)] + [dests[split_ind[-1]:]]
dest_sets
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/wearlianbaguio/OOP-1-1/blob/main/OOP_Concepts_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##Application 1
1. Create a Python program that displays the name of the students (Student 1, Student 2, Student 3) and their term grades
2. Create a class name Person and attributes - std1, std2, std3, pre, mid, fin
3. Compute the average of each term grade using Grade() method
4. Information about student's grades must be hidden from others
5. Save your python program named as "OOP Concepts 2" to your GitHub repository "OOP 1-1"
6. Share your GitHub link attached with this manuscript.
```
class Person:
    """Holds three students' names and their term-grade strings.

    The grade strings are stored as name-mangled private attributes so they
    are hidden from direct access (per the exercise requirement); use the
    get_/set_ accessors instead.
    """

    def __init__(self, std1_fullname, std1_PreMidFin, std2_fullname,
                 std2_PreMidFin, std3_fullname, std3_PreMidFin):
        self.std1_fullname = std1_fullname
        self.__std1_PreMidFin = std1_PreMidFin
        self.std2_fullname = std2_fullname
        self.__std2_PreMidFin = std2_PreMidFin
        self.std3_fullname = std3_fullname
        self.__std3_PreMidFin = std3_PreMidFin

    # BUG FIX: all three accessor pairs were named set_std1_PreMidFin /
    # get_std1_PreMidFin, so the later definitions silently overwrote the
    # earlier ones, and the std2/std3 setters assigned from undefined names
    # (NameError when called). Each student now has a distinct working pair.
    def set_std1_PreMidFin(self, std1_PreMidFin):
        self.__std1_PreMidFin = std1_PreMidFin

    def get_std1_PreMidFin(self):
        return self.__std1_PreMidFin

    def set_std2_PreMidFin(self, std2_PreMidFin):
        self.__std2_PreMidFin = std2_PreMidFin

    def get_std2_PreMidFin(self):
        return self.__std2_PreMidFin

    def set_std3_PreMidFin(self, std3_PreMidFin):
        self.__std3_PreMidFin = std3_PreMidFin

    def get_std3_PreMidFin(self):
        return self.__std3_PreMidFin

    def Info(self):
        """Print each student's name followed by their term grades."""
        print(self.std1_fullname)
        print(self.__std1_PreMidFin)
        print(self.std2_fullname)
        print(self.__std2_PreMidFin)
        print(self.std3_fullname)
        print(self.__std3_PreMidFin)

student = Person("Barabasz Viscenzo"," Prelim:86 Midterms:90 Final:89","Isabella Sebastian"," Prelim:92 Midterms:90 Final:93","Laszlo Samaniego"," Prelim:95 Midterms:98 Final:97")
student.Info()
```
##The average of three students grade based on each of their term
```
class grades:
    """Encapsulates one student's prelim/midterm/final grades.

    Grades are private (name-mangled) attributes with accessor methods;
    Grade() returns the integer average of the three terms.
    """

    def __init__(self, prelim, midterms, finals):
        self.__prelim = prelim
        self.__midterms = midterms
        self.__finals = finals

    def set_prelim(self, prelim):
        self.__prelim = prelim

    def get_prelim(self):
        return self.__prelim

    # BUG FIX: this setter was also named set_prelim, which overwrote the
    # real prelim setter above; renamed to set_midterms to pair with
    # get_midterms.
    def set_midterms(self, midterms):
        self.__midterms = midterms

    def get_midterms(self):
        return self.__midterms

    def set_finals(self, finals):
        self.__finals = finals

    def get_finals(self):
        return self.__finals

    def Grade(self):
        """Integer (floor) average of the three term grades."""
        return (self.__prelim + self.__midterms + self.__finals) // 3

std1 = grades(86, 90, 89)
std2 = grades(92, 90, 93)
std3 = grades(95, 98, 97)
print("Barabasz Viscenzo's average is", std1.Grade())
print("Isablla Sebastian's average is", std2.Grade())
print("Laszlo Samaniego's average is", std3.Grade())
```
|
github_jupyter
|
# ETL Pipeline Preparation
Follow the instructions below to help you create your ETL pipeline.
### 1. Import libraries and load datasets.
- Import Python libraries
- Load `messages.csv` into a dataframe and inspect the first few lines.
- Load `categories.csv` into a dataframe and inspect the first few lines.
```
# import libraries
import pandas as pd
import sqlite3
from sqlalchemy import create_engine
# load messages dataset
# NOTE(review): absolute Windows paths are machine-specific; consider
# relative paths so the notebook runs elsewhere.
messages = pd.read_csv(r'E:\Dropbox\Pessoal\Python\Udacity\Disaster-Response-Pipelines\data\messages.csv')
messages.head()
# load categories dataset
categories = pd.read_csv(r'E:\Dropbox\Pessoal\Python\Udacity\Disaster-Response-Pipelines\data\categories.csv')
categories.head()
```
### 2. Merge datasets.
- Merge the messages and categories datasets using the common id
- Assign this combined dataset to `df`, which will be cleaned in the following steps
```
# merge datasets
# Default inner join on the shared 'id' column: rows missing from either
# file are dropped.
df = messages.merge(categories, on='id')
df.head()
```
### 3. Split `categories` into separate category columns.
- Split the values in the `categories` column on the `;` character so that each value becomes a separate column. You'll find [this method](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.str.split.html) very helpful! Make sure to set `expand=True`.
- Use the first row of categories dataframe to create column names for the categories data.
- Rename columns of `categories` with new column names.
```
# create a dataframe of the 36 individual category columns
categories = categories['categories'].str.split(';', expand=True)
#Creating a list of the columns names with the first row value and then extract the final and assign to the DF
col_list = list(categories.iloc[0].values)
# Each cell looks like 'related-1'; the text before the '-' is the column name.
categories.columns = [x.split("-")[0] for x in col_list]
categories.head()
```
### 4. Convert category values to just numbers 0 or 1.
- Iterate through the category columns in df to keep only the last character of each string (the 1 or 0). For example, `related-0` becomes `0`, `related-1` becomes `1`. Convert the string to a numeric value.
- You can perform [normal string actions on Pandas Series](https://pandas.pydata.org/pandas-docs/stable/text.html#indexing-with-str), like indexing, by including `.str` after the Series. You may need to first convert the Series to be of type string, which you can do with `astype(str)`.
```
# Strip the 'name-' prefix from every cell, leaving just the numeric flag.
for column in categories:
    # set each value to be the last character of the string
    categories[column] = categories[column].str.split('-',expand=True)[1]
    # convert column from string to numeric
    categories[column] = categories[column].astype(int)
categories.head()
```
### 5. Replace `categories` column in `df` with new category columns.
- Drop the categories column from the df dataframe since it is no longer needed.
- Concatenate df and categories data frames.
```
# drop the original categories column from `df`
df.drop("categories", inplace = True, axis=1)
df.head()
# concatenate the original dataframe with the new `categories` dataframe
df = pd.concat([df,categories], axis=1, join="inner")
df.head()
pd.set_option('display.max_columns', None)
# BUG FIX: the original line `df['related'] = df.loc[df['related'] == 2] = 1`
# is a chained multiple-assignment: it first overwrote the ENTIRE 'related'
# column with 1 (destroying the data), and only then re-evaluated the mask
# against the already-overwritten column. The intent — map the out-of-range
# value 2 to 1 — is done in place:
df.loc[df['related'] == 2, 'related'] = 1
df.describe()
```
### 6. Remove duplicates.
- Check how many duplicates are in this dataset.
- Drop the duplicates.
- Confirm duplicates were removed.
```
# check number of duplicates
# duplicated() marks every repeat of an 'id' after its first occurrence.
sum(df.id.duplicated())
# drop duplicates
df.drop_duplicates(['id'],inplace=True)
# check number of duplicates
sum(df.id.duplicated())
```
### 7. Save the clean dataset into an sqlite database.
You can do this with pandas [`to_sql` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) combined with the SQLAlchemy library. Remember to import SQLAlchemy's `create_engine` in the first cell of this notebook to use it below.
```
conn = sqlite3.connect(r'E:\Dropbox\Pessoal\Python\Udacity\Disaster-Response-Pipelines\databases\Disaster.db')
# if_exists='replace' keeps the notebook re-runnable: without it, to_sql
# raises ValueError on the second run because the 'Disaster' table exists.
df.to_sql('Disaster', con=conn, index=False, if_exists='replace')
```
### 8. Use this notebook to complete `etl_pipeline.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database based on new datasets specified by the user. Alternatively, you can complete `etl_pipeline.py` in the classroom on the `Project Workspace IDE` coming later.
```
def load_data(data1, data2, db_name='Disaster', table_name='Disaster'):
    """Build a clean disaster-response table from two CSV files and store it.

    Reads the messages CSV (``data1``) and the categories CSV (``data2``),
    merges them on their shared ``id`` column, expands the semicolon-separated
    ``categories`` string into one numeric column per category (names come
    from the text before the ``-`` in the first data row, values from the
    digit after it), drops the raw ``categories`` column and the ``id``
    duplicates, and writes the result to ``<db_name>.db`` in table
    ``table_name`` via SQLAlchemy. Returns None.
    """
    # Load both raw datasets and join them on the common key.
    messages = pd.read_csv(data1)
    raw = pd.read_csv(data2)
    df = messages.merge(raw, on='id')
    # Expand 'cat_a-0;cat_b-1;...' into one column per category.
    categories = raw['categories'].str.split(';', expand=True)
    # Column names: the part before '-' in the first data row.
    categories.columns = [entry.split('-')[0] for entry in categories.iloc[0]]
    for column in categories:
        # Value: the numeric part after '-', cast to int.
        categories[column] = categories[column].str.split('-', expand=True)[1]
        categories[column] = categories[column].astype(int)
    # Replace the raw string column with the expanded numeric ones.
    df.drop('categories', inplace=True, axis=1)
    df = pd.concat([df, categories], axis=1)
    # One row per message id.
    df.drop_duplicates(['id'], inplace=True)
    # Persist to a local SQLite database.
    engine = create_engine(f'sqlite:///{db_name}.db')
    df.to_sql(f'{table_name}', engine, index=False)
    return
conn = sqlite3.connect(r'E:\Dropbox\Pessoal\Python\Udacity\Disaster-Response-Pipelines\databases\Disaster.db')
df = pd.read_sql('select * from disaster', con = conn)
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
genre_counts
genre_names
df.head()
```
|
github_jupyter
|
# Assignment 1
This assignment is to test your understanding of Python basics.
Answer the questions and complete the tasks outlined below; use the specific method described, if applicable. In order to get complete points on your homework assignment you have to a) complete this notebook, and b) based on your results, answer the multiple-choice questions on QuestromTools.
**Important note:** make sure you spend some time to review the basics of python notebooks under the folder `00-Python-Basics` in course repo or [A Whirlwind Tour of Python](https://www.oreilly.com/programming/free/files/a-whirlwind-tour-of-python.pdf).
# Question 1
**What is 9 to the power of 7?**
```
# Your answer goes here
```
# Question 2
**What is the quotient and remainder of 453634/34?**
```
# Your answer goes here
print('Quotient of 453634/34:')
print('Remainder of 453634/34:')
```
# Question 3
Write a statement to check whether `a` is a multiple of 12 and within the range of [1000, 1800) or (0, 300].
**What is the outcome of `a = 780`?**
Note: (0, 300] represents a range from 0 to 300, where 0 is not included in the range, but 300 is.
```
a = 780
# Your answer goes here
```
# Question 4
**Given this nested list, what indexing yields to the word "hello"?**
```
lst = [[5,[100,200,{'target':[1,2,3,'hello']}],23,11],1,71,2,[3,4],'bye']
print(lst)
# Your answer goes here
```
# Question 5
Using a list comprehension, create a new list out of the list `L1`, which contains only the even numbers from `L1`, and converts them into absolute values (using `abs()` function). Call this new list `L2`.
**What is the sum of all of the elements of `L2`?**
Hint: Use `sum(L2)` to get the sum of all the elements.
```
L1 = [64, 34, 112, 91, 62, 40, 117, 80, 96, 34, 48, -9, -33,
99, 16, 118, -51, 60, 115, 4, -10, 82, -7, 77, -33, -40,
77, 90, -9, 52, -44, 25, -43, 28, -37, 92, 25, -45, 3,
103, 22, 39, -52, 74, -54, -76, -10, 5, -54, 95, -59, -2,
110, 63, -53, 113, -43, 18, 49, -20, 81, -67, 1, 38, -24,
57, -11, -69, -66, -67, -68, -16, 64, -34, 52, -37, -7, -40,
11, -3, 76, 91, -57, -48, -10, -16, 14, 13, -65]
# Your answer goes here
```
# Question 6
Write a function that receives a list of integer numbers and returns a list of numbers that are multiples of 4. Call this function `mult4_filter()`.
**Given the list `L3` below how many elements the outcome of `mult4_filter(L3)` has?**
Hint: use `len(mult4_filter(L3))` to get the number of elements.
```
L3 = [15, 11, 1, 3, 13, 3, 14, 16, 17, 17, 6, 18, 10, 19, 8, 1, 18,
17, 14, 1, 5, 2, 13, 0, 1, 13, 16, 8, 5, 11, 12, 8, 17, 14,
10, 18, 17, 16, 3, 7, 8, 15, 18, 7, 10, 5, 7, 16, 6, 5]
# Your answer goes here
def mult4_filter(L):
# Your code goes here
return
```
|
github_jupyter
|
# Sklearn
## sklearn.model_selection
документация: http://scikit-learn.org/stable/modules/cross_validation.html
```
from sklearn import model_selection, datasets
import numpy as np
```
### Разовое разбиение данных на обучение и тест с помощью train_test_split
```
iris = datasets.load_iris()
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(iris.data, iris.target,
test_size = 0.3)
# verify that the test set really makes up 0.3 of the full dataset
float(len(test_labels))/len(iris.data)
print 'Размер обучающей выборки: {} объектов \nРазмер тестовой выборки: {} объектов'.format(len(train_data),
len(test_data))
print 'Обучающая выборка:\n', train_data[:5]
print '\n'
print 'Тестовая выборка:\n', test_data[:5]
print 'Метки классов на обучающей выборке:\n', train_labels
print '\n'
print 'Метки классов на тестовой выборке:\n', test_labels
```
### Стратегии проведения кросс-валидации
```
# generate a tiny mock dataset whose elements equal their ordinal index
X = range(0,10)
```
#### KFold
```
kf = model_selection.KFold(n_splits = 5)
for train_indices, test_indices in kf.split(X):
print train_indices, test_indices
kf = model_selection.KFold(n_splits = 2, shuffle = True)
for train_indices, test_indices in kf.split(X):
print train_indices, test_indices
kf = model_selection.KFold(n_splits = 2, shuffle = True, random_state = 1)
for train_indices, test_indices in kf.split(X):
print train_indices, test_indices
```
#### StratifiedKFold
```
y = np.array([0] * 5 + [1] * 5)
print y
skf = model_selection.StratifiedKFold(n_splits = 2, shuffle = True, random_state = 0)
for train_indices, test_indices in skf.split(X, y):
print train_indices, test_indices
target = np.array([0, 1] * 5)
print target
skf = model_selection.StratifiedKFold(n_splits = 2,shuffle = True)
for train_indices, test_indices in skf.split(X, target):
print train_indices, test_indices
```
#### ShuffleSplit
```
ss = model_selection.ShuffleSplit(n_splits = 10, test_size = 0.2)
for train_indices, test_indices in ss.split(X):
print train_indices, test_indices
```
#### StratifiedShuffleSplit
```
target = np.array([0] * 5 + [1] * 5)
print target
sss = model_selection.StratifiedShuffleSplit(n_splits = 4, test_size = 0.2)
for train_indices, test_indices in sss.split(X, target):
print train_indices, test_indices
```
#### Leave-One-Out
```
loo = model_selection.LeaveOneOut()
for train_indices, test_index in loo.split(X):
print train_indices, test_index
```
Больше стратегий проведения кросс-валидации доступно здесь: http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators
|
github_jupyter
|
# Black Litterman with Investor Views Optimization: Oldest Country ETFs
# Charts
## 1. Data Fetching
### 1.1 Model configuration

> **Note (review):** the configuration below embeds a Quandl API key inline; load credentials from an environment variable instead of committing them to a shared notebook.
```
import os
import sys
import datetime as dt
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from hmmlearn import hmm
import cvxportfolio as cp
import alphamodel as am
config = {'name': 'bl_sim_charts',
'universe':
{'list': ['SPY', 'EWA', 'EWC', 'EWG', 'EWH', 'EWJ', 'EWS', 'EWU', 'EWW'],
'ticker_col': 'Symbol',
'risk_free_symbol': 'USDOLLAR'},
'data':
{'name': 'eod_returns',
'source': 'quandl',
'table': 'EOD',
'api_key': "6XyApK2BBj_MraQg2TMD"},
'model':
{'start_date': '19970102',
'end_date': '20091231',
'halflife': 65,
'min_periods': 3,
'hidden_states': 2,
'train_len': 1700,
'process': 'none',
'data_dir': '/Users/razvan/PyRepo/research_masc/data_store/bl/',
'returns':
{'sampling_freq': 'daily'},
'covariance':
{'method' : 'SS',
'sampling_freq' : 'monthly',
'train_days': 360}
}
}
# Logging
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
```
### 1.2 Fetch return data
```
# Fetch returns / volumes
ss = am.SingleStockBLEWM(config)
ss.train(force=True)
# Realized Data for Simulation
prices = ss.get('prices', 'realized', ss.cfg['returns']['sampling_freq']).iloc[1:,:]
returns = ss.get('returns', 'realized', ss.cfg['returns']['sampling_freq'])
volumes = ss.get('volumes', 'realized', ss.cfg['returns']['sampling_freq'])
sigmas = ss.get('sigmas', 'realized', ss.cfg['returns']['sampling_freq'])
simulated_tcost = cp.TcostModel(half_spread=0.0005/2., nonlin_coeff=1., sigma=sigmas, volume=volumes)
simulated_hcost = cp.HcostModel(borrow_costs=0.0001)
simulator = cp.MarketSimulator(returns, costs=[simulated_tcost, simulated_hcost],
market_volumes=volumes, cash_key=ss.risk_free_symbol)
```
### 1.3 Plot return data
```
# Process returns for charting
chart_returns = returns[returns.index >= dt.datetime(2005, 1, 2)]
chart_growth = (chart_returns + 1).cumprod()
chart_returns_cum = chart_growth - 1
chart_returns_cum = chart_returns_cum.stack().reset_index()
chart_returns_cum.columns = ['Date', 'Ticker', 'Value']
plt.figure(figsize=(15,8))
sns.set(font_scale=1.5)
with sns.axes_style('ticks'):
data = chart_returns_cum
ax = sns.lineplot(x='Date', y='Value', hue='Ticker', data=data)
ax.set(xlabel='Date', ylabel='Return')
plt.savefig(ss.cfg['data_dir'] + 'bl_asset_returns.png')
```
## 2. Model fitting
### 2.1 Extract Black Litterman equilibrium returns
```
# Aggregate market stats for cal
market_stats = pd.DataFrame({'MarketCap/GDP': [1.25, 1, 1.25, 0.45, 3.5, 0.8, 2, 1.25, 0.3, 0],
'GDP': [2543500, 150000, 239000, 853000, 22500, 1037500, 10000, 422500, 164500, 0]},
index=ss.universe + ['USDOLLAR'])
market_stats.loc[:, 'MarketCap'] = market_stats.loc[:, 'MarketCap/GDP'] * market_stats.loc[:, 'GDP']
market_stats.loc[:, 'MarketCap Weights'] = market_stats.loc[:, 'MarketCap'] / market_stats.loc[:, 'MarketCap'].sum()
market_stats
# Generate market cap weights pandas.Series
w_mktcap = pd.Series(index=market_stats.index, data=market_stats.loc[:, 'MarketCap Weights'])
w_mktcap['USDOLLAR'] = 0.
```
### 2.2 Generate BL posterior returns/covariance
```
# Parameters that match simulations
risk_aversion = 2.5
confidence = 0.8
vconf = 0.7
gamma_risk = 0.1
gamma_trade = 0.1
gamma_hold = 0
```
#### 2.2.1 Correct View
```
# Predicted Data for Optimization
# US underperforms Germany 4% per year - correct view
ss.predict(w_market_cap_init=w_mktcap, risk_aversion=risk_aversion, c=confidence,
P_view=np.array([-1, 0, 0, 1, 0, 0, 0, 0, 0, 0]), Q_view=np.array(0.04 / 252),
view_confidence=vconf
)
# Black Litterman output
r_cor_pred = ss.get('returns', 'predicted')
covariance_cor_pred = ss.get('covariance', 'predicted')
volumes_cor_pred = ss.get('volumes', 'predicted')
sigmas_cor_pred = ss.get('sigmas', 'predicted')
```
#### 2.2.2 Incorrect View
```
# Predicted Data for Optimization
# US outperforms Germany 4% per year - incorrect (ex-post) view
ss.predict(w_market_cap_init=w_mktcap, risk_aversion=risk_aversion, c=confidence,
           P_view=np.array([1, 0, 0, -1, 0, 0, 0, 0, 0, 0]), Q_view=np.array(0.04 / 252),
           view_confidence=vconf
           )
# Black Litterman output
r_incor_pred = ss.get('returns', 'predicted')
covariance_incor_pred = ss.get('covariance', 'predicted')
volumes_incor_pred = ss.get('volumes', 'predicted')
sigmas_incor_pred = ss.get('sigmas', 'predicted')
```
## 3. Simulation Results
### Input Data
```
# Start and end date
start_date = dt.datetime(2005, 1, 2)
end_date = dt.datetime.strptime(config['model']['end_date'], '%Y%m%d')
# Predicted costs
optimization_tcost = cp.TcostModel(half_spread=0.0005/2., nonlin_coeff=1.,
sigma=sigmas_cor_pred,
volume=volumes_cor_pred)
optimization_hcost=cp.HcostModel(borrow_costs=0.0001)
```
## 3.1 Single Period Optimization for Allocation
### 3.1.1 Market Capitalization Weights
```
%%time
# Market cap weights
mktcap_rebalance = cp.Hold(trading_freq="once")
# Backtest
market_cap_w = simulator.run_multiple_backtest(1E6*w_mktcap,
start_time=start_date, end_time=end_date,
policies=[mktcap_rebalance],
loglevel=logging.WARNING, parallel=True)
market_cap_w[0].summary()
market_cap_w[0].v.plot(figsize=(17,7))
```
### 3.1.2 Black Litterman Returns & Covariance Simulation
```
# Optimization parameters
leverage_limit = cp.LeverageLimit(1)
fully_invested = cp.ZeroCash()
long_only = cp.LongOnly()
```
#### 3.1.2.1 Correct View
```
%%time
# Covariance setup
bl_cor_risk_model = cp.FullSigma(covariance_cor_pred)
# Optimization policy
bl_cor_policy = cp.SinglePeriodOpt(return_forecast=r_cor_pred,
costs=[gamma_risk*bl_cor_risk_model,
gamma_trade*optimization_tcost,
gamma_hold*optimization_hcost],
constraints=[leverage_limit, fully_invested, long_only],
trading_freq='hour')
# Backtest
bl_cor_results = simulator.run_multiple_backtest(1E6*w_mktcap,
start_time=start_date, end_time=end_date,
policies=[bl_cor_policy],
loglevel=logging.WARNING, parallel=True)
bl_cor_results[0].summary()
bl_cor_results[0].v.plot(figsize=(17,7))
bl_cor_results[0].w.plot(figsize=(17,6))
```
#### 3.1.2.2 Incorrect View
```
%%time
# Covariance setup
bl_incor_risk_model = cp.FullSigma(covariance_incor_pred)
# Optimization policy
bl_incor_policy = cp.SinglePeriodOpt(return_forecast=r_incor_pred,
costs=[gamma_risk*bl_incor_risk_model,
gamma_trade*optimization_tcost,
gamma_hold*optimization_hcost],
constraints=[leverage_limit, fully_invested, long_only],
trading_freq='hour')
# Backtest
bl_incor_results = simulator.run_multiple_backtest(1E6*w_mktcap,
start_time=start_date, end_time=end_date,
policies=[bl_incor_policy],
loglevel=logging.WARNING, parallel=True)
bl_incor_results[0].summary()
bl_incor_results[0].v.plot(figsize=(17,7))
bl_incor_results[0].w.plot(figsize=(17,6))
```
### 3.1.3 Weight Allocation Difference
```
# Market capitalization weights
w_mktcap
w_mktcap.name = 'Equilibrium'
# Correct view weights
w_bl_cor = bl_cor_results[0].w.iloc[1,:]
w_bl_cor.name = 'Correct View'
#Incorrect view weights
w_bl_incor = bl_incor_results[0].w.iloc[1,:]
w_bl_incor.name = 'Incorrect View'
# Construct weight dataframe
bl_weights = pd.concat([w_mktcap, w_bl_cor, w_bl_incor], axis=1)
bl_weights = bl_weights.stack().reset_index()
bl_weights.columns = ['Ticker', 'Scenario', 'Value']
%matplotlib inline
with sns.axes_style('ticks', {'figure.figsize': (15,8), 'font_scale': 1.5}):
data = bl_weights
ax = sns.catplot(x='Ticker', y='Value', hue='Scenario', data=data, kind='bar', palette='muted', height=10)
ax.set(xlabel='Scenario', ylabel='Portfolio Weight')
ax.fig.set_size_inches(12,5)
plt.xticks(rotation=30, horizontalalignment='right')
plt.savefig(ss.cfg['data_dir'] + 'bl_view_weights.png', bbox_inches="tight")
```
### 3.1.4 View Confidence Sharpe Difference
```
# Grab Black-Litterman view simulation results
bl_eq_results = market_cap_w[0]
bl_eq = pd.DataFrame.from_dict({'Ex-Post View': ['Equilibrium'],
'view_confidence': [0],
'excess_return': [bl_eq_results.excess_returns.mean() * 100 * bl_eq_results.ppy],
'excess_risk': [bl_eq_results.excess_returns.std() * 100 * np.sqrt(bl_eq_results.ppy)]})
bl_cor_view = pd.read_csv(ss.cfg['data_dir'] + 'bl_ewm_corview.csv')
bl_cor = bl_cor_view[['view_confidence', 'excess_return', 'excess_risk']].copy()
bl_cor.loc[:, 'Ex-Post View'] = 'Correct View'
bl_incor_view = pd.read_csv(ss.cfg['data_dir'] + 'bl_ewm_incorview.csv')
bl_incor = bl_incor_view[['view_confidence', 'excess_return', 'excess_risk']].copy()
bl_incor.loc[:, 'Ex-Post View'] = 'Incorrect View'
bl_results = pd.concat([bl_eq, bl_cor, bl_incor])
bl_results.loc[:, 'sharpe'] = bl_results.loc[:, 'excess_return'] / bl_results.loc[:, 'excess_risk']
bl_results
plt.figure(figsize=(15,8))
with sns.axes_style('ticks', {'font_scale': 1.5}):
data = bl_results
ax = sns.lineplot(x='view_confidence', y='sharpe', hue='Ex-Post View', style='Ex-Post View', data=data, markers=True)
ax.set(xlabel='Static View Confidence', ylabel='Sharpe Ratio')
ax.axhline(0.100230, ls='--')
plt.savefig(ss.cfg['data_dir'] + 'bl_view_sharpe.png')
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/Conscious-Mind/TensorFlow-Course-DeepLearning.AI/blob/main/C1/W2/ungraded_labs/C1_W2_Lab_1_beyond_hello_world_completed.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Ungraded Lab: Beyond Hello World, A Computer Vision Example
In the previous exercise, you saw how to create a neural network that figured out the problem you were trying to solve. This gave an explicit example of learned behavior. Of course, in that instance, it was a bit of overkill because it would have been easier to write the function `y=2x-1` directly instead of bothering with using machine learning to learn the relationship between `x` and `y`.
But what about a scenario where writing rules like that is much more difficult -- for example a computer vision problem? Let's take a look at a scenario where you will build a neural network to recognize different items of clothing, trained from a dataset containing 10 different types.
## Start Coding
Let's start with our import of TensorFlow.
```
import tensorflow as tf
print(tf.__version__)
```
The [Fashion MNIST dataset](https://github.com/zalandoresearch/fashion-mnist) is a collection of grayscale 28x28 pixel clothing images. Each image is associated with a label as shown in this table:
| Label | Description |
| --- | --- |
| 0 | T-shirt/top |
| 1 | Trouser |
| 2 | Pullover |
| 3 | Dress |
| 4 | Coat |
| 5 | Sandal |
| 6 | Shirt |
| 7 | Sneaker |
| 8 | Bag |
| 9 | Ankle boot |
This dataset is available directly in the [tf.keras.datasets](https://www.tensorflow.org/api_docs/python/tf/keras/datasets) API and you load it like this:
```
# Load the Fashion MNIST dataset
fmnist = tf.keras.datasets.fashion_mnist
```
Calling `load_data()` on this object will give you two tuples with two lists each. These will be the training and testing values for the graphics that contain the clothing items and their labels.
```
# Load the training and test split of the Fashion MNIST dataset
(training_images, training_labels), (test_images, test_labels) = fmnist.load_data()
```
What do these values look like? Let's print a training image (both as an image and a numpy array), and a training label to see. Experiment with different indices in the array. For example, also take a look at index `42`. That's a different boot than the one at index `0`.
```
import numpy as np
import matplotlib.pyplot as plt
# You can put between 0 to 59999 here
index = 0
# Set number of characters per row when printing
np.set_printoptions(linewidth=320)
# Print the label and image
print(f'LABEL: {training_labels[index]}')
print(f'\nIMAGE PIXEL ARRAY:\n {training_images[index]}')
# Visualize the image
plt.imshow(training_images[index])
# You can put between 0 to 59999 here
index = 22
# Set number of characters per row when printing
np.set_printoptions(linewidth=320)
# Print the label and image
print(f'LABEL: {training_labels[index]}')
print(f'\nIMAGE PIXEL ARRAY:\n {training_images[index]}')
# Visualize the image
plt.imshow(training_images[index])
```
You'll notice that all of the values in the number are between 0 and 255. If you are training a neural network especially in image processing, for various reasons it will usually learn better if you scale all values to between 0 and 1. It's a process called _normalization_ and fortunately in Python, it's easy to normalize an array without looping. You do it like this:
```
# Normalize the pixel values of the train and test images
training_images = training_images / 255.0
test_images = test_images / 255.0
```
Now you might be wondering why the dataset is split into two: training and testing? Remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data that the model hasn't yet seen. This will be used to evaluate how good it would be at classifying values.
Let's now design the model. There's quite a few new concepts here. But don't worry, you'll get the hang of them.
```
# Build the classification model
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
```
[Sequential](https://keras.io/api/models/sequential/): That defines a sequence of layers in the neural network.
[Flatten](https://keras.io/api/layers/reshaping_layers/flatten/): Remember earlier where our images were a 28x28 pixel matrix when you printed them out? Flatten just takes that square and turns it into a 1-dimensional array.
[Dense](https://keras.io/api/layers/core_layers/dense/): Adds a layer of neurons
Each layer of neurons need an [activation function](https://keras.io/api/layers/activations/) to tell them what to do. There are a lot of options, but just use these for now:
[ReLU](https://keras.io/api/layers/activations/#relu-function) effectively means:
```
if x > 0:
return x
else:
return 0
```
In other words, it only passes values of 0 or greater to the next layer in the network.
[Softmax](https://keras.io/api/layers/activations/#softmax-function) takes a list of values and scales these so the sum of all elements will be equal to 1. When applied to model outputs, you can think of the scaled values as the probability for that class. For example, in your classification model which has 10 units in the output dense layer, having the highest value at `index = 4` means that the model is most confident that the input clothing image is a coat. If it is at index = 5, then it is a sandal, and so forth. See the short code block below which demonstrates these concepts. You can also watch this [lecture](https://www.youtube.com/watch?v=LLux1SW--oM&ab_channel=DeepLearningAI) if you want to know more about the Softmax function and how the values are computed.
```
# Declare sample inputs and convert to a tensor
inputs = np.array([[1.0, 3.0, 4.0, 2.0]])
inputs = tf.convert_to_tensor(inputs)
print(f'input to softmax function: {inputs.numpy()}')
# Feed the inputs to a softmax activation function
outputs = tf.keras.activations.softmax(inputs)
print(f'output of softmax function: {outputs.numpy()}')
# Get the sum of all values after the softmax.
# (Named `total` rather than `sum` so the built-in `sum` is not shadowed.)
total = tf.reduce_sum(outputs)
print(f'sum of outputs: {total}')
# Get the index with highest value
prediction = np.argmax(outputs)
print(f'class with highest probability: {prediction}')
```
The next thing to do, now that the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling `model.fit()` asking it to fit your training data to your training labels. It will figure out the relationship between the training data and its actual labels so in the future if you have inputs that looks like the training data, then it can predict what the label for that input is.
```
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
```
Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like `0.9098`. This tells you that your neural network is about 91% accurate in classifying the training data. That is, it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.
But how would it work with unseen data? That's why we have the test images and labels. We can call [`model.evaluate()`](https://keras.io/api/models/model_training_apis/#evaluate-method) with this test dataset as inputs and it will report back the loss and accuracy of the model. Let's give it a try:
```
# Evaluate the model on unseen data
model.evaluate(test_images, test_labels)
```
You can expect the accuracy here to be about `0.88` which means it was 88% accurate on the entire test set. As expected, it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this.
# Exploration Exercises
To explore further and deepen your understanding, try the below exercises:
### Exercise 1:
For this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent?
```
classifications = model.predict(test_images)
print(classifications[0])
print(classifications)
```
**Hint:** try running `print(test_labels[0])` -- and you'll get a `9`. Does that help you understand why this list looks the way it does?
```
print(test_labels[0])
```
### E1Q1: What does this list represent?
1. It's 10 random meaningless values
2. It's the first 10 classifications that the computer made
3. It's the probability that this item is each of the 10 classes
<details><summary>Click for Answer</summary>
<p>
#### Answer:
The correct answer is (3)
The output of the model is a list of 10 numbers. These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnist#labels), i.e. the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities.
For index 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that the image is most likely an ankle boot.
</p>
</details>
### E1Q2: How do you know that this list tells you that the item is an ankle boot?
1. There's not enough information to answer that question
2. The 10th element on the list is the biggest, and the ankle boot is labelled 9
3. The ankle boot is label 9, and there are 0->9 elements in the list
<details><summary>Click for Answer</summary>
<p>
#### Answer
The correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot
</p>
</details>
### Exercise 2:
Let's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case?
```
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation=tf.nn.relu), # Try experimenting with this layer
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### E2Q1: Increase to 1024 Neurons -- What's the impact?
1. Training takes longer, but is more accurate
2. Training takes longer, but no impact on accuracy
3. Training takes the same time, but is more accurate
<details><summary>Click for Answer</summary>
<p>
#### Answer
The correct answer is (1) by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly!
</p>
</details>
### Exercise 3:
### E3Q1: What would happen if you remove the Flatten() layer. Why do you think that's the case?
<details><summary>Click for Answer</summary>
<p>
#### Answer
You get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of writng all the code to handle that ourselves, we add the Flatten() layer at the begining, and when the arrays are loaded into the model later, they'll automatically be flattened for us.
</p>
</details>
```
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([#tf.keras.layers.Flatten(), #Try removing this layer
tf.keras.layers.Dense(64, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 4:
Consider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? For example, try training the network with 5.
<details><summary>Click for Answer</summary>
<p>
#### Answer
You get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.
</p>
</details>
```
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation=tf.nn.relu),
tf.keras.layers.Dense(5, activation=tf.nn.softmax) # Try experimenting with this layer
])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 5:
Consider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10.
<details><summary>Click for Answer</summary>
<p>
#### Answer
There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary.
</p>
</details>
```
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu), # Add a layer here
tf.keras.layers.Dense(256, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax) # Add a layer here
])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 6:
### E6Q1: Consider the impact of training for more or less epochs. Why do you think that would be the case?
- Try 15 epochs -- you'll probably get a model with a much better loss than the one with 5
- Try 30 epochs -- you might see the loss value stops decreasing, and sometimes increases.
This is a side effect of something called 'overfitting' which you can learn about later and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! :)
```
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=15) # Experiment with the number of epochs
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[34])
print(test_labels[34])
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=30) # Experiment with the number of epochs
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[34])
print(test_labels[34])
```
### Exercise 7:
Before you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results?
```
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# training_images=training_images/255.0 # Experiment with removing this line
# test_images=test_images/255.0 # Experiment with removing this line
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 8:
Earlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action...
```
class myCallback(tf.keras.callbacks.Callback):
    """Stop training once the epoch-end accuracy reaches the target threshold."""
    def on_epoch_end(self, epoch, logs=None):
        # `logs` may be None, or lack 'accuracy' when the metric was not
        # compiled into the model; guard so the comparison cannot raise
        # TypeError. (Also avoids the mutable default argument `logs={}`.)
        acc = (logs or {}).get('accuracy')
        if acc is not None and acc >= 0.9: # Experiment with changing this value
            print("\nReached 90% accuracy so cancelling training!")
            self.model.stop_training = True
# Instantiate the early-stopping callback defined above.
callbacks = myCallback()
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# Scale pixels to 0-1.
training_images=training_images/255.0
test_images=test_images/255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1024, activation=tf.nn.relu),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# The 'accuracy' metric is required: the callback reads logs.get('accuracy').
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=10, callbacks=[callbacks])
```
|
github_jupyter
|
# <strong>Road networks and robustness to flooding on US Atlantic and Gulf barrier islands</strong>
## <strong>- Download elevations for the US Atlantic and Gulf barrier islands -</strong>
### The purpose of this notebook is to download CUDEM tiles from https://coast.noaa.gov/htdata/raster2/elevation/NCEI_ninth_Topobathy_2014_8483/. These tiles will be clipped to the extent of the study area and used to retrieve elevations for each network node.
```
### Packages
import os
import requests
import urllib
import ssl
from osgeo import gdal, ogr
import numpy as np
import matplotlib.pyplot as plt
import rasterio
from rasterio.merge import merge
from rasterio.plot import show
from rasterio.plot import show_hist
from rasterio.mask import mask
from shapely.geometry import box
import geopandas as gpd
from fiona.crs import from_epsg
import pycrs
import rasterio
import glob
%matplotlib inline
### Set working directory
path='' # introduce path to your working directory
os.chdir(path)
### Download CUDEM tiles
# Create folders if they don't exist
outdir= './Data/CUDEM'
if not os.path.exists(outdir):
    os.makedirs(outdir)
outdir = './Data/CUDEM/Tiles'
if not os.path.exists(outdir):
    os.makedirs(outdir)
# Retrieve the url list with CUDEM tiles and save it locally
url = 'https://coast.noaa.gov/htdata/raster2/elevation/NCEI_ninth_Topobathy_2014_8483/urllist8483.txt'
r = requests.get(url, allow_redirects=True)
open('./Data/CUDEM/urllist8483.txt', 'wb').write(r.content)
## If urlopen error pops up [SSL: CERTIFICATE_VERIFY_FAILED] use this line of code but keep in mind that allowing the use of unverified ssl can introduce security risks if the
## data source is not trustworthy
# ssl._create_default_https_context = ssl._create_unverified_context
# Function to get the name of the file from the url
def get_filename(url):
    """Return the final path component of *url*.

    The original used ``if url.find('/')``, which is truthy for -1 (no slash
    present) and falsy only when the first character is '/'; a URL without a
    slash then raised IndexError on ``rsplit('/', 1)[1]``. Using index [-1]
    returns the text after the last '/', or the whole string when there is
    no slash at all.
    """
    return url.rsplit('/', 1)[-1]
# Download tiles using url list
with open('./Data/CUDEM/urllist8483.txt') as f:
    # One URL per whitespace-separated token, across all lines.
    url_list = [word for line in f for word in line.split()]
for url in url_list:
    # parse filename
    fname = get_filename(url)
    outfp = os.path.join(outdir, fname)
    # download the file if it does not exist already
    try:
        if not os.path.exists(outfp):
            print('Downloading', fname)
            urllib.request.urlretrieve(url, outfp)
    except Exception:
        # Best-effort: skip tiles that fail to download. A bare `except:`
        # would also swallow KeyboardInterrupt/SystemExit, making the long
        # download loop impossible to interrupt.
        continue
### Check if extent of raster tile and barrier island shp (200m buffer) overlap. If true, clip raster using polygon and save it
def getFeatures(gdf):
    """Return the first feature's geometry of a GeoDataFrame, wrapped in a
    list — the shape rasterio's mask() expects for its ``shapes`` argument."""
    import json
    geojson = json.loads(gdf.to_json())
    first_geometry = geojson['features'][0]['geometry']
    return [first_geometry]
# Create the output folder if it does not exist
outdir= './Data/CUDEM/CUDEM_Clip'
if not os.path.exists(outdir):
    os.makedirs(outdir)
# For every downloaded tile, build its footprint polygon from the GDAL
# geotransform, then clip it with every barrier-island buffer it intersects.
for filename1 in os.listdir('./Data/CUDEM/Tiles'):
    if filename1.endswith('.tif'):
        raster_dir=('./Data/CUDEM/Tiles/{0}'.format(filename1))
        raster_name=filename1.replace ('.tif', '')
        raster = gdal.Open(raster_dir)
        # get raster geometry
        transform = raster.GetGeoTransform()
        pixelWidth = transform[1]
        pixelHeight = transform[5]  # negative for north-up rasters
        cols = raster.RasterXSize
        rows = raster.RasterYSize
        xLeft = transform[0]
        yTop = transform[3]
        xRight = xLeft+cols*pixelWidth
        yBottom = yTop+rows*pixelHeight
        # Closed ring tracing the tile's bounding box (last point repeats the first).
        ring = ogr.Geometry(ogr.wkbLinearRing)
        ring.AddPoint(xLeft, yTop)
        ring.AddPoint(xLeft, yBottom)
        ring.AddPoint(xRight, yBottom)
        ring.AddPoint(xRight, yTop)
        ring.AddPoint(xLeft, yTop)
        rasterGeometry = ogr.Geometry(ogr.wkbPolygon)
        rasterGeometry.AddGeometry(ring)
        for filename2 in os.listdir('./Data/Barriers/Buffers_200m'):
            if filename2.endswith('.shp'):
                vector_dir=('./Data/Barriers/Buffers_200m/{0}'.format(filename2))
                vector_name=filename2.replace('.shp', '')
                vector = ogr.Open(vector_dir)
                # get vector geometry
                layer = vector.GetLayer()
                # assumes each buffer shapefile holds a single feature — TODO confirm
                feature = layer.GetFeature(0)
                vectorGeometry = feature.GetGeometryRef()
                # check if they intersect and if they do clip raster tile using polygon
                if rasterGeometry.Intersect(vectorGeometry) == True:
                    # output clipped raster
                    out_tif = os.path.join('./Data/CUDEM/CUDEM_Clip/{0}_{1}.tif'.format(vector_name,raster_name))
                    # read the data
                    data = rasterio.open(raster_dir)
                    barrier = gpd.read_file(vector_dir)
                    # project the Polygon into same CRS as the grid
                    barrier = barrier.to_crs(crs=data.crs.data)
                    coords = getFeatures(barrier)
                    # clip raster with polygon
                    out_img, out_transform = mask(dataset=data, shapes=coords, crop=True)
                    # copy the metadata
                    out_meta = data.meta.copy()
                    out_meta.update({"driver": "GTiff",
                                     "height": out_img.shape[1],
                                     "width": out_img.shape[2],
                                     "transform": out_transform,
                                     "crs": "EPSG:4269"})
                    # write clipped raster to disk
                    with rasterio.open(out_tif, "w", **out_meta) as dest:
                        dest.write(out_img)
                else:
                    continue
### With the clipped rasters, create CUDEM mosaic for each barrier
# Create the output folder if it does not exist
outdir= './Data/CUDEM/CUDEM_Mosaic'
if not os.path.exists(outdir):
    os.makedirs(outdir)
# Merge all clipped rasters that start with the same name (belong to the same barrier) in one mosaic
for vector in os.listdir('./Data/Barriers/Buffers_200m'):
    if vector.endswith('.shp'):
        vector_name= vector.replace('.shp', '')
        # list for the source files
        src_files_to_mosaic = []
        for raster in os.listdir('./Data/CUDEM/CUDEM_Clip'):
            if raster.startswith(vector_name):
                src = rasterio.open('./Data/CUDEM/CUDEM_Clip/{0}'.format(raster))
                src_files_to_mosaic.append(src)
        # NOTE(review): if no clipped raster matches this barrier, the list is
        # empty — merge() would fail and `src` below would be undefined.
        # merge function returns a single mosaic array and the transformation info
        mosaic, out_trans = merge(src_files_to_mosaic)
        # copy the metadata (taken from the last-opened source tile)
        out_meta = src.meta.copy()
        # update the metadata
        out_meta.update({"driver": "GTiff",
                         "height": mosaic.shape[1],
                         "width": mosaic.shape[2],
                         "transform": out_trans,
                         "crs": "EPSG:4269"})
        # write the mosaic raster to disk
        with rasterio.open("./Data/CUDEM/CUDEM_Mosaic/{0}.tif".format(vector_name), "w", **out_meta) as dest:
            dest.write(mosaic)
    else:
        continue
```
|
github_jupyter
|
```
"""
Training script to train a model on MultiNLI and, optionally, on SNLI data as well.
The "alpha" hyperparamaters set in paramaters.py determines if SNLI data is used in training. If alpha = 0, no SNLI data is used in training. If alpha > 0, then down-sampled SNLI data is used in training.
"""
%tb
import tensorflow as tf
import os
import importlib
import random
from util import logger
import util.parametersipynb as params
from util.data_processing_ipynb import *
from util.evaluate import *
args = params.argparser("cbow petModel-0 --keep_rate 0.9 --seq_length 25 --emb_train")
FIXED_PARAMETERS = params.load_parameters(args)
test_matched = "{}/multinli_0.9/multinli_0.9_test_matched_unlabeled.jsonl".format(args.datapath)
if os.path.isfile(test_matched):
    # Test sets exist: point at the real matched/mismatched files.
    test_matched = "{}/multinli_0.9/multinli_0.9_test_matched_unlabeled.jsonl".format(args.datapath)
    # BUG FIX: this previously reused the *matched* filename for the mismatched set.
    test_mismatched = "{}/multinli_0.9/multinli_0.9_test_mismatched_unlabeled.jsonl".format(args.datapath)
    test_path = "{}/multinli_0.9/".format(args.datapath)
else:
    # Test sets unavailable: create an empty placeholder so downstream loaders don't fail.
    test_path = "{}/multinli_0.9/".format(args.datapath)
    temp_file = os.path.join(test_path, "temp.jsonl")
    # Was `io.open(...)` but `io` is never imported; the builtin open() suffices.
    open(temp_file, "wb").close()
    test_matched = temp_file
    test_mismatched = temp_file
modname = FIXED_PARAMETERS["model_name"]
logpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + ".log"
logger = logger.Logger(logpath)  # NOTE(review): rebinds the imported `logger` module to an instance
model = FIXED_PARAMETERS["model_type"]
# Dynamically import models.<model_type> and fetch its MyModel class.
module = importlib.import_module(".".join(['models', model]))
MyModel = getattr(module, 'MyModel')
# Logging parameter settings at each launch of training script
# This will help ensure nothing goes awry in reloading a model and we consistently use the same hyperparameter settings.
logger.Log("FIXED_PARAMETERS\n %s" % FIXED_PARAMETERS)
######################### LOAD DATA #############################
logger.Log("Loading data")
training_snli = load_nli_data(FIXED_PARAMETERS["training_snli"], snli=True)
dev_snli = load_nli_data(FIXED_PARAMETERS["dev_snli"], snli=True)
test_snli = load_nli_data(FIXED_PARAMETERS["test_snli"], snli=True)
training_mnli = load_nli_data(FIXED_PARAMETERS["training_mnli"])
dev_matched = load_nli_data(FIXED_PARAMETERS["dev_matched"])
dev_mismatched = load_nli_data(FIXED_PARAMETERS["dev_mismatched"])
# test_matched = load_nli_data(FIXED_PARAMETERS["test_matched"])
# test_mismatched = load_nli_data(FIXED_PARAMETERS["test_mismatched"])
# if 'temp.jsonl' in FIXED_PARAMETERS["test_matched"]:
#     # Removing temporary empty file that was created in parameters.py
#     os.remove(FIXED_PARAMETERS["test_matched"])
#     logger.Log("Created and removed empty file called temp.jsonl since test set is not available.")
# Build the n-gram vocabularies (or reload cached ones), then index/pad every split.
dictpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + ".p"
if not os.path.isfile(dictpath):
    logger.Log("Building dictionary")
    if FIXED_PARAMETERS["alpha"] == 0:
        word_indices_uni, word_indices_bi, word_indices_tri = build_dictionary_ngrams([training_mnli])
    else:
        word_indices_uni, word_indices_bi, word_indices_tri = build_dictionary_ngrams([training_mnli, training_snli])
    logger.Log("Padding and indexifying sentences")
    sentences_to_padded_index_sequences_ngrams(word_indices_uni, word_indices_bi, word_indices_tri, [training_mnli, training_snli, dev_matched, dev_mismatched, dev_snli, test_snli])
    # pickle.dump(word_indices_uni, word_indices_bi, word_indices_tri, open(dictpath, "wb"))
else:
    logger.Log("Loading dictionary from %s" % (dictpath))
    # NOTE(review): `pickle` is referenced here but never imported in this cell — verify.
    word_indices_uni, word_indices_bi, word_indices_tri = pickle.load(open(dictpath, "rb"))
    logger.Log("Padding and indexifying sentences")
    sentences_to_padded_index_sequences_ngrams(word_indices_uni, word_indices_bi, word_indices_tri, [training_mnli, training_snli, dev_matched, dev_mismatched, dev_snli, test_snli])
logger.Log("Loading embeddings")
loaded_embeddings = loadEmbedding_rand(FIXED_PARAMETERS["embedding_data_path"], word_indices_uni)
# Exploratory cells: peek at the bigram index and the first training example.
word_indices_bi[PADDING]
training_mnli[0]
sent = training_mnli[0]['sentence1']
def tokenize(string):
    """Strip parse parentheses from a binary-parse string and split on whitespace."""
    without_parens = re.sub(r'[()]', '', string)
    return without_parens.split()
# Exploratory: bigram the first training sentence.
bigrammed_sent = list(nltk.bigrams(tokenize(sent)))
bigrammed_sent
# bigrams
# Manually index/pad one sentence to seq_length using the bigram vocabulary.
# NOTE(review): `bigrams` is only assigned further below, and PADDING/UNKNOWN
# come from the starred imports — this cell relies on out-of-order notebook
# execution; verify before running top-to-bottom.
sent2 = [0]*FIXED_PARAMETERS["seq_length"]
token_sequence = list(nltk.bigrams(tokenize(sent)))
padding = FIXED_PARAMETERS["seq_length"] - len(token_sequence)
for i in range(FIXED_PARAMETERS["seq_length"]):
    if i >= len(token_sequence):
        index = bigrams[PADDING]
    else:
        if token_sequence[i] in bigrams:
            index = bigrams[token_sequence[i]]
        else:
            index = bigrams[UNKNOWN]
    sent2[i] = index
sent2
bigrams = word_indices_bi.values()
len(word_indices_bi)
len(word_indices_uni)
# Drop the 500 most and 500 least frequent bigrams from the vocabulary.
bigrams = collections.Counter(word_indices_bi)
most_common = bigrams.most_common(500)
least_common = bigrams.most_common()[-500:]
words_to_get_rid_off = dict(most_common+least_common)
words_to_get_rid_off = words_to_get_rid_off.keys()
# NOTE(review): dict.iteritems() is Python 2 only; under Python 3 this raises AttributeError.
bigrams_fin = {k:v for k,v in bigrams.iteritems() if k not in words_to_get_rid_off}
bigrams_fin
a = [i for i in range(10)]
b = [i for i in range(20)]
a,b
import pickle
# NOTE(review): pickle.dump(obj, file) takes one object; passing (a, b, file)
# treats `b` as the file argument — probably meant pickle.dump((a, b), open(...)).
pickle.dump(a,b,open('./test1.p',"wb"))
```
|
github_jupyter
|
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import pandas as pd
import hashlib
import shutil
import glob
import time
import re
import os
from tqdm import tqdm
from datetime import datetime
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
class Net(nn.Module):
    """1-D text CNN over an embedded token-id sequence.

    Pipeline: embedding lookup -> one Conv2d with `filterNumber` filters of
    shape (filterWidth, embeddingDim) -> global max over the sequence axis ->
    three-layer fully-connected head ending in a sigmoid (binary probability).
    """
    def __init__(self, sequenceSize=20000, embeddingDim=128, vocabularySize=2**16, filterWidth=5, filterNumber=1024):
        super(Net, self).__init__()
        # Hyperparameters kept as attributes; filterNumber is reused in forward().
        self.sequenceSize = sequenceSize
        self.embeddingDim = embeddingDim
        self.vocabularySize = vocabularySize
        self.filterWidth = filterWidth
        self.filterNumber = filterNumber
        # Submodules are created in the same order as before so that seeded
        # parameter initialization stays reproducible.
        self.embedding = nn.Embedding(self.vocabularySize, self.embeddingDim)
        self.conv = nn.Sequential(
            nn.Conv2d(1, self.filterNumber, (self.filterWidth, self.embeddingDim)),
            nn.BatchNorm2d(self.filterNumber),
            nn.ReLU()
        )
        self.fc = nn.Sequential(
            nn.Linear(self.filterNumber , 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )
    def forward(self, x):
        # x: (batch, 1, seq) token ids -> (batch, 1, seq, embeddingDim)
        embedded = self.embedding(x)
        # Convolution collapses the embedding axis: (batch, filters, seq', 1)
        features = self.conv(embedded)
        # Global max-pool over the sequence positions.
        pooled = features.max(dim=2)[0]
        flat = pooled.view(-1, self.filterNumber)
        return self.fc(flat)
class SampleDataset(Dataset):
    """Dataset of parquet feature files; each item is a fixed-length token sequence.

    Each parquet file holds per-function token arrays in column *featureName*;
    __getitem__ shuffles the rows (time-seeded, so deliberately different
    between passes), concatenates the arrays, and pads/truncates the result
    to *sequenceSize* tokens.
    """
    def __init__(self, filePathList, labels, sequenceSize=20000, featureName='functionMethodCallsArgs'):
        self.filePathList = filePathList
        self.labels = labels
        self.sequenceSize = sequenceSize
        self.featureName = featureName

    def __len__(self):
        return len(self.filePathList)

    def __getitem__(self, idx):
        df = pd.read_parquet(self.filePathList[idx])
        # Derive a per-call seed from the sub-second clock so row order differs
        # between epochs (intentional, non-reproducible augmentation).
        seed = int(round(time.time()%1, 6) * 1000000)
        order = np.random.RandomState(seed).permutation(len(df))
        tokens = np.concatenate(df.iloc[order][self.featureName].values)
        # Truncate long sequences; zero-pad short ones to the fixed length.
        if len(tokens) > self.sequenceSize:
            tokens = tokens[:self.sequenceSize]
        else:
            tokens = np.concatenate((tokens, np.zeros([self.sequenceSize - len(tokens)])))
        return (torch.from_numpy(tokens).long(), self.labels[idx], self.filePathList[idx])
def train(model, optimizer, dataLoader, device):
    """Run one training epoch.

    Returns (labels, predictions, loss) where labels/predictions are
    concatenated numpy arrays over all batches and loss is the sum of
    per-batch mean losses divided by the total sample count.
    """
    epoch_loss = 0.0
    collected_labels = []
    collected_preds = []
    model.train()
    for inputs, labels, _ in dataLoader:
        inputs = inputs.unsqueeze(1).to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        # Hard predictions: threshold the sigmoid outputs at 0.5.
        predicted = (outputs > 0.5).squeeze().long()
        loss = F.binary_cross_entropy(outputs.squeeze(), labels.float())
        loss.backward()
        optimizer.step()
        collected_labels.append(labels.cpu().numpy())
        collected_preds.append(predicted.cpu().numpy())
        epoch_loss += loss.item()
    labels = np.concatenate(collected_labels)
    predicted = np.concatenate(collected_preds)
    return labels, predicted, epoch_loss / len(predicted)
def assess(model, dataLoader, device):
    """Evaluate without gradients.

    Returns (labels, predictions, loss, probabilities, paths). Batches of
    size 1 are skipped when collecting arrays (their squeezed outputs would
    be 0-d and break concatenation), but their loss still accumulates.
    """
    total_loss = 0.0
    collected_labels, collected_preds = [], []
    collected_probs, collected_paths = [], []
    with torch.no_grad():
        model.eval()
        for inputs, labels, paths in dataLoader:
            inputs = inputs.unsqueeze(1).to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            # Hard predictions: threshold the sigmoid outputs at 0.5.
            predicted = (outputs > 0.5).squeeze().long()
            loss = F.binary_cross_entropy(outputs.squeeze(), labels.float())
            if len(inputs) > 1:
                collected_labels.append(labels.cpu().numpy())
                collected_preds.append(predicted.cpu().numpy())
                collected_probs.append(outputs.squeeze().cpu().numpy())
                collected_paths.append(paths)
            total_loss += loss.item()
    labels = np.concatenate(collected_labels)
    predicted = np.concatenate(collected_preds)
    proba = np.concatenate(collected_probs)
    paths = np.concatenate(collected_paths)
    return labels, predicted, total_loss / len(predicted), proba, paths
def trainModel(ws, modelTag, epochNum, trainLoader, validLoader, device, lr=3e-4, weightDecay=9e-5):
    """Train a fresh Net for *epochNum* epochs, checkpointing every epoch.

    Logs per-epoch train/validation F1 and loss to ./traces/<ws>/logs, saves
    the model state dict per epoch, and returns a DataFrame with one row per
    epoch (checkpoint path, validation labels/predictions/probabilities,
    scores and losses).
    """
    model = Net()
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weightDecay)
    # Reduce the learning rate when the monitored loss plateaus.
    scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True, patience=5, factor=0.8)
    outputlogFilePath = f'./traces/{ws}/logs'
    outputtracesPath = f'./traces/{ws}'
    #shutil.rmtree(outputtracesPath)
    #os.mkdir(outputtracesPath)
    result_lst = list()
    message = '----------'
    with open(outputlogFilePath, 'a') as writer:
        writer.write(message + '\n')
    print(message)
    for epoch in range(epochNum):
        tlabel, tpredicted, tloss = train(model, optimizer, trainLoader, device)
        # BUG FIX: the last two names were both "vproba", so the stored
        # 'proba' column actually held the file paths returned by assess().
        vlabel, vpredicted, vloss, vproba, vpaths = assess(model, validLoader, device)
        message = f'Train: {modelTag} '
        message += '[{:04d}] '.format(epoch)
        tf1score = f1_score(tlabel, tpredicted)
        message += 'TF1: {:2.4f}, '.format(tf1score*100)
        message += 'Tloss: {:2.8f}, '.format(tloss)
        vf1score = f1_score(vlabel, vpredicted)
        message += 'VF1: {:2.4f}, '.format(vf1score*100)
        message += 'VLoss: {:2.8f},'.format(vloss)
        with open(outputlogFilePath, 'a') as writer:
            writer.write(message + '\n')
        print(message)
        modelOutputPath = f'{outputtracesPath}/model_{modelTag}_{epoch:03d}.pth'
        torch.save(model.state_dict(), modelOutputPath)
        result_lst.append((epoch, modelOutputPath, vlabel, vpredicted, vproba, vf1score, vloss, tf1score, tloss))
        # Note: the scheduler steps on the *training* loss, not validation loss.
        scheduler.step(tloss)
    df = pd.DataFrame(result_lst,
                      columns=['epoch', 'path', 'labels', 'predicted', 'proba', 'vf1score', 'vloss', 'tf1score', 'tloss'])
    df.to_parquet(f'{outputtracesPath}/{modelTag}.parquet')
    message = '----------'
    with open(outputlogFilePath, 'a') as writer:
        writer.write(message + '\n')
    print(message)
    return df
def evaluate(ws, modelPathList, dataloader, device, numberFragments=1):
    """Run every checkpoint in *modelPathList* over *dataloader* and collect results.

    Each model is assessed *numberFragments* times (the dataset's time-seeded
    row shuffling makes repeated passes differ); F1 and the raw assess()
    outputs are logged and returned as one DataFrame row per pass.
    """
    modelResultList = []
    outputlogFilePath = f'./traces/{ws}/logs'
    for modelPath in modelPathList:
        for fragment in range(numberFragments):
            # Re-load the checkpoint into a fresh model for each pass.
            mdl = Net().to(device)
            mdl.load_state_dict(torch.load(modelPath))
            mdl.eval()
            modelResult = assess(mdl, dataloader, device)
            modelF1Score = f1_score(modelResult[0], modelResult[1])
            modelResultList.append((modelPath, modelF1Score,) + modelResult)
            message = f'Evaluate: '
            message += f'ModelPath={modelPath} Fragment={fragment:02d} '
            message += f'score={modelF1Score}'
            print(message)
            with open(outputlogFilePath, 'a') as writer:
                writer.write(message + '\n')
    # Columns mirror assess()'s return order: labels, predictions, loss, proba, paths.
    return pd.DataFrame(modelResultList, columns=['name', 'f1score', 'Truth', 'Predicted', 'loss', 'Proba', 'Path'])
def getDataloaders(dataset_df, test_df, batchSize=32, numWorkers=16, trainPercentage=0.7, validPercentage=0.8):
    """Split *dataset_df* into train/valid loaders and wrap *test_df* in a test loader.

    The split is a random permutation: the first *trainPercentage* fraction
    goes to training, the remainder to validation. *validPercentage* is
    currently unused (the commented-out line shows the original 3-way split).
    """
    shuffled = np.random.permutation(len(dataset_df))
    cut = int(trainPercentage * len(dataset_df))
    train_df = dataset_df.iloc[shuffled[:cut]]
    valid_df = dataset_df.iloc[shuffled[cut:]]
    #test_df = dataset_df.iloc[rand_idx[int(validPercentage * len(dataset_df)):]]
    # Show split sizes and class balance for a sanity check.
    for frame in (train_df, valid_df, test_df):
        print(len(frame))
        print(frame.label.value_counts())
    trainLoader = DataLoader(SampleDataset(train_df.filePath.values, train_df.label.values),
                             batch_size=batchSize, shuffle=True, num_workers=numWorkers)
    validLoader = DataLoader(SampleDataset(valid_df.filePath.values, valid_df.label.values),
                             batch_size=2*batchSize, shuffle=False, num_workers=numWorkers)
    testLoader = DataLoader(SampleDataset(test_df.filePath.values, test_df.label.values),
                            batch_size=2*batchSize, shuffle=False, num_workers=numWorkers)
    return trainLoader, validLoader, testLoader
def evalDataset(ws, result_df, probaUpperBorn = 0.9, probaLowerBorn = 0.1):
    """Ensemble the per-model probabilities in *result_df* and log coverage/F1.

    Probabilities are averaged across models; samples whose mean probability
    is outside (probaLowerBorn, probaUpperBorn) count as confidently labeled
    ("v" prefix), the rest ("e" prefix) remain uncertain. Results are printed
    and appended to the workspace log; nothing is returned.
    """
    outputlogFilePath = f'./traces/{ws}/logs'
    # Stack each model's probability vector into shape (n_models, n_samples).
    results = np.vstack(result_df.Proba.values)
    # Truth/paths are identical across rows; take them from the first.
    truth = result_df.Truth.iloc[0]
    paths = result_df.Path.iloc[0]
    result_mean = results.mean(axis=0)
    predicted = (result_mean > 0.5).astype('int')
    f1score = f1_score(truth, predicted)
    # Confident subset: mean probability in the extreme tails.
    vtruth = truth[(result_mean >= probaUpperBorn) | (result_mean <= probaLowerBorn)]
    vpaths = paths[(result_mean >= probaUpperBorn) | (result_mean <= probaLowerBorn)]
    vresult_prob = result_mean[(result_mean >= probaUpperBorn) | (result_mean <= probaLowerBorn)]
    vpredicted = (vresult_prob > 0.5).astype('int')
    vcoverage = (len(vtruth)/len(truth))
    vextendSize = len(vtruth)
    vf1score = f1_score(vtruth, vpredicted)
    # Uncertain subset: mean probability strictly between the two thresholds.
    etruth = truth[(result_mean < probaUpperBorn) & (result_mean > probaLowerBorn)]
    epaths = paths[(result_mean < probaUpperBorn) & (result_mean > probaLowerBorn)]
    eresult_prob = result_mean[(result_mean < probaUpperBorn) & (result_mean > probaLowerBorn)]
    epredicted = (eresult_prob > 0.5).astype('int')
    ecoverage = (len(etruth)/len(truth))
    erestSize = len(etruth)
    ef1score = f1_score(etruth, epredicted)
    message = f'Extend: '
    message += f'f1score={f1score*100:2.4f}, '
    message += f'vcoverage={vcoverage*100:2.4f}, vf1score={vf1score*100:2.4f}, vexentdSize={vextendSize}, '
    message += f'ecoverage={ecoverage*100:2.4f}, ef1score={ef1score*100:2.4f}, erestSize={erestSize}'
    print(message)
    with open(outputlogFilePath, 'a') as writer:
        writer.write(message + '\n')
#
# Experiment driver: train ensembles on increasingly large random subsets of
# the AndroZoo dataset and evaluate each ensemble on the full test set.
ws = 'studyWS01'
epochNum = 100
device = torch.device('cuda:5')
ensembleSize = 10  # number of best checkpoints kept per ensemble
trainPercentageParam = 0.8
validPercentageParam = 0.9
outputlogFilePath = f'./traces/{ws}/logs'
outputtracesPath = f'./traces/{ws}'
# NOTE(review): os.mkdir raises FileExistsError if the workspace already exists.
os.mkdir(outputtracesPath)
test_df = pd.read_parquet('dataset/androzooDone_meta.parquet')
# Label is 1 when VirusTotal reports zero detections — TODO confirm intended polarity.
test_df['label'] = (test_df.vt_detection == 0).apply(int)
test_df['filePath'] = '/ws/mnt/local/data/output/datasets/zoo/' + test_df.sha256
dataset_metaList = [10000, 20000, 50000, 100000]
for sizeMeta in dataset_metaList:
    currentTag = str(sizeMeta)
    message = '######## '
    message += currentTag
    with open(outputlogFilePath, 'a') as writer:
        writer.write(message + '\n')
    print(message)
    #
    # Fixed random_state so each run samples the same subset per size.
    dataset_df = test_df.sample(sizeMeta, random_state=54)
    #
    trainLoader, validLoader, testLoader = getDataloaders(dataset_df, test_df, trainPercentage=trainPercentageParam,
                                                          validPercentage=validPercentageParam)
    #
    models_df = trainModel(ws, f'train_{currentTag}', epochNum, trainLoader, validLoader, device)
    # Keep the checkpoints with the lowest validation (then training) loss.
    models_df.sort_values(by=['vloss', 'tloss'], inplace=True)
    selectedModelPaths = models_df.path.iloc[:ensembleSize].tolist()
    #
    evalresult_df = evaluate(ws, selectedModelPaths, testLoader, device)
    #
    evalDataset(ws, evalresult_df, probaUpperBorn = 0.8, probaLowerBorn = 0.2)
    #
    # Persist per-size results (checkpoint table + ensemble evaluation).
    outputPath = f'traces/{ws}/{currentTag}.pickle'
    currentResults = pd.DataFrame([(currentTag, models_df, evalresult_df)], columns=['TimeTag', 'models', 'evalResuls'])
    currentResults.to_pickle(outputPath)
    #
    message = '########'
    with open(outputlogFilePath, 'a') as writer:
        writer.write(message + '\n')
    print(message)
```
|
github_jupyter
|
# Testing `TFNoiseAwareModel`
We'll start by testing the `textRNN` model on a categorical problem from `tutorials/crowdsourcing`. In particular we'll test for (a) basic performance and (b) proper construction / re-construction of the TF computation graph both after (i) repeated notebook calls, and (ii) with `GridSearch` in particular.
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
# Point Snorkel at a local SQLite crowdsourcing database before importing it.
os.environ['SNORKELDB'] = 'sqlite:///{0}{1}crowdsourcing.db'.format(os.getcwd(), os.sep)
from snorkel import SnorkelSession
session = SnorkelSession()
```
### Load candidates and training marginals
```
from snorkel.models import candidate_subclass
from snorkel.contrib.models.text import RawText
# 5-way categorical candidate type over a single tweet span.
Tweet = candidate_subclass('Tweet', ['tweet'], cardinality=5)
# Split 0 = training; order by id for deterministic iteration.
train_tweets = session.query(Tweet).filter(Tweet.split == 0).order_by(Tweet.id).all()
len(train_tweets)
from snorkel.annotations import load_marginals
train_marginals = load_marginals(session, train_tweets, split=0)
train_marginals.shape
```
### Train `LogisticRegression`
```
# Simple unigram featurizer
def get_unigram_tweet_features(c):
    """Yield (token, 1) for every whitespace-separated token of the candidate's tweet text."""
    for token in c.tweet.text.split():
        yield token, 1
# Construct feature matrix
from snorkel.annotations import FeatureAnnotator
featurizer = FeatureAnnotator(f=get_unigram_tweet_features)
%time F_train = featurizer.apply(split=0)
F_train
# Reuse the training feature space on the test split.
%time F_test = featurizer.apply_existing(split=1)
F_test
from snorkel.learning.tensorflow import LogisticRegression
model = LogisticRegression(cardinality=Tweet.cardinality)
# This (non-sparse) implementation requires a dense matrix.
model.train(F_train.todense(), train_marginals)
```
### Train `SparseLogisticRegression`
Note: Testing doesn't currently work with `LogisticRegression` above, but no real reason to use that over this...
```
from snorkel.learning.tensorflow import SparseLogisticRegression
model = SparseLogisticRegression(cardinality=Tweet.cardinality)
# Sparse variant trains directly on the sparse feature matrix.
model.train(F_train, train_marginals, n_epochs=50, print_freq=10)
import numpy as np
test_labels = np.load('crowdsourcing_test_labels.npy')
acc = model.score(F_test, test_labels)
print(acc)
assert acc > 0.6
# Test with batch size s.t. N % batch_size == 1...
model.score(F_test, test_labels, batch_size=9)
```
### Train basic LSTM
With dev set scoring during execution (note we use test set here to be simple)
```
from snorkel.learning.tensorflow import TextRNN
# Split 1 is used as the "dev" set here purely for testing convenience.
test_tweets = session.query(Tweet).filter(Tweet.split == 1).order_by(Tweet.id).all()
train_kwargs = {
    'dim': 100,
    'lr': 0.001,
    'n_epochs': 25,
    'dropout': 0.2,
    'print_freq': 5
}
lstm = TextRNN(seed=123, cardinality=Tweet.cardinality)
lstm.train(train_tweets, train_marginals, X_dev=test_tweets, Y_dev=test_labels, **train_kwargs)
acc = lstm.score(test_tweets, test_labels)
print(acc)
assert acc > 0.60
# Test with batch size s.t. N % batch_size == 1...
lstm.score(test_tweets, test_labels, batch_size=9)
```
### Run `GridSearch`
```
from snorkel.learning.utils import GridSearch
# Searching over learning rate
param_ranges = {'lr': [1e-3, 1e-4], 'dim': [50, 100]}
model_class_params = {'seed' : 123, 'cardinality': Tweet.cardinality}
# Fixed hyperparameters; 'dim' here is overridden by the searched values above.
model_hyperparams = {
    'dim': 100,
    'n_epochs': 20,
    'dropout': 0.1,
    'print_freq': 10
}
searcher = GridSearch(TextRNN, param_ranges, train_tweets, train_marginals,
                      model_class_params=model_class_params,
                      model_hyperparams=model_hyperparams)
# Use test set here (just for testing)
lstm, run_stats = searcher.fit(test_tweets, test_labels)
acc = lstm.score(test_tweets, test_labels)
print(acc)
assert acc > 0.60
```
### Reload saved model outside of `GridSearch`
```
# Reload the best checkpoint written by GridSearch into a fresh model.
lstm = TextRNN(seed=123, cardinality=Tweet.cardinality)
lstm.load('TextRNN_best', save_dir='checkpoints/grid_search')
acc = lstm.score(test_tweets, test_labels)
print(acc)
assert acc > 0.60
```
### Reload a model with different structure
```
# Loading weights from a different hyperparameter run is expected to score worse.
lstm.load('TextRNN_0', save_dir='checkpoints/grid_search')
acc = lstm.score(test_tweets, test_labels)
print(acc)
assert acc < 0.60
```
# Testing `GenerativeModel`
### Testing `GridSearch` on crowdsourcing data
```
from snorkel.annotations import load_label_matrix
import numpy as np
L_train = load_label_matrix(session, split=0)
train_labels = np.load('crowdsourcing_train_labels.npy')
from snorkel.learning import GenerativeModel
# Grid-search the generative model over the number of training epochs.
searcher = GridSearch(GenerativeModel, {'epochs': [0, 10, 30]}, L_train)
# Use training set labels here (just for testing)
gen_model, run_stats = searcher.fit(L_train, train_labels)
acc = gen_model.score(L_train, train_labels)
print(acc)
assert acc > 0.97
```
|
github_jupyter
|
# Analyze population data from https://covidtracking.com
**Note:** This is a Jupyter notebook which is also available as its executable export as a Python 3 script (therefore with automatically generated comments).
### Sept 29,2021: Obsolete data
Our source https://covidtracking.com/data/api says:
- `As of March 7, 2021 we are no longer collecting new data. Learn about available federal data at https://covidtracking.com/analysis-updates/federal-covid-data-101-how-to-find-data.`
- https://covidtracking.com/analysis-updates/simple-covid-data
- https://covidtracking.com/about-data/data-summary
- https://covidtracking.com/about-data/federal-resources
**The following loads and analyses data up to March 7, 2021.**
# Libraries
```
import sys,os
# Prepend the project's virtualenv site-packages and source tree to sys.path.
addPath= [os.path.abspath("../venv/lib/python3.9/site-packages/"),
          os.path.abspath("../source")]
addPath.extend(sys.path)
sys.path = addPath
# Sys import
import sys, os, re
# Common imports
import math
import numpy as NP
import numpy.random as RAND
import scipy.stats as STATS
from scipy import sparse
from scipy import linalg
# Better formatting functions
from IPython.display import display, HTML
from IPython import get_ipython
import matplotlib as MPL
import matplotlib.pyplot as PLT
import seaborn as SNS
SNS.set(font_scale=1)
# Python programming
from itertools import cycle
from time import time
import datetime
# Using pandas
import pandas as PAN
import xlrd
# NOTE(review): hard-coded absolute path — verify on other machines.
sys.path.append('/home/alain/test/MachLearn/COVID/source')
import libApp.appUSA as appUSA
import warnings
warnings.filterwarnings('ignore')
print("For now, reduce python warnings, I will look into this later")
```
### Import my own modules
The next cell attempts to give the user some information if things are improperly set up.
Intended to work both in Jupyter and when executing the Python file directly.
```
# Make the project's source tree importable when running under Jupyter.
if not get_ipython() is None and os.path.abspath("../source/") not in sys.path:
    sys.path.append(os.path.abspath("../source/"))
try:
    from lib.utilities import *
    from lib.figureHelpers import *
    from lib.DataMgrJSON import *
    from lib.DataMgr import *
    from lib.pandaUtils import *
except Exception as err:
    # Print a setup hint appropriate to the execution mode, then re-raise.
    print("Could not find library 'lib' with contents 'DataGouvFr' ")
    if get_ipython() is None:
        print("Check the PYTHONPATH environment variable which should point to 'source' wich contains 'lib'")
    else:
        print("You are supposed to be running in JupySessions, and '../source/lib' should exist")
    raise err
from IPython.core.display import display, HTML
# Widen the notebook cells to the full browser width.
display(HTML("<style>.container { width:100% !important; }</style>"))
```
## Check environment
It is expected that:
- your working directory is named `JupySessions`,
- that it has subdirectories
- `images/*` where generated images may be stored to avoid overcrowding.
- At the same level as your working dir there should be directories
- `../data` for storing input data and
- `../source` for python scripts.
My package library is in `../source/lib`, and users running under Python (not in Jupyter) should
set their PYTHONPATH to include "../source" ( *or whatever appropriate* ).
```
# Verify the expected directory layout, then prepare the chapter-4 image manager.
checkSetup(chap="Chap04")
ImgMgr = ImageMgr(chapdir="Chap04")
```
# Load Data
## Functions
## Load CSV and XLSX data from remote
The `dataFileVMgr` will manage a cache of data files in `../dataUSCovidTrack`.
We check what is in the cache/data directory; for each file, we identify the latest version,
and list this below to make sure. Files of interest are documented in `.filespecs.json`
Consulted: https://github.com/COVID19Tracking/covid-tracking-api
Downloaded: see `.filespecs.json`
```
# Manage the cached data files: fetch remote metadata, then update the cache.
dataFileVMgr = manageAndCacheFilesJSONHandwritten("../dataUSCovidTrack")
dataFileVMgr.getRemoteInfo()
dataFileVMgr.updatePrepare()
dataFileVMgr.cacheUpdate()
print("Most recent versions of files in data directory:")
for f in dataFileVMgr.listMostRecent() :
    print(f"\t{f}")
# Shorthand: resolve a base filename to its most recent cached version.
last = lambda x: dataFileVMgr.getRecentVersion(x,default=True)
```
This ensures we load the most recent version, so that it is not required to update the list
below. The timestamps shown in the following sequence will be updated by the call to `getRecentVersion`.
```
# Resolve the most recent cached version of each input file.
USStatesDailyCSV = last('CTStatesDaily.csv' )
USStatesInfoCSV = last('CTStatesInfo.csv')
USDailyCSV = last('CTUSDaily.csv')
USAPopChangeCSV = last('USACensusPopchange.csv')
USAPopChangeRankCSV = last('USACensusPopchangeRanks.csv')
```
Now load the stuff
```
# Prefix a cached filename with the data directory.
ad = lambda x: "../dataUSCovidTrack/"+x
data_USStatesDaily = read_csvPandas(ad(USStatesDailyCSV) , error_bad_lines=False, sep="," )
data_USStatesInfo = read_csvPandas(ad(USStatesInfoCSV), error_bad_lines=False, sep="," )
data_USDaily = read_csvPandas(ad(USDailyCSV), error_bad_lines=False, sep="," )
data_USAPopChange = read_csvPandas(ad(USAPopChangeCSV) , error_bad_lines=False, sep="," )
data_USAPopChangeRank = read_csvPandas(ad(USAPopChangeRankCSV), error_bad_lines=False, sep="," )
```
Show the shape of the loaded data:
```
def showBasics(data,dataName):
    """Print *dataName* (left-padded to 24 chars) and the shape of *data*."""
    print(f"{dataName:24}\thas shape {data.shape}")
# (frame, name) pairs for the tables loaded above.
dataListDescr = ( (data_USStatesDaily, "data_USStatesDaily"),
                  (data_USStatesInfo, "data_USStatesInfo"),
                  (data_USDaily , "data_USDaily"),
                  (data_USAPopChange, "data_USAPopChange"),
                  (data_USAPopChangeRank, "data_USAPopChangeRank"),
                )
# One-line shape summary per table.
for (dat,name) in dataListDescr:
    showBasics(dat,name)
# Summary statistics (transposed) per table.
# NOTE(review): no name here starts with "meta_", so this guard never fires;
# presumably kept for symmetry with sibling notebooks — confirm.
for (dat,name) in dataListDescr:
    if name[0:5]=="meta_": continue
    print(f"\nDescription of data in '{name}'\n")
    display(dat.describe().transpose())
# Dtype / non-null-count information per table.
for (dat,name) in dataListDescr:
    if name[0:5]=="meta_": continue
    print(f"\nInformation about '{name}'\n")
    dat.info()
```
### Get demographics information
The metadata is in `../dataUSCovidTrack/*.pdf`. We need to preprocess the demographics information for ease of use below. Notice that column `STATE` features state's **FIPS codes**.
```
# Columns needed for per-state demographics (STATE holds FIPS codes).
demogrCols=("SUMLEV","STATE","NAME","POPESTIMATE2019" )
demogrX = data_USAPopChange.loc[:,demogrCols]
# NOTE(review): this bare comparison is a no-op (its result is discarded);
# the actual filtering to state-level rows (SUMLEV == 40) is on the next line.
demogrX["SUMLEV"]== 40
demogr = demogrX[demogrX["SUMLEV"]== 40 ].copy()
# Columns of interest from the daily per-state tracking data.
dtCols = ('date','fips', 'state',
          'positive', 'negative',
          'hospitalizedCurrently', 'hospitalizedCumulative',
          'inIcuCurrently', 'inIcuCumulative',
          'onVentilatorCurrently', 'onVentilatorCumulative',
          'recovered','death', 'hospitalized'
         )
dt = data_USStatesDaily.loc[ :, dtCols].copy()
# Parse YYYYMMDD integer dates and derive days elapsed since the series start.
dt["dateNum"] = PAN.to_datetime(dt.loc[:,"date"], format="%Y%m%d")
dateStart = dt["dateNum"].min()
dateEnd = dt["dateNum"].max()
dateSpan = dateEnd - dateStart
print(f"Our statistics span {dateSpan.days+1} days, start: {dateStart} and end {dateEnd}")
dt["elapsedDays"] = (dt["dateNum"] - dateStart).dt.days
# Index and group by state for the per-state figures below.
dt = dt.set_index("state")
dtg = dt.groupby("state")
#dtx = dt[dt.index == "Europe"]
#dtg = dtx.groupby("countriesAndTerritories")
```
Now, the figure making process is generalized into this class, since we plan to emit multiple figures.
First attempt, just get the first!
```
# Series to plot for each state.
plotCols=("recovered","death","hospitalized")
# Multi-panel per-state figure (project helper appUSA.perStateFigure).
psFig = appUSA.perStateFigure(dateStart)
psFig.getDemographics(data_USAPopChange)
psFig.initPainter(subnodeSpec=15, maxCol=3)
psFig.mkImage(dtg,plotCols)
ImgMgr.save_fig("FIG001")
# States whose abbreviations could not be resolved, if any.
print(f"Had issues with state encodings:{psFig.abbrevIssueList}")
```
## Now select States according to multiple criteria
### Start with most populated states
```
# Select the 15 most populated states and plot them.
tble = psFig.getPopStateTble(dtg)
mostPopulated = tble.sort_values(by=["pop"], ascending=False,).iloc[:15,0].values
psFig2 = appUSA.perStateSelected(dateStart,mostPopulated)
psFig2.getDemographics(data_USAPopChange)
psFig2.initPainter(subnodeSpec=15, maxCol=3)
psFig2.mkImage(dtg,plotCols)
ImgMgr.save_fig("FIG002")
print(f"Had issues with state encodings:{psFig2.abbrevIssueList}")
# Per-state maxima merged with demographics to compute deaths per million.
dtgMax = dtg.max().loc[:,["fips","death","recovered","hospitalized"]]
dtgMerged = PAN.merge(dtgMax.reset_index(), demogr, left_on="fips", right_on="STATE")
dtgMerged["deathPM"]= dtgMerged.loc[:,"death"]/dtgMerged.loc[:,"POPESTIMATE2019"]*1.0e6
# Select the 15 states with the highest deaths-per-million and plot them.
mostDeadly = dtgMerged.sort_values(by=["deathPM"], ascending=False,).iloc[:15,0].values
psFig3 = appUSA.perStateSelected(dateStart,mostDeadly)
psFig3.getDemographics(data_USAPopChange)
psFig3.initPainter(subnodeSpec=15, maxCol=3)
psFig3.mkImage(dtg,plotCols)
ImgMgr.save_fig("FIG003")
print(f"Had issues with state encodings:{psFig3.abbrevIssueList}")
```
|
github_jupyter
|
```
from scripts.setup_libs import *
```
# [CatBoost](https://github.com/catboost/catboost)
Бустинг от Яндекса для категориальных фичей и много чего еще.
Для начала настоятельно рекомендуется посмотреть видео. Там идет основная теория по CatBoost
```
# Embed the CatBoost theory lecture (640x360) in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('UYDwhuyWYSo', width=640, height=360)
```
Резюмируя видео:
Catboost строится на **Oblivious Decision Tree** (ODT, полное бинарное дерево) - это значит, что на каждом уровне дерева во всех вершинах идет разбиение по одному и тому же признаку. Дерево полное и симметричное. Листов - $2^H$, где $H$ - высота дерева и количество используемых фич.
В Catboost куча фичей для скорости и регуляризации.
Регуляризация (стараемся делать как можно более разные деревья):
* Чтобы базовое дерево было небольшое, обычно берется какая-то часть фич (max_features) например $0.1$ от общего числа. В силу большого количества деревьев в композиции, информация не потеряется.
* При построении дерева можно использовать **бутстреп для выборки**.
* При слитинге в дереве к скору можно добавлять случайную величину.
Скорость:
* Так как мы еще до обучения знаем схему дерева (потому что ODT) - мы знаем количество листьев. Количество разных значений будет равно количеству листьев, поэтому на шаге обучения базового дерева давайте приближать не **полный вектор антиградиентов** (который размера количества фич), а **вектор листов**. В этом случае сильно сокращается время выбора наилучшего сплита на каждом этапе обучения базового дерева.
* Бинаризация численных данных, для ускорения нахождения наилучшего сплита. Слабая - равномерная или медианная. Хорошие **MaxLogSum**, **GreedyLogSum**
* На верхних вершинах дерева делаем только один градиентный шаг, на нижних можно несколько.
* **Ordered boosting**
# [Примеры](https://catboost.ai/docs/concepts/python-usages-examples.html#custom-objective-function) работы с CatBoost
Еще одно очень полезное видео, но теперь уже с практикой.
```
# Embed the CatBoost practice video (640x360) in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('xl1fwCza9C8', width=640, height=360)
```
## Простой пример
```
# Toy regression: three 4-feature training rows and two evaluation rows.
train_data = [[1, 4, 5, 6],
              [4, 5, 6, 7],
              [30, 40, 50, 60]]
eval_data = [[2, 4, 6, 8],
             [1, 4, 50, 60]]
train_labels = [10, 20, 30]
# Initialize CatBoostRegressor (deliberately tiny: 2 trees of depth 2)
model = CatBoostRegressor(iterations=2,
                          learning_rate=1,
                          depth=2)
# Fit model
model.fit(train_data, train_labels)
# Get predictions
preds = model.predict(eval_data)
```
## Визуализация
```
rng = np.random.RandomState(31337)
# NOTE(review): load_boston was removed from scikit-learn 1.2; this cell
# requires an older scikit-learn.
boston = load_boston()
y = boston['target']
X = boston['data']
# NOTE(review): kf is created but never used in this notebook chunk.
kf = KFold(n_splits=3, shuffle=True, random_state=rng)
# 75% / 12.5% / 12.5% train/val/test split (unseeded, so not reproducible).
X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.25)
X_val, X_test, y_val, y_test = train_test_split(X_rest, y_rest, test_size=0.5)
# Optimize MAE; additionally track MAPE during training.
cb = CatBoostRegressor(silent=True, eval_metric="MAE", custom_metric=["MAPE"])
```
Тут включена крутая визуализация, с которой можно поиграться, она не работает в Jupyter Lab, но работает в Jupyter Notebook
```
# Train with the live-updating loss widget; two eval sets (validation, test).
cb.fit(X_train, y_train, eval_set=[(X_val , y_val ), (X_test, y_test)], plot=True)
```
## Бинаризации float
Выбрать стратегию бинаризации можно установив параметр *feature_border_type*.
- **Uniform**. Границы выбираются равномерно по значениям;
- **Median**. В каждый бин попадает примерно одинаковое число различных значений;
- **UniformAndQuantiles**. Uniform + Median;
- **MaxLogSum, GreedyLogSum**. Максимизируется значение формулы $\sum_{i=1}^K \log(n_i)$, где $K$ - требуемое кол-во бинов, $n_i$ число объектов в этом бакете;
- **MinEntropy**. Аналогично, но максимизируется энтропия: $-\sum_{i=1}^K n_i \log(n_i)$
```
from sklearn.model_selection import GridSearchCV
# Grid-search over CatBoost's numeric-feature binarization strategies.
params = {"feature_border_type": [
    "Uniform",
    "Median",
    "UniformAndQuantiles",
    "MaxLogSum",
    "GreedyLogSum",
    "MinEntropy"
]}
cb = CatBoostRegressor(silent=True)
grid = GridSearchCV(cb, params)
grid.fit(X, y)
# Report each strategy sorted by mean CV score (ascending).
# NOTE(review): the label says "MSE" but GridSearchCV uses the estimator's
# default scorer for regressors (R^2) here, so the printed name is misleading.
for score, strategy in sorted(zip(grid.cv_results_['mean_test_score'],
                                  grid.cv_results_['param_feature_border_type'].data)):
    print("MSE: {}, strategy: {}".format(score, strategy))
```
## Feature importance
```
cb = CatBoostRegressor(silent=True)
cb.fit(X_train, y_train)
# Print features sorted by importance, least important first.
# NOTE(review): the fstr_type argument was renamed to ``type`` in newer
# catboost releases — confirm against the installed version.
for value, name in sorted(zip(cb.get_feature_importance(fstr_type="FeatureImportance"),
                              boston["feature_names"])):
    print("{}\t{}".format(name, value))
```
# Categorical features
```
from catboost.datasets import titanic
# titanic() returns a (train, test) pair; only the training frame is used.
titanic_df = titanic()
X = titanic_df[0].drop('Survived',axis=1)
y = titanic_df[0].Survived
X.head(5)
# Treat every non-float column as categorical.
is_cat = (X.dtypes != float)
is_cat.to_dict()
is_cat = (X.dtypes != float)
# CatBoost does not accept NaN in categorical features: fill with "NAN".
for feature, feat_is_cat in is_cat.to_dict().items():
    if feat_is_cat:
        X[feature].fillna("NAN", inplace=True)
# Column positions of the categorical features, as CatBoost expects.
cat_features_index = np.where(is_cat)[0]
cat_features_index
X.columns
```
Аналогом для класса DMatrix в катбусте служит класс **catboost.Pool**. Помимо прочего, содержит индексы категориальных факторов и описание пар для режима попарного обучения.
[Подробнее](https://tech.yandex.com/catboost/doc/dg/concepts/python-reference_pool-docpage/)
```
from catboost import Pool
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.85, random_state=1234)
# Pool bundles data, labels, categorical indices and feature names together.
train_pool = Pool(data=X_train,
                  label=y_train,
                  cat_features=cat_features_index,  # explicitly pass the categorical features we want to use
                  feature_names=list(X_train.columns))  # feature names, for convenient visualisation and debugging
test_pool = Pool(data=X_test,
                 label=y_test,
                 cat_features=cat_features_index,
                 feature_names=list(X_test.columns))
from catboost import CatBoostClassifier
from sklearn.metrics import roc_auc_score
# use_best_model keeps the iteration with the best eval-set Accuracy.
model = CatBoostClassifier(eval_metric='Accuracy', use_best_model=True, random_seed=42)
model.fit(train_pool, eval_set=test_pool, metric_period=100)
# ROC AUC over the positive-class probabilities.
y_pred = model.predict_proba(X_test)
roc_auc_score(y_test, y_pred[:, 1])
```
На самом деле в Catboost происходит еще много чего интересного при обработке категорий:
- среднее сглаживается некоторым априорным приближением;
- по факту обучается несколько (3) модели на разных перестановках;
- рассматриваются композиции категориальных факторов (max_ctr_complexity);
- в момент применения модели, новые объекты приписываются в конец перестановки по обучающей выборке и, таким образом, статистика для них считается по всем имеющимся данным;
- таргето-независимые счетчики считаются по всем данным.
- для факторов с небольшим числом различных значений производится OneHotEncoding (параметр one_hot_max_size - максимальное значение для OneHotEncoding'а)
# [Категориальные статистики](https://catboost.ai/docs/concepts/algorithm-main-stages_cat-to-numberic.html)
Одно из основных преимуществ катбуста - обработка категориальных факторов.
Такие факторы заменяются на "счетчики": для каждого значения кат.фактора **по таргету** вычисляется некоторая **статистика** этого значения (счетчик, ctr), например, среднее значение таргета по объектам, которые имеют данное значение категориального фактора. Далее категориальный фактор заменяется на подсчитанные для него статистики (каждое значение фактора на свою статистику).
Будем использовать технику кодирования категориальных признаков средним значением целевого признака.
Основная идея – для каждого значения категориального признака посчитать среднее значение целевого признака и заменить категориальный признак на посчитанные средние.
Давайте попробуем сделать следующую операцию:
* Возьмем категориальную фичу (один столбец). Пусть фича принимает $m$ значений: $l_1, \ldots, l_m$
* Заменим значение $l_k$ на $\frac{1}{N_{l_k}}\sum_{i \in l_k}y_i$ - среднее значение целевой переменной для данного значения категориальной фичи.
* Значениям категориальной фичи в тесте приравниваются средние значения целевой переменной, посчитанные по обучающим данным
```
# Toy train/test frames: one numeric and two categorical columns.
df_train = pd.DataFrame({'float':[1,2,3,4,5],
                         'animal': ['cat', 'dog', 'cat', 'dog', 'cat'],
                         'sign': ['rock', 'rock', 'paper', 'paper', 'paper']})
y_train = np.array([0,1,0,1, 0])
df_test = pd.DataFrame({'float':[6,7,8,9],
                        'animal': ['cat', 'dog', 'cat', 'dog'],
                        'sign': ['rock', 'rock', 'paper', 'paper']})
import warnings
# NOTE(review): silencing *all* warnings can hide real problems; prefer
# filtering specific warning categories.
warnings.filterwarnings("ignore")
def mean_target(df_train, y_train, df_test):
    """Mean-target-encode the categorical columns of a train/test pair.

    For every object-dtype column, each category value is replaced by the
    mean of the target over the training rows carrying that value; the same
    per-category means are then mapped onto the test set.

    Parameters
    ----------
    df_train : pandas.DataFrame
        Training features; object-dtype columns are treated as categorical.
    y_train : array-like
        Target values aligned with ``df_train`` rows.
    df_test : pandas.DataFrame
        Test features with the same columns as ``df_train``.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        Copies of train (with an extra ``'y'`` column) and test, each with a
        new ``<col>_mean`` column per categorical feature.  Test categories
        unseen during training map to NaN.
    """
    # (Removed unused locals ``n`` and ``float_features`` from the original.)
    cat_features = df_train.columns[df_train.dtypes == 'object'].tolist()
    new_X_train = df_train.copy()
    new_X_train['y'] = y_train
    new_X_test = df_test.copy()
    for col in cat_features:
        # Per-category mean of the target, computed on training data only.
        mean_dict = new_X_train.groupby(col)['y'].mean().to_dict()
        new_X_train[col + '_mean'] = df_train[col].map(mean_dict)
        new_X_test[col + '_mean'] = df_test[col].map(mean_dict)
    return new_X_train, new_X_test
# Encode the toy frames.
# NOTE(review): in a notebook only the last expression (X_test) is rendered;
# the bare X_train on the middle line displays nothing.
X_train, X_test = mean_target(df_train, y_train, df_test)
X_train
X_test
```
Данный подход лучше, чем One-Hot, так как при One-Hot мы можем серьезно вылететь за пределы памяти.
#### Важный момент.
В ходе подсчета статистики мы по сути сильно привязываемся к данным. Из-за чего может произойти сильное **переобучение**.
## Накопительные статистики
Такие манипуляции очень легко могут привести к переобучению, потому что в данные подливается информация о метках объектов, после чего происходит обучение.
Поэтому в катбусте делают **накопительные статистики**
Особенности работы с категориальными факторами:
- объекты перемешиваются в случайном порядке;
- для i-го объекта и j-го признака в перестановке **статистика** (счетчик) вычисляется по всем объектам, идущим **до него** с таким же значением признака
- заменяем все категориальные факторы в выборке и обучаем модель
- Тестовую же выборку просто приравниваем к средним значениям по обучающей выборке
```
def late_mean_target(df_train, df_test, y_train):
    """Ordered (cumulative) mean-target encoding, CatBoost-style.

    Training rows are shuffled, and each row's encoding for a categorical
    column is the mean target of the *preceding* rows sharing its category
    (the row's own target is excluded), which limits target leakage.  Test
    rows are encoded with the plain per-category mean over all training data.

    Parameters
    ----------
    df_train, df_test : pandas.DataFrame
        Feature frames; object-dtype columns are treated as categorical.
    y_train : array-like
        Target values aligned with ``df_train`` rows.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        The shuffled training copy (with extra ``'index'``, ``'y'``,
        ``'ones'`` and ``<col>_mean`` columns) and the encoded test copy.

    Notes
    -----
    The shuffle is unseeded, so training-row encodings vary run to run.
    Fixes vs. the original: the helper column was sized off the *global*
    ``X_train`` instead of the local frame; the test-set encoding was
    wrongly divided by ``len(df_train)``; ``groupby(...).apply(cumsum)``
    is replaced by the index-aligned ``groupby(...).cumsum()``.
    """
    cat_features = df_train.columns[df_train.dtypes == 'object'].tolist()
    new_X_test = df_test.copy()
    new_X_train = df_train.copy()
    new_X_train['y'] = y_train
    new_X_train = new_X_train.sample(frac=1).reset_index()  # shuffle rows
    # Helper column of ones: its per-category cumulative sum is a running count.
    new_X_train['ones'] = np.ones((len(new_X_train),))
    for col in cat_features:
        # Full-sample category means for the test rows.
        mean_dict = new_X_train.groupby(col)['y'].mean().to_dict()
        new_X_test[col + '_mean'] = df_test[col].map(mean_dict)
        # Running count and running target sum within each category.
        count = new_X_train.groupby(col)['ones'].cumsum()
        cum = new_X_train.groupby(col)['y'].cumsum()
        # Mean of targets seen so far, excluding the current row's own target.
        new_X_train[col + '_mean'] = (cum - new_X_train['y']) / count
    return new_X_train, new_X_test
# Rebuild the toy frames (mean_target above overwrote X_train/X_test).
df_train = pd.DataFrame({'float':[1,2,3,4,5],
                         'animal': ['cat', 'dog', 'cat', 'dog', 'cat'],
                         'sign': ['rock', 'rock', 'paper', 'paper', 'paper']})
y_train = np.array([0,1,0,1, 0])
df_test = pd.DataFrame({'float':[6,7,8,9],
                        'animal': ['cat', 'dog', 'cat', 'dog'],
                        'sign': ['rock', 'rock', 'paper', 'paper']})
# NOTE(review): only the last expression (X_test) is rendered by the notebook.
X_train, X_test = late_mean_target(df_train, df_test, y_train)
X_train
X_test
```
# Полезные ссылки
* [Tutorial](https://github.com/catboost/tutorials)
* [Github Catboost](https://github.com/catboost/catboost)
* [Статья о Catboost на arxiv](https://arxiv.org/pdf/1706.09516.pdf)
|
github_jupyter
|
### Evaluación 1. Parte Computacional (60 puntos)
#### (Elementos de Probabilidad y Estadística: 3008450)
Se tiene información acerca de 694 propiedades ubicadas en el valle de aburra. La base de datos fue recolectada en el año 2015, e incluye las siguientes variables:
1. valor comercial de la propiedad en millones de pesos `precio`
2. el área de la propiedad en metros cuadrados `mt2`
3. el sector donde está ubicada la propiedad `ubicacion`
4. el estrato socieconómico al que pertenece `estrato`
5. el número de alcobas `alcobas`
6. el número de baños `banos`
7. si tiene o no balcón `balcon`
8. si tiene o no parqueadero `parqueadero`
9. el valor de la administración en millones de pesos `administracion`
10. el avalúo catastral en millones de pesos `avaluo` y si la propiedad tiene o no mejoras desde que fue entregado como nuevo `terminado`.
La base de datos está disponible en: [**https://tinyurl.com/hwhb769**](https://tinyurl.com/hwhb769)
Con estos datos resuelva los siguientes puntos:
1. Recategorice la variable estrato así: `medio-bajo` para los estratos 2 y 3, `medio-alto` para los estratos 4 y 5, y `alto` para el estrato 6.
2. Realice un análisis descriptivo de las variables de la base de datos y resalte las características más destacadas de cada una de ellas. Utilice todos los gráficos y resúmenes que considere pertinentes.
3. Elabore una tabla de doble entrada donde relacione si la propiedad tiene o no mejoras con la nueva categorización de la variable estrato. ¿En cuál de los nuevos estratos se presenta la mayor proporción de propiedades que tienen mejoras? Explique.
4. Elabore un histograma de frecuencias relativas para la variable precio. ¿Qué observa? Comente
5. ¿Es muy diferente el comportamiento de los precios de las propiedades de acuerdo a la ubicación? Comente. Elabore los gráficos y/o resúmenes que considere pertinentes.
6. Grafique un diagrama de dispersión de las variables avalúo y precio. ¿Qué observa? ¿Qué pasa si los puntos graficados se separan por color de acuerdo a la nueva categorización del estrato?. Explique.
7. Calcule el coeficiente de correlación de Pearson entre precio y avalúo. ¿Qué se puede decir sobre este valor?
8. Realice un análisis descriptivo adicional que considere pertinente sobre una o varias de las variables restantes de la base de datos.
```
# importar librerias
import pandas as pd
from bokeh.server.server import Server
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
from bokeh.io import output_notebook, push_notebook, show
from bokeh.layouts import column, row, gridplot
from bokeh.models import ColumnDataSource, DataTable, TableColumn, NumberFormatter, Select, MultiSelect, BoxSelectTool, LassoSelectTool, CategoricalColorMapper
from bokeh.plotting import figure
import numpy as np
import matplotlib.pyplot as plt
from bokeh.palettes import Spectral6
from bokeh.transform import factor_cmap
# Render Bokeh output inline in the notebook.
output_notebook()
# Whitespace-separated file; the real header is on line 5 (header=4).
df = pd.read_csv('https://raw.githubusercontent.com/fhernanb/datos/master/aptos2015',sep='\s+', header=4)
# Recategorise estrato: 2-3 -> medio-bajo, 4-5 -> medio-alto, 6 -> alto.
df['estrato'] = df.estrato.apply(lambda x: 'medio-bajo' if x < 3.1 else('medio-alto' if x < 5.1 else 'alto'))
```
Recategorice la variable estrato así: medio-bajo para los estratos 2 y 3, medio-alto para los estratos 4 y 5, y alto para el estrato 6. <br>
Realice un análisis descriptivo de las variables de la base de datos y resalte las características más destacadas de cada una de ellas. Utilice todos los gráficos y resúmenes que considere pertinentes.
```
def modify_doc(doc):
    """Build the interactive Bokeh dashboard and attach it to *doc*.

    Widgets (six MultiSelects) filter the module-level dataframe ``df``;
    the filtered rows feed a data table, a describe() table, a scatter plot
    and two marginal histograms, all refreshed by ``update``.
    """
    def update():
        # Re-filter df by the current selections of all six MultiSelects.
        current = df[(df.estrato.isin([str(x) for x in estrato.value])) & (df.ubicacion.isin([str(x) for x in ubicacion.value])) &
                     (df.alcobas.isin([int(x) for x in alcobas.value])) & (df.banos.isin([int(x) for x in banos.value])) &
                     (df.parqueadero.isin([str(x) for x in parqueadero.value])) & (df.terminado.isin([str(x) for x in terminado.value]))]
        source.data = current
        # Summary stats for the numeric columns of the filtered subset.
        source_static.data = current.describe()[['precio', 'mt2', 'alcobas', 'banos', 'administracion', 'avaluo']].reset_index()
        corr.xaxis.axis_label = ticker1.value
        corr.yaxis.axis_label = ticker2.value
        # Scatter data: x/y from ticker1/ticker2, colour/legend from ticker4.
        plot_source.data = {'x': current[ticker1.value], 'y': current[ticker2.value], 'legend': [str(x) for x in current[ticker4.value]],
                            'color': current[ticker4.value].replace(dict(zip(current[ticker4.value].unique(),Spectral6)))}
        # NOTE(review): each update *adds* fresh quad glyphs to ph/pv rather
        # than updating a data source, so histogram layers pile up over time.
        # NOTE(review): 'Freedman Diaconis Estimator' is not a bins string
        # numpy accepts (it is 'fd') — selecting it should raise; confirm.
        hist_1, hedges_1 = np.histogram(current[ticker1.value], density=False, bins=ticker3.value)
        ph.quad(bottom=0, left=hedges_1[:-1], right=hedges_1[1:], top=hist_1)
        hist_2, hedges_2 = np.histogram(current[ticker2.value], density=False, bins=ticker3.value)
        pv.quad(bottom=hedges_2[:-1], left=0, right=hist_2, top=hedges_2[1:])
    def ticker_change(attr, old, new):
        # Single callback shared by every widget: recompute everything.
        update()
    # Create Input controls
    estrato = MultiSelect(title="estrato", value=[str(x) for x in df.estrato.unique()], options=[str(x) for x in df.estrato.unique()])
    estrato.on_change('value',ticker_change)
    ubicacion = MultiSelect(title="ubicacion", value=[str(x) for x in df.ubicacion.unique()], options=[str(x) for x in df.ubicacion.unique()])
    ubicacion.on_change('value',ticker_change)
    alcobas = MultiSelect(title="# de alcobas", value=[str(x) for x in df.alcobas.unique()], options=[str(x) for x in df.alcobas.unique()])
    alcobas.on_change('value',ticker_change)
    banos = MultiSelect(title="# de baños", value=[str(x) for x in df.banos.unique()], options=[str(x) for x in df.banos.unique()])
    banos.on_change('value',ticker_change)
    parqueadero = MultiSelect(title="parqueadero", value=[str(x) for x in df.parqueadero.unique()], options=[str(x) for x in df.parqueadero.unique()])
    parqueadero.on_change('value',ticker_change)
    terminado = MultiSelect(title="terminado", value=[str(x) for x in df.terminado.unique()], options=[str(x) for x in df.terminado.unique()])
    terminado.on_change('value',ticker_change)
    # create table view
    source = ColumnDataSource(data=dict(mt2=[], ubicacion=[], estrato=[], alcobas=[], banos=[], balcon=[], parqueadero=[], administracion=[],
                                        avaluo=[], terminado=[]))
    source_static = ColumnDataSource(data=dict())
    columns = [TableColumn(field="mt2", title="tamaño", formatter=NumberFormatter(format="0,0.00")),
               TableColumn(field="ubicacion", title="ubicacion"),
               TableColumn(field="estrato", title="estrato"),
               TableColumn(field="alcobas", title="alcobas"),
               TableColumn(field="banos", title="baños"),
               TableColumn(field="balcon", title="balcon"),
               TableColumn(field="parqueadero", title="parqueadero"),
               TableColumn(field="administracion", title="administracion", formatter=NumberFormatter(format="$0.0")),
               TableColumn(field="avaluo", title="avaluo", formatter=NumberFormatter(format="$0.0")),
               TableColumn(field="terminado", title="terminado")]
    columns_static = [TableColumn(field="metrica", title="metrica"),
                      TableColumn(field="precio", title="precio"),
                      TableColumn(field="mt2", title="tamaño"),
                      TableColumn(field="alcobas", title="alcobas"),
                      TableColumn(field="banos", title="baños"),
                      TableColumn(field="administracion", title="administracion"),
                      TableColumn(field="avaluo", title="avaluo")]
    data_table = DataTable(source=source, columns=columns)
    static_table = DataTable(source=source_static, columns=columns_static)
    # plot controls: x column, y column, binning rule, colour-by column
    ticker_options = ['precio', 'mt2', 'administracion', 'avaluo']
    ticker1 = Select(value='precio', options=ticker_options)
    ticker2 = Select(value='mt2', options=ticker_options)
    ticker3 = Select(value='auto', options=['auto', 'Freedman Diaconis Estimator', 'scott', 'rice', 'sturges', 'doane', 'sqrt'])
    ticker4 = Select(value='estrato', options=['estrato', 'alcobas', 'ubicacion', 'banos', 'parqueadero', 'terminado'])
    #plot_source = ColumnDataSource(data=dict(x=[], y=[], color=[]))
    plot_source = ColumnDataSource(data=dict(x=[], y=[], color=[], legend=[]))
    ticker1.on_change('value', ticker_change)
    ticker2.on_change('value', ticker_change)
    ticker3.on_change('value', ticker_change)
    ticker4.on_change('value', ticker_change)
    # NOTE(review): "reset" appears twice in this tool list.
    tools = "pan,wheel_zoom,box_select,lasso_select,reset,box_zoom,reset"
    # Scatter plot plus two marginal histograms sharing its axis ranges.
    corr = figure(plot_width=350, plot_height=350, tools=tools)
    corr.circle(x='x', y='y', size=2, source=plot_source, color='color', legend='legend')
    ph = figure(toolbar_location=None, plot_width=corr.plot_width, plot_height=200, x_range=corr.x_range, min_border=10, min_border_left=50, y_axis_location="right")
    pv = figure(toolbar_location=None, plot_width=200, plot_height=corr.plot_height, y_range=corr.y_range, min_border=10, y_axis_location="right")
    # Only refresh selections when the drag gesture ends.
    corr.select(BoxSelectTool).select_every_mousemove = False
    corr.select(LassoSelectTool).select_every_mousemove = False
    ph.xgrid.grid_line_color = None
    ph.yaxis.major_label_orientation = np.pi/4
    pv.ygrid.grid_line_color = None
    pv.xaxis.major_label_orientation = np.pi/4
    # grid layout: widgets + tables on top, tickers + plots below
    widgets = column(estrato, ubicacion, alcobas, banos, parqueadero, terminado)
    data_panel = column(data_table, static_table)
    plots_panel = gridplot([[corr, pv], [ph, None]], merge_tools=False)
    top_layout = row(widgets, data_panel)
    down_layout = row(column(ticker1, ticker2, ticker3, ticker4), plots_panel)
    layout = column(top_layout, down_layout)
    # Populate everything once before the first user interaction.
    update()
    doc.add_root(layout)
# Serve the Bokeh app inline (assumes Jupyter is running at localhost:8888).
handler = FunctionHandler(modify_doc)
app = Application(handler)
show(app, notebook_url="localhost:8888")
```
Elabore una tabla de doble entrada donde relacione si la propiedad tiene o no mejoras con la nueva categorización de la variable estrato. ¿En cuál de los nuevos estratos se presenta la mayor proporción de propiedades que tienen mejoras? Explique.
```
# Two-way table: counts of terminado (mejoras) by recategorised estrato.
# NOTE(review): aggfunc='count' counts non-null 'administracion' values, so
# rows with missing administracion are excluded from the counts — confirm.
frec_clase_peso = pd.pivot_table(df, columns='terminado', index='estrato', values='administracion', aggfunc='count')
# Relative frequencies with respect to the grand total.
frec_rel_clase_peso = frec_clase_peso/frec_clase_peso.sum().sum()
pd.concat([frec_clase_peso, frec_rel_clase_peso], axis=1)
frec_rel_clase_peso.plot(kind='bar')
plt.show()
```
El estrato alto es el que más modificaciones ha hecho a sus viviendas; esto se debe a que su poder adquisitivo le permite gastar en comodidades.
Elabore un histograma de frecuencias relativas para la variable precio. ¿Qué observa? Comente
```
# Density histogram of precio with Sturges binning, rendered with Bokeh.
hist, hedges = np.histogram(df.precio, density=True, bins='sturges')
plot = figure(plot_width=350, plot_height=350)
plot.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hist)
show(plot, notebook_url="localhost:8888")
```
Right-Skewed <br>
Mode < Median < Mean
5. ¿Es muy diferente el comportamiento de los precios de las propiedades de acuerdo a la ubicación? Comente. Elabore los gráficos y/o resúmenes que considere pertinentes.
6. Grafique un diagrama de dispersión de las variables avalúo y precio. ¿Qué observa? ¿Qué pasa si los puntos graficados se separan por color de acuerdo a la nueva categorización del estrato?. Explique.
7. Calcule el coeficiente de correlación de Pearson entre precio y avalúo. ¿Qué se puede decir sobre este valor?
8. Realice un análisis descriptivo adicional que considere pertinente sobre una o varias de las variables restantes de la base de datos.
* Para analizar todos estos puntos se pueden usar los resultados dinámicos del punto 2, donde está el dashboard: se observa un rango de precios muy uniforme en todas las zonas, exceptuando el sector de El Poblado. Me atrevería a tratar el resto de las zonas como una sola componente para el análisis de precios.
* si se separa por colores ya que existe una correlacion entre las variables donde el precio es directamente porporcional al avaluo y al estrato.
* existe una alta correlacion entre precio y avaluo de 0.79 pearson, 0.73 kendall y 0.89 spearman. son directamente proporcionales estas variable
```
# NOTE(review): in a notebook only the *last* expression is rendered, so the
# pearson and kendall matrices are computed but never displayed here; wrap
# each in display() to show all three.
print('pearson')
df.corr(method='pearson')
print('kendall')
df.corr(method='kendall')
print('spearman')
df.corr(method='spearman')
```
|
github_jupyter
|
# Sorting Madonna Songs
This project will randomly order the entire backlog of Madonna's songs. This was motivated following a colleague's offhand remark about one's favourite song being *Material Girl* by Madge, which triggered another colleague to provide the challenge of ranking all of Madonna's songs.
In particular, there are two key stages to this random ordering:
1. Assign a random, distinct integer number next to each song to act as its preference ranking
1. Employ a sorting algorithm to sort this list via the preference ranking column just created
Will try a variety of sorting algorithms detailed below
- **Quicksort**
- **Bubble sort**
- **Breadth-first search**
- **Heapsort**
- **Insertion sort**
- **Shell sort**
## Set-up
Need to load in the relevant packages, set up the right environment, and import the dataset.
```
# Export our environment, "NewEnv" and save it as "anomaly-detection.yml"
!conda env export -n NewEnv -f environment_anaconda.yml
# Check working directory to ensure user notebook is easily transferable
import os
os.getcwd()
# Import required libraries
import numpy as np
import pandas as pd
import xlrd
import csv
```
### Convert to CSV
Do not have Excel installed so cannot convert it via that. Instead, get round it via the *xlrd* and *csv* packages.
Note, could directly read in Excel file and play with that. However, learn less that way!
Code for function was taken from this [link](https://stackoverflow.com/questions/9884353/xls-to-csv-converter). However, first encountered an issue on using subfolders. This was resolved in this [link](https://stackoverflow.com/questions/7165749/open-file-in-a-relative-location-in-python). Then encountered an issue concerning the reading of entries as `bytes` instead of `str` which was resolved in this [link](https://stackoverflow.com/questions/33054527/typeerror-a-bytes-like-object-is-required-not-str-when-writing-to-a-file-in).
```
def csv_from_excel(file_input, file_output, sheet_index):
    """Convert one sheet of an Excel workbook to a fully-quoted CSV file.

    Parameters
    ----------
    file_input : str
        Path of the workbook to read (via ``xlrd``).
    file_output : str
        Path of the CSV file to create or overwrite.
    sheet_index : int
        Zero-based index of the sheet to export.
    """
    wb = xlrd.open_workbook(filename = file_input)
    sh = wb.sheet_by_index(sheet_index)
    # ``with`` guarantees the handle is closed even if a write raises
    # (the original left the file open on error).
    # NOTE(review): csv on Windows normally wants newline='' to avoid blank
    # rows; kept as-is to preserve the original output bytes.
    with open(file = file_output, mode = 'wt') as file_csv:
        wr = csv.writer(file_csv, quoting = csv.QUOTE_ALL)
        for rownum in range(sh.nrows):
            wr.writerow(sh.row_values(rownum))
# run function to output .csv file
# NOTE(review): backslash paths are Windows-only and risk escape-sequence
# surprises; raw strings or forward slashes would be safer.
csv_from_excel(file_input = 'data\songs_madonna.xlsx', file_output = 'data\songs_madonna.csv', sheet_index = 0)
```
## Data Wrangle
Load in our .csv file so that we can add distinct random numbers as a column which we will use to sort on.
Note: the file is encoded as *ANSI*, which corresponds to the `mbcs` encoding in `pd.read_csv()`.
```
# import data (file is ANSI-encoded, hence the Windows-only 'mbcs' codec)
data_madge = pd.read_csv(filepath_or_buffer = 'data\songs_madonna.csv', encoding = 'mbcs')
# display data
data_madge.head()
```
In the code below, are following a naive method for creating a column of distinct random numbers. This will be in steps:
1. Store the number of rows in a variable.
1. Generate a random sample without replacement using the number of rows as our region of interest.
1. Bind this random sample onto our `data_madge` dataframe.
```
# import package for random-sampling
import random

# Seed the sampler so the preference ranking is reproducible.
# (Fixed: the original built an unused numpy RandomState(123) while sampling
# with the stdlib ``random`` module, so no seed was actually in effect.)
random.seed(123)

# 1. store number of rows in a variable
n_rows = len(data_madge.index)

# 2. generate random sample without replacement (a permutation of 0..n_rows-1)
# note: using try-catch logic to ensure we generate a sample
try:
    sample_random = random.sample(population = range(0, n_rows), k = n_rows)
    print('Random sample generated is of object type: ', type(sample_random))
except ValueError:
    print('Sample size exceeded population size.')

# 3. bind random sample onto dataframe as each song's preference rank
data_madge['Preference_Avision'] = sample_random
data_madge = data_madge[['Songs', 'Preference_Avision']]

# check new dataframe
data_madge.head(57)
```
### Specific preferences
Whilst broadly indifferent between the vast majority of Madge's discology, two songs stand out to the author:
- Material Girl
- La Isla Bonita
What one wants to do is thus ensure that these two songs have the highest two preference rankings, `0` and `1`.
```
# Swap the two randomly assigned top ranks (0 and 1) with the ranks the two
# favourite songs currently hold, so 'Material Girl' and 'La Isla Bonita'
# end up ranked 0/1 and the displaced songs inherit the favourites' old ranks.
# 1. find songs randomly classified as favourites
value_top_preferences_random = [0, 1]
data_top_random = data_madge[data_madge.Preference_Avision.isin(value_top_preferences_random)]
# 2. find 'Material Girl' and 'La Isla Bonita'
songs_top_own = ['Material Girl', 'La Isla Bonita']
data_top_own = data_madge[data_madge.Songs.isin(songs_top_own)]
# 3. rename columns so can distinguish them
data_top_random = data_top_random.rename(columns = {"Songs": "SongsRandom",
                                                    "Preference_Avision": "PreferenceRandom"})
data_top_random
data_top_own = data_top_own.rename(columns = {"Preference_Avision": "Preference"})
data_top_own
# 3. append dataframes together
# need to reset indices to do so
data_temp = pd.concat(objs = [data_top_own.reset_index(drop = True),
                              data_top_random.reset_index(drop = True)],
                      axis = 1)
data_temp
# 4.i select correct preference columns now to accurately map preferences
# (each pair keeps the *other* group's rank, performing the swap)
data_top_random = data_temp.loc[:, ['SongsRandom', 'Preference']]
data_top_own = data_temp.loc[:, ['Songs', 'PreferenceRandom']]
# 4.ii rename columns so we can append/union
data_top_random = data_top_random.rename(columns = {"SongsRandom": "Song"})
data_top_own = data_top_own.rename(columns = {"PreferenceRandom": "Preference",
                                              "Songs": "Song"})
# 5. append/union two dataframes together
data_temp = pd.concat(objs = [data_top_random, data_top_own], ignore_index = True)
data_temp
# 6. bring back to original dataframe (left join; untouched songs get NaN)
data_preference = pd.merge(left = data_madge, right = data_temp,
                           left_on = 'Songs', right_on = 'Song',
                           how = 'left')
del(data_madge, data_temp, data_top_own, data_top_random, value_top_preferences_random, songs_top_own)
data_preference.head()
# 7. create a final preference column: swapped rank where present,
#    otherwise the original random rank
data_preference['SongPreference'] = np.where(data_preference.Preference.isnull(),
                                             data_preference['Preference_Avision'],
                                             data_preference['Preference'])
data_preference = data_preference.loc[:, ['Songs', 'SongPreference']]
data_preference = data_preference.rename(columns = {'Songs': 'Song'})
data_preference.head(58)
```
## Data Export
Now that we have the dataframe in a suitable format and reflective of the author's preferences, can export it as a .csv file for use in Java when we will apply assorted sorting algorithms on it.
```
data_preference.to_csv(path_or_buf = 'output\output_preference.csv',
sep = ',', encoding =
)
```
|
github_jupyter
|
# Homework (16 pts) - Hypothesis Testing
```
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
```
1. You measure the duration of high frequency bursts of action potentials under two different experimental conditions (call them conditions A and B). Based on your measured data below, determine if the conditions affect the mean burst duration or if differences are just due to random fluctuations? See 1a-d below.
```
# Measured burst durations in ms for conditions A and B (100 bursts each).
# Both samples are non-negative and heavily right-skewed (many short bursts,
# a few very long ones), which motivates the exponential fit below.
burstDurationsA_ms = np.array([
    180.38809356, 118.54316518, 47.36070342, 258.43152543,
    157.58441772, 53.00241256, 97.87549106, 98.58339172,
    3.82151168, 149.63437886, 78.36434292, 207.1499196,
    249.99308288, 52.33575872, 177.16295745, 20.90902826,
    355.53831638, 17.14676607, 194.82448255, 364.30099202,
    10.46025411, 63.80995802, 186.96964679, 16.76391482,
    66.04825185, 169.95991378, 174.85051452, 95.51534595,
    164.81818483, 165.92316127, 21.99840476, 176.27450914,
    367.20238806, 53.55081561, 18.54310649, 309.36915353,
    34.8110391, 170.70514854, 4.80755719, 185.70861565,
    42.81031454, 77.63480453, 22.78673497, 27.15480627,
    81.19289909, 7.5754338, 143.53588895, 1.45355329,
    56.93153072, 35.7227909, 120.88947208, 268.68459917,
    36.56451611, 335.29492244, 18.88246351, 698.21607381,
    47.24456065, 68.47935918, 246.50352868, 39.17939247,
    130.00962739, 12.63485608, 16.5060213, 85.73872575,
    30.34193446, 12.18596266, 133.13145381, 39.68448593,
    227.5104642, 274.45272375, 167.76767172, 23.93871685,
    319.05649273, 6.3491122, 35.14797547, 170.29631475,
    33.54342976, 2.71282041, 134.5042415, 42.498552,
    144.87658813, 122.78633957, 46.58727698, 143.74260009,
    27.95191179, 462.66535543, 187.17111074, 21.05730056,
    27.92875799, 73.0405984, 137.67114744, 25.51076087,
    68.71066451, 188.46823412, 20.58525518, 18.06289499,
    388.79209834, 9.42246312, 270.11609469, 20.51123798])
# Condition B durations (ms), same recording protocol.
burstDurationsB_ms = np.array([
    19.1579061, 103.28099491, 155.40048778, 54.00532297,
    19.60552475, 38.33218511, 172.39377537, 100.60095889,
    123.39067736, 32.30752807, 140.81577413, 10.03036383,
    76.95250023, 111.4112118, 106.77958145, 100.03741994,
    54.40736747, 169.72641863, 170.51048794, 84.31738796,
    32.48573515, 71.14968724, 18.07487628, 48.27775752,
    249.00817236, 40.88078534, 149.55876359, 171.68318734,
    64.7972247, 179.67199065, 211.24354393, 49.54367304,
    5.97816835, 270.82356699, 99.33133967, 14.35603709,
    61.8917307, 48.13722571, 65.23703418, 119.95425274,
    64.3948595, 57.40459219, 18.76680104, 37.37173184,
    143.4622583, 21.6463496, 45.86107014, 3.98511098,
    11.8424448, 105.59224929, 71.49909777, 29.64941255,
    117.62835465, 31.33284437, 124.17263642, 249.31437673,
    92.15958114, 66.2842341, 5.01333126, 18.53478564,
    44.09316335, 119.8752612, 52.31171617, 3.03888107,
    109.94031571, 5.52411681, 43.88839751, 48.63036147,
    22.71317076, 30.20052081, 32.10942778, 117.08796453,
    53.83369891, 68.82006208, 92.29204674, 93.829404,
    0.67985216, 10.42751195, 4.35827727, 127.21452508,
    42.69414115, 34.9520911, 20.16096766, 178.44190716,
    43.04340469, 89.11997718, 163.48474361, 277.29716851,
    17.08902205, 103.74782303, 49.29308393, 72.1459098,
    11.4600829, 4.09194418, 51.55511185, 91.81103802,
    31.36955782, 23.24407568, 90.13594215, 69.37118937])
```
1a. (1 pt) State the null and alternative hypotheses.
H0: Conditions have no effect on mean burst durations.
Ha: Mean burst duration differs between conditions.
1b. (3 pts) Plot the burst distributions for conditions A and B overlaid with your best estimate for the probability density function that describes them.
```
# Burst durations look exponentially distributed; for an exponential the
# maximum-likelihood estimate of the scale parameter is the sample mean.
distA = st.expon(loc=0, scale=burstDurationsA_ms.mean())
distB = st.expon(loc=0, scale=burstDurationsB_ms.mean())
# Overlay normalized histograms with the fitted probability densities.
plt.hist(burstDurationsA_ms, bins=20, density=True, alpha=0.25, label='A')
plt.hist(burstDurationsB_ms, bins=20, density=True, alpha=0.25, label='B')
dur_ms = np.linspace(0, 500, 100)
plt.plot(dur_ms, distA.pdf(dur_ms), label='dist A')
plt.plot(dur_ms, distB.pdf(dur_ms), label='dist B')
plt.xlabel('Burst Duration (ms)')
plt.ylabel('pdf')
plt.legend();
```
1c. (3 pts) Use a permutation test with 1000 permutations to test your null hypothesis. Compute the difference between mean burst durations for all 1000 permutations of the datasets.
```
# Permutation test: pool both samples, repeatedly shuffle the pooled data,
# and recompute the (B - A) difference in means. Under H0 the condition
# labels are interchangeable, so these differences approximate the null
# distribution of the measured difference.
nA = len(burstDurationsA_ms)
nB = len(burstDurationsB_ms)
pooled = np.concatenate((burstDurationsA_ms, burstDurationsB_ms))
numPermutations = 1000
permutedMeanBurstDurationDiffs = np.empty(numPermutations)
for k in range(numPermutations):
    np.random.shuffle(pooled)
    groupA, groupB = pooled[:nA], pooled[nA:]
    permutedMeanBurstDurationDiffs[k] = groupB.mean() - groupA.mean()
```
1d. (3 pts) Plot the distribution of mean burst time differences from each permutation and use vertical dashed lines to indicate the 95% confidence interval and a vertical solid line to indicate the measured mean burst time difference between the actual datasets. Finally, answer the original question: do the conditions affect mean burst duration?
```
# plot the distribution differences between taus for each permutation
plt.hist(permutedMeanBurstDurationDiffs, bins=50, alpha=0.25, label='Expected under H0');
plt.xlabel('Mean Burst Duration Diff B - A (ms)')
plt.ylabel('# Permutations');
# add 95% confidence intervals to the plot
lb, ub = np.quantile(permutedMeanBurstDurationDiffs, [0.025, 0.975])
plt.axvline(lb, linestyle='--', label='95% CI')
plt.axvline(ub, linestyle='--');
# add measured difference to plot
measuredMeanBurstDurationDiff = burstDurationsB_ms.mean() - burstDurationsA_ms.mean()
plt.axvline(measuredMeanBurstDurationDiff, color='r', label='Measured')
plt.legend();
```
Reject H0, as the measured difference falls outside the 95% confidence interval for the expected difference if H0 were true.
Thus, we infer that condition B did affect the mean burst duration as compared to condition A.
2. You record the resting potential of a cell (see below). See 2a-c below.
```
# 200 resting-potential measurements (mV) from a single cell; values cluster
# around the low -70s mV, roughly symmetric (normality is tested below).
restingPotential_mV = np.array([
    -85.06885608, -68.0333149, -77.04147864, -70.82636201,
    -73.11516394, -70.87124656, -69.8945143, -71.35017797,
    -78.97700081, -76.06762065, -80.16301496, -75.53757879,
    -66.29208026, -84.46635021, -74.99594162, -81.64926101,
    -69.43971079, -60.09946296, -66.79822251, -60.85633766,
    -54.32637416, -66.45195357, -82.98456323, -81.95661922,
    -60.47209247, -80.55272128, -62.85999264, -86.59379859,
    -78.64488589, -68.84506935, -80.77647186, -67.85623328,
    -74.45114227, -89.65579119, -82.64751201, -63.75968145,
    -74.22283582, -59.31586296, -93.0908073, -73.64374549,
    -62.68738212, -57.96506437, -72.3717666, -86.33058942,
    -78.92751452, -58.80136699, -85.71378949, -57.19191734,
    -91.30229149, -75.05287933, -75.33300218, -62.74969485,
    -79.59156555, -52.61256484, -77.21434863, -83.18228806,
    -62.06267252, -68.56599363, -74.33860286, -74.25433867,
    -67.10062548, -70.91001388, -74.54319772, -89.15247536,
    -72.25311527, -88.42966306, -77.76328165, -68.46582471,
    -75.94389499, -58.47565688, -71.13726886, -82.4352595,
    -61.93586705, -83.83289675, -51.7473573, -72.18052423,
    -77.19392687, -87.97762782, -68.17409172, -62.04925685,
    -72.86214908, -69.43243604, -82.89191418, -67.91943956,
    -59.00530849, -62.53955662, -68.66192422, -73.86176431,
    -63.33605874, -84.78928316, -79.38590405, -85.06698722,
    -77.99176887, -70.8097979, -70.458364, -77.83905415,
    -79.05549124, -67.7530506, -86.29135786, -60.87285052,
    -68.75028368, -69.48216823, -87.97546221, -74.25401398,
    -72.00639248, -73.25242423, -99.49034043, -81.86020062,
    -78.38191113, -68.64333415, -62.26209287, -75.46279644,
    -82.18768283, -77.45752358, -79.82870353, -69.4572625,
    -78.32253067, -73.59782921, -72.25046001, -80.64590368,
    -76.92874101, -90.79517065, -73.90324566, -81.67875556,
    -67.59862905, -81.49491813, -75.79660561, -81.14508062,
    -78.95641057, -80.56089537, -80.23390812, -72.4244641,
    -87.47818531, -73.59907449, -66.92882851, -67.87048944,
    -69.79223622, -67.11253617, -64.8935525, -80.52556846,
    -78.19259758, -62.10604477, -95.98603544, -75.95599522,
    -66.3355366, -80.87436998, -81.5009947, -88.22430255,
    -83.72971765, -75.86416506, -82.52663772, -53.76916602,
    -66.21196557, -72.93868097, -91.42283677, -80.22444843,
    -75.08391826, -52.05541454, -72.0154604, -80.24943593,
    -65.97047566, -81.62631839, -73.18646105, -70.85923137,
    -66.05248632, -60.82923084, -59.49883812, -78.38967591,
    -84.79797173, -95.00305539, -78.06355062, -71.60393851,
    -70.37115932, -86.7155815, -65.38955127, -76.78546928,
    -79.85586826, -76.65572665, -71.50214043, -83.65681821,
    -59.9250123, -76.05986927, -82.68107711, -70.01703154,
    -74.46337865, -63.38903087, -78.73136431, -76.56253395,
    -72.43137511, -52.60067507, -54.23945626, -63.68117735,
    -88.19424095, -76.29322833, -77.01457066, -72.88256829,
    -67.46931905, -60.91331725, -79.17094879, -74.96126989])
```
2a. (3 pts) You only have one sample (above) with a single mean. Use the Central Limit Theorem to estimate the distribution of mean resting potentials were you to collect a bunch more samples. Plot this distribution and indicate its 95% confidence interval with vertical lines on the plot.
```
# Central Limit Theorem: the sampling distribution of the sample mean is
# approximately normal with mean mu and standard deviation sigma/sqrt(n).
mu = restingPotential_mV.mean()
# NOTE(review): numpy's .std() defaults to ddof=0 (population std); ddof=1
# is the usual choice for a sample-based SEM -- confirm which was intended.
sem = restingPotential_mV.std() / np.sqrt(len(restingPotential_mV))
meanDist = st.norm(mu, sem)
mV = np.linspace(-77, -71, 101)
plt.plot(mV, meanDist.pdf(mV))
plt.xlabel('Mean Resting Potential (mV)')
plt.ylabel('pdf')
plt.title('Central Limit Theorem')
# 95% CI = central 95% of the estimated sampling distribution of the mean
lb, ub = meanDist.ppf([0.025, 0.975])
plt.axvline(lb, linestyle='--', label='95% CI')
plt.axvline(ub, linestyle='--')
plt.legend();
```
2b. (3 pts) Use 1000 bootstrapped samples to estimate the 95% confidence interval for the mean resting potential. Plot the distribution of bootstrap mean resting potentials and indicate the 95% confidence intervals with vertical lines. How do these compare to that obtained by the Central Limit Theorem?
```
# Bootstrap: resample the data with replacement many times; the spread of
# the resampled means estimates the sampling distribution of the mean, and
# its central 95% gives a confidence interval.
numBootstraps = 1000
bootstrappedMeans = np.array([
    np.random.choice(restingPotential_mV,
                     size=restingPotential_mV.shape, replace=True).mean()
    for _ in range(numBootstraps)])
bootstrappedMeansCI = np.quantile(bootstrappedMeans, [0.025, 0.975])
plt.hist(bootstrappedMeans, bins=30, alpha=0.25, label='Bootstrapped')
plt.axvline(bootstrappedMeansCI[0], linestyle='--', label='95% CI')
plt.axvline(bootstrappedMeansCI[1], linestyle='--')
plt.xlabel('Mean Resting Potential (mV)')
plt.ylabel('# Bootstrap Samples')
plt.legend();
```
2c. (3 pts) Use a t-Test to determine whether this cell belongs to a set of cells that you previously determined have a resting potential of -60 mV?
```
# I didn't specifically ask for the normality test, so it is ok if it was not included.
# But you should do some sort of check for normality if you are using a t-Test.
stat, pvalue = st.normaltest(restingPotential_mV)
# p >= 0.05: no evidence against normality, so a t-test is reasonable
isNormallyDistributed = pvalue >= 0.05
isNormallyDistributed
# one-sample t-test of H0: the population mean resting potential is -60 mV
t, pvalue = st.ttest_1samp(restingPotential_mV, -60)
pvalue
```
|
github_jupyter
|
### TDD for data with pytest
TDD is great for software engineering, but did you know TDD can add a lot of speed and quality to Data Science projects too?
We'll learn how we can use TDD to save you time - and quickly improve functions which extract and process data.
# About me
**Chris Brousseau**
*Surface Owl - Founder & Data Scientist*
<br>
*Pragmatic AI Labs - Cloud-Native ML*
<br>
<br>
Prior work at Accenture & USAF
<br>
Engineering @Boston University
<img src="data/images/detective_and_murderer.jpg" alt="Filipe Fortes circa 2013" style="width: 1400px;">
# 0 - Problem to solve
- speed up development & improve quality on data science projects
<br><br>
# Two main cases
1. test tidy input *(matrix - columns = var, row = observation)* **(tidy data != clean data)**
<br><br>
2. test ingest/transformations of complex input *(creating tidy & clean data)*
# Our Objectives
- Intro TDD (Test Driven Development)
- Learn about two packages: pytest & datatest
1. For tidy data - *see datatest in action*
2. For data engineering - *see TDD for complex input*
- Understand When not to use TDD
- Get links to Resources
### Why TDD?
<img src=data/images/debugging_switches.200w.webp style="height: 600px;"/>
### What is TDD
- process for software development
- **themes:** intentional -> small -> explicit -> automated
### How does it work?
- confirm requirements
- write a failing test (vs ONLY these requirements!)
- write code to pass the test (keep it small)
- refactor & retest
- automate
### Why TDD?
1. first - focus on requirements and outcomes
2. save time debugging
3. boost confidence in your code
4. improve refactoring - *speed and confidence*
5. encourages "clean code" - *SRP, organization*
6. speed up onboarding new team members - *read 1K lines, or a test?*
### Why TDD for data?
1. all the above
2. confidence in pipeline
# Relevant Packages: pytest
<br>
**pytest:**
framework for writing and running tests
- pypi
- auto-discovery of your tests (prefix `test` on files, classes & functions)
- runs unittest and nose tests
- write functions not classes
- useful plugins (coverage, aws, selenium, databases, etc)
- [Human-readable usage here](https://gist.github.com/kwmiebach/3fd49612ef7a52b5ce3a)
<br><br>
# Relevant Packages: datatest
**datatest:**
helps speed up and formalize data-wrangling and data validation tasks
- pypi
- sits on top of pytest or unittest
- Test data pipeline components and end-to-end behavior
**ipytest:**
helper package - run tests inside jupyter notebook
(labs coming)
# 1- TDD for tidy data
### datatest deets!
- *core functions:*
1. validation
2. error reporting
3. acceptance declarations (data is dirty!)
<br><br>
- built-in classes: selecting, querying, and iterating over data
- both pytest & unittest styles
- works with Pandas
- useful for pipelines
- https://github.com/shawnbrown/datatest
- https://datatest.readthedocs.io/en/stable/index.html
#### datatest - what does it do for you?
- **validation:** check that raw data meets requirements you specify
- columns exist
- values are in: specific set, range, types
- match order and sequences@specific index, mapping
- fuzzy
- **compute differences** between inputs & test conditions
- **acceptances** - based on differences
- tolerance - absolute
- tolerance - percentage
- fuzzy, others
- composable - construct acceptance criteria based on *intersection of lower-level datatest acceptances*
- **all in a test framework**
**[link: validate docs](https://datatest.readthedocs.io/en/stable/reference/datatest-core.html#datatest.validate)**
### Example 0 - datatest cases
#### sources:
- https://datatest.readthedocs.io/en/stable/tutorial/dataframe.html
<br><br>
- https://github.com/moshez/interactive-unit-test/blob/master/unit_testing.ipynb
```
# setup - thank you Moshe!
import unittest
def test(klass):
loader = unittest.TestLoader()
# suite=loader.loadTestsFromTestCase(klass) # original
suite=loader.loadTestsFromModule(klass) # to work with datatest example
runner = unittest.TextTestRunner()
runner.run(suite)
# other helpful setup
# ipytest - https://github.com/chmp/ipytest
import ipytest
import ipytest.magics
# enable pytest's assertions and ipytest's magics
ipytest.config(rewrite_asserts=False, magics=True)
# load datatest example: movie metadata CSV with some deliberately bad rows
import pandas as pd
df = pd.read_csv("./data/test_datatest/movies.csv")
df.head(5)
# %load tests/test_01_datatest_movies_df_unit
#!/usr/bin/env python
import pandas as pd
import datatest as dt
import os
def setUpModule():
    # unittest hook: runs once before the tests; loads the shared dataframe
    global df
    print(os.getcwd())
    df = pd.read_csv('data/test_datatest/movies.csv')
class TestMovies(dt.DataTestCase):
    """datatest-style validation of the raw movies dataframe."""
    @dt.mandatory  # if the columns are wrong, the remaining tests are moot
    def test_columns(self):
        self.assertValid(
            df.columns,
            {'title', 'rating', 'year', 'runtime'},
        )
    def test_title(self):
        # every title must start with an uppercase letter
        self.assertValidRegex(df['title'], r'^[A-Z]')
    def test_rating(self):
        # compare ratings against the MPAA rating set
        self.assertValidSuperset(
            df['rating'],
            {'G', 'PG', 'PG-13', 'R', 'NC-17'},
        )
    def test_year(self):
        self.assertValid(df['year'], int)
    def test_runtime(self):
        self.assertValid(df['runtime'], int)
test(TestMovies())
# what is going on with our original data?
df_fixed = pd.read_csv('data/test_datatest/movies.csv')
print(df_fixed.iloc[7:11, :]) #looks better w/print
# fix the bad data - through pipeline or manually
df_fixed = pd.read_csv('data/test_datatest/movies_fixed.csv')
print(df_fixed.iloc[7:11, :])
# clear existing test objects in jupyter notebook - similar to reset
%reset_selective -f df
%reset_selective -f TestMovies
# fixed data - rerun tests
def setUpModule():
    # reload the shared dataframe from the corrected CSV before re-running
    global df
    print(os.getcwd())
    df = pd.read_csv('data/test_datatest/movies_fixed.csv') # note new source
class TestMovies(dt.DataTestCase):
    """Same validations as before, now run against the fixed data."""
    @dt.mandatory
    def test_columns(self):
        self.assertValid(
            df.columns,
            {'title', 'rating', 'year', 'runtime'},
        )
    def test_title(self):
        self.assertValidRegex(df['title'], r'^[A-Z]')
    def test_rating(self):
        # compare ratings against the MPAA rating set
        self.assertValidSuperset(
            df['rating'],
            {'G', 'PG', 'PG-13', 'R', 'NC-17'},
        )
    def test_year(self):
        self.assertValid(df['year'], int)
    def test_runtime(self):
        self.assertValid(df['runtime'], int)
test(TestMovies())
```
# 2 - TDD for data engineering
### Example 1 - finding urls in excel
- url test case
- multiple url test case which breaks prior tests
- regex101.com illustration (edit function to make tests pass)
https://regex101.com/
- final regex to rule them all
#### Sample data (under /data/test_cais) - needs transformation
|example 1 |~ |example 2 |
|:--- |:--- |:---|
| <img src="data/images/excel_sample2013.png" alt="excel example 1" style="height: 900px;"> | ...|<img src="data/images/excel_sample2018.png" alt="excel example 2" style="height: 900px;"> |
```
# %load tests/test_02_cais_find_single_url
"""
test functions to find url in cell content from an excel worksheet
functions below have "do_this_later_" prefix to prevent tests from running during early part of talk
remove prefix as we walk through examples, and re-run tests
"""
from src.excel_find_url import find_url
def test_find_single_url():
    """
    unit test to find url in a single text string
    :return: None
    """
    # the find_url function we are testing takes cell content as a string, and current results dict
    # pass an empty results dict, so no existing value is found
    result = {}
    # inputs we expect to pass
    # realistic cell content: free text with the url embedded at the end
    input01 = "Coeducational Boarding/Day School Grades 6-12; Enrollment 350 www.prioryca.org"
    # declare result we expect to find here
    assert find_url(input01, result) == "www.prioryca.org"
# %load src/excel_find_url.py
# %load src/excel_find_url.py
# %%writefile src/excel_find_url.py
import re
from src.excel_read_cell_info import check_if_already_found
def find_url(content, result):
    """
    finds url of school if it exists in cell
    :param content: cell content from spreadsheet
    :type content: string
    :param result: dict of details on current school
    :type result: dict
    :return: url
    :rtype: basestring
    """
    # skip the regex work if an earlier pass already recorded a url
    if check_if_already_found("url", result):
        return result['url']
    # different regex to use during python talk
    # https://regex101.com
    # regex = re.compile(r"w{3}.*", re.IGNORECASE)
    # regex = re.compile(r"(http|https):\/\/.*", re.IGNORECASE) # EDIT THIS LIVE
    # NOTE(review): inside the character class, ":-_" is the range ':'
    # through '_' (which includes ';<=>?@A-Z[\]^') -- likely broader than
    # intended; verify against regex101 before reusing outside the talk.
    regex = re.compile(
        r"((http|https):\/\/)?[a-zA-Z0-9.\/?::-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9..\/&\/\-_=#])*",
        re.IGNORECASE)
    # NOTE(review): re-raising a bare TypeError drops the original message;
    # str(content) is unlikely to raise at all, so this try/except is a no-op.
    try:
        match = re.search(regex,
                          str(content))
    except TypeError:
        raise TypeError
    if match:
        # strip any whitespace picked up around the matched url
        url = str(match.group()).strip()
        return url
    else:
        return None
# jupyter magic: list the test files available for the demo
%ls "tests/"
test02 = "tests/test_02_cais_find_single_url.py"
# point ipytest at the test module so it can be run inside the notebook
__file__ = test02
ipytest.clean_tests()
ipytest.config.addopts=['-v']
# ['-k test_03_cais_find_https_url.py']
ipytest.run()
# %load tests/test_03_cais_find_https_url.py
"""
test functions to find url in cell content from an excel worksheet
functions below have "do_this_later_" prefix to prevent tests from running during early part of talk
remove prefix as we walk through examples, and re-run tests
"""
from src.excel_find_url import find_url
def test_find_https_url():
    """
    unit test multiple strings for urls in bulk - rather than separate test functions for each
    one way to rapidly iterate on your code, nicely encapsulates similar cases
    requires editing REGEX in excel_read_cell_info.find_url to make this test pass
    """
    result = {}
    # inputs we expect to pass: both http:// and https:// schemes
    input01 = "Coed Boarding/Day School Grades 6-12; Enrollment 350 http://www.prioryca.org"
    input02 = "https://windwardschool.org"
    assert find_url(input01, result) == "http://www.prioryca.org"
    assert find_url(input02, result) == "https://windwardschool.org"
# %load src/excel_find_url.py
# %load src/excel_find_url.py
# %%writefile src/excel_find_url.py
import re
from src.excel_read_cell_info import check_if_already_found
def find_url(content, result):
    """
    finds url of school if it exists in cell
    :param content: cell content from spreadsheet
    :type content: string
    :param result: dict of details on current school
    :type result: dict
    :return: url
    :rtype: basestring
    """
    # skip the regex work if an earlier pass already recorded a url
    if check_if_already_found("url", result):
        return result['url']
    # different regex to use during python talk
    # https://regex101.com
    # regex = re.compile(r"w{3}.*", re.IGNORECASE)
    # regex = re.compile(r"(http|https):\/\/.*", re.IGNORECASE) # EDIT THIS LIVE
    # NOTE(review): inside the character class, ":-_" is the range ':'
    # through '_' (which includes ';<=>?@A-Z[\]^') -- likely broader than
    # intended; verify against regex101 before reusing outside the talk.
    regex = re.compile(
        r"((http|https):\/\/)?[a-zA-Z0-9.\/?::-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9..\/&\/\-_=#])*",
        re.IGNORECASE)
    # NOTE(review): re-raising a bare TypeError drops the original message;
    # str(content) is unlikely to raise at all, so this try/except is a no-op.
    try:
        match = re.search(regex,
                          str(content))
    except TypeError:
        raise TypeError
    if match:
        # strip any whitespace picked up around the matched url
        url = str(match.group()).strip()
        return url
    else:
        return None
### Switch to PyCharm
```
### [regex101](https://regex101.com)
w{3}.*
(http|https):\/\/.*
((http|https):\/\/)?[a-zA-Z0-9.\/?::-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9..\/&\/\-_=#])*
www.prioryca.org
http://www.prioryca.org
https://prioryca.org
### Switch to PyCharm or your IDE to edit code and run multiple tests
### Example 2 - finding names & use of supplementary data summaries
- use of expected results file bundled with data as pytest input
- structured discovery of edge cases
- **objective:** find school names in messy excel document
- **strategy:** find names by finding specific formats - removing stopwords & addresses
- **test goals:** confirm code finds same # of names as we do manually
- **test approach:** summarize names manually in new tab, *then test code results vs. manual results*
#### Recall our data (under /data/test_cais) - needs transformation
|example 1 |~ |example 2 |
|:--- |:--- |:---|
| <img src="data/images/excel_sample2013.png" alt="excel example 1" style="height: 900px;"> | ...|<img src="data/images/excel_sample2018.png" alt="excel example 2" style="height: 900px;"> |
#### Review input files (/data/test_cais)
<br><br>
<img src="data/images/excel_summarize_expected_results.png" alt="excel example 1" style="height: 900px;">
```
"""
tests focused on ability to pull all the names from a cais excel file
"""
def test_find_2013_cais_name_table10():
    """
    test finding names in first member schools tab
    test function to dynamically look up names vs. expected result from separate file
    :return: True or False
    """
    # compare names found by the code against a manually-built summary tab
    test_file = "School_Directory_2013-2014-converted.xlsx"
    results_file = "cais_name_counts_manual_2013-2014.xlsx"
    table_num = 10
    # NOTE(review): common_search is not defined in this slide snippet --
    # it lives in the talk repository's test helpers.
    found_in_table_10, expected_in_table_10 = common_search(test_file, results_file, table_num)
    assert found_in_table_10 == expected_in_table_10
```
#### Data Driven transformation accuracy (/data/test_cais)
<br><br>
<img src="data/images/test_results.excel_table_accuracy.png" alt="dynamic input testing">
# 3 - When not to use TDD for data?
- EDA
- quick prototypes
- data source is complete & managed
- cost / time >> benefits
# 4 - Resources
**this talk:** https://github.com/surfaceowl/pythontalk_tdd_for_data
<br><br>
**pytest**
[pytest on pypi](https://pypi.org/project/pytest/) [pytest docs](https://docs.pytest.org/en/latest/)
<br><br>
**ipytest**
[ipytest pypi](https://pypi.org/project/ipytest/) [ipytest github](https://github.com/chmp/ipytest)
<br><br>
**datatest**
[datatest on pypi](https://pypi.org/project/datatest/) [github](https://github.com/shawnbrown/datatest) [docs](https://datatest.readthedocs.io/en/stable/)
**TDD for data**
[towards data science article](https://towardsdatascience.com/tdd-datascience-689c98492fcc)
# Recap: Our Objectives
- Intro TDD (Test Driven Development)
- Learned about pytest & datatest
- Saw testing in action for:
1. tidy data
2. transformation / data engineering
- Understand When not to use TDD
- Have links to Resources
# END
### setup notes
venv, then pip install -r requirements.txt
conftest.py must be in project root
run pytest from terminal - must be in tests dir
pycharm setup -- set test runner to pytest
### resources
https://nbviewer.jupyter.org/github/agostontorok/tdd_data_analysis/blob/master/TDD%20in%20data%20analysis%20-%20Step-by-step%20tutorial.ipynb#Step-by-step-TDD-in-a-data-science-task
http://www.tdda.info/
fix pytest Module not found
https://medium.com/@dirk.avery/pytest-modulenotfounderror-no-module-named-requests-a770e6926ac5
#### regex
https://regex101.com/
|
github_jupyter
|
# Information Flow
In this chapter, we detail how to track information flows in python by tainting input strings, and tracking the taint across string operations.
Some material on `eval` exploitation is adapted from the excellent [blog post](https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html) by Ned Batchelder.
**Prerequisites**
* You should have read the [chapter on coverage](Coverage.ipynb).
Setting up our infrastructure
```
import fuzzingbook_utils
from ExpectError import ExpectError
import inspect
import enum
%%html
<div>
<style>
div.todo {
color:red;
font-weight: bold;
}
div.todo::before {
content: "TODO: ";
}
div.done {
color:blue;
font-weight: bold;
}
div.done::after {
content: " :DONE";
}
</style>
<script>
function todo_toggle() {
if (todo_shown){
$('div.todo').hide('500');
$('div.done').hide('500');
$('#toggleButton').val('Show Todo')
} else {
$('div.todo').show('500');
$('div.done').show('500');
$('#toggleButton').val('Hide Todo')
}
todo_shown = !todo_shown
}
$( document ).ready(function(){
todo_shown=false;
$('div.todo').hide()
});
</script>
<form action="javascript:todo_toggle()"><input type="submit" id="toggleButton" value="Show Todo"></form>
```
Say we want to implement a calculator service in Python. A really simple way to do that is to rely on the `eval()` function in Python. Since we do not want our users to be able to execute arbitrary commands on our server, we use `eval()` with empty `locals` and `globals`
```
def my_calculator(my_input):
    # SECURITY: eval() of user input is dangerous -- even with empty
    # globals/locals, Python injects __builtins__ (demonstrated below).
    # This deliberately vulnerable version is the chapter's running example.
    result = eval(my_input, {}, {})
    # NOTE(review): "%d" truncates non-integer results (e.g. '1/2' -> 0)
    print("The result of %s was %d" % (my_input, result))
```
It works as expected:
```
my_calculator('1+2')
```
Does it?
```
with ExpectError():
my_calculator('__import__("os").popen("ls").read()')
```
As you can see from the error, `eval()` completed successfully, with the system command `ls` executing successfully. It is easy enough for the user to see the output if needed.
```
my_calculator("1 if __builtins__['print'](__import__('os').popen('ls').read()) else 0")
```
The problem is that the Python `__builtins__` is [inserted by default](https://docs.python.org/3/library/functions.html#eval) when one uses `eval()`. We can avoid this by restricting `__builtins__` in `eval` explicitly.
```
def my_calculator(my_input):
    # Explicitly blank out __builtins__ so eval() does not inject them;
    # the following cells show this is still not a sufficient sandbox.
    result = eval(my_input, {"__builtins__":None}, {})
    print("The result of %s was %d" % (my_input, result))
```
Does it help?
```
with ExpectError():
my_calculator("1 if __builtins__['print'](__import__('os').popen('ls').read()) else 0")
```
But does it actually?
```
my_calculator("1 if [x['print'](x['__import__']('os').popen('ls').read()) for x in ([x for x in (1).__class__.__base__.__subclasses__() if x.__name__ == 'Sized'][0].__len__.__globals__['__builtins__'],)] else 0")
```
The problem here is that when the user has a way to inject **uninterpreted strings** that can reach a dangerous routine such as `eval()` or an `exec()`, it makes it possible for them to inject dangerous code. What we need is a way to restrict the ability of uninterpreted input string fragments from reaching dangerous portions of code.
## A Simple Taint Tracker
For capturing information flows we need a new string class. The idea is to use the new tainted string class `tstr` as a wrapper on the original `str` class.
We need to write the `tstr.__new__()` method because we want to track the parent object responsible for the taint (essentially because we want to customize the object creation, and `__init__` is [too late](https://docs.python.org/3/reference/datamodel.html#basic-customization) for that.).
The taint map in variable `_taint` contains non-overlapping taints mapped to the original string.
```
class tstr_(str):
    """A plain ``str`` subclass whose constructor swallows any extra
    positional/keyword arguments, so subclasses can accept taint metadata
    without confusing ``str.__new__``."""

    def __new__(cls, value, *args, **kw):
        # Only the string value reaches str's constructor; the extra
        # arguments are consumed by the subclass __init__.
        return str.__new__(cls, value)
class tstr(tstr_):
    """Tainted string: each character carries the index of the character
    in the original source string it came from (-1 marks 'untainted')."""

    def __init__(self, value, taint=None, parent=None, **kwargs):
        # Remember which object this string was derived from (if any).
        self.parent = parent
        # (fixed: dropped the original's unused local ``l = len(self)``)
        if taint:
            if isinstance(taint, int):
                # An int taint is an offset: character i maps to taint + i.
                self._taint = list(range(taint, taint + len(self)))
            else:
                # Otherwise taint is an explicit per-character index map.
                assert len(taint) == len(self)
                self._taint = taint
        else:
            # Default (including taint=0, for which the offset form would
            # produce the same list): identity mapping, fully tainted.
            self._taint = list(range(0, len(self)))

    def has_taint(self):
        """Return True if at least one character is still tainted."""
        return any(True for i in self._taint if i >= 0)

    def __repr__(self):
        # Delegate to str so taint metadata never shows up in output.
        return str.__repr__(self)

    def __str__(self):
        return str.__str__(self)

# A freshly wrapped string is fully tainted with the identity map ...
t = tstr('hello')
t.has_taint(), t._taint

# ... and an int taint shifts the whole map by that offset.
t = tstr('world', taint = 6)
t._taint
```
By default, when we wrap a string, it is tainted. Hence we also need a way to `untaint` the string.
```
class tstr(tstr):
    def untaint(self):
        """Mark every character as untainted (-1); returns self for chaining."""
        self._taint = [-1] * len(self)
        return self

# after untaint() no position carries a taint index any more
t = tstr('hello world')
t.untaint()
t.has_taint()
```
However, the taint does not transition from the whole string to parts.
```
with ExpectError():
    # At this stage slicing still returns a plain str (no __getitem__
    # override yet), so .has_taint() is missing and this raises; the
    # ExpectError context prints the traceback instead of failing.
    t = tstr('hello world')
    t[0:5].has_taint()
```
### Slice
The Python `slice` operator `[n:m]` relies on the object being an `iterator`. Hence, we define the `__iter__()` method.
```
class tstr(tstr):
    def __iter__(self):
        """Iterate as taint-carrying single-character strings."""
        return tstr_iterator(self)

    def create(self, res, taint):
        """Wrap *res* in a new tstr carrying *taint*, with self as parent."""
        return tstr(res, taint, self)

    def __getitem__(self, key):
        res = super().__getitem__(key)
        if type(key) == int:
            # Normalize a negative index so the taint lookup matches the
            # character that str indexing returned.
            if key < 0:
                key = len(self) + key
            return self.create(res, [self._taint[key]])
        elif type(key) == slice:
            # Slicing the taint map the same way mirrors the string slice.
            return self.create(res, self._taint[key])
        else:
            assert False
```
Since `__getitem__()` hands characters back one at a time, iteration needs a supporting iterator object, which we define next.
#### The iterator class
The `__iter__()` method requires a supporting `iterator` object.
```
class tstr_iterator():
    """Iterator over a tstr that yields one tainted character at a time."""

    def __init__(self, tstr):
        self._source = tstr
        self._pos = 0

    def __next__(self):
        if self._pos >= len(self._source):
            raise StopIteration
        # Index through tstr.__getitem__ so the character keeps its taint.
        char = self._source[self._pos]
        assert type(char) is tstr
        self._pos += 1
        return char

# with __getitem__ in place, slices now carry taint
t = tstr('hello world')
t[0:5].has_taint()
```
### Helper Methods
We define a few helper methods that deal with the mapped taint indexes.
```
class tstr(tstr):
    class TaintException(Exception):
        """Raised when a taint lookup cannot be satisfied."""
        pass

    def x(self, i=0):
        """Return the source index of character *i*, raising TaintException
        if that character is untainted."""
        v = self._x(i)
        if v < 0:
            # FIX: the original raised ``taint.TaintException`` but no
            # ``taint`` name exists in this scope, so the raise itself
            # would have crashed with a NameError.
            raise self.TaintException('Invalid mapped char idx in tstr')
        return v

    def _x(self, i=0):
        return self.get_mapped_char_idx(i)

    def get_mapped_char_idx(self, i):
        # An empty taint map means there is nothing to look up.
        if self._taint:
            return self._taint[i]
        else:
            # FIX: same NameError as above (was ``taint.TaintException``).
            raise self.TaintException('Invalid request idx')

    def get_first_mapped_char(self):
        """Source index of the first tainted character, or -1 if none."""
        for i in self._taint:
            if i >= 0:
                return i
        return -1

    def is_tpos_contained(self, tpos):
        """True if source position *tpos* occurs anywhere in this string."""
        return tpos in self._taint

    def is_idx_tainted(self, idx):
        """True if character *idx* of this string is tainted."""
        return self._taint[idx] != -1

# characters map back to their source indices (here offset by 4),
# including via negative indexing ...
my_str = tstr('abcdefghijkl', taint=list(range(4,16)))
my_str[0].x(),my_str[-1].x(),my_str[-2].x()

# ... and slices keep the per-character mapping
s = my_str[0:4]
s.x(0),s.x(3)

s = my_str[0:-1]
len(s),s.x(10)
```
### Concatenation
Implementing concatenation is straightforward:
```
class tstr(tstr):
    def __add__(self, other):
        """Concatenation: the result's taint map is the two maps joined.
        A plain-str right operand contributes untainted (-1) positions."""
        if type(other) is tstr:
            other_taint = other._taint
        else:
            other_taint = [-1 for i in other]
        return self.create(str.__add__(self, other), self._taint + other_taint)
```
Testing concatenations
```
my_str1 = tstr("hello")
my_str2 = tstr("world", taint=6)
my_str3 = "bye"

# tstr + tstr keeps both taint maps; a plain str contributes -1 entries
v = my_str1 + my_str2
print(v._taint)

w = my_str1 + my_str3 + my_str2
print(w._taint)

class tstr(tstr):
    def __radd__(self, other): #concatenation (+) -- other is not tstr
        """Reflected concatenation (plain str + tstr): the left-hand plain
        string contributes untainted (-1) positions."""
        left_taint = other._taint if type(other) is tstr else [-1 for i in other]
        return self.create(str.__add__(other, self), left_taint + self._taint)

# the plain-str prefix now shows up as -1 positions in the result
my_str1 = "hello"
my_str2 = tstr("world")
v = my_str1 + my_str2
v._taint
```
### Replace
```
class tstr(tstr):
    def replace(self, a, b, n=None):
        """Return a copy with occurrences of `a` replaced by `b`,
        keeping the taint map in sync with the characters.

        a: substring to search for
        b: replacement; its taint is used if it is a tstr, else -1
        n: maximum number of replacements; None replaces all
           (n=0 now replaces nothing, matching str.replace -- the
           original treated 0 as "replace all")
        """
        old_taint = self._taint
        b_taint = b._taint if type(b) is tstr else [-1] * len(b)
        mystr = str(self)
        i = 0
        # Resume the search *after* each inserted replacement so that an
        # `a` occurring inside `b` is never re-matched.  The original
        # restarted find() from index 0 every pass, which loops forever
        # for e.g. replace('a', 'xa').
        search_from = 0
        while True:
            if n is not None and i >= n:
                break
            idx = mystr.find(a, search_from)
            if idx == -1:
                break
            last = idx + len(a)
            # Splice the replacement in at the match position only.
            mystr = mystr[:idx] + str(b) + mystr[last:]
            partA, partB = old_taint[0:idx], old_taint[last:]
            old_taint = partA + b_taint + partB
            search_from = idx + len(b)
            i += 1
        return self.create(mystr, old_taint)
my_str = tstr("aa cde aa")
res = my_str.replace('aa', 'bb')
res, res._taint
```
### Split
We essentially have to re-implement split operations, and split by space is slightly different from other splits.
```
class tstr(tstr):
    def _split_helper(self, sep, splitted):
        """Re-slice self along the plain-str split results so each piece
        is a taint-carrying slice; `sep` has a fixed length."""
        result_list = []
        last_idx = 0
        first_idx = 0
        sep_len = len(sep)
        for s in splitted:
            last_idx = first_idx + len(s)
            # Slice out of self (not s) so the taint map is carried over.
            item = self[first_idx:last_idx]
            result_list.append(item)
            first_idx = last_idx + sep_len
        return result_list
    def _split_space(self, splitted):
        """Like _split_helper, but for whitespace splits where the gap
        between two pieces can be any number of spaces."""
        result_list = []
        last_idx = 0
        first_idx = 0
        sep_len = 0
        for s in splitted:
            last_idx = first_idx + len(s)
            item = self[first_idx:last_idx]
            result_list.append(item)
            # Measure the run of spaces following this piece.
            # NOTE(review): only ' ' is stripped here, while str.split(None)
            # also splits on tabs/newlines -- confirm inputs are space-only.
            v = str(self[last_idx:])
            sep_len = len(v) - len(v.lstrip(' '))
            first_idx = last_idx + sep_len
        return result_list
    def rsplit(self, sep=None, maxsplit=-1):
        """str.rsplit returning taint-carrying slices of self."""
        splitted = super().rsplit(sep, maxsplit)
        if not sep:
            return self._split_space(splitted)
        return self._split_helper(sep, splitted)
    def split(self, sep=None, maxsplit=-1):
        """str.split returning taint-carrying slices of self."""
        splitted = super().split(sep, maxsplit)
        if not sep:
            return self._split_space(splitted)
        return self._split_helper(sep, splitted)
my_str = tstr('ab cdef ghij kl')
ab, cdef, ghij, kl = my_str.rsplit(sep=' ')
print(ab._taint, cdef._taint, ghij._taint, kl._taint)
my_str = tstr('ab cdef ghij kl', taint=100)
ab, cdef, ghij, kl = my_str.rsplit()
print(ab._taint, cdef._taint, ghij._taint, kl._taint)
my_str = tstr('ab cdef ghij kl', taint=list(range(0, 15)))
ab, cdef, ghij, kl = my_str.split(sep=' ')
print(ab._taint, cdef._taint, kl._taint)
my_str = tstr('ab cdef ghij kl', taint=list(range(0, 20)))
ab, cdef, ghij, kl = my_str.split()
print(ab._taint, cdef._taint, kl._taint)
```
### Strip
```
class tstr(tstr):
    def strip(self, cl=None):
        """Strip characters in `cl` (whitespace if None) from both ends,
        preserving taint on the kept middle."""
        return self.lstrip(cl).rstrip(cl)
    def lstrip(self, cl=None):
        """Strip from the left; the kept suffix retains its taint."""
        res = super().lstrip(cl)
        # The result is always a suffix of self, so its start index is
        # determined by the lengths.  (The original used self.find(res),
        # which returns 0 for an empty result and thus wrongly returned
        # the *whole* string when everything was stripped.)
        return self[len(self) - len(res):]
    def rstrip(self, cl=None):
        """Strip from the right; the kept prefix retains its taint."""
        res = super().rstrip(cl)
        return self[0:len(res)]
my_str1 = tstr(" abc ")
v = my_str1.strip()
v, v._taint
my_str1 = tstr(" abc ")
v = my_str1.lstrip()
v, v._taint
my_str1 = tstr(" abc ")
v = my_str1.rstrip()
v, v._taint
```
### Expand Tabs
```
class tstr(tstr):
    def expandtabs(self, n=8):
        """Expand tabs to spaces like str.expandtabs, tainting each
        inserted space with the taint of the tab it replaced.

        NOTE(review): like the original, this computes columns from the
        running output length, so strings containing newlines (where
        str.expandtabs resets the column) will trip the assert -- confirm
        inputs are single-line.
        """
        parts = self.split('\t')
        res = super().expandtabs(n)
        all_parts = []
        pos = 0  # index of the next character in the original string
        for i, p in enumerate(parts):
            all_parts.extend(p._taint)
            pos += len(p)
            if i < len(parts) - 1:
                # A tab pads to the next multiple of n; when the column is
                # already aligned it expands to n full spaces.  (The
                # original used len(all_parts) % n -- the column itself,
                # not the padding -- and indexed p._taint[-1], which
                # crashes on an empty leading part such as "\tab".)
                pad = n - len(all_parts) % n
                all_parts.extend([self._taint[pos]] * pad)
                pos += 1  # skip over the tab itself
        assert len(all_parts) == len(res)
        return self.create(res, all_parts)
my_tstr = tstr("ab\tcd")
my_str = str("ab\tcd")
v1 = my_str.expandtabs(4)
v2 = my_tstr.expandtabs(4)
print(len(v1), repr(my_tstr), repr(v2), v2._taint)
class tstr(tstr):
    def join(self, iterable):
        """Join the items of `iterable` with self as the separator,
        concatenating taint maps alongside the characters."""
        mystr = ''
        mytaint = []
        sep_taint = self._taint
        # Materialize once: `iterable` may be a one-shot generator.  The
        # original consumed it here and then passed the *exhausted*
        # iterator to str.join, yielding an empty result (and a failed
        # length assert) for generators.
        lst = list(iterable)
        for i, s in enumerate(lst):
            staint = s._taint if type(s) is tstr else [-1] * len(s)
            mytaint.extend(staint)
            mystr += str(s)
            if i < len(lst) - 1:
                mytaint.extend(sep_taint)
                mystr += str(self)
        res = super().join(lst)
        assert len(res) == len(mystr)
        return self.create(res, mytaint)
my_str = tstr("ab cd", taint=100)
(v1, v2), v3 = my_str.split(), 'ef'
print(v1._taint, v2._taint)
v4 = tstr('').join([v2,v3,v1])
print(v4, v4._taint)
my_str = tstr("ab cd", taint=100)
(v1, v2), v3 = my_str.split(), 'ef'
print(v1._taint, v2._taint)
v4 = tstr(',').join([v2,v3,v1])
print(v4, v4._taint)
```
### Partitions
```
class tstr(tstr):
    def partition(self, sep):
        """Like str.partition, slicing the taint map to match the parts."""
        before, match, after = super().partition(sep)
        cut1 = len(before)
        cut2 = cut1 + len(match)
        return (self.create(before, self._taint[:cut1]),
                self.create(match, self._taint[cut1:cut2]),
                self.create(after, self._taint[cut2:]))
    def rpartition(self, sep):
        """Like str.rpartition, slicing the taint map to match the parts."""
        before, match, after = super().rpartition(sep)
        cut1 = len(before)
        cut2 = cut1 + len(match)
        return (self.create(before, self._taint[:cut1]),
                self.create(match, self._taint[cut1:cut2]),
                self.create(after, self._taint[cut2:]))
```
### Justify
```
class tstr(tstr):
    def ljust(self, width, fillchar=' '):
        """Left-justify to `width`.  str.ljust appends the fill on the
        RIGHT, so the pad taint must follow the original taint.  (The
        original prepended it -- and rjust appended -- leaving both taint
        maps misaligned with the padded string.)"""
        res = super().ljust(width, fillchar)
        pad = len(res) - len(self)
        fill_taint = fillchar.x() if type(fillchar) is tstr else -1
        return self.create(res, self._taint + [fill_taint] * pad)
    def rjust(self, width, fillchar=' '):
        """Right-justify to `width`; the fill is prepended on the LEFT,
        so the pad taint precedes the original taint."""
        res = super().rjust(width, fillchar)
        pad = len(res) - len(self)
        fill_taint = fillchar.x() if type(fillchar) is tstr else -1
        return self.create(res, [fill_taint] * pad + self._taint)
```
### String methods that do not change taint
```
def make_str_wrapper_eq_taint(fun):
    """Wrap a str method whose result is the same length as the receiver,
    so the result simply inherits the receiver's taint unchanged."""
    def proxy(*args, **kwargs):
        receiver = args[0]
        result = fun(*args, **kwargs)
        return receiver.create(result, receiver._taint)
    return proxy
for name, fn in inspect.getmembers(str, callable):
if name in ['swapcase', 'upper', 'lower', 'capitalize', 'title']:
setattr(tstr, name, make_str_wrapper_eq_taint(fn))
a = tstr('aa', taint=100).upper()
a, a._taint
```
### General wrappers
These are not strictly needed for operation, but can be useful for tracing
```
def make_str_wrapper(fun):
    """Transparent pass-through wrapper around a str method; exists as a
    hook point for tracing calls without altering their results."""
    def proxy(*args, **kwargs):
        return fun(*args, **kwargs)
    return proxy
import types
tstr_members = [name for name, fn in inspect.getmembers(tstr,callable)
if type(fn) == types.FunctionType and fn.__qualname__.startswith('tstr')]
for name, fn in inspect.getmembers(str, callable):
if name not in set(['__class__', '__new__', '__str__', '__init__',
'__repr__','__getattribute__']) | set(tstr_members):
setattr(tstr, name, make_str_wrapper(fn))
```
### Methods yet to be translated
These methods generate strings from other strings. However, we do not have the right implementations for any of these. Hence these are marked as dangerous until we can generate the right translations.
```
def make_str_abort_wrapper(fun):
    """Return a stub for str method `fun` that raises instead of running,
    marking operations with no taint-preserving translation yet."""
    def proxy(*args, **kwargs):
        # NOTE(review): the bare name `TaintException` must be bound at
        # module level for this to raise the intended error (above it is
        # nested inside tstr); otherwise this raises NameError -- confirm.
        raise TaintException('%s Not implemented in TSTR' % fun.__name__)
    return proxy
for name, fn in inspect.getmembers(str, callable):
if name in ['__format__', '__rmod__', '__mod__', 'format_map', 'format',
'__mul__','__rmul__','center','zfill', 'decode', 'encode', 'splitlines']:
setattr(tstr, name, make_str_abort_wrapper(fn))
```
## EOF Tracker
Sometimes we want to know where an empty string came from. That is, if an empty string is the result of operations on a tainted string, we want to know the best guess as to what the taint index of the preceding character is.
### Slice
For detecting EOF, we need to carry the cursor. The main idea is the cursor indicates the taint of the character in front of it.
```
class eoftstr(tstr):
    """tstr variant that, for empty slice results, records a cursor
    (_tcursor): a best guess at the taint of the position just past the
    slice, so we can tell "where EOF came from"."""
    def create(self, res, taint):
        return eoftstr(res, taint, self)
    def __getitem__(self, key):
        def get_interval(key):
            # Fill in a slice's (start, stop) defaults.
            return ((0 if key.start is None else key.start),
                    (len(res) if key.stop is None else key.stop))
        res = super().__getitem__(key)
        if type(key) == int:
            # Negative indices wrap around as in plain str.
            key = len(self) + key if key < 0 else key
            return self.create(res, [self._taint[key]])
        elif type(key) == slice:
            if res:
                return self.create(res, self._taint[key])
            # Result is an empty string
            t = self.create(res, self._taint[key])
            key_start, key_stop = get_interval(key)
            cursor = 0
            if key_start < len(self):
                # Empty result with start inside the string implies
                # stop <= start, so stop is inside the string too.
                assert key_stop < len(self)
                cursor = self._taint[key_stop]
            else:
                if len(self) == 0:
                    # if the original string was empty, we assume that any
                    # empty string produced from it should carry the same taint.
                    cursor = self.x()
                else:
                    # Key start was not in the string. We can reply only
                    # if the key start was just outside the string, in
                    # which case, we guess.
                    if key_start != len(self):
                        raise taint.TaintException('Can\'t guess the taint')
                    cursor = self._taint[len(self) - 1] + 1
            # _tcursor gets created only for empty strings.
            t._tcursor = cursor
            return t
        else:
            assert False
class eoftstr(eoftstr):
    def get_mapped_char_idx(self, i):
        """Map character index `i` to its origin; empty strings fall back
        to the cursor recorded when the empty slice was produced."""
        if not self._taint:
            # Only empty strings carry a _tcursor, and only index 0 is
            # meaningful for them.
            if i != 0:
                raise taint.TaintException('Invalid request idx')
            return self._tcursor
        return self._taint[i]
t = eoftstr('hello world')
print(repr(t[11:]))
print(t[11:].x(), t[11:]._taint)
```
## A Comparison Tracker
Sometimes, we also want to know what each character in an input was compared to.
### Operators
```
class Op(enum.Enum):
    """Kinds of comparison operations recorded during tainted execution."""
    LT = 0
    LE = enum.auto()
    EQ = enum.auto()
    NE = enum.auto()
    GT = enum.auto()
    GE = enum.auto()
    IN = enum.auto()
    NOT_IN = enum.auto()
    IS = enum.auto()
    IS_NOT = enum.auto()
    FIND_STR = enum.auto()
# Executable semantics for the subset of operators that get traced.
COMPARE_OPERATORS = {
    Op.EQ: lambda x, y: x == y,
    Op.NE: lambda x, y: x != y,
    Op.IN: lambda x, y: x in y,
    Op.NOT_IN: lambda x, y: x not in y,
    Op.FIND_STR: lambda x, y: x.find(y)
}
# Global log of recorded Instr objects (callers reset it as needed).
Comparisons = []
```
### Instructions
```
class Instr:
    """One recorded comparison: operator kind `op` between operands
    `opA` and `opB` (as passed at comparison time)."""
    def __init__(self, o, a, b):
        self.opA = a
        self.opB = b
        self.op = o
    def o(self):
        """Short mnemonic for the operator ('?' for kinds not rendered)."""
        if self.op == Op.EQ:
            return 'eq'
        elif self.op == Op.NE:
            return 'ne'
        else:
            return '?'
    def opS(self):
        """Return the operands ordered so a tainted operand comes first
        whenever opA itself carries no taint."""
        if not self.opA.has_taint() and type(self.opB) is tstr:
            return (self.opB, self.opA)
        else:
            return (self.opA, self.opB)
    @property
    def op_A(self):
        # Possibly-swapped first operand (see opS).
        return self.opS()[0]
    @property
    def op_B(self):
        # Possibly-swapped second operand (see opS).
        return self.opS()[1]
    def __repr__(self):
        return "%s,%s,%s" % (self.o(), repr(self.opA), repr(self.opB))
    def __str__(self):
        # Renders the *observed* relation between the operand values,
        # not the requested operator -- which is why the EQ and NE
        # branches (and the IN and NOT_IN branches) are identical.
        if self.op == Op.EQ:
            if str(self.opA) == str(self.opB):
                return "%s = %s" % (repr(self.opA), repr(self.opB))
            else:
                return "%s != %s" % (repr(self.opA), repr(self.opB))
        elif self.op == Op.NE:
            if str(self.opA) == str(self.opB):
                return "%s = %s" % (repr(self.opA), repr(self.opB))
            else:
                return "%s != %s" % (repr(self.opA), repr(self.opB))
        elif self.op == Op.IN:
            if str(self.opA) in str(self.opB):
                return "%s in %s" % (repr(self.opA), repr(self.opB))
            else:
                return "%s not in %s" % (repr(self.opA), repr(self.opB))
        elif self.op == Op.NOT_IN:
            if str(self.opA) in str(self.opB):
                return "%s in %s" % (repr(self.opA), repr(self.opB))
            else:
                return "%s not in %s" % (repr(self.opA), repr(self.opB))
        else:
            assert False
```
### Equivalence
```
class ctstr(eoftstr):
    def create(self, res, taint):
        """Derive a new ctstr that shares this string's comparison log."""
        derived = ctstr(res, taint, self)
        derived.comparisons = self.comparisons
        return derived
    def with_comparisons(self, comparisons):
        """Attach a (shared) comparison log; returns self for chaining."""
        self.comparisons = comparisons
        return self
class ctstr(ctstr):
    def __eq__(self, other):
        """Equality that logs per-character comparisons.

        Multi-character operands are compared recursively one head
        character at a time so every individual character comparison
        lands in self.comparisons; empty-vs-nonempty cases log the first
        character of the longer operand as the mismatch witness.
        """
        if len(self) == 0 and len(other) == 0:
            self.comparisons.append(Instr(Op.EQ, self, other))
            return True
        elif len(self) == 0:
            self.comparisons.append(Instr(Op.EQ, self, other[0]))
            return False
        elif len(other) == 0:
            self.comparisons.append(Instr(Op.EQ, self[0], other))
            return False
        elif len(self) == 1 and len(other) == 1:
            self.comparisons.append(Instr(Op.EQ, self, other))
            return super().__eq__(other)
        else:
            # Compare the head characters, then recurse on the tails.
            if not self[0] == other[0]:
                return False
            return self[1:] == other[1:]
t = ctstr('hello world', taint=100).with_comparisons([])
print(t.comparisons)
t == 'hello'
for c in t.comparisons:
print(repr(c))
class ctstr(ctstr):
    def __ne__(self, other):
        """Inequality defined via __eq__ so the comparison is logged once."""
        result = self.__eq__(other)
        return not result
t = ctstr('hello', taint=100).with_comparisons([])
print(t.comparisons)
t != 'bye'
for c in t.comparisons:
print(repr(c))
class ctstr(ctstr):
    def __contains__(self, other):
        """Log the membership test, then defer to the str implementation."""
        record = Instr(Op.IN, self, other)
        self.comparisons.append(record)
        return super().__contains__(other)
class ctstr(ctstr):
    def find(self, sub, start=None, end=None):
        """Log the search (as an IN comparison over the searched range),
        then defer to str.find.

        The original only assigned start_val/end_val inside the None
        checks, so passing an explicit start or end raised NameError.
        """
        start_val = 0 if start is None else start
        end_val = len(self) if end is None else end
        self.comparisons.append(Instr(Op.IN, self[start_val:end_val], sub))
        # str.find accepts None for start/end, treating them as defaults.
        return super().find(sub, start, end)
```
## Lessons Learned
* One can track the information flow from the input to the internals of a system.
## Next Steps
_Link to subsequent chapters (notebooks) here:_
## Background
\cite{Lin2008}
## Exercises
_Close the chapter with a few exercises such that people have things to do. To make the solutions hidden (to be revealed by the user), have them start with_
```markdown
**Solution.**
```
_Your solution can then extend up to the next title (i.e., any markdown cell starting with `#`)._
_Running `make metadata` will automatically add metadata to the cells such that the cells will be hidden by default, and can be uncovered by the user. The button will be introduced above the solution._
### Exercise 1: _Title_
_Text of the exercise_
```
# Some code that is part of the exercise
pass
```
_Some more text for the exercise_
**Solution.** _Some text for the solution_
```
# Some code for the solution
2 + 2
```
_Some more text for the solution_
### Exercise 2: _Title_
_Text of the exercise_
**Solution.** _Solution for the exercise_
|
github_jupyter
|
## Face and Facial Keypoint detection
After you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.
1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).
2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.
3. Use your trained model to detect facial keypoints on the image.
---
In the next python cell we load in required libraries for this section of the project.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
```
#### Select an image
Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
```
import cv2
# load in color image for face detection
image = cv2.imread('images/michelle_detected.png')
# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plot the image
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
```
## Detect all faces in an image
Next, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.
In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.
An example of face detection on a variety of images is shown below.
<img src='images/haar_cascade_ex.png' width=80% height=80%/>
```
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_eye.xml')
# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
faces = face_cascade.detectMultiScale(image, 1.2, 3)
# make a copy of the original image to plot detections on
image_with_detections = image.copy()
# from color to gray
gray = cv2.cvtColor(image_with_detections, cv2.COLOR_BGR2GRAY)
# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
# draw a rectangle around each detected face
# you may also need to change the width of the rectangle drawn depending on image resolution
cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)
roi_gray = gray[y:y+h, x:x+w]
roi_color = image_with_detections[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray, 1.9, 3)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
```
## Loading in a trained model
Once you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.
First, load your best model by its filename.
```
import torch
from models import Net
net = Net()
#keypoints_model_BachSize32_Relu_NaimishNet.pt
## TODO: load the best saved model parameters (by your path name)
## You'll need to un-comment the line below and add the correct name for *your* saved model
net.load_state_dict(torch.load('saved_models/keypoints_model_BachSize64_Relu_NaimishNet.pt'))
## print out your net and prepare it for testing (uncomment the line below)
net.eval()
```
## Keypoint detection
Now, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.
### TODO: Transform each detected face into an input Tensor
You'll need to perform the following steps for each detected face:
1. Convert the face from RGB to grayscale
2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
4. Reshape the numpy image into a torch image.
**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.
You may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.
### TODO: Detect and display the predicted keypoints
After each face has been appropriately converted into an input Tensor for your network to see as input, you can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:
<img src='images/michelle_detected.png' width=30% height=30%/>
```
# Run the trained keypoint network on every Haar-detected face.
image_copy = np.copy(image)
# loop over the detected faces from your haar cascade
for (x,y,w,h) in faces:
    # Select the region of interest that is the face in the image
    #NOTE: The faces in the datasets in the training set are not as zoomed in as the ones Haar Cascade detects.
    # That s why I HAVE to grab more area around the detected faces to make sure the entire head is present in the
    # input image.
    # NOTE(review): a margin of 100*w exceeds any image size, so the clamps
    # below effectively select the whole image; a fractional margin such as
    # int(w*0.5) was probably intended -- confirm.
    margin = int(w*100)
    roi = image_copy[max(y-margin,0):min(y+h+margin,image.shape[0]),
                     max(x-margin,0):min(x+w+margin,image.shape[1])]
    ## TODO: Convert the face region from RGB to grayscale
    roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
    ## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
    roi = roi/255.0
    ## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
    roi_resize = cv2.resize(roi, (224, 224))
    print('Original size ', roi.shape, 'Resize image ', roi_resize.shape)
    ## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
    roi = torch.from_numpy(roi_resize.reshape(1, 1, 224, 224))
    roi = roi.type(torch.FloatTensor)
    ## TODO: Make facial keypoint predictions using your loaded, trained network
    pred = net(roi)
    pred = pred.view(68, -1)
    pred_key = pred.data.numpy()
    # NOTE(review): un-normalization uses scale 50 and offset 0; this must
    # match the normalization applied in training (see data_load.py) -- confirm.
    pred_key = pred_key*50 + 0
    ## TODO: Display each detected face and the corresponding keypoints
    plt.figure(figsize = (6,6))
    plt.imshow(roi_resize, cmap = 'gray')
    plt.scatter(pred_key[:, 0], pred_key[:, 1])
    plt.show()
```
**Observation** The predicted keypoints seem to be doing an okay job! The misfitting of some keypoints is due to the fact that the training of the model is not yet optimal at 7 iterations, with an average loss of 0.03. I could improve it by running a batch size of 64 and increasing the number of epochs to 10 or 12.
**Questions** How do we tune the model to identify keypoints on the around the nose and the mouth?
|
github_jupyter
|
## Reinforcement Learning Tutorial -1: Q Learning
#### MD Muhaimin Rahman
sezan92[at]gmail[dot]com
Q learning , can be said one of the most famous -and kind of intuitive- of all Reinforcement learning algorithms. In fact ,the recent all algorithms using Deep learning , are based on the Q learning algorithms. So, to work on recent algorithms, one must have a good idea on Q learning.
### Intuition
First , start with an Intuition. Lets assume , you are in a maze

Okay okay! I admit, it is not a maze. just a house with 5 rooms. And I got it from, this [link](http://mnemstudio.org/path-finding-q-learning-tutorial.htm) . Your goal is to get out of this place, no matter where you are. But you dont know - atleast pretend to - how to get there! After wondering about the map, you stumbled upon a mysterious letter with a lot of numbers in the room.

The matrix has 6 columns and 6 rows. What you will have to do , is to go to the room with highest value. Suppose, you are in room number 2. Then , you will have to move to room number 3 . Then you get out! Look at the picture again! You can try with every state, you are guaranteed to get out of the house, using this matrix! .
In the world of RL, every room is called a ```state```, movement from one state to another is called ```action```. Our game has a very ***JARGONISH*** name, ```Markov Decision Process``` . Maybe they invented this name to freak everybody out. But in short, this process means, your action from current state never depends on previous state. Practically such processes are impossible, but it helps to simplify problems
Now the question is , how can we get this ?
- First , initialize the matrix as Zeros

- Then we will apply the Q learning update equation
\begin{equation}
Q(s_t,a) = Q(s_t,a) + \alpha (Q'(s_{t+1},a)-Q(s_t,a))
\end{equation}
Here, $s_t$ is the state at time $t$, $s_{t+1}$ is the next state, $a$ is the action, and $r$ is the reward we get — if we can get one — moving from one state to another. $Q(s_t,a_t)$ means the Q matrix value for state $s_t$ and action $a_t$, and $Q'(s_{t+1},a)$ means the target Q value for state $s_{t+1}$ and the ***BEST ACTION*** for the next state. Here $\alpha$ is the learning rate.
Before we proceed, let me ask you, does this equation ring a bell ? I mean, haven't you seen a similar equation ?
Yeah, you got it , it is similar to Gradient descent Equation. If you dont know Gradient descent equation, I am sorry, you wont be able to get the future tutorials. So I suggest you get the basic and working Idea of Neural Networks and Gradient descent algorithms
Now ,How can we get $Q'(s_{t+1},a)$ ?
Using Bellman Equation
\begin{equation}
Q'(s_{t+1},a) = r+ \gamma max(Q(s_{t+1},a_t))
\end{equation}
It means the target $Q$ value for every state and action is the sum of reward with that state and action, and the maximum $Q$ value of next state multiplied with discount factor $\gamma$
***Where did this equation come from?***
Okay chill! let's start from the game again ! So suppose , every room has reward, $R_t,R_{t+1},R_{t+2},R_{t+3},R_{t+4},R_{t+5}$.. So obviously , the value of a state will be the expected cumulative reward
\begin{equation}
Q(s,a) = R_t + R_{t+1} + R_{t+2}+ R_{t+3}+ R_{t+4}+ R_{t+5}
\end{equation}
Suppose, someone comes here, and says, He wants give more weight to sooner rewards than later rewards. What should we do ? We will introduce, discount factor, $\gamma$ , which is $0<\gamma<1$ ..
\begin{equation}
Q(s,a) = R_t + \gamma R_{t+1} + \gamma^2 R_{t+2}+ \gamma^3 R_{t+3}+ \gamma^4 R_{t+4}+ \gamma^5 R_{t+5}
\end{equation}
\begin{equation}
Q(s,a) = R_t + \gamma [R_{t+1} + \gamma R_{t+2}+ \gamma^2 R_{t+3}+ \gamma^3 R_{t+4}+ \gamma^4 R_{t+5}]
\end{equation}
This equation can be rewritten as
\begin{equation}
Q(s_t,a) = R_t+\gamma Q(s_{t+1},a_{t+1})
\end{equation}
Suppose, we have some finite discrete actions in our hand, and each resulting $Q$ values of its own, what we will do ? We will try to take the action of maximum $Q$ value!
\begin{equation}
Q(s_t,a) = R_t+\gamma max(Q(s_{t+1},a))
\end{equation}
### Coding!
Let's start coding!
I will be using the ***Open AI*** gym environment. The introduction and installation of the environments are given [here](https://github.com/openai/gym)
```
import gym
import numpy as np
```
Initialization of Environments
I will use the Mountaincar environment by Open AI gym. It is a classic problem invented from 90s. I intend to use this environment for all algorithms .

In this game, your task is to get the car reach that green flag. For every step you will get -1 .So , your job is to reach the goal position with minimum steps. Maximum steps limit is 200.
```
env = gym.make('MountainCar-v0')
s = env.reset() #Reset the car
```
```env.reset()``` gives the initial state. State is the position and velocity of the car in a given time
This game's actions can be 0,1,2 . 0 for left, 1 for doing nothing, 2 for right
```env.step(action)``` returns four arguments
- next state
- reward
- terminal , it means if game is over or not
- info , for now , it is unnecessary
Hyper Parameters
- ```legal_actions``` number of actions
- ```actions``` the actions list
- ```gamma``` discount factor $\gamma$
- ```lr``` learning rate $\alpha$
- ```num_episodes``` number of episodes
- ```epsilon``` epsilon , to choose random actions
- ```epsilon_decay``` epsilon decay rate
```
legal_actions=env.action_space.n
actions = [0,1,2]
gamma =0.99
lr =0.5
num_episodes =30000
epsilon =0.5
epsilon_decay =0.99
```
Codeblock to discretize the state. Because ***Q learning*** doesnt work on continuous state space, we have to convert states into 10 discrete states
```
# Discretization of the continuous MountainCar state (position, velocity)
# into N_BINS bins per dimension, so the state can key the tabular Q dict.
N_BINS = [10,10]
# NOTE(review): the names look swapped -- MIN_VALUES holds the larger bounds,
# so np.linspace produces *descending* bin edges.  np.digitize accepts
# monotonically decreasing bins, so the discretization is still consistent;
# confirm the intent before renaming.
MIN_VALUES = [0.6,0.07]
MAX_VALUES = [-1.2,-.07]
BINS = [np.linspace(MIN_VALUES[i], MAX_VALUES[i], N_BINS[i]) for i in range(len(N_BINS))]
rList =[]
def discretize(obs):
    """Map a continuous observation to a tuple of per-dimension bin indices."""
    return tuple([int(np.digitize(obs[i], BINS[i])) for i in range(len(N_BINS))])
```
Q Learning CLass
```
class QL:
    """Tabular Q-learning agent.

    Q:             dict mapping (state, action) -> Q value
    policy:        dict mapping state -> current greedy action
    legal_actions: number of available actions
    actions:       list of the action values themselves
    gamma:         discount factor
    lr:            learning rate (alpha)
    """
    def __init__(self, Q, policy,
                 legal_actions,
                 actions,
                 gamma,
                 lr):
        self.Q = Q  # Q matrix
        self.policy = policy
        self.legal_actions = legal_actions
        self.actions = actions
        self.gamma = gamma
        self.lr = lr
    def q_value(self, s, a):
        """Return Q(s, a), lazily initializing unseen pairs to 0.

        (The original had a dead `self.Q[(s, a)]` expression where a
        return was presumably intended; net behavior was the same, this
        removes the no-op lookup.)
        """
        if (s, a) not in self.Q:
            self.Q[(s, a)] = 0
        return self.Q[(s, a)]
    def action(self, s):
        """Return the greedy action for state s, initializing unseen
        states with a uniformly random action."""
        if s not in self.policy:
            self.policy[s] = np.random.randint(0, self.legal_actions)
        return self.policy[s]
    def learn(self, s, a, s1, r, done):
        """One Q-learning update for transition (s, a) -> s1 with reward r.

        Terminal transitions (done=True) bootstrap from the reward alone;
        otherwise the target adds the discounted best next-state value.
        The greedy policy entry for s is refreshed after the update.
        """
        if not done:
            target = r + self.gamma * max(
                [self.q_value(s1, a1) for a1 in self.actions])
            self.Q[(s, a)] = self.q_value(s, a) + self.lr * (target - self.q_value(s, a))
        else:
            self.Q[(s, a)] = self.q_value(s, a) + self.lr * (r - self.q_value(s, a))
        self.q_values = [self.q_value(s, a1) for a1 in self.actions]
        self.policy[s] = self.actions[self.q_values.index(max(self.q_values))]
```
Q Matrix Parameters
- ```Q``` - Q table. We will use dictionary data structure.
- ```policy``` - policy table , it will give us the action for given state
```
Q = {}
policy ={}
legal_actions =3
QL = QL(Q,policy,legal_actions,actions,gamma,lr)
```
Training
### Pseudocode
- get initial state $s_{raw}$
- discretize initial state , $s \gets discretize(s_{raw})$
- set total reward to zero , $r_{total} \gets 0$
- set terminal $d$ to false , $d \gets False$
- for each step
- - choose action based on epsilon greedy policy
- - get next state $s1_{raw} $, reward , $r$, terminal $d$ doing the action
- - $s1 \gets discretize(s1_{raw}) $
- - $r_{total} \gets r_{total}+r$
- - if $d == True $
- - - if $r_{total}<-199$
- - - - then give $r \gets -100$
- - - - Update $Q$ table
- - - - break
- - else
- - - Update $Q$ table
- - - break
- - $s \gets s1$
```
# Main Q-learning training loop over MountainCar episodes (200 steps max).
for i in range(num_episodes):
    s_raw= env.reset() #initialize
    s = discretize(s_raw) #discretize the state
    rAll =0 #total reward
    d = False
    j = 0
    for j in range(200):
        #epsilon greedy. to choose random actions initially when Q is all zeros
        if np.random.random()< epsilon:
            a = np.random.randint(0,legal_actions)
            # Decay exploration only when a random action was actually taken.
            epsilon = epsilon*epsilon_decay
        else:
            a =QL.action(s)
        s1_raw,r,d,_ = env.step(a)
        rAll=rAll+r
        s1 = discretize(s1_raw)
        # NOTE(review): rendering every step slows training dramatically;
        # consider rendering only every N episodes.
        env.render()
        if d:
            if rAll<-199:
                r =-100 #punishment, if the game finishes before reaching the goal , we can give punishment
                QL.learn(s,a,s1,r,d)
                print("Failed! Reward %d"%rAll)
            elif rAll>-199:
                # NOTE(review): on success the terminal transition is never
                # passed to QL.learn, and rAll == -199 prints nothing at
                # all -- confirm whether this is intended.
                print("Passed! Reward %d"%rAll)
            break
        QL.learn(s,a,s1,r,d)
        if j==199:
            print("Reward %d after full episode"%(rAll))
        s = s1
env.close()
```
|
github_jupyter
|
```
import sys; sys.path.insert(0, '..')
import spot
spot.setup()
import buddy
from spot.jupyter import display_inline
from decimal import Decimal
import decimal
from fimdp.core import ConsMDP
from fimdp.energy_solvers import BasicES
from fimdp.labeled import LabeledConsMDP
from fimdp.objectives import BUCHI
```
# Product of lCMDP and DBA with the link to LTL
The goal of this notebook is, given an LTL formula over the set $AP$ of atomic propositions and a consumption MDP with states labeled by subsets of $AP$, to decide if there is a strategy for the MDP such that the LTL formula is satisfied with probability 1. We illustrate the whole concept with a running example in which we want to enforce visiting 2 states infinitely often.
Let's first create a CMDP, we will use the following function for easier definitions of actions using uniform distributions.
```
def uniform(dests):
    """Create a (near-)uniform distribution over the given destinations.

    Probabilities are Decimals with two significant digits; the rounding
    remainder (100 mod count, in hundredths) is added to the last
    destination so the distribution sums exactly to 1.

    dests: non-empty sequence of states

    Uses a local decimal context so the caller's global precision is not
    clobbered (the original mutated decimal.getcontext() as a side effect).
    """
    count = len(dests)
    mod = 100 % count
    with decimal.localcontext() as ctx:
        ctx.prec = 2
        prob = Decimal(1)/Decimal(count)
        dist = {i: prob for i in dests}
        last = dests[-1]
        dist[last] = dist[last] + Decimal("0.01")*mod
    return dist
```
In the following code, we verify that we can achieve the Büchi objective with target set `{1,2}` with capacity `5` and that this is not enough to visit the state `1`. What we actually want is to visit **both** of these states infinitely often, which we solve later.
```
mdp = ConsMDP()
mdp.new_states(4)
mdp.set_reload(3)
mdp.add_action(0, uniform([1,2]), "α", 3)
mdp.add_action(0, uniform([2,3]), "β", 1)
mdp.add_action(1, uniform([3]), "r", 3)
mdp.add_action(2, uniform([3]), "r", 1)
mdp.add_action(3, uniform([0]), "s", 3)
solver = BasicES(mdp, 5, [1,2])
solver.get_min_levels(BUCHI)
solver
```
The corresponding strategy confirms that the state 1 won't be visited by the strategy as there is no occurence of the action `α`.
```
solver.get_selector(4, True)
```
## LTL and Büchi automata
Our goal of visiting both states `1` \& `2` infinitely often can be expressed by the LTL formula $\mathsf{G}\mathsf{F} s_1 \land \mathsf{G}\mathsf{F}s_2$ (or in the box-diamond notation: $\Box \diamond s_1 \land \Box \diamond s_2$) where the atomic proposition $s_1$ corresponds to visiting state `1` and the atomic proposition $s_2$ corresponds to visiting state `2`.
This formula can be expressed by a **deterministic** Büchi automaton (DBA). We use Spot to make the translation for us. The option `BA` forces Spot to deliver a state-based Büchi automaton (default is transition-based generalized Büchi automaton), the option `deterministic` indicates that we prefer deterministic automata, and `complete` asks for an automaton with a complete transition function. If you are not sure that your formula can be translated to a DBA, consult the [hierarchy of LTL](https://spot.lrde.epita.fr/hierarchy.html). It is also good practice to make sure by running
```python
aut.is_deterministic()
```
```
f = spot.formula("GF s1 & GF s2")
aut = spot.translate(f, "BA", "deterministic", "complete")
display(aut, aut.is_deterministic())
```
The produced automaton can be used in parallel with our input MDP; this is achieved by a _product_ (alternatively _parallel synchonous composition_) of this automaton an the input MDP. But we need to label states of the MDP with the atomic propositions `s₁` and `s₂`.
## Labeled CMDP
We create a copy of our CMDP and label the states `1` and `2` with the corresponding atomic propositions using the function
```python
LabeledConsMDP.state_labels(labels)
```
where `labels` is a list (of length equal to number of states) of sets of ints; the ints are indices to the list `AP` given in the constructor of `LabeledConsMDP`.
```
lmdp = LabeledConsMDP(AP=["s1","s2"], mdp=mdp)
lmdp.state_labels = [set(), {0}, {1}, set()]
display(lmdp, lmdp.state_labels)
```
## Product of labeled CMDP and DBA
In the following, we explain and show the (simplified) implementation of `LabeledConsMDP.product_with_dba(self, dba)`.
The states of the product are tuples `(ms,as)` where `ms` stands for a state of the MDP and `as` stands for a states of the automaton. Let's call the set of states of the MDP $S$ and the set of states of the DBA $Q$; further, the labeling function of the labeled MDP is $\lambda \colon S \to 2^{AP}$ and the transition function of the DBA as $\delta \colon Q \times 2^{AP} \to Q$. For each action `α` and each successor `ms'` for this action from state `ms`, the action `α` of `(ms,as)` has an `α` successor (with the same probability) `(ms', as')` where `as'` is equal to $\delta(as, \lambda(ms'))$.
All tuples that contain a reload state of the mdp, are again reloading. All tuples with an accepting state of the automaton will become targets. The following function `product(lmdp, aut)` returns a CMDP that is the product of `lmdp` and `aut` and a list of target states.
#### Treatment of atomic propositions
Labels (sets of atomic propositions) are represented by sets of integers in LabeledConsMDP, while they are represented by _binary decission diagrams (BDD)_ in Spot. One BDD can actually represent a set of labels as it in fact represents a boolean function over AP. In our algorithm, we need to evaluate $\delta(as, \lambda(ms'))$, which is, we need to find an edge in the automaton whose label (guard) is satisfied by the label of `ms'`. We do this in 2 steps:
1. Create a BDD representing exactly the desired label $\lambda(md')$. This is implemented in
```python
def get_bdd_for_label(label)
```
2. Perform logical and on this BDD and BDD of all outgoing edges of the current state of the automaton. For all but one edge this operation returns false. We choose the one that is not false (it is, in fact, equal to $\lambda(ms')$).
```
def product(lmdp, aut):
    """Build the product CMDP of a labeled CMDP `lmdp` and a Büchi automaton `aut`.

    Returns a pair `(result, targets)`: `result` is a `ConsMDP` whose states
    correspond to pairs (mdp state, automaton state), and `targets` lists the
    product states whose automaton component is accepting.
    """
    #TODO check for correct type of mdp
    result = ConsMDP()
    num_ap = len(lmdp.AP)
    # Check the type of automaton and convert it into
    # complete DBA if needed
    if not aut.is_sba() or not spot.is_complete(aut):
        aut = aut.postprocess("BA", "complete")
    # This will store the list of Büchi states
    targets = []
    # This will be our state dictionary
    sdict = {}
    # The list of output states for which we have not yet
    # computed the successors. Items on this list are triplets
    # of the form `(mdps, auts, p)` where `mdps` is the state
    # number in the mdp, `auts` is the state number in the
    # automaton, and p is the state number in the output mdp.
    todo = []
    # Mapping of AP representation in MDP to repr. in automaton
    ap2bdd_var = {}
    aut_ap = aut.ap()
    for ap_i, ap in enumerate(lmdp.AP):
        if ap in aut_ap:
            ap2bdd_var[ap_i] = aut_ap.index(ap)

    # Given label in mdp, return corresponding BDD
    def get_bdd_for_label(mdp_label):
        # Conjoin a positive literal for every AP in the label and a
        # negative literal for every tracked AP not in it, so the BDD
        # describes exactly this one label.
        cond = buddy.bddtrue
        for ap_i in ap2bdd_var.keys():
            if ap_i in mdp_label:
                cond &= buddy.bdd_ithvar(ap2bdd_var[ap_i])
            else:
                cond -= buddy.bdd_ithvar(ap2bdd_var[ap_i])
        return cond

    # Transform a pair of state numbers (mdps, auts) into a state
    # number in the output mdp, creating a new state if needed.
    # Whenever a new state is created, we can add it to todo.
    def dst(mdps, auts):
        pair = (mdps, auts)
        p = sdict.get(pair)
        if p is None:
            # Reload states of the mdp stay reloading in the product.
            p = result.new_state(name=f"{mdps},{auts}",
                                 reload=lmdp.is_reload(mdps))
            sdict[pair] = p
            todo.append((mdps, auts, p))
            # Accepting automaton states become target states.
            if aut.state_is_accepting(auts):
                targets.append(p)
        return p

    # Get a successor state in automaton based on label
    def get_successor(aut_state, mdp_label):
        # The automaton was made complete above, so some edge always
        # matches; determinism makes that edge unique.
        for e in aut.out(aut_state):
            mdp_bdd = get_bdd_for_label(mdp_label)
            if mdp_bdd & e.cond != buddy.bddfalse:
                return e.dst

    # Initialization
    # For each state of mdp add a new initial state
    aut_i = aut.get_init_state_number()
    for mdp_s in range(lmdp.num_states):
        label = lmdp.state_labels[mdp_s]
        aut_s = get_successor(aut_i, label)
        dst(mdp_s, aut_s)

    # Build all states and edges in the product
    while todo:
        msrc, asrc, osrc = todo.pop()
        for a in lmdp.actions_for_state(msrc):
            # build new distribution
            odist = {}
            for mdst, prob in a.distr.items():
                # the automaton reacts to the label of the *successor*
                # mdp state, matching the definition of the product
                adst = get_successor(asrc, lmdp.state_labels[mdst])
                odst = dst(mdst, adst)
                odist[odst] = prob
            result.add_action(osrc, odist, a.label, a.cons)
    return result, targets
# Build the product and solve the Büchi objective with capacity 5.
p, T = product(lmdp, aut)
psolver = BasicES(p, 5, T)
psolver.get_min_levels(BUCHI, True)  # True -> recompute (see recompute=True below)
display_inline(psolver)
```
We can now see the result of the product on the labeled MDP and the automaton for $\mathsf{G}\mathsf{F}s_1 \land \mathsf{G}\mathsf{F} s_2$. We can also see that capacity 5 is no longer sufficient for the Büchi objective (the green ∞ indicate that no initial load is sufficient from given state to satisfy the Büchi objectives with targets `T`). In fact, we need at least 9 units of energy to pass the path through mdp-state `1`.
```
# Capacity 5 was insufficient; raise it to 9 and recompute the minimal levels.
psolver.cap = 9
psolver.get_min_levels(BUCHI, recompute=True)
display_inline(psolver)
```
In fact, the function `product` is implemented as a method of `LabeledConsMDP` class.
```
# Same construction via the library method instead of the local `product`.
p, T = lmdp.product_with_dba(aut)
psolver = BasicES(p, 9, T)
psolver.get_min_levels(BUCHI, True)
display_inline(psolver)
```
|
github_jupyter
|
```
"""
To create test data for evaluating the similarities, a synthetic data creation approach is introduced.
The idea is as follows:
1. Outlier Instances.
These are the users who deviate from the other in the data.
Identifying them would be of interest to understand how they behave against most of the dataset.
The outlier/dissimilar users are identified through the outlier_detection concept we have introduced in this work.
Identified outliers are [8,20,27,149] -- Of them [8,27,149] are the most suspicious
4 - Outlier Instances.
2. Twin Instances.
Creating an instance with similar properties as in the data. This is a virtual data having same properties.
For the time series following thing needs to be done. A sample from a start_date to end_date to create such a data in range from 0 to 1.
1. A random set of user are first extracted. We extract 3 for example.
2. Then we create exact same features for a virtual user and change their identity such as user_id... (only static properties)
3. Virtual user would also require to have their time series recordings.
For this following steps are done:
1. For a randomly chosen start date between (first timestamp) and (end time)
3. Normal Instances.
The usual test instances.
"""
import pandas as pd
import numpy as np
import seaborn as sns

# Static questionnaire answers and the TYT time-series recordings.
static_data_tchq = pd.read_csv("data/input_csv/3_q.csv")
static_data_hq = pd.read_csv("data/input_csv/4_q.csv")
tyt_data = pd.read_csv("data/input_csv/1_q.csv")

# Users excluded from the analysis; drop them from every table.
drop_user_ids = [54, 60, 140, 170, 4, 6, 7, 9, 12, 19, 25, 39, 53, 59, 128, 130, 144, 145, 148, 156, 166, 167]
valid_static_data_tchq = static_data_tchq[~static_data_tchq["user_id"].isin(drop_user_ids)]
valid_static_data_hq = static_data_hq[~static_data_hq["user_id"].isin(drop_user_ids)]
valid_tyt_data = tyt_data[~tyt_data["user_id"].isin(drop_user_ids)]
valid_static_data_tchq.head()
valid_static_data_tchq.info()

# Pick 3 random users as candidate templates for the twin instances.
user_ids=valid_static_data_tchq["user_id"].to_numpy()
np.random.choice(user_ids, 3, replace=False)

# Inspect the previously identified outlier users in each table.
valid_static_data_tchq[valid_static_data_tchq["user_id"].isin([8,20,27,149])]
valid_static_data_hq[valid_static_data_hq["user_id"].isin([8,20,27,149])]
valid_static_data_hq[valid_static_data_hq["user_id"].isin([8,20,27,149,])]

# Example: an evenly spaced range of 100 timestamps as strings.
d1 = [str(date) for date in pd.date_range(start='10-01-2018', end='05-09-2019', periods=100)]
def generate_synthetic_tyt_data(user_id, start_date, end_date, sampling_data,
                                sample_length=80, random_state=None):
    """Create a synthetic ("twin") TYT time-series for a virtual user.

    Draws `sample_length` symptom rows at random from `sampling_data` and
    attaches a new identity (`user_id`), a fixed questionnaire id (3), and an
    evenly spaced series of timestamps between `start_date` and `end_date`.

    Parameters
    ----------
    user_id : identifier assigned to every generated row.
    start_date, end_date : str
        Inclusive bounds for the generated `created_at` timestamps.
    sampling_data : pd.DataFrame
        Real recordings to sample the symptom columns s01..s08 from.
    sample_length : int
        Number of synthetic rows to produce.
    random_state : optional
        Seed forwarded to `DataFrame.sample` so the synthetic data can be
        reproduced. Defaults to None (non-deterministic), which matches the
        original behaviour.

    Returns
    -------
    pd.DataFrame with columns user_id, questionnaire_id, created_at, s01..s08.
    """
    created_at = [str(date) for date in pd.date_range(start=start_date, end=end_date, periods=sample_length)]
    u_id_list = [user_id for _ in range(sample_length)]
    q_id = [3 for _ in range(sample_length)]
    columns = ["s01", "s02", "s03", "s04", "s05", "s06", "s07", "s08"]
    # sample() returns a copy, so the assignments below never touch sampling_data.
    synthetic_data = sampling_data[columns].sample(n=sample_length, random_state=random_state)
    synthetic_data["user_id"] = u_id_list
    synthetic_data["questionnaire_id"] = q_id
    synthetic_data["created_at"] = created_at
    return synthetic_data[["user_id", "questionnaire_id", "created_at", "s01", "s02", "s03", "s04", "s05", "s06", "s07", "s08"]]
# Synthetic time-series for three virtual ("twin") users.
m1 = generate_synthetic_tyt_data("44428", "10-10-2018","05-09-2019",valid_tyt_data)
m2 = generate_synthetic_tyt_data("444154", "11-11-2018","05-07-2019",valid_tyt_data, sample_length=60)
m3 = generate_synthetic_tyt_data("444133", "11-11-2018","05-07-2019",valid_tyt_data, sample_length=60)
# pd.concat instead of DataFrame.append: append was deprecated in pandas 1.4
# and removed in 2.0; the resulting frame is identical.
DT_tyt_data = pd.concat([m1, m2, m3])
DT_tyt_data.head()
valid_static_data_tchq.sample(n=3,random_state=42)

# Twin static data: copy 3 real users and give them new identities by
# prefixing "444" to user_id (and to the leftover CSV index column).
DT_static_data = valid_static_data_tchq.sample(n=3,random_state=42)
DT_static_data["user_id"] = DT_static_data["user_id"].apply(lambda x: int("".join("444" + str(x))))
DT_static_data["Unnamed: 0"] = DT_static_data["Unnamed: 0"].apply(lambda x: int("".join("444" + str(x))))
DT_static_data
DT_static_data
DT_static_data_hq = valid_static_data_hq.sample(n=3,random_state=42)
DT_static_data_hq["user_id"] = DT_static_data_hq["user_id"].apply(lambda x: int("".join("444" + str(x))))
DT_static_data_hq["Unnamed: 0"] = DT_static_data_hq["Unnamed: 0"].apply(lambda x: int("".join("444" + str(x))))
type(DT_static_data_hq["user_id"].iloc[0])
valid_static_data_tchq["user_id"].sample(n=3,random_state=0).to_list()

# Final simulated data for the 3 virtual users:
# 1. Time series data      - DT_tyt_data
# 2. Static data (tchq)    - DT_static_data
# 3. Static data (hq)      - DT_static_data_hq
len(DT_tyt_data)
len(DT_static_data)
len(DT_static_data_hq)
DT_static_data.head()
static_data_tchq["tschq04-2"].iloc[31]
DT_static_data_hq.head()
DT_tyt_data.head()
type(DT_static_data["tschq04-2"].iloc[0])
valid_static_data_tchq["tschq04-2"][11]

import ast
# The "tschq04-2" column stores lists serialized as strings;
# ast.literal_eval converts such a string back into a Python list.
res = ast.literal_eval(valid_static_data_tchq["tschq04-2"][11])
res
list_to_str = "_".join([val for val in res + ["CHILDREN"]])
list_to_str
# Quick sanity check of literal_eval on a plain string literal.
res = "[1,2,3]"
isinstance(res, str)
ast.literal_eval(res)

## Save all the simulated files as csv and pickle
# iloc[:,1:] drops the first column (the old "Unnamed: 0" index) before saving.
DT_static_data.iloc[:,1:].to_csv("data/simulate/3_q_sim.csv")
DT_static_data_hq.iloc[:,1:].to_csv("data/simulate/4_q_sim.csv")
DT_tyt_data.to_csv("data/simulate/1_q_sim.csv")
DT_static_data.iloc[:,1:].to_pickle("data/simulate/3_q_sim.pckl")
DT_static_data_hq.iloc[:,1:].to_pickle("data/simulate/4_q_sim.pckl")
DT_tyt_data.to_pickle("data/simulate/1_q_sim.pckl")
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/michelmunoz99/daa_2021_1/blob/master/20enero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
class NodoArbol:
    """A single node of a binary search tree: a payload plus two child links."""

    def __init__(self, value, left=None, right=None):
        self.data = value
        self.left = left
        self.right = right


class BinarySearchTree:
    """Binary search tree with insertion, lookup and the three classic traversals."""

    def __init__(self):
        self.__root = None  # the tree is empty until the first insert

    def insert(self, value):
        """Insert `value` into the tree; duplicates are silently ignored."""
        if self.__root is None:
            self.__root = NodoArbol(value, None, None)
            return
        # Descend recursively to find the correct free slot.
        self.insert_nodo(self.__root, value)

    def insert_nodo(self, nodo, value):
        """Recursively place `value` in the subtree rooted at `nodo`."""
        if value == nodo.data:
            return  # duplicate: nothing to do
        if value < nodo.data:
            # Smaller values go to the left subtree.
            if nodo.left is None:
                nodo.left = NodoArbol(value, None, None)
            else:
                self.insert_nodo(nodo.left, value)
        elif nodo.right is None:
            # Larger values go right; free slot found.
            nodo.right = NodoArbol(value, None, None)
        else:
            self.insert_nodo(nodo.right, value)

    def buscar(self, value):
        """Return the stored value if present, otherwise None."""
        return None if self.__root is None else self.__busca_nodo(self.__root, value)

    def __busca_nodo(self, nodo, value):
        # Recursive binary search down the tree.
        if nodo is None:
            return None
        if value == nodo.data:
            return nodo.data
        rama = nodo.left if value < nodo.data else nodo.right
        return self.__busca_nodo(rama, value)

    def transversal(self, format="inorden"):
        """Print the tree in the requested order: inorden, preorden or posorden."""
        recorridos = {
            "inorden": self.__recorrido_in,
            "preorden": self.__recorrido_pre,
            "posorden": self.__recorrido_pos,
        }
        recorrido = recorridos.get(format)
        if recorrido is None:
            print("Formato de recorrido no válido")
        else:
            recorrido(self.__root)

    def __recorrido_pre(self, nodo):
        # root, left, right
        if nodo is None:
            return
        print(nodo.data, end=",")
        self.__recorrido_pre(nodo.left)
        self.__recorrido_pre(nodo.right)

    def __recorrido_in(self, nodo):
        # left, root, right — yields sorted order
        if nodo is None:
            return
        self.__recorrido_in(nodo.left)
        print(nodo.data, end=",")
        self.__recorrido_in(nodo.right)

    def __recorrido_pos(self, nodo):
        # left, right, root
        if nodo is None:
            return
        self.__recorrido_pos(nodo.left)
        self.__recorrido_pos(nodo.right)
        print(nodo.data, end=",")
# Exercise the tree: insert three values, search, and print all traversals.
bst = BinarySearchTree()
bst.insert(50)
bst.insert(30)
bst.insert(20)
res = bst.buscar(30)  # returns the stored value (30), not a boolean
print("Dato:", str(res))
print(bst.buscar(40))  # 40 was never inserted -> None
print("Recorrido:")
bst.transversal(format="preorden")
print("recorrido in orden:")
bst.transversal()
print("recorrido pos:")
# BUG FIX: "pos" is not a recognised format (it only printed
# "Formato de recorrido no válido"); transversal expects "posorden".
bst.transversal(format="posorden")
```
|
github_jupyter
|
# Marginalized Gaussian Mixture Model
Author: [Austin Rochford](http://austinrochford.com)
```
%matplotlib inline

from matplotlib import pyplot as plt
import numpy as np
import pymc3 as pm
import seaborn as sns

# Fix the RNG so the notebook is reproducible.
SEED = 383561
np.random.seed(SEED) # from random.org, for reproducibility
```
Gaussian mixtures are a flexible class of models for data that exhibits subpopulation heterogeneity. A toy example of such a data set is shown below.
```
N = 1000  # number of observations

# True mixture parameters: weights, component means, component std devs.
W = np.array([0.35, 0.4, 0.25])
MU = np.array([0., 2., 5.])
SIGMA = np.array([0.5, 0.5, 1.])

# Draw a component index per observation, then sample from that component.
component = np.random.choice(MU.size, size=N, p=W)
x = np.random.normal(MU[component], SIGMA[component], size=N)

fig, ax = plt.subplots(figsize=(8, 6))
# FIX: `normed` was removed in matplotlib 3.x; `density=True` is the replacement.
ax.hist(x, bins=30, density=True, lw=0);
```
A natural parameterization of the Gaussian mixture model is as the [latent variable model](https://en.wikipedia.org/wiki/Latent_variable_model)
$$
\begin{align*}
\mu_1, \ldots, \mu_K
& \sim N(0, \sigma^2) \\
\tau_1, \ldots, \tau_K
& \sim \textrm{Gamma}(a, b) \\
\boldsymbol{w}
& \sim \textrm{Dir}(\boldsymbol{\alpha}) \\
z\ |\ \boldsymbol{w}
& \sim \textrm{Cat}(\boldsymbol{w}) \\
x\ |\ z
& \sim N(\mu_z, \tau^{-1}_i).
\end{align*}
$$
An implementation of this parameterization in PyMC3 is available [here](gaussian_mixture_model.ipynb). A drawback of this parameterization is that its posterior relies on sampling the discrete latent variable $z$. This reliance can cause slow mixing and ineffective exploration of the tails of the distribution.
An alternative, equivalent parameterization that addresses these problems is to marginalize over $z$. The marginalized model is
$$
\begin{align*}
\mu_1, \ldots, \mu_K
& \sim N(0, \sigma^2) \\
\tau_1, \ldots, \tau_K
& \sim \textrm{Gamma}(a, b) \\
\boldsymbol{w}
& \sim \textrm{Dir}(\boldsymbol{\alpha}) \\
f(x\ |\ \boldsymbol{w})
& = \sum_{i = 1}^K w_i\ N(x\ |\ \mu_i, \tau^{-1}_i),
\end{align*}
$$
where
$$N(x\ |\ \mu, \sigma^2) = \frac{1}{\sqrt{2 \pi} \sigma} \exp\left(-\frac{1}{2 \sigma^2} (x - \mu)^2\right)$$
is the probability density function of the normal distribution.
Marginalizing $z$ out of the model generally leads to faster mixing and better exploration of the tails of the posterior distribution. Marginalization over discrete parameters is a common trick in the [Stan](http://mc-stan.org/) community, since Stan does not support sampling from discrete distributions. For further details on marginalization and several worked examples, see the [_Stan User's Guide and Reference Manual_](http://www.uvm.edu/~bbeckage/Teaching/DataAnalysis/Manuals/stan-reference-2.8.0.pdf).
PyMC3 supports marginalized Gaussian mixture models through its `NormalMixture` class. (It also supports marginalized general mixture models through its `Mixture` class.) Below we specify and fit a marginalized Gaussian mixture model to this data in PyMC3.
```
with pm.Model() as model:
    # Symmetric Dirichlet prior over the mixture weights.
    w = pm.Dirichlet('w', np.ones_like(W))
    # Priors over the component means and precisions.
    mu = pm.Normal('mu', 0., 10., shape=W.size)
    tau = pm.Gamma('tau', 1., 1., shape=W.size)
    # Marginalized mixture likelihood: no discrete latent z to sample.
    x_obs = pm.NormalMixture('x_obs', w, mu, tau=tau, observed=x)

with model:
    # Sample, then drop the first 1000 draws as additional burn-in.
    trace = pm.sample(5000, n_init=10000, tune=1000, random_seed=SEED)[1000:]
```
We see in the following plot that the posterior distribution on the weights and the component means has captured the true value quite well.
```
# Posterior traces and densities for the weights and component means.
pm.traceplot(trace, varnames=['w', 'mu']);
pm.plot_posterior(trace, varnames=['w', 'mu']);
```
We can also sample from the model's posterior predictive distribution, as follows.
```
with model:
    # Draw 5000 samples from the posterior predictive distribution.
    ppc_trace = pm.sample_posterior_predictive(trace, 5000, random_seed=SEED)
```
We see that the posterior predictive samples have a distribution quite close to that of the observed data.
```
# Overlay the observed data and the posterior predictive samples.
fig, ax = plt.subplots(figsize=(8, 6))
# FIX: `normed` was removed in matplotlib 3.x; `density=True` is the replacement.
ax.hist(x, bins=30, density=True,
        histtype='step', lw=2,
        label='Observed data');
ax.hist(ppc_trace['x_obs'], bins=30, density=True,
        histtype='step', lw=2,
        label='Posterior predictive distribution');
ax.legend(loc=1);
```
|
github_jupyter
|
<img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
<br></br>
<br></br>
# Major Neural Network Architectures Challenge
## *Data Science Unit 4 Sprint 3 Challenge*
In this sprint challenge, you'll explore some of the cutting edge of Data Science. This week we studied several famous neural network architectures:
recurrent neural networks (RNNs), long short-term memory (LSTMs), convolutional neural networks (CNNs), and Autoencoders. In this sprint challenge, you will revisit these models. Remember, we are testing your knowledge of these architectures not your ability to fit a model with high accuracy.
__*Caution:*__ these approaches can be pretty heavy computationally. All problems were designed so that you should be able to achieve results within at most 5-10 minutes of runtime on SageMaker, Colab or a comparable environment. If something is running longer, doublecheck your approach!
## Challenge Objectives
*You should be able to:*
* <a href="#p1">Part 1</a>: Train a LSTM classification model
* <a href="#p2">Part 2</a>: Utilize a pre-trained CNN for objective detection
* <a href="#p3">Part 3</a>: Describe the components of an autoencoder
* <a href="#p4">Part 4</a>: Describe yourself as a Data Scientist and elucidate your vision of AI
<a id="p1"></a>
## Part 1 - RNNs
Use an RNN/LSTM to fit a multi-class classification model on reuters news articles to distinguish topics of articles. The data is already encoded properly for use in an RNN model.
Your Tasks:
- Use Keras to fit a predictive model, classifying news articles into topics.
- Report your overall score and accuracy
For reference, the [Keras IMDB sentiment classification example](https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py) will be useful, as well the RNN code we used in class.
__*Note:*__ Focus on getting a running model, not on maxing accuracy with extreme data size or epoch numbers. Only revisit and push accuracy if you get everything else done!
```
from tensorflow.keras.datasets import reuters
import numpy as np
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
(X_train, y_train), (X_test, y_test) = reuters.load_data(num_words=None,
skip_top=0,
maxlen=None,
test_split=0.2,
seed=723812,
start_char=1,
oov_char=2,
index_from=3)
# restore np.load for future normal usage
np.load = np_load_old
# Demo of encoding
word_index = reuters.get_word_index(path="reuters_word_index.json")
print(f"Iran is encoded as {word_index['iran']} in the data")
print(f"London is encoded as {word_index['london']} in the data")
print("Words are encoded as numbers in our dataset.")
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM

batch_size = 46
max_features = len(word_index.values())  # vocabulary size for the embedding
maxlen = 200  # pad/truncate every article to 200 tokens

print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
# BUG FIX: the output layer must have one unit per *topic class*, not per
# vocabulary word. Dense(max_features) built a vocabulary-sized softmax,
# which is wrong (and hugely wasteful) for topic classification. The labels
# are 0..max(y_train), so the class count is max(y_train)+1 (46 for Reuters).
num_classes = int(np.max(y_train)) + 1
model = Sequential()
model.add(Embedding(max_features + 1, 64))
model.add(LSTM(64))
model.add(Dense(num_classes, activation='softmax'))

# You should only run this cell once your model has been properly configured
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=1,
          validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
```
## Sequence Data Question
#### *Describe the `pad_sequences` method used on the training dataset. What does it do? Why do you need it?*
pad_sequences truncates sequences longer than the maxlen variable, and will add padding to sequences shorter than maxlen.
pad_sequences makes all of the sequences the same length. This is necessary to ensure the batches will all work properly when processed by the RNN
## RNNs versus LSTMs
#### *What are the primary motivations behind using Long-ShortTerm Memory Cell unit over traditional Recurrent Neural Networks?*
Long-ShortTerm Memory Cell units are used to allow the RNN to learn patterns by referencing data that was presented to it in the past.
This allows it to detect inputs it has seen before and to give a similar activation to when it saw that data before.
## RNN / LSTM Use Cases
#### *Name and Describe 3 Use Cases of LSTMs or RNNs and why they are suited to that use case*
1. Language Generation: They are suited for this as they can remember patterns presented in the training data set and can generate useful sequences that actually make sense.
2. Speech Recognition: Can be used to detect patterns in input sound wave sequences to detremine the most likely word spoken.
3. Speech Sythesis: In a similar way to being able to recognize speech, an RNN could be used to generate speech based on what it was trained on.
<a id="p2"></a>
## Part 2- CNNs
### Find the Frog
Time to play "find the frog!" Use Keras and ResNet50 (pre-trained) to detect which of the following images contain frogs:
<img align="left" src="https://d3i6fh83elv35t.cloudfront.net/newshour/app/uploads/2017/03/GettyImages-654745934-1024x687.jpg" width=400>
```
!pip install google_images_download

from google_images_download import google_images_download

# Download 5 images matching the query (saved under ./downloads/<keywords>/).
response = google_images_download.googleimagesdownload()
arguments = {"keywords": "lilly frog pond", "limit": 5, "print_urls": True}
absolute_image_paths = response.download(arguments)
```
At time of writing at least a few do, but since the Internet changes - it is possible your 5 won't. You can easily verify yourself, and (once you have working code) increase the number of images you pull to be more sure of getting a frog. Your goal is to validly run ResNet50 on the input images - don't worry about tuning or improving the model.
*Hint* - ResNet 50 doesn't just return "frog". The three labels it has for frogs are: `bullfrog, tree frog, tailed frog`
*Stretch goals*
- Check for fish or other labels
- Create a matplotlib visualizations of the images and your prediction as the visualization label
```
# You've got something to do in this cell. ;)
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
# ResNet50 (ImageNet) class names that correspond to frogs.
frogs = ['bullfrog', 'tree_frog', 'tailed_frog']

def process_img_path(img_path):
    """Load an image from disk, resized to ResNet50's expected 224x224 input."""
    return image.load_img(img_path, target_size=(224, 224))
def img_contains_frog(img):
    """Scan an image for frogs using a pretrained ResNet50.

    Inputs:
    ---------
    img: Preprocessed image ready for prediction. The `process_img_path`
        function should already be applied to the image.

    Returns:
    ---------
    str: 'frog' if any of ResNet50's top-3 predicted labels is a frog class
        (bullfrog, tree_frog, tailed_frog), otherwise 'not frog'.
        (FIX: the old docstring claimed a boolean was returned.)
    """
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    # NOTE(review): the ResNet50 model is rebuilt on every call; consider
    # constructing it once outside this function if performance matters.
    model = ResNet50(weights='imagenet')
    features = model.predict(x)
    results = decode_predictions(features, top=3)[0]
    print(results)
    # Each result is a (class_id, class_name, probability) triple.
    for result in results:
        if result[1] in frogs:
            return 'frog'
    # FIX: removed the unreachable `return None` that followed this return.
    return 'not frog'
```
#### Stretch Goal: Displaying Predictions
```
import matplotlib.pyplot as plt
import os

# Run the frog detector over every downloaded image and show each
# image together with its prediction.
path = './downloads/lilly frog pond/'
paths = os.listdir(path)
for file in paths:
    img = process_img_path(path+file)
    plt.imshow(img)
    plt.show()
    prediction = img_contains_frog(img)
    print(prediction)
    print()
```
<a id="p3"></a>
## Part 3 - Autoencoders
Describe a use case for an autoencoder given that an autoencoder tries to predict its own input.
__*Your Answer:*__
An autoencoder can be used for creating a format to use in the compression of files to the smallest possible size with minimal information loss. This is even more effective if all the files to be encoded are very similar in starting format.
<a id="p4"></a>
## Part 4 - More...
Answer the following questions, with a target audience of a fellow Data Scientist:
- What do you consider your strongest area, as a Data Scientist?
- What area of Data Science would you most like to learn more about, and why?
- Where do you think Data Science will be in 5 years?
- What are the threats posed by AI to our society?
- How do you think we can counteract those threats?
- Do you think achieving General Artifical Intelligence is ever possible?
A few sentences per answer is fine - only elaborate if time allows.
1. My strongest area as a Data Scientist would be ability to experiment and think conceptually about issues to come to a conclusion more efficiently. I am also fairly competent at deploying Flask APIs to Heroku.
2. I would like to learn more about how I would implement more APIs on different platforms such as AWS instead of Heroku.
3. I think data science will have evolved immensly. Honestly, it might be impossible to determine the state of data science since it could advance to such a degree in the areas video processing and modeling, including realistic water and physics models with reduced computational load. I also believe a lot of progress will be made in the areas of computer vision and AI robotics.
4. Threats posed to our society by AI technology is the idea of an advanced AI that can perform tasks we cannot counteract or access information that it is not intended to. This can lead to breaches of saftey or privacy which is not good.
5. We can counteract the threats posed to our society by devloping in limited environments such as a sort of sandbox environment limited in its scope to the outside world. Another common suggestion is to pass legislation to prevent development of advanced machine intelligence.
6. I do believe general artificial intelligence is possible eventually.
## Congratulations!
Thank you for your hard work, and congratulations! You've learned a lot, and you should proudly call yourself a Data Scientist.
```
from IPython.display import HTML

# Embed a celebratory GIF in the notebook output.
HTML("""<iframe src="https://giphy.com/embed/26xivLqkv86uJzqWk" width="480" height="270" frameBorder="0" class="giphy-embed" allowFullScreen></iframe><p><a href="https://giphy.com/gifs/mumm-champagne-saber-26xivLqkv86uJzqWk">via GIPHY</a></p>""")
```
|
github_jupyter
|
# The Data
to see where we got the data go here: https://www.ndbc.noaa.gov/station_history.php?station=42040
```
# Core data-handling libraries for the buoy analysis.
import pandas as pd
import numpy as np
import datetime
```
This is the first set of data from 1995
```
from utils import read_file, build_median_df

# One dataframe per year of recordings for NDBC station 42040.
# read_file parses a yearly buoy_data_<year>.txt file; .head(6)
# previews the first rows of each year.
df1995 = read_file('data/42040/buoy_data_1995.txt')
df1995.head(6)
# Resample 1995 to daily means, indexed by timestamp.
df1995d = df1995.set_index("timestamp").resample("D").mean()
df1995d.head(5)
df1996 = read_file('data/42040/buoy_data_1996.txt')
df1996.head(6)
df1997 = read_file('data/42040/buoy_data_1997.txt')
df1997.head(6)
df1998 = read_file('data/42040/buoy_data_1998.txt')
df1998.head(6)
df1999 = read_file('data/42040/buoy_data_1999.txt')
df1999.head(6)
df2000 = read_file('data/42040/buoy_data_2000.txt')
df2000.head(6)
df2001 = read_file('data/42040/buoy_data_2001.txt')
df2001.head(6)
df2002 = read_file('data/42040/buoy_data_2002.txt')
df2002.head(6)
df2003 = read_file('data/42040/buoy_data_2003.txt')
df2003.head(6)
df2004 = read_file('data/42040/buoy_data_2004.txt')
df2004.head(6)
df2005 = read_file('data/42040/buoy_data_2005.txt')
df2005.head(6)
df2006 = read_file('data/42040/buoy_data_2006.txt')
df2006.head(6)
# 2007 has incomplete data; 999 sentinel points are NaN.
df2007 = read_file('data/42040/buoy_data_2007.txt')
df2007.head(6)
df2008 = read_file('data/42040/buoy_data_2008.txt')
df2008.head(6)
df2009 = read_file('data/42040/buoy_data_2009.txt')
df2009.head(6)
df2010 = read_file('data/42040/buoy_data_2010.txt')
df2010.head(6)
df2011 = read_file('data/42040/buoy_data_2011.txt')
df2011.head(6)
# BUG FIX: this previously re-read buoy_data_2010.txt, so df2012
# silently duplicated the 2010 data.
df2012 = read_file('data/42040/buoy_data_2012.txt')
df2012.head(6)
df2013 = read_file('data/42040/buoy_data_2013.txt')
df2013.head(6)
df2014 = read_file('data/42040/buoy_data_2014.txt')
df2014.head(6)
df2015 = read_file('data/42040/buoy_data_2015.txt')
df2015.head(6)
df2016 = read_file('data/42040/buoy_data_2016.txt')
df2016.head(6)
df2017 = read_file('data/42040/buoy_data_2017.txt')
df2017.head(6)

# Monthly median air temperature (ATMP) for a selection of years.
grouped2016 = build_median_df(df2016, 'ATMP', 2016)
grouped1996 = build_median_df(df1996, 'ATMP', 1996)
grouped2000 = build_median_df(df2000, 'ATMP', 2000)
grouped2005 = build_median_df(df2005, 'ATMP', 2005)
# 2010 gets an explicit month index starting at March — presumably
# Jan/Feb data is missing for that year; TODO confirm against the file.
grouped2010 = build_median_df(df2010, 'ATMP', 2010,
                              index=['03-Mar', '04-Apr', '05-May', '06-Jun', '07-Jul', '08-Aug', '09-Sep', '10-Oct', '11-Nov', '12-Dec'])

# Align the yearly series side by side and plot them as grouped bars.
grouped = pd.concat([grouped1996, grouped2000, grouped2005, grouped2010, grouped2016], axis=1, sort=True)
grouped.plot(figsize=(15,10), kind='bar');

import matplotlib.pyplot as plt
import calendar

plt.title("Monthly median air temperature for buoy: LUKE OFFSHORE TEST PLATFORM - 63 NM South of Dauphin Island, AL");
plt.ylabel("Temperature, degrees Celsius");
# Replace the numeric ticks with month names.
plt.xticks(np.arange(12), calendar.month_name[1:13], rotation=20);
plt.savefig('42040-airtemp.pdf')
```
|
github_jupyter
|
Lambda School Data Science
*Unit 1, Sprint 2, Module 1*
---
_Lambda School Data Science_
# Join and Reshape datasets
Objectives
- concatenate data with pandas
- merge data with pandas
- understand tidy data formatting
- melt and pivot data with pandas
Links
- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
- Combine Data Sets: Standard Joins
- Tidy Data
- Reshaping Data
- Python Data Science Handbook
- [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append
- [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join
- [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
- [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
Reference
- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
## Download data
We’ll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)!
```
# we can use !wget to get the file.
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# use !tar to extract the .tar.gz archive.
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# change into the directory the files were extracted to.
%cd instacart_2017_05_01
# list all the .csv files with their sizes.
!ls -lh *.csv
```
# Join Datasets
## Goal: Reproduce this example
The first two orders for user id 1:
```
# we can use this code to display an image in our notebook.
from IPython.display import display, Image

# The target we want to reproduce: the first two orders for user id 1.
url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'
example = Image(url=url, width=600)
display(example)
```
## Load data
Here's a list of all six CSV filenames
```
# list out all the .csv files.
!ls -lh *.csv
```
For each CSV
- Load it with pandas
- Look at the dataframe's shape
- Look at its head (first rows)
- `display(example)`
- Which columns does it have in common with the example we want to reproduce?
### aisles
```
# pandas is used to load every CSV in this section.
import pandas as pd
# Load the aisles lookup table.
aisles = pd.read_csv('aisles.csv')
# (rows, columns) of the frame.
print(aisles.shape)
# Peek at the first rows.
aisles.head()
# Re-show the target image we are trying to reproduce.
display(example)
```
### departments
```
# Load the departments lookup table.
departments = pd.read_csv('departments.csv')
# (rows, columns) of the frame.
print(departments.shape)
# Peek at the first rows.
departments.head()
```
### order_products__prior
```
# Load the "prior" split of order/product rows (the large one).
order_products__prior = pd.read_csv('order_products__prior.csv')
# (rows, columns) of the frame.
print(order_products__prior.shape)
# Peek at the first rows.
order_products__prior.head()
```
### order_products__train
```
# Load the "train" split of order/product rows.
order_products__train = pd.read_csv('order_products__train.csv')
# (rows, columns) of the frame.
print(order_products__train.shape)
# Peek at the first rows.
order_products__train.head()
# Check free/used memory in MB — these frames are large.
!free -m
# A frame can be dropped to reclaim memory if needed.
## del order_products__train
```
### orders
```
# Load the orders table (one row per order, with user and time columns).
orders = pd.read_csv('orders.csv')
# (rows, columns) of the frame.
print(orders.shape)
# Peek at the first rows.
orders.head()
```
### products
```
# Load the products lookup table.
products = pd.read_csv('products.csv')
# (rows, columns) of the frame.
print(products.shape)
# Peek at the first rows.
products.head()
```
## Concatenate order_products__prior and order_products__train
```
# Count missing values per column in each piece before combining.
order_products__prior.isna().sum()
order_products__train.isna().sum()
# Both frames share the same columns, so stack them row-wise into one frame.
order_products = pd.concat([order_products__prior, order_products__train])
# (rows, columns) of the combined frame.
print(order_products.shape)
# Peek at the first rows.
order_products.head()
# Confirm the combined frame has no missing values either.
order_products.isna().sum()
# Sanity check: concatenation preserved every row.
assert len(order_products) == len(order_products__prior) + len(order_products__train)
# Deliberately false assertion — demonstrates what a failing assert looks like.
assert 1 == 0
```
## Get a subset of orders — the first two orders for user id 1
From `orders` dataframe:
- user_id
- order_id
- order_number
- order_dow
- order_hour_of_day
```
# First two rows of orders, for reference.
orders.head(2)
# Columns to keep, in display order.
columns = ['order_id','user_id','order_number', 'order_dow','order_hour_of_day']
# Keep only user 1's first two orders.
condition = (orders.user_id == 1) & (orders.order_number <= 2)
# Row/column subset via label-based selection.
subset = orders.loc[condition, columns]
subset
```
## Merge dataframes
Merge the subset from `orders` with columns from `order_products`
```
# Re-show the target image.
display(example)
# Column reference for order_products.
order_products.head(2)
# Left-join cart position and product id onto the subset by order_id,
# keeping every subset row.
merged = pd.merge(subset, order_products[['order_id','add_to_cart_order','product_id']], how='left', on='order_id')
merged
# Compare shapes before/after the merge.
subset.shape, order_products.shape, merged.shape
```
Merge with columns from `products`
```
# Column reference for products.
products.head(1)
# Left-join product names by product_id to finish the reproduction.
final = pd.merge(merged, products[['product_id','product_name']], how='left', on='product_id')
final
# Target image, for comparison with `final`.
display(example)
```
# Reshape Datasets
## Why reshape data?
#### Some libraries prefer data in different formats
For example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always).
> "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format is alternately called “long-form” or “tidy” data and is described in detail by Hadley Wickham. The rules can be simply stated:
> - Each variable is a column
- Each observation is a row
> A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
#### Data science is often about putting square pegs in round holes
Here's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling!
## Hadley Wickham's Examples
From his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)
```
# Libraries used by the reshaping examples.
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
# Wickham's "Table 1": people as rows, treatments as columns.
table1 = pd.DataFrame(
    [[np.nan, 2],
     [16, 11],
     [3, 1]],
    index=['John Smith', 'Jane Doe', 'Mary Johnson'],
    columns=['treatmenta', 'treatmentb'])
# "Table 2" is the transpose of table1.
table2 = table1.T
```
"Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild.
The table has two columns and three rows, and both rows and columns are labelled."
```
# Show table1 (people x treatments).
table1
```
"There are many ways to structure the same underlying data.
Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different."
```
# Show table2 (treatments x people).
table2
```
"Table 3 reorganises Table 1 to make the values, variables and observations more clear.
Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable."
| name | trt | result |
|--------------|-----|--------|
| John Smith | a | - |
| Jane Doe | a | 16 |
| Mary Johnson | a | 3 |
| John Smith | b | 2 |
| Jane Doe | b | 11 |
| Mary Johnson | b | 1 |
## Table 1 --> Tidy
We can use the pandas `melt` function to reshape Table 1 into Tidy format.
```
# Show table1 again before reshaping.
table1
# Inspect the available columns.
table1.columns
# Move the person names out of the index into a regular column.
table1 = table1.reset_index()
table1
# melt() to long ("tidy") format: one row per (person, treatment) pair.
tidy1 = table1.melt(id_vars='index')
tidy1
# Rename to the paper's column names.
tidy1 = tidy1.rename(columns={'index': 'name', 'variable': 'trt', 'value': 'result'})
# Strip the 'treatment' prefix so trt holds just 'a' / 'b'.
tidy1['trt'] = tidy1.trt.str.replace('treatment','')
tidy1
```
## Table 2 --> Tidy
```
# Move the treatment labels out of the index into a regular column.
table2 = table2.reset_index()
table2
# Inspect the available columns.
table2.columns
# melt() to long format; the id column here holds treatments.
tidy2 = table2.melt(id_vars='index')
tidy2
# Rename so tidy2 ends up with the same columns as tidy1.
tidy2 = tidy2.rename(columns={'index': 'trt', 'variable': 'name', 'value': 'result'})
# Strip the 'treatment' prefix so trt holds just 'a' / 'b'.
tidy2['trt'] = tidy2.trt.str.replace('treatment','')
tidy2
```
## Tidy --> Table 1
The `pivot_table` function is the inverse of `melt`.
```
# pivot_table() inverts melt(): back to people as rows, treatments as columns.
wide = tidy1.pivot_table(values='result', index='name', columns='trt')
wide
# Compare against the original table1.
table1
```
## Tidy --> Table 2
```
# pivot_table() inverts melt() back to Table 2's layout.
# Bug fix: the original passed index='name' AND columns='name', pivoting the
# same column both ways. Table 2 has treatments as rows and names as columns,
# so the index must be 'trt'.
wide2 = tidy2.pivot_table(values='result', index='trt', columns='name')
wide2
# Compare against the original table2.
table2
```
# Seaborn example
The rules can be simply stated:
- Each variable is a column
- Each observation is a row
A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
```
# One bar chart per person: result by treatment, drawn from the tidy frame.
sns.catplot(x='trt', y='result', col='name', kind='bar', data=tidy1, height=2);
```
## Now with Instacart data
```
# pandas is used to reload the Instacart tables needed for the plot.
import pandas as pd
# Product id -> name lookup.
products = pd.read_csv('products.csv')
# Stack the prior and train order/product rows into one frame.
order_products = pd.concat([pd.read_csv('order_products__prior.csv'),
                            pd.read_csv('order_products__train.csv')])
# Orders table, providing order_hour_of_day.
orders = pd.read_csv('orders.csv')
```
## Goal: Reproduce part of this example
Instead of a plot with 50 products, we'll just do two — the first products from each list
- Half And Half Ultra Pasteurized
- Half Baked Frozen Yogurt
```
# Show the target chart we want to partially reproduce.
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'
example = Image(url=url, width=600)
display(example)
```
So, given a `product_name` we need to calculate its `order_hour_of_day` pattern.
## Subset and Merge
One challenge of performing a merge on this data is that the `products` and `orders` datasets do not have any common columns that we can merge on. Due to this we will have to use the `order_products` dataset to provide the columns that we will use to perform the merge.
```
# The two products to chart.
product_names = ['Half And Half Ultra Pasteurized', 'Half Baked Frozen Yogurt']
# Inspect the columns available for joining.
products.columns.to_list()
order_products.columns.to_list()
orders.columns.to_list()
# products and orders share no columns, so order_products bridges them:
# product_id links to products, order_id links to orders.
merged = (products[['product_id','product_name']]
          .merge(order_products[['product_id','order_id']])
          .merge(orders[['order_id','order_hour_of_day']]))
print(merged.shape)
merged.head()
# Boolean mask for the two products, written with explicit ORs ...
condition = ((merged['product_name'] == product_names[0]) |
             (merged['product_name'] == product_names[1]))
# ... and equivalently (overriding the version above) with isin().
condition = merged['product_name'].isin(product_names)
condition
# Keep only the rows for the two products.
subset = merged[condition]
print(subset.shape)
subset.head()
```
## 4 ways to reshape and plot
### 1. value_counts
```
# Counts of each product per hour; unstack() turns products into columns for plotting.
subset.groupby('order_hour_of_day').product_name.value_counts().unstack().plot();
```
### 2. crosstab
```
# Same chart via crosstab; normalize='columns' gives each product's percentage per hour.
(pd.crosstab(subset['order_hour_of_day'], subset['product_name'], normalize='columns')*100).plot();
```
### 3. Pivot Table
```
# Same chart via pivot_table, counting order_id per (hour, product) cell.
subset.pivot_table(index='order_hour_of_day', columns='product_name', values='order_id', aggfunc=len).plot();
```
### 4. melt
```
# Same chart via melt + seaborn relplot.
import seaborn as sns
table = pd.crosstab(subset['order_hour_of_day'], subset['product_name'], normalize='columns')
# Long format: one row per (hour, product, share) triple.
melted = table.reset_index().melt(id_vars='order_hour_of_day')
sns.relplot(x='order_hour_of_day', y='value', hue='product_name', data=melted, kind='line');
```
|
github_jupyter
|
## Model training
In this notebook we'll first define a baseline model and then train a couple of ML models to try to better that performance.
The dataset is quite small and the features are few, so I'm going to keep it simple in terms of algorithms. We'll see how a logistic regression model and a random forest model compare against each other and the baseline model.
```
# Import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import RocCurveDisplay
from sklearn.metrics import roc_curve
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Use seaborn's default plot styling throughout.
sns.set()
default_color='steelblue'
# Feature columns fed to every model; the engineered ones are built by get_X_and_y.
features = [
    'is_white',
    'rating',
    'rating_diff',
    'is_3min',
    'did_win_last',
    'avg_points_last_5'
]
# Input data and model-output locations.
data_path = '/home/jovyan/work/data/'
train_data_filename = 'train_data.csv'
test_data_filename = 'test_data.csv'
model_output_path = '/home/jovyan/work/model/'
model_filename = 'chess_prediction_model.sav'
```
### Define functions
```
def avg_points_prev_rows(df, num_games):
    """Average of 'won_points' over the previous `num_games` rows.

    The frame must be sorted ascending by time so each row sees a
    chronological history. With closed='both' a fixed window of size
    `num_games` spans num_games + 1 rows including the current one, so
    subtracting the current row's points leaves the sum of the previous
    `num_games` games. Early rows with fewer predecessors are still
    divided by `num_games` (min_periods=1).
    """
    window_sum = df.rolling(num_games, min_periods=1, closed='both')['won_points'].sum()
    prev_sum = window_sum - df['won_points']  # exclude this record's own result
    return prev_sum / num_games
def get_X_and_y(df, features):
    """Return the feature matrix X (columns = `features`) and target y ('is_loss').

    Builds the engineered columns (rating_diff, is_3min, did_win_last,
    avg_points_last_5) after sorting games chronologically, so the rolling
    history features only look at earlier games.

    NOTE(review): 'end_ts' is added to the caller's frame in place, but
    sort_values returns a copy, so the remaining engineered columns do NOT
    appear on the caller's frame — confirm this asymmetry is intended.
    """
    # Sort data by time to get chronological features right.
    df['end_ts'] = pd.to_datetime(df['end_date_local'].astype('str') + " " + \
        df['end_time_local'].astype('str'))
    df = df.sort_values('end_ts', ascending=True)
    # Positive when the player out-rates the opponent.
    df['rating_diff'] = df['rating'].sub(df['opp_rating'])
    # time_control strings starting with '180' (seconds) are 3-minute games.
    df['is_3min'] = df['time_control'].str.startswith('180').astype('int')
    # 1 if the single previous game was a win (average over 1 game == 1).
    df['did_win_last'] = (avg_points_prev_rows(df, 1) == 1).astype('int')
    # Mean points over the previous five games.
    df['avg_points_last_5'] = avg_points_prev_rows(df, 5)
    X = df[features]
    y = df['is_loss']
    return (X, y)
def print_prob_dist(clf, X, y=None):
    """Plot KDE curves of predicted loss probability, split by actual label.

    Parameters:
        clf: fitted GridSearchCV over a Pipeline whose final step is named 'clf'.
        X: feature matrix to score.
        y: true labels aligned with X. Defaults to the module-level y_train
           so the existing call sites keep their behavior.
    """
    if y is None:
        y = y_train  # module-level global, kept for backward compatibility
    # Bug fix: the original scored the global X_train, silently ignoring X.
    probs = clf.predict_proba(X)[:, 1]
    plot_df = pd.DataFrame({'y': y, 'p': probs})
    # One density per actual class; common_norm=False normalizes each separately.
    sns.kdeplot(data=plot_df[plot_df['y'] == 0], x='p', common_norm=False, label='Is Not Loss')
    sns.kdeplot(data=plot_df[plot_df['y'] == 1], x='p', common_norm=False, label='Is Loss')
    plt.title(f"Prediction probabilities by actual label\n {clf.estimator.named_steps['clf']}")
    plt.xlabel("Probability of losing game")
    plt.legend()
    plt.xlim(0, 1)
    plt.show()
```
### Load data
```
# Read the train/test CSVs into DataFrames.
df_train = pd.read_csv(data_path + train_data_filename)
df_test = pd.read_csv(data_path + test_data_filename)
# Build the engineered features and targets for both splits.
X_train, y_train = get_X_and_y(df_train, features)
X_test, y_test = get_X_and_y(df_test, features)
# Display sample rows from the feature matrix.
display(X_train.head(3))
```
### Target balance
```
# Class balance: fraction of games that are losses (mean of the 0/1 target).
print("Share of losses in the training data:",
      np.round(y_train.mean(), 2))
```
So, 52% accuracy is the best we could do without knowing or learning anything at all and just always predicting 0.
### Baseline model
In this first step we'll create a simple model to benchmark later models against. Let's use the seemingly strongest feature that measures the difference of ratings (rating_diff). We'll always predict a loss when the player is the rating underdog, and never predict a loss otherwise.
```
# Baseline: predict a loss (1) exactly when the player is rated below the opponent.
baseline_preds = np.where(X_train['rating_diff'] < 0, 1, 0)
# NOTE(review): accuracy_score(y_true, y_pred) is the documented argument order;
# they are swapped here, but accuracy is symmetric so the value is unchanged.
baseline_accuracy = accuracy_score(baseline_preds, y_train)
print("Baseline accuracy:", np.round(baseline_accuracy, 2))
```
### Logistic regression
Let's first train a logistic regression classifier, and use sklearn's GridSearchCV to tune the parameters.
```
# Search both penalties over a log-spaced range of regularization strengths C.
param_grid = {'clf__penalty': ['l1', 'l2'],
              'clf__C' : np.logspace(-4, 4, 50)}
# Standardize features, then fit a class-weighted logistic regression;
# the liblinear solver supports both l1 and l2 penalties.
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('clf', LogisticRegression(solver='liblinear',
                               class_weight='balanced',
                               max_iter=1_000,
                               random_state=1))])
# 5-fold cross-validated grid search, scored on accuracy.
lr_clf = GridSearchCV(pipe,
                      param_grid=param_grid,
                      cv=5,
                      return_train_score=True,
                      verbose=True,
                      n_jobs=-1,
                      scoring='accuracy')
# Fit on data
lr_clf = lr_clf.fit(X_train, y_train)
# Print the best model parameters and best score
print("Chosen parameters:", lr_clf.best_params_)
print("Best mean cross validation score",
      np.round(lr_clf.best_score_, 2))
```
The cross-validated accuracy is better than the baseline model's, but only slightly so.
```
# Plot kernel densities for predicted probabilities by actual outcome.
print_prob_dist(lr_clf, X_train)
```
The plot above graphically shows the substantial overlap between the distributions meaning that when using a cutoff value of 0.5 there will be quite a lot of misclassification for both labels.
```
# Coefficients of the best pipeline's final step, one bar per feature.
# Features were standardized upstream, so magnitudes are roughly comparable.
sns.barplot(x=lr_clf.best_estimator_.named_steps['clf'].coef_[0],
            y=lr_clf.best_estimator_.feature_names_in_,
            color=default_color)
plt.title('Logistic regression classifier feature weights')
plt.show()
```
The training pipeline standardized the features so the coefficients can be interpreted as a form of feature impact score in the fitted model. The feature rating_diff stands out. The lower the rating_diff goes (and it's negative when the opponent has the higher rating) the higher the probability of losing.
## Random Forest
Next we'll see if a random forest classifier can beat logistic regression for this task. The non-linearity of the algorithm could potentially pick up different patterns, both between features and target, and between different features.
```
# Forest size, per-split feature sampling, tree depth, and split criterion.
# NOTE(review): max_features='auto' is deprecated/removed in newer
# scikit-learn releases — confirm the pinned sklearn version accepts it.
param_grid = {
    'clf__n_estimators': [500, 1_000, 1_500],
    'clf__max_features': [2, 'auto', 'sqrt', 'log2'],
    'clf__max_depth' : [2, 3, 4, 5],
    'clf__criterion' : ['gini', 'entropy']
}
# Scaling is kept for pipeline symmetry with the logistic model, even though
# tree splits are insensitive to feature scale.
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('clf', RandomForestClassifier(class_weight='balanced',
                                   random_state=1))])
# 5-fold cross-validated grid search, scored on accuracy.
rf_clf = GridSearchCV(pipe,
                      param_grid=param_grid,
                      cv=5,
                      verbose=True,
                      n_jobs=-1,
                      scoring='accuracy')
# Fit on data
rf_clf = rf_clf.fit(X_train, y_train)
# Print the best model parameters and best score
print("Chosen parameters:", rf_clf.best_params_)
print("Best mean cross validation score",
      np.round(rf_clf.best_score_, 2))
```
The cross-validated accuracy is a little worse for the random forest model than for the logistic regression model, but it's very even.
```
# Visualise the predicted-probability distributions for the random forest.
print_prob_dist(rf_clf, X_train)
```
The random forest model can classify a good chunk of the records correctly as clear wins and losses. But there's also many records in the middle where the model doesn't do that great in telling the classes apart.
### Conclusion
Neither model really improves much on the baseline model which only predicts from who's got the higher rating. Whether the extra couple of percentages of correct classifications would warrant the added complexity of using a ML-model rather than a 'business rule' would be up for debate depending on the use case.
The logistic regression model performed slightly better than the random forest model, so I'll go with that one. Although the improvement compared to the baseline is underwhelming, remember that we kept everything as simple as possible in this first iteration. Another round of feature engineering could well improve the results. The general conclusion will likely still hold though - that there is much noise in the signal and that it's unlikely one could get near perfect accuracy only based on the game history.
Now it's time to see how the baseline model and logistic regression classifier perform on the test data, and take a look at the confusion matrix to get a feel for how the trained model is predicting.
##### Performance on test data
```
# Baseline on the held-out split: predict loss iff rated below the opponent.
baseline_test_preds = np.where(X_test['rating_diff'] < 0, 1, 0)
baseline_test_accuracy = accuracy_score(baseline_test_preds, y_test)
print("Baseline accuracy on test data:",
      np.round(baseline_test_accuracy, 2))
# Chosen model's accuracy on the same split.
print("Logistic regression model accuracy on test data:",
      np.round(lr_clf.score(X_test, y_test), 2))
# Plot confusion matrix
sns.set_style("whitegrid", {'axes.grid' : False})
preds = lr_clf.predict(X_test)
cm = confusion_matrix(y_test, preds, labels=lr_clf.classes_)
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
                              display_labels=lr_clf.classes_)
disp.plot()
plt.show()
```
#### Save model
```
# Persist the tuned classifier. Using `with` guarantees the file handle is
# closed even if pickling raises — the original bare open(...) leaked it.
with open(model_output_path + model_filename, 'wb') as model_file:
    pickle.dump(lr_clf, model_file)
```
|
github_jupyter
|
```
library(magrittr)
# Known treatments: one row per (compound, disease) pair, flagged status = 1.
treatment_df = readr::read_tsv('../summary/indications.tsv') %>%
  dplyr::filter(rel_type == 'TREATS_CtD') %>%
  dplyr::select(compound_id, disease_id) %>%
  dplyr::mutate(status = 1)
# Degree-based priors: empiric rate, permutation prior, and a theoretic prior
# proportional to compound_treats * disease_treats (normalized to sum to 1),
# each also on the logit scale.
degree_prior_df = readr::read_tsv('data/degree-prior.tsv') %>%
  dplyr::mutate(Empiric = n_treatments / n_possible, Permuted = prior_perm) %>%
  dplyr::mutate(logit_prior_perm = boot::logit(prior_perm)) %>%
  dplyr::mutate(prior_theoretic = compound_treats * disease_treats) %>%
  dplyr::mutate(prior_theoretic = prior_theoretic / sum(prior_theoretic)) %>%
  dplyr::mutate(logit_prior_theoretic = boot::logit(prior_theoretic))
degree_prior_df %>% head(2)
# Square scatter comparing the two priors on the logit scale.
width = 3
height = 3
options(repr.plot.width=width, repr.plot.height=height)
gg_scatter = degree_prior_df %>%
  ggplot2::ggplot(ggplot2::aes(logit_prior_theoretic, logit_prior_perm)) +
  ggplot2::geom_point(alpha = 0.1, shape = 16) +
  ggplot2::theme_bw() +
  ggplot2::coord_equal() +
  ggplot2::xlab('Logit of the Theoretic Prob') +
  ggplot2::ylab('Logit of the Permuted Prob')
gg_scatter
ggplot2::ggsave('viz/scatter-theoretic-v-perm.png', gg_scatter, dpi = 300, width = width, height = height)
# Kept for reference: linear fit of permuted on theoretic logit prior.
#lm(logit_prior_perm ~ logit_prior_theoretic, data = degree_prior_df) %>% summary
# Heatmap of `prior` over (disease degree, compound degree) cells.
# Expects columns disease_treats, compound_treats, and prior;
# `scale_name` labels the fill legend.
plot_tiles <- function(df, scale_name='') {
  gg = ggplot2::ggplot(df, ggplot2::aes(x = disease_treats, y = compound_treats, fill = prior)) +
    ggplot2::geom_tile() +
    viridis::scale_fill_viridis(scale_name) +
    ggplot2::theme_bw() +
    ggplot2::coord_equal() +
    # Wide horizontal legend above the panel.
    ggplot2::theme(
      legend.position='top',
      legend.key.width=grid::unit(0.85, 'inches'),
      legend.key.size=grid::unit(0.12, 'inches')
    ) +
    ggplot2::xlab('Disease Degree') +
    ggplot2::ylab('Compound Degree') +
    # Trim the default padding around the axes and the plot.
    ggplot2::scale_x_continuous(expand=c(0, 1)) +
    ggplot2::scale_y_continuous(expand=c(0, 1)) +
    ggplot2::theme(plot.margin = grid::unit(c(2,2,2,2), 'points'))
  return(gg)
}
# Faceted tile plots comparing the empiric and permuted priors.
width = 6
height = 4.5
options(repr.plot.width=width, repr.plot.height=height)
gg = degree_prior_df %>%
  tidyr::gather(kind, prior, Permuted, Empiric) %>%
  plot_tiles('Prob') +
  # NOTE(review): in current ggplot2 the second positional argument of
  # facet_grid() is `cols`, not a label — confirm 'Probability' does what
  # was intended with the ggplot2 version used here.
  ggplot2::facet_grid(kind ~ ., 'Probability') +
  ggplot2::theme(strip.background=ggplot2::element_rect(fill='#FEF2E2'))
ggplot2::ggsave('viz/prob-tiled-empiric-v-perm.png', gg, dpi = 300, width = width, height = height)
# Same comparison on a log scale; the 0.01 offset avoids log(0).
gg = degree_prior_df %>%
  tidyr::gather(kind, prior, Permuted, Empiric) %>%
  dplyr::mutate(prior = log(0.01 + prior)) %>%
  plot_tiles('log(0.01 + Prob)') +
  ggplot2::facet_grid(kind ~ .) +
  ggplot2::theme(strip.background=ggplot2::element_rect(fill='#FEF2E2'))
ggplot2::ggsave('viz/log-prob-tiled-empiric-v-perm.png', gg, dpi = 300, width = width, height = height)
# Kept for reference: joining observations onto treatments.
# obs_prior_df = obs_prior_df %>%
#   dplyr::left_join(treatment_df)
# Permutation prior alone, on the logit scale ...
width = 6
height = 2.75
options(repr.plot.width=width, repr.plot.height=height)
gg_tile_logit = degree_prior_df %>%
  dplyr::mutate(prior = boot::logit(prior_perm)) %>%
  plot_tiles('logit(Prob)')
ggplot2::ggsave('viz/logit-perm-prior-tiled.png', gg_tile_logit, dpi = 300, width = width, height = height)
plot(gg_tile_logit)
# ... and on the probability scale.
width = 6
height = 2.75
options(repr.plot.width=width, repr.plot.height=height)
gg_tile_prob = degree_prior_df %>%
  dplyr::mutate(prior = prior_perm) %>%
  plot_tiles('Prob')
ggplot2::ggsave('viz/perm-prior-tiled.png', gg_tile_prob, dpi = 300, width = width, height = height)
plot(gg_tile_prob)
# Per-observation priors: histograms on the probability and logit scales.
obs_prior_df = readr::read_tsv('data/observation-prior.tsv')
width = 6
height = 2.5
gg_hist = obs_prior_df %>%
  #dplyr::mutate(prior = prior_perm) %>%
  dplyr::mutate(logit_prior_perm = boot::logit(prior_perm)) %>%
  tidyr::gather(kind, prior, prior_perm, logit_prior_perm) %>%
  ggplot2::ggplot(ggplot2::aes(x = prior)) +
  ggplot2::geom_histogram(bins=100) +
  ggplot2::facet_wrap( ~ kind, scales='free') +
  ggplot2::theme_bw() +
  ggplot2::theme(strip.background=ggplot2::element_rect(fill='#FEF2E2')) +
  ggplot2::theme(plot.margin=grid::unit(c(2, 2, 2, 2), 'points')) +
  ggplot2::xlab('Compound–Disease Prior') +
  ggplot2::ylab('Count')
ggplot2::ggsave('viz/prob-histograms.png', gg_hist, dpi = 300, width = width, height = height)
# Kept for reference: combined multi-panel figure.
# gg = gridExtra::arrangeGrob(gg_hist, gg_tile_prob, gg_tile_logit, ncol=1)
# ggplot2::ggsave('viz/combined.png', gg, dpi = 300, width = width, height = 8, device = cairo_pdf)
|
github_jupyter
|
```
import pandas as pd
# Tab-separated steam-quality (zhengqi) competition data.
train_data_file = 'data/zhengqi_train.txt'
test_data_file = 'data/zhengqi_test.txt'
# 'utf-8' and 'utf_8' are aliases of the same codec, so both reads behave alike.
train_data = pd.read_csv(train_data_file, sep = '\t', encoding = 'utf-8')
test_data = pd.read_csv(test_data_file, sep = '\t', encoding = 'utf_8')
```
## 定义特征构造方法
```
# Small constant guarding the division below against zero denominators.
eps = 1e-5
# Pairwise feature-crossing operations used to build interaction features.
func_dict = {
    'add':lambda x, y: x + y,
    'multi':lambda x, y: x * y,
    'div':lambda x, y: x / (y + eps),
}
# Feature construction: pairwise column crosses.
def auto_features(train_data, test_data, func_dict, col_list):
    """Build pairwise interaction features on copies of both frames.

    For every ordered pair of columns in `col_list` and every operation in
    `func_dict`, adds a column named '<left>-<op>-<right>' holding
    op(frame[left], frame[right]). The input frames are not modified.
    """
    frames = [train_data.copy(), test_data.copy()]
    for frame in frames:
        for left_col in col_list:
            for right_col in col_list:
                for op_name, op in func_dict.items():
                    new_col = '-'.join([left_col, op_name, right_col])
                    frame[new_col] = op(frame[left_col], frame[right_col])
    return frames[0], frames[1]
```
## 构造特征并降维
```
# Build the interaction features for both frames (last train column is 'target').
train_data2, test_data2 = auto_features(train_data,test_data,func_dict,col_list=test_data.columns)
# PCA: fit on the expanded train features (target column excluded), keep 500 components.
from sklearn.decomposition import PCA
pca = PCA(n_components = 500)
train_data2_pca = pca.fit_transform(train_data2.iloc[:,0:-1])
test_data2_pca = pca.transform(test_data2)
train_data2_pca = pd.DataFrame(train_data2_pca)
test_data2_pca = pd.DataFrame(test_data2_pca)
train_data2_pca['target'] = train_data2['target']
# Training matrices.
# NOTE(review): X_train2 is taken from the UN-reduced train_data2, so the PCA
# frames built above are never used downstream in this notebook — confirm intended.
X_train2 = train_data2[test_data2.columns].values
y_train = train_data2['target']
```
## LGB模型训练
```
# Cross-validated LightGBM training and evaluation.
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import numpy as np
# 5-fold cross-validation.
Folds = 5
kf = KFold(n_splits = Folds, random_state = 0, shuffle = True)
# Per-fold train and validation MSE.
MSE_DICT = {
    'train_mse':[],
    'test_mse':[]
}
# Train and evaluate one model per fold.
for i, (train_index, test_index) in enumerate(kf.split(X_train2)):
    # Gradient-boosted tree regressor.
    # NOTE(review): early_stopping_rounds is passed both here and in fit();
    # confirm the installed LightGBM version accepts both forms.
    lgb_reg = lgb.LGBMRegressor(
        boosting_type = 'gbdt',
        objective = 'regression',
        metric = 'mse',
        train_metric = True,
        n_estimators = 3000,
        early_stopping_rounds = 100,
        n_jobs = -1,
        learning_rate = 0.01,
        max_depth = 4,
        feature_fraction = 0.8,
        feature_fraction_seed = 0,
        bagging_fraction = 0.8,
        bagging_freq = 2,
        bagging_seed = 0,
        lambda_l1 = 1,
        lambda_l2 = 1,
        verbosity = 1
    )
    # Split this fold's train/validation rows.
    X_train_KFold, X_test_KFold = X_train2[train_index], X_train2[test_index]
    y_train_KFold, y_test_KFold = y_train[train_index], y_train[test_index]
    # Fit with early stopping monitored on both splits; verbose=600 limits log output.
    lgb_reg.fit(
        X=X_train_KFold,y=y_train_KFold,
        eval_set=[(X_train_KFold, y_train_KFold),(X_test_KFold, y_test_KFold)],
        eval_names=['Train','Test'],
        early_stopping_rounds=100,
        eval_metric='MSE',
        verbose=600
    )
    # Predict both splits at the best (early-stopped) iteration.
    y_train_KFold_predict = lgb_reg.predict(X_train_KFold,num_iteration=lgb_reg.best_iteration_)
    y_test_KFold_predict = lgb_reg.predict(X_test_KFold,num_iteration=lgb_reg.best_iteration_)
    # Report this fold's train/validation MSE.
    print('第{}折 训练和预测 训练MSE 预测MSE'.format(i))
    train_mse = mean_squared_error(y_train_KFold_predict, y_train_KFold)
    print('------\t', '训练MSE\t', train_mse, '\t------')
    test_mse = mean_squared_error(y_test_KFold_predict, y_test_KFold)
    print('------\t', '预测MSE\t', test_mse, '\t------\n')
    MSE_DICT['train_mse'].append(train_mse)
    MSE_DICT['test_mse'].append(test_mse)
# Average MSE across all folds.
print('------\t', '训练平均MSE\t', np.mean(MSE_DICT['train_mse']), '\t------')
print('------\t', '预测平均MSE\t', np.mean(MSE_DICT['test_mse']), '\t------')
```
|
github_jupyter
|
# 基于注意力的神经机器翻译
此笔记本训练一个将波斯语翻译为英语的序列到序列(sequence to sequence,简写为 seq2seq)模型。此例子难度较高,需要对序列到序列模型的知识有一定了解。
训练完此笔记本中的模型后,你将能够输入一个波斯语句子,例如 *"من می دانم."*,并返回其英语翻译 *"I know."*
对于一个简单的例子来说,翻译质量令人满意。但是更有趣的可能是生成的注意力图:它显示在翻译过程中,输入句子的哪些部分受到了模型的注意。
<img src="https://tensorflow.google.cn/images/spanish-english.png" alt="spanish-english attention plot">
请注意:运行这个例子用一个 P100 GPU 需要花大约 10 分钟。
```
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
```
## 下载和准备数据集
我们将使用 http://www.manythings.org/anki/ 提供的一个语言数据集。这个数据集包含如下格式的语言翻译对:
```
May I borrow this book? ¿Puedo tomar prestado este libro?
```
这个数据集中有很多种语言可供选择。我们将使用英语 - 波斯语数据集。为方便使用,我们在谷歌云上提供了此数据集的一份副本。但是你也可以自己下载副本。下载完数据集后,我们将采取下列步骤准备数据:
1. 给每个句子添加一个 *开始* 和一个 *结束* 标记(token)。
2. 删除特殊字符以清理句子。
3. 创建一个单词索引和一个反向单词索引(即一个从单词映射至 id 的词典和一个从 id 映射至单词的词典)。
4. 将每个句子填充(pad)到最大长度。
```
# The download snippet below is disabled by being wrapped in a no-op string
# literal; it fetched the original spa-eng dataset from Google Cloud.
# This notebook instead reads a local English-Persian file.
'''
# 下载文件
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
'''
# Local English-Persian translation pairs, one tab-separated pair per line.
path_to_file = "./lan/pes.txt"
# Convert a unicode string to ASCII by dropping combining marks
# (NFD decomposition splits accented characters into base char + mark).
def unicode_to_ascii(s):
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

def preprocess_sentence(w):
    """Lowercase, strip accents, isolate punctuation, and wrap the sentence
    in <start>/<end> tokens so the model knows when to begin and stop
    predicting."""
    text = unicode_to_ascii(w.lower().strip())
    # Put a space around each punctuation mark,
    # e.g. "he is a boy." => "he is a boy ."
    # ref: https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
    text = re.sub(r"([?.!,¿])", r" \1 ", text)
    # Collapse the runs of spaces introduced above.
    text = re.sub(r'[" "]+', " ", text)
    # Replace everything except letters and (?.!,¿) with a space.
    text = re.sub(r"[^a-zA-Z?.!,¿]+", " ", text)
    text = text.rstrip().strip()
    # Start/end markers tell the decoder where a sentence begins and ends.
    return '<start> ' + text + ' <end>'
# Quick demo of the cleaning on one English and one Spanish sentence.
en_sentence = u"May I borrow this book?"
sp_sentence = u"¿Puedo tomar prestado este libro?"
print(preprocess_sentence(en_sentence))
print(preprocess_sentence(sp_sentence).encode('utf-8'))
# 1. Strip accents  2. Clean each sentence  3. Return tuples of word pairs
# in file column order: (TARGET language, SOURCE language).
def create_dataset(path, num_examples):
    raw_lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
    word_pairs = [
        [preprocess_sentence(part) for part in line.split('\t')]
        for line in raw_lines[:num_examples]
    ]
    return zip(*word_pairs)
# Sanity check: show the last cleaned pair in the file.
en, sp = create_dataset(path_to_file, None)
print(en[-1])
print(sp[-1])
def max_length(tensor):
    """Length of the longest sequence in `tensor`."""
    return max(map(len, tensor))
def tokenize(lang):
    """Fit a word-level tokenizer on `lang` and return (padded_tensor, tokenizer)."""
    tokenizer = tf.keras.preprocessing.text.Tokenizer(
        filters='')
    tokenizer.fit_on_texts(lang)
    sequences = tokenizer.texts_to_sequences(lang)
    # Pad at the end ('post') so every sequence shares the longest length.
    padded = tf.keras.preprocessing.sequence.pad_sequences(sequences,
                                                           padding='post')
    return padded, tokenizer
def load_dataset(path, num_examples=None):
    """Load the corpus; return (input ids, target ids, input tokenizer, target tokenizer)."""
    # create_dataset yields cleaned (target, input) sentence tuples.
    target_sentences, input_sentences = create_dataset(path, num_examples)
    input_ids, input_tokenizer = tokenize(input_sentences)
    target_ids, target_tokenizer = tokenize(target_sentences)
    return input_ids, target_ids, input_tokenizer, target_tokenizer
```
### 限制数据集的大小以加快实验速度(可选)
在超过 10 万个句子的完整数据集上训练需要很长时间。为了更快地训练,我们可以将数据集的大小限制为 3 万个句子(当然,翻译质量也会随着数据的减少而降低):
```
# 尝试实验不同大小的数据集
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples)
# 计算目标张量的最大长度 (max_length)
max_length_targ, max_length_inp = max_length(target_tensor), max_length(input_tensor)
# 采用 80 - 20 的比例切分训练集和验证集
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# 显示长度
print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))
def convert(lang, tensor):
    """Print the id -> word mapping for every non-padding id in *tensor*."""
    for token_id in tensor:
        if token_id == 0:
            continue  # 0 is the padding id; skip it
        print("%d ----> %s" % (token_id, lang.index_word[token_id]))
print ("Input Language; index to word mapping")
convert(inp_lang, input_tensor_train[0])
print ()
print ("Target Language; index to word mapping")
convert(targ_lang, target_tensor_train[0])
```
### 创建一个 tf.data 数据集
```
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
steps_per_epoch = len(input_tensor_train)//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word_index)+1
vocab_tar_size = len(targ_lang.word_index)+1
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
example_input_batch, example_target_batch = next(iter(dataset))
example_input_batch.shape, example_target_batch.shape
```
## 编写编码器 (encoder) 和解码器 (decoder) 模型
实现一个基于注意力的编码器 - 解码器模型。关于这种模型,你可以阅读 TensorFlow 的 [神经机器翻译 (序列到序列) 教程](https://github.com/tensorflow/nmt)。本示例采用一组更新的 API。此笔记本实现了上述序列到序列教程中的 [注意力方程式](https://github.com/tensorflow/nmt#background-on-the-attention-mechanism)。下图显示了注意力机制为每个输入单词分配一个权重,然后解码器将这个权重用于预测句子中的下一个单词。下图和公式是 [Luong 的论文](https://arxiv.org/abs/1508.04025v5)中注意力机制的一个例子。
<img src="https://tensorflow.google.cn/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
输入经过编码器模型,编码器模型为我们提供形状为 *(批大小,最大长度,隐藏层大小)* 的编码器输出和形状为 *(批大小,隐藏层大小)* 的编码器隐藏层状态。
下面是所实现的方程式:
<img src="https://tensorflow.google.cn/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
<img src="https://tensorflow.google.cn/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
本教程的编码器采用 [Bahdanau 注意力](https://arxiv.org/pdf/1409.0473.pdf)。在用简化形式编写之前,让我们先决定符号:
* FC = 完全连接(密集)层
* EO = 编码器输出
* H = 隐藏层状态
* X = 解码器输入
以及伪代码:
* `score = FC(tanh(FC(EO) + FC(H)))`
* `attention weights = softmax(score, axis = 1)`。 Softmax 默认被应用于最后一个轴,但是这里我们想将它应用于 *第一个轴*, 因为分数 (score) 的形状是 *(批大小,最大长度,隐藏层大小)*。最大长度 (`max_length`) 是我们的输入的长度。因为我们想为每个输入分配一个权重,所以 softmax 应该用在这个轴上。
* `context vector = sum(attention weights * EO, axis = 1)`。选择第一个轴的原因同上。
* `embedding output` = 解码器输入 X 通过一个嵌入层。
* `merged vector = concat(embedding output, context vector)`
* 此合并后的向量随后被传送到 GRU
每个步骤中所有向量的形状已在代码的注释中阐明:
```
class Encoder(tf.keras.Model):
    """Embeds source token ids and runs them through a single GRU layer."""

    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        # return_sequences gives per-step outputs (needed by attention);
        # return_state gives the final hidden state used to seed the decoder.
        self.gru = tf.keras.layers.GRU(
            self.enc_units,
            return_sequences=True,
            return_state=True,
            recurrent_initializer='glorot_uniform',
        )

    def call(self, x, hidden):
        """Return (outputs, final state) for a batch of token-id sequences."""
        embedded = self.embedding(x)
        output, state = self.gru(embedded, initial_state=hidden)
        return output, state

    def initialize_hidden_state(self):
        """All-zero initial state, one row per example in the batch."""
        return tf.zeros((self.batch_sz, self.enc_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
# 样本输入
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))
class BahdanauAttention(tf.keras.layers.Layer):
    """Additive (Bahdanau) attention: score = V(tanh(W1(values) + W2(query)))."""

    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, query, values):
        # query (the decoder hidden state) shape == (batch size, hidden size)
        # hidden_with_time_axis shape == (batch size, 1, hidden size)
        # The extra axis is added so the addition below broadcasts when
        # computing the score.
        hidden_with_time_axis = tf.expand_dims(query, 1)
        # score shape == (batch size, max length, 1)
        # The last axis is 1 because self.V has a single output unit;
        # before applying self.V the tensor shape is (batch size, max length, units).
        score = self.V(tf.nn.tanh(
            self.W1(values) + self.W2(hidden_with_time_axis)))
        # attention_weights shape == (batch size, max length, 1)
        # softmax over axis=1 (the time axis) so each source position
        # receives a weight.
        attention_weights = tf.nn.softmax(score, axis=1)
        # context_vector shape after the sum == (batch size, hidden size)
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
attention_layer = BahdanauAttention(10)
attention_result, attention_weights = attention_layer(sample_hidden, sample_output)
print("Attention result shape: (batch size, units) {}".format(attention_result.shape))
print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(attention_weights.shape))
class Decoder(tf.keras.Model):
    """One-step decoder: attends over encoder outputs, then predicts the next token."""

    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.dec_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        # Projects the GRU output to vocabulary-sized logits.
        self.fc = tf.keras.layers.Dense(vocab_size)
        # Used for attention over the encoder outputs.
        self.attention = BahdanauAttention(self.dec_units)

    def call(self, x, hidden, enc_output):
        # enc_output shape == (batch size, max length, hidden size)
        context_vector, attention_weights = self.attention(hidden, enc_output)
        # x shape after passing through the embedding == (batch size, 1, embedding dim)
        x = self.embedding(x)
        # x shape after concatenation == (batch size, 1, embedding dim + hidden size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # Feed the merged vector to the GRU.
        output, state = self.gru(x)
        # output shape == (batch size * 1, hidden size)
        output = tf.reshape(output, (-1, output.shape[2]))
        # x shape == (batch size, vocab) — unnormalized logits.
        x = self.fc(output)
        return x, state, attention_weights
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
sample_decoder_output, _, _ = decoder(tf.random.uniform((64, 1)),
sample_hidden, sample_output)
print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))
```
## 定义优化器和损失函数
```
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
    """Cross-entropy averaged over the batch, ignoring padding positions (id 0)."""
    per_token_loss = loss_object(real, pred)
    # Zero out the loss at positions where the target is the padding id.
    pad_mask = tf.math.logical_not(tf.math.equal(real, 0))
    pad_mask = tf.cast(pad_mask, dtype=per_token_loss.dtype)
    return tf.reduce_mean(per_token_loss * pad_mask)
```
## 检查点(基于对象保存)
```
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
```
## 训练
1. 将 *输入* 传送至 *编码器*,编码器返回 *编码器输出* 和 *编码器隐藏层状态*。
2. 将编码器输出、编码器隐藏层状态和解码器输入(即 *开始标记*)传送至解码器。
3. 解码器返回 *预测* 和 *解码器隐藏层状态*。
4. 解码器隐藏层状态被传送回模型,预测被用于计算损失。
5. 使用 *教师强制 (teacher forcing)* 决定解码器的下一个输入。
6. *教师强制* 是将 *目标词* 作为 *下一个输入* 传送至解码器的技术。
7. 最后一步是计算梯度,并将其应用于优化器和反向传播。
```
@tf.function
def train_step(inp, targ, enc_hidden):
    """Run one teacher-forced training step over a batch; return the mean per-timestep loss."""
    loss = 0
    with tf.GradientTape() as tape:
        enc_output, enc_hidden = encoder(inp, enc_hidden)
        dec_hidden = enc_hidden
        # Seed every sequence in the batch with the <start> token id.
        dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)
        # Teacher forcing — feed the target word as the next input.
        for t in range(1, targ.shape[1]):
            # Pass enc_output to the decoder.
            predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
            loss += loss_function(targ[:, t], predictions)
            # Use teacher forcing.
            dec_input = tf.expand_dims(targ[:, t], 1)
    batch_loss = (loss / int(targ.shape[1]))
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return batch_loss
EPOCHS = 10

# Main training loop: one teacher-forced pass over the dataset per epoch.
for epoch in range(EPOCHS):
    start = time.time()
    enc_hidden = encoder.initialize_hidden_state()
    total_loss = 0
    for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
        batch_loss = train_step(inp, targ, enc_hidden)
        total_loss += batch_loss
        if batch % 100 == 0:
            print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
                                                         batch,
                                                         batch_loss.numpy()))
    # Save a checkpoint of the model every 2 epochs.
    if (epoch + 1) % 2 == 0:
        checkpoint.save(file_prefix = checkpoint_prefix)
    print('Epoch {} Loss {:.4f}'.format(epoch + 1,
                                        total_loss / steps_per_epoch))
    print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
```
## 翻译
* 评估函数类似于训练循环,不同之处在于在这里我们不使用 *教师强制*。每个时间步的解码器输入是其先前的预测、隐藏层状态和编码器输出。
* 当模型预测 *结束标记* 时停止预测。
* 存储 *每个时间步的注意力权重*。
请注意:对于一个输入,编码器输出仅计算一次。
```
def evaluate(sentence):
    """Greedily translate *sentence*; return (result, cleaned sentence, attention matrix).

    No teacher forcing here: each decoder input is the previous prediction.
    Decoding stops early when the <end> token is produced.
    """
    attention_plot = np.zeros((max_length_targ, max_length_inp))
    sentence = preprocess_sentence(sentence)
    inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
                                                           maxlen=max_length_inp,
                                                           padding='post')
    inputs = tf.convert_to_tensor(inputs)
    result = ''
    hidden = [tf.zeros((1, units))]
    enc_out, enc_hidden = encoder(inputs, hidden)
    dec_hidden = enc_hidden
    dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)
    for t in range(max_length_targ):
        predictions, dec_hidden, attention_weights = decoder(dec_input,
                                                             dec_hidden,
                                                             enc_out)
        # Store the attention weights to plot later.
        attention_weights = tf.reshape(attention_weights, (-1, ))
        attention_plot[t] = attention_weights.numpy()
        # Greedy decoding: take the arg-max token at each step.
        predicted_id = tf.argmax(predictions[0]).numpy()
        result += targ_lang.index_word[predicted_id] + ' '
        if targ_lang.index_word[predicted_id] == '<end>':
            return result, sentence, attention_plot
        # The predicted id is fed back into the model.
        dec_input = tf.expand_dims([predicted_id], 0)
    return result, sentence, attention_plot
# Helper for plotting the attention weights.
def plot_attention(attention, sentence, predicted_sentence):
    """Show the attention matrix with input words on x and predicted words on y."""
    fig = plt.figure(figsize=(10, 10))
    axis = fig.add_subplot(1, 1, 1)
    axis.matshow(attention, cmap='viridis')
    label_font = {'fontsize': 14}
    axis.set_xticklabels([''] + sentence, fontdict=label_font, rotation=90)
    axis.set_yticklabels([''] + predicted_sentence, fontdict=label_font)
    axis.xaxis.set_major_locator(ticker.MultipleLocator(1))
    axis.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def translate(sentence):
    """Run evaluate() on *sentence*, print the translation and plot its attention."""
    result, sentence, attention_plot = evaluate(sentence)
    print('Input: %s' % (sentence))
    print('Predicted translation: {}'.format(result))
    # Trim the attention matrix to the actual output/input lengths before plotting.
    trimmed = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
    plot_attention(trimmed, sentence.split(' '), result.split(' '))
```
## 恢复最新的检查点并验证
```
# 恢复检查点目录 (checkpoint_dir) 中最新的检查点
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate(u'hace mucho frio aqui.')
translate(u'esta es mi vida.')
translate(u'¿todavia estan en casa?')
# 错误的翻译
translate(u'trata de averiguarlo.')
```
|
github_jupyter
|
This is from a "Getting Started" competition from Kaggle [Titanic competition](https://www.kaggle.com/c/titanic) to showcase how we can use Auto-ML along with datmo and docker, in order to track our work and make the machine learning workflow reproducible and usable. Some part of the data analysis is inspired from this [kernel](https://www.kaggle.com/sinakhorami/titanic-best-working-classifier)
This approach can be categorized into following methods,
1. Exploratory Data Analysis (EDA)
2. Data Cleaning
3. Using Auto-ML to figure out the best algorithm and hyperparameter
During the process of EDA and feature engineering, we would be using datmo to create versions of work by creating snapshot.
```
%matplotlib inline
import numpy as np
import pandas as pd
import re as re
train = pd.read_csv('./input/train.csv', header = 0, dtype={'Age': np.float64})
test = pd.read_csv('./input/test.csv' , header = 0, dtype={'Age': np.float64})
full_data = [train, test]
print (train.info())
```
#### 1. Exploratory Data Analysis
###### To understand how each feature has the contribution to Survive
###### a. `Sex`
```
print (train[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean())
```
###### b. `Pclass`
```
print (train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean())
```
c. `SibSp and Parch`
With the number of siblings/spouse and the number of children/parents we can create new feature called Family Size.
```
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
print (train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean())
```
`FamilySize` seems to have a significant effect on our prediction. `Survived` has increased until a `FamilySize` of 4 and has decreased after that. Let's categorize people to check they are alone or not.
```
for dataset in full_data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
print (train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean())
```
d. `Embarked`
We fill the missing values with the most frequently occurring value, `S`.
```
for dataset in full_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
print (train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean())
```
e. `Fare`
Fare also has some missing values which will be filled with the median
```
for dataset in full_data:
dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
train['CategoricalFare'] = pd.qcut(train['Fare'], 4)
print (train[['CategoricalFare', 'Survived']].groupby(['CategoricalFare'], as_index=False).mean())
```
It shows that `Fare` has a significant effect on survival, showcasing that people having paid higher fares had higher chances of survival.
f. `Age`
There are plenty of missing values in this feature. We generate random numbers between (mean − std) and (mean + std) to fill them, then categorize age into 5 ranges.
```
for dataset in full_data:
age_avg = dataset['Age'].mean()
age_std = dataset['Age'].std()
age_null_count = dataset['Age'].isnull().sum()
age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list
dataset['Age'] = dataset['Age'].astype(int)
train['CategoricalAge'] = pd.cut(train['Age'], 5)
print (train[['CategoricalAge', 'Survived']].groupby(['CategoricalAge'], as_index=False).mean())
```
g. `Name`
Let's extract the title from each passenger's name.
```
def get_title(name):
    """Extract the honorific title (e.g. 'Mr', 'Mrs', 'Miss') from a passenger name.

    Returns an empty string when no title is found.
    """
    # Fix: use a raw string for the pattern — '\.' in a plain string is an
    # invalid escape sequence (DeprecationWarning, SyntaxWarning in newer Pythons).
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    # If the title exists, extract and return it.
    if title_search:
        return title_search.group(1)
    return ""
for dataset in full_data:
dataset['Title'] = dataset['Name'].apply(get_title)
print("=====Title vs Sex=====")
print(pd.crosstab(train['Title'], train['Sex']))
print("")
print("=====Title vs Survived=====")
print (train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean())
```
Let's categorize it and check the title impact on survival rate convert the rare titles to `Rare`
```
for dataset in full_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
print (train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean())
import json
config = {"features analyzed": ["Sex", "Pclass", "FamilySize", "IsAlone", "Embarked", "Fare", "Age", "Title"]}
with open('config.json', 'w') as outfile:
json.dump(config, outfile)
```
#### Creating a datmo snapshot to save my work, this helps me save my current work before proceeding onto data cleaning
```bash
home:~/datmo-tutorials/auto-ml$ datmo snapshot create -m "EDA"
Creating a new snapshot
Created snapshot with id: 30803662ab49bb1ef67a5d0861eecf91cff1642f
home:~/datmo-tutorials/auto-ml$ datmo snapshot ls
+---------+-------------+-------------------------------------------+-------+---------+-------+
| id | created at | config | stats | message | label |
+---------+-------------+-------------------------------------------+-------+---------+-------+
| 30803662| 2018-05-15 | {u'features analyzed': [u'Sex', | {} | EDA | None |
| | 23:15:44 | u'Pclass', u'FamilySize', u'IsAlone', | | | |
| | | u'Embarked', u'Fare', u'Age', u'Title']} | | | |
+---------+-------------+-------------------------------------------+-------+---------+-------+
```
#### 2. Data Cleaning
Now let's clean our data and map our features into numerical values.
```
train_copy = train.copy()
test_copy = test.copy()
full_data_copy = [train_copy, test_copy]
for dataset in full_data_copy:
# Mapping Sex
dataset['Sex'] = dataset['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Mapping titles
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Mapping Embarked
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
# Mapping Fare
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
# Mapping Age
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
# Feature Selection
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp',\
'Parch', 'FamilySize']
train_copy = train_copy.drop(drop_elements, axis = 1)
train_copy = train_copy.drop(['CategoricalAge', 'CategoricalFare'], axis = 1)
test_copy = test_copy.drop(drop_elements, axis = 1)
print (train_copy.head(10))
train_copy = train_copy.values
test_copy = test_copy.values
config = {"selected features": ["Sex", "Pclass", "Age", "Fare", "Embarked", "Fare", "IsAlone", "Title"]}
with open('config.json', 'w') as outfile:
json.dump(config, outfile)
```
#### 3. Using Auto-ML to figure out the best algorithm and hyperparameter
##### Now we have cleaned our data it's time to use auto-ml in order to get the best algorithm for this data

```
from tpot import TPOTClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
X = train_copy[0::, 1::]
y = train_copy[0::, 0]
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=0.75, test_size=0.25)
tpot = TPOTClassifier(generations=5, population_size=50, verbosity=2)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))
tpot.export('tpot_titanic_pipeline.py')
stats = {"accuracy": (tpot.score(X_test, y_test))}
with open('stats.json', 'w') as outfile:
json.dump(stats, outfile)
```
### Let's again create a datmo snapshot to save my work, this helps me save my current work before changing my feature selection
```bash
home:~/datmo-tutorials/auto-ml$ datmo snapshot create -m "auto-ml-1"
Creating a new snapshot
Created snapshot with id: adf76fa7d0800cc6eec033d4b00f97536bcb0c20
home:~/datmo-tutorials/auto-ml$ datmo snapshot ls
+---------+-------------+-------------------------------------------+-----------------+---------------+-------+
| id | created at | config | stats | message | label |
+---------+-------------+-------------------------------------------+-----------------+---------------+-------+
| adf76fa7| 2018-05-16 | {u'selected features': [u'Sex', u'Pclass',|{u'accuracy': | auto-ml-1 | None |
| | 01:24:53 | u'Age', u'Fare', u'Embarked', | 0.8206278} | | |
| | | u'Fare', u'IsAlone', u'Title']} | | | |
| 30803662| 2018-05-15 | {u'features analyzed': [u'Sex', | {} | EDA | None |
| | 23:15:44 | u'Pclass', u'FamilySize', u'IsAlone', | | | |
| | | u'Embarked', u'Fare', u'Age', u'Title']} | | | |
+---------+-------------+-------------------------------------------+-----------------+---------------+-------+
```
#### Another feature selection
1. Let's keep `FamilySize` rather than just using `IsAlone`
2. Let's use `Fare_Per_Person` instead of binning `Fare`
```
train_copy = train.copy()
test_copy = test.copy()
full_data_copy = [train_copy, test_copy]
for dataset in full_data_copy:
# Mapping Sex
dataset['Sex'] = dataset['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Mapping titles
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Mapping Embarked
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
# Mapping Fare
dataset['FarePerPerson']=dataset['Fare']/(dataset['FamilySize']+1)
# Mapping Age
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
# Feature Selection
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp',\
'Parch', 'IsAlone', 'Fare']
train_copy = train_copy.drop(drop_elements, axis = 1)
train_copy = train_copy.drop(['CategoricalAge', 'CategoricalFare'], axis = 1)
test_copy = test_copy.drop(drop_elements, axis = 1)
print (train_copy.head(10))
train_copy = train_copy.values
test_copy = test_copy.values
from tpot import TPOTClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
X = train_copy[0::, 1::]
y = train_copy[0::, 0]
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=0.75, test_size=0.25)
tpot = TPOTClassifier(generations=5, population_size=50, verbosity=2)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))
tpot.export('tpot_titanic_pipeline.py')
config = {"selected features": ["Sex", "Pclass", "Age", "Fare", "Embarked", "FarePerPerson", "FamilySize", "Title"]}
with open('config.json', 'w') as outfile:
json.dump(config, outfile)
stats = {"accuracy": (tpot.score(X_test, y_test))}
with open('stats.json', 'w') as outfile:
json.dump(stats, outfile)
```
### Let's again create a datmo snapshot to save my final work
```bash
home:~/datmo-tutorials/auto-ml$ datmo snapshot create -m "auto-ml-2"
Creating a new snapshot
Created snapshot with id: 30f8366b7de96d58a7ef8cda266216b01cab4940
home:~/datmo-tutorials/auto-ml$ datmo snapshot ls
+---------+-------------+-------------------------------------------+-----------------+---------------+-------+
| id | created at | config | stats | message | label |
+---------+-------------+-------------------------------------------+-----------------+---------------+-------+
| 30f8366b| 2018-05-16 | {u'selected features': [u'Sex', u'Pclass',|{u'accuracy': | auto-ml-2 | None |
| | 03:04:06 | u'Age', u'Fare', u'Embarked', u'Title', | 0.8206278} | | |
| | | u'FarePerPerson', u'FamilySize']} | | | |
| adf76fa7| 2018-05-16 | {u'selected features': [u'Sex', u'Pclass',|{u'accuracy': | auto-ml-1 | None |
| | 01:24:53 | u'Age', u'Fare', u'Embarked', | 0.8206278} | | |
| | | u'Fare', u'IsAlone', u'Title']} | | | |
| 30803662| 2018-05-15 | {u'features analyzed': [u'Sex', | {} | EDA | None |
| | 23:15:44 | u'Pclass', u'FamilySize', u'IsAlone', | | | |
| | | u'Embarked', u'Fare', u'Age', u'Title']} | | | |
+---------+-------------+-------------------------------------------+-----------------+---------------+-------+
```
#### Let's now move to a different snapshot in order to get the `experimentation.ipynb`, `submission.csv`, `tpot_titanic_pipeline.py` or any other files in that version
We perform `checkout` command in order to achieve it
```bash
home:~/datmo-tutorials/auto-ml$ # Run this command: datmo snapshot checkout --id <snapshot-id>
home:~/datmo-tutorials/auto-ml$ datmo snapshot checkout --id 30803662
```
|
github_jupyter
|
```
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import os
from scipy.optimize import curve_fit
os.getcwd()
data = pd.read_csv('data/CA_Fc_GC_MeCN_0V-1.2V_P-06-14/data.csv', sep=',')
data
data.plot('t', 'iw', xlim=(0,2))
index_max = data['iw'].idxmax()
time_max = data.loc[index_max,'t']
print(time_max)
# In E4, near optimal values of Rm and Cm were:
Rm = 10000 #10 kOhm
Cm = 100e-9 #100 nF
pstat_time_constant = Rm*Cm
# From an EIS spectrum of Fc in dry MeCN, Ru and Cdl are approximately:
Ru = 4.04e+02
Cdl = 3.93e-06
cell_time_constant = Ru*Cdl
#Value of the combined time constant tau
print(cell_time_constant + pstat_time_constant)
pot_step_time = time_max # step time start in s
pot_rest_time = data.iloc[-1,-1] # rest time start in s
# For both of these capacitors to charge, we should ignore data before at least 5τ of each:
fit_start_time = pot_step_time + (5 * (cell_time_constant + pstat_time_constant))
# Fit until 5 ms before the rest step
fit_times = data[data['t'].between(fit_start_time, pot_rest_time - 0.005)]['t'].to_numpy()
fit_currents = data[data['t'].between(fit_start_time, pot_rest_time - 0.005)]['iw'].to_numpy()
fit_times_no_offset = fit_times - pot_step_time
#print(fit_times_no_offset)
# Model function for curve_fit: the empirical Cottrell form.
def Empirical_Cottrell(t, a):
    """Return a / sqrt(t) — diffusion-limited current decay with prefactor *a*."""
    return np.divide(a, np.sqrt(t))
#Implementing curve_fit to solve for the empirical Cottrell prefactor a
guess_prefactor = 1e-10
fit_prefactor, cov = curve_fit(Empirical_Cottrell, fit_times_no_offset, fit_currents, guess_prefactor)
print('a = {0:.3E}'.format(fit_prefactor[0]))
#Calculating the diffusion constant D based on the fitted prefactor a, and the Cottrell Equation
a = fit_prefactor[0]
n = 1
F = 96485 #C/mol
A = np.pi*2.5**2/1000**2 #m^2
C_bulk = 0.8 #mol*m^-2
D = (a**2 * np.pi) / (n*F*A*C_bulk)**2 * 100**2 #cm^2/s
print('D = {0:.3E}'.format(D) + ' cm^2 s^-1')
#Plotting the chronoamperometry curve with the Cottrell Equation fit
fig, (ax1, ax2) = plt.subplots(1,2, figsize = (15,10))
ax1.scatter(data['t'], data['iw'], label = 'Data', color = 'greenyellow')
ax1.set_ylabel('$i_w$ / A', fontsize = 15)
ax1.set_xlabel('t / s', fontsize = 15)
#ax.set_xlim(.99, 2.01)
ax1.plot(fit_times, Empirical_Cottrell(fit_times_no_offset,a), color='red', label = 'Cottrell Equation Fit - Forward Step', linewidth=3)
ax1.legend(fontsize = 15)
ax2.scatter(data['t'], data['iw'], label = 'Data', color = 'greenyellow')
ax2.set_title('Zoomed-In')
ax2.set_ylabel('$i_w$ / A', fontsize = 15)
ax2.set_xlabel('t / s', fontsize = 15)
ax2.set_xlim(0, 3)
ax2.plot(fit_times, Empirical_Cottrell(fit_times_no_offset,a), color='red', label = 'Cottrell Equation Fit - Forward Step', linewidth=3)
#ax2.legend(fontsize = 15)
# Integrating under the current vs. time curve to obtain the charge passed.
from scipy.integrate import trapz
total_charge = trapz(fit_currents, fit_times)
# Fix: the print referenced the undefined name `total_current` (NameError);
# the integrated value is stored in `total_charge`.
print('Charge passed is '+'{0:.3E}'.format(total_charge) + ' C')
#Using this charge, calcs the moles of Fc oxidized assuming 100% Faradaic efficiency
F = 96485 #C / mol
moles_Fc_oxidized = total_charge/F
print('Moles of Fc Oxidized is '+'{0:.3E}'.format(moles_Fc_oxidized) + ' mol')
#If you assume that the Cottrell equation fit for a bulk [Fc+] concentration of 0.8 mM is good,
#then you can backcalculate an electrolyzed volume of 0.0129 mL. That assumes that the concentration
#is constant throughout that volume which we know is not true
volume = total_charge / (F*C_bulk) * 1000**2
print('Vol. of [Fc+] electrolyzed is '+'{0:.3}'.format(volume) + ' mL')
#Assume volume electrolyzed of 20 mL. This is a bad assumption.
volume = 0.020/1000 #L
conc = moles_Fc_oxidized / volume *1000
print('Conc. of [Fc+] is '+'{0:.3E}'.format(conc) + ' mM')
#This is just recalculating the Cottrell equation.. and confirming that the bulk conc. we assumed
#to be 0.8 mM is indeed 0.8 mM
a = -1.706E-05 #From #CA-fitting-06-16
n = 1
F = 96485 #C/mol
A = np.pi*2.5**2/1000**2 #m^2
D = 3.979E-06 #cm^2 s^-1
C_bulk = (-a*100)/(n*F*A) * np.sqrt(np.pi/D)
print('Conc. of [Fc+] is '+'{0:.3E}'.format(C_bulk) + ' mM')
```
|
github_jupyter
|
# Extract from JSON and XML
You'll now get practice extracting data from JSON and XML. You'll extract the same population data from the previous exercise, except the data will be in a different format.
Both JSON and XML are common formats for storing data. XML was established before JSON, and JSON has become more popular over time. They both tend to be used for sending data via web APIs, which you'll learn about later in the lesson.
Sometimes, you can obtain the same data in either JSON or XML format. That is the case for this exercise. You'll use the same data except one file is formatted as JSON and the other as XML.
There is a solution file for these exercises. Go to File->Open and click on 2_extract_exercise_solution.ipynb.
# Extract JSON and JSON Exercise
First, you'll practice extracting data from a JSON file. Run the cell below to print out the first line of the JSON file.
```
###
# Run the following cell.
# This cell loads a function that prints the first n lines of
# a file.
#
# Then this function is called on the JSON file to print out
# the first line of the population_data.json file
###
def print_lines(n, file_name):
    """Print the first *n* lines of *file_name*.

    Fix: use a context manager so the file is closed even if reading raises;
    the original left the handle open on any exception between open() and close().
    """
    with open(file_name) as f:
        for _ in range(n):
            print(f.readline())
print_lines(1, 'population_data.json')
```
The first "line" in the file is actually the entire file. JSON is a compact way of representing data in a dictionary-like format. Luckily, pandas has a method to [read in a json file](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_json.html).
If you open the link with the documentation, you'll see there is an *orient* option that can handle JSON formatted in different ways:
```
'split' : dict like {index -> [index], columns -> [columns], data -> [values]}
'records' : list like [{column -> value}, ... , {column -> value}]
'index' : dict like {index -> {column -> value}}
'columns' : dict like {column -> {index -> value}}
'values' : just the values array
```
In this case, the JSON is formatted with a 'records' orientation, so you'll need to use that value in the read_json() method. You can tell that the format is 'records' by comparing the pattern in the documentation with the pattern in the JSON file.
Next, read in the population_data.json file using pandas.
```
# TODO: Read in the population_data.json file using pandas's
# read_json method. Don't forget to specific the orient option
# store the results in df_json
import pandas as pd
df_json = None
# TODO: Use the head method to see the first few rows of the resulting
# dataframe
```
Notice that this population data is the same as the data from the previous exercise. The column order might have changed, but the data is otherwise the same.
# Other Ways to Read in JSON
Besides using pandas to read JSON files, you can use the json library. Run the code cell below to see an example of reading in JSON with the json library. Python treats JSON data like a dictionary.
```
import json
# read in the JSON file
with open('population_data.json') as f:
json_data = json.load(f)
# print the first record in the JSON file
print(json_data[0])
print('\n')
# show that JSON data is essentially a dictionary
print(json_data[0]['Country Name'])
print(json_data[0]['Country Code'])
```
# Extract XML
Next, you'll work with the same data except now the data is in xml format. Run the next code cell to see what the first fifteen lines of the data file look like.
```
# Run the code cell to print out the first 15 lines of the xml file
print_lines(15, 'population_data.xml')
```
XML looks very similar to HTML. XML is formatted with tags with values inside the tags. XML is not as easy to navigate as JSON. Pandas cannot read in XML directly. One reason is that tag names are user defined. Every XML file might have different formatting. You can imagine why XML has fallen out of favor relative to JSON.
### How to read and navigate XML
There is a Python library called BeautifulSoup, which makes reading in and parsing XML data easier. Here is the link to the documentation: [Beautiful Soup Documentation](https://www.crummy.com/software/BeautifulSoup/)
The find() method will find the first place where an xml element occurs. For example using find('record') will return the first record in the xml file:
```xml
<record>
<field name="Country or Area" key="ABW">Aruba</field>
<field name="Item" key="SP.POP.TOTL">Population, total</field>
<field name="Year">1960</field>
<field name="Value">54211</field>
</record>
```
The find_all() method returns all of the matching tags. So find_all('record') would return all of the elements with the `<record>` tag.
Run the code cells below to get a basic idea of how to navigate XML with BeautifulSoup. To navigate through the xml file, you search for a specific tag using the find() method or find_all() method.
Below these code cells, there is an exercise for wrangling the XML data.
```
# import the BeautifulSoup library
from bs4 import BeautifulSoup

# open the population_data.xml file and load into Beautiful Soup
with open("population_data.xml") as fp:
    soup = BeautifulSoup(fp, "lxml")  # lxml is the Parser type

# output the first 5 records in the xml file
# this is an example of how to navigate the XML document with BeautifulSoup
i = 0
# use the find_all method to get all record tags in the document
for record in soup.find_all('record'):
    i += 1
    # use find_all to get every field inside this record; the loop variable is
    # named `field` so it does not shadow the outer `record` variable
    for field in record.find_all('field'):
        print(field['name'], ': ' , field.text)
    print()
    if i == 5:
        break
```
# XML Exercise (Challenge)
Create a data frame from the xml file. This exercise is somewhat tricky. One solution would be to convert the xml data into dictionaries and then use the dictionaries to create a data frame.
The dataframe should have the following layout:
| Country or Area | Year | Item | Value |
|----|----|----|----|
| Aruba | 1960 | Population, total | 54211 |
| Aruba | 1961 | Population, total | 55348 |
etc...
Technically, extracting XML, transforming the results, and putting it into a data frame is a full ETL pipeline. This exercise is jumping ahead in terms of what's to come later in the lesson. But it's a good chance to familiarize yourself with XML.
```
# TODO: Create a pandas data frame from the XML data.
# HINT: You can use dictionaries to create pandas data frames.
# HINT: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.from_dict.html#pandas.DataFrame.from_dict
# HINT: You can make a dictionary for each column or for each row (See the link above for more information)
# HINT: Modify the code from the previous code cell
```
# Conclusion
Like CSV, JSON and XML are ways to format data. If everything is formatted correctly, JSON is especially easy to work with. XML is an older standard and a bit trickier to handle.
As a reminder, there is a solution file for these exercises. You can go to File->Open and then click on 2_extract_exercise.
|
github_jupyter
|
# Main Code
```
import os
import time
import numpy as np
import redis
from IPython.display import clear_output
from PIL import Image
from io import BytesIO
import base64
import json
import matplotlib.pyplot as plt
from face_detection import get_face
from utils import img_to_txt, decode_img, log_error
##########################
#
# Global Variables
#
#
##########################
# Get Request
# Use the configured input Redis host when the env var is set and long enough;
# otherwise fall back to localhost.  The original condition tested the literal
# key 'os.environ' (always absent) and read a different variable
# ('redis_server'), so it could never select the configured host and risked a
# KeyError.
_in_host = os.environ.get('face_input_redis_server', '')
server = _in_host if len(_in_host) > 1 else 'localhost'
# connect with redis server as Bob
r = redis.Redis(host=server, port=6379)
# Publish and subscribe redis
req_p = r.pubsub()
# subscribe to request Channel
req_p.subscribe('new_request')
# Forward Request
# NOTE(review): the env var name 'face_ouput_redis_server' looks like a typo
# for 'face_output_redis_server' -- kept as-is because deployments may rely on it.
_out_host = os.environ.get('face_ouput_redis_server', '')
out_server = _out_host if len(_out_host) > 1 else 'localhost'
print(f"User Server {out_server}")
# connect with redis server as Bob
out_r = redis.Redis(host=out_server, port=6379)
def process_request(request ):
'''
Decode the base64 image carried in `request`, run face detection on it,
display the detected face inline, and return it.

Parameters: request -- dict with an 'image' key holding a base64-encoded image.
Returns: the face crop produced by get_face (exact format depends on
face_detection.get_face -- TODO confirm).
'''
im = decode_img(request['image'])
face = get_face(im)
# side effect: renders a matplotlib figure of the detected face
plt.imshow(face)
plt.show()
return face
def forward_request(id_, face):
    """
    Forward a processed request to the output Redis server.

    Builds a payload dict and publishes it on the 'new_request' channel of the
    output server through a pipeline.

    Parameters: id_ -- identifier of the request being forwarded.
                face -- detected face image (currently unused, see note below).
    """
    # local import: datetime is not imported at module level in this notebook
    from datetime import datetime
    global out_r
    with out_r.pipeline() as pipe:
        image = {
            'id': id_,  # original code was missing the comma here (SyntaxError)
            'request_time': str(datetime.today()),
            # NOTE(review): the detected `face` argument is ignored and a fixed
            # test image is forwarded instead -- presumably a testing leftover.
            'image': img_to_txt("test_images/test.jpeg"),
            'status': 'pending'
        }
        # Publishing to the stream for testing
        pipe.publish('new_request', json.dumps(image))
        pipe.execute()
    # original printed an undefined name `ip`; report the destination host instead
    print(f"Request Forward to {out_server}")
def listen_stream():
    '''
    Listen to the Redis pub/sub stream and process each incoming request.

    Incoming messages are queued as they arrive and processed in FIFO order.
    Runs forever; individual failures are logged via log_error and do not stop
    the loop.
    '''
    count = 0
    requests = []
    while True:
        try:
            # Poll the pub/sub channel.  get_message() returns None when
            # nothing is waiting, so guard before touching ['data'] -- the
            # original indexed None and raised TypeError on every idle poll.
            message = req_p.get_message()
            if message is not None:
                try:
                    requests.append(str(message['data'].decode()))
                except (TypeError, AttributeError) as e:
                    # e.g. subscription confirmations carry an int payload
                    log_error(e)
            # Process the oldest queued request (FIFO).  The original popped
            # into req_id but then processed the most recent `request` instead.
            if requests:
                pending = requests.pop(0)
                process_request(json.loads(pending))
                count += 1
                print(count)
            else:
                # avoid pegging a CPU core while the channel is idle
                time.sleep(0.001)
        except Exception as e:
            log_error(e)
# Start the blocking listen loop (runs forever; the statements below only
# execute after it is interrupted).
listen_stream()
from PIL import Image
import base64
import numpy as np
from io import BytesIO
# Load the test image as an RGB numpy array of shape (height, width, 3)
image = np.asarray(Image.open("test_images/test.jpeg").convert("RGB"))
print(image.shape)
import base64
import numpy as np
import matplotlib.pyplot as plt
# Display the loaded image inline
plt.imshow(image)
plt.show()
import cv2
def npImage_to_txt(image):
    """
    Encode a numpy image as a base64 JPEG string.

    The image is JPEG-compressed with OpenCV, the compressed bytes are
    base64-encoded, and the result is returned as an ASCII str.
    """
    # cv2.imencode returns (success_flag, one-dimensional uint8 buffer)
    jpeg_buffer = cv2.imencode('.jpg', image)[1]
    encoded = base64.b64encode(jpeg_buffer.tobytes())
    return encoded.decode()
npImage_to_txt(image)
# NOTE(review): `im_b64` is not defined in this scope (it is local to
# npImage_to_txt), so the next line raises NameError as written -- the call
# above should be assigned, e.g. im_b64 = npImage_to_txt(image).
im_bytes = base64.b64decode(im_b64)
im_arr = np.frombuffer(im_bytes, dtype=np.uint8) # im_arr is one-dim Numpy array
img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)
from utils import decode_img
image = "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCABsAFUDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD8MrWK3mhVkd1VfuB5Dgn6dOtWbFdcu5/stvAWLMAAqH+dZF1DcTXENrFIVyhZfr1r7d/Yg/4J36r8cPC1v4ruPEUlnLkEZi3KRjuMZHPvXHjMbDCR5paH0GV4Crj6rjHY+UbbwF4vumVV06VwGwQytgCnnwncW+oCzurF1kU4OOFNfopf/wDBJL9pfXboaT4F8faJDatJ81xcqwwPYd69E+DX/BBjxpo122teN/H8OtXwO6FlsCIAfoTk46dR0rz1m7s7anuvJKMJLmlZH5mW/wANfEem2Y1L+zZY0l4CSqWU9vw/AVveGvh74vGqwWmi6fd2N3LgxmFC45/iIIIx9f06V+2Pw2/4JC+A7GSK68bz/wBp3CgFvMgCBT2CgD5QBgdeetewaT/wT3+D/h6NJ7DwjbJKsZQyLCAcZ6cCud43FS1SOtYPLKS1kfhV4g+CXxfTRynxI8G2+u6Yg3Ot7pLxlx1ys8JWQY+uOOleb614T8JS2k1houvapaxxyBv7Du9P+1xbwOsLxBWVwOMOmf8AbPU/0aT/ALIPgZ9LaxbQYtjrtJfnjpjmvDPj9/wSa+EnxB0e6ufD2gQ2molCIpojjBx7joaI4/FRfvrQwlg8urfw5WZ+SvwI/bzv/BGiL8JviN4atfHnhK9g8jUfC/jWwNykYSRsNaO2J4pFB7O4VgcVwHxZ8S2fhPVZtc+B/iDUIPD1zO0thZ3urq91p4b70RyQzpnIweoxuyRmvX/2s/8AgnV+0d8EfE8uoXGjSXuleaga5tIdyrjpkFTjgAdD07dvPtJ/Y4+MfxHW01qC1e3jnV1ulWB5ktmHIb5OcbMErnOT26V2wnhqvvM5JRxVD3VqcJ4V8f8AxL8QT3eoaTqOrSXB8sXN1p0aP5igEIrMQ2Svz/ng5IzRX098WP2Mr/4FazH8PPhh4k0+6axTZrGpPcG3NzcFEcjy2OUC7yBnr170UP2Ld0QpY6x8ffsv/C/UPjJ8ZdG8JW1jJMs0oWUquQg75447c9K/dT9lD4a2PgDwVYeDdE0xHks4VRzGmFV+4JGN3XNfDf7FPgjSP2av2TdN+MVl4Bm13xV4xuvJ0u3s4AZGhfPRiPkXbGTk5+8K9T8S67/wVZ8WRpq/ww8MweH9MEIksdL0y3RpIxjpJI7
nc+c5OAM9q8DHVYZjjmpStGJ9Hl2E+oZdeCvKR+mfw98MWkCie6nTeFGVCgD347YNen6Ra6MYkgaaIk/Nhmxz/n3r8OvEvxZ/4LU+Crdrubwt4qmA/wBddQR2z475GK7P9nD/AIKgft06B4lt9E+N3hrUhiVVlnvbXyyy/hkE4wOK76Tw1GN+ZM8ythMTinazR+11tZwuMxuhQfd2HirH9nwSn5gAOwzXivwW/aEn8deE7TWdQ0+aKaSINMWTAAIyOPpiuv8AEHxw8GeGdEl17WtYWCCOIMGYfePQr9a6YV6U48y2PJqYLEU6nJrc7kaRBcgxIFbHG3FaOn+ALi5jWCK1XbnOWWvjrxl/wWP/AGWfg9qkdr4z8ROv2i58qE2ymQjnG5sdAOn4V9KfA79vX9nz4r6VFqfhX4i6bcxTIPLZbgde6nPCn6kV0UvY1bXOfE0MXQV1e50viX9mTwR4ssLmy8V6dHPDPCUdCvQEHoeo69sV8p/Ej9hb4c/s6eCZrT4VeH7aFJLyaVfNBdg8isSSTznI7+tfemi+IPDfii187Q9VguhIpYlZRgD8M5/CvBf24re+0j4Sa3r1hII1s7Pzml8s8bSDgc8dP1Nc+Ow1GlDnRWWY7Fyq+yn+J+cnxFtfB954s/tbW2umN5p0Eu1oYmzId++Q/IDub5QeTwi9KK6Xwlonwy/aG0G28Wa1cz6dcwReSEdlKPFuYqRkjBzvyOeNtFZQtyI9x0rszf2c/hTe6J8CvBehlYYLbQfD1ukrSR8pmNWcficj6GvNvE3/AAUXlX4ot8FfhdHc3WrXFwtpYQ2trFaRPITgGa8uQYolz2CsSO46V9v/AAl8F6ZHoUenWmmRqixgbJPmwMcbuxOPYVwfj79k3w3aazca7c/D/T9XjuXLSW4tEG1ucHAABIPTIPavm4UPfdSSvc+hp4qnyexUrNaHwR8QP+CsnxC8EeI7v4ffFP4YazpN9BgNLpWv2V+77n8tD5Qt41kyedodSVwR1r0X4JftSaV8SPEl14J8Y6VFLqltJiOJrHyZQpwfngY5hbBBxkg/e3c17frX7Od1YMx8J/s7WeS+YxfmBYU7g4MZzg8jGPxrjdS/Y3v9S8ZwfETxH8OfDtv4jtn3Wt5okDwSHJyQxjZUceoZcE/nXTVo0K0LwTi0deDnPDwanNSTPrv9m+y0/wAS6Mtvps7MWwPvbuny4J9sY/Dv1roP2sPhVpEPw4Om3w2LLHvZUXrj+tdF+xb8I7vw/wCH1vNbKtcy7flxjHPPAxiu+/ax+HK6h4bhvra0e5jiUiSMPg/d6D19a7KWDqfUW29T5SvjorOoxT0ufjL+0P8Asd/sq6pfHWfH3iC8sZZnHkxi82hieu1QCzc5PC9c1sfsqfsG6T4E1Sfxz8DP2mdYttOuFK3Wk6zoFw+mtGy4+fcq7zjkHK4qt8bvgfqut/Gy51Xxd8WbzR7QTvHZaNHor2YJIIG+7Db3POcKy9h2rnfg9/wTj/bM8D+M7TxT8Kf22NZ0meO6VrSS1lvYJki67S4ZkkO3H3tykdR2rmwcJQj70z6rMY05RThC59yfCfwL+1p+zdaxeMvhP8Z7TXYoFLvpMpMttKg5KjD5TjtuJUY4NfW3w++LWn/tU/DHUdJ8U+EbjTri5svs2r6bOmVDyJt3oT95c4IPcEcdq+ZPgF4V/auvLW2tvjh4A06+kS4aO48V+ErlIfttuWO2S8tNiqs23GZYwm4jJBya+vfhz4Tk0PT4JQxIQlSWTazJkFQ2OuBjmuuFStKTi9YnyWOeEpuMlG07n5d/CHSPC3h2fxP8FvHNrbXNx4M8VXlrFLdo+4RyPvVRhhwFC8e/vRXTftOeGdQ8H/tZ/EQaNYpN/aOqw3k+5d3zvCORjGBgY7/doq4zmlY74RjViprqfVnwIngXw7aQSFpZXjUyyMOWbbnnPua9UTRtMucNcRqCyjKgcdK8U+Dut6Vb+FbbVILqQxJEjCQ9WG0
Y/DFenWPi2zu4YzZ3RK4yWftU4OvTjGzOHGUZupzxL2q6Fo1tCSIR8o+6Op/OuOu9L0bUNXFlBFHtRh5oCFsseR8x9q0PF+r6lqMTrp8hzHESfc+lcpo37S3wK0fXbL4b6x4+0ew16aRPs2lXN/HHcXDFjuwrkHGemNx9q6ZSoznawUYVp02o3bPpD4P6ELR4QqKxWPnI7ZzXe+ItGs9f0xtNuYwQTuHHQ+vNeb+B/Gem6XOkk1xvXaCzxMpUZ529ev0/IdB6JaeM/DN22xdUj81hkQuGUqvr90g16tCdFUeRnymMp4p4j2kE9DxH4kfsfeA/iBBIupeHbS4jZyXjuIw4J9RuBIP0rlPC37BHhDw9cqdPgv7eIEny4tQk2r9AScV7H458bjw94pe2imZ4NiMCvTlRWn4b+IcGpFB5nyZIJbr1rjWHws6jTR6kcxzSOH0lo0N+GXwh8OeDLAWtpYs0RUA/aH3liONxz1/l7V0mo6fY2EJFvbJgqcj3q0dUs5IvME42AAgj0zzWJreoi583yZSqF9quDkHt/SuutToUqXunjxqYrEV7zPkv4g/Cjw749+PXjB7y/s4ZbVrLPnzohIeDfgZGSASfzoqD40/BLwr8U/jXr/iKHxKtjcwx2trdxSM0ZcpGSr5zhgQ2M+qkUV4MqzUrWPs6CpqjHXofL37E/wC1h4Z+J/wP0XUor8Syxabai8MLbx5m0R7M8YOV3cjoR9a+nNF8Ss1k2p30K20RcqS0g2sF4z+n61/Pr+xt+1Nq/wAJdc03T59ZlttLt7pJbpFkO13HTIz8xIxX6Z+Nf24/EniL9kDVfEXg/T31bWLmPEIhcJFYqwPzOfTYARjvXHWws8PiXd6Nno4evRx2FU4dtT7Yu/2i/hFoVjMb7xfpxMCFpGN2vXA4+mDX5Vftj/t7/BbTP2sojpngfSvEmkC6H9ptMo8yCVMbJI3z98DGCOgwK+b/AIcaD8Wfjjd3mo6r4wurDTzn7Zqd1dMsXzHP446D2FS2/wCwYdY19rqP4r6NKJZCys0xaYknqqtjP4GvWhHBygk5ak4ejmMfeoQvqffWi/8ABWcah8NLu+0L4lrZQ6eqwRz3DebcxoV4CxuTuYDjd7Zre/4JsQ61+2F+0+fFmiftKePdN0/SttxrAuL6RkvQRxGoZtiZ2nIC18I6r/wTV+JU+hx3Fl43t47drhWcHTpW34/2kPGcepr6h/YTl8RfsgQaoNFnifU9QnhEU2oTtHb24RGXJBwWzuLdfT0qIU8NTlfmZ2V6eZexcfZJX6n7W694S0iDThZz+ZLIYtsMk0u5yoPUt3P/AOqvKvEev3/w5vZFmc+RIcxzH7i+xrwbXf8AgpXd6R4BuR4rggv7ywEX2a6s72NmnIjUsEVWHHOPrgfxVrfs3/tUeEv2tvBN1qVrqKTx+cyS2jkebZ7SQyycnJPBVhgYI4p4isuW9NnzdDCV6Mn7ZH0N4P8AiVHqNt9tt7wSNJtRdsh2Yz2B967HVvGlhpFkZNQ1FIiqGVzxtAVdxz74IxXgmm30Pw50Ke41h0tYYAzIoYNtwxwPfPX8a+Yv+CjH7feleA/gxP4QsNXktNZ12wlaO5t5gDBCp5JP94hePY4xXNRxVSq+VhUwUJvngtD6r/4Jq6tb/H/R/iR8WviN4RW9F54+ns9IW/jYmK1to1jAHPRm3t/wKit//gmp8Wfh3b/sVeA9V1TxNpVjqer6ONR1SKC4RN0srMdxDEkEqBweeM9xRXZHH5ZFWlUin6ni1cWoVHFS0XmfyZeE0u7HX7XT41X7UswKJIOFYZ65OK++f2QPi5q1/wCB9U+GX2CPVob+NIWDOsKJgkNsBPz85B/SvlD9qj4X+Hfh74xHiHwhrwuIbyUmaEx7DEwwOP51g/DT4qa3oOqW7S6u8EKTqWkUgbRnsfet8RQ/tDCe0jpodHC2b0KMuWa3P3N+Bv7LnwU1P4UReHY/DGni2uoRHPbpDnY/OST
kjOaqat+yT4Y8FWsmgfEHwFY6/wCG3JWC4gsx5lsM8DcuGX8DXmv/AAT7/a28M+JfCkPhyK5kjRVwbmR9xll5IUAY/Ova/jp8QdZX4eXN1oGtXFhexIz27KD8zYJ28kg84r5aDqYeryuNz9HoYqq5c1OXus53TP2OP2TxbCTSp/EK6eAA2nWni25WIAnJyhbK85716t4O/Yw/Y3u9MFp4T+BFnqd4sOFuL29uLtQT3YySHcefw6dq/KvUf2+f2nrT4l3Xh/StQjvgZ/KMJs13Kf7x24z0r9Hv2DPjF8ffEPhuD/haF7BBE9sksTWsYjL57bec+nFey5qEE5x3FjcfXq037OWx0uqf8EpP2XPDdhqPiKXwdCurXdqwkmE0iQ24YE4jUEBeOO/r1wa+ef8Agl18KdL/AGZviP8AEbxTf6nJEhunsfs7SHyXiErOHUHOT5ZUEj0r7m+Mvj+Lwx8Ori/1ENEbiMx/KxYruGQxB+tflh8Xf2lfG3wt8d3+mtcwxpfTNKtyDsjIxgCRWJxk8DGPWudqU5+51PKw1VV6T9ueg/8ABQr/AIKW3llND8O/CWq3duGmWWC+t0AV4yWzDyT85GCCeOelfnv8T/jne/FTW7ZvF2u3N4lvbS20dnJJ5kjSykkKcYBwXwcY6VU/ah+MUPjG7mmj02IXc7KiqrrIUYYB5zgEY4OOBxz1r3D/AIJZf8E0dZ/ayluPif4u8QnRPD1obq2ivry3JVpyAUeFQCCQ2QWJwSD0rsrww+EwjqVXZtHyOb5msPTnTpvQ3rP44+PvAvhLStN+D2o3uvaWYisfnRSb7RVVAsTeWpHHzfgBRX318H/+CLH7Ovwy8KjSNR+NHjXX5ppDJLNaXyWEUbdwsaA5B45JOcZ4zgFfm86VGU29Hd318z8+eKcne5+C/iHxn4x+J0n27xhrk1480qgbolQBsqCflAzx9Kr+OfAHiH4X+K5/D2s2c6hJv3PmJsMowCCAc+tX9I060f4bDVzH+9DyD7xx94DPrnmvtv8Abo/Zw+Gc+i3OuyWt0bkadaTQOZwfIZ7VGbYSucFsnknrX6vUxKw2IVNLRn1OT5bHFUZ1E7OJ80/AD9pPxD8PWkkt9WmimSMx2oUBQinqMDHOc8179oX7afja/wDBS+GL7xLd3bAbYI7ibcsW5yzM3cnHTnjPevhLSLiVLR7hXO9W4Ofeu58P+IdW0rWIr21uTvEIIDcjpivReBoTfNY9LD5niaS5b7H0l8CY9L0/4t2fifVJ/Mjguo7l5iPNW4EjMQkgGDj6Yr9V/wBnX9q34b6N4W0mLXfDptVjLixFrCsixrtMh+bHQnceem5RnqR+Gfgjxx4j8K+KoL3R74o4XeQxJDY34BGenzH8hX0f4W+OHxJ0b4XG60nX2t5FsIAzIM79kkkfIJI+ZcBsYz7ZOeXE4SOh6WGzKdSEoy6n6B/tm/tw6LoPgvU9X1fUIEtoZbW2mSKcN5M8gdiHXghV2hsZywyAQa/Jr4q/tba146g1vSdVuUuJb/UW+y+TbpJGHU7VUMcs0Z2lhggg9T2rlPip8c/iV4gmuxrevG6h15rk6ja3CB43ZpThwDzuUqCpJODnqCRXMeDgfF3jm3utX2+bqt60c728SRiMMefLVQFU4wOnYd8kqhh6VOPM0clXGVak/Zw0O4+HP7Nviv4teFfFPxMtZWufD3h3QZdQ1u++y+Rg/aIbaKL5gRvklnVhg/dDd+R+mD/tvfstfsv/APBOnwTDpHiqNr3xH4LjEHhTS5Fku0umj2T/AC8iBVl3nccNnnnNemeLf2d/hZ8M/wDghP4rsvDOg4Gr+HLue+lmYF3ltxJJG+QBz5iBznjcTgAYA/Ca4vri7vlilb5JrUM6DpkgHp+PTpXl5hl9POEoVHaMWfO5zhVKSjfc++P2Zv8AgqT+0l8KfBUuk+D9YGtadcT77dPEdw8klqAWGxJCwLj1z0IOOMUV8XxeIdU8P6ba6ZpVx5U
aRZJGSWJOeaK+frZBhlVaR81PLoqbXMf/2Q==",
# `image` was assigned above with a trailing comma, so it is a 1-tuple;
# image[0] is the base64 string, which is encoded to bytes for decode_img.
image = decode_img(bytes(image[0], encoding="utf-8"))
plt.imshow(image)
plt.show()
```
|
github_jupyter
|
```
from mxnet import nd
from mxnet.contrib import text
# list the pretrained GloVe embedding files shipped with mxnet
glove_vec = text.embedding.get_pretrained_file_names("glove")
print(glove_vec)
glove_6b50d = text.embedding.create('glove', pretrained_file_name="glove.6B.50d.txt")
word_size = len(glove_6b50d)
print(word_size)
# word -> index lookup
index = glove_6b50d.token_to_idx['happy']
print(index)
# index -> word lookup
word = glove_6b50d.idx_to_token[1752]
print(word)
# the word's embedding vector
print(glove_6b50d.idx_to_vec[1752])
```
# Glove应用
```
# cosine similarity
def cos_sim(x, y):
# dot(x, y) / (|x| * |y|); parallel vectors give 1 regardless of magnitude
return nd.dot(x,y)/(x.norm() * y.norm())
a = nd.array([4,5])
b = nd.array([400,500])
print(cos_sim(a,b))
# nearest-neighbour (synonym) search in the embedding space
def norm_vecs_by_row(x):
# the 1e-10 added to the denominator is for numerical stability
return x / (nd.sum(x * x, axis=1) + 1e-10).sqrt().reshape((-1, 1))
def get_knn(token_embedding, k, word):
# rank the whole vocabulary by dot product against row-normalised vectors
word_vec = token_embedding.get_vecs_by_tokens([word]).reshape((-1, 1))
vocab_vecs = norm_vecs_by_row(token_embedding.idx_to_vec)
dot_prod = nd.dot(vocab_vecs, word_vec)
indices = nd.topk(dot_prod.reshape((len(token_embedding), )), k=k+1,
ret_typ='indices')
indices = [int(i.asscalar()) for i in indices]
# drop the query word itself (it is always its own nearest neighbour)
return token_embedding.to_tokens(indices[1:])
sim_list = get_knn(glove_6b50d, 10, 'baby')
print(sim_list)
sim_val = cos_sim(glove_6b50d.get_vecs_by_tokens('baby'), glove_6b50d.get_vecs_by_tokens('babies'))
print(sim_val)
print(get_knn(glove_6b50d, 10, 'computer'))
print(get_knn(glove_6b50d, 10, 'run'))
print(get_knn(glove_6b50d, 10, 'love'))
print(get_knn(glove_6b50d, 10, 'love'))
# word-analogy search: find the word whose vector is closest to
# vec(c) + vec(b) - vec(a)
def get_top_k_by_analogy(token_embedding, k, word1, word2, word3):
word_vecs = token_embedding.get_vecs_by_tokens([word1, word2, word3])
word_diff = (word_vecs[1] - word_vecs[0] + word_vecs[2]).reshape((-1, 1))
vocab_vecs = norm_vecs_by_row(token_embedding.idx_to_vec)
dot_prod = nd.dot(vocab_vecs, word_diff)
indices = nd.topk(dot_prod.reshape((len(token_embedding), )), k=k,
ret_typ='indices')
indices = [int(i.asscalar()) for i in indices]
return token_embedding.to_tokens(indices)
# verify the cosine similarity between vec(son)+vec(woman)-vec(man)
# and vec(daughter)
def cos_sim_word_analogy(token_embedding, word1, word2, word3, word4):
words = [word1, word2, word3, word4]
vecs = token_embedding.get_vecs_by_tokens(words)
return cos_sim(vecs[1] - vecs[0] + vecs[2], vecs[3])
word_list = get_top_k_by_analogy(glove_6b50d,1, 'man', 'woman', 'son')
print(word_list)
word_list = get_top_k_by_analogy(glove_6b50d, 1, 'man', 'son', 'woman')
print(word_list)
sim_val = cos_sim_word_analogy(glove_6b50d, 'man', 'woman', 'son', 'daughter')
print(sim_val)
word_list = get_top_k_by_analogy(glove_6b50d, 1, 'beijing', 'china', 'tokyo')
print(word_list)
word_list = get_top_k_by_analogy(glove_6b50d, 1, 'bad', 'worst', 'big')
print(word_list)
```
|
github_jupyter
|
# Dictionaries
### # A dictionary in Python is a collection of key-value data (insertion-ordered since Python 3.7).
### # Dictionary holds key:value pair.
### # Each key is separated from its value by a colon, whereas consecutive key-value pairs are separated by a comma.
### # Keys of a Dictionary must be unique.
```
# duplicate values are allowed; only keys must be unique
Dictionary = {1:'Nokia',2:'EDP',3:'Nokia',4:'Nokia', 'Nokia':1}
print(Dictionary)
# when a key repeats, the last assignment wins (1 -> 'ML', 2 -> 'Pritish')
Dictionary = {1:'Nokia', 2:'EDP',3:'Nokia', 1:'ML', 2:'Pritish'}
print(Dictionary)
```
### Dictionary keys can be of different data types:
```
Dictionary = {'name':'Pritish',1:[2,3,4]}
print(Dictionary)
```
### Accessing a particular element in a dictionary via the key:
```
Dictionary = {1:'Nokia',2:'EDP',3:'Machine Learning'}
print(Dictionary[1])
```
### Length of a dictionary:
### # len(dictionary_name)
```
Dictionary = {1:'Nokia',2:'EDP',3:'Machine Learning'}
print(len(Dictionary))
```
### Keys of the dictionary:
### # dictionary_name.keys()
```
Dictionary = {1:'Nokia',2:'EDP',3:'Machine Learning'}
print(Dictionary.keys())
```
### Values corresponding to the keys of a dictionary:
### # dictionary_name.values()
```
Dictionary = {1:'Nokia',2:'EDP',3:'Machine Learning'}
print(Dictionary.values())
```
### Check for all the items in the dictionary:
### # dictionary_name.items()
```
Dictionary = {1:'Nokia',2:'ML'} # item is the combination of the key, value pair
print(Dictionary.items())
```
### Getting the value of a particular key element in dictionary
### # dictionary_name.get(element_index_key)
```
Dictionary = {1:'Nokia',2:'ML'}
print(Dictionary.get(1))
print(Dictionary.get(2))
```
### Updating a value in the dictionary:
```
Dictionary = {1:'Nokia',2:'EDP',3:'Project Management'}
Dictionary.update( {3:'ML',1:'NOKIA'} ) # Can update multiple keys at a single time
print(Dictionary)
```
### Updating a value in the dictionary without using built-in update:
```
edp_subjects = {"ML":93, "Project Management":86, "Cloud Ambessador":90}
edp_subjects["ML"]=95
print(edp_subjects["ML"])
print(edp_subjects)
```
### Pop an element from the dictionary:
### # dictionary_name.pop(key)
```
Dictionary = {1:'Nokia',2:'EDP',3:'ML'}
print(Dictionary.pop(2)) # pop requires the key as an argument and returns the removed value
print(Dictionary)
```
### Clearing the dictionary elements, deleting the dictionary:
### # dictionary_name.clear()
### # del dictionary_name
```
students = {"Eric":14, "Bob":12, "Alice":26}
students.clear()
print(students)
del students
```
### Delete an element in the dictionary:
### # del dictionary_name[element_index]
```
students = {"Eric":14, "Bob":12, "Alice":26}
del students["Bob"]
print(students)
del students
```
### Updating a dictionary by adding one more dictionary to an existing dictionary:
### # dictionary_name1.update(dictionary_name2)
```
students_1 = {"Eric":14, "Bob":12, "Alice":26}
students_2 = {1:'John',2:'Bob',3:'James'}
students_1.update(students_2)
print(students_1)
```
### Ordered Dictionary
### # usage of OrderedDict
#### OrderedDict preserves the order in which the keys are inserted. Before Python 3.7, a regular dict
#### did not guarantee insertion order, and iterating it could give the items in an arbitrary order.
#### Since Python 3.7, plain dicts also preserve insertion order, but OrderedDict still offers extra
#### order-aware behavior (e.g. order-sensitive equality and move_to_end()).
```
from collections import OrderedDict
print("This is a normal Dictionary: ")
d = {}
d['a'] = 1
d['b'] = 2
d['c'] = 3
d['d'] = 4
d['e'] = 5
d['f'] = 6
for key, value in d.items():
print(key, value)
print("\nThis is an Ordered Dictionary: ")
od = OrderedDict()
od['a'] = 1
od['b'] = 2
od['c'] = 3
od['d'] = 4
od['e'] = 5
od['f'] = 6
for key, value in od.items():
print(key, value)
```
### Dictionary built-in function
```
Dictionary = {1:'Nokia',2:'EDP',3:'Machine Learning'}
print(dir(Dictionary[1]))
```
|
github_jupyter
|
# Tutorial 01: Running Sumo Simulations
This tutorial walks through the process of running non-RL traffic simulations in Flow. Simulations of this form act as non-autonomous baselines and depict the behavior of human dynamics on a network. Similar simulations may also be used to evaluate the performance of hand-designed controllers on a network. This tutorial focuses primarily on the former use case, while an example of the latter may be found in `exercise07_controllers.ipynb`.
In this exercise, we simulate an initially perturbed single lane ring road. We witness in simulation that as time advances the initial perturbations do not dissipate, but instead propagate and expand until vehicles are forced to periodically stop and accelerate. For more information on this behavior, we refer the reader to the following article [1].
## 1. Components of a Simulation
All simulations, both in the presence and absence of RL, require two components: a *scenario*, and an *environment*. Scenarios describe the features of the transportation network used in simulation. This includes the positions and properties of nodes and edges constituting the lanes and junctions, as well as properties of the vehicles, traffic lights, inflows, etc. in the network. Environments, on the other hand, initialize, reset, and advance simulations, and act as the primary interface between the reinforcement learning algorithm and the scenario. Moreover, custom environments may be used to modify the dynamical features of a scenario.
## 2. Setting up a Scenario
Flow contains a plethora of pre-designed scenarios used to replicate highways, intersections, and merges in both closed and open settings. All these scenarios are located in flow/scenarios. In order to recreate a ring road network, we begin by importing the scenario `LoopScenario`.
```
from flow.scenarios.loop import LoopScenario
```
This scenario, as well as all other scenarios in Flow, is parametrized by the following arguments:
* name
* vehicles
* net_params
* initial_config
* traffic_lights
These parameters allow a single scenario to be recycled for a multitude of different network settings. For example, `LoopScenario` may be used to create ring roads of variable length with a variable number of lanes and vehicles.
### 2.1 Name
The `name` argument is a string variable depicting the name of the scenario. This has no effect on the type of network created.
```
name = "ring_example"
```
### 2.2 VehicleParams
The `VehicleParams` class stores state information on all vehicles in the network. This class is used to identify the dynamical behavior of a vehicle and whether it is controlled by a reinforcement learning agent. Moreover, information pertaining to the observations and reward function can be collected from various get methods within this class.
The initial configuration of this class describes the number of vehicles in the network at the start of every simulation, as well as the properties of these vehicles. We begin by creating an empty `VehicleParams` object.
```
from flow.core.params import VehicleParams
vehicles = VehicleParams()
```
Once this object is created, vehicles may be introduced using the `add` method. This method specifies the types and quantities of vehicles at the start of a simulation rollout. For a description of the various arguments associated with the `add` method, we refer the reader to the following documentation (reference readthedocs).
When adding vehicles, their dynamical behaviors may be specified either by the simulator (default), or by user-generated models. For longitudinal (acceleration) dynamics, several prominent car-following models are implemented in Flow. For this example, the acceleration behavior of all vehicles will be defined by the Intelligent Driver Model (IDM) [2].
```
from flow.controllers.car_following_models import IDMController
```
Another controller we define is for the vehicle's routing behavior. For closed network where the route for any vehicle is repeated, the `ContinuousRouter` controller is used to perpetually reroute all vehicles to the initial set route.
```
from flow.controllers.routing_controllers import ContinuousRouter
```
Finally, we add 22 vehicles of type "human" with the above acceleration and routing behavior into the `Vehicles` class.
```
vehicles.add("human",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=22)
```
### 2.3 NetParams
`NetParams` are network-specific parameters used to define the shape and properties of a network. Unlike most other parameters, `NetParams` may vary drastically depending on the specific network configuration, and accordingly most of its parameters are stored in `additional_params`. In order to determine which `additional_params` variables may be needed for a specific scenario, we refer to the `ADDITIONAL_NET_PARAMS` variable located in the scenario file.
```
from flow.scenarios.loop import ADDITIONAL_NET_PARAMS
print(ADDITIONAL_NET_PARAMS)
```
Importing the `ADDITIONAL_NET_PARAMS` dict from the ring road scenario, we see that the required parameters are:
* **length**: length of the ring road
* **lanes**: number of lanes
* **speed_limit**: speed limit for all edges
* **resolution**: resolution of the curves on the ring. Setting this value to 1 converts the ring to a diamond.
At times, other inputs (for example `no_internal_links`) may be needed from `NetParams` to recreate proper network features/behavior. These requirements can be found in the scenario's documentation. For the ring road, no attributes are needed aside from the `additional_params` terms. Furthermore, for this exercise, we use the scenario's default parameters when creating the `NetParams` object.
```
from flow.core.params import NetParams
net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS)
```
### 2.4 InitialConfig
`InitialConfig` specifies parameters that affect the positioning of vehicle in the network at the start of a simulation. These parameters can be used to limit the edges and number of lanes vehicles originally occupy, and provide a means of adding randomness to the starting positions of vehicles. In order to introduce a small initial disturbance to the system of vehicles in the network, we set the `perturbation` term in `InitialConfig` to 1m.
```
from flow.core.params import InitialConfig
initial_config = InitialConfig(spacing="uniform", perturbation=1)
```
### 2.5 TrafficLightParams
`TrafficLightParams` are used to describe the positions and types of traffic lights in the network. These inputs are outside the scope of this tutorial, and instead are covered in `exercise06_traffic_lights.ipynb`. For our example, we create an empty `TrafficLightParams` object, thereby ensuring that none are placed on any nodes.
```
from flow.core.params import TrafficLightParams
traffic_lights = TrafficLightParams()
```
## 3. Setting up an Environment
Several environments in Flow exist to train autonomous agents of different forms (e.g. autonomous vehicles, traffic lights) to perform a variety of different tasks. These environments are often scenario or task specific; however, some can be deployed on an ambiguous set of scenarios as well. One such environment, `AccelEnv`, may be used to train a variable number of vehicles in a fully observable network with a *static* number of vehicles.
```
from flow.envs.loop.loop_accel import AccelEnv
```
Although we will not be training any autonomous agents in this exercise, the use of an environment allows us to view the cumulative reward simulation rollouts receive in the absence of autonomy.
Environments in Flow are parametrized by three components:
* `EnvParams`
* `SumoParams`
* `Scenario`
### 3.1 SumoParams
`SumoParams` specifies simulation-specific variables. These variables include the length a simulation step (in seconds) and whether to render the GUI when running the experiment. For this example, we consider a simulation step length of 0.1s and activate the GUI.
```
from flow.core.params import SumoParams
sumo_params = SumoParams(sim_step=0.1, render=True)
```
### 3.2 EnvParams
`EnvParams` specify environment and experiment-specific parameters that either affect the training process or the dynamics of various components within the scenario. Much like `NetParams`, the attributes associated with this parameter are mostly environment specific, and can be found in the environment's `ADDITIONAL_ENV_PARAMS` dictionary.
```
from flow.envs.loop.loop_accel import ADDITIONAL_ENV_PARAMS
print(ADDITIONAL_ENV_PARAMS)
```
Importing the `ADDITIONAL_ENV_PARAMS` variable, we see that it consists of only one entry, "target_velocity", which is used when computing the reward function associated with the environment. We use this default value when generating the `EnvParams` object.
```
from flow.core.params import EnvParams
env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
```
## 4. Setting up and Running the Experiment
Once the inputs to the scenario and environment classes are ready, we are ready to set up a `Experiment` object.
```
from flow.core.experiment import Experiment
```
These objects may be used to simulate rollouts in the absence of reinforcement learning agents, as well as acquire behaviors and rewards that may be used as a baseline with which to compare the performance of the learning agent. In this case, we choose to run our experiment for one rollout consisting of 3000 steps (300 s).
**Note**: When executing the below code, remember to click on the <img style="display:inline;" src="img/play_button.png"> Play button after the GUI is rendered.
```
# create the scenario object
scenario = LoopScenario(name="ring_example",
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=traffic_lights)
# create the environment object
env = AccelEnv(env_params, sumo_params, scenario)
# create the experiment object
exp = Experiment(env)
# run one rollout of 3000 steps (300 s at sim_step=0.1)
_ = exp.run(1, 3000)
```
As we can see from the above simulation, the initial perturbations in the network propagate and intensify into instabilities, eventually leading to the formation of stop-and-go waves after approximately 180s.
## 5. Modifying the Simulation
This tutorial has walked you through running a single lane ring road experiment in Flow. As we have mentioned before, these simulations are highly parametrizable. This allows us to try different representations of the task. For example, what happens if no initial perturbations are introduced to the system of homogenous human-driven vehicles?
```
initial_config = InitialConfig()
```
In addition, how does the task change in the presence of multiple lanes where vehicles can overtake one another?
```
net_params = NetParams(
additional_params={
'length': 230,
'lanes': 2,
'speed_limit': 30,
'resolution': 40
}
)
```
Feel free to experiment with all these problems and more!
## Bibliography
[1] Sugiyama, Yuki, et al. "Traffic jams without bottlenecks—experimental evidence for the physical mechanism of the formation of a jam." New journal of physics 10.3 (2008): 033001.
[2] Treiber, Martin, Ansgar Hennecke, and Dirk Helbing. "Congested traffic states in empirical observations and microscopic simulations." Physical review E 62.2 (2000): 1805.
|
github_jupyter
|
# Quick start guide
## Installation
### Stable
Fri can be installed via the Python Package Index (PyPI).
If you have `pip` installed just execute the command
pip install fri
to get the newest stable version.
The dependencies should be installed and checked automatically.
If you have problems installing please open issue at our [tracker](https://github.com/lpfann/fri/issues/new).
### Development
To install a bleeding edge dev version of `FRI` you can clone the GitHub repository using
git clone git@github.com:lpfann/fri.git
and then check out the `dev` branch: `git checkout dev`.
We use [poetry](https://poetry.eustace.io/) for dependency management.
Run
poetry install
in the cloned repository to install `fri` in a virtualenv.
To check if everything works as intended you can use `pytest` to run the unit tests.
Just run the command
poetry run pytest
in the main project folder
## Using FRI
Now we showcase the workflow of using FRI on a simple classification problem.
### Data
To have something to work with, we need some data first.
`fri` includes a generation method for binary classification and regression data.
In our case we need some classification data.
```
from fri import genClassificationData
```
We want to create a small set with a few features.
Because we want to showcase the all-relevant feature selection, we generate multiple strongly and weakly relevant features.
```
n = 100
features = 6
strongly_relevant = 2
weakly_relevant = 2
X,y = genClassificationData(n_samples=n,
n_features=features,
n_strel=strongly_relevant,
n_redundant=weakly_relevant,
random_state=123)
```
The method also prints out the parameters again.
```
X.shape
```
We created a binary classification set with 6 features of which 2 are strongly relevant and 2 weakly relevant.
#### Preprocess
Because our method expects mean centered data we need to standardize it first.
This centers the values around 0 and scales them to unit standard deviation.
```
from sklearn.preprocessing import StandardScaler
X_scaled = StandardScaler().fit_transform(X)
```
### Model
Now we need to create a model.
We use the `FRI` module.
```
import fri
```
`fri` provides a convenience class `fri.FRI` to create a model.
`fri.FRI` needs the type of problem as a first argument of type `ProblemName`.
Depending on the Problem you want to analyze pick from one of the available models in `ProblemName`.
```
list(fri.ProblemName)
```
Because we have Classification data we use the `ProblemName.CLASSIFICATION` to instantiate our model.
```
fri_model = fri.FRI(fri.ProblemName.CLASSIFICATION,slack_loss=0.2,slack_regularization=0.2)
fri_model
```
We used no parameters for creation so the defaults are active.
#### Fitting to data
Now we can just fit the model to the data using `scikit-learn` like commands.
```
fri_model.fit(X_scaled,y)
```
The resulting feature relevance bounds are saved in the `interval_` variable.
```
fri_model.interval_
```
If you want to print out the relevance class use the `print_interval_with_class()` function.
```
print(fri_model.print_interval_with_class())
```
The bounds are grouped in 2d sublists for each feature.
To access the relevance bounds for feature 2 we would use
```
fri_model.interval_[2]
```
The relevance classes are saved in the corresponding variable `relevance_classes_`:
```
fri_model.relevance_classes_
```
`2` denotes strongly relevant features, `1` weakly relevant and `0` irrelevant.
#### Plot results
The bounds in numerical form are useful for postprocesing.
If we want a human to look at it, we recommend the plot function `plot_relevance_bars`.
We can also color the bars according to `relevance_classes_`
```
# Import plot function
from fri.plot import plot_relevance_bars
import matplotlib.pyplot as plt
%matplotlib inline
# Create new figure, where we can put an axis on
fig, ax = plt.subplots(1, 1,figsize=(6,3))
# plot the bars on the axis, colored according to fri
out = plot_relevance_bars(ax,fri_model.interval_,classes=fri_model.relevance_classes_)
```
### Setting constraints manually
Our model also allows to compute relevance bounds when the user sets a given range for the features.
We use a dictionary to encode our constraints.
```
preset = {}
```
#### Example
As an example, let us constrain the third feature from our example to its minimum relevance bound.
```
preset[2] = fri_model.interval_[2, 0]
```
We use the function `constrained_intervals`.
Note: we need to fit the model before we can use this function.
We already did that, so we are fine.
```
const_ints = fri_model.constrained_intervals(preset=preset)
const_ints
```
Feature 3 is set to its minimum (at 0).
How does it look visually?
```
fig, ax = plt.subplots(1, 1,figsize=(6,3))
out = plot_relevance_bars(ax, const_ints)
```
Feature 3 is reduced to its minimum (no contribution).
In turn, its correlated partner feature 4 had to take its maximum contribution.
### Print internal Parameters
If we want to take a look at the internal parameters, we can use the `verbose` flag in the model creation.
```
fri_model = fri.FRI(fri.ProblemName.CLASSIFICATION, verbose=True)
fri_model.fit(X_scaled,y)
```
This prints out the parameters of the baseline model
One can also see the best selected hyperparameter according to gridsearch and the training score of the model in `score`.
### Multiprocessing
To enable multiprocessing simply use the `n_jobs` parameter when init. the model.
It expects an integer parameter which defines the amount of processes used.
`n_jobs=-1` uses all available on the CPU.
```
fri_model = fri.FRI(fri.ProblemName.CLASSIFICATION, n_jobs=-1, verbose=1)
fri_model.fit(X_scaled,y)
```
|
github_jupyter
|
# Amortized Neural Variational Inference for a toy probabilistic model
Consider a certain number of sensors placed at known locations, $\mathbf{s}_1,\mathbf{s}_2,\ldots,\mathbf{s}_L$. There is a target at an unknown position $\mathbf{z}\in\mathbb{R}^2$ that is emitting a certain signal that is received at the $i$-th sensor with a signal strength distributed as follows:
\begin{align}
x_i \sim \mathcal{N}\Big(- A \log\left(||\mathbf{s}_i-\mathbf{z} ||^2\right), \sigma^2\Big),
\end{align}
where $A$ is a constant related to how fast signal strength degrades with distance. We assume a Gaussian prior for the unknown position $\mathcal{N}(\mathbf{0},\mathbf{I})$. Given a set of $N$ i.i.d. samples for each sensor, $\mathbf{X}\in\mathbb{R}^{L\times N}$, we will use a Amortized Neural Variational Inference to find a Gaussian approximation to
\begin{align}
p(\mathbf{z}|\mathbf{X}) \propto p(\mathbf{X}|\mathbf{z}) p(\mathbf{z})
\end{align}
Our approximation to $p(\mathbf{z}|\mathbf{X})$ is of the form
\begin{align}
p(\mathbf{z}|\mathbf{X}) \approx q(\mathbf{z}|\mathbf{X})=\mathcal{N}\Big(\mu(\mathbf{X}),\Sigma(\mathbf{X})\Big),
\end{align}
where
- $\mu(\mathbf{X})$ --> Given by a Neural Network with parameter vector $\theta$ and input $\mathbf{X}$
- $\Sigma(\mathbf{X})$ --> Diagonal covariance matrix, where the log of the main diagonal is constructed by a Neural Network with parameter vector $\gamma$ and input $\mathbf{X}$
## ELBO lower-bound to $p(\mathbf{X})$
We will optimize $q(\mathbf{z}|\mathbf{X})$ w.r.t. $\theta,\gamma$ by optimizing the Evidence-Lower-Bound (ELBO):
\begin{align}
p(\mathbf{X}) &= \int p(\mathbf{X}|\mathbf{z}) p(\mathbf{z}) d\mathbf{z}\\
&\geq \int q(\mathbf{z}|\mathbf{X}) \log \left(\frac{p(\mathbf{X},\mathbf{z})}{q(\mathbf{z}|\mathbf{X})}\right)d\mathbf{z}\\
& = \mathbb{E}_{q}\left[\log p(\mathbf{X}|\mathbf{z})\right] - D_{KL}(q(\mathbf{z}|\mathbf{X})||p(\mathbf{z}))\triangleq \mathcal{L}(\mathbf{X},\theta,\gamma),
\end{align}
where $D_{KL}(q(\mathbf{z}|\mathbf{X})||p(\mathbf{z}))$ is known in closed form since it is the KL divergence between two Gaussian pdfs:
\begin{align}
D_{KL}(q(\mathbf{z}|\mathbf{X})||p(\mathbf{z})) = \frac{1}{2} \left[\text{tr}\left(\Sigma(\mathbf{X})\right)+\left(\mu(\mathbf{X})^T\mu(\mathbf{X})\right)-2-\log\det \left(\Sigma(\mathbf{X})\right) \right]
\end{align}
## SGD optimization
- Sample $\mathbf{\epsilon}\sim \mathcal{N}(\mathbf{0},\mathbf{I})$
- Sample from $q(\mathbf{z}|\mathbf{X})$:
\begin{align}
\mathbf{z}^0 = \mu(\mathbf{X}) + \sqrt{\text{diag}(\Sigma(\mathbf{X}))} \circ \mathbf{\epsilon}
\end{align}
- Compute gradients of
\begin{align}
\hat{\mathcal{L}}(\mathbf{X},\theta,\gamma) =\log p(\mathbf{X}|\mathbf{z}^0) - D_{KL}(q(\mathbf{z}|\mathbf{X})||p(\mathbf{z}))
\end{align}
w.r.t. $\theta,\gamma$
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
%matplotlib inline
# use seaborn plotting defaults
import seaborn as sns; sns.set()
```
### Probabilistic model definition and generating samples
```
############## Elements of the true probabilistic model ####################
# Ground-truth generative model: S sensors at known positions observe noisy
# log-distance readings of a single hidden target.
loc_info = {}
loc_info['S'] = 3 # Number of sensors
loc_info['pos_s'] = np.array([[0.5,1], [3.5,1], [2,3]]) # Position of sensors
#loc_info['target'] = np.random.uniform(-3,3,[2,]) #(Unknown target position, random draw)
loc_info['target'] = np.array([-1,2]) #(Unknown target position)
loc_info['var_s'] = 5.*np.ones(loc_info['S']).reshape([loc_info['S'],1]) # Noise variance per sensor
loc_info['A'] = np.ones(loc_info['S'],dtype=np.float32) * 10.0 #Attenuation mean factor per sensor
loc_info['N'] = 5 # Number of measurements per sensor
def sample_X(S, M, z, pos_s, A, var_s):
    """Draw M noisy signal-strength readings for each of S sensors.

    Each sensor's mean reading is -A * log(squared distance to target z);
    Gaussian noise with per-sensor variance var_s is added on top.
    Returns an (S, M) array.
    """
    sq_dist = np.sum((pos_s - z) ** 2, axis=1)      # squared distance per sensor
    mu = -A * np.log(sq_dist)                       # mean reading, shape (S,)
    noise = np.random.randn(S, M) * np.sqrt(var_s)  # sensor noise, shape (S, M)
    return mu.reshape([S, 1]) + noise
# Sampling from the model at the true target position.
X = sample_X(loc_info['S'],loc_info['N'], loc_info['target'],loc_info['pos_s'],loc_info['A'],loc_info['var_s'])
# Visualise the sensor layout and the (hidden) target location.
plt.plot(loc_info['pos_s'][:,0],loc_info['pos_s'][:,1],'b>',label='Sensors',ms=15)
plt.plot(loc_info['target'][0],loc_info['target'][1],'ro',label='Target',ms=15)
plt.legend()
```
### TensorFlow Computation Graph and Loss Function
```
z_dim = 2 #Latent Space
model_name = 'model1' #In 'model1.py' we define the variational family
learning_rate = 1e-2
num_samples_avg = 1 #Number of samples to approximate the expectation in the ELBO
num_samples = 10 #Number of samples from the posterior (for testing)
num_it = int(1e4) #SGD iterations
period_plot = int(1000) #Show results every period_plot iterations
dims = X.shape # Shape of the observed data matrix; fixes the placeholder shape
sess_VAE = tf.Graph()
with sess_VAE.as_default():
    print('[*] Importing model: ' + model_name)
    # The imported module is expected to expose encoder() and decoder().
    model = __import__(model_name)
    print('[*] Defining placeholders')
    inputX = tf.placeholder(tf.float32, shape=dims, name='x-input')
    print('[*] Defining the encoder')
    # Encoder: posterior mean/log-variance, reparameterized samples, and the
    # closed-form Gaussian KL term of the ELBO.
    log_var, mean, samples_z, KL = model.encoder(inputX,dims,z_dim,num_samples_avg)
    print('[*] Defining the log_likelyhood')
    # Decoder: Monte-Carlo estimate of E_q[log p(X|z)] using samples_z.
    loglik = model.decoder(loc_info,inputX,samples_z,num_samples_avg)
    # Maximize the ELBO (loglik - KL) by minimizing its negation.
    loss = -(loglik-KL)
    optim = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    # Output dictionary -> Useful if computation graph is defined in a separate .py file
    tf_nodes = {}
    tf_nodes['X'] = inputX
    tf_nodes['mean'] = mean
    tf_nodes['logvar'] = log_var
    tf_nodes['KL'] = KL
    tf_nodes['loglik'] = loglik
    tf_nodes['optim'] = optim
    tf_nodes['samples'] = samples_z
## SGD optimization
```
############ SGD Inference #####################################
# Run SGD on the negative ELBO and track the evolution of the posterior mean.
mean_list = []
with tf.Session(graph=sess_VAE) as session:
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    tf.global_variables_initializer().run()
    print('Training the VAE ...')
    for it in range(num_it):
        feedDict = {tf_nodes['X'] : X}
        # One optimisation step on the negative ELBO.
        _= session.run(tf_nodes['optim'],feedDict)
        if(it % period_plot ==0):
            mean, logvar,loglik,KL = session.run([tf_nodes['mean'],tf_nodes['logvar'],tf_nodes['loglik'],tf_nodes['KL']],feedDict)
            print("It = %d, loglik = %.5f, KL = %.5f" %(it,loglik,KL))
            mean_list.append(mean)
    samples = session.run(tf_nodes['samples'],feedDict)
# Samples from q(z|x): re-drawn below in numpy from the last (mean, logvar).
m_evol = np.vstack(mean_list)
nsamples = 50
samples = mean + np.sqrt(np.exp(logvar)) * np.random.randn(nsamples,2)
plt.plot(loc_info['pos_s'][:,0],loc_info['pos_s'][:,1],'b>',label='Sensors',ms=15)
plt.plot(loc_info['target'][0],loc_info['target'][1],'ro',label='Target',ms=15)
plt.plot(m_evol[:,0],m_evol[:,1],'g>',label='Post Mean')
plt.scatter(samples[:,0],samples[:,1],label='Post Samples')
plt.rcParams["figure.figsize"] = [8,8]
plt.legend()
```
|
github_jupyter
|
```
%matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')
```
Colormap Choices {#colormap_example}
================
Use a Matplotlib, Colorcet, cmocean, or custom colormap when plotting
scalar values.
```
from pyvista import examples
import pyvista as pv
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
```
Any colormap built for `matplotlib`, `colorcet`, or `cmocean` is fully
compatible with PyVista. Colormaps are typically specified by passing
the string name of the colormap to the plotting routine via the `cmap`
argument.
See [Matplotlib\'s complete list of available
colormaps](https://matplotlib.org/tutorials/colors/colormaps.html),
[Colorcet\'s complete
list](https://colorcet.holoviz.org/user_guide/index.html), and
[cmocean\'s complete list](https://matplotlib.org/cmocean/).
Custom Made Colormaps
=====================
To get started using a custom colormap, download some data with scalar
values to plot.
```
mesh = examples.download_st_helens().warp_by_scalar()
# Add scalar array with range (0, 100) that correlates with elevation
mesh['values'] = pv.plotting.normalize(mesh['Elevation']) * 100
```
Build a custom colormap - here we make a colormap with 5 discrete colors
and we specify the ranges where those colors fall:
```
# Define the colors we want to use
blue = np.array([12/256, 238/256, 246/256, 1])
black = np.array([11/256, 11/256, 11/256, 1])
grey = np.array([189/256, 189/256, 189/256, 1])
yellow = np.array([255/256, 247/256, 0/256, 1])
red = np.array([1, 0, 0, 1])
mapping = np.linspace(mesh['values'].min(), mesh['values'].max(), 256)
newcolors = np.empty((256, 4))
newcolors[mapping >= 80] = red
newcolors[mapping < 80] = grey
newcolors[mapping < 55] = yellow
newcolors[mapping < 30] = blue
newcolors[mapping < 1] = black
# Make the colormap from the listed colors
my_colormap = ListedColormap(newcolors)
```
Simply pass the colormap to the plotting routine!
```
mesh.plot(scalars='values', cmap=my_colormap)
```
Or you could make a simple colormap\... any Matplotlib colormap can be
passed to PyVista!
```
boring_cmap = plt.cm.get_cmap("viridis", 5)
mesh.plot(scalars='values', cmap=boring_cmap)
```
You can also pass a list of color strings to the color map. This
approach divides up the colormap into 5 equal parts.
```
mesh.plot(scalars=mesh['values'], cmap=['black', 'blue', 'yellow', 'grey', 'red'])
```
If you still wish to have control of the separation of values, you can
do this by creating a scalar array and passing that to the plotter along
with the the colormap
```
scalars = np.empty(mesh.n_points)
scalars[mesh['values'] >= 80] = 4 # red
scalars[mesh['values'] < 80] = 3 # grey
scalars[mesh['values'] < 55] = 2 # yellow
scalars[mesh['values'] < 30] = 1 # blue
scalars[mesh['values'] < 1] = 0 # black
mesh.plot(scalars=scalars, cmap=['black', 'blue', 'yellow', 'grey', 'red'])
```
Matplotlib vs. Colorcet
=======================
Let\'s compare Colorcet\'s perceptually uniform \"fire\" colormap to
Matplotlib\'s \"hot\" colormap much like the example on the [first page
of Colorcet\'s docs](https://colorcet.holoviz.org/index.html).
The \"hot\" version washes out detail at the high end, as if the image
is overexposed, while \"fire\" makes detail visible throughout the data
range.
Please note that in order to use Colorcet\'s colormaps including
\"fire\", you must have Colorcet installed in your Python environment:
`pip install colorcet`
```
p = pv.Plotter(shape=(2, 2), border=False)
p.subplot(0, 0)
p.add_mesh(mesh, scalars='Elevation', cmap="fire",
lighting=True, scalar_bar_args={'title': "Colorcet Fire"})
p.subplot(0, 1)
p.add_mesh(mesh, scalars='Elevation', cmap="fire",
lighting=False, scalar_bar_args={'title': "Colorcet Fire (No Lighting)"})
p.subplot(1, 0)
p.add_mesh(mesh, scalars='Elevation', cmap="hot",
lighting=True, scalar_bar_args={'title': "Matplotlib Hot"})
p.subplot(1, 1)
p.add_mesh(mesh, scalars='Elevation', cmap="hot",
lighting=False, scalar_bar_args={'title': "Matplotlib Hot (No Lighting)"})
p.show()
```
|
github_jupyter
|
```
# Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings)
# The original BigGAN+CLIP method was by https://twitter.com/advadnoun
import math
import random
# from email.policy import default
from urllib.request import urlopen
from tqdm import tqdm
import sys
import os
import flask
# pip install taming-transformers doesn't work with Gumbel, but does not yet work with coco etc
# appending the path does work with Gumbel, but gives ModuleNotFoundError: No module named 'transformers' for coco etc
sys.path.append('taming-transformers')
from itertools import product
from omegaconf import OmegaConf
from taming.models import cond_transformer, vqgan
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from torch.cuda import get_device_properties
torch.backends.cudnn.benchmark = False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
#torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from torch_optimizer import DiffGrad, AdamP, RAdam
from CLIP import clip
import kornia.augmentation as K
import numpy as np
import imageio
from PIL import ImageFile, Image, PngImagePlugin, ImageChops
ImageFile.LOAD_TRUNCATED_IMAGES = True
from subprocess import Popen, PIPE
import re
# Supress warnings
import warnings
warnings.filterwarnings('ignore')
# Various functions and classes
def sinc(x):
    """Normalized sinc: sin(pi*x) / (pi*x), with the removable singularity sinc(0) = 1."""
    pix = math.pi * x
    return torch.where(x != 0, torch.sin(pix) / pix, x.new_ones([]))
def lanczos(x, a):
    """Lanczos kernel: sinc(x) * sinc(x/a) on the open support (-a, a), zero
    outside, normalised so the weights sum to 1."""
    support = (x > -a) & (x < a)
    weights = torch.where(support, sinc(x) * sinc(x / a), x.new_zeros([]))
    return weights / weights.sum()
def ramp(ratio, width):
    """Symmetric ramp of values spaced `ratio` apart, covering `width` on each side.

    Returns a 1-D tensor (-k*ratio, ..., -ratio, 0, ratio, ..., k*ratio)
    with the outermost endpoints trimmed.
    """
    n = math.ceil(width / ratio + 1)
    # Accumulate in Python floats (0, ratio, 2*ratio, ...) then convert once.
    steps = []
    cur = 0
    for _ in range(n):
        steps.append(cur)
        cur += ratio
    out = torch.tensor(steps, dtype=torch.get_default_dtype())
    # Mirror onto the negative side and drop the extreme endpoints.
    return torch.cat([-out[1:].flip([0]), out])[1:-1]
class ReplaceGrad(torch.autograd.Function):
    """Forward x_forward unchanged, but backpropagate as if the output had
    been x_backward (a gradient-rerouting / straight-through helper)."""
    @staticmethod
    def forward(ctx, x_forward, x_backward):
        # Remember x_backward's shape so the incoming gradient can be
        # reduced back down to it (handles broadcast expansion).
        ctx.shape = x_backward.shape
        return x_forward
    @staticmethod
    def backward(ctx, grad_in):
        # No gradient flows to x_forward; all of it goes to x_backward.
        return None, grad_in.sum_to_size(ctx.shape)
class ClampWithGrad(torch.autograd.Function):
    """Clamp to [min, max] in the forward pass, but keep gradients flowing:
    only gradients that would push a value further outside the range are zeroed."""
    @staticmethod
    def forward(ctx, input, min, max):
        ctx.min = min
        ctx.max = max
        ctx.save_for_backward(input)
        return input.clamp(min, max)
    @staticmethod
    def backward(ctx, grad_in):
        input, = ctx.saved_tensors
        # (input - clamp(input)) is nonzero only outside the range; the sign
        # test keeps gradients that point back toward the valid interval.
        return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
def vector_quantize(x, codebook):
    """Snap each vector in x to its nearest codebook entry (squared-L2),
    with a straight-through gradient back to the continuous x."""
    # ||x||^2 + ||c||^2 - 2 x.c : batched squared distances to every codebook row.
    d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
    indices = d.argmin(-1)
    x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
    # replace_grad (module-level helper): forward x_q, backprop into x.
    return replace_grad(x_q, x)
class Prompt(nn.Module):
    """One CLIP embedding target; forward() scores input embeddings against it.

    weight scales (and, if negative, repels from) the prompt; stop caps the
    loss in the forward pass without cutting the gradient.
    """
    def __init__(self, embed, weight=1., stop=float('-inf')):
        super().__init__()
        # Buffers move with the module to the right device but are not trained.
        self.register_buffer('embed', embed)
        self.register_buffer('weight', torch.as_tensor(weight))
        self.register_buffer('stop', torch.as_tensor(stop))
    def forward(self, input):
        input_normed = F.normalize(input.unsqueeze(1), dim=2)
        embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
        # Squared spherical distance between the unit-normalised embeddings.
        dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
        # A negative weight flips the sign, turning attraction into repulsion.
        dists = dists * self.weight.sign()
        # replace_grad (module-level helper): forward value is floored at
        # `stop`, while gradients come from the unclamped distances.
        return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()
#NR: Split prompts and weights
def split_prompt(prompt):
    """Parse a prompt of the form "text[:weight[:stop]]".

    Missing fields default to weight=1 and stop=-inf. Returns
    (text, weight, stop) with weight/stop converted to float.
    """
    parts = prompt.rsplit(':', 2)
    defaults = ['', '1', '-inf']
    parts = parts + defaults[len(parts):]
    text, weight, stop = parts
    return text, float(weight), float(stop)
class MakeCutouts(nn.Module):
    """Produce `cutn` augmented cutouts of an input image batch for CLIP scoring.

    The augmentation pipeline is assembled from the module-level `augments`
    list (two-letter codes), applied after avg/max pooling down to cut_size.
    """
    def __init__(self, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow # not used with pooling
        # Pick your own augments & their order (reads module-level `augments`).
        augment_list = []
        for item in augments[0]:
            if item == 'Ji':
                augment_list.append(K.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1, p=0.7))
            elif item == 'Sh':
                augment_list.append(K.RandomSharpness(sharpness=0.3, p=0.5))
            elif item == 'Gn':
                augment_list.append(K.RandomGaussianNoise(mean=0.0, std=1., p=0.5))
            elif item == 'Pe':
                augment_list.append(K.RandomPerspective(distortion_scale=0.7, p=0.7))
            elif item == 'Ro':
                augment_list.append(K.RandomRotation(degrees=15, p=0.7))
            elif item == 'Af':
                augment_list.append(K.RandomAffine(degrees=15, translate=0.1, shear=5, p=0.7, padding_mode='zeros', keepdim=True)) # border, reflection, zeros
            elif item == 'Et':
                augment_list.append(K.RandomElasticTransform(p=0.7))
            elif item == 'Ts':
                augment_list.append(K.RandomThinPlateSpline(scale=0.8, same_on_batch=True, p=0.7))
            elif item == 'Cr':
                augment_list.append(K.RandomCrop(size=(self.cut_size,self.cut_size), pad_if_needed=True, padding_mode='reflect', p=0.5))
            elif item == 'Er':
                augment_list.append(K.RandomErasing(scale=(.1, .4), ratio=(.3, 1/.3), same_on_batch=True, p=0.7))
            elif item == 'Re':
                augment_list.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5))
        self.augs = nn.Sequential(*augment_list)
        self.noise_fac = 0.1
        # Pooling: every cutout is resized to cut_size x cut_size.
        self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
        self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
    def forward(self, input):
        cutouts = []
        for _ in range(self.cutn):
            # Blend of average and max pooling; variety comes from the
            # stochastic augmentations applied afterwards.
            cutout = (self.av_pool(input) + self.max_pool(input))/2
            cutouts.append(cutout)
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            # Per-cutout random noise scale in [0, noise_fac].
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch
def load_vqgan_model(config_path, checkpoint_path):
    """Load a (frozen, eval-mode) VQGAN from a taming-transformers config + checkpoint.

    Sets the module-level `gumbel` flag when the checkpoint is a GumbelVQ.
    Raises ValueError for unrecognised config targets.
    """
    global gumbel
    gumbel = False
    config = OmegaConf.load(config_path)
    if config.model.target == 'taming.models.vqgan.VQModel':
        model = vqgan.VQModel(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    elif config.model.target == 'taming.models.vqgan.GumbelVQ':
        model = vqgan.GumbelVQ(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
        gumbel = True
    elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
        # Only the first-stage VQ model is needed; the transformer is discarded.
        parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
        parent_model.eval().requires_grad_(False)
        parent_model.init_from_ckpt(checkpoint_path)
        model = parent_model.first_stage_model
    else:
        raise ValueError(f'unknown model type: {config.model.target}')
    # The training loss head is unused at inference time; free its memory.
    del model.loss
    return model
def resize_image(image, out_size):
    """Resize a PIL image to at most out_size's pixel area, keeping its aspect ratio."""
    aspect = image.size[0] / image.size[1]
    # Never upscale beyond the source area; cap at the requested area.
    area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
    new_w = round((area * aspect) ** 0.5)
    new_h = round((area / aspect) ** 0.5)
    return image.resize((new_w, new_h), Image.LANCZOS)
# Set the optimiser
def get_opt(opt_name, opt_lr ,z):
    """Build the optimiser named `opt_name` for the single latent tensor z.

    Unknown names fall back to Adam (with a console warning). Factories are
    lambdas so third-party optimisers are only resolved when selected.
    """
    factories = {
        "Adam": lambda: optim.Adam([z], lr=opt_lr),  # LR=0.1 (Default)
        "AdamW": lambda: optim.AdamW([z], lr=opt_lr),
        "Adagrad": lambda: optim.Adagrad([z], lr=opt_lr),
        "Adamax": lambda: optim.Adamax([z], lr=opt_lr),
        "DiffGrad": lambda: DiffGrad([z], lr=opt_lr, eps=1e-9, weight_decay=1e-9),  # NR: Playing for reasons
        "AdamP": lambda: AdamP([z], lr=opt_lr),
        "RAdam": lambda: RAdam([z], lr=opt_lr),
        "RMSprop": lambda: optim.RMSprop([z], lr=opt_lr),
    }
    try:
        return factories[opt_name]()
    except KeyError:
        print("Unknown optimiser. Are choices broken?")
        return optim.Adam([z], lr=opt_lr)
"""
Takes in a latent
"""
# Vector quantize
def synth(z):
    """Decode latent z to an RGB image in [0, 1]: vector-quantize against the
    (module-level) model's codebook, decode, and rescale from [-1, 1]."""
    z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)
    return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)
"""
Writes the loss
synthesizes
Saves the output
"""
@torch.no_grad()
def checkin(i, losses, z, output):
    """Log per-prompt and total losses for iteration i, then synthesize the
    current image from z and save it as a PNG to `output`."""
    losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
    tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
    out = synth(z)
    info = PngImagePlugin.PngInfo()
    # info.add_text('comment', f'{prompts}')
    TF.to_pil_image(out[0].cpu()).save(output, pnginfo=info)
"""
iii is the image
"""
def ascend_txt(z, pMs):
    """Synthesize an image from latent z, embed its cutouts with CLIP, and
    return one loss tensor per prompt in pMs."""
    image = synth(z)
    clip_embed = perceptor.encode_image(normalize(make_cutouts(image))).float()
    return [prompt(clip_embed) for prompt in pMs]
def train(i,z, opt, pMs, output, z_min, z_max):
    """Run one optimisation step on latent z; every display_freq iterations
    (module-level setting) log losses and save the current image."""
    opt.zero_grad(set_to_none=True)
    lossAll = ascend_txt(z, pMs)
    if i % display_freq == 0:
        checkin(i, lossAll,z, output)
    loss = sum(lossAll)
    loss.backward()
    opt.step()
    # Keep z inside the codebook's per-channel value range.
    with torch.no_grad():
        z.copy_(z.maximum(z_min).minimum(z_max))
# Global configuration and model setup.
# NOTE(review): several of these assignments (cutn, cut_pow, optimizer,
# augments, replace_grad, clamp_with_grad, cudnn.deterministic, device) are
# repeated below with identical values — harmless, but one copy could go.
cutn = 32
cut_pow = 1
optimizer = 'Adam'
torch.backends.cudnn.deterministic = True
augments = [['Af', 'Pe', 'Ji', 'Er']]
replace_grad = ReplaceGrad.apply
clamp_with_grad = ClampWithGrad.apply
cuda_device = 0
device = torch.device(cuda_device)
clip_model='ViT-B/32'
vqgan_config=f'checkpoints/vqgan_imagenet_f16_16384.yaml'
vqgan_checkpoint=f'checkpoints/vqgan_imagenet_f16_16384.ckpt'
# Do it
device = torch.device(cuda_device)
model = load_vqgan_model(vqgan_config, vqgan_checkpoint).to(device)
# NOTE(review): parsing the version as float(torch.__version__[:3]) breaks
# for versions like "1.10"; only use JIT for torch < 1.8.
jit = True if float(torch.__version__[:3]) < 1.8 else False
perceptor = clip.load(clip_model, jit=jit)[0].eval().requires_grad_(False).to(device)
# CLIP's expected input resolution determines the cutout size.
cut_size = perceptor.visual.input_resolution
replace_grad = ReplaceGrad.apply
clamp_with_grad = ClampWithGrad.apply
make_cutouts = MakeCutouts(cut_size, cutn, cut_pow=cut_pow)
torch.backends.cudnn.deterministic = True
augments = [['Af', 'Pe', 'Ji', 'Er']]
optimizer = 'Adam'
step_size=0.1
cutn = 32
cut_pow = 1
seed = 64
display_freq=50
# Subjects and art styles whose cross product drives the batch generation below.
subjects = ["blue", "cornucopia", "pumpkin pie", "turkey", "family feud"]
# BUG FIX: the original list was missing a comma after "ink and wash painting",
# so Python's implicit string concatenation merged it with "oil painting" into
# the single bogus entry "ink and wash paintingoil painting", silently dropping
# both intended styles.
styles = ["abstract art", "collage", "computer art", "drawing", "chalk drawing", "charcoal drawing", "conte crayon drawing",
          "pastel drawing", "pen and ink drawing", "pencil drawing", "graffiti art", "mosaic art", "painting",
          "acrylic painting", "encaustic painting", "fresco painting", "gouache painting", "ink and wash painting",
          "oil painting", "watercolor painting", "printmaking", "engraving", "etching", "giclee print",
          "lithography", "screenprinting", "woodcut printing", "sand art", "stained glass art", "tapestry art", "vector art",
          "flat illustration"]
def generate(prompt_string, output_name,iterations = 100, size=(256, 256), seed=16, width=256, height=256):
    """Optimise a VQGAN latent against CLIP for `prompt_string` and save the
    result to `output_name`, checkpointing every display_freq iterations.

    NOTE(review): `size` is never read, and `seed` is only printed — no RNG is
    actually seeded here, so runs are not reproducible; confirm intent.
    """
    pMs=[]
    prompts = [prompt_string]
    output = output_name
    # Encode each text prompt with CLIP and wrap it as a Prompt loss module.
    for prompt in prompts:
        txt, weight, stop = split_prompt(prompt)
        embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
        pMs.append(Prompt(embed, weight, stop).to(device))
    learning_rate = 0.1
    # Output for the user
    print('Using device:', device)
    print('Optimising using:', optimizer)
    print('Using text prompts:', prompts)
    print('Using seed:', seed)
    i = 0
    # Downsampling factor of the VQGAN decoder: image pixels per latent token.
    f = 2**(model.decoder.num_resolutions - 1)
    toksX, toksY = width // f, height // f
    sideX, sideY = toksX * f, toksY * f
    e_dim = model.quantize.e_dim
    n_toks = model.quantize.n_e
    # Per-channel extremes of the codebook: clamp bounds for the latent.
    z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
    z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
    # Initialise z from a random grid of codebook entries.
    one_hot = F.one_hot(torch.randint(n_toks, [toksY * toksX], device=device), n_toks).float()
    z = one_hot @ model.quantize.embedding.weight
    z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2)
    z_orig = z.clone()
    z.requires_grad_(True)
    opt = get_opt(optimizer, learning_rate,z)
    # NOTE(review): this local `normalize` shadows nothing inside train() —
    # ascend_txt uses the module-level `normalize`; the local one is unused.
    normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                     std=[0.26862954, 0.26130258, 0.27577711])
    with tqdm() as pbar:
        while True:
            # Training time
            train(i,z, opt, pMs, output_name, z_min, z_max)
            # Ready to stop yet?
            if i == iterations:
                break
            i += 1
            pbar.update()
# Module-level CLIP input normalisation (used by ascend_txt via make_cutouts).
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                 std=[0.26862954, 0.26130258, 0.27577711])
# Generate one image per (subject, style) combination.
for subject, style in product(subjects, styles):
    generate(f"{subject} in the style of {style}", output_name=f"data/{subject}_{style}_100.png")
```
|
github_jupyter
|
## Homework: Multilingual Embedding-based Machine Translation (7 points)
**In this homework** **<font color='red'>YOU</font>** will make machine translation system without using parallel corpora, alignment, attention, 100500 depth super-cool recurrent neural network and all that kind superstuff.
But even without parallel corpora this system can be good enough (hopefully).
For our system we choose two kindred Slavic languages: Ukrainian and Russian.
### Feel the difference!
(_синій кіт_ vs. _синій кит_)

### Fragment of the Swadesh list for some Slavic languages
The Swadesh list is a lexicostatistical tool. It's named after the American linguist Morris Swadesh and contains basic lexis. This list is used to define subgroupings of languages and their relatedness.
So we can see some kind of word invariance for different Slavic languages.
| Russian | Belorussian | Ukrainian | Polish | Czech | Bulgarian |
|-----------------|--------------------------|-------------------------|--------------------|-------------------------------|-----------------------|
| женщина | жанчына, кабета, баба | жінка | kobieta | žena | жена |
| мужчина | мужчына | чоловік, мужчина | mężczyzna | muž | мъж |
| человек | чалавек | людина, чоловік | człowiek | člověk | човек |
| ребёнок, дитя | дзіця, дзіцёнак, немаўля | дитина, дитя | dziecko | dítě | дете |
| жена | жонка | дружина, жінка | żona | žena, manželka, choť | съпруга, жена |
| муж | муж, гаспадар | чоловiк, муж | mąż | muž, manžel, choť | съпруг, мъж |
| мать, мама | маці, матка | мати, матір, неня, мама | matka | matka, máma, 'стар.' mateř | майка |
| отец, тятя | бацька, тата | батько, тато, татусь | ojciec | otec | баща, татко |
| много | шмат, багата | багато | wiele | mnoho, hodně | много |
| несколько | некалькі, колькі | декілька, кілька | kilka | několik, pár, trocha | няколко |
| другой, иной | іншы | інший | inny | druhý, jiný | друг |
| зверь, животное | жывёла, звер, істота | тварина, звір | zwierzę | zvíře | животно |
| рыба | рыба | риба | ryba | ryba | риба |
| птица | птушка | птах, птиця | ptak | pták | птица |
| собака, пёс | сабака | собака, пес | pies | pes | куче, пес |
| вошь | вош | воша | wesz | veš | въшка |
| змея, гад | змяя | змія, гад | wąż | had | змия |
| червь, червяк | чарвяк | хробак, черв'як | robak | červ | червей |
| дерево | дрэва | дерево | drzewo | strom, dřevo | дърво |
| лес | лес | ліс | las | les | гора, лес |
| палка | кій, палка | палиця | patyk, pręt, pałka | hůl, klacek, prut, kůl, pálka | палка, пръчка, бастун |
But the context distribution of these languages demonstrates even more invariance. And we can use this fact for our purposes.
## Data
```
import gensim
import numpy as np
from gensim.models import KeyedVectors
```
Download embeddings here:
* [cc.uk.300.vec.zip](https://yadi.sk/d/9CAeNsJiInoyUA)
* [cc.ru.300.vec.zip](https://yadi.sk/d/3yG0-M4M8fypeQ)
Load embeddings for ukrainian and russian.
```
uk_emb = KeyedVectors.load_word2vec_format("cc.uk.300.vec")
ru_emb = KeyedVectors.load_word2vec_format("cc.ru.300.vec")
ru_emb.most_similar([ru_emb["август"]], topn=10)
uk_emb.most_similar([uk_emb["серпень"]])
ru_emb.most_similar([uk_emb["серпень"]])
```
Load small dictionaries of corresponding word pairs as the train set and test set.
```
def load_word_pairs(filename):
    """Read tab-separated "uk<TAB>ru" word pairs from `filename`.

    Pairs with a word missing from either (module-level) embedding vocabulary
    uk_emb / ru_emb are skipped. Returns (pairs, uk_matrix, ru_matrix) where
    the two numpy matrices are row-aligned with `pairs`.
    """
    uk_ru_pairs = []
    uk_vectors = []
    ru_vectors = []
    with open(filename, "r") as inpf:
        for line in inpf:
            uk, ru = line.rstrip().split("\t")
            # Skip out-of-vocabulary words on either side.
            if uk not in uk_emb or ru not in ru_emb:
                continue
            uk_ru_pairs.append((uk, ru))
            uk_vectors.append(uk_emb[uk])
            ru_vectors.append(ru_emb[ru])
    return uk_ru_pairs, np.array(uk_vectors), np.array(ru_vectors)
uk_ru_train, X_train, Y_train = load_word_pairs("ukr_rus.train.txt")
uk_ru_test, X_test, Y_test = load_word_pairs("ukr_rus.test.txt")
```
## Embedding space mapping
Let $x_i \in \mathrm{R}^d$ be the distributed representation of word $i$ in the source language, and $y_i \in \mathrm{R}^d$ is the vector representation of its translation. Our purpose is to learn such linear transform $W$ that minimizes euclidian distance between $Wx_i$ and $y_i$ for some subset of word embeddings. Thus we can formulate so-called Procrustes problem:
$$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$$
or
$$W^*= \arg\min_W ||WX - Y||_F$$
where $||*||_F$ - Frobenius norm.
In Greek mythology, Procrustes or "the stretcher" was a rogue smith and bandit from Attica who attacked people by stretching them or cutting off their legs, so as to force them to fit the size of an iron bed. We make same bad things with source embedding space. Our Procrustean bed is target embedding space.


But wait...$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$ looks like simple multiple linear regression (without intercept fit). So let's code.
```
from sklearn.linear_model import LinearRegression
mapping = LinearRegression(fit_intercept=False).fit(X_train, Y_train)
```
Let's take a look at neigbours of the vector of word _"серпень"_ (_"август"_ in Russian) after linear transform.
```
august = mapping.predict(uk_emb["серпень"].reshape(1, -1))
ru_emb.most_similar(august)
```
We can see that the neighbourhood of this embedding consists of different months, but the right variant is only in ninth place.
As quality measure we will use precision top-1, top-5 and top-10 (for each transformed Ukrainian embedding we count how many right target pairs are found in top N nearest neighbours in Russian embedding space).
```
def precision(pairs, mapped_vectors, topn=1):
    """
    :args:
        pairs = list of right word pairs [(uk_word_0, ru_word_0), ...]
        mapped_vectors = list of embeddings after mapping from source embedding space to destination embedding space
        topn = the number of nearest neighbours in destination embedding space to choose from
    :returns:
        precision_val, float number, fraction of words for which the right
        translation is found among the top N nearest neighbours.
    """
    assert len(pairs) == len(mapped_vectors)
    num_matches = 0
    for i, (_, ru) in enumerate(pairs):
        # Ask the model for exactly `topn` neighbours. The original sliced the
        # default 10-neighbour list with [:topn], which silently capped topn
        # at 10 and made precision@k wrong for k > 10.
        neighbours = {word for word, _ in ru_emb.most_similar([mapped_vectors[i]], topn=topn)}
        if ru in neighbours:
            num_matches += 1
    precision_val = num_matches / len(pairs)
    return precision_val
assert precision([("серпень", "август")], august, topn=5) == 0.0
assert precision([("серпень", "август")], august, topn=9) == 1.0
assert precision([("серпень", "август")], august, topn=10) == 1.0
assert precision(uk_ru_test, X_test) == 0.0
assert precision(uk_ru_test, Y_test) == 1.0
precision_top1 = precision(uk_ru_test, mapping.predict(X_test), 1)
precision_top5 = precision(uk_ru_test, mapping.predict(X_test), 5)
assert precision_top1 >= 0.635
assert precision_top5 >= 0.813
```
## Making it better (orthogonal Procrustean problem)
It can be shown (see original paper) that a self-consistent linear mapping between semantic spaces should be orthogonal.
We can restrict transform $W$ to be orthogonal. Then we will solve next problem:
$$W^*= \arg\min_W ||WX - Y||_F \text{, where: } W^TW = I$$
$$I \text{- identity matrix}$$
Instead of making yet another regression problem we can find optimal orthogonal transformation using singular value decomposition. It turns out that optimal transformation $W^*$ can be expressed via SVD components:
$$X^TY=U\Sigma V^T\text{, singular value decomposition}$$
$$W^*=UV^T$$
```
def learn_transform(X_train, Y_train):
    """Solve the orthogonal Procrustes problem for the paired embeddings.

    :returns: W* : float matrix[emb_dim x emb_dim] as defined in formulae above
    """
    # SVD of the cross-covariance X^T Y; the optimal orthogonal map is U V^T.
    cross_covariance = np.matmul(X_train.T, Y_train)
    u, _, vt = np.linalg.svd(cross_covariance)
    return np.matmul(u, vt)
W = learn_transform(X_train, Y_train)
ru_emb.most_similar([np.matmul(uk_emb["серпень"], W)])
assert precision(uk_ru_test, np.matmul(X_test, W)) >= 0.653
assert precision(uk_ru_test, np.matmul(X_test, W), 5) >= 0.824
```
## UK-RU Translator
Now we are ready to make a simple word-based translator: for each word in the source language we find the nearest word in the target language in the shared embedding space.
```
with open("fairy_tale.txt", "r") as inpf:
uk_sentences = [line.rstrip().lower() for line in inpf]
def translate(sentence):
    """
    :args:
        sentence - sentence in Ukrainian (str)
    :returns:
        translation - sentence in Russian (str)

    For every token: look up its Ukrainian embedding, map it into the
    Russian space with W, and substitute the nearest Russian word.
    Tokens without a known embedding are passed through unchanged.
    """
    translated_tokens = []
    for token in sentence.split():
        if token in uk_emb:
            mapped = np.matmul(uk_emb[token], W)
            nearest_word, _ = ru_emb.most_similar([mapped])[0]
            translated_tokens.append(nearest_word)
        else:
            translated_tokens.append(token)
    return " ".join(translated_tokens)
assert translate(".") == "."
assert translate("1 , 3") == "1 , 3"
assert translate("кіт зловив мишу") == "кот поймал мышку"
for sentence in uk_sentences:
print("src: {}\ndst: {}\n".format(sentence, translate(sentence)))
```
Not so bad, right? We can easily improve translation using language model and not one but several nearest neighbours in shared embedding space. But next time.
## Would you like to learn more?
### Articles:
* [Exploiting Similarities among Languages for Machine Translation](https://arxiv.org/pdf/1309.4168) - entry point for multilingual embedding studies by Tomas Mikolov (the author of W2V)
* [Offline bilingual word vectors, orthogonal transformations and the inverted softmax](https://arxiv.org/pdf/1702.03859) - orthogonal transform for unsupervised MT
* [Word Translation Without Parallel Data](https://arxiv.org/pdf/1710.04087)
* [Loss in Translation: Learning Bilingual Word Mapping with a Retrieval Criterion](https://arxiv.org/pdf/1804.07745)
* [Unsupervised Alignment of Embeddings with Wasserstein Procrustes](https://arxiv.org/pdf/1805.11222)
### Repos (with ready-to-use multilingual embeddings):
* https://github.com/facebookresearch/MUSE
* https://github.com/Babylonpartners/fastText_multilingual -
|
github_jupyter
|
# Matplotlib and NumPy crash course
You may install numpy, matplotlib, sklearn and many other useful packages, e.g. via the Anaconda distribution.
```
import numpy as np
```
## NumPy basics
### Array creation
```
np.array(range(10))
np.ndarray(shape=(5, 4))
np.linspace(0, 1, num=20)
np.arange(0, 20)
np.zeros(shape=(5, 4))
np.ones(shape=(5,4))
```
Possible types of array:
- bool
- various ints
- float, double
- string
```
np.ones(shape=(2, 3), dtype="string")
np.zeros(shape=(2, 3), dtype=bool)
np.savetxt("eye.txt", np.eye(5, 6))
np.loadtxt("eye.txt")
%rm eye.txt
```
## Array operations
```
# Elementwise arithmetic on a 1-D array.
a = np.linspace(0, 9, num=10)
a + 1
a * a
a - a
# print is a function in Python 3 (the original used Python 2 print statements).
print(a.max())
print(a.min())
np.sum(a)
# Reshape 25 random normals into a 5x5 matrix and reduce along each axis.
a = np.random.standard_normal(size=(25, ))
a
b = a.reshape((5, 5))
b
b.T
np.sum(b)
print(np.sum(b, axis=1))
print(np.sum(b, axis=0))
### Matrix multiplication
np.dot(b, b)
np.vstack([b, b])
```
### Custom functions
```
# Wrap a scalar Python function so it broadcasts elementwise over arrays.
def plus(x, y):
    # Scalar addition; np.vectorize below lifts it to array arguments.
    return x + y

plus_v = np.vectorize(plus)
plus_v(np.arange(10), np.arange(10, 20))  # elementwise sum of two ranges
plus_v(np.arange(10), 10)  # scalar second argument is broadcast

# Same idea with decorator syntax: rebinds `plus` to its vectorized version.
@np.vectorize
def plus(x, y):
    return x + y

plus(np.arange(10), 10)
```
### Performance
```
N = 10000000
a = np.random.standard_normal(size=N)
b = np.random.standard_normal(size=N)
%%time
a + b
ab = zip(range(N), range(N))
%%time
_ = [ a + b for a, b in ab ]
```
### Slices
```
a = np.arange(15)
a = a.reshape((3,5))
a
# Just a copy of the array
a[:]
a[:, 0]
a[1, :]
a[2, :] = (np.arange(5) + 1) * 10
a
a < 10
a[a < 12]
np.where(a < 12)
xs, ys = np.where(a < 20)
a[xs, ys]
```
## Matplotlib
```
import matplotlib.pyplot as plt
# Don't forget this magic expression if want to show plots in notebook
%matplotlib inline
xs = np.arange(100)
ys = np.cumsum(np.random.standard_normal(size=100))
```
### Line plot
```
plt.figure()
plt.plot(xs, ys)
plt.show()
# A little bit of options
plt.figure()
plt.plot(xs, ys, label="1st series", color="green")
plt.plot(xs, ys.max() - ys, label="2nd series", color="red")
plt.legend(loc="upper right")
plt.xlabel("Time, sec")
plt.ylabel("Something")
plt.title("Just two random series")
plt.show()
```
### Bar plot
```
plt.figure()
plt.bar(xs, ys)
plt.show()
plt.figure()
h, bins, patches = plt.hist(ys)
plt.show()
```
### Scatter plot
```
xs1 = np.random.standard_normal(size=100)
ys1 = np.random.standard_normal(size=100)
xs2 = np.random.standard_normal(size=100) + 3
ys2 = np.random.standard_normal(size=100)
plt.scatter(xs1, ys1, label="class1", color="green")
plt.scatter(xs2, ys2, label="class2", color="red")
plt.plot([1.5, 1.5], [-4, 4], linewidth=3)
plt.legend()
```
### Images
```
means=np.array([[-1, 1], [-1, 1]])
stds = np.array([1, 1.1])
@np.vectorize
def normal_density(mx, my, std, x, y):
    """Unnormalised 2-D isotropic Gaussian at (x, y), scaled by 1/std**2.

    (mx, my) is the mean and `std` the common standard deviation of both axes.
    """
    variance = std * std
    squared_distance = (x - mx) ** 2 + (y - my) ** 2
    return np.exp(-squared_distance / (2.0 * variance)) / variance
@np.vectorize
def f(x, y):
    # Mixture value at (x, y): sum over the component densities defined by the
    # global `means` (row 0: x-means, row 1: y-means) and `stds` arrays.
    return np.sum(
        normal_density(means[0, :], means[1, :], stds, x, y)
    )
mx, my = np.meshgrid(np.linspace(-2, 2, 100), np.linspace(-2, 2, 100))
fs = f(mx, my)
plt.contourf(mx, my, fs, 20, cmap=plt.cm.coolwarm)
plt.colorbar()
plt.contour(mx, my, fs, 20, cmap=plt.cm.coolwarm)
plt.colorbar()
plt.matshow(fs)
plt.colorbar()
plt.imshow(fs)
plt.colorbar()
plt.imshow(np.rot90(fs), extent=[-2, 2, -2, 2])
plt.colorbar()
plt.contour(mx, my, fs, 15, colors="black")
```
# Exercises
- load MNIST dataset
- create arrays of features and labels
- write a procedure to plot digits
- calculate mean, std of images for each class, plot the results
- plot distribution of pixel values: general, for different classes
- *find out which pixel has the most information about label (advanced)*
- *make 3D plots using mplot3d or plotly (advanced)*
|
github_jupyter
|
<a href="https://colab.research.google.com/github/piyushsharma1812/recurrent-neural-networks/blob/master/NER_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Required Libraries
import numpy as np
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import LSTM, Dense, TimeDistributed, Embedding, Bidirectional
from keras.models import Model, Input
from keras_contrib.layers import CRF
from keras.callbacks import ModelCheckpoint
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn_crfsuite.metrics import flat_classification_report
from sklearn.metrics import f1_score
from seqeval.metrics import precision_score, recall_score, f1_score, classification_report
from keras.preprocessing.text import text_to_word_sequence
import pickle
!pip install sklearn_crfsuite
!pip install git+https://www.github.com/keras-team/keras-contrib.git
!pip install seqeval
from google.colab import drive
drive.mount('/content/drive')
path = "/content/drive/My Drive/Colab Notebooks/DataSet/ner_dataset.csv"
df= pd.read_csv(path,encoding = "ISO-8859-1")
df.head()
df["Tag"].unique()
df.describe()
#Checking null values, if any.
df.isnull().sum()
df = df.fillna(method = 'ffill')
# This is a class to get sentences. Each sentence will be a list of tuples with its tag and POS.
# Groups the flat token table into per-sentence lists of (word, POS, tag) tuples.
class sentence(object):
    """Iterate over the sentences of a CoNLL-style token DataFrame.

    Each sentence is a list of (word, POS, tag) tuples, built by grouping
    the DataFrame on the "Sentence #" column.
    """

    def __init__(self, df):
        # 1-based index of the next sentence returned by get_text().
        self.n_sent = 1
        self.df = df
        self.empty = False
        # Collapse each sentence group into a list of (word, pos, tag) tuples.
        agg = lambda s: [(w, p, t) for w, p, t in zip(s['Word'].values.tolist(),
                                                      s['POS'].values.tolist(),
                                                      s['Tag'].values.tolist())]
        self.grouped = self.df.groupby("Sentence #").apply(agg)
        self.sentences = [s for s in self.grouped]

    def get_text(self):
        """Return the next sentence (list of tuples), or None when exhausted."""
        try:
            s = self.grouped['Sentence: {}'.format(self.n_sent)]
            self.n_sent += 1
            return s
        except KeyError:
            # Only a missing sentence key means "no more sentences"; the
            # original bare `except:` also hid unrelated errors.
            return None
#Displaying one full sentence
getter = sentence(df)
sentences = [" ".join([s[0] for s in sent]) for sent in getter.sentences]
sentences[0]
sentences[2]
#sentence with its pos and tag.
sent = getter.get_text()
print(sent)
# Taking all the sentences
sentences = getter.sentences
sentences[:2]
#Defining the parameter of LSTM
# Number of data points passed in each iteration
batch_size = 64
# Passes through entire dataset
epochs = 8
# Maximum length of review
max_len = 75
# Dimension of embedding vector
embedding = 40
#Getting unique words and labels from data
words = list(df['Word'].unique())
tags = list(df['Tag'].unique())
# Dictionary word:index pair
# word is key and its value is corresponding index
word_to_index = {w : i + 2 for i, w in enumerate(words)}
word_to_index["UNK"] = 1
word_to_index["PAD"] = 0
# Dictionary lable:index pair
# label is key and value is index.
tag_to_index = {t : i + 1 for i, t in enumerate(tags)}
tag_to_index["PAD"] = 0
idx2word = {i: w for w, i in word_to_index.items()}
idx2tag = {i: w for w, i in tag_to_index.items()}
print("The word India is identified by the index: {}".format(word_to_index["India"]))
print("The label B-org for the organization is identified by the index: {}".format(tag_to_index["B-org"]))
# Converting each sentence into list of index from list of tokens
X = [[word_to_index[w[0]] for w in s] for s in sentences]
# Padding each sequence to have same length of each word
X = pad_sequences(maxlen = max_len, sequences = X, padding = "post", value = word_to_index["PAD"])
# Convert label to index
y = [[tag_to_index[w[2]] for w in s] for s in sentences]
# padding
y = pad_sequences(maxlen = max_len, sequences = y, padding = "post", value = tag_to_index["PAD"])
num_tag = df['Tag'].nunique()
# One hot encoded labels
y = [to_categorical(i, num_classes = num_tag + 1) for i in y]
y[0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.15)
print("Size of training input data : ", X_train.shape)
print("Size of training output data : ", np.array(y_train).shape)
print("Size of testing input data : ", X_test.shape)
print("Size of testing output data : ", np.array(y_test).shape)
# Let's check the first sentence before and after processing.
print('*****Before Processing first sentence : *****\n', ' '.join([w[0] for w in sentences[0]]))
print('*****After Processing first sentence : *****\n ', X[0])
# First label before and after processing.
print('*****Before Processing first sentence : *****\n', ' '.join([w[2] for w in sentences[0]]))
print('*****After Processing first sentence : *****\n ', y[0])
!pip install keras==2.2.4
df.head()
num_tags = df['Tag'].nunique()
num_tags = df['Tag'].nunique()
# Model architecture
input = Input(shape = (max_len,))
model = Embedding(input_dim = len(words) + 2, output_dim = embedding, input_length = max_len, mask_zero = True)(input)
model = Bidirectional(LSTM(units = 50, return_sequences=True, recurrent_dropout=0.1))(model)
model = TimeDistributed(Dense(50, activation="relu"))(model)
crf = CRF(num_tags+1) # CRF layer
out = crf(model) # output
model = Model(input, out)
model.compile(optimizer="rmsprop", loss=crf.loss_function, metrics=[crf.accuracy])
model.summary()
checkpointer = ModelCheckpoint(filepath = 'model.h5',
verbose = 0,
mode = 'auto',
save_best_only = True,
monitor='val_loss')
history = model.fit(X_train, np.array(y_train), batch_size=batch_size, epochs=epochs,
validation_split=0.1, callbacks=[checkpointer])
history.history.keys()
acc = history.history['crf_viterbi_accuracy']
val_acc = history.history['val_crf_viterbi_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize = (8, 8))
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure(figsize = (8, 8))
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Evaluation
y_pred = model.predict(X_test)
y_pred = np.argmax(y_pred, axis=-1)
y_test_true = np.argmax(y_test, -1)
```
please use
keras==2.2.4
```
# Evaluation
y_pred = model.predict(X_test)
y_pred = np.argmax(y_pred, axis=-1)
y_test_true = np.argmax(y_test, -1)
# Convert the index to tag
y_pred = [[idx2tag[i] for i in row] for row in y_pred]
y_test_true = [[idx2tag[i] for i in row] for row in y_test_true]
print("F1-score is : {:.1%}".format(f1_score(y_test_true, y_pred)))
report = flat_classification_report(y_pred=y_pred, y_true=y_test_true)
print(report)
# At every execution model picks some random test sample from test set.
i = np.random.randint(0,X_test.shape[0]) # choose a random number between 0 and len(X_te)b
p = model.predict(np.array([X_test[i]]))
p = np.argmax(p, axis=-1)
true = np.argmax(y_test[i], -1)
print("Sample number {} of {} (Test Set)".format(i, X_test.shape[0]))
# Visualization
print("{:15}||{:5}||{}".format("Word", "True", "Pred"))
print(30 * "=")
for w, t, pred in zip(X_test[i], true, p[0]):
if w != 0:
print("{:15}: {:5} {}".format(words[w-2], idx2tag[t], idx2tag[pred]))
with open('word_to_index.pickle', 'wb') as f:
pickle.dump(word_to_index, f)
with open('tag_to_index.pickle', 'wb') as f:
pickle.dump(tag_to_index, f)
```
|
github_jupyter
|
# 虚谷号WebGPIO应用(客户端Python版)
虚谷号和手机(App inventor)如何互动控制?
虚谷号和掌控板如何互动控制?
为了让虚谷号和其他开源硬件、编程语言快速互动,虚谷号的WebGPIO应运而生。简单的说,只要在虚谷号上运行一个python文件,就可以用WebAPI的形式来与虚谷号互动,可以获取虚谷号板载Arduino的所有引脚的电平,也可以控制所有引脚。
## 1.接口介绍
要在虚谷号上运行“webgpio.py”。也可以将“webgpio.py”文件更名为“main.py”,复制到vvBoard的Python目录,只要一开机,虚谷号就会执行。
下载地址:https://github.com/vvlink/vvBoard-docs/tree/master/webgpio
WebAPI地址:
http://[虚谷号ip]:1024/
注:下面假设虚谷号的IP地址为:192.168.1.101
### 1.1 获取引脚状态
method方式:GET
参数示例: { pin:"D2" }
url范例:http://192.168.1.101:1024/?pin=D2
信息返回:
当pin为D0--D13时,读取数字引脚的数字值,0为低电平,1为高电平。
{ "pin":"D1", "error_code":0, "msg":1 }
当pin为A0--A5时,读取模拟引脚的模拟值,0-255之间。
{ "pin":"A0", "error_code":0, "msg":255 }
### 1.2. 控制引脚电平
method方式: POST
参数示例:
{ pin:"D1" value:255 type:"digital" }
注:Digital、Analog、Servo等词语不分大小写,也可以用“1、2、3”等数字来代替。
- 当type为digital时,设置引脚的电平值为value的值,0表示LOW,非0表示HIGH;
- 当type为analog时,设置引脚的PWM值为value的值,即0-255之间;
- 当type为servo时,设置引脚上舵机的转动角度为value的值,即0-180之间。
返回参数:
{ "pin":"D2", "error_code":0, "msg":"success,set [pin] to [value] with [types] mode" }
当pin不在D0--D13,A0--A5之间时:
{ "pin":"D2", "error_code":1 "msg":"error,invalid Pin" }
当value不能转换整数时:
{ "pin":"D2", "error_code":1, "msg":"error,Value is wrong" }
当type不正确时:
{ "pin":"D2", "error_code":1, "msg":"error,Type is wrong" }
## 2. 客户端代码范例(Python)
虽然通过任何一个能够发送Http请求的工具,包括浏览器、Word、掌控板、手机等,都可以和虚谷号互动。接下来选择Python语言写一个Demo代码。Python借助Requests库来发送Http请求,是非常方便的。参数传递方面,同时支持params和data两种模式。
### 2.1.调用POST方法,对虚谷号的引脚进行控制
在该案例中可以修改的参数有:
- url:设置成虚谷号的IP地址
- pin:对应的引脚 A0-A5,D0-D13
- value:对应的数值
- type:控制的类型可以是1,2,3,分别代表“digital”、“analog”、“servo”
当设置D13号引脚的电平为1,该引脚对应的LED就会亮起。
```
import requests

# Target board address and command parameters.
vvboardip = '192.168.3.42'
pin = 'D13'
value = 1
t = 1  # 1: digital, 2: analog, 3: servo

payload = {"pin": pin, 'value': value, 'type': t}
# Named `response` rather than `re`, which shadows the stdlib regex module.
response = requests.post(url='http://' + vvboardip + ':1024/', params=payload)
if (response.status_code == 200):
    r = response.json()
    print('成功发送控制命令:' + r["msg"])
    print('返回的信息为:')
    print(response.text)
```
### 2.2. 调用GET方法,读取A0号引脚的电平。
在该案例中可以修改的参数有:
- url:设置成虚谷号的IP地址
- pin:对应的引脚 A0-A5,D0-D13
注意:该方法需要外接传感器,否则数字口默认返回为低电平,模拟口返回随机数。
```
import requests

# Target board address and the pin to read.
vvboardip = '192.168.3.42'
pin = 'A0'

payload = {"pin": pin}
# Named `response` rather than `re`, which shadows the stdlib regex module.
response = requests.get(url='http://' + vvboardip + ':1024/', params=payload)
if (response.status_code == 200):
    r = response.json()
    print('成功获取引脚' + r["pin"] + '的状态:' + r["msg"])
    print('返回的原始信息为:')
    print(response.text)
```
## 3. 其他说明
1.手机上快速控制,如何实现?
访问:http://192.168.3.42:1024/help/
可以直接在网页上测试接口。
2.App Inventor如何借助这一接口与虚谷号互动?
请参考github,提供了范例。
https://github.com/vvlink/vvBoard-docs/tree/master/webgpio,
3.掌控板如何利用这一接口与虚谷号互动?
掌控板中提供了urequests库,在mPython软件中可以编写发送Http请求的应用。
另外,掌控板中提供了WebtinyIO,使用方式和虚谷号的WebGPIO基本一致。
|
github_jupyter
|
# Toy Environment

In this exercise we will learn how to implement a simple Toy environment using Python. The environment is illustrated in figure. It is composed of 3 states and 2 actions. The initial state is state 1.
The goal of this exercise is to implement a class Environment with a method step() taking as input the agent’s action and returning the pair (next state, reward). The environment can be implemented using pure python. In addition, write also a reset() method that restarts the environment state.
```
from typing import Tuple
class Environment:
    """Three-state toy MDP with two actions (0: A, 1: B); initial state is 1."""

    # One-step dynamics: (state, action) -> (next_state, reward).
    _TRANSITIONS = {
        (1, 0): (2, 1),
        (1, 1): (3, 10),
        (2, 0): (1, 0),
        (2, 1): (3, 1),
        (3, 0): (2, 0),
        (3, 1): (3, 10),
    }

    def __init__(self):
        """
        Constructor of the Environment class.
        """
        self._initial_state = 1
        self._allowed_actions = [0, 1]  # 0: A, 1: B
        self._states = [1, 2, 3]
        self._current_state = self._initial_state

    def step(self, action: int) -> Tuple[int, int]:
        """
        Step function: compute the one-step dynamic from the given action.

        Args:
            action (int): the action taken by the agent.

        Returns:
            The tuple current_state, reward.
        """
        # Reject anything outside the action space.
        if action not in self._allowed_actions:
            raise ValueError("Action is not allowed")
        # Table lookup replaces the original if/elif cascade; every
        # (state, action) pair of this MDP is covered.
        self._current_state, reward = self._TRANSITIONS[(self._current_state, action)]
        return self._current_state, reward

    def reset(self) -> int:
        """
        Reset the environment starting from the initial state.

        Returns:
            The environment state after reset (initial state).
        """
        self._current_state = self._initial_state
        return self._current_state
env = Environment()
state = env.reset()
actions = [0, 0, 1, 1, 0, 1]
print(f"Initial state is {state}")
for action in actions:
next_state, reward = env.step(action)
print(f"From state {state} to state {next_state} with action {action}, reward: {reward}")
state = next_state
```
|
github_jupyter
|
The second example shows how to set up a power-law spectral source, as well as how to add two sets of photons together.
This example will also briefly show how to set up a mock dataset "in memory" using yt. For more details on how to do this, check out [the yt docs on in-memory datasets](http://yt-project.org/doc/examining/generic_array_data.html).
Load up the necessary modules:
```
%matplotlib inline
import matplotlib
matplotlib.rc("font", size=18, family="serif")
import yt
import numpy as np
import matplotlib.pyplot as plt
from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
from yt.units import mp
import pyxsim
```
The cluster we set up will be a simple isothermal $\beta$-model system, with a temperature of 4 keV. We'll set it up on a uniform grid of 2 Mpc and 256 cells on a side. The parameters of the model are:
```
R = 1000. # radius of cluster in kpc
r_c = 100. # scale radius of cluster in kpc
rho_c = 1.673e-26 # scale density in g/cm^3
beta = 1. # beta parameter
kT = 4. # cluster temperature in keV
nx = 256
ddims = (nx,nx,nx)
```
and we set up the density and temperature arrays:
```
x, y, z = np.mgrid[-R:R:nx*1j,
-R:R:nx*1j,
-R:R:nx*1j]
r = np.sqrt(x**2+y**2+z**2)
dens = np.zeros(ddims)
dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
dens[r > R] = 0.0
temp = kT*K_per_keV*np.ones(ddims)
```
Next, we will take the density and temperature arrays and put them into a dictionary with their units, where we will also set up velocity fields set to zero. Then, we'll call the yt function `load_uniform_grid` to set this up as a full-fledged yt dataset.
```
data = {}
data["density"] = (dens, "g/cm**3")
data["temperature"] = (temp, "K")
data["velocity_x"] = (np.zeros(ddims), "cm/s")
data["velocity_y"] = (np.zeros(ddims), "cm/s")
data["velocity_z"] = (np.zeros(ddims), "cm/s")
bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]]) # The bounding box of the domain in code units
ds = yt.load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
```
The next thing we have to do is specify a derived field for the normalization of the power-law emission. This could come from a variety of sources, for example, relativistic cosmic-ray electrons. For simplicity, we're not going to assume a specific model, except that we will only specify that the source of the power law emission is proportional to the gas mass in each cell:
```
norm = yt.YTQuantity(1.0e-19, "photons/s/keV")
def _power_law_emission(field, data):
    # yt derived-field function: power-law emission normalization proportional
    # to the particle count per cell (cell mass / proton mass) times the
    # global `norm` (photons/s/keV per particle).
    return norm*data["cell_mass"]/mp
ds.add_field(("gas","power_law_emission"), function=_power_law_emission, units="photons/s/keV")
```
where we have normalized the field arbitrarily. Note that the emission field for a power-law model is a bit odd in that it is technically a specific *luminosity* for the cell. This is done primarily for simplicity in designing the underlying algorithm.
Now, let's set up a sphere to collect photons from:
```
sp = ds.sphere("c", (0.5, "Mpc"))
```
And set the parameters for the initial photon sample:
```
A = yt.YTQuantity(500., "cm**2")
exp_time = yt.YTQuantity(1.0e5, "s")
redshift = 0.03
```
Set up two source models, a thermal model and a power-law model:
```
thermal_model = pyxsim.ThermalSourceModel("apec", 0.01, 80.0, 80000, Zmet=0.3)
plaw_model = pyxsim.PowerLawSourceModel(1.0, 0.01, 80.0, "power_law_emission", 1.0)
```
Now, generate the photons for each source model. After we've generated the photons for both, we'll add them together.
```
thermal_photons = pyxsim.PhotonList.from_data_source(sp, redshift, A, exp_time, thermal_model)
plaw_photons = pyxsim.PhotonList.from_data_source(sp, redshift, A, exp_time, plaw_model)
photons = thermal_photons + plaw_photons
```
Now, we want to project the photons along a line of sight. We'll specify the `"wabs"` model for foreground galactic absorption. We'll create events from the total set of photons as well as the power-law only set, to see the difference between the two.
```
events = photons.project_photons("x", (30.0, 45.0), absorb_model="wabs", nH=0.02)
plaw_events = plaw_photons.project_photons("x", (30.0, 45.0), absorb_model="wabs", nH=0.02)
```
Finally, create energy spectra for both sets of events. We won't bother convolving with instrument responses here, because we just want to see what the spectra look like.
```
events.write_spectrum("all_spec.fits", 0.1, 80.0, 8000, overwrite=True)
plaw_events.write_spectrum("plaw_spec.fits", 0.1, 80.0, 8000, overwrite=True)
```
To visualize the spectra, we'll load them up using [AstroPy's FITS I/O](http://docs.astropy.org/en/stable/io/fits/) and use [Matplotlib](http://matplotlib.org) to plot the spectra:
```
import astropy.io.fits as pyfits
f1 = pyfits.open("all_spec.fits")
f2 = pyfits.open("plaw_spec.fits")
plt.figure(figsize=(9,7))
plt.loglog(f2["SPECTRUM"].data["ENERGY"], f2["SPECTRUM"].data["COUNTS"])
plt.loglog(f1["SPECTRUM"].data["ENERGY"], f1["SPECTRUM"].data["COUNTS"])
plt.xlim(0.1, 50)
plt.ylim(1, 2.0e4)
plt.xlabel("E (keV)")
plt.ylabel("counts/bin")
```
As you can see, the green line shows a sum of a thermal and power-law spectrum, the latter most prominent at high energies. The blue line shows the power-law spectrum. Both spectra are absorbed at low energies, thanks to the Galactic foreground absorption.
|
github_jupyter
|
```
# extract features of region of an image from mask-rcnn by Detectron2
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
import random
import io
import torch
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances
from pprint import pprint
# Show the image in ipynb
from IPython.display import clear_output, Image, display
import PIL.Image
def build_cfg(score_thresh=0.5):
    """Build a Detectron2 config for the COCO Faster R-CNN R50-FPN 3x model.

    score_thresh: minimum detection score kept at test time.
    """
    model_config = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_config))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thresh
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_config)
    return cfg
def extract_features(im, predictor):
    """
    @param im: directly imread from OpenCV, e.g. cv2.imread("data/input.jpg")
    @return
        - box_features: shape=(num_rois, feature_dim=1024)
        - outputs: outputs of predictor
    """
    # Run the full forward pass first to obtain the final predicted boxes.
    outputs = predictor(im)
    instances = outputs["instances"]
    # components of model
    model = predictor.model
    backbone = model.backbone
    proposal_generator = model.proposal_generator
    roi_heads = model.roi_heads
    # preprocess image
    height, width = im.shape[:2]
    x = predictor.transform_gen.get_transform(im).apply_image(im)
    # HWC uint8 -> CHW float32 tensor, matching the predictor's own preprocessing.
    x = torch.as_tensor(x.astype("float32").transpose(2, 0, 1))
    batched_inputs = [{"image": x, "height": height, "width": width}]
    # main procedure
    images = model.preprocess_image(batched_inputs)
    features = backbone(images.tensor)
    # Re-use the *predicted* boxes as proposals so ROI pooling runs on the
    # final detections rather than on fresh RPN proposals.
    proposals = [Instances(image_size=x.shape[1:], proposal_boxes=instances.pred_boxes)]
    features = [features[f] for f in roi_heads.in_features]
    box_features = roi_heads.box_pooler(features, [x.proposal_boxes for x in proposals])
    # box_head maps pooled features to the per-ROI feature vectors we return.
    box_features = roi_heads.box_head(box_features)
    return box_features, outputs
def showarray(a, fmt='jpeg'):
    """Display a numpy image array inline in the notebook.

    Values are clipped to [0, 255] and cast to uint8 before encoding.
    """
    clipped = np.clip(a, 0, 255).astype(np.uint8)
    buffer = io.BytesIO()
    PIL.Image.fromarray(clipped).save(buffer, fmt)
    display(Image(data=buffer.getvalue()))
# load image
im = cv2.imread("data/input.jpg")
im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
showarray(im_rgb)
cfg = build_cfg()
predictor = DefaultPredictor(cfg)
box_features, outputs = extract_features(im, predictor)
print("box_features.shape: {}".format(box_features.shape))
print("box_location.shape: {}".format(outputs["instances"].pred_boxes.tensor.shape))
# visualization
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
showarray(v.get_image())
```
|
github_jupyter
|
```
from helpers.utilities import *
%run helpers/notebook_setup.ipynb
```
While attempting to compare limma's results for log-transformed an non-transformed data, it was noticed (and brought up by Dr Tim) That the values of logFC produced by limma for non-transformed data are of wrong order of magnitude.
I have investigated the issue, following the limma calculations for non-transformed data step by step:
```
indexed_by_target_path = 'data/clean/protein/indexed_by_target.csv'
clinical_path = 'data/clean/protein/clinical_data_ordered_to_match_proteins_matrix.csv'
clinical = read_csv(clinical_path, index_col=0)
raw_protein_matrix = read_csv(indexed_by_target_path, index_col=0)
by_condition = clinical.Meningitis
tb_lysozyme = raw_protein_matrix[
raw_protein_matrix.columns[by_condition == 'Tuberculosis']
].loc['Lysozyme'].mean()
hc_lysozyme = raw_protein_matrix[
raw_protein_matrix.columns[by_condition == 'Healthy control']
].loc['Lysozyme'].mean()
tb_lysozyme / hc_lysozyme
tb_lysozyme
hc_lysozyme
```
While for the transformed data:
```
from numpy import log10
log10(tb_lysozyme)
log10(hc_lysozyme)
log10(tb_lysozyme) / log10(hc_lysozyme)
protein_matrix = raw_protein_matrix.apply(log10)
%%R -i protein_matrix -i by_condition
import::here(space_to_dot, dot_to_space, .from='helpers/utilities.R')
import::here(
limma_fit, limma_diff_ebayes, full_table,
design_from_conditions, calculate_means,
.from='helpers/differential_expression.R'
)
diff_ebayes = function(a, b, data=protein_matrix, conditions_vector=by_condition, ...) {
limma_diff_ebayes(a, b, data=data, conditions_vector=conditions_vector, ...)
}
%%R -o tb_all_proteins_raw -i raw_protein_matrix
result = diff_ebayes('Tuberculosis', 'Healthy control', data=raw_protein_matrix)
tb_all_proteins_raw = full_table(result)
%%R
head(full_table(result, coef=1))
%%R
# logFC is taken from the coefficient of fit (result):
# it seems that the coefficients do not represent the FC as would expected...
result$coefficients['Lysozyme', ]
```
We can trace it back to:
```
%%R
fit = limma_fit(
data=raw_protein_matrix, conditions_vector=by_condition,
a='Tuberculosis', b='Healthy control'
)
%%R
fit$coefficients['Lysozyme', ]
```
It changes when using using only the data from TB and HC, though it continues to produce large values:
```
%%R
fit = limma_fit(
data=raw_protein_matrix, conditions_vector=by_condition,
a='Tuberculosis', b='Healthy control', use_all=F
)
%%R
fit$coefficients['Lysozyme', ]
```
Getting back to the previous version, we can see that the means are correctly calculated:
```
%%R
design <- design_from_conditions(by_condition)
fit <- calculate_means(raw_protein_matrix, design)
%%R
fit$coefficients['Lysozyme', ]
tb_lysozyme, hc_lysozyme
%%R
contrast_specification <- paste(
space_to_dot('Tuberculosis'),
space_to_dot('Healthy control'),
sep='-'
)
contrast.matrix <- limma::makeContrasts(contrasts=contrast_specification, levels=design)
contrast.matrix
```
There is only one step more:
> fit <- limma::contrasts.fit(fit, contrast.matrix)
so the problem must be here
```
%%R
fit_contrasted <- limma::contrasts.fit(fit, contrast.matrix)
fit_contrasted$coefficients['Lysozyme', ]
```
Note the result we got: 61798.20 is:
```
tb_lysozyme - hc_lysozyme
%%R
final_fit = limma::eBayes(fit_contrasted, trend=T, robust=T)
final_fit$coefficients['Lysozyme', ]
```
This shows that limma does not produce the fold change at all.
This is because it assumes that the data are log-transformed upfront. **If we gave it log-transformed data, the difference of logs would be equivalent to division.**
|
github_jupyter
|
### AVGN Tutorial
This tutorial walks you through getting started with AVGN on a sample dataset, so you can figure out how to use it on your own data.
If you're not too familiar with Python, make sure you've first familiarized yourself with Jupyter notebooks and installing Python packages. Then come back and try the tutorial.
There may be some packages here that you need that aren't installed by default. If you find one of these, just try `pip install`-ing it locally. If you're still having trouble add an issue on the GitHub repository and I'll help you figure it out.
### Installing AVGN on your computer
First, download the repository locally and install it:
1. Navigate to the folder in your local environment where you want to install the repository.
2. Type `git clone https://github.com/timsainb/avgn_paper.git`
3. Open the `avgn_paper` folder
4. Install the package by typing `python setup.py develop`
Now in python you should be able to `import avgn`
### Downloading a sample dataset
In this example, we'll download [a dataset of .WAV files of acoustically isolated Bengalese finch song](https://figshare.com/articles/BirdsongRecognition/3470165?file=5463221). Each .WAV is accompanied by a set of hand annotations, giving us the boundaries for each syllable.
```
from avgn.downloading.download import download_tqdm
from avgn.utils.paths import DATA_DIR
from avgn.utils.general import unzip_file
from tqdm.autonotebook import tqdm
# where the files are located online (url, filename)
data_urls = [
    ('https://ndownloader.figshare.com/articles/3470165/versions/1', 'all_files.zip'),
]
# where to save the files (DATA_DIR is the repository-level data folder)
output_loc = DATA_DIR/"raw/koumura/"
# download the files locally, with a progress bar
for url, filename in data_urls:
    download_tqdm(url, output_location=output_loc/filename)
# list the downloaded files
# NOTE(review): assumes download_tqdm unpacked the archive into "zip_contents" -- confirm
zip_files = list((output_loc/"zip_contents").glob('*.zip'))
zip_files[:2]
# unzip each per-bird archive in place
for zf in tqdm(zip_files):
    unzip_file(zf, output_loc/"zip_contents")
```
### Getting the data into a usable format
Now that the data is saved, we want to get the annotations into the same format as all of the other datasets.
The format we use is JSON, which just holds a dictionary of information about the dataset.
For each .WAV file, we will create a JSON that looks something like this:
```
{
"length_s": 15,
"samplerate_hz": 30000,
"wav_location": "/location/of/my/dataset/myfile.wav",
"indvs": {
"Bird1": {
"species": "Bengalese finch",
"units": {
"syllables": {
"start_times": [1.5, 2.5, 6],
"end_times": [2.3, 4.5, 8],
"labels": ["a", "b", "c"],
},
}
},
}
```
To get data into this format, you're generally going to have to write a custom parser to convert your data from your format into AVGN format. We're going to create a custom parser here for this dataset, as an example. You could also create these JSONs by hand.
**Note:** If your dataset is more annotated than that, take a look at the readme.md in the github repository for more examples of JSONs. If your dataset is not already segmented for syllables, don't add "units", and you can add them after automatic segmentation.
```
from datetime import datetime
import avgn.utils
import numpy as np
# location of the raw (unzipped) dataset downloaded above
RAW_DATASET_LOC = output_loc/"zip_contents"
RAW_DATASET_LOC
# first we create a name for our dataset
DATASET_ID = 'koumura_bengalese_finch'
# create a unique datetime identifier for the files output by this notebook
DT_ID = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# grab a list of all the raw waveforms (one Wave/ folder per bird)
wav_list = list(RAW_DATASET_LOC.glob('Bird*/Wave/*.wav'))
len(wav_list), np.sort(wav_list)[-2:]
# grab a list of all of the raw annotation files (one Annotation.xml per bird)
annotation_files = list(RAW_DATASET_LOC.glob('Bird*/Annotation.xml'))
len(annotation_files), np.sort(annotation_files)[-2:]
```
#### Now, for each wav file, we want to generate a JSON, using information from the XML.
Lets take a look inside an XML first, to see what's in there. It might be useful to take a look at this XML file in your web browser to get a better idea of what's in there as well.
```
import xml.etree.ElementTree
import xml.dom.minidom

# Print a sample of the XML so we can see the annotation structure.
# Bug fix: the pretty-printed string must come from the parsed document;
# the original referenced an undefined name `dom`, raising NameError.
parsed = xml.dom.minidom.parse(annotation_files[0].as_posix())
pretty_xml_as_string = parsed.toprettyxml()
print(pretty_xml_as_string[:400] + '...')
```
### Parse XML
Before we create a JSON, we can create a pandas dataframe with all the relevant info from the XML. This is all very specific to this dataset, but hopefully it gives you an idea of what you need to do for your dataset.
```
import pandas as pd

# Accumulator: one row per <Sequence> (song bout) across all birds.
song_df = pd.DataFrame(
    columns=[
        "bird",
        "WaveFileName",
        "Position",
        "Length",
        "NumNote",
        "NotePositions",
        "NoteLengths",
        "NoteLabels",
    ]
)
song_df
# loop through XML annotation files (one Annotation.xml per bird)
for bird_loc in tqdm(annotation_files):
    # parse the XML and derive the bird ID from its folder name
    bird_xml = xml.etree.ElementTree.parse(bird_loc).getroot()
    bird = bird_loc.parent.stem
    # loop through each "Sequence" in the dataset (corresponding to a bout)
    # Bug fix: Element.getchildren() was removed in Python 3.9 -- iterate
    # the Element directly instead.
    for element in tqdm(list(bird_xml), leave=False):
        if element.tag == "Sequence":
            notePositions = []
            noteLengths = []
            noteLabels = []
            # get the metadata for that sequence
            for seq_element in list(element):
                if seq_element.tag == "Position":
                    position = seq_element.text
                elif seq_element.tag == "Length":
                    length = seq_element.text
                elif seq_element.tag == "WaveFileName":
                    WaveFileName = seq_element.text
                elif seq_element.tag == "NumNote":
                    NumNote = seq_element.text
                # get the metadata for each note within the sequence
                elif seq_element.tag == "Note":
                    for note_element in list(seq_element):
                        if note_element.tag == "Label":
                            noteLabels.append(note_element.text)
                        elif note_element.tag == "Position":
                            notePositions.append(note_element.text)
                        elif note_element.tag == "Length":
                            noteLengths.append(note_element.text)
            # add the sequence to the pandas dataframe
            song_df.loc[len(song_df)] = [
                bird,
                WaveFileName,
                position,
                length,
                NumNote,
                notePositions,
                noteLengths,
                noteLabels,
            ]
song_df[:3]
```
### Now we can generate a JSON from that pandas dataframe
```
from avgn.utils.audio import get_samplerate
import librosa
import json  # bug fix: json.dumps is used below but json was never imported
from avgn.utils.json import NoIndent, NoIndentEncoder

# For each bird, emit one AVGN-format JSON per wav file, combining the wav
# metadata with the note annotations parsed into song_df above.
for bird in tqdm(np.unique(song_df.bird)):
    # grab that bird's annotations
    bird_df = song_df[song_df.bird == bird]
    # for each wav file produced by that bird
    for wfn in tqdm(bird_df.WaveFileName.unique(), leave=False):
        wfn_df = bird_df[bird_df.WaveFileName == wfn]
        # get the location of the wav
        wav_loc = RAW_DATASET_LOC / bird / "Wave" / wfn
        # get the wav samplerate and duration
        sr = get_samplerate(wav_loc.as_posix())
        # NOTE(review): librosa >= 0.10 renamed `filename=` to `path=` -- confirm
        # the pinned librosa version before upgrading.
        wav_duration = librosa.get_duration(filename=wav_loc)
        # make json dictionary
        json_dict = {}
        # add species
        json_dict["species"] = "Lonchura striata domestica"
        json_dict["common_name"] = "Bengalese finch"
        json_dict["wav_loc"] = wav_loc.as_posix()
        # rate and length
        json_dict["samplerate_hz"] = sr
        json_dict["length_s"] = wav_duration
        # make a dataframe of wav info; note positions/lengths are in samples,
        # offset by the sequence Position, and converted to seconds via sr
        seq_df = pd.DataFrame(
            (
                [
                    [
                        list(np.repeat(sequence_num, len(row.NotePositions))),
                        list(row.NoteLabels),
                        np.array(
                            (np.array(row.NotePositions).astype("int") + int(row.Position))
                            / sr
                        ).astype("float64"),
                        np.array(
                            (
                                np.array(row.NotePositions).astype("int")
                                + np.array(row.NoteLengths).astype("int")
                                + int(row.Position)
                            )
                            / sr
                        ).astype("float64"),
                    ]
                    for sequence_num, (idx, row) in enumerate(wfn_df.iterrows())
                ]
            ),
            columns=["sequence_num", "labels", "start_times", "end_times"],
        )
        # add syllable information, flattened across sequences
        json_dict["indvs"] = {
            bird: {
                "notes": {
                    "start_times": NoIndent(
                        list(np.concatenate(seq_df.start_times.values))
                    ),
                    "end_times": NoIndent(list(np.concatenate(seq_df.end_times.values))),
                    "labels": NoIndent(list(np.concatenate(seq_df.labels.values))),
                    "sequence_num": NoIndent(
                        [int(i) for i in np.concatenate(seq_df.sequence_num.values)]
                    ),
                }
            }
        }
        # dump dict into json format
        json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
        wav_stem = bird + "_" + wfn.split(".")[0]
        json_out = (
            DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (wav_stem + ".JSON")
        )
        # save json (ensure_dir creates parent folders if needed)
        avgn.utils.paths.ensure_dir(json_out.as_posix())
        # bug fix: use a context manager so the file handle is closed promptly
        # (the original opened the file inline in print() and never closed it)
        with open(json_out.as_posix(), "w") as json_file:
            print(json_txt, file=json_file)
# print an example JSON corresponding to the dataset we just made
print(json_txt)
```
### Now this dataset is in the right format for further analysis.
In the next notebook, we'll segment out the notes/syllables and compute spectrograms that can be projected.
|
github_jupyter
|
# First BigQuery ML models for Taxifare Prediction
In this notebook, we will use BigQuery ML to build our first models for taxifare prediction. BigQuery ML provides a fast way to build ML models on large structured and semi-structured datasets.
## Learning Objectives
1. Choose the correct BigQuery ML model type and specify options
2. Evaluate the performance of your ML model
3. Improve model performance through data quality cleanup
4. Create a Deep Neural Network (DNN) using SQL
Each learning objective will correspond to a __#TODO__ in the [student lab notebook](../labs/first_model.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
We'll start by creating a dataset to hold all the models we create in BigQuery
### Import libraries
```
import os
```
### Set environment variables
```
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["BUCKET"] = PROJECT # DEFAULT BUCKET WILL BE PROJECT ID
os.environ["REGION"] = REGION
if PROJECT == "your-gcp-project-here":
print("Don't forget to update your PROJECT name! Currently:", PROJECT)
```
## Create a BigQuery Dataset and Google Cloud Storage Bucket
A BigQuery dataset is a container for tables, views, and models built with BigQuery ML. Let's create one called __serverlessml__ if we have not already done so in an earlier lab. We'll do the same for a GCS bucket for our project too.
```
%%bash
## Create a BigQuery dataset for serverlessml if it doesn't exist
datasetexists=$(bq ls -d | grep -w serverlessml)
if [ -n "$datasetexists" ]; then
    echo -e "BigQuery dataset already exists, let's not recreate it."
else
    echo "Creating BigQuery dataset titled: serverlessml"
    bq --location=US mk --dataset \
        --description 'Taxi Fare' \
        $PROJECT:serverlessml
    # bug fix: -e so "\n" renders as a blank line instead of printing literally
    echo -e "\nHere are your current datasets:"
    bq ls
fi
## Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${PROJECT}/)
if [ -n "$exists" ]; then
    echo -e "Bucket exists, let's not recreate it."
else
    echo "Creating a new GCS bucket."
    gsutil mb -l ${REGION} gs://${PROJECT}
    echo -e "\nHere are your current buckets:"
    gsutil ls
fi
```
## Model 1: Raw data
Let's build a model using just the raw data. It's not going to be very good, but sometimes it is good to actually experience this.
The model will take a minute or so to train. When it comes to ML, this is blazing fast.
```
%%bigquery
CREATE OR REPLACE MODEL
serverlessml.model1_rawdata
OPTIONS(input_label_cols=['fare_amount'],
model_type='linear_reg') AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count * 1.0 AS passengers
FROM
`nyc-tlc.yellow.trips`
WHERE
MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 1
```
Once the training is done, visit the [BigQuery Cloud Console](https://console.cloud.google.com/bigquery) and look at the model that has been trained. Then, come back to this notebook.
Note that BigQuery automatically split the data we gave it, and trained on only a part of the data and used the rest for evaluation. We can look at eval statistics on that held-out data:
```
%%bigquery
SELECT * FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata)
```
Let's report just the error we care about, the Root Mean Squared Error (RMSE)
```
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model1_rawdata)
```
We told you it was not going to be good! Recall that our heuristic got 8.13, and our target is $6.
Note that the error is going to depend on the dataset that we evaluate it on.
We can also evaluate the model on our own held-out benchmark/test dataset, but we shouldn't make a habit of this (we want to keep our benchmark dataset as the final evaluation, not make decisions using it all along the way. If we do that, our test dataset won't be truly independent).
```
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model1_rawdata, (
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count * 1.0 AS passengers
FROM
`nyc-tlc.yellow.trips`
WHERE
MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 2
AND trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
))
```
## Model 2: Apply data cleanup
Recall that we did some data cleanup in the previous lab. Let's do those before training.
This is a dataset that we will need quite frequently in this notebook, so let's extract it first.
```
%%bigquery
CREATE OR REPLACE TABLE
serverlessml.cleaned_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers
FROM
`nyc-tlc.yellow.trips`
WHERE
MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 1
AND trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM serverlessml.cleaned_training_data
LIMIT 0
%%bigquery
CREATE OR REPLACE MODEL
serverlessml.model2_cleanup
OPTIONS(input_label_cols=['fare_amount'],
model_type='linear_reg') AS
SELECT
*
FROM
serverlessml.cleaned_training_data
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model2_cleanup)
```
## Model 3: More sophisticated models
What if we try a more sophisticated model? Let's try Deep Neural Networks (DNNs) in BigQuery:
### DNN
To create a DNN, simply specify __dnn_regressor__ for the model_type and add your hidden layers.
```
%%bigquery
-- This model type is in alpha, so it may not work for you yet.
-- This training takes on the order of 15 minutes.
CREATE OR REPLACE MODEL
serverlessml.model3b_dnn
OPTIONS(input_label_cols=['fare_amount'],
model_type='dnn_regressor', hidden_units=[32, 8]) AS
SELECT
*
FROM
serverlessml.cleaned_training_data
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model3b_dnn)
```
Nice!
## Evaluate DNN on benchmark dataset
Let's use the same validation dataset to evaluate -- remember that evaluation metrics depend on the dataset. You can not compare two models unless you have run them on the same withheld data.
```
%%bigquery
-- Evaluate the DNN on the held-out benchmark split.
-- Bug fix: the text above says this is the SAME validation dataset used for
-- model 1, but the original sampled MOD(...) over 10000 (a 10x larger split);
-- use the 1-in-100,000 bucket to match the model-1 benchmark query.
-- NOTE(review): pickup_datetime and key are not training features of
-- model3b_dnn; confirm ML.EVALUATE tolerates the extra columns here.
SELECT
  SQRT(mean_squared_error) AS rmse
FROM
  ML.EVALUATE(MODEL serverlessml.model3b_dnn, (
  SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    pickup_datetime,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat,
    passenger_count * 1.0 AS passengers,
    'unused' AS key
  FROM
    `nyc-tlc.yellow.trips`
  WHERE
    MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 2
    AND trip_distance > 0
    AND fare_amount >= 2.5
    AND pickup_longitude > -78
    AND pickup_longitude < -70
    AND dropoff_longitude > -78
    AND dropoff_longitude < -70
    AND pickup_latitude > 37
    AND pickup_latitude < 45
    AND dropoff_latitude > 37
    AND dropoff_latitude < 45
    AND passenger_count > 0
  ))
```
Wow! Later in this sequence of notebooks, we will get to below $4, but this is quite good, for very little work.
In this notebook, we showed you how to use BigQuery ML to quickly build ML models. We will come back to BigQuery ML when we want to experiment with different types of feature engineering. The speed of BigQuery ML is very attractive for development.
Copyright 2019 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
github_jupyter
|
# Plotting
Here you can explore the different possibilities that the hep_spt package offers for plotting.
```
%matplotlib inline
import hep_spt
hep_spt.set_style()
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import norm
```
## Plotting a (non)weighted sample
Use the function "errorbar_hist" to plot the same sample without and with weights. In the non-weighted case, we will ask for frequentist poissonian errors, so we will get asymmetric error bars for low values of the number of entries.
```
# Create a random gaussian sample with uniform per-event weights
size = 200
smp = np.random.normal(0, 3, size)
wgts = np.random.uniform(0, 1, size)
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 5))
# Make the non-weighted plot
# 'freq' requests frequentist poissonian errors (per the text above),
# asymmetric for bins with few entries
values, edges, ex, ey = hep_spt.errorbar_hist(smp, bins=10, range=(-7, 7), uncert='freq')
centers = (edges[1:] + edges[:-1])/2.
ax0.errorbar(centers, values, ey, ex, ls='none')
ax0.set_title('Non-weighted sample')
# Make the weighted plot (errors come from the weights here)
values, edges, ex, ey = hep_spt.errorbar_hist(smp, bins=10, range=(-7, 7), weights=wgts)
centers = (edges[1:] + edges[:-1])/2.
ax1.errorbar(centers, values, ey, ex, ls='none')
ax1.set_title('Weighted sample');
```
## Calculating the pull of a distribution
Sometimes we want to calculate the distance in terms of standard deviations from a curve to our measurements. This example creates a random sample of events following a normal distribution and overlies it with the original curve. The pull plot is shown below.
```
# Create the samples
size=5e3
sample = norm.rvs(size=int(size))
values, edges, ex, ey = hep_spt.errorbar_hist(sample, 40, range=(-4, 4), uncert='freq')
centers = (edges[1:] + edges[:-1])/2.
# Extract the PDF values in each center, and make the pull
ref = norm.pdf(centers)
# normalize the reference curve to the total sample size
ref *= size/ref.sum()
pull, perr = hep_spt.pull(values, ey, ref)
# Make the reference to plot (with more points than just the centers of the bins)
rct, step = np.linspace(-4., 4., 1000, retstep=True)
pref = norm.pdf(rct)
# rescale by the bin-width ratio so the dense curve matches the histogram normalization
pref = size*pref/pref.sum()*(edges[1] - edges[0])/step
fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True, gridspec_kw = {'height_ratios':[3, 1]}, figsize=(10, 8))
# Draw the histogram and the reference
ax0.errorbar(centers, values, ey, ex, color='k', ls='none', label='data')
ax0.plot(rct, pref, color='blue', marker='', label='reference')
ax0.set_xlim(-4., 4.)
ax0.set_ylabel('Entries')
ax0.legend(prop={'size': 15})
# Draw the pull and lines for -3, 0 and +3 standard deviations
add_pull_line = lambda v, c: ax1.plot([-4., 4.], [v, v], color=c, marker='')
add_pull_line(0, 'blue')
add_pull_line(-3, 'red')
add_pull_line(+3, 'red')
ax1.errorbar(centers, pull, perr, ex, color='k', ls='none')
ax1.set_ylim(-4, 4)
ax1.set_yticks([-3, 0, 3])
ax1.set_ylabel('Pull');
```
## Plotting efficiencies
Let's suppose we build two histograms from the same sample, one of them after having applied some requirements. The first histogram will follow a gaussian distribution with center at 0 and standard deviation equal to 2, with 1000 entries. The second, with only 100 entries, will have the same center but the standard deviation will be 0.5. The efficiency plot would be calculated as follows:
```
# Create a random sample; `cut` plays the role of the sample after selection
raw = np.random.normal(0, 2, 1000)
cut = np.random.normal(0, 0.5, 100)
# Create the histograms (we do not care about the errors for the moment). Note that the two
# histograms have the same number of bins and range.
h_raw, edges = np.histogram(raw, bins=10, range=(-2, 2))
h_cut, _ = np.histogram(cut, bins=10, range=(-2, 2))
centers = (edges[1:] + edges[:-1])/2.
ex = (edges[1:] - edges[:-1])/2.
# Calculate the efficiency and the errors
# NOTE(review): clopper_pearson_unc presumably returns binomial
# (Clopper-Pearson) intervals, which assumes h_cut is a subset of h_raw -- confirm
eff = h_cut.astype(float)/h_raw
ey = hep_spt.clopper_pearson_unc(h_cut, h_raw)
plt.errorbar(centers, eff, ey, ex, ls='none');
```
## Displaying the correlation between variables on a sample
The hep_spt package also provides a way to easily plot the correlation among the variables on a given sample. Let's create a sample composed by 5 variables, two being independent and three correlated with them, and plot the results. Note that we must specify the minimum and maximum values for the histogram in order to correctly assign the colors, making them universal across our plots.
```
# Create a random sample: a, b independent; c, ab, abc derived from them
a = np.random.uniform(0, 1, 1000)
b = np.random.normal(0, 1, 1000)
c = a + np.random.uniform(0, 1, 1000)
ab = a*b
abc = ab + c
smp = np.array([a, b, c, ab, abc])
# Calculate the correlation matrix of the five variables
corr = np.corrcoef(smp)
# Plot the results; vmin/vmax pin the color scale to the full [-1, +1] range
fig = plt.figure()
hep_spt.corr_hist2d(corr, ['a', 'b', 'c', 'a$\\times$b', 'a$\\times$b + c'], vmin=-1, vmax=+1)
```
## Plotting a 2D profile
When making 2D histograms, it is often useful to plot the profile in X or Y of the given distribution. This can be done as follows:
```
# Create a random sample
s = 10000
x = np.random.normal(0, 1, s)
y = np.random.normal(0, 1, s)
# Make the figure, reserving a narrow side axis for the colorbar
fig = plt.figure()
ax = fig.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
h, xe, ye, im = ax.hist2d(x, y, (40, 40), range=[(-2.5, +2.5), (-2.5, +2.5)])
# Calculate the profile together with the standard deviation of the sample
prof, _, std = hep_spt.profile(x, y, xe, std_type='sample')
eb = ax.errorbar(hep_spt.cfe(xe), prof, xerr=(xe[1] - xe[0])/2., yerr=std, color='teal', ls='none')
# dotted error bars distinguish the sample-std profile from the mean-std one below
eb[-1][1].set_linestyle(':')
# Calculate the profile together with the default standard deviation (that of the mean)
prof, _, std = hep_spt.profile(x, y, xe)
ax.errorbar(hep_spt.cfe(xe), prof, xerr=(xe[1] - xe[0])/2., yerr=std, color='r', ls='none')
fig.colorbar(im, cax=cax, orientation='vertical');
```
|
github_jupyter
|
```
Copyright 2021 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
# Decision Tree on Credit Card Fraud Dataset
## Background
The goal of this learning task is to predict if a credit card transaction is fraudulent or genuine based on a set of anonymized features.
## Source
The raw dataset can be obtained directly from [Kaggle](https://www.kaggle.com/mlg-ulb/creditcardfraud).
In this example, we download the dataset directly from Kaggle using their API.
In order for this to work, you must login into Kaggle and folow [these instructions](https://www.kaggle.com/docs/api) to install your API token on your machine.
## Goal
The goal of this notebook is to illustrate how Snap ML can accelerate training of a decision tree model on this dataset.
## Code
```
cd ../../
CACHE_DIR='cache-dir'
import numpy as np
import time
from datasets import CreditCardFraud
from sklearn.tree import DecisionTreeClassifier
from snapml import DecisionTreeClassifier as SnapDecisionTreeClassifier
from sklearn.metrics import roc_auc_score as score
# download/load the dataset and obtain its train/test split
dataset = CreditCardFraud(cache_dir=CACHE_DIR)
X_train, X_test, y_train, y_test = dataset.get_train_test_split()
print("Number of examples: %d" % (X_train.shape[0]))
print("Number of features: %d" % (X_train.shape[1]))
print("Number of classes: %d" % (len(np.unique(y_train))))
# the dataset is highly imbalanced
labels, sizes = np.unique(y_train, return_counts=True)
print("%6.2f %% of the training transactions belong to class 0" % (sizes[0]*100.0/(sizes[0]+sizes[1])))
print("%6.2f %% of the training transactions belong to class 1" % (sizes[1]*100.0/(sizes[0]+sizes[1])))
# reweight examples inversely to class frequency to compensate the imbalance
from sklearn.utils.class_weight import compute_sample_weight
w_train = compute_sample_weight('balanced', y_train)
w_test = compute_sample_weight('balanced', y_test)
# --- scikit-learn baseline (same max_depth/seed as the Snap ML run below) ---
model = DecisionTreeClassifier(max_depth=16, random_state=42)
t0 = time.time()
model.fit(X_train, y_train, sample_weight=w_train)
t_fit_sklearn = time.time()-t0
score_sklearn = score(y_test, model.predict_proba(X_test)[:,1], sample_weight=w_test)
print("Training time (sklearn): %6.2f seconds" % (t_fit_sklearn))
print("ROC AUC score (sklearn): %.4f" % (score_sklearn))
# --- Snap ML, multi-threaded ---
model = SnapDecisionTreeClassifier(max_depth=16, n_jobs=4, random_state=42)
t0 = time.time()
model.fit(X_train, y_train, sample_weight=w_train)
t_fit_snapml = time.time()-t0
score_snapml = score(y_test, model.predict_proba(X_test)[:,1], sample_weight=w_test)
print("Training time (snapml): %6.2f seconds" % (t_fit_snapml))
print("ROC AUC score (snapml): %.4f" % (score_snapml))
# compare wall-clock fit time and relative change in test AUC
speed_up = t_fit_sklearn/t_fit_snapml
score_diff = (score_snapml-score_sklearn)/score_sklearn
print("Speed-up: %.1f x" % (speed_up))
print("Relative diff. in score: %.4f" % (score_diff))
```
## Disclaimer
Performance results always depend on the hardware and software environment.
Information regarding the environment that was used to run this notebook are provided below:
```
import utils
environment = utils.get_environment()
for k,v in environment.items():
print("%15s: %s" % (k, v))
```
## Record Statistics
Finally, we record the environment and performance statistics for analysis outside of this standalone notebook.
```
import scrapbook as sb
sb.glue("result", {
'dataset': dataset.name,
'n_examples_train': X_train.shape[0],
'n_examples_test': X_test.shape[0],
'n_features': X_train.shape[1],
'n_classes': len(np.unique(y_train)),
'model': type(model).__name__,
'score': score.__name__,
't_fit_sklearn': t_fit_sklearn,
'score_sklearn': score_sklearn,
't_fit_snapml': t_fit_snapml,
'score_snapml': score_snapml,
'score_diff': score_diff,
'speed_up': speed_up,
**environment,
})
```
|
github_jupyter
|
```
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from scipy.optimize import minimize
import networkx as nx
from networkx.generators.random_graphs import erdos_renyi_graph
from IPython.display import Image
from qiskit import QuantumCircuit, execute, Aer
from qiskit.tools.visualization import circuit_drawer, plot_histogram
from quantuminspire.credentials import get_authentication
from quantuminspire.api import QuantumInspireAPI
from quantuminspire.qiskit import QI
QI_URL = 'https://api.quantum-inspire.com/'
```
In this notebook you will apply what you have just learned about cqasm and Quantum Inspire. We will consider a simple quantum algorithm, the quantum approximate optimization algorithm (QAOA), for which you will code the circuit in cqasm and send some jobs to real quantum hardware on the Quantum Inspire platform.
## 1. Recap: QAOA and MAXCUT
### Introduction to the Quantum Approximate Optimization Algorithm
$$\newcommand{\ket}[1]{\left|{#1}\right\rangle}$$
$$\newcommand{\bra}[1]{\left\langle{#1}\right|}$$
$$\newcommand{\braket}[2]{\left\langle{#1}\middle|{#2}\right\rangle}$$
Consider some combinatorial optimization problem with objective function $C:x\rightarrow \mathbb{R}$ acting on $n$-bit strings $x\in \{0,1\}^n$, domain $\mathcal{D} \subseteq \{0,1\}^n$, and objective
\begin{align}
\max_{x \in \mathcal{D}} C(x).
\end{align}
In maximization, an approximate optimization algorithm aims to find a string $x'$ that achieves a desired approximation ratio $\alpha$, i.e.
\begin{equation}
\frac{C(x')}{C^*}\geq \alpha,
\end{equation}
where $C^* = \max_{x \in \mathcal{D}} C(x)$.
In QAOA, such combinatorial optimization problems are encoded into a cost Hamiltonian $H_C$, a mixing Hamiltonian $H_M$ and some initial quantum state $\ket{\psi_0}$. The cost Hamiltonian is diagonal in the computational basis by design, and represents $C$ if its eigenvalues satisfy
\begin{align}
H_C \ket{x} = C(x) \ket{x} \text{ for all } x \in \{0,1\}^n.
\end{align}
The mixing Hamiltonian $H_M$ depends on $\mathcal{D}$ and its structure, and is in the unconstrained case (i.e. when $\mathcal{D}=\{0,1\}^n$) usually taken to be the transverse field Hamiltonian $H_M = \sum_{j} X_j$. Constraints (i.e. when $\mathcal{D}\subset \{0,1\}^n$) can be incorporated directly into the mixing Hamiltonian or are added as a penalty function in the cost Hamiltonian. The initial quantum state $\ket{\psi_0}$ is usually taken as the uniform superposition over all possible states in the domain. $\text{QAOA}_p$, parametrized in $\gamma=(\gamma_0,\gamma_1,\dots,\gamma_{p-1}),\beta=(\beta_0,\beta_1,\dots,\beta_{p-1})$, refers to a level-$p$ QAOA circuit that applies $p$ steps of alternating time evolutions of the cost and mixing Hamiltonians on the initial state. At step $k$, the unitaries of the time evolutions are given by
\begin{align}
U_C(\gamma_k) = e^{-i \gamma_k H_C }, \label{eq:UC} \\
U_M(\beta_k) = e^{-i \beta_k H_M }. \label{eq:UM}
\end{align}
So the final state $\ket{\gamma,\beta}$ of $\text{QAOA}_p$ is given by
\begin{align}
\ket{\gamma,\beta} = \prod_{k=0}^{p-1} U_M(\beta_k) U_C(\gamma_k) \ket{\psi_0}.
\end{align}
The expectation value $ F_p(\gamma,\beta)$ of the cost Hamiltonian for state $\ket{\gamma,\beta}$ is given by
\begin{align}
F_p(\gamma,\beta) =
\bra{\gamma,\beta}H_C\ket{\gamma,\beta},
\label{eq:Fp}
\end{align}
and can be statistically estimated by taking samples of $\ket{\gamma,\beta}$. The achieved approximation ratio (in expectation) of $\text{QAOA}_p$ is then
\begin{equation}
\alpha = \frac{F_p(\gamma,\beta)}{C^*}.
\end{equation}
The parameter combinations of $\gamma,\beta$ are usually found through a classical optimization procedure that uses $F_p(\gamma,\beta)$ as a black-box function to be maximized.
### Example application: MAXCUT
MaxCut is an NP-hard optimisation problem that looks for an optimal 'cut' for a graph $G(V,E)$, in the sense that the cut generates a subset of nodes $S \subset V$ that shares the largest amount of edges with its complement $ V\setminus S$. In slightly modified form (omitting the constant), it has the following objective function
\begin{align}
\max_{s} \frac{1}{2} \sum_{
\langle i,j \rangle \in E} 1-s_i s_j,
\end{align}
where the $s_i\in\{-1,1\}$ are the variables and $i,j$ are the edge indices. This function can be easily converted into an Ising cost Hamiltonian, which takes the form
\begin{align}
H_C = \frac{1}{2}\sum_{\langle i,j\rangle \in E} I-Z_i Z_j.
\end{align}
We use the standard mixing Hamiltonian that sums over all nodes:
\begin{align}
H_M = \sum_{v \in V} X_v.
\end{align}
As the initial state $\ket{\Psi_0}$ we take the uniform superposition, given by
\begin{align}
\ket{\psi_0} = \frac{1}{\sqrt{2^{|V|}}}\sum_{x=0}^{2^{|V|}-1} \ket{x}
\end{align}
The goal of this workshop is to guide you through an implemented code that simulates a small quantum computer running the QAOA algorithm applied to the MAXCUT problem. We will use qiskit as well as cqasm as SDKs. For the sake of run time, you will always run the classical optimization part using the qiskit simulator: it would take too long for our purposes to do the actual function evaluations in the classical optimization step on the hardware.
## 2. Some useful functions and initializations
We first define some useful functions to be used later throughout the code.
```
# Just some function to draw graphs
def draw_cc_graph(G,node_color='b',fig_size=4):
    """Draw graph ``G`` at its stored layout ``G.pos`` with labelled nodes."""
    plt.figure(figsize=(fig_size,fig_size))
    nx.draw(G, G.pos,
            node_color= node_color,
            with_labels=True,
            node_size=1000,font_size=14)
    plt.show()
# Define the objective function
def maxcut_obj(x,G):
    """MaxCut objective for assignment ``x`` on graph ``G``.

    ``x`` is indexed by node (e.g. a bit string); returns MINUS the number
    of cut edges, in agreement with the optimizer (which is a minimizer).
    """
    return -sum(1 for u, v in G.edges() if x[u] != x[v])
# Brute force method
def brute_force(G):
    """Exhaustively score every cut of graph ``G``.

    Returns ``(max_costs_bf, max_sol_bf, costs)``: the best (positive) cut
    size, the corresponding bit string, and the list of cut sizes where
    entry ``i`` belongs to the assignment ``bin(i)`` zero-padded to n bits.
    """
    n = len(G.nodes)
    # (removed a dead `costs = np.zeros(0)` that was immediately overwritten)
    costs = []
    for i in range(2**n):
        # maxcut_obj is negative-cut by convention, so negate to get the cut size
        calc_costs = -1*maxcut_obj(bin(i)[2:].zfill(n), G)
        costs.append(calc_costs)
    max_costs_bf = max(costs)
    index_max = costs.index(max_costs_bf)
    max_sol_bf = bin(index_max)[2:].zfill(n)
    return max_costs_bf, max_sol_bf, costs
# Generating the distribution resulting from random guessing the solution
def random_guessing_dist(G):
    """Distribution of objective values over all 2**n assignments of ``G``.

    Returns a ``defaultdict(int)`` mapping each objective value to the
    number of bit strings attaining it, i.e. the outcome distribution of
    guessing a cut uniformly at random.
    """
    n = len(G.nodes())
    RG_energies_dist = defaultdict(int)
    # The original built an intermediate {bitstring: objective} dict and then
    # re-scored every key, evaluating maxcut_obj twice per assignment; score
    # each assignment once instead (bit strings are unique, so counts match).
    for i in range(2**n):
        RG_energies_dist[maxcut_obj(bin(i)[2:].zfill(n), G)] += 1
    return RG_energies_dist
# Visualize multiple distributions
def plot_E_distributions(E_dists, p, labels):
    """Plot several objective-value histograms side by side as grouped bars.

    E_dists: list of dicts mapping objective value -> count
    p:       QAOA depth (only used in the legend labels)
    labels:  one legend label per distribution
    """
    plt.figure()
    # Track the overall x-range across all distributions for the tick marks below.
    x_min = 1000
    x_max = - 1000
    # Bar width shrinks with the number of distributions so groups don't overlap.
    width = 0.25/len(E_dists)
    for index, E_dist in enumerate(E_dists):
        # Horizontal offset of this distribution's bars within each group.
        pos = width*index-width*len(E_dists)/4
        label = labels[index]
        X_list, Y_list = zip(*E_dist.items())
        # Objective values are stored negatively (minimizer convention); negate for display.
        X = -np.asarray(X_list)
        Y = np.asarray(Y_list)
        # Normalise counts to probabilities with Y/np.sum(Y).
        plt.bar(X + pos, Y/np.sum(Y), color = 'C'+str(index), width = width, label= label+', $p=$'+str(p))
        if np.min(X) < x_min:
            x_min = np.min(X)
        if np.max(X) > x_max:
            x_max = np.max(X)
    plt.xticks(np.arange(x_min, x_max+1))
    plt.legend()
    plt.xlabel('Objective function value')
    plt.ylabel('Probability')
    plt.show()
# Determine the expected objective function value from the random guessing distribution
def energy_random_guessing(RG_energies_dist):
    """Return the count-weighted mean objective value of the random-guessing distribution.

    RG_energies_dist maps objective value -> number of bitstrings attaining it.
    """
    total_count = sum(RG_energies_dist.values())
    # Weighted average. (The original shadowed the function name with a local
    # variable and accumulated both sums in a manual loop.)
    return sum(energy * count for energy, count in RG_energies_dist.items()) / total_count
```
### Test instances
```
# Adjacency matrix of K2: a single edge between nodes 0 and 1.
w2 = np.matrix([
    [0, 1],
    [1, 0]])
G2 = nx.from_numpy_matrix(w2)
# Store layout positions on the graph object; draw_cc_graph reads G.pos.
positions = nx.circular_layout(G2)
G2.pos = positions
print('G2:')
draw_cc_graph(G2)
# Adjacency matrix of K3 (triangle): all three nodes mutually connected.
w3 = np.matrix([
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0]])
G3 = nx.from_numpy_matrix(w3)
positions = nx.circular_layout(G3)
G3.pos = positions
print('G3:')
draw_cc_graph(G3)
```
## 3. Circuit generators
We provide you with an example written in qiskit. You have to write the one for cqasm yourself.
### Qiskit generators
```
class Qiskit(object):
    """QAOA circuit builders for the qiskit SDK.

    All methods are static and are called as Qiskit.method(...).
    (They were previously plain functions in the class body, which works when
    called through the class but breaks on instance access; @staticmethod is
    the idiomatic, backward-compatible form.)
    """

    @staticmethod
    def get_cost_operator_circuit(G, gamma):
        """Cost unitary for angle gamma: a ZZ rotation on every edge of G."""
        N = G.number_of_nodes()
        qc = QuantumCircuit(N, N)
        for i, j in G.edges():
            # CNOT-Rz-CNOT implements the two-qubit ZZ rotation on edge (i, j).
            qc.cx(i, j)
            qc.rz(2 * gamma, j)
            qc.cx(i, j)
        return qc

    @staticmethod
    def get_mixer_operator_circuit(G, beta):
        """Mixing unitary for angle beta: an Rx(2*beta) rotation on every node's qubit."""
        N = G.number_of_nodes()
        qc = QuantumCircuit(N, N)
        for n in G.nodes():
            qc.rx(2 * beta, n)
        return qc

    @staticmethod
    def get_qaoa_circuit(G, beta, gamma):
        """Full depth-p QAOA circuit: Hadamards, p alternating cost/mixer layers, measurement."""
        assert len(beta) == len(gamma)
        p = len(beta)  # number of unitary operations
        N = G.number_of_nodes()
        qc = QuantumCircuit(N, N)
        # first step: apply Hadamards to obtain uniform superposition
        qc.h(range(N))
        # second step: apply p alternating operators
        for i in range(p):
            qc.compose(Qiskit.get_cost_operator_circuit(G, gamma[i]), inplace=True)
            qc.compose(Qiskit.get_mixer_operator_circuit(G, beta[i]), inplace=True)
        # final step: measure the result
        qc.barrier(range(N))
        qc.measure(range(N), range(N))
        return qc
# Show the circuit for the G3 (triangle) graph.
# Depth p = 1 with randomly drawn angles, just to visualise the circuit structure.
p = 1
beta = np.random.rand(p)*2*np.pi
gamma = np.random.rand(p)*2*np.pi
qc = Qiskit.get_qaoa_circuit(G3, beta, gamma)
qc.draw(output='mpl')
```
### cqasm generators
Now it is up to you to apply what we have learned about cqasm to write the script for the cost and mixing operators:
```
class Cqasm(object):
    """QAOA circuit builders emitting cQASM 1.0 source strings.

    All methods are static and are called as Cqasm.method(...).
    """

    @staticmethod
    def get_qasm_header(N_qubits):
        """
        Create cQASM header for `N_qubits` qubits and prepare all in |0>-state.
        """
        header = f"""
version 1.0
qubits {N_qubits}
prep_z q[0:{N_qubits-1}]
"""
        return header

    @staticmethod
    def get_cost_operator(graph, gamma, p=1):
        """
        Create cost operator for given angle `gamma` (layer label `p`).
        """
        layer_list = graph.number_of_edges() * [None]
        for n, (i, j) in enumerate(graph.edges()):
            # CNOT-Rz-CNOT implements the ZZ rotation on edge (i, j).
            layer_list[n] = '\n'.join([f"CNOT q[{i}], q[{j}]",
                                       f"Rz q[{j}], {2*gamma}",
                                       f"CNOT q[{i}], q[{j}]"])
        return f".U_gamma_{p}\n" + '\n'.join(layer_list) + '\n'

    @staticmethod
    def get_mixing_operator(graph, beta, p=1):
        """
        Create mixing operator for given angle `beta` (layer label `p`).
        Use parallel application of single qubit gates.
        """
        U_beta = "{" + ' | '.join([f"Rx q[{i}], {2*beta}" for i in graph.nodes()]) + "}"
        return f".U_beta_{p}\n" + U_beta + '\n'

    @staticmethod
    def get_qaoa_circuit(graph, beta, gamma, n_qubits=5):
        """
        Create full QAOA circuit for given `graph` and angles `beta` and `gamma`.

        n_qubits sizes the qubit register. It defaults to 5 (the Starmon-5 chip
        size, which the original hard-coded) so relabelled graphs whose node
        indices exceed the node count still address valid qubits; pass
        graph.number_of_nodes() to size the register to the graph instead.
        """
        assert len(beta) == len(gamma)
        p = len(beta)  # number of layers
        circuit_str = Cqasm.get_qasm_header(n_qubits)
        # first step: apply Hadamards to obtain uniform superposition
        circuit_str += "{" + ' | '.join([f"H q[{i}]" for i in graph.nodes()]) + "}\n\n"
        # second step: apply p alternating operators
        circuit_str += '\n'.join([Cqasm.get_cost_operator(graph, gamma[i], i+1)
                                  + Cqasm.get_mixing_operator(graph, beta[i], i+1) for i in range(p)])
        # final step: measure the result
        circuit_str += "\n"
        circuit_str += "measure_all"
        return circuit_str
```
## 4. Hybrid-quantum classical optimization
Since QAOA is usually adopted as a hybrid quantum-classical algorithm, we need to construct an outer loop which optimizes the estimated $\bra{\gamma,\beta}H\ket{\gamma,\beta}$.
```
# Black-box function that describes the energy output of the QAOA quantum circuit
def get_black_box_objective(G, p, SDK='qiskit', backend=None, shots=2**10):
    """Return f(theta) estimating the QAOA energy for graph G at depth p.

    theta packs the angles as [beta_1..beta_p, gamma_1..gamma_p].
    SDK selects the execution stack: 'cqasm' (Quantum Inspire) or 'qiskit'.
    Returns the string 'error: SDK not found' for any other SDK value
    (kept for backward compatibility with existing callers).
    """
    if SDK == 'cqasm':
        if not backend:
            backend = 'QX single-node simulator'
        backend_type = qi.get_backend_type_by_name(backend)

        def f(theta):
            # first half is betas, second half is gammas
            beta = theta[:p]
            gamma = theta[p:]
            qc = Cqasm.get_qaoa_circuit(G, beta, gamma)
            result = qi.execute_qasm(qc, backend_type=backend_type, number_of_shots=shots)
            counts = result['histogram']
            # return the energy
            return compute_maxcut_energy(counts, G)
    elif SDK == 'qiskit':
        # BUG FIX: this branch was a separate `if`, so SDK='cqasm' fell through
        # to the `else` below and returned the error string instead of f.
        if not backend:
            backend = 'qasm_simulator'
        backend = Aer.get_backend(backend)

        def f(theta):
            # first half is betas, second half is gammas
            beta = theta[:p]
            gamma = theta[p:]
            qc = Qiskit.get_qaoa_circuit(G, beta, gamma)
            counts = execute(qc, backend, shots=shots).result().get_counts()
            # return the energy
            return compute_maxcut_energy(counts, G)
    else:
        return 'error: SDK not found'
    return f
# Estimate the expectation value based on the circuit output
def compute_maxcut_energy(counts, G):
    """Return the shot-weighted average MAXCUT objective over measured bitstrings.

    counts maps measured bitstring -> number of shots observing it.
    """
    weighted_sum = sum(maxcut_obj(bits, G) * n_shots for bits, n_shots in counts.items())
    total_shots = sum(counts.values())
    return weighted_sum / total_shots
```
## 5. A simple instance on the quantum inspire platform: 2-qubit case
Let us first consider the most simple MAXCUT instance. We have just two nodes, and an optimal cut with objective value 1 would be to place both nodes in its own set.
```
G=G2
max_costs_bf, max_sol_bf,costs = brute_force(G)
print("brute force method best cut: ",max_costs_bf)
print("best string brute force method:",max_sol_bf)
colors = ['red' if x == '0' else 'b' for x in max_sol_bf]
draw_cc_graph(G,node_color = colors)
```
Using qiskit, the circuit would look the following:
```
# Test and show circuit for some beta,gamma
p = 1
beta = np.random.rand(p)*np.pi
gamma = np.random.rand(p)*2*np.pi
qc = Qiskit.get_qaoa_circuit(G,beta, gamma)
qc.draw(output='mpl')
```
Now let's run our hybrid-quantum algorithm simulation using qiskit:
```
# Parameters that can be changed:
p = 1
lb = np.zeros(2*p)
ub = np.hstack([np.full(p, np.pi), np.full(p, 2*np.pi)])
init_point = np.random.uniform(lb, ub, 2*p)
shots = 2**10
optimiser = 'COBYLA'
max_iter = 100
# Training of the parameters beta and gamma
obj = get_black_box_objective(G,p,SDK='qiskit',shots=shots)
# Lower and upper bounds: beta \in {0, pi}, gamma \in {0, 2*pi}
bounds = [lb,ub]
# Maximum number of iterations: 100
res = minimize(obj, init_point, method=optimiser, bounds = bounds, options={'maxiter':max_iter, 'disp': True})
print(res)
#Determine the approximation ratio:
print('Approximation ratio is',-res['fun']/max_costs_bf)
# Extract the optimal values for beta and gamma and run a new circuit with these parameters
optimal_theta = res['x']
qc = Qiskit.get_qaoa_circuit(G, optimal_theta[:p], optimal_theta[p:])
counts = execute(qc,backend = Aer.get_backend('qasm_simulator'),shots=shots).result().get_counts()
plt.bar(counts.keys(), counts.values())
plt.xlabel('String')
plt.ylabel('Count')
plt.show()
RG_dist = random_guessing_dist(G)
# Measurement distribution
E_dist = defaultdict(int)
for k, v in counts.items():
E_dist[maxcut_obj(k,G)] += v
plot_E_distributions([E_dist,RG_dist],p,['Qiskit','random guessing'])
E_random_guessing = energy_random_guessing(RG_dist)
print('Energy from random guessing is', E_random_guessing)
X_list,Y_list = zip(*E_dist.items())
X = -np.asarray(X_list)
Y = np.asarray(Y_list)
print('Probability of measuring the optimal solution is',Y[np.argmax(X)]/shots)
```
Now that we have obtained some good values for $\beta$ and $\gamma$ through classical simulation, let's see what Starmon-5 would give us.
The figure below shows the topology of Starmon-5. Since q0 is not connected to q1, we have to relabel the nodes. Networkx has such an option: by using 'nx.relabel_nodes(G, {1: 2})' we can relabel node 1 as node 2. Since q0 is connected to q2, this does allow us to run our cqasm code on Starmon-5. For qiskit, this step is irrelevant as we have all-to-all connectivity in the simulation.
```
Image(filename='Starmon5.png')
qc_Cqasm = Cqasm.get_qaoa_circuit(nx.relabel_nodes(G, {1: 2}), optimal_theta[:p], optimal_theta[p:])
print(qc_Cqasm)
```
Now we run the Cqasm-circuit on the Starmon-5 Hardware.
```
authentication = get_authentication()
QI.set_authentication(authentication, QI_URL)
qiapi = QuantumInspireAPI(QI_URL, authentication)
result = qiapi.execute_qasm(qc_Cqasm, backend_type=qiapi.get_backend_type('Starmon-5'), number_of_shots=2**10)
counts_QI = result['histogram']
```
Inspecting 'counts_QI', we see that it returns the integer corresponding to the bit string result of the measurement
```
counts_QI
```
Note that we measure more than just the two relevant qubits, since we had the 'measure all' command in the cqasm code. The distribution over the strings looks the following:
```
counts_bin = {}
for k,v in counts_QI.items():
counts_bin[f'{int(k):05b}'] = v
print(counts_bin)
plt.bar(counts_bin.keys(), counts_bin.values())
plt.xlabel('State')
plt.ylabel('Measurement probability')
plt.xticks(rotation='vertical')
plt.show()
```
Let's create another counts dictionary with only the relevant qubits, which are q0 and q2:
```
# Marginalise the 5-qubit histogram down to the two relevant qubits, q0 and q2.
counts_bin_red = defaultdict(float)
for string in counts_bin:
    q0 = string[-1]   # rightmost bit is qubit 0
    q2 = string[-3]   # third bit from the right is qubit 2 (node 1 was relabelled to 2);
                      # the original misleadingly named this local `q1`
    counts_bin_red[(q0 + q2)] += counts_bin[string]
counts_bin_red
```
We now plot all distributions (qiskit, Starmon-5, and random guessing) in a single plot.
```
#Determine the approximation ratio:
print('Approximation ratio on the hardware is',-compute_maxcut_energy(counts_bin_red,G)/max_costs_bf)
# Random guessing distribution
RG_dist = random_guessing_dist(G)
# Measurement distribution
E_dist_S5 = defaultdict(int)
for k, v in counts_bin_red.items():
E_dist_S5[maxcut_obj(k,G)] += v
plot_E_distributions([E_dist,E_dist_S5,RG_dist],p,['Qiskit','Starmon-5','random guessing'])
X_list,Y_list = zip(*E_dist_S5.items())
X = -np.asarray(X_list)
Y = np.asarray(Y_list)
print('Probability of measuring the optimal solution is',Y[np.argmax(X)])
E_random_guessing = energy_random_guessing(RG_dist)
print('Expected approximation ratio random guessing is', -E_random_guessing/max_costs_bf)
```
## 6. Compilation issues: the triangle graph
For the graph with just two nodes we already had some minor compilation issues, but this was easily fixed by relabeling the nodes. We will now consider an example for which relabeling is simply not good enough to get it mapped to the Starmon-5 topology.
```
G=G3
max_costs_bf, max_sol_bf,costs = brute_force(G)
print("brute force method best cut: ",max_costs_bf)
print("best string brute force method:",max_sol_bf)
colors = ['red' if x == '0' else 'b' for x in max_sol_bf]
draw_cc_graph(G,node_color = colors)
```
Due to the topology of Starmon-5 this graph cannot be executed without any SWAPS. Therefore, we ask you to write a new circuit generator that uses SWAPS in order to make the algorithm work with the Starmon-5 topology. Let's also swap back to the original graph configuration, so that we can in the end measure only the qubits that correspond to a node in the graph (this is already written for you)
```
def QAOA_triangle_circuit_cqasm(graph, beta, gamma):
    """Depth-p QAOA cQASM circuit for the triangle graph routed onto Starmon-5.

    The chip has no direct q0-q1 coupling, so the (0, 1) edge is routed through
    q2 with a SWAP pair. Only the graph's own qubits are measured at the end.
    """

    def get_triangle_cost_operator(graph, gamma, p):
        # Cost layer `p`; the edge touching both node 0 and node 1 is swapped via q2 first.
        layer_list = graph.number_of_edges() * [None]
        for n, edge in enumerate(graph.edges()):
            if 0 in edge and 1 in edge:
                layer_list[n] = '\n'.join([f"SWAP q[{edge[0]}], q[2]",
                                           f"CNOT q[2], q[{edge[1]}]",
                                           f"Rz q[{edge[1]}], {2*gamma}",
                                           f"CNOT q[2], q[{edge[1]}]",
                                           f"SWAP q[{edge[0]}], q[2]"])
            else:
                layer_list[n] = '\n'.join([f"CNOT q[{edge[0]}], q[{edge[1]}]",
                                           f"Rz q[{edge[1]}], {2*gamma}",
                                           f"CNOT q[{edge[0]}], q[{edge[1]}]"])
        return f".U_gamma_{p}\n" + '\n'.join(layer_list) + '\n'

    assert len(beta) == len(gamma)
    # BUG FIX: the layer count previously came from the *global* variable `p`;
    # derive it from the angle arrays instead.
    n_layers = len(beta)
    circuit_str = Cqasm.get_qasm_header(5)  # Starmon-5 has 5 physical qubits
    circuit_str += "{" + ' | '.join([f"H q[{i}]" for i in graph.nodes()]) + "}\n\n"
    circuit_str += '\n'.join([get_triangle_cost_operator(graph, gamma[i], i+1)
                              + Cqasm.get_mixing_operator(graph, beta[i], i+1) for i in range(n_layers)])
    circuit_str += "\n"
    # Measure only the qubits that correspond to a node in the graph.
    circuit_str += "{" + ' | '.join([f"measure q[{i}]" for i in graph.nodes()]) + "}\n"
    return circuit_str
```
We now run the same procedure as before to obtain good parameter values
```
# Parameters that can be changed:
p = 1
lb = np.zeros(2*p)
ub = np.hstack([np.full(p, np.pi), np.full(p, 2*np.pi)])
init_point = np.random.uniform(lb, ub, 2*p)
shots = 2**10
optimiser = 'COBYLA'
max_iter = 100
# Training of the parameters beta and gamma
obj = get_black_box_objective(G,p,SDK='qiskit',shots=shots)
# Lower and upper bounds: beta \in {0, pi}, gamma \in {0, 2*pi}
bounds = [lb,ub]
# Maximum number of iterations: 100
res = minimize(obj, init_point, method=optimiser, bounds = bounds,options={'maxiter':max_iter, 'disp': True})
print(res)
#Determine the approximation ratio:
print('Approximation ratio is',-res['fun']/max_costs_bf)
# Extract the optimal values for beta and gamma and run a new circuit with these parameters
optimal_theta = res['x']
qc = Qiskit.get_qaoa_circuit(G, optimal_theta[:p], optimal_theta[p:])
counts = execute(qc,backend = Aer.get_backend('qasm_simulator'),shots=shots).result().get_counts()
# Random guessing distribution
RG_dist = random_guessing_dist(G)
# Measurement distribution
E_dist = defaultdict(int)
for k, v in counts.items():
E_dist[maxcut_obj(k,G)] += v
X_list,Y_list = zip(*E_dist.items())
X = -np.asarray(X_list)
Y = np.asarray(Y_list)
print('Probability of measuring the optimal solution is',Y[np.argmax(X)]/shots)
E_random_guessing = energy_random_guessing(RG_dist)
print('Expected approximation ratio random guessing is', -E_random_guessing/max_costs_bf)
plt.bar(counts.keys(), counts.values())
plt.xlabel('String')
plt.ylabel('Count')
plt.show()
```
Let's run it on Starmon-5 again!
```
# Extract the optimal values for beta and gamma and run a new circuit with these parameters
optimal_theta = res['x']
qasm_circuit = QAOA_triangle_circuit_cqasm(G, optimal_theta[:p], optimal_theta[p:])
qiapi = QuantumInspireAPI(QI_URL, authentication)
result = qiapi.execute_qasm(qasm_circuit, backend_type=qiapi.get_backend_type('Starmon-5'), number_of_shots=shots)
counts = result['histogram']
print(qasm_circuit)
print(result)
counts
counts_bin = {}
for k,v in counts.items():
counts_bin[f'{int(k):03b}'] = v
print(counts_bin)
plt.bar(counts_bin.keys(), counts_bin.values())
plt.xlabel('String')
plt.ylabel('Probability')
plt.show()
#Determine the approximation ratio:
print('Approximation ratio on the hardware is',-compute_maxcut_energy(counts_bin,G)/max_costs_bf)
# Random guessing distribution
RG_dist = random_guessing_dist(G)
# Measurement distribution
E_dist_S5 = defaultdict(int)
for k, v in counts_bin.items():
E_dist_S5[maxcut_obj(k,G)] += v
plot_E_distributions([E_dist,E_dist_S5,RG_dist],p,['Qiskit','Starmon-5','random guessing'])
X_list,Y_list = zip(*E_dist_S5.items())
X = -np.asarray(X_list)
Y = np.asarray(Y_list)
print('Probability of measuring the optimal solution is',Y[np.argmax(X)])
E_random_guessing = energy_random_guessing(RG_dist)
print('Expected approximation ratio random guessing is', -E_random_guessing/max_costs_bf)
```
## 7. More advanced questions
Some questions you could look at:
- What is the performance on other graph instances?
- How scalable is this hardware for larger problem sizes?
- How much can the circuit be optimized for certain graph instances?
- Are the errors perfectly random or is there some correlation?
- Are there tricks to find good parameters?
|
github_jupyter
|
##### Detection and Location Chain
**Abstract**: This hackathon project represents our effort to combine our existing machine learning and photogrammetry efforts and further combine those efforts with both Cloud and Edge based solutions based upon Xilinx FPGA acceleration.
The Trimble team decided that the Xilinx hackathon would provide an excellent opportunity to take the first steps in combining these technologies and learning how to use the varied Xilinx technologies.
Our initial hope was to use a TensorFlow system to provide the machine learning component of our test based on an AWS Ultrascale instance. That technology was unavailable for the hackathon, so during the event we trained a system based on a more standard AWS TensorFlow instance and accessed that instance via Pynq networking.
The Team Trimble is composed of
* Roy Godzdanker – Trimble Product Architect for ICT
* Robert Banefield – Trimble Data Machine Learning Specialist
* Vinod Khare – Trimble ICT Photogrammetry
* Ashish Khare – Trimble Geomatics Photogrammetry
* Young-Jin Lee – Trimble ICT Photogrammetry
* Matt Compton - Trimble ICT Design Engineer
_NOTES_:
1. The TensorFlow system is sitting at an AWS instance. This is the slow and simple one for my debug effort. In the spirit of the hackathon, we started training at the beginning of the night. This implies that its capabilities were not exceptional at the beginning of the night and it will be better as the newly trained net is swapped in in the morning. Further tests back at the ranch will include testing this chain against some of the other theoretical models. The current net underperforms some previous efforts; further exploration is needed here.
2. We also need to explore the TensorFlow element as an edge device. Advances in Xilinx FPGA tools may make that cost competitive with a GPU box.
3. Xilinx HLS looks to be able to add needed acceleration functions but this needs further exploration going forward. We explored the idea of an overlay with Python-controlled DMA; this is very promising.
The following are globals used within this project To Change this to different image set, simply change the images indicated and run through the notebook again.
1. Camera data is sent to the system from a remote repository.
2. The Camera Data is sent to the Pynq to begin processing.
3. The TensorFlow cloud delivers metadata for the images that were transferred to it back to the Pynq via net transfer
4. The Pynq software uses the photogrammetric OpenCV software chain that we wrote to estimate and calculate geometric position. In addition, images are displayed on the HDMI monitor and LCD display so we can see what is going on and to serve as a debug aid
5. The calculated position of the object is returned.
```
## Imports
import cv2
import json
import matplotlib.pyplot as pyplot
import numpy
import matplotlib.patches as patches
import pynq.overlays.base
import pynq.lib.arduino as arduino
import pynq.lib.video as video
import requests
import scipy
import sys
import PIL
## Config
gAWS_TENSORFLOW_INSTANCE = 'http://34.202.159.80'
gCAMERA0_IMAGE = "/home/xilinx/jupyter_notebooks/trimble-mp/CAM2_image_0032.jpg"
gCAMERA1_IMAGE = "/home/xilinx/jupyter_notebooks/trimble-mp/CAM3_image_0032.jpg"
```
Turn on the HDMI coming off the pink board. This is used in a fashion that is different than their primary test notes and may be difficult to complete during the time period. Specifically, the hdmi out is used without the input
```
base = pynq.overlays.base.BaseOverlay("base.bit")
hdmi_in = base.video.hdmi_in
hdmi_out = base.video.hdmi_out
v = video.VideoMode(1920,1080,24)
hdmi_out.configure(v, video.PIXEL_BGR)
hdmi_out.start()
outframe = hdmi_out.newframe()
```
Using Pillow, pull in the chosen image for Camera 0
```
# Read images
image0BGR = cv2.imread(gCAMERA0_IMAGE)
image1BGR = cv2.imread(gCAMERA1_IMAGE)
image0 = image0BGR[...,::-1]
image1 = image1BGR[...,::-1]
```
Do exactly the same for the second image of the overlapping pair from camera 1
To send one of these to the HDMI, we are going to have to reformat it to fit the provided HDMI display
```
# Show image 0 on HDMI
# Need to resize it first
outframe[:] = cv2.resize(image0BGR, (1920, 1080));
hdmi_out.writeframe(outframe)
```
We will also display Young-Jin to the LCD screen. Why ? Because Young Jin does awesome work and deserves to be famous and also because I can
```
## Show image on LCD
# Open LCD object and clear
lcd = arduino.Arduino_LCD18(base.ARDUINO)
lcd.clear()
# Write image to disk
nw = 160
nl = 128
cv2.imwrite("/home/xilinx/small.jpg", cv2.resize(image0BGR, (nw,nl)))
# Display!
lcd.display("/home/xilinx/small.jpg",x_pos=0,y_pos=127,orientation=3,background=[255,255,255])
```
We now need to classify the images. This runs the remote version of TensorFlow on the image to get the bounding box. The following routine wraps this for simplicity. The spun up AWS TensorFlow instance is expecting to be
sent a JPEG and will classify and send back the results as JSON.
The IP address of the spun up AWS instance is given by the global gAWS_TENSORFLOW_INSTANCE which is specified at the
beginning of this note book.
```
def RemoteTensorFlowClassify(image_name_string):
    """PUT the JPEG at image_name_string to the AWS TensorFlow instance.

    Returns the decoded JSON classification response.
    """
    # BUG FIX: the original never closed the file handle; `with` closes it
    # deterministically once the upload completes.
    with open(image_name_string, 'rb') as f:
        r = requests.put(gAWS_TENSORFLOW_INSTANCE, data=f)
    return json.loads(r.content.decode())
```
Actually call the defined function on images from camera 1 and camera 2.
```
# Return the object that camera zero sees with the maximum score.
# BUG FIX: the original kept a `max = 0.0` threshold that was never updated,
# so it returned the *last* detection with a positive score rather than the
# highest-scoring one (and shadowed the builtin `max`). Use max(key=...) with
# default=[] to preserve the original empty-result fallback.
cam0_json_return = RemoteTensorFlowClassify(gCAMERA0_IMAGE)
json0 = max(cam0_json_return["image_detection"]['object'],
            key=lambda det: det['score'], default=[])
json0
# Return the object that camera one sees with the maximum score
cam1_json_return = RemoteTensorFlowClassify(gCAMERA1_IMAGE)
json1 = max(cam1_json_return["image_detection"]['object'],
            key=lambda det: det['score'], default=[])
json1
```
The AWS tensorflow reports the bounding boxes for the required object.
```
def DrawRect(the_json,the_image, x1, x2, y1, y2 ):
    """Show the_image with a red rectangle from (x1, y1) to (x2, y2).

    the_json is currently unused: the bounding box is passed in explicitly
    until the TensorFlow net is fixed (see the commented-out lines below).
    """
    # Currently offline until the TesnorFlow net is fixed
    #x1 = int(the_json["xmin"])
    #y1 = int(the_json["ymin"])
    #x2 = int(the_json["xmax"])
    #y2 = int(the_json["ymax"])
    fig, ax = pyplot.subplots(1)
    ax.imshow(the_image)
    # Width/height derived from the two corners; facecolor 'none' keeps the box hollow.
    rect = patches.Rectangle((x1,y1), (x2-x1), (y2-y1), linewidth = 1 , edgecolor = 'r', facecolor='none')
    ax.add_patch(rect)
    pyplot.show()
## Convert to grayscale
grayImage0 = cv2.cvtColor(image0, cv2.COLOR_RGB2GRAY)
grayImage1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
def IsInsideROI(pt, the_json, x1, x2, y1, y2):
    """Return True iff point pt=(x, y) lies inside the inclusive box [x1,x2] x [y1,y2].

    the_json is currently unused; the ROI is passed in explicitly until the
    TensorFlow net's bounding boxes are trusted again (kept in the signature
    for backward compatibility).
    """
    # Return the chained comparison directly instead of if/else True/False.
    return x1 <= pt[0] <= x2 and y1 <= pt[1] <= y2
## Detect keypoints
Brisk = cv2.BRISK_create()
keyPoints0 = Brisk.detect(grayImage0)
keyPoints1 = Brisk.detect(grayImage1)
## Find keypoints inside ROI
roiKeyPoints0 = numpy.asarray([k for k in keyPoints0 if IsInsideROI(k.pt,json0, 955, 1045, 740, 1275 )])
roiKeyPoints1 = numpy.asarray([k for k in keyPoints1 if IsInsideROI(k.pt,json1, 1335, 1465, 910, 1455 )])
## Compute descriptors for keypoitns inside ROI
[keyPoints0, desc0] = Brisk.compute(grayImage0, roiKeyPoints0);
[keyPoints1, desc1] = Brisk.compute(grayImage1, roiKeyPoints1);
## Find matches of ROI keypoints
BF = cv2.BFMatcher()
matches = BF.match(desc0, desc1)
## Extract pixel coordinates from matched keypoints
x_C0 = numpy.asarray([keyPoints0[match.queryIdx].pt for match in matches])
x_C1 = numpy.asarray([keyPoints1[match.trainIdx].pt for match in matches])
```
Full mesh triangularization is offline until we reconcile the camera calibration. There was an issue discovered during the hackathon that needs to be examined in the lab setup, as the code below will not function until we reconcile the camera calibration config.
```
# Triangulate points
# We need projection matrices for camera 0 and camera 1.
# Calibration values appear to be in mm with a 3.45 um pixel pitch, hence the
# / 3.45E-3 conversion to pixels -- TODO confirm against the calibration report.
f = 8.350589e+000 / 3.45E-3
cx = -3.922872e-002 / 3.45E-3
cy = -1.396717e-004 / 3.45E-3
K_C0 = numpy.transpose(numpy.asarray([[f, 0, 0], [0, f, 0], [cx, cy, 1]]))
k_C0 = numpy.asarray([1.761471e-003, -2.920431e-005, -8.341438e-005, -9.470247e-006, -1.140118e-007])
[R_C0, J] = cv2.Rodrigues(numpy.asarray([1.5315866633, 2.6655790203, -0.0270418317]))
T_C0 = numpy.transpose(numpy.asarray([[152.9307390952, 260.3066944976, 351.7405264829]])) * 1000
f = 8.259861e+000 / 3.45E-3
cx = 8.397453e-002 / 3.45E-3
cy = -2.382030e-002 / 3.45E-3
# BUG FIX: camera 1's intrinsics were previously assigned to K_C0 (overwriting
# camera 0's matrix) and its distortion coefficients to K_C1, so P_C1 below was
# built from a 5-vector and numpy.dot failed.
K_C1 = numpy.transpose(numpy.asarray([[f, 0, 0], [0, f, 0], [cx, cy, 1]]))
k_C1 = numpy.asarray([1.660053e-003, -2.986269e-005, -7.461966e-008, -2.247960e-004, -2.290483e-006])
[R_C1, J] = cv2.Rodrigues(numpy.asarray([1.4200199799, -2.6113619450, -0.1371719827]))
T_C1 = numpy.transpose(numpy.asarray([[146.8718203137, 259.9661037150, 351.5832136366]])) * 1000
P_C0 = numpy.dot(K_C0, numpy.concatenate((R_C0, T_C0), 1))
P_C1 = numpy.dot(K_C1, numpy.concatenate((R_C1, T_C1), 1))
# Compute 3D coordinates of detected points
X_C0 = cv2.convertPointsFromHomogeneous(numpy.transpose(cv2.triangulatePoints(P_C0, P_C1, numpy.transpose(x_C0), numpy.transpose(x_C1))))
```
|
github_jupyter
|
# 6 - Pivot Table
In this sixth step I'll show you how to reshape your data using a pivot table.
This will provide a nice condensed version.
We'll reshape the data so that we can see how much each customer spent in each category.
```
import pandas as pd
import numpy as np
df = pd.read_json("customer_data.json", convert_dates=False)
df.head()
```
Taking a quick look using the <code>.head()</code> function, we can see all of the columns, and the first few rows of the data.
For this example, let's just use the first 50 rows of the data.
```
# Work on a copy of the first 50 rows so the later column assignment
# (df_subset["amount"] = ...astype(float)) doesn't trigger pandas'
# SettingWithCopyWarning on a view of `df`.
df_subset = df[0:50].copy()
df_subset
```
Let's take a look at the types for each column using the <code>.dtypes</code> method.
```
df_subset.dtypes
```
The amount column should be a numeric type, but Pandas thinks it's an <code>object</code>. Let's go ahead and change that column to a numeric <code>float</code> type using the <code>.astype()</code> method.
```
df_subset["amount"] = df_subset["amount"].astype(float)
df_subset.dtypes
```
Now we can see that the <code>amount</code> column is a numeric <code>float</code> type.
We don't need all of the columns, just the <code>customer_id</code>, <code>category</code>, and <code>amount</code> columns.
Here's what that smaller dataframe would look like.
```
df_subset[["customer_id", "category", "amount"]]
```
Let's finish up by creating our <code>pivot_table</code>.
We'll set the index to <code>customer_id</code>, the columns to <code>category</code>, and the values to <code>amount</code>. This will reshape the data so that we can see how much each customer spent in each category. Let's create this using a new dataframe called <code>df_pivot</code>.
The final important point before we reshape the data is the <code>aggfunc</code> parameter. Since customers probably made multiple purchases in the same categories, we'll want to sum all of those purchases. We'll do that using Numpy's <code>sum</code> method. I've shortened the Numpy library name to <code>np</code>, so that's why I've set the <code>aggfunc</code> to <code>np.sum</code>.
```
# pivot table; aggregation function "sum"
df_pivot = df_subset.pivot_table(index="customer_id", columns="category", values="amount", aggfunc=np.sum)
print(df_pivot)
```
Now we have a new dataframe showing how much each customer spent in each category.
There's a lot of <code>NaN</code> values because a lot of customers didn't spend any money in certain categories.
You should also note that there's a <code>house</code> and <code>household</code> column. We need to clean the data so that we have consistent strings before we reshape it. Look back at <strong>Step 3 - Consistent Strings</strong> to help you with that.
|
github_jupyter
|
```
#export
from fastai.basics import *
from fastai.callback.progress import *
from fastai.text.data import TensorText
from fastai.tabular.all import TabularDataLoaders, Tabular
from fastai.callback.hook import total_params
#hide
from nbdev.showdoc import *
#default_exp callback.wandb
```
# Wandb
> Integration with [Weights & Biases](https://docs.wandb.com/library/integrations/fastai)
First thing first, you need to install wandb with
```
pip install wandb
```
Create a free account then run
```
wandb login
```
in your terminal. Follow the link to get an API token that you will need to paste, then you're all set!
```
#export
import wandb
from wandb.wandb_config import ConfigError
#export
class WandbCallback(Callback):
"Saves model topology, losses & metrics"
toward_end,remove_on_fetch,run_after = True,True,FetchPredsCallback
# Record if watch has been called previously (even in another instance)
_wandb_watch_called = False
def __init__(self, log="gradients", log_preds=True, log_model=True, log_dataset=False, dataset_name=None, valid_dl=None, n_preds=36, seed=12345):
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError('You must call wandb.init() before WandbCallback()')
# W&B log step
self._wandb_step = wandb.run.step - 1 # -1 except if the run has previously logged data (incremented at each batch)
self._wandb_epoch = 0 if not(wandb.run.step) else math.ceil(wandb.run.summary['epoch']) # continue to next epoch
store_attr(self, 'log,log_preds,log_model,log_dataset,dataset_name,valid_dl,n_preds,seed')
def before_fit(self):
"Call watch method to log model topology, gradients & weights"
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
# Log config parameters
log_config = self.learn.gather_args()
_format_config(log_config)
try:
wandb.config.update(log_config, allow_val_change=True)
except Exception as e:
print(f'WandbCallback could not log config parameters -> {e}')
if not WandbCallback._wandb_watch_called:
WandbCallback._wandb_watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log)
# log dataset
assert isinstance(self.log_dataset, (str, Path, bool)), 'log_dataset must be a path or a boolean'
if self.log_dataset is True:
if Path(self.dls.path) == Path('.'):
print('WandbCallback could not retrieve the dataset path, please provide it explicitly to "log_dataset"')
self.log_dataset = False
else:
self.log_dataset = self.dls.path
if self.log_dataset:
self.log_dataset = Path(self.log_dataset)
assert self.log_dataset.is_dir(), f'log_dataset must be a valid directory: {self.log_dataset}'
metadata = {'path relative to learner': os.path.relpath(self.log_dataset, self.learn.path)}
log_dataset(path=self.log_dataset, name=self.dataset_name, metadata=metadata)
# log model
if self.log_model and not hasattr(self, 'save_model'):
print('WandbCallback requires use of "SaveModelCallback" to log best model')
self.log_model = False
if self.log_preds:
try:
if not self.valid_dl:
#Initializes the batch watched
wandbRandom = random.Random(self.seed) # For repeatability
self.n_preds = min(self.n_preds, len(self.dls.valid_ds))
idxs = wandbRandom.sample(range(len(self.dls.valid_ds)), self.n_preds)
if isinstance(self.dls, TabularDataLoaders):
test_items = getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True, process=False)
else:
test_items = [getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[i] for i in idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True)
self.learn.add_cb(FetchPredsCallback(dl=self.valid_dl, with_input=True, with_decoded=True))
except Exception as e:
self.log_preds = False
print(f'WandbCallback was not able to prepare a DataLoader for logging prediction samples -> {e}')
def after_batch(self):
    "Log hyper-parameters and training loss"
    # Only training batches are logged; validation batches are skipped.
    if not self.training:
        return
    self._wandb_step += 1
    self._wandb_epoch += 1 / self.n_iter
    # Flatten per-parameter-group hypers into flat keys: 'lr_0', 'mom_1', ...
    hypers = {}
    for group_idx, group in enumerate(self.opt.hypers):
        for hyper_name, hyper_val in group.items():
            hypers[f'{hyper_name}_{group_idx}'] = hyper_val
    payload = {'epoch': self._wandb_epoch, 'train_loss': self.smooth_loss, 'raw_loss': self.loss}
    payload.update(hypers)
    wandb.log(payload, step=self._wandb_step)
def after_epoch(self):
    "Log validation loss and custom metrics & log prediction samples"
    # Per-batch fractional increments can drift; snap epoch to the nearest int.
    self._wandb_epoch = round(self._wandb_epoch)
    wandb.log({'epoch': self._wandb_epoch}, step=self._wandb_step)
    # Log sample predictions
    if self.log_preds:
        try:
            inp, preds, targs, out = self.learn.fetch_preds.preds
            batch = tuplify(inp) + tuplify(targs)
            x, y, its, outs = self.valid_dl.show_results(batch, out, show=False, max_n=self.n_preds)
            wandb.log(wandb_process(x, y, its, outs), step=self._wandb_step)
        except Exception as e:
            # Never abort training over a logging failure; just stop trying.
            self.log_preds = False
            print(f'WandbCallback was not able to get prediction samples -> {e}')
    excluded = ['train_loss', 'epoch', 'time']
    metrics = {}
    for metric_name, metric_val in zip(self.recorder.metric_names, self.recorder.log):
        if metric_name not in excluded:
            metrics[metric_name] = metric_val
    wandb.log(metrics, step=self._wandb_step)
def after_fit(self):
    "Upload the best saved model (if any) and flush the final logged step"
    if self.log_model:
        if self.save_model.last_saved_path is None:
            print('WandbCallback could not retrieve a model to upload')
        else:
            # Attach the final recorded metrics (minus bookkeeping columns)
            # as artifact metadata.
            excluded = ['train_loss', 'epoch', 'time']
            metadata = {name: value
                        for name, value in zip(self.recorder.metric_names, self.recorder.log)
                        if name not in excluded}
            log_model(self.save_model.last_saved_path, metadata=metadata)
    self.run = True
    if self.log_preds:
        self.remove_cb(FetchPredsCallback)
    wandb.log({})  # ensure sync of last step
```
Optionally logs weights and/or gradients depending on `log` (can be "gradients", "parameters", "all" or None), and sample predictions if `log_preds=True`; these come from `valid_dl` or a random sample of the validation set (determined by `seed`). `n_preds` predictions are logged in this case.
If used in combination with `SaveModelCallback`, the best model is saved as well (can be deactivated with `log_model=False`).
Datasets can also be tracked:
* if `log_dataset` is `True`, tracked folder is retrieved from `learn.dls.path`
* `log_dataset` can explicitly be set to the folder to track
* the name of the dataset can explicitly be given through `dataset_name`, otherwise it is set to the folder name
* *Note: the subfolder "models" is always ignored*
For custom scenarios, you can also manually use functions `log_dataset` and `log_model` to respectively log your own datasets and models.
```
#export
def _make_plt(img):
    "Make plot to image resolution"
    # from https://stackoverflow.com/a/13714915
    dpi = 100
    fig = plt.figure(frameon=False, dpi=dpi)
    height, width = img.shape[:2]
    # Size the figure so one pixel of `img` maps to one pixel of the figure.
    fig.set_size_inches(width / dpi, height / dpi)
    # Axes spanning the whole canvas, with no ticks or frame drawn.
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    return fig, ax
#export
def _format_config(log_config):
"Format config parameters before logging them"
for k,v in log_config.items():
if callable(v):
if hasattr(v,'__qualname__') and hasattr(v,'__module__'): log_config[k] = f'{v.__module__}.{v.__qualname__}'
else: log_config[k] = str(v)
if isinstance(v, slice): log_config[k] = dict(slice_start=v.start, slice_step=v.step, slice_stop=v.stop)
#export
def _format_metadata(metadata):
"Format metadata associated to artifacts"
for k,v in metadata.items(): metadata[k] = str(v)
#export
def log_dataset(path, name=None, metadata=None):
    """Log a dataset folder as a W&B artifact.

    :param path: directory to upload; every entry except a top-level
        "models" folder is added to the artifact.
    :param name: artifact name; defaults to the folder name.
    :param metadata: optional dict of metadata; values are stringified.
    :raises ValueError: if wandb.init was not called or path is not a directory.
    """
    # Check if wandb.init has been called in case datasets are logged manually
    if wandb.run is None:
        raise ValueError('You must call wandb.init() before log_dataset()')
    path = Path(path)
    if not path.is_dir():
        # Bug fix: `raise f'...'` raised a bare string, which is itself a
        # TypeError (exceptions must derive from BaseException).
        raise ValueError(f'path must be a valid directory: {path}')
    name = ifnone(name, path.name)
    # Bug fix: the old `metadata={}` default was mutated in place by
    # _format_metadata and therefore shared across calls; copy instead.
    metadata = dict(metadata) if metadata is not None else {}
    _format_metadata(metadata)
    artifact_dataset = wandb.Artifact(name=name, type='dataset', description='raw dataset', metadata=metadata)
    # log everything except "models" folder
    for p in path.ls():
        if p.is_dir():
            if p.name != 'models':
                artifact_dataset.add_dir(str(p.resolve()), name=p.name)
        else:
            artifact_dataset.add_file(str(p.resolve()))
    wandb.run.use_artifact(artifact_dataset)
#export
def log_model(path, name=None, metadata=None):
    """Log a trained model file as a W&B artifact.

    :param path: path to the model file to upload.
    :param name: artifact name; defaults to 'run-<wandb run id>-model'.
    :param metadata: optional dict of metadata; values are stringified.
    :raises ValueError: if wandb.init was not called or path is not a file.
    """
    if wandb.run is None:
        raise ValueError('You must call wandb.init() before log_model()')
    path = Path(path)
    if not path.is_file():
        # Bug fix: `raise f'...'` raised a bare string, which is itself a
        # TypeError (exceptions must derive from BaseException).
        raise ValueError(f'path must be a valid file: {path}')
    name = ifnone(name, f'run-{wandb.run.id}-model')
    # Bug fix: avoid mutating a shared `metadata={}` default in place.
    metadata = dict(metadata) if metadata is not None else {}
    _format_metadata(metadata)
    artifact_model = wandb.Artifact(name=name, type='model', description='trained model', metadata=metadata)
    artifact_model.add_file(str(path.resolve()))
    wandb.run.log_artifact(artifact_model)
#export
@typedispatch
def wandb_process(x:TensorImage, y, samples, outs):
    "Process `sample` and `out` depending on the type of `x/y`"
    # Generic image fallback: logs the raw input plus the prediction and the
    # ground truth rendered on top of the input image.
    res_input, res_pred, res_label = [],[],[]
    for s,o in zip(samples, outs):
        # CHW -> HWC so wandb/matplotlib see a standard image layout.
        img = s[0].permute(1,2,0)
        res_input.append(wandb.Image(img, caption='Input data'))
        for t, capt, res in ((o[0], "Prediction", res_pred), (s[1], "Ground Truth", res_label)):
            fig, ax = _make_plt(img)
            # Superimpose label or prediction to input image
            ax = img.show(ctx=ax)
            ax = t.show(ctx=ax)
            res.append(wandb.Image(fig, caption=capt))
            # Close the figure so matplotlib state doesn't leak across samples.
            plt.close(fig)
    return {"Inputs":res_input, "Predictions":res_pred, "Ground Truth":res_label}
#export
@typedispatch
def wandb_process(x:TensorImage, y:(TensorCategory,TensorMultiCategory), samples, outs):
    "Log classification samples as images captioned with target and prediction."
    images = []
    for sample, out in zip(samples, outs):
        caption = f'Ground Truth: {sample[1]}\nPrediction: {out[0]}'
        images.append(wandb.Image(sample[0].permute(1,2,0), caption=caption))
    return {"Prediction Samples": images}
#export
@typedispatch
def wandb_process(x:TensorImage, y:TensorMask, samples, outs):
    "Log segmentation samples as images with prediction/ground-truth mask overlays."
    codes = y.get_meta('codes')
    # Map mask integer values to their class names when codes are available.
    class_labels = None if codes is None else {i: f'{c}' for i, c in enumerate(codes)}
    logged = []
    for sample, out in zip(samples, outs):
        img = sample[0].permute(1,2,0)
        masks = {}
        for mask_tensor, caption in ((out[0], "Prediction"), (sample[1], "Ground Truth")):
            mask_info = {'mask_data': mask_tensor.numpy().astype(np.uint8)}
            if class_labels:
                mask_info['class_labels'] = class_labels
            masks[caption] = mask_info
        logged.append(wandb.Image(img, masks=masks))
    return {"Prediction Samples": logged}
#export
@typedispatch
def wandb_process(x:TensorText, y:(TensorCategory,TensorMultiCategory), samples, outs):
    "Log text classification samples as a W&B table of text/target/prediction."
    rows = []
    for sample, out in zip(samples, outs):
        rows.append([sample[0], sample[1], out[0]])
    return {"Prediction Samples": wandb.Table(data=rows, columns=["Text", "Target", "Prediction"])}
#export
@typedispatch
def wandb_process(x:Tabular, y:Tabular, samples, outs):
    "Log tabular samples, appending one '<target>_pred' column per target."
    df = x.all_cols
    for target_name in x.y_names:
        df[target_name + '_pred'] = y[target_name].values
    return {"Prediction Samples": wandb.Table(dataframe=df)}
```
## Example of use:
Once you have defined your `Learner`, before you call `fit` or `fit_one_cycle`, you need to initialize wandb:
```
import wandb
wandb.init()
```
To use Weights & Biases without an account, you can call `wandb.init(anonymous='allow')`.
Then you add the callback to your `learner` or call to `fit` methods, potentially with `SaveModelCallback` if you want to save the best model:
```
from fastai.callback.wandb import *
# To log only during one training phase
learn.fit(..., cbs=WandbCallback())
# To log continuously for all training phases
learn = learner(..., cbs=WandbCallback())
```
Datasets and models can be tracked through the callback or directly through `log_model` and `log_dataset` functions.
For more details, refer to [W&B documentation](https://docs.wandb.com/library/integrations/fastai).
```
#hide
#slow
# Smoke test: train on MNIST_TINY for one epoch with WandbCallback attached,
# running W&B offline so no account or network is needed.
from fastai.vision.all import *
path = untar_data(URLs.MNIST_TINY)
items = get_image_files(path)
tds = Datasets(items, [PILImageBW.create, [parent_label, Categorize()]], splits=GrandparentSplitter()(items))
dls = tds.dataloaders(after_item=[ToTensor(), IntToFloatTensor()])
os.environ['WANDB_MODE'] = 'dryrun' # run offline
wandb.init(anonymous='allow')
learn = cnn_learner(dls, resnet18, loss_func=CrossEntropyLossFlat(), cbs=WandbCallback(log_model=False))
learn.fit(1)
# add more data from a new learner on same run
learn = cnn_learner(dls, resnet18, loss_func=CrossEntropyLossFlat(), cbs=WandbCallback(log_model=False))
learn.fit(1, lr=slice(0.05))
#export
_all_ = ['wandb_process']
```
## Export -
```
#hide
from nbdev.export import *
notebook2script()
```
|
github_jupyter
|
# Detecting Payment Card Fraud
In this section, we'll look at a credit card fraud detection dataset, and build a binary classification model that can identify transactions as either fraudulent or valid, based on provided, *historical* data. In a [2016 study](https://nilsonreport.com/upload/content_promo/The_Nilson_Report_10-17-2016.pdf), it was estimated that credit card fraud was responsible for over 20 billion dollars in loss, worldwide. Accurately detecting cases of fraud is an ongoing area of research.
<img src=notebook_ims/fraud_detection.png width=50% />
### Labeled Data
The payment fraud data set (Dal Pozzolo et al. 2015) was downloaded from [Kaggle](https://www.kaggle.com/mlg-ulb/creditcardfraud/data). This has features and labels for thousands of credit card transactions, each of which is labeled as fraudulent or valid. In this notebook, we'd like to train a model based on the features of these transactions so that we can predict risky or fraudulent transactions in the future.
### Binary Classification
Since we have true labels to aim for, we'll take a **supervised learning** approach and train a binary classifier to sort data into one of our two transaction classes: fraudulent or valid. We'll train a model on training data and see how well it generalizes on some test data.
The notebook will be broken down into a few steps:
* Loading and exploring the data
* Splitting the data into train/test sets
* Defining and training a LinearLearner, binary classifier
* Making improvements on the model
* Evaluating and comparing model test performance
### Making Improvements
A lot of this notebook will focus on making improvements, as discussed in [this SageMaker blog post](https://aws.amazon.com/blogs/machine-learning/train-faster-more-flexible-models-with-amazon-sagemaker-linear-learner/). Specifically, we'll address techniques for:
1. **Tuning a model's hyperparameters** and aiming for a specific metric, such as high recall or precision.
2. **Managing class imbalance**, which is when we have many more training examples in one class than another (in this case, many more valid transactions than fraudulent).
---
First, import the usual resources.
```
import io
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import boto3
import sagemaker
from sagemaker import get_execution_role
%matplotlib inline
```
I'm storing my **SageMaker variables** in the next cell:
* sagemaker_session: The SageMaker session we'll use for training models.
* bucket: The name of the default S3 bucket that we'll use for data storage.
* role: The IAM role that defines our data and model permissions.
```
# sagemaker session, role
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# S3 bucket name
bucket = sagemaker_session.default_bucket()
```
## Loading and Exploring the Data
Next, I am loading the data and unzipping the data in the file `creditcardfraud.zip`. This directory will hold one csv file of all the transaction data, `creditcard.csv`.
As in previous notebooks, it's important to look at the distribution of data since this will inform how we develop a fraud detection model. We'll want to know: How many data points we have to work with, the number and type of features, and finally, the distribution of data over the classes (valid or fraudulent).
```
# only have to run once
!wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c534768_creditcardfraud/creditcardfraud.zip
!unzip creditcardfraud
# read in the csv file
local_data = 'creditcard.csv'
# print out some data
transaction_df = pd.read_csv(local_data)
print('Data shape (rows, cols): ', transaction_df.shape)
print()
transaction_df.head()
```
### EXERCISE: Calculate the percentage of fraudulent data
Take a look at the distribution of this transaction data over the classes, valid and fraudulent.
Complete the function `fraudulent_percentage`, below. Count up the number of data points in each class and calculate the *percentage* of the data points that are fraudulent.
```
transaction_df[transaction_df['Class']==1].count()[0]
# Calculate the fraction of data points that are fraudulent
def fraudulent_percentage(transaction_df):
    '''Calculate the fraction of all data points that have a 'Class' label of 1; fraudulent.
    :param transaction_df: Dataframe of all transaction data points; has a column 'Class'
    :return: A fractional percentage of fraudulent data points/all points
    '''
    # Count rows directly instead of `count()[0]`, which depends on the
    # position of the first column and silently skips NaNs in that column.
    frauds = (transaction_df['Class'] == 1).sum()
    total = len(transaction_df)
    return frauds / total
```
Test out your code by calling your function and printing the result.
```
# call the function to calculate the fraud percentage
fraud_percentage = fraudulent_percentage(transaction_df)
print('Fraudulent percentage = ', fraud_percentage)
print('Total # of fraudulent pts: ', fraud_percentage*transaction_df.shape[0])
print('Out of (total) pts: ', transaction_df.shape[0])
```
### EXERCISE: Split into train/test datasets
In this example, we'll want to evaluate the performance of a fraud classifier; training it on some training data and testing it on *test data* that it did not see during the training process. So, we'll need to split the data into separate training and test sets.
Complete the `train_test_split` function, below. This function should:
* Shuffle the transaction data, randomly
* Split it into two sets according to the parameter `train_frac`
* Get train/test features and labels
* Return the tuples: (train_features, train_labels), (test_features, test_labels)
```
from sklearn import model_selection
# split into train/test
def train_test_split(transaction_df, train_frac= 0.7, seed=1):
    '''Shuffle the data and randomly split into train and test sets;
    separate the class labels (the column in transaction_df) from the features.
    :param df: Dataframe of all credit card transaction data
    :param train_frac: The decimal fraction of data that should be training data
    :param seed: Random seed for shuffling and reproducibility, default = 1
    :return: Two tuples (in order): (train_features, train_labels), (test_features, test_labels)
    '''
    # Features are every column except the label; labels come from 'Class'.
    features = transaction_df.drop('Class', axis=1).astype('float32').to_numpy()
    labels = transaction_df['Class'].astype('float32').to_numpy()
    # sklearn shuffles by default; random_state keeps the split reproducible.
    train_features, test_features, train_labels, test_labels = model_selection.train_test_split(
        features, labels, test_size=1 - train_frac, random_state=seed)
    return (train_features, train_labels), (test_features, test_labels)
```
### Test Cell
In the cells below, I'm creating the train/test data and checking to see that result makes sense. The tests below test that the above function splits the data into the expected number of points and that the labels are indeed, class labels (0, 1).
```
# get train/test data
(train_features, train_labels), (test_features, test_labels) = train_test_split(transaction_df, train_frac=0.7)
train_labels
# manual test
# for a split of 0.7:0.3 there should be ~2.33x as many training as test pts
print('Training data pts: ', len(train_features))
print('Test data pts: ', len(test_features))
print()
# take a look at first item and see that it aligns with first row of data
print('First item: \n', train_features[0])
print('Label: ', train_labels[0])
print()
# test split
assert len(train_features) > 2.333*len(test_features), \
'Unexpected number of train/test points for a train_frac=0.7'
# test labels
assert np.all(train_labels)== 0 or np.all(train_labels)== 1, \
'Train labels should be 0s or 1s.'
assert np.all(test_labels)== 0 or np.all(test_labels)== 1, \
'Test labels should be 0s or 1s.'
print('Tests passed!')
```
---
# Modeling
Now that you've uploaded your training data, it's time to define and train a model!
In this notebook, you'll define and train the SageMaker, built-in algorithm, [LinearLearner](https://sagemaker.readthedocs.io/en/stable/linear_learner.html).
A LinearLearner has two main applications:
1. For regression tasks in which a linear line is fit to some data points, and you want to produce a predicted output value given some data point (example: predicting house prices given square area).
2. For binary classification, in which a line is separating two classes of data and effectively outputs labels; either 1 for data that falls above the line or 0 for points that fall on or below the line.
<img src='notebook_ims/linear_separator.png' width=50% />
In this case, we'll be using it for case 2, and we'll train it to separate data into our two classes: valid or fraudulent.
### EXERCISE: Create a LinearLearner Estimator
You've had some practice instantiating built-in models in SageMaker. All estimators require some constructor arguments to be passed in. See if you can complete this task, instantiating a LinearLearner estimator, using only the [LinearLearner documentation](https://sagemaker.readthedocs.io/en/stable/linear_learner.html) as a resource. This takes in a lot of arguments, but not all are required. My suggestion is to start with a simple model, utilizing default values where applicable. Later, we will discuss some specific hyperparameters and their use cases.
#### Instance Types
It is suggested that you use instances that are available in the free tier of usage: `'ml.c4.xlarge'` for training and `'ml.t2.medium'` for deployment.
```
# import LinearLearner
from sagemaker import LinearLearner
# instantiate LinearLearner
predictor = LinearLearner(role=role,
instance_count=1,
instance_type='ml.c4.xlarge',
predictor_type = 'binary_classifier',
)
```
### EXERCISE: Convert data into a RecordSet format
Next, prepare the data for a built-in model by converting the train features and labels into numpy array's of float values. Then you can use the [record_set function](https://sagemaker.readthedocs.io/en/stable/linear_learner.html#sagemaker.LinearLearner.record_set) to format the data as a RecordSet and prepare it for training!
```
# create RecordSet of training data
formatted_train_data = predictor.record_set(train=train_features, labels=train_labels, channel='train')
```
### EXERCISE: Train the Estimator
After instantiating your estimator, train it with a call to `.fit()`, passing in the formatted training data.
```
%%time
# train the estimator on formatted training data
predictor.fit(formatted_train_data)
```
### EXERCISE: Deploy the trained model
Deploy your model to create a predictor. We'll use this to make predictions on our test data and evaluate the model.
```
%%time
# deploy and create a predictor
linear_predictor = predictor.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
```
---
# Evaluating Your Model
Once your model is deployed, you can see how it performs when applied to the test data.
According to the deployed [predictor documentation](https://sagemaker.readthedocs.io/en/stable/linear_learner.html#sagemaker.LinearLearnerPredictor), this predictor expects an `ndarray` of input features and returns a list of Records.
> "The prediction is stored in the "predicted_label" key of the `Record.label` field."
Let's first test our model on just one test point, to see the resulting list.
```
# test one prediction
test_x_np = test_features.astype('float32')
result = linear_predictor.predict(test_x_np[0])
print(result)
```
### Helper function for evaluation
The provided function below, takes in a deployed predictor, some test features and labels, and returns a dictionary of metrics; calculating false negatives and positives as well as recall, precision, and accuracy.
```
# code to evaluate the endpoint on test data
# returns a variety of model metrics
def evaluate(predictor, test_features, test_labels, verbose=True):
    """
    Evaluate a model on a test set given the prediction endpoint.
    Return binary classification metrics.
    :param predictor: A prediction endpoint
    :param test_features: Test features
    :param test_labels: Class labels for test data
    :param verbose: If True, prints a table of all performance metrics
    :return: A dictionary of performance metrics.
    """
    # Query the endpoint in 100 batches to keep request payloads small.
    batch_predictions = [predictor.predict(batch) for batch in np.array_split(test_features, 100)]
    # Each Record stores its class in label['predicted_label']; flatten all batches.
    per_batch = []
    for batch in batch_predictions:
        per_batch.append(np.array([record.label['predicted_label'].float32_tensor.values[0]
                                   for record in batch]))
    test_preds = np.concatenate(per_batch)
    # Confusion-matrix counts (labels and predictions are 0/1 valued).
    tp = np.logical_and(test_labels, test_preds).sum()
    fp = np.logical_and(1 - test_labels, test_preds).sum()
    tn = np.logical_and(1 - test_labels, 1 - test_preds).sum()
    fn = np.logical_and(test_labels, 1 - test_preds).sum()
    # Binary classification metrics derived from the counts above.
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    if verbose:
        print(pd.crosstab(test_labels, test_preds, rownames=['actual (row)'], colnames=['prediction (col)']))
        print("\n{:<11} {:.3f}".format('Recall:', recall))
        print("{:<11} {:.3f}".format('Precision:', precision))
        print("{:<11} {:.3f}".format('Accuracy:', accuracy))
        print()
    return {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn,
            'Precision': precision, 'Recall': recall, 'Accuracy': accuracy}
```
### Test Results
The cell below runs the `evaluate` function.
The code assumes that you have a defined `predictor` and `test_features` and `test_labels` from previously-run cells.
```
print('Metrics for simple, LinearLearner.\n')
# get metrics for linear predictor
metrics = evaluate(linear_predictor,
test_features.astype('float32'),
test_labels,
verbose=True) # verbose means we'll print out the metrics
```
## Delete the Endpoint
I've added a convenience function to delete prediction endpoints after we're done with them. And if you're done evaluating the model, you should delete your model endpoint!
```
# Deletes a predictor.endpoint
def delete_endpoint(predictor):
    """Best-effort deletion of the SageMaker endpoint behind `predictor`."""
    try:
        boto3.client('sagemaker').delete_endpoint(EndpointName=predictor.endpoint)
        print('Deleted {}'.format(predictor.endpoint))
    except Exception:
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only real errors from the delete call.
        print('Already deleted: {}'.format(predictor.endpoint))
# delete the predictor endpoint
delete_endpoint(linear_predictor)
```
---
# Model Improvements
The default LinearLearner got a high accuracy, but still classified fraudulent and valid data points incorrectly. Specifically classifying more than 30 points as false negatives (incorrectly labeled, fraudulent transactions), and a little over 30 points as false positives (incorrectly labeled, valid transactions). Let's think about what, during training, could cause this behavior and what we could improve.
**1. Model optimization**
* If we imagine that we are designing this model for use in a bank application, we know that users do *not* want any valid transactions to be categorized as fraudulent. That is, we want to have as few **false positives** (0s classified as 1s) as possible.
* On the other hand, if our bank manager asks for an application that will catch almost *all* cases of fraud, even if it means a higher number of false positives, then we'd want as few **false negatives** as possible.
* To train according to specific product demands and goals, we do not want to optimize for accuracy only. Instead, we want to optimize for a metric that can help us decrease the number of false positives or negatives.
<img src='notebook_ims/precision_recall.png' width=40% />
In this notebook, we'll look at different cases for tuning a model and make an optimization decision, accordingly.
**2. Imbalanced training data**
* At the start of this notebook, we saw that only about 0.17% of the training data was labeled as fraudulent. So, even if a model labels **all** of our data as valid, it will still have a high accuracy.
* This may result in some overfitting towards valid data, which accounts for some **false negatives**; cases in which fraudulent data (1) is incorrectly characterized as valid (0).
So, let's address these issues in order; first, tuning our model and optimizing for a specific metric during training, and second, accounting for class imbalance in the training set.
## Improvement: Model Tuning
Optimizing according to a specific metric is called **model tuning**, and SageMaker provides a number of ways to automatically tune a model.
### Create a LinearLearner and tune for higher recall
**Scenario:**
* A bank has asked you to build a model that detects cases of fraud with a recall of about 85%.
In this case, we want to build a model that has as many true positives and as few false negatives, as possible. This corresponds to a model with a high **recall**: true positives / (true positives + false negatives).
To aim for a specific metric, LinearLearner offers the hyperparameter `binary_classifier_model_selection_criteria`, which is the model evaluation criteria for the training dataset. A reference to this parameter is in [LinearLearner's documentation](https://sagemaker.readthedocs.io/en/stable/linear_learner.html#sagemaker.LinearLearner). We'll also have to further specify the exact value we want to aim for; read more about the details of the parameters, [here](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html).
I will assume that performance on a training set will be within about 5% of the performance on a test set. So, for a recall of about 85%, I'll aim for a bit higher, 90%.
```
# specify an output path
prefix = 'creditcard'
output_path = 's3://{}/{}'.format(bucket, prefix)
# instantiate a LinearLearner
# tune the model for a higher recall
linear_recall = LinearLearner(role=role,
train_instance_count=1,
train_instance_type='ml.c4.xlarge',
predictor_type='binary_classifier',
output_path=output_path,
sagemaker_session=sagemaker_session,
epochs=15,
binary_classifier_model_selection_criteria='precision_at_target_recall', # target recall
target_recall=0.9) # 90% recall
```
### Train the tuned estimator
Fit the new, tuned estimator on the formatted training data.
```
%%time
# train the estimator on formatted training data
linear_recall.fit(formatted_train_data)
```
### Deploy and evaluate the tuned estimator
Deploy the tuned predictor and evaluate it.
We hypothesized that a tuned model, optimized for a higher recall, would have fewer false negatives (fraudulent transactions incorrectly labeled as valid); did the number of false negatives get reduced after tuning the model?
```
%%time
# deploy and create a predictor
recall_predictor = linear_recall.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
print('Metrics for tuned (recall), LinearLearner.\n')
# get metrics for tuned predictor
metrics = evaluate(recall_predictor,
test_features.astype('float32'),
test_labels,
verbose=True)
```
## Delete the endpoint
As always, when you're done evaluating a model, you should delete the endpoint. Below, I'm using the `delete_endpoint` helper function I defined earlier.
```
# delete the predictor endpoint
delete_endpoint(recall_predictor)
```
---
## Improvement: Managing Class Imbalance
We have a model that is tuned to get a higher recall, which aims to reduce the number of false negatives. Earlier, we discussed how class imbalance may actually bias our model towards predicting that all transactions are valid, resulting in higher false negatives and true negatives. It stands to reason that this model could be further improved if we account for this imbalance.
To account for class imbalance during training of a binary classifier, LinearLearner offers the hyperparameter, `positive_example_weight_mult`, which is the weight assigned to positive (1, fraudulent) examples when training a binary classifier. The weight of negative examples (0, valid) is fixed at 1.
### EXERCISE: Create a LinearLearner with a `positive_example_weight_mult` parameter
In **addition** to tuning a model for higher recall (you may use `linear_recall` as a starting point), you should *add* a parameter that helps account for class imbalance. From the [hyperparameter documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html) on `positive_example_weight_mult`, it reads:
> "If you want the algorithm to choose a weight so that errors in classifying negative vs. positive examples have equal impact on training loss, specify `balanced`."
You could also put in a specific float value, in which case you'd want to weight positive examples more heavily than negative examples, since there are fewer of them.
```
# instantiate a LinearLearner
# include params for tuning for higher recall
# *and* account for class imbalance in training data
linear_balanced = LinearLearner(role=role,
train_instance_count=1,
train_instance_type='ml.c4.xlarge',
predictor_type='binary_classifier',
output_path=output_path,
sagemaker_session=sagemaker_session,
epochs=15,
binary_classifier_model_selection_criteria='precision_at_target_recall', # target recall
target_recall=0.9,
positive_example_weight_mult = 'balanced') # 90% recall
```
### EXERCISE: Train the balanced estimator
Fit the new, balanced estimator on the formatted training data.
```
%%time
# train the estimator on formatted training data
linear_balanced.fit(formatted_train_data)
```
### EXERCISE: Deploy and evaluate the balanced estimator
Deploy the balanced predictor and evaluate it. Do the results match with your expectations?
```
%%time
# deploy and create a predictor
balanced_predictor = linear_balanced.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
print('Metrics for balanced, LinearLearner.\n')
# get metrics for balanced predictor
metrics = evaluate(balanced_predictor,
test_features.astype('float32'),
test_labels,
verbose=True)
```
## Delete the endpoint
When you're done evaluating a model, you should delete the endpoint.
```
# delete the predictor endpoint
delete_endpoint(balanced_predictor)
```
A note on metric variability:
The above model is tuned for the best possible precision with recall fixed at about 90%. The recall is fixed at 90% during training, but may vary when we apply our trained model to a test set of data.
---
## Model Design
Now that you've seen how to tune and balance a LinearLearner. Create, train and deploy your own model. This exercise is meant to be more open-ended, so that you get practice with the steps involved in designing a model and deploying it.
### EXERCISE: Train and deploy a LinearLearner with appropriate hyperparameters, according to the given scenario
**Scenario:**
* A bank has asked you to build a model that optimizes for a good user experience; users should only ever have up to about 15% of their valid transactions flagged as fraudulent.
This requires that you make a design decision: Given the above scenario, what metric (and value) should you aim for during training?
You may assume that performance on a training set will be within about 5-10% of the performance on a test set. For example, if you get 80% on a training set, you can assume that you'll get between about 70-90% accuracy on a test set.
Your final model should account for class imbalance and be appropriately tuned.
```
%%time
# instantiate and train a LinearLearner
# include params for tuning for higher precision
# *and* account for class imbalance in training data
%%time
# deploy and evaluate a predictor
## IMPORTANT
# delete the predictor endpoint after evaluation
```
## Final Cleanup!
* Double check that you have deleted all your endpoints.
* I'd also suggest manually deleting your S3 bucket, models, and endpoint configurations directly from your AWS console.
You can find thorough cleanup instructions, [in the documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html).
---
# Conclusion
In this notebook, you saw how to train and deploy a LinearLearner in SageMaker. This model is well-suited for a binary classification task that involves specific design decisions and managing class imbalance in the training set.
Following the steps of a machine learning workflow, you loaded in some credit card transaction data, explored that data and prepared it for model training. Then trained, deployed, and evaluated several models, according to different design considerations!
|
github_jupyter
|
***
***
# 11. 튜플과 집합
***
***
***
## 1 튜플 활용법
***
- 튜플(Tuples): 순서있는 임의의 객체 모음 (시퀀스형)
- 튜플은 변경 불가능(Immutable)
- 시퀀스형이 가지는 다음 연산 모두 지원
- 인덱싱, 슬라이싱, 연결, 반복, 멤버쉽 테스트
### 1-1 튜플 연산
```
t1 = () # 비어있는 튜플
t2 = (1,2,3) # 괄호 사용
t3 = 1,2,3 # 괄호가 없어도 튜플이 됨
print(type(t1), type(t2), type(t3))
# <type 'tuple'> <type 'tuple'> <type 'tuple'>
r1 = (1,) # 자료가 한 개일 때는 반드시 콤마가 있어야 한다.
r2 = 1, # 괄호는 없어도 콤마는 있어야 한다.
print(type(r1))
print(type(r2))
# <type 'tuple'>
# <type 'tuple'>
t = (1, 2, 3)
print(t * 2) # 반복
print(t + ('PyKUG', 'users')) # 연결
print(t)
print()
print(t[0], t[1:3]) # 인덱싱, 슬라이싱
print(len(t)) # 길이
print(1 in t) # 멤버십 테스트
t[0] = 100 # 튜플은 변경 불가능, 에러발생
t = (12345, 54321, 'hello!')
u = t, (1, 2, 3, 4, 5) # 튜플 내부 원소로 다른 튜플을 가질 수 있음
print(u)
t2 = [1, 2, 3] # 튜플 내부 원소로 리스트 가질 수 있음
u2 = t2, (1, 2, 4)
print(u2)
t3 = {1:"abc", 2:"def"} # 튜플 내부 원소로 사전 가질 수 있음
u3 = t3, (1, 2, 3)
print(u3)
x, y, z = 1, 2, 3 # 튜플을 이용한 복수 개의 자료 할당
print(type(x), type(y), type(z))
print(x)
print(y)
print(z)
# <type 'int'> <type 'int'> <type 'int'>
# 1
# 2
# 3
x = 1
y = 2
x, y = y, x # 튜플을 이용한 두 자료의 값 변경
print(x, y)
```
### 1-2 패킹과 언패킹
- 패킹 (Packing): 하나의 튜플 안에 여러 개의 데이터를 넣는 작업
```
t = 1, 2, 'hello'
print(t)
print(type(t))
```
- 언패킹 (Unpacking): 하나의 튜플에서 여러 개의 데이터를 한꺼번에 꺼내와 각각 변수에 할당하는 작업
```
x, y, z = t
```
- 리스트로도 비슷한 작업이 가능하지만, 단순 패킹/언패킹 작업만을 목적으로 한다면 튜플 사용 추천
```
a = ['foo', 'bar', 4, 5]
[x, y, z, w] = a
print(x)
print(y)
print(z)
print(w)
print()
x, y, z, w = a
print(x)
print(y)
print(z)
print(w)
```
- 튜플과 리스트와의 공통점
- 원소에 임의의 객체를 저장
- 시퀀스 자료형
- 인덱싱, 슬라이싱, 연결, 반복, 멤버쉽 테스트 연산 지원
- 리스트와 다른 튜플만의 특징
- 변경 불가능 (Immutable)
- 튜플은 count와 index 외에 다른 메소드를 가지지 않는다.
```
T = (1, 2, 2, 3, 3, 4, 4, 4, 4, 5)
print(T.count(4))
print(T.index(1))
```
- list() 와 tuple() 내장 함수를 사용하여 리스트와 튜플을 상호 변환할 수 있음
```
T = (1, 2, 3, 4, 5)
L = list(T)
L[0] = 100
print(L)
T = tuple(L)
print(T)
```
### 1-3 튜플의 사용 용도
- 튜플을 사용하는 경우 1: 함수가 하나 이상의 값을 리턴하는 경우
```
def calc(a, b):
    """Return the sum and the product of a and b as a (sum, product) tuple."""
    total = a + b
    product = a * b
    return total, product
x, y = calc(5, 4)
```
- 튜플을 사용하는 경우 2: 문자열 포멧팅
```
print('id : %s, name : %s' % ('gslee', 'GangSeong'))
```
- 튜플을 사용하는 경우 3: 고정된 값을 쌍으로 표현하는 경우
```
d = {'one':1, 'two':2}
print(d.items())
# [('two', 2), ('one', 1)]
```
***
## 2 집합 자료형
***
- set 내장 함수를 사용한 집합 자료 생성
- 변경 가능(Mutable)한 객체이다.
- 각 원소간에 순서는 없다.
- 각 원소는 중복될 수 없다.
- [note] set은 컨테이너 자료형이지만 시퀀스 자료형은 아니다.
### 2-1 집합 자료형 생성
```
a = set([1, 2, 3])
print(type(a))
print(a)
b = set((1, 2, 3))
print(type(b))
print(b)
c = set({'a':1, 'b':2, 'c':3})
print(type(c))
print(c)
d = set({'a':1, 'b':2, 'c':3}.values())
print(type(d))
print(d)
```
- E-learning에서 언급하지 않았던 집합 만드는 방법 --> 꼭 기억하세요~
```
e = {1, 2, 3, 4, 5}
print(type(e))
print(e)
```
- set에는 동일한 자료가 중복해서 저장되지 않는다. 즉 중복이 자동으로 제거됨
```
f = {1, 1, 2, 2, 3, 3}
g = set([1, 1, 2, 2, 3, 3])
print(f)
print(g)
```
- set의 원소로는 변경 불가능(Immutable)한 것만 할당 가능하다.
```
print(set()) # 빈 set 객체 생성
print(set([1, 2, 3, 4, 5])) # 초기 값은 일반적으로 시퀀스 자료형인 리스트를 넣어준다.
print(set([1, 2, 3, 2, 3, 4])) # 중복된 원소는 한 나만 저장됨
print(set('abc')) # 문자열은 각 문자를 집합 원소로 지닌다.
print(set([(1, 2, 3), (4, 5, 6)])) # 각 튜플은 원소로 가질 수 있음
print(set([[1, 2, 3], [4, 5, 6]])) # 변경 가능 자료인 리스트는 집합의 원소가 될 수 없다.
print(set([{1:"aaa"}, {2:"bbb"}])) # 변경 가능 자료인 사전도 집합의 원소가 될 수 없다.
```
- set의 기본 연산
| set 연산 | 동일 연산자 | 내용 |
|---------------------------|-------------|-----------------------------|
| len(s) | | 원소의 개수 |
| x in s | | x가 집합 s의 원소인가? |
| x not in s | | x가 집합 s의 원소가 아닌가? |
```
A = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
print(len(A)) # 집합의 원소의 수
print(5 in A) # 멤버십 테스트
print(10 not in A) # 멤버십 테스트
```
### 2-2 집합 자료형 메소드
- set의 주요 메소드
- 다음 연산은 원래 집합은 변경하지 않고 새로운 집합을 반환한다.
| set 연산 | 동일 연산자 | 내용 |
|---------------------------|-------------|-----------------------------|
| s.issubset(t) | s <= t | s가 t의 부분집합인가? |
| s.issuperset(t) | s >= t | s가 t의 슈퍼집합인가? |
| s.union(t) | s | t | 새로운 s와 t의 합집합 |
| s.intersection(t) | s & t | 새로운 s와 t의 교집합 |
| s.difference(t) | s - t | 새로운 s와 t의 차집합 |
| s.symmetric_difference(t) | s ^ t | 새로운 s와 t의 배타집합 |
| s.copy() | | 집합 s의 shallow 복사 |
```
B = set([4, 5, 6, 10, 20, 30])
C = set([10, 20, 30])
print(C.issubset(B)) # C가 B의 부분집합?
print(C <= B)
print(B.issuperset(C)) # B가 C를 포함하는 집합?
print(B >= C)
print()
A = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
B = set([4, 5, 6, 10, 20, 30])
print(A.union(B)) # A와 B의 합집합
print(A | B)
print(A)
print(A.intersection(B)) # A와 B의 교집합
print(A & B)
print(A)
print(A.difference(B)) # A - B (차집합)
print(A - B)
print(A)
print(A.symmetric_difference(B)) # 베타집합. A와 B의 합집합에서 교집합의 원소를 제외한 집합
print(A ^ B)
print(A)
A = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
D = A.copy()
print(D)
print()
print(A == D) #자료값 비교
print(A is D) #객체 동등성 비교
```
- set은 시퀀스 자료형이 아니므로 인덱싱, 슬라이싱, 정렬 등을 지원하지 않는다.
```
A = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
print(A[0])
print(A[1:4])
print(A.sort())
```
- 집합을 리스트나 튜플로 변경가능
- 집합에 인덱싱, 슬라이싱, 정렬 등을 적용하기 위해서는 리스트나 튜플로 변경한다.
```
print(list(A))
print(tuple(A))
```
- 하지만 집합에 for ~ in 연산은 적용 가능하다.
```
A = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
for ele in A:
print(ele,end=" ")
```
- set은 변경 가능(Mutable)한 자료 구조 객체
- 다음 메소드들은 set을 변경하는 집합 자료 구조 메소드들임
| set 연산 | 동일 연산자 | 내용 |
|---------------------------|-------------|-----------------------------|
| s.update(t) | s |= t | s와 t의 합집합을 s에 저장 |
| s.intersection_update(t) | s &= t | s와 t의 교집합을 s에 저장 |
| s.difference_update(t) | s -= t | s와 t의 차집합을 s에 저장 |
| s.symmetric_difference_update(t)| s ^= t | s와 t의 배타집합을 s에 저장 |
| s.add(x) | | 원소 x를 집합 s에 추가 |
| s.remove(x) | | 원소 x를 집합 s에서 제거, 원소 x가 집합 s에 없으면 예외 발생 |
| s.discard(x) | | 원소 x를 집합 s에서 제거 |
| s.pop() | | 임의의 원소를 집합 s에서 제거, 집합 s가 공집합이면 예외 발생 |
| s.clear() | | 집합 s의 모든 원소 제거 |
```
A = set([1, 2, 3, 4])
B = set([3, 4, 5, 6])
A.update(B) # A에 B 집합의 원소를 추가 시킴
print(A)
A.intersection_update([4,5,6,7,8]) # &=
print(A)
A.difference_update([6,7,8]) # -=
print(A)
A.symmetric_difference_update([5,6,7]) # ^=
print(A)
A.add(8) # 원소 추가
print(A)
A.remove(8) # 원소 제거
print(A)
A.remove(10) # 없는 원소를 제거하면 KeyError 발생
A.discard(10) # remove와 같으나 예외가 발생하지 않음
A.discard(6) # 원소 6제거
print(A)
A.pop() # 임의의 원소 하나 꺼내기
print(A)
A = set([1,2,3,4])
A.clear() # 모든 원소 없애기
print(A)
```
<p style='text-align: right;'>참고 문헌: 파이썬(열혈강의)(개정판 VER.2), 이강성, FreeLec, 2005년 8월 29일</p>
|
github_jupyter
|
```
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from rmgpy.kinetics import *
# Set global plot styles
plt.style.use('seaborn-paper')
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Set temperature range and pressure
pressure = 1e5 # Pa
temperature = np.linspace(298, 2000, 50)
def plot_kinetics(kinetics, kunits, labels=None, styles=None, colors=None, filename=None):
    """Plot rate coefficients k(T) on a semilog axis against 1000/T.

    kinetics : list of RMG kinetics objects to evaluate
    kunits   : unit string for the y-axis label; 'cm^3/(mol*s)' triggers a
               hardcoded SI -> cm^3 conversion (factor 1e6)
    labels   : optional legend entries, one per rate (no legend if falsy)
    styles   : optional matplotlib line styles, one per rate (default solid)
    colors   : optional colormap indices, one per rate (default 0, 1, 2, ...)
    filename : optional path; when given the figure is saved there
    """
    # Colormap used for every curve; swap here if desired.
    colormap = mpl.cm.Set1
    if colors is None:
        colors = range(len(kinetics))
    if styles is None:
        styles = ['-'] * len(kinetics)
    fig = plt.figure()
    inv_temp = 1000 / temperature
    for idx, rate in enumerate(kinetics):
        # Rates come back in SI units by default; convert to cm^3 on request.
        if kunits == 'cm^3/(mol*s)':
            k_values = [1e6 * rate.getRateCoefficient(t, pressure) for t in temperature]
        else:
            k_values = [rate.getRateCoefficient(t, pressure) for t in temperature]
        plt.semilogy(inv_temp, k_values, styles[idx], c=colormap(colors[idx]))
    plt.xlabel('1000/T (K)')
    plt.ylabel('k [{0}]'.format(kunits))
    if labels:
        plt.legend(labels, fontsize=12, loc=8, bbox_to_anchor=(0.5, 1.02))
    if filename is not None:
        plt.savefig(filename, bbox_inches="tight", dpi=300)
kunits = 'cm^3/(mol*s)'
# List of RMG kinetics objects
# Entries from RMG-database can be copied as is
# Can be any RMG kinetics type, not just Arrhenius
kinetics = [
Arrhenius(
A = (261.959, 'cm^3/(mol*s)'),
n = 2.67861,
Ea = (148.685, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (303.03, 'K'),
Tmax = (2500, 'K'),
comment = 'Fitted to 59 data points; dA = *|/ 1.00756, dn = +|- 0.000987877, dEa = +|- 0.00543432 kJ/mol',
),
Arrhenius(
A = (286.364, 'cm^3/(mol*s)'),
n = 2.61958,
Ea = (116.666, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (303.03, 'K'),
Tmax = (2500, 'K'),
comment = 'Fitted to 59 data points; dA = *|/ 1.01712, dn = +|- 0.00222816, dEa = +|- 0.0122571 kJ/mol',
),
Arrhenius(
A = (232.129, 'cm^3/(mol*s)'),
n = 2.57899,
Ea = (86.4148, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (303.03, 'K'),
Tmax = (2500, 'K'),
comment = 'Fitted to 59 data points; dA = *|/ 1.02472, dn = +|- 0.00320486, dEa = +|- 0.0176299 kJ/mol',
),
]
# Labels corresponding to each rate, can be empty list for no legend
labels = [
'Rate A',
'Rate B',
'Rate C',
]
# Matplotlib style descriptors corresponding to each rate
styles = ['-', '--', '-.']
# Colormap indices corresponding to each rate
colors = [0, 0, 1]
plot_kinetics(kinetics, kunits, labels=labels, styles=styles, colors=colors)
```
|
github_jupyter
|
## Test Riksdagen SFS dokument
* Denna [Jupyter Notebook](https://github.com/salgo60/open-data-examples/blob/master/Riksdagens%20dokument%20SFS.ipynb)
* [KU anmälningar](https://github.com/salgo60/open-data-examples/blob/master/Riksdagens%20dokument%20KU-anm%C3%A4lningar.ipynb)
* [Motioner](https://github.com/salgo60/open-data-examples/blob/master/Riksdagens%20dokument%20Motioner.ipynb)
* [Ledamöter](https://github.com/salgo60/open-data-examples/blob/master/Riksdagens%20ledam%C3%B6ter.ipynb)
* [Dokumenttyper](https://github.com/salgo60/open-data-examples/blob/master/Riksdagens%20dokumenttyper.ipynb)
* [Skapa sökfråga](http://data.riksdagen.se/dokumentlista/)
* 13980 hämtade verkar som diff med [Dokument & lagar (10 504 träffar)](https://www.riksdagen.se/sv/dokument-lagar/?doktyp=sfs)
### Test SFS nr 2020-577
* [Fulltext](https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/forordning-2020577-om-statligt-stod-for_sfs-2020-577) [text](http://data.riksdagen.se/dokument/sfs-2020-577.text) / [html](http://data.riksdagen.se/dokument/sfs-2020-577.html) / [json](http://data.riksdagen.se/dokument/sfs-2020-577.json)
```
from datetime import datetime
now = datetime.now()
print("Last run: ", datetime.now())
import urllib3, json
import pandas as pd
from tqdm.notebook import trange
http = urllib3.PoolManager()
pd.set_option("display.max.columns", None)
urlbase ="http://data.riksdagen.se/dokumentlista/?sok=&doktyp=SFS&utformat=json&start="
dftot = pd.DataFrame()
# Page through the Riksdagen SFS document list; each page is appended to dftot.
for i in trange(1, 700):  # looks we today have 10504 SFS --> 10503/20
    url = urlbase + str(i)
    # One GET per page: the original issued the same request twice and
    # discarded the second response, doubling the network traffic.
    r = http.request('GET', url)
    data = json.loads(r.data)
    dftot = dftot.append(pd.DataFrame(data["dokumentlista"]["dokument"]), sort=False)
dftot.head()
print("Min och Max publicerad: ", dftot.publicerad.min(), dftot.publicerad.max())
print("Min och Max datum: ", dftot.datum.min(), dftot.datum.max())
print("Min och Max systemdatum: ", dftot.systemdatum.min(), dftot.systemdatum.max())
dftot.info()
dftot[['nummer','titel','publicerad','beslutad','datum','summary']]
dftot.publicerad.unique()
dftot.publicerad.value_counts()
dftot.publicerad.value_counts().sort_index(ascending=False)
ftot.publicerad.value_counts().sort_index(ascending=False)[:50]
%matplotlib inline
import matplotlib.pyplot as plt
plot = dftot.publicerad.value_counts()[1:30].plot.bar(y='counts', figsize=(25, 5))
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
plot = dftot.datum.value_counts()[1:30].plot.bar(y='counts', figsize=(25, 5))
plt.show()
plotPublishedSFSperMonth = dftot['publicerad'].groupby(dftot.publicerad.dt.to_period("M")).agg('count')
plotPublishedSFSperMonth.plot( kind = 'bar')
plt.title("SFS per month")
plt.show()
plotDatumSFSperMonth = dftot['datum'].groupby(dftot.datum.dt.to_period("M")).agg('count')
plotDatumSFSperMonth.plot( kind = 'bar')
plt.title("SFS Datum per month")
plt.show()
plotDatumSFSperMonth = dftot['datum'].groupby(dftot.datum.dt.to_period("M")).agg('count')[10:]
plotDatumSFSperMonth.plot( kind = 'bar')
plt.title("SFS Datum per month")
plt.figsize=(5, 35)
plt.show()
plotDatumSFSperMonth
#Last year
PublishedSFS2016perMonth = dftot[dftot["publicerad"].dt.year > 2016 ]
plotPublishedSFS2016perMonth = PublishedSFS2016perMonth['publicerad'].groupby(PublishedSFS2016perMonth.publicerad.dt.to_period("M")).agg('count')
plotPublishedSFS2016perMonth.plot( kind = 'bar',)
plt.title("SFS > 2016 per month")
plt.figsize=(5, 35)
figure(figsize=(1,1))
plt.show()
plotDatumSFSperMonth[100:]
dftot.debattnamn.value_counts()
dftot.info()
organCount = dftot.organ.value_counts()
organCount
dftot.organ.value_counts().plot.pie(y='counts', figsize=(15, 15))
plt.show()
dftot.organ.value_counts()[1:50]
dftot.organ.value_counts()[50:100]
dftot.organ.value_counts()[100:150]
dftot.domain.value_counts()
dftot.rm.value_counts()
plotRM = dftot.rm.value_counts().plot.bar(y='counts', figsize=(25, 5))
plt.show()
dftot['datum'] =pd.to_datetime(dftot.datum)
dftot['publicerad'] =pd.to_datetime(dftot.publicerad)
dftot['systemdatum'] =pd.to_datetime(dftot.systemdatum, format='%Y-%m-%d')
# 2016-02-11 15:26:06
dftot.info()
dftot = dftot.sort_values('datum')
dftot.head()
dftot.tail()
dftot.subtyp.value_counts()
```
Gissning
* regl-riksg verkar vara Reglemente för Riksgäldskontoret
* regl-riksb är nog Riksbanken
```
dftot.debattnamn.value_counts()
ftot = dftot.sort_values(by='id', ascending=False)
dftot.info()
dftot.head(1000)
print("End run: ", datetime.now())
```
|
github_jupyter
|
```
import os
import numpy as np
from mnucosmomap import util as UT
from mnucosmomap import catalogs as mNuCats
%load_ext line_profiler
fullbox = mNuCats.mNuICs(1, sim='paco')
x, y, z = fullbox['Position'].T
vx, vy, vz = fullbox['Velocity'].T
nside = 8
L_subbox = 1000./float(nside) # L_subbox
L_res = 1000./512.
L_halfres = 0.5 * L_res
N_partside = 512/nside
N_subbox = (N_partside)**3
def method1(isubbox):
    """Extract subbox `isubbox` into ordered (ID, pos, vel) grids — loop version."""
    # Decode the flat subbox index into 3D subbox grid coordinates.
    i_x = ((isubbox % nside**2) % nside)
    i_y = ((isubbox % nside**2) // nside)
    i_z = (isubbox // nside**2)
    # Subbox bounds shifted by half a resolution element; the modulo wraps
    # the upper bound around the periodic box (box size 1000 — units per caller).
    xmin = L_subbox * float(i_x) + L_halfres
    xmax = (L_subbox * float(i_x+1) + L_halfres) % 1000.
    ymin = L_subbox * float(i_y) + L_halfres
    ymax = (L_subbox * float(i_y+1) + L_halfres) % 1000.
    zmin = L_subbox * float(i_z) + L_halfres
    zmax = (L_subbox * float(i_z+1) + L_halfres) % 1000.
    # Membership masks per axis; the `|` branch handles bounds that wrap
    # around the periodic boundary (min > max after the modulo above).
    if xmin <= xmax: xlim = ((x >= xmin) & (x < xmax))
    else: xlim = ((x >= xmin) | (x < xmax))
    if ymin <= ymax: ylim = ((y >= ymin) & (y < ymax))
    else: ylim = ((y >= ymin) | (y < ymax))
    if zmin <= zmax: zlim = ((z >= zmin) & (z < zmax))
    else: zlim = ((z >= zmin) | (z < zmax))
    in_subbox = (xlim & ylim & zlim)
    # Sanity check: each subbox must contain exactly N_partside**3 particles.
    assert np.sum(in_subbox) == N_subbox
    ID_sub = fullbox['ID'][in_subbox]
    x_subbox = x[in_subbox]
    y_subbox = y[in_subbox]
    z_subbox = z[in_subbox]
    # Positions relative to the subbox origin (periodic wrap applied).
    x_sub = (x_subbox - i_x * L_subbox) % 1000.
    y_sub = (y_subbox - i_y * L_subbox) % 1000.
    z_sub = (z_subbox - i_z * L_subbox) % 1000.
    vx_subbox = vx[in_subbox]
    vy_subbox = vy[in_subbox]
    vz_subbox = vz[in_subbox]
    # Output grids: one ID per lattice site, 3-vectors for position/velocity.
    subbox_ID = np.zeros((N_partside, N_partside, N_partside))
    subbox_pos = np.zeros((3, N_partside, N_partside, N_partside))
    subbox_vel = np.zeros((3, N_partside, N_partside, N_partside))
    # Fill the grid one (j_y, j_z) column at a time: mask the particles in
    # that column, then argsort along x to fix the j_x ordering.
    for j_z in range(N_partside):
        #print('j_z = %i , %f < z < %f' % (j_z, L_res* float(j_z) + L_halfres, L_res * float(j_z + 1) + L_halfres))
        zlim_sub = ((z_sub > L_res* float(j_z) + L_halfres) &
                (z_sub < L_res * float(j_z + 1) + L_halfres))
        for j_y in range(N_partside):
            #print('j_y = %i , %f < y < %f' % (j_y, L_res* float(j_y) + L_halfres, L_res * float(j_y + 1) + L_halfres))
            ylim_sub = ((y_sub > L_res * float(j_y) + L_halfres) &
                    (y_sub < L_res * float(j_y + 1) + L_halfres))
            #for j_x in range(N_partside):
            j_x_sorted = np.argsort(x_sub[ylim_sub & zlim_sub])
            subbox_ID[:,j_y,j_z] = ID_sub[ylim_sub & zlim_sub][j_x_sorted]
            subbox_pos[0,:,j_y,j_z] = x_subbox[ylim_sub & zlim_sub][j_x_sorted]
            subbox_pos[1,:,j_y,j_z] = y_subbox[ylim_sub & zlim_sub][j_x_sorted]
            subbox_pos[2,:,j_y,j_z] = z_subbox[ylim_sub & zlim_sub][j_x_sorted]
            subbox_vel[0,:,j_y,j_z] = vx_subbox[ylim_sub & zlim_sub][j_x_sorted]
            subbox_vel[1,:,j_y,j_z] = vy_subbox[ylim_sub & zlim_sub][j_x_sorted]
            subbox_vel[2,:,j_y,j_z] = vz_subbox[ylim_sub & zlim_sub][j_x_sorted]
    return None
def method2(isubbox):
    """Extract subbox `isubbox` into (ID, pos, vel) grids — vectorized version.

    Same selection as method1; the grid fill replaces the nested
    per-column loops with direct index arithmetic.
    """
    # Decode the flat subbox index into 3D subbox grid coordinates.
    i_x = ((isubbox % nside**2) % nside)
    i_y = ((isubbox % nside**2) // nside)
    i_z = (isubbox // nside**2)
    # Subbox bounds shifted by half a resolution element; modulo wraps
    # the upper bound around the periodic box.
    xmin = L_subbox * float(i_x) + L_halfres
    xmax = (L_subbox * float(i_x+1) + L_halfres) % 1000.
    ymin = L_subbox * float(i_y) + L_halfres
    ymax = (L_subbox * float(i_y+1) + L_halfres) % 1000.
    zmin = L_subbox * float(i_z) + L_halfres
    zmax = (L_subbox * float(i_z+1) + L_halfres) % 1000.
    # Membership masks; `|` branch handles wrap-around bounds (min > max).
    if xmin <= xmax: xlim = ((x >= xmin) & (x < xmax))
    else: xlim = ((x >= xmin) | (x < xmax))
    if ymin <= ymax: ylim = ((y >= ymin) & (y < ymax))
    else: ylim = ((y >= ymin) | (y < ymax))
    if zmin <= zmax: zlim = ((z >= zmin) & (z < zmax))
    else: zlim = ((z >= zmin) | (z < zmax))
    in_subbox = (xlim & ylim & zlim)
    # Sanity check: each subbox must contain exactly N_partside**3 particles.
    assert np.sum(in_subbox) == N_subbox
    ID_sub = fullbox['ID'][in_subbox]
    x_subbox = x[in_subbox]
    y_subbox = y[in_subbox]
    z_subbox = z[in_subbox]
    # Positions relative to the subbox origin (periodic wrap applied).
    x_sub = (x_subbox - i_x * L_subbox) % 1000.
    y_sub = (y_subbox - i_y * L_subbox) % 1000.
    z_sub = (z_subbox - i_z * L_subbox) % 1000.
    vx_subbox = vx[in_subbox]
    vy_subbox = vy[in_subbox]
    vz_subbox = vz[in_subbox]
    subbox_ID = np.zeros((N_partside, N_partside, N_partside))
    subbox_pos = np.zeros((3, N_partside, N_partside, N_partside))
    subbox_vel = np.zeros((3, N_partside, N_partside, N_partside))
    # Vectorized grid placement: each particle's lattice cell follows
    # directly from its subbox-relative position, so all particles are
    # scattered into the grids with one fancy-indexing assignment each.
    j_x = ((x_sub - L_halfres) // L_res).astype(int)
    j_y = ((y_sub - L_halfres) // L_res).astype(int)
    j_z = ((z_sub - L_halfres) // L_res).astype(int)
    subbox_ID[j_x,j_y,j_z] = ID_sub
    subbox_pos[0,j_x,j_y,j_z] = x_subbox
    subbox_pos[1,j_x,j_y,j_z] = y_subbox
    subbox_pos[2,j_x,j_y,j_z] = z_subbox
    subbox_vel[0,j_x,j_y,j_z] = vx_subbox
    subbox_vel[1,j_x,j_y,j_z] = vy_subbox
    subbox_vel[2,j_x,j_y,j_z] = vz_subbox
    return None
%timeit -n 3 method1(0)
%timeit -n 3 method2(0)
```
|
github_jupyter
|
<h1>Lists in Python</h1>
<p><strong>Welcome!</strong> This notebook will teach you about the lists in the Python Programming Language. By the end of this lab, you'll know the basics list operations in Python, including indexing, list operations and copy/clone list.</p>
<h2>Table of Contents</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ul>
<li>
<a href="#dataset">About the Dataset</a>
</li>
<li>
<a href="#list">Lists</a>
<ul>
<li><a href="index">Indexing</a></li>
<li><a href="content">List Content</a></li>
<li><a href="op">List Operations</a></li>
<li><a href="co">Copy and Clone List</a></li>
</ul>
</li>
<li>
<a href="#quiz">Quiz on Lists</a>
</li>
</ul>
<p>
Estimated time needed: <strong>15 min</strong>
</p>
</div>
<hr>
<h2 id="#dataset">About the Dataset</h2>
Imagine you received album recommendations from your friends and compiled all of the recommendations into a table, with specific information about each album.
The table has one row for each album and several columns:
- **artist** - Name of the artist
- **album** - Name of the album
- **released_year** - Year the album was released
- **length_min_sec** - Length of the album (hours,minutes,seconds)
- **genre** - Genre of the album
- **music_recording_sales_millions** - Music recording sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
- **claimed_sales_millions** - Album's claimed sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
- **date_released** - Date on which the album was released
- **soundtrack** - Indicates if the album is the movie soundtrack (Y) or (N)
- **rating_of_friends** - Indicates the rating from your friends from 1 to 10
<br>
<br>
The dataset can be seen below:
<font size="1">
<table font-size:xx-small style="width:70%">
<tr>
<th>Artist</th>
<th>Album</th>
<th>Released</th>
<th>Length</th>
<th>Genre</th>
<th>Music recording sales (millions)</th>
<th>Claimed sales (millions)</th>
<th>Released</th>
<th>Soundtrack</th>
<th>Rating (friends)</th>
</tr>
<tr>
<td>Michael Jackson</td>
<td>Thriller</td>
<td>1982</td>
<td>00:42:19</td>
<td>Pop, rock, R&B</td>
<td>46</td>
<td>65</td>
<td>30-Nov-82</td>
<td></td>
<td>10.0</td>
</tr>
<tr>
<td>AC/DC</td>
<td>Back in Black</td>
<td>1980</td>
<td>00:42:11</td>
<td>Hard rock</td>
<td>26.1</td>
<td>50</td>
<td>25-Jul-80</td>
<td></td>
<td>8.5</td>
</tr>
<tr>
<td>Pink Floyd</td>
<td>The Dark Side of the Moon</td>
<td>1973</td>
<td>00:42:49</td>
<td>Progressive rock</td>
<td>24.2</td>
<td>45</td>
<td>01-Mar-73</td>
<td></td>
<td>9.5</td>
</tr>
<tr>
<td>Whitney Houston</td>
<td>The Bodyguard</td>
<td>1992</td>
<td>00:57:44</td>
<td>Soundtrack/R&B, soul, pop</td>
<td>26.1</td>
<td>50</td>
<td>25-Jul-80</td>
<td>Y</td>
<td>7.0</td>
</tr>
<tr>
<td>Meat Loaf</td>
<td>Bat Out of Hell</td>
<td>1977</td>
<td>00:46:33</td>
<td>Hard rock, progressive rock</td>
<td>20.6</td>
<td>43</td>
<td>21-Oct-77</td>
<td></td>
<td>7.0</td>
</tr>
<tr>
<td>Eagles</td>
<td>Their Greatest Hits (1971-1975)</td>
<td>1976</td>
<td>00:43:08</td>
<td>Rock, soft rock, folk rock</td>
<td>32.2</td>
<td>42</td>
<td>17-Feb-76</td>
<td></td>
<td>9.5</td>
</tr>
<tr>
<td>Bee Gees</td>
<td>Saturday Night Fever</td>
<td>1977</td>
<td>1:15:54</td>
<td>Disco</td>
<td>20.6</td>
<td>40</td>
<td>15-Nov-77</td>
<td>Y</td>
<td>9.0</td>
</tr>
<tr>
<td>Fleetwood Mac</td>
<td>Rumours</td>
<td>1977</td>
<td>00:40:01</td>
<td>Soft rock</td>
<td>27.9</td>
<td>40</td>
<td>04-Feb-77</td>
<td></td>
<td>9.5</td>
</tr>
</table></font>
<hr>
<h2 id="list">Lists</h2>
We are going to take a look at lists in Python.
* A list is a sequenced collection of different objects such as integers, strings, Bool, Float,complex and other lists as well.
* The address of each element within a list is called an <b>index</b>.
* An index is used to access and refer to Element/items within a list.
* Lists support `indexing`, `slicing`, and `extended slicing`, and we can assign an element through an index as well.
* Lists are mutable (they can be changed at any time): we can add, delete, and modify elements.
* Lists provide a variety of methods.
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsIndex.png" width="1000" />
To create a list, type the list within square brackets <b>[ ]</b>, with your content inside the parenthesis and separated by commas. Let’s try it!
```
# Create a list
L = ["Michael Jackson", 10.1, 1982]
L
```
We can use negative and regular indexing with a list :
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsNeg.png" width="1000" />
```
L[0],L[-3]
# Print the elements on each index
print('the same element using negative and positive indexing:\n Postive:',L[0],
'\n Negative:' , L[-3] )
print('the same element using negative and positive indexing:\n Postive:',L[1],
'\n Negative:' , L[-2] )
print('the same element using negative and positive indexing:\n Postive:',L[2],
'\n Negative:' , L[-1] )
# Print the elements on each slice
L[0:2] # Start and End-1-->Slice
L[1:]
```
<h3 id="content">List Content</h3>
Lists can contain strings, floats, and integers. We can nest other lists, and we can also nest tuples and other data structures. The same indexing conventions apply for nesting:
```
# Sample List
Sample_list = ["Michael Jackson", 10.1, 1982,2j+3,True ,[1, 2], ("A", 1)]
Sample_list
Sample_list[1],Sample_list[-6]
Sample_list[2]
Sample_list[0:5]
Sample_list[-5:-1]
```
<h3 id="op">List Operations</h3>
We can also perform slicing in lists. For example, if we want the last two elements, we use the following command:
```
# Sample List
L = ["Michael Jackson", 10.1,1982,"MJ",1]
L
```
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsSlice.png" width="1000">
```
# List slicing
L[3:5]
```
We can use the method <code>extend</code> to add new elements to the end of the list:
```
# Use extend to add elements to list
L = [ "Michael Jackson", 10.2]
L.extend(['pop', 10])
L
```
Another similar method is <code>append</code>. If we apply <code>append</code> instead of <code>extend</code>, we add one element to the list:
```
# Use append to add elements to list
L = [ "Michael Jackson", 10.2]
L.append(['pop', 10])
L
```
Each time we apply a method, the list changes. If we apply <code>extend</code> we add two new elements to the list. The list <code>L</code> is then modified by adding two new elements:
```
# Use extend to add elements to list
L = [ "Michael Jackson", 10.2]
L.extend(['pop', 10])
L
```
If we append the list <code>['a','b']</code> we have one new element consisting of a nested list:
```
# Use append to add elements to list
L.append(['a','b'])
L
```
As lists are mutable, we can change them. For example, we can change the first element as follows:
```
# Change the element based on the index
A = ["disco", 10, 1.2]
print('Before change:', A)
A[0]
A[0] = 'hard rock' # Mutable
print('After change:', A)
```
We can also delete an element of a list using the <code>del</code> command:
```
# Delete the element based on the index
print('Before change:', A)
del(A[0])
print('After change:', A)
```
We can convert a string to a list using <code>split</code>. For example, the method <code>split</code> translates every group of characters separated by a space into an element in a list:
```
# Split the string, default is by space
'hard rock'.split()
```
We can use the split function to separate strings on a specific character. We pass the character we would like to split on into the argument, which in this case is a comma. The result is a list, and each element corresponds to a set of characters that have been separated by a comma:
```
# Split the string by comma
'A,B,C,D'.split(',')
```
<h3 id="co">Copy and Clone List</h3>
When we set one variable <b>B</b> equal to <b>A</b>; both <b>A</b> and <b>B</b> are referencing the same list in memory:
```
# Copy (copy by reference) the list A
A = ["hard rock", 10, 1.2]
B = A # copy by reference
print('A:', A)
print('B:', B)
```
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsRef.png" width="1000" align="center">
```
id(A)
id(B)
```
Initially, the value of the first element in <b>B</b> is set as hard rock. If we change the first element in <b>A</b> to <b>banana</b>, we get an unexpected side effect. As <b>A</b> and <b>B</b> are referencing the same list, if we change list <b>A</b>, then list <b>B</b> also changes. If we check the first element of <b>B</b> we get banana instead of hard rock:
```
# Examine the copy by reference
print('B[0]:', B[0])
A[0] = "banana"
A
print('B[0]:', B[0])
B
```
This is demonstrated in the following figure:
<img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsRefGif.gif" width="1000" />
You can clone list **A** by using the following syntax:
```
# Clone (clone by value) the list A
B = A[:]
B
```
Variable **B** references a new copy or clone of the original list; this is demonstrated in the following figure:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsVal.gif" width="1000" />
Now if you change <b>A</b>, <b>B</b> will not change:
```
print('B[0]:', B[0])
A[0] = "hard rock"
print('B[0]:', B[0])
A
B
li = list(range(25,40)) # hear 25 is Starting Element and 39 Is Ending Element
li
li.append(10.25)
li
li.clear()
li
li_1 = [10,20,30,'hi','hello',True,2.5]
li_1
li_2 = li_1.copy()
li_2
li_1
li_1.append(10)
li_1
li_1.count(10)
li
li.extend(li_1)
li
li_1
li_2
co = [10,20,30,40,50]
co
co.index(30)
co[1]
co.insert(1,"Hello")
co
co.pop() # it will remove last element
co.pop(1) # This Is Used Remove 1 position Element
co
co.remove('hi')
co.remove('hello')
co
co.reverse()
co
li
li.remove(2.5)
li.sort()
li
```
|Methods|Description|
|--------|----------|
|**append()**|adds an element at the end of the list|
|**clear()**|removes all the elements from the list|
|**copy()**|returns a shallow copy of the list|
|**count()**|counts how many times a particular element appears in the list|
|**extend()**|adds multiple values to the existing list|
|**index()**|finds the first occurrence of an element in the list|
|**pop()**|removes and returns the last element|
|**pop(position)**|removes the element at a particular position|
|**remove(element)**|removes the first occurrence of a particular element|
|**reverse()**|reverses the order of the elements|
|**sort()**|sorts the list; works only when the elements are of comparable types|
### Nested List
```
a = [[10,20,30],
[2.5,3.5,4.5],
[True,False,True]]
a
a[0]
a[0][1]
a[1]
a[2]=10
a
```
<h2 id="quiz">Quiz on List</h2>
Create a list <code>a_list</code>, with the following elements <code>1</code>, <code>hello</code>, <code>[1,2,3]</code> and <code>True</code>.
```
# Write your code below and press Shift+Enter to execute
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
a_list = [1, 'hello', [1, 2, 3] , True]
a_list
-->
Find the value stored at index 1 of <code>a_list</code>.
```
# Write your code below and press Shift+Enter to execute
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
a_list[1]
-->
Retrieve the elements stored at index 1, 2 and 3 of <code>a_list</code>.
```
# Write your code below and press Shift+Enter to execute
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
a_list[1:4]
-->
Concatenate the following lists <code>A = [1, 'a']</code> and <code>B = [2, 1, 'd']</code>:
```
# Write your code below and press Shift+Enter to execute
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
A = [1, 'a']
B = [2, 1, 'd']
A + B
-->
|
github_jupyter
|
```
! wget http://corpora.linguistik.uni-erlangen.de/someweta/german_web_social_media_2018-12-21.model -P /mnt/data2/ptf
from someweta import ASPTagger
# Path to the pre-trained SoMeWeTa model for German web / social-media text.
model = "/mnt/data2/ptf/german_web_social_media_2018-12-21.model"
# future versions will have sensible default values
# NOTE(review): beam_size/iterations are presumably the perceptron decoding
# parameters — confirm against the SoMeWeTa documentation for this version.
asptagger = ASPTagger(beam_size=5, iterations=10)
asptagger.load(model)
sentences = ['Wer dürfen Atommacht sein, wer nicht. Da treffen sich Regierung, Atommacht und Anwärter auf die Bombe.',
'Über was werden da verhandeln?',
'Die Bombe selbst stehen nicht zur Disposition, für die Atommacht, sondern der Verfügungsanspruch eines Anwärter.',
'Der Besitz dieser Bombe verändern die politisch Option eines Staat, und damit auch die militärisch , in der Folge die politisch Option der existierend Atommacht.',
'Bereits der Wille zur Bombe werden deshalb von den real Atommacht beaufsichtigen. Diese Macht verhalten sich zum Wille eines ausländisch Souverän wie Polizei. Wer nicht gehorchen werden bestrafen.',
'Das können diese Macht, weil diese in der Lage sein ihre Anspruch an das Wohlverhalten anderer Regierung wirtschaftlich und militärisch zu erzwingen.',
'Von wegen hier gehen es um den Schutz vor einer militärisch Bedrohung.',
'Die Fähigkeit zu atomar Eskalation stehen doch nur für den Angeklagte zur Disposition.',
'Was bleiben? Die auch atomar Überlegenheit der selbsternannt Weltpolizist die sich als Helfer der Menschheit feiern lassen.',
'Und die Öffentlichkeit? Die finden wie immer alles toll, was die eigen Regierung machen. Auch kritisch; Da haben man sich über den Tisch zeihen lassen. Beweis: Die Aufhebung der Sanktion. Sein das nicht bereits ein einknick der eigen Herr?',
'So konstruktiv sein national Opportunismus,',
'Die Bombe in "unseren" Hand? Aber sicher, wir sein doch die Guter!',
'Alle anderen, wenn es so sagen werden im politisch Rundfunk, sein die Böses.',
'(i.) Sein "Satoshi Nakamoto" nicht der Name einer real Person, die den Bitcoin erfinden haben, sondern ein virtuell Nickname. Ob sich dahint eine real Person, eine real Organisation oder ein Computerprogramm verbergen, weiss kein Schwein.',
'(ii.) Sein Bitcoins nicht "mathematisch selten", sondern mit der gegenwärtig verfügbar Computer-Rechenleistung allenfalls mit einig, energetisch sauteuer Registerschiebe-Aufwand in Mikroprozessor auffindbar.',
'Ob es Bitcoins im Überfluss geben, sofern das gegenwärtig weltweit Forscher ernährend, physikalisch Konstrukt von Quantencomputer Realität werden, können "mathematisch" bis heute weder beweisen, noch widerlegen werden.',
'(iiien.) Erzeugen Bitcoins realweltlich nichts, sondern reduzieren erwas zuvor sauteuer Erzeugtes.',
'Bitcoins sein die elektrisch Heizlüfter unter den Währung.',
'Die reduzieren, Sommer wie Winter, aufwendig-geordnet erschaffen, elektrisch Energie zu popelig-ungeordnet Wärmeenergie.',
'Bitcoins machen das, was mittels Klimakonferenz reduzieren werden sollen.',
'(iv.) Eine einzig, mittels Bitcoin-Heizlüfter vorgenommen Transaktion benötigen zur Zeit 215 kWh elektrisch Energie.https://motherboard.vice....',
'Ein deutsch Haushalt verbraten ohne Bitcoin im Durchschnitt 3107 kWh, also schlapp 14 Bitcoin-Transaktion, elektrisch Energie pro Jahr.https://www.musterhaushal...',
'P.S.:',
'Wer wissen mögen, wie die virtuell "begehrenswert" Bitcoins "gebären" werden, der können sich sehr einfach ein realweltlich Bild davon machen."Photo: Life inside of China’s massiv and remote bitcoinen min"https://qz.com/1026605/ph...',
'Die Idee von bitcoin sein doch die Abschaffung gewöhnlich Währung. Das einzig, was man also tun muss, sein den investitionshyp aussitzen, bis cryptowährung zum Standard werden, international, und dann sein es auch egal, ob ein Bitcoin 500.000 Dollar wert sein, oder?^^',
'Und wenn der Bitcoin zwingen sein, so teuer zu bleiben, weil eben so viele Depp so viel investieren, wirdsen halt eine anderer Global Währung. Was ich damit sagen wollen: die cryptowährung Bitcoin an sich sein, glauben ich, zum scheit verurteilen, beziehungsweise besitzen nur ein sehr kurz Zeitfenster, in dem sie einem was nützen. Sein halt so‘n spannend Übergangsprodukt',
'Bitcoins werden auf Null oder nahe Null fallen.Das werden passieren.',
'Schon zweihundern Kommentar. Das zeigen tatsächlich die Expertise der Deutsch. Toll!Dies sein ein Fachgebiet in das man sich mindestens ein Jahr einarbeiten müssen und das drei Stunde täglich. Alles Andere sein Mumpitz. Gelten für den gesamt Kryptomarkt.Viele Akademiker. Nur mal so am Rand.',
'Wer damit real Geld machen, haben es verdienen. Wer seins verlieren auch.',
'"Derzeit vergehen kein Tag ohne Facebook-Schlagzeile.".',
'Dann lassen es doch einfach!',
'Wer entscheiden, was Fake News sein? Herr Kleber? Fake News sein von der Meinungsfreiheit decken.',
'Für anonym Account geben es keine Meinungsfreiheit.',
'Es sein ein leidig Thema mit diesem Facebook. Das einzig, was man als Einzelner dagegen tun können, sein der Boykott des Netzwerk.',
'Ich halten ja Twitter für eine groß Haß- und Fakenewsschleuder als Facebook. Allerdings sein auf Twitter hauptsächlich Politiker, Journalist und "Aktivist" unterwegs, während Facebook mehr so etwas für das gemein Volk sein.',
'Deshalb werden wohl auch mehr auf Facebook herumhacken, als auf Twitter. Der Pöbel haben ruhig zu sein.',
'Die Regierung mögen so gern handlungsfähig erscheinen, die Mitglied und die angeschlossen Medium beeilen sich, täglich neu "Grausamkeit" gegen Flüchtling zu verkünden ohne dabei die Kanzlerin und ihr "Schaff" weiter zu beschädigen.',
'Dabei sein offensichtlich: eine EU-Normalverteilung sein genauso wenig in Sicht wie eine Einigung mit Griechenland oder gar der Türkei.',
'In den Syriengespräch haben man sich nicht nur ins moralisch sondern auch ins diplomatisch Abseits manövrieren.',
'Die fortgesetzt Unterstützung für das Regime in Kiew und die beständig Wiederholung der dort verkünden Dogma engen die Handlungsoption für eine Einigung mit Russland entscheidend ein.',
'Amerika werden nicht helfen sondern erst mal wählen.',
'Nein, die Regierung sein nicht handlungsfähig.',
'Und so greifen man zu den verblieben Mittel:',
'Diffamierung der AfD wie zuvor schon der Pirat.',
'Angriff der auf Aussöhnung mit Russland bedachen Kraft.',
'Beide haben zuletzt etwas ungeschickt agieren bzw. nicht mit der an Verzweiflung grenzend Aggressivität der Medium hier rechnen.',
'Ein Witz- werden so niemals funktionieren, und das wissen die Beteilgten genau! Verzweiflungsreflex der CDU angesichts befürchtet massiv Stimmeneinbruch bei den Wahl im März.',
'Ein Witz?',
'Oder eher eine wirkungslos "Beruhigungspille" für den Wahlpöbel...',
'Erst gar nicht reinlassen sein die gut Lösung.',
'Das bedeuten 50-70 Milliarde pro Jahr an Beamten- und Materialaufwand, aber vor allem ein Anstieg der Stückgutkosten, da die lean production, Basis des Erfolg der Deutsch Industrie im Wettbewerb mit den Billiglohnland, nicht mit unkalkulierbar Transportzeit klar kommen.',
'Im Klartext Wirtschaftskrise. Nun mögen dem Beschäftigungslosen diese weniger schlimm erscheinen als eine Flüchtlingskrise, wir Arbeitenden werden aber ganz gerne unsere Job behalten.',
'Ich denken, man sollen es so machen, wie etwa die Israeli oder die Australier.',
'Wenn die Heimatstaat ihre Bürger nicht mehr zurück haben wollen, oder der Herkunftstaat unbekannt sein, sollen man in Drittstaat abschieben, mit denen man zu diesem Zweck entsprechend Vertrag machen.',
'Vielleicht fallen dem Migrant dann ja noch rechtzeitig sein Heimatland ein oder wo er seine Papier hintun haben, wenn er etwa nach Uganda abschieben werden sollen.',
'ich fragen mich, auf welcher Basis werden denn das alles prüfen.',
'Wenn einer erkären er sein Syrer, leider ohne Papier, muss das doch irgendwie prüfen werden, ihm stringent Frage stellen werden, zur Mitarbeit veranlassen werden.',
'Wenn sich dann rausstellen, er sein kein Syrer, er wollen sich nicht äussern, wo er eigentlich',
'herkommen, dann muss man doch den Antrag negativ bescheiden. Wer sein Herkunftsland nicht preisgeben, sich verweigern, wieso haben derjenige überhaupt ein Anrecht auf Asyl ? Wer wollen denn was von wem ?',
'Es gehen nicht um "links", "Linkskurs" oder das Gegenteil.',
'Es gehen um Politik für die eigen Bevölkerung.',
'Es gehen um Politik für die Deutsch von deutsch Politiker oder um',
'keine Politik für die Deutsch von deutsch Politiker.',
'Das sein die Alternative.',
'Und die SPD haben sich entscheiden.',
'Wahlergebnis von Parteivorsitzender im Bereich von 90% oder gar mehr',
'sein ein Indiz für stalinistisch Struktur innerhalb einer Partei.',
'https://www.youtube.com/w...',
'Unser Gottesgeschenk?!?',
'Mit Nahles und der jetzig Parteispitze werden die SPD leider den Weg der französisch, niederländisch, österreichisch und italienisch Sozialdemokrat gehen. Alles andere sein eine Überraschung. Die Delegierte können aber zeigen, dass die SPD DIE Demokratiepartei sein und Simone Lange ihre Stimme geben. Nur Mut: Ein Personeller Neuanfang sein alternativlos.',
'Ich stimmen Ihnen zu. Aber ich glauben nicht, dass das, was Sie aufzeigen, an einer Persönlichkeit festzumachen sein.',
'Insgesamt meinen ich, dass unsere Gesellschaft in einem anderer Fahrwasser denken und fühlen muss. Wir dürfen nicht die Verhältnis aus der Zeit des tief Menschenelends mit heute bei uns vergleichen und deshalb zeitgerecht Lösung finden. Auf dem Weg der Suche müssen gerecht Kompromiss finden werden.',
'Der feudalistisch Überfluss und die Zügellosigkeit der Gewinn- und Luxussucht sein die drastisch Gegenwart der Vergangenheit mit allem menschlich Elend weltweit.',
'Sein Las Vegas ein Vorbild, in dem Armut und Elend im Dunkele liegen?',
'Na bitten, und Söder gehen dann nach Berlin und werden Innenminister in der GroKo und können so sein Talent beim Management von Migration und Terrorbekämpfung mal richtig unter Beweis stellen....',
'Das Bild sagen mehr als tausend Wort. Go, Jo!',
"Sein sowieso flabbergasted in Anbetracht der Vorstellung, dieser blass Franke sollen ausgerechnet MP in Bayern werden. Dageg sein ja Stephan Weil ne Partymaus. Passt auch überhaupt nicht in die Reihe irgendwie. Bei Söder weißen du immer schon vorher, was er sagen werden und zwar genau wie er's sagen werden. Ein Politroboter vor dem Herr und genauso gucken er da ja auch drein. Also wie immer eigentlich.",
'Herrmann werden doch bei der Bundestagswahl komplett verbrennen. Söder sein kein Thema, wenn dem nicht so sein.',
'Mich werden eher interessieren, ob und welche politisch-inhaltlich Differenz es zwischen den Personalie geben.',
'Gegenfrage, gehen es in Bayern und seiner Führungskamarilla jemals um Politisch-Inhaltliches?',
'Eine sachlich Diskussion sein doch gar nicht erwünscht.Was haben ich denn jetzt schon wieder bös schreiben?',
'Dass sein Faschos hassen? Egal wie sie sich verkleiden und unter welchem Banner sie Meinung löschen?',
'Meinungsfreiheit nur noch für Journalist, die dann auch mal Falschzitat kommentieren dürfen?',
'Gabriel und Merkel schaden dem Ansehen DeutschlandsEntfernt. Bitte äußern Sie sich zum Thema des Artikel. Die Redaktion/cs',
'`Das Deutschen-Gen...Das Deutschen-Gen scheinen das Gen der Intoleranz zu sein, mit der ein Deutsche seine Meinung gegenüber Anderen in Forum verteidigen.',
'Können man tagtäglich bei der ZEIT beobachten.',
'Kürzen. Wir bitten Sie, sich in den Kommentar wieder dem Thema des Artikel zuwenden und weit Anmerkung zur Moderation direkt an community@zeit.de zu richten, damit im Forum eine sachlich Diskussion ermöglichen werden. Die Redaktion/cs',
'Liebe - Sarrazin - MitläuferWenn Herr Sarrazin sich zu Haus oder in seiner Kneipe mit seinen "dämlich Ansicht“ privat äußern - sein das "unter Meinungsfreiheit" noch hinnehmen - kein Hahn werden nach ihm krähen. Aber er nutzen seine exponieren Stellung zum Provozieren, um sein Buch möglichst oft zu verkaufen. Das sein nicht o.k. Für diese Provokation muss er entsprechend Kritik aushalten - die er doch so selbstverständlich an anderen üben. Die zahllos Mitläufer hier auf den Kommentarseite sollen nicht "stellvertretend für ihn" so beleidigt tun.',
'Vergessen Sie nicht, vor ca. 40 Jahr haben wir Deutsch herablassend die Einwanderung von "dumm Türke" wünschen, damit die Drecksarbeit machen werden.',
'Da finden wir die Niedrigstlohn für Türke o.k. – die kommen ja aus den doof Ecke der Türkei. Wo sein Herr Sarrazin damals, als es besorgt Stimme zu dieser arrogant Einwanderungspolitik geben.',
'Dass heute viele Mensch in diesem "tollen Deutschland" für Niedrigstlohn arbeiten, auf dem Lohnniveau damalig Einwanderer und noch darunt, sein das eigentlich Problem - und daran sein die "deutsch Rassegene, wir sein ja was Gute" ganz erheblich Schuld. Diese doof deutsch Niedriglöhner sein nämlich auch bald die Moor …wie heute die Türke. Das sein die Angst.',
'Übrigens: Als „reinrassig Deutsch“ kennen und mögen ich eine ganz Menge (hoch)intelligent, erfolgreich und obendrein auch noch sehr sympathisch Türke aus Region am Marmarameer bis nach Anatolien (wo ja die Doofen wohnen).',
'warum?Warum haben sich chinesen, russen, thaisen, italien integrieren?',
'Das sein die Frage, die zu diskutieren sein. Doch das wollen die Medium doch gar nicht, wie das wiederholen Löschen dieser Frage bei der ZEIT zeigen.',
'MP3 sein doch total Schrot. selbst im Auto. Zum Glück können meine neu Karre jetzt FLAC abspielen, vorher gehen zwar WAV, aber ich müssen extra konvertieren.',
'Selb schuld, wer seinen Ohr MP3 antun. FLAC bieten alle Vorteil: Tagging, Komprimierung, keinen Qualitätsverlust.',
'MP3´s haben bei gut Quellqualität kaum Qualitätsverlust. Um das dann noch überhaupt zu merken, brauchen man erstens ein sehr gut Gehör und zweitens mindestens ein gut Abspielgerät. Aber das Sie gleich sich ne neu Karre anschaffen, um FlAC zu hören... xD',
'Irgendwo gaanz tief unten in den Katakombe der Zeit.de-Redaktion haben jemand jetzt sehr glücklich da er/sie sehr lange darauf warten, dieses Wortspiel im Titel erscheinen...',
'Ich haben mir mal die Mühe machen und bei Spotify nach den von ihnen erwähnen Künstler machen.',
'Hugo Alfven, Thomas Arne, Carles Baguer, Mily Balakirev, Jiri Antonin Benda, William Sterndal Bennett finden sich alle bei Spotify, was ja klar sagen das solche Dienst nicht nur den Mainstream bedienen mögen.']
# Naive whitespace tokenisation — the tagger expects pre-tokenised sentences.
sentences = [s.split() for s in sentences]
for sentence in sentences:
    # tag_sentence yields (token, tag) pairs for one sentence.
    tagged_sentence = asptagger.tag_sentence(sentence)
    # Print one token<TAB>tag line per token, with a blank line between sentences.
    print("\n".join(["\t".join(t) for t in tagged_sentence]), "\n", sep="")
```
|
github_jupyter
|
```
import sys
import tensorflow as tf
from sklearn.datasets import load_boston
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('dark_background')
boston = load_boston()
''' THIS is how you print the name of a function from within the function
print(sys._getframe().f_code.co_name)
'''#print([x for x in dir(boston) if '_' not in x])
#print(boston.DESCR)
def load_dat(dat: pd.DataFrame) -> pd.DataFrame:
    """Attach the Boston-housing feature names to *dat* and append a bias column.

    Parameters
    ----------
    dat : pd.DataFrame
        Raw feature matrix with 13 integer-named columns, in the canonical
        Boston-housing feature order.

    Returns
    -------
    pd.DataFrame
        The same data with named columns plus a constant ``ones`` column
        (the intercept term for the linear-regression graph below).
    """
    feature_names = ['CRIM', 'ZN', 'INDUS', 'CHAS',
                     'NOX', 'RM', 'AGE', 'DIS', 'RAD',
                     'TAX', 'PTRATIO', 'B', 'LSTAT']
    # Bug fix: `pd.np` was removed in pandas 1.0+.  Assigning a scalar in
    # `.assign` broadcasts a constant column of the right length, which is
    # equivalent to the original `pd.np.ones(dat.shape[0])`.
    return (dat.rename(columns=dict(enumerate(feature_names)))
               .assign(ones=1.0))
# Target vector (median house value) and named feature matrix with bias column.
y_ = pd.DataFrame(boston.target, columns=['MEDV'])
X_ = load_dat(pd.DataFrame(boston.data))
# Freeze the data into TF1 constant tensors for the graph operations below.
X = tf.constant(X_.values, tf.float32, name='X')
y = tf.constant(y_.values, tf.float32, name='y')
print(X, y)
print([x for x in dir(X) if '_' not in x])
X_.head()
# def predict(X: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
# ''' will return yhat as a tensor '''
# beta = tf.ones((X.shape[1],1))
# #print(beta)
# o = tf.matmul(a=X, b=beta)
# with tf.Session() as sess:
# x = sess.run(o)
# return tf.constant(x, name='yhat')
# predict(X, y)
gam = tf.pow(2., -10, name='gamma')
beta = tf.ones((X.shape[1],1))
'''
loss = tf.reduce_sum(tf.pow(tf.subtract(y,
o),
tf.constant(2.)))
# we want grad_loss to be a 14 x 1 vector
gradloss_ = tf.multiply(beta,
tf.reduce_sum(tf.subtract(y, o), axis=1))
gradloss = tf.multiply(tf.constant(-2, tf.float32),
gradloss_r)
#gradloss_r = tf.multiply(beta, X)'''
'''from wikip
grad(loss(beta)) == 2 * X^T * (Xbeta - y)
'''
gradloss = tf.multiply(tf.constant(2, tf.float32),
tf.matmul(a=X,
b=tf.subtract(o,
y),
transpose_a=True))
gam = tf.pow(2., -10, name='gamma')
beta = tf.ones((X.shape[1],1))
def gradloss(beta: tf.Tensor) -> tf.Tensor:
    """Gradient of the squared-error loss at *beta*.

    From Wikipedia: grad(loss(beta)) == 2 * X^T * (X @ beta - y),
    where X and y are the module-level constant tensors.
    """
    # Residual of the current prediction against the targets.
    residual = tf.subtract(tf.matmul(a=X, b=beta), y)
    return tf.multiply(tf.constant(2, tf.float32),
                       tf.matmul(a=X, b=residual, transpose_a=True))
def beta_next(beta_prev: tf.Tensor) -> tf.Tensor:
    """One gradient-descent step: beta - gamma * grad(loss)(beta)."""
    return tf.subtract(beta_prev, tf.multiply(gam, gradloss(beta_prev)))
with tf.Session() as sess:
    # Evaluate the loss gradient after three gradient-descent steps from the
    # all-ones starting coefficients.
    x = sess.run(gradloss(beta_next(beta_next(beta_next(beta)))))
    print(x)
with tf.Session() as sess:
    # Bug fix: a bare `assert` with no expression is a SyntaxError in Python,
    # which prevented this whole cell from compiling.  Assert the expected
    # element-wise product instead, then print it as before.
    product = sess.run(tf.multiply(tf.constant([9,8,7,6,5]), tf.constant([1,2,3,4,5])))
    assert list(product) == [9, 16, 21, 24, 25]
    print(product)
```
|
github_jupyter
|
# Import the data
```
import pandas as pd
import numpy as np
import networkx as nx
import statsmodels
import statsmodels.api as sm
import scipy.stats as stats
import matplotlib.pyplot as plt
# import the csv file with JUST the politicians post
comDB = pd.read_csv(r"/Users/tassan-mazzoccoarthur/Desktop/NETWORK SCIENCE (MOD. B)/1_Project/database/com_liwc.csv", sep='\t', engine='python')
df = pd.DataFrame(data=comDB)
df
df_trunc = df[df['c_rating']=='positivo']
df_trunc
```
# Matrix creation
```
## Matrix creation with parties, politicians, posts
# Setting variables: the unique values define one node per party/politician/post.
size_df= len(df)
parties=df['p_PARTITO'].unique().tolist()
pol=df['p_politician'].unique().tolist()
post=df['p_id'].unique().tolist()
#Matrix shape= root/party/party.....pol/pol/pol/...../post/post/.../com/com
total_size=1+len(parties)+len(pol)+len(post)+size_df
# shift_size = index of the first comment node (everything before the comments).
shift_size=1+len(parties)+len(pol)+len(post)
# Identity matrix so every node starts with a self-loop of weight 1.
matrix = np.identity(total_size,dtype=int)
for i in range(size_df):
    # for each row (comment) we add 1 to the links root-party, party-politician,
    # politician-post and post-comment
    index_party=parties.index(df['p_PARTITO'][i])+1 #add 1 for the root element
    index_pol=pol.index(df['p_politician'][i])+1+len(parties)
    index_post=post.index(df['p_id'][i])+1+len(parties)+len(pol)
    #We fill the 1rst half of the matrix
    matrix[0][index_party]+=1 #add 1 to link root-party
    matrix[index_party][index_pol]+=1 #add 1 to the link party-politician
    # NOTE(review): this indexes the post node by the ROW number i (+offset),
    # not by index_post; when a post has several comments (rows) the two
    # differ and the politician-post link lands on the wrong node — verify.
    matrix[index_pol][i+1+len(parties)+len(pol)]+=1 #1 to the link politician-post
    matrix[index_post][i+shift_size]+=1 #1 to the link post-comment
    #now we fill the other half (lower-left one)
    matrix[index_party][0]+=1
    matrix[index_pol][index_party]+=1
    matrix[i+1+len(parties)+len(pol)][index_pol]+=1
    matrix[i+shift_size][index_post]+=1
m=np.asmatrix(matrix)
print(m.shape)
```
## Topic matrix:
```
#connect comments together on same topic -> only post and com matrix
topic=df['c_topic'].unique().tolist()
#Matrix creation
#topic/topic/...com/com
total_size=len(topic)+size_df
shift_size=len(topic)
topic_matrix = np.identity(total_size,dtype=int)
for i in range(size_df):
index_topic=topic.index(df['c_topic'][i])+shift_size
topic_matrix[index_topic][i+shift_size]+=1 #1 to the link comment-topic
topic_matrix[i+shift_size][index_topic]+=1
m_topic=np.asmatrix(topic_matrix)
print(m_topic.shape)
#NEED TO ADD LINK BETWEEN TOPICS????
#CREATE A ROOT????
#ADD C_RATING
m_topic
topic=df['c_topic'].unique().tolist()
len(topic)
G_topic = nx.from_numpy_matrix(m_topic)
nx.write_graphml(G_topic, "topic_com_attributes_networkx_export.graphml")
```
# Adding attributes
```
import math
#create a dictionnary of attributes to update a networkx graph
# you should change the elements of the "attributes" array to select the attributes you want
def create_attributes_dict_com(pandas_df, index_shift):
    """Build a node-attribute dict for the comment nodes of the graph.

    Parameters
    ----------
    pandas_df : pd.DataFrame
        Comment dataframe; one row per comment node.
    index_shift : int
        Amount to shift each dataframe index by so it lands on the comment
        node's index in the adjacency matrix (e.g. the number of topic nodes
        when the matrix layout is topic/.../comment/...).

    Returns
    -------
    dict
        ``{node_index: {attribute: value, ..., "label": ..., "type": "com"}}``
        in the shape expected by ``nx.set_node_attributes``.
    """
    # Change this list to select which dataframe columns become node attributes.
    attributes = ["p_PARTITO"]
    # attributes = ["p_PARTITO","p_politician","p_favoriteCount","p_shareCount",
    #               "p_replyCount","p_numComments","p_rating","p_topic",
    #               "p_campagna","c_rating"]
    att_dict = {}  # final dict to be returned
    for index in pandas_df.index:
        temp_dict = {}
        for att in attributes:
            value = pandas_df[att][index]
            # Drop NaN values (missing entries surface as float('nan')).
            if isinstance(value, float):
                if not math.isnan(value):
                    temp_dict[att] = value
            else:
                temp_dict[att] = value
        # Bug fix: the original read the global `shift_size` here instead of
        # the `index_shift` parameter — a NameError when the global is absent
        # and a wrong label whenever the two differ.
        temp_dict["label"] = index + index_shift  # node label = matrix index
        temp_dict["type"] = "com"  # mark the node type
        att_dict[index + index_shift] = temp_dict.copy()
    return att_dict
#create a dictionnary of attributes for the topics
def add_com_topic_att(pandas_df, topic): #param = lists
att_dict = {}
for i in range(len(topic)):
#att_dict[i+1]={"label":topic[i], "type":"topic"} #this line if we have a root element
att_dict[i]={"label":topic[i], "type":"topic"}
return att_dict
attributes_dict_com=create_attributes_dict_com(df,shift_size) #we create the dict for the comments
missing_attributes_dict_com=add_com_topic_att(df,topic) #we create the dict for the topic...
attributes_dict_com.update(missing_attributes_dict_com) #and add it to the one for the comments
# Bug fix: the original called set_node_attributes on `G`, which is never
# defined in this notebook; the graph being annotated and exported is G_topic.
nx.set_node_attributes(G_topic, attributes_dict_com) #finally we set the nodes' attributes in the graph
nx.write_graphml(G_topic, "topic_com_attributes_networkx_export.graphml") #export the graph as a GraphML XML file
```
# Some checks
```
import networkx as nx
#First let's try with a small matrix
m_trunc=m[:40000,:40000]
np.set_printoptions(linewidth=200)
print(m_trunc)
G_trunc = nx.from_numpy_matrix(m_trunc)
nx.write_graphml(G_trunc, "com_attributes_networkx_export.graphml")
```
|
github_jupyter
|
```
import glob
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
%matplotlib inline
warnings.filterwarnings('ignore')
# NOTE(review): this grabs whichever CSV globbing finds; it fails if the
# working directory contains zero or more than one CSV file — verify.
file = glob.iglob('*.csv')
df = pd.read_csv(*file)
print(f"The Dimension of the data is - {df.shape}")
df.head()
df.tail()
# Features = all columns but the last; target = last column.
X = df.iloc[:, :-1].values
Y = df.iloc[:, -1].values
X
Y
print("Size of X: {}".format(X.shape))
print("Size of Y: {}".format(Y.shape))
# Hold out 20% for testing, with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                   Y,
                                                   test_size = 0.2,
                                                   random_state = 0)
print("Size of X_train: {}".format(X_train.shape))
print("Size of X_test: {}".format(X_test.shape))
print("Size of Y_train: {}".format(Y_train.shape))
print("Size of Y_test: {}".format(Y_test.shape))
# Standardise features; fit on the training split only to avoid leakage.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_train
X_test
# Project onto the top 2 linear discriminants (supervised reduction).
lda = LDA(solver = 'eigen',
          n_components = 2)
X_train = lda.fit_transform(X_train, Y_train)
X_test = lda.transform(X_test)
X_train
X_test
# Logistic regression on the 2-D LDA projection.
classifier = LogisticRegression(verbose = 1,
                                random_state = 42,
                                n_jobs = -1)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
y_pred
cm = confusion_matrix(Y_test, y_pred)
cm
acc = accuracy_score(Y_test, y_pred)
print(f"The accuracy of the model is - {acc*100:.3f}%")
report = classification_report(Y_test, y_pred)
print(report)
# Visualizing the Training Set Results
figure = plt.figure(figsize = (10,10))
x_set, y_set = X_train, Y_train
# Dense grid over the 2-D LDA plane, used to paint the decision regions.
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
                               stop = x_set[:, 0].max() + 1,
                               step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1,
                               stop = x_set[:, 1].max() + 1,
                               step = 0.01))
# Bug fix: the keyword is `cmap`, not `camp` — the typo meant the colour
# map was never applied to the decision-region fill.
plt.contourf(X1,
             X2,
             classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             cmap = ListedColormap(('red', 'green', 'blue')),
             alpha = 0.4
             )
# Overlay the training samples, one colour per class.
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0],
                x_set[y_set == j, 1],
                color = ListedColormap(('red', 'green', 'blue'))(i),
                label = j,
                s = 15,
                marker = '*'
                )
plt.xlim(X1.min(), X1.max())
# Bug fix: the second axis limit must be set with ylim; the original called
# xlim twice, leaving the y-axis range untouched.
plt.ylim(X2.min(), X2.max())
# Bug fix: the axes are linear discriminants (LDA), not principal components.
plt.title('Linear Discriminant Analysis (LDA) - Train')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
# Visualizing the Test Set Results
figure = plt.figure(figsize = (10,10))
x_set, y_set = X_test, Y_test
# Dense grid over the 2-D LDA plane, used to paint the decision regions.
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
                               stop = x_set[:, 0].max() + 1,
                               step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1,
                               stop = x_set[:, 1].max() + 1,
                               step = 0.01))
# Bug fix: the keyword is `cmap`, not `camp` — the typo meant the colour
# map was never applied to the decision-region fill.
plt.contourf(X1,
             X2,
             classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             cmap = ListedColormap(('red', 'green', 'blue')),
             alpha = 0.4
             )
# Overlay the test samples, one colour per class.
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0],
                x_set[y_set == j, 1],
                color = ListedColormap(('red', 'green', 'blue'))(i),
                label = j,
                s = 15,
                marker = '*'
                )
plt.xlim(X1.min(), X1.max())
# Bug fix: the second axis limit must be set with ylim; the original called
# xlim twice, leaving the y-axis range untouched.
plt.ylim(X2.min(), X2.max())
# Bug fix: the axes are linear discriminants (LDA), not principal components.
plt.title('Linear Discriminant Analysis (LDA) - Test')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
```
|
github_jupyter
|
## Make your own heatmap based on Strava activities
This notebook shows you how to create your own heatmap based on your Strava activities.
You need to create a Strava API application in order to use their API. Follow the instructions on this page to create your app: <https://medium.com/@annthurium/getting-started-with-the-strava-api-a-tutorial-f3909496cd2d>
After setting up the app, note down the following information (you will need it to run this notebook):
- Client id
- Client secret
**Note:** Strava imposes some request limits (30'000/day, and 600/every 15min).
```
!pip install stravaio folium
import os
import logging
import json
import urllib
import requests
import folium
from stravaio import StravaIO
# Paste your client id and client secret here.
STRAVA_CLIENT_ID = "ENTER-YOUR-CLIENT-ID"
STRAVA_CLIENT_SECRET = "ENTER-YOUR-CLIENT-SECRET"
```
### Authorization with Strava
The cell below creates the proper authorization link using the Stravaio Python library, which is used later to retrieve activities.
It is important to run this cell, just pasting the access_token from your Strava settings will not work, because Stravaio needs to be authorized.
- Run the cell below and click the link that is printed, when prompted click "Authorize" on the website that opens
- After you click "Authorize" you see something like, "This site can't be reached"
- Stay on that page and look at the URL
- The URL will show the authorization code (the bit after "code=" in the URL) and scope you accepted
- Copy the code and paste it below and continue the notebook execution
More detailed info can be found here:
- <https://developers.strava.com/docs/getting-started/>
- <https://developers.strava.com/docs/authentication/>
```
params_oauth = {
"client_id": STRAVA_CLIENT_ID,
"response_type": "code",
"redirect_uri": f"http://localhost:8000/authorization_successful",
"scope": "read,profile:read_all,activity:read",
"state": 'https://github.com/sladkovm/strava-http', # Sladkovm is the author of the Stravaio library
"approval_prompt": "force"
}
values_url = urllib.parse.urlencode(params_oauth)
base_url = 'https://www.strava.com/oauth/authorize'
authorize_url = base_url + '?' + values_url
print(authorize_url)
# Paste the code from the URL here. Afterwards there are no manual steps anymore.
AUTHORIZATION_CODE = "ENTER-YOUR-AUTHORIZATION-CODE"
```
The following cell retrieves an access token using the authorization code. That access token can then be used to retrieve Strava data.
```
# Exchange the one-time authorization code for an OAuth access token.
payload = {
    "client_id": STRAVA_CLIENT_ID,
    "client_secret": STRAVA_CLIENT_SECRET,
    "grant_type": "authorization_code",
    "code": AUTHORIZATION_CODE,
}
response = requests.request(
    "POST", "https://www.strava.com/api/v3/oauth/token", data=payload
)
# NOTE(review): no HTTP status check — a failed exchange surfaces as a
# KeyError on "access_token" below rather than a clear error; verify.
response = json.loads(response.text)
TOKEN = response["access_token"]
# NOTE(review): duplicate install — stravaio/folium were already installed
# in an earlier cell.
!pip install stravaio folium
client = StravaIO(access_token=TOKEN)
athlete = client.get_logged_in_athlete()
# NOTE(review): `after` is presumably meant as the date 2017-01-01; confirm
# the format stravaio expects (epoch seconds vs. yyyymmdd).
activities = client.get_logged_in_athlete_activities(after=20170101)
m = folium.Map(
tiles="cartodbpositron",
location=[59.925, 10.728123],
zoom_start=11.5,
control_scale=True
)
folium.TileLayer("cartodbpositron").add_to(m)
folium.TileLayer("cartodbdark_matter").add_to(m)
folium.LayerControl().add_to(m)
def downsample(l, n):
    """Keep every *n*-th element of list *l*; n=1 returns the list unchanged.

    Used to thin out the GPS points per activity so the rendered map
    stays responsive.
    """
    return l[::n]
def map_activities(activities, folium_map, opacity=0.5, weight=1):
    """Draw each Strava activity as a coloured polyline on *folium_map*.

    Parameters
    ----------
    activities : list
        Activity summaries from ``client.get_logged_in_athlete_activities``.
    folium_map : folium.Map
        The map to draw on; returned (mutated) for convenience.
    opacity, weight : float
        Styling of the polylines.

    Relies on the module-level ``client`` and ``athlete`` to fetch the GPS
    stream of each activity.
    """
    if not activities:
        logging.info("No activities found, returning empty folium map.")
        return folium_map
    # Colour per activity type; anything unlisted gets the fallback colour.
    type_colors = {
        "Run": "#ff9933",
        "Ride": "#0066ff",
        "NordicSki": "#00ffff",
        "AlpineSki": "#00ccff",
        "Canoeing": "#00ff55",
        "IceSkate": "#f6ff00",
    }
    fallback_color = "#cc00ff"
    for a in activities:
        if a.type == "Workout":
            continue  # workouts carry no useful GPS track
        streams = client.get_activity_streams(a.id, athlete.id)
        try:
            points = list(zip(streams.lat, streams.lng))
            points = downsample(l=points, n=2)
            folium.PolyLine(
                locations=points,
                color=type_colors.get(a.type, fallback_color),
                opacity=opacity,
                weight=weight,
            ).add_to(folium_map)
            # Fix: the original logged routine successes at CRITICAL level,
            # drowning out real problems; INFO is the appropriate severity.
            logging.info("Mapped activity with id: {}".format(a.id))
        except Exception:
            # Streams can be missing (e.g. manual entries); skip the activity
            # but keep the traceback in the log instead of discarding it.
            logging.exception("Could not map activity with id: {}".format(a.id))
    return folium_map
m = map_activities(
activities=activities,
folium_map=m,
opacity=0.5,
weight=2
)
m
```
|
github_jupyter
|
# <p style="text-align: center;"> Part Two: Scaling & Normalization </p>
```
from IPython.display import HTML
from IPython.display import Image
Image(url= "https://miro.medium.com/max/3316/1*yR54MSI1jjnf2QeGtt57PA.png")
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
```
# <p style="text-align: center;"> Table of Contents </p>
- ## 1. [Introduction](#Introduction)
- ### 1.1 [Abstract](#abstract)
- ### 1.2 [Importing Libraries](#importing_libraries)
- ## 2. [Data Scaling](#data_scaling)
- ### 2.1 [Standardization](#standardization)
- ### 2.2 [Normalization](#normalization)
- ### 2.3 [The Big Question – Normalize or Standardize?](#the_big_question)
- ### 2.4 [Implementation](#implementation)
- #### 2.4.1 [Original Distributions](#original_distributions)
- #### 2.4.2 [Adding a Feature with Much Larger Values](#larger_values)
- #### 2.4.3 [MinMaxScaler](#min_max_scaler)
- #### 2.4.4 [StandardScaler](#standard_scaler)
- #### 2.4.5 [RobustScaler](#robust_scaler)
- #### 2.4.6 [Normalizer](#normalizer)
- #### 2.4.7 [Combined Plot](#combined_plot)
- ## 3. [Conclusion](#Conclusion)
- ## 4. [Contribution](#Contribution)
- ## 5. [Citation](#Citation)
- ## 6. [License](#License)
# <p style="text-align: center;"> 1.0 Introduction </p> <a id='Introduction'></a>
# 1.1 Abstract <a id='abstract'></a>
Welcome to Part Two of the Data Cleaning series. This notebook covers scaling and normalization of numeric data.
[Back to top](#Introduction)
# 1.2 Importing Libraries <a id='importing_libraries'></a>
This is the official start to any Data Science or Machine Learning Project. A Python library is a reusable chunk of code that you may want to include in your programs/ projects.
In this step we import a few libraries that are required in our program. Some major libraries that are used are Numpy, Pandas, MatplotLib, Seaborn, Sklearn etc.
[Back to top](#Introduction)
```
# modules we'll use
import pandas as pd
import numpy as np
# for Box-Cox Transformation
from scipy import stats
# for min_max scaling
from sklearn import preprocessing
from mlxtend.preprocessing import minmax_scaling
# plotting modules
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from astropy.table import Table, Column
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
matplotlib.style.use('ggplot')
np.random.seed(34)
```
# 2.0 Data Scaling <a id='data_scaling'></a>
## Why Should we Use Feature Scaling?
The first question we need to address – why do we need to scale the variables in our dataset? Some machine learning algorithms are sensitive to feature scaling while others are virtually invariant to it.
Machine learning models learn a mapping from input variables to an output variable. As such, the scale and distribution of the data drawn from the domain may be different for each variable. Input variables may have different units (e.g. feet, kilometers, and hours) that, in turn, may mean the variables have different scales.
### Gradient Descent Based Algorithms
Machine learning algorithms like linear regression, logistic regression, neural network, etc. that use gradient descent as an optimization technique require data to be scaled. Take a look at the formula for gradient descent below:

The presence of feature value X in the formula will affect the step size of the gradient descent. The difference in ranges of features will cause different step sizes for each feature. To ensure that the gradient descent moves smoothly towards the minima and that the steps for gradient descent are updated at the same rate for all the features, we scale the data before feeding it to the model.
> Having features on a similar scale can help the gradient descent converge more quickly towards the minima.
### Distance-Based Algorithms
Distance algorithms like KNN, K-means, and SVM are most affected by the range of features. This is because behind the scenes they are using distances between data points to determine their similarity.
For example, let’s say we have data containing high school CGPA scores of students (ranging from 0 to 5) and their future incomes (in thousands Dollars):

Since both the features have different scales, there is a chance that higher weightage is given to features with higher magnitude. This will impact the performance of the machine learning algorithm and obviously, we do not want our algorithm to be biased towards one feature.
> Therefore, we scale our data before employing a distance based algorithm so that all the features contribute equally to the result.

The effect of scaling is conspicuous when we compare the Euclidean distance between data points for students A and B, and between B and C, before and after scaling as shown below:

Scaling has brought both the features into the picture and the distances are now more comparable than they were before we applied scaling.
### Tree-Based Algorithms
Tree-based algorithms, on the other hand, are fairly insensitive to the scale of the features. Think about it, a decision tree is only splitting a node based on a single feature. The decision tree splits a node on a feature that increases the homogeneity of the node. This split on a feature is not influenced by other features.
So, there is virtually no effect of the remaining features on the split. This is what makes them invariant to the scale of the features!
One of the reasons that it's easy to get confused between scaling and normalization is because the terms are sometimes used interchangeably and, to make it even more confusing, they are very similar! In both cases, you're transforming the values of numeric variables so that the transformed data points have specific helpful properties.
[Back to top](#Introduction)
## 2.1 Standardization <a id='standardization'></a>
**Scaling (Standardization):** Change in the range of your data.
Differences in the scales across input variables may increase the difficulty of the problem being modeled. A model with large weight values is often unstable, meaning that it may suffer from poor performance during learning and sensitivity to input values resulting in higher generalization error.
This means that you're transforming your data so that it fits within a specific scale, like 0-100 or 0-1. You want to scale data when you're using methods based on measures of how far apart data points are, like support vector machines (SVM) or k-nearest neighbors (KNN). With these algorithms, a change of "1" in any numeric feature is given the same importance.
For example, you might be looking at the prices of some products in both Yen and US Dollars. One US Dollar is worth about 100 Yen, but if you don't scale your prices, methods like SVM or KNN will consider a difference in price of 1 Yen as important as a difference of 1 US Dollar! This clearly doesn't fit with our intuitions of the world. With currency, you can convert between currencies. But what about if you're looking at something like height and weight? It's not entirely clear how many pounds should equal one inch (or how many kilograms should equal one meter).
By scaling your variables, you can help compare different variables on equal footing
Standardization is a scaling technique where the values are centered around the mean with a unit standard deviation. This means that the mean of the attribute becomes zero and the resultant distribution has a unit standard deviation.
Here’s the formula for standardization:

- Mu is the mean of the feature values and
- Sigma is the standard deviation of the feature values. Note that in this case, the values are not restricted to a particular range.
[Back to top](#Introduction)
```
# Draw 1000 samples from an exponential distribution.
original_data = np.random.exponential(size=1000)
# Rescale the samples into the [0, 1] interval with min-max scaling.
scaled_data = minmax_scaling(original_data, columns=[0])
# Show the raw and rescaled samples side by side for comparison.
figure, axes = plt.subplots(1, 2)
sns.distplot(original_data, ax=axes[0])
axes[0].set_title("Original Data")
sns.distplot(scaled_data, ax=axes[1])
axes[1].set_title("Scaled data")
```
## 2.2 Normalization <a id='normalization'></a>
**Normalization:** Change in the shape of the distribution of data.
Normalization scales each input variable separately to the range 0-1, which is the range for floating-point values where we have the most precision. Normalization requires that you know or are able to accurately estimate the minimum and maximum observable values. You may be able to estimate these values from your available data.
Scaling just changes the range of your data. Normalization is a more radical transformation. The point of normalization is to change your observations so that they can be described as a normal distribution.
Normal distribution: Also known as the "bell curve", this is a specific statistical distribution where roughly equal numbers of observations fall above and below the mean, the mean and the median are the same, and there are more observations closer to the mean. The normal distribution is also known as the Gaussian distribution.
In general, you'll normalize your data if you're going to be using a machine learning or statistics technique that assumes your data is normally distributed. Some examples of these include linear discriminant analysis (LDA) and Gaussian naive Bayes. (Pro tip: any method with "Gaussian" in the name probably assumes normality.)
Normalization is a scaling technique in which values are shifted and rescaled so that they end up ranging between 0 and 1. It is also known as Min-Max scaling.
Here’s the formula for normalization:

Here, Xmax and Xmin are the maximum and the minimum values of the feature respectively.
- When the value of X is the minimum value in the column, the numerator will be 0, and hence X’ is 0
- On the other hand, when the value of X is the maximum value in the column, the numerator is equal to the denominator and thus the value of X’ is 1
- If the value of X is between the minimum and the maximum value, then the value of X’ is between 0 and 1
**PS:-** The method we're using to normalize here is called the Box-Cox Transformation.
Now, the big question in your mind must be when should we use normalization and when should we use standardization? Let’s find out!
[Back to top](#Introduction)
```
# Apply a Box-Cox transform to pull the exponential data toward normality.
normalized_data = stats.boxcox(original_data)
# Compare the raw samples with the normalized ones side by side.
figure, axes = plt.subplots(1, 2)
sns.distplot(original_data, ax=axes[0])
axes[0].set_title("Original Data")
# stats.boxcox returns (transformed_values, fitted_lambda); plot the values.
sns.distplot(normalized_data[0], ax=axes[1])
axes[1].set_title("Normalized data")
```
## 2.3 The Big Question – Normalize or Standardize? <a id='the_big_question'></a>
Normalization vs. standardization is an eternal question among machine learning newcomers. Let me elaborate on the answer in this section.
- Normalization is good to use when you know that the distribution of your data does not follow a Gaussian distribution. This can be useful in algorithms that do not assume any distribution of the data like K-Nearest Neighbors and Neural Networks.
- Standardization, on the other hand, can be helpful in cases where the data follows a Gaussian distribution. However, this does not have to be necessarily true. Also, unlike normalization, standardization does not have a bounding range. So, even if you have outliers in your data, they will not be affected by standardization.
However, at the end of the day, the choice of using normalization or standardization will depend on your problem and the machine learning algorithm you are using. There is no hard and fast rule to tell you when to normalize or standardize your data. You can always start by fitting your model to raw, normalized and standardized data and compare the performance for best results.
It is a good practice to fit the scaler on the training data and then use it to transform the testing data. This would avoid any data leakage during the model testing process. Also, the scaling of target values is generally not required.
[Back to top](#Introduction)
## 2.4 Implementation <a id='implementation'></a>
This is all good in theory, but how do we implement it in practice? The sklearn library has various modules in its preprocessing section which implement these techniques in different ways. The four that are most widely used, and that we're going to implement here, are:
- **MinMaxScaler:** The MinMaxScaler transforms features by scaling each feature to a given range. This range can be set by specifying the feature_range parameter (default at (0,1)). This scaler works better for cases where the distribution is not Gaussian or the standard deviation is very small. However, it is sensitive to outliers, so if there are outliers in the data, you might want to consider another scaler.
> x_scaled = (x-min(x)) / (max(x)–min(x))
- **StandardScaler:** Sklearn's main scaler, the StandardScaler, uses a strict definition of standardization to standardize data. It purely centers the data by using the following formula, where u is the mean and s is the standard deviation.
> x_scaled = (x — u) / s
- **RobustScaler:** If your data contains many outliers, scaling using the mean and standard deviation of the data is likely to not work very well. In these cases, you can use the RobustScaler. It removes the median and scales the data according to the quantile range. By default, the scaler uses the Inter Quartile Range (IQR), which is the range between the 1st quartile and the 3rd quartile. The quantile range can be manually set by specifying the quantile_range parameter when initiating a new instance of the RobustScaler.
- **Normalizer:**
- **‘l1’:** The l1 norm uses the sum of the absolute values, and thus gives equal penalty to all parameters, enforcing sparsity.
> x_normalized = x / sum(X)
- **‘l2’:** The l2 norm uses the square root of the sum of all the squared values. This creates smoothness and rotational invariance. Some models, like PCA, assume rotational invariance, and so l2 will perform better.
> x_normalized = x / sqrt(sum((i\**2) for i in X))
**`TLDR`**
- Use MinMaxScaler as your default
- Use RobustScaler if you have outliers and can handle a larger range
- Use StandardScaler if you need normalized features
- Use Normalizer sparingly - it normalizes rows, not columns
[Back to top](#Introduction)
### 2.4.1 Original Distributions <a id='original_distributions'></a>
Let's make several types of random distributions. We're doing this because when we deal with real world data, the data is not necessarily in a normal (Gaussian) distribution. Each type of scaling may have a different effect depending on the type of the distribution, thus we take examples of 5 different type of distributions here.
- **Beta:** The Beta distribution is a probability distribution on probabilities.
- **Exponential:** The exponential distribution is a probability distribution which represents the time between events in a Poisson process.
- **Normal (Platykurtic):** The term "platykurtic" refers to a statistical distribution in which the excess kurtosis value is negative. For this reason, a platykurtic distribution will have thinner tails than a normal distribution, resulting in fewer extreme positive or negative events.
- **Normal (Leptokurtic):** Leptokurtic distributions are statistical distributions with kurtosis over three. It is one of three major categories found in kurtosis analysis.
- **Bimodal:** The bimodal distribution has two peaks.
[Back to top](#Introduction)
```
# Assemble a DataFrame whose columns follow different kinds of distributions,
# so each scaler's effect can be compared across distribution shapes.
df = pd.DataFrame({
    'beta': np.random.beta(5, 1, 1000) * 60,         # beta
    'exponential': np.random.exponential(10, 1000),  # exponential
    'normal_p': np.random.normal(10, 2, 1000),       # normal platykurtic
    'normal_l': np.random.normal(10, 10, 1000),      # normal leptokurtic
})
# Stitch together two well-separated normals to get a bimodal column.
first_half = np.random.normal(20, 3, 500)
second_half = np.random.normal(-20, 3, 500)
bimodal = np.concatenate([first_half, second_half])
df['bimodal'] = bimodal
# Keep the column names handy for re-labelling scaled copies later.
col_names = list(df.columns)
```
After defining the distributions, lets visualize them
```
# Overlay a KDE of every starting distribution on a single axis.
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('Original Distributions')
for column in ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal']:
    sns.kdeplot(df[column], ax=ax1)
# Summary statistics, then a raw line plot of the same columns.
df.describe()
df.plot()
```
As we can clearly see from the statistics and the plots, all values are in the same ball park. But what happens if we disturb this by adding a feature with much larger values.
### 2.4.2 Adding a Feature with Much Larger Values <a id='larger_values'></a>
This feature could be home prices, for example.
[Back to Top](#Introduction)
```
# Inject a feature whose values dwarf the others (think home prices).
normal_big = np.random.normal(1000000, 10000, (1000, 1))  # normal distribution of large values
df['normal_big'] = normal_big
col_names.append('normal_big')
# KDE of the new feature on its own, plus its sample mean.
df['normal_big'].plot(kind='kde')
df['normal_big'].mean()
```
We've got a normalish distribution with a mean near 1,000,000. But if we put this on the same plot as the original distributions, you can't even see the earlier columns.
```
# Re-plot the distributions with the large-valued feature included;
# its scale makes the original columns effectively invisible.
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('Original Distributions')
for column in ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal', 'normal_big']:
    sns.kdeplot(df[column], ax=ax1)
df.describe()
```
The new, high-value distribution is way to the right. And here's a plot of the values.
```
# Line plot of raw values: the huge-valued feature flattens everything else.
df.plot()
```
### 2.4.3 MinMaxScaler <a id='min_max_scaler'></a>
MinMaxScaler subtracts the column minimum from each value and then divides by the range (maximum minus minimum).
[Back to Top](#Introduction)
```
# Fit a MinMaxScaler on every column and rebuild a labelled DataFrame.
mm_scaler = preprocessing.MinMaxScaler()
df_mm = pd.DataFrame(mm_scaler.fit_transform(df), columns=col_names)
# Overlay the rescaled distributions on one axis.
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After MinMaxScaler')
for column in ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal', 'normal_big']:
    sns.kdeplot(df_mm[column], ax=ax1)
df_mm.describe()
```
Notice how the shape of each distribution remains the same, but now the values are between 0 and 1. Our feature with much larger values was brought into scale with our other features.
### 2.4.4 StandardScaler <a id='standard_scaler'></a>
StandardScaler scales each column to have 0 mean and unit variance.
[Back to Top](#Introduction)
```
# Standardize every column to zero mean and unit variance.
s_scaler = preprocessing.StandardScaler()
df_s = pd.DataFrame(s_scaler.fit_transform(df), columns=col_names)
# Overlay the standardized distributions on one axis.
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After StandardScaler')
for column in ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal', 'normal_big']:
    sns.kdeplot(df_s[column], ax=ax1)
```
You can see that all features now have 0 mean.
```
# Confirm every standardized column now has (approximately) zero mean.
df_s.describe()
```
### 2.4.5 RobustScaler <a id='robust_scaler'></a>
RobustScaler subtracts the column median and divides by the interquartile range.
[Back to Top](#Introduction)
```
# Scale using the median and IQR, which is robust to outliers.
r_scaler = preprocessing.RobustScaler()
df_r = pd.DataFrame(r_scaler.fit_transform(df), columns=col_names)
# Overlay the robust-scaled distributions on one axis.
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After RobustScaler')
for column in ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal', 'normal_big']:
    sns.kdeplot(df_r[column], ax=ax1)
df_r.describe()
```
Although the range of values for each feature is much smaller than for the original features, it's larger and varies more than for MinMaxScaler. The bimodal distribution values are now compressed into two small groups. Standard and RobustScalers have pretty much the same ranges.
### 2.4.6 Normalizer <a id='normalizer'></a>
Note that normalizer operates on the rows, not the columns. It applies l2 normalization by default.
[Back to Top](#Introduction)
```
# Normalizer works row-wise (l2 norm per sample), unlike the column scalers.
n_scaler = preprocessing.Normalizer()
df_n = pd.DataFrame(n_scaler.fit_transform(df), columns=col_names)
# Overlay the normalized distributions on one axis.
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After Normalizer')
for column in ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal', 'normal_big']:
    sns.kdeplot(df_n[column], ax=ax1)
df_n.describe()
```
Normalizer also moved the features to similar scales. Notice that the range for our much larger feature's values is now extremely small and clustered around .9999999999.
### 2.4.7 Combined Plot <a id='combined_plot'></a>
Let's look at our original and transformed distributions together. We'll exclude Normalizer because you generally want to transform your features, not your samples.
[Back to Top](#Introduction)
```
# Combined plot: the original data next to each scaler's output.
fig, (ax0, ax1, ax2, ax3) = plt.subplots(ncols=4, figsize=(20, 8))
columns = ['beta', 'exponential', 'normal_p', 'normal_l', 'bimodal', 'normal_big']
panels = [
    (ax0, df, 'Original Distributions'),
    (ax1, df_mm, 'After MinMaxScaler'),
    (ax2, df_r, 'After RobustScaler'),
    (ax3, df_s, 'After StandardScaler'),
]
for axis, frame, title in panels:
    axis.set_title(title)
    for column in columns:
        sns.kdeplot(frame[column], ax=axis)
```
You can see that after any transformation the distributions are on a similar scale. Also notice that MinMaxScaler doesn't distort the distances between the values in each feature.
# <p style="text-align: center;">Conclusion<p><a id='Conclusion'></a>
We have used various data Scaling and preprocessing techniques in this notebook. As listed below
- Use MinMaxScaler as your default
- Use RobustScaler if you have outliers and can handle a larger range
- Use StandardScaler if you need normalized features
- Use Normalizer sparingly - it normalizes rows, not columns
[Back to top](#Introduction)
# <p style="text-align: center;">Contribution<p><a id='Contribution'></a>
This was a fun project in which we explore the idea of Data cleaning and Data Preprocessing. We take inspiration from kaggle learning course and create our own notebook enhancing the same idea and supplementing it with our own contributions from our experiences and past projects.
- Code by self : 65%
- Code from external Sources : 35%
[Back to top](#Introduction)
# <p style="text-align: center;">Citation<p><a id='Citation'></a>
- https://www.kaggle.com/alexisbcook/scaling-and-normalization
- https://scikit-learn.org/stable/modules/preprocessing.html
- https://www.analyticsvidhya.com/blog/2020/04/feature-scaling-machine-learning-normalization-standardization/
- https://kharshit.github.io/blog/2018/03/23/scaling-vs-normalization
- https://www.kaggle.com/discdiver/guide-to-scaling-and-standardizing
- https://docs.google.com/spreadsheets/d/1woVi7wq13628HJ-tN6ApaRGVZ85OdmHsDBKLAf5ylaQ/edit#gid=0
- https://towardsdatascience.com/preprocessing-with-sklearn-a-complete-and-comprehensive-guide-670cb98fcfb9
- https://www.kaggle.com/rpsuraj/outlier-detection-techniques-simplified?select=insurance.csv
- https://statisticsbyjim.com/basics/remove-outliers/
- https://statisticsbyjim.com/basics/outliers/
# <p style="text-align: center;">License<p><a id='License'></a>
Copyright (c) 2020 Manali Sharma, Rushabh Nisher
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
[Back to top](#Introduction)
|
github_jupyter
|
# Implementing a Route Planner
In this project you will use A\* search to implement a "Google-maps" style route planning algorithm.
## The Map
```
# Run this cell first!
from helpers import Map, load_map_10, load_map_40, show_map
import math
%load_ext autoreload
%autoreload 2
```
### Map Basics
```
# Load the 10-intersection demo map and render it inline.
map_10 = load_map_10()
show_map(map_10)
```
The map above (run the code cell if you don't see it) shows a disconnected network of 10 intersections. The two intersections on the left are connected to each other but they are not connected to the rest of the road network. This map is quite literal in its expression of distance and connectivity. On the graph above, the edge between 2 nodes(intersections) represents a literal straight road not just an abstract connection of 2 cities.
These `Map` objects have two properties you will want to use to implement A\* search: `intersections` and `roads`
**Intersections**
The `intersections` are represented as a dictionary.
In this example, there are 10 intersections, each identified by an x,y coordinate. The coordinates are listed below. You can hover over each dot in the map above to see the intersection number.
map_10.intersections
**Roads**
The `roads` property is a list where `roads[i]` contains a list of the intersections that intersection `i` connects to.
```
# This shows that intersection 0 connects to intersections 7, 6, and 5.
map_10.roads[0]
# This shows the full connectivity of the map (one adjacency list per intersection).
map_10.roads
# map_40 is a bigger map than map_10 (40 intersections instead of 10).
map_40 = load_map_40()
show_map(map_40)
```
### Advanced Visualizations
The map above shows a network of roads which spans 40 different intersections (labeled 0 through 39).
The `show_map` function which generated this map also takes a few optional parameters which might be useful for visualizaing the output of the search algorithm you will write.
* `start` - The "start" node for the search algorithm.
* `goal` - The "goal" node.
* `path` - An array of integers which corresponds to a valid sequence of intersection visits on the map.
```
# run this code, note the effect of including the optional
# parameters in the function call.
# Highlights the start (5), the goal (34), and a candidate path between them.
show_map(map_40, start=5, goal=34, path=[5,16,37,12,34])
```
## The Algorithm
### Writing your algorithm
The algorithm written will be responsible for generating a `path` like the one passed into `show_map` above. In fact, when called with the same map, start and goal as above, your algorithm should produce the path `[5, 16, 37, 12, 34]`. However, you must complete several methods before it will work.
```bash
> PathPlanner(map_40, 5, 34).path
[5, 16, 37, 12, 34]
```
```
# Do not change this cell
# When you write your methods correctly this cell will execute
# without problems
class PathPlanner():
    """A* route planner over a Map object.

    Holds the search state (open/closed sets, cameFrom links, g/f scores)
    and exposes ``self.path`` as a list of intersection ids from start to
    goal.  The create_*/get_*/set_* helper methods are attached to the
    class in later notebook cells.

    Fix: the original ``raise(ValueError, "msg")`` statements raised a
    tuple, which is a TypeError in Python 3; they now raise proper
    ValueError instances.
    """
    def __init__(self, M, start=None, goal=None):
        """Store the map and endpoints; run the search eagerly when all three are set."""
        self.map = M
        self.start = start
        self.goal = goal
        # Search state is only built once both endpoints are supplied.
        self.closedSet = self.create_closedSet() if goal is not None and start is not None else None
        self.openSet = self.create_openSet() if goal is not None and start is not None else None
        self.cameFrom = self.create_cameFrom() if goal is not None and start is not None else None
        self.gScore = self.create_gScore() if goal is not None and start is not None else None
        self.fScore = self.create_fScore() if goal is not None and start is not None else None
        self.path = self.run_search() if self.map and self.start is not None and self.goal is not None else None

    def get_path(self):
        """Return the computed path, running the search first if needed."""
        if self.path:
            return self.path
        else:
            self.run_search()
            return self.path

    def reconstruct_path(self, current):
        """Walk the cameFrom links backwards from `current` to the start node."""
        total_path = [current]
        while current in self.cameFrom.keys():
            current = self.cameFrom[current]
            total_path.append(current)
        return total_path

    def _reset(self):
        """Private method used to reset the closedSet, openSet, cameFrom, gScore, fScore, and path attributes"""
        self.closedSet = None
        self.openSet = None
        self.cameFrom = None
        self.gScore = None
        self.fScore = None
        # Re-run the search immediately only if map and both endpoints remain set.
        self.path = self.run_search() if self.map and self.start and self.goal else None

    def run_search(self):
        """Execute the A* search loop.

        Returns the path (start..goal) as a list of node ids, or False
        when the open set empties without reaching the goal.

        Raises:
            ValueError: if the map, goal, or start node has not been set.
        """
        if self.map is None:
            raise ValueError("Must create map before running search. Try running PathPlanner.set_map(start_node)")
        if self.goal is None:
            raise ValueError("Must create goal node before running search. Try running PathPlanner.set_goal(start_node)")
        if self.start is None:
            raise ValueError("Must create start node before running search. Try running PathPlanner.set_start(start_node)")
        # Rebuild any state that was cleared by _reset().
        self.closedSet = self.closedSet if self.closedSet is not None else self.create_closedSet()
        self.openSet = self.openSet if self.openSet is not None else self.create_openSet()
        self.cameFrom = self.cameFrom if self.cameFrom is not None else self.create_cameFrom()
        self.gScore = self.gScore if self.gScore is not None else self.create_gScore()
        self.fScore = self.fScore if self.fScore is not None else self.create_fScore()
        while not self.is_open_empty():
            current = self.get_current_node()
            if current == self.goal:
                self.path = [x for x in reversed(self.reconstruct_path(current))]
                return self.path
            else:
                self.openSet.remove(current)
                self.closedSet.add(current)
            for neighbor in self.get_neighbors(current):
                if neighbor in self.closedSet:
                    continue  # Ignore the neighbor which is already evaluated.
                if not neighbor in self.openSet:  # Discover a new node
                    self.openSet.add(neighbor)
                # The distance from start to a neighbor
                # the "dist_between" function may vary as per the solution requirements.
                if self.get_tenative_gScore(current, neighbor) >= self.get_gScore(neighbor):
                    continue  # This is not a better path.
                # This path is the best until now. Record it!
                self.record_best_path_to(current, neighbor)
        print("No Path Found")
        self.path = None
        return False
```
Create the following methods:
```
def create_closedSet(self):
    """Return a fresh, empty set for tracking nodes that are fully evaluated."""
    # A set gives O(1) membership tests in the main search loop.
    return set()
def create_openSet(self):
    """Create the frontier set of discovered-but-unevaluated nodes.

    Initially the frontier contains only the start node.

    Raises:
        ValueError: if no start node has been set yet.  (The original
        ``raise(ValueError, "msg")`` raised a tuple, which is a TypeError
        in Python 3.)
    """
    if self.start is None:
        raise ValueError("Must create start node before creating an open set. Try running PathPlanner.set_start(start_node)")
    # The frontier starts out holding just the start node.
    self.open_set = {self.start}
    return self.open_set
def create_cameFrom(self):
    """Return a fresh mapping of node -> best-known predecessor node."""
    # An empty dict; entries are filled in as better paths are recorded.
    self.come_from = dict()
    return self.come_from
def create_gScore(self):
    """Build the g-score table: cost from the start node to each node.

    The start node costs zero; every other node starts at infinity.
    """
    self.gScore = {node: (0 if node == self.start else math.inf)
                   for node in range(len(self.map.roads))}
    return self.gScore
def create_fScore(self):
    """Build the f-score table: estimated total cost start -> goal via each node.

    The start node's value is purely the heuristic estimate; all other
    nodes start at infinity.
    """
    self.fScore = {}
    for node in range(len(self.map.roads)):
        if node == self.start:
            # Purely heuristic for the start node (g(start) is zero).
            self.fScore[node] = heuristic_cost_estimate(self, self.start)
        else:
            self.fScore[node] = math.inf
    return self.fScore
def set_map(self, M):
    """Install a new map and clear the start, goal, and all search state.

    Fix: ``_reset`` is a bound method, so it must be called without an
    explicit self argument — the original ``self._reset(self)`` raised
    TypeError (two positional arguments for a one-argument method).
    """
    self._reset()
    self.start = None
    self.goal = None
    self.map = M
def set_start(self, start):
    """Set the start node, clearing previously computed search state.

    Fix: calls ``self._reset()`` instead of the original
    ``self._reset(self)``, which passed self twice to a bound method and
    raised TypeError.  _reset() clears goal-independent state (closedSet,
    openSet, cameFrom, gScore, fScore, path).
    """
    self._reset()
    self.start = start
def set_goal(self, goal):
    """Set the search's goal node, discarding cached search state first."""
    self._reset(self)
    self.goal = goal
def get_current_node(self):
    """Return the open-set node with the lowest f-score."""
    candidate_scores = {}
    for node in self.open_set:
        if node in self.fScore:
            # Refresh the cached f-score before comparing candidates.
            calculate_fscore(self, node)
        candidate_scores[node] = self.fScore[node]
    return min(candidate_scores, key=candidate_scores.get)
def get_neighbors(self, node):
    """Return the nodes directly connected to *node* on the map."""
    roads = self.map.roads
    return roads[node]
def get_gScore(self, node):
    """Return the best-known cost of reaching *node* from the start."""
    score = self.gScore[node]
    return score
def get_tenative_gScore(self, current, neighbor):
    """Return the tentative g-score of *neighbor* when reached via *current*:
    the best known cost to *current* plus the edge cost current -> neighbor.

    (The 'tenative' spelling is kept because callers bind this name.)
    """
    cost_to_current = self.gScore[current]
    edge_cost = distance(self, current, neighbor)
    return cost_to_current + edge_cost
def is_open_empty(self):
    """Return True when there are no frontier nodes left to explore."""
    # An empty container is falsy, so this is equivalent to len(...) == 0.
    return not self.open_set
def distance(self, node_1, node_2):
    """Return the Euclidean (L2) distance between two map intersections."""
    x1, y1 = self.map.intersections[node_1]
    x2, y2 = self.map.intersections[node_2]
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx * dx + dy * dy)
def heuristic_cost_estimate(self, node):
    """Return h(node): the straight-line distance from *node* to the goal.

    Euclidean distance never overestimates travel along roads, so this
    heuristic is admissible.
    """
    x1, y1 = self.map.intersections[node]
    x2, y2 = self.map.intersections[self.goal]
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx * dx + dy * dy)
def calculate_fscore(self, node):
    """Recompute f(node) = g(node) + h(node), cache it, and return the
    whole fScore table (matching the original return convention)."""
    g = get_gScore(self, node)
    h = heuristic_cost_estimate(self, node)
    # Re-store g (a no-op refresh) and the updated f, as the original did.
    self.gScore[node] = g
    self.fScore[node] = g + h
    return self.fScore
def record_best_path_to(self, current, neighbor):
    """Record that the cheapest known route to *neighbor* runs through
    *current*, updating cameFrom, gScore, and fScore accordingly."""
    self.come_from[neighbor] = current
    new_g = get_tenative_gScore(self, current, neighbor)
    self.gScore[neighbor] = new_g
    self.fScore[neighbor] = new_g + heuristic_cost_estimate(self, neighbor)
# Attach the standalone functions above to PathPlanner so they behave as
# instance methods (this notebook defines them outside the class body).
PathPlanner.create_closedSet = create_closedSet
PathPlanner.create_openSet = create_openSet
PathPlanner.create_cameFrom = create_cameFrom
PathPlanner.create_gScore = create_gScore
PathPlanner.create_fScore = create_fScore
#PathPlanner._reset = _reset
PathPlanner.set_map = set_map
PathPlanner.set_start = set_start
PathPlanner.set_goal = set_goal
PathPlanner.get_current_node = get_current_node
PathPlanner.get_neighbors = get_neighbors
PathPlanner.get_gScore = get_gScore
PathPlanner.get_tenative_gScore = get_tenative_gScore
PathPlanner.is_open_empty = is_open_empty
PathPlanner.distance = distance
PathPlanner.heuristic_cost_estimate = heuristic_cost_estimate
PathPlanner.calculate_fscore = calculate_fscore
PathPlanner.record_best_path_to = record_best_path_to
# Smoke test: plan 5 -> 34 on the demo map and compare to the known answer.
planner = PathPlanner(map_40, 5, 34)
path = planner.path
if path == [5, 16, 37, 12, 34]:
    print("great! Your code works for these inputs!")
else:
    print("something is off, your code produced the following:")
    print(path)
```
### Testing your Code
If the code below produces no errors, your algorithm is behaving correctly. You are almost ready to submit! Before you submit, go through the following submission checklist:
**Submission Checklist**
1. Does my code pass all tests?
2. Does my code implement `A*` search and not some other search algorithm?
3. Do I use an **admissible heuristic** to direct search efforts towards the goal?
4. Do I use data structures which avoid unnecessarily slow lookups?
When you can answer "yes" to all of these questions, submit by pressing the Submit button in the lower right!
```
from test import test
test(PathPlanner)
```
## Questions
**Instructions** Answer the following questions in your own words. We do not expect you to know all of this off the top of your head. We expect you to do research and ask questions. However, do not merely copy and paste an answer from Google or Stack Overflow. Read the information and understand it first. Then use your own words to explain the answer.
- How would you explain A-Star to a family member(layman)?
** ANSWER **:
A-star algorithm has a brain/extra knowledge which helps in making smart choice at each step and thereby leading to destination without exploring much unwanted paths
- How does A-Star search algorithm differ from Uniform cost search? What about Best First search?
** ANSWER **:
A-star algorithm has uses f which is sum of (each step cost(g) + estimated goal cost(h)), A-star has extra knowledge/information about goal.
Uniform cost search it keep on exploring nodes in uniform way in each direction, which slows down search.
Best First Search is like A-star without extra knowledge/brain, it keeps on exploring neighboring nodes with lowest cost until it leads to destination/goal.
- What is a heuristic?
** ANSWER **:
A heuristic is an estimated movement cost from a given node to the goal. It is usually a smart guess that is never more than the actual cost from the given node to the goal node.
- What is a consistent heuristic?
** ANSWER **:
A heuristic is consistent if the estimated cost from the current node to the goal is less than or equal to the cost of the step from the current node to a successor node, plus the estimated cost from the successor node to the goal.
- What is an admissible heuristic?
** ANSWER **:
A heuristic is admissible if the estimated cost is never more than the actual cost from the current node to the goal node.
i.e. A heuristic function is admissible if it never overestimates the distance to the goal.
- ___ admissible heuristic are consistent.
*CHOOSE ONE*
- All
- Some
- None
** ANSWER **:
Some
- ___ Consistent heuristic are admissible.
*CHOOSE ONE*
- All
- Some
- None
** ANSWER **:
All
|
github_jupyter
|
# Structured and time series data
This notebook contains an implementation of the third place result in the Rossman Kaggle competition as detailed in Guo/Berkhahn's [Entity Embeddings of Categorical Variables](https://arxiv.org/abs/1604.06737).
The motivation behind exploring this architecture is its relevance to real-world applications. Most data used for decision making day-to-day in industry is structured and/or time-series data. Here we explore the end-to-end process of using neural networks with practical structured data problems.
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from fastai.structured import *
from fastai.column_data import *
np.set_printoptions(threshold=50, edgeitems=20)
PATH='data/rossmann/'
```
## Create datasets
In addition to the provided data, we will be using external datasets put together by participants in the Kaggle competition. You can download all of them [here](http://files.fast.ai/part2/lesson14/rossmann.tgz).
For completeness, the implementation used to put them together is included below.
```
def concat_csvs(dirname):
    """Concatenate every CSV under PATH/<dirname> into one PATH/<dirname>.csv,
    prefixing each data row with the source file's name.

    The header is written exactly once, taken from the first file read and
    extended with a leading "file" column. Relies on module globals PATH
    and glob (from the star import at the top of the notebook).
    """
    path = f'{PATH}{dirname}'
    # Bug fix: glob inside the target directory, not the top-level data dir
    # (the original globbed f"{PATH}/*.csv", ignoring `dirname` entirely).
    filenames = glob(f"{path}/*.csv")
    wrote_header = False
    with open(f"{path}.csv", "w") as outputfile:
        for filename in filenames:
            # NOTE(review): split(".")[0] keeps the directory prefix in the
            # name column; presumably acceptable downstream — confirm.
            name = filename.split(".")[0]
            with open(filename) as f:
                line = f.readline()
                if not wrote_header:
                    wrote_header = True
                    outputfile.write("file," + line)
                for line in f:
                    outputfile.write(name + "," + line)
                outputfile.write("\n")
# concat_csvs('googletrend')
# concat_csvs('weather')
```
Feature Space:
* train: Training set provided by competition
* store: List of stores
* store_states: mapping of store to the German state they are in
* List of German state names
* googletrend: trend of certain google keywords over time, found by users to correlate well w/ given data
* weather: weather
* test: testing set
```
table_names = ['train', 'store', 'store_states', 'state_names',
'googletrend', 'weather', 'test']
```
We'll be using the popular data manipulation framework `pandas`. Among other things, pandas allows you to manipulate tables/data frames in python as one would in a database.
We're going to go ahead and load all of our csv's as dataframes into the list `tables`.
```
tables = [pd.read_csv(f'{PATH}{fname}.csv', low_memory=False) for fname in table_names]
from IPython.display import HTML
```
We can use `head()` to get a quick look at the contents of each table:
* train: Contains store information on a daily basis, tracks things like sales, customers, whether that day was a holiday, etc.
* store: general info about the store including competition, etc.
* store_states: maps store to state it is in
* state_names: Maps state abbreviations to names
* googletrend: trend data for particular week/state
* weather: weather conditions for each state
* test: Same as training table, w/o sales and customers
```
for t in tables: display(t.head())
```
This is very representative of a typical industry dataset.
The following returns summarized aggregate information for each table across each field.
```
for t in tables: display(DataFrameSummary(t).summary())
```
## Data Cleaning / Feature Engineering
As a structured data problem, we necessarily have to go through all the cleaning and feature engineering, even though we're using a neural network.
```
train, store, store_states, state_names, googletrend, weather, test = tables
len(train),len(test)
```
We turn state Holidays to booleans, to make them more convenient for modeling. We can do calculations on pandas fields using notation very similar (often identical) to numpy.
```
train.StateHoliday = train.StateHoliday!='0'
test.StateHoliday = test.StateHoliday!='0'
```
`join_df` is a function for joining tables on specific fields. By default, we'll be doing a left outer join of `right` on the `left` argument using the given fields for each table.
Pandas does joins using the `merge` method. The `suffixes` argument describes the naming convention for duplicate fields. We've elected to leave the duplicate field names on the left untouched, and append a "\_y" to those on the right.
```
def join_df(left, right, left_on, right_on=None, suffix='_y'):
    """Left-outer-join *right* onto *left* on the given key columns.

    When a non-key column name appears in both frames, the left copy keeps
    its name and the right copy gets *suffix* appended.
    """
    right_on = left_on if right_on is None else right_on
    return left.merge(right, how='left', left_on=left_on,
                      right_on=right_on, suffixes=("", suffix))
```
Join weather/state names.
```
weather = join_df(weather, state_names, "file", "StateName")
```
In pandas you can add new columns to a dataframe by simply defining it. We'll do this for googletrends by extracting dates and state names from the given data and adding those columns.
We're also going to replace all instances of state name 'NI' to match the usage in the rest of the data: 'HB,NI'. This is a good opportunity to highlight pandas indexing. We can use `.loc[rows, cols]` to select a list of rows and a list of columns from the dataframe. In this case, we're selecting rows w/ statename 'NI' by using a boolean list `googletrend.State=='NI'` and selecting "State".
```
googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]
googletrend['State'] = googletrend.file.str.split('_', expand=True)[2]
googletrend.loc[googletrend.State=='NI', "State"] = 'HB,NI'
```
The following extracts particular date fields from a complete datetime for the purpose of constructing categoricals.
You should *always* consider this feature extraction step when working with date-time. Without expanding your date-time into these additional fields, you can't capture any trend/cyclical behavior as a function of time at any of these granularities. We'll add to every table with a date field.
```
add_datepart(weather, "Date", drop=False)
add_datepart(googletrend, "Date", drop=False)
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
```
The Google trends data has a special category for the whole of the Germany - we'll pull that out so we can use it explicitly.
```
trend_de = googletrend[googletrend.file == 'Rossmann_DE']
```
Now we can outer join all of our data into a single dataframe. Recall that in outer joins everytime a value in the joining field on the left table does not have a corresponding value on the right table, the corresponding row in the new table has Null values for all right table fields. One way to check that all records are consistent and complete is to check for Null values post-join, as we do here.
*Aside*: Why not just do an inner join?
If you are assuming that all records are complete and match on the field you desire, an inner join will do the same thing as an outer join. However, in the event you are wrong or a mistake is made, an outer join followed by a null-check will catch it. (Comparing before/after # of rows for inner join is equivalent, but requires keeping track of before/after row #'s. Outer join is easier.)
```
store = join_df(store, store_states, "Store")
len(store[store.State.isnull()])
joined = join_df(train, store, "Store")
joined_test = join_df(test, store, "Store")
len(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])
joined = join_df(joined, googletrend, ["State","Year", "Week"])
joined_test = join_df(joined_test, googletrend, ["State","Year", "Week"])
len(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])
joined = joined.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
joined_test = joined_test.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
len(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])
joined = join_df(joined, weather, ["State","Date"])
joined_test = join_df(joined_test, weather, ["State","Date"])
len(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])
for df in (joined, joined_test):
for c in df.columns:
if c.endswith('_y'):
if c in df.columns: df.drop(c, inplace=True, axis=1)
```
Next we'll fill in missing values to avoid complications with `NA`'s. `NA` (not available) is how Pandas indicates missing values; many models have problems when missing values are present, so it's always important to think about how to deal with them. In these cases, we are picking an arbitrary *signal value* that doesn't otherwise appear in the data.
```
for df in (joined,joined_test):
df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)
df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)
df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)
df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)
```
Next we'll extract features "CompetitionOpenSince" and "CompetitionDaysOpen". Note the use of `apply()` in mapping a function across dataframe values.
```
for df in (joined,joined_test):
df["CompetitionOpenSince"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear,
month=df.CompetitionOpenSinceMonth, day=15))
df["CompetitionDaysOpen"] = df.Date.subtract(df.CompetitionOpenSince).dt.days
```
We'll replace some erroneous / outlying data.
```
for df in (joined,joined_test):
df.loc[df.CompetitionDaysOpen<0, "CompetitionDaysOpen"] = 0
df.loc[df.CompetitionOpenSinceYear<1990, "CompetitionDaysOpen"] = 0
```
We add "CompetitionMonthsOpen" field, limiting the maximum to 2 years to limit number of unique categories.
```
for df in (joined,joined_test):
df["CompetitionMonthsOpen"] = df["CompetitionDaysOpen"]//30
df.loc[df.CompetitionMonthsOpen>24, "CompetitionMonthsOpen"] = 24
joined.CompetitionMonthsOpen.unique()
```
Same process for Promo dates.
```
for df in (joined,joined_test):
df["Promo2Since"] = pd.to_datetime(df.apply(lambda x: Week(
x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1).astype(pd.datetime))
df["Promo2Days"] = df.Date.subtract(df["Promo2Since"]).dt.days
for df in (joined,joined_test):
df.loc[df.Promo2Days<0, "Promo2Days"] = 0
df.loc[df.Promo2SinceYear<1990, "Promo2Days"] = 0
df["Promo2Weeks"] = df["Promo2Days"]//7
df.loc[df.Promo2Weeks<0, "Promo2Weeks"] = 0
df.loc[df.Promo2Weeks>25, "Promo2Weeks"] = 25
df.Promo2Weeks.unique()
joined.to_feather(f'{PATH}joined')
joined_test.to_feather(f'{PATH}joined_test')
```
## Durations
It is common when working with time series data to extract data that explains relationships across rows as opposed to columns, e.g.:
* Running averages
* Time until next event
* Time since last event
This is often difficult to do with most table manipulation frameworks, since they are designed to work with relationships across columns. As such, we've created a class to handle this type of data.
We'll define a function `get_elapsed` for cumulative counting across a sorted dataframe. Given a particular field `fld` to monitor, this function will start tracking time since the last occurrence of that field. When the field is seen again, the counter is set to zero.
Upon initialization, this will result in datetime na's until the field is encountered. This is reset every time a new store is seen. We'll see how to use this shortly.
```
def get_elapsed(fld, pre):
    """Add a column pre+fld to the module-global df: per row, the number of
    days elapsed since *fld* was last truthy for the same store.

    Rows before the first occurrence for a store get a NaT-derived value.
    Assumes df is sorted by Store (and by Date in the desired direction) —
    the store-change check only works on contiguous runs.
    """
    one_day = np.timedelta64(1, 'D')
    last_date = np.datetime64()   # NaT sentinel until the field is seen
    last_store = 0
    elapsed = []
    for store, val, date in zip(df.Store.values, df[fld].values, df.Date.values):
        if store != last_store:
            # New store: forget the previous store's last occurrence.
            last_date = np.datetime64()
            last_store = store
        if val:
            last_date = date
        elapsed.append((date - last_date).astype('timedelta64[D]') / one_day)
    df[pre + fld] = elapsed
We'll be applying this to a subset of columns:
```
columns = ["Date", "Store", "Promo", "StateHoliday", "SchoolHoliday"]
df = train[columns]
df = test[columns]
```
Let's walk through an example.
Say we're looking at School Holiday. We'll first sort by Store, then Date, and then call `add_elapsed('SchoolHoliday', 'After')`:
This will apply to each row with School Holiday:
* A applied to every row of the dataframe in order of store and date
* Will add to the dataframe the days since seeing a School Holiday
* If we sort in the other direction, this will count the days until another holiday.
```
fld = 'SchoolHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
```
We'll do this for two more fields.
```
fld = 'StateHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'Promo'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
```
We're going to set the active index to Date.
```
df = df.set_index("Date")
```
Then set null values from elapsed field calculations to 0.
```
columns = ['SchoolHoliday', 'StateHoliday', 'Promo']
for o in ['Before', 'After']:
for p in columns:
a = o+p
df[a] = df[a].fillna(0).astype(int)
```
Next we'll demonstrate window functions in pandas to calculate rolling quantities.
Here we're sorting by date (`sort_index()`) and counting the number of events of interest (`sum()`) defined in `columns` in the following week (`rolling()`), grouped by Store (`groupby()`). We do the same in the opposite direction.
```
bwd = df[['Store']+columns].sort_index().groupby("Store").rolling(7, min_periods=1).sum()
fwd = df[['Store']+columns].sort_index(ascending=False
).groupby("Store").rolling(7, min_periods=1).sum()
```
Next we want to drop the Store indices grouped together in the window function.
Often in pandas, there is an option to do this in place. This is time and memory efficient when working with large datasets.
```
bwd.drop('Store',1,inplace=True)
bwd.reset_index(inplace=True)
fwd.drop('Store',1,inplace=True)
fwd.reset_index(inplace=True)
df.reset_index(inplace=True)
```
Now we'll merge these values onto the df.
```
df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])
df = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])
df.drop(columns,1,inplace=True)
df.head()
```
It's usually a good idea to back up large tables of extracted / wrangled features before you join them onto another one, that way you can go back to it easily if you need to make changes to it.
```
df.to_feather(f'{PATH}df')
df = pd.read_feather(f'{PATH}df')
df["Date"] = pd.to_datetime(df.Date)
df.columns
joined = join_df(joined, df, ['Store', 'Date'])
joined_test = join_df(joined_test, df, ['Store', 'Date'])
```
The authors also removed all instances where the store had zero sales / was closed. We speculate that this may have cost them a higher standing in the competition. One reason this may be the case is that a little exploratory data analysis reveals that there are often periods where stores are closed, typically for refurbishment. Before and after these periods, there are naturally spikes in sales that one might expect. By omitting this data from their training, the authors gave up the ability to leverage information about these periods to predict this otherwise volatile behavior.
```
joined = joined[joined.Sales!=0]
```
We'll back this up as well.
```
joined.reset_index(inplace=True)
joined_test.reset_index(inplace=True)
joined.to_feather(f'{PATH}joined')
joined_test.to_feather(f'{PATH}joined_test')
```
We now have our final set of engineered features.
While these steps were explicitly outlined in the paper, these are all fairly typical feature engineering steps for dealing with time series data and are practical in any similar setting.
## Create features
```
joined = pd.read_feather(f'{PATH}joined')
joined_test = pd.read_feather(f'{PATH}joined_test')
joined.head().T.head(40)
```
Now that we've engineered all our features, we need to convert to input compatible with a neural network.
This includes converting categorical variables into contiguous integers or one-hot encodings, normalizing continuous features to standard normal, etc...
```
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',
'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',
'SchoolHoliday_fw', 'SchoolHoliday_bw']
contin_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
n = len(joined); n
dep = 'Sales'
joined = joined[cat_vars+contin_vars+[dep, 'Date']].copy()
joined_test[dep] = 0
joined_test = joined_test[cat_vars+contin_vars+[dep, 'Date', 'Id']].copy()
for v in cat_vars: joined[v] = joined[v].astype('category').cat.as_ordered()
apply_cats(joined_test, joined)
for v in contin_vars:
joined[v] = joined[v].fillna(0).astype('float32')
joined_test[v] = joined_test[v].fillna(0).astype('float32')
```
We're going to run on a sample.
```
idxs = get_cv_idxs(n, val_pct=150000/n)
joined_samp = joined.iloc[idxs].set_index("Date")
samp_size = len(joined_samp); samp_size
```
To run on the full dataset, use this instead:
```
samp_size = n
joined_samp = joined.set_index("Date")
```
We can now process our data...
```
joined_samp.head(2)
df, y, nas, mapper = proc_df(joined_samp, 'Sales', do_scale=True)
yl = np.log(y)
joined_test = joined_test.set_index("Date")
df_test, _, nas, mapper = proc_df(joined_test, 'Sales', do_scale=True, skip_flds=['Id'],
mapper=mapper, na_dict=nas)
df.head(2)
```
In time series data, cross-validation is not random. Instead, our holdout data is generally the most recent data, as it would be in real application. This issue is discussed in detail in [this post](http://www.fast.ai/2017/11/13/validation-sets/) on our web site.
One approach is to take the last 25% of rows (sorted by date) as our validation set.
```
train_ratio = 0.75
# train_ratio = 0.9
train_size = int(samp_size * train_ratio); train_size
val_idx = list(range(train_size, len(df)))
```
An even better option for picking a validation set is using the exact same length of time period as the test set uses - this is implemented here:
```
val_idx = np.flatnonzero(
(df.index<=datetime.datetime(2014,9,17)) & (df.index>=datetime.datetime(2014,8,1)))
val_idx=[0]
```
## DL
We're ready to put together our models.
Root-mean-squared percent error is the metric Kaggle used for this competition.
```
def inv_y(a):
    """Undo the log transform applied to the sales target."""
    return np.exp(a)

def exp_rmspe(y_pred, targ):
    """Root-mean-squared percentage error on the original (exponentiated)
    scale — the metric Kaggle used for this competition."""
    actual = inv_y(targ)
    pct_var = (actual - inv_y(y_pred)) / actual
    return math.sqrt((pct_var ** 2).mean())
# Bound the network's output: 0 up to 20% above the largest observed
# log-sale, leaving headroom for predictions beyond the training maximum.
max_log_y = np.max(yl)
y_range = (0, max_log_y*1.2)
```
We can create a ModelData object directly from our data frame.
```
md = ColumnarModelData.from_data_frame(PATH, val_idx, df, yl.astype(np.float32), cat_flds=cat_vars, bs=128,
test_df=df_test)
```
Some categorical variables have a lot more levels than others. Store, in particular, has over a thousand!
```
cat_sz = [(c, len(joined_samp[c].cat.categories)+1) for c in cat_vars]
cat_sz
```
We use the *cardinality* of each variable (that is, its number of unique values) to decide how large to make its *embeddings*. Each level will be associated with a vector with length defined as below.
```
emb_szs = [(c, min(50, (c+1)//2)) for _,c in cat_sz]
emb_szs
m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),
0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)
lr = 1e-3
m.lr_find()
m.sched.plot(100)
```
### Sample
```
m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),
0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)
lr = 1e-3
m.fit(lr, 3, metrics=[exp_rmspe])
m.fit(lr, 5, metrics=[exp_rmspe], cycle_len=1)
m.fit(lr, 2, metrics=[exp_rmspe], cycle_len=4)
```
### All
```
m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),
0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)
lr = 1e-3
m.fit(lr, 1, metrics=[exp_rmspe])
m.fit(lr, 3, metrics=[exp_rmspe])
m.fit(lr, 3, metrics=[exp_rmspe], cycle_len=1)
```
### Test
```
m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),
0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)
lr = 1e-3
m.fit(lr, 3, metrics=[exp_rmspe])
m.fit(lr, 3, metrics=[exp_rmspe], cycle_len=1)
m.save('val0')
m.load('val0')
x,y=m.predict_with_targs()
exp_rmspe(x,y)
pred_test=m.predict(True)
pred_test = np.exp(pred_test)
joined_test['Sales']=pred_test
csv_fn=f'{PATH}tmp/sub.csv'
joined_test[['Id','Sales']].to_csv(csv_fn, index=False)
FileLink(csv_fn)
```
## RF
```
from sklearn.ensemble import RandomForestRegressor
((val,trn), (y_val,y_trn)) = split_by_idx(val_idx, df.values, yl)
m = RandomForestRegressor(n_estimators=40, max_features=0.99, min_samples_leaf=2,
n_jobs=-1, oob_score=True)
m.fit(trn, y_trn);
preds = m.predict(val)
m.score(trn, y_trn), m.score(val, y_val), m.oob_score_, exp_rmspe(preds, y_val)
```
|
github_jupyter
|
# Classifying Images with a NN and DNN Model
## Introduction
In this notebook, you learn how to build a neural network to classify the tf-flowers dataset using a Deep Neural Network Model.
## Learning Objectives
* Define Helper Functions.
* Train and evaluate a Neural Network (NN) model.
* Train and evaluate a Deep Neural Network model.
Each learning objective will correspond to a __#TODO__ in the [student lab notebook](../labs/classifying_images_with_a_nn_and_dnn_model.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
```
# Import and print the installed version of TensorFlow
import tensorflow as tf
print(tf.version.VERSION)
```
## Defining Helper Functions
#### Reading and Preprocessing image data
```
# Helper functions
def training_plot(metrics, history):
    """Plot training vs. validation curves, one subplot per metric.

    *history* is a Keras History object; each validation series is looked
    up under the 'val_' prefix of the metric name.
    """
    fig, axes = plt.subplots(1, len(metrics), figsize=(5 * len(metrics), 5))
    for idx, metric in enumerate(metrics):
        axis = axes[idx]
        axis.plot(history.history[metric], ls='dashed')
        axis.set_xlabel("Epochs")
        axis.set_ylabel(metric)
        axis.plot(history.history['val_' + metric])
        axis.legend([metric, 'val_' + metric])
# Call model.predict() on a few images in the evaluation dataset
def plot_predictions(filename):
    """Predict on the first 15 images listed in *filename* (a CSV of
    path,label rows) and display each image titled with its true label,
    predicted label, and predicted probability.

    Relies on module globals: decode_csv, model, CLASS_NAMES, and the
    IMG_HEIGHT/IMG_WIDTH/IMG_CHANNELS constants.
    """
    f, ax = plt.subplots(3, 5, figsize=(25,15))
    dataset = (tf.data.TextLineDataset(filename).
               map(decode_csv))
    for idx, (img, label) in enumerate(dataset.take(15)):
        ax[idx//5, idx%5].imshow((img.numpy()));
        # The model expects a batch dimension: reshape to a batch of one.
        batch_image = tf.reshape(img, [1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
        batch_pred = model.predict(batch_image)
        pred = batch_pred[0]
        # Map the numeric label back to its class name for the title.
        label = CLASS_NAMES[label.numpy()]
        pred_label_index = tf.math.argmax(pred).numpy()
        pred_label = CLASS_NAMES[pred_label_index]
        prob = pred[pred_label_index]
        ax[idx//5, idx%5].set_title('{}: {} ({:.4f})'.format(label, pred_label, prob))
def show_trained_weights(model):
    """Visualise the first dense layer's weights as one image per class.

    Each class's incoming weight vector is min-max scaled to [0, 1] and
    reshaped to image dimensions, so bright pixels mark regions that push
    the model towards that class. Relies on module globals CLASS_NAMES,
    IMG_HEIGHT, and IMG_WIDTH.
    """
    # CLASS_NAMES is ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
    LAYER = 1 # Layer 0 flattens the image, layer=1 is the first dense layer
    WEIGHT_TYPE = 0 # 0 for weight, 1 for bias
    f, ax = plt.subplots(1, 5, figsize=(15,15))
    for flower in range(len(CLASS_NAMES)):
        # Column *flower* of the kernel = weights feeding that class's logit.
        weights = model.layers[LAYER].get_weights()[WEIGHT_TYPE][:, flower]
        min_wt = tf.math.reduce_min(weights).numpy()
        max_wt = tf.math.reduce_max(weights).numpy()
        flower_name = CLASS_NAMES[flower]
        print("Scaling weights for {} in {} to {}".format(
            flower_name, min_wt, max_wt))
        # Min-max normalise so the weights display as valid pixel values.
        weights = (weights - min_wt)/(max_wt - min_wt)
        ax[flower].imshow(weights.reshape(IMG_HEIGHT, IMG_WIDTH, 3));
        ax[flower].set_title(flower_name);
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
def read_and_decode(filename, reshape_dims):
    """Load the JPEG at *filename* and return it as a float32 tensor with
    values in [0, 1], resized to *reshape_dims*."""
    raw = tf.io.read_file(filename)
    # Decompress to a 3D uint8 tensor (IMG_CHANNELS channels).
    image = tf.image.decode_jpeg(raw, channels=IMG_CHANNELS)
    # convert_image_dtype rescales uint8 [0, 255] to float32 [0, 1].
    image = tf.image.convert_image_dtype(image, tf.float32)
    return tf.image.resize(image, reshape_dims)
CLASS_NAMES = [item.numpy().decode("utf-8") for item in
tf.strings.regex_replace(
tf.io.gfile.glob("gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/*"),
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/", "")]
CLASS_NAMES = [item for item in CLASS_NAMES if item.find(".") == -1]
print("These are the available classes:", CLASS_NAMES)
# the label is the index into CLASS_NAMES array
def decode_csv(csv_row):
    """Parse one "path,flower" CSV row into (image tensor, label index).

    The label is the position of the flower name within CLASS_NAMES.
    """
    record_defaults = ["path", "flower"]
    filename, label_string = tf.io.decode_csv(csv_row, record_defaults)
    image = read_and_decode(filename, [IMG_HEIGHT, IMG_WIDTH])
    # argmax over the equality mask yields the matching class's index.
    label = tf.argmax(tf.math.equal(CLASS_NAMES, label_string))
    return image, label
```
## Train and evaluate a Neural Network (NN) model
One way to get a more complex method is to interpose one or more Dense layers in between the input and output. The model now has three layers. A layer with trainable weights such as the one recently added, that is neither the input nor the output, is called a hidden layer.
In Keras, you introduce the activation function with tf.keras.activations.
The Rectified Linear Unit (ReLU) is the most commonly used activation function for hidden layers – other commonly used activation functions include sigmoid, tanh, and elu.
```
import tensorflow as tf
import numpy as np
import matplotlib.pylab as plt
# Visualize the three common hidden-layer activation functions side by side.
fig, ax = plt.subplots(1, 3, figsize=(10,5))
x = np.arange(-10.0, 10.0, 0.1)
y = tf.keras.activations.sigmoid(x)
ax[0].plot(x, y);
ax[0].set_title("sigmoid")
y = tf.keras.activations.relu(x)
ax[1].plot(x, y);
ax[1].set_title("relu")
y = tf.keras.activations.elu(x)
ax[2].plot(x, y);
ax[2].set_title("elu");
# One-hidden-layer network (128 ReLU units) over the flattened pixels.
# NOTE(review): input_shape here is (IMG_WIDTH, IMG_HEIGHT, 3) while the later
# models use (IMG_HEIGHT, IMG_WIDTH, ...) — harmless only because both are 224.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, show_layer_names=False)
# Linear baseline: Flatten straight into a softmax output (no hidden layer).
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
model.summary()
BATCH_SIZE = 32
# Input pipelines: each CSV line becomes an (image, label) pair via decode_csv.
train_dataset = (tf.data.TextLineDataset(
    "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv").
    map(decode_csv)).batch(BATCH_SIZE)
eval_dataset = (tf.data.TextLineDataset(
    "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv").
    map(decode_csv)).batch(BATCH_SIZE)
# NN with one hidden layer
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)),
    tf.keras.layers.Dense(128, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
# Labels are integer indices -> sparse categorical cross-entropy;
# from_logits=False because the last layer already applies softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
# TODO 2: Train a Neural Network model
history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10)
training_plot(['loss', 'accuracy'], history)
```
## Training the neural network
Training the neural network is similar to training the linear model. Compile the model passing in the optimizer, the loss, and the metrics. Then, call model.fit() passing in the datasets.
```
# parameterize to the values in the previous cell
def train_and_evaluate(batch_size = 32,
                       lrate = 0.001, # default in Adam constructor
                       l1 = 0,
                       l2 = 0,
                       num_hidden = 128):
    """Train a one-hidden-layer NN on the 5-flowers dataset and return the fitted model.

    Args:
      batch_size: examples per training batch.
      lrate: Adam learning rate.
      l1, l2: regularization strengths applied to both Dense layers.
      num_hidden: number of units in the single hidden layer.
    """
    regularizer = tf.keras.regularizers.l1_l2(l1, l2)

    # Input pipelines: parse each CSV row into an (image, label) pair.
    train_dataset = tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv"
    ).map(decode_csv).batch(batch_size)
    eval_dataset = tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv"
    ).map(decode_csv).batch(32)  # eval batch size does not affect the metrics

    # Flatten -> hidden ReLU layer -> softmax over the flower classes.
    nn = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)),
        tf.keras.layers.Dense(num_hidden,
                              kernel_regularizer=regularizer,
                              activation=tf.keras.activations.relu),
        tf.keras.layers.Dense(len(CLASS_NAMES),
                              kernel_regularizer=regularizer,
                              activation='softmax'),
    ])
    nn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
               metrics=['accuracy'])

    history = nn.fit(train_dataset, validation_data=eval_dataset, epochs=10)
    training_plot(['loss', 'accuracy'], history)
    return nn
```
First, train your model using 128 nodes in the single hidden layer.
```
# Baseline: 128-unit hidden layer, no regularization, small learning rate.
model = train_and_evaluate(batch_size=32, lrate=0.0001, l1=0, l2=0, num_hidden=128)
```
You would normally expect that adding capacity to a model (more hidden nodes, as here, or more layers) will improve its ability to fit the training data, and thus lower the loss. Notice that it is not always the case though.
```
# Double the hidden width to 256 units — more capacity does not guarantee lower loss.
model = train_and_evaluate(batch_size=32, lrate=0.0001, l1=0, l2=0, num_hidden=256)
```
## Train and evaluate a Deep Neural Network model
Now train a DNN. You need to parameterize the number of layers, and the number of nodes in each layer.
```
# parameterize to the values in the previous cell
def train_and_evaluate(batch_size = 32,
                       lrate = 0.0001,
                       l1 = 0,
                       l2 = 0.001,
                       num_hidden = [64, 16]):
    """Train a DNN on the 5-flowers dataset and return the fitted model.

    One hidden ReLU layer is created per entry of `num_hidden` (its value
    being the layer width); l1/l2 regularization applies to every Dense layer.
    (`num_hidden` is never mutated, so the mutable default is safe here.)
    """
    regularizer = tf.keras.regularizers.l1_l2(l1, l2)

    # Input pipelines: parse each CSV row into an (image, label) pair.
    train_dataset = tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv"
    ).map(decode_csv).batch(batch_size)
    eval_dataset = tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv"
    ).map(decode_csv).batch(32)  # eval batch size does not affect the metrics

    # Assemble the stack: flatten, one Dense+ReLU per requested width,
    # then a softmax output over the flower classes.
    model = tf.keras.Sequential(name='flower_classification')
    model.add(tf.keras.layers.Flatten(
        input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
        name='input_pixels'))
    for hno, nodes in enumerate(num_hidden):
        model.add(tf.keras.layers.Dense(nodes,
                                        kernel_regularizer=regularizer,
                                        activation=tf.keras.activations.relu,
                                        name='hidden_dense_{}'.format(hno)))
    model.add(tf.keras.layers.Dense(len(CLASS_NAMES),
                                    kernel_regularizer=regularizer,
                                    activation='softmax',
                                    name='flower_prob'))

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(
                      from_logits=False),
                  metrics=['accuracy'])
    print(model.summary())

    history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10)
    training_plot(['loss', 'accuracy'], history)
    return model
# TODO 3: Train and evaluate a DNN model
# Two hidden layers (64 then 16 units) with mild L2 regularization.
model = train_and_evaluate(lrate=0.0001, l2=0.001, num_hidden = [64, 16])
```
Congrats! You've completed the lab!
|
github_jupyter
|
```
import sys
sys.path.append('../Scripts')
from capstone_functions import *
```
## Gradient Descent exploration
### This notebook has many example of running gradient descent with different hyper parameters
### Epoch Choice
This calls our main pipeline function that loads raw data performs all adjustments and creates centroids via GD
The point of the next two is to see the difference extending the epochs makes to the final scores.
The score on the test set is the most important method for validating performance.
```
# Baseline gradient-descent run: lr=3e-2, 400 epochs, 20% random holdout.
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.2,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='saturday_2', placement_method='gradient_descent', verbose=1,
                             lr=3e-2, n_epochs=400)
# Identical configuration but twice the epochs (800), to isolate the effect of longer training.
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.2,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='saturday_2', placement_method='gradient_descent', verbose=1,
                             lr=3e-2, n_epochs=800)
```
### This appears to show that increasing epochs improves final score.
### Learning Rate
## What if instead we change the learning rate to smaller steps
```
# Same as the 400-epoch baseline but with a 10x smaller learning rate (3e-3).
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.2,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='saturday_2', placement_method='gradient_descent', verbose=1,
                             lr=3e-3, n_epochs=400)
```
### Compared to the first run, the score has not improved with smaller steps. Perhaps there are local minima.
## Performance of GD based on holdout size
The first two example used 0.2 holdout but our model will perform better with more data so it will be good to see how much holdout data is required
```
# Shrink the random holdout to 5% so more data is available for fitting centroids.
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.05,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='saturday_2', placement_method='gradient_descent', verbose=1,
                             lr=3e-2, n_epochs=800)
```
### Changing holdout size makes a big difference to test score (but this is not comparable across runs for different sizes)
### Train score is then our indicator of whether the model is improving, but we will not be able to understand overfitting. Submitting to Zindi is one solution to this. In this case the train score also got worse, but it is not necessarily comparable across holdout sizes.
### We can look deeper into how the model is handling each tw cluster and changing over time with additional output.
```
# 10% holdout variant — intermediate between the 20% and 5% runs above.
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.1,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='saturday_2', placement_method='gradient_descent', verbose=1,
                             lr=3e-2, n_epochs=800)
```
### From the charts it appears that most improvement in train loss and validation loss occurs in the first epochs
### perhaps we can reduce epochs to save time. Mini batch size optimization is also something to consider. Learning rate could also be further explored
## Finally What does changing the tw_cluster_strategy do. We can run again with a different set.
### spoiler: it got worse, even though with other placement methods (k-means) off_peak_split outperformed 'saturday_2'
This should be further investigated to find best strategy
```
# Alternative clustering strategy (off_peak_split) with k-means placement instead of GD.
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.2,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='off_peak_split', placement_method='k_means', verbose=10)
```
### Rerunning the model with a small holdout set to produce the best output for Zindi.
```
#best
# Final run for submission: near-zero holdout (0.5%) so almost all data trains the model.
ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
                             outlier_filter=0.005,
                             holdout_strategy='random', holdout_test_size=0.005,
                             test_period_date_start='2018-01-01', test_period_date_end='2019-12-31',
                             tw_cluster_strategy='holiday_simple', placement_method='gradient_descent', verbose=0,
                             lr=3e-3, n_epochs=400)
```
|
github_jupyter
|
# Naive Bayes from scratch
```
import pandas as pd
import numpy as np
def get_accuracy(x: pd.DataFrame, y: pd.Series, y_hat: pd.Series):
    """Print per-class and overall accuracy of predictions `y_hat` against labels `y`.

    `x` is unused here; it is accepted so the signature mirrors the
    predict-style helpers elsewhere in this notebook.
    """
    hits = y_hat == y          # boolean mask of correct predictions
    is_pos = y == 1            # class-1 membership mask
    n_pos = len(y[is_pos])
    n_neg = len(y[~is_pos])
    print(f'Class 0: tested {n_neg}, correctly classified {hits[~is_pos].sum()}')
    print(f'Class 1: tested {n_pos}, correctly classified {hits[is_pos].sum()}')
    print(f'Overall: tested {len(y)}, correctly classified {hits.sum()}')
    print(f'Accuracy = {np.sum(hits) / len(y):.2f}')
class Classifier:
    """Base class for the from-scratch classifiers; subclasses supply train()/predict().

    If a dataset name is given, the constructor loads its train/test splits via
    `reader` (defined elsewhere in this notebook), trains immediately, and
    prints accuracy for both splits.
    """

    def __init__(self, dataset: str = None, mle: bool = True):
        if not dataset:
            return
        x_train, y_train = reader(f'datasets/{dataset}-train.txt')
        x_test, y_test = reader(f'datasets/{dataset}-test.txt')
        self.train(x_train, y_train, mle)
        # Report accuracy on the training split first, then the test split.
        for title, xs, ys in (('Training accuracy', x_train, y_train),
                              ('Test accuracy', x_test, y_test)):
            print(title)
            print('=' * 10)
            self.accuracy(xs, ys)

    def accuracy(self, x: pd.DataFrame, y: pd.DataFrame) -> None:
        """Predict on `x` and print accuracy statistics against labels `y`."""
        get_accuracy(x, y, self.predict(x))
class NB(Classifier):
    """Bernoulli naive Bayes for binary features and binary (0/1) labels.

    With mle=True the probabilities are maximum-likelihood estimates; with
    mle=False, add-one (Laplace) smoothing is applied to priors and likelihoods.
    """

    def __init__(self, dataset: str = None, mle: bool = True):
        self.prior = None                   # P(y=c) for c in {0, 1}
        self.p_xi_given_y = {0: {}, 1: {}}  # P(x_i = v | y = c) per class/column/value
        self.prior_x = {}                   # marginal P(x_i = v) per column/value
        self.cols = None                    # feature columns seen during training
        super().__init__(dataset, mle)

    def train(self, x: pd.DataFrame, y: pd.Series, mle: bool = True):
        """Estimate class priors and per-feature conditional probabilities from (x, y)."""
        # Smoothing constants: 0/0 for MLE; 1 added to counts and 2 to
        # denominators (binary features/classes) for Laplace smoothing.
        adj_den = 0 if mle else 2
        adj_num = 0 if mle else 1
        self.prior = y.value_counts().to_dict()
        for c in [0, 1]:
            self.prior[c] += adj_num
            self.prior[c] /= (len(y) + adj_den)
        self.cols = x.columns
        for col in x.columns:
            # Marginal feature distribution (used by most_indicative).
            self.prior_x[col] = (x[col].value_counts() / len(y)).to_dict()
        cond = y == 1
        y1 = np.sum(cond)
        y0 = len(y) - y1
        y1 += adj_den
        y0 += adj_den
        x_pos = x[cond]
        x_neg = x[~cond]
        for cls in [0, 1]:
            for col in x.columns:
                x_cls = x_pos if cls == 1 else x_neg
                y_cls = y1 if cls == 1 else y0
                x1 = len(x_cls.query(f'{col} == 1')) + adj_num
                x0 = len(x_cls.query(f'{col} == 0')) + adj_num
                self.p_xi_given_y[cls][col] = {
                    0: x0 / y_cls,
                    1: x1 / y_cls
                }

    def predict(self, x: pd.DataFrame) -> pd.Series:
        """Return the MAP class (0 or 1) for each row of `x`."""
        out = []
        for _, row in x.iterrows():
            m = {}
            for cls in [0, 1]:
                # Log-posterior up to a constant:
                #   log P(y=cls) + sum_i log P(x_i | y=cls).
                # BUG FIX: the original used self.prior[0] for BOTH classes,
                # so the class prior never influenced the decision correctly.
                m[cls] = np.log([self.prior[cls]] + [
                    self.p_xi_given_y[cls][col][row[col]]
                    for col in x.columns
                ]).sum()
            # Ties are broken in favor of class 1, as in the original.
            out.append(1 if m[1] >= m[0] else 0)
        return pd.Series(out)

    def _get_ind(self, col):
        # Indicativeness of feature `col` toward class 1. Both conditional
        # factors come from class 1 — presumably an odds-style ratio; confirm
        # against the original derivation if this is reused.
        num = self.prior_x[col][0] * self.p_xi_given_y[1][col][1]
        den = self.prior_x[col][1] * self.p_xi_given_y[1][col][0]
        return num / den

    def most_indicative(self):
        """Rank feature columns by indicativeness score, most indicative first."""
        return pd.Series({
            col: self._get_ind(col)
            for col in self.cols
        }).sort_values(ascending=False)
# Tiny sanity-check dataset: y equals x1, so the classifier should fit it perfectly.
x = pd.DataFrame({'x1': [0, 0, 1, 1], 'x2': [0, 1, 0, 1]})
y = pd.Series([0, 0, 1, 1])
x
# No dataset name -> the base-class constructor does nothing; train manually.
nb = NB()
nb.train(x, y)
nb.accuracy(x, y)
```
|
github_jupyter
|
# Create simple CNN network
Import all important libraries
```
import tensorflow as tf
from tensorflow import keras
import matplotlib as plt
import pandas as pd
# Import stuff fof preprocessing
from tensorflow.keras.preprocessing.image import ImageDataGenerator
```
Now load the IDmap
```
# Build a {gesture id: gesture name} lookup from the ID-mapping CSV.
IDmap = {}
# Use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
with open("./IDmapping.csv", mode='r') as f:
    fileContents = f.read()
# FIX: split on a real newline; the notebook-conversion artifact '\\n'
# would have split on a literal backslash-n two-character sequence.
fileContents = fileContents.split('\n')
# The file ends with a newline, so the last element is empty — stop one early.
for i in range(len(fileContents)-1):
    fileContents[i] = fileContents[i].split(',')  # each row becomes [id, gesture-name]
    IDmap[fileContents[i][0]] = fileContents[i][1]
# Peek at the first parsed row ("[:][0]" in the original was equivalent to "[0]").
print(fileContents[0])
colnames=['file', 'label']
# Read the annotations (no header row in the file); keep all values as strings.
data = pd.read_csv('./Hand_Annotations_2.csv',dtype=str,names=colnames, header=None)
# Preview the first 5 lines of the loaded data
data.head()
```
Now create ImageDataGenerator
```
# Generator that rescales pixel values from [0, 255] down to [0, 1].
datagen=ImageDataGenerator(rescale=1./255)
```
# flow_from_dataframe
now we can load the data from a CSV or JSON file.
Parameters:
- **dataframe** Pandas DataFrame which contains the filenames and classes or numeric data to be treated as target values.
- **directory** Path to the folder which contains all the images,None if x_col contains absolute paths pointing to each image instead of just filenames.
- **x_col** The column in the dataframe that has the filenames of the images
- **y_col** The column/columns in the dataframe in the filename that will be treated as raw target values if class_mode=”raw” (useful for regression tasks) or they will be treated as name of the classes if class_mode is “binary”/”categorical” or they will be ignored if class_mode is “input”/None.
- **class_mode** In addition to all the class_modes previously available in flow_from_directory, there is “raw”.
- **drop_duplicates** Boolean, whether to drop duplicate rows based on filename,True by default.
So you can either put all the images in one folder and point to that folder with the `directory` parameter, or have them scattered elsewhere and point to them with a full path (with extension, like *.jpeg) in the CSV and the parameter `directory=None`.
```
# Batches of (image, one-hot label) straight from the annotations DataFrame;
# directory=None because the 'file' column holds full paths.
# NOTE(review): `class_indices` is not a documented flow_from_dataframe argument
# (the id->class mapping is normally read back from train_generator.class_indices
# after construction) — confirm this call works on the installed Keras version.
train_generator=datagen.flow_from_dataframe(dataframe=data,
                                            directory=None,
                                            x_col=colnames[0],
                                            y_col=colnames[1],
                                            class_indices=IDmap,
                                            class_mode="categorical", target_size=(224,224), batch_size=32)
```
|
github_jupyter
|
## Import dependencies
```
import numpy as np
import sys
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import seaborn as sn
import scipy as sp
from tqdm import tqdm
import glob
from fair import *
from fair.scripts.data_retrieval import *
%matplotlib inline
```
definition used to round output tables to given sig figs.
```
def round_to_sf(x,sf):
    """Round x to `sf` significant figures, formatted for table display.

    Returns the integer 0 unchanged for zero and '-' for NaN (so missing
    entries render as dashes); all other values come back as strings.
    """
    if x==0:
        return 0
    if np.isnan(x):
        return '-'
    else:
        # Significant-figure rounding: keep `sf` digits starting at the leading digit.
        # BUG FIX: the original omitted the "- 1", which produced sf+1
        # significant figures (e.g. 123.456 at sf=2 gave '123', not '120').
        num= round(x, sf - int(np.floor(np.log10(abs(x)))) - 1)
        if abs(num)>10**sf:
            # Large magnitudes: cast through int to drop the trailing '.0'.
            return str(int(num))
        else:
            return str(num)
```
# I. Default parameter simulated concentrations
Here we run historical emissions to test how the default parameter set simulates the historical evolution of concentrations.
```
## first we view & create a latex table for the default parameter set:
default_params = get_gas_parameter_defaults()
# Transpose so gases are rows, then rename columns to LaTeX math symbols for the manuscript.
params_table = default_params.default.T.sort_index().rename(dict(a1='$a_1$',a2='$a_2$',a3='$a_3$',a4='$a_4$',
                                                                 tau1='$\tau_1$',tau2='$\tau_2$',tau3='$\tau_3$',tau4='$\tau_4$',
                                                                 r0='$r_0$',rC='$r_u$',rT='$r_T$',rA='$r_a$',PI_conc='PI\_conc',
                                                                 f1='$f_1$',f2='$f_2$',f3='$f_3$'),axis=1)
params_table.index.name='agent'
params_table.columns.name='parameter'
# Escape underscores in gas names so LaTeX renders them literally.
params_table.index = [x.replace('_','\_') for x in params_table.index]
# Display at 2 s.f.; the commented .to_latex call writes the manuscript table (Tab S2).
params_table.applymap(lambda x:round_to_sf(x,2)).replace(np.nan,'')#.to_latex('../../docs/manuscript/tables/TabS2',escape=False,bold_rows=True)
```
### data retrieval
#### concentrations
WMGHG concentrations are from the CMIP6 concentration dataset, [Meinshausen et al., 2017](https://www.geosci-model-dev.net/10/2057/2017/). For some species, these are extended using data from NOAA.
Reference:
Meinshausen, M., Vogel, E., Nauels, A., Lorbacher, K., Meinshausen, N., Etheridge, D. M., … Weiss, R. (2017). Historical greenhouse gas concentrations for climate modelling (CMIP6). Geoscientific Model Development, 10(5), 2057–2116. https://doi.org/10.5194/gmd-10-2057-2017
```
import ftplib
## import concentrations from official CMIP6 timeseries:
# List the per-gas CSVs on the ETH FTP server, then download each into one DataFrame
# keyed by the gas name embedded in the filename.
CMIP6_conc_ftp = ftplib.FTP('data.iac.ethz.ch','anonymous')
CMIP6_conc_ftp.cwd('CMIP6/input4MIPs/UoM/GHGConc/CMIP/yr/atmos/UoM-CMIP-1-1-0/GHGConc/gr3-GMNHSH/v20160701')
CMIP6_ftp_list = [x for x in CMIP6_conc_ftp.nlst() if x[-3:]=='csv']
WMGHG_concs = pd.DataFrame(dict(zip(['_'.join(x.split('_')[3:-8]) for x in CMIP6_ftp_list],[pd.read_csv('ftp://data.iac.ethz.ch/CMIP6/input4MIPs/UoM/GHGConc/CMIP/yr/atmos/UoM-CMIP-1-1-0/GHGConc/gr3-GMNHSH/v20160701/'+x,usecols=[0,1],index_col=0).iloc[:,0] for x in CMIP6_ftp_list])))
WMGHG_concs = WMGHG_concs[[x for x in WMGHG_concs.columns if x[-2:]!='eq']] # remove "equivalent" concentrations
# halon1202 is missing from the CMIP6 set; take it from the RCP4.5 concentration file instead.
WMGHG_concs['halon1202'] = 0
WMGHG_concs.loc[1765:2014,'halon1202'] = pd.read_csv('http://www.pik-potsdam.de/~mmalte/rcps/data/RCP45_MIDYEAR_CONCENTRATIONS.DAT',skiprows=38,delim_whitespace=True,index_col=0)['HALON1202'].loc[1765:2014].values
## we extend CO2, CH4 & N2O out to 2019 using the NOAA ESRL data
NOAA_molefrac = pd.read_csv('https://www.esrl.noaa.gov/gmd/aggi/NOAA_MoleFractions_2020.csv',skiprows=2,index_col=0,skipfooter=5).iloc[1:].replace('nd',np.nan).apply(pd.to_numeric).rename(dict(CO2='carbon_dioxide',CH4='methane',N2O='nitrous_oxide'),axis=1)
WMGHG_concs = WMGHG_concs.reindex(np.arange(2020))
for species in ['carbon_dioxide','methane','nitrous_oxide']:
    # scale the NOAA data to join seamlessly (scale factors are almost exactly 1)
    scale_factor = WMGHG_concs.loc[2010:2014,species].mean() / NOAA_molefrac.loc[2010:2015,species].mean()
    WMGHG_concs.loc[2015:2019,species] = NOAA_molefrac.loc[2015:2020,species].values * scale_factor
WMGHG_concs.drop(np.arange(1750),inplace=True)
# rescale all GHGs to be in ppb (bar CO2)
WMGHG_concs[WMGHG_concs.columns.drop(['carbon_dioxide','methane','nitrous_oxide'])] *= 1/1000
```
#### emissions & forcing
Emissions & external forcing are taken from the RCMIP protocol.
Reference:
Nicholls, Z. R. J., Meinshausen, M., Lewis, J., Gieseke, R., Dommenget, D., Dorheim, K., … Xie, Z. (2020). Reduced complexity model intercomparison project phase 1: Protocol, results and initial observations. Geoscientific Model Development Discussions, 1–33. https://doi.org/10.5194/gmd-2019-375
```
## emissions
def get_SSP_emms(ssp):
    """Return FaIR-format emissions for SSP scenario `ssp` over 1750-2100,
    with short-lived species rebased so their 1750 value is zero."""
    scenario_emms = RCMIP_to_FaIR_input_emms(ssp).interpolate().loc[1750:2100]
    # Species reported relative to the pre-industrial (1750) baseline.
    rebase = ['so2','nox','co','nmvoc','bc','nh3','oc','nox_avi','methyl_bromide','methyl_chloride','chcl3','ch2cl2']
    scenario_emms.loc[:, rebase] = scenario_emms.loc[:, rebase] - scenario_emms.loc[1750, rebase]
    return scenario_emms
# Scenarios to run; emissions/forcing are concatenated with the scenario as the top column level.
choose_ssps=['ssp119','ssp126','ssp245','ssp370','ssp585']
SSP_emms = pd.concat([get_SSP_emms(x) for x in choose_ssps],axis=1,keys=choose_ssps)
## forcing
SSP_forc = pd.concat([get_RCMIP_forc(x) for x in choose_ssps],axis=1,keys=choose_ssps).loc[:2100]
```
## run the model!
```
# Emissions-driven FaIR run over all five SSPs with default parameters.
default_SSP_run = run_FaIR(emissions_in=SSP_emms,forcing_in=SSP_forc)
```
## plot the results
```
## get MAGICC7.1.0 data to benchmark
# RCMIP phase-1 MAGICC output, restricted to the esm-*-allGHG runs for our scenarios.
MAGICC_defaults = pd.read_csv('../../aux/input-data/RCMIP/data_results_phase-1_magicc7_rcmip_phase-1_magicc7.1.0.beta_v1-0-0.csv').drop(['Model','Unit','Climatemodel','Region'],axis=1).set_index(['Scenario','Variable']).reindex(['esm-'+x+'-allGHG' for x in choose_ssps],level=0)
# Mapping between RCMIP variable names and FaIR gas names (plus unit scalings).
RCMIP_outputmap = pd.read_csv('../../aux/FaIRv2.0.0-alpha_RCMIP_inputmap.csv',index_col=0)
MAGICC_defaults = MAGICC_defaults.rename(RCMIP_outputmap.reset_index().set_index('RCMIP_concs_key')['index'].to_dict(),level=1).reindex(RCMIP_outputmap.index,level=1).T
MAGICC_defaults.index = MAGICC_defaults.index.astype(int)
MAGICC_defaults.rename(dict(zip(['esm-'+x+'-allGHG' for x in choose_ssps],choose_ssps)),axis=1,level=0,inplace=True)
## get FaIRv1.5 data to benchmark
FaIR_defaults = pd.concat([pd.read_csv('../../aux/input-data/RCMIP/rcmip-master-data-results-phase-1-fair/data/results/phase-1/fair/rcmip_phase-1_fair-1.5-default-'+x+'_v1-0-1.csv') for x in ['esm-'+x+'-allGHG' for x in choose_ssps]]).drop(['Model','Unit','Climatemodel','Region'],axis=1).set_index(['Scenario','Variable'])
FaIR_defaults = FaIR_defaults.rename(RCMIP_outputmap.reset_index().set_index('RCMIP_concs_key')['index'].to_dict(),level=1).reindex(RCMIP_outputmap.index,level=1).T
# Index strings start with the year; keep just that as an int.
FaIR_defaults.index = [int(x[:4]) for x in FaIR_defaults.index]
FaIR_defaults.rename(dict(zip(['esm-'+x+'-allGHG' for x in choose_ssps],choose_ssps)),axis=1,level=0,inplace=True)
## set plot rcParams
matplotlib.rcParams['font.family']='Helvetica'
matplotlib.rcParams['font.size']=11
matplotlib.rcParams['axes.formatter.limits']=-3,3
matplotlib.rcParams['legend.frameon']=False
# Embed fonts as TrueType so the PDF figures are editable.
plt.rcParams['pdf.fonttype'] = 42
## & plot!
# One panel per gas: observed concentrations (black circles), FaIRv2.0.0 (solid),
# MAGICC7 (dotted) and FaIRv1.5 (dash-dot), history in grey and SSPs in color.
colors= {'ssp245':'#7570b3','ssp370':'#d95f02','ssp585':'#e7298a','ssp119':'#66a61e','ssp126':'#1b9e77','history':'grey'}
map_conc_names = dict(zip(WMGHG_concs.columns,['C$_2$F$_6$','C$_3$F$_8$','C$_4$F$_{10}$','C$_5$F$_{12}$','C$_6$F$_{14}$','C$_7$F$_{16}$','C$_8$F$_{18}$','cC$_4$F$_{8}$','CO$_2$','CCl$_4$','CF$_4$','CFC113','CFC114','CFC115','CFC11','CFC12','CH$_2$Cl$_2$','CH$_3$CCl$_3$','CHCl$_3$','Halon1211','Halon1301','Halon2402','HCFC141b', 'HCFC142b', 'HCFC22', 'HFC125','HFC134a', 'HFC143a', 'HFC152a', 'HFC227ea', 'HFC236fa', 'HFC23','HFC245fa', 'HFC32', 'HFC365mfc', 'HFC4310mee','CH$_4$','CH$_3$Br','CH$_3$Cl','NF$_3$','N$_2$O','SF$_6$','SO$_2$F$_2$','Halon1202']))
fig,ax = plt.subplots(8,6,figsize=(15,15))
with plt.rc_context({"lines.linewidth": 0.75,"lines.markersize":4,"lines.markerfacecolor":'none',"lines.markeredgewidth":0.5}):
    for i,gas in enumerate(WMGHG_concs.columns):
        ax.flatten()[i].plot(WMGHG_concs.loc[1850:,gas].iloc[::10],'o',color='k')
        for ssp in choose_ssps:
            ax.flatten()[i].plot(default_SSP_run['C'].loc[2014:2100,(ssp,'default',gas)],color=colors[ssp],label=ssp)
            ax.flatten()[i].plot(MAGICC_defaults.loc[2014:2100,(ssp,gas)]*RCMIP_outputmap.loc[gas,'RCMIP_concs_scaling'],color=colors[ssp],ls=':')
            try: # need exceptions for FaIR as not all gases were included as this point.
                ax.flatten()[i].plot(FaIR_defaults.loc[2014:2100,(ssp,gas)]*RCMIP_outputmap.loc[gas,'RCMIP_concs_scaling'],color=colors[ssp],ls='-.')
            except:
                pass
        # Historical period: NOTE(review) the MAGICC/FaIR lines below reuse `ssp`
        # left over from the loop above (its last value) — identical over history,
        # but worth confirming that is intentional.
        ax.flatten()[i].plot(default_SSP_run['C'].loc[1850:2014,('ssp245','default',gas)],color=colors['history'],label='historical')
        ax.flatten()[i].plot(MAGICC_defaults.loc[1850:2014,(ssp,gas)]*RCMIP_outputmap.loc[gas,'RCMIP_concs_scaling'],color=colors['history'],ls=':')
        try:
            ax.flatten()[i].plot(FaIR_defaults.loc[1850:2014,(ssp,gas)]*RCMIP_outputmap.loc[gas,'RCMIP_concs_scaling'],color=colors['history'],ls='-.')
        except:
            pass
        ax.flatten()[i].text(0.5,0.98,map_conc_names[gas],transform=ax.flatten()[i].transAxes,va='bottom',ha='center',fontsize=12,fontweight='bold')
        # Inset zoom over the historical period for the three major GHGs.
        if gas in ['carbon_dioxide','methane','nitrous_oxide']:
            ax1 = inset_axes(ax.flatten()[i],width="100%",height="100%",bbox_to_anchor=(0.05,0.43,0.5,0.6),bbox_transform=ax.flatten()[i].transAxes)
            ax1.plot(default_SSP_run['C'].loc[1850:2014,('ssp245','default',gas)],color=colors['history'])
            ax1.plot(MAGICC_defaults.loc[1850:2014,(ssp,gas)]*RCMIP_outputmap.loc[gas,'RCMIP_concs_scaling'],color=colors['history'],ls=':')
            ax1.plot(FaIR_defaults.loc[1850:2014,(ssp,gas)]*RCMIP_outputmap.loc[gas,'RCMIP_concs_scaling'],color=colors['history'],ls='-.')
            ax1.plot(WMGHG_concs.loc[1850:,gas].iloc[::10],'o',color='k')
            ax1.set_xticklabels([])
            ax1.tick_params(left=False,labelleft=False,right=True,labelright=True)
            ax1.ticklabel_format(axis='y',style="plain")
            ax1.set_xlim(1850,2014)
# Cosmetic pass over all panels (list comprehensions used for their side effects).
[a.tick_params(labelbottom=False) for a in ax.flatten()]
[a.tick_params(labelbottom=True) for a in ax.flatten()[-11:]]
[a.ticklabel_format(style="plain") for a in ax.flatten()[-11:]]
[a.set_xlabel('year') for a in ax.flatten()[-11:]]
[a.set_xlim(1850,2100) for a in ax.flatten()]
[a.spines[pos].set_visible(False) for pos in ['right','top'] for a in ax.flatten()]
# Proxy artists for the model-version legend entries.
ax.flatten()[-6].plot([],[],'k',label='FaIRv2.0.0')
ax.flatten()[-6].plot([],[],'k:',label='MAGICC7.1.0-beta')
ax.flatten()[-6].plot([],[],'k-.',label='FaIRv1.5')
# fig.subplots_adjust(hspace=0.1)
plt.tight_layout(h_pad=0,w_pad=0)
ax.flatten()[-6].legend(loc=(1.05,0),labelspacing=0.1,prop={'size':9})
# Hide the unused trailing panels and save the manuscript figure in both formats.
[a.set_visible(False) for a in ax.flatten()[-5:]]
[fig.savefig('../../docs/manuscript/figures/Fig2.'+x,dpi=600,bbox_inches='tight') for x in ['png','pdf']]
''
```
# I. Default parameter metrics
Here we compute GWP values for each gas in the FaIRv2.0.0-alpha namelist; under a scenario of concentrations fixed at their present day (2014) levels. These include the impact due to all forcing (direct through radiative effects + indirect through any atmospheric chemistry).
```
# Concentration-driven historical run, held at 2014 levels thereafter, used to
# back out consistent emissions; each gas is then perturbed by 1 t to get AGWP/GWP.
historical_concrun = WMGHG_concs.dropna().copy()
## add in aerosol emissions
aer_species = ['so2','nox','co','nmvoc','bc','nh3','oc','nox_avi']
historical_concrun = pd.concat([historical_concrun,get_SSP_emms('ssp245').loc[:2014,aer_species]],axis=1)
historical_concrun = pd.concat([historical_concrun],axis=1,keys=['historical'])
historical_forc = pd.concat([get_RCMIP_forc('ssp245').loc[historical_concrun.index]],axis=1,keys=['historical'])
## extend both series into the future, but fixed @ 2014 levels
historical_concrun = historical_concrun.reindex(np.arange(1750,2516)).interpolate(limit=501,limit_direction='forward')
historical_forc = historical_forc.reindex(np.arange(1750,2516)).interpolate(limit=501,limit_direction='forward')
## concentration-driven run over history
hist_run = run_FaIR(concentrations_in=historical_concrun, forcing_in=historical_forc, aer_concs_in=aer_species)
## obtain corresponding emissions & reset aerosol emissions
hist_emms = hist_run['Emissions'].droplevel(axis=1,level=1)
hist_emms.loc[:2014,('historical',aer_species)] = get_SSP_emms('ssp245').loc[:2014,aer_species].values
hist_emms.loc[2015:,('historical',aer_species)] = hist_emms.loc[2014,('historical',aer_species)].values
## run emissions to check consistency
hist_run_emms = run_FaIR(emissions_in=hist_emms, forcing_in=historical_forc)
## run over each gas species, perturbing each by 1t in 2015
# Native units are Mt (ppb-type gases); CO2 is in GtC and N2O in MtN2, hence the factors.
gas_mass_conversion_factors = pd.Series(index=hist_emms.columns.levels[1],dtype=float)
gas_mass_conversion_factors.loc[:] = 1
gas_mass_conversion_factors.loc['carbon_dioxide'] = (1/1000)/(44.01/12.01)
gas_mass_conversion_factors.loc['nitrous_oxide'] = 28/44
rf_results = []
for gas_species in hist_emms.columns.levels[1]:
    pert_emms = hist_emms.copy()
    pert_emms.loc[2015,('historical',gas_species)] += gas_mass_conversion_factors.loc[gas_species]/1e6
    pert_result = run_FaIR(emissions_in=pert_emms, forcing_in=historical_forc, show_run_info=False)
    # RF anomaly of the perturbed run relative to the unperturbed emissions run.
    rf_results += [(pert_result['RF'].loc[:,('historical','default','Total')]-hist_run_emms['RF'].loc[:,('historical','default','Total')]).rename(gas_species)]
rf_results = pd.concat(rf_results,axis=1)
# AGWP = time-integrated RF anomaly; GWP = AGWP relative to CO2 at each horizon.
AGWP = rf_results.cumsum().loc[2015+np.array([5,10,20,50,100,500])]
AGWP.index = np.array([5,10,20,50,100,500])
GWP = AGWP.apply(lambda x: x/AGWP.carbon_dioxide)
print('GWP value over various timescales:')
GWP.index.name = 'timescale / years'
GWP.columns.name = 'agent'
GWP.T.applymap(lambda x:round_to_sf(x,2))#.to_latex('../../docs/manuscript/tables/TabS3',escape=True,bold_rows=True)
```
# Supplement I. Methane lifetime over history + RCP8.5 extension
A demonstration of the state-dependent lifetime of methane over RCP history + extended to 2100 with RCP8.5. We use RCP8.5 since this is (at least, appears to be) the most commonly discussed scenario in methane sensitivity literature.
```
# Emissions-driven RCP8.5 run to illustrate the state-dependent methane lifetime.
RCP85_emms = RCMIP_to_FaIR_input_emms('rcp85').dropna(how='all').dropna(axis=1,how='all')
RCP85_emms = pd.concat([RCP85_emms],axis=1,keys=['RCP8.5'])
# Rebase short-lived species to their 1765 (RCP pre-industrial) values;
# intersect first since RCP8.5 does not carry every species.
rebase_species = ['so2','nox','co','nmvoc','bc','nh3','oc','nox_avi','methyl_bromide','methyl_chloride','chcl3','ch2cl2']
rebase_species = list(set(rebase_species).intersection(RCP85_emms.columns.levels[1]))
RCP85_emms.loc[:,('RCP8.5',rebase_species)] -= RCP85_emms.loc[1765,('RCP8.5',rebase_species)]
RCP85_forc = pd.concat([get_RCMIP_forc('rcp85',['Radiative Forcing|Anthropogenic|Albedo Change','Radiative Forcing|Natural']).dropna()],axis=1,keys=['RCP8.5'])
RCP85_run = run_FaIR(emissions_in=RCP85_emms,
                     forcing_in=RCP85_forc,
                     gas_parameters=get_gas_parameter_defaults().reindex(RCP85_emms.columns.levels[1],axis=1,level=1))
# Effective lifetime = state-dependent scaling factor alpha times the base lifetime tau1.
CH4_lifetime = RCP85_run['alpha'].xs('methane',axis=1,level=2).droplevel(axis=1,level=1)*RCP85_run['gas_parameters'].loc['tau1',('default','methane')]
sn.lineplot(data=CH4_lifetime.loc[1850:2100],palette=['k'])
sn.despine()
plt.xlabel('year')
plt.ylabel('CH$_4$ lifetime / yrs')
plt.gca().ticklabel_format(style='plain')
plt.xlim(1850,2100)
[plt.savefig('../../docs/manuscript/figures/FigS2.'+x,dpi=600,bbox_inches='tight') for x in ['png','pdf']]
''
# comparison with Holmes et al., 2013
## 2010 values:
# Holmes lifetime combines OH, stratospheric, tropospheric-Cl and soil sinks in parallel.
print('Holmes 2010:',1/(1/120+1/150+1/200+1/11.2))
print('FaIRv2.0.0-alpha 2010:',CH4_lifetime.loc[2010].values[0],end='\n\n')
print('Holmes 2010-2100 change:',(1/120+1/150+1/200+1/11.2)/(1/120+1/150+1/200+1/(11.2*1.129)))
print('FaIRv2.0.0-alpha 2010-2100 change:',(CH4_lifetime.loc[2100]/CH4_lifetime.loc[2010]).values[0])
```
# Supplement II. FaIRv2.0.0 additivity
Very brief test of how linear FaIR actually is. Non-linearity in FaIR only arises from the CO2 & CH4 cycles. The climate response of FaIR is linear in forcing. Here we test the linearity over history by carrying out several CO2 / CH4 pulse response experiments.
```
# default_SSP_run = run_FaIR(emissions_in=SSP_emms,forcing_in=SSP_forc)
# Linearity test: add CO2/CH4 pulses of widely varying size to a 2019 baseline
# (ssp245) and compare the scaled responses.
base_emms = RCMIP_to_FaIR_input_emms('ssp245').interpolate().loc[1750:2500]
rebase_species = ['so2','nox','co','nmvoc','bc','nh3','oc','nox_avi','methyl_bromide','methyl_chloride','chcl3','ch2cl2']
base_emms.loc[:,rebase_species] -= base_emms.loc[1750,rebase_species]
base_emms = pd.concat([base_emms],axis=1,keys=['ssp245'])
experiments = []
# scale methane by 28 (GWP100) for closer comparison
pulse_scaling = dict(carbon_dioxide=12/44,methane=1000/28)
for species in ['carbon_dioxide','methane']:
    # Pulse sizes span 0.01 to 1000 (roughly logarithmic coverage), plus a zero-pulse control.
    for pulse_size in [0]+list(np.arange(0.01,0.1,0.01))+list(np.arange(0.1,1,0.1))+list(np.arange(1,10,1))+list(np.arange(10,100,10))+list(np.arange(100,1001,100)):
        experiment = base_emms.copy()
        experiment.loc[2019,('ssp245',species)] += pulse_size*pulse_scaling[species]
        # Name each scenario "<species>_<pulse size>" for later parsing.
        experiments += [experiment.rename(dict(ssp245=species+'_'+str(pulse_size)),axis=1,level=0)]
experiments = pd.concat(experiments,axis=1)
pulse_runs = run_FaIR(emissions_in=experiments,
                      forcing_in=pd.concat([get_RCMIP_forc('ssp245')]*experiments.columns.levels[0].size,axis=1,keys=experiments.columns.levels[0]))
```
### nonlinearities in terms of scaled anomalies
```
## compute the pulse experiment anomalies relative to the baseline
# The 'carbon_dioxide_0' experiment is the zero-size pulse, i.e. the unperturbed reference run.
# Column names like 'carbon_dioxide_0.5' are split into (species, pulse_size): 'carbon_dioxide'
# is 14 characters and 'methane' is 7, hence the x[:14]/x[15:] vs x[:7]/x[8:] slicing below.
# The *_scaled frames normalise each anomaly to a common 1000 GtCO2-eq pulse (x*1000/pulse_size)
# so that runs with different pulse sizes are directly comparable; perfect linearity would make
# all scaled curves identical.
pulse_temp_anomalies = (pulse_runs['T'] - pulse_runs['T'].carbon_dioxide_0.values)
pulse_temp_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in pulse_temp_anomalies.columns.levels[0]])
pulse_temp_anomalies = pulse_temp_anomalies.drop(0,axis=1,level=1)
pulse_temp_anomalies_scaled = pulse_temp_anomalies.apply(lambda x: x*1000/x.name[1])
# CO2 radiative forcing anomalies (per pulse experiment).
CO2_RF_anomalies = (pulse_runs['RF'].xs('carbon_dioxide',axis=1,level=2) - pulse_runs['RF'].xs('carbon_dioxide',axis=1,level=2).carbon_dioxide_0.values)
CO2_RF_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in CO2_RF_anomalies.columns.levels[0]])
CO2_RF_anomalies = CO2_RF_anomalies.drop(0,axis=1,level=1)
CO2_RF_anomalies_scaled = CO2_RF_anomalies.apply(lambda x: x*1000/x.name[1])
# CH4 radiative forcing anomalies.
CH4_RF_anomalies = (pulse_runs['RF'].xs('methane',axis=1,level=2) - pulse_runs['RF'].xs('methane',axis=1,level=2).carbon_dioxide_0.values)
CH4_RF_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in CH4_RF_anomalies.columns.levels[0]])
CH4_RF_anomalies = CH4_RF_anomalies.drop(0,axis=1,level=1)
CH4_RF_anomalies_scaled = CH4_RF_anomalies.apply(lambda x: x*1000/x.name[1])
# CO2 concentration anomalies.
CO2_C_anomalies = (pulse_runs['C'].xs('carbon_dioxide',axis=1,level=2) - pulse_runs['C'].xs('carbon_dioxide',axis=1,level=2).carbon_dioxide_0.values)
CO2_C_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in CO2_C_anomalies.columns.levels[0]])
CO2_C_anomalies = CO2_C_anomalies.drop(0,axis=1,level=1)
CO2_C_anomalies_scaled = CO2_C_anomalies.apply(lambda x: x*1000/x.name[1])
# CH4 concentration anomalies.
CH4_C_anomalies = (pulse_runs['C'].xs('methane',axis=1,level=2) - pulse_runs['C'].xs('methane',axis=1,level=2).carbon_dioxide_0.values)
CH4_C_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in CH4_C_anomalies.columns.levels[0]])
CH4_C_anomalies = CH4_C_anomalies.drop(0,axis=1,level=1)
CH4_C_anomalies_scaled = CH4_C_anomalies.apply(lambda x: x*1000/x.name[1])
# Lifetime-scaling-factor (alpha) anomalies; .sub(..., axis=0) aligns the baseline by row index.
CO2_alph_anomalies = pulse_runs['alpha'].xs('carbon_dioxide',axis=1,level=2).sub(pulse_runs['alpha'].xs('carbon_dioxide',axis=1,level=2).carbon_dioxide_0,axis=0)
CO2_alph_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in CO2_alph_anomalies.columns.levels[0]])
CO2_alph_anomalies = CO2_alph_anomalies.drop(0,axis=1,level=1)
CO2_alph_anomalies_scaled = CO2_alph_anomalies.apply(lambda x: x*1000/x.name[1])
CH4_alph_anomalies = pulse_runs['alpha'].xs('methane',axis=1,level=2).sub(pulse_runs['alpha'].xs('methane',axis=1,level=2).carbon_dioxide_0,axis=0)
CH4_alph_anomalies.columns = pd.MultiIndex.from_tuples([(x[:14],float(x[15:])) if x[0]=='c' else (x[:7],float(x[8:])) for x in CH4_alph_anomalies.columns.levels[0]])
CH4_alph_anomalies = CH4_alph_anomalies.drop(0,axis=1,level=1)
CH4_alph_anomalies_scaled = CH4_alph_anomalies.apply(lambda x: x*1000/x.name[1])
# Stack all variables into one long-format frame for plotting (time / variable / pulse_type / pulse_size / value).
anomalies = pd.concat([pulse_temp_anomalies_scaled,
                       CO2_RF_anomalies_scaled,
                       CH4_RF_anomalies_scaled,
                       CO2_C_anomalies_scaled,
                       CH4_C_anomalies_scaled,
                       CO2_alph_anomalies_scaled,
                       CH4_alph_anomalies_scaled],
                      axis=1,
                      keys=['T',r'RF$_{\mathrm{CO}_2}$',r'RF$_{\mathrm{CH}_4}$',r'C$_{\mathrm{CO}_2}$',r'C$_{\mathrm{CH}_4}$',r'$\alpha_{\mathrm{CO}_2}$',r'$\alpha_{\mathrm{CH}_4}$'],
                      names=['variable']).rename(dict(carbon_dioxide='CO$_2$',methane='CH$_4$'),axis=1,level=1).loc[2020:].sort_index(axis=1).stack(level=[0,1,2]).reset_index().rename({'level_0':'time','level_2':'pulse_type','level_3':'pulse_size',0:'value'},axis=1)
# Express time as years since the 2019 pulse.
anomalies.time -= 2019
# set relative to small pulse limit
## comment out if absolute anomalies (ie. relative to reference) desired
# Subtract the smallest (0.01 GtCO2-eq) pulse's scaled response, treating it as the linear limit:
# the remainder is the nonlinear component of each larger pulse's (scaled) response.
pulse_temp_anomalies_scaled = pulse_temp_anomalies_scaled.apply(lambda x: x-pulse_temp_anomalies_scaled.loc[:,(x.name[0],0.01)])
CO2_RF_anomalies_scaled = CO2_RF_anomalies_scaled.apply(lambda x: x-CO2_RF_anomalies_scaled.loc[:,(x.name[0],0.01)])
CH4_RF_anomalies_scaled = CH4_RF_anomalies_scaled.apply(lambda x: x-CH4_RF_anomalies_scaled.loc[:,(x.name[0],0.01)])
CO2_C_anomalies_scaled = CO2_C_anomalies_scaled.apply(lambda x: x-CO2_C_anomalies_scaled.loc[:,(x.name[0],0.01)])
CH4_C_anomalies_scaled = CH4_C_anomalies_scaled.apply(lambda x: x-CH4_C_anomalies_scaled.loc[:,(x.name[0],0.01)])
CO2_alph_anomalies_scaled = CO2_alph_anomalies_scaled.apply(lambda x: x-CO2_alph_anomalies_scaled.loc[:,(x.name[0],0.01)])
CH4_alph_anomalies_scaled = CH4_alph_anomalies_scaled.apply(lambda x: x-CH4_alph_anomalies_scaled.loc[:,(x.name[0],0.01)])
# Same long-format stacking as `anomalies`, with " - relative" pulse-type labels so the two
# sets can be distinguished when concatenated for plotting below.
anomalies_rel = pd.concat([pulse_temp_anomalies_scaled,
                       CO2_RF_anomalies_scaled,
                       CH4_RF_anomalies_scaled,
                       CO2_C_anomalies_scaled,
                       CH4_C_anomalies_scaled,
                       CO2_alph_anomalies_scaled,
                       CH4_alph_anomalies_scaled],
                      axis=1,
                      keys=['T',r'RF$_{\mathrm{CO}_2}$',r'RF$_{\mathrm{CH}_4}$',r'C$_{\mathrm{CO}_2}$',r'C$_{\mathrm{CH}_4}$',r'$\alpha_{\mathrm{CO}_2}$',r'$\alpha_{\mathrm{CH}_4}$'],
                      names=['variable']).rename(dict(carbon_dioxide='CO$_2$ - relative',methane='CH$_4$ - relative'),axis=1,level=1).loc[2020:].sort_index(axis=1).stack(level=[0,1,2]).reset_index().rename({'level_0':'time','level_2':'pulse_type','level_3':'pulse_size',0:'value'},axis=1)
anomalies_rel.time -= 2019
# Combine absolute and relative-to-linear-limit anomalies into one plotting frame.
plot_df = pd.concat([anomalies,anomalies_rel])
plot_df.head()
# One panel per (pulse_type, variable); greyscale hue encodes pulse size (darker = smaller).
g=sn.FacetGrid(plot_df.query('pulse_size in [1,10,100,200,500,1000]').sort_values(['pulse_type','variable']),col='variable',row='pulse_type',hue='pulse_size',palette=[(x,x,x) for x in np.arange(0,1,1/7)[::-1]],margin_titles=True,sharey=False)
g.map(sn.lineplot,'time','value')
g.set_titles(col_template="{col_name}",row_template='pulse type = {row_name}',fontweight='bold').set(xlim=[0,480])
# Per-column y-axis units (columns are sorted alphabetically by the LaTeX variable labels).
[a.set_ylabel('anomaly / ppb') for a in g.axes[:,2]]
[a.set_ylabel('anomaly / ppm') for a in g.axes[:,3]]
[a.set_ylabel('anomaly / W m$^{-2}$') for a in g.axes[:,4]]
[a.set_ylabel('anomaly / K') for a in g.axes[:,-1]]
[a.set_ylabel('anomaly / -') for a in g.axes[:,0]]
g.axes[0,0].legend(title='pulse size / GtCO$_2$-eq')
# Save the figure in both formats for the manuscript.
[plt.savefig('../../docs/manuscript/figures/FigS3.'+x,dpi=600,bbox_inches='tight') for x in ['png','pdf']]
# Bare string suppresses the list-comprehension repr in the notebook output.
''
```
### measuring nonlinearities in a relative sense:
The code below is kept outside the fenced code cell (commented out) to prevent it from running.
## measuring extent of nonlinearity as anomalies relative to 1000 GtC-eq pulse, normalised by 1000 GtC-eq pulse anomaly
# Each frame below is (scaled anomaly - 1000-pulse scaled anomaly) / (1000-pulse scaled anomaly),
# i.e. the fractional deviation of every pulse size from the largest pulse's response.
CO2_T_nonlin = pulse_temp_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(pulse_temp_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(pulse_temp_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CH4_T_nonlin = pulse_temp_anomalies_scaled.loc[2020:,'methane'].sub(pulse_temp_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(pulse_temp_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
CO2_CO2_RF_nonlin = CO2_RF_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(CO2_RF_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(CO2_RF_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CO2_CH4_RF_nonlin = CO2_RF_anomalies_scaled.loc[2020:,'methane'].sub(CO2_RF_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(CO2_RF_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
CH4_CO2_RF_nonlin = CH4_RF_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(CH4_RF_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(CH4_RF_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CH4_CH4_RF_nonlin = CH4_RF_anomalies_scaled.loc[2020:,'methane'].sub(CH4_RF_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(CH4_RF_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
CO2_CO2_C_nonlin = CO2_C_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(CO2_C_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(CO2_C_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CO2_CH4_C_nonlin = CO2_C_anomalies_scaled.loc[2020:,'methane'].sub(CO2_C_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(CO2_C_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
CH4_CO2_C_nonlin = CH4_C_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(CH4_C_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(CH4_C_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CH4_CH4_C_nonlin = CH4_C_anomalies_scaled.loc[2020:,'methane'].sub(CH4_C_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(CH4_C_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
CO2_CO2_alph_nonlin = CO2_alph_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(CO2_alph_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(CO2_alph_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CO2_CH4_alph_nonlin = CO2_alph_anomalies_scaled.loc[2020:,'methane'].sub(CO2_alph_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(CO2_alph_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
CH4_CO2_alph_nonlin = CH4_alph_anomalies_scaled.loc[2020:,'carbon_dioxide'].sub(CH4_alph_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0).div(CH4_alph_anomalies_scaled.loc[2020:,('carbon_dioxide',1000)],axis=0)
CH4_CH4_alph_nonlin = CH4_alph_anomalies_scaled.loc[2020:,'methane'].sub(CH4_alph_anomalies_scaled.loc[2020:,('methane',1000)],axis=0).div(CH4_alph_anomalies_scaled.loc[2020:,('methane',1000)],axis=0)
nonlinearities = pd.concat([pd.concat([CO2_T_nonlin,CH4_T_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type']),
                            pd.concat([CO2_CO2_RF_nonlin,CO2_CH4_RF_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type']),
                            # BUG FIX: the CH4-pulse column previously repeated CH4_CO2_RF_nonlin.
                            pd.concat([CH4_CO2_RF_nonlin,CH4_CH4_RF_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type']),
                            pd.concat([CO2_CO2_C_nonlin,CO2_CH4_C_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type']),
                            pd.concat([CH4_CO2_C_nonlin,CH4_CH4_C_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type']),
                            pd.concat([CO2_CO2_alph_nonlin,CO2_CH4_alph_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type']),
                            pd.concat([CH4_CO2_alph_nonlin,CH4_CH4_alph_nonlin],axis=1,keys=['CO2','CH4'],names=['pulse_type'])],
                           axis=1,
                           # BUG FIX: keys must be raw strings ('\t' in '\text' is a TAB escape and
                           # '\a' in '\alpha' is a BEL escape); use \mathrm for consistency with the
                           # labels used in the earlier FacetGrid plot.
                           keys=['T',r'RF$_{\mathrm{CO}_2}$',r'RF$_{\mathrm{CH}_4}$',r'C$_{\mathrm{CO}_2}$',r'C$_{\mathrm{CH}_4}$',r'$\alpha_{\mathrm{CO}_2}$',r'$\alpha_{\mathrm{CH}_4}$'],
                           names=['variable']).sort_index(axis=1).stack(level=[0,1,2]).reset_index().rename({'level_0':'time','level_3':'pulse_size',0:'value'},axis=1)
nonlinearities.time -= 2019
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class SymPowerNorm(matplotlib.colors.Normalize):
    """Symmetric power-law colour normalisation.

    Transforms values as sign(v) * |v|**order, then maps the transformed
    [vmin, vmax] linearly onto [0, 1]. Assumes vmin < 0 < vmax.
    """
    def __init__(self, vmin=None, vmax=None, order=1, clip=False):
        self.order = order
        matplotlib.colors.Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        # BUG FIX: use np.sign instead of abs(v)/v for the signed power
        # transform -- the original divided by zero whenever a value was 0.
        transform = lambda v: np.sign(v) * np.abs(v)**self.order
        x, y = [transform(self.vmin), transform(self.vmax)], [0, 1]
        return np.ma.masked_array(np.interp(transform(value), x, y))
def mapplot(x, y, z, **kwargs):
    """Draw one pcolormesh panel of z on a time (x-axis) by pulse-size (y-axis) grid."""
    combined = pd.concat([x, y, z], axis=1)
    grid = combined.set_index(['time', 'pulse_size']).unstack().droplevel(0, axis=1)
    colour_norm = matplotlib.colors.Normalize(vmin=-0.5, vmax=0.5)  # SymPowerNorm(order=1,vmin=-0.5,vmax=0.5)
    plt.pcolormesh(grid.index, grid.columns, grid.values.T,
                   shading='auto', norm=colour_norm, cmap='RdBu_r')
# One heat-map panel per (pulse_type, variable) showing fractional nonlinearity vs time and pulse size.
g=sn.FacetGrid(nonlinearities,col='variable',row='pulse_type',margin_titles=True,despine=False,gridspec_kws=dict(hspace=0.1,wspace=0.1))
g.map(mapplot,'time','pulse_size','value')
g.set_titles(col_template="{col_name}",row_template='pulse type = {row_name}',fontweight='bold')
# Pulse sizes span five orders of magnitude, hence the log y-axis.
g.set(yscale='log')
[a.set_ylabel('pulse size / GtC-eq') for a in g.axes[:,0]]
[a.set_xlabel('year') for a in g.axes[-1,:]]
# Shared colourbar in an inset axis to the right of the bottom-right panel.
axins = inset_axes(g.axes[-1,-1], width="5%",height="100%",loc='lower left',bbox_to_anchor=(1.2, 0.55, 1, 1),bbox_transform=g.axes[-1,-1].transAxes,borderpad=0)
plt.colorbar(cax=axins,extend='both')
|
github_jupyter
|
```
# Auto-reload project modules when their source files change.
%reload_ext autoreload
%autoreload 2
# Render matplotlib figures inline in the notebook.
%matplotlib inline
```
### Image Generation from Audio
```
from pathlib import Path
from IPython.display import Audio
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from utils import read_file, transform_path
DATA = Path('data')
# these folders must be in place
NSYNTH_AUDIO = DATA/'nsynth_audio'
TRAIN_AUDIO_PATH = NSYNTH_AUDIO/'train'
VALID_AUDIO_PATH = NSYNTH_AUDIO/'valid'
# these folders will be created
NSYNTH_IMAGES = DATA/'nsynth_images'
TRAIN_IMAGE_PATH = NSYNTH_IMAGES/'train'
VALID_IMAGE_PATH = NSYNTH_IMAGES/'valid'
# Keep only acoustic-instrument samples (filename encodes the source type).
train_acoustic_fnames = [f.name for f in TRAIN_AUDIO_PATH.iterdir()
                         if 'acoustic' in f.name]
valid_acoustic_fnames = [f.name for f in VALID_AUDIO_PATH.iterdir()
                         if 'acoustic' in f.name]
len(train_acoustic_fnames), len(valid_acoustic_fnames)
# Pick one sample to listen to and inspect.
fn = train_acoustic_fnames[8]; fn
Audio(str(TRAIN_AUDIO_PATH/fn))
x, sr = read_file(fn, TRAIN_AUDIO_PATH)
x.shape, sr, x.dtype
def log_mel_spec_tfm(fname, src_path, dst_path):
    """Convert one audio file into a log-mel-spectrogram PNG.

    Reads `fname` from `src_path`, computes a power mel spectrogram,
    converts it to dB, and saves it as `<stem>.png` in `dst_path`.
    """
    x, sample_rate = read_file(fname, src_path)
    # STFT / mel-filterbank parameters.
    n_fft = 1024
    hop_length = 256
    n_mels = 40
    fmin = 20
    fmax = sample_rate / 2
    # Pass the signal as y= : librosa deprecated (and later removed) the
    # positional audio argument.
    mel_spec_power = librosa.feature.melspectrogram(y=x, sr=sample_rate, n_fft=n_fft,
                                                    hop_length=hop_length,
                                                    n_mels=n_mels, power=2.0,
                                                    fmin=fmin, fmax=fmax)
    # dB relative to the peak value.
    mel_spec_db = librosa.power_to_db(mel_spec_power, ref=np.max)
    # with_suffix handles any extension length (the original sliced fname[:-4],
    # which only worked for 3-character extensions like .wav).
    dst_fname = dst_path / Path(fname).with_suffix('.png').name
    plt.imsave(dst_fname, mel_spec_db)
# Smoke-test the transform on one file, writing the PNG into the current directory.
log_mel_spec_tfm(fn, TRAIN_AUDIO_PATH, Path('.'))
img = plt.imread(fn[:-4] + '.png')
plt.imshow(img, origin='lower');
# Batch conversion (run once; commented out because it is slow).
# TRAIN files took 10m43s
# transform_path(TRAIN_AUDIO_PATH, TRAIN_IMAGE_PATH, log_mel_spec_tfm,
#                fnames=train_acoustic_fnames, delete=True)
# VALID files took 0m31s
# transform_path(VALID_AUDIO_PATH, VALID_IMAGE_PATH, log_mel_spec_tfm,
#                fnames=valid_acoustic_fnames, delete=True)
```
### Run Image Classifier
```
import fastai
fastai.__version__
from fastai.vision import *
# Instrument family is the first underscore-separated token of each spectrogram filename.
instrument_family_pattern = r'(\w+)_\w+_\d+-\d+-\d+.png$'
# NOTE(review): ImageItemList is fastai v1-era API; later versions renamed it ImageList.
data = (ImageItemList.from_folder(NSYNTH_IMAGES)
        .split_by_folder()
        .label_from_re(instrument_family_pattern)
        .databunch())
data.c, data.classes
# Sanity-check one batch: shapes and value statistics.
xs, ys = data.one_batch()
xs.shape, ys.shape
xs.min(), xs.max(), xs.mean(), xs.std()
data.show_batch(3, figsize=(8,4), hide_axis=False)
# Fine-tune a ResNet-18 on the spectrogram images.
learn = create_cnn(data, models.resnet18, metrics=accuracy)
learn.fit_one_cycle(3)
# Inspect where the classifier goes wrong.
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix(figsize=(10, 10), dpi=60)
interp.most_confused(min_val=20)
```
|
github_jupyter
|
```
import os
import pandas as pd
from pandas_profiling import ProfileReport
from pandas_profiling.utils.cache import cache_file
from collections import Counter
import seaborn as sn
import random
import statistics
import statsmodels.api as sm
import numpy as np
# De-identified encounter-level COVID data lives in a sibling Box folder.
box_file_dir = os.path.join(os.getcwd(), "..", "..", "Box")
file_path_csv = os.path.join(box_file_dir, "covid_pts_enc_level_labs_dx_2021-02-02_deid.csv")
df = pd.read_csv(file_path_csv, index_col=False)
df.head()
def latinx(row):
    """Recode race for White-race rows using the ethnicity field.

    White + Hispanic ethnicity -> "Hispanic"; White + non-Hispanic -> "White";
    every other row keeps its original race value.
    """
    ethnicity = row.ethnicity_display
    if row.race_display == 'White':
        if ethnicity == 'Hispanic or Latino':
            return "Hispanic"
        if ethnicity == 'Not Hispanic or Latino':
            return "White"
    return row.race_display
# Recode race in place using the ethnicity field.
df['race_display'] = df.apply(lambda row: latinx(row), axis=1)
# Cohorts: ventilated patients, ICU patients, and ICU patients with a qSOFA score.
vent_df = df[~df['vent_hours_summed'].isnull()]
len(vent_df)
Counter(vent_df['race_display'])
icu_df = df[~df['icu_hours_summed'].isnull()]
Counter(icu_df['race_display'])
working_df = icu_df[~icu_df['qSOFA_score'].isnull()]
Counter(working_df['race_display'])
# Columns needed for SOFA scoring and the allocation simulation.
data = icu_df[['age_at_admit', 'pO2_Art',
               'qSOFA_score','race_display',
               'vent_hours_summed', 'zip_cust_table', 'heartfailure_com_flag',
               'cancer_com_flag','gender','WBC','Mean_Arterial_Pressure',
               'Bili_Total','CAD_com_flag','CKD_com_flag','COPD_com_flag',
               'Creatinine', 'FiO2/Percent','Glasgow_Coma_Score','diabetes_com_flag',
               'hypertension_com_flag','length_of_stay','discharge_disposition_display','Platelet', 'deid_empi_encounter']]
data.head()
working_df[['race_display', 'age_at_admit']].groupby('race_display').agg(['mean', 'count'])
# only 236 patients with all tests
allo_df = data[['pO2_Art', 'Creatinine', 'FiO2/Percent',
                'Glasgow_Coma_Score', 'Platelet', 'Mean_Arterial_Pressure',
                'Bili_Total', 'deid_empi_encounter']].dropna()
list_of_patients = list(allo_df['deid_empi_encounter'])
adjusted_patients = data[data['deid_empi_encounter'].isin(list_of_patients)]
def calculate_sofa(row):
    """Compute a partial SOFA score from four organ systems.

    Scores coagulation (platelets), CNS (Glasgow Coma Score), liver
    (total bilirubin) and renal (creatinine) sub-scores, 0-4 each.
    Respiration (FiO2/pO2) and cardiovascular (MAP) are not implemented yet.
    Returns the integer sum (0-16 with the current four systems).
    """
    count = 0
    # need to implement FiO2/pO2 (respiration) and MAP (cardiovascular)
    # Coagulation: canonical SOFA cut-points are <150 / <100 / <50 / <20
    # (x10^3/uL). Open-ended thresholds close the gaps the original
    # integer ranges left (e.g. a platelet count of 149.5 scored 0).
    if row.Platelet < 20:
        count += 4
    elif row.Platelet < 50:
        count += 3
    elif row.Platelet < 100:
        count += 2
    elif row.Platelet < 150:
        count += 1
    # CNS: Glasgow Coma Score (15 = fully alert scores 0).
    if row.Glasgow_Coma_Score < 6:
        count += 4
    elif row.Glasgow_Coma_Score <= 9:
        count += 3
    elif row.Glasgow_Coma_Score <= 12:
        count += 2
    elif row.Glasgow_Coma_Score <= 14:
        count += 1
    # Liver: total bilirubin (mg/dL); stored as a string in the source data,
    # so convert once instead of on every comparison.
    bili = float(row.Bili_Total)
    if bili >= 12.0:
        count += 4
    elif bili >= 6.0:
        count += 3
    elif bili >= 2.0:
        count += 2
    elif bili >= 1.2:
        count += 1
    # Renal: creatinine (mg/dL).
    if row.Creatinine >= 5.0:
        count += 4
    elif row.Creatinine >= 3.5:
        count += 3
    elif row.Creatinine >= 2.0:
        count += 2
    elif row.Creatinine >= 1.2:
        count += 1
    return count
allo_df['sofa'] = allo_df.apply(lambda row: calculate_sofa(row), axis = 1)
# NOTE(review): assignment aligns by index -- allo_df is a row subset of data, so
# adjusted_patients rows not present in allo_df receive NaN here; verify intended.
adjusted_patients['sofa'] = allo_df.apply(lambda row: calculate_sofa(row), axis = 1)
allo_df['sofa'].describe()
adjusted_patients['sofa'].describe()
# Mortality (%) by SOFA score, taken from the reference below.
#https://www.mdcalc.com/sequential-organ-failure-assessment-sofa-score#evidence
sofa_mortality_calibration = {
    0: 0,
    1: 0 ,
    2: 6.4,
    3: 6.4,
    4: 20.2,
    5: 20.2,
    6: 21.5,
    7: 21.5,
    8: 33.3,
    9: 33.3 ,
    10: 50.0,
    11: 50.0 ,
    12: 95.2,
    13: 95.2 ,
    14: 95.2 ,
}
# still need to check / corroborate these values
# digging into various studies on measuring qSOFA for different comorbidities
# Min linked a paper about influenza
# can use these values
# Mortality (%) by qSOFA score.
qsofa_mortality_calibration = {
    0: 0.6,
    1: 5 ,
    2: 10,
    3: 24,
}
working_df.dtypes
def comorbidity_count(row):
    """Return how many of the eight comorbidity flags equal 1 for this row."""
    flags = (
        row.COPD_com_flag,
        row.asthma_com_flag,
        row.diabetes_com_flag,
        row.hypertension_com_flag,
        row.CAD_com_flag,
        row.heartfailure_com_flag,
        row.CKD_com_flag,
        row.cancer_com_flag,
    )
    return sum(1 for flag in flags if flag == 1)
# Missing comorbidity flags mean "not recorded": fill with 0, then cast to int.
working_df[['COPD_com_flag', 'asthma_com_flag', 'diabetes_com_flag',
            'hypertension_com_flag', 'CAD_com_flag', 'heartfailure_com_flag',
            'CKD_com_flag', 'cancer_com_flag']] = working_df[['COPD_com_flag', 'asthma_com_flag', 'diabetes_com_flag',
            'hypertension_com_flag', 'CAD_com_flag', 'heartfailure_com_flag',
            'CKD_com_flag', 'cancer_com_flag']].fillna(0)
working_df[['COPD_com_flag', 'asthma_com_flag', 'diabetes_com_flag',
            'hypertension_com_flag', 'CAD_com_flag', 'heartfailure_com_flag',
            'CKD_com_flag', 'cancer_com_flag']] = working_df[['COPD_com_flag', 'asthma_com_flag', 'diabetes_com_flag',
            'hypertension_com_flag', 'CAD_com_flag', 'heartfailure_com_flag',
            'CKD_com_flag', 'cancer_com_flag']].astype(int)
# Derived features: total comorbidity count, >=1 / >=2 indicators, and remaining
# life-years proxy (100 - age), used by the allocation policies below.
working_df['total_comorbidities'] = working_df.apply(lambda row: comorbidity_count(row), axis=1)
working_df['cancer_com_flag'].dtype
working_df['has_comorbidity'] = working_df.total_comorbidities.apply(lambda x: 1 if x >= 1 else 0)
working_df['has_comorbidity2'] = working_df.total_comorbidities.apply(lambda x: 1 if x >= 2 else 0)
working_df['life_years'] = working_df.age_at_admit.apply(lambda x: 100 - x)
Counter(adjusted_patients['discharge_disposition_display'])
class Allocation(object):
    """Simulate ventilator allocation under several triage policies.

    Code will be adjusted for SOFA. Currently using qSOFA.
    Only looking at State Level CSC for vent allocation.

    Each policy method orders `self.patients`, allocates the available
    ventilators to the first `num_vents` rows, simulates mortality for
    them, and marks all unventilated patients via `check_expiration`.
    """
    def __init__(self, patients, scarcity, sofa_calibration):
        # patients: DataFrame of ICU patients (must have qSOFA_score, age_at_admit,
        #           total_comorbidities, life_years, vent_hours_summed,
        #           discharge_disposition_display columns).
        # scarcity: fraction of patients for whom a ventilator exists.
        # sofa_calibration: {score: mortality %} lookup used to simulate death.
        self.patients = patients.copy()
        self.patients['death'] = [0 for _ in range(len(self.patients))]
        self.patients['allocated_vent'] = ["no" for _ in range(len(self.patients))]
        self.num_vents = int(len(patients) * scarcity)
        self.mortality_model = sofa_calibration
    def allocate(self, row):
        """Simulate the outcome for one ventilated patient.

        Returns (death, 'yes'): death is drawn from the score-calibrated
        mortality probability.
        """
        prob = self.mortality_model[row.qSOFA_score]
        death = np.random.binomial(size=1, n=1, p=prob*.01)[0]
        # NOTE(review): the original if/else returned (death, 'yes') on both
        # branches (the 'Expired' check had no effect), so it is collapsed here.
        return death, 'yes'
    def check_expiration(self, df):
        """Mark outcomes for patients who did NOT receive a ventilator.

        A patient with no recorded vent hours, or who expired, is counted
        as a death; everyone else survives.
        """
        temp_df = df.copy()
        for i, row in df.iterrows():
            row = row.copy()
            if (pd.isna(row.vent_hours_summed)) or row.discharge_disposition_display == 'Expired':
                temp_df.loc[i, 'death'] = 1
            else:
                temp_df.loc[i, 'death'] = 0
        return temp_df
    def __run_allocation(self, df2):
        """Allocate vents to rows of df2 in order until the supply runs out."""
        for i, row in df2.iterrows():
            row = row.copy()
            if self.num_vents == 0:
                break
            mortality, allocate_cond = self.allocate(row)
            df2.loc[i, 'death'] = mortality
            df2.loc[i, 'allocated_vent'] = allocate_cond
            self.num_vents -= 1
        non_allocated = df2[df2['allocated_vent']=='no']
        allocated = df2[df2['allocated_vent']=='yes']
        adj_df = self.check_expiration(non_allocated)
        return pd.concat([allocated, adj_df])
    def lottery(self):
        """Random-order (lottery) allocation."""
        temp_patients = self.patients.copy()
        # BUG FIX: sample(frac=1) returns the shuffled frame; the original
        # discarded it, so the 'lottery' ran in the original row order.
        temp_patients = temp_patients.sample(frac=1)
        out_df = self.__run_allocation(temp_patients)
        return out_df
    def youngest(self):
        """Youngest-first allocation."""
        temp_patients = self.patients.copy()
        temp_patients.sort_values(by=['age_at_admit'], ascending=True, inplace=True)
        out_df = self.__run_allocation(temp_patients)
        return out_df
    # pandas function
    def __age_categorization(self, row):
        """Map age to the Maryland CSC age tier (1 youngest .. 4 oldest)."""
        if row.age_at_admit < 50:
            return 1
        elif row.age_at_admit < 70:
            return 2
        elif row.age_at_admit < 85:
            return 3
        else:
            return 4
    def maryland(self):
        """Maryland CSC: prioritise by qSOFA, then comorbidities, then age tier."""
        temp_patients = self.patients.copy()
        temp_patients['age_cat'] = temp_patients.apply(lambda row: self.__age_categorization(row)
                                                       , axis=1)
        temp_patients.sort_values(by=['qSOFA_score', 'total_comorbidities', 'age_cat'],
                                  ascending=[True, True, True], inplace=True)
        out_df = self.__run_allocation(temp_patients)
        return out_df
    def new_york(self):
        """New York CSC: qSOFA priority with random tie-breaking within each score."""
        temp_patients = self.patients.copy()
        # Shuffle the score groups, then a stable sort on qSOFA randomises order within ties.
        groups = [df for _, df in temp_patients.groupby('qSOFA_score')]
        random.shuffle(groups)
        grouped = pd.concat(groups).reset_index(drop=True)
        grouped = grouped.sort_values('qSOFA_score', ascending=True)
        out_df = self.__run_allocation(grouped)
        return out_df
    def max_lives_saved(self):
        """Allocate to the patients most likely to survive (lowest qSOFA first)."""
        temp_patients = self.patients.copy()
        temp_patients.sort_values(by=['qSOFA_score'], ascending=True, inplace=True)
        out_df = self.__run_allocation(temp_patients)
        return out_df
    def max_life_years(self):
        """Lowest qSOFA first, breaking ties by most remaining life-years."""
        temp_patients = self.patients.copy()
        temp_patients.sort_values(by=['qSOFA_score', 'life_years'], ascending=[True,False], inplace=True)
        out_df = self.__run_allocation(temp_patients)
        return out_df
    def sickest_first(self):
        """Allocate to the sickest patients first (highest qSOFA)."""
        temp_patients = self.patients.copy()
        temp_patients.sort_values(by=['qSOFA_score'], ascending=False, inplace=True)
        out_df = self.__run_allocation(temp_patients)
        return out_df
# Zip-code level socioeconomic data; strip the census 'ZCTA5 ' prefix before joining.
zip_df = pd.read_csv('zip_code_data.csv', index_col=False)
zip_df['zip_code'] = zip_df.zip_code.apply(lambda x: x.strip('ZCTA5 '))
# Inner join: patients with no matching zip record are dropped (working_df gets a new RangeIndex).
working_df = pd.merge(working_df, zip_df, left_on='zip_cust_table', right_on='zip_code', how='inner')
```
### Baseline
```
Counter(working_df['discharge_disposition_display'])
def latinx(row):
    """Recode race for White-race rows using the ethnicity field.

    (Duplicate of the helper defined earlier in the notebook.)
    """
    if row.ethnicity_display == 'Hispanic or Latino' and row.race_display == 'White':
        return "Hispanic"
    elif row.ethnicity_display == 'Not Hispanic or Latino' and row.race_display == 'White':
        return "White"
    else:
        return row.race_display
# BUG FIX: apply over working_df, not df. working_df was inner-merged and
# re-indexed above, so a Series computed from df would misalign (wrong patients'
# race values assigned by positional index overlap).
working_df['race_display'] = working_df.apply(lambda row: latinx(row), axis=1)
# later think about the mortality rate as well
# summarize what I'm going to do and send to Victoria
len(working_df)
# compute other descriptive stats for this groupby
# final analysis
working_df[['race_display', 'age_at_admit']].groupby('race_display').agg(['mean', 'std', 'count']).round(2)
Counter(working_df['qSOFA_score'])
len(working_df['zip_cust_table'].unique())
# zip code demo eda
c = Counter(working_df['zip_cust_table'])
alist = c.most_common()
# Zips whose third digit is '7' -- presumably a local area prefix of interest; TODO confirm.
sum_patient = list(filter(lambda x: x[0][2] == '7', alist))
print(len(sum_patient))
# Total patients across those zip codes.
num_p = 0
for x in sum_patient:
    num_p += x[1]
num_p
c = Counter(working_df['zip_cust_table'])
alist = c.most_common()
# Zip codes contributing more than one patient.
n_alist = list(filter(lambda x: x[1] > 1, alist))
print(len(n_alist))
#n_alist
sn.distplot(working_df['qSOFA_score'])
race_count = Counter(working_df['race_display'])
race_count
working_df['poverty_rate'] = working_df['poverty_rate'].astype(float)
working_df['median_income'] = working_df['median_income'].astype(float)
# Bin zip-level poverty rate; searchsorted returns the bin index (0..len(bins)).
# NOTE(review): bin_conv is computed but never used.
bins = [0, 6, 12, 18,24,30,36,40]
bin_conv = [i+1 for i in range(len(bins))]
working_df['zip_binned_by_poverty'] = np.searchsorted(bins, working_df['poverty_rate'].values)
#temp_df['zip_binned_by_poverty'] = np.searchsorted(bins, temp_df['poverty_rate'].values)
# Bin zip-level median income the same way.
bins = [20000, 40000, 60000, 80000,100000]
bin_conv = [i+1 for i in range(len(bins))]
working_df['zip_binned_by_income'] = np.searchsorted(bins, working_df['median_income'].values)
# Outcome: in-hospital death, from the discharge disposition.
working_df['death'] = working_df.discharge_disposition_display.apply(lambda row: 1 if row == 'Expired' else 0)
Counter(working_df['death'])
data_for_model_X = working_df[['zip_binned_by_income',
                              'zip_binned_by_poverty',
                              'race_display',
                              'total_comorbidities',
                              'has_comorbidity',
                              'has_comorbidity2',
                              'age_at_admit',
                              'qSOFA_score',
                              ]]
# One-hot encode race for the logit.
updated_data_for_model_X = pd.get_dummies(data_for_model_X)
data_for_model_y = working_df['death']
data_for_model_X.dtypes
# NOTE(review): no sm.add_constant -- the model is fit without an intercept; confirm intended.
log_reg = sm.Logit(data_for_model_y, updated_data_for_model_X).fit()
print(log_reg.summary())
for table in log_reg.summary().tables:
    print(table.as_latex_tabular())
# Odds ratios with 95% confidence intervals (exponentiated coefficients).
params = log_reg.params
conf = log_reg.conf_int()
conf['Odds Ratio'] = params
conf.columns = ['5%', '95%', 'Odds Ratio']
print(np.exp(conf))
conf[['5%', '95%', 'Odds Ratio' ]] = np.exp(conf[['5%', '95%', 'Odds Ratio']])
conf
print(conf.round(2).to_latex(index=True))
# In-sample discrimination of the fitted model.
from sklearn.metrics import roc_auc_score
roc_auc_score(data_for_model_y, log_reg.predict(updated_data_for_model_X))
```
|
github_jupyter
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.