code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # File Handling
# NOTE(review): this is a jupytext export of a notebook; the bare text lines
# below ("Hi, the file is created", "pwd", "hi", ...) are %%writefile cell
# payloads / shell commands, not Python. This section is only runnable inside
# the original notebook, not as a plain .py script.
# %%writefile test.txt
Hi, the file is created
pwd
#Open a file test.txt
# open() with no mode argument opens read-only in text mode.
my_file = open("test.txt")
my_file.read()
# A second read() returns '' because the cursor is already at end-of-file.
my_file.read()
my_file = open("test.txt")
my_file.read()
# seek(0) rewinds the cursor so the contents can be read again.
my_file.seek(0)
my_file.read()
# %%writefile test.txt
hi
hello
how are you
# readlines() returns the remaining contents as a list of lines.
my_file.readlines()
my_file.seek(0)
my_file.readlines()
# # Writing Files
# 'w+' truncates the existing file and allows both writing and reading.
my_file = open('test.txt','w+')
my_file.write("The file is overiding")
my_file.seek(0)
my_file.read()
print(my_file.read())
my_file.seek(0)
print(my_file.read())
my_file.close()
# # Appending To The File
# 'a+' opens for appending and reading; the cursor starts at end-of-file,
# so the read() calls before seek(0) return ''.
my_file = open('test.txt','a+')
my_file.read()
my_file.seek(0)
my_file.read()
print(my_file.read())
my_file.seek(0)
print(my_file.read())
my_file.read()
my_file.write('\n This is another line')
my_file.write('\n This is third line')
my_file.read()
my_file.seek(0)
my_file.read()
# # Iteration file
# %%writefile abc.txt
abc
# %%w
# %%writefile abc.txt
# %%writefile newfile.txt
# %%writefile newfile.txt
Hi How are you
# %%writefile newfile.txt
I love FCS
my_file = open('newfile.txt')
# NOTE(review): this write() raises io.UnsupportedOperation -- the handle
# above was opened read-only; reopen with 'r+' (as done below) before writing.
my_file.write("I Love FCS")
my_file = open('newfile.txt','r+')
my_file.close()
my_file=open('newfile.txt','r+')
my_file.read()
# # Functions
|
FileHandling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# # Read Data
# Expects Data.csv next to the notebook; the last column is the label,
# everything before it is a feature. -- TODO confirm schema against the CSV.
dataset = pd.read_csv('Data.csv')
dataset
# .iloc[All rows, All columns except last column]
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:,-1].values
# Feature
print(X)
# Labels
print(y)
# # Handling Missing Data
# +
from sklearn.impute import SimpleImputer
# Create Object of SimpleImputer class
# Replace NaNs in columns 1 and 2 (presumably the numeric columns -- verify)
# with each column's mean.
imputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# -
print(X)
# # Encoding the independent Variable
# +
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
# For Country Column
# One-hot encode column 0; remainder='passthrough' keeps every other column.
ct = ColumnTransformer(transformers = [('encoder', OneHotEncoder(), [0])],
remainder = 'passthrough')
X = np.array(ct.fit_transform(X))
# -
print(X)
from sklearn.preprocessing import LabelEncoder
# Encode the string labels in y as integers (0..n_classes-1).
le = LabelEncoder()
y = le.fit_transform(y)
print(y)
# # Split Train and Test data
# +
from sklearn.model_selection import train_test_split
# random_state pins the shuffle so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)
# -
print(X_train)
print(X_test)
print(y_train)
print(y_test)
# # Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler on the training features only, then apply the SAME transform
# to the test set -- fitting on test data would leak information.
# Columns 3+ are scaled; the first columns are the one-hot dummies.
X_train[:, 3:] = sc.fit_transform(X_train[:, 3:])
X_test[:, 3:] = sc.transform(X_test[:, 3:])
print(X_train)
print(X_test)
|
1. Data Preprocessing/1_data_preprocessing_tools.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch
from torch.autograd import Variable
import random
import torch.nn.functional as F
from torch.autograd import Variable as V
# +
# #These classes and everything within this cell is copied directly from:
# #https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# class ModifiableModule(nn.Module):
# def params(self):
# return [p for _, p in self.named_params()]
# def named_leaves(self):
# return []
# def named_submodules(self):
# return []
# def named_params(self):
# subparams = []
# for name, mod in self.named_submodules():
# for subname, param in mod.named_params():
# subparams.append((name + '.' + subname, param))
# return self.named_leaves() + subparams
# def set_param(self, name, param):
# if '.' in name:
# n = name.split('.')
# module_name = n[0]
# rest = '.'.join(n[1:])
# for name, mod in self.named_submodules():
# if module_name == name:
# mod.set_param(rest, param)
# break
# else:
# setattr(self, name, param)
# def copy(self, other):
# for name, param in other.named_params():
# self.set_param(name, param)
# class GradLinear(ModifiableModule):
# def __init__(self, *args, **kwargs):
# super().__init__()
# ignore = nn.Linear(*args, **kwargs)
# self.weights = V(ignore.weight.data, requires_grad=True)
# self.bias = V(ignore.bias.data, requires_grad=True)
# def forward(self, x):
# return F.linear(x, self.weights, self.bias)
# def named_leaves(self):
# return [('weights', self.weights), ('bias', self.bias)]
# class SineModel(ModifiableModule):
# def __init__(self):
# super().__init__()
# self.hidden1 = GradLinear(1, 40)
# self.hidden2 = GradLinear(40, 40)
# self.out = GradLinear(40, 1)
# def forward(self, x):
# x = F.relu(self.hidden1(x))
# x = F.relu(self.hidden2(x))
# return self.out(x)
# def named_submodules(self):
# return [('hidden1', self.hidden1), ('hidden2', self.hidden2), ('out', self.out)]
# Define network
class Neural_Network(nn.Module):
    """Fully connected regressor: input -> hidden -> hidden -> output, ReLU between layers."""

    def __init__(self, input_size=1, hidden_size=40, output_size=1):
        super().__init__()
        # Attribute names define the state_dict keys; copy_existing_model()
        # relies on them, so they must stay hidden1/hidden2/output_layer.
        self.hidden1 = nn.Linear(input_size, hidden_size)
        self.hidden2 = nn.Linear(hidden_size, hidden_size)
        self.output_layer = nn.Linear(hidden_size, output_size)
        # Single shared activation module (stateless).
        self.relu = nn.ReLU()

    def forward(self, x):
        """Map a batch x of shape (N, input_size) to predictions of shape (N, output_size)."""
        hidden = self.relu(self.hidden1(x))
        hidden = self.relu(self.hidden2(hidden))
        return self.output_layer(hidden)
# -
class SineWaveTask:
    """One sine-regression task: y = a * sin(x + b), with random amplitude a and phase b."""

    def __init__(self):
        # Amplitude/phase ranges follow the standard MAML sine benchmark.
        self.a = np.random.uniform(0.1, 5.0)
        self.b = np.random.uniform(0, 2 * np.pi)
        # Cached training inputs; sampled lazily on first use.
        self.train_x = None

    def f(self, x):
        """Ground-truth target for input x (numpy array or scalar)."""
        return self.a * np.sin(x + self.b)

    def training_set(self, size=10, force_new=False):
        """Return (x, y) training tensors.

        With force_new=False the x sample is drawn once and cached; with
        force_new=True a fresh uncached sample is drawn each call.
        """
        if force_new:
            x = np.random.uniform(-5, 5, size)
        else:
            if self.train_x is None:
                self.train_x = np.random.uniform(-5, 5, size)
            x = self.train_x
        return torch.Tensor(x), torch.Tensor(self.f(x))

    def test_set(self, size=50):
        """Return (x, y) tensors on an evenly spaced grid over [-5, 5]."""
        grid = np.linspace(-5, 5, size)
        return torch.Tensor(grid), torch.Tensor(self.f(grid))

    def plot(self, *args, **kwargs):
        """Plot the true wave over 100 grid points; extra args go to plt.plot."""
        x, y = self.test_set(size=100)
        return plt.plot(x.numpy(), y.numpy(), *args, **kwargs)

    def plot_model(self, new_model, *args, **kwargs):
        """Plot new_model's predictions over this task's test grid."""
        x, y_true = self.test_set(size=100)
        x = Variable(x[:, None])
        y_true = Variable(y_true[:, None])
        y_pred = new_model(x)
        plt.plot(x.data.numpy().flatten(),
                 y_pred.data.numpy().flatten(),
                 *args, **kwargs)
# +
# The Minimum Square Error is used to evaluate the difference between prediction and ground truth
# Shared module-level loss instance; evaluation() below closes over it.
criterion = nn.MSELoss()
def copy_existing_model(model):
    """Return a brand-new Neural_Network carrying model's current weights.

    Used to fine-tune a task-specific copy without touching the meta-model.
    """
    clone = Neural_Network()
    clone.load_state_dict(model.state_dict())
    return clone
def get_samples_in_good_format(wave, num_samples=10, force_new=False):
    """Draw (x, y) from wave and reshape both to column vectors for the model.

    Returns two tensors of shape (num_samples, 1) -- the K x 1 layout the
    network's Linear(input_size=1) layer expects.
    """
    x, y_true = wave.training_set(size=num_samples, force_new=force_new)
    # training_set already returns torch tensors; the original
    # torch.tensor(tensor) call copied them and raised a UserWarning
    # ("use clone().detach() instead"). A [:, None] view adds the trailing
    # feature dimension without the extra copy.
    return x[:, None], y_true[:, None]
def initialization_to_store_meta_losses():
    """(Re)create the module-level lists that accumulate meta train/test losses."""
    global store_train_loss_meta, store_test_loss_meta
    store_train_loss_meta = []
    store_test_loss_meta = []
def test_set_validation(model, new_model, wave, lr_inner, k, store_test_loss_meta):
    """Fine-tune a copy of model on wave and record its loss (monitoring only).

    Note: the incoming new_model argument is immediately replaced by the
    freshly trained copy; it remains in the signature for caller compatibility.
    This function does not affect the main algorithm.
    """
    adapted = training(model, wave, lr_inner, k)
    store_test_loss_meta.append(evaluation(adapted, wave))
def train_set_evaluation(new_model, wave, store_train_loss_meta):
    """Record new_model's current loss on wave in the shared loss list."""
    store_train_loss_meta.append(evaluation(new_model, wave))
def print_losses(epoch, store_train_loss_meta, store_test_loss_meta, printing_step=1000):
    """Every printing_step epochs, log the running-average meta losses.

    Note: the averages are over the full history of each list, not a window.
    """
    if epoch % printing_step == 0:
        # Fixed typo in the log message: "Epochh" -> "Epoch".
        print(f'Epoch : {epoch}, Average Train Meta Loss : {np.mean(store_train_loss_meta)}, Average Test Meta Loss : {np.mean(store_test_loss_meta)}')
#This is based on the paper update rule, we calculate the difference between parameters and then this is used by the optimizer, rather than doing the update by hand
def reptile_parameter_update(model, new_model):
    """Accumulate the Reptile meta-gradient (theta - theta_task) into model's .grad buffers.

    The optimizer consumes these .grad values later; this function only stores
    the parameter difference, it never updates the weights itself.
    """
    for parameter, new_parameter in zip(model.parameters(), new_model.parameters()):
        if parameter.grad is None:
            # torch.zeros_like already returns a fresh tensor; the original
            # torch.tensor(torch.zeros_like(...)) copied it a second time and
            # emitted a UserWarning ("use clone().detach() instead").
            parameter.grad = torch.zeros_like(parameter)
        # Add the (old - adapted) parameter difference as the pseudo-gradient.
        parameter.grad.data.add_(parameter.data - new_parameter.data)
# Define commands in order needed for the metaupdate
# Note that if we change the order it doesn't behave the same
def metaoptimizer_update(metaoptimizer):
    """Apply the accumulated meta-gradients, then clear them for the next round."""
    # Step first, reset second -- reversing the order would discard the update.
    metaoptimizer.step()
    metaoptimizer.zero_grad()
def metaupdate(model, new_model, metaoptimizer):
    """One full meta-step: write Reptile pseudo-gradients into model, then step the optimizer.

    Order matters: the gradients must be in place before the optimizer runs.
    """
    reptile_parameter_update(model, new_model)
    metaoptimizer_update(metaoptimizer)
def evaluation(new_model, wave, item=True, num_samples=10, force_new=False):
    """Compute the MSE loss of new_model on a fresh sample drawn from wave.

    item=True returns a plain float (for logging/storage); item=False returns
    the loss tensor so callers can backpropagate through it.
    """
    x, label = get_samples_in_good_format(wave, num_samples=num_samples, force_new=force_new)
    prediction = new_model(x)
    loss = criterion(prediction, label)
    # Idiomatic truthiness check instead of the original `item == True`;
    # all callers pass a bool, so behavior is unchanged.
    return loss.item() if item else loss
def training(model, wave, lr_k, k):
    """Return a copy of model fine-tuned on wave with k SGD steps of size lr_k.

    The meta-model passed in is never modified; all updates happen on the copy.
    (Do not confuse the step count k with the sample count K.)
    """
    new_model = copy_existing_model(model)
    koptimizer = torch.optim.SGD(new_model.parameters(), lr=lr_k)
    for _ in range(k):
        koptimizer.zero_grad()
        # item=False keeps the loss as a tensor so backward() can run.
        loss = evaluation(new_model, wave, item=False)
        loss.backward()
        koptimizer.step()
    return new_model
# -
# NOTE(review): scratch arithmetic left in the notebook (epochs per printing
# step?); the bare expression has no effect outside the notebook display.
700001/40000
# +
# #Neural network configuration stems from: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# # maml training
# # note: uses comments and structure largely from code ocariz wrote!
# #alternative code
# '''
# Handling computation graphs and second-order backprop help and partial inspiration from:
# - https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2
# - https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3
# - https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf
# - https://www.youtube.com/watch?v=IkDw22a8BDE
# - https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2
# - https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
# - https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
# Neural network configuration and helper class functions copied directly from
# -https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters
# Sometimes called "inner" and "outer" loop, respectively
# Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code)
# '''
# torch.manual_seed(0)
# random.seed(0)
# np.random.seed(0)
# TRAIN_SIZE = 50
# TEST_SIZE = 100
# VAL_SIZE = 100
# SINE_TRAIN = [SineWaveTask() for _ in range(TRAIN_SIZE)]
# SINE_TEST = [SineWaveTask() for _ in range(TEST_SIZE)]
# SINE_VALIDATION = [SineWaveTask() for _ in range(VAL_SIZE)]
# # from paper, originally T=25 (meta_batch_size parameter = 25)
# # https://github.com/cbfinn/maml/blob/master/main.py#L49
# T = 25 # num tasks
# N = 1 # number of inner loop steps (notation from: https://www.bayeswatch.com/2018/11/30/HTYM/)
# K = 10 # number of samples to draw from the task
# lr_task_specific = 0.01 # task specific learning rate
# lr_meta = 0.001 # meta-update learning rate
# num_epochs = 700001
# printing_step = 10000 # show log of loss every x epochs
# # Initializations
# initialization_to_store_meta_losses()
# #Instantiate the other model, from the prior class
# model = Neural_Network()
# #Use the different syntax of model.params()
# # meta_optimizer = torch.optim.Adam(model.params(), lr = lr_meta)
# meta_optimizer = torch.optim.Adam(model.parameters(), lr = lr_meta)
# #Create a list to store the meta-losses since only storing one loss at a time
# metaLosses = []
# metaTrainLosses = []
# metaValLosses = []
# mega_meta_loss = None
# for epoch in range(num_epochs):
# # store loss over all tasks to then do a large meta-level update of initial params
# # idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE
# #Moved this inside the function so that we sample a new batch of tasks every time
# # waves = random.sample(SINE_TRAIN, T)
# # idea: try completely resampling training waves per epoch, rather than pre-fixed
# # seems to be what original authors did to resample (inspired by):
# # https://github.com/cbfinn/maml/blob/a7f45f1bcd7457fe97b227a21e89b8a82cc5fa49/data_generator.py#L162
# waves = [SineWaveTask() for _ in range(T)]
# if epoch % printing_step == 0:
# print("Starting params: ", model._modules['hidden1']._parameters["weight"][:5])
# # loop over tasks and fine-tune weights per task
# for i, T_i in enumerate(waves):
# # copy model to use the same starting weights
# #Use the different copying function capacity, our function copying doesn't work with their model formulation
# new_model = copy_existing_model(model)
# # new_model = SineModel()
# # new_model.copy(model)
# # note, b/c of manual gradient updates, need to zero out the gradients at the start
# # https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
# new_model.zero_grad()
# # use model to predict on task-specific training set
# task_specific_loss = evaluation(new_model, T_i, item = False, num_samples=K, force_new=False)
# # #Run the initial loss using .backward and create the graph so that we can go back through as neeeded
# # task_specific_loss.backward(create_graph=True, retain_graph=True)
# # #manually update the parameters
# # #This is adapted from the copy function which is from:
# # #https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# # for name, param in new_model.named_params():
# # new_model.set_param(name, param - lr_task_specific*param.grad)
# gradient_info = torch.autograd.grad(task_specific_loss, new_model.parameters(),
# create_graph=True, retain_graph=True)
# # now, need to extract gradients for each param and get a new graph
# # help from: https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
# # and: https://www.youtube.com/watch?v=IkDw22a8BDE
# model_param_data = new_model.state_dict()
# # note: order of computation is preserved and state_dict = ordered, so okay to loop
# # https://github.com/HIPS/autograd/blob/master/docs/tutorial.md
# for computation_idx, (param_name, param_obj) in enumerate(new_model.named_parameters()):
# task_specific_grad = gradient_info[computation_idx]
# model_param_data[param_name] = param_obj - lr_task_specific * task_specific_grad # manual update
# metaTrainLosses.append(task_specific_loss.item())
# # use new model to predict
# # note: we want a new sample from T_i
# # e.g., sample new draws from task, feed forward (e.g., get preds), compute loss, sum loss to meta_loss for later gradient use
# meta_loss = evaluation(new_model, T_i, item = False, num_samples=K, force_new=True)
# if mega_meta_loss is None:
# mega_meta_loss = meta_loss
# else: mega_meta_loss += meta_loss
# metaLosses.append(meta_loss.item())
# # backpropogate thru all tasks
# # use adam optimizer here!!
# mega_meta_loss.backward(retain_graph=True)
# metaoptimizer_update(meta_optimizer)
# # get a sample using the validation set
# # TODO: optimize for this single wave, o.w. just checking init w/ random wave!!
# val_wave = random.sample(SINE_VALIDATION, 1)[0]
# meta_val_loss = evaluation(new_model, val_wave, item = True, num_samples=K, force_new=False)
# metaValLosses.append(meta_val_loss)
# #Print out the average loss over the last 500 iterations
# if epoch % printing_step == 0:
# print(f"Epoch {epoch}, Current loss: {np.mean(metaLosses)}, Train loss: {np.mean(metaTrainLosses)}, Val loss: {np.mean(metaValLosses)}")
# # for name, param in model.named_params():
# # if name =="hidden1.weights":
# # print(name, param[:5])
# if epoch % printing_step == 0:
# print("Starting params: ", model._modules['hidden1']._parameters["weight"][:5])
# +
#Neural network configuration stems from: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# maml training
# note: uses comments and structure largely from code ocariz wrote!
#alternative code
'''
Handling computation graphs and second-order backprop help and partial inspiration from:
- https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2
- https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3
- https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf
- https://www.youtube.com/watch?v=IkDw22a8BDE
- https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2
- https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
- https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
Neural network configuration and helper class functions copied directly from
-https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters
Sometimes called "inner" and "outer" loop, respectively
Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code)
'''
# Pin every RNG so runs are reproducible.
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
TRAIN_SIZE = 50
TEST_SIZE = 100
VAL_SIZE = 100
SINE_TRAIN = [SineWaveTask() for _ in range(TRAIN_SIZE)]
SINE_TEST = [SineWaveTask() for _ in range(TEST_SIZE)]
SINE_VALIDATION = [SineWaveTask() for _ in range(VAL_SIZE)]
# from paper, originally T=25 (meta_batch_size parameter = 25)
# https://github.com/cbfinn/maml/blob/master/main.py#L49
T = 25 # num tasks
N = 1 # number of inner loop steps (notation from: https://www.bayeswatch.com/2018/11/30/HTYM/)
K = 10 # number of samples to draw from the task
lr_task_specific = 0.01 # task specific learning rate
lr_meta = 0.001 # meta-update learning rate
num_epochs = 300001
printing_step = 10000 # show log of loss every x epochs
# num_epochs = 10
# printing_step = 2 # show log of loss every x epochs
# Initializations
initialization_to_store_meta_losses()
#Instantiate the other model, from the prior class
model = Neural_Network()
model.zero_grad()
#Use the different syntax of model.params()
# meta_optimizer = torch.optim.Adam(model.params(), lr = lr_meta)
meta_optimizer = torch.optim.Adam(model.parameters(), lr = lr_meta)
#Create a list to store the meta-losses since only storing one loss at a time
metaLosses = []
metaTrainLosses = []
metaValLosses = []
# NOTE(review): mega_meta_loss is never reset inside the epoch loop, so it
# accumulates losses (and retains their graphs) across ALL epochs -- the
# corresponding .backward() below is commented out, but the retained graphs
# still grow memory unboundedly. Confirm this is intended.
mega_meta_loss = None
for epoch in range(num_epochs):
meta_gradients = None
# store loss over all tasks to then do a large meta-level update of initial params
# idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE
#Moved this inside the function so that we sample a new batch of tasks every time
# waves = random.sample(SINE_TRAIN, T)
# idea: try completely resampling training waves per epoch, rather than pre-fixed
# seems to be what original authors did to resample (inspired by):
# https://github.com/cbfinn/maml/blob/a7f45f1bcd7457fe97b227a21e89b8a82cc5fa49/data_generator.py#L162
waves = [SineWaveTask() for _ in range(T)]
if epoch % printing_step == 0:
print("EPOCH Starting params: ", model._modules['hidden1']._parameters["weight"][:5])
# loop over tasks and fine-tune weights per task
for i, T_i in enumerate(waves):
# copy model to use the same starting weights
#Use the different copying function capacity, our function copying doesn't work with their model formulation
new_model = copy_existing_model(model)
# new_model = SineModel()
# new_model.copy(model)
# note, b/c of manual gradient updates, need to zero out the gradients at the start
# https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
new_model.zero_grad()
# use model to predict on task-specific training set
task_specific_loss = evaluation(new_model, T_i, item = False, num_samples=K, force_new=False)
# #Run the initial loss using .backward and create the graph so that we can go back through as neeeded
# task_specific_loss.backward(create_graph=True, retain_graph=True)
# #manually update the parameters
# #This is adapted from the copy function which is from:
# #https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# for name, param in new_model.named_params():
# new_model.set_param(name, param - lr_task_specific*param.grad)
gradient_info = torch.autograd.grad(task_specific_loss, new_model.parameters(),
create_graph=True, retain_graph=True)
# now, need to extract gradients for each param and get a new graph
# help from: https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
# and: https://www.youtube.com/watch?v=IkDw22a8BDE
model_param_data = new_model.state_dict()
# note: order of computation is preserved and state_dict = ordered, so okay to loop
# https://github.com/HIPS/autograd/blob/master/docs/tutorial.md
for computation_idx, (param_name, param_obj) in enumerate(new_model.named_parameters()):
task_specific_grad = gradient_info[computation_idx]
model_param_data[param_name] = param_obj - lr_task_specific * task_specific_grad # manual update
# load the updated task-specific params (w/ gradient info!) into the model
new_model.load_state_dict(model_param_data)
metaTrainLosses.append(task_specific_loss.item())
# use new model to predict
# note: we want a new sample from T_i
# e.g., sample new draws from task, feed forward (e.g., get preds), compute loss, sum loss to meta_loss for later gradient use
meta_loss = evaluation(new_model, T_i, item = False, num_samples=K, force_new=True)
if mega_meta_loss is None:
mega_meta_loss = meta_loss
else: mega_meta_loss += meta_loss
metaLosses.append(meta_loss.item())
# manually save gradients -- okay b/c we have a sum -- so can distribute gradient [CHECK!!!]
# idea to get gradients per task partially inspired by:
# https://gist.github.com/unixpickle/0981d4cd8efead8b40ab27de1af0733c
meta_gradient_info = torch.autograd.grad(meta_loss, new_model.parameters(),
create_graph=True, retain_graph=True)
# NOTE(review): torch.autograd.grad returns a TUPLE, so the `+=` below
# CONCATENATES per-task gradient tuples instead of summing them
# element-wise. The meta-update loop after the task loop indexes only the
# first len(parameters) entries, i.e. it effectively applies only the
# FIRST task's gradients each epoch. Likely intended: sum corresponding
# tuple entries element-wise. Confirm before relying on these results.
if meta_gradients is None: meta_gradients = meta_gradient_info
else: meta_gradients += meta_gradient_info
# print("meta gradients: ", meta_gradients)
# backpropogate thru all tasks
# use adam optimizer here!!
# mega_meta_loss.backward(retain_graph=True)
# metaoptimizer_update(meta_optimizer)
# meta_gradients /= T
model_param_data = model.state_dict()
for computation_idx, (param_name, param_obj) in enumerate(model.named_parameters()):
meta_grad = meta_gradients[computation_idx]
model_param_data[param_name] = param_obj - lr_meta * meta_grad # manual update
model.load_state_dict(model_param_data)
model.zero_grad()
'''
Idea: run meta_loss.backward in inner loop, then extract gradients for the optimizer part in the outer loop to manually set
(see ocariz' reptile code)
Idea and likely code partly inspired by:
https://stackoverflow.com/questions/62459891/how-does-one-implemented-a-parametrized-meta-learner-in-pytorchs-higher-library
or idea: use earlier .backward() but with retain_graph=True
then manually update parameters using earlier models' parameters and new ones, if saved??
'''
# get a sample using the validation set
# TODO: optimize for this single wave, o.w. just checking init w/ random wave!!
val_wave = random.sample(SINE_VALIDATION, 1)[0]
meta_val_loss = evaluation(new_model, val_wave, item = True, num_samples=K, force_new=False)
metaValLosses.append(meta_val_loss)
#Print out the average loss over the last 500 iterations
# NOTE(review): despite the comment above, np.mean runs over the FULL history
# of each loss list, not a 500-iteration window.
if epoch % printing_step == 0:
print(f"Epoch {epoch}, Current loss: {np.mean(metaLosses)}, Train loss: {np.mean(metaTrainLosses)}, Val loss: {np.mean(metaValLosses)}")
# for name, param in model.named_params():
# if name =="hidden1.weights":
# print(name, param[:5])
if epoch % printing_step == 0:
print("Starting params: ", model._modules['hidden1']._parameters["weight"][:5])
# -
# Bare expression: displays the last task's meta-gradient tuple in the
# notebook output; no effect when run as a script.
meta_gradient_info
# +
#Neural network configuration stems from: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# maml training
# note: uses comments and structure largely from code ocariz wrote!
#alternative code
'''
Handling computation graphs and second-order backprop help and partial inspiration from:
- https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2
- https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3
- https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf
- https://www.youtube.com/watch?v=IkDw22a8BDE
- https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2
- https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
- https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
Neural network configuration and helper class functions copied directly from
-https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters
Sometimes called "inner" and "outer" loop, respectively
Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code)
'''
# Few-shot variant of the cell above: trains on ONE fixed, unseen wave for a
# handful of epochs ("shots") instead of sampling T fresh tasks per epoch.
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
TRAIN_SIZE = 50
TEST_SIZE = 100
VAL_SIZE = 100
SINE_TRAIN = [SineWaveTask() for _ in range(TRAIN_SIZE)]
SINE_TEST = [SineWaveTask() for _ in range(TEST_SIZE)]
SINE_VALIDATION = [SineWaveTask() for _ in range(VAL_SIZE)]
# from paper, originally T=25 (meta_batch_size parameter = 25)
# https://github.com/cbfinn/maml/blob/master/main.py#L49
# run for a fixed, unseen test wave
wave = SineWaveTask()
T = 1 # num tasks
N = 1 # number of inner loop steps (notation from: https://www.bayeswatch.com/2018/11/30/HTYM/)
K = 10 # number of samples to draw from the task
lr_task_specific = 0.01 # task specific learning rate
lr_meta = 0.001 # meta-update learning rate
num_epochs = 10 # number of shots
printing_step = 2 # show log of loss every x epochs
# num_epochs = 10
# printing_step = 2 # show log of loss every x epochs
# Initializations
initialization_to_store_meta_losses()
#Instantiate the other model, from the prior class
model = Neural_Network()
#Use the different syntax of model.params()
# meta_optimizer = torch.optim.Adam(model.params(), lr = lr_meta)
meta_optimizer = torch.optim.Adam(model.parameters(), lr = lr_meta)
#Create a list to store the meta-losses since only storing one loss at a time
metaLosses = []
metaTrainLosses = []
metaValLosses = []
mega_meta_loss = None
# NOTE(review): unlike the previous cell, meta_gradients is initialized
# OUTSIDE the epoch loop here, and because torch.autograd.grad returns a
# tuple, the `+=` below concatenates rather than summing element-wise. The
# indexed meta-update therefore keeps applying the gradients captured in the
# very first epoch. Confirm whether per-epoch reset + element-wise summation
# was intended.
meta_gradients = None
for epoch in range(num_epochs):
# store loss over all tasks to then do a large meta-level update of initial params
# idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE
# using the same wave each time
# loop over tasks and fine-tune weights per task
waves = [wave]
if epoch % printing_step == 0:
print("Starting params: ", model._modules['hidden1']._parameters["weight"][:5])
# loop over tasks and fine-tune weights per task
for i, T_i in enumerate(waves):
# copy model to use the same starting weights
#Use the different copying function capacity, our function copying doesn't work with their model formulation
new_model = copy_existing_model(model)
# note, b/c of manual gradient updates, need to zero out the gradients at the start
# https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
new_model.zero_grad()
# use model to predict on task-specific training set
task_specific_loss = evaluation(new_model, T_i, item = False, num_samples=K, force_new=False)
gradient_info = torch.autograd.grad(task_specific_loss, new_model.parameters(),
create_graph=True, retain_graph=True)
# now, need to extract gradients for each param and get a new graph
# help from: https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
# and: https://www.youtube.com/watch?v=IkDw22a8BDE
model_param_data = new_model.state_dict()
# note: order of computation is preserved and state_dict = ordered, so okay to loop
# https://github.com/HIPS/autograd/blob/master/docs/tutorial.md
for computation_idx, (param_name, param_obj) in enumerate(new_model.named_parameters()):
task_specific_grad = gradient_info[computation_idx]
model_param_data[param_name] = param_obj - lr_task_specific * task_specific_grad # manual update
# load the updated task-specific params (w/ gradient info!) into the model
new_model.load_state_dict(model_param_data)
metaTrainLosses.append(task_specific_loss.item())
# use new model to predict
# note: we want a new sample from T_i
# e.g., sample new draws from task, feed forward (e.g., get preds), compute loss, sum loss to meta_loss for later gradient use
meta_loss = evaluation(new_model, T_i, item = False, num_samples=K, force_new=True)
if mega_meta_loss is None:
mega_meta_loss = meta_loss
else: mega_meta_loss += meta_loss
metaLosses.append(meta_loss.item())
# manually save gradients -- okay b/c we have a sum -- so can distribute gradient [CHECK!!!]
# idea to get gradients per task partially inspired by:
# https://gist.github.com/unixpickle/0981d4cd8efead8b40ab27de1af0733c
meta_gradient_info = torch.autograd.grad(meta_loss, new_model.parameters(),
create_graph=True, retain_graph=True)
if meta_gradients is None: meta_gradients = meta_gradient_info
else: meta_gradients += meta_gradient_info
model_param_data = model.state_dict()
for computation_idx, (param_name, param_obj) in enumerate(model.named_parameters()):
meta_grad = meta_gradients[computation_idx]
model_param_data[param_name] = param_obj - lr_meta * meta_grad # manual update
model.load_state_dict(model_param_data)
# get a sample using the validation set
# TODO: optimize for this single wave, o.w. just checking init w/ random wave!!
val_wave = random.sample(SINE_VALIDATION, 1)[0]
meta_val_loss = evaluation(new_model, val_wave, item = True, num_samples=K, force_new=False)
metaValLosses.append(meta_val_loss)
#Print out the average loss over the last 500 iterations
# NOTE(review): np.mean runs over the full history of each list, not a window.
if epoch % printing_step == 0:
print(f"Epoch {epoch}, Current loss: {np.mean(metaLosses)}, Train loss: {np.mean(metaTrainLosses)}, Val loss: {np.mean(metaValLosses)}")
if epoch % printing_step == 0:
print("Starting params: ", model._modules['hidden1']._parameters["weight"][:5])
# +
# #Neural network configuration stems from: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# # maml training
# # note: uses comments and structure largely from code ocariz wrote!
# #alternative code
# '''
# Handling computation graphs and second-order backprop help and partial inspiration from:
# - https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2
# - https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3
# - https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf
# - https://www.youtube.com/watch?v=IkDw22a8BDE
# - https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2
# - https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
# - https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
# Neural network configuration and helper class functions copied directly from
# -https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters
# Sometimes called "inner" and "outer" loop, respectively
# Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code)
# '''
# torch.manual_seed(0)
# random.seed(0)
# np.random.seed(0)
# # run for a fixed, unseen test wave
# wave = SineWaveTask()
# #Only works for 1 task
# T = 1 # num tasks
# num_samples = 10 # number of samples to draw from the task
# lr_task_specific = 0.01 # task specific learning rate
# lr_meta = 0.001 # meta-update learning rate
# num_epochs = 10 # equivalent to k-shot max k
# printing_step = 5 # show log of loss every x epochs
# # Initializations
# initialization_to_store_meta_losses()
# #Instantiate the other model, from the prior class
# model = SineModel()
# #Use the different syntax of model.params()
# meta_optimizer = torch.optim.Adam(model.params(), lr = lr_meta)
# #Create a list to store the meta-losses since only storing one loss at a time
# metaLosses = []
# metaTrainLosses = []
# metaValLosses = []
# for epoch in range(num_epochs):
# # store loss over all tasks to then do a large meta-level update of initial params
# # idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE
# # using the same wave each time
# # loop over tasks and fine-tune weights per task
# waves = [wave]
# for i, T_i in enumerate(waves):
# # copy model to use the same starting weights
# #Use the different copying function capacity, our function copying doesn't work with their model formulation
# new_model = SineModel()
# new_model.copy(model)
# # note, b/c of manual gradient updates, need to zero out the gradients at the start
# # https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
# new_model.zero_grad()
# # use model to predict on task-specific training set
# task_specific_loss = evaluation(new_model, T_i, item = False, num_samples=num_samples, force_new=False)
# #Run the initial loss using .backward and create the graph so that we can go back through as neeeded
# # task_specific_loss.backward(create_graph=True)
# # #manually update the parameters
# # #This is adapted from the copy function which is from:
# # #https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# # for name, param in new_model.named_params():
# # new_model.set_param(name, param - lr_task_specific*param.grad)
# gradient_info = torch.autograd.grad(task_specific_loss, new_model.parameters(),
# create_graph=True, retain_graph=True)
# # now, need to extract gradients for each param and get a new graph
# # help from: https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
# # and: https://www.youtube.com/watch?v=IkDw22a8BDE
# model_param_data = new_model.state_dict()
# # note: order of computation is preserved and state_dict = ordered, so okay to loop
# # https://github.com/HIPS/autograd/blob/master/docs/tutorial.md
# for computation_idx, (param_name, param_obj) in enumerate(new_model.named_parameters()):
# task_specific_grad = gradient_info[computation_idx]
# model_param_data[param_name] = param_obj - lr_task_specific * task_specific_grad # manual update
# metaTrainLosses.append(task_specific_loss.item())
# # use new model to predict
# # note: we want a new sample from T_i
# # e.g., sample new draws from task, feed forward (e.g., get preds), compute loss, sum loss to meta_loss for later gradient use
# meta_loss = evaluation(new_model, T_i, item = False, num_samples=num_samples, force_new=True)
# metaLosses.append(meta_loss.item())
# # backpropogate thru all tasks
# # use adam optimizer here!!
# meta_loss.backward()
# metaoptimizer_update(meta_optimizer)
# # get a sample using the validation set
# val_wave = random.sample(SINE_VALIDATION, 1)[0]
# meta_val_loss = evaluation(new_model, val_wave, item = True, num_samples=num_samples, force_new=False)
# metaValLosses.append(meta_val_loss)
# #Print out the average loss over the last 500 iterations
# if epoch % printing_step == 0:
# print(f"Epoch {epoch}, Current loss: {np.mean(metaLosses)}, Train loss: {np.mean(metaTrainLosses)}, Val loss: {np.mean(metaValLosses)}")
# -
plt.plot(metaTrainLosses)
# For every training task, record three curves on a fresh 100-point sample:
# the true sine wave, the meta-initialization's predictions, and the
# predictions after one manual task-specific gradient step.
true_funcs = []
init_funcs = []
fitted_funcs = []

for func in SINE_TRAIN:
    # Draw a fresh sample of 100 (x, y) points from this task.
    x, label = get_samples_in_good_format(func,num_samples=100, force_new=True)
    # Make model prediction with the (unadapted) meta-initialization.
    prediction = model(x)
    input_coords = x.detach().numpy()[:,0]
    y_pred = prediction.detach().numpy()[:,0]
    # Sort the (x, y) pairs by x so the line plots draw left-to-right.
    pred_data = sorted([(x, y) for (x, y) in zip(input_coords, y_pred)], key=lambda x: x[0])
    true_vals = sorted([(x, y) for (x, y) in zip(input_coords, label)], key=lambda x: x[0])
    true_funcs.append(true_vals)
    init_funcs.append(pred_data)
    # Clone the meta-initialization so the adaptation step below does not
    # disturb the shared starting weights.
    new_model = SineModel()
    new_model.copy(model)
    # note, b/c of manual gradient updates, need to zero out the gradients at the start
    # https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
    new_model.zero_grad()
    # use model to predict on task-specific training set
    # NOTE(review): the loss is computed from `model`'s prediction, yet the
    # update below reads `param.grad` on `new_model` -- this relies on
    # copy() sharing/cloning gradient state; confirm against SineModel.copy.
    task_specific_loss = criterion(prediction,label)
    # Run the initial loss using .backward and create the graph so that we can go back through as needed
    task_specific_loss.backward(create_graph=True)
    # Manually apply one SGD step per parameter.
    # This is adapted from the copy function which is from:
    # https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
    for name, param in new_model.named_params():
        new_model.set_param(name, param - lr_task_specific*param.grad)
    # Re-predict with the adapted weights and store the fitted curve.
    fitted_preds = new_model(x)
    fitted_y_pred = fitted_preds.detach().numpy()[:,0]
    fitted_pred_data = sorted([(x, y) for (x, y) in zip(input_coords, fitted_y_pred)], key=lambda x: x[0])
    fitted_funcs.append(fitted_pred_data)
# +
# fig, ax = plt.subplots()
# ax.plot(np.array(data)[:,0], np.array(data)[:,1], label="Preds")
# ax.plot(np.array(true_vals)[:,0], np.array(true_vals)[:,1], label="True Vals")
# ax.legend()
# -
# Visualize each task: the ground-truth wave, the meta-initialization's
# predictions, and the task-adapted model's predictions, one figure per task.
for task_idx in range(T):
    fig, ax = plt.subplots()
    truth = np.array(true_funcs[task_idx])
    before = np.array(init_funcs[task_idx])
    after = np.array(fitted_funcs[task_idx])
    ax.plot(truth[:, 0], truth[:, 1], label="True Vals")
    ax.plot(before[:, 0], before[:, 1], label="Initial Preds")
    ax.plot(after[:, 0], after[:, 1], label="Fitted Preds")
    ax.set_title(f"Task {task_idx + 1}", fontsize=16)
    ax.legend()
init_funcs[0][0], fitted_funcs[0][0]
# +
# Evaluate the meta-learned initialization on a held-out test task: take
# `k_shots` manual gradient steps on one test wave and record the true curve,
# the pre-adaptation predictions, and the fitted predictions.
true_funcs = []
init_funcs = []
fitted_funcs = []
meta_test_losses = []
k_shots = 10

for func in SINE_TEST[:1]:
    # Start the test task from a copy of the meta-trained weights.
    new_model = SineModel()
    new_model.copy(model)
    # note, b/c of manual gradient updates, need to zero out the gradients at the start
    # https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
    new_model.zero_grad()
    for k in range(k_shots):
        # new_model = SineModel()
        # new_model.copy(model)
        # NOTE(review): a fresh 100-point sample is drawn on every shot, so
        # each gradient step sees different data -- confirm this is intended.
        x, label = get_samples_in_good_format(func,num_samples=100, force_new=True)
        # Make model prediction
        prediction = new_model(x)
        input_coords = x.detach().numpy()[:,0]
        y_pred = prediction.detach().numpy()[:,0]
        if k == 0:
            # Before any adaptation: store the initial prediction curve.
            pred_data = sorted([(x, y) for (x, y) in zip(input_coords, y_pred)], key=lambda x: x[0])
            true_vals = sorted([(x, y) for (x, y) in zip(input_coords, label)], key=lambda x: x[0])
            true_funcs.append(true_vals)
            init_funcs.append(pred_data)
        print("k: ", k)
        # use model to predict on task-specific training set
        criterion = nn.MSELoss()
        task_specific_loss = criterion(prediction,label)
        print("LOSS: ", task_specific_loss)
        # First-order update only: no create_graph, since we never
        # backpropagate through this adaptation at test time.
        task_specific_loss.backward()#create_graph=True)
        # Manually apply one SGD step per parameter.
        # This is adapted from the copy function which is from:
        # https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
        for name, param in new_model.named_params():
            # print("name: ", name, " param: ", param.grad)
            new_model.set_param(name, param - lr_task_specific*param.grad)
        # gradient_info = torch.autograd.grad(task_specific_loss, new_model.parameters())
        # # now, need to extract gradients for each param and get a new graph
        # # help from: https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
        # # and: https://www.youtube.com/watch?v=IkDw22a8BDE
        # model_param_data = new_model.state_dict()
        # # note: order of computation is preserved and state_dict = ordered, so okay to loop
        # # https://github.com/HIPS/autograd/blob/master/docs/tutorial.md
        # for computation_idx, (param_name, param_obj) in enumerate(new_model.named_parameters()):
        #     task_specific_grad = gradient_info[computation_idx]
        #     model_param_data[param_name] = param_obj - lr_task_specific * task_specific_grad # manual update
        meta_test_losses.append(task_specific_loss.item())
        new_model.zero_grad()
    # After all shots: record the fitted curve on the last sample drawn.
    fitted_preds = new_model(x)
    fitted_y_pred = fitted_preds.detach().numpy()[:,0]
    fitted_pred_data = sorted([(x, y) for (x, y) in zip(input_coords, fitted_y_pred)], key=lambda x: x[0])
    fitted_funcs.append(fitted_pred_data)
# -
task_specific_loss.grad_fn
task_specific_loss
# Sanity-check few-shot adaptation on a brand-new random wave: fine-tune the
# meta-trained model for 0..3 gradient steps and overlay each fit.
wave = SineWaveTask();
k_shot_updates = 4
initialization_to_store_meta_losses()

for shots in range(k_shot_updates):
    # NOTE(review): assumes training() returns an adapted copy of `model`
    # after `shots` gradient steps -- confirm against its definition.
    new_model = training(model, wave, lr_task_specific, shots)
    train_set_evaluation(new_model,wave,store_train_loss_meta)
    wave.plot_model(new_model, label=f'{shots+1} gradient steps')

wave.plot(label='Original Wave')
plt.legend()
|
archive/Katie-Second-Order-3_1_debug_meta_update-debug.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Association Rules Exercise
#
# ## Q1
#
# In the first exercise, use the `supermarket.csv` file
# This dataset contains 8 shopping baskets in item list format.
#
# Import this dataset as transaction data.
#
# Think about parameters including `format`, `sep`, and `rm.duplicates`
#
# Write your code below:
library("which lib?")
supermarket <-
# ## Q2
#
# Understand the supermarket data.
#
# Which unique items are there in all shopping baskets?
#
# Write your code below:
# ## Q3
#
# How many transactions contain purchases of Butter?
#
# Write your code below:
# How many transactions contain purchase of Butter and Cheese?
# Write your code below:
# # Q4
#
# Plot the support percentage of each item, for the top 4 items.
# Write your code below:
# ## Q5: Mine association rules
#
# Find all association rules with `minsupp = 0.375` and `minconf = 0.65` and with a min number of items = 2
#
# Write your code below:
# ## Q6
#
# Inspect the found rules, in the order of decreasing lift ratio
#
# Write your code below:
# # Part 2
#
# In the second exercise, we use the `book.csv` file. This dataset contains 2000 book purchases in a binary matrix format.
#
# ## Q1
#
# Import this dataset as transaction data
#
# Write your code below:
# ## Q2
#
# Plot the frequency plot, using absolute count. Which book category sells best?
#
# Write your code below:
# ## Q3: Mine association rules
#
# Find all association rules with `minsupp = 0.1` and `minconf = 0.8`
#
# Write your code below:
# ## Q4: Understand the rules
#
# Inspect the rules, and answer the following questions:
#
# - Which rule has the highest lift? What does it tell us?
# - What can be done with this rule, if you were the bookstore manager?
#
# Write your code below:
#
# ## Q5: Plot the rules using `arulesViz`
#
# Load the package `arulesViz`, plot a graph of the rules:
# Finally, plot a scatterplot, where the shading changes depending on the confidence:
|
Lab 2 - Association Rules/Exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # With the RDS Library
# ---
# RDS provides you with a powerful tool to access and manage data. Our API can quickly and efficiently filter the data and perform computations on it on the fly. It can also retrieve important metadata on each aspect of the data for you to perform more detailed analysis. To run this notebook, packages needed for generating the charts must be installed:
# !pip install -r requirements.txt
# By utilizing a **Select** query, we are able to get the specific records of data we are interested in, along with performing standard computations, grouping, and ordering of that data at the same time. In the initial example below, we are interested in data revolving around COVID-19 deaths in Ohio, so we first create a connection to that data set through the `DataProduct` (To view other dataset, browse our catalog). With this data, we want to create a simple line plot of the number of deaths for the first 2 weeks of March. We specify the columns of data we want as the date of occurrence and as a computation that sums the deaths. Grouping by date ensures that we only have one record per date with the sum of deaths for that date. We then proceed to order the records and filter them to get the month and the amount of records we wanted.
#
# If we are interested in more extensive information around any of the columns, we could check the metadata contained within the results returned by the query.
# +
from rds import Server
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Connect to the Ohio COVID-19 case data set.
server = Server('https://covid19.richdataservices.com')
catalog = server.get_catalog('us_oh')
dp = catalog.get_dataproduct('oh_doh_cases')

# Query the daily death totals for the first 2 weeks of March.
results = dp.select(
    cols=['date_stamp', 'deaths:sum(cnt_death)'],
    groupby=['date_stamp'],
    orderby=['date_stamp'],
    where=['date_stamp>=2020-03-01'],
    limit=14,
)

# Load the records into a DataFrame and draw the line plot.
df = pd.DataFrame(results.records, columns=results.columns)
date_col, deaths_col = df.columns[0], df.columns[1]
sns.set(rc={'figure.figsize':(30, 10)})
sns.lineplot(data=df, x=date_col, y=deaths_col)
plt.show()
# -
# Utilizing the various options available through RDS, you can specify the data returned to be exactly what you need.
#
# In this next example, we are now making use of a **Tabulate** query with multiple dimensions so that we can see the amount of deaths for each county. If you think about a tabulation table, the **'dims'** would be the rows and columns and the **'measure'** would be the value to fill in each cell. We then proceed with filtering and ordering the data returned like before. Setting the parameter `inject=True` fills in any coded values with their labels. Variables with classifications that specify these codes can be found in the metadata.
# +
from rds import Server
from urllib.request import urlopen
import json
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px

# Connect to the Ohio data set
server = Server('https://covid19.richdataservices.com')
catalog = server.get_catalog('us_oh')
dp = catalog.get_dataproduct('oh_doh_cases')

# Query for the number of deaths for every county in Ohio
# (dims = tabulation rows/columns, measure = the aggregated cell value).
results = dp.tabulate(dims=['us_county_fips'], measure=['Deaths:sum(cnt_death)'], orderby=['us_county_fips'])
df = pd.DataFrame(results.records, columns = results.columns)

# Getting the structure of the map (GeoJSON county boundaries keyed by FIPS).
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)

# Plug in the data and build our choropleth map.
# Fix: plotly's `labels` argument must be a dict mapping column name to
# display label; the original passed the set {'Deaths'}, which plotly
# cannot consume (it calls .get() on it).
fig = px.choropleth(df, geojson=counties, locations=df.columns[0], color='Deaths', color_continuous_scale="Reds", range_color=(0, 100), scope="usa", labels={'Deaths': 'Deaths'})
fig.update_geos(fitbounds="locations")
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}, width=1000, height=500)
fig.show()
# -
# # Without RDS
# ---
# Getting and organizing the data into a form you can work with is generally tedious and complicated. Below we demonstrate what you would need to do to get a single data set and format that data to be able to create the choropleth map you saw above.
#
# The first thing we have to do is get the data and filter it for exactly what we want. Let's see what our dataframe looks like after we do just that.
# +
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd

# Get the data
data = pd.read_csv('https://coronavirus.ohio.gov/static/COVIDSummaryData.csv')
df = pd.DataFrame(data)

# Filter the data down to county names and death counts only.
df = df[['County','Death Count']]
# Sum deaths per county. Intentionally naive: 'Death Count' is still a
# string column at this point, so the "sum" misbehaves -- the narrative
# below walks through fixing it step by step.
df = df.groupby(['County']).sum().reset_index()
df = df.sort_values('County')
print(df)
# -
# Well that doesn't look right. Seems that we understandably assumed the "Death Count" would be an integer, but we actually have to transform it into integers ourselves. Let's try that.
# +
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd

# Get the data
data = pd.read_csv('https://coronavirus.ohio.gov/static/COVIDSummaryData.csv')
df = pd.DataFrame(data)

# Filter the data
df = df[['County','Death Count']]
# Transform the data: cast the string counts to integers before summing.
# NOTE(review): this cell is expected to fail on the 'Grand Total' summary
# row, which is not a valid integer -- that is the next teaching point.
df['Death Count'] = df['Death Count'].astype(int)
df = df.groupby(['County']).sum().reset_index()
df = df.sort_values('County')
print(df)
# -
# It appears that not all values in the "Death Count" column are in a valid integer format! It turns out to be the "Grand Total" row, which isn't even wanted for our choropleth map so we can just drop it from the dataframe.
# +
from urllib.request import urlopen
import matplotlib.pyplot as plt
import json
import plotly.express as px
import seaborn as sns
import pandas as pd

# Get the data
data = pd.read_csv('https://coronavirus.ohio.gov/static/COVIDSummaryData.csv')
df = pd.DataFrame(data)

# Filter the data
df = df[['County','Death Count']]
# Clean the data: drop the 'Grand Total' summary row (not a real county).
df = df[df['County'] != 'Grand Total']
# Transform the data: cast counts to int, then aggregate per county.
df['Death Count'] = df['Death Count'].astype(int)
df = df.groupby(['County']).sum().reset_index()
df = df.sort_values('County')
print(df)

# Getting the structure of the map
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)

# Plug in the data and build our choropleth map.
# NOTE(review): this map does not render because `locations` holds county
# names rather than the FIPS codes the GeoJSON is keyed by (see the
# narrative below); also `labels={'Deaths'}` is a set, where plotly expects
# a dict mapping column name -> display label.
fig = px.choropleth(df, geojson=counties, locations=df.columns[0], color='Death Count', color_continuous_scale="Reds", range_color=(0, 100), scope="usa", labels={'Deaths'})
fig.update_geos(fitbounds="locations")
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}, width=1000, height=500)
fig.show()
# -
# Our dataframe looks good now, so why isn't our choropleth map working with it?
#
# Unfortunately, the choropleth map requires the FIPS codes for each county where we only have the names. To get this to work, we will have to convert each county name into its FIPS code and try again. If we were to just use RDS, we could switch from name to FIPS by setting `inject=True` in our query. That would've been so much easier.
# +
import matplotlib.pyplot as plt
from urllib.request import urlopen
import json
import plotly.express as px
import seaborn as sns
import pandas as pd
import requests
from bs4 import BeautifulSoup

# Get the data
data = pd.read_csv('https://coronavirus.ohio.gov/static/COVIDSummaryData.csv')
df = pd.DataFrame(data)

# Filter the data down to the two columns the map needs.
df = df[['County','Death Count']]
# Clean the data: drop the 'Grand Total' summary row (not a real county).
df = df[df['County'] != 'Grand Total']
# Transform the data: counts arrive as strings, so cast before aggregating.
df['Death Count'] = df['Death Count'].astype(int)
df = df.groupby(['County']).sum().reset_index()
df = df.sort_values('County')

# Scrape Wikipedia for the county FIPS codes (the source CSV only has names).
html = requests.get("https://en.wikipedia.org/wiki/List_of_United_States_FIPS_codes_by_county").text
soup = BeautifulSoup(html,"lxml")
table = soup.find('table', {"class":"wikitable sortable"}).find('tbody')

# Replace each Ohio county name in the dataframe with its FIPS code.
state = ''
for df_row in df.iterrows():
    county = df_row[1]['County']
    for tb_row in table.find_all('tr'):
        cells = tb_row.findChildren('td')
        if len(cells) == 3:
            # A 3-cell row starts a new state section in the Wikipedia table.
            state = str(cells[2].text).strip()
        if len(cells) >= 2 and county in cells[1].text and state == 'Ohio':
            df = df.replace(county, str(cells[0].text).strip())
            break

# Getting the structure of the map (GeoJSON county boundaries keyed by FIPS).
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)

# Plug in the data and build our choropleth map.
# Fix: plotly's `labels` argument must be a dict mapping column name to
# display label; the original passed the set {'Deaths'}, which plotly
# cannot consume (it calls .get() on it).
fig = px.choropleth(df, geojson=counties, locations=df.columns[0], color='Death Count', color_continuous_scale="Reds", range_color=(0, 100), scope="usa", labels={'Death Count': 'Deaths'})
fig.update_geos(fitbounds="locations")
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}, width=1000, height=500)
fig.show()
# -
# Since the data from our CSV is missing the FIPS codes, we have to scrape them from wikipedia. Even after that, we still need to replace every single county name with its FIPS code in our dataframe before it'll work. The result of this is some ugly loops as well as slow code.
# As you can see, RDS handles all of the hard work of organizing, transforming, and cleaning the data, allowing you to focus on the presentation of the data instead.
# ## See our [Catalog](./covid_19_catalog.ipynb) for a list of available data sets, as well as the metadata on any variables and their classifications.
|
covid_notebook/covid_19.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_pytorch_p27)
# language: python
# name: conda_pytorch_p27
# ---
# # PyTorch Cifar10 local training
#
# ## Pre-requisites
#
# This notebook shows how to use the SageMaker Python SDK to run your code in a local container before deploying to SageMaker's managed training or hosting environments. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. Just change your estimator's `train_instance_type` to `local` (or `local_gpu` if you're using an ml.p2 or ml.p3 notebook instance).
#
# In order to use this feature you'll need to install docker-compose (and nvidia-docker if training with a GPU).
#
# **Note, you can only run a single local notebook at one time.**
# !/bin/bash ./setup.sh
# ## Overview
#
# The **SageMaker Python SDK** helps you deploy your models for training and hosting in optimized, productions ready containers in SageMaker. The SageMaker Python SDK is easy to use, modular, extensible and compatible with TensorFlow, MXNet, PyTorch and Chainer. This tutorial focuses on how to create a convolutional neural network model to train the [Cifar10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) using **PyTorch in local mode**.
#
# ### Set up the environment
#
# This notebook was created and tested on a single ml.p2.xlarge notebook instance.
#
# Let's start by specifying:
#
# - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
# - The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the sagemaker.get_execution_role() with appropriate full IAM role arn string(s).
# +
import sagemaker

# SageMaker session plus the default S3 bucket/prefix where the CIFAR-10
# training data and model artifacts will live.
sagemaker_session = sagemaker.Session()

bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker/DEMO-pytorch-cnn-cifar10'

# IAM role granting SageMaker training/hosting access to the S3 data.
role = sagemaker.get_execution_role()
# +
import os
import subprocess

# Default to CPU-backed local mode; upgrade to GPU mode only if a GPU is
# actually visible on this host.
instance_type = 'local'

try:
    # `nvidia-smi` exits with status 0 when an NVIDIA GPU and driver are present.
    if subprocess.call('nvidia-smi') == 0:
        ## Set type to GPU if one is present
        instance_type = 'local_gpu'
except FileNotFoundError:
    # `nvidia-smi` is not installed at all (pure-CPU host): stay with 'local'
    # instead of crashing. The original code raised FileNotFoundError here.
    pass

print("Instance type = " + instance_type)
# -
# ### Download the Cifar10 dataset
# +
from utils_cifar import get_train_data_loader, get_test_data_loader, imshow, classes
trainloader = get_train_data_loader()
testloader = get_test_data_loader()
# -
# ### Data Preview
# +
import numpy as np
import torchvision, torch

# get some random training images
dataiter = iter(trainloader)
# Fix: use the builtin iterator protocol. The `.next()` method was removed
# from DataLoader iterators in recent PyTorch releases; `next()` works on
# every version.
images, labels = next(dataiter)

# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%9s' % classes[labels[j]] for j in range(4)))
# -
# ### Upload the data
# We use the ```sagemaker.Session.upload_data``` function to upload our datasets to an S3 location. The return value inputs identifies the location -- we will use this later when we start the training job.
inputs = sagemaker_session.upload_data(path='data', bucket=bucket, key_prefix='data/cifar10')
# # Construct a script for training
# Here is the full code for the network model:
# !pygmentize source/cifar10.py
# ## Script Functions
#
# SageMaker invokes the main function defined within your training script for training. When deploying your trained model to an endpoint, the model_fn() is called to determine how to load your trained model. The model_fn() along with a few other functions list below are called to enable predictions on SageMaker.
#
# ### [Predicting Functions](https://github.com/aws/sagemaker-pytorch-containers/blob/master/src/sagemaker_pytorch_container/serving.py)
# * model_fn(model_dir) - loads your model.
# * input_fn(serialized_input_data, content_type) - deserializes predictions to predict_fn.
# * output_fn(prediction_output, accept) - serializes predictions from predict_fn.
# * predict_fn(input_data, model) - calls a model on data deserialized in input_fn.
#
# The model_fn() is the only function that doesn't have a default implementation and is required by the user for using PyTorch on SageMaker.
# ## Create a training job using the sagemaker.PyTorch estimator
#
# The `PyTorch` class allows us to run our training function on SageMaker. We need to configure it with our training script, an IAM role, the number of training instances, and the training instance type. For local training with GPU, we could set this to "local_gpu". In this case, `instance_type` was set above based on your whether you're running a GPU instance.
#
# After we've constructed our `PyTorch` object, we fit it using the data we uploaded to S3. Even though we're in local mode, using S3 as our data source makes sense because it maintains consistency with how SageMaker's distributed, managed training ingests data.
#
# You can try the "Preview" version of PyTorch by specifying ``'1.0.0.dev'`` for ``framework_version`` when creating your PyTorch estimator.
# +
from sagemaker.pytorch import PyTorch
cifar10_estimator = PyTorch(entry_point='source/cifar10.py',
role=role,
framework_version='0.4.0',
train_instance_count=1,
train_instance_type=instance_type)
cifar10_estimator.fit(inputs)
# -
# # Deploy the trained model to prepare for predictions
#
# The deploy() method creates an endpoint (in this case locally) which serves prediction requests in real-time.
# +
from sagemaker.pytorch import PyTorchModel
cifar10_predictor = cifar10_estimator.deploy(initial_instance_count=1,
instance_type=instance_type)
# -
# # Invoking the endpoint
# +
# get some test images
dataiter = iter(testloader)
# Fix: `next(dataiter)` instead of the removed `.next()` method, so the cell
# works on modern PyTorch as well.
images, labels = next(dataiter)

# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))

# Send the batch to the locally-served endpoint and take the argmax class.
outputs = cifar10_predictor.predict(images.numpy())

_, predicted = torch.max(torch.from_numpy(np.array(outputs)), 1)

print('Predicted: ', ' '.join('%4s' % classes[predicted[j]]
                              for j in range(4)))
# -
# # Clean-up
#
# Deleting the local endpoint when you're finished is important since you can only run one local endpoint at a time.
cifar10_estimator.delete_endpoint()
|
sagemaker-python-sdk/pytorch_cnn_cifar10/pytorch_local_mode_cifar10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

# Load the training data (presumably the SIIM-ISIC melanoma competition --
# confirm) and the sample submission template.
df = pd.read_csv('train.csv')
df

sub = pd.read_csv('sample_submission.csv')

# Class balance of the binary target.
df.target.value_counts()

# Distribution of the anatomical-site column.
df['anatom_site_general_challenge'].value_counts()
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Graph representation learning through Unsupervised GraphSAGE
# ### and using it for paper classification on Cora citation dataset
# Stellargraph Unsupervised GraphSAGE is the implementation of GraphSAGE method outlined in the paper: ***[Inductive Representation Learning on Large Graphs.](http://snap.stanford.edu/graphsage/)*** <NAME>, <NAME>, and <NAME> arXiv:1706.02216
# [cs.SI], 2017.
#
# This notebook is a short demo of how Stellargraph Unsupervised GraphSAGE can be used to learn embeddings of the nodes representing papers in the [CORA citation network](https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz). Furthermore, this notebook demonstrates the use of the learnt embeddings in a downstream node classification task (classifying papers by subject). Note that the node embeddings can also be used in other graph machine learning tasks, such as link prediction, community detection, etc.
#
# ### Unsupervised GraphSAGE:
#
# A high-level explanation of the unsupervised GraphSAGE method of graph representation learning is as follows.
#
# Objective: *Given a graph, learn embeddings of the nodes using only the graph structure and the node features, without using any known node class labels* (hence "unsupervised"; for semi-supervised learning of node embeddings, see this [demo](https://github.com/stellargraph/stellargraph/tree/master/demos/node-classification/graphsage/graphsage-cora-node-classification-example.ipynb))
#
# **Unsupervised GraphSAGE model:** In the Unsupervised GraphSAGE model, node embeddings are learnt by solving a simple classification task: given a large set of "positive" `(target, context)` node pairs generated from random walks performed on the graph (i.e., node pairs that co-occur within a certain context window in random walks), and an equally large set of "negative" node pairs that are randomly selected from the graph according to a certain distribution, learn a binary classifier that predicts whether arbitrary node pairs are likely to co-occur in a random walk performed on the graph. Through learning this simple binary node-pair-classification task, the model automatically learns an inductive mapping from attributes of nodes and their neighbors to node embeddings in a high-dimensional vector space, which preserves structural and feature similarities of the nodes. Unlike embeddings obtained by algorithms such as [`node2vec`](https://snap.stanford.edu/node2vec), this mapping is inductive: given a new node (with attributes) and its links to other nodes in the graph (which was unseen during model training), we can evaluate its embeddings without having to re-train the model.
#
# In our implementation of Unsupervised GraphSAGE, the training set of node pairs is composed of an equal number of positive and negative `(target, context)` pairs from the graph. The positive `(target, context)` pairs are the node pairs co-occuring on random walks over the graph whereas the negative node pairs are sampled randomly from a global node degree distribution of the graph.
#
# The architecture of the node pair classifier is the following. Input node pairs (with node features) are fed, together with the graph structure, into a pair of identical GraphSAGE encoders, producing a pair of node embeddings. These embeddings are then fed into a node pair classification layer, which applies a binary operator to those node embeddings (e.g., concatenating them), and passes the resulting node pair embeddings through a linear transform followed by a binary activation (e.g., sigmoid), thus predicting a binary label for the node pair.
#
# The entire model is trained end-to-end by minimizing the loss function of choice (e.g., binary cross-entropy between predicted node pair labels and true link labels) using stochastic gradient descent (SGD) updates of the model parameters, with minibatches of 'training' links generated on demand and fed into the model.
#
# Node embeddings obtained from the encoder part of the trained classifier can be used in various downstream tasks. In this demo, we show how these can be used for predicting node labels.
# +
import networkx as nx
import pandas as pd
import numpy as np
import os
import random
import stellargraph as sg
from stellargraph.data import EdgeSplitter
from stellargraph.mapper import GraphSAGELinkGenerator
from stellargraph.layer import GraphSAGE, link_classification
from stellargraph.data import UniformRandomWalk
from stellargraph.data import UnsupervisedSampler
from sklearn.model_selection import train_test_split
from tensorflow import keras
from sklearn import preprocessing, feature_extraction, model_selection
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.metrics import accuracy_score
from stellargraph import globalvar
from stellargraph import datasets
from IPython.display import display, HTML
# -
# ### Loading the CORA network data
# Download the Cora citation dataset and load its edge list and node table.
dataset = datasets.Cora()
display(HTML(dataset.description))
dataset.download()
# Load the graph from the edgelist (in `cited-paper` <- `citing-paper` order).
edgelist = pd.read_csv(
    os.path.join(dataset.data_directory, "cora.cites"),
    sep="\t",
    header=None,
    names=["target", "source"],
)
# Load the features and subject for the nodes.
# Each paper row carries 1433 binary word-occurrence features plus a "subject" column.
feature_names = ["w_{}".format(ii) for ii in range(1433)]
column_names = feature_names + ["subject"]
node_data = pd.read_csv(
    os.path.join(dataset.data_directory, "cora.content"),
    sep="\t",
    header=None,
    names=column_names,
)
# The node features are all attributes except the paper subject (we will use this for plotting the embeddings later). Paper subject labels are not used for unsupervised training.
node_features = node_data[feature_names]
# ### Unsupervised GraphSAGE with on demand sampling
# The Unsupervised GraphSAGE requires a training sample that can be either provided as a list of `(target, context)` node pairs or it can be provided with an `UnsupervisedSampler` instance that takes care of generating positive and negative samples of node pairs on demand. In this demo we discuss the latter technique.
#
# #### UnsupervisedSampler:
# The `UnsupervisedSampler` class takes in a `Stellargraph` graph instance. The `generator` method in the `UnsupervisedSampler` is responsible for generating equal number of positive and negative node pair samples from the graph for training. The samples are generated by performing uniform random walks over the graph, using `UniformRandomWalk` object. Positive `(target, context)` node pairs are extracted from the walks, and for each
# positive pair a corresponding negative pair `(target, node)` is generated by randomly sampling `node` from the degree distribution of the graph. Once the `batch_size` number of samples is accumulated, the generator yields a list of positive and negative node pairs along with their respective 1/0 labels.
#
# In the current implementation, we use uniform random walks to explore the graph structure. The length and number of walks, as well as the root nodes for starting the walks can be user-specified. The default list for root nodes is all nodes of the graph, default `number_of_walks` is 1 (at least one walk per root node), and the default `length` of walks is 2 (need at least one node beyond the root node on the walk as a potential positive context).
# **1. Create the Stellargraph with node features.**
# Build the StellarGraph: one node type ("paper", with word features) and one
# edge type ("cites").
G = sg.StellarGraph(nodes={"paper": node_features}, edges={"cites": edgelist})
print(G.info())
# **2. Specify the other optional parameter values: root nodes, the number of walks to take per node, the length of each walk, and random seed.**
nodes = list(G.nodes())  # start one walk from every node in the graph
number_of_walks = 1  # walks per root node
length = 5  # nodes per random walk
# **3. Create the UnsupervisedSampler instance with the relevant parameters passed to it.**
unsupervised_samples = UnsupervisedSampler(
    G, nodes=nodes, length=length, number_of_walks=number_of_walks
)
# The graph G together with the unsupervised sampler will be used to generate samples.
# **4. Create a node pair generator:**
#
# Next, create the node pair generator for sampling and streaming the training data to the model. The node pair generator essentially "maps" pairs of nodes `(target, context)` to the input of GraphSAGE: it either takes minibatches of node pairs, or an `UnsupervisedSampler` instance which generates the minibatches of node pairs on demand. The generator samples 2-hop subgraphs with `(target, context)` head nodes extracted from those pairs, and feeds them, together with the corresponding binary labels indicating which pair represent positive or negative sample, to the input layer of the node pair classifier with GraphSAGE node encoder, for SGD updates of the model parameters.
#
# Specify:
# 1. The minibatch size (number of node pairs per minibatch).
# 2. The number of epochs for training the model.
# 3. The sizes of 1- and 2-hop neighbor samples for GraphSAGE:
#
# Note that the length of `num_samples` list defines the number of layers/iterations in the GraphSAGE encoder. In this example, we are defining a 2-layer GraphSAGE encoder.
batch_size = 50  # node pairs per minibatch
epochs = 4
# Neighbors sampled per hop: 10 at the first hop, 5 at the second.
num_samples = [10, 5]
# In the following we show the working of node pair generator with the UnsupervisedSampler, which will generate samples on demand.
generator = GraphSAGELinkGenerator(G, batch_size, num_samples)
train_gen = generator.flow(unsupervised_samples)
# Build the model: a 2-layer GraphSAGE encoder acting as node representation learner, with a link classification layer on concatenated (`citing-paper`, `cited-paper`) node embeddings.
#
# GraphSAGE part of the model, with hidden layer sizes of 50 for both GraphSAGE layers, a bias term, and no dropout. (Dropout can be switched on by specifying a positive dropout rate, 0 < dropout < 1).
# Note that the length of `layer_sizes` list must be equal to the length of `num_samples`, as `len(num_samples)` defines the number of hops (layers) in the GraphSAGE encoder.
layer_sizes = [50, 50]
graphsage = GraphSAGE(
    layer_sizes=layer_sizes, generator=generator, bias=True, dropout=0.0, normalize="l2"
)
# Build the model and expose input and output sockets of graphsage, for node pair inputs:
x_inp, x_out = graphsage.build()
# Final node pair classification layer that takes a pair of nodes' embeddings produced by `graphsage` encoder, applies a binary operator to them to produce the corresponding node pair embedding ('ip' for inner product; other options for the binary operator can be seen by running a cell with `?link_classification` in it), and passes it through a dense layer:
prediction = link_classification(
    output_dim=1, output_act="sigmoid", edge_embedding_method="ip"
)(x_out)
# Stack the GraphSAGE encoder and prediction layer into a Keras model, and specify the loss
# +
model = keras.Model(inputs=x_inp, outputs=prediction)
# NOTE(review): `lr` is deprecated in newer Keras in favor of `learning_rate`;
# kept as-is to match the TF/Keras version this demo targets.
model.compile(
    optimizer=keras.optimizers.Adam(lr=1e-3),
    loss=keras.losses.binary_crossentropy,
    metrics=[keras.metrics.binary_accuracy],
)
# -
# **5. Train the model.**
# NOTE(review): `fit_generator` is deprecated in TF2 (plain `model.fit` accepts
# generators); kept for compatibility with the Keras version used by this demo.
history = model.fit_generator(
    train_gen,
    epochs=epochs,
    verbose=1,
    use_multiprocessing=False,
    workers=4,
    shuffle=True,
)
# Note that multiprocessing is switched off, since with a large training set of node pairs, multiprocessing can considerably slow down the training process with the data being transferred between various processes.
#
# Also, multiple workers can be used with `Keras version 2.2.4` and above, and it speeds up the training process considerably due to multi-threading.
# ### Extracting node embeddings
# Now that the node pair classifier is trained, we can use its node encoder part as node embeddings evaluator. Below we evaluate node embeddings as activations of the output of graphsage layer stack, and visualise them, coloring nodes by their subject label.
# +
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from stellargraph.mapper import GraphSAGENodeGenerator
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# **Building a new node-based model**
#
# The `(src, dst)` node pair classifier `model` has two identical node encoders: one for source nodes in the node pairs, the other for destination nodes in the node pairs passed to the model. We can use either of the two identical encoders to evaluate node embeddings. Below we create an embedding model by defining a new Keras model with `x_inp_src` (a list of odd elements in `x_inp`) and `x_out_src` (the 1st element in `x_out`) as input and output, respectively. Note that this model's weights are the same as those of the corresponding node encoder in the previously trained node pair classifier.
# x_inp alternates source/destination inputs per hop; take every second tensor
# (indices 0, 2, ...) to isolate the source-node encoder's inputs.
x_inp_src = x_inp[0::2]
x_out_src = x_out[0]  # source-node embedding output
embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)
# We also need a node generator to feed graph nodes to `embedding_model`. We want to evaluate node embeddings for all nodes in the graph:
node_ids = node_data.index
node_gen = GraphSAGENodeGenerator(G, batch_size, num_samples).flow(node_ids)
# We now use `node_gen` to feed all nodes into the embedding model and extract their embeddings:
# NOTE(review): `predict_generator` is deprecated in TF2; kept for the targeted Keras version.
node_embeddings = embedding_model.predict_generator(node_gen, workers=4, verbose=1)
# #### Visualize the node embeddings
# Next we visualize the node embeddings in 2D using t-SNE. Colors of the nodes depict their true classes (subject in the case of Cora dataset) of the nodes.
# +
# Integer-code the subject labels for coloring the scatter plot.
node_subject = node_data["subject"].astype("category").cat.codes
X = node_embeddings
if X.shape[1] > 2:
    # Reduce the 50-dimensional embeddings to 2D for plotting.
    transform = TSNE  # PCA
    trans = transform(n_components=2)
    emb_transformed = pd.DataFrame(trans.fit_transform(X), index=node_ids)
    emb_transformed["label"] = node_subject
else:
    emb_transformed = pd.DataFrame(X, index=node_ids)
    # NOTE(review): the DataFrame columns here are ints 0 and 1 already, so this
    # rename with string keys looks like a no-op — verify.
    emb_transformed = emb_transformed.rename(columns={"0": 0, "1": 1})
    emb_transformed["label"] = node_subject
# +
alpha = 0.7
fig, ax = plt.subplots(figsize=(7, 7))
# NOTE(review): passing a pandas Categorical as `c` relies on matplotlib
# coercing it to numbers; `.astype("category").cat.codes` would be explicit —
# confirm against the matplotlib version in use.
ax.scatter(
    emb_transformed[0],
    emb_transformed[1],
    c=emb_transformed["label"].astype("category"),
    cmap="jet",
    alpha=alpha,
)
ax.set(aspect="equal", xlabel="$X_1$", ylabel="$X_2$")
plt.title(
    "{} visualization of GraphSAGE embeddings for cora dataset".format(transform.__name__)
)
plt.show()
# -
# The observation that same-colored nodes in the embedding space are concentrated together is indicative of similarity of embeddings of papers on the same topics. We would emphasize here again that the node embeddings are learnt in unsupervised way, without using true class labels.
# ### Downstream task
#
# The node embeddings calculated using the unsupervised GraphSAGE can be used as node feature vectors in a downstream task such as node classification.
#
# In this example, we will use the node embeddings to train a simple Logistic Regression classifier to predict paper subjects in Cora dataset.
# X will hold the 50 input features (node embeddings)
X = node_embeddings
# y holds the corresponding target values
y = np.array(node_subject)
# ### Data Splitting
#
# We split the data into train and test sets.
#
# We use 5% of the data for training and the remaining 95% for testing as a hold out test set.
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.05, test_size=None, stratify=y
)
# ### Classifier Training
#
# We train a Logistic Regression classifier on the training data.
clf = LogisticRegression(verbose=0, solver="lbfgs", multi_class="auto")
clf.fit(X_train, y_train)
# Predict the hold out test set.
y_pred = clf.predict(X_test)
# Calculate the accuracy of the classifier on the test set.
accuracy_score(y_test, y_pred)
# The obtained accuracy is pretty decent, better than that obtained by using node embeddings obtained by `node2vec` that ignores node attributes, only taking into account the graph structure (see this [demo](https://github.com/stellargraph/stellargraph/tree/master/demos/embeddings/stellargraph-node2vec.ipynb)).
# **Predicted classes**
pd.Series(y_pred).value_counts()
# **True classes**
pd.Series(y).value_counts()
# ### Uses for unsupervised graph representation learning
# 1. Unsupervised GraphSAGE learns embeddings of unlabeled graph nodes. This is highly useful as most of the real-world data is typically either unlabeled, or have noisy, unreliable, or sparse labels. In such scenarios unsupervised techniques that learn low-dimensional meaningful representation of nodes in a graph by leveraging the graph structure and features of the nodes is useful.
# 2. Moreover, GraphSAGE is an inductive technique that allows us to obtain embeddings of unseen nodes, without the need to re-train the embedding model. That is, instead of training individual embeddings for each node (as in algorithms such as `node2vec` that learn a look-up table of node embeddings), GraphSAGE learns a function that generates embeddings by sampling and aggregating attributes from each node's local neighborhood, and combining those with the node's own attributes.
|
demos/embeddings/embeddings-unsupervised-graphsage-cora.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from astropy.coordinates import (CartesianRepresentation,
UnitSphericalRepresentation)
from astropy.coordinates.matrix_utilities import rotation_matrix
import numpy as np
import astropy.units as u
from scipy.integrate import quad
# Simulation dimensions: stellar inclinations, rotation phases, spots per star.
n_incs = 10
n_phases = 30
n_spots = 3
# Spot-to-photosphere intensity ratio (< 1 means dark spots).
spot_contrast = 0.7
spot_radii = 0.2 * np.ones((n_spots, n_incs))
# Random stellar inclinations in [-90, 90) deg, spot longitudes in [0, 360) deg,
# and spot latitudes in [70, 90) deg (near-polar spots).
inc_stellar = (180*np.random.rand(n_spots,n_incs) - 90) * u.deg
spot_lons = 360*np.random.rand(n_spots,n_incs) * u.deg
spot_lats = (20*np.random.rand(n_spots,n_incs) + 70) * u.deg
# Rotation phases over one full revolution.
phases = np.linspace(0, 2*np.pi, n_phases)
def limb_darkening(u_ld, r):
    """Quadratic limb-darkening intensity at projected radius ``r``.

    ``u_ld`` is the pair of quadratic limb-darkening coefficients (u1, u2);
    the (1 - u1/3 - u2/6) * pi factor normalizes the profile.
    """
    u1, u2 = u_ld
    one_minus_mu = 1 - np.sqrt(1 - r**2)
    intensity = 1 - u1 * one_minus_mu - u2 * one_minus_mu**2
    return intensity / (1 - u1/3 - u2/6) / np.pi
def limb_darkening_normed(u_ld, r):
    """Limb-darkening profile rescaled so the disk-center (r = 0) value is 1."""
    central_intensity = limb_darkening(u_ld, 0)
    return limb_darkening(u_ld, r) / central_intensity
def total_flux(u_ld):
    """Disk-integrated flux of the center-normalized profile: 2*pi*int r*I(r) dr."""
    integrand = lambda r: r * limb_darkening_normed(u_ld, r)
    integral, _abserr = quad(integrand, 0, 1)
    return 2 * np.pi * integral
# Quadratic limb-darkening coefficients (u1, u2) and the total disk flux.
u_ld = [0.5, 0.1]
f0 = total_flux(u_ld)
# Spot positions on the unit sphere, converted to Cartesian coordinates.
usr = UnitSphericalRepresentation(spot_lons, spot_lats)
cartesian = usr.represent_as(CartesianRepresentation)
# Rotation about the spin (z) axis for each phase, then a tilt about y by
# (inclination - 90 deg) for the viewing geometry.
rotate = rotation_matrix(phases[:, np.newaxis, np.newaxis],
                         axis='z')
tilt = rotation_matrix(inc_stellar - 90*u.deg, axis='y')
rotated_spot_positions = cartesian.transform(rotate)
tilted_spot_positions = rotated_spot_positions.transform(tilt)
# Projected distance of each spot from disk center; spots with x < 0
# (presumably the hemisphere facing away from the observer — confirm the
# axis convention) are masked out.
r = np.ma.masked_array(np.sqrt(tilted_spot_positions.y**2 +
                               tilted_spot_positions.z**2),
                       mask=tilted_spot_positions.x < 0)
ld = limb_darkening_normed(u_ld, r)
# Per-spot flux deficit: spot area * contrast deficit * local limb darkening
# * sqrt(1 - r^2) foreshortening of the projected area.
f_spots = (np.pi * spot_radii**2 * (1 - spot_contrast) * ld *
           np.sqrt(1 - r**2))
# Relative light curve: 1 minus the summed spot deficits (drop the mask),
# then normalized to its per-column maximum on the next line.
delta_f = (1 - np.sum(f_spots/f0, axis=1)).data
delta_f/delta_f.max(axis=0)
# -
cartesian
|
fleck_linalg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from os import listdir
from os.path import isfile, join
from PIL import Image
import json
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
from pycocotools import mask as maskUtils
# %matplotlib inline
# +
# Root directory of the project.
# NOTE(review): uses the current working directory, so the notebook is assumed
# to be launched from the repository root.
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# -
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in all visualizations in
    the notebook. Provide a central point to control graph sizes.

    Change the default size attribute to control the size of rendered images.
    """
    figure_size = (size * cols, size * rows)
    _fig, ax = plt.subplots(rows, cols, figsize=figure_size)
    return ax
# +
class ShapesConfig(Config):
    """Training configuration for the SMT component dataset.

    Derives from the base Config class and overrides values specific to this
    small-image dataset. (The "shapes" name and several comments are kept from
    the Mask R-CNN toy-shapes demo this notebook appears to be adapted from.)
    """
    # Give the configuration a recognizable name
    NAME = "shapes"
    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8
    # Number of classes (including background)
    NUM_CLASSES = 1 + 3  # background + 3 shapes
    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 128
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32
    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100
    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 5
config = ShapesConfig()
#config.display()
# -
class ShapesDataset(utils.Dataset):
    """Dataset of SMT component images with JSON polygon/box annotations.

    Images are read from a directory; each image has a companion
    "<image name>__labels.json" file describing labelled regions. The
    annotations are converted to binary instance masks (via COCO-style RLE)
    for Mask R-CNN training.
    """

    def load_shapes(self, IMG_PATH, LBL_PATH):
        """Register all classes and all images found under IMG_PATH.

        Args:
            IMG_PATH: directory containing the image files.
            LBL_PATH: directory containing one "<image>__labels.json" per image.
        """
        # Add classes (component designator prefixes, plus catch-alls).
        self.add_class("smt", 1, "R")
        self.add_class("smt", 2, "C")
        self.add_class("smt", 3, "L")
        self.add_class("smt", 4, "D")
        self.add_class("smt", 5, "Q")
        self.add_class("smt", 6, "X")
        self.add_class("smt", 7, "Misc")
        self.add_class("smt", 8, "Unclassified")
        # Add images.
        # BUG FIX: the original iterated the global IMAGE_DIR instead of the
        # IMG_PATH parameter; use the parameter (existing callers pass
        # IMAGE_DIR, so their behavior is unchanged).
        image_files = [f for f in listdir(IMG_PATH) if isfile(join(IMG_PATH, f))]
        for fname in image_files:
            image_path = os.path.join(IMG_PATH, fname)
            # BUG FIX: build the label path with os.path.join instead of a
            # hard-coded "\\" so this also works on non-Windows platforms.
            label_path = os.path.join(LBL_PATH, fname[:-4] + "__labels.json")
            print(label_path)
            # BUG FIX: close the annotation file (the original leaked the
            # handle from open(...).read()).
            with open(label_path) as label_file:
                annotation = json.load(label_file)
            print(image_path)
            with Image.open(image_path) as img:
                width, height = img.size
            self.add_image(
                "smt",
                image_id=fname,
                path=image_path,
                width=width,
                height=height,
                annotation=annotation,
            )

    def image_reference(self, image_id):
        """Return a human-readable reference (the file path) for the image."""
        info = self.image_info[image_id]
        if info["source"] == "smt":
            # BUG FIX: the original returned info["smt"], a key add_image never
            # stores (KeyError); return the image path instead.
            return info["path"]
        # BUG FIX: super(self.__class__) built an unbound super object (so the
        # call raised), and its result was not returned.
        return super().image_reference(image_id)

    def load_mask(self, image_id):
        """Load instance masks for the given image.

        Converts the stored JSON annotation into the bitmap format expected
        by Mask R-CNN.

        Returns:
            masks: A bool array of shape [height, width, instance count] with
                one mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        image_info = self.image_info[image_id]
        instance_masks = []
        class_ids = []
        annotations = image_info['annotation']['labels']
        # Map label-tool class names to the class IDs registered in load_shapes.
        class_map = {"R": 1, "C": 2, "L": 3, "D": 4, "Q": 5, "X": 6,
                     "Misc": 7, "Unclassified": 8}
        for annotation in annotations:
            # Regions without a class label fall back to "Unclassified".
            if annotation["label_class"] is None:
                class_id = class_map["Unclassified"]
            else:
                class_id = class_map[annotation["label_class"]]
            if class_id:
                m = self.annToMask(annotation, image_info["height"], image_info["width"])
                # Some objects are so small that they're less than 1 pixel area
                # and end up rounded out. Skip those objects.
                if m.max() < 1:
                    continue
                instance_masks.append(m)
                class_ids.append(class_id)
        # Pack instance masks into an array.
        if class_ids:
            mask = np.stack(instance_masks, axis=2)
            class_ids = np.array(class_ids, dtype=np.int32)
            return mask, class_ids
        # BUG FIX: the original referenced the undefined name CocoDataset
        # (NameError); delegate to the base class, which returns an empty mask.
        return super().load_mask(image_id)

    def annToRLE(self, ann, height, width):
        """Convert an annotation (box or polygon) to COCO run-length encoding.

        Returns:
            rle: a COCO RLE object covering the annotation's region.
        """
        if ann['label_type'] == "box":
            # Expand centre + size into the four corner vertices of the box.
            centre_x_y = ann['centre']
            size_x_y = ann['size']
            segm = [centre_x_y['x'] - size_x_y['x']/2, centre_x_y['y'] + size_x_y['y']/2,
                    centre_x_y['x'] - size_x_y['x']/2, centre_x_y['y'] - size_x_y['y']/2,
                    centre_x_y['x'] + size_x_y['x']/2, centre_x_y['y'] - size_x_y['y']/2,
                    centre_x_y['x'] + size_x_y['x']/2, centre_x_y['y'] + size_x_y['y']/2]
            segm = [segm]  # make list within list.
        else:
            # Polygon: vertices arrive as [{'x': ..., 'y': ...}, ...]; flatten
            # to the even-length [x0, y0, x1, y1, ...] list maskUtils expects.
            segm_x_y = ann['vertices']
            segm = []
            for vertex in segm_x_y:
                segm.append(vertex['x'])
                segm.append(vertex['y'])
            segm = [segm]
        if isinstance(segm, list):
            # polygon -- a single object might consist of multiple parts;
            # merge all parts into one mask RLE code.
            rles = maskUtils.frPyObjects(segm, height, width)
            rle = maskUtils.merge(rles)
        elif isinstance(segm['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, height, width)
        else:
            # Already-encoded RLE. Unreachable for the formats built above;
            # kept for parity with the COCO reference implementation.
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann, height, width):
        """Convert an annotation to a binary mask (numpy 2D array)."""
        rle = self.annToRLE(ann, height, width)
        m = maskUtils.decode(rle)
        return m
# Image and per-image JSON annotation folders under the project root.
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
LBL_DIR = os.path.join(ROOT_DIR, "annotations")
# Build and prepare the training dataset from the image/annotation folders.
dataset_train = ShapesDataset()
dataset_train.load_shapes(IMAGE_DIR, LBL_DIR)
dataset_train.prepare()
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 15)
for image_id in image_ids:
    image = dataset_train.load_image(image_id)
    print(image_id)
    mask, class_ids = dataset_train.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)
# +
# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last
if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    # NOTE(review): find_last()[1] assumes an older Mask R-CNN version whose
    # find_last() returns a (directory, checkpoint_path) tuple.
    model.load_weights(model.find_last()[1], by_name=True)
# -
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
# NOTE: no separate validation split is built in this notebook, so the
# training set doubles as the validation set.
model.train(dataset_train, dataset_train,
            learning_rate=config.LEARNING_RATE,
            epochs=1,
            layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
# BUG FIX: the original passed `dataset_val`, which is never defined anywhere
# in this notebook (NameError at runtime). Use `dataset_train` for validation,
# matching the heads-only training call above.
model.train(dataset_train, dataset_train,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=2,
            layers="all")
# +
class InferenceConfig(ShapesConfig):
    # Run inference one image at a time on a single GPU.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
# NOTE(review): indexing find_last()[1] assumes an older Mask R-CNN version
# that returns a (directory, checkpoint_path) tuple; newer versions return
# just the path.
model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# +
# Test on a random image.
# BUG FIX: `dataset_val` is never defined in this notebook (NameError);
# sample from — and load ground truth from — the training set instead.
image_id = random.choice(dataset_train.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset_train, inference_config,
                           image_id, use_mini_mask=False)
log("original_image", original_image)
log("image_meta", image_meta)
# BUG FIX: the original logged gt_bbox under the "gt_class_id" label.
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                            dataset_train.class_names, figsize=(8, 8))
|
stm_dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# # Understanding masking & padding
#
# **Authors:** <NAME>, <NAME><br>
# **Date created:** 2019/07/16<br>
# **Last modified:** 2020/04/14<br>
# **Description:** Complete guide to using mask-aware sequence layers in Keras.
# + [markdown] colab_type="text"
# ## Setup
# + colab_type="code"
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# + [markdown] colab_type="text"
# ## Introduction
#
# **Masking** is a way to tell sequence-processing layers that certain timesteps
# in an input are missing, and thus should be skipped when processing the data.
#
# **Padding** is a special form of masking where the masked steps are at the start or at
# the end of a sequence. Padding comes from the need to encode sequence data into
# contiguous batches: in order to make all sequences in a batch fit a given standard
# length, it is necessary to pad or truncate some sequences.
#
# Let's take a close look.
# + [markdown] colab_type="text"
# ## Padding sequence data
#
# When processing sequence data, it is very common for individual samples to have
# different lengths. Consider the following example (text tokenized as words):
#
# ```
# [
# ["Hello", "world", "!"],
# ["How", "are", "you", "doing", "today"],
# ["The", "weather", "will", "be", "nice", "tomorrow"],
# ]
# ```
#
# After vocabulary lookup, the data might be vectorized as integers, e.g.:
#
# ```
# [
# [71, 1331, 4231]
# [73, 8, 3215, 55, 927],
# [83, 91, 1, 645, 1253, 927],
# ]
# ```
#
# The data is a nested list where individual samples have length 3, 5, and 6,
# respectively. Since the input data for a deep learning model must be a single tensor
# (of shape e.g. `(batch_size, 6, vocab_size)` in this case), samples that are shorter
# than the longest item need to be padded with some placeholder value (alternatively,
# one might also truncate long samples before padding short samples).
#
# Keras provides a utility function to truncate and pad Python lists to a common length:
# `tf.keras.preprocessing.sequence.pad_sequences`.
# + colab_type="code"
# Three "tokenized" samples of lengths 3, 5, and 6.
raw_inputs = [
    [711, 632, 71],
    [73, 8, 3215, 55, 927],
    [83, 91, 1, 645, 1253, 927],
]
# By default, this will pad using 0s; it is configurable via the
# "value" parameter.
# Note that you could "pre" padding (at the beginning) or
# "post" padding (at the end).
# We recommend using "post" padding when working with RNN layers
# (in order to be able to use the
# CuDNN implementation of the layers).
# Pads every sample to the length of the longest one (6 here).
padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(
    raw_inputs, padding="post"
)
print(padded_inputs)
# + [markdown] colab_type="text"
# ## Masking
#
# Now that all samples have a uniform length, the model must be informed that some part
# of the data is actually padding and should be ignored. That mechanism is **masking**.
#
# There are three ways to introduce input masks in Keras models:
#
# - Add a `keras.layers.Masking` layer.
# - Configure a `keras.layers.Embedding` layer with `mask_zero=True`.
# - Pass a `mask` argument manually when calling layers that support this argument (e.g.
# RNN layers).
# + [markdown] colab_type="text"
# ## Mask-generating layers: `Embedding` and `Masking`
#
# Under the hood, these layers will create a mask tensor (2D tensor with shape `(batch,
# sequence_length)`), and attach it to the tensor output returned by the `Masking` or
# `Embedding` layer.
# + colab_type="code"
# An Embedding with mask_zero=True treats input value 0 as padding and
# attaches a boolean mask to its output tensor.
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
masked_output = embedding(padded_inputs)
# `_keras_mask` is the internal attribute Keras uses to carry the mask along.
print(masked_output._keras_mask)
masking_layer = layers.Masking()
# Simulate the embedding lookup by expanding the 2D input to 3D,
# with embedding dimension of 10.
unmasked_embedding = tf.cast(
    tf.tile(tf.expand_dims(padded_inputs, axis=-1), [1, 1, 10]), tf.float32
)
masked_embedding = masking_layer(unmasked_embedding)
print(masked_embedding._keras_mask)
# + [markdown] colab_type="text"
# As you can see from the printed result, the mask is a 2D boolean tensor with shape
# `(batch_size, sequence_length)`, where each individual `False` entry indicates that
# the corresponding timestep should be ignored during processing.
# + [markdown] colab_type="text"
# ## Mask propagation in the Functional API and Sequential API
#
# When using the Functional API or the Sequential API, a mask generated by an `Embedding`
# or `Masking` layer will be propagated through the network for any layer that is
# capable of using them (for example, RNN layers). Keras will automatically fetch the
# mask corresponding to an input and pass it to any layer that knows how to use it.
#
# For instance, in the following Sequential model, the `LSTM` layer will automatically
# receive a mask, which means it will ignore padded values:
# + colab_type="code"
# The Embedding's mask is propagated automatically to the LSTM, which then
# skips the padded timesteps.
model = keras.Sequential(
    [layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True), layers.LSTM(32),]
)
# + [markdown] colab_type="text"
# This is also the case for the following Functional API model:
# + colab_type="code"
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
outputs = layers.LSTM(32)(x)
model = keras.Model(inputs, outputs)
# + [markdown] colab_type="text"
# ## Passing mask tensors directly to layers
# + [markdown] colab_type="text"
# Layers that can handle masks (such as the `LSTM` layer) have a `mask` argument in their
# `__call__` method.
#
# Meanwhile, layers that produce a mask (e.g. `Embedding`) expose a `compute_mask(input,
# previous_mask)` method which you can call.
#
# Thus, you can pass the output of the `compute_mask()` method of a mask-producing layer
# to the `__call__` method of a mask-consuming layer, like this:
# + colab_type="code"
class MyLayer(layers.Layer):
    """Toy layer demonstrating manual mask passing: it asks its Embedding
    sublayer for the mask and hands it explicitly to its LSTM sublayer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
        self.lstm = layers.LSTM(32)

    def call(self, inputs):
        embedded = self.embedding(inputs)
        # A `mask` tensor could also be built by hand: any boolean tensor
        # of shape (batch_size, timesteps) works.
        padding_mask = self.embedding.compute_mask(inputs)
        # The LSTM skips the timesteps the mask marks as padding.
        return self.lstm(embedded, mask=padding_mask)
layer = MyLayer()
# Random integer "token" batch of shape (32, 10); any zeros are masked.
x = np.random.random((32, 10)) * 100
x = x.astype("int32")
layer(x)
# + [markdown] colab_type="text"
# ## Supporting masking in your custom layers
# + [markdown] colab_type="text"
# Sometimes, you may need to write layers that generate a mask (like `Embedding`), or
# layers that need to modify the current mask.
#
# For instance, any layer that produces a tensor with a different time dimension than its
# input, such as a `Concatenate` layer that concatenates on the time dimension, will
# need to modify the current mask so that downstream layers will be able to properly
# take masked timesteps into account.
#
# To do this, your layer should implement the `layer.compute_mask()` method, which
# produces a new mask given the input and the current mask.
#
# Here is an example of a `TemporalSplit` layer that needs to modify the current mask.
# + colab_type="code"
class TemporalSplit(keras.layers.Layer):
    """Split the input tensor into 2 tensors along the time dimension."""

    def call(self, inputs):
        # Input is expected to be 3D with time on axis 1; halve it along
        # that axis into two subtensors.
        return tf.split(inputs, 2, axis=1)

    def compute_mask(self, inputs, mask=None):
        # Keep the (2D) mask aligned with the split output: halve it the
        # same way, or propagate the absence of a mask.
        return None if mask is None else tf.split(mask, 2, axis=1)
# Splitting the tensor also splits its mask; each half carries `_keras_mask`.
first_half, second_half = TemporalSplit()(masked_embedding)
for half in (first_half, second_half):
    print(half._keras_mask)
# + [markdown] colab_type="text"
# Here is another example of a `CustomEmbedding` layer that is capable of generating a
# mask from input values:
# + colab_type="code"
class CustomEmbedding(keras.layers.Layer):
    """Embedding layer that can emit a mask flagging token id 0 as padding."""

    def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.mask_zero = mask_zero

    def build(self, input_shape):
        # One trainable embedding row per vocabulary entry.
        self.embeddings = self.add_weight(
            shape=(self.input_dim, self.output_dim),
            initializer="random_normal",
            dtype="float32",
        )

    def call(self, inputs):
        return tf.nn.embedding_lookup(self.embeddings, inputs)

    def compute_mask(self, inputs, mask=None):
        # Timesteps whose token id is 0 are marked as padding (False).
        return tf.not_equal(inputs, 0) if self.mask_zero else None
# Exercise the custom embedding and inspect the generated mask.
layer = CustomEmbedding(10, 32, mask_zero=True)
x = (np.random.random((3, 10)) * 9).astype("int32")
y = layer(x)
mask = layer.compute_mask(x)
print(mask)
# + [markdown] colab_type="text"
# ## Opting-in to mask propagation on compatible layers
#
# Most layers don't modify the time dimension, so don't need to modify the current mask.
# However, they may still want to be able to **propagate** the current mask, unchanged,
# to the next layer. **This is an opt-in behavior.** By default, a custom layer will
# destroy the current mask (since the framework has no way to tell whether propagating
# the mask is safe to do).
#
# If you have a custom layer that does not modify the time dimension, and if you want it
# to be able to propagate the current input mask, you should set `self.supports_masking
# = True` in the layer constructor. In this case, the default behavior of
# `compute_mask()` is to just pass the current mask through.
#
# Here's an example of a layer that is whitelisted for mask propagation:
# + colab_type="code"
class MyActivation(keras.layers.Layer):
    """ReLU activation that forwards any incoming mask unchanged."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Opt in: signal to the framework that mask propagation is safe here.
        self.supports_masking = True

    def call(self, inputs):
        return tf.nn.relu(inputs)
# + [markdown] colab_type="text"
# You can now use this custom layer in-between a mask-generating layer (like `Embedding`)
# and a mask-consuming layer (like `LSTM`), and it will pass the mask along so that it
# reaches the mask-consuming layer.
# + colab_type="code"
# Functional model: Embedding generates the padding mask, MyActivation
# (supports_masking=True) passes it through, and the LSTM consumes it.
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
x = MyActivation()(x)  # Will pass the mask along
print("Mask found:", x._keras_mask)
outputs = layers.LSTM(32)(x)  # Will receive the mask
model = keras.Model(inputs, outputs)
# + [markdown] colab_type="text"
# ## Writing layers that need mask information
#
# Some layers are mask *consumers*: they accept a `mask` argument in `call` and use it to
# determine whether to skip certain time steps.
#
# To write such a layer, you can simply add a `mask=None` argument in your `call`
# signature. The mask associated with the inputs will be passed to your layer whenever
# it is available.
#
# Here's a simple example below: a layer that computes a softmax over the time dimension
# (axis 1) of an input sequence, while discarding masked timesteps.
# + colab_type="code"
class TemporalSoftmax(keras.layers.Layer):
    """Softmax over the time dimension (axis 1) that skips masked timesteps.

    The incoming boolean mask has shape (batch, timesteps); it is broadcast
    against the (batch, timesteps, features) input so padded steps contribute
    nothing to either the numerator or the normalizer.
    NOTE(review): assumes a mask is always provided — `mask=None` would raise
    in `tf.cast`, same as the original.
    """

    def call(self, inputs, mask=None):
        broadcast_float_mask = tf.expand_dims(tf.cast(mask, "float32"), -1)
        inputs_exp = tf.exp(inputs) * broadcast_float_mask
        # Bug fix: a softmax normalizes by the sum of the *exponentials*
        # (inputs_exp), not by the sum of the raw inputs, which could be
        # zero or negative and blow up the division.
        inputs_sum = tf.reduce_sum(inputs_exp * broadcast_float_mask, axis=1, keepdims=True)
        return inputs_exp / inputs_sum
# Demo: embed token ids, project to one feature per timestep, masked softmax.
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=10, output_dim=32, mask_zero=True)(inputs)
x = layers.Dense(1)(x)
outputs = TemporalSoftmax()(x)
model = keras.Model(inputs, outputs)
# NOTE(review): the second positional argument lands in Model.call's
# `training`/`mask` slot rather than being a model input — confirm intent.
y = model(np.random.randint(0, 10, size=(32, 100)), np.random.random((32, 100, 1)))
# + [markdown] colab_type="text"
# ## Summary
#
# That is all you need to know about padding & masking in Keras. To recap:
#
# - "Masking" is how layers are able to know when to skip / ignore certain timesteps in
# sequence inputs.
# - Some layers are mask-generators: `Embedding` can generate a mask from input values
# (if `mask_zero=True`), and so can the `Masking` layer.
# - Some layers are mask-consumers: they expose a `mask` argument in their `__call__`
# method. This is the case for RNN layers.
# - In the Functional API and Sequential API, mask information is propagated
# automatically.
# - When using layers in a standalone way, you can pass the `mask` arguments to layers
# manually.
# - You can easily write layers that modify the current mask, that generate a new mask,
# or that consume the mask associated with the inputs.
|
tensorflow_learning/tf2/notebooks/.ipynb_checkpoints/understanding_masking_and_padding-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <img src="../../img/ods_stickers.jpg">
# ## Открытый курс по машинному обучению
# <center>Автор материала: @ldinka.
# # <center>Исследование возможностей BigARTM</center>
#
# ## <center>Тематическое моделирование с помощью BigARTM</center>
# #### Интро
# BigARTM — библиотека, предназначенная для тематической категоризации текстов; делает разбиение на темы без «учителя».
#
# Я собираюсь использовать эту библиотеку для собственных нужд в будущем, но так как она не предназначена для обучения с учителем, решила, что для начала ее стоит протестировать на какой-нибудь уже размеченной выборке. Для этих целей был использован датасет "20 news groups".
#
# Идея эксперимента такова:
# - делим выборку на обучающую и тестовую;
# - обучаем модель на обучающей выборке;
# - «подгоняем» выделенные темы под действительные;
# - смотрим, насколько хорошо прошло разбиение;
# - тестируем модель на тестовой выборке.
# #### Поехали!
# **Внимание!** Данный проект был реализован с помощью Python 3.6 и BigARTM 0.9.0. Методы, рассмотренные здесь, могут отличаться от методов в других версиях библиотеки.
# <img src="../../img/bigartm_logo.png"/>
# ### <font color="lightgrey">Не</font>множко теории
# У нас есть словарь терминов $W = \{w \in W\}$, который представляет из себя мешок слов, биграмм или n-грамм;
#
# Есть коллекция документов $D = \{d \in D\}$, где $d \subset W$;
#
# Есть известное множество тем $T = \{t \in T\}$;
#
# $n_{dw}$ — сколько раз термин $w$ встретился в документе $d$;
#
# $n_{d}$ — длина документа $d$.
# Мы считаем, что существует матрица $\Phi$ распределения терминов $w$ в темах $t$: (фи) $\Phi = (\phi_{wt})$
#
# и матрица распределения тем $t$ в документах $d$: (тета) $\Theta = (\theta_{td})$,
#
# перемножение которых дает нам тематическую модель, или, другими словами, представление наблюдаемого условного распределения $p(w|d)$ терминов $w$ в документах $d$ коллекции $D$:
#
# <center>$\large p(w|d) = \Phi \Theta$</center>
#
# <center>$$\large p(w|d) = \sum_{t \in T} \phi_{wt} \theta_{td}$$</center>
#
# где $\phi_{wt} = p(w|t)$ — вероятности терминов $w$ в каждой теме $t$
#
# и $\theta_{td} = p(t|d)$ — вероятности тем $t$ в каждом документе $d$.
# <img src="../../img/phi_theta.png"/>
# Нам известны наблюдаемые частоты терминов в документах, это:
#
# <center>$ \large \hat{p}(w|d) = \frac {n_{dw}} {n_{d}} $</center>
# Таким образом, наша задача тематического моделирования становится задачей стохастического матричного разложения матрицы $\hat{p}(w|d)$ на стохастические матрицы $\Phi$ и $\Theta$.
#
# Напомню, что матрица является стохастической, если каждый ее столбец представляет дискретное распределение вероятностей, сумма значений каждого столбца равна 1.
# Воспользовавшись принципом максимального правдоподобия, т. е. максимизируя логарифм правдоподобия, мы получим:
#
# <center>$
# \begin{cases}
# \sum_{d \in D} \sum_{w \in d} n_{dw} \ln \sum_{t \in T} \phi_{wt} \theta_{td} \rightarrow \max\limits_{\Phi,\Theta};\\
# \sum_{w \in W} \phi_{wt} = 1, \qquad \phi_{wt}\geq0;\\
# \sum_{t \in T} \theta_{td} = 1, \quad\quad\;\; \theta_{td}\geq0.
# \end{cases}
# $</center>
# Чтобы из множества решений выбрать наиболее подходящее, введем критерий регуляризации $R(\Phi, \Theta)$:
#
# <center>$
# \begin{cases}
# \sum_{d \in D} \sum_{w \in d} n_{dw} \ln \sum_{t \in T} \phi_{wt} \theta_{td} + R(\Phi, \Theta) \rightarrow \max\limits_{\Phi,\Theta};\\
# \sum_{w \in W} \phi_{wt} = 1, \qquad \phi_{wt}\geq0;\\
# \sum_{t \in T} \theta_{td} = 1, \quad\quad\;\; \theta_{td}\geq0.
# \end{cases}
# $</center>
# Два наиболее известных частных случая этой системы уравнений:
# - **PLSA**, вероятностный латентный семантический анализ, когда $R(\Phi, \Theta) = 0$
# - **LDA**, латентное размещение Дирихле:
# $$R(\Phi, \Theta) = \sum_{t,w} (\beta_{w} - 1) \ln \phi_{wt} + \sum_{d,t} (\alpha_{t} - 1) \ln \theta_{td} $$
# где $\beta_{w} > 0$, $\alpha_{t} > 0$ — параметры регуляризатора.
# Однако оказывается запас неединственности решения настолько большой, что на модель можно накладывать сразу несколько ограничений, такой подход называется **ARTM**, или аддитивной регуляризацией тематических моделей:
#
# <center>$
# \begin{cases}
# \sum_{d,w} n_{dw} \ln \sum_{t} \phi_{wt} \theta_{td} + \sum_{i=1}^k \tau_{i} R_{i}(\Phi, \Theta) \rightarrow \max\limits_{\Phi,\Theta};\\
# \sum_{w \in W} \phi_{wt} = 1, \qquad \phi_{wt}\geq0;\\
# \sum_{t \in T} \theta_{td} = 1, \quad\quad\;\; \theta_{td}\geq0.
# \end{cases}
# $</center>
#
# где $\tau_{i}$ — коэффициенты регуляризации.
# Теперь давайте познакомимся с библиотекой BigARTM и разберем еще некоторые аспекты тематического моделирования на ходу.
# Если Вас очень сильно заинтересовала теоретическая часть категоризации текстов и тематического моделирования, рекомендую посмотреть видеолекции из курса Яндекса на Coursera «Поиск структуры в данных» четвертой недели: <a href="https://www.coursera.org/learn/unsupervised-learning/home/week/4">Тематическое моделирование</a>.
# ### BigARTM
# #### Установка
# Естественно, для начала работы с библиотекой ее надо установить. Вот несколько видео, которые рассказывают, как это сделать в зависимости от вашей операционной системы:
# - <a href="https://www.coursera.org/learn/unsupervised-learning/lecture/qmsFm/ustanovka-bigartm-v-windows">Установка BigARTM в Windows</a>
# - <a href="https://www.coursera.org/learn/unsupervised-learning/lecture/zPyO0/ustanovka-bigartm-v-linux-mint">Установка BigARTM в Linux</a>
# - <a href="https://www.coursera.org/learn/unsupervised-learning/lecture/nuIhL/ustanovka-bigartm-v-mac-os-x">Установка BigARTM в Mac OS X</a>
#
# Либо можно воспользоваться инструкцией с официального сайта, которая, скорее всего, будет гораздо актуальнее: <a href="https://bigartm.readthedocs.io/en/stable/installation/index.html">здесь</a>. Там же указано, как можно установить BigARTM в качестве <a href="https://bigartm.readthedocs.io/en/stable/installation/docker.html">Docker-контейнера</a>.
# #### Использование BigARTM
# +
import artm
import re
import numpy as np
import seaborn as sns; sns.set()
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from matplotlib import pyplot as plt
# %matplotlib inline
# -
# Print the installed BigARTM version (environment sanity check).
artm.version()
# Скачаем датасет ***the 20 news groups*** с заранее известным количеством категорий новостей:
from sklearn.datasets import fetch_20newsgroups
# Download/load the "20 newsgroups" corpus; the positional argument is
# data_home, i.e. the local cache directory.
newsgroups = fetch_20newsgroups('../../data/news_data')
# Inspect the 20 category names.
newsgroups['target_names']
# Приведем данные к формату *Vowpal Wabbit*. Так как BigARTM не рассчитан на обучение с учителем, то мы поступим следующим образом:
# - обучим модель на всем корпусе текстов;
# - выделим ключевые слова тем и по ним определим, к какой теме они скорее всего относятся;
# - сравним наши полученные результаты разбиения с истинными значениями.
TEXT_FIELD = "text"

def to_vw_format(document, label=None):
    """Render one document as a Vowpal Wabbit line.

    Format: "<label> |text tok1 tok2 ...\\n". Tokens are lowercased runs of
    3+ word characters. A missing label is written as '0'; note that a label
    equal to 0 is also written as '0' (preserved original behavior).
    """
    # Raw string for the regex: '\w' in a plain string is an invalid escape
    # (DeprecationWarning, SyntaxError in future Python versions).
    tokens = re.findall(r'\w{3,}', document.lower())
    return str(label or '0') + ' |' + TEXT_FIELD + ' ' + ' '.join(tokens) + '\n'
# Full corpus and its integer category labels (0..19).
all_documents = newsgroups['data']
all_targets = newsgroups['target']
# Corpus size sanity check.
len(newsgroups['target'])
# +
# Hold out a test split, then dump both splits in Vowpal Wabbit format.
# The test file is written without labels (label defaults to '0').
train_documents, test_documents, train_labels, test_labels = \
    train_test_split(all_documents, all_targets, random_state=7)
with open('../../data/news_data/20news_train_mult.vw', 'w') as vw_train_data:
    vw_train_data.writelines(
        to_vw_format(text, target)
        for text, target in zip(train_documents, train_labels)
    )
with open('../../data/news_data/20news_test_mult.vw', 'w') as vw_test_data:
    vw_test_data.writelines(map(to_vw_format, test_documents))
# -
# Загрузим данные в необходимый для BigARTM формат:
# Convert the VW training file into BigARTM batches under target_folder.
batch_vectorizer = artm.BatchVectorizer(data_path="../../data/news_data/20news_train_mult.vw",
                                        data_format="vowpal_wabbit",
                                        target_folder="news_batches")
# Данные в BigARTM загружаются порционно, укажем в
# - *data_path* путь к обучающей выборке,
# - *data_format* — формат наших данных, может быть:
# * *bow_n_wd* — это вектор $n_{wd}$ в виду массива *numpy.ndarray*, также необходимо передать соответствующий словарь терминов, где ключ — это индекс вектора *numpy.ndarray* $n_{wd}$, а значение — соответствующий токен.
# ```python
# batch_vectorizer = artm.BatchVectorizer(data_format='bow_n_wd',
# n_wd=n_wd,
# vocabulary=vocabulary)
# ```
# * *vowpal_wabbit* — формат Vowpal Wabbit;
# * *bow_uci* — UCI формат (например, с *vocab.my_collection.txt* и *docword.my_collection.txt* файлами):
# ```python
# batch_vectorizer = artm.BatchVectorizer(data_path='',
# data_format='bow_uci',
# collection_name='my_collection',
# target_folder='my_collection_batches')
# ```
#     * *batches* — данные, уже сконвертированные в батчи с помощью BigARTM;
# - *target_folder* — путь для сохранения батчей.
#
# Пока это все параметры, что нам нужны для загрузки наших данных.
#
# После того, как BigARTM создал батчи из данных, можно использовать их для загрузки:
# Reload the collection from the already-created batches (skips VW parsing).
batch_vectorizer = artm.BatchVectorizer(data_path="news_batches", data_format='batches')
# Инициируем модель с известным нам количеством тем. Количество тем — это гиперпараметр, поэтому если он заранее нам неизвестен, то его необходимо настраивать, т. е. брать такое количество тем, при котором разбиение кажется наиболее удачным.
#
# **Важно!** У нас 20 предметных тем, однако некоторые из них довольно узкоспециализированны и смежны, как например 'comp.os.ms-windows.misc' и 'comp.windows.x', или 'comp.sys.ibm.pc.hardware' и 'comp.sys.mac.hardware', тогда как другие размыты и всеобъемлющи: talk.politics.misc' и 'talk.religion.misc'.
#
# Скорее всего, нам не удастся в чистом виде выделить все 20 тем — некоторые из них окажутся слитными, а другие наоборот раздробятся на более мелкие. Поэтому мы попробуем построить 40 «предметных» тем и одну фоновую. Чем больше вы будем строить категорий, тем лучше мы сможем подстроиться под данные, однако это довольно трудоемкое занятие сидеть потом и распределять в получившиеся темы по реальным категориям (<strike>я правда очень-очень задолбалась!</strike>).
#
# Зачем нужны фоновые темы? Дело в том, что наличие общей лексики в темах приводит к плохой ее интерпретируемости. Выделив общую лексику в отдельную тему, мы сильно снизим ее количество в предметных темах, таким образом оставив там лексическое ядро, т. е. ключевые слова, которые данную тему характеризуют. Также этим преобразованием мы снизим коррелированность тем, они станут более независимыми и различимыми.
# 40 subject topics plus 1 background topic (index T-1) for common vocabulary.
T = 41
model_artm = artm.ARTM(num_topics=T,
                       topic_names=[str(i) for i in range(T)],
                       class_ids={TEXT_FIELD:1},
                       num_document_passes=1,
                       reuse_theta=True,
                       cache_theta=True,
                       seed=4)
# Передаем в модель следующие параметры:
# - *num_topics* — количество тем;
# - *topic_names* — названия тем;
# - *class_ids* — название модальности и ее вес. Дело в том, что кроме самих текстов, в данных может содержаться такая информация, как автор, изображения, ссылки на другие документы и т. д., по которым также можно обучать модель;
# - *num_document_passes* — количество проходов при обучении модели;
# - *reuse_theta* — переиспользовать ли матрицу $\Theta$ с предыдущей итерации;
# - *cache_theta* — сохранить ли матрицу $\Theta$ в модели, чтобы в дальнейшем ее использовать.
#
# Далее необходимо создать словарь; передадим ему какое-нибудь название, которое будем использовать в будущем для работы с этим словарем.
# +
DICTIONARY_NAME = 'dictionary'
# Build the token dictionary by scanning the batches on disk.
dictionary = artm.Dictionary(DICTIONARY_NAME)
dictionary.gather(batch_vectorizer.data_path)
# -
# Инициализируем модель с тем именем словаря, что мы передали выше, можно зафиксировать *random seed* для воспроизводимости результатов:
# Fix the seed for reproducible initialization of the Phi matrix.
np.random.seed(1)
model_artm.initialize(DICTIONARY_NAME)
# Добавим к модели несколько метрик:
# - перплексию (*PerplexityScore*), чтобы идентифицировать сходимость модели
# * Перплексия — это известная в вычислительной лингвистике мера качества модели языка. Можно сказать, что это мера неопределенности или различности слов в тексте.
# - специальный *score* ключевых слов (*TopTokensScore*), чтобы в дальнейшем мы могли идентифицировать по ним наши тематики;
# - разреженность матрицы $\Phi$ (*SparsityPhiScore*);
# - разреженность матрицы $\Theta$ (*SparsityThetaScore*).
# Track convergence (perplexity), Phi/Theta sparsity, and top tokens per topic.
model_artm.scores.add(artm.PerplexityScore(name='perplexity_score',
                                           dictionary=DICTIONARY_NAME))
# Consistency fix: use TEXT_FIELD instead of the hard-coded "text" modality
# name (same value, single source of truth with the rest of the notebook).
model_artm.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score', class_id=TEXT_FIELD))
model_artm.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
model_artm.scores.add(artm.TopTokensScore(name="top_words", num_tokens=15, class_id=TEXT_FIELD))
# Следующая операция *fit_offline* займет некоторое время, мы будем обучать модель в режиме *offline* в 40 проходов. Количество проходов влияет на сходимость модели: чем их больше, тем лучше сходится модель.
# +
# %%time
# Offline training: 40 full passes over the collection.
model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=40)
# -
# Построим график сходимости модели и увидим, что модель сходится довольно быстро:
# Perplexity per pass; a flattening curve indicates convergence.
plt.plot(model_artm.score_tracker["perplexity_score"].value);
# Выведем значения разреженности матриц:
# Final sparsity of the term-topic (Phi) and topic-document (Theta) matrices.
print('Phi', model_artm.score_tracker["sparsity_phi_score"].last_value)
print('Theta', model_artm.score_tracker["sparsity_theta_score"].last_value)
# После того, как модель сошлась, добавим к ней регуляризаторы. Для начала сглаживающий регуляризатор — это *SmoothSparsePhiRegularizer* с большим положительным коэффициентом $\tau$, который нужно применить только к фоновой теме, чтобы выделить в нее как можно больше общей лексики. Пусть тема с последним индексом будет фоновой, передадим в *topic_names* этот индекс:
# Smoothing regularizer: large positive tau applied ONLY to the last
# (background) topic so it soaks up the common vocabulary.
model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi',
                                                            tau=1e5,
                                                            dictionary=dictionary,
                                                            class_ids=TEXT_FIELD,
                                                            topic_names=str(T-1)))
# Дообучим модель, сделав 20 проходов по ней с новым регуляризатором:
# +
# %%time
# Continue training for 20 more passes with the smoothing regularizer active.
model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=20)
# -
# Выведем значения разреженности матриц, заметим, что значение для $\Theta$ немного увеличилось:
# Sparsity after smoothing; Theta typically grows slightly.
print('Phi', model_artm.score_tracker["sparsity_phi_score"].last_value)
print('Theta', model_artm.score_tracker["sparsity_theta_score"].last_value)
# Теперь добавим к модели разреживающий регуляризатор, это тот же *SmoothSparsePhiRegularizer* резуляризатор, только с отрицательным значением $\tau$ и примененный ко всем предметным темам:
# Sparsing regularizer: same type but a large NEGATIVE tau, applied to all
# subject topics (every topic except the background one, T-1).
model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi2',
                                                            tau=-5e5,
                                                            dictionary=dictionary,
                                                            class_ids=TEXT_FIELD,
                                                            topic_names=[str(i) for i in range(T-1)]),
                            overwrite=True)
# +
# %%time
# Another 20 passes with both smoothing and sparsing regularizers active.
model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=20)
# -
# Видим, что значения разреженности увеличились еще больше:
# Sparsity after sparsing; both values increase further.
print(model_artm.score_tracker["sparsity_phi_score"].last_value)
print(model_artm.score_tracker["sparsity_theta_score"].last_value)
# Посмотрим, сколько категорий-строк матрицы $\Theta$ после регуляризации осталось, т. е. не занулилось/выродилось. И это одна категория:
# Number of topics that still have top tokens, i.e. did not degenerate to zero.
len(model_artm.score_tracker["top_words"].last_tokens.keys())
# Теперь выведем ключевые слова тем, чтобы определить, каким образом прошло разбиение, и сделать соответствие с нашим начальным списком тем:
# Print the top tokens of every surviving topic; these are used below to map
# model topics onto the true newsgroup categories.
# Hoisted the loop-invariant `last_tokens` lookup out of the loop and dropped
# the redundant `res_str` temporary; printed output is unchanged.
tokens = model_artm.score_tracker["top_words"].last_tokens
for topic_name in tokens.keys():
    print(topic_name + ': ' + ', '.join(tokens[topic_name]))
# Далее мы будем подгонять разбиение под действительные темы с помощью *confusion matrix*.
# Map each newsgroup name to its integer label, in the dataset's canonical order.
target_dict = {name: idx for idx, name in enumerate([
    'alt.atheism',
    'comp.graphics',
    'comp.os.ms-windows.misc',
    'comp.sys.ibm.pc.hardware',
    'comp.sys.mac.hardware',
    'comp.windows.x',
    'misc.forsale',
    'rec.autos',
    'rec.motorcycles',
    'rec.sport.baseball',
    'rec.sport.hockey',
    'sci.crypt',
    'sci.electronics',
    'sci.med',
    'sci.space',
    'soc.religion.christian',
    'talk.politics.guns',
    'talk.politics.mideast',
    'talk.politics.misc',
    'talk.religion.misc',
])}
# Manually curated mapping: mixed[i] is the true newsgroup category that model
# topic i turned out to represent; '-' marks the background topic (index T-1).
mixed = [
    'comp.sys.ibm.pc.hardware',
    'talk.politics.mideast',
    'sci.electronics',
    'rec.sport.hockey',
    'sci.med',
    'rec.motorcycles',
    'comp.graphics',
    'rec.sport.hockey',
    'talk.politics.mideast',
    'talk.religion.misc',
    'rec.autos',
    'comp.graphics',
    'sci.space',
    'soc.religion.christian',
    'comp.os.ms-windows.misc',
    'sci.crypt',
    'comp.windows.x',
    'misc.forsale',
    'sci.space',
    'sci.crypt',
    'talk.religion.misc',
    'alt.atheism',
    'comp.os.ms-windows.misc',
    'alt.atheism',
    'sci.med',
    'comp.os.ms-windows.misc',
    'soc.religion.christian',
    'talk.politics.guns',
    'rec.autos',
    'rec.autos',
    'talk.politics.mideast',
    'rec.sport.baseball',
    'talk.religion.misc',
    'talk.politics.misc',
    'rec.sport.hockey',
    'comp.sys.mac.hardware',
    'misc.forsale',
    'sci.space',
    'talk.politics.guns',
    'rec.autos',
    '-'
]
# Построим небольшой отчет о правильности нашего разбиения:
# Assign each training document the label of its most probable subject topic
# (the background topic T-1 is excluded), then compare with the true labels.
theta_train = model_artm.get_theta()
model_labels = []
keys = np.sort([int(i) for i in theta_train.keys()])
for i in keys:
    # Arg-max over topic probabilities for document i, skipping background.
    max_val = 0
    max_idx = 0
    for j in theta_train[i].keys():
        if j == str(T-1):
            continue
        if theta_train[i][j] > max_val:
            max_val = theta_train[i][j]
            max_idx = j
    topic = mixed[int(max_idx)]
    if topic == '-':
        # NOTE(review): unreachable in practice (the background topic is
        # skipped above); if it ever fired, target_dict[topic] below would
        # raise KeyError — confirm intended handling.
        print(i, '-')
    label = target_dict[topic]
    model_labels.append(label)
# Bug fix: the classification report was accidentally printed twice.
print(classification_report(train_labels, model_labels))
# Confusion matrix (transposed: rows = predicted, cols = true) and accuracy.
mat = confusion_matrix(train_labels, model_labels)
sns.heatmap(mat.T, annot=True, fmt='d', cbar=False)
plt.xlabel('True label')
plt.ylabel('Predicted label');
accuracy_score(train_labels, model_labels)
# Нам удалось добиться 80% *accuracy*. По матрице ответов мы видим, что для модели темы *comp.sys.ibm.pc.hardware* и *comp.sys.mac.hardware* практически не различимы (<strike>честно говоря, для меня тоже</strike>), в остальном все более или менее прилично.
#
# Проверим модель на тестовой выборке:
# Batch the held-out test documents (written above without labels).
batch_vectorizer_test = artm.BatchVectorizer(data_path="../../data/news_data/20news_test_mult.vw",
                                             data_format="vowpal_wabbit",
                                             target_folder="news_batches_test")
# Infer Theta for the test set and label each document by its best subject topic.
theta_test = model_artm.transform(batch_vectorizer_test)
test_score = []
for i in range(len(theta_test.keys())):
    # Arg-max over topic probabilities, skipping the background topic (T-1).
    max_val = 0
    max_idx = 0
    for j in theta_test[i].keys():
        if j == str(T-1):
            continue
        if theta_test[i][j] > max_val:
            max_val = theta_test[i][j]
            max_idx = j
    topic = mixed[int(max_idx)]
    # NOTE(review): unlike the training loop, '-' is not special-cased here;
    # target_dict['-'] would raise KeyError if it ever occurred.
    label = target_dict[topic]
    test_score.append(label)
# Test-set report, confusion matrix (rows = predicted) and accuracy.
print(classification_report(test_labels, test_score))
mat = confusion_matrix(test_labels, test_score)
sns.heatmap(mat.T, annot=True, fmt='d', cbar=False)
plt.xlabel('True label')
plt.ylabel('Predicted label');
accuracy_score(test_labels, test_score)
# Итого почти 77%, незначительно хуже, чем на обучающей.
# **Вывод:** безумно много времени пришлось потратить на подгонку категорий к реальным темам, но в итоге я осталась довольна результатом. Такие смежные темы, как *alt.atheism*/*soc.religion.christian*/*talk.religion.misc* или *talk.politics.guns*/*talk.politics.mideast*/*talk.politics.misc* разделились вполне неплохо. Думаю, что я все-таки попробую использовать BigARTM в будущем для своих <strike>корыстных</strike> целей.
|
jupyter_russian/projects_individual/project_text-analysis-with-BigARTM_ldinka.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Combining measurements
#
# When we do a fit, we can have additional knowledge about a parameter from other measurements. This can be taken into account either through a simultaneous fit or by adding a constraint (subsidiary measurement).
# ## Adding a constraint
#
# If we know a parameters value from a different measurement and want to constraint this using its uncertainty, a Gaussian constraint can be added to the likelihood as
#
# \begin{align}
# \mathrm{constr_i} = \mathrm{Gauss}(\mu_{measured}; \theta_i, \sigma_{measured})
# \end{align}
#
# In general, additional terms can be added to the likelihood arbitrarily in zfit, be it to incorporate other shaped measurements or to add penalty terms to confine a fit within boundaries.
# + jupyter={"outputs_hidden": false}
import hepunits as u
import matplotlib.pyplot as plt
import mplhep
import numpy as np
import particle.literals as lp
import tensorflow as tf
import zfit
plt.rcParams['figure.figsize'] = (8,6)
# + jupyter={"outputs_hidden": false}
# True generation parameters: B+ mass as the Gaussian mean, 50 MeV width.
mu_true = lp.B_plus.mass * u.MeV
sigma_true = 50 * u.MeV
# number of signal and background
n_sig_rare = 120
n_bkg_rare = 700
# + jupyter={"outputs_hidden": false}
# create some data
signal_np = np.random.normal(loc=mu_true, scale=sigma_true, size=n_sig_rare)
bkg_np_raw = np.random.exponential(size=20000, scale=700)
# Oversample, keep draws < 1000, take the first n_bkg_rare, then shift into
# the mass window starting at 5000.
bkg_np = bkg_np_raw[bkg_np_raw<1000][:n_bkg_rare] + 5000 # just cutting right, but zfit could also cut
# + jupyter={"outputs_hidden": false}
# Firstly, the observable and its range is defined
obs = zfit.Space('Bmass', (5000, 6000)) # for whole range
# + jupyter={"outputs_hidden": false}
# load data into zfit
data = zfit.Data.from_numpy(obs=obs, array=np.concatenate([signal_np, bkg_np], axis=0))
# + jupyter={"outputs_hidden": false}
# Parameters are specified: (name (unique), initial, lower, upper) whereas lower, upper are optional
mu = zfit.Parameter('mu', 5279, 5100, 5400)
sigma = zfit.Parameter('sigma', 20, 1, 200)
signal = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
lam = zfit.Parameter('lambda', -0.002, -0.1, -0.00001, step_size=0.001) # floating, also without limits
comb_bkg = zfit.pdf.Exponential(lam, obs=obs)
# Yields are free parameters of the extended (yield-aware) fit.
sig_yield = zfit.Parameter('sig_yield', n_sig_rare + 30,
                           step_size=3) # step size: default is small, use appropriate
bkg_yield = zfit.Parameter('bkg_yield', n_bkg_rare - 40, step_size=1)
# Create extended PDFs
extended_sig = signal.create_extended(sig_yield)
extended_bkg = comb_bkg.create_extended(bkg_yield)
# The final model is the combination of the signal and background PDF
model = zfit.pdf.SumPDF([extended_bkg, extended_sig])
# + jupyter={"outputs_hidden": false}
# Gaussian constraint on mu from an external measurement: 5275 +/- 15 MeV.
constraint = zfit.constraint.GaussianConstraint(mu, observation=5275 * u.MeV, uncertainty=15 * u.MeV)
# + jupyter={"outputs_hidden": false}
# The constraint term is added as an extra factor to the extended unbinned NLL.
nll = zfit.loss.ExtendedUnbinnedNLL(model, data, constraints=constraint)
minimizer = zfit.minimize.Minuit(use_minuit_grad=True)
result = minimizer.minimize(nll)
result.hesse();  # parabolic (Hessian) uncertainties
# + jupyter={"outputs_hidden": false}
print(result.params)
# -
# ## Simultaneous fits
#
# Sometimes, we don't want to fit a single channel but multiple with the same likelihood and having shared parameters between them. In this example, we will fit the decay simultaneously to its resonant control channel.
#
# A simultaneous likelihood is the product of different likelihoods defined by
# \begin{align}
# \mathcal{L}_{f(x)}(\theta | {data_0, data_1, ..., data_n}) &= \prod_{i=1}^{n} \mathcal{L}(\theta_i, data_i)
# \end{align}
#
# and becomes in the NLL a sum
#
# \begin{align}
# \mathrm{NLL}_{f(x)}(\theta | {data_0, data_1, ..., data_n}) &= \sum_{i=1}^{n} \mathrm{NLL}(\theta_i, data_i)
# \end{align}
#
#
# + jupyter={"outputs_hidden": false}
# Resonant control channel: much larger signal yield, narrower peak (0.7x).
n_sig_reso = 40000
n_bkg_reso = 3000
# + jupyter={"outputs_hidden": false}
# create some data
signal_np_reso = np.random.normal(loc=mu_true, scale=sigma_true * 0.7, size=n_sig_reso)
bkg_np_raw_reso = np.random.exponential(size=20000, scale=900)
bkg_np_reso = bkg_np_raw_reso[bkg_np_raw_reso<1000][:n_bkg_reso] + 5000
# load data into zfit
obs_reso = zfit.Space('Bmass_reso', (5000, 6000))
data_reso = zfit.Data.from_numpy(obs=obs_reso, array=np.concatenate([signal_np_reso, bkg_np_reso], axis=0))
# -
# ### Sharing and composing parameters
#
# As an example, the same signal shape will be used with the identical `mu` yet a scaled `sigma`. This means that the `sigma` of our control mode corresponds to the `sigma` of our signal times a scaling parameter. Therefore, the `scaled_sigma` is a function of two other parameters, `sigma` and `sigma_scaling` and _cannot_ change its value independently. There are two fundamentally distinct types to represent this behavior in zfit, independent (`Parameter`) and dependent parameters (`ComplexParameter`, `ComposedParameter`,...)
#
# #### Independent parameter
#
# An independent parameter has, as a distinctive method, a `set_value` that _changes_ the value of the parameter. In a fit, or in general, these are _the only object_ that can directly change their value and therefore do not depend on other objects while most other objects depend on Parameters.
# As a consequence, this parameters can have limits (which effectively restrict the possible values a `Parameter` can be assigned to) and have a `step_size`, a hint to any minimization algorithm about the order of magnitude that a change in the parameter will have on the loss.
#
# Another attribute is a `floating` flag: if set to `False`, the parameter won't be floating in the fit, whether explicitly given or implicitly inferred from the dependencies.
#
# #### Dependent parameter
#
# These are single-valued functions effectively that depend on other objects, usually other parameters. Therefore, a dependent parameter does not have a `set_value` function and also does not posses limits. The latter is preferred to be set with the `Parameter` it depends on, however, if a hard limit is required, this can always be enforced in the definition of a `ComposedParameter`.
#
# The most notable parameter is the `ComposedParameter`, which returns an arbitrary function of its input arguments, the latter which can be specified with the `params` argument.
#
# While these parameters *cannot* change their value explicitly and therefore won't be used by a minimizer, the zfit minimizers automatically extract the independent parameters that a dependent parameter depends on (if this is given as an argument.)
#
# As a consequence, these parameters also miss a `step_size` attribute. Furthermore, `floating` can't be used, neither set nor retrieved; it is rather advised to check directly with its dependencies.
#
# #### Sharing parameters
#
# Since in zfit, every parameter object is unique, also defined by its name, it is straightforward to know when a parameter is shared in the loss and when it is not: if the same object is used in two places, it is shared. This can be arbitrarily mixed.
# + jupyter={"outputs_hidden": false}
# Firstly, we create a free scaling parameter (independent, with limits and step size)
sigma_scaling = zfit.Parameter('sigma_scaling', 0.9, 0.1, 10, step_size=0.1)
def sigma_scaled_fn(sigma, sigma_scaling):
return sigma * sigma_scaling # this can be an arbitrary function
sigma_scaled = zfit.ComposedParameter('sigma scaled', # name
sigma_scaled_fn, # function
params=[sigma, sigma_scaling] # the objects used inside the function
)
# + jupyter={"outputs_hidden": false}
# Resonant-mode model: a Gaussian signal that shares `mu` with the rare mode
# but uses the scaled width, plus an exponential combinatorial background.
signal_reso = zfit.pdf.Gauss(mu=mu, # the same as for the rare mode
                             sigma=sigma_scaled,
                             obs=obs_reso
                             )
lambda_reso = zfit.Parameter('lambda_reso', -0.002, -0.01, 0.0001) # floating
comb_bkg_reso_pdf = zfit.pdf.Exponential(lambda_reso, obs=obs_reso)
# Yields start near (but not at) the generated values n_sig_reso / n_bkg_reso.
reso_sig_yield = zfit.Parameter('reso_sig_yield', n_sig_reso - 100, 0, n_sig_reso * 3,
                                step_size=1) # step size: default is small, use appropriate
reso_bkg_yield = zfit.Parameter('reso_bkg_yield', n_bkg_reso + 70, 0, 2e5, step_size=1)
# Create the extended models (pdf + yield) and sum them into the total pdf.
extended_sig_reso = signal_reso.create_extended(reso_sig_yield)
extended_bkg_reso = comb_bkg_reso_pdf.create_extended(reso_bkg_yield)
model_reso = zfit.pdf.SumPDF([extended_bkg_reso, extended_sig_reso])
# + [markdown] jupyter={"outputs_hidden": false}
# To implement the simultaneous fit, there are two ways to achieve this in zfit. As an important distinction to other frameworks, zfit translates the above equation
# \begin{align}
# \mathrm{NLL}_{f(x)}(\theta | {data_0, data_1, ..., data_n}) &= \sum_{i=1}^{n} \mathrm{NLL}(\theta_i, data_i)
# \end{align}
#
# directly into code.
#
# We can build two losses and add them directly, or give a list of models and data, from which one loss each is built and then summed.
# + jupyter={"outputs_hidden": false}
# Build one extended NLL per channel; their sum is the simultaneous loss.
nll_rare = zfit.loss.ExtendedUnbinnedNLL(model, data)
nll_reso = zfit.loss.ExtendedUnbinnedNLL(model_reso, data_reso)
nll_simultaneous = nll_rare + nll_reso
# + jupyter={"outputs_hidden": false}
# Minimize the combined loss; shared parameters (mu, sigma) are fitted once.
result_simultaneous = minimizer.minimize(nll_simultaneous)
# + jupyter={"outputs_hidden": false}
# Hessian-based (parabolic) uncertainties on the fitted parameters.
result_simultaneous.hesse()
# + jupyter={"outputs_hidden": false}
print(result_simultaneous.params)
# -
# ### Plotting a simultaneous loss
#
# Since the definition of a simultaneous fit is as above, it is simple to plot each component separately: either by using the attributes of the loss to access the models and plot in a general fashion, or by directly reusing the model and data from before; we created them manually before.
# + jupyter={"outputs_hidden": false}
# Sets the values of the parameters to the result of the simultaneous fit
# in case they were modified. Plotting below evaluates the pdfs at the
# parameters' current values, so this makes the plots reflect the fit result.
zfit.param.set_values(nll_simultaneous.get_params(), result_simultaneous)
def plot_fit_projection(model, data, nbins=30, ax=None):
    """Draw the data as an error-bar histogram and overlay the total pdf
    together with its components; return the matplotlib axis used.
    """
    if ax is None:
        ax = plt.gca()
    lower, upper = data.data_range.limit1d
    # Data histogram rendered with mplhep in error-bar style.
    counts, bin_edges = np.histogram(data.unstack_x(), bins=nbins)
    mplhep.histplot(counts, bins=bin_edges, histtype="errorbar", yerr=True,
                    label="Data", ax=ax, color="black")
    binwidth = np.diff(bin_edges)[0]
    x = tf.linspace(lower, upper, num=1000)  # or np.linspace
    # Pdfs are scaled by the bin width so the curves match the counts.
    ax.plot(x, model.ext_pdf(x) * binwidth, label="total", color="royalblue")
    components = zip(model.get_models(), ["background", "signal"], ["forestgreen", "crimson"])
    for component, component_label, colour in components:
        ax.plot(x, component.ext_pdf(x) * binwidth, label=component_label, color=colour)
    ax.set_title(data.data_range.obs[0])
    ax.set_xlim(lower, upper)
    ax.legend(fontsize=15)
    return ax
# One projection per channel: the loss exposes its models and data in order,
# so rare mode goes on the left axis, resonant mode on the right.
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
for mod, dat, ax, nb in zip(nll_simultaneous.model, nll_simultaneous.data, axs, [30, 60]):
    plot_fit_projection(mod, dat, nbins=nb, ax=ax)
# -
# ## Discovery test
#
#
# We observed an excess of our signal:
# + jupyter={"outputs_hidden": false}
# Fitted signal yield in the rare mode — the observed excess.
print(result_simultaneous.params[sig_yield])
# -
# Now we would like to compute the significance of this observation, or in other words the probability that this observation is the result of a statistical fluctuation. To do so we have to perform a hypothesis test where the null and alternative hypotheses are defined as:
#
# * $H_{0}$, the null or background only hypothesis, i.e. $N_{sig} = 0$;
# * $H_{1}$, the alternative hypothesis, i.e $N_{sig} = \hat{N}_{sig}$, where $\hat{N}_{sig}$ is the fitted value of $N_{sig}$ printed above.
#
# In `hepstats` to formulate a hypothesis you have to use the `POI` (Parameter Of Interest) class.
# + jupyter={"outputs_hidden": false}
from hepstats.hypotests.parameters import POI
# the null (background-only) hypothesis: N_sig fixed to 0
sig_yield_poi = POI(sig_yield, 0)
# -
# What the `POI` class does is to take as input a `zfit.Parameter` instance and a value corresponding to a given hypothesis. You can notice that we didn't define here the alternative hypothesis as in the discovery test the value of POI for alternate is set to the best fit value.
#
# The test statistic used is the profile likelihood ratio and defined as:
#
# \begin{equation}
# q_{0} = \left\{
# \begin{array}{ll}
# -2 \ln \frac{\mathcal{L}(N_{sig}=0, \; \hat{\hat{\theta}})}{\mathcal{L}(N_{sig}=\hat{N}_{sig}, \; \hat{\theta})} & \mbox{if } \; \hat{N}_{sig} \geq 0 \\
# 0 & \mbox{if } \; \hat{N}_{sig} < 0
# \end{array}
# \right.
# \end{equation}
#
# where $\hat{\theta}$ are the best fitted values of the nuisances parameters (i.e. background yield, exponential slope...), while $\hat{\hat{\theta}}$ are the fitted values of the nuisances when ${N}_{sig} = 0$.
#
# From the test statistic distribution a p-value can computed as
#
# \begin{equation}
# p_{0} = \int_{q_{0}^{obs}}^{\infty} f(q_{0} |H_{0}) dq_{0}
# \end{equation}
#
# where $q_{0}^{obs}$ is the value of the test statistic evaluated on observed data.
#
# The construction of the test statistic and the computation of the p-value is done in a `Calculator` object in `hepstats`. In this example we will use the `AsymptoticCalculator` calculator, which assumes that $q_{0}$ follows a $\chi^2(ndof=1)$, which simplifies the p-value computation to
#
# \begin{equation}
# p_{0} = 1 - \Phi\bigg({\sqrt{q_{0}^{obs}}}\bigg).
# \end{equation}
#
# The calculator objects takes as input the likelihood function and a minimizer to profile the likelihood.
# + jupyter={"outputs_hidden": false}
from hepstats.hypotests.calculators import AsymptoticCalculator
# construction of the calculator instance from the loss; the best-fit result
# is attached so it is not recomputed
calculator = AsymptoticCalculator(input=nll_simultaneous, minimizer=minimizer)
calculator.bestfit = result_simultaneous
# equivalent to above: passing the fit result directly implies both
calculator = AsymptoticCalculator(input=result_simultaneous, minimizer=minimizer)
# -
# There is another calculator in `hepstats` called `FrequentistCalculator` which constructs the test statistic distribution $f(q_{0} |H_{0})$ with pseudo-experiments (toys), but it takes more time.
#
# The `Discovery` class is a high-level class that takes as input a calculator and a `POI` instance representing the null hypothesis; it basically asks the calculator to compute the p-value and also computes the significance as
#
# \begin{equation}
# Z = \Phi^{-1}(1 - p_0).
# \end{equation}
# + jupyter={"outputs_hidden": false}
from hepstats.hypotests import Discovery
# Discovery test of the null (N_sig = 0) hypothesis; prints p-value and Z.
discovery = Discovery(calculator=calculator, poinull=sig_yield_poi)
discovery.result()
# -
# So we get a significance of about $7\sigma$ which is well above the $5 \sigma$ threshold for discoveries 😃.
#
# ## Upper limit calculation
#
# Let's try to compute the discovery significance with a lower number of generated signal events.
# + jupyter={"outputs_hidden": false}
# Sets the values of the parameters to the result of the simultaneous fit
zfit.param.set_values(nll_simultaneous.get_params(), result_simultaneous)
# Fix the scaling: it is constrained by the resonant channel, not by the toy.
sigma_scaling.floating=False
# Creates a sampler that will draw events from the model
sampler = model.create_sampler()
# Creates new simultaneous loss with the sampled (toy) rare-mode data
nll_simultaneous_low_sig = zfit.loss.ExtendedUnbinnedNLL(model, sampler) + nll_reso
# + jupyter={"outputs_hidden": false}
# Samples with sig_yield = 10. Since the model is extended the number of
# signal generated is drawn from a poisson distribution with lambda = 10.
sampler.resample({sig_yield: 10})
# + jupyter={"outputs_hidden": false}
# Re-run the discovery test on the low-signal toy dataset.
calculator_low_sig = AsymptoticCalculator(input=nll_simultaneous_low_sig, minimizer=minimizer)
discovery_low_sig = Discovery(calculator=calculator_low_sig, poinull=sig_yield_poi)
discovery_low_sig.result()
print(f"\n {calculator_low_sig.bestfit.params} \n")
# -
# We might consider computing an upper limit on the signal yield instead. The test statistic for an upper limit calculation is
#
# \begin{equation}
# q_{N_{sig}} = \left\{
# \begin{array}{ll}
# -2 \ln \frac{\mathcal{L}(N_{sig}, \; \hat{\hat{\theta}})}{\mathcal{L}(N_{sig}=\hat{N}_{sig}, \; \hat{\theta})} & \mbox{if } \; \hat{N}_{sig} \leq N_{sig} \\
# 0 & \mbox{if } \; \hat{N}_{sig} > N_{sig}.
# \end{array}
# \right.
# \end{equation}
#
# and the p-value is
#
# \begin{equation}
# p_{N_{sig}} = \int_{q_{N_{sig}}^{obs}}^{\infty} f(q_{N_{sig}} |N_{sig}) dq_{N_{sig}}.
# \end{equation}
#
# The upper limit on $N_{sig}$, $N_{sig, \uparrow}$, is found for $p_{N_{sig, \uparrow}} = 1 - \alpha$, $\alpha$ being the confidence level (typically $95 \%$). The upper limit is found by interpolation of the p-values as a function of $N_{sig}$, which is done by the `UpperLimit` class. We have to give the range of values of $N_{sig}$ to scan using the `POIarray` class, which, like the `POI` class, takes the parameter as input but accepts several values at which to evaluate it instead of one.
# + jupyter={"outputs_hidden": false}
from hepstats.hypotests import UpperLimit
from hepstats.hypotests.parameters import POIarray
# Background only hypothesis (the alternative for the CLs-style limit).
bkg_only = POI(sig_yield, 0)
# Range of Nsig values to scan; the limit is interpolated between points.
sig_yield_scan = POIarray(sig_yield, np.linspace(0, 70, 10))
ul = UpperLimit(calculator=calculator_low_sig, poinull=sig_yield_scan, poialt=bkg_only)
# 95% confidence-level upper limit (alpha = 0.05); semicolon mutes the output.
ul.upperlimit(alpha=0.05);
# + jupyter={"outputs_hidden": false}
from utils import plotlimit
plotlimit(ul, CLs=False)
# -
#
#
# ## Splot
#
# This is now a demonstration of the **sPlot** algorithm, described in [Pivk:2004ty](https://arxiv.org/pdf/physics/0402083.pdf).
#
# If a data sample is populated by different sources of events, like signal and background, **sPlot** is able to unfold the contributions of the different sources for a given variable.
#
# Let's construct a dataset with two variables, the invariant mass and lifetime, for the resonant signal defined above and the combinatorial background.
# + jupyter={"outputs_hidden": false}
# Signal distributions: mass from the fitted resonant pdf, lifetime exponential.
nsig_sw = 20000
np_sig_m_sw = signal_reso.sample(nsig_sw).numpy().reshape(-1,)
np_sig_t_sw = np.random.exponential(size=nsig_sw, scale=1)
# Background distributions: mass from the combinatorial pdf, lifetime Gaussian.
nbkg_sw = 150000
np_bkg_m_sw = comb_bkg_reso_pdf.sample(nbkg_sw).numpy().reshape(-1,)
np_bkg_t_sw = np.random.normal(size=nbkg_sw, loc=2.0, scale=2.5)
# Lifetime cut: drop unphysical t <= 0; the same mask is applied to the mass
# array so the two background arrays stay aligned event-by-event.
t_cut = np_bkg_t_sw > 0
np_bkg_t_sw = np_bkg_t_sw[t_cut]
np_bkg_m_sw = np_bkg_m_sw[t_cut]
# Mass distribution (signal + background merged)
np_m_sw = np.concatenate([np_sig_m_sw, np_bkg_m_sw])
# Lifetime distribution
np_t_sw = np.concatenate([np_sig_t_sw, np_bkg_t_sw])
# Plots the mass and lifetime distribution.
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
axs[0].hist([np_bkg_m_sw, np_sig_m_sw], bins=50, stacked=True, label=("background", "signal"), alpha=.7)
axs[0].set_xlabel("m")
axs[0].legend(fontsize=15)
axs[1].hist([np_bkg_t_sw, np_sig_t_sw], bins=50, stacked=True, label=("background", "signal"), alpha=.7)
axs[1].set_xlabel("t")
axs[1].legend(fontsize=15);
# -
# In this particular example we want to unfold the signal lifetime distribution. To do so **sPlot** needs a discriminant variable to determine the yields of the various sources using an <ins>extended</ins> maximum likelihood fit.
# + jupyter={"outputs_hidden": false}
# Builds the loss: extended fit of the mass (discriminant) distribution.
data_sw = zfit.Data.from_numpy(obs=obs_reso, array=np_m_sw)
nll_sw = zfit.loss.ExtendedUnbinnedNLL(model_reso, data_sw)
# This parameter was useful in the simultaneous fit but not anymore so we fix it.
sigma_scaling.floating=False
# Minimizes the loss.
result_sw = minimizer.minimize(nll_sw)
print(result_sw.params)
# + jupyter={"outputs_hidden": false}
# Visualization of the result.
zfit.param.set_values(nll_sw.get_params(), result_sw)
plot_fit_projection(model_reso, data_sw, nbins=100)
# -
# **sPlot** will use the fitted yield for each sources to derive the so-called **sWeights** for each data point:
#
# \begin{equation}
# W_{n}(x) = \frac{\sum_{j=1}^{N_S} V_{nj} f_j(x)}{\sum_{k=1}^{N_S} N_{k}f_k(x)}
# \end{equation}
#
# with
#
# \begin{equation}
# V_{nj}^{-1} = \sum_{e=1}^{N} \frac{f_n(x_e) f_j(x_e)}{(\sum_{k=1}^{N_S} N_{k}f_k(x))^2}
# \end{equation}
#
#
# where ${N_S}$ is the number of sources in the data sample, here 2. The index $n$ represents the source, for instance $0$ is the signal and $1$ is the background, then $f_0$ and $N_0$ are the pdf and yield for the signal.
#
# In `hepstats` the **sWeights** are computed with the `compute_sweights` function, which takes as arguments the <ins>fitted</ins> extended model and the discriminant data (on which the fit was performed).
# + jupyter={"outputs_hidden": false}
from hepstats.splot import compute_sweights
# One sWeight array per yield parameter, keyed by that parameter.
weights = compute_sweights(model_reso, data_sw)
print(weights)
# + jupyter={"outputs_hidden": false}
# Sanity check: each sum should be close to the corresponding fitted yield.
print("Sum of signal sWeights: ", np.sum(weights[reso_sig_yield]))
print("Sum of background sWeights: ", np.sum(weights[reso_bkg_yield]))
# -
# Now we can apply the signal **sWeights** on the lifetime distribution and retrieve its signal components.
# + jupyter={"outputs_hidden": false}
# Left: sWeights vs mass (events sorted by mass so the lines are smooth).
# Right: signal-weighted lifetime histogram vs the true signal lifetimes.
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
nbins = 40
sorter = np_m_sw.argsort()
axs[0].plot(np_m_sw[sorter], weights[reso_sig_yield][sorter], label="$w_\\mathrm{sig}$")
axs[0].plot(np_m_sw[sorter], weights[reso_bkg_yield][sorter], label="$w_\\mathrm{bkg}$")
# The two weights sum to 1 per event, a defining property of sWeights.
axs[0].plot(np_m_sw[sorter], weights[reso_sig_yield][sorter] + weights[reso_bkg_yield][sorter],
            "-k", label="$w_\\mathrm{sig} + w_\\mathrm{bkg}$")
axs[0].axhline(0, color="0.5")
axs[0].legend(fontsize=15)
axs[0].set_xlim(5000, 5600)
axs[1].hist(np_t_sw, bins=nbins, range=(0, 6), weights=weights[reso_sig_yield], label="weighted histogram", alpha=.5)
axs[1].hist(np_sig_t_sw, bins=nbins, range=(0, 6), histtype="step", label="true histogram", lw=1.5)
axs[1].set_xlabel("t")
axs[1].legend(fontsize=15);
# -
# Be careful the **sPlot** technique works only on variables that are uncorrelated with the discriminant variable.
# + jupyter={"outputs_hidden": false}
# Near-zero correlation here validates applying sWeights to the lifetime.
print(f"Correlation between m and t: {np.corrcoef(np_m_sw, np_t_sw)[0, 1]}")
# -
# Let's apply the signal **sWeights** to the mass distribution to see how bad the results of **sPlot** are when applied to a variable that is correlated with the discriminant variable.
# + jupyter={"outputs_hidden": false}
# Signal-weighted mass histogram: m is the discriminant itself (fully
# correlated), so this illustrates sPlot misuse rather than a valid unfolding.
plt.hist(np_m_sw, bins=100, range=(5000, 6000), weights=weights[reso_sig_yield]);
# + jupyter={"outputs_hidden": false}
|
guides/constraints_simultaneous_fit_discovery_splot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: gcn
# language: python
# name: gcn
# ---
# +
import os
import argparse
import json
import shutil
import numpy as np
import torch
import skvideo.io
from processor.io import IO
import tools
import tools.utils as utils
from IPython.display import clear_output
# -
# NOTE(review): IO() presumably loads the model and device config — confirm
# against processor/io.py.
p = IO()
# clear_output()
# Inference only: disable dropout/batch-norm training behavior.
p.model.eval()
# video = utils.video.get_video_frames('/media/simslab-cs/A/videoset/cgutest.mp4')
video = utils.video.get_video_frames('/media/simslab-cs/A/videoset/F6_cut/F6_082652_082730.avi')
height, width, _ = video[0].shape
# +
# video_info = utils.openpose.json_pack(output_snippets_dir, video_name, width, height)
# Convert the pose-estimation JSON into the video_info structure expected by
# utils.video.video_info_parsing: a list of per-frame skeletons.
file_json = 'after-filling.json'
with open(file_json, encoding='utf-8') as data_file:
    data = json.loads(data_file.read())
sequence_info = []
# NOTE(review): `weight` actually holds the image WIDTH (and shadows nothing);
# `height` overwrites the video-frame height read above — confirm intended.
weight = data.get('## description').get('image_width')
height = data.get('## description').get('image_height')
for frame_number in data.keys():
    # Skip non-numeric keys such as the '## description' header entry.
    try:
        frame_number = int(frame_number)
    except Exception as e:
        continue
    # Take the detection with the highest confidence score — assumed to sit at
    # key index 2 of the frame's dict. TODO confirm against the JSON layout.
    id_name = list(data.get(str(frame_number)).keys())[2]
    frame_id = frame_number
    frame_data = {'frame_index': frame_id}
    skeletons = []
    score, coordinates = [], []
    skeleton = {}
    # Denormalize each joint: positions are stored in [0, 1], scaled back to
    # pixels with rounding (+0.5 before int truncation).
    for part in data.get(str(frame_number)).get(id_name):
        coordinates += [int(part.get('position')[0]*weight+0.5),int(part.get('position')[1]*height+0.5)]
        score += [part.get('score')]
    skeleton['pose'] = coordinates
    skeleton['score'] = score
    skeletons +=[skeleton]
    frame_data['skeleton'] = skeletons
    sequence_info += [frame_data]
# Wrap with a placeholder label; label_index -1 marks "unlabeled".
video_info = dict()
video_info['data'] = sequence_info
video_info['label'] = 'unknowns'
video_info['label_index'] = -1
# -
# Parse the assembled video_info into a pose tensor and run the ST-GCN model.
pose, _ = utils.video.video_info_parsing(video_info)
data = torch.from_numpy(pose)
# Add the batch dimension expected by the network.
data = data.unsqueeze(0)
data = data.float().to(p.dev).detach()
output, feature = p.model.extract_feature(data)
# Drop the batch dimension again.
output = output[0]
feature = feature[0]
# Per-location feature magnitude (L2 norm over channels), e.g. for heatmaps.
intensity = (feature*feature).sum(dim=0)**0.5
intensity = intensity.cpu().detach().numpy()
# Collapse the remaining (temporal/spatial/person) dims to per-class scores
# and pick the top class. NOTE(review): dim order assumed — confirm.
label = output.sum(dim=3).sum(dim=2).sum(dim=1).argmax(dim=0)
label_name_path = './resource/kinetics_skeleton/label_name.txt'
with open(label_name_path) as f:
    label_name = f.readlines()
    label_name = [line.rstrip() for line in label_name]
print('Prediction result: {}'.format(label_name[label]))
print('Done.')
print('Done.')
# Aggregate per-class scores again and print the five best-scoring actions.
# The loop appears twice because the notebook cell was duplicated.
label = output.sum(dim=3).sum(dim=2).sum(dim=1)
scores = label.cpu().detach().numpy()
idx = scores.argsort()[-5:][::-1]
# Top-5 detections
for position in range(5):
    print('Prediction result: {}'.format(label_name[idx[position]]))
# Top-5 detections (duplicated cell)
for position in range(5):
    print('Prediction result: {}'.format(label_name[idx[position]]))
|
.ipynb_checkpoints/debug-trial-1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 14. Decision Trees
#
# [](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/14.DecisionTrees.ipynb)
#
# Slides and notebook based on https://www.datacamp.com/community/tutorials/decision-tree-classification-python, https://nanohub.org/tools/mseml/, https://machinelearningmastery.com/information-gain-and-mutual-information/.
#
# We will use the Decision Tree Classifier Building in Scikit-learn.
#
# Let's first load the required libraries.
# +
# Install the mendeleev package using pip in the current Jupyter kernel
# To use them, you may need to restart the kernel
import sys
# !{sys.executable} -m pip install mendeleev
import pymatgen as pymat
from pymatgen.core.periodic_table import Element
import mendeleev as mendel
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import sys
# -
# ### Getting the dataset
#
# We select 47 elements that occur in the fcc, hcp, and bcc structure. The elements listed were chosen because querying them for these properties yields a dataset with no unknown values, and because they represent the three most common crystallographic structures.
#
# We then query both Pymatgen and Mendeleev to get a complete set of properties per element. We will use this data to create the features from which the model will train and test.
# +
# 47 elements grouped by their known ground-state crystal structure; chosen so
# that every queried property below is available (no missing values).
fcc_elements = ["Ag", "Al", "Au", "Cu", "Ir", "Ni", "Pb", "Pd", "Pt", "Rh", "Th", "Yb"]
bcc_elements = ["Ba", "Ca", "Cr", "Cs", "Eu", "Fe", "Li", "Mn", "Mo", "Na", "Nb", "Rb", "Ta", "V", "W" ]
hcp_elements = ["Be", "Cd", "Co", "Dy", "Er", "Gd", "Hf", "Ho", "Lu", "Mg", "Re",
                "Ru", "Sc", "Tb", "Ti", "Tl", "Tm", "Y", "Zn", "Zr"]
elements = fcc_elements + bcc_elements + hcp_elements
# Fixed seed (1) so the train/test split further below is reproducible.
random.Random(1).shuffle(elements)
# Attribute names queried from each database; together they form the features.
querable_mendeleev = ["atomic_number", "atomic_volume", "boiling_point", "en_ghosh", "evaporation_heat", "heat_of_formation",
                      "lattice_constant", "melting_point", "specific_heat"]
querable_pymatgen = ["atomic_mass", "atomic_radius", "electrical_resistivity","molar_volume", "bulk_modulus", "youngs_modulus",
                     "average_ionic_radius", "density_of_solid", "coefficient_of_linear_thermal_expansion"]
querable_values = querable_mendeleev + querable_pymatgen
# -
# We will use the database queries to populate a pandas dataframe.
# +
all_values = [] # Values for Attributes
all_labels = [] # Crystal structure labels, one-hot in order (fcc, bcc, hcp)
for item in elements:
    element_values = []
    # This section queries Mendeleev
    element_object = mendel.element(item)
    for i in querable_mendeleev:
        element_values.append(getattr(element_object,i))
    # This section queries Pymatgen
    element_object = Element(item)
    for i in querable_pymatgen:
        element_values.append(getattr(element_object,i))
    all_values.append(element_values) # All lists are appended to another list, creating a List of Lists
    if (item in fcc_elements):
        all_labels.append([1, 0, 0]) # The crystal structure labels are assigned here
    elif (item in bcc_elements):
        all_labels.append([0, 1, 0]) # The crystal structure labels are assigned here
    elif (item in hcp_elements):
        all_labels.append([0, 0, 1]) # The crystal structure labels are assigned here
# Pandas Dataframe
df = pd.DataFrame(all_values, columns=querable_values)
# We will patch some of the values that are not available in the datasets.
# Value for the CTE of Cesium
index_Cs = df.index[df['atomic_number'] == 55]
df.iloc[index_Cs, df.columns.get_loc("coefficient_of_linear_thermal_expansion")] = 0.000097
# Value from: <NAME> (ed), CRC Handbook of Chemistry and Physics, 84th Edition. CRC Press. Boca Raton, Florida, 2003
# Value for the CTE of Rubidium
index_Rb = df.index[df['atomic_number'] == 37]
df.iloc[index_Rb, df.columns.get_loc("coefficient_of_linear_thermal_expansion")] = 0.000090
# Value from: https://www.azom.com/article.aspx?ArticleID=1834
# Value for the Evaporation Heat of Ruthenium
index_Ru = df.index[df['atomic_number'] == 44]
df.iloc[index_Ru, df.columns.get_loc("evaporation_heat")] = 595 # kJ/mol
# Value from: https://www.webelements.com/ruthenium/thermochemistry.html
# Value for the Bulk Modulus of Zirconium
index_Zr = df.index[df['atomic_number'] == 40]
df.iloc[index_Zr, df.columns.get_loc("bulk_modulus")] = 94 # GPa
# Value from: https://materialsproject.org/materials/mp-131/
df.head(n=10)
# -
# ### Processing and Organizing Data
#
# We normalize the data and randomly split it into training and testing sets.
#
# ##### SETS
#
# We have 47 elements for which the crystal structure is known and we will use 40 of these as a training set and the remaining 7 as testing set.
#
# ##### NORMALIZATION
#
# We will again use the Standard Score Normalization, which subtracts the mean of the feature and divide by its standard deviation.
# $$
# \frac{X - µ}{σ}
# $$
# While our model might converge without feature normalization, the resultant model would be difficult to train and would be dependent on the choice of units used in the input.
# +
# SETS
# Re-read the rows from the (patched) dataframe so the fixes above are used.
all_values = [list(df.iloc[x]) for x in range(len(all_values))]
# List of lists are turned into Numpy arrays to facilitate calculations in steps to follow
# (Normalization).
all_values = np.array(all_values, dtype = float)
print("Shape of Values:", all_values.shape)
all_labels = np.array(all_labels, dtype = int)
print("Shape of Labels:", all_labels.shape)
# Training Set (first 40 of the pre-shuffled 47 elements)
train_values = all_values[:40, :]
train_labels = all_labels[:40, :]
# Testing Set (remaining 7)
test_values = all_values[-7:, :]
test_labels = all_labels[-7:, :]
# NORMALIZATION — standard score (z-score). Note that the test set is scaled
# with the TRAINING mean/std, which avoids information leakage.
mean = np.nanmean(train_values, axis = 0) # mean
std = np.nanstd(train_values, axis = 0) # standard deviation
train_values = (train_values - mean) / std # input scaling
test_values = (test_values - mean) / std # input scaling
print(train_values[0]) # print a sample entry from the training set
print(train_labels[0])
# -
# ### Creating the Decision Tree Model
#
# For this classification, we will use a simple decision tree.
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Create Decision Tree classifier object (default: Gini impurity, no depth cap)
model = DecisionTreeClassifier()
# Train Decision Tree Classifier on the normalized features / one-hot labels
model.fit(train_values, train_labels)
# -
# ### Validation
#
# We calculate the accuracy score on the training and the testing sets.
# +
# Predict the response for training and testing dataset
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model Accuracy for training and testing set, how often is the classifier correct?
# (An unpruned tree typically scores 1.0 on its own training data.)
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
# -
# ### Visualize the decision tree
# Render the fitted tree; node colors indicate the majority class.
fig = plt.figure(figsize=(25,20))
_ = tree.plot_tree(model, feature_names=querable_values, filled=True)
# +
# Re-evaluate the classifier on both splits and tabulate the true vs.
# predicted crystal structure for every element.
train_predictions = model.predict(train_values)
test_predictions = model.predict(test_values)
all_labels = np.vstack((train_labels, test_labels))
all_predictions = np.vstack((train_predictions, test_predictions))
# One-hot columns are ordered (fcc, bcc, hcp), so argmax indexes this tuple.
structure_names = ("FCC", "BCC", "HCP")
predicted_labels = []
true_labels = []
for row in range(all_predictions.shape[0]):
    predicted_labels.append(structure_names[np.argmax(all_predictions[row])])
    true_labels.append(structure_names[np.argmax(all_labels[row])])
predicted_labels = np.array(predicted_labels).reshape((-1, 1))
true_labels = np.array(true_labels).reshape((-1, 1))
headings = ["Atomic number", "True crystal structure", "Predicted crystal structure"]
atomic_number_array = np.array(df.iloc[:, 0]).reshape((-1, 1))
plot_table = np.concatenate((atomic_number_array, true_labels, predicted_labels), axis=1)
plot_df = pd.DataFrame(plot_table, columns=headings)
# -
# Display the true-vs-predicted comparison table.
plot_df
# ### Optimization of the decision tree
#
# The default Attribute Selection Measure to determine the quality of a split for a decision node is Gini. We can try the Entropy measure and also see if we can reduce the depths of the tree.
# +
# Create Decision Tree classifier object — entropy criterion with the depth
# capped at 3 to reduce overfitting.
model = DecisionTreeClassifier(criterion="entropy", max_depth=3)
# Train Decision Tree Classifier
model.fit(train_values, train_labels)
# Predict the response for training and testing dataset
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model Accuracy for training and testing set, how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
# -
# Visualize the pruned tree for comparison with the unrestricted one above.
fig = plt.figure(figsize=(25,20))
_ = tree.plot_tree(model, feature_names=querable_values, filled=True)
|
Notebooks/14.DecisionTrees_OneHoteEncoder.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SharonCamacho/Clases/blob/main/Numpy.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="lS6dBZOOsL6A"
# # Numpy
# + [markdown] id="Jr-ObpwY3X2E"
# *Tomado de **Python Data Science Handbook** de Jake VanderPlas*
# + [markdown] id="28IY467eyPRV"
# * Proveniente de Numerical Python
# * Estructura de datos propia: los arrays que hace más eficiente el manejo de datos
#
#
# + id="dwfdCte5sLEn"
# Import the library under the conventional alias np
import numpy as np
# + id="BBlrTTaEsRv9" outputId="abbf27f8-339b-44a0-e9d5-8ca3f0b242ab" colab={"base_uri": "https://localhost:8080/", "height": 171}
numpy.__version__
# Raises a NameError: the library was imported
# under the alias np, so the bare name `numpy` is undefined
# + id="2Q0GSGU8sXEK" outputId="f3f8ba08-9aae-4ac2-bce7-8e1120d1da3e" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.__version__
# + id="rxl-DarMsi5j"
# To get help/information about numpy (IPython introspection)
# np?
# + id="PuhmjLAqstIz"
# Example: introspect a single function
# np.max?
# + [markdown] id="NgKAWwooyILa"
# ## Crear **arrays**
# + [markdown] id="asbf1lZQ0y8P"
# No olvides que los arrays solo contienen un tipo de datos
# + id="nn9rewL2s1uj" outputId="682d98bb-2a39-402e-b499-e2548f35b790" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Create an array of integers:
np.array([1, 4, 2, 5, 3])
# + id="2Yuu2IYsyoaI" outputId="4c83ba8c-cff7-434e-bfa0-c6067c16c38b" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Since an array can hold only one data type, the ints are upcast to float
np.array([3.14, 4, 2, 3])
# + id="jdbPdCeH1Hla" outputId="ee3d0796-3a3c-4fa5-8beb-58047a3d64b1" colab={"base_uri": "https://localhost:8080/", "height": 35}
# To force a specific type we can use dtype (floats are truncated to int)
np.array([3.14, 4, 2, 3], dtype='int')
# + [markdown] id="UUySK8vM1aHx"
# Unlike Python lists, NumPy arrays can be explicitly multidimensional
# + id="kLmKrenS1jWq" outputId="de39013c-c4ec-4883-b04e-31bbe85dab17" colab={"base_uri": "https://localhost:8080/", "height": 72}
# A list of lists (here built by comprehension) gives a multidimensional array
np.array([range(i, i + 3) for i in [2, 4, 6]])
# + [markdown] id="2GYbNE0-1vRD"
# >>> ***OJO AQUÍ EXPLICAR LIST COMPRENHENCIÖN***
# + [markdown] id="5wu4XQwk2ETo"
# ## Crear arrays DUMMIES desde cero
# + [markdown] id="WkJXmVBk2Tnr"
# Es más eficiente crear matrices desde cero usando rutinas integradas en NumPy.
# + id="vkhpH-st2Jza" outputId="40670bb8-fc01-487c-a7e9-1f8edb89759f" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Create an array of length 10 filled with 0
np.zeros(10, dtype=int)
# + id="c6_--okh2mKL" outputId="bf7b8a12-2558-4ebe-9f88-a7af02cf9a58" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Create a 3x5 array filled with 1, of float type
np.ones((3, 5), dtype=float)
# + id="zQhNLkgJ2oNK" outputId="5472103b-931c-4941-c139-cc2fd12f3c8d" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# + id="zmyluonP2qy1" outputId="ed81537d-b5f6-4cac-a883-c8238684d5c2" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Create an array from a sequence:
# starts at 0, ends at 20, in steps of 2
np.arange(0, 20, 2)
# Note that the stop value 20 is exclusive
# + id="jnk-1w-U2uBr" outputId="2410476f-26dd-4d7f-fc4f-377dc981d526" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Here the endpoint 1 IS included
# + id="BS-KJBSr2vkV" outputId="5abb7d9e-bf6f-418f-9c73-8273b502582c" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Create a 3x3 array from a uniform distribution:
# random values between 0 and 1
np.random.random((3, 3))
# + [markdown] id="rPHD3mBV44ZM"
# >> # **Agregar foto de distribución uniforme**
#
#
#
#
# + id="YFPe7tQo2xsD" outputId="25e9fd6a-9d7d-42ad-a4c7-2052a6adaaf9" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Create a 3x3 array from a normal distribution:
# random values with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# + [markdown] id="Y7icF2CN5Odw"
# >> # **TODO: add picture of the normal distribution**
# + id="9C3SVbBD21E5" outputId="fd82a50c-4c36-4ba2-d829-0e3538604ee2" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Create a 3x3 array
# with random integers in the half-open interval [0, 10)
np.random.randint(0, 10, (3, 3))
# + id="0J3Bj9rb21yR" outputId="97d2aa9f-c123-4186-b33f-1518e1062e70" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Create the 3x3 identity matrix
np.eye(3)
# + id="x7Wr7xKZ26aC" outputId="94ad659b-d014-4cf5-95e2-4938ee8a428a" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Create an uninitialized array of three floats:
# the values are whatever already exists at that memory location
np.empty(3)
# + id="imjn_qi954Ir"
# np.empty?
# + [markdown] id="GYOYejf424Dh"
#
# "vacío, a diferencia de los ceros, no establece los valores de la matriz en cero y, por lo tanto, puede ser ligeramente más rápido. Por otro lado, requiere que el usuario establezca manualmente todos los valores de la matriz y debe usarse con precaución" (https://bit.ly/3iKLKaV)
# + [markdown] id="A7U5f8Tv95Jp"
# ## Lo básico de los array
# + [markdown] id="vVAkDf1X97TN"
# ### Atributos
# + id="3j9SV4xJ-AR3"
#Creamos una semilla para obtener los mismos resultados
np.random.seed(0)
# + id="6xZmSd64-Jrx"
#Creo un array
x3 = np.random.randint(10, size=(3, 4, 5))
# + id="KP2pZNJM_Emq" outputId="dc69fbce-16ce-432e-dcb9-fb7b0c8eda65" colab={"base_uri": "https://localhost:8080/", "height": 272}
x3
#observar que no trae valores mayores a 10
# + id="uGMyjvey-Owc" outputId="c2033830-47fa-47ff-8074-a1333124983d" colab={"base_uri": "https://localhost:8080/", "height": 90}
#Muestro los atributos de ese array
print("x3 dimensión: ", x3.ndim)
print("x3 forma:", x3.shape)
print("x3 tamaño: ", x3.size)
print("x3 tipo de dato: ", x3.dtype)
# + id="vhQ60EReACkJ" outputId="183b1a71-0a6f-4935-eaec-c709ad69a921" colab={"base_uri": "https://localhost:8080/", "height": 54}
print("itemsize:", x3.itemsize, "bytes") #Tamaño de cada elemento
print("nbytes:", x3.nbytes, "bytes") #Tamaño del array
# + [markdown] id="cjYW-XD9AMlE"
# ### Indexación
# + id="g_d2y4O0A4ef" outputId="c7df9a25-9cd3-4fc0-f473-7fbaca47ed69" colab={"base_uri": "https://localhost:8080/", "height": 272}
x3
# + id="jLgslRpPASfY" outputId="53eddb96-2f19-47db-902f-b0ac40db2fbc" colab={"base_uri": "https://localhost:8080/", "height": 90}
x3[1]
# + id="Tr0SGdSFAtGL" outputId="f0b24855-a5ab-4c85-b09d-e8a464cae672" colab={"base_uri": "https://localhost:8080/", "height": 35}
x3[1][2]
# + id="Pv4O4oTwAzVw" outputId="7b4fbea5-02ae-4972-93a2-4a254348bbd0" colab={"base_uri": "https://localhost:8080/", "height": 35}
x3[1][2][2]
# + id="5m7N0rbwBfNa" outputId="6ded6e6c-181a-4ef3-bb15-b70b814caedc" colab={"base_uri": "https://localhost:8080/", "height": 35}
x3[1][2][-1]
# + id="31NFFuwaBmyo" outputId="10668e05-163a-4cca-c2db-8c275fa34013" colab={"base_uri": "https://localhost:8080/", "height": 35}
x3[1,2,2]
# + id="ypOdXRmuBw6Q" outputId="d868c969-0aa9-4a86-900a-97095a47a572" colab={"base_uri": "https://localhost:8080/", "height": 35}
x3[1,2,2] = 20.892
x3[1,2]
# + [markdown] id="FS4TwwncCTfw"
# ### Slicing
# + [markdown] id="av6eTOHTCYNG"
# x[start : stop : step]
#
# si no se especifican los valores start=0, stop=tamaño de la dimensión, step=1
#
# + [markdown] id="nVv5d7wiF4Bz"
# cuando trabajamos con grandes conjuntos de datos, podemos acceder y procesar partes de estos conjuntos de datos sin la necesidad de copiar el búfer de datos subyacente.
# + [markdown] id="8crI76fkC6Aq"
# #### De una dimensión
# + id="Z4hBeb43CVyi" outputId="46e181c9-16c5-48a7-b84a-66713a7198ff" colab={"base_uri": "https://localhost:8080/", "height": 35}
x = np.arange(10)
x
# + id="iOqG7ZnfCwjf"
x[:5] # first five elements
# + id="Nngg9zziCy-G"
x[5:] # elements after index 5
# + id="4bPcg1EMC3tt"
x[4:7] # middle sub-array
# + id="z7xcy0pmC9F4"
x[::2] # every other element
# + id="OXQFmi2jDAtj"
x[::-1] # all elements, reversed
# + [markdown] id="nS6WIMewDDyf"
# #### Varias dimensiones
# + id="htQDgIY1DDBv" outputId="6cc0dd8c-334f-45b9-9217-54d2f0c6f3da" colab={"base_uri": "https://localhost:8080/", "height": 72}
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x2
# + id="SigG0erbDNew" outputId="fc17b461-84aa-4902-fd9e-ecb371a25c2a" colab={"base_uri": "https://localhost:8080/", "height": 54}
x2[:2, :3] # two rows, three columns
# + id="oCePwohcFgXW" outputId="dc10523c-73bf-4c8a-b287-ad1e4b558eda" colab={"base_uri": "https://localhost:8080/", "height": 54}
#Las dos primeras filas
x2[:2]
# + id="prz8ze1gFS1u" outputId="f0ae1da9-9106-4751-d85e-71a5c0363ccd" colab={"base_uri": "https://localhost:8080/", "height": 72}
#Las dos últimas columnas
x2[:,-2:]
# + [markdown] id="OPRvSh9fEAWB"
# como sería sacar un cubo de 2x2x2
# + id="7XioyhvlDeKb" outputId="51cff07c-6fa6-4535-c2a6-7ed86e654bf6" colab={"base_uri": "https://localhost:8080/", "height": 272}
x3
# + id="5cWJV91sDiS8"
x3_new = x3[:2,:2,:2]
# + id="btkuHSRUD27s" outputId="ea2bd921-5368-45cd-9a82-79aee085c208" colab={"base_uri": "https://localhost:8080/", "height": 72}
print("x3_new ndim: ", x3_new.ndim)
print("x3_new shape:", x3_new.shape)
print("x3_new size: ", x3_new.size)
# + [markdown] id="KIoBKexLA-eK"
# ### Reshaping
# + id="HthsU7D6BD0g" outputId="de83fbb6-330d-46a2-99aa-3b9616ce4358" colab={"base_uri": "https://localhost:8080/", "height": 72}
#Convertirlo en 3x3
grid = np.arange(1, 10).reshape((3, 3))
print(grid)
# + id="JBivc1trGOtx" outputId="733b5a90-3a54-48e1-c69b-38376c72cab4" colab={"base_uri": "https://localhost:8080/", "height": 35}
#Antes era un vector
np.arange(1, 10)
# + id="Nz2YIHVoGifQ" outputId="711c2e35-9012-430b-96c8-0f26be30cc53" colab={"base_uri": "https://localhost:8080/", "height": 189}
#Mi array inicial debe permitir tener esa forma, porque de lo contrario
#Muestra error
grid2 = np.arange(1, 8).reshape((3, 3))
print(grid2)
#No tiene elementos suficientes para construir una matriz 3x3
# + [markdown] id="A07amzQ9HcHw"
# ### Join / Concatenation
# + [markdown] id="2zSgmN8sHs_s"
# * np.concatenate
# * np.vstack
# * np.hstack
#
# np.concatenate toma una lista o una tupla como el primer argumento
# + [markdown] id="EDyb35BSIX4U"
# #### np.concatenate
# + id="wKvTjcALHfHm" outputId="7c523a24-c0b4-42de-a053-121257344081" colab={"base_uri": "https://localhost:8080/", "height": 35}
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
np.concatenate([x, y])
# + id="X3w1cTi0IBVv" outputId="01510b91-994c-4584-8d95-8c2c4211ed70" colab={"base_uri": "https://localhost:8080/", "height": 35}
#Se pueden concatenar más de dos
z = [99, 99, 99]
print(np.concatenate([x, y, z]))
# + id="ioILaH7TICSp"
grid = np.array([[1, 2, 3],
[4, 5, 6]])
# + id="36ITdd5JIOHf" outputId="73d3172f-6bd6-464d-e55a-0c457a8b9d67" colab={"base_uri": "https://localhost:8080/", "height": 90}
# concatenate along the first axis
np.concatenate([grid, grid])
# + id="PuL_WNCnISW4" outputId="4fc006fe-db93-425d-cb1d-9a77b3d0c9d9" colab={"base_uri": "https://localhost:8080/", "height": 54}
# concatenate along the second axis (axis=1)
np.concatenate([grid, grid], axis=1)
# + [markdown] id="CY2-u9tcIbJh"
# #### np.vstack / np.hstack
# + [markdown] id="QhmQmFAEIfDl"
# para arrays con diferentes dimensiones
#
# el orden importa y las dimensiones también
# + id="tGZ7I1idIUtO" outputId="2bc84233-2167-4745-9606-b7e32a67fe03" colab={"base_uri": "https://localhost:8080/", "height": 72}
#Vertical
x = np.array([1, 2, 3])
grid = np.array([[9, 8, 7],
[6, 5, 4]])
# vertically stack the arrays
np.vstack([x, grid])
# + id="hQZKM2IpIo05" outputId="bd7430a0-ea85-431d-c1c2-756dd59c00db" colab={"base_uri": "https://localhost:8080/", "height": 54}
#Horizontal
y = np.array([[99],
[99]])
np.hstack([grid, y])
#notar que el orden importa
|
Numpy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Comment out these lines
import sys
sys.path.insert(0, 'C:\\Users\\masch\\QuantumComputing\\QCompMAS\\pgmpy')
# Imports
import cmath
import numpy as np
from pgmpy.models import BayesianNetwork
from pgmpy.factors.discrete.CPD import TabularCPD
from pgmpy.inference import VariableElimination
from pgmpy.inference import BeliefPropagation
# Flip probability (weight) of the bit-flip error channel.
p = 0.64
# Bayesian network for one qubit across three circuit moments: rv is a
# classical random variable controlling whether q0 flips between moments
# m0 and m1; the m1 -> m2 edge applies a fixed single-qubit gate.
bitFlip = BayesianNetwork([('q0m0', 'q0m1'), ('rv', 'q0m1'), ('q0m1', 'q0m2')])
# Qubit starts deterministically in state 0.
cpd_q0m0 = TabularCPD(variable='q0m0', variable_card=2, values=[[1],[0]])
# rv carries amplitude-like weights sqrt(1-p) / sqrt(p) for no-flip / flip.
cpd_rv = TabularCPD(variable='rv', variable_card=2, values=[[np.sqrt(1-p)],[np.sqrt(p)]])
# q0m1 = q0m0 XOR rv: an X (NOT) applied exactly when rv = 1.
cpd_q0m1 = TabularCPD(variable='q0m1', variable_card=2, values=[[1,0,0,1],[0,1,1,0]], evidence=['q0m0', 'rv'], evidence_card = [2,2])
# The m1 -> m2 table is the Hadamard matrix (1/sqrt(2)) * [[1, 1], [1, -1]].
cpd_q0m2 = TabularCPD(variable='q0m2', variable_card=2, values=[[1/np.sqrt(2),1/np.sqrt(2)],[1/np.sqrt(2),-1/np.sqrt(2)]], evidence=['q0m1'], evidence_card=[2])
bitFlip.add_cpds(cpd_q0m0, cpd_rv, cpd_q0m1, cpd_q0m2)
# Query the joint factor over the noise variable and the final qubit moment.
BF_infer = VariableElimination(bitFlip)
bf = BF_infer.query(['rv', 'q0m2'])
print(bf)
# -
def cpd_2_dm(obj,rvs,var):
    """Convert a pgmpy factor over qubit and noise variables into a density matrix.

    Marginalises out the classical random variables *rvs*: for each joint rv
    outcome it extracts the conditional value vector over the qubit variables
    *var* and accumulates its outer product |v><v|.

    Parameters
    ----------
    obj : pgmpy factor (e.g. the result of ``VariableElimination.query``)
        Provides ``obj.variables`` (axis ordering) and ``obj.values``.
    rvs : list of str
        Names of the classical variables to marginalise over.
    var : list of str
        Names of the qubit variables; their order fixes the basis ordering
        of the returned matrix.

    Returns
    -------
    numpy.ndarray, complex, shape (2**len(var), 2**len(var))
        The reduced density matrix.
    """
    numQubits = len(var)
    numRVs = len(rvs)
    varOrder = obj.variables
    numVars = len(varOrder)
    qubitOrdering = []
    rvsOrdering = []
    # For each qubit variable, locate its axis in the factor's variable order
    # and record that axis's stride (2**position-from-the-right) in the
    # flattened value array.
    for i in range(numQubits):
        v = var[i]
        j = 0
        while(j < numVars and v != varOrder[j]):
            j += 1
        qubitOrdering.append(2**(numVars - j - 1))
    # Same stride computation for the classical random variables.
    for i in range(numRVs):
        v = rvs[i]
        j = 0
        while(j < numVars and v != varOrder[j]):
            j += 1
        rvsOrdering.append(2**(numVars - j - 1))
    vals = (obj.values).flatten()
    dm = np.zeros((2**numQubits,2**numQubits),dtype="complex_")
    numEvents = 2**numRVs          # joint outcomes of the classical rvs
    numPermutations = 2**numQubits # basis states of the qubit register
    for i in range(numEvents):
        # val1: flat-array base offset selecting this joint rv outcome.
        val1 = 0
        for j in range(numRVs):
            val1 += ((i//(2**j))%2)*rvsOrdering[numRVs - j - 1]
        # arr1: conditional value column vector; arr2: its conjugate row.
        arr1 = np.zeros((numPermutations,1),dtype="complex_")
        arr2 = np.zeros((1,numPermutations),dtype="complex_")
        for j in range(numPermutations):
            # val2: full flat index = rv offset + qubit basis-state offset.
            val2 = val1
            for k in range(numQubits):
                val2 += ((j//(2**k))%2)*qubitOrdering[numQubits - k - 1]
            arr1[j][0] = vals[val2]
            arr2[0][j] = np.conj(vals[val2])
        # Accumulate the outer product contributed by this rv outcome.
        dm += np.matmul(arr1,arr2)
    return dm
X = cpd_2_dm(bf,['rv'],['q0m2'])
print(X)
# +
p2 = 0.64
bitFlip2 = BayesianNetwork([('q0m0', 'q0m1'), ('q1m0', 'q1m1'), ('rv', 'q1m1')])
cpd_q0m0 = TabularCPD(variable='q0m0', variable_card=2, values=[[1],[0]])
cpd_q1m0 = TabularCPD(variable='q1m0', variable_card=2, values=[[1],[0]])
cpd_rv = TabularCPD(variable='rv', variable_card=2, values=[[np.sqrt(1-p2)],[np.sqrt(p2)]])
cpd_q0m1 = TabularCPD(variable='q0m1', variable_card=2, values=[[1/np.sqrt(2),1/np.sqrt(2)],[1/np.sqrt(2),-1/np.sqrt(2)]], evidence=['q0m0'], evidence_card = [2])
cpd_q1m1 = TabularCPD(variable='q1m1', variable_card=2, values=[[1,0,0,1],[0,1,1,0]], evidence=['q1m0', 'rv'], evidence_card=[2,2])
bitFlip2.add_cpds(cpd_q0m0, cpd_q1m0, cpd_rv, cpd_q0m1, cpd_q1m1)
BF2_infer = VariableElimination(bitFlip2)
bf2 = BF2_infer.query(['rv', 'q0m1', 'q1m1'])
print(bf2)
# -
X = cpd_2_dm(bf2,['rv'],['q1m1', 'q0m1']).round(4)
print(X)
# +
from qiskit import QuantumCircuit
import qiskit.quantum_info as qi
circ1 = QuantumCircuit(2)
circ1.h(0)
circ2 = QuantumCircuit(2)
circ2.h(0)
circ2.x(1)
dm1 = (qi.DensityMatrix.from_instruction(circ1)).__array__()
dm2 = (qi.DensityMatrix.from_instruction(circ2)).__array__()
Y = ((1-p2)*dm1 + p2*dm2).round(4)
print(Y)
# -
X == Y
|
Notebooks/BitFlip.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="37puETfgRzzg"
# # Data Preprocessing Tools
# + [markdown] id="EoRP98MpR-qj"
# ## Importing the libraries
# + id="N-qiINBQSK2g"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] id="RopL7tUZSQkT"
# ## Importing the dataset
# + id="WwEPNDWySTKm"
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# + id="hCsz2yCebe1R" executionInfo={"status": "ok", "timestamp": 1587622253093, "user_tz": -240, "elapsed": 895, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="1e4cc568-4e51-4b38-9d46-4aa3f15204be" colab={"base_uri": "https://localhost:8080/", "height": 188}
print(X)
# + id="eYrOQ43XcJR3" executionInfo={"status": "ok", "timestamp": 1587622256072, "user_tz": -240, "elapsed": 656, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="e0873b2a-3b08-4bab-ef0d-15b88858ca44" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(y)
# + [markdown] id="nhfKXNxlSabC"
# ## Taking care of missing data
# + id="c93k7ipkSexq"
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# + id="3UgLdMS_bjq_" executionInfo={"status": "ok", "timestamp": 1587622284427, "user_tz": -240, "elapsed": 919, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="254af4e0-681e-47f5-aaa7-b9c6f43258e9" colab={"base_uri": "https://localhost:8080/", "height": 188}
print(X)
# + [markdown] id="CriG6VzVSjcK"
# ## Encoding categorical data
# + [markdown] id="AhSpdQWeSsFh"
# ### Encoding the Independent Variable
# + id="5hwuVddlSwVi"
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
# + id="f7QspewyeBfx" executionInfo={"status": "ok", "timestamp": 1587622291650, "user_tz": -240, "elapsed": 570, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="5b35feef-7fe2-46ef-ce70-80495f94f4ed" colab={"base_uri": "https://localhost:8080/", "height": 188}
print(X)
# + [markdown] id="DXh8oVSITIc6"
# ### Encoding the Dependent Variable
# + id="XgHCShVyTOYY"
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
# + id="FyhY8-gPpFCa"
print(y)
# + [markdown] id="qb_vcgm3qZKW"
# ## Splitting the dataset into the Training set and Test set
# + id="pXgA6CzlqbCl"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)
# + id="GuwQhFdKrYTM" executionInfo={"status": "ok", "timestamp": 1587622301522, "user_tz": -240, "elapsed": 597, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="de1e527f-c229-4daf-e7c5-ea9d2485148d" colab={"base_uri": "https://localhost:8080/", "height": 154}
print(X_train)
# + id="TUrX_Tvcrbi4" executionInfo={"status": "ok", "timestamp": 1587622305066, "user_tz": -240, "elapsed": 835, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="9a041a9b-2642-4828-fa2f-a431d7d77631" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(X_test)
# + id="pSMHiIsWreQY" executionInfo={"status": "ok", "timestamp": 1587622306938, "user_tz": -240, "elapsed": 536, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="5afe91e0-9244-4bf5-ec1b-e3e092b85c08" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(y_train)
# + id="I_tW7H56rgtW" executionInfo={"status": "ok", "timestamp": 1587622309210, "user_tz": -240, "elapsed": 828, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="2a93f141-2a99-4a69-eec5-c82a3bb8d36b" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(y_test)
# + [markdown] id="TpGqbS4TqkIR"
# ## Feature Scaling
# + id="AxjSUXFQqo-3"
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, 3:] = sc.fit_transform(X_train[:, 3:])
X_test[:, 3:] = sc.transform(X_test[:, 3:])
# + id="DWPET8ZdlMnu" executionInfo={"status": "ok", "timestamp": 1587622313752, "user_tz": -240, "elapsed": 767, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="dea86927-5124-4e2a-e974-2804df9a913c" colab={"base_uri": "https://localhost:8080/", "height": 154}
print(X_train)
# + id="sTXykB_QlRjE" executionInfo={"status": "ok", "timestamp": 1587622315942, "user_tz": -240, "elapsed": 506, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}} outputId="b68f0cfc-d07c-48cb-80d0-6800028c41f9" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(X_test)
|
Copy of data_preprocessing_tools.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Kalman Filter SIR
# *Why* - <NAME>'s model required conjugate observation and transition densities in order to use Gibbs sampling. Pomp requires particle filtering methods. Can we use the analytical properties of the Kalman Filter and use some simple transforms? It seems like we can!
#
# #### Transition Density
# $$(S_t',I_t',R_t') \sim MVN(RK4(S_{t-1},I_{t-1},R_{t-1})),\Sigma)$$
# Where $RK4$ is the Runge-Kutta approximation to the SIR differential equations
# #### Observation Density
#
# $$I_t = \frac{e^{S_t'}}{e^{S_t'} + e^{I_t'} + e^{R_t'}}$$
# $$Y_t \sim N(N_t*I_t,\sigma^2)$$
# where $N_t$ is the population at time $t$
# +
import numpy as np
import pylab as pl
from pykalman import UnscentedKalmanFilter
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# initialize parameters
def transition_function(state, noise):
    """Propagate an SIR state forward one tiny time step and add process noise.

    The deterministic part integrates the SIR differential equations over a
    two-point time grid [0, 0.001] with scipy's ``odeint``; the returned state
    is the integrator's final value plus the matching component of *noise*.

    Parameters: ``state`` is a length-3 sequence (S, I, R); ``noise`` is a
    length-3 additive perturbation.  Returns a flat numpy array (S, I, R).
    """
    pop_size = 1000                      # total population (passed through to the ODE)
    infection_rate, recovery_rate = 0.2, 0.1
    time_grid = np.linspace(0, 0.001, 2)

    def sir_rhs(y, t, N, beta, gamma):
        # Right-hand side of the SIR equations.  Note: no 1/N normalisation,
        # matching the original model specification.
        susceptible, infected, recovered = y
        d_s = -beta * susceptible * infected
        d_i = beta * susceptible * infected - gamma * infected
        d_r = gamma * infected
        return d_s, d_i, d_r

    trajectory = odeint(
        sir_rhs,
        (state[0], state[1], state[2]),
        time_grid,
        args=(pop_size, infection_rate, recovery_rate),
    )
    final = trajectory[-1]
    return np.array([final[0] + noise[0],
                     final[1] + noise[1],
                     final[2] + noise[2]]).reshape((-1))
def observation_function(state, noise):
    """Map a latent (S', I', R') state to a noisy expected infection count.

    A softmax over the three latent components gives the infected fraction,
    which is scaled by the population size (1000) and perturbed by the first
    noise component.
    """
    exps = [np.exp(state[k]) for k in range(3)]
    infected_fraction = exps[1] / (exps[0] + exps[1] + exps[2])
    return 1000 * infected_fraction + noise[0]
transition_covariance = np.eye(3)
random_state = np.random.RandomState(0)
observation_covariance = 1#np.eye(3) #+ random_state.randn(3, 3)
initial_state_mean = [0, 0, 0]
initial_state_covariance = np.eye(3)
# sample from model
kf = UnscentedKalmanFilter(
transition_function, observation_function,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance,
random_state=random_state
)
#states, observations = kf.sample(100, initial_state_mean)
# estimate state with filtering and smoothing
time_series = np.power(np.sin(np.arange(0,100) + np.random.normal(0,1,100))+4,2)
filtered_state_estimates = kf.filter(time_series)[0]
smoothed_state_estimates = kf.smooth(time_series)[0]
def states_to_observations(states):
    """Convert latent filter states into expected observed infection counts.

    Applies the observation model's softmax-and-scale mapping (without
    noise) to every state in *states* and returns the results as a list.
    """
    def expected_count(state):
        # Infected fraction: softmax of the three latent components.
        frac = np.exp(state[1]) / (np.exp(state[0]) + np.exp(state[1]) + np.exp(state[2]))
        return 1000 * frac
    return [expected_count(s) for s in states]
# draw estimates
#lines_true = pl.plot(states_to_observations(filtered_state_estimates), color='b')
from plotnine import *
from plotnine.data import *
import pandas as pd
data = [range(100),time_series.tolist(),states_to_observations(filtered_state_estimates)]
data = np.hstack((np.arange(100).reshape((-1,1)),time_series.reshape((-1,1)),np.array(states_to_observations(filtered_state_estimates)).reshape((-1,1))))
print (data.shape)
df = pd.DataFrame(data, columns=['t','y','yhat'])
df = pd.melt(df, id_vars=['t'], value_vars=['y', 'yhat'])
p = ggplot(df, aes(x='t', y='value', color='variable'))
(p + geom_line()
+ scale_color_manual(['r', 'b'])
)
# -
|
.ipynb_checkpoints/kfsir-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
df = pd.read_csv('AirQualityUCI2.csv', sep=',', header=0, index_col=0)
# ### Renaming the headers
col_names = {'Date':'date', 'Time':'time', 'CO(GT)':'cogt', 'PT08.S1(CO)':'pt08s1co', 'NMHC(GT)':'nhmcgt','C6H6(GT)':'c6h6gt', 'PT08.S2(NMHC)':'pt08s2nhmc','NOx(GT)':'noxgt','PT08.S3(NOx)':'pt08s3nox','NO2(GT)':'no2gt','PT08.S4(NO2)':'pt08s4no2','PT08.S5(O3)':'pt08s503','T':'t','RH':'rh','AH':'ah','X1':'x1','X2':'x2','X3':'x3','X4':'x4','X5':'x5'}
df = df.rename(columns=col_names)
df.head()
# ### Data Cleaning
df.head(20)
df['cogt'] = [x if x>0 else np.nan for x in df['cogt']]
#summary like function in python
df.describe()
# Treat non-positive readings as missing.  The UCI air-quality dataset marks
# missing values with -200, so any value <= 0 in these sensor columns is
# replaced with NaN.  One loop replaces fifteen copy-pasted per-column lines
# (the original even repeated the 'cogt' line twice).
_sensor_cols = ['cogt', 'pt08s1co', 'nhmcgt', 'c6h6gt', 'pt08s2nhmc',
                'noxgt', 'pt08s3nox', 'no2gt', 'pt08s4no2', 'pt08s503',
                't', 'rh', 'ah', 'x1', 'x2']
for _col in _sensor_cols:
    df[_col] = [x if x > 0 else np.nan for x in df[_col]]
df.describe()
## removing all rows containing NaN
df = df.dropna()
df['noxgt'].describe()
# #### We can now assume our data is clean.
# ### Data integration
# Data integration is nothing but the concatenation of the two or more datasets.
#
# It can be done by using concat method of pandas.
df.head()
df.dtypes
# ### Data Transformation
# It deals with transforming of data types of the columns as per the need. Here all the codes are in float64 itself which will be beneficial for getting the matrix in machine learning.
# ### Data Model Building
# Fit a Gaussian Naive Bayes classifier on the classic iris dataset and
# report accuracy.  NOTE(review): the model is scored on the same data it
# was fitted on, so this is an optimistic (training-set) accuracy estimate.
iris= datasets.load_iris()
gnb = GaussianNB()
train = gnb.fit(iris.data, iris.target)
pred = train.predict(iris.data)
total = iris.data.shape[0]
success = (iris.target==pred).sum()
print("Accuracy = "+str((success/total)*100))
|
Data Science-1 - Air Quality/air_quality.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # signac - PyData Ann Arbor Meetup 2018
# ## Integration with the Python ecosystem
#
# ``signac`` is designed to be extremely lightweight, making it easy to work with other tools.
# Here, we demonstrate how it can be integrated with some other tools, which we also use to provide some comparison of ``signac``'s functionality with these tools.
# ### Sacred
#
# The [Sacred provenance management tool](https://sacred.readthedocs.io/en/latest/) is a popular Python package for logging experiments and reproducing them later.
# It provides some functionality that slightly overlaps with **signac**, but both packages can be used in a complementary manner.
# Remove left-over files from previous runs...
# !rm -rf project.py experiment.py workspace signac.rc
# Initialize the project iterating over some arbitrary index variable *i*.
# +
import signac
project = signac.init_project("Sacred")
for i in range(5):
project.open_job({"foo": i}).init()
# -
# Then setup the *sacred* experiment, which is a function of some state point variable.
# +
# %%writefile experiment.py
from sacred import Experiment
ex = Experiment()
@ex.command
def hello(foo):
print('hello #', foo)
@ex.command
def goodbye(foo):
print('goodbye #', foo)
# +
# %%writefile project.py
from flow import FlowProject
from sacred.observers import FileStorageObserver
from experiment import ex
class SacredProject(FlowProject):
    # signac-flow project whose operations wrap the sacred experiment `ex`
    # defined in experiment.py.
    pass
def setup_sacred(job):
    # Point the sacred experiment at this signac job: use the job's state
    # point as the experiment config and store sacred run records inside
    # the job's workspace directory ('my_runs').
    ex.add_config(** job.sp)
    ex.observers[:] = [FileStorageObserver.create(job.fn('my_runs'))]
@SacredProject.operation
@SacredProject.post.true('hello')
def hello(job):
    # Run the sacred 'hello' command for this job, then record completion
    # in the job document so the post-condition ('hello' is truthy) holds.
    setup_sacred(job)
    ex.run('hello')
    job.doc.hello = True
@SacredProject.operation
@SacredProject.pre.after(hello)
@SacredProject.post.true('goodbye')
def goodbye(job):
    # Eligible only after hello() has completed for the job (pre.after).
    setup_sacred(job)
    ex.run('goodbye')
    job.doc.goodbye = True
if __name__ == '__main__':
    # Expose the standard signac-flow CLI (run / status / ...).
    SacredProject().main()
# -
# !python3 project.py run -n 1
# !python3 project.py run
# !python3 project.py status --stack --pretty --full
# ### pandas
#
# The data in a signac database can easily be coerced into a format suitable for [pandas](https://pandas.pydata.org/).
# Here, we showcase a simple ideal gas study, where both the state point metadata and document metadata is exported into a pandas `DataFrame`.
#
# An ideal gas can be modeled with the ideal gas equation: $pV = NRT$, where the product of the pressure $p$ and the volume $V$ are linearly proportional to the amount of molecules $N$, the ideal gas constant $R=8.314 \frac{\text{J}}{\text{mol K}}$, and the absolute temperature $T$.
#
# We start by initializing the data space.
# +
import pandas as pd
import numpy as np
import signac
project = signac.init_project("pandas", root='pandas-project')
for T in 200, 300, 400:
for p in 1, 10, 100:
job = project.open_job(dict(T=T, p=p, N=1))
job.doc.V = job.sp.N * job.sp.T * 8.313 / job.sp.p
# -
# We then export the *project index* to a pandas DataFrame, while flattening the statepoint dictionary:
# +
def flatten_statepoint(doc):
    """Yield (key, value) pairs of *doc* with its 'statepoint' dict flattened.

    State-point entries come first, each key prefixed with 'sp.'; the
    remaining top-level entries follow unchanged.  NOTE: *doc* is mutated —
    its 'statepoint' key is removed.
    """
    statepoint = doc.pop('statepoint')
    yield from (('sp.' + k, v) for k, v in statepoint.items())
    yield from doc.items()
project_index = {doc['_id']: dict(flatten_statepoint(doc)) for doc in project.index()}
df = pd.DataFrame(project_index).T.set_index('_id')
df
# -
# We can then apply the standard pandas selection ...
df[df['sp.p'] == 1]
# ... and aggregation mechanisms.
df[df['sp.p'] == 1].V.max()
# ### Datreant
#
# The [``datreant.core``](http://datreant.readthedocs.io/en/latest/) package is one of the closer analogues to the ``signac`` core data managment package.
# However, it is even less restrictive than ``signac`` in that it does not require any index; it simply offers a way to associate arbitrary directories on the filesystem with metadata.
#
# Both packages can be used in conjunction if there is value in maintaining trees within a ``signac`` data space or *vice versa*.
# +
import signac
project = signac.init_project("Datreant", root='datreant-project')
for i in range(5):
project.open_job({"i": i}).init()
# +
import datreant.core as dtr
for job in project:
with job:
dtr.Treant('tree1').categories['foo'] = 1
dtr.Treant('tree2').categories['foo'] = 2
# +
from glob import glob
for job in project:
print(job)
with job:
for tree in glob('tree?'):
print(tree, dtr.Treant(tree).categories)
print()
|
integration/Integration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
def read_questions_to_df():
    """Load the ham-radio question bank into a DataFrame.

    The source file 'amat_basic_quest_delim.txt' is ';'-separated and
    latin1-encoded.
    """
    return pd.read_csv('amat_basic_quest_delim.txt', delimiter=';', encoding='latin1')
ham_df = read_questions_to_df()
ham_df.head()
ham_df.info()
ham_df_sample = ham_df.sample(n=10)
# ### Preparation
# - read the questions from the csv into dataframe
#
# ### Generate a question
# - pick one row from the dataframe - get the question and the answer choices
# - get a random shuffle and remember the correct choice.
#
# ### Present
# - The question and choices to screen (including one to exit)
# - Get user input as to answer
# - Print correct or wrong and and also next question
#
# ### Exit
# - Add session scores to a file that is saved (this might be best as json or dict)
# - Why json - easy to add a timestamp and then filter out those timestamps later
#
from random import shuffle
from random import sample
choice_list = [(1, 'Audio rectification of strong signals'),
(0, 'Harmonics generated at the transmitter'),
(0, 'Improper filtering in the transmitter'),
(0, 'Lack of receiver sensitivity and selectivity')]
choice_shuffled = sample(choice_list, len(choice_list))
choice_shuffled
def get_question(df):
    """Run an interactive quiz over the questions in *df*.

    Rows are presented in random order.  For each question the four answer
    choices are shuffled; the user enters 1-4 to answer or 5 to exit early.
    Prints a per-question verdict and a final score.  Returns None.

    Fixes over the previous version: an early exit (5) no longer counts the
    unanswered question in the total; non-numeric or out-of-range input is
    treated as an incorrect answer instead of crashing (previously '0'
    silently selected the last choice via negative indexing); the inner
    enumerate no longer shadows the iterrows index.
    """
    shuffled_df = df.sample(n=len(df))
    correct = 0
    count = 0
    for _, row in shuffled_df.iterrows():
        print(row['question_id'], row['question_english'])
        # Pair each choice with a correctness flag (1 = correct), then shuffle.
        choice_list = list(zip([1, 0, 0, 0],
                               [row['correct_answer_english'],
                                row['incorrect_answer_1_english'],
                                row['incorrect_answer_2_english'],
                                row['incorrect_answer_3_english']]))
        choices_shuffle = sample(choice_list, len(choice_list))
        for pos, (_, choice) in enumerate(choices_shuffle, start=1):
            print("{}. {}".format(pos, choice))
        print("Enter your choice: one of 1, 2, 3, or 4. Enter 5 for exit")
        try:
            a = int(input())
        except ValueError:
            a = None  # non-numeric input: falls through to "incorrect" below
        if a == 5:
            # Early exit: this question was not answered, so don't count it.
            break
        count += 1
        if a in (1, 2, 3, 4) and choices_shuffle[a - 1][0] == 1:
            print("Correct answer! ")
            correct += 1
        else:
            print("Incorrect answer! Study more")
            correct_answer = next(c for flag, c in choices_shuffle if flag == 1)
            print("The correct answer is {}".format(correct_answer))
        print()
    print("Quiz done for now")
    print("Got {} question(s) correct from a total of {} question(s)".format(correct, count))
    return None
# + heading_collapsed=true
get_question(ham_df_sample)
# + hidden=true
list(filter(lambda x: x[0] == 1, choice_shuffled))[0][1]
# + hidden=true
test_shuffle_df.head()
# + hidden=true
|
test-notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow]
# language: python
# name: conda-env-tensorflow-py
# ---
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# +
w = tf.Variable(0, dtype=tf.float32)
# cost = tf.add(tf.add(w**2, tf.multiply(-10., w)), 25.0)
cost = w**2 - 10*w + 25
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
init = tf.initializers.global_variables()
with tf.Session() as sess:
sess.run(init)
print(sess.run(w))
# +
session = tf.Session()
session.run(init)
for i in range(1000):
session.run(train)
if not i % 100:
print(session.run(w))
print(session.run(w))
session.close()
# +
# Quadratic cost whose coefficients are fed in at run time via a placeholder.
# With [1, -20, 100] the cost is w**2 - 20*w + 100 = (w - 10)**2, so gradient
# descent should drive w toward 10.
coefficients = np.array([[1.], [-20.], [100.]])
w = tf.Variable(0, dtype=tf.float32)
# x holds the three cost coefficients as a (3, 1) column vector.
x = tf.placeholder(tf.float32, [3, 1])
cost = x[0][0]* w ** 2 + x[1][0] * w + x[2][0]
# One gradient-descent step (learning rate 0.01) per session.run(train).
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
init = tf.initializers.global_variables()
# -
with tf.Session() as session:
session.run(init)
session.run(train, feed_dict={x:coefficients})
writer = tf.summary.FileWriter('./logs', session.graph)
print(f"Initialize value of w: {session.run(w)}")
for i in range(1000):
session.run(train, feed_dict={x:coefficients})
if not i % 100:
print(session.run(w))
print(f"Final value of w: {session.run(w)}")
# <b>to run tensorboard</b>
#
# > tensorboard --logdir=./logs
|
Course 2 - Improving Deep Neural Networks/week 3/Assignment/tensorflow_tutorial_basics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Data Source
#
# Dataset is derived from Fannie Mae’s [Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html) with all rights reserved by Fannie Mae. This processed dataset is redistributed with permission and consent from Fannie Mae. For the full raw dataset visit [Fannie Mae]() to register for an account and to download
#
# Instruction is available at NVIDIA [RAPIDS demo site](https://rapidsai.github.io/demos/datasets/mortgage-data).
#
# ### Prerequisite
#
# This notebook runs in a Dataproc cluster with GPU nodes, with [Spark RAPIDS](https://github.com/GoogleCloudDataproc/initialization-actions/tree/master/rapids) set up.
# -
# ### Define Data Input/Output location
#
# You need to first configure a bucket with Fannie Mae dataset as mentioned above. Here are some commands you can use once you have the tgz file (Example, for full mortgage dataset its `mortgage_2000-2016.tgz` which is about 23.3 GB).
#
# Replace `TARGET_BUCKET` with the bucket name you'd like to use.
# +
# Use pigz (Parallel gzip) to decompress the file, this will generate mortgage_2000-2016.tar (about 195 GB) file
# !pigz -d mortgage_2000-2016.tgz
# untar the file
# !tar xvf mortgage_2000-2016.tar -C mortgage_full/
# upload it to the desired bucket
# !gsutil -m cp -r mortgage_full/* gs://TARGET_BUCKET/mortgage_full/ &
# verify the upload
# !gsutil ls gs://TARGET_BUCKET/mortgage_full
# !gsutil du -hs gs://TARGET_BUCKET/mortgage_full
# -
# More information on Pigz [here](https://github.com/madler/pigz).
#
# Now lets configure data input/output locations.
# +
bucket = "TARGET_BUCKET"
orig_perf_path = 'gs://'+bucket+'/mortgage_full/perf/*'
orig_acq_path = 'gs://'+bucket+'/mortgage_full/acq/*'
train_path = 'gs://'+bucket+'/mortgage_full/train/'
test_path = 'gs://'+bucket+'/mortgage_full/test/'
tmp_perf_path = 'gs://'+bucket+'/mortgage_parquet_gpu/perf/'
tmp_acq_path = 'gs://'+bucket+'/mortgage_parquet_gpu/acq/'
# -
# ### Define ETL Process
#
# Define data schema and steps to do the ETL process:
# +
import time
from pyspark import broadcast, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
def _get_quarter_from_csv_file_name():
    """Column expression extracting the quarter tag (e.g. '2016Q1') from the input CSV file name.

    File names look like '<prefix>_<quarter>.<ext>': strip the extension
    first, then keep the part after the last underscore.
    """
    without_extension = substring_index(input_file_name(), '.', 1)
    return substring_index(without_extension, '_', -1)
_csv_perf_schema = StructType([
StructField('loan_id', LongType()),
StructField('monthly_reporting_period', StringType()),
StructField('servicer', StringType()),
StructField('interest_rate', DoubleType()),
StructField('current_actual_upb', DoubleType()),
StructField('loan_age', DoubleType()),
StructField('remaining_months_to_legal_maturity', DoubleType()),
StructField('adj_remaining_months_to_maturity', DoubleType()),
StructField('maturity_date', StringType()),
StructField('msa', DoubleType()),
StructField('current_loan_delinquency_status', IntegerType()),
StructField('mod_flag', StringType()),
StructField('zero_balance_code', StringType()),
StructField('zero_balance_effective_date', StringType()),
StructField('last_paid_installment_date', StringType()),
StructField('foreclosed_after', StringType()),
StructField('disposition_date', StringType()),
StructField('foreclosure_costs', DoubleType()),
StructField('prop_preservation_and_repair_costs', DoubleType()),
StructField('asset_recovery_costs', DoubleType()),
StructField('misc_holding_expenses', DoubleType()),
StructField('holding_taxes', DoubleType()),
StructField('net_sale_proceeds', DoubleType()),
StructField('credit_enhancement_proceeds', DoubleType()),
StructField('repurchase_make_whole_proceeds', StringType()),
StructField('other_foreclosure_proceeds', DoubleType()),
StructField('non_interest_bearing_upb', DoubleType()),
StructField('principal_forgiveness_upb', StringType()),
StructField('repurchase_make_whole_proceeds_flag', StringType()),
StructField('foreclosure_principal_write_off_amount', StringType()),
StructField('servicing_activity_indicator', StringType())])
_csv_acq_schema = StructType([
StructField('loan_id', LongType()),
StructField('orig_channel', StringType()),
StructField('seller_name', StringType()),
StructField('orig_interest_rate', DoubleType()),
StructField('orig_upb', IntegerType()),
StructField('orig_loan_term', IntegerType()),
StructField('orig_date', StringType()),
StructField('first_pay_date', StringType()),
StructField('orig_ltv', DoubleType()),
StructField('orig_cltv', DoubleType()),
StructField('num_borrowers', DoubleType()),
StructField('dti', DoubleType()),
StructField('borrower_credit_score', DoubleType()),
StructField('first_home_buyer', StringType()),
StructField('loan_purpose', StringType()),
StructField('property_type', StringType()),
StructField('num_units', IntegerType()),
StructField('occupancy_status', StringType()),
StructField('property_state', StringType()),
StructField('zip', IntegerType()),
StructField('mortgage_insurance_percent', DoubleType()),
StructField('product_type', StringType()),
StructField('coborrow_credit_score', DoubleType()),
StructField('mortgage_insurance_type', DoubleType()),
StructField('relocation_mortgage_indicator', StringType())])
_name_mapping = [
("WITMER FUNDING, LLC", "Witmer"),
("WELLS FARGO CREDIT RISK TRANSFER SECURITIES TRUST 2015", "Wells Fargo"),
("WELLS FARGO BANK, NA" , "Wells Fargo"),
("WELLS FARGO BANK, N.A." , "Wells Fargo"),
("WELLS FARGO BANK, NA" , "Wells Fargo"),
("USAA FEDERAL SAVINGS BANK" , "USAA"),
("UNITED SHORE FINANCIAL SERVICES, LLC D\\/B\\/A UNITED WHOLESALE MORTGAGE" , "United Seq(e"),
("U.S. BANK N.A." , "US Bank"),
("SUNTRUST MORTGAGE INC." , "Suntrust"),
("STONEGATE MORTGAGE CORPORATION" , "Stonegate Mortgage"),
("STEARNS LENDING, LLC" , "Stearns Lending"),
("STEARNS LENDING, INC." , "Stearns Lending"),
("SIERRA PACIFIC MORTGAGE COMPANY, INC." , "Sierra Pacific Mortgage"),
("REGIONS BANK" , "Regions"),
("RBC MORTGAGE COMPANY" , "RBC"),
("QUICKEN LOANS INC." , "Quicken Loans"),
("PULTE MORTGAGE, L.L.C." , "Pulte Mortgage"),
("PROVIDENT FUNDING ASSOCIATES, L.P." , "Provident Funding"),
("PROSPECT MORTGAGE, LLC" , "Prospect Mortgage"),
("PRINCIPAL RESIDENTIAL MORTGAGE CAPITAL RESOURCES, LLC" , "Principal Residential"),
("PNC BANK, N.A." , "PNC"),
("PMT CREDIT RISK TRANSFER TRUST 2015-2" , "PennyMac"),
("PHH MORTGAGE CORPORATION" , "PHH Mortgage"),
("PENNYMAC CORP." , "PennyMac"),
("PACIFIC UNION FINANCIAL, LLC" , "Other"),
("OTHER" , "Other"),
("NYCB MORTGAGE COMPANY, LLC" , "NYCB"),
("NEW YORK COMMUNITY BANK" , "NYCB"),
("NETBANK FUNDING SERVICES" , "Netbank"),
("NATIONSTAR MORTGAGE, LLC" , "Nationstar Mortgage"),
("METLIFE BANK, NA" , "Metlife"),
("LOANDEPOT.COM, LLC" , "LoanDepot.com"),
("J.P. MORGAN MADISON AVENUE SECURITIES TRUST, SERIES 2015-1" , "JP Morgan Chase"),
("J.P. MORGAN MADISON AVENUE SECURITIES TRUST, SERIES 2014-1" , "JP Morgan Chase"),
("JPMORGAN CHASE BANK, NATIONAL ASSOCIATION" , "JP Morgan Chase"),
("JPMORGAN CHASE BANK, NA" , "JP Morgan Chase"),
("JP MORGAN CHASE BANK, NA" , "JP Morgan Chase"),
("IRWIN MORTGAGE, CORPORATION" , "Irwin Mortgage"),
("IMPAC MORTGAGE CORP." , "Impac Mortgage"),
("HSBC BANK USA, NATIONAL ASSOCIATION" , "HSBC"),
("HOMEWARD RESIDENTIAL, INC." , "Homeward Mortgage"),
("HOMESTREET BANK" , "Other"),
("HOMEBRIDGE FINANCIAL SERVICES, INC." , "HomeBridge"),
("HARWOOD STREET FUNDING I, LLC" , "Harwood Mortgage"),
("GUILD MORTGAGE COMPANY" , "Guild Mortgage"),
("GMAC MORTGAGE, LLC (USAA FEDERAL SAVINGS BANK)" , "GMAC"),
("GMAC MORTGAGE, LLC" , "GMAC"),
("GMAC (USAA)" , "GMAC"),
("FREMONT BANK" , "Fremont Bank"),
("FREEDOM MORTGAGE CORP." , "Freedom Mortgage"),
("FRANKLIN AMERICAN MORTGAGE COMPANY" , "Franklin America"),
("FLEET NATIONAL BANK" , "Fleet National"),
("FLAGSTAR CAPITAL MARKETS CORPORATION" , "Flagstar Bank"),
("FLAGSTAR BANK, FSB" , "Flagstar Bank"),
("FIRST TENNESSEE BANK NATIONAL ASSOCIATION" , "Other"),
("FIFTH THIRD BANK" , "Fifth Third Bank"),
("FEDERAL HOME LOAN BANK OF CHICAGO" , "Fedral Home of Chicago"),
("FDIC, RECEIVER, INDYMAC FEDERAL BANK FSB" , "FDIC"),
("DOWNEY SAVINGS AND LOAN ASSOCIATION, F.A." , "Downey Mortgage"),
("DITECH FINANCIAL LLC" , "Ditech"),
("CITIMORTGAGE, INC." , "Citi"),
("CHICAGO MORTGAGE SOLUTIONS DBA INTERFIRST MORTGAGE COMPANY" , "Chicago Mortgage"),
("CHICAGO MORTGAGE SOLUTIONS DBA INTERBANK MORTGAGE COMPANY" , "Chicago Mortgage"),
("CHASE HOME FINANCE, LLC" , "JP Morgan Chase"),
("CHASE HOME FINANCE FRANKLIN AMERICAN MORTGAGE COMPANY" , "JP Morgan Chase"),
("CHASE HOME FINANCE (CIE 1)" , "JP Morgan Chase"),
("CHASE HOME FINANCE" , "JP Morgan Chase"),
("CASHCALL, INC." , "CashCall"),
("CAPITAL ONE, NATIONAL ASSOCIATION" , "Capital One"),
("CALIBER HOME LOANS, INC." , "Caliber Funding"),
("BISHOPS GATE RESIDENTIAL MORTGAGE TRUST" , "Bishops Gate Mortgage"),
("BANK OF AMERICA, N.A." , "Bank of America"),
("AMTRUST BANK" , "AmTrust"),
("AMERISAVE MORTGAGE CORPORATION" , "Amerisave"),
("AMERIHOME MORTGAGE COMPANY, LLC" , "AmeriHome Mortgage"),
("ALLY BANK" , "Ally Bank"),
("ACADEMY MORTGAGE CORPORATION" , "Academy Mortgage"),
("NO CASH-OUT REFINANCE" , "OTHER REFINANCE"),
("REFINANCE - NOT SPECIFIED" , "OTHER REFINANCE"),
("Other REFINANCE" , "OTHER REFINANCE")]
# Categorical (string) feature columns; each is dictionary-encoded to an
# integer id by _cast_string_columns_to_numeric before training.
cate_col_names = [
    "orig_channel",
    "first_home_buyer",
    "loan_purpose",
    "property_type",
    "occupancy_status",
    "property_state",
    "relocation_mortgage_indicator",
    "seller_name",
    "mod_flag"
]
# Numeric feature columns, plus the binary training label produced by the ETL.
label_col_name = "delinquency_12"
numeric_col_names = [
    "orig_interest_rate",
    "orig_upb",
    "orig_loan_term",
    "orig_ltv",
    "orig_cltv",
    "num_borrowers",
    "dti",
    "borrower_credit_score",
    "num_units",
    "zip",
    "mortgage_insurance_percent",
    "current_loan_delinquency_status",
    "current_actual_upb",
    "interest_rate",
    "loan_age",
    "msa",
    "non_interest_bearing_upb",
    label_col_name
]
# Final column order of the train/test output tables.
all_col_names = cate_col_names + numeric_col_names
def read_perf_csv(spark, path):
    """Load the pipe-delimited, headerless performance CSVs at `path`.

    Applies `_csv_perf_schema` and tags every row with the quarter parsed
    from its source file name.
    """
    reader = (spark.read.format('csv')
              .option('nullValue', '')
              .option('header', 'false')
              .option('delimiter', '|')
              .schema(_csv_perf_schema))
    df = reader.load(path)
    return df.withColumn('quarter', _get_quarter_from_csv_file_name())
def read_acq_csv(spark, path):
    """Load the pipe-delimited, headerless acquisition CSVs at `path`.

    Applies `_csv_acq_schema` and tags every row with the quarter parsed
    from its source file name.
    """
    reader = (spark.read.format('csv')
              .option('nullValue', '')
              .option('header', 'false')
              .option('delimiter', '|')
              .schema(_csv_acq_schema))
    df = reader.load(path)
    return df.withColumn('quarter', _get_quarter_from_csv_file_name())
def _parse_dates(perf):
    # Convert the raw string date columns of the performance table into real
    # DateType columns, and derive month/year/day parts of the reporting
    # period for later grouping. Note the conversion of
    # 'monthly_reporting_period' happens first: the month/year/day columns
    # are computed from the already-parsed date.
    return perf \
        .withColumn('monthly_reporting_period', to_date(col('monthly_reporting_period'), 'MM/dd/yyyy')) \
        .withColumn('monthly_reporting_period_month', month(col('monthly_reporting_period'))) \
        .withColumn('monthly_reporting_period_year', year(col('monthly_reporting_period'))) \
        .withColumn('monthly_reporting_period_day', dayofmonth(col('monthly_reporting_period'))) \
        .withColumn('last_paid_installment_date', to_date(col('last_paid_installment_date'), 'MM/dd/yyyy')) \
        .withColumn('foreclosed_after', to_date(col('foreclosed_after'), 'MM/dd/yyyy')) \
        .withColumn('disposition_date', to_date(col('disposition_date'), 'MM/dd/yyyy')) \
        .withColumn('maturity_date', to_date(col('maturity_date'), 'MM/yyyy')) \
        .withColumn('zero_balance_effective_date', to_date(col('zero_balance_effective_date'), 'MM/yyyy'))
def _create_perf_deliquency(spark, perf):
    # Compute per-loan delinquency summaries and attach a 12-month
    # delinquency label back onto every performance row.
    # (NOTE: "deliquency" misspelling kept — run_mortgage calls this name.)
    #
    # Step 1: per (quarter, loan) aggregate — worst delinquency status seen,
    # plus the earliest reporting period at which the loan was ever
    # 30 / 90 / 180+ days late (status >= 1 / 3 / 6 respectively).
    aggDF = perf.select(
        col("quarter"),
        col("loan_id"),
        col("current_loan_delinquency_status"),
        when(col("current_loan_delinquency_status") >= 1, col("monthly_reporting_period")).alias("delinquency_30"),
        when(col("current_loan_delinquency_status") >= 3, col("monthly_reporting_period")).alias("delinquency_90"),
        when(col("current_loan_delinquency_status") >= 6, col("monthly_reporting_period")).alias("delinquency_180")) \
        .groupBy("quarter", "loan_id") \
        .agg(
            max("current_loan_delinquency_status").alias("delinquency_12"),
            min("delinquency_30").alias("delinquency_30"),
            min("delinquency_90").alias("delinquency_90"),
            min("delinquency_180").alias("delinquency_180")) \
        .select(
            col("quarter"),
            col("loan_id"),
            (col("delinquency_12") >= 1).alias("ever_30"),
            (col("delinquency_12") >= 3).alias("ever_90"),
            (col("delinquency_12") >= 6).alias("ever_180"),
            col("delinquency_30"),
            col("delinquency_90"),
            col("delinquency_180"))
    # Step 2: join the aggregates back onto the monthly rows, renaming to
    # the timestamp_* / delinquency_12 / upb_12 working names.
    joinedDf = perf \
        .withColumnRenamed("monthly_reporting_period", "timestamp") \
        .withColumnRenamed("monthly_reporting_period_month", "timestamp_month") \
        .withColumnRenamed("monthly_reporting_period_year", "timestamp_year") \
        .withColumnRenamed("current_loan_delinquency_status", "delinquency_12") \
        .withColumnRenamed("current_actual_upb", "upb_12") \
        .select("quarter", "loan_id", "timestamp", "delinquency_12", "upb_12", "timestamp_month", "timestamp_year") \
        .join(aggDF, ["loan_id", "quarter"], "left_outer")
    # calculate the 12 month delinquency and upb values
    months = 12
    monthArray = [lit(x) for x in range(0, 12)]
    # explode on a small amount of data is actually slightly more efficient than a cross join
    # Step 3: replicate each row 12 times (month_y = 0..11) so every row
    # contributes to 12 shifted month buckets; "josh_mody_n" is the shifted
    # months-since-epoch bucket index (24000 offset = year 2000 * 12).
    testDf = joinedDf \
        .withColumn("month_y", explode(array(monthArray))) \
        .select(
            col("quarter"),
            floor(((col("timestamp_year") * 12 + col("timestamp_month")) - 24000) / months).alias("josh_mody"),
            floor(((col("timestamp_year") * 12 + col("timestamp_month")) - 24000 - col("month_y")) / months).alias("josh_mody_n"),
            col("ever_30"),
            col("ever_90"),
            col("ever_180"),
            col("delinquency_30"),
            col("delinquency_90"),
            col("delinquency_180"),
            col("loan_id"),
            col("month_y"),
            col("delinquency_12"),
            col("upb_12")) \
        .groupBy("quarter", "loan_id", "josh_mody_n", "ever_30", "ever_90", "ever_180", "delinquency_30", "delinquency_90", "delinquency_180", "month_y") \
        .agg(max("delinquency_12").alias("delinquency_12"), min("upb_12").alias("upb_12")) \
        .withColumn("timestamp_year", floor((lit(24000) + (col("josh_mody_n") * lit(months)) + (col("month_y") - 1)) / lit(12))) \
        .selectExpr('*', 'pmod(24000 + (josh_mody_n * {}) + month_y, 12) as timestamp_month_tmp'.format(months)) \
        .withColumn("timestamp_month", when(col("timestamp_month_tmp") == lit(0), lit(12)).otherwise(col("timestamp_month_tmp"))) \
        .withColumn("delinquency_12", ((col("delinquency_12") > 3).cast("int") + (col("upb_12") == 0).cast("int")).alias("delinquency_12")) \
        .drop("timestamp_month_tmp", "josh_mody_n", "month_y")
    # Step 4: attach the windowed label to the original monthly rows by
    # (quarter, loan, year, month), then drop the working key columns.
    return perf.withColumnRenamed("monthly_reporting_period_month", "timestamp_month") \
        .withColumnRenamed("monthly_reporting_period_year", "timestamp_year") \
        .join(testDf, ["quarter", "loan_id", "timestamp_year", "timestamp_month"], "left") \
        .drop("timestamp_year", "timestamp_month")
def _create_acquisition(spark, acq):
    """Clean the acquisition table.

    Canonicalizes seller names through the `_name_mapping` lookup (the raw
    value is preserved in `old_name`; unmapped sellers keep their original
    name via `coalesce`) and parses the MM/yyyy date columns.

    Bug fix: the original ended with a trailing line-continuation backslash
    after the last `.withColumn(...)`, which spliced the following `def`
    statement onto this expression (a syntax error). The backslash is removed.
    """
    nameMapping = spark.createDataFrame(_name_mapping, ["from_seller_name", "to_seller_name"])
    return acq.join(nameMapping, col("seller_name") == col("from_seller_name"), "left") \
        .drop("from_seller_name") \
        .withColumn("old_name", col("seller_name")) \
        .withColumn("seller_name", coalesce(col("to_seller_name"), col("seller_name"))) \
        .drop("to_seller_name") \
        .withColumn("orig_date", to_date(col("orig_date"), "MM/yyyy")) \
        .withColumn("first_pay_date", to_date(col("first_pay_date"), "MM/yyyy"))
def _gen_dictionary(etl_df, col_names):
    """Build a dictionary mapping each categorical value to a dense integer id.

    Ids are assigned per column in descending frequency order (most common
    value gets id 1). Returns a DataFrame with (column_id, data, id).
    """
    exploded = etl_df.select(posexplode(array([col(name) for name in col_names])))
    value_counts = (exploded
                    .withColumnRenamed("pos", "column_id")
                    .withColumnRenamed("col", "data")
                    .filter("data is not null")
                    .groupBy("column_id", "data")
                    .count())
    by_frequency = Window.partitionBy("column_id").orderBy(desc("count"))
    return value_counts.withColumn("id", row_number().over(by_frequency)).drop("count")
def _cast_string_columns_to_numeric(spark, input_df):
    """Replace every categorical string column with its integer dictionary id.

    Builds one shared (cached) dictionary over `cate_col_names`, then
    broadcast-joins the per-column slice of it against the input, swapping
    the original string column for the numeric `id`.
    """
    dictionary = _gen_dictionary(input_df, cate_col_names).cache()
    result = input_df
    for column_id, name in enumerate(cate_col_names):
        column_mapping = (dictionary
                          .filter(col("column_id") == column_id)
                          .drop("column_id")
                          .withColumnRenamed("data", name))
        result = (result
                  .join(broadcast(column_mapping), name, "left")
                  .drop(name)
                  .withColumnRenamed("id", name))
    return result
def run_mortgage(spark, perf, acq):
    """End-to-end mortgage ETL.

    Parses and labels the performance data, cleans the acquisition data,
    joins them on (loan_id, quarter), and splits into train/test with all
    of 2016 held out as the test set.

    Returns:
        (train_df, test_df): fully numeric DataFrames with the binary
        `delinquency_12` label and nulls filled with 0.
    """
    def _finalize(df):
        # Shared train/test post-processing (previously duplicated inline):
        # encode categoricals as ids, binarize the label, fill nulls with 0.
        return _cast_string_columns_to_numeric(spark, df) \
            .select(all_col_names) \
            .withColumn(label_col_name, when(col(label_col_name) > 0, 1).otherwise(0)) \
            .fillna(float(0))

    parsed_perf = _parse_dates(perf)
    perf_deliqency = _create_perf_deliquency(spark, parsed_perf)
    cleaned_acq = _create_acquisition(spark, acq)
    df = perf_deliqency.join(cleaned_acq, ["loan_id", "quarter"], "inner")
    # Hold out all four quarters of 2016 for testing.
    test_quarters = ['2016Q1', '2016Q2', '2016Q3', '2016Q4']
    train_df = df.filter(~df.quarter.isin(test_quarters)).drop("quarter")
    test_df = df.filter(df.quarter.isin(test_quarters)).drop("quarter")
    return _finalize(train_df), _finalize(test_df)
# -
# ### Define Spark conf and Create Spark Session
# For details explanation for spark conf, please go to Spark RAPIDS [config guide](https://nvidia.github.io/spark-rapids/docs/configs.html).
# +
if "sc" in globals():
sc.stop()
conf = SparkConf().setAppName("MortgageETL")
conf.set('spark.rapids.sql.explain', 'ALL')
conf.set("spark.executor.instances", "20")
conf.set("spark.executor.cores", "7")
conf.set("spark.task.cpus", "1")
conf.set("spark.rapids.sql.concurrentGpuTasks", "2")
conf.set("spark.executor.memory", "4g")
conf.set("spark.rapids.memory.pinnedPool.size", "2G")
conf.set("spark.executor.memoryOverhead", "2G")
conf.set("spark.executor.extraJavaOptions", "-Dai.rapids.cudf.prefer-pinned=true")
conf.set("spark.locality.wait", "0s")
conf.set("spark.sql.files.maxPartitionBytes", "512m")
conf.set("spark.executor.resource.gpu.amount", "1")
conf.set("spark.task.resource.gpu.amount", "0.142")
conf.set("spark.plugins", "com.nvidia.spark.SQLPlugin")
conf.set("spark.rapids.sql.hasNans", "false")
conf.set('spark.rapids.sql.batchSizeBytes', '512M')
conf.set('spark.rapids.sql.reader.batchSizeBytes', '768M')
conf.set('spark.rapids.sql.variableFloatAgg.enabled', 'true')
spark = SparkSession.builder \
.config(conf=conf) \
.getOrCreate()
sc = spark.sparkContext
# -
# ### Read CSV data and Transcode to Parquet
# Lets transcode the data first
start = time.time()
# we want a few big files instead of lots of small files
spark.conf.set('spark.sql.files.maxPartitionBytes', '200G')
# Transcode the (smaller) acquisition CSVs into 20 parquet files.
acq = read_acq_csv(spark, orig_acq_path)
acq.repartition(20).write.parquet(tmp_acq_path, mode='overwrite')
# Transcode the (much larger) performance CSVs into 80 parquet files.
perf = read_perf_csv(spark, orig_perf_path)
perf.coalesce(80).write.parquet(tmp_perf_path, mode='overwrite')
end = time.time()
print(end - start)  # seconds spent transcoding CSV -> parquet
# ### Execute ETL Code Defined in 1st Cell
# Now let's actually process the data
start = time.time()
# Smaller read partitions and more shuffle partitions for the heavy ETL joins.
spark.conf.set('spark.sql.files.maxPartitionBytes', '1G')
spark.conf.set('spark.sql.shuffle.partitions', '160')
perf = spark.read.parquet(tmp_perf_path)
acq = spark.read.parquet(tmp_acq_path)
train_out, test_out = run_mortgage(spark, perf, acq)
train_out.write.parquet(train_path, mode='overwrite')
end = time.time()
print(end - start)  # seconds for ETL + train-set write
test_out.write.parquet(test_path, mode='overwrite')
end = time.time()
print(end - start)  # cumulative seconds, now including the test-set write
# ### Print Physical Plan
train_out.explain()
# ##
|
docs/demo/GCP/Mortgage-ETL-GPU.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kernel Density Estimator(KDE)
# 使用高斯核函数估计样本的概率分布:
# $$
# p(\mathbf{x})=\frac{1}{N} \sum_{n=1}^{N} \frac{1}{\left(2 \pi h^{2}\right)^{1 / 2}} \exp \left\{-\frac{\left\|\mathbf{x}-\mathbf{x}_{n}\right\|^{2}}{2 h^{2}}\right\}
# $$
import numpy as np
from sklearn.neighbors import KernelDensity
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import numpy as np
# %matplotlib inline
def gaussian_kernel(x, y, h):
    """Gaussian kernel with bandwidth h.

    Squared distances are summed over the last axis of (x - y); the
    normalization is the 1-D constant 1/sqrt(2*pi*h^2), matching the
    formula in the notebook text.
    """
    sq_dist = ((x - y) ** 2).sum(axis=-1)
    normalizer = (2 * np.pi * h * h) ** 0.5
    return np.exp(-sq_dist / (2 * h * h)) / normalizer
class NaiveKDE():
    """Naive kernel density estimator.

    Estimates p(x) = (1/N) * sum_n K(x, x_n) over the fitted samples,
    where K is `kernel_func` evaluated with bandwidth `bandwidth`.
    """

    def __init__(self, kernel_func=gaussian_kernel, bandwidth=1):
        # kernel_func(x, y, h) must broadcast over leading axes and reduce
        # the trailing feature axis.
        self.kernel_func = kernel_func
        self.h = bandwidth

    def fit(self, data):
        """Store training samples; 1-D input is treated as N samples of dim 1.

        Returns self (sklearn-style), so `NaiveKDE().fit(data)` chains.
        """
        data = np.asarray(data)
        assert np.ndim(data) <= 2
        if np.ndim(data) == 1:
            data = data[:, None]
        self.data = data[None, :, :]  # 1 x N x K
        return self

    def evaluate(self, x):
        """Return the estimated density at each of the M query points in x."""
        x = np.asarray(x)
        if np.ndim(x) == 1:
            x = x[:, None]
        assert x.shape[-1] == self.data.shape[-1]
        # x is guaranteed 2-D here (the original's ndim<=1 branch was
        # unreachable: 1-D input was expanded above, 0-d input fails the
        # shape assert), so the point count is simply the leading dim.
        x_num = x.shape[0]
        # (M x 1 x K) vs (1 x N x K) -> kernel values (M x N), averaged over N.
        return self.kernel_func(x.reshape(x_num, 1, -1), self.data,
                                self.h).mean(axis=-1)
def kde_test():
    """Fit the naive KDE on 1000 N(0,1) samples and plot it against the true density."""
    np.random.seed(42)
    data = np.random.randn(1000)
    kde = NaiveKDE(gaussian_kernel, bandwidth=1)
    kde.fit(data)
    x = np.arange(-10, 10, step=0.1)
    p = kde.evaluate(x)
    # Bug fix: the legend said 'h=0.5' but the bandwidth actually used is 1.
    plt.plot(x, p, lw=2, label='naive kde(h=1)')
    plt.hist(data, density=True)
    gaussian = multivariate_normal(0, 1)
    plt.plot(x, gaussian.pdf(x), 'r:', lw=2, label='true')
    plt.legend(loc='upper left')
    plt.show()
kde_test()
|
base/kde/toy_kde.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import brainlit
from brainlit.utils.ngl_pipeline import NeuroglancerSession
from brainlit.utils import upload_to_neuroglancer as upload
from brainlit.utils import upload_skeleton as upload_skel
from brainlit.viz.visualize import *
# # Uploading Brain Images in the Octree Format
# ### upload_to_neuroglancer.py and upload_skeletons.py are scripts for uploading entire brain volumes, or uploading specific resolutions onto AWS or a local directory. To use these scripts, simply run each with arguments for a destination directory (usually an s3 bucket) and the source directory (usually titled "20xx-xx-xx")
# ### Data must be tif files arranged in folders where the highest level corresponds to a single, low res image. Additionally, the highest level directory should include a folder of consensus swcs and a transform.txt file containing the metadata (origin, size) for the volume
#
# ### Files should be arranged as octree with 1-8 indicating volume octant
#
# ### 
#
#
# ## IMPORTANT! CloudVolume by default does not accept float resolutions due to bug in its downsampling code. To get around this, pull the changes from the Neurodata fork of cloudvolume here: https://github.com/neurodata/cloud-volume/blob/voxel-res-non-int/cloudvolume/datasource/precomputed/metadata.py
# ### Additionally, make sure to include your aws keys in /.cloudvolume/secrets/. Find more info here: https://github.com/seung-lab/cloud-volume
# ### Here are example usages of the functions in the upload scripts and what each is expecting
# # Part 1: Uploading images
# +
data_dir = "../../../tests/data/"
dest_dir = "./test_precomputed/"
num_res = 2
# -
# Gather octree metadata: per-resolution file lists, per-chunk binary paths,
# the voxel spacing, and the dimensions of a single tif image.
files, bin_paths, vox_size, tiff_dims = upload.get_volume_info(data_dir, num_res + 5, channel=0)
# Bug fix: a bare `print` statement (no call) was a no-op — it just
# evaluated the builtin without printing anything, so it is removed.
print("Low res files: " + str(files[0]))
print("\nHigh res files: " + str(files[1]))
print("---")
print("Single image binary: " + str(bin_paths[0]))
print("\nMultiple image binaries: " + str(bin_paths[1]))
print("---")
print("Size of individual tif image: " + str(tiff_dims))
print("---")
print("Highest resolution voxel spacing in volume: " + str(vox_size))
# ### Cloudvolume image layers are created with the number of resolutions in the original data. These do not hold image data until it is pushed to them
# ### Note: To upload to s3: "s3://" is required as before the file path
# +
vols = upload.create_image_layer("file://" + dest_dir,tiff_dims,vox_size, num_res+5)
print("Number of volumes: " + str(len(vols)))
print("mips: " + str(vols[0].mip) + ' and ' + str(vols[1].mip))
#print("Volumes info: " + str(vols[0].info))
print("---")
print("High res volume info: " + str(vols[0].info['scales'][5]))
print("\nLow res volume info: " + str(vols[1].info['scales'][6]))
# -
# ### Uploading can be done with either Joblib parallel or non-parrallel sequential if the cpu power isn't there.
# # Uploading large volumes will take several days for the highest resolutions, make sure your pc does not shut off during this time. If an upload fails for any reason, you can start it again by specifying which mip you wish to upload (0-6 usually) for upload_to_neuroglancer.py
# +
# %%capture
u1=upload.upload_chunks(vols[0], files[0], bin_paths[0], parallel=False) # Low res
u2=upload.upload_chunks(vols[1], files[1], bin_paths[1], parallel=False) # High res
img0 = np.squeeze(vols[0][:,:,:])
img1 = np.squeeze(vols[1][:,:,:])
# -
# # Visualize your data with NeuroglancerSession or with cloudvolume by reading in volumes as np arrays
plot_image_2d(img0[:,:,100])
plot_image_2d(img1[:,:,100])
# # Part 2: Uploading Skeletons
# +
# Slight difference from the image-upload info: skeleton upload also needs
# the volume origin (its offset in physical space) to place SWC nodes.
origin, vox_size, tiff_dims = upload_skel.get_volume_info(data_dir, num_res+5)
print("Volume origin in space: " + str(origin))
print("---")
print("Size of individual tif image: " + str(tiff_dims))
print("---")
print("Highest resolution voxel spacing in volume: " + str(vox_size))
# -
skel_dest = "./test_skeletons/"
skel_layer = upload_skel.create_skeleton_layer("file://"+skel_dest,vox_size,tiff_dims)
#only highest res is used for any skeleton work
print("Highest res skel info: " + str(skel_layer.info['scales'][0]))
# +
skeletons,segids = upload_skel.create_skel_segids(data_dir+"consensus-swcs/",origin)
print("Skeleton info : " + str(skeletons))
print(("---"))
print('IDs of skeletons to be uploaded:' + str(segids))
for skel in skeletons:
skel_layer.skeleton.upload(skel)
# -
# ### At this point, skeletons are uploaded to a local directory but skeletons alone don't have much use without corresponding images. To get a good visual pull chunks of images around skeletons with ngl_pipline
# # Additional resources
# ### Neuroglancer: https://github.com/google/neuroglancer and https://neuroglancer-demo.appspot.com - Useful for making visualizations of smaller local volumes, make sure to use cors_webserver.py in order to allow the web app to read your local volumes
|
docs/archive/uploading_brains.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup
import requests
import pymongo
from splinter import Browser
# !which chromedriver
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser(driver_name='chrome', **executable_path, headless=False)
# collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.
url="https://mars.nasa.gov/news/"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
news_title = soup.find("div", class_="content_title").get_text()
news_p = soup.find("div", class_="article_teaser_body").get_text()
print(news_title)
print(news_p)
# +
# Use splinter to navigate the site and find the image url for the current Featured Mars Image
# assign the url string to a variable called featured_image_url
url="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
browser.click_link_by_partial_text('FULL IMAGE')
# Error Message: element not interactable:
browser.is_element_present_by_text("more info", wait_time=0.5)
browser.click_link_by_partial_text('more info')
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
fig_1 = soup.find("figure", class_="lede").a['href']
featured_image_url = "https://www.jpl.nasa.gov" + fig_1
print(featured_image_url)
# +
# the latest Mars weather tweet from the page. Save the tweet text for the weather report as a
# variable called mars_weather.
url = "https://twitter.com/marswxreport?lang=en"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
originator = soup.find_all("div", class_="stream-item-header")
for i in originator:
name = i.a.span.strong.text
if (name == None):
continue
print(name)
if (name != '<NAME>'):
continue
j = i.parent.find("div", class_="js-tweet-text-container").p.text
print(j)
break
mars_weather = soup.find("div", class_="js-tweet-text-container").p.text
print(mars_weather)
# +
# Scrape the Mars facts table and render it as HTML.
import pandas as pd

url = "http://space-facts.com/mars/"
# pd.read_html returns a list of DataFrames, one per <table> on the page.
tables = pd.read_html(url)
# Bug fix: to_html is a method — it must be CALLED to produce the HTML
# string (the original assigned the bound method object itself).
mars_html_table = tables[0].to_html()
# Also persist the rendered table to disk for later use.
with open('mars_stats.html', 'w') as fo:
    tables[0].to_html(fo)
tables[0]
# +
# Collect the title and full-resolution image URL for each Mars
# hemisphere, accumulating dicts in `hemisphere_image_urls`.
hemisphere_image_urls = []
entry = {}

executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser(driver_name='chrome', **executable_path, headless=False)

url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
# Each search result links to a per-hemisphere detail page.
links = soup.find_all("a", class_="itemLink product-item")
for link in links:
    print(link)
    img_url = "https://astrogeology.usgs.gov" + link['href']
    # Visit the detail page with the browser so JS-rendered content loads.
    browser.visit(img_url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    # The first item in the "downloads" list is the full-size image URL.
    download_link = soup.find("div", class_="downloads").ul.li.a['href']
    # Removed a no-op `type(link.h3.text)` statement that discarded its
    # result; build a fresh record per iteration instead of mutating a
    # shared dict.
    entry = {'title': link.h3.text, 'img_url': download_link}
    hemisphere_image_urls.append(entry)
    entry = {}
    print(download_link)
    print(link.h3.text)
    print('*' * 50)
print(hemisphere_image_urls)
|
mission_to_mars.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inception
# Also known as GoogLeNet , it is a 22-layer network that won the 2014 ILSVRC Championship.
#
# 1. The original intention of the design is to expand the width and depth on its basis .
#
# 2. The design motivation comes from the observation that network performance can generally be improved by increasing the depth and width of the network and by enlarging the data set, but doing so also increases the number of parameters (making overfitting easier), uses computing resources inefficiently, and requires high-quality data sets that are expensive to produce.
#
# 3. Its design philosophy is to change the full connection to a sparse architecture and try to change it to a sparse architecture inside the convolution.
#
# 4. The main idea is to design an inception module and increase the depth and width of the network by continuously copying these inception modules , but GooLeNet mainly extends these inception modules in depth.
#
# There are four parallel channels in each inception module , and concat is performed at the end of the channel .
#
# 1x1 conv is mainly used to reduce the dimensions in the article to avoid calculation bottlenecks.
# It also adds additional softmax loss to some branches of the previous network layer to avoid the problem of gradient disappearance.
#
# **Four parallel channels:**
#
# * 1x1 conv: Borrowed from [ Network in Network ], the input feature map can be reduced in dimension and upgraded without too much loss of the input spatial information;
# * 1x1conv followed by 3x3 conv: 3x3 conv increases the receptive field of the feature map, and changes the dimension through 1x1conv;
# * 1x1 conv followed by 5x5 conv: 5x5 conv further increases the receptive field of the feature map, and changes the dimensions through 1x1 conv;
# * 3x3 max pooling followed by 1x1 conv: The author believes that although the pooling layer will lose space information, it has been effectively applied in many fields, which proves its effectiveness, so a parallel channel is added, and it is changed by 1x1 conv Its output dimension.
#
#
# 
#
# #### Complete network design : -
#
#
# 
# Two ways to improve network performance:
#
# **The most direct way to improve the performance of deep neural networks is to increase their size . This includes depth, the number of levels, and their width, the size of each level unit .**
#
# Another easy and safe way is to **increase the size of the training data**.
#
# However, *both methods have two disadvantages* .
#
# Larger models mean more parameters, which makes it easier for the network to overfit , especially when the number of label samples in the training data set is limited.
#
# At the same time, because the production of high-quality training sets is tricky and expensive ,especially when some human experts do it , there is a large error rate . As shown below.
#
# 
#
#
# *Another shortcoming is that uniformly increasing the size of the network will increase the use of computing resources* . For example, in a deep network, if two convolutions are chained, any unified improvement of their convolution kernels will cause demand for resources.
#
# Power increase: If the increased capacity is inefficient, for example, if most of the weights end with 0 , then a lot of computing resources are wasted. But because the computing resources are always limited, an effective computational distribution always tends to increase the size of the model indiscriminately, and even the main objective goal is to improve the performance of the results.
#
# The basic method to solve these two problems is to finally change the fully connected network to a sparse architecture, even inside the convolution.
#
# The details of the GooLeNet network layer are shown in the following table:
#
# 
#
# **To sum up:**
#
# * 128 1x1 convolution kernels are used to reduce dimensions and modify linear activation units
# * A fully connected layer of 1024 units and a modified linear activation unit;
# * A dropout layer that drops neuron connections with a 70% probability;
# * A linear layer with softmax loss as classification
# Predict 1000 categories, but removed during the inference phase
#
# **Training Methodology**
#
# >The momentum is set to 0.9 and the learning rate is set to decrease by 4% every 8 epochs.
#
# >Seven models were trained . To make the problem more detailed, some models were trained on small crops, and some were trained on large crops .
#
# >The factors that make the model train well include : the sampling of patches of various sizes in the image , the size of which is evenly distributed between 8% and 100%, and the aspect ratio between 3/4 and 4/3.
#
# > **Illumination changes have an effect on avoiding overfitting.**
#
# > **Later, random interpolation is used to resize the image.**
#
#
#
#
#
# # Inception-v2(2015)
#
#
# This architecture is a landmark in the development of deep network models . The most prominent contribution is to propose a normalized Batch Normalization layer to unify the output range of the network. It is fixed in a relatively uniform range. If the BN layer is not added, the value range of the network input and output of each layer is greatly different, so the size of the learning rate will be different. The BN layer avoids this situation This accelerates the training of the network and gives the network regular terms to a certain extent , reducing the degree of overfitting of the network. In the subsequent development of network models, most models have more or less added BN layers to the model.
#
# In this paper, the BN layer is standardized before being input to the activation function. At the same time, VGG uses 2 3x3 convs instead of 5x5 convs in the inception module to reduce the amount of parameters and speed up the calculation.
#
# Algorithm advantages:
# 1. **Improved learning rate** : In the BN model, a higher learning rate is used to accelerate training convergence, but it will not cause other effects. Because if the scale of each layer is different, then the learning rate required by each layer is different. The scale of the same layer dimension often also needs different learning rates. Usually, the minimum learning is required to ensure the loss function to decrease, but The BN layer keeps the scale of each layer and dimension consistent, so you can directly use a higher learning rate for optimization.
#
#
# 2. **Remove the dropout layer** : The BN layer makes full use of the goals of the dropout layer. Remove the dropout layer from the BN-Inception model, but no overfitting will occur.
#
#
# 3. **Decrease the attenuation coefficient of L2 weight** : Although the L2 loss controls the overfitting of the Inception model, the loss of weight has been reduced by five times in the BN-Inception model.
#
#
# 4. **Accelerate the decay of the learning rate** : When training the Inception model, we let the learning rate decrease exponentially. Because our network is faster than Inception, we will increase the speed of reducing the learning rate by 6 times.
#
#
# 5. **Remove the local response layer** : Although this layer has a certain role, but after the BN layer is added, this layer is not necessary.
#
#
# 6. **Scramble training samples more thoroughly** : We scramble training samples, which can prevent the same samples from appearing in a mini-batch. This can improve the accuracy of the validation set by 1%, which is the advantage of the BN layer as a regular term. In our method, random selection is more effective when the model sees different samples each time.
#
#
# 7. **To reduce image distortion**: Because BN network training is faster and observes each training sample less often, we want the model to see a more realistic image instead of a distorted image.
#
# # Inception-v3-2015
#
# This architecture focuses on how to replace a large convolution kernel with two or more smaller kernels, and introduces **asymmetric convolutions, i.e. one-dimensional (nx1 and 1xn) convolutions**.
# It also proposes remedies for the loss of spatial information that the pooling layer can cause;
# there are ideas such as **label smoothing** and **BN-auxiliary** classifiers.
#
# Experiments were performed on inputs with different resolutions . The results show that although low-resolution inputs require more time to train, the accuracy and high-resolution achieved are not much different.
#
# **The computational cost is reduced while improving the accuracy of the network.**
#
# **General Design Principles**
#
# We will describe some design principles that have been proposed through extensive experiments with different architectural designs for convolutional networks. At this point, full use of the following principles can be guessed, and some additional experiments in the future will be necessary to estimate their accuracy and effectiveness.
#
# 1. **Prevent bottlenecks in characterization** . The so-called bottleneck of feature description is that a large proportion of features are compressed in the middle layer (such as using a pooling operation). This operation will cause the loss of feature space information and the loss of features. Although the operation of pooling in CNN is important, there are some methods that can be used to avoid this loss as much as possible (I note: later hole convolution operations ).
#
# 2. **The higher the dimensionality of the feature, the faster the training converges** . That is, the independence of features has a great relationship with the speed of model convergence. The more independent features, the more thoroughly the input feature information is decomposed. It is easier to converge if the correlation is strong. Hebbin principle : fire together, wire together.
#
# 3. **Reduce the amount of calculation through dimensionality reduction** . In v1, the feature is first reduced by 1x1 convolutional dimensionality reduction. There is a certain correlation between different dimensions. Dimension reduction can be understood as a lossless or low-loss compression. Even if the dimensions are reduced, the correlation can still be used to restore its original information.
#
# 4. **Balance the depth and width of the network** . Only by increasing the depth and width of the network in the same proportion can the performance of the model be maximized.
#
#
# **Factorizing Convolutions with Large Filter Size**
#
# GooLeNet uses many dimensionality reduction methods, which has achieved certain results. Consider the example of a 1x1 convolutional layer used to reduce dimensions before a 3x3 convolutional layer. In the network, we expect the network to be highly correlated between the output neighboring elements at the activation function. Therefore, we can reduce their activation values before aggregation , which should generate similar local expression descriptions.
#
# This paper explores experiments to decompose the network layer into different factors under different settings in order to improve the computational efficiency of the method . Because the Inception network is fully convolutional, each weight value corresponds to a product operation each time it is activated.
#
# Therefore, any reduction in computational cost will result in a reduction in parameters. This means that we can use some suitable decomposition factors to reduce the parameters and thus speed up the training.
#
# 3.1 **Factorizing Convolutions with Large Filter Size**
#
# With the same number of convolution kernels, larger convolution kernels (such as 5x5 or 7x7) are more expensive to calculate than 3x3 convolution kernels , which is about a multiple of 25/9 = 2.78. Of course, the 5x5 convolution kernel can obtain more correlations between the information and activation units in the previous network, but under the premise of huge consumption of computing resources, a physical reduction in the size of the convolution kernel still appears.
#
# However, we still want to know whether a 5x5 convolutional layer can be replaced by a multi-layer convolutional layer with fewer parameters when the input and output sizes are consistent . If we scale the calculation map of 5x5 convolution, we can see that each output is like a small fully connected network sliding on the input window with a size of 5x5. Refer to Figure 1.
#
# 
#
#
#
# Therefore, we have developed a network that explores translation invariance and replaces one layer of convolution with two layers of convolution: the first layer is a 3x3 convolution layer and the second layer is a fully connected layer . Refer to Figure 1. We ended up replacing two 5x5 convolutional layers with two 3x3 convolutional layers. Refer to Figure 4 Figure 5. This operation can realize the weight sharing of neighboring layers. It is about (9 + 9) / 25 times reduction in computational consumption.
#
# 
# 
#
#
# **Spatial Factorization into Asymmetric Convolutions**
#
# We are wondering if the convolution kernel can be made smaller, such as 2x2, but there is an asymmetric method that can be better than this method. That is to use nx1 size convolution. For example, using the [3x1 + 1x3] convolution layer. In this case, a single 3x3 convolution has the same receptive field. Refer to Figure 3. This asymmetric method can save [((3x3)-(3 + 3)) / (3x3) = 33%] computing resources, and replacing two 2x2 only saves [11%] Computing resources.
#
# In theory, we can have a deeper discussion and use the convolution of [1xn + nx1] instead of the convolutional layer of nxn. Refer to Figure 6. But this situation is not very good in the previous layer, but it can perform better on a medium-sized feature map [mxm, m is between 12 and 20]. In this case, use [1x7 + 7x1] convolutional layer can get a very good result.
#
#
# **Utility of Auxiliary Classifiers**
#
# Inception-v1 introduced some auxiliary classifiers (referring to some branches of the previous layer adding the softmax layer to calculate the loss back propagation) to improve the aggregation problem in deep networks. The original motive is to pass the gradient back to the previous convolutional layer , so that they can effectively and improve the aggregation of features and avoid the problem of vanishing gradients.
#
# Traditionally, pooling layers are used in convolutional networks to reduce the size of feature maps . In order to avoid bottlenecks in the expression of spatial information, the number of convolution kernels in the network can be expanded before using max pooling or average pooling.
#
# For example, for a dxd network layer with K feature maps, to generate a network layer with 2K [d / 2 xd / 2] feature maps, we can use 2K convolution kernels with a step size of 1. Convolution and then add a pooling layer to get it, then this operation requires [2d 2 K 2 ]. But using pooling instead of convolution, the approximate operation is [2 * (d / 2) 2 xK 2 ], which reduces the operation by four times. However, this will cause a description bottleneck, because the feature map is reduced to [(d / 2) 2 xK], which will definitely cause the loss of spatial information on the network. Refer to Figure 9. However, we have adopted a different method to avoid this bottleneck, refer to Figure 10. That is, two parallel channels are used , one is a pooling layer (max or average), the step size is 2, and the other is a convolution layer , and then it is concatenated during output.
#
# 
#
# 
#
#
#
#
#
# ### Inception-v4-2016
# After ResNet appeared, ResNet residual structure was added.
#
# It is based on Inception-v3 and added the skip connection structure in ResNet. Finally, under the structure of 3 residual and 1 inception-v4 , it reached the top-5 error 3.08% in CLS (ImageNet calssification) .
#
# 1-Introduction
# Residual conn works well when training very deep networks. Because the Inception network architecture can be very deep, it is reasonable to use residual conn instead of concat.
#
# Compared with v3, Inception-v4 has more unified simplified structure and more inception modules.
#
# 
#
# The big picture of Inception-v4:
#
# 
#
# Fig9 is an overall picture, and Fig3,4,5,6,7,8 are all local structures. For the specific structure of each module, see the end of the article.
#
# ### Residual Inception Blocks
#
# For the residual version in the Inception network, we use an Inception module that consumes less than the original Inception. The convolution kernel (followed by 1x1) of each Inception module is used to modify the dimension, which can compensate the reduction of the Inception dimension to some extent.
#
#
# One is named **Inception-ResNet-v1**, which is consistent with the calculation cost of Inception-v3.
# One is named **Inception-ResNet-v2**, which is consistent with the calculation cost of Inception-v4.
#
# Figure 15 shows the structure of both. However, Inception-v4 is actually slower in practice, probably because it has more layers.
#
# Another small technique is that we use the BN layer in the header of the traditional layer in the Inception-ResNet module, but not in the header of the summations. ** There is reason to believe that the BN layer is effective. But in order to add more Inception modules, we made a compromise between the two.
#
# Inception-ResNet-v1
#
# 
#
# Inception-ResNet-v2
#
# 
#
# **Scaling of the Residuals**
#
# This paper finds that when the number of convolution kernels exceeds 1,000, the residual variants start to show instability and the network "dies" early in training: within a few iterations the last layer before the average pooling layer begins to output only zeros. This situation cannot be prevented by reducing the learning rate or by adding a BN layer. Kaiming He's ResNet article also mentions this phenomenon.
#
# This article finds that scale can stabilize the training process before adding the residual module to the activation layer . This article sets the scale coefficient between 0.1 and 0.3.
#
# In order to prevent unstable training of deep residual networks, He suggested splitting training into two stages. The first stage, called warm-up (preheating), trains the model with a very low learning rate; the second stage uses a higher learning rate. This article finds that if the number of convolution kernels is very high, even a learning rate of 0.00001 cannot solve this training instability problem, and a high learning rate can also destroy the effect. But this article considers scaling the residuals to be more reliable than warm-up.
#
# Even if scaling is not strictly necessary, it has no effect on the final accuracy, but it can stabilize the training process.
#
# 
#
# **Conclusion**
#
# Inception-ResNet-v1 : a network architecture combining inception module and resnet module with similar calculation cost to Inception-v3;
#
# Inception-ResNet-v2 : A more expensive but better performing network architecture.
#
# Inception-v4 : A pure inception module, without residual connections, but with performance similar to Inception-ResNet-v2.
#
#
# **A big picture of the various module structures of Inception-v4 / Inception-ResNet-v1 / v2:**
#
# - Fig3-Stem: (Inception-v4 & Inception-ResNet-v2)
#
# 
#
# - Fig4-Inception-A: (Inception-v4)
#
# 
#
# - Fig5-Inception-B: (Inception-v4)
#
# 
#
# - Fig6-Inception-C: (Inception-v4)
#
# 
#
# - Fig7-Reduction-A: (Inception-v4 & Inception-ResNet-v1 & Inception-ResNet-v2)
#
# 
#
# - Fig8-Reduction-B: (Inception-v4)
#
# 
#
# - Fig10-Inception-ResNet-A: (Inception-ResNet-v1)
#
# 
#
# - Fig11-Inception-ResNet-B: (Inception-ResNet-v1)
#
# 
#
# - Fig12-Reduction-B: (Inception-ResNet-v1)
#
# 
#
# - Fig13-Inception-ResNet-C: (Inception-ResNet-v1)
#
# 
#
# - Fig14-Stem: (Inception-ResNet-v1)
#
# 
#
# - Fig16-Inception-ResNet-A: (Inception-ResNet-v2)
#
# 
#
# - Fig17-Inception-ResNet-B: (Inception-ResNet-v2)
#
# 
#
# - Fig18-Reduction-B: (Inception-ResNet-v2)
#
# 
#
# - Fig19-Inception-ResNet-C: (Inception-ResNet-v2)
#
# 
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# ## Summary
#
# * Inception v1 network, 1x1, 3x3, 5x5 conv and 3x3 pooling and stacking together, on the one hand, increase the width of the network, and on the other hand, increase the adaptability of the network to scale.
#
#
# * The network of v2 has been improved based on v1. On the one hand, the BN layer has been added to reduce the internal covariate shift (the internal neuron's data distribution has changed), so that the output of each layer is normalized to an N (0, 1) Gaussian, on the other hand, learning VGG replaces 5x5 in the inception module with two 3x3 convs, which reduces the number of parameters and speeds up the calculation.
#
#
# * One of the most important improvements in v3 is Factorization, which decomposes 7x7 into two one-dimensional convolutions (1x7, 7x1), and 3x3 is the same (1x3, 3x1). This benefit can speed up calculations (redundant calculations Capacity can be used to deepen the network), and one conv can be split into two convs, which further increases the network depth and increases the nonlinearity of the network. It is also worth noting that the network input has changed from 224x224 to 299x299, which is more refined. Designed 35x35 / 17x17 / 8x8 modules.
#
#
# * v4 studied whether the Inception module combined with the Residual Connection can be improved? It was found that the structure of ResNet can greatly speed up training and improve performance at the same time. An Inception-ResNet v2 network was obtained. At the same time, a deeper and more optimized Inception v4 model was designed to achieve performance comparable to Inception-ResNet v2
# ### Code implementation
import os
import zipfile
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# +
# Paths for the cats-vs-dogs dataset, expected to be unpacked under tmp/.
base_dir = 'tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

# Small CNN: four conv/max-pool stages, then dropout and a dense head
# with a single sigmoid unit for binary (cat vs dog) classification.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Binary crossentropy matches the single-sigmoid output; 'acc' is
# tracked so the plotting cell below can read history.history['acc'].
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=1e-4),
              metrics=['acc'])

# This code has changed. Now instead of the ImageGenerator just rescaling
# the image, we also rotate and do other operations
# Updated to do image augmentation
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# Validation images are only rescaled — never augmented.
test_datagen = ImageDataGenerator(rescale=1./255)

# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
    train_dir,  # This is the source directory for training images
    target_size=(150, 150),  # All images will be resized to 150x150
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=10,  # 200 images per epoch = batch_size (20) * steps (10)
    epochs=10,
    validation_data=validation_generator,
    validation_steps=50,  # 1000 images = batch_size (20) * steps (50)
    verbose=2)
# +
# Plot training/validation accuracy and loss curves from the Keras
# History object produced by the training cell above.
# %matplotlib inline
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
# Bug fix: the accuracy figure set labels but never drew its legend —
# only the loss figure had one.
plt.legend()
plt.figure()

plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
# -
|
Diffrent CNN/Inception.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Calule a derivada da função dada. Simplifique as respostas.</b>
# <b>14. $f(x) = \frac{1}{4}x^8 - \frac{1}{2}x^6 -x + 2$</b>
# <b>Aplicando a regra da soma $\frac{d}{dx}(f+g) = \frac{d}{dx}(f) + \frac{d}{dx}(g) $</b><br><br>
# $\frac{d}{dx}(\frac{1}{4}x^8) - \frac{d}{dx}(\frac{1}{2}x^6) - \frac{d}{dx}(x) + \frac{d}{dx}(2)$<br><br><br>
#
#
# <b>Tirando a constante: $\frac{1}{4} \cdot \frac{d}{dx}(x^8)$<b><br><br>
# <b>Aplicando a regra da potência: </b> $\frac{d}{dx}(x^a) = a\cdot x^{a-1}$<br><br><br>
#
# $\frac{d}{dx}(x^8) = 8\cdot x^{8-1}$<br><br>
# $\frac{d}{dx}(x^8) = 8x^7$<br><br>
# <b>Multiplicando pela constante</b><br><br><br>
# $\frac{1}{4} \cdot \frac{d}{dx}(x^8) = \frac{1}{4} \cdot 8x^7$<br><br>
# $\frac{1}{4} \cdot \frac{d}{dx}(x^8) = \frac{8x^7}{4}$<br><br>
# $\frac{1}{4} \cdot \frac{d}{dx}(x^8) = 2x^7$<br><br><br>
#
# <b>Tirando a constante: $- \frac{1}{2} \cdot \frac{d}{dx}(x^6)$<b><br><br>
#
# $\frac{d}{dx}(x^6) = 6\cdot x^{6-1}$<br><br>
# $\frac{d}{dx}(x^6) = 6x^5$<br><br>
# <b>Multiplicando pela constante</b><br><br><br>
# $-\frac{1}{2} \cdot \frac{d}{dx}(x^6) = -\frac{1}{2} \cdot 6x^5$<br><br>
# $-\frac{1}{2} \cdot \frac{d}{dx}(x^6) = -\frac{6x^5}{2}$<br><br>
# $-\frac{1}{2} \cdot \frac{d}{dx}(x^6) = -3x^5$<br><br><br>
#
# $\frac{d}{dx}(-x) = -1$<br><br><br>
# <b>Derivada de uma constante é zero</b><br><br>
# $\frac{d}{dx}(2) = 0$<br><br>
#
# $\frac{d}{dx}(\frac{1}{4}x^8) - \frac{d}{dx}(\frac{1}{2}x^6) - \frac{d}{dx}(x) + \frac{d}{dx}(2) = 2x^7 -3x^5 -1 $
#
#
#
#
|
Problemas 2.2/14.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <font style="font-size:96px; font-weight:bolder; color:#0040a0"><img src="http://montage.ipac.caltech.edu/docs/M51_logo.png" alt="M" style="float: left; padding: 25px 30px 25px 0px;" /></font>
#
# <i><b>Montage</b> Montage is an astronomical image toolkit with components for reprojection, background matching, coaddition and visualization of FITS files. It can be used as a set of command-line tools (Linux, OS X and Windows), C library calls (Linux and OS X) and as Python binary extension modules.
#
# The Montage source is written in ANSI-C and code can be downloaded from GitHub ( https://github.com/Caltech-IPAC/Montage ). The Python package can be installed from PyPI ("</i>pip install MontagePy<i>"). The package has no external dependencies. See http://montage.ipac.caltech.edu/ for details on the design and applications of Montage.
#
# # MontagePy.main modules: mSubCube
#
# Most of Montage is focused on 2D astronomical image mosaics: reprojection, background matching, coaddition and so on. But there is also a need for tools that operate on data "cubes": three- or four-dimensional arrays where two of the axes represent the same projected sky coordinates as we find in the 2D images. Usually, the third axis is some form of wavelength.
#
# The problem sets are not completely parallel: datacubes do not generally need background matching, but you do frequently want cutouts in the non-spatial dimensions and the ability to transpose axes.
#
# Montage includes a set of routines for manipulating datacubes:
#
# <ul>
# <li><b>mProjectCube</b> — Reproject the spatial dimensions of a cube.</li>
# <li><b>mAddCube</b> — Coadd reprojected cubes.</li>
# <li><b>mShrinkCube</b> — Rescale a cube (integer scaling in the non-spatial dimensions).</li>
# <li><b>mSubCube</b> —Cut a portion out of a cube.</li>
# <li><b>mTranspose</b> — Transpose a cube's axes.</li>
# </ul>
#
# This routine, mSubCube, cuts a multi-dimensional box out of a cube. In the spatial dimensions this is exactly the same as mSubimage: a sky location and size or pixel ranges. In the third (and fourth) dimensions it is just index ranges (or an index value list).
#
# <b>Note:</b> The MontagePy python package has no external dependencies. We include other utilities on this page to aid in visualizing MontagePy package results.
#
# +
# Import the cube-cutout routine and the visualizer, then show the
# function's built-in documentation.
from MontagePy.main import mSubCube, mViewer

help(mSubCube)
# -
# ## mSubCube Example
#
# In this example, we will cut a region, in both space and wavelength from one of the inputs to our GALFA mosaic.
#
# The data used in the datacube Jupyter pages come from the Galactic Arecibo L-band Feed Array HI (GALFA-HI) survey (Peek et al., 2011, Ap J Suppl, 194, 20; DOI 10.1088/0067-0049/194/2/20; ADS Bibcode 2011ApJS..194...20P).
# Cut a sub-cube from the GALFA input. Positional args after the mode
# flag are: input cube, output cube, then center and size — presumably
# RA=12, Dec=10 (degrees) and a 5x5-degree box; confirm against the
# help() output above.
rtn = mSubCube(0, "GALFA/shrunken/GALFA_HI_RA+DEC_012.00+10.35_N.fits",
               "work/GALFA/GALFAsubcube.fits",
               12., 10., 5., 5.)
# mSubCube returns a status structure rather than raising on failure.
print(rtn)
# ## Before and After
#
# Here are the original image and the cutout. Since these are cubes, we have to collapse it in the third dimension for display.
#
# +
from IPython.display import HTML, display, Image

# Render PNG previews of the original cube and the cutout. The [0][60,68]
# suffix selects HDU 0 and collapses wavelength planes 60-68 for display.
# NOTE: the backslash-continued strings below are part of the command
# string passed to mViewer; their content must not be re-wrapped.
rtn = mViewer('-color yellow -grid eq j2000 \
-ct 4 -gray "GALFA/shrunken/GALFA_HI_RA+DEC_012.00+10.35_N.fits[0][60,68]" \
-2s max gaussian-log -out work/GALFA/GALFA_HI_RA+DEC_012.00+10.35_N_subcube.png',
'', mode=2 )

rtn = mViewer('-color yellow -grid eq j2000 \
-ct 4 -gray "work/GALFA/GALFAsubcube.fits[0][60,68]" \
-2s max gaussian-log -out work/GALFA/GALFAsubcube.png',
'', mode=2 )

# Show the two PNGs side by side in the notebook.
display(HTML("<table><tr><td><img src='work/GALFA/GALFA_HI_RA+DEC_012.00+10.35_N_subcube.png'></td> \
<td><img src='work/GALFA/GALFAsubcube.png'></td></tr></table>"))
# -
# <p/>
#
# ## mSubCube Error Handling
#
# If mSubCube encounters an error, the return structure will just have two elements: a status of 1 ("error") and a message string that tries to diagnose the reason for the error.
#
# For instance, if the user specifies a datacube that doesn't exist:
# Deliberately reference a file that does not exist to demonstrate the
# error return: status 1 plus a diagnostic message string.
rtn = mSubCube(0, "GALFA/shrunken/unknown.fits",
               "work/GALFA/GALFAsubcube.fits",
               12., 10., 5., 5.)
print(rtn)
#
#
#
#
# # Classic Montage: mSubCube as a Stand-Alone Program
#
#
# ### mSubCube Unix/Windows Command-line Arguments
#
# <p>mSubCube can also be run as a command-line tool in Linux, OS X, and Windows:</p>
#
# <p><tt>
# <b>Usage:</b> mSubCube [-D3 selection-list][-D4 selection-list][-d][-a(ll pixels)][-h hdu][-s statusfile] in.fit out.fit ra dec xsize [ysize] | mSubCube -p [-D3 selection-list][-D4 selection-list][-d][-h hdu][-s statusfile] in.fit out.fit xstartpix ystartpix xpixsize [ypixsize] | mSubCube -c [-D3 selection-list][-D4 selection-list][-d][-h hdu][-s statusfile] in.fit out.fit
# </tt></p>
# <p> </p>
# <p>If you are writing in C/C++, mSubCube can be accessed as a library function:</p>
#
# <pre>
# /*-***********************************************************************/
# /* */
# /* mSubimage */
# /* */
# /* This program subsets an input image around a location of interest */
# /* and creates a new output image consisting of just those pixels. */
# /* The location is defined by the RA,Dec (J2000) of the new center and */
# /* the XY size in degrees of the area (X and Y) in the direction of */
# /* the image axes, not Equatorial coordinates. */
# /* */
# /* int mode Processing mode. The two main modes are */
# /* 0 (SKY) and 1 (PIX), corresponding to cutouts */
# /* are in sky coordinate or pixel space. The two */
# /* other modes are 3 (HDU) and 4 (SHRINK), where */
# /* the region parameters are ignored and you get */
# /* back either a single HDU or an image that has */
# /* had all the blank border pixels removed. */
# /* */
# /* char *infile Input FITS file */
# /* char *outfile Subimage output FITS file */
# /* */
# /* double ra RA of cutout center (or start X pixel in */
# /* PIX mode */
# /* double dec Dec of cutout center (or start Y pixel in */
# /* PIX mode */
# /* */
# /* double xsize X size in degrees (SKY mode) or pixels */
# /* (PIX mode) */
# /* double ysize Y size in degrees (SKY mode) or pixels */
# /* (PIX mode) */
# /* */
# /* int hdu Optional HDU offset for input file */
# /* int nowcs Indicates that the image has no WCS info */
# /* (only makes sense in PIX mode) */
# /* */
# /* char *d3constraint String describing the datacube third */
# /* dimension selection constraints */
# /* */
# /* char *d4constraint String describing the datacube fourth */
# /* dimension selection constraints */
# /* */
# /* int debug Debugging output level */
# /* */
# /*************************************************************************/
#
# struct mSubCubeReturn *mSubCube(int mode, char *infile, char *outfile, double ra, double dec,
# double xsize, double ysize, int hdu, int nowcs, char *d3constraint,
# char *d4constraint, int debugin)
# </pre>
# <p><b>Return Structure</b></p>
# <pre>
# struct mSubCubeReturn
# {
# int status; // Return status (0: OK, 1:ERROR)
# char msg [1024]; // Return message (for error return)
# char json [4096]; // Return parameters as JSON string
# char content[1024]; // String giving an idea of output content (e.g., 'blank', 'flat', or 'normal'.
# char warning[1024]; // If warranted, warning message about CDELT, CRPIX, etc.
# };
# </pre>
|
mSubCube.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Increasing accuracy for LL fault detection
# +
# Purely cosmetic: switch the Jupyter notebook colour theme.
# get_themes / jt are imported but not used below.
from jupyterthemes import get_themes
import jupyterthemes as jt
from jupyterthemes.stylefx import set_nb_theme
set_nb_theme('chesterish')
# -
import pandas as pd
# Load the lightly-loaded training sets, one CSV per supply frequency,
# and trim each to the same length so the classes are balanced.
data_10=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train10hz2.csv')
data_20=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train20hz2.csv')
data_30=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train30Hz2.csv')
data_15=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train15hz2.csv')
data_25=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train25hz2.csv')
data_10=data_10.head(99990)
data_15=data_15.head(99990)
data_20=data_20.head(99990)
data_25=data_25.head(99990)
data_30=data_30.head(99990)
# NOTE(review): missing parentheses — this displays the bound method
# object, not the data; probably meant data_25.head()
data_25.head
# #### Shuffling
# Shuffle each training set in place (frac=1 resamples all rows in a
# random order; no random_state, so the order is not reproducible).
data_20=data_20.sample(frac=1)
data_30=data_30.sample(frac=1)
data_10=data_10.sample(frac=1)
data_15=data_15.sample(frac=1)
data_25=data_25.sample(frac=1)
import sklearn as sk
# ### Assiging X and y for training
# Convert each training DataFrame to a numpy array and split it into
# features (columns 0-8) and the label (column 9), one pair per
# supply frequency (10/15/20/25/30 Hz).
dataset_10=data_10.values
X_10= dataset_10[:,0:9]
print(X_10)
y_10=dataset_10[:,9]
print(y_10)
dataset_15=data_15.values
X_15= dataset_15[:,0:9]
print(X_15)
y_15=dataset_15[:,9]
print(y_15)
dataset_20=data_20.values
X_20= dataset_20[:,0:9]
print(X_20)
y_20=dataset_20[:,9]
print(y_20)
dataset_25=data_25.values
X_25= dataset_25[:,0:9]
print(X_25)
y_25=dataset_25[:,9]
print(y_25)
dataset_30=data_30.values
X_30= dataset_30[:,0:9]
print(X_30)
y_30=dataset_30[:,9]
print(y_30)
# ### Training RF Classifier
# Train one random-forest classifier per supply frequency.
# NOTE(review): rf_10 uses 1000 trees while the others use 5000 —
# confirm whether this difference is intentional.
from sklearn.ensemble import RandomForestClassifier
rf_10 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_10.fit(X_10, y_10);
from sklearn.ensemble import RandomForestClassifier
rf_15 = RandomForestClassifier(n_estimators = 5000, random_state = 42)
rf_15.fit(X_15, y_15);
from sklearn.ensemble import RandomForestClassifier
rf_20 = RandomForestClassifier(n_estimators = 5000, random_state = 42)
rf_20.fit(X_20, y_20);
from sklearn.ensemble import RandomForestClassifier
rf_25 = RandomForestClassifier(n_estimators = 5000, random_state = 42)
rf_25.fit(X_25, y_25);
from sklearn.ensemble import RandomForestClassifier
rf_30 = RandomForestClassifier(n_estimators = 5000, random_state = 42)
rf_30.fit(X_30, y_30);
# ### Importing Testing Data
# Load the test sets (including 35 Hz and 40 Hz, frequencies that have
# no corresponding training set), trim to a common length, and shuffle.
test_10=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test10hz.csv')
test_20=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test20hz.csv')
test_30=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test30Hz.csv')
test_15=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test15hz.csv')
test_25=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test25hz.csv')
test_35=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test35Hz.csv')
test_40=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test40Hz.csv')
test_10=test_10.head(99990)
test_15=test_15.head(99990)
test_20=test_20.head(99990)
test_25=test_25.head(99990)
test_30=test_30.head(99990)
test_35=test_35.head(99990)
test_40=test_40.head(99990)
test_20=test_20.sample(frac=1)
test_30=test_30.sample(frac=1)
test_40=test_40.sample(frac=1)
test_10=test_10.sample(frac=1)
test_15=test_15.sample(frac=1)
test_25=test_25.sample(frac=1)
test_35=test_35.sample(frac=1)
# ### Assigning X and y for testing
# +
# Split each test set into features (columns 0-8) and label (column 9),
# mirroring the training-set preparation above.
dataset_test_10 = test_10.values
X_test_10 = dataset_test_10[:,0:9]
print(X_test_10)
y_test_10= dataset_test_10[:,9]
print(y_test_10)
# +
dataset_test_15 = test_15.values
X_test_15 = dataset_test_15[:,0:9]
print(X_test_15)
y_test_15= dataset_test_15[:,9]
print(y_test_15)
# +
dataset_test_20 = test_20.values
X_test_20 = dataset_test_20[:,0:9]
print(X_test_20)
y_test_20= dataset_test_20[:,9]
print(y_test_20)
# +
dataset_test_25 = test_25.values
X_test_25 = dataset_test_25[:,0:9]
print(X_test_25)
y_test_25= dataset_test_25[:,9]
print(y_test_25)
# +
dataset_test_30 = test_30.values
X_test_30 = dataset_test_30[:,0:9]
print(X_test_30)
y_test_30= dataset_test_30[:,9]
print(y_test_30)
# +
dataset_test_35 = test_35.values
X_test_35 = dataset_test_35[:,0:9]
print(X_test_35)
y_test_35= dataset_test_35[:,9]
print(y_test_35)
# +
dataset_test_40 = test_40.values
X_test_40 = dataset_test_40[:,0:9]
print(X_test_40)
y_test_40= dataset_test_40[:,9]
print(y_test_40)
# -
# ### Predictions with 10Hz model
import numpy as np
# +
# Evaluate the 10 Hz model on every test frequency.
# NOTE(review): "accuracy" here is 100 minus the mean absolute label
# error; this is a percentage only if the labels are on a 0-100 scale —
# confirm against the label encoding.
predictions_10 = rf_10.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 10Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
# +
predictions_15 = rf_10.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 10Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
# +
predictions_20 = rf_10.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 10Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
# +
predictions_25 = rf_10.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 10Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
# +
predictions_30 = rf_10.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 10Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
# +
predictions_35 = rf_10.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 10Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
# +
predictions_40 = rf_10.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 10Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
|
.ipynb_checkpoints/LL_Phase_2-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 07 Gradient descent
#
# Part of ["Introduction to Data Science" course](https://github.com/kupav/data-sc-intro) by <NAME>, [<EMAIL>](mailto:<EMAIL>)
#
# Recommended reading for this section:
#
# 1. <NAME>. (2019). Data Science From Scratch: First Principles with Python (Vol. Second edition). Sebastopol, CA: O’Reilly Media
#
# The following Python modules will be required. Make sure that you have them installed.
# - `matplotlib`
# - `numpy`
# - `scipy`
# ## Lesson 1
# ### Mean speed
#
# Imagine that you had a trip on an intercity train.
#
# Each time the train passed a town you had recorded a time elapsed from the start and the travelled distance as written on the milestones.
#
# The results are collected in a table:
#
# Station | Distance $s$, km | Elapsed time $t$, minutes
# ---|---|---
# A | 0 | 0
# B | 9 | 10
# C | 21 | 21
# D | 33 | 25
# E | 46 | 37
# F | 70 | 52
#
# Now you want to know the speed of the train.
#
# First of all we can compute the mean speed over the whole travel: divide the full distance $s_{AF}=70\text{ km}$ by the full time $t_{AF}=52\text{ min}$.
#
# $$
# v_{AF} = \frac{s_{AF}}{t_{AF}}
# $$
#
# $$
# v_{AF} = \frac{70}{52} = 1.34 \text{ km/min}
# $$
#
# But you remember that the train changed its speed: sometimes it moved slower and sometimes faster. So you want to know more details about its speed variations.
#
# We can compute the mean speeds between each towns:
#
# $$
# v_{AB} = \frac{9}{10} = 0.90 \text{ km/min}
# $$
#
# $$
# v_{BC} = \frac{21-9}{21-10} = \frac{12}{11} = 1.09 \text{ km/min}
# $$
#
# $$
# v_{CD} = \frac{33-21}{25-21} = \frac{12}{4} = 3.00 \text{ km/min}
# $$
#
# $$
# v_{DE} = \frac{46-33}{37-25} = \frac{13}{12} = 1.08 \text{ km/min}
# $$
#
# $$
# v_{EF} = \frac{70-46}{52-37} = \frac{24}{15} = 1.60 \text{ km/min}
# $$
# ### Augmentation of the initial data
#
# What if you want to go further and find even more information about the speed since you are definitely sure that the train moved between
# the towns with a varying speed?
#
# Of course it requires more measurements of the elapsed time and corresponding distances.
#
# We will model this situation using a synthetic data that are generated using so called interpolation.
#
# Interpolation is a mathematical procedure that allows to generate new data similar in some sense to the existing data.
#
# The procedure of data extension due to the synthetic data is called augmentation. So in our case the augmentation is done via interpolation.
#
# Strictly speaking our augmentation is not quite correct: the amount of the generated data will be much larger than the initial data.
#
# But we need it just for illustration purposes only.
#
# Just think about the interpolation as bending of an elastic rod around the pins that correspond to each town in the table above.
#
# Below is the graph of the initial and interpolated data.
# +
# Interpolate the station table to simulate densely sampled measurements
# and plot both the original points and the smooth interpolant.
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
# This is the table of distances and times shown above
ss = np.array([0,9,21,33,46,70])
tt = np.array([0,10,21,25,37,52])
# Modeling new measurements using cubic interpolation (a smooth curve
# through the known points, like an elastic rod bent around pins)
fun_st = interp1d(tt, ss, kind='cubic')
tt_new = np.linspace(0, tt[-1], tt[-1]*10+1) # add 10 new points between already known
ss_new = fun_st(tt_new)
fig, ax = plt.subplots()
ax.plot(tt, ss, '*', color='C1', label="original data: times and distances of towns");
ax.plot(tt_new, ss_new, color='C0', label="interpolation: an elastic rod bending the pins")
ax.legend()
ax.set_xlabel('t')
ax.set_ylabel('s');
# -
# ### Instant speed and derivatives
#
# So, imagine that we have got datasets of a very frequent measurements of times and the corresponding distances, `tt_new` and `ss_new`.
#
# We want to find the detailed information about the speeds.
#
# Consider an arbitrary time $t_0$ and find corresponding distance $s_0$. We are going to find the speed at this moment.
#
# To be definite:
# Pick a reference time t0 and, via the interpolant, its distance s0;
# then step forward by dt to get the second point (t1, s1).
t0 = 23.0 # considered time
s0 = fun_st(t0) # corresponding distance
print(f"t0={t0}, s0={s0}")
# Consider a time interval $\Delta t$ and compute $s_1$ that corresponds to $t_1=t_0+\Delta t$. Also compute $\Delta s=s_1-s_0$.
dt = 10
t1 = t0 + dt
s1 = fun_st(t1)
print(f"t1={t1}, s1={s1}, ds=s1-s0={s1-s0}")
# Let us plot $s$ vs $t$ and show ($t_0$, $s_0$) and ($t_1$, $s_1$).
# +
# Plot the interpolated s(t) curve and annotate the two points together
# with the dashed legs showing the increments Delta t and Delta s.
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(tt_new, ss_new)
ax.set_xlabel('t')
ax.set_ylabel('s');
ax.plot([t0, t1, t1], [s0, s0, s1], 'ko')
ax.plot([t0, t1], [s0, s0], 'k--')
ax.plot([t1, t1], [s0, s1], 'k--')
ax.text(t0 - 1, s0 + 3, r'($t_0$, $s_0$)', fontsize=18, ha='right')
ax.text(t1 - 1, s1 + 3, r'($t_1$, $s_1$)', fontsize=18, ha='right')
ax.text(0.5*(t0+t1), s0 - 5, r'$\Delta t$', fontsize=18, ha='center')
ax.text(t1 + 1, 0.5*(s0+s1), r'$\Delta s$', fontsize=18, va='center');
# -
# We already know how to compute the speed at $t_0$ approximately: we need to take the distance $\Delta s$ and divide it by the time interval $\Delta t$
#
# $$
# v\approx \frac{\Delta s}{\Delta t}
# $$
#
# Of course this is the mean speed at the interval between $t_0$ and $t_1$.
#
# Why this is only an approximate value of the speed?
#
# Because the curve is bended between the two marked points.
#
# If the curve were straight between the two points the formula for $v$ above would be exact.
#
# How to improve the precision?
#
# The time step $\Delta t$ must be decreased. Accordingly $\Delta s$ will also become smaller.
#
# The smaller $\Delta t$ the less noticeable is the bend between the points ($t_0$, $s_0$) and ($t_1$, $s_1$).
#
# Sometimes in practical application this is enough: we just take a sufficiently small $\Delta t$, compute the corresponding $\Delta s$ and find the speed according to the formula above.
#
# But for the rigorous computation of the instant speed at $t_0$ we must proceed as follows:
#
# $$
# v = \lim_{\Delta t\to 0} \frac{\Delta s}{\Delta t}
# $$
#
# Here is the limit: we need to find what happen if $\Delta t$ approaches zero infinitely close.
#
# The result is called a derivative of $s$ at $t$. There are several notations for it:
#
# $$
# v = \frac{ds}{dt}=s'=\dot s
# $$
#
# Thus the instant speed $v$ or just speed equals to the derivative of $s$ at $t$.
# ### Computation of derivatives
#
# If the functions is known, in our example this is $s(t)$, its derivative is computed according to a sufficiently simple rules.
#
# Actual computation of the limit is not needed.
#
# Examples of simple rules for computing the derivatives
#
# $$
# (u+v)' = u' + v'
# $$
#
# $$
# (uv)' = u' v + u v'
# $$
#
# $$
# \left(\frac{u}{v}\right)'=\frac{u'v-uv'}{v^2}
# $$
#
# $$
# c' = 0 \text{ (where $c$ is constant)}
# $$
#
# $$
# (cu)' = cu' \text{ (where $c$ is constant)}
# $$
#
# The Chain rule.
#
# Let $y=f(u)$ and $u=g(x)$. Then $y(x)=f(g(x))$. Here $u$ denotes a temporary variable introduced for convenience and $x$ is the variable by which we differentiate.
#
# $$
# y'(x)=f'(u) g'(x)
# $$
#
# Derivatives of some elementary functions (differentiation at $x$ is assumed):
#
# $$
# (x^n)' = n x^{n-1}
# $$
#
# $$
# x'=1
# $$
#
# $$
# (\sin x)' = \cos x
# $$
#
# $$
# (\cos x)' = -\sin x
# $$
#
# $$
# (e^x)' = e^x
# $$
#
# $$
# (\log x)' = 1/x
# $$
# Examples of computation of derivatives:
#
# $$
# (x^3 + 4)' = (x^3)' + 4' = 3x^2
# $$
#
# <br>
#
# $$
# (\sin x \cos x)' = (\sin x)' \cos x + \sin x (\cos x)' = \cos x \cos x - \sin x \sin x = \cos^2 x - \sin^2 x
# $$
#
# <br>
#
# $$
# \left( \frac{e^x}{x^2} \right)' = \frac{(e^x)'x^2 - e^x (x^2)'}{x^4} = \frac{e^x x^2 - e^x 2 x}{x^4} = \frac{x-2}{x^3} e^x
# $$
# Examples of using the chain rule:
#
# Find $y'(x)$ for $y(x)=e^{-x^2}$.
#
# First denote $y=f(u)=e^u$, $u=g(x)=-x^2$
#
# Then find the derivatives using a chain rule
# $$
# y'(x)=f'(u)g'(x)=(e^u)'(-x^2)'=e^u (-2x)
# $$
#
# Finally substitute $u$ with its value $-x^2$:
# $$
# y'(x)= -2x e^{-x^2}
# $$
#
# One more example: find $y'(x)$ for $y(x)=\sin^2 x$
#
# Denote $y=f(u)=u^2$ (notice that square is the most outer operation in the expression $\sin^2 x$). $u=g(x)=\sin x$
#
# Now the chain rule:
#
# $$
# y'(x)=f'(u)g'(x)=(u^2)'(\sin x)' = 2 u \cos x
# $$
#
# Substitute for $u$:
#
# $$
# y'(x) = 2 \sin x \cos x
# $$
# ### Geometric meaning of the derivative
#
# For a function $y(x)$ its derivative at a certain point $x_0$ equals to the slope of a tangent line (the slope means the tangent of an angle with a horizontal line).
#
# Positive slope - the function grows at this point.
#
# Growing means that $y$ gets larger if we move along $x$ from left to right.
#
# Negative slope - the function decays.
#
# Zero slope - the point is a special point, can be minimum or maximum (also can be an inflection point).
#
# In the example above we see a graph of
#
# $$
# y(x) = x^2
# $$
#
# Its derivative is
#
# $$
# y'(x) = 2x
# $$
#
# Consider a point $x_0=-2.5$:
#
# $$
# y'(x=-2.5) = 2 \cdot (-2.5) = -5
# $$
#
# The derivative is negative and the function decays.
#
# The point $x_0=4$:
#
# $$
# y'(x=4) = 2 \cdot 4 = 8
# $$
#
# The derivative is positive and the function grows.
#
# The point $x_0=0$:
#
# $$
# y'(x=0) = 2 \cdot 0 = 0
# $$
#
# The derivative is zero - this is the maximum.
#
# Below is an illustration of this example.
#
# Compare the absolute values of the slopes at $x_0=-2.5$ and $x_0=4$: closer to the maximum means smaller slope.
# +
import numpy as np
import matplotlib.pyplot as plt
def fun(x):
    """Example function y = x**2 used for the tangent-line illustration."""
    return x * x
def dfun(x):
    """Analytic derivative y' = 2x of the example function."""
    return x + x
def tangent_line(px):
    """Return the pair (k, b) of the tangent line y = k*x + b at the point px."""
    slope = dfun(px)  # the derivative at px equals the slope of the tangent line
    intercept = fun(px) - slope * px
    return slope, intercept
# Plot y = x**2 and draw tangent segments at three points (-2.5, 4, 0),
# labelling each with the value of the derivative there.
fig, ax = plt.subplots(figsize=(10, 8))
xx = np.linspace(-5, 5, 100)
yy = [fun(x) for x in xx]
ax.plot(xx, yy)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y(x)=x^2$')
# Negative slope: the function decays at x = -2.5.
px0 = -2.5
k0, b0 = tangent_line(px0)
x0 = np.linspace(px0-1, px0+1, 25)
ax.plot([px0], [fun(px0)], 'o', color='C1')
ax.plot(x0, k0 * x0 + b0, color='C1')
ax.text(px0-0.1, fun(px0)-1.5, r'$y^\prime({{{}}})={{{}}}$'.format(px0,dfun(px0)), fontsize=18, ha='right')
# Positive slope: the function grows at x = 4.
px0 = 4
k0, b0 = tangent_line(px0)
x0 = np.linspace(px0-1, px0+1, 25)
ax.plot([px0], [fun(px0)], 'o', color='C1')
ax.plot(x0, k0 * x0 + b0, color='C1')
ax.text(px0-0.3, fun(px0)+0.1, r'$y^\prime({{{}}})={{{}}}$'.format(px0,dfun(px0)), fontsize=18, ha='right')
# Zero slope: the extremum at x = 0.
px0 = 0
k0, b0 = tangent_line(px0)
x0 = np.linspace(px0-1, px0+1, 25)
ax.plot([px0], [fun(px0)], 'o', color='C1')
ax.plot(x0, k0 * x0 + b0, color='C1');
ax.text(px0, fun(px0)+1.5, r'$y^\prime({{{}}})={{{}}}$'.format(px0,dfun(px0)), fontsize=18, ha='center');
# -
# All this together means that we can use derivatives to find functions maximums or minimums.
# ### Gradient descent for a one-dimensional function
#
# Assume we have a function $y(x)$ and want to find $x_\text{min}$ where the function reaches its minimum.
#
# We start with an initial guess at $x_0$ and going to find a point $x_1$ somehow closer to the minimum.
#
# According to the discussion above the sign of a derivative indicates the direction of the step and a derivative magnitude determines its size.
#
# 
# For the left point:
#
# $$
# x_1 = x_0 + \Delta x_0
# $$
#
# and for the right point:
#
# $$
# x_1 = x_0 - \Delta x_0
# $$
#
# How to choose $\Delta x$ automatically?
#
# We can set $\Delta x = -\gamma y'(x)$, where $\gamma$ is some small parameter.
#
# $$
# x_1 = x_0 - \gamma y'(x_0)
# $$
#
# Similarly we will find $x_2$ then $x_3$ and so on.
#
# In general we have an iteration formula:
#
# $$
# x_{n+1} = x_n - \gamma y'(x_n)
# $$
#
# To compute the minimum we have to apply this formula to get $x_0$, $x_1$, $x_2$, $x_3$, ... until two successive $x_n$ and $x_{n+1}$
# become sufficiently close to each other:
#
# $$
# |x_{n+1}-x_n| < \epsilon
# $$
#
# Here $\epsilon$ is an absolute error. This is selected before the beginning of the computations.
#
# The iterative method of minimum finding is called gradient descent.
#
# Here the derivative is called gradient. We go to in the descending direction that is pointed by the gradient.
#
# Later we will consider a general case where the gradient descent is used to find minimums of functions of many variables.
# One may ask a question: why we perform this procedure of iteration if previously we said that a minimum (as well as a maximum, of course) can by computed as a zero of the derivative.
#
# For example, given
#
# $$
# y=x^2
# $$
#
# we can find its derivative
#
# $$
# y'=2x
# $$
#
# The minimum is where $y'=0$:
#
# $$
# 2x=0, \;\; x = 0
# $$
#
# Why we need the iterations if the minimum can be found that simple?
#
# The answer is that often a function that has to be minimized is very complicated, so that finding its minimum as $y'(x)=0$ is much more complicated compared with the simple iteration procedure discussed above.
# ### Example of a gradient descent for a function of one variable
#
# Consider an example.
#
# A function that we minimize is as follows:
#
# $$
# y(x) = x^3 + 4 x^2 - x
# $$
#
# Its derivative can be easily found:
#
# $$
# y'(x) = 3 x^2 + 8 x -1
# $$
#
# Here is the graph of the function:
# +
import numpy as np
import matplotlib.pyplot as plt
def fun(x):
    """Cubic test function y(x) = x**3 + 4*x**2 - x to be minimised."""
    cubic_term = x**3
    quadratic_term = 4 * x**2
    return cubic_term + quadratic_term - x
def dfun(x):
    """Analytic derivative y'(x) = 3*x**2 + 8*x - 1."""
    leading = 3 * x**2
    linear = 8 * x
    return leading + linear - 1
# Plot the cubic over [-4, 2] so its maximum and minimum are visible.
xx = np.linspace(-4, 2, 100)
yy = [fun(x) for x in xx]
fig, ax = plt.subplots()
ax.plot(xx, yy)
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y');
# -
# Before testing the iteration procedure for minimization we can find the minimum explicitly.
#
# Condition for the minimum is $y'(x)=0$:
#
# $$
# y'(x) = 3 x^2 + 8 x - 1=0
# $$
#
# We need to solve this quadratic equation.
#
# $$
# D = 8^2 + 4\cdot 3 \cdot 1 = 76
# $$
#
# $$
# x_1 = \frac{-8 + \sqrt{76}}{6} = 0.1196329811802247
# $$
#
# $$
# x_2 = \frac{-8 - \sqrt{76}}{6} = -2.786299647846891
# $$
#
# From the plot above we see that $x_2$ is the maximum and $x_1$ is the minimum that we need.
#
# $$
# x_\text{min} = x_1 = 0.1196329811802247
# $$
#
# Let us now write a program for iterative finding of this point.
#
# For convenience here is the formula for iterations accompanied with the condition for the iterations stop:
#
# $$
# x_{n+1} = x_n - \gamma y'(x_n),\;\; |x_{n+1}-x_n| < \epsilon
# $$
#
#
# +
# 1-D gradient descent: x_{n+1} = x_n - gamma * y'(x_n), stopping when
# two successive iterates agree to within eps.
gamma = 0.05  # step size (learning rate)
eps = 1e-10   # absolute convergence tolerance
x0 = -2       # initial guess
iters = [x0]  # full iteration history, used by the plot below
while True:
    x1 = x0 - gamma * dfun(x0) # one iteration
    iters.append(x1) # store current iteration for subsequent illustration
    if np.abs(x1-x0)<eps: # stop if two points are close to each other
        break
    x0 = x1
xmin = x1
print(f"x_min={xmin}")
# -
# In the plot below we see how the iterations of $x_n$ converge to $x_\text{min}$
# +
import numpy as np
import matplotlib.pyplot as plt
# Overlay the first iterates of the descent on the function graph and
# print them, showing convergence towards x_min.
xx = np.linspace(-4, 2, 100)
yy = [fun(x) for x in xx]
fig, ax = plt.subplots()
ax.plot(xx, yy)
x_iter = iters[:16] # show only 16 points since others will overlap each other
y_iter = [fun(x) for x in x_iter]
ax.plot(x_iter, y_iter, 'o')
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_ylim([-1,15])
for n, x in enumerate(x_iter):
    print(f"x{n}={x:8.4f}")
print(f"x_min={xmin:8.4f}")
# -
# Sometimes a function is so complicated that we can not find its derivative. In this case we can use its finite difference approximation
#
# $$
# y'(x) \approx \frac{y(x+\Delta x)-y(x)}{\Delta x}
# $$
#
# Let us check our iterations with the finite difference approximation of the derivative.
#
# Here is the function for it:
def ndfun(x, dx):
    """Forward finite-difference approximation of fun's derivative at x with step dx."""
    rise = fun(x + dx) - fun(x)
    return rise / dx
# +
# Same 1-D gradient descent, but with the derivative estimated by a
# finite difference instead of the analytic formula.
gamma = 0.05  # step size (learning rate)
eps = 1e-10   # absolute convergence tolerance
x0 = -2       # initial guess
dx = 0.001 # finite step for derivative computation
iters = [x0]
while True:
    x1 = x0 - gamma * ndfun(x0, dx) # exact dfun is substituted with approximate ndfun
    iters.append(x1)
    if np.abs(x1-x0)<eps:
        break
    x0 = x1
xmin_nd = x1
print(f"x_min={xmin_nd}")
# -
# Notice that the iterations also converge to the $x_\text{min}$.
# ### Exercises
#
# 1\. For the functions $y(x)$ listed below compute the derivatives at $x$
#
# - $y(x)=2x^2-x$
# - $y(x)=\sin x^2$
# - $y(x)=e^{-3x}$
# - $y(x)=\log(1/x)$
#
# 2\. Write a program that finds the minimum of the function
#
# $$
# y(x) = 1-e^{-(x-5)^2}
# $$
#
# using a gradient descent.
# ## Lesson 2
# ### Scalar function of many variables
#
# Let us remember that the term scalar means a single number unlike a vector which is an ordered set of numbers.
#
# An example: function of two variables
#
# $$
# f(x,y)=\log(1+x^2+y^2)
# $$
#
# This is its implementation as Python function
def fun(x, y):
    """Test surface f(x, y) = log(1 + x**2 + y**2) for the 3-D plotting demo.

    Works element-wise on scalars and numpy arrays alike.
    """
    total = 1 + x**2 + y**2
    return np.log(total)
# And this is its graph
# +
import matplotlib.pyplot as plt
import numpy as np
# Make data: a 101 x 101 grid over [-2, 2] x [-2, 2].
X = np.linspace(-2, 2, 101)
Y = np.linspace(-2, 2, 101)
X, Y = np.meshgrid(X, Y)
Z = fun(X, Y)
# {"projection": "3d"} activates using 3D plotting
fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10,10))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('f(x,y)')
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap='copper') # cmap specifies how the surface will be painted
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=10, pad=0.07);
# -
# ### Minimizing function of many variables
#
# We want to find its minimum point, i.e., such pair $x_\text{min}$ and $y_\text{min}$ that $f(x_\text{min}, y_\text{min})$ reaches its minimum.
#
# For this simple function we can find the minimum analytically.
#
# The minimum point fulfills the equation:
#
# $$
# \frac{\partial f(x,y)}{\partial x}=0, \; \frac{\partial f(x,y)}{\partial y}=0
# $$
#
# The notation with curly $\partial$ stands for a partial derivative.
#
# The partial derivative at $x$ means that we need to compute a usual derivative at $x$ considering $y$ as a constant.
#
# And similarly for the partial derivative at $y$: now $x$ must be treated as a constant.
#
# Let us compute the partial derivatives of our function $f(x,y)=\log(1+x^2+y^2)$.
#
# Since the chain rule will be used, we introduce a notation:
#
# $$
# u(x,y) = 1+x^2+y^2
# $$
#
# Now we have:
#
# $$
# \frac{\partial}{\partial x} \log u(x,y) =
# \left(\frac{d}{d u} \log u \right) \left( \frac{\partial}{\partial x} u(x,y) \right) =
# \left(\frac{1}{u} \right) \left( \frac{\partial}{\partial x} (1+x^2+y^2) \right)=\left(\frac{1}{u} \right) (2x)
# $$
#
# And finally after substituting $u$:
# $$
# \frac{\partial}{\partial x} \log (1+x^2+y^2) = \frac{2x}{1+x^2+y^2}
# $$
#
# Doing in the same manner for $y$ we obtain:
#
# $$
# \frac{\partial}{\partial y} \log (1+x^2+y^2) = \frac{2y}{1+x^2+y^2}
# $$
#
# Now we need to find such $x_\text{min}$ and $y_\text{min}$ that both of these partial derivatives vanish. Obviously these are:
#
# $$
# x_\text{min} = 0, \; y_\text{min}=0
# $$
#
# This point is the minimum of our function.
# ### Gradient
#
# Most of functions in practice can not be analyzed that simple.
#
# We either cannot solve equations for simultaneous zeros of the partial derivatives or even cannot compute the derivatives themselves.
#
# Thus a numerical algorithm is required to find the minimum. The gradient descent works well.
#
# Let us first define a gradient of a function. It is denoted as $\nabla f$.
#
# Given the scalar function of many variables $f(x_1, x_2, \ldots x_n)$ its gradient is computed as a vector of its partial derivatives:
#
# $$
# \nabla f = \left(
# \frac{\partial}{\partial x_1} f(x_1, x_2, \ldots x_n),
# \frac{\partial}{\partial x_2} f(x_1, x_2, \ldots x_n),
# \dots
# \frac{\partial}{\partial x_n} f(x_1, x_2, \ldots x_n)
# \right)
# $$
#
# For example for the above function of two variables the gradient components have already been computed and its gradient can be written as follows:
#
# $$
# \nabla \log(1+x^2+y^2) = \left(
# \frac{2x}{1+x^2+y^2},
# \frac{2y}{1+x^2+y^2}
# \right)
# $$
# Why do we need a gradient?
#
# Because this a vector that points a direction of the fastest function growth.
#
# Imagine that you stand somewhere in a mountainous area and want to climb up to the closest mountaintop.
#
# 
#
# But the weather is extremely foggy so that you can not just look where to go.
#
# But you have a formula describing the heights of the surrounding area:
#
# $$
# h = f(x, y)
# $$
#
# Here $x$ and $y$ are coordinates of a point and $h$ is its altitude, i.e., the height above sea level.
#
# Thus you can do as follows:
#
# Obtain your initial coordinates ($x_0$, $y_0$) and compute the gradient at your position:
#
# $$
# \nabla f(x=x_0,y=y_0)
# $$
#
# This vector points where the height growth is the fastest.
#
# You take a small step along this direction and arrive at the point ($x_1$, $y_1$).
#
# Do all the same again: compute the gradient and take a step.
#
# Repeat it until you reach the top.
#
# And if you want to go to the canyon instead you just need to take steps opposite to the gradient direction.
# ### Gradient descent for a function of many variables
#
# Assume that $v_n=(x_n, y_n)$ is a vector of coordinates $x_n$ and $y_n$.
#
# The algorithm of going down to the canyon can be written as a simple iterative formula:
#
# $$
# v_{n+1} = v_n - \gamma \nabla f(v_n)
# $$
#
# This is the equation for the gradient descent. The iterations stop when the distance between two successive vectors become sufficiently small:
#
# $$
# |v_{n+1}-v_{n}| < \epsilon
# $$
#
# Here $\epsilon$ is an absolute error that we accept before the iterations start.
#
# Compare this equation with the one considered above for one dimensional functions
#
# $$
# x_{n+1} = x_n - \gamma y'(x_n)
# $$
#
# Here we have the derivative instead of the gradient. Indeed, if a function has only one variable, its gradient is transformed into a simple derivative.
# ### Example of a gradient descent for a function of two variables
#
#
# Let us find the minimum of the function discussed above using the gradient descent.
#
# Let us remember the formulas for the function and its gradient:
#
# $$
# f(x,y)=\log(1+x^2+y^2)
# $$
#
# $$
# \nabla f(x,y) = \left(
# \frac{2x}{1+x^2+y^2},
# \frac{2y}{1+x^2+y^2}
# \right)
# $$
#
# Below the Python implementation:
# +
import numpy as np


def fun(x, y):
    """Example surface f(x, y) = log(1 + x**2 + y**2); minimum at the origin."""
    return np.log(1 + x**2 + y**2)


def grad_fun(x, y):
    """Analytic gradient of ``fun``: returns np.array([df/dx, df/dy])."""
    denom = 1 + x**2 + y**2
    return np.array([2*x, 2*y]) / denom
# -
# Algorithm of the gradient descent can be implemented as follows:
# +
# Gradient-descent driver: repeat v <- v - gamma * grad f(v) until two
# successive iterates are closer than eps.
gamma = 0.5              # step-size multiplier
eps = 1e-10              # absolute convergence tolerance
v0 = np.array([-2, 2])   # initial point
iters = [v0]             # keep every iterate for the illustration below
while True:
    v1 = v0 - gamma * grad_fun(*v0)
    iters.append(v1)
    # Stop once the step is negligibly small.
    if np.linalg.norm(v1 - v0) < eps:
        break
    v0 = v1
vmin = v1
print(f"v_min={vmin}")
# -
# Here is the graph of the function. The bullet points on it shows the steps approaching to the minimum.
# +
import matplotlib.pyplot as plt
import numpy as np
# Make the grid data for the surface plot.
X = np.linspace(-2, 2, 101)
Y = np.linspace(-2, 2, 101)
X, Y = np.meshgrid(X, Y)
Z = fun(X, Y)
# {"projection": "3d"} activates 3D plotting
fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10,10))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('f(x,y)')
# Plot the surface; alpha makes the wireframe semi-transparent.
surf = ax.plot_wireframe(X, Y, Z, alpha=0.7)
# Overlay the first 10 gradient-descent iterates as black bullets.
xy_iter = np.array(iters[:10])
z_iter = np.array([fun(xy[0], xy[1]) for xy in xy_iter])
ax.plot(xy_iter[:, 0], xy_iter[:, 1], z_iter, 'ok');
# -
# Gradient descent works better if we can find expression for the function gradient.
#
# But this is not always possible.
#
# In this case we can use its finite difference approximation.
#
# This is done in a similar manner as in one dimensional case:
def ngrad_f(x1, x2, dx):
    """Forward-difference approximation of the gradient of ``fun`` at (x1, x2).

    dx is the finite-difference step: smaller values approximate the true
    gradient better, until floating-point cancellation dominates.
    """
    base = fun(x1, x2)
    return np.array([
        (fun(x1 + dx, x2) - base) / dx,
        (fun(x1, x2 + dx) - base) / dx,
    ])
# This is the copy of the previous algorithm but using finite difference gradient.
#
# It also works, but notice that we have to take smaller $\gamma$ and $\Delta x$ must also be very small.
# +
# Same descent as above, but using the finite-difference gradient.
# Note the much smaller step multiplier gamma and the small step dx.
gamma = 0.01
eps = 1e-10
v0 = np.array([-2, 2]) # initial point
dx = 0.00001  # finite-difference step for ngrad_f
while True:
    v1 = v0 - gamma * ngrad_f(v0[0], v0[1], dx) # one iteration
    if np.linalg.norm(v1-v0)<eps: # stop if two points are close to each other
        break
    v0 = v1
vmin = v1
print(f"v_min={vmin}")
# -
# ### Loss function as a scalar function of many variables
#
# Imagine that we build a model that predicts car features.
#
# We feed it a car characteristics list and expect to get a predictions of its certain features.
#
# In course of the model preparation we perform its supervised learning:
#
# We feed the model with cars characteristics whose features are already known and try to tune its
# parameters to minimize the difference between the known and the predicted features.
#
# The known and the predicted features are represented as vectors.
#
# Training the model we compute their distance and try to minimize it. This distance is called the loss function.
#
# In actual model training a large set of the true feature vectors are compared with the corresponding predictions.
#
# Each prediction vector depends on the model parameters and usually their number is large.
#
# So typically the loss function depends on very many variables.
#
# Training a model means finding the minimum of the loss function with respect to the model parameters.
#
# For the sake of illustration we consider a trivial case: one feature vector with $N$ components is compared with a "prediction" vector whose $N$ components must be tuned to minimize the loss.
#
# This is our feature vector. It is synthetic, i.e., is generated at random
#
# $$
# v = (y_1, y_2, \ldots, y_N)
# $$
#
# And this is the "prediction" vector of variables that must be tuned:
#
# $$
# u = (x_1, x_2, \ldots, x_N)
# $$
#
# The loss functions is
#
# $$
# L = \sum_{i=1}^{N} (x_i - y_i)^2
# $$
#
# Such function is called MSE (mean squared error). In the other words this is the squared Euclidean distance between $u$ and $v$.
#
# Of course we know in advance that the minimum is reached if $x_i=y_i$ for all $i$.
#
# But we will initialize $x_i$ with a random initial guesses and will apply the gradient descent to observe if they will arrive at $y_i$.
#
# For the gradient descent we need partial derivatives of $L$ at $x_i$:
#
# $$
# \frac{\partial L}{\partial x_i} = 2 (x_i - y_i)
# $$
#
# The gradient $\nabla L$ is a vector composed of these partial derivatives.
#
# Here is Python implementation of the loss function and its gradient.
# +
import numpy as np

# Target feature vector that gradient descent should recover.
v = np.array([222.0, 38.0, -542.0, -138.0, 502.0, -187.0, -91.0, 917.0, 50.0, -773.0])


def loss(u):
    """Squared-error loss between the candidate vector u and the target v."""
    return sum((x - y) ** 2 for x, y in zip(u, v))


def grad_loss(u):
    """Gradient of ``loss`` with respect to u: dL/dx_i = 2 * (x_i - y_i)."""
    return np.array([2.0 * (x - y) for x, y in zip(u, v)])
# -
# We need a function that performs the gradient descent.
def grad_desc(grad_fun, u0, gamma, eps=1e-10):
    """Minimize a function via gradient descent.

    Parameters
    ----------
    grad_fun : callable mapping a vector to its gradient vector.
    u0 : array-like, initial point.
    gamma : step-size multiplier.
    eps : stop when |u_{n+1} - u_n| < eps (default 1e-10).

    Returns
    -------
    np.ndarray whose rows are all iterates; the last row is the minimum.
    """
    history = [u0]
    current = u0
    while True:
        nxt = current - gamma * grad_fun(current)
        history.append(nxt)
        # Converged once two successive iterates are within eps.
        if np.linalg.norm(nxt - current) < eps:
            break
        current = nxt
    return np.array(history)
# The computations:
# +
# NOTE(review): no seed is passed to default_rng, so each run starts from a
# different random guess and the printed numbers are not reproducible.
rng = np.random.default_rng()
# initial guess in [0, 1)^len(v)
u0 = rng.random(len(v))
gamma = 0.1
sol = grad_desc(grad_loss, u0, gamma)
# sol stores every iterate: row 0 is the guess, the last row the minimum.
print(f"u0 = {sol[0]}")
print(f"umin= {sol[-1]}")
print(f"v = {v}")
# -
# Here is the illustration how the solution converges.
#
# Dependence of the loss function vs iteration step is called learning curve.
#
# In our case the learning curve is exponential (observe logarithmic scale along Y-axis).
# +
import matplotlib.pyplot as plt
# Loss value at every iterate (the learning curve) ...
ls = np.array([loss(u) for u in sol])
# ... and the step length between successive iterates (absolute error).
du = [np.linalg.norm(sol[i] - sol[i-1]) for i in range(1, len(sol))]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 4))
ax = axs[0]
ax.plot(range(len(ls)), ls)
ax.set_yscale('log')  # exponential convergence shows as a straight line
ax.set_xlabel(r'$n$')
ax.set_ylabel(r'$L$')
ax.set_title('Loss function')
ax = axs[1]
ax.plot(range(len(du)), du)
ax.set_yscale('log')
ax.set_xlabel(r'$n$')
ax.set_ylabel(r'$|u_{n+1}-u_{n}|$')
ax.set_title(r'Absolute error $|u_{n+1}-u_{n}|$')
for ax in axs:
    ax.grid()
# -
# ### Choosing the step size
#
# In the gradient descent method there is one parameter $\gamma$ that controls the step size.
#
# $$
# v_{n+1} = v_n - \gamma \nabla f(v_n)
# $$
#
# Its value dramatically influences the convergence.
#
# Too small $\gamma$ results in the slowing down of the computations.
#
# On the other hand, if $\gamma$ is too large the iterations can start bouncing near the minimum without approaching it.
#
# 
#
# <br>
#
# Typically when a model with a large number of parameters is trained the step size multiplier $\gamma$ is gradually decreased.
#
# The simplest way is to change it according to some schedule depending on the iteration number $n$.
#
# For example it can be hyperbolic decay:
#
# $$
# \gamma = \gamma_0 / n
# $$
#
# or exponential decay:
#
# $$
# \gamma = \gamma_0 e^{-n}
# $$
#
# Or it can be changed manually: iterations are stopped each, say 100, repetitions, $\gamma$ is modified and the iterations are continued.
#
# Better results can usually be achieved with a modified version of the gradient descent with an automatic adaptive adjustment of the step size multiplier.
#
# Here are some popular algorithms:
#
# - Adagrad
# - Adadelta
# - RMSprop
# - Adam
#
# Usually the best choice is Adam. This is the recommended method to try first when training a model.
# ### Exercises
#
# 3\. Compute gradients of the following functions:
#
# - $f(x,y) = x^3 - y^2$
# - $f(x,y) = \sin (x+y)$
# - $f(x,y) = x e^y$
#
# 4\. Write a program that finds the minimum of the function
# $$
# f(x_1, x_2, x_3) = - 0.1 \log x_1 - 0.3 \log x_2 - 0.6 \log x_3
# $$
#
# using a gradient descent.
|
07_Gradient_descent.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import matplotlib.pyplot as plt
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.molecules_graph_regression.load_net import gnn_model
from data.data import LoadData
# +
class DotDict(dict):
    """Dictionary whose entries are also reachable as attributes (d.key)."""

    def __init__(self, **kwds):
        super().__init__()
        self.update(kwds)
        # Aliasing __dict__ to the mapping itself makes attribute access
        # and item access share the same storage.
        self.__dict__ = self
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin CUDA device selection to ``gpu_id`` and pick the torch device.

    Returns torch.device('cuda') when a GPU was requested and is available,
    otherwise torch.device('cpu').
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the network once and return its total parameter count.

    Also prints the count, prefixed with the model name, for quick inspection.
    """
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    # Element count of each parameter tensor is the product of its shape.
    total_param = sum(np.prod(list(p.data.size())) for p in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train, validate and test a graph-regression model end to end.

    Pre-processes the dataset (optional positional encodings / full-graph
    conversion), trains with Adam + ReduceLROnPlateau, logs scalars to
    TensorBoard, checkpoints each epoch (keeping only recent files), and
    finally writes a results summary to disk.

    Parameters
    ----------
    MODEL_NAME : str key understood by ``gnn_model``.
    dataset : object exposing ``train``/``val``/``test`` splits, ``collate``
        and the pre-processing helpers used below.
    params : dict of optimization settings (seed, init_lr, epochs, ...).
    net_params : dict of network settings; must include 'device' and
        'total_param'.
    dirs : 4-tuple (root_log_dir, root_ckpt_dir, write_file_name,
        write_config_file) of output locations.
    """
    t0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    # Optional graph pre-processing, toggled by net_params flags.
    if net_params['lap_pos_enc']:
        st = time.time()
        print("[!] Adding Laplacian positional encoding.")
        dataset._add_laplacian_positional_encodings(net_params['pos_enc_dim'])
        print('Time LapPE:',time.time()-st)
    if net_params['wl_pos_enc']:
        st = time.time()
        print("[!] Adding WL positional encoding.")
        dataset._add_wl_positional_encodings()
        print('Time WL PE:',time.time()-st)
    if net_params['full_graph']:
        st = time.time()
        print("[!] Converting the given graphs to full graphs..")
        dataset._make_full_graph()
        print('Time taken to convert to full graphs:',time.time()-st)
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # Seed every RNG used (python, numpy, torch, and CUDA when applicable).
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_MAEs, epoch_val_MAEs = [], []
    scores = []  # per-epoch attention/score snapshots, sampled every 10 epochs
    # import train and evaluate functions
    from train.EA_train_molecules_graph_regression import train_epoch, evaluate_network
    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    # NOTE(review): if interrupted before the first epoch completes, `epoch`
    # below is unbound and the post-loop evaluation raises NameError.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                epoch_train_loss, epoch_train_mae, optimizer, scores_epoch = train_epoch(model, optimizer, device, train_loader, epoch)
                epoch_val_loss, epoch_val_mae = evaluate_network(model, device, val_loader, epoch)
                _, epoch_test_mae = evaluate_network(model, device, test_loader, epoch)
                if epoch%10==0:
                    scores.append(scores_epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_MAEs.append(epoch_train_mae)
                epoch_val_MAEs.append(epoch_val_mae)
                # TensorBoard scalars for this epoch.
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_mae', epoch_train_mae, epoch)
                writer.add_scalar('val/_mae', epoch_val_mae, epoch)
                writer.add_scalar('test/_mae', epoch_test_mae, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_MAE=epoch_train_mae, val_MAE=epoch_val_mae,
                              test_MAE=epoch_test_mae)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint; older checkpoints (before epoch-1) are removed.
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-t0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation on test and train sets with the last model state.
    _, test_mae = evaluate_network(model, device, test_loader, epoch)
    _, train_mae = evaluate_network(model, device, train_loader, epoch)
    print("Test MAE: {:.4f}".format(test_mae))
    print("Train MAE: {:.4f}".format(train_mae))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    # Histogram grid of the sampled score snapshots.
    # NOTE(review): indexing scores[j + i*5] assumes at least 10 snapshots,
    # i.e. at least ~91 completed epochs — verify before shorter runs.
    fig, axs = plt.subplots(nrows=2, ncols=5, figsize=(25,15))
    for i in range(2):
        for j in range(5):
            axs[i,j].hist(scores[j + i*5])
            axs[i,j].set_title('Epoch '+str((j + i*5)*10))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST MAE: {:.4f}\nTRAIN MAE: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_mae, train_mae, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
# +
"""
USER CONTROLS
"""
# Load the experiment configuration. A context manager closes the file
# handle; the original rebound `config` from the handle to the parsed dict,
# leaking the open file.
with open('configs/molecules_GraphTransformer_LapPE_ZINC_500k_sparse_graph_BN.json') as config_file:
    config = json.load(config_file)
# -
def main():
    """Configure and launch one training run from the loaded JSON config."""
    device = gpu_setup(True, 0)
    # model, dataset, out_dir
    MODEL_NAME = config['model']
    DATASET_NAME = config['dataset']
    dataset = LoadData(DATASET_NAME)
    out_dir = config['out_dir']
    # parameters
    params = config['params']
    # modif
    params['epochs'] = 100  # override the epoch count from the config file
    # end modif
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    # ZINC dataset vocabulary sizes
    net_params['num_atom_type'] = dataset.num_atom_type
    net_params['num_bond_type'] = dataset.num_bond_type
    # Timestamped output locations: logs, checkpoints, results, config dump.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_EA_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_EA_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_EA_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
main()
|
EA/visualize_attention_Baseline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup and Imports
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
import re
sns.set()  # apply seaborn's default theme; the bare `sns.set` was a no-op attribute access
import spacy
import nltk
from nltk.corpus import brown
from nltk.corpus import wordnet
from collections import Counter
import Tweet_Normalizer as tn
from bs4 import BeautifulSoup
import requests
import spacy
import scipy
import gensim
from sklearn.model_selection import train_test_split
from nltk.tokenize.toktok import ToktokTokenizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import xgboost
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import csv
import math
from scipy.stats import uniform
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterSampler
import tensorflow as tf
from tensorflow import keras
import pickle
from gensim.models.fasttext import FastText
import prepare_embeddings as pe
# # Load the Data
tweets = pd.read_csv("data/train.csv")
# # Clean the Data
# %%time
# Use the tweet_scrubber helper to clean the raw tweet text.
tweets = tn.tweet_scrubber(tweets, verbose = True)
# Replace whitespace-only rows left after cleaning with NaN.
tweets = tweets.replace(r'^(\s)+$', np.nan, regex = True)
# Drop the empty rows
tweets.dropna(subset=["Clean Tweets"], inplace = True)
# Reset the index in place
tweets.reset_index(drop = True, inplace = True)
# Take a look at the last few rows of the data
pd.set_option('display.max_colwidth', 2)
tweets.tail(n=15)
# # Split the Data into Training and Validation Sets
# Stratified 85/15 split preserves the class balance in both sets.
train_corpus, val_corpus, y_train, y_val = train_test_split(tweets["Clean Tweets"], np.array(tweets["target"]),
                                                            test_size=.15, random_state=42, stratify=np.array(tweets["target"]))
# # Prepare Dense Word Embeddings
# Tokenize the training and validation set
tokenizer = ToktokTokenizer()
tokenized_train = [tokenizer.tokenize(text) for text in train_corpus]
tokenized_val = [tokenizer.tokenize(text) for text in val_corpus]
# +
# %%time
# Number of embedding features per token
ft_num_features = 300
# Train FastText (skip-gram, sg=1) on the training tokens only.
ft_model = FastText(tokenized_train, vector_size = ft_num_features, window = 250, min_count = 0,
                    sample=1e-3, sg=1, epochs=100, workers=10)
# Average token vectors into one fixed-length vector per tweet.
X_train = pe.document_vectorizer(corpus=tokenized_train, model=ft_model, num_features=ft_num_features)
X_val = pe.document_vectorizer(corpus=tokenized_val, model=ft_model, num_features=ft_num_features)
# -
# Check the shapes
print(X_train.shape)
print(X_val.shape)
print(y_train.shape)
print(y_val.shape)
# # Baseline Scores
# ### Logistic Regression
lr_clf = LogisticRegression(max_iter=10000)
# %%time
# 5-fold out-of-fold predictions on the training set give the baseline score.
y_train_pred = cross_val_predict(lr_clf, X_train, y_train, cv = 5)
lr_base_acc = accuracy_score(y_train, y_train_pred) * 100
lr_base_f1 = f1_score(y_train, y_train_pred) * 100
print(f"Logistic Regression Baseline Accuracy: {lr_base_acc:.2f}")
print(f"Logistic Regression Baseline F1-Score: {lr_base_f1:.2f}")
confusion_matrix(y_train, y_train_pred)
# ### Naive Bayes
nb_clf = GaussianNB()
# %%time
y_train_pred = cross_val_predict(nb_clf, X_train, y_train, cv = 5)
nb_base_acc = accuracy_score(y_train, y_train_pred) * 100
nb_base_f1 = f1_score(y_train, y_train_pred) * 100
print(f"Naive Bayes Baseline Accuracy: {nb_base_acc:.2f}")
print(f"Naive Bayes Baseline F1-Score: {nb_base_f1:.2f}")
confusion_matrix(y_train, y_train_pred)
# ### Random Forest
rf_clf = RandomForestClassifier()
# %%time
y_train_pred = cross_val_predict(rf_clf, X_train, y_train, cv = 5)
rf_base_acc = accuracy_score(y_train, y_train_pred) * 100
rf_base_f1 = f1_score(y_train, y_train_pred) * 100
print(f"Random Forest Baseline Accuracy: {rf_base_acc:.2f}")
print(f"Random Forest Baseline F1-Score: {rf_base_f1:.2f}")
confusion_matrix(y_train, y_train_pred)
# ### XGBoost
xgb_clf = xgboost.XGBClassifier(use_label_encoder=False, objective = "binary:logistic")
# %%time
y_train_pred = cross_val_predict(xgb_clf, X_train, y_train, cv = 5)
xgb_base_acc = accuracy_score(y_train, y_train_pred) * 100
xgb_base_f1 = f1_score(y_train, y_train_pred) * 100
print(f"\nXGBoost Baseline Accuracy: {xgb_base_acc:.2f}")
print(f"XGBoost Baseline F1-Score: {xgb_base_f1:.2f}")
confusion_matrix(y_train, y_train_pred)
# ### Deep Learning
# Baseline MLP: two 25-unit ReLU layers feeding a sigmoid output for
# binary classification.
input_ = keras.layers.Input(X_train.shape[1:])
hidden1 = keras.layers.Dense(25, activation = "relu")(input_)
hidden2 = keras.layers.Dense(25, activation = "relu")(hidden1)
output = keras.layers.Dense(1, activation="sigmoid")(hidden2)
baseline_model = keras.Model(inputs=input_, outputs = output)
baseline_model.compile(loss = "binary_crossentropy", optimizer="adam", metrics = ["accuracy"])
# Model architecture
print(baseline_model.summary())
# +
# %%time
# Early stopping on validation loss; best weights are restored at the end.
early = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience = 50, restore_best_weights=True)
history = baseline_model.fit(X_train, y_train, validation_split=.2, epochs=500, batch_size=32, verbose=0, callbacks = [early])
# -
plt.figure(figsize = (12,8))
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='val')
plt.grid(True)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Train and Validation Loss During Training with Early Stopping")
plt.legend()
plt.show()
# Baseline Accuracy and F1 Scores
y_pred = baseline_model.predict(X_val)
# Round sigmoid outputs to hard 0/1 labels in the 1-D shape sklearn expects.
y_pred = np.round(y_pred.reshape((y_pred.shape[0])))
# Fix: `np.int` was removed in NumPy 1.24 — use the builtin `int` instead.
y_val = y_val.astype(int)
model_base_acc = np.round(accuracy_score(y_pred, y_val), 4) * 100
model_base_f1 = np.round(f1_score(y_pred, y_val), 4) * 100
print(f"Model Baseline Accuracy Score: {model_base_acc:.2f}%")
print(f"Model Baseline F1-Score: {model_base_f1:.2f}%")
# Confusion matrix
confusion_matrix(y_pred, y_val)
# # Baseline Results
# ### F1-Score
# +
# Collect the baseline scores of all five models into one table.
Models = ["Logistic Regression", "Naive Bayes", "Random Forest", "XGBoost", "Deep Learning"]
F1_Base_Scores = [lr_base_f1, nb_base_f1, rf_base_f1, xgb_base_f1, model_base_f1]
Accuracy_Base_Scores = [lr_base_acc, nb_base_acc, rf_base_acc, xgb_base_acc, model_base_acc]
# Create a data frame with the results
base_results = pd.DataFrame({"Model": Models, "F1": F1_Base_Scores, "Accuracy" : Accuracy_Base_Scores})
# -
base_results.sort_values(by="F1", ascending = False, inplace = True)
base_results
# Bar chart of the results, best F1 first
sns.set(rc = {'figure.figsize':(15,10)})
sns.barplot(x="Model", y = "F1", order = base_results["Model"] \
            , data = base_results, color = "blue")
plt.title("Baseline F1-Scores")
plt.ylabel("F1-Score")
plt.xlabel("Models")
# ### Accuracy
base_results.sort_values(by="Accuracy", ascending = False, inplace = True)
base_results
# Bar chart of the results, best accuracy first
sns.set(rc = {'figure.figsize':(15,10)})
sns.barplot(x="Model", y = "Accuracy", order = base_results["Model"] \
            , data = base_results, color = "blue")
plt.title("Baseline Accuracy Score")
plt.ylabel("Accuracy")
plt.xlabel("Models")
# # Hyperparameter Tuning
# ### Logistic Regression
# +
# %%time
# Randomly search the regularization strength C (F1 as the selection metric).
param_distribs = {
    "C" : np.linspace(0, 30, 10000),
}
lr_clf = LogisticRegression(penalty = "l2", solver = "lbfgs", max_iter=10000)
lr_rnd_search_cv = RandomizedSearchCV(lr_clf, param_distribs, n_iter = 50,
                                      cv=2, scoring = 'f1', random_state=42, n_jobs = -1)
lr_rnd_search_cv.fit(X_train, y_train)
print(lr_rnd_search_cv.best_params_)
print(lr_rnd_search_cv.best_score_)
# -
# Save the results in a dataframe
lr_rnd_search_df = pd.DataFrame(lr_rnd_search_cv.cv_results_)
# Rank the results by score
lr_rnd_search_df[["param_C", "mean_test_score"]].sort_values(by = "mean_test_score", ascending = False).head()
# ### Random Forest
# +
# %%time
# Randomly search the forest size over 50-299 trees.
param_distribs = {
    "n_estimators": np.arange(50, 300)
}
rf_clf = RandomForestClassifier()
rf_rnd_search_cv = RandomizedSearchCV(rf_clf, param_distribs, n_iter = 30,
                                      cv=2 ,scoring = 'f1', random_state=42)
rf_rnd_search_cv.fit(X_train, y_train)
print(rf_rnd_search_cv.best_params_)
print(rf_rnd_search_cv.best_score_)
# -
# Save the results
rf_rnd_search_df = pd.DataFrame(rf_rnd_search_cv.cv_results_)
tuned_params = ["param_n_estimators", "mean_test_score"]
# Rank by score
rf_rnd_search_df[tuned_params].sort_values(by = "mean_test_score", ascending = False).head()
# ### XGBoost
# Randomly sample XGBoost hyperparameter combinations from these distributions.
param_distribs = {
    "max_depth": [2,3,4,5,6,7],
    "gamma": uniform(loc = 0.0, scale = 3),
    "min_child_weight": list(range(20,51)),
    "colsample_bytree": uniform(loc = 0.1, scale = 0.9),
    "learning_rate": uniform(loc = 0.01, scale = 0.5),
    "subsample": uniform(loc = 0.5, scale = 0.5),
    "reg_lambda": uniform(loc = 0.01, scale = 3)
}
rng = np.random.RandomState(42)
n_iter = 100
param_list = list(ParameterSampler(param_distribs, n_iter = n_iter, random_state=rng))
# +
# %%time
# Evaluation sets for early stopping: train and held-out validation.
eval_set = [(X_train, y_train), (X_val, y_val)]
val_f1_score = []
n_est = []
counter = 1
# n_estimators is set high; early stopping picks the effective tree count.
xgb_cf = xgboost.XGBClassifier(n_estimators = 1000, use_label_encoder=False, objective = "binary:logistic")
for params in param_list:
    xgb_cf.set_params(**params)
    xgb_cf.fit(X_train, y_train, eval_set=eval_set, eval_metric = "auc", verbose = False, early_stopping_rounds = 30)
    val_set_preds = xgb_cf.predict(X_val)
    val_f1_score.append(f1_score(y_val, val_set_preds))
    # Record how many trees early stopping actually kept.
    n_est.append(int(xgb_cf.get_booster().attributes()["best_ntree_limit"]))
    if counter % 10 == 0:
        print(f'Done with {counter} of {n_iter}')
    counter += 1
# -
# Save the results
xgb_param_search_df = pd.DataFrame(param_list)
xgb_param_search_df["Validation F1-Score"] = val_f1_score
xgb_param_search_df["N Estimators"] = n_est
# Rank by score
xgb_param_search_df.sort_values(by="Validation F1-Score", ascending = False).head()
# ### Deep Learning
def build_model(n_hidden=1, n_neurons=5, lr_rate=3e-2, activation = "relu",
                drop_rate=.2, kernel = True, kernel_initializer = "he_normal", optimizer="Adam",
                momentum = .9, nesterov = False):
    """Build and compile a fully-connected binary classifier (Keras functional API).

    The parameters mirror the hyperparameter search space: number and width
    of hidden layers, dropout rate, activation, weight initializer, and the
    optimizer ("SGD" / "Adam" / anything else selects RMSprop) with its
    learning rate and momentum settings. `kernel` is accepted for backward
    compatibility but unused.

    Returns the compiled keras.Model.
    """
    input_ = keras.layers.Input(X_train.shape[1:])
    # Chain n_hidden Dense+Dropout pairs. Tracking the previous layer removes
    # the original first-iteration special case and fixes the NameError the
    # original raised when n_hidden == 0 (`hidden` was never bound).
    prev = input_
    for _ in range(n_hidden):
        prev = keras.layers.Dense(n_neurons, activation = activation,
                                  kernel_initializer=kernel_initializer)(prev)
        prev = keras.layers.Dropout(rate = drop_rate)(prev)
    output = keras.layers.Dense(1, activation = "sigmoid")(prev)
    model = keras.Model(inputs = input_, outputs = output)
    if optimizer == "SGD":
        opt = keras.optimizers.SGD(learning_rate=lr_rate, momentum=momentum, nesterov=nesterov)
    elif optimizer == "Adam":
        opt = keras.optimizers.Adam(learning_rate=lr_rate)
    else:
        opt = keras.optimizers.RMSprop(learning_rate=lr_rate, momentum = momentum)
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics = ["accuracy"])
    return model
# +
# Grid of hyperparameters to search through (sampled at random below).
param_distribs = {
    "n_hidden": np.arange(5, 20),
    "n_neurons": np.arange(50, 100),
    "drop_rate" : [0.20, 0.225, 0.25, 0.275, 0.30],
    "lr_rate" : [.005, .0075, 0.09, .01, 0.02, .025, .05, .075, .1 ],
    "activation": ["relu", "elu", "selu"],
    "kernel_initializer" : ["glorot_normal", "he_normal", "lecun_normal"],
    "optimizer" : ["SGD", "Adam","RMSprop"],
    "momentum" : [0.95, 0.99, 0.999, 0.9999],
    "nesterov" : [True, False]
}
# Number of models to consider
num_models = 100
param_list = list(ParameterSampler(param_distribs, n_iter = num_models))
# +
# %%time
val_acc_score = []
batch = []
count = 1
early = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience = 10)
for params in param_list:
    model = build_model(**params)
    # Batch size is sampled separately since it is a fit() argument.
    batch_size = np.random.choice([32, 64])
    history = model.fit(X_train, y_train, validation_split=.2, epochs=100, batch_size=batch_size, verbose=0, callbacks = [early])
    batch.append(batch_size)
    # Best (maximum) validation accuracy reached during this fit.
    # NOTE(review): the `min_val*` names are misleading — this is argmax/max.
    min_val_index = np.argmax(history.history["val_accuracy"])
    min_val = history.history["val_accuracy"][min_val_index]
    val_acc_score.append(min_val)
    if count % 10 == 0:
        print(f"Done with {count} of {num_models}")
    count += 1
# -
# Save the results in a dataframe
model_param_df = pd.DataFrame(param_list)
model_param_df["Batch Size"] = batch
model_param_df["Validation Accuracy"] = val_acc_score
# Rank the results by the validation accuracy
model_param_df.sort_values(by="Validation Accuracy", ascending = False)
# # Validation Scores
# ### Logistic Regression
# +
# Refit each tuned model on the full training set and score it on the
# held-out validation set.
# NOTE(review): f1_score/accuracy_score are called as (y_pred, y_val), i.e.
# with arguments swapped relative to sklearn's (y_true, y_pred) signature.
# Accuracy is symmetric; for binary F1 verify this matches the intent.
# Get the best hyperparameters
max_lr_f1 = lr_rnd_search_df["mean_test_score"].argmax()
best_C = lr_rnd_search_df.loc[max_lr_f1, "param_C"]
lr_clf = LogisticRegression(penalty="l2", C = best_C, random_state = 42, max_iter=10000)
# -
# %%time
lr_clf.fit(X_train, y_train)
# Validation results
y_pred = lr_clf.predict(X_val)
lr_val_f1 = np.round(f1_score(y_pred, y_val), 5) * 100
lr_val_acc = np.round(accuracy_score(y_pred, y_val), 5) * 100
print(f'Logistic Regression Validation Accuracy Score: {lr_val_acc:.2f}%')
print(f'Logistic Regression Validation F1-Score: {lr_val_f1:.2f}%')
confusion_matrix(y_pred, y_val)
# ### Naive Bayes
nb_clf = GaussianNB()
# %%time
nb_clf.fit(X_train, y_train)
# Validation Results
y_pred = nb_clf.predict(X_val)
nb_val_f1 = np.round(f1_score(y_pred, y_val), 5) * 100
nb_val_acc = np.round(accuracy_score(y_pred, y_val), 5) * 100
print(f'Naive Bayes Validation Accuracy Score: {nb_val_acc:.2f}%')
print(f'Naive Bayes Validation F1-Score: {nb_val_f1:.2f}%')
confusion_matrix(y_pred, y_val)
# ### Random Forest
# Get the best hyperparameters
max_rf_f1 = rf_rnd_search_df["mean_test_score"].argmax()
best_n_est = rf_rnd_search_df.loc[max_rf_f1, "param_n_estimators"]
rf_clf = RandomForestClassifier(n_estimators=best_n_est, random_state=42)
# %%time
rf_clf.fit(X_train, y_train)
# Validation Results
y_pred = rf_clf.predict(X_val)
rf_val_f1 = np.round(f1_score(y_pred, y_val), 5) * 100
rf_val_acc = np.round(accuracy_score(y_pred, y_val), 5) * 100
print(f'Random Forest Validation Accuracy Score: {rf_val_acc:.2f}%')
print(f'Random Forest Validation F1-Score: {rf_val_f1:.2f}%')
confusion_matrix(y_pred, y_val)
# ### XGBoost
# +
# Get the best hyperparameters from the sampled-search results table.
max_xgb_f1 = xgb_param_search_df["Validation F1-Score"].argmax()
best_colsample = xgb_param_search_df.loc[max_xgb_f1, "colsample_bytree"]
best_gamma = xgb_param_search_df.loc[max_xgb_f1, "gamma"]
best_lr = xgb_param_search_df.loc[max_xgb_f1, "learning_rate"]
best_max_depth = xgb_param_search_df.loc[max_xgb_f1, "max_depth"]
best_min_child = xgb_param_search_df.loc[max_xgb_f1, "min_child_weight"]
best_reg_lambda = xgb_param_search_df.loc[max_xgb_f1, "reg_lambda"]
best_subsample = xgb_param_search_df.loc[max_xgb_f1, "subsample"]
best_n_est = xgb_param_search_df.loc[max_xgb_f1, "N Estimators"]
xgb_clf = xgboost.XGBClassifier(n_estimators=best_n_est, colsample_bytree = best_colsample, gamma = best_gamma,
                                learning_rate=best_lr, max_depth = best_max_depth, min_child_weight=best_min_child,
                                reg_lambda=best_reg_lambda, subsample=best_subsample,
                                use_label_encoder=False, objective = "binary:logistic")
# -
# %%time
xgb_clf.fit(X_train, y_train)
# Validation Results
y_pred = xgb_clf.predict(X_val)
xgb_val_f1 = np.round(f1_score(y_pred, y_val), 5) * 100
xgb_val_acc = np.round(accuracy_score(y_pred, y_val), 5) * 100
print(f'XGBoost Validation Accuracy Score: {xgb_val_acc:.2f}%')
print(f'XGBoost Validation F1-Score: {xgb_val_f1:.2f}%')
confusion_matrix(y_pred, y_val)
# ### Deep Learning
# +
#Get the best hyperparameters
max_f1 = model_param_df["Validation Accuracy"].argmax()
best_n_neurons = model_param_df.loc[max_f1, "n_neurons"]
best_n_hidden = model_param_df.loc[max_f1, "n_hidden"]
best_lr_rate = model_param_df.loc[max_f1, "lr_rate"]
best_momentum = model_param_df.loc[max_f1, "momentum"]
best_init = model_param_df.loc[max_f1, "kernel_initializer"]
best_drop_rate = model_param_df.loc[max_f1, "drop_rate"]
best_act_func = model_param_df.loc[max_f1, "activation"]
best_opt = model_param_df.loc[max_f1, "optimizer"]
use_nesterov = model_param_df.loc[max_f1, "nesterov"]
best_batch_size = model_param_df.loc[max_f1, "Batch Size"]
#Initialize the model
model = build_model(n_hidden=best_n_hidden, n_neurons=best_n_neurons, lr_rate=best_lr_rate,
activation = best_act_func, drop_rate=best_drop_rate, kernel = True, kernel_initializer = best_init,
optimizer=best_opt, momentum = best_momentum, nesterov = use_nesterov)
# +
# %%time
early = keras.callbacks.EarlyStopping(monitor = 'val_loss', mode = 'min', verbose = 1, patience = 50, restore_best_weights=True)
history = model.fit(X_train, y_train, validation_split = .1, epochs = 1000, batch_size = best_batch_size, verbose = 0, callbacks=[early])
# -
plt.figure(figsize = (12,8))
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='val')
plt.grid(True)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Tuned Model Train and Validation Loss During Training with Early Stoppping")
plt.legend()
plt.show()
# Validation Results
# Validation predictions from the tuned Keras model.
y_pred = model.predict(X_val)
# Flatten the (n, 1) probability output and round to hard 0/1 labels,
# the form expected by the accuracy and F1-score functions.
y_pred = np.round(y_pred.reshape((y_pred.shape[0])))
# Cast the validation targets to integers.
# Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement and behaves identically here.
y_val = y_val.astype(int)
model_val_acc = np.round(accuracy_score(y_pred, y_val), 4) * 100
model_val_f1 = np.round(f1_score(y_pred, y_val), 4) * 100
print(f"Deep Learning Validation Accuracy Score: {model_val_acc:.2f}%")
print(f"Deep Learning Validation F1-Score: {model_val_f1:.2f}%")
confusion_matrix(y_pred, y_val)
# # Results
# ### F1-Score
# +
Models = ["Logistic Regression", "Naive Bayes", "Random Forest", "XGBoost", "Deep Learning"]
F1_Val_Scores = [lr_val_f1, nb_val_f1, rf_val_f1, xgb_val_f1, model_val_f1]
Accuracy_Val_Scores = [lr_val_acc, nb_val_acc, rf_val_acc, xgb_val_acc, model_val_acc]
#Create a data frame with the results
val_results = pd.DataFrame({"Model": Models, "F1": F1_Val_Scores, "Accuracy" : Accuracy_Val_Scores})
# -
val_results.sort_values(by="F1", ascending = False, inplace = True)
val_results
#Bar chart of the results
sns.set(rc = {'figure.figsize':(15,10)})
sns.barplot(x="Model", y = "F1", order = val_results["Model"] \
, data = val_results, color = "blue")
plt.title("Validation F1-Scores")
plt.ylabel("F1-Score")
plt.xlabel("Models")
# ### Accuracy
val_results.sort_values(by="Accuracy", ascending = False, inplace = True)
val_results
# Bar chart of the validation accuracy results.
sns.set(rc={'figure.figsize': (15, 10)})
sns.barplot(x="Model", y="Accuracy", order=val_results["Model"],
            data=val_results, color="blue")
# Fixed: the title previously read "Validation F1-Scores" (copied from the
# F1 chart above) even though this chart plots accuracy.
plt.title("Validation Accuracy Scores")
plt.ylabel("Accuracy")
plt.xlabel("Models")
# # Save the Best Model
# ### Create the dense embedding with all the training tweets
# +
tokenized_corpus = [tokenizer.tokenize(text) for text in tweets["Clean Tweets"]]
#Number of Features
ft_num_features = 300
ft_model = FastText(tokenized_corpus, vector_size = ft_num_features, window = 250, min_count = 0,
sample=1e-3, sg=1, epochs=100, workers=10)
X = pe.document_vectorizer(corpus=tokenized_corpus, model=ft_model, num_features=ft_num_features)
y = tweets["target"]
# -
#Check the shape
print(X.shape)
print(y.shape)
#Reinitialize XGBoost model
xgb_clf = xgboost.XGBClassifier(n_estimators=best_n_est, colsample_bytree = best_colsample, gamma = best_gamma,
learning_rate=best_lr, max_depth = best_max_depth, min_child_weight=best_min_child,
reg_lambda=best_reg_lambda, subsample=best_subsample,
use_label_encoder=False, objective = "binary:logistic")
# %%time
#Fit on all training instances
xgb_clf.fit(X, y)
#Save the model
pickle.dump(xgb_clf, open("best_ft_model.sav",'wb'))
|
FastText Models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''nlpkor2'': venv)'
# language: python
# name: python38364bitnlpkor2venvd6ba932570ef4af6b6af5bc7a778a398
# ---
# ### Get Frameworks and Custom Modules
# + tags=[]
import pandas as pd
import numpy as np
print(pd.__version__)
print(np.__version__)
# -
from modules.replace_dep import replace_with_dict, remove_college
replace_with_dict("언홍영")
remove_college("사회과학대학")
# ### Making Departments dataset
df_abstact_wrangle = pd.read_csv("data_wrangled/df_abstract_wrangle.csv", encoding="utf-8")
df_abstact_wrangle.sample(5)
# + tags=[]
departments = list(df_abstact_wrangle["학과"].unique())
print(len(departments), departments[:10])
# -
df_abstact_wrangle["학과"] = df_abstact_wrangle["학과"].fillna(" ")
df_abstact_wrangle["학과"].isnull().values.any()
# + tags=[]
# 이중전공 두 개로 쪼개기
import re
import numpy as np
from itertools import chain
# return list from series of comma-separated strings
def split_double_majors(text):
    """Split a department string on ',' or '/' into its component majors.

    Fixes over the original: the parameter was named ``str``, shadowing the
    builtin (all call sites pass the argument positionally, so the rename is
    backward-compatible), and the regex is now a raw string per convention.
    """
    return re.split(r',|/', text)
def chainer(series):
    """Flatten a Series of multi-major strings into one flat list of majors."""
    nested = series.apply(split_double_majors)
    return [major for majors in nested for major in majors]
# calculate lengths of splits
# lens = df_abstact_wrangle['학과'].str.split('/').map(len)
lens = df_abstact_wrangle['학과'].apply(split_double_majors).map(len)
lens
# + tags=[]
# create new dataframe, repeating or chaining as appropriate
res = pd.DataFrame({'대학코드': np.repeat(df_abstact_wrangle['대학코드'], lens),
'제목': np.repeat(df_abstact_wrangle['제목'], lens),
'학과': chainer(df_abstact_wrangle['학과']),
'과정': np.repeat(df_abstact_wrangle['과정'], lens),
'년도': np.repeat(df_abstact_wrangle['년도'], lens),
'BERT_SCORE': np.repeat(df_abstact_wrangle['BERT_SCORE'], lens),
'RNN_SCORE': np.repeat(df_abstact_wrangle['RNN_SCORE'], lens),
'REVIEW_LINK': np.repeat(df_abstact_wrangle['REVIEW_LINK'], lens),})
res
# -
df_department = res[["대학코드", "학과", "REVIEW_LINK"]]
df_department.head()
# ### Preprocessing Department names
# apply preprocessing module
df_department["학과"] = df_department["학과"].apply(lambda x: remove_college(x))
df_department["학과"] = df_department["학과"].apply(lambda x: replace_with_dict(x))
df_department.head()
from collections import Counter
print(Counter(df_department["학과"]))
# print(Counter(temp))
# +
from math import nan
departments = df_department["학과"].unique()
# departments = temp.unique()
korean_deparments = []
english_departments = []
for department in departments:
if type(department) == float:
pass
elif not re.search(r'[a-zA-Z]', department):
korean_deparments.append(department)
else:
english_departments.append(department)
# + tags=[]
# print(len(korean_deparments), korean_deparments)
print(len(korean_deparments),sorted(korean_deparments))
# + tags=[]
print(len(english_departments), english_departments)
# -
# ### Yielding metrics for unsupervised clustering(단과대 vs 종합대)
univ_code = "JP000023"
one_univ_departments = df_department.loc[df_department["대학코드"]==univ_code]["학과"]
one_univ_departments
# +
# 종합대에서 metrics 뽑아낸 예시
from collections import Counter, OrderedDict
dict_departments = Counter(one_univ_departments)
ordered_dict_departments= OrderedDict(dict_departments.most_common())
departments = list(ordered_dict_departments.keys())
number_of_students = list(ordered_dict_departments.values())
print(departments, number_of_students)
# -
# variance of students' departments
np.var(number_of_students)
# +
# 단과대에서 metrics 뽑아낸 예시
from collections import Counter, OrderedDict
univ_code = "DE000003"
one_univ_departments = df_department.loc[df_department["대학코드"]==univ_code]["학과"]
one_univ_departments
dict_departments = Counter(one_univ_departments)
ordered_dict_departments= OrderedDict(dict_departments.most_common())
departments = list(ordered_dict_departments.keys())
number_of_students = list(ordered_dict_departments.values())
print(departments, number_of_students)
# -
# variance of students' departments
np.var(number_of_students)
# ### Making Dataset for clustering
def var_and_sum(df_department, univ_code):
    """Summarise per-department visit counts for one university.

    Prints and returns ``(variance, total_visits, department_count)`` over
    the department frequency counts of the rows matching ``univ_code``.
    """
    majors = df_department.loc[df_department["대학코드"] == univ_code]["학과"]
    counts_by_dept = OrderedDict(Counter(majors).most_common())
    visit_counts = list(counts_by_dept.values())
    variance = np.var(visit_counts)
    total = sum(visit_counts)
    n_depts = len(counts_by_dept)
    print(variance, total, n_depts)
    return variance, total, n_depts
var_and_sum(df_department, univ_code)
univ_list = list(df_department["대학코드"].drop_duplicates().values)[1:]
univ_list[:5]
df_univ = pd.read_csv("data_wrangled/df_univ_wrangled.csv")
df_univ.head(2)
# get univ_name
given_code = "NL000010"
df_univ.loc[df_univ["대학코드"]==given_code]["대학명"].iloc[0]
# + tags=[]
df_data = pd.DataFrame(columns=["대학코드", "대학명","NUM_VISITS", "NUM_DEP", "VAR"])
for univ_code_i in univ_list:
univ_name = df_univ.loc[df_univ["대학코드"]==univ_code_i]["대학명"].iloc[0]
var_data, sum_data, dep_data = var_and_sum(df_department, univ_code_i)
df_data = df_data.append({"대학코드":univ_code_i, "대학명": univ_name,
"NUM_VISITS":sum_data, "NUM_DEP":dep_data, "VAR":var_data}, ignore_index=True)
df_data
# -
df_data.to_csv("./data_wrangled/department_data.csv", index=False)
|
preprocess_departments.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import random,math
def sqrt_number():
    """Prompt for a range [m, k] and a sample count n, then return
    sqrt(mean of n random integers drawn uniformly from [m, k]).

    All three values are read interactively; assumes m <= k and n > 0
    (n == 0 would raise ZeroDivisionError in total/n).
    """
    m = int(input('请输入一个整数m:'))
    k = int(input('请输入一个大于m的整数k: '))
    i=0
    total=0
    n = int(input('请输入一个你想输入的整数: '))
    # Accumulate n uniform draws from [m, k].
    while i < n:
        number = random.randint(m,k)
        total+=number
        i+=1
    return (math.sqrt(total/n))
#主程序
print(sqrt_number())
# +
import random,math
def log_number():
    """Prompt for a range [m, k] and a count n; draw n random integers from
    [m, k] and return the pair (sum of log(x), sum of 1/log(x)) over the draws.

    NOTE(review): a draw of 1 gives log(1) == 0 and raises ZeroDivisionError
    in 1/x, and m <= 0 can make math.log error — presumably m >= 2 is
    expected; confirm with the assignment spec.
    """
    m = int(input('请输入一个整数m:'))
    k = int(input('请输入一个大于m的整数k: '))
    i=0
    total_1=0
    total_2=0
    n = int(input('请输入一个你想输入的整数: '))
    # Accumulate log(x) and 1/log(x) for n uniform draws.
    while i < n:
        x = math.log(random.randint(m,k))
        y=1/x
        total_1+=x
        total_2+=y
        i+=1
    return total_1,total_2
#主程序
print('西格玛log(随机整数)及西格玛1/log(随机整数分别为: ',log_number())
# +
import random
def start_game():
print(
'''
===游戏开始!===
\\(≧▽≦)//
'''
)
def win():
print(
'''
===你赢了!计算机没猜粗来233===
~~~O(∩_∩)O~~
'''
)
def lose():
print(
'''
====你输了===
。。o(>﹏<)o。。
'''
)
def game_over():
print(
'''
===游戏结束===
'''
)
def show_instruction():
print(
'''
请先输入一个整数A,再输入一个更大的整数B,让计算机在1
到B之间猜,如果在你规定的A次之内计算机将数猜出来,你就
输了,如果计算机没猜出来,你就赢了。
'''
)
def show_team():
print(
'''
===制作团队:二狗工作室===
'''
)
def menu():
print('''=====游戏菜单=====
1. 游戏说明
2. 开始游戏
3. 退出游戏
4. 制作团队
=====游戏菜单=====''')
def guess_game():
    """Play one round: the computer randomly guesses the user's number.

    The user picks a secret integer A, an upper bound B, and a guess budget.
    The computer draws uniformly from [1, B]; if it hits A within the budget
    the user loses, otherwise the user wins.

    Fixes over the original: the counter/limit names were misspelled
    inconsistently (guess_tiames/guess_times, max_tiames/max_times), which
    raised NameError at runtime; win() was printed after every failed guess
    instead of once when the budget ran out; and the ``<=`` loop bound gave
    the computer one guess more than the stated budget.
    """
    guess = int(input('请指定一个让计算机猜的整数A:'))
    n = int(input('请输入一个大于0的整数B,作为神秘整数的上界: '))
    max_times = int(input('计算机最多能猜多少次'))
    guess_times = 0
    while guess_times < max_times:
        number = random.randint(1, n)
        guess_times += 1
        if guess == number:
            lose()
            print('计算机猜了', guess_times, '次')
            print('比标准次数少', max_times - guess_times, '次')
            break
    else:
        # Budget exhausted without a hit: the computer failed, the user wins.
        win()
#主函数
def main():
    """Run the menu loop until the user chooses to quit (option 3)."""
    while True:
        menu()
        choice = int(input('请输入你的选择: '))
        if choice == 1:
            show_instruction()
        elif choice == 2:
            start_game()
            guess_game()
        elif choice == 3:
            game_over()
            break
        else:
            # Option 4 (and any other input) shows the credits.
            show_team()
#主程序
if __name__ == '__main__':
main()
|
chapter2/homework/computer/4-5/201611680805.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pandas as pd
df = pd.read_table("/home/mathiane/SemanticSegmentation/models/research/deeplab/datasets/TumorDetection/dataset/ImageSets/train.txt", header = None)
list_picts = df.iloc[:,0]
list_picts
# !python train_dev.py --dataroot /home/mathiane/SemanticSegmentation/models/research/deeplab/datasets/TumorDetection/dataset --dataset_mode segmentation --preprocess any --no_flip --name TypicalAtypical --model segmentation_model.py
|
.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
project_path = "C:/workspace/Bus Project"
os.chdir(project_path)
# +
from multiprocessing import Pool
import multiprocessing
import pandas as pd
import numpy as np
import datetime
from tqdm import tqdm
import folium
from functools import partial
import bus.analyzer as anz
import bus.stay as stay
# +
# 데이터 기간 설정
start_date = datetime.datetime(2019, 6, 1)
end_date = datetime.datetime(2019, 6, 28)
# 로딩할 파일 명 리스트 생성
input_path_list = anz.make_input_path(start_date, end_date)
station_usage_df = anz.parallel_load_total_usage_df(input_path_list, core=8)
# 데이터 로드
user_df = anz.load_user_df()
station_df = anz.load_station_df()
cluster_df = anz.load_cluster_df()
cluster_station_df = anz.load_cluster_station_df()
# 정류장 간 이동 데이터 -> 군집간 이동 데이터
cluster_usage_df = stay.create_cluster_usage_df(station_usage_df, cluster_station_df)
# -
# 체류시간 추출
stay_df = stay.get_walk_df(cluster_usage_df)
# 결측값 제거
stay_df = stay_df.dropna()
# 관광객 데이터 추출
tourist_stay_df = stay.fillter_usage_df(stay_df, user_df, tourist=True)
tourist_stay_df['stay_time'] = tourist_stay_df["geton_datetime"] - tourist_stay_df["getoff_datetime"]
tourist_stay_df = tourist_stay_df[tourist_stay_df['stay_time'].apply(lambda x : x.seconds/60 <= 1000)]
tourist_stay_df = tourist_stay_df[tourist_stay_df['stay_time'].apply(lambda x : x.seconds/60 >= 30)]
tourist_stay_df.sort_values(by="stay_time")
temp_stay_df = tourist_stay_df[["getoff_cluster_id", "geton_cluster_id", "stay_time"]].groupby(by=["getoff_cluster_id", "geton_cluster_id"]).sum()
temp_count_df =tourist_stay_df[["getoff_cluster_id", "geton_cluster_id", "stay_time"]].groupby(by=["getoff_cluster_id", "geton_cluster_id"]).count().rename(columns={"stay_time":"count"})
df = pd.merge(temp_stay_df, temp_count_df, on=['getoff_cluster_id', 'geton_cluster_id'])
df['stay_time'] = df['stay_time'].apply(lambda x : x.seconds/60)
df['stay_time'] = df['stay_time']/df['count']
df = df.reset_index()
df
# +
class ClusterManager:
    """Utilities over a bus-stop cluster table: pairwise distances, the
    candidate "stay area" between two clusters, folium maps, and accumulated
    stay-time statistics.

    Expects ``cluster_df`` to have at least the columns ``cluster_id``,
    ``cluster_longitude`` and ``cluster_latitude``.

    NOTE(review): ``extract_cluster_by_id`` reads the module-level
    ``cluster_df`` global, not ``self.cluster_df`` — this only works because
    the notebook defines that global; verify before reusing this class.
    """
    def __init__(self, cluster_df):
        # Working cluster table; mutated in place by several methods below.
        self.cluster_df = cluster_df
    def set_cluster_df(self, cluster_df):
        # Replace the working cluster table.
        self.cluster_df = cluster_df
    def extract_cluster_by_id(self, cluster_id):
        # Row(s) matching cluster_id.
        # NOTE(review): uses the global cluster_df, not self.cluster_df.
        return cluster_df[cluster_df["cluster_id"] == cluster_id]
    def get_location_from_cluster(self, cluster):
        # (longitude, latitude) of the first row of a single-cluster frame.
        return cluster[["cluster_longitude", "cluster_latitude"]].values[0]
    def set_dist_clolums_from_two_clustes(self, id1, id2, longitude = "cluster_longitude", latitude = "cluster_latitude"):
        """Add a 'dist' column: for every cluster, the sum of its planar
        distances (degrees scaled by 6500000/360) to clusters id1 and id2.

        Mutates self.cluster_df in place by adding the helper columns.
        NOTE(review): the ``longitude`` parameter is never used — the literal
        "cluster_longitude" is hard-coded below.
        """
        location1 = self.get_location_from_cluster(self.extract_cluster_by_id(id1))
        location2 = self.get_location_from_cluster(self.extract_cluster_by_id(id2))
        x1, y1 = location1
        x2, y2 = location2
        cluster_df = self.cluster_df
        selector = list(cluster_df.columns)
        # Squared component distances to each anchor cluster.
        cluster_df['dist1_x'] = (cluster_df["cluster_longitude"] - x1)**2
        cluster_df['dist1_y'] = (cluster_df[latitude] - y1)**2
        cluster_df['dist2_x'] = (cluster_df["cluster_longitude"] - x2)**2
        cluster_df['dist2_y'] = (cluster_df[latitude] - y2)**2
        cluster_df['dist1'] = (cluster_df['dist1_x'] + cluster_df['dist1_y'])**(1/2)
        cluster_df['dist2'] = (cluster_df['dist2_x'] + cluster_df['dist2_y'])**(1/2)
        cluster_df['dist'] = cluster_df['dist1'] + cluster_df['dist2']
        # Degrees -> approximate metres.
        cluster_df['dist'] = cluster_df['dist']*6500000/360
        cluster_df['dist'] = cluster_df['dist'].apply(lambda x : int(x))
        if "dist" not in selector:
            selector.append("dist")
        # NOTE(review): these two statements rebind the *local* name only;
        # the column selection and the sort are discarded, while the helper
        # columns added above remain on self.cluster_df.
        cluster_df = cluster_df[selector]
        cluster_df = cluster_df.sort_values(by="dist")
    def get_dist(self, id1, id2):
        # Straight-line distance between two clusters (same degree->metre scale).
        location1 = self.get_location_from_cluster(self.extract_cluster_by_id(id1))
        location2 = self.get_location_from_cluster(self.extract_cluster_by_id(id2))
        x1, y1 = location1
        x2, y2 = location2
        return ((x1-x2)**2+(y1-y2)**2)**(1/2)*6500000/360
    def get_column_filter(self):
        # Snapshot of the current columns, used to restore them later.
        return self.cluster_df.columns
    def filter_column(self, column_filter):
        # Keep only the given columns (drops the temporary distance columns).
        self.cluster_df = self.cluster_df[column_filter]
    def get_stay_area_flag_list(self, id1, id2):
        """Boolean Series: True for clusters whose summed distance to id1 and
        id2 is within 1% of the direct id1-id2 distance (an ellipse-like
        corridor between the two clusters)."""
        column_filter = self.get_column_filter()
        dist = self.get_dist(id1, id2)
        self.set_dist_clolums_from_two_clustes(id1, id2)
        stay_area_flag_list = self.cluster_df['dist'] <= dist*1.01
        # Restore the original columns after the temporary 'dist' computation.
        self.filter_column(column_filter)
        return stay_area_flag_list
    def get_stay_area_df(self, id1, id2):
        # Rows of the stay-area corridor between the two clusters.
        return self.cluster_df[self.get_stay_area_flag_list(id1, id2)]
    def get_cluster_map(self):
        # NOTE(review): immediately shadowed by the next definition of the
        # same name, so this zero-argument variant is never reachable.
        return get_cluster_map(self.cluster_df)
    def get_cluster_map(self, df):
        # Folium map with a small blue marker per cluster, centred on the mean.
        center = [df["cluster_latitude"].mean(), df["cluster_longitude"].mean()]
        map = folium.Map(location=center, zoom_start=10)
        for i in df.index:
            folium.CircleMarker([df.loc[i, "cluster_latitude"], df.loc[i, "cluster_longitude"]], color = 'blue', weight = 5, radius=1).add_to(map)
        return map
    def get_stay_area_map(self, id1, id2):
        """Map of the stay area with the two anchor clusters highlighted in red."""
        stay_area_df = self.get_stay_area_df(id1, id2)
        map = self.get_cluster_map(stay_area_df)
        location1 = self.get_location_from_cluster(self.extract_cluster_by_id(id1))
        location2 = self.get_location_from_cluster(self.extract_cluster_by_id(id2))
        # NOTE(review): location1 is marked twice — the second marker likely
        # should use location2. Also these tuples are (lon, lat) while folium
        # markers above are built as [lat, lon]; confirm the intended order.
        folium.CircleMarker(location1, color = 'red', weight = 10, radius=3).add_to(map)
        folium.CircleMarker(location1, color = 'red', weight = 10, radius=3).add_to(map)
        return map
    def get_stay_cluster_infor(self, stay_df):
        """Per-cluster mean stay time and trip count, spreading each trip's
        stay time uniformly over the clusters in its stay-area corridor."""
        # NOTE(review): slicing then assigning columns may trigger pandas'
        # SettingWithCopyWarning; behaviour relied on here is in-place growth.
        stay_time_table = self.cluster_df[['cluster_id']]
        stay_time_table['count'] = 0
        stay_time_table['stay_time'] = 0
        for i in tqdm(range(len(stay_df))):
            start_id = stay_df["getoff_cluster_id"][i]
            end_id = stay_df["geton_cluster_id"][i]
            stay_time = stay_df['stay_time'][i]
            self.acc_stay_time(stay_time_table, start_id, end_id, stay_time)
        # Convert accumulated totals to per-trip means where count > 0.
        stay_time_table["stay_time"] = stay_time_table[["stay_time", "count"]].apply(lambda x : (x[0]/x[1]) if (x[1] != 0) else x[0], axis = 1)
        return stay_time_table[['cluster_id', 'stay_time', 'count']]
    def acc_stay_time(self, stay_time_table, start_id, end_id, stay_time):
        # Distribute one trip's stay time evenly across its stay-area clusters
        # and bump each such cluster's trip count. Mutates stay_time_table.
        stay_area_flag_list = self.get_stay_area_flag_list(start_id, end_id)
        stay_time_table['flag'] = stay_area_flag_list
        cluster_num = stay_area_flag_list.value_counts()[True]
        stay_time = stay_time/cluster_num
        stay_time_table['stay_time'] = stay_time_table[['flag', 'stay_time']].apply(lambda x : x[1]+stay_time if x[0] else x[1], axis = 1)
        stay_time_table['count'] = stay_time_table[['flag', 'count']].apply(lambda x : x[1]+1 if x[0] else x[1], axis = 1)
cm = ClusterManager(cluster_df)
# cm.get_stay_area_df(1, 10)
cm.get_stay_area_map(757, 41)
# c = cm.get_stay_area_flag_list(1, 1)
# cm.get_stay_area_flag_table()
d = cm.get_stay_cluster_infor(df)
# -
# d.to_csv("data/analysis/citizen_cluster_stay_df.csv")
# d = pd.read_csv("data/analysis/cluster_stay_df.csv", encoding = "cp949")
# d = pd.read_csv("data/analysis/citizen_cluster_stay_df.csv")
d = pd.read_csv("data/analysis/tourist_cluster_stay_df.csv")
d1 = d
d1.sort_values(by='count')
|
main/2. 체류시간 분석/3. 체류시간 추출.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from chromwindow import window
# +
data = pd.DataFrame({'chrom': ['chr1']+['chr2']*10,
'start': list(range(10)) + [40],
'end': list(map(sum, zip(range(10), [5, 1]*5+[20]))) + [45],
'species': ['human']*5+['chimp']*6, 'run': range(0, 110, 10), 'analysis' : np.linspace(3, 7, 11)})
data
# -
# Make a function `interval_count` that is called on the intervals in windows of size 5. Note that the `window` decorator only handles a single chromosome so you always need to group your data by chromosome:
# +
@window(size=5)
def interval_count(df):
return len(df.index)
df = data.groupby('chrom').apply(interval_count)
df
# -
# You can get rid of the extra index like this:
df.reset_index(drop=True, level=-1)
# You can further convert the index to colums like this:
df.reset_index(drop=True, level=-1).reset_index()
# You can group by more than just the chromosome if you like:
data.groupby(['chrom', 'species']).apply(interval_count).reset_index(drop=True, level=-1).reset_index()
# You can use the `even` keyword to put approximately the same number of intervals in each window (to the extent that this is possible):
# +
@window(size=10)
def interval_sum(df):
return (df.end-df.start).sum()
data.groupby('chrom').apply(interval_sum).reset_index(drop=True, level=-1).reset_index()
# -
# You can return any number of values from your function. Just do so as a Series or a dictionary:
# +
@window(size=10)
def multiple_stats(df):
# return a Series
return df[['analysis','run']].sum()
data.groupby(['chrom']).apply(multiple_stats).reset_index(drop=True, level=-1).reset_index()
# +
@window(size=10)
def multiple_stats(df):
# return dictionary
return dict(tot_length=(df.end-df.start).sum(), interval_count=len(df), mean_length=(df.end-df.start).mean())
data.groupby(['chrom']).apply(multiple_stats).reset_index(drop=True, level=-1).reset_index()
# +
@window(size=100000000, empty=True, fill='hg19')
def count1(df):
return len(df.index)
data.groupby('chrom').apply(count1).reset_index(drop=True, level=-1).reset_index()
# -
# Use the `logbase` argument to make windows increase logarithmically with the specified base, starting from size. Usefull if the density of intervals decrease with distance (E.g. reletive to some annotation.)
# +
@window(size=2, logbase=2)
def count2(df):
return len(df.index)
data.groupby('chrom').apply(count2).reset_index(drop=True, level=-1).reset_index()
# -
# If you get fed up with adding `.reset_index(drop=True, level=-1).reset_index()` you can make your own reset_index to pipe it trough:
def reset_group_index(df):
    """Drop the innermost (per-window) index level, then promote the
    remaining group index to ordinary columns."""
    without_window_level = df.reset_index(level=-1, drop=True)
    return without_window_level.reset_index()
# +
@window(size=10)
def count(df):
return len(df.index)
data.groupby(['chrom']).apply(count).pipe(reset_group_index)
|
example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 학습목표
#
# 다항 선형 회귀(Multivariable Linear regression)에 대해 알아본다.
# 핵심키워드
#
# * 다항 선형 회귀(Multivariable Linear regression)
# * 가설 함수(Hypothesis Function)
# * 평균 제곱 오차(Mean Squared Error)
# * 경사하강법(Gradient descent)
# 다항 선형 회귀란? 여러 요소를 종합적으로 고려하여 한 가지 항목 값을 예측하는 것
import torch
# ## DATA
x_train = torch.FloatTensor([[73, 80, 75],
[93, 88, 98],
[89, 91, 90],
[96, 98, 100],
[73, 66, 70]])
y_train = torch.FloatTensor([[152], [185], [180], [196], [142]])
# ### Multivariate Hypothesis Function: Naive
# + jupyter={"outputs_hidden": true} tags=[]
# variate들의 개수가 3개일 경우 H(x) 계산
hypothesis = x1_train * w1 + x2_train * w2 + x3_train * w3 + b
# -
# 이런식으로 계산한다면 고려해야 할 변수가 1000개일 경우 일일히 코드로 나열해 주어야 하기에 막막
# ### Multivatiate Hypothesis Function: using Matrix!
# Matmul로 한번에 계산!
# * 더 간결하고,
# * x벡터의 길이가 바뀌어도 코드를 바꿀 필요가 없고,
# * 속도도 더 빠르다!
# 
# + jupyter={"outputs_hidden": true} tags=[]
# H(x) 계산
hypothesis = x_train.matmul(W) + b # or .mm or @
# -
# ---
# cost func은 simple linear regression과 같다.
cost = torch.mean((hypothesis - y_train)**2)
# + jupyter={"outputs_hidden": true} tags=[]
# optimizer 설정 역시 마찬가지로 linear regression과 같다.
from torch import optim
# optimizer 설정
optimizer = optim.SGD([W, b], lr = 1e-5)
# 파라미터 갱신(학습)
optimizer.zero_grad()
cost.backward()
optimizer.step()
# -
# ---
# ## Full Code w/ torch.optim
# loss 미분하여 gradient 직접 계산할 필요 x
# 역전파(.backward())와 파라미터 갱신(.step()) 편리
# +
import torch
from torch import optim
# 데이터
x_train = torch.FloatTensor([[73, 80, 75],
[93, 88, 98],
[89, 91, 90],
[96, 98, 100],
[73, 66, 70]])
y_train = torch.FloatTensor([[152], [185], [180], [196], [142]])
# 모델 초기화
W = torch.zeros((3,1), requires_grad = True) # x_train과 행렬곱하기에 맞는 형태
b = torch.zeros(1, requires_grad = True)
# optimizer 설정
optimizer = optim.SGD([W,b], lr = 1e-5)
#---반복문 시작---
nb_epochs = 20
for epoch in range(nb_epochs):
# 모델로 예측 및 loss값 구하기
hypothesis = x_train.mm(W) + b # .matmul or @
loss = torch.mean((hypothesis - y_train) ** 2)
# loss와 optimizer 규칙을 이용해 파라미터 갱신
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Epoch {:1d}/{} hypothesis: {} Loss: {:.6f}'.format(epoch, nb_epochs, hypothesis.squeeze().detach(), loss.item()
))
# -
# * 점점 줄어드는 loss
# * 점점 y_train과 가까워지는 hypothesis
# * Learing Rate에 따라 발산할지도
# ---
# W, bf를 일일이 써주는 것은 모델이 커질수록 귀찮아진다.(아래 부분)
# + jupyter={"outputs_hidden": true} tags=[]
'''
# 모델 초기화
W = torch.zeros((3,1), requires_grad = True) # x_train과 행렬곱하기에 맞는 형태
b = torch.zeros(1, requires_grad = True)
# 모델로 예측 및 loss값 구하기
hypothesis = x_train.mm(W) + b # .matmul or @
'''
# -
# 이 때 torch의 nn.Module 활용!
# +
import torch.nn as nn
class MultivariateLinearRegressionModel(nn.Module):
    """Multivariable linear regression: a single affine map from 3 features
    to 1 output value (y = xW + b)."""

    def __init__(self):
        super().__init__()
        # One linear layer defines the whole model: 3 inputs -> 1 output.
        self.linear = nn.Linear(3, 1)

    def forward(self, x):
        # x: (batch, 3) -> predictions of shape (batch, 1).
        return self.linear(x)
model = MultivariateLinearRegressionModel()
hypothesis = model(x_train)
# -
# * nn.Module을 상속해서 모델 생성
# * nn.Linear(3,1)
# * 입력차원: 3
# * 출력차원: 1
# * hypothesis 계산은 forward() 에서!
# * gradient계산은 pytorch가 알아서 해준다. backward()
# Pytorch에서는 다양한 Cost Functions를 제공하기도 한다.
# 기존의 Cost 계산하던 방식
loss = torch.mean((hypothesis - y_train)**2)
# +
# Use the cost function provided by torch.
# Fixed: the original line was "from torch.nn.functional as F", which is a
# SyntaxError — "from ... as" is not valid Python import syntax; the correct
# form is "import torch.nn.functional as F".
import torch.nn.functional as F
cost = F.mse_loss(hypothesis, y_train)  # hypothesis is the model's prediction
# -
# * torch.nn.functional에서 제공하는 loss function사용
# * 장점: 쉽게 다른 loss로 교체 가능!(l1_loss, smooth_l1_loss, 등...)
# ## Full code w/ torch.optim
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# 데이터
x_train = torch.FloatTensor([[73, 80, 75],
[93, 88, 98],
[89, 91, 90],
[96, 98, 100],
[73, 66, 70]])
y_train = torch.FloatTensor([[152], [185], [180], [196], [142]])
# 모델 불러오기
class MultivariateLinearRegressionModel(nn.Module):
# 대충 여기서 x_train과 행렬곱할 W의 shape 정해주는 듯
def __init__(self):
super().__init__()
self.linear = nn.Linear(3,1)
# 여기서 모델이 어떻게 연산을 수행할 것인지 정해줌
def forward(self, x):
return self.linear(x)
# 모델 초기화(not 파라미터 초기화 anymore)
# W = torch.zero((3,1), requires_grad = True)
# b = torch.zero(1, requires_grad = True)
model = MultivariateLinearRegressionModel()
# optimizer정의
optimizer = optim.SGD(model.parameters(), lr = 1e-5)
# 예측, loss계산, gradient이용한 역전파 및 파라미터 갱신을 입력값의 수만큼 for loop으로 돌리기
nb_epochs = 20
for epoch in range(nb_epochs):
# H(x) 계산
# (x)hypothesis = x_train.mm(W) + b # .matmul or @
hypothesis = model(x_train)
# loss 계산
# (x) loss = torch.mean((hypothesis - y_train)**2)
loss = F.mse_loss(hypothesis, y_train)
# loss로 모델 파라미터 개선
optimizer.zero_grad() #초기화
loss.backward() #역전파(gradient 계산)
optimizer.step() # 역전파 결과 토대로 파라미터 개선
print('Epoch: {:1d}/{} hypothesis: {} Loss: {:.6f}'.format(epoch, nb_epochs, hypothesis.squeeze().detach(), loss.item()))
# -
# 다음 주제: Pytorch에서는 많은 양의 데이터를 어떻게 다룰까요?
|
Lab-04-1 Multivariable Linear regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 演習2 - ショアのアルゴリズム
#
# # 歴史的背景
#
# 多くの計算問題は、入力問題の大きさに応じてどのように計算量が増大するかで、アルゴリズムの性能を測ります。例えば、足し算のアルゴリズムは、足す数字の大きさに応じて線形に計算量が増大しますが、中には入力の大きさに応じて計算量が指数関数的に成長するものもあります。こうした指数関数的に計算量が増大する計算問題は、地球上のどのコンピューターを使っても解けないほど大きくなることがあります。インターネットのセキュリティは、現実的な時間内では解決できない計算問題が存在することによって担保されています。
#
# 1994年、ピーター・ショアは、量子コンピューター上で整数を効率的に素因数分解できることを示しました。[1] これは大きなニュースです。 というのも、この問題を解く最良の古典アルゴリズムは、指数関数的に成長するアルゴリズムのひとつだからです。実際、[RSA暗号](https://en.wikipedia.org/wiki/RSA_(cryptosystem))は、ある一定以上の大きな数を素因数分解することが不可能であることに依存しています。
#
# 現在の古典的なコンピューターには大きすぎる整数を素因数分解するには、何百万もの量子ビットとゲートが必要になりますが、これらの回路は現在の量子コンピューターで正常に動作させるには大きすぎます。
#
# では、<NAME>、<NAME>、<NAME>、<NAME>、<NAME>、<NAME>は、2001年に量子コンピューターによる15の因数分解をどうやって成功させたのでしょうか?[2]
#
# ショアのアルゴリズムの回路を作る上での課題は、制御された$ay \bmod N$を計算する回路を作ることです。多項式のゲート数で回路を作る方法は分かっていますが、現在のコンピューターでは大きすぎます。幸いなことに、問題に関する情報が事前にわかっていれば、ちょっと「ズル」をしてより効率的な回路を作ることができる場合があります。
#
# 上記の論文の著者は、他の多くの科学的進歩に加えて、$7y \bmod 15$を実行する非常にシンプルな回路を発見しました。これにより、彼らのハードウェアで動作させるのに十分な大きさの回路ができました。今回のIBM Quantum Challengeが終わる頃には、ショアのアルゴリズムに使用する$35y \bmod N$の回路を作成し、`ibmq_santiago`上で動作させていることでしょう。
#
# この課題で行われていることをより厳密に理解したい場合は、[Qiskit Textbook のショアのアルゴリズムの章](https://qiskit.org/textbook/ja/ch-algorithms/shor.html)を読むことをオススメしますが、そちらを参照せずに課題を完了することもできます。
#
# ### 参考文献
# 1. Shor, <NAME>. "Algorithms for quantum computation: discrete logarithms and factoring." Proceedings 35th annual symposium on foundations of computer science. Ieee, 1994.
# 1. Vandersypen, <NAME>, et al. "Experimental realization of Shor's quantum factoring algorithm using nuclear magnetic resonance." Nature 414.6866 (2001): 883-887.
# # ショアのアルゴリズムの概要
#
# [量子位相推定法](https://qiskit.org/textbook/ja/ch-algorithms/quantum-phase-estimation.html)というアルゴリズムは、ある量子状態にゲートをかけることで適用される位相を求めてくれます。位相推定アルゴリズムの入力は、状態$|1\rangle$とゲート$Z$であり、$Z$ゲートが状態$|1\rangle$に作用すると、同じ状態に$\pi$のグローバルな位相が追加された状態になります。
#
# $$
# Z|1\rangle = -|1\rangle = e^{i\pi} |1\rangle
# $$
#
# 量子位相推定アルゴリズムがこれを解くための手法を提供します。別の例を[こちら](https://qiskit.org/textbook/ja/ch-algorithms/quantum-phase-estimation.html#example_t_gate)で見ることができます。
#
# ショアは、$U|y\rangle = |a y\bmod N\rangle$という振る舞いをするゲート$U$に対して位相推定を行うと、$N$の因子についての情報がすぐに得られることを示しました。
# # 練習問題
#
# この課題では、$13y \bmod 35$を実装した回路で位相推定を行い、35を因数分解します。これを実現する回路を作り、しかも `ibmq_santiago` で実行できるほど小さくすることが課題となります。これは簡単なことではないので、まず最初にちょっとした「ズル」をしておきます。
#
# ショアのアルゴリズムは、初期量子状態$|1\rangle$に$U$を適用して到達できる状態でのみ動作すればよいので、このような動作をする回路であれば、どんな回路でも良いということです。
#
# $$
# \begin{aligned}
# U|1\rangle &= |13\rangle \\
# UU|1\rangle &= |29\rangle \\
# UUU|1\rangle &= |27\rangle \\
# UUUU|1\rangle &= |1\rangle \\
# \end{aligned}
# $$
#
# では、上記を簡単に行うにはどうすればよいのでしょうか。4つの異なる量子状態を正しく変換すればよいのですから、これを2量子ビットにエンコードすればよいことになります。今回の課題では、2量子ビットの計算基底の状態を、次のように数字にマッピングすることにします:
#
# $$
# \begin{aligned}
# |1\rangle &\rightarrow |00\rangle \\
# |13\rangle &\rightarrow |01\rangle \\
# |29\rangle &\rightarrow |10\rangle \\
# |27\rangle &\rightarrow |11\rangle \\
# \end{aligned}
# $$
#
# なぜこれが「ズル」なのでしょう?なぜなら、この最適化を利用するためには、$U$が影響を与える状態をすべて「知っておく」必要があるからです。つまり、再び1に戻るまで$ay \bmod N$を計算して、$a^x \bmod N$の周期を把握しておくことで、$N$の因子を得ることができます。このように、$r$の値がわかるような情報を使って最適化することは、古典的なコンピューターでは解決できない問題には当然対応できません。
#
# しかし、この課題の目的は、あくまで、ショアのアルゴリズムが意図したとおりに動作することを検証することであり、$U$の回路を得るためにちょっとしたズルを行ったという事実を気にするつもりはありません。
#
# <div id='u-definition'></div>
# <div class="alert alert-block alert-success">
#
# **Exercise 2a:** 下記の変換を行い、別の量子ビットによって制御される回路($U$)を作成してください。この回路は'target'という名前の2量子ビットのターゲットレジスタに作用し、'control'という名前の別の1量子ビットのレジスタによって制御されます。完成した回路を変数'`cu`'に割り当ててください。
#
#
# $$
# \begin{aligned}
# U|00\rangle &= |01\rangle \\
# U|01\rangle &= |10\rangle \\
# U|10\rangle &= |11\rangle \\
# U|11\rangle &= |00\rangle \\
# \end{aligned}
# $$
# +
from qiskit import QuantumCircuit
from qiskit import QuantumRegister, QuantumCircuit
# One control qubit and a 2-qubit target encoding {1,13,29,27} as {00,01,10,11}.
c = QuantumRegister(1, 'control')
t = QuantumRegister(2, 'target')
cu = QuantumCircuit(c, t, name="Controlled 13^x mod 35")
# Enter your code here - start
# Controlled increment modulo 4: the Toffoli carries the low target bit into
# the high one, then the CNOT flips the low bit — together mapping
# |00> -> |01> -> |10> -> |11> -> |00> whenever the control is |1>.
cu.ccx(c[0],t[0],t[1])
cu.cx(c[0],t[0])
# Enter your code here - end
cu.draw('mpl')
# -
# 以下のセルを実行して、答えを確認しましょう。
# Check the answer with the challenge grader.
from qc_grader import grade_ex2a
grade_ex2a(cu)
# おめでとうございます!難しい部分を見事クリアされました。
#
# 位相推定アルゴリズムの出力は量子ビットを測定して読み取るので、'counting'レジスタに$r$を読み取るのに十分な量子ビットが含まれていることを確認する必要があります。ここでは、$r=4$なので、$\log_2(4) = 2$個の量子ビットがあればよいことになります($r$を事前に知っているのでここでもちょっとズルしています)が、Santiagoには5個の量子ビットがあり、 'target'レジスターには2量子ビットしか使っていないので、残りの3量子ビットをカウントレジスターとして使います。
#
# $U$の位相推定を行うためには、$n$個の計数レジスターの各量子ビット(添字が$x$)に対して、$U^{2^x}$($U$を$2^x$回繰り返す)を実行する回路を作る必要があります。ここでは、以下の3つの回路が必要になります:
#
# $$ U, \; U^2, \; \text{and} \; U^4 $$
#
# そこで次は、$U^2$を実行する回路(つまり、$U$を2回適用することに相当する回路)を作ります。
# <div class="alert alert-block alert-success">
#
# **Exercise 2b:** 下記の変換を行い、別の量子ビットによって制御される回路($U^2$)を作成してください。この回路は'target'という名前の2量子ビットのターゲットレジスタに作用し、'control'という名前の別の1量子ビットのレジスタによって制御されます。完成した回路を変数'`cu2`'に割り当ててください。
#
# $$
# \begin{aligned}
# U|00\rangle &= |10\rangle \\
# U|01\rangle &= |11\rangle \\
# U|10\rangle &= |00\rangle \\
# U|11\rangle &= |01\rangle \\
# \end{aligned}
# $$
# +
c = QuantumRegister(1, 'control')
t = QuantumRegister(2, 'target')
cu2 = QuantumCircuit(c, t)
# Enter your code here - start
# U^2 adds 2 (mod 4): a single controlled flip of the high target bit,
# mapping |00>->|10>, |01>->|11>, |10>->|00>, |11>->|01>.
cu2.cx(c[0],t[1])
# Enter your code here - end
cu2.draw('mpl')
# -
# 以下のセルを実行して、答えを確認しましょう。
# Check the answer with the challenge grader.
from qc_grader import grade_ex2b
grade_ex2b(cu2)
# 最後に、$U$を4回適用することに相当する回路も必要です(つまり、$U^4$という回路が必要です)。
#
#
# <div class="alert alert-block alert-success">
#
# **Exercise 2c:** 下記の変換を行い、別の量子ビットによって制御される回路($U^4$)を作成してください。この回路は'target'という名前の2量子ビットのターゲットレジスタに作用し、'control'という名前の別の1量子ビットのレジスタによって制御されます。完成した回路を変数'`cu4`'に割り当ててください。ヒント:最適解はシンプルです。
#
# $$
# \begin{aligned}
# U|00\rangle &= |00\rangle \\
# U|01\rangle &= |01\rangle \\
# U|10\rangle &= |10\rangle \\
# U|11\rangle &= |11\rangle \\
# \end{aligned}
# $$
# </div>
# +
c = QuantumRegister(1, 'control')
t = QuantumRegister(2, 'target')
cu4 = QuantumCircuit(c, t)
# Enter your code here - start
# U^4 adds 4 ≡ 0 (mod 4): the identity transformation, so no gates are needed.
# Enter your code here - end
cu4.draw('mpl')
# -
# 以下のセルを実行して、答えを確認しましょう。
# Check the answer with the challenge grader.
from qc_grader import grade_ex2c
grade_ex2c(cu4)
# <div class="alert alert-block alert-success">
#
# **Exercise 2 final:** これで$U$, $U^2$, $U^4$を制御できるようになったので、これらを組み合わせてショアのアルゴリズムの量子部分を実行する回路を作ることができます。
#
# 初期化は簡単です。カウントレジスタを$|{+}{+}{+}\rangle$の状態にし(3つのHゲートで可能です)、ターゲットレジスタを$|1\rangle$の状態にします(計算基底の状態$|00\rangle$にマッピングしたので、ここでは何もする必要はありません)。ここまでの部分はこちらで作っておきます。
#
# あなたの仕事は、初期化と逆量子フーリエ変換の間に使用される、制御された$U$を実行する回路を作ることです。より正確に記述すると、以下の回路を作っていただきたいと思います。
#
# $$
# CU_{c_0 t}CU^2_{c_1 t}CU^4_{c_2 t}
# $$
#
# ここで、$c_0$、$c_1$、$c_2$は'counting'レジスタの3つの量子ビット、$t$は'target'レジスタ、$U$はこの課題の最初の部分で<a href="#u-definition">定義したとおりです</a>。この表記では、$CU_{a b}$は、$CU$が$a$によって制御され、$b$に作用することを意味します。この問題を解決する簡単な方法は、上で作成した回路 `cu`, `cu2`, `cu4` を単純に組み合わせることですが、おそらく同じ動作をするより効率的な回路を見つけることができるでしょう。
#
# </div>
# <div class="alert alert-block alert-danger">
#
# 作成される回路には、[CNOT](https://qiskit.org/documentation/stubs/qiskit.circuit.library.CXGate.html)と1量子ビットゲート[U-gates](https://qiskit.org/documentation/stubs/qiskit.circuit.library.UGate.html)しか使用できません。多量子ビットゲートは、1量子ビットゲートに比べて、ハードウェア上で実行するのが難しいため、あなたのスコアは使用したCNOTの数になります(少なければ少ないほどよい)。この条件の対応に戸惑いそうであれば、回路をこの形式に変換するコードを提出書類の横に記載しています。ただ、手計算の方が恐らくうまくいくでしょう。
#
# </div>
# これまでのソリューションを最終的な提出物にまとめるためのコード
# Assemble the full controlled-U stage: counting qubit i controls U^(2^i),
# all acting on the same 2-qubit target register.
cqr = QuantumRegister(3, 'control')
tqr = QuantumRegister(2, 'target')
cux = QuantumCircuit(cqr, tqr)
solutions = [cu, cu2, cu4]
for ctrl_qubit, controlled_power in zip(cqr, solutions):
    cux = cux.compose(controlled_power, [ctrl_qubit, tqr[0], tqr[1]])
cux.draw('mpl')
# Check the answer with the challenge grader.
from qc_grader import grade_ex2_final
# Transpile to CNOT + single-qubit U gates, as required by the grader's
# scoring (score = number of CNOTs after transpilation).
from qiskit import transpile
cux = transpile(cux, basis_gates=['cx','u'])
grade_ex2_final(cux)
# 最終的に回路を提出する時は以下のコードでSubmitください。
# Submit the answer; resubmission is allowed while the challenge is open.
from qc_grader import submit_ex2_final
submit_ex2_final(cux)
# おめでとうございます!あなたはこのチャレンジを終えました。あなたが作成した回路で、35を素因数分解されるのをみてみましょう。
#
# ## 作成した回路で35を素因数分解する
#
# 以下のコードセルは、あなたが提出した課題をもとに、$\tfrac{s}{r}$を得るための回路を作成します。ここでは$s$は$0$ と $r-1$間のランダムな整数であり、$r$は$f(x) = 13^x \bmod 35$の周期になります。
# +
from qiskit.circuit.library import QFT
from qiskit import ClassicalRegister
# Build the full phase-estimation circuit around the controlled-U stage.
cr = ClassicalRegister(3)
shor_circuit = QuantumCircuit(cqr, tqr, cr)
# Put the counting register into |+++>; the target stays |00> (encoding "1").
shor_circuit.h(cqr)
# Append the controlled U / U^2 / U^4 stage built above.
shor_circuit = shor_circuit.compose(cux)
# The inverse QFT turns the accumulated phase kickback into a readable integer.
shor_circuit.append(QFT(3, inverse=True), cqr)
shor_circuit.measure(cqr, cr)
shor_circuit.draw('mpl')
# -
# この回路をトランスパイルして、回路がどのくらいの大きさで、何個のCNOTを使っているのか見てみましょう。
from qiskit import Aer, transpile, assemble
from qiskit.visualization import plot_histogram
# Local simulator backend used to execute the transpiled circuit below.
qasm_sim = Aer.get_backend('aer_simulator')
# Transpile into the {u, cx} basis so the CNOT count reflects hardware cost.
tqc = transpile(shor_circuit, basis_gates=['u', 'cx'], optimization_level=3)
print(f"circuit depth: {tqc.depth()}")
# count_ops() returns a dict of gate counts; .get() avoids a KeyError in the
# (possible) case that optimization eliminated every CNOT.
print(f"Circuit contains {tqc.count_ops().get('cx', 0)} CNOTs")
# Run on the simulator and inspect the measurement distribution.
counts = qasm_sim.run(tqc).result().get_counts()
plot_histogram(counts)
# すべてが正しく動作した場合は、 $0$, $2$, $4$, $8$ の数字を等確率で測定することができるはずです。 これは、位相推定によって、 $2^n \cdot \tfrac{s}{r}$ が得られるからです。 ここで、 $n$はカウントレジスタの量子ビット数(ここでは、$n = 3$, $s$ は$0$ から$r-1$の間のランダムな整数、 $r$は計算しようとしている数)です。これを、$s/r$を表す分数に変換してみましょう(これは古典的に簡単に計算できるものです)。
from fractions import Fraction

n = 3  # number of qubits in the counting register
# Each measured bitstring encodes a phase s/r as an n-bit binary fraction.
for bitstring in counts:
    phase = int(bitstring, 2) / 2 ** n
    # The continued-fractions algorithm recovers the fraction a/b from the phase.
    print(Fraction(phase).limit_denominator())
# いくつかの結果の分母を見れば、正解の $r = 4$ がわかることがわかります。すぐに $r=4$ を検証することができます。
13**4 % 35  # == 1, confirming that the period of 13^x mod 35 is r = 4
# では、ここからどうやって因数を求めたらよいでしょうか。$N$と$a^{r/2}-1$または$a^{r/2}+1$の最大公約数が$N$の因数である可能性が高く、最大公約数も古典的に簡単に計算できることがわかりました。
from math import gcd  # greatest common divisor

# gcd(a^(r/2) ± 1, N) is very likely a nontrivial factor of N = 35.
for offset in (-1, 1):
    candidate = gcd(13 ** (4 // 2) + offset, 35)
    print(f"Guessed factor: {candidate}")
# 1つの因数を見つければよく、それを使って$N$を割ることでもう1つの因数を見つけることができます。しかし、この場合は、 $a^{r/2}-1$ と $a^{r/2}+1$ のどちらも $35$ の因数を与えてくれます。これが正しいかどうか、もう一度検証してみましょう。
7*5  # sanity check: the recovered factors 7 and 5 indeed multiply to 35
# ## `ibmq_santiago` での実行
#
# Santiago上で動作いただくことを冒頭で約束したので、ここではその方法を紹介します。この例では、便宜上Santiagoをシミュレーションしたデバイスを使用していますが、必要に応じて実際のデバイスに切り替えることができます。
# +
from qiskit.test.mock import FakeSantiago
from qiskit import assemble
from qiskit.visualization import plot_histogram
# Noise-model simulation of the 5-qubit ibmq_santiago device.
santiago = FakeSantiago()
real_device = False
## Uncomment the block below to run on the real device instead
#from qiskit import IBMQ
#IBMQ.load_account()
#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
#santiago = provider.get_backend('ibmq_santiago')
#real_device = True
# Transpile for Santiago's gate set and qubit connectivity
tqc = transpile(shor_circuit, santiago, optimization_level=3)
if not real_device:
    tqc = assemble(tqc)
# Run the circuit and display the counts
counts = santiago.run(tqc).result().get_counts()
plot_histogram(counts)
# -
# スコアが十分に低ければ、完璧なシミュレーションで見たように、$0$、$2$、$4$、$8$を測定する確率が高いことがわかります。プロセッサの不正確さや量子ビットと相互作用する不要なもののために、いくつかの余分な結果が表示されます。この「ノイズ」は、回路が長くなればなるほど悪化します。計算時間が長くなればなるほど、不要な相互作用の時間が長くなり、ゲート数が増えれば増えるほど、潜在的なエラーが増えるからです。そのため、できるだけ小さな回路を作るためにズルをする必要がありました。
#
#
# 近い将来、私たちの量子システムは、これらの問題を克服するために、より高度なエラー抑制技術を使い始めることができるまでに改善されるでしょう。そうなれば、[ズルをせずにショアのアルゴリズムを実行できる](https://arxiv.org/pdf/quant-ph/0205095.pdf)ほどの大規模な回路を走らせることができるようになります。
# ## Additional information
#
# **Created by:** <NAME>
#
# **Version:** 1.0.0
|
solutions by participants/ex2/ex2-ja-AyumuShiraishi-6cnot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import numpy as np
import time
import csv
# # use search page links to get the link for each individual's resume
# Build the list of search-result pages 1..1190; page 1 has no `p` parameter.
# (The unused `num_lst` and `string` locals from the original were removed.)
base_url = 'https://www.postjobfree.com/resumes?q=&l=United+States&radius=25'
url_lst = [base_url]
for page in range(2, 1191):
    url_lst.append(base_url + '&p=' + str(page))
resume_links = []
# Crawl every search page and collect the absolute URL of each resume;
# a 3-second sleep per request keeps the crawl polite to the server.
for link in tqdm(url_lst):
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    time.sleep(3)
    # Each result is an <h3 class="itemTitle"> wrapping a relative link.
    for x in soup.find_all('h3', attrs={'class': 'itemTitle'}):
        resume_links.append("https://www.postjobfree.com"+x.a['href'])
len(resume_links)
results = []
# Fetch each resume page; the [3252:] slice resumes a crawl that previously
# stopped after 3252 links (NOTE(review): adjust or remove on a fresh run).
for link in tqdm(resume_links[3252:]):
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    results.append({
        'job_title': soup.find('div', attrs={'class': 'leftColumn'}).find('h1').get_text(),
        # [:-23] trims a fixed-length trailing suffix from the resume text —
        # TODO confirm the suffix length against the live page markup.
        'resume': soup.find('div', attrs={'class': 'normalText'}).get_text()[:-23]
    })
    time.sleep(3)
# Persist the scraped resumes. newline='' is required by the csv module so the
# writer's own line endings are not translated (avoids blank rows on Windows).
with open('../../data/website_resumes.csv', 'w', encoding="utf-8", newline='') as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=results[0].keys())
    writer.writeheader()
    # writerows() streams the whole list in one call.
    writer.writerows(results)
results[0].keys()
|
notebook/data web scraping/Web Scraping.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # Import Data
# Importing the training set
Dataset = pd.read_csv('AMZNtrain.csv')
# Column 1 (the "Open" price) is the single feature used for training.
training_set = Dataset.iloc[:, 1:2].values
# # Feature Scaling
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
# Scale prices into [0, 1]; the fitted scaler is reused later to invert predictions.
MINISC = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = MINISC.fit_transform(training_set)
# # Train,Test Creation
# +
# Creating a data structure with 60 timesteps and 1 output
window = 60
samples, targets = [], []
for end in range(window, 1258):
    # Each sample is the 60 scaled prices preceding `end`; the target is price at `end`.
    samples.append(training_set_scaled[end - window:end, 0])
    targets.append(training_set_scaled[end, 0])
X_train = np.array(samples)
y_train = np.array(targets)
# LSTM input must be (samples, timesteps, features).
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# -
X_train.shape  # expect (1198, 60, 1) given the 1258-row training window loop
# # Adding Layers and Training
# +
# - Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
REG = Sequential()
# Four stacked 50-unit LSTM layers, each followed by 20% dropout to curb overfitting.
REG.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
REG.add(Dropout(0.2))
REG.add(LSTM(units = 50, return_sequences = True))
REG.add(Dropout(0.2))
REG.add(LSTM(units = 50, return_sequences = True))
REG.add(Dropout(0.2))
# Final LSTM returns only the last hidden state for the regression head.
REG.add(LSTM(units = 50))
REG.add(Dropout(0.2))
# Adding the output layer
REG.add(Dense(units = 1))
# Compiling the RNN
REG.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
REG.fit(X_train, y_train, epochs = 100, batch_size = 32)
# +
# - Making the predictions and visualising the results
# NOTE(review): this re-reads AMZNtrain.csv — a held-out AMZNtest.csv was
# probably intended here; confirm before trusting the evaluation below.
Dataset1 = pd.read_csv('AMZNtrain.csv')
real_stock_price = Dataset1.iloc[:, 1:2].values
# -
# # Prediction
# Getting the predicted stock price of 2017
dataset_total = pd.concat((Dataset['Open'], Dataset1['Open']), axis = 0)#;print(dataset_total)
# len - len cancels out, so this slice is simply the last 60 rows of the series.
inputs = dataset_total[len(dataset_total) - len(dataset_total) - 60:].values
inputs = inputs.reshape(-1,1)
# Reuse the training scaler (transform only — no refit).
inputs = MINISC.transform(inputs);inputs.shape
X_test = np.array(inputs);X_test.shape
# A single sample of 60 timesteps, matching the training window.
X_test = X_test.reshape(1,60,1)
predicted_stock_price = REG.predict(X_test)
# Map the scaled prediction back to the original price range.
predicted_stock_price = MINISC.inverse_transform(predicted_stock_price)
# # Prediction Visualizing
# (4) Visualising the results
# Real vs predicted price; note only one prediction point is produced above.
plt.plot(real_stock_price, color = 'red', label = 'Real Amazon Stock Price')
plt.plot(predicted_stock_price, color = 'black', label = 'Predicted Amazon Stock Price')
plt.title('Amazon Stock Prediction')
plt.xlabel('Time')
plt.ylabel('Amazon Stock Price')
plt.legend()
plt.show()
|
Stock price Prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
from database.market import Market
from database.strategy import Strategy
from database.sec import SEC
from modeler.modeler import Modeler as m
from datetime import datetime, timedelta, timezone
import numpy as np
import math
from tqdm import tqdm
import pickle
from sklearn.preprocessing import OneHotEncoder
# Open handles to the project's SEC-filings, market, and strategy data stores.
sec = SEC()
market = Market()
strat = Strategy()
# S&P 500 constituents (provides the ticker <-> CIK mapping used below).
market.connect()
sp5 = market.retrieve("sp500")
market.disconnect()
# Historical prices previously produced by the strategy pipeline.
strat.connect()
prices = strat.retrieve("prices")
strat.disconnect()
prices["date"] = pd.to_datetime(prices["date"])
prices["year"] = [x.year for x in prices["date"]]
prices["quarter"] = [x.quarter for x in prices["date"]]
# Mean price per (year, quarter, ticker).
quarterly_grouped = prices.groupby(["year","quarter","ticker"]).mean()
# Bucket the quarterly mean close into $100-wide price bands.
quarterly_grouped["category"] = [math.ceil(x/100) * 100 for x in quarterly_grouped["adjClose"]]
# NOTE(review): the comparison is against 100 while categories are multiples of
# 100, so this collapses everything above the first band into a single 1000
# bucket — confirm the intended threshold.
quarterly_grouped["category"] = [1000 if x > 100 else x for x in quarterly_grouped["category"]]
quarterly_grouped.reset_index(inplace=True)
groups = quarterly_grouped.merge(sp5.rename(columns={"Symbol":"ticker"}),on="ticker",how="left")
g = groups[["year","quarter","ticker","adjClose","category","GICS Sector","CIK"]]
g["string_category"] = [str(x) for x in g["category"]]
# Classification label = price band + sector, e.g. "100Information Technology".
g["classification"] = g["string_category"] + g["GICS Sector"]
# One-hot encode the combined price-band/sector label.
numberss = len(g["classification"].unique())
enc = OneHotEncoder(handle_unknown="ignore")
transformed = [[x] for x in g["classification"]]
encoding = enc.fit_transform(transformed)
df_encoding = pd.DataFrame(encoding.toarray())
# Append the one-hot columns (integer-labelled 0..numberss-1) onto g.
for col in df_encoding.columns:
    g[col] = df_encoding[col]
# Predict one year ahead using one year of history.
yearly_gap = 1
training_years = 1
fails = []    # (ticker, error) pairs for filings that failed to process
filings = []  # per-ticker cleaned fundamentals, concatenated later
columns = []  # NOTE(review): never used below — candidate for removal
# Pull SEC filing fundamentals for every S&P 500 company, clean them, and
# collect one DataFrame per ticker into `filings`.
sec.connect()
ticker = None  # defined up-front so the except-branch message cannot NameError
for cik in tqdm(list(g["CIK"].unique())):
    try:
        filing = sec.retrieve_filing_data(cik)
        symbols = sp5[sp5["CIK"]==cik]["Symbol"]
        # A CIK can map to multiple share classes; take the first symbol.
        if symbols.index.size > 1:
            ticker = str(list(symbols)[0])
        else:
            ticker = symbols.item()
        funds = filing.copy()
        drop_columns = ["adsh","cik","_id"]
        # Drop all-lowercase bookkeeping columns except the ones we keep below.
        for column in funds.columns:
            if str(column).islower() and str(column) != "filed" and str(column) not in ["year","quarter","ticker"]:
                drop_columns.append(column)
        # Normalize `filed` to a tz-aware datetime; dates arrive either as
        # YYYYMMDD integers or "YYYY-MM-DD ..." strings.
        # Fix: the string branch passed the misspelled keyword `tzinfor=`,
        # which raised a TypeError for every dashed date.
        funds["filed"] = [datetime.strptime(str(x),"%Y%m%d").replace(tzinfo=timezone.utc) if "-" not in str(x) else \
            datetime.strptime(str(x).split(" ")[0],"%Y-%m-%d").replace(tzinfo=timezone.utc) for x in funds["filed"]]
        funds["quarter"] = [x.quarter for x in funds["filed"]]
        # Shift forward so fundamentals filed in year Y label year Y + yearly_gap.
        funds["year"] = [x.year + yearly_gap for x in funds["filed"]]
        funds["ticker"] = ticker
        funds.drop(drop_columns,axis=1,inplace=True,errors="ignore")
        # Drop columns missing in more than 5% of this filing's rows.
        qa = funds.copy()
        for col in qa.columns:
            test = qa[col].fillna(-99999)
            availability = 1 - (len([x for x in test if x == -99999]) / qa.index.size)
            if availability < 0.95:
                funds.drop(col,inplace=True,axis=1)
        filings.append(funds)
    except Exception as e:
        print("prep",ticker,str(e))
        fails.append([ticker,str(e)])
sec.disconnect()
try:
    f = pd.concat(filings)
    # Drop any fundamental column populated for fewer than 70% of all rows.
    for col in tqdm(f.columns):
        sentinel_filled = f[col].fillna(-99999)
        # Vectorized replacement for the original Python-level list
        # comprehension counting `x == True` entries — same ratio, one C pass.
        availability = (sentinel_filled != -99999).mean()
        if availability < 0.7:
            f.drop(col, axis=1, inplace=True)
except Exception as e:
    print(str(e))
g.columns  # quick look at the label frame's columns before merging
try:
    # Join fundamentals with the one-hot targets on (year, quarter, ticker).
    data = f.merge(g.drop(["string_category","classification","adjClose","category","GICS Sector","CIK"],axis=1), \
        on=["year","quarter","ticker"],how="left")
    factors = list(data.columns)
    # Feature columns = everything except the join keys and the one-hot targets.
    factors = [x for x in factors if x not in ["year","quarter","ticker"]]
    for i in range(numberss):
        factors.remove(i)
    # Impute inf/NaN with the column mean taken from the fundamentals frame.
    for col in factors:
        data[col].replace([np.inf,-np.inf,np.nan,np.NaN],f[col].mean(),inplace=True)
except Exception as e:
    print(str(e))
# Stringify every column label in one pass instead of issuing a full
# DataFrame.rename (which rebuilds the column index) once per column.
data.columns = [str(col) for col in data.columns]
# +
# market.connect()
# data = market.retrieve("financial_categorization_data")
# market.disconnect()
# -
# Drop store bookkeeping columns if the frame was reloaded from the database.
data.drop(["_id","filed"],axis=1,inplace=True,errors="ignore")
factors
year_range = range(2021,2022)
yearly_gap = 1
training_years = 1
# Walk-forward: train on the prior year, predict the current year, store results.
for year in tqdm(year_range):
    try:
        training_data = data[(data["year"] < year) & (data["year"] >= year - yearly_gap)]
        factors = list(data.columns)
        factors = [x for x in factors if x not in ["year","quarter","ticker"]]
        # Target columns were stringified above; remove them from the factors.
        for i in range(numberss):
            try:
                factors.remove(str(i))
            except:
                continue
        # Impute inf/NaN with per-column means, then drop any remaining NaNs.
        for col in factors:
            training_data[col].replace([np.inf,-np.inf,np.nan,np.NaN],training_data[col].mean(),inplace=True)
        training_data.dropna(inplace=True)
        x = training_data[factors]
        y = training_data[[str(x) for x in range(numberss)]]
        prediction_data = data[data["year"]==year]
        refined_data = {"X":x.reset_index(drop=True),"y":y.reset_index(drop=True)}
        # Multi-output XGBoost classifier from the project's modeler wrapper.
        models = m.xgb_classify(refined_data.copy(),multioutput=True)
        model = models["model"]
        for col in factors:
            prediction_data[col].replace([np.inf,-np.inf,np.nan,np.NaN],prediction_data[col].mean(),inplace=True)
        prediction_data.dropna(inplace=True)
        # Invert the one-hot encoding back to the band+sector label.
        predictions = enc.inverse_transform(model.predict(prediction_data[factors]))
        prediction_data["prediction"] = [x[0] for x in predictions]
        prediction_data["score"] = models["score"].item()
        sim = prediction_data[["year","quarter","ticker","prediction","score"]]
        strat.connect()
        strat.store("application_stock_categories",sim)
        strat.disconnect()
    except Exception as e:
        print(year,str(e))
|
versions/v0/application_financial_categorization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] id="38m2CqLbPCk2"
# ## Learning SMACH
#
# + [markdown] id="Q0Gc1hqt9mQh"
# ### Creating a State Machine
#
# To create a Smach state machine, you first create a number of states, and then add those states to a State Machine container.
# + id="VTWG_ldfhnXl"
# #!/usr/bin/env python
import rospy
import smach
import smach_ros
import time
from pyrobot import Robot
# Select the iLQR base controller for go_to_absolute motions.
base_config_dict={'base_controller': 'ilqr'}
# LoCoBot handle shared (as a global) by the SMACH states defined below.
robot = Robot('locobot', base_config=base_config_dict)
# + [markdown] id="8KYI7FKThdxp"
# ### Creating a state
# + id="4exaMM_thsHX"
# define state Base_move
class Base_move(smach.State):
    """Drive the base forward on the first two visits, then return to the origin."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['forward','backward'])
        self.counter = 0
        self.forward_position = [1,0.0,0.0]
        self.backward_position = [0.0,0.0,0.0]

    def execute(self, userdata):
        rospy.loginfo('Executing state Base_move')
        if self.counter >= 2:
            # Third visit onward: go back to the origin and signal completion.
            robot.base.go_to_absolute(self.backward_position)
            time.sleep(1)
            return 'backward'
        self.counter += 1
        robot.base.go_to_absolute(self.forward_position)
        time.sleep(1)
        return 'forward'
# + id="3vme0nsUiDxK"
# define state Arm_move
class Arm_move(smach.State):
    """Send the arm home or to a fixed joint target, driven by an internal counter."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['arm_home','arm_move'])
        self.counter = 0
        self.joint_position = [0, 0.3, 0.23, 1, 0]

    def execute(self, userdata):
        rospy.loginfo('Executing state Arm_move')
        if self.counter % 2 != 0:
            # NOTE(review): the counter is only incremented on the even branch,
            # so after the first home move this branch runs on every later
            # visit — confirm whether strict home/move alternation was intended.
            robot.arm.set_joint_positions(self.joint_position, plan=False)
            time.sleep(1)
            return 'arm_move'
        self.counter += 1
        robot.arm.go_home()
        time.sleep(1)
        return 'arm_home'
# + [markdown] id="yVA9HzWyiNSc"
# ### Adding states to a state machine
# + colab={"base_uri": "https://localhost:8080/"} id="IzuHyz57iGmU" outputId="fad183c3-0252-4e9e-db56-9b23feca2127"
# main
def main():
    """Build and run the two-state SMACH machine: base moves alternate with arm moves."""
    # Create a SMACH state machine
    sm = smach.StateMachine(outcomes=['End'])
    # Open the container
    with sm:
        # Add states to the container
        smach.StateMachine.add('Base_move', Base_move(), transitions={'forward':'Arm_move', 'backward':'End'})
        smach.StateMachine.add('Arm_move', Arm_move(), transitions={'arm_home':'Base_move','arm_move':'Base_move'})
    # Create and start the introspection server (enables smach_viewer debugging)
    sis = smach_ros.IntrospectionServer('my_smach_introspection_server', sm, '/SM_ROOT')
    sis.start()
    # Execute SMACH plan
    outcome = sm.execute()
    # Wait for ctrl-c to stop the application
    rospy.spin()
    sis.stop()

if __name__ == '__main__':
    main()
|
notebook/LoCoBot/smach_pyrobot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# # Install Package
# !pip install https://github.com/ThiagueraBarao/jupyterDjangoORM/archive/main.zip
# # Load Library
import jupyterDjangoORM as JDO
# # Functions
# 1. QueryToJson()
#
# * JSON response (str): Python String with JSON query response;
#
# ---
#
# 2. ModelToPandasDf()
#
# * Pandas Dataframe response from query response;
# # Import Django Models
from yourDjangoProject.models import MODEL_A, MODEL_B
# # Functions
# ## QueryToJson
# Serialize a full and a filtered queryset to JSON strings.
JDO.QueryToJson(query=MODEL_A.objects.all())
JDO.QueryToJson(query=MODEL_B.objects.filter(id__gt=31))
# ## ModelToPandasDf
# Same querysets materialized as pandas DataFrames.
JDO.ModelToPandasDf(query=MODEL_A.objects.filter(id__gt=31))
JDO.ModelToPandasDf(query=MODEL_B.objects.all()).head()
|
Notebooks/Example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from datetime import datetime
from os import listdir
import numpy as np
pd.set_option('display.max_rows', None)
# Collect every subtitle CSV dump. The original condition tested
# f.endswith('.csv') twice; the redundant duplicate clause is removed
# (NOTE(review): if a second extension such as '.tsv' was intended, add it here).
filepaths = ["subtitles/" + f for f in listdir("subtitles/") if f.endswith('.csv')]
df = pd.concat(map(pd.read_csv, filepaths))
df.shape
# Distinct crawl timestamps per keyword — a quick completeness check.
df.groupby(['keyword'])['fetch_time'].nunique()
|
code/extraction/ValidateSubtitles.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=false
# Santander Customer Satisfaction
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=false
# # Introdução
#
# A satisfação do cliente é uma medida essencial do sucesso. Os clientes insatisfeitos raramente expressam sua insatisfação antes de sair.
#
# Nosso objetivo aqui é identificar os clientes insatisfeitos e com isso criar planos de ação, medidas, para melhorar a felicidade dos clientes
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false
# ## Dataset
#
# Sobre os dados, são dados os conjuntos de **treino** e de **teste**, as features do dataset são anônimas, sabendo que feature `TARGET` contém a classificação do cliente como satisfeito ou insatisfeito
#
# ```python
# TARGET = 0 # satisfeito
# TARGET = 1 # insatisfeito
# ```
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false
# ## Objetivos
#
# Temos aqui três objetivos principais
#
# 1. Classificar os `positivos verdadeiros`, ou seja, o cliente que estava insatisfeito e foi alvo de uma ação de retenção
# - O benefício nesse caso é o lucro da ação ($R\$\ 100,00$) menos os custos relacionados à ação de retenção ($R\$\ 10,00$)
#
# 2. Atribuir uma nota de 1 a 5 para cada cliente da base de `teste` respeitando a variável `TARGET`, isto é, o seu nível de satisfação, sendo 1 o mais insatisfeito e 5 o mais satisfeito
# - Ao atribuir essa nota, deve-se ter em mente que o somente os clientes com nota 1 serão alvos de uma ação de retenção e que o objetivo dessa ação é maximizar o lucro esperado pelo cliente.
#
# 3. Agrupar em três grupos, onde teríamos os maiores lucros esperados por cliente
# -
# # Exploração dos dados
# + jupyter={"outputs_hidden": false}
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
import xgboost as xgb
# + jupyter={"outputs_hidden": false}
# Kaggle Santander splits; TARGET (satisfaction label) exists only in train.
train = pd.read_csv("./data/train.csv")
test = pd.read_csv("./data/test.csv")
# -
# ## Estudo das variavéis
#
# Como as colunas do nosso *dataset* não possuem descrição, vamos seguir uma abordagem de explorar os dados e com isso criar um primeiro modelo de classificação como `baseline`, para isso podemos construir alguns testes de escolha das `features`, como
# - Univariate feature selection
# - Recursive feature elimination
# ### Distribuição do target
# Observemos se um cliente possui mais de uma amostra no `dataset`
train.ID.shape[0]==len(train.ID.unique())  # expected True: one row per customer
# Como sabemos a que a `feature` `ID` é a chave que identifica o cliente, vamos observar como está a sua distribuição em relação a `feature` `TARGET`
# Class balance: share of customers per TARGET value.
(train.groupby('TARGET')['ID'].agg({'count'})['count']).apply(lambda x: x/train.shape[0])
# Apenas 3,96% dos clientes estão insatisfeitos
# NOTE(review): passing a set literal to .agg is deprecated in newer pandas;
# .agg(['count']) is the stable spelling.
(train.groupby('TARGET')['ID'].agg({'count'})['count']).apply(lambda x: x/train.shape[0]).plot.bar();
# ### Agrupamento das variáveis
# Como não sabemos os nomes das colunas, e encontramos um certo padrão em relação as variáveis, observe que existe um prefixo e um sufixo que compõe a variável. Vamos tentar agrupar essas variáveis pelo grupo à qual ela pertence. Por exemplo, a variável `var40` está segmentada em subvariáveis
train.columns[train.columns.str.contains('var40')]  # all sub-variables of var40
# #### Variavéis que não acompanham nenhum sufixo
# +
# apenas as `var`
# Pairplot of the "pure" columns (those without a var* infix).
sns.pairplot(train[train.columns[~train.columns.str.contains('.var*')]])
# result
# var15 looks like an age/time-like variable: its distribution decays.
# -
# Vamos observar a variância dessas features
# Variance of each "pure" feature.
train[train.columns[~train.columns.str.contains('.var*')]].var().plot.bar(label='variância das features');
# A `var38` possui a maior variância desse *subset* das features, algo com variância muito grande poderia indicar algum valor monetário, provavelmente, saldo em conta
# Log-scale histogram of var38 (the dominant-variance feature).
train.var38.map(np.log10).hist();
# A distribuição é centrada aproximadamente em $10^5 \approx 100000$, que é um valor bem alto para conta corrente, vamos observar mais a frente...
# Outro ponto a se observar, é a distribuição da feature `var15`, que provalvelmente seja idade do cliente
train.var15.hist();
# #### Variavéis que acompanham sufixo
# Anteriomente, olhamos para as variáveis que não foram quebradas em subcategorias, por assim dizer, variáveis "puras".
#
# O que acontece se agruparmos novamente as variáveis que foram segmentadas para entender seu comportamento?
# as demais `var`
train[train.columns[train.columns.str.contains('var')]]
# All column names containing 'var', to be grouped by their varNN token.
var_names = pd.DataFrame(train.columns[train.columns.str.contains('var')], columns=['names'])
# +
import re
r = re.compile("var")
# Pull the varNN token out of each underscore-separated column name.
var_names['group'] = var_names['names'].str.split('_').apply(lambda x: list(filter(r.match, x))[0])
# -
# O dataframe `var_names` possui a coluna `group` que indica a variável que foi segmentada
var_names
plt.figure(figsize=(16,12))
var_names.group.value_counts().plot.barh();
variaveis_count = var_names.group.value_counts()
# Groups that appear exactly once are the unsegmented ("pure") variables.
np.sort(variaveis_count[variaveis_count==1].index)
# Nosso primeiro ponto aqui,
# - Variáveis que não são segmentadas provavelmente referem-se a informações do cliente, como idade, `var15`, ou alguma informação referente a cadastro
# - `var10`
# - `var10cte`
# - `var11`
# - `var15`
# - `var19`
# - `var21`
# - `var3`
# - `var35`
# - `var36`
# - `var38`
# - `var4`
grouped = var_names.groupby(['group','names']).count()
grouped
# Uma forma de analisarmos a importância das features seria analisar seu comportamento por grupo, assim poderíamos eliminar algumas delas.
#
# Por exemplo, analisar todo o grupo da `var8` e descobrir o que significa a sua segmentação.
# NOTE(review): .loc[['var8','names']] selects MultiIndex rows labelled 'var8'
# and 'names' — the 'names' label looks unintended; confirm this actually
# selects the var8 group as expected.
sns.pairplot(train[(grouped.loc[['var8','names']].reset_index())['names'].values]);
# Tratar as features dessa forma nos leva à um trabalho cansativo e de muita inferência sobre resultados que não conhecemos, sendo assim, vamos usar uma metodologia baseada na importância dessas mesmas features.
# ## Seleção das features
# ### Remoção das features com baixa variância
# +
from collections import defaultdict

# For each variance threshold, record which feature columns fall below it.
# (The original pre-initialized `remove = {}` as a dict that was immediately
# shadowed by a list inside the loop — the dead/ambiguous init is removed.)
columns_by_thresholds = defaultdict(list)
var_thresholds = np.linspace(0.0, 1.0, 11)
# collect features with variance below each threshold up to 1.0
for threshold in var_thresholds:
    low_variance_cols = []
    for col in [x for x in train.columns if x != 'TARGET']:
        if train[col].var() < threshold:
            low_variance_cols.append(col)
    columns_by_thresholds["{0:.1f}".format(threshold)] = low_variance_cols
# -
# Trade-off curve: how many features each variance threshold would eliminate.
plt.plot([float(i) for i in columns_by_thresholds.keys()], [len(x) for x in columns_by_thresholds.values()], '-o')
plt.xlabel('variância')
plt.ylabel('# de features que podem ser eliminadas');
# Assim, pegando as features com $\sigma^2 (feature)\le 0.5$ já removemos algumas features que não impactarão nosso modelo
# Columns kept at the 0.5 cut. NOTE(review): set() difference makes the
# resulting column order nondeterministic between runs.
columns_best = list(set(train.columns) - set(columns_by_thresholds['0.5']))
fig, axes = plt.subplots(1,2, figsize=(18,8))
# Left: raw values; right: log10(x+1) view of the same data.
sns.heatmap(train[columns_best].values, ax=axes[0])
axes[0].set_title('Quantidade de zeros nos dados')
sns.heatmap((train[columns_best] + 1).apply(np.log10).values, ax=axes[1])
# NOTE(review): this title is identical to the left panel's — it probably
# should mention the log scale.
axes[1].set_title('Quantidade de zeros nos dados');
# removendo as colunas com variancia menor que 0.2
# Drop the same low-variance columns from both splits to keep them aligned.
remove_lower_variance = columns_by_thresholds['0.2']
train.drop(remove_lower_variance, axis=1, inplace=True)
test.drop(remove_lower_variance, axis=1, inplace=True)
# +
# removendo colunas duplicadas
# Pairwise O(n^2) comparison of column values; later duplicates are dropped
# from both train and test. NOTE(review): a column can be appended more than
# once if it duplicates several predecessors.
remove_duplicate = []
cols = train.columns
for i in range(len(cols)-1):
    v = train[cols[i]].values
    for j in range(i+1,len(cols)):
        if np.array_equal(v,train[cols[j]].values):
            remove_duplicate.append(cols[j])
train.drop(remove_duplicate, axis=1, inplace=True)
test.drop(remove_duplicate, axis=1, inplace=True)
# -
# ### Modelo base
# Com a remoção das features **menos importantes**, podemos criar um modelo *baseline* que pode nos auxiliar em tratar as features mais importantes
# quantidade de features restantes
train.shape[1]
# split data into train and test
# Keep the test-set IDs for a later submission; drop them from the features.
test_id = test.ID
test = test.drop(["ID"],axis=1)
test
# +
X = train.drop(["TARGET","ID"],axis=1)
y = train.TARGET
# Fixed seed keeps the 80/20 split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1729)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# -
## # Feature selection
clf = ExtraTreesClassifier(random_state=1729, n_jobs=-1)
# fit() returns the estimator itself, so `selector` is just an alias for clf.
selector = clf.fit(X_train, y_train)
# plot most important features
feat_imp = pd.Series(clf.feature_importances_, index = X_train.columns.values).sort_values(ascending=False)
feat_imp[:40].plot(kind='bar', title='Feature Importances according to ExtraTreesClassifier', figsize=(12, 8))
plt.ylabel('Feature Importance Score')
plt.subplots_adjust(bottom=0.3)
plt.savefig('1.png')
plt.show()
# Distribution of log(var38), the dominant-variance feature.
X_train.var38.apply(np.log10).plot.box()
# log(var15) vs log(var38), colored by the TARGET label.
plt.scatter(X_train.var15.apply(np.log10), X_train.var38.apply(np.log10), c=y_train.values, cmap=plt.cm.viridis_r, alpha=0.2)
plt.colorbar();
# ### Logistic Regression
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn import metrics
from sklearn.model_selection import cross_val_predict, GridSearchCV
# +
X = train.drop(["TARGET","ID"],axis=1)
y = train.TARGET
# Re-split with 40% held out for the logistic-regression experiments.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40, random_state=1729)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# -
# Grid-search the weight of the negative class to offset the ~4% positive rate.
parameters_to_tune = {'class_weight':[{0: w} for w in [1, 2, 4, 6, 10]]}
lr = LogisticRegression()
# NOTE(review): lr_cv is built but never fitted or used below.
lr_cv = LogisticRegressionCV(cv=5, class_weight=[{1: w} for w in [1, 2, 4, 6, 10]], n_jobs=-1)
clf = GridSearchCV(lr, parameters_to_tune)
clf.fit(X_train, y_train)
print (clf.best_params_)
y_pred = clf.predict(X_test)
# Fix: classification_report was referenced without being imported (NameError);
# call it through the already-imported sklearn.metrics namespace instead.
print(metrics.classification_report(y_true = y_test.values, y_pred = y_pred, labels=[0,1], target_names=['satisfeito', 'insatisfeito']))
# +
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
cm = confusion_matrix(y_test, y_pred, labels=[0,1])
# Unpack in sklearn's row-major order: tn, fp, fn, tp.
tn, fp, fn, tp = cm.ravel()
# -
cm
tn, fp, fn, tp
# #### Lucro
#
# Definamos o lucro como sendo o quanto eu consigo alcançar com minha campanha menos o que eu gastei
#
# $$
# lucro = TP \times 100 - (TP + FP)\times 10
# $$
# +
# Sweep the decision threshold and record the campaign profit at each point:
# profit = 100 per retained unhappy customer minus 10 per customer contacted.
pred_proba_df = pd.DataFrame(clf.predict_proba(X_test))
threshold_list = np.linspace(0.0, 1.0, 50)
lucro = []
# Fix: Series.as_matrix() was removed from pandas — .values is the compatible
# equivalent on both old and current versions.
y_true_col = y_test.values.reshape(y_test.values.size, 1)
for i in threshold_list:
    # Binarize both probability columns; only column 1 (P(insatisfeito)) is used.
    y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > i else 0)
    y_pred_col = y_test_pred.iloc[:, 1].values.reshape(y_test_pred.iloc[:, 1].values.size, 1)
    test_accuracy = metrics.accuracy_score(y_true_col, y_pred_col)
    conf_matrix = confusion_matrix(y_true_col, y_pred_col, labels=[0,1])
    tn, fp, fn, tp = conf_matrix.ravel()
    print(i, precision_score(y_test, y_pred_col))
    lucro.append(tp*100 - (tp + fp)*10)
# -
# ### Gradient Boosting
from sklearn import metrics
from sklearn.ensemble import GradientBoostingClassifier
# +
X = train.drop(["TARGET","ID"],axis=1)
y = train.TARGET
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1729)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# -
clf = GradientBoostingClassifier(random_state=1729).fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_pred_prob = clf.predict_proba(X_test)
print(classification_report(y_true = y_test.values, y_pred = y_pred, labels=[0,1], target_names=['satisfeito', 'insatisfeito']))
# +
from sklearn.metrics import auc
from sklearn.metrics import roc_curve, auc, confusion_matrix
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob[:,1])
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc="lower right")
plt.show()
# -
# #### Lucro
#
# Definamos o lucro como sendo o quanto eu consigo alcançar com minha campanha menos o que eu gastei
#
# $$
# lucro = TP \times 100 - (TP + FP)\times 10
# $$
# +
def predict(clf, X_test, threshold=0.5):
    """Boolean predictions: positive whenever P(class 0) falls below *threshold*."""
    negative_proba = clf.predict_proba(X_test)[:, 0]
    return negative_proba < threshold

def lucro(clf, X_test, y_test, threshold):
    """Campaign profit at *threshold*.

    Each true positive is worth 100; every predicted positive (contacted
    customer) costs 10. Returns (threshold, gain, cost, gain + cost).
    """
    y_pred = predict(clf, X_test, threshold)
    hits = (y_pred == 1) & (y_test == 1)
    gain = sum(hits * 100)
    cost = -10 * sum(y_pred)
    return threshold, gain, cost, gain + cost
# +
lucro_list = []
threshold_list = np.linspace(0.0, 1.0, 50)
for threshold in threshold_list:
t, _, _, lucro_i = lucro(clf, X_test, y_test, threshold)
lucro_list.append(lucro_i)
# -
threshold_lucro = pd.DataFrame({'threshold': threshold_list,
'lucro': lucro_list})
threshold_lucro.plot(x='threshold', y='lucro')
18200 / y_test.shape[0]
threshold_lucro[threshold_lucro.lucro == threshold_lucro.lucro.max()]
# +
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc="lower right")
plt.show()
# -
# ### Selecionando as melhores features
# clf.feature_importances_
fs = SelectFromModel(selector, prefit=True)
test_backup = test.copy(deep=True)
# +
X_train = fs.transform(X_train)
X_test = fs.transform(X_test)
test = fs.transform(test_backup)
print(X_train.shape, X_test.shape, test.shape)
## # Train Model
# classifier from xgboost
m2_xgb = xgb.XGBClassifier(n_estimators=110, njobs=-1, max_depth = 4, \
seed=1729)
# -
m2_xgb.fit(X_train, y_train, eval_metric="auc", verbose = False,
eval_set=[(X_test, y_test)])
# +
from sklearn.metrics import classification_report
#y_pred = m2_xgb.predict(X_test)
y_pred = selector.predict(X_test)
print(classification_report(y_true = y_test.values, y_pred = y_pred, labels=[0,1], target_names=['satisfeito', 'insatisfeito']))
# +
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc="lower right")
plt.show()
# +
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred, labels=[0,1])
tn, fp, fn, tp = cm.ravel()
# -
cm
# +
plt.clf()
plt.imshow(cm, interpolation=None, cmap=plt.cm.Blues)
classNames = ['Satisfeito','Insatisfeito']
plt.ylabel('True label')
plt.xlabel('Predicted label')
tick_marks = np.arange(len(classNames))
plt.xticks(tick_marks, classNames, rotation=45)
plt.yticks(tick_marks, classNames, rotation=-45)
s = [['TN','FP'], ['FN', 'TP']]
for i in range(2):
for j in range(2):
plt.text(j,i+0.01, str(s[i][j])+" = "+str(cm[i][j]))
plt.tight_layout()
plt.show()
# -
tn, fp, fn, tp
pd.DataFrame({'test': y_test, 'pred': y_pred})
# ## Nao funcionou
# ### Recursive Feature Elimination
# Usando o Gradient Boosting Trees, conseguimos um modelo base que já tem uma boa métrica definida.
# Vamos observar uma outra seleção de features, baseada na eliminação de features, recursivamente,
# observando o score da predição
# +
X = train.drop(["TARGET","ID"],axis=1)
y = train.TARGET
test = test_backup
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1729)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.feature_selection import RFECV
estimator = RandomForestClassifier()
selector = RFECV(estimator=estimator, step=1, cv=KFold(3),
scoring='accuracy')
# -
selector = selector.fit(X_train, y_train)
# +
X_train_rfe = selector.transform(X_train)
X_test_rfe = selector.transform(X_test)
test = selector.transform(test)
# -
y_pred = selector.predict(X_test)
selector.n_features_
print(classification_report(y_true = y_test.values, y_pred = y_pred,
labels=[0,1], target_names=['satisfeito', 'insatisfeito']))
roc_auc_score(y_test, y_pred)
print(classification_report(y_test, y_pred, target_names=['satisfeito', 'insatisfeito']))
# +
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred, labels=[0,1])
tn, fp, fn, tp = cm.ravel()
sns.heatmap(confusion_matrix(y_test, y_pred, labels=[0,1]), annot=True,
annot_kws={"size": 16}, cmap=plt.cm.Blues)
plt.tight_layout()
|
santander_customer_satisfaction/santander_customer_satisfaction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # String Formatting
# ---
# ## Old Way
# strings
'Hello %s' % ('World!',)
# numbers
'The number is: %d' % 5
# float
'Total Amount: %f' % 2.789654
# floating point with 2 decimal places
'Total Amount: %.2f' % 2.789654
# ## New Way
# {} is replaced by formatting
'Hello to string {}'.format('formatting')
# *{} are called placeholders;*
# the first argument to the format method replaces the first {} and so on
'This is {} argument and this the {}'.format('first', 'second')
# or we can use index for arguments passed to format method
'The {1} and come at {0} position using index like {2} and {3}'.format('first', 'second', 0, 1)
# or keyword argument like this
'Or we can use {what} argument'.format(what='keyword')
# +
# this is useful
details = {
'username': 'hitler47',
'firstname': 'Adolf',
'lastname': 'Hitler',
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
"""Hello {username},
Thank you for registering in our site.
Here are your details:
Firstname: {firstname}
Lastname: {lastname}
Email: {email}
Username: {username}
Password: <PASSWORD>}
Please, don't blame us if you show this email to other people and got hacked ;)
Best,
Some WebApp
""".format(**details)
# -
# **Note:** `**details` *just unpacks the dictionary into key=value pair*
# with a list or tuple like this
'The first value of {} is {lst[0]}'.format('lst', lst=[8, 7, 5])
# ### Fun kinds
'Galaxy far far {:<90}'.format('away!')
'I am {:>90}'.format('right')
'{:^40}'.format('Justified')
'{:*^40}'.format('Welcome')
# a float
'{0:f}'.format(4.56789)
# a float with sign
'{0:+f}'.format(4.56789)
# a float with sign
'{0:+f}'.format(-4.56789)
# a float with rounded decimal places
'{0:.2f}'.format(4.56789)
# large number with comma separator
'{:,}'.format(4568921003456)
# octal
'{:o}'.format(42)
# hex
'{:x}'.format(42)
# hex with prefix
'{:#x}'.format(42)
# octal with prefix
'{:#o}'.format(42)
from datetime import datetime
# date formatting
'{:%Y-%m-%d}'.format(datetime.now())
|
Python/References/StringFormatting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="Y3Diy0OEOQWX"
# # Get Ready
# + [markdown] colab_type="text" id="HHGCVHeO8HRY"
# ## Install required libraries
# Uncomment lines for installing
# + colab={"base_uri": "https://localhost:8080/", "height": 845} colab_type="code" id="R7L0ivqq956P" outputId="93624dbe-d1dc-4d73-d3dc-14821fdacb7c"
# # !pip install nest_asyncio
# # !pip install pyecharts
# + [markdown] colab_type="text" id="ivo-AlbBBFr9"
# ## Import library
# + colab={} colab_type="code" id="OwVQy_i3-Ttm"
import warnings
import nest_asyncio
import pandas as pd
from snapshot_pyppeteer import snapshot
from pyecharts import *
from pyecharts.charts import Map
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.render import make_snapshot
from pyecharts.globals import CurrentConfig, NotebookType
from IPython.core.interactiveshell import InteractiveShell
# + [markdown] colab_type="text" id="O-RtAjVdEZUZ"
# ## Some settings for this code file
# + colab={} colab_type="code" id="98ukeol--T0S"
# !jupyter trust Data-Analysis.ipynb
InteractiveShell.ast_node_interactivity = "all"
CurrentConfig.NOTEBOOK_TYPE = NotebookType.JUPYTER_LAB
theme=ThemeType.LIGHT
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 500)
pd.set_option('display.max_colwidth', 50)
warnings.simplefilter(action='ignore', category=FutureWarning)
nest_asyncio.apply()
data_file_root_path = "Data"
data_source = "https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset/data"
# + [markdown] colab_type="text" id="0bC1JWvOFfDc"
# # Data Analysis
# + colab={} colab_type="code" id="PBzAnWUU-KUq"
def read_file(file_name):
    """Load one CSV of the novel-corona-virus-2019 dataset.

    NaNs are replaced with 0 and the slash-named location columns are
    renamed to attribute-friendly identifiers.
    """
    path = f"{data_file_root_path}/novel-corona-virus-2019-dataset/{file_name}"
    frame = pd.read_csv(path).fillna(0)
    return frame.rename(columns={
        'Country/Region': 'CountryRegion',
        'Province/State': 'ProvinceState',
    })
# + colab={} colab_type="code" id="RBXvjDstdrFn"
def cal_new_confirmed(a_df):
    """Add a 'NewConfirmed' column: day-over-day increase of 'Confirmed'.

    The first row has no predecessor, so its own confirmed count is used.
    Mutates *a_df* in place and returns it.

    Replaces the original index-juggling loop (which relied on a
    try/except KeyError to terminate, and on a contiguous RangeIndex)
    with a vectorised Series.diff().
    """
    confirmed = a_df['Confirmed']
    new_confirmed = confirmed.diff()
    # first observation: the entire count is "new"
    new_confirmed.iloc[0] = confirmed.iloc[0]
    a_df["NewConfirmed"] = new_confirmed.astype(int)
    return a_df
# + [markdown] colab_type="text" id="sem8sUUuFmUw"
# ## Basic Summary
# + colab={"base_uri": "https://localhost:8080/", "height": 419} colab_type="code" id="LSbAVeoSCqQo" outputId="10577963-9e24-4663-99d8-f0b8fd7041d7"
covid_19_data = read_file("covid_19_data.csv")
covid_19_data["Active"] = covid_19_data['Confirmed'] - covid_19_data['Deaths'] - covid_19_data['Recovered']
start_obser_date = covid_19_data["ObservationDate"].iloc[0]
latest_obser_date = covid_19_data["ObservationDate"].iloc[-1]
covid_19_data
# + [markdown] colab_type="text" id="cr9SAHvyZdMo"
# ### Overall cases count
# + colab={"base_uri": "https://localhost:8080/", "height": 61} colab_type="code" id="8UZ4FDXFYZ_L" outputId="b3015a71-8d8e-4296-a8f4-21ccaaa62021"
grouped_static = covid_19_data.groupby('ObservationDate')['Confirmed', 'Deaths', 'Recovered', 'Active'].sum().reset_index()
grouped_static = cal_new_confirmed(grouped_static)
total_static = grouped_static[grouped_static["ObservationDate"] == max(grouped_static["ObservationDate"])].reset_index(drop=True)
total_static.style.background_gradient(cmap='Pastel1').format({
"Confirmed": "{:,.0f}",
"Deaths": "{:,.0f}",
"Recovered": "{:,.0f}",
"Active": "{:,.0f}",
"NewConfirmed": "{:,.0f}",
})
# + [markdown] colab_type="text" id="vz40zYEZZrwW"
# ### Cases per country/Region
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="li-S4Rr6WUzG" outputId="5d520c3d-e692-4464-802c-5627a7f5e069"
print(f"Latest Record of Data: {latest_obser_date} \n")
latest_covid_19_data = covid_19_data[covid_19_data["ObservationDate"] == max(covid_19_data["ObservationDate"])].reset_index()
basic_static = latest_covid_19_data.groupby(["CountryRegion"])['Confirmed', 'Deaths', 'Recovered', 'Active'].sum().reset_index()
basic_static = basic_static.sort_values(by='Confirmed', ascending=False).reset_index(drop=True)
basic_static.index += 1
basic_static.style.background_gradient(cmap='Reds').format(
{"Confirmed": "{:,.0f}",
"Deaths": "{:,.0f}",
"Recovered": "{:,.0f}",
"Active": "{:,.0f}",}
)
# + [markdown] colab_type="text" id="V_2aoFdtOihc"
# ### Plot top 15 countries data
# -
def plot_ebar(a_df, type_str: str) -> charts.Bar:
    """Render a pyecharts bar chart of one case count per country.

    a_df: frame with a 'CountryRegion' column and a column named *type_str*
          ('Confirmed', 'Recovered' or 'Deaths').
    Side effects: renders Images/<type_str>-bar.html and snapshots it to
    Images/<type_str>-bar.png. Returns the Bar object for notebook display.
    Reads the module-level start_obser_date / latest_obser_date for the subtitle.
    """
    country = a_df["CountryRegion"].to_list()
    y1 = a_df[type_str].to_list()
    # one fixed colour per case type
    color_dict = {
        "Confirmed": "#FF5252",
        "Recovered": "#00BFA5",
        "Deaths": "#FF6D00"
    }
    bar = (
        charts.Bar(init_opts=opts.InitOpts(
            theme=ThemeType.LIGHT,
            width="1350px",
            height="800px"
        ))
        .add_xaxis(
            country,
        )
        .add_yaxis(
            type_str, y1,
            itemstyle_opts=opts.ItemStyleOpts(
                color = color_dict[type_str]
            ),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title=f'{type_str} Number of Top 15 Confirmed Count Countries',
                subtitle=f"from {start_obser_date} to {latest_obser_date}",
                pos_top=0
            ),
            yaxis_opts=opts.AxisOpts(
                # name=f'Number of cases',
                name_location="center",
            ),
            xaxis_opts=opts.AxisOpts(
                name="Country/\nRegion",
                # rotate labels so all 15 country names fit without skipping
                axislabel_opts = opts.LabelOpts(
                    interval=0,
                    rotate=25,
                    margin=10
                )
            ),
            legend_opts=opts.LegendOpts(
                is_show=True,
                pos_right=100,
            ),
        )
    )
    # persist both an interactive HTML page and a static PNG snapshot
    file_name = f"{type_str}-bar"
    make_snapshot(
        snapshot,
        bar.render(f"Images/{file_name}.html"),
        f"Images/{file_name}.png"
    )
    return bar
# + colab={} colab_type="code" id="P3UtOAhOiWnO"
top_15_countries = basic_static.head(15)
# -
top_15_confirmed_bar = plot_ebar(top_15_countries, "Confirmed")
top_15_confirmed_bar.load_javascript()
top_15_confirmed_bar.render_notebook()
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="zbQxyC-d-T8y" outputId="e3e80dd3-c1f0-45da-8578-3fd4f6faaafd"
top_15_deaths_bar = plot_ebar(top_15_countries, "Deaths")
top_15_deaths_bar.load_javascript()
top_15_deaths_bar.render_notebook()
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="0QuiqBczlCRQ" outputId="b970c7dc-e54a-45c5-eaea-c2b18e4a7726"
top_15_recovered_bar = plot_ebar(top_15_countries, "Recovered")
top_15_recovered_bar.load_javascript()
top_15_recovered_bar.render_notebook()
# + colab={} colab_type="code" id="dDwhVeBAlCY6"
# + [markdown] colab_type="text" id="9DsuWcn6advi"
# ## Tendency
# + [markdown] colab_type="text" id="xHgN4ixA8i9g"
# ### Functions for trend analysis
# + [markdown] colab_type="text" id="9yzWyar28cVl"
# #### Make trend table
# + colab={} colab_type="code" id="YhZukHCt6UMs"
def make_trend_table(country: str):
    """Aggregate the module-level covid_19_data frame for one country into a
    per-day table of Confirmed/Deaths/Recovered/Active totals.

    Returns a frame with 'ObservationDate' parsed to datetime.
    """
    line_data = covid_19_data[covid_19_data['CountryRegion']==country]
    # Fix: select the columns with a list (double brackets) -- the old
    # groupby(...)['a', 'b'] tuple form was deprecated in pandas 0.25 and
    # later removed.
    line_data = line_data.groupby(["ObservationDate"])[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum().reset_index()
    line_data["ObservationDate"] = pd.to_datetime(line_data["ObservationDate"], format='%m/%d/%Y')
    return line_data
# + [markdown] colab_type="text" id="cY4ExMTp8qN7"
# #### Plot trend data
# -
def plot_line_trend(trend_df, country: str) -> charts.Line:
    """Render the five case-count series of one country as a pyecharts line chart.

    trend_df: output of make_trend_table + cal_new_confirmed (must contain
              ObservationDate, Confirmed, Deaths, Recovered, Active, NewConfirmed).
    Side effects: renders Images/<country>-trend.html and snapshots it to
    Images/<country>-trend.png. Returns the Line object for notebook display.
    Reads the module-level start_obser_date / latest_obser_date for the subtitle.
    """
    # x axis: observation dates as strings; y axes: one list per series
    date = trend_df["ObservationDate"].astype(str).to_list()
    y1 = trend_df["Confirmed"].to_list()
    y2 = trend_df["Deaths"].to_list()
    y3 = trend_df["Recovered"].to_list()
    y4 = trend_df["Active"].to_list()
    y5 = trend_df["NewConfirmed"].to_list()
    line = (
        charts.Line(init_opts=opts.InitOpts(
            theme=ThemeType.LIGHT,
            width="1350px",
            height="800px"
        ))
        .add_xaxis(xaxis_data=date)
        .add_yaxis(
            series_name="Confirmed",
            y_axis=y1,
            label_opts=opts.LabelOpts(is_show=False),
            # is_smooth=True,
            linestyle_opts=opts.LineStyleOpts(width=3),
        )
        .add_yaxis(
            series_name="Recovered",
            y_axis=y3,
            label_opts=opts.LabelOpts(is_show=False),
            # is_smooth=True,
            linestyle_opts=opts.LineStyleOpts(width=3),
        )
        .add_yaxis(
            series_name="Deaths",
            y_axis=y2,
            label_opts=opts.LabelOpts(is_show=False),
            # is_smooth=True,
            linestyle_opts=opts.LineStyleOpts(width=3),
        )
        .add_yaxis(
            series_name="Active",
            y_axis=y4,
            label_opts=opts.LabelOpts(is_show=False),
            # is_smooth=True,
            linestyle_opts=opts.LineStyleOpts(width=3),
        )
        .add_yaxis(
            series_name="New Confirmed",
            y_axis=y5,
            label_opts=opts.LabelOpts(is_show=False),
            # is_smooth=True,
            linestyle_opts=opts.LineStyleOpts(width=3),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title=f"COVID-19 Trend of {country}",
                subtitle=f"from {start_obser_date} to {latest_obser_date}"
            ),
            tooltip_opts=opts.TooltipOpts(trigger="axis"),
            yaxis_opts=opts.AxisOpts(
                type_="value",
                axistick_opts=opts.AxisTickOpts(is_show=True),
                splitline_opts=opts.SplitLineOpts(is_show=True),
            ),
            xaxis_opts=opts.AxisOpts(type_="category", boundary_gap=False),
        ))
    # persist both an interactive HTML page and a static PNG snapshot
    file_name = f"{country}-trend"
    make_snapshot(
        snapshot,
        line.render(f"Images/{file_name}.html"),
        f"Images/{file_name}.png"
    )
    return line
# + [markdown] colab_type="text" id="r-pEaZyawDeo"
# ### China Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 419} colab_type="code" id="ijOfdmHv-UBz" outputId="f9ecc572-69b2-46eb-d410-ae8e366c2166"
china_line_data = make_trend_table('Mainland China')
china_line_data = cal_new_confirmed(china_line_data)
china_line_data
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="gO7wtBTGUoXc" outputId="b327b1c6-ac44-4d32-d9f8-b14737fe96f0"
china_line = plot_line_trend(china_line_data, 'Mainland China')
china_line.load_javascript()
china_line.render_notebook()
# + [markdown] colab_type="text" id="ottXBBbwAzvC"
# ### Singapore Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="XZxqs_6lA0Bb" outputId="130aec57-28d1-4b40-f2f8-328f95b1382a"
singapore_line_data = make_trend_table('Singapore')
singapore_line_data = cal_new_confirmed(singapore_line_data)
singapore_line = plot_line_trend(singapore_line_data, 'Singapore')
singapore_line.load_javascript()
singapore_line.render_notebook()
# + [markdown] colab_type="text" id="506vm_4H6CB8"
# ### Japan Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="SRzLNrqMUoZm" outputId="c914dc2e-84ac-4f0e-829a-ae84159ab36f"
japan_line_data = make_trend_table('Japan')
japan_line_data = cal_new_confirmed(japan_line_data)
japan_line = plot_line_trend(japan_line_data, 'Japan')
japan_line.load_javascript()
japan_line.render_notebook()
# + [markdown] colab_type="text" id="ikm6Nf0C9NFA"
# ### South Korea Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="_Y23dwWBUoe-" outputId="a9824519-0472-4c72-8da2-23ca3ec5503d"
south_korea_line_data = make_trend_table('South Korea')
south_korea_line_data = cal_new_confirmed(south_korea_line_data)
south_korea_line = plot_line_trend(south_korea_line_data, 'South Korea')
south_korea_line.load_javascript()
south_korea_line.render_notebook()
# + [markdown] colab_type="text" id="7keISI1TvPt6"
# ### India Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="Oz98-WGgvP0B" outputId="89471c73-e4a9-40e2-ed87-383a437599f5"
india_line_data = make_trend_table('India')
india_line_data = cal_new_confirmed(india_line_data)
india_line = plot_line_trend(india_line_data, 'India')
india_line.load_javascript()
india_line.render_notebook()
# + [markdown] colab_type="text" id="GKGNxqv79m23"
# ### United Kingdom Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="MuCS2TKvUomj" outputId="3503f223-a5a6-4bdd-c9d3-0cdff0a27a58"
uk_line_data = make_trend_table('UK')
uk_line_data = cal_new_confirmed(uk_line_data)
uk_line = plot_line_trend(uk_line_data, 'UK')
uk_line.load_javascript()
uk_line.render_notebook()
# + [markdown] colab_type="text" id="sq-Q8dkF-cDR"
# ### Italy Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="B863CD4VUodC" outputId="54edf4ed-c171-4cf2-e0d3-ec3adc4e1759"
italy_line_data = make_trend_table('Italy')
italy_line_data = cal_new_confirmed(italy_line_data)
italy_line = plot_line_trend(italy_line_data, 'Italy')
italy_line.load_javascript()
italy_line.render_notebook()
# + [markdown] colab_type="text" id="t1_XcuHoF1CN"
# ### Spain Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="j29WLiP9F1L2" outputId="26574dd7-1148-463b-c8ab-b5dcf7c71fcd"
spain_line_data = make_trend_table('Spain')
spain_line_data = cal_new_confirmed(spain_line_data)
spain_line = plot_line_trend(spain_line_data, 'Spain')
spain_line.load_javascript()
spain_line.render_notebook()
# + [markdown] colab_type="text" id="QqqWC4S8JbsI"
# ### Australia Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="6SLSHi17Jbz9" outputId="62dc3bde-b013-4e20-bebd-e9c5b22207cf"
australia_line_data = make_trend_table('Australia')
australia_line_data = cal_new_confirmed(australia_line_data)
australia_line = plot_line_trend(australia_line_data, 'Australia')
australia_line.load_javascript()
australia_line.render_notebook()
# + [markdown] colab_type="text" id="8tvDuphZTpME"
# ### France Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="ErjeGGQNTpcp" outputId="236530ae-9e8c-4117-ab51-4a27636a8823"
france_line_data = make_trend_table('France')
france_line_data = cal_new_confirmed(france_line_data)
france_line = plot_line_trend(france_line_data, 'France')
france_line.load_javascript()
france_line.render_notebook()
# + [markdown] colab_type="text" id="VMp4EZPM-qwO"
# ### America Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="wePgWJtkUojG" outputId="f1fcc38f-dc6a-4dee-c90b-b54b45515581"
us_line_data = make_trend_table('US')
us_line_data = cal_new_confirmed(us_line_data)
us_line = plot_line_trend(us_line_data, 'US')
us_line.load_javascript()
us_line.render_notebook()
# + [markdown] colab_type="text" id="Pok0rsMITfTK"
# ### Hong Kong Trend
# + colab={"base_uri": "https://localhost:8080/", "height": 817} colab_type="code" id="ZaIeOkwcQDCI" outputId="0d706985-83b9-4ada-86c9-1e00e854e449"
hk_line_data = make_trend_table('Hong Kong')
hk_line_data = cal_new_confirmed(hk_line_data)
hk_line = plot_line_trend(hk_line_data, 'Hong Kong')
hk_line.load_javascript()
hk_line.render_notebook()
# + [markdown] colab_type="text" id="Rm3aejJlIKFO"
# ## Make Pie Chart function
# + colab={} colab_type="code" id="MpM4-08aUozK"
def plot_grouping_pie_chart(grouped_df, group_name: str) -> charts.Pie:
    """Render a donut chart of group shares (output of make_pie_df).

    grouped_df: frame with a *group_name* label column and a 'Percent' column.
    Side effects: renders Images/<group_name>-grouping-percentage.html and
    snapshots it to the matching .png. Returns the Pie object for display.
    """
    labels = grouped_df[group_name]
    percentages = grouped_df["Percent"]
    pie = (
        charts.Pie(init_opts=opts.InitOpts(
            theme=ThemeType.LIGHT
        ))
        .add(
            # inner/outer radius -> donut style
            "", [list(z) for z in zip(labels, percentages)],
            radius=["40%", "75%"],
            # NOTE(review): pyecharts documents rosetype values "radius"/"area";
            # "percentages" looks unrecognised (falls back to a plain pie) -- confirm.
            rosetype="percentages"
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title=f"COVID-19 Confirmed {group_name} Group",
                subtitle= f"from {start_obser_date} to {latest_obser_date}"
            ),
            legend_opts=opts.LegendOpts(
                orient='vertical',
                is_show=True,
                pos_right=10,
                pos_top=50
            ),
        )
        .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
    )
    make_snapshot(
        snapshot,
        pie.render(f"Images/{group_name}-grouping-percentage.html"),
        f"Images/{group_name}-grouping-percentage.png"
    )
    return pie
# -
def make_pie_df(a_df):
    """Turn a grouped count Series into a frame for the pie charts.

    a_df: Series of group sizes (e.g. from groupby(...).size()) whose index
          carries the group labels.
    Returns a frame with the label column, a 'Count' column and a 'Percent'
    column (share of the total, rounded to 3 decimals).
    """
    new_grouped = a_df.reset_index(name="Count")
    # Vectorised share computation; also stops shadowing the builtin `sum`.
    total = new_grouped["Count"].sum()
    new_grouped["Percent"] = (new_grouped["Count"] / total).round(3)
    return new_grouped
# + [markdown] colab_type="text" id="22_VXChyEIhe" toc-hr-collapsed=false
# ## Age Group
# -
# ### Process Data
# + colab={} colab_type="code" id="dHGHlGJs-T_S"
COVID19_open_line_list = read_file("COVID19_open_line_list.csv")
null_index = COVID19_open_line_list[COVID19_open_line_list["ID"] == 0.0].index
COVID19_open_line_list.drop(null_index, inplace=True)
COVID19_open_line_list = COVID19_open_line_list.loc[:, ~COVID19_open_line_list.columns.str.contains('^Unnamed')]
COVID19_open_line_list = COVID19_open_line_list.loc[:, ~COVID19_open_line_list.columns.str.contains('^admin')]
COVID19_open_line_list = COVID19_open_line_list.replace({
"male" : "Male",
"female" : "Female",
})
# + colab={"base_uri": "https://localhost:8080/", "height": 728} colab_type="code" id="6IsMGMYlUovs" outputId="ac416044-0228-4fe1-edf5-52ea32a40784"
age_series = COVID19_open_line_list["age"].astype(str)
for age in age_series:
if '-' in age:
age_range = age.split('-')
a1 = int(age_range[0])
a2 = int(age_range[1])
a = int((a1 + a2) / 2)
age_series = age_series.replace(age, a)
COVID19_open_line_list["age"] = age_series.astype(float)
COVID19_open_line_list
# + colab={"base_uri": "https://localhost:8080/", "height": 215} colab_type="code" id="Oqz2IfNzUooj" outputId="7845fe41-39dc-4d6f-b74d-d61e71f7a968"
list_bins = [1, 10, 20, 30, 40, 50, 60, 70, 80, 100]
list_label = ['0-10', '11-20', '21-30', '31-40', '41-50', '51-60', '61-70', '71-80', '81-100']
age_grouped = pd.cut(COVID19_open_line_list["age"], bins=list_bins, labels=list_label, include_lowest=True)
age_group_df = pd.Series(age_grouped, name=("Age")).to_frame()
age_group_df = age_group_df.groupby(["Age"]).size()
new_age_grouped = make_pie_df(age_group_df)
new_age_grouped
# -
# ### Plot Pie Chart
# + colab={"base_uri": "https://localhost:8080/", "height": 417} colab_type="code" id="7oxb3prP9riC" outputId="6e2f43c8-a926-4671-f439-1084b81a15df"
age_pie = plot_grouping_pie_chart(new_age_grouped, "Age")
age_pie.load_javascript()
age_pie.render_notebook()
# + [markdown] colab_type="text" id="RmFtt-gFhH5N"
# ## Gender Group
# + colab={"base_uri": "https://localhost:8080/", "height": 107} colab_type="code" id="4Hp60DlwJmGn" outputId="f4f45366-ae40-48ea-dd5b-2ff00f6acc5e"
gender_group = COVID19_open_line_list.groupby("sex")
gender_group_count = gender_group.size()
gender_group_count.index.name = "Gender"
gender_group_count = make_pie_df(gender_group_count).replace(0, "Not Reported")
gender_group_count
# + colab={"base_uri": "https://localhost:8080/", "height": 367} colab_type="code" id="hEj6J3m3ISTp" outputId="939092b3-2984-4dfb-8a9e-3a7f559b5b8a"
gender_pie = plot_grouping_pie_chart(gender_group_count, "Gender")
gender_pie.load_javascript()
gender_pie.render_notebook()
# + colab={} colab_type="code" id="_tWN29KxJmMi"
# + [markdown] colab_type="text" id="s1FFyIriOqU2"
# ## Map
# Reference: [A Complete Guide to an Interactive Geographical Map using Python](https://towardsdatascience.com/a-complete-guide-to-an-interactive-geographical-map-using-python-f4c5197e23e0)
# +
plot_geo_df = basic_static
plot_geo_df = plot_geo_df.replace({
"US" : "United States",
"Mainland China" : "China",
"UK" : "United Kingdom",
"Congo (Brazzaville)" : "Dem. Rep. Congo",
"Congo (Kinshasa)" : "Congo",
"Burma" : "Myanmar",
"South Sudan" : "S. Sudan",
"Central African Republic" : "Central African Rep.",
"Western Sahara" : "W. Sahara",
"South Korea" : "Korea",
"Czech Republic" : "Czech Rep.",
"Dominican Republic" : "Dominican Rep.",
"Ivory Coast" : "Côte d'Ivoire",
"Laos" : "Lao PDR",
"North Macedonia" : "Macedonia",
"Bosnia and Herzegovina" : "Bosnia and Herz.",
"Equatorial Guinea" : "Eq. Guinea"
})
locate = plot_geo_df["CountryRegion"]
confirmed_cases = plot_geo_df["Confirmed"].astype(int)
recovered_cases = plot_geo_df["Recovered"].astype(int)
deaths_cases = plot_geo_df["Deaths"].astype(int)
# -
def plot_map(cases, type_str):
    """Render a world choropleth for one case-count series.

    cases: Series of counts aligned with the module-level `locate` series
           of country names.
    Side effects: renders Images/COVID-19-Global-<type_str>-Cases.html and
    snapshots it to the matching .png. Returns the Map object for display.
    """
    file_name = f"COVID-19-Global-{type_str}-Cases"
    # renamed from `map`, which shadowed the builtin
    world_map = (
        charts.Map(init_opts=opts.InitOpts(
            theme=ThemeType.LIGHT,
            width="1350px",
            height="800px"
        ))
        .add(
            f"World {type_str} Cases",
            [list(z) for z in zip(locate.to_list(), cases.to_list())],
            "world",
            is_map_symbol_show=False,
            zoom=1.2
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title=file_name.replace('-', " "),
                subtitle=f"from {start_obser_date} to {latest_obser_date}"
            ),
            # colour-scale ceiling: counts above 300k all render as the max colour
            visualmap_opts=opts.VisualMapOpts(max_=300000),
        )
    )
    make_snapshot(
        snapshot,
        world_map.render(f"Images/{file_name}.html"),
        f"Images/{file_name}.png"
    )
    return world_map
# + colab={} colab_type="code" id="vhmh_TGUBA7G"
# Fix: "Comfirmed" -> "Confirmed" so the chart title and the snapshot file
# names are spelled correctly.
world_confirmed_map = plot_map(confirmed_cases, "Confirmed")
world_confirmed_map.load_javascript()
world_confirmed_map.render_notebook()
# + colab={} colab_type="code" id="D8WLnGEeBA_O"
world_recovered_map = plot_map(recovered_cases, "Recovered")
world_recovered_map.load_javascript()
world_recovered_map.render_notebook()
# -
# Fix copy-paste bug: the deaths map was created but the *recovered* map was
# loaded and displayed; show the deaths map instead.
world_deaths_map = plot_map(deaths_cases, "Deaths")
world_deaths_map.load_javascript()
world_deaths_map.render_notebook()
# + [markdown] colab_type="text" id="UrEFOd1Hc_f-"
# #### Calculate US time series confirmed cases
# + colab={} colab_type="code" id="BbTT7ozXdICN"
def cal_us_series_sum(a_df):
    """Append a 'Sum' column: the row-wise total of every date column of the
    US time-series frame (all metadata columns excluded from the sum).

    Mutates *a_df* in place and returns it.
    """
    metadata_columns = [
        "UID", "iso2", "iso3", "code3", "FIPS", "Admin2",
        "Province_State", "Country_Region", "Lat", "Long_", "Combined_Key",
    ]
    numeric_part = a_df.drop(columns=metadata_columns)
    a_df["Sum"] = numeric_part.sum(axis=1)
    return a_df
# + colab={} colab_type="code" id="jn2-o3vkdSLA"
time_series_covid_19_confirmed_US = cal_us_series_sum(read_file("time_series_covid_19_confirmed_US.csv"))
time_series_covid_19_deaths_US = cal_us_series_sum(read_file("time_series_covid_19_deaths_US.csv"))
# + [markdown] colab_type="text" id="ZYjyaPPYA5uX"
# #### Combine whole time series data
# + colab={} colab_type="code" id="5Qxw-VK6BA2e"
# Combine the global and the US confirmed time-series tables into one frame.
time_series_covid_19_confirmed = read_file("time_series_covid_19_confirmed.csv")
time_series_covid_19_confirmed_US = read_file("time_series_covid_19_confirmed_US.csv")
# Fix: Series.append was deprecated in pandas 1.4 and removed in 2.0 -> pd.concat.
lat = pd.concat([time_series_covid_19_confirmed["Lat"],
                 time_series_covid_19_confirmed_US["Lat"]])
lon = pd.concat([time_series_covid_19_confirmed["Long"],
                 time_series_covid_19_confirmed_US["Long_"]])
ProvinceState = pd.concat([time_series_covid_19_confirmed["ProvinceState"].astype(str),
                           time_series_covid_19_confirmed_US["Province_State"].astype(str)])
CountryRegion = pd.concat([time_series_covid_19_confirmed["CountryRegion"].astype(str),
                           time_series_covid_19_confirmed_US["Country_Region"].astype(str)])
# keep only the date columns (all their labels contain '20')
time_series_o = time_series_covid_19_confirmed.loc[:, time_series_covid_19_confirmed.columns.str.contains('20')]
time_series_US = time_series_covid_19_confirmed_US.loc[:, time_series_covid_19_confirmed_US.columns.str.contains('20')]
# NOTE(review): '+' aligns on the row index, not on location; rows beyond the
# shorter frame become NaN and are zero-filled below -- confirm this is intended.
time_series = time_series_o + time_series_US
time_series.fillna(0, inplace=True)
all_time_series = {
    "CountryRegion": CountryRegion,
    "ProvinceState": ProvinceState,
    "Latitude": lat,
    "Longitude": lon,
}
all_time_series_df = pd.DataFrame(all_time_series)
all_time_series_df = all_time_series_df.join(time_series)
# total over all dates, metadata columns excluded
all_number_time_series_df = all_time_series_df.drop(
    columns=[
        "ProvinceState", "CountryRegion", "Latitude", "Longitude"
    ]
)
all_time_series_df["Sum"] = all_number_time_series_df.sum(axis=1).astype(int)
# + colab={} colab_type="code" id="F6Z8IZl5JahD"
all_time_series_df
# + colab={} colab_type="code" id="xEVWIEbMIJIQ"
# + colab={} colab_type="code" id="xnvQSP4_gSfL"
# + colab={} colab_type="code" id="kyaTJ2B_gSdh"
|
Data-Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import datetime
import rabbitpy

# Connect to the default URL of amqp://guest:guest@localhost:5672/%2F
# (5672 is the default AMQP broker port; 15672 is the management web UI).
connection = rabbitpy.Connection()
try:
    with connection.channel() as channel:
        # AMQP message properties attached to the published metric.
        properties = {'content_type': 'text/plain',
                      'timestamp': datetime.datetime.now(),
                      'message_type': 'graphite metric'}
        # Graphite plaintext format: "<metric path> <value> <unix timestamp>"
        body = 'server.cpu.utilization 25.5 1350884514'
        message = rabbitpy.Message(channel, body, properties)
        # mandatory=True asks the broker to return the message if it cannot be
        # routed; rabbitpy raises that Basic.Return as MessageReturnedException.
        message.publish('chapter2-example',
                        'server-metrics',
                        mandatory=True)
except rabbitpy.exceptions.MessageReturnedException as error:
    print('Publish failure: %s' % error)
|
notebooks/4.1.2 Handling Basic.Return.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HappyPlane Problem from RL point of View
# ## Random Envirnoment-Executions Rewards
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from PlaneEnvironment import PlaneEnv

env = PlaneEnv('../../planes/plane1.txt')

# Baseline experiment: run a purely random policy for `iters` episodes and
# record each episode's total reward, for comparison with learned policies.
iters = 200
rewards = []
for i in range(iters):
    env.reset()
    done = False
    reward = 0
    while (done == False):
        # NOTE(review): assumes a gym-like API where step() returns
        # (observation, reward, done, info) -- confirm against PlaneEnv.
        action = env.action_space_sample()
        (x, r, done, info) = env.step(action)
        reward += r
        #if (i%15):
        #    print(x)
        #env.plane.drawPlane()
    rewards.append(reward)
    #print("iter: " + str(i) + ", reward: " + str(reward))
    #print()
# Plot per-episode total reward for the random baseline.
plt.subplots(figsize=(15,5))
plt.plot(rewards)
plt.title('Rewards. Random Case')
# ## Q-Learning Algorithm for passengers allocation
import numpy as np
from QLearningAlgorithm import QLearning

ql = QLearning('../../planes/plane1.txt')
# Window size: reused as the epsilon-schedule slot and the running-average span.
s=10
totalrewards,_ = ql.launch(epsType='t2', slot=s, N=250)
# +
slot=s
# plt is available from the earlier cell's matplotlib import.
plt.subplots(figsize=(18,5))
plt.plot(totalrewards)
N = len(totalrewards)
# Trailing running average over the last `slot` episodes (shorter at the start).
# NOTE(review): slicing followed by .mean() assumes totalrewards is an ndarray.
running_avg = np.empty(N)
for t in range(N):
    running_avg[t] = totalrewards[max(0, t-slot):(t+1)].mean()
plt.plot(running_avg)
plt.title("Rewards")
# # End of evaluation!
|
src/main/PlaneEnv.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import sys
import optuna
import pandas as pd
import copy
import os
from lifelines.utils import concordance_index
from sklearn.metrics import accuracy_score, confusion_matrix, r2_score, f1_score, precision_score, recall_score, roc_auc_score
from torch.utils.data import DataLoader, TensorDataset
from torch.nn import functional as F
from torchcontrib.optim import SWA
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from optuna.integration import PyTorchLightningPruningCallback
from argparse import ArgumentParser
sys.path.append('../')
sys.path.append('../data/ml_mmrf')
sys.path.append('../data/')
from ml_mmrf.data import load_mmrf
from synthetic.synthetic_data import load_synthetic_data_trt, load_synthetic_data_noisy
from models.sfomm import SFOMM
from models.utils import *
from models.fomm import FOMM
from models.ssm.ssm import SSM
from scipy.stats import norm
# +
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
color_dict = {
'Baseline' : 'blue',
'LuPTS' : 'red',
'Stat-LuPTS': 'black',
'MLP' : 'orange'
}
marker_dict = {
'Baseline' : 's',
'LuPTS' : 'o',
'Stat-LuPTS': 'D',
'MLP' : 'p'
}
mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00']) # Set the default color cycle
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 18
# -
# ## A] Data Setup
fold = 0
data_dir = '/data/ml_mmrf/ml_mmrf/output/cleaned_mm0_2mos_pfs_nonasct_bin_ind_seed0.pkl'
# Load fold 0 of the cleaned MMRF dataset; digitize_K=0 keeps the longitudinal
# markers continuous (no binning).
ddata = load_mmrf(fold_span = [fold], \
                  digitize_K = 0, \
                  digitize_method = 'uniform', \
                  data_dir=data_dir, \
                  restrict_markers=[], \
                  add_syn_marker=True, \
                  window='first_second', \
                  data_aug=False, \
                  ablation=False, \
                  feats=[])
# +
# Pool the train/valid/test splits into single arrays -- splitting is redone
# below with RepeatedKFold for the learning-curve experiments.
X = np.concatenate((ddata[fold]['train']['x'],ddata[fold]['valid']['x'],ddata[fold]['test']['x']),axis=0)
B = np.concatenate((ddata[fold]['train']['b'],ddata[fold]['valid']['b'],ddata[fold]['test']['b']),axis=0)
Y = np.concatenate((ddata[fold]['train']['ys_seq'],ddata[fold]['valid']['ys_seq'],ddata[fold]['test']['ys_seq']),axis=0)
A = np.concatenate((ddata[fold]['train']['a'],ddata[fold]['valid']['a'],ddata[fold]['test']['a']),axis=0)
M = np.concatenate((ddata[fold]['train']['m'],ddata[fold]['valid']['m'],ddata[fold]['test']['m']),axis=0)
CE = np.concatenate((ddata[fold]['train']['ce'],ddata[fold]['valid']['ce'],ddata[fold]['test']['ce']),axis=0)
pids = np.concatenate((ddata[fold]['train']['pids'],ddata[fold]['valid']['pids'],ddata[fold]['test']['pids']),axis=0)
print(X.shape)
print(B.shape)
print(ddata[fold]['train']['feature_names_x'])
print(ddata[fold]['train']['feature_names'])
print(ddata[fold]['train'].keys())
# Outcome class balance among uncensored patients (CE == 0).
idxs = np.where(CE == 0.)
Yobs = Y[idxs]
print(len(np.where(Yobs == 1)[0]))
print(len(np.where(Yobs == 0)[0]))
print(np.sum(CE))
# -
# ## B] Model Definition
# +
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
class LUPTSRegressor():
    """Learning Using Privileged Time Series (LuPTS).

    Fits per-step state-transition models on intermediate time points
    (privileged information available only at training time) and an outcome
    model on the final state.  At prediction time the transition models are
    rolled forward from the baseline state before applying the outcome model.

    Parameters
    ----------
    state_estimator : estimator or list of estimators, optional
        Model(s) mapping the state at time t (optionally concatenated with the
        static covariates B) to the state at time t+1.  Must be a list of
        length T-1 when ``stationary`` is False.  Defaults to intercept-free
        linear regression(s).
    outcome_estimator : estimator, optional
        Model mapping the final state (plus B, if given) to the outcome.
        Defaults to LinearRegression.
    stationary : bool
        If True, one transition model is shared across all steps.
    num_states : int
        Number of transition models to build when ``stationary`` is False and
        no ``state_estimator`` list is supplied.
    """

    def __init__(self, state_estimator=None, outcome_estimator=None, stationary=True, num_states=-1):
        if not stationary and num_states == -1:
            raise ValueError('need to provide non-negative number of states.')
        self.trained = False
        self.stationary = stationary
        if state_estimator is None and stationary:
            # fit_intercept=False: transitions are modeled as purely linear maps.
            self.state_estimator = LinearRegression(fit_intercept=False)
        elif state_estimator is None and not stationary:
            self.state_estimator = [LinearRegression(fit_intercept=False) for _ in range(num_states)]
        else:
            self.state_estimator = state_estimator
        if not stationary and not isinstance(self.state_estimator, list):
            raise Exception('state_estimator must be a list of estimators for non-stationary setup')
        if outcome_estimator is None:
            self.outcome_estimator = LinearRegression()
        else:
            self.outcome_estimator = outcome_estimator

    def fit(self, Xs, y, B=None):
        """Fit the transition and outcome models.

        Xs : list of (m, d) arrays, one per time point; Xs[0] is baseline.
        y  : (m,) or (m, 1) outcome array (raveled before fitting).
        B  : optional (m, b) array of static baseline covariates.
        """
        if not isinstance(Xs, list):
            raise Exception('Xs must be a list of dataframes or 2D arrays')
        self.T = len(Xs)
        if not self.stationary:
            assert self.T-1 == len(self.state_estimator), 'number of estimators not equivalent to T-1'
        XT = Xs[-1]
        if self.T > 1:
            self.fit_state_(Xs, B)
        self.fit_outcome_(XT, y, B)
        self.trained = True
        return self

    def fit_state_(self, Xs, B=None):
        """Fit the state-transition model(s) on consecutive (t -> t+1) pairs."""
        if self.stationary:
            # One shared transition model: stack all transition pairs.
            inp = np.concatenate(Xs[:-1], axis=0)
            if B is not None:
                # Repeat the static covariates once per stacked time step.
                base_cat = np.repeat(B, len(Xs)-1, axis=0)
                inp = np.concatenate([inp, base_cat], axis=-1)
            out = np.concatenate(Xs[1:], axis=0)
            self.Mz = self.state_estimator.fit(inp, out)
        else:
            self.Mz = []
            for i, estimator in enumerate(self.state_estimator):
                inp = Xs[i]; out = Xs[i+1]
                # BUGFIX: previously B was concatenated unconditionally, so a
                # non-stationary fit with B=None crashed inside np.concatenate.
                if B is not None:
                    inp = np.concatenate([inp, B], axis=-1)
                self.Mz.append(estimator.fit(inp, out))

    def fit_outcome_(self, X, y, B=None):
        """Fit the outcome model on the final state (plus B, if given)."""
        if B is not None:
            X = np.concatenate([X, B], axis=-1)
        self.My = self.outcome_estimator.fit(X, y.ravel())

    def predict(self, X, B=None):
        """Roll the transition model(s) forward from baseline X, then predict y."""
        if not self.trained:
            raise Exception('Model not yet fit to data')
        Z = X
        if B is not None:
            Z = np.concatenate([X, B], axis=-1)
        for t in range(self.T-1):
            if self.stationary:
                Z = self.Mz.predict(Z)
            else:
                Z = self.Mz[t].predict(Z)
            # Transition models output only the state; re-append B each step.
            if B is not None:
                Z = np.concatenate([Z, B], axis=-1)
        y = self.My.predict(Z)
        return y
# +
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
def NonLinearCV(ty='classifier'):
    """Build a RandomizedSearchCV over Random Forest hyperparameters.

    ty : 'classifier' selects RandomForestClassifier; anything else selects
         RandomForestRegressor.
    Returns an unfitted RandomizedSearchCV (300 sampled parameter
    combinations, 3-fold CV, all cores).
    """
    # Hyperparameter distributions to sample from.
    # NOTE(review): max_features='auto' was removed in scikit-learn 1.3;
    # kept as-is since changing it alters the regressor's behavior.
    random_grid = {
        'n_estimators': [int(x) for x in np.linspace(start=1, stop=20, num=10)],
        'max_features': ['auto'],
        'max_depth': [int(x) for x in np.linspace(1, 20, num=10)] + [None],
        'min_samples_split': [5, 7, 10],
        'min_samples_leaf': [15, 20],
        'bootstrap': [True],
    }
    # Base estimator to tune.
    base_model = RandomForestClassifier() if ty == 'classifier' else RandomForestRegressor()
    return RandomizedSearchCV(estimator=base_model, param_distributions=random_grid,
                              n_iter=300, cv=3, verbose=2, random_state=42, n_jobs=-1)
def evaluate(model, X, Y):
    """Print accuracy, ROC AUC and the confusion matrix of a binary classifier."""
    predictions = model.predict(X)
    probabilities = model.predict_proba(X)
    accuracy = accuracy_score(Y, predictions)
    # AUC uses the predicted probability of the positive class (column 1).
    roc_auc = roc_auc_score(Y, probabilities[:, 1])
    print("--- Accuracy: %.2f%% ---" % (accuracy * 100.0))
    print("--- AUC ROC: %.2f ---" % (roc_auc))
    print ("--- Confusion Matrix ---")
    print (confusion_matrix(Y, predictions, labels=[0,1]))
def evaluate_multiclass(model, X, Y):
    """Print accuracy and the confusion matrix for a 4-class classifier."""
    predictions = model.predict(X)
    print("--- Accuracy: %.2f%% ---" % (accuracy_score(Y, predictions) * 100.0))
    print ("--- Confusion Matrix ---")
    print (confusion_matrix(Y, predictions, labels=[0,1,2,3]))
# -
# ## C] Splitting, Training, and Evaluation (5x2cv)
def get_pts(Xtrain, Atrain, num_points=3):
    """Sample `num_points` state snapshots per patient across first-line therapy.

    Xtrain : (N, T, d) array of longitudinal features.
    Atrain : (N, T, a) array of treatment indicators; Atrain[:, :, -3] == 1
             marks the time steps on first-line therapy.
    num_points : 3 or 5 -- number of time points to sample between the first
             and last first-line treatment step (inclusive).

    Returns a list of `num_points` arrays, each (N, d); the j-th array holds
    every patient's features at their j-th sampled time point.

    Raises ValueError for unsupported num_points.  (Previously the time-point
    list was hard-coded to 5 entries, so the default num_points=3 always
    failed its own length assert.)
    """
    if num_points not in (3, 5):
        raise ValueError('num_points must be 3 or 5')
    # One (N, d) output buffer per sampled time point.
    Xfls = [np.zeros_like(Xtrain[:, 0, :]) for _ in range(num_points)]
    for i in range(Xtrain.shape[0]):
        # Time indices where patient i is on first-line therapy.
        trts = Atrain[i, :, -3]
        idxs = np.where(trts == 1.)[0]
        st = idxs[0]; end = idxs[-1]
        if num_points == 3:
            time_points = [st, int(np.floor(end/2)), end]
        else:
            # NOTE(review): the interior points are fractions of `end`, not of
            # the [st, end] span -- kept identical to the original sampling.
            time_points = [st, int(np.floor(end/4)), int(np.floor(end/2)), int(3*np.floor(end/4)), end]
        for j in range(num_points):
            Xfls[j][i] = Xtrain[i, time_points[j], :]
    return Xfls
# +
import numpy as np
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import ElasticNetCV,LogisticRegressionCV,MultiTaskElasticNetCV

# 2-fold CV repeated 50 times -> 100 train/test splits ("5x2cv"-style protocol).
rkf = RepeatedKFold(n_splits=2, n_repeats=50, random_state=15)
ty = 'five-time'; model = 'lin'
results = []
np.random.seed(10)
# Learning-curve grid of training-set sizes; 223 is the full half-split size.
train_sizes = np.arange(50,223,15)
train_sizes = list(train_sizes) + [223]
best_estimators = {'lupts': (None, 0.), 'baseline': (None,0.)} # for maximum training set size
for i,(train_index, test_index) in enumerate(rkf.split(X)):
    print(f'======== iter {i+1} ========')
    Xtrain, Btrain, Ytrain, Atrain, Mtrain, CEtrain, pids_train = X[train_index], B[train_index], Y[train_index], A[train_index], M[train_index], CE[train_index], pids[train_index]
    Xtest, Btest, Ytest, Atest, Mtest, CEtest, pids_test = X[test_index], B[test_index], Y[test_index], A[test_index], M[test_index], CE[test_index], pids[test_index]
    # Five state snapshots per patient across first-line therapy (privileged info).
    Xs = get_pts(Xtrain, Atrain, num_points=5)
    for size in train_sizes:
        # with CV internally (for hyperparameters)
        # Mbase = LogisticRegressionCV(random_state = 0, Cs = 25, cv = 5, solver='liblinear', penalty='l1', max_iter=1000)
        # state_estimator = MultiTaskElasticNetCV(random_state = 0, l1_ratio=[.1,.5,.7,.9,.95,.99,1.],cv=5,eps=1e-3,fit_intercept=False)
        # Ma = LUPTSRegressor(state_estimator=state_estimator, outcome_estimator=LogisticRegressionCV(random_state = 0, Cs = 25, cv = 5, solver='liblinear', penalty='l1', max_iter=1000))
        # print(f'N: {size}')
        if model == 'lin':
            Mbase = LogisticRegression(max_iter=1000)
            Ma = LUPTSRegressor(outcome_estimator=LogisticRegression(max_iter=1000),stationary=False,num_states=len(Xs)-1)
            Ma_stat = LUPTSRegressor(outcome_estimator=LogisticRegression(max_iter=1000),stationary=True,num_states=len(Xs)-1)
        else:
            Mbase = NonLinearCV(ty='classifier')
            Ma = LUPTSRegressor(state_estimator=NonLinearCV(ty='regressor'),outcome_estimator=NonLinearCV(ty='classifier'))
        # train subsample (random, without replacement, of the current size)
        train_idxs = np.random.choice(np.arange(Ytrain.shape[0]),size=size,replace=False)
        ytrain_sub = Ytrain[train_idxs]; CEtrain_sub = CEtrain[train_idxs]; Btrain_sub = Btrain[train_idxs]
        Xs_sub = [X[train_idxs] for X in Xs]
        # baseline model: baseline state + static covariates only
        Xtr_fin = np.concatenate((Xs_sub[0],Btrain_sub),axis=-1)
        Xte_fin = np.concatenate((Xtest[:,0,:],Btest),axis=-1)
        Mbase.fit(Xtr_fin, ytrain_sub.ravel())
        ybase_pred = Mbase.predict(Xte_fin)
        # LUPTS model (non-stationary transitions)
        Ma.fit(Xs_sub, ytrain_sub, Btrain_sub)
        ya_pred = Ma.predict(Xtest[:,0,:],Btest)
        # stationary model (single shared transition)
        Ma_stat.fit(Xs_sub, ytrain_sub, Btrain_sub)
        ya_pred_stat = Ma_stat.predict(Xtest[:,0,:],Btest)
        # metric computation: AUC restricted to uncensored patients (CE == 0)
        event_obs = (1.-CEtest).ravel()
        idx = np.where(event_obs>0)[0]
        ya_pred_obs = ya_pred[idx]
        ybase_pred_obs = ybase_pred[idx]
        ya_pred_stat_obs = ya_pred_stat[idx]
        ytest_obs = Ytest[idx]
        baseline_auc= roc_auc_score(ytest_obs, ybase_pred_obs)
        lupts_auc = roc_auc_score(ytest_obs, ya_pred_obs)
        lupts_stat_auc = roc_auc_score(ytest_obs, ya_pred_stat_obs)
        results.append({'type': ty, 'method': 'baseline', 'auc': baseline_auc, 'iteration': i+1, 'size': size})
        results.append({'type': ty, 'method': 'lupts', 'auc': lupts_auc, 'iteration': i+1, 'size': size})
        results.append({'type': ty, 'method': 'lupts-stat', 'auc': lupts_stat_auc, 'iteration': i+1, 'size': size})
        # Track the best fitted models at the full training size for the
        # qualitative coefficient analysis further below.
        if size == 223:
            if lupts_auc > best_estimators['lupts'][1]:
                best_estimators['lupts'] = (Ma,lupts_auc)
            if baseline_auc > best_estimators['baseline'][1]:
                best_estimators['baseline'] = (Mbase, baseline_auc)
R = pd.DataFrame(results)
R
# -
best_estimators
# ## D] Plotting
# +
# Collect per-size AUC samples (100 CV repeats each) into (n_sizes, 100) matrices.
size_by_auc_baseline = np.zeros((len(train_sizes),100))
size_by_auc_lupts = np.zeros((len(train_sizes),100))
size_by_auc_lupts_stat = np.zeros((len(train_sizes),100))
for i,size in enumerate(train_sizes):
    baseline_aucs = R[(R['size'] == size) & (R['method'] == 'baseline')]['auc'].values
    lupts_aucs = R[(R['size'] == size) & (R['method'] == 'lupts')]['auc'].values
    lupts_stat_aucs = R[(R['size'] == size) & (R['method'] == 'lupts-stat')]['auc'].values
    size_by_auc_baseline[i] = baseline_aucs; size_by_auc_lupts[i] = lupts_aucs
    size_by_auc_lupts_stat[i] = lupts_stat_aucs
# Learning curves: mean AUC per training size, with +/- 1 std shaded bands.
fig, ax = plt.subplots(figsize=(8,6))
# ax.set_title('Results on Test Set [lin/lin] (PFS Task) [w/ 3 intermediate time points]' , fontsize=20,pad=20)
ax.set_ylabel('AUC', fontsize=20)
ax.set_xlabel('Number of training samples', fontsize=20)
ax.tick_params(axis='y', labelsize=20)
ax.tick_params(axis='x', labelsize=20)
ax.set_ylim(0.48,0.62)
ax.plot(train_sizes,np.mean(size_by_auc_baseline,axis=1),marker='s',ms=8,color='blue', label='Baseline')
ax.plot(train_sizes,np.mean(size_by_auc_lupts,axis=1),marker='o',ms=8,color='red', label='LuPTS')
ax.plot(train_sizes,np.mean(size_by_auc_lupts_stat,axis=1),marker='D',ms=8,color='black', label='Stat-LuPTS')
ax.fill_between(train_sizes,np.mean(size_by_auc_baseline,axis=1)+np.std(size_by_auc_baseline,axis=1),\
                np.mean(size_by_auc_baseline,axis=1)-np.std(size_by_auc_baseline,axis=1),alpha=0.2,color='blue')
ax.fill_between(train_sizes,np.mean(size_by_auc_lupts,axis=1)+np.std(size_by_auc_lupts,axis=1),\
                np.mean(size_by_auc_lupts,axis=1)-np.std(size_by_auc_lupts,axis=1),alpha=0.2,color='red')
ax.fill_between(train_sizes,np.mean(size_by_auc_lupts_stat,axis=1)+np.std(size_by_auc_lupts_stat,axis=1),\
                np.mean(size_by_auc_lupts_stat,axis=1)-np.std(size_by_auc_lupts_stat,axis=1),alpha=0.2,color='black')
ax.legend(fontsize=20, loc='lower right')
ax.grid()
fig.savefig('./plots/fig-pfs-task-3points-100repeats-wstat.pdf',bbox_inches='tight')
# -
# ## E] Qualitative Experiments
lupts_best = best_estimators['lupts'][0]
baseline_best = best_estimators['baseline'][0]
# +
import matplotlib.patches as mpatches
# Build human-readable feature labels for the coefficient heatmap.
Xnames = ddata[fold]['train']['feature_names_x']
Bnames = ddata[fold]['train']['feature_names']
fnames = np.concatenate((Xnames,Bnames),axis=-1)
print(fnames)
new_names = []
for name in fnames:
    if name == 'serum_m_protein':
        new_names.append('Mprot')
    elif name == 'serum_beta2_microglobulin':
        new_names.append('b2m')
    elif 'cbc' in name or 'chem' in name or 'serum' in name:
        new_names.append(name.split('_')[-1])
    elif 'PC' in name:
        new_names.append(name.replace('PC','RNASEQ'))
    else:
        new_names.append(name)
# Full-precision coefficients color the cells; the rounded copies are the
# printed annotations.
all_coefs_full = np.concatenate((lupts_best.outcome_estimator.coef_,baseline_best.coef_),axis=0)
coef = np.round(lupts_best.outcome_estimator.coef_,1)
coef2 = np.round(baseline_best.coef_,1)
all_coefs = np.concatenate((coef,coef2),axis=0)
fig, ax = plt.subplots(figsize=(15,2))
a1 = sns.heatmap(all_coefs_full, ax=ax, xticklabels=new_names, yticklabels=['lupts','baseline'], \
                 annot=all_coefs,annot_kws={"fontsize":12}, cbar=None)
a1.tick_params(axis='x', labelsize=18)
a1.tick_params(axis='y', labelsize=20)
for item in a1.get_xticklabels():
    item.set_rotation(90)
for item in a1.get_yticklabels():
    item.set_rotation(40)
# Rectangles highlight selected coefficient cells; coordinates are in heatmap
# cell units (x, y, width, height), row 0 = lupts, row 1 = baseline.
left, bottom, width, height = (16.9,0,1.1,1)
rect=mpatches.Rectangle((left,bottom),width,height,
                        fill=False,
                        color="darkblue",
                        linewidth=4)
left, bottom, width, height = (7.9,0,1.1,1)
rect2=mpatches.Rectangle((left,bottom),width,height,
                         fill=False,
                         color="darkblue",
                         linewidth=4)
left, bottom, width, height = (29.9,1,1.1,1)
rect3=mpatches.Rectangle((left,bottom),width,height,
                         fill=False,
                         color="white",
                         linewidth=4)
left, bottom, width, height = (31.9,1,1.1,1)
rect4=mpatches.Rectangle((left,bottom),width,height,
                         fill=False,
                         color="white",
                         linewidth=4)
#facecolor="red")
fig.gca().add_patch(rect)
fig.gca().add_patch(rect2)
fig.gca().add_patch(rect3)
fig.gca().add_patch(rect4)
fig.savefig('./plots/heatmap_lupts_baseline.pdf',bbox_inches='tight')
|
notebooks/mm-pfs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
#Export
from uti.bert_interface import *
from uti.trainer import *
from pytorch_pretrained_bert.modeling import BertConfig, BertForSequenceClassification

# Preprocessed data root; BERT_Interface builds one dataset per entry returned
# by path.ls().
path = Path('/home/jupyter/insight_project/Project-M/data/preprocessed/')
path.ls()
test = BERT_Interface("bert-base-uncased",path.ls())
test.path
# test=True presumably runs pre-processing in a quick/sample mode -- confirm
# against uti.bert_interface.
test.pre_processing(test=True)
# # Test BERT tokenization
#
# Text pre-processing question: Should I keep \n\n format? Looks like BERT tokenization didn't take care of the \n\n case
test.data_list[0].train_ds[0]
test.data_list[0].train_ds.inner_df.iloc[0]['Body']
#Export
class BERT_Trainer(Trainer):
    """Trainer that plugs a pre-trained BERT sequence classifier into the
    generic Trainer workflow from uti.trainer."""

    def __init__(self, bert_interface, model='bert-base-uncased', num_labels=2):
        # pytorch_pretrained_bert forwards extra positional args of
        # from_pretrained to the model constructor, so `num_labels` ends up as
        # BertForSequenceClassification(config, num_labels).
        self.model = BertForSequenceClassification.from_pretrained(model, num_labels)
        super().__init__(bert_interface)

    def _create_leaner(self, data):
        # NOTE(review): name looks like a typo for "_create_learner" but is
        # kept -- presumably it overrides a hook of the same name in Trainer.
        learn = Learner(data, self.model,
                        model_dir=self.dest, metrics=[accuracy]
                        )
        if torch.cuda.is_available():
            # Mixed-precision training on GPU; the return value is discarded,
            # so this relies on to_fp16 mutating `learn` in place -- confirm.
            learn.to_fp16()
        return learn
# Train one classifier per dataset produced by the interface.
bert_trainer = BERT_Trainer(test)
# NOTE(review): "clasifier" spelling presumably matches the method name
# defined in Trainer -- confirm before renaming.
bert_trainer.train_individual_clasifier()
# Export the cells tagged #Export into a reusable module.
from notebook2script import *
notebook2script('Test_BERT_Trainer.ipynb','bert_trainer')
|
jupyter_notebooks/Test_BERT_Trainer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
#import sklearn package details
from sklearn.model_selection import train_test_split #GridSearchCV is imported later from sklearn.model_selection
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
#load the dataset for Breast Cancer Detection
# using the UCI repository data
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
df = pd.read_csv(url)
df.head()
# giving the names of the cols as the data has no col. names. There are 11 cols including the ID
names = ['id','clump_thickness','uniform_cell_size','uniform_cell_shape',
         'marginal_adhesion','single_epithelial_size','bare_nuclei',
         'bland_chromatin','normal_nucleoli','mitoses','class']
df = pd.read_csv(url, names=names)
df.head()
#performing data exploration to understand and preprocess the data
#using a heat map to see if there are blanks in the dataset
#sns.heatmap(df.isnull(), yticklabels = False, cbar = False, cmap = 'viridis') -- df.info() also shows there are no nulls
df.info()
#bare_nuclei col has some items that are ?. Need to be replaced
df.isin(['?']).any()
df[df.eq('?').any(1)] #.eq is same as == operator
# replacing ? with -99999 (sentinel value treated as an obvious outlier by the models)
df.replace('?',-99999, inplace = True)
df.isin(['?']).any()
#print the shape of the dataset
print(df.shape)
#lets drop the id col. as ML wont be needing this
df.drop(['id'],axis = 1, inplace = True)
df.head()
print(df.shape)
###PERFORMING DATA VISUALIZATION
print(df.iloc[0]) #shows first row of the dataset. Class value = 2 Benign, 4 = Malignant
print(df.describe()) # if I show include ='all' then it will also include bare_nuclei col as that has NaN values
#Good part is all the features are standardized between 1 and 10. So, I can directly use KNN without using StandardScaler
#Plotting histogram for each variable or col
df.hist(figsize =(10,10))
plt.show()
# +
#sns.pairplot(df)
#scatter_matrix(df,figsize = (18,18))
temp = df.drop('bare_nuclei',axis = 1) #pairplot was erroring for bare_nuclei as it has imputed values of -99999
sns.pairplot(temp)
#data does not seem to be having any standout relationship between the features to classify
#the cell class to be malignant or benign
# +
#Implementing the ML models: features X, target y (the class column)
X = df.drop('class',axis =1)
y = df['class']
# -
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=8)
#IMPLEMENTING KNN
knn = KNeighborsClassifier(n_neighbors =1)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print (classification_report (y_test, pred))
#Using elbow method to predict k value for better results
# Try k = 1..39 and record the misclassification rate on the test set for each.
error_rate=[]
for i in range(1,40):
    knn = KNeighborsClassifier(n_neighbors =i)
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10,8))
plt.plot(range(1,40), error_rate, color = 'blue',linestyle ='--',marker='o')
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
#From above seems like 11 is a good value for K. Using K=11 and checking for the results -> f1 score improved from 95 to 97
knn = KNeighborsClassifier(n_neighbors =11)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print (classification_report (y_test, pred))
#IMPLEMENTING SVC
model = SVC()
model.fit(X_train, y_train)
pred_svc = model.predict(X_test)
print (confusion_matrix (y_test, pred_svc))
print ('\n')
print (classification_report (y_test, pred_svc))
#KNN has better results vs SVC. Lets try to tune C and gamma values and see if performance improves.
#Precision has a much lower score
from sklearn.model_selection import GridSearchCV
# Exhaustive grid search over SVC regularization (C) and RBF kernel width (gamma).
param_grid = {'C':[0.1,1,10,100,1000], 'gamma':[1,0.1,0.01,0.001,0.0001]}
grid = GridSearchCV(SVC(),param_grid,verbose =3)
grid.fit(X_train, y_train)
grid.best_params_
grid.best_estimator_
grid_predictions = grid.predict(X_test)
print (confusion_matrix (y_test, grid_predictions))
print ('\n')
print (classification_report (y_test, grid_predictions))
# +
##Now we see SVM is very close to KNN
|
Breast Cancer Detection_SVM_KNN/Breast_Cancer Detection_SVM_KNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0q2xE5tMYBKo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="e6344363-d5ef-46df-9217-abc44da90e32" executionInfo={"status": "ok", "timestamp": 1583591051454, "user_tz": -60, "elapsed": 13048, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# + id="5hOwG_k3YTbI" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
import eli5
from eli5.sklearn import PermutationImportance
# + id="LQoJAJfPZAct" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cfea2dd5-10e4-4ae6-f025-a7946ed62355" executionInfo={"status": "ok", "timestamp": 1583591245781, "user_tz": -60, "elapsed": 1872, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
# cd "/content/drive/My Drive/Colab Notebooks/Matrix_DW/matrix_two/dw_matrix_car"
# + id="B3LFAMxfZL1G" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="31df5f56-6764-4d86-d495-d06764e14dc2" executionInfo={"status": "ok", "timestamp": 1583591377530, "user_tz": -60, "elapsed": 17164, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df = pd.read_hdf('data/car.h5')
df.shape
# + [markdown] id="dzikbeCGbi0s" colab_type="text"
# ## Feature Engineering
# + id="xJr_52LdZoNr" colab_type="code" colab={}
SUFFIX_CAT = '__cat'
# Label-encode every non-list column: columns already suffixed with __cat are
# overwritten in place, all others get a new "<name>__cat" companion column.
# (Iterating df.columns while adding columns is safe here because the loop
# iterates over the Index snapshot taken at loop start.)
for feat in df.columns:
    if isinstance(df[feat][0], list): continue
    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[ feat + SUFFIX_CAT] = factorized_values
# + id="SrcuWxDsbt8G" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d20c8f91-b806-4845-c5da-23189dee93e8" executionInfo={"status": "ok", "timestamp": 1583591928051, "user_tz": -60, "elapsed": 1118, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
# Use all encoded columns as features, excluding anything price-related
# (the target -- keeping it would leak the label).
cat_feats = [x for x in df.columns if SUFFIX_CAT in x ]
cat_feats = [x for x in cat_feats if 'price' not in x ]
len(cat_feats)
# + id="C3Wt__6KbyjJ" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate `model` on the global df using columns `feats`.

    Returns (mean, std) of the negative mean absolute error over 3 folds.
    """
    features = df[feats].values
    target = df['price_value'].values
    fold_scores = cross_val_score(model, features, target, cv=3, scoring='neg_mean_absolute_error')
    return np.mean(fold_scores), np.std(fold_scores)
# + [markdown] id="HC3mQwwjjLpT" colab_type="text"
# ## DecisionTree
# + id="Sg49477Xb2Ef" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8cb4a316-a521-4924-f2cc-576520453243" executionInfo={"status": "ok", "timestamp": 1583593839055, "user_tz": -60, "elapsed": 4181, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
run_model( DecisionTreeRegressor(max_depth=5), cat_feats )
# + [markdown] id="jg7FN5-bjTd3" colab_type="text"
# ## RandomForest
# + id="RQ6khaxpjEXf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d37a5522-5aad-437a-92e5-2fcb7e43870c" executionInfo={"status": "ok", "timestamp": 1583594050705, "user_tz": -60, "elapsed": 87855, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
run_model( RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0), cat_feats )
# + [markdown] id="EAHNKx5HjrQu" colab_type="text"
# ##XGBoost
# + id="2oyB5Qubjjs1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="1f461522-52b5-4dc8-c045-f50619d83c9f" executionInfo={"status": "ok", "timestamp": 1583594281727, "user_tz": -60, "elapsed": 57078, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
xgb_params ={
'max_depth': 5,
'n_estimators': 50,
'learning_rate': 0.1,
'seed': 0
}
model = xgb.XGBRegressor(**xgb_params)
run_model(model, cat_feats)
# + id="szUuWBYYkjZO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 416} outputId="a52f9f13-ae7d-4e8f-836a-283116748348" executionInfo={"status": "ok", "timestamp": 1583594754508, "user_tz": -60, "elapsed": 350947, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
m = xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0)
# NOTE(review): X and y are not defined at notebook top level -- they are
# locals of run_model above. This cell presumably relied on leftover session
# globals; define X = df[cat_feats].values and y = df['price_value'].values
# before running it fresh.
m.fit(X, y)
# Permutation importance: how much each feature's shuffling hurts the fit.
imp = PermutationImportance(m, random_state=0).fit(X,y)
eli5.show_weights(imp, feature_names=cat_feats)
# + id="D4t4Rv6clPPn" colab_type="code" colab={}
feats = [
"param_napęd__cat",
"param_rok-produkcji__cat",
"param_stan__cat",
"param_skrzynia-biegów__cat",
"param_faktura-vat__cat",
"param_moc__cat",
"param_marka-pojazdu__cat",
"feature_kamera-cofania__cat",
"param_typ__cat",
"param_pojemność-skokowa__cat",
"seller_name__cat",
"feature_wspomaganie-kierownicy__cat",
"param_model-pojazdu__cat",
"param_wersja__cat",
"param_kod-silnika__cat",
"feature_system-start-stop__cat",
"feature_asystent-pasa-ruchu__cat",
"feature_czujniki-parkowania-przednie__cat",
"feature_łopatki-zmiany-biegów__cat",
"feature_regulowane-zawieszenie__cat"]
# + id="WSYixwjJnwcB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="5766fc25-6778-4b65-ff31-b42b792d1d53" executionInfo={"status": "ok", "timestamp": 1583595262332, "user_tz": -60, "elapsed": 13210, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + id="-TNOIMQGodkH" colab_type="code" colab={}
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x) )
# + id="eCKDMvVGqaHh" colab_type="code" colab={}
feats2 = [
"param_napęd__cat",
"param_rok-produkcji",
"param_stan__cat",
"param_skrzynia-biegów__cat",
"param_faktura-vat__cat",
"param_moc__cat",
"param_marka-pojazdu__cat",
"feature_kamera-cofania__cat",
"param_typ__cat",
"param_pojemność-skokowa__cat",
"seller_name__cat",
"feature_wspomaganie-kierownicy__cat",
"param_model-pojazdu__cat",
"param_wersja__cat",
"param_kod-silnika__cat",
"feature_system-start-stop__cat",
"feature_asystent-pasa-ruchu__cat",
"feature_czujniki-parkowania-przednie__cat",
"feature_łopatki-zmiany-biegów__cat",
"feature_regulowane-zawieszenie__cat"]
# + id="LPXUaZtys-6u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="25b46cdb-165f-4454-fae6-769c7af5a677" executionInfo={"status": "ok", "timestamp": 1583596468765, "user_tz": -60, "elapsed": 13819, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
run_model(xgb.XGBRegressor(**xgb_params), feats2)
# + id="FT4ZBFyPtD_V" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="eb9630b5-7e64-41b5-e071-f40b2f80c0cc" executionInfo={"status": "ok", "timestamp": 1583596533393, "user_tz": -60, "elapsed": 1182, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df['param_moc'].unique()
# + id="I_Kaqy_wtWiK" colab_type="code" colab={}
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
# + id="jX0ovdsJuXKh" colab_type="code" colab={}
feats3 = [
"param_napęd__cat",
"param_rok-produkcji",
"param_stan__cat",
"param_skrzynia-biegów__cat",
"param_faktura-vat__cat",
"param_moc",
"param_marka-pojazdu__cat",
"feature_kamera-cofania__cat",
"param_typ__cat",
"param_pojemność-skokowa__cat",
"seller_name__cat",
"feature_wspomaganie-kierownicy__cat",
"param_model-pojazdu__cat",
"param_wersja__cat",
"param_kod-silnika__cat",
"feature_system-start-stop__cat",
"feature_asystent-pasa-ruchu__cat",
"feature_czujniki-parkowania-przednie__cat",
"feature_łopatki-zmiany-biegów__cat",
"feature_regulowane-zawieszenie__cat"]
# + id="Gx2Z_WhpuZOU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="f586a206-1e01-4e3c-e14c-f46133d90e47" executionInfo={"status": "ok", "timestamp": 1583596835863, "user_tz": -60, "elapsed": 13163, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
run_model(xgb.XGBRegressor(**xgb_params), feats3)
# + id="MppHOsSTudsu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="170b796b-c972-48a6-9c7a-6a596e25c1e5" executionInfo={"status": "ok", "timestamp": 1583597024754, "user_tz": -60, "elapsed": 1048, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df["param_pojemność-skokowa"].unique()
# + id="8IgeV5ZXvO6W" colab_type="code" colab={}
df["param_pojemność-skokowa"] = df["param_pojemność-skokowa"].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')))
# + id="FM0Jp7wPwBaY" colab_type="code" colab={}
feats4 = [
"param_napęd__cat",
"param_rok-produkcji",
"param_stan__cat",
"param_skrzynia-biegów__cat",
"param_faktura-vat__cat",
"param_moc",
"param_marka-pojazdu__cat",
"feature_kamera-cofania__cat",
"param_typ__cat",
"param_pojemność-skokowa",
"seller_name__cat",
"feature_wspomaganie-kierownicy__cat",
"param_model-pojazdu__cat",
"param_wersja__cat",
"param_kod-silnika__cat",
"feature_system-start-stop__cat",
"feature_asystent-pasa-ruchu__cat",
"feature_czujniki-parkowania-przednie__cat",
"feature_łopatki-zmiany-biegów__cat",
"feature_regulowane-zawieszenie__cat"]
# + id="7yfPzmHEwi8Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="e2150074-596e-42b1-c71a-0f07bf5593bd" executionInfo={"status": "ok", "timestamp": 1583597483740, "user_tz": -60, "elapsed": 12656, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
run_model(xgb.XGBRegressor(**xgb_params), feats4)
# + id="hZYw5N19wnMx" colab_type="code" colab={}
|
day4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.0 64-bit (''padrao'': pyenv)'
# language: python
# name: python3
# ---
# A recommender system is an ML algorithm that combines several computational techniques to select personalized items based on user interests.
#
# The most common recommender systems are:
# * Content-based filtering System: produces recommendations that focus on item attributes and gives recommendations based on similarities.
# * Collaborative filtering System: produces recommendations based on knowledge of attitudes toward items. This algorithm uses the "mass knowledge" to recommend items. Furthermore, collaborative filtering can be broken down into:
#     * Collaborative memory filtering
#     * Collaborative filtering by model
#
# If you need more information about this technique, please check this [link](https://towardsdatascience.com/introduction-to-recommender-systems-1-971bd274f421)
# # Imports
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set_style('white')
# -
# MovieLens ratings: tab-separated file with no header, so column names are supplied explicitly.
column_name = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('data/u.data', sep='\t', names=column_name)
df.head(10)
# Lookup table mapping item_id to a human-readable movie title.
movie_title = pd.read_csv('data/Movie_Id_Titles')
movie_title.head(5)
# Attach the title to every rating row (inner join on item_id).
df = pd.merge(df, movie_title, on='item_id')
df.head()
# # Data Exploratory Analysis
# Top-10 movies by mean rating (small-sample titles dominate here).
df.groupby('title')['rating'].mean().sort_values(ascending=False).head(10)
# Top-10 movies by number of ratings (popularity).
df.groupby('title')['rating'].count().sort_values(ascending=False).head(10)
# Summary frame: one row per title with its mean rating...
ratings = pd.DataFrame(df.groupby('title')['rating'].mean())
ratings.head()
# ...plus the number of ratings it received.
ratings['count'] = pd.DataFrame(df.groupby('title')['rating'].count())
ratings.head()
ratings.shape
# Distribution of rating counts per movie (x: number of users, y: number of movies).
plt.figure(figsize=(18,8))
ratings['count'].hist(bins=70)
plt.xlabel('Número de Usuários')
plt.ylabel('Número de Filmes');
# Distribution of mean ratings.
plt.figure(figsize=(18,8))
ratings['rating'].hist(bins=70);
# Joint distribution of mean rating vs. popularity.
plt.figure(figsize=(10,10))
sns.jointplot(x='rating', y='count', data=ratings, alpha=0.4, height=8);
# Observa-se que há um aumento no rating conforme aumenta-se o número de contagem (usuários que assitiram), o que é esperado uma vez que quanto maior o número de avaliações mais possível é que o filme seja assistido por outros
# # Model Construction
# User-item matrix: rows are users, columns are movie titles, values are ratings
# (NaN where a user did not rate a movie).
moviemat = df.pivot_table(index='user_id', columns='title', values='rating')
moviemat.head(10)
ratings.sort_values('count', ascending=False)
# Rating vectors for the two reference movies used as recommendation seeds.
starwars_user_ratings = moviemat['Star Wars (1977)']
liarliar_user_ratings = moviemat['Liar Liar (1997)']
# Pearson correlation of every movie's ratings against each seed movie.
similar_to_starwars = moviemat.corrwith(starwars_user_ratings)
similar_to_liarliar = moviemat.corrwith(liarliar_user_ratings)
corr_starwars = pd.DataFrame(similar_to_starwars, columns=['Correlation'])
# Drop movies with no overlapping raters (correlation undefined).
corr_starwars.dropna(inplace=True)
corr_starwars.head()
corr_starwars.sort_values('Correlation', ascending=False).head(30)
# Join the popularity count so rarely-rated movies can be filtered out.
corr_starwars = corr_starwars.join(ratings['count'])
corr_starwars.head(10)
# Recommendations: highly correlated movies with at least 100 ratings.
corr_starwars[corr_starwars['count']>100].sort_values('Correlation', ascending=False).head(15)
# Same pipeline for the second seed movie.
corr_liarliar = pd.DataFrame(similar_to_liarliar, columns=['Correlation'])
corr_liarliar.dropna(inplace=True)
corr_liarliar.head()
corr_liarliar.sort_values('Correlation', ascending=False)
corr_liarliar = corr_liarliar.join(ratings['count'])
corr_liarliar[corr_liarliar['count']>100].sort_values('Correlation', ascending=False).head(15)
|
09_recommender_systems.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Linear Programming with PuLP
#
# ## Try me
# [](https://colab.research.google.com/github/ffraile/operations-research-notebooks/blob/main/docs/source/CLP/libraries/Python%20PuLP%20Tutorial.ipynb)[](https://mybinder.org/v2/gh/ffraile/operations-research-notebooks/main?labpath=docs%2Fsource%2FCLP%2Flibraries%2FPython%20PuLP%20Tutorial.ipynb)
#
# ## Requirements
# ### Install in your environment
# #### Pip Installation
# The simplest way to install PuLP in your environment is using [pip](https://pypi.org/project/pip/). If you have installed
# Python and pip in your environment, just open a terminal and try:
#
# ```
# pip install pulp
# ```
# #### Conda Installation
# If you use Conda, open a Conda Terminal and try:
#
# ```
# conda install -c conda-forge pulp
# ```
#
# #### Google Colabs installation
# Run the following code cell to try this notebook in Google Colabs:
# + pycharm={"name": "#%%\n"}
# !pip install pulp
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Binder installation
# Run the following code cell to try this notebook in Binder:
#
# + pycharm={"name": "#%%\n"}
# !pip install pulp
# !pip install pandas
# !pip install numpy
# -
# ## Linear Optimisation with PuLP
# In this tutorial, we will learn to model and solve Linear Programming Problems using the Python open source Linear Programming library [PuLP](http://pythonhosted.org/PuLP/). PuLP can be installed using Conda, as described [here](https://anaconda.org/conda-forge/pulp).
#
# To guide this example, we will use a simple LPP formulated in class:
#
# maximise $z = 300x + 250y$
#
# Subject to:
#
# $2x + y \leq 40$
# $x + 3y \leq 45$
# $x \leq 12$
#
# Let´s start importing the library PuLP to solve linear programs
import pulp
# We are going to use panda to display the results as tables using Panda
import pandas as pd
#And we will use numpy to perform array operations
import numpy as np
#We will use display and Markdown to format the output of code cells as Markdown
from IPython.display import display, Markdown
# ### Problem Class LpProblem
# PuLP uses *classes* providing different methods to model and solve LPPs. The class that will contain our model is the **LpProblem** class. To create a new LpProblem we use the pulp LpProblem function:
#
# - **LpProblem(name='None', sense=1):** Creates a new Linear Programming Problem. The parameter name (default 'None') assigns a name to the problem. The parameter sense (either pulp.LpMinimize or pulp.LpMaximize) sets the type of objective function. The default is minimize.
#
# Let us create an instance for our problem:
# Create the LP problem instance; LpMaximize makes the objective a maximisation.
model = pulp.LpProblem("Production_Mix_example", pulp.LpMaximize) #this will create an instance of an LP Maximise problem
# ### Variable class LpVariable
# The definition of a LPP program with PuLP is very similar to the standard procedure used to model a problem. First, we need to define the unknown variables in our problem. For this purpose we use the class **LpVariable**. The function LpVariable allows us to create a variable:
#
# - **LpVariable(name, lowBound=None, upBound=None, cat='Continuous', e=None):** Creates an instance of variable with the following properties:
# - **Name:** The name of the variable to be used in the solution.
# - **lowBound:** The lower bound of the variable, the default is unrestricted (-Inf).
# - **upBound:** The upper bound of the variable. The default is unrestricted (Inf).
# - **cat:** Either 'Continuous' for continuous variables, 'Binary' for binary variables or 'Integer' for Integer variables. We will see in detail binary and integer variables in the course unit for Mixed Integer Programming, but now you know that you will be able to model and solve this type of problems with PuLP. The default is 'Continuous'.
# - **e:** This parameter is outside the scope of this course and can be neglected for now.
#
# We can define the variables of our problem using the LpVariable function:
#
# ```python
# x = pulp.LpVariable('x', lowBound=0, cat='Continuous')
# y = pulp.LpVariable('y', lowBound=0, cat='Continuous')
# ```
#
# Note however that using this function, we need a line of code for every unknown. This simply does not scale up. What if we have hundreds of unknowns? Luckily for us, PuLP provides a convenient method to write more efficient codes for our program, the **LpVariable.dicts** method, which basically allows us to create a set of variables with the same category, upper bounds and lower bounds at once:
#
# - **LpVariable.dicts(name, indexs, lowBound=None, upBound=None, cat='Continuous')**: Creates a dictionary containing variables of type cat (default 'Continuous'), indexed with the keys contained in the *iterable* index and bounded by lowBound (default -Inf) and upBound (default Inf).
#
# For instance, we can write the same code as:
# Names of the decision variables; these strings become the dictionary keys.
variable_names = ('x', 'y')
# One continuous, non-negative LpVariable per name. LpVariable.dicts accepts
# any iterable of keys, so the tuple is passed directly.
variables = pulp.LpVariable.dicts(
    "vars", variable_names, lowBound=0, cat='Continuous')
# Notice that we have created a tuple with the variable names and then created a dictionary with the actual variables that we will use in our model. We will be able to get the variables from the dictionary using the names as keys. This way, if we had for instance 20 variables, we could still create them only with two lines of code.
# ### Adding expressions
# In PuLP, both objective function and constraints are *expressions* (algebraic expressions containing variables) that have to be added to the instance problem using the standard operand '+='. For instance, to add the objective function in this example, we could write:
#
# ```python
# model += 300 * x + 250 * y, "Profit"
# ```
# With this line of code, we have added a new expression with name "Profit" that multiplies the technological coefficients to the variables X and Y (as defined in the code snippet in the previous section). This is the simplest way to create a expression, but it is clear that it is not the most scalable way, since we need to add a new term to the summation manually for every variable. PuLP provides a convenient function, *lpSum* to achieve this result programmatically. *lpSum* takes an array of expressions and returns the summation of the elements in the array. Let us see it action:
# Technological coefficients of the objective: profit per unit of x and y.
coefficients = [300, 250]
# The objective is added to the model as `model += expression, name`.
# zip pairs each coefficient with its variable name and lpSum builds the
# linear expression 300*x + 250*y from the resulting terms.
model += pulp.lpSum(
    [coef * variables[name] for coef, name in zip(coefficients, variable_names)]
), "Profit"
# Notice that we have used **list comprehension** to create the array passed to the lpSum function using an index array. In this case we have created an array of length equal to the number of variables using the functions **range** and **length**.
# We can follow the same method to add constraints. For instance, the simplest way to add the constraint in our example is:
# ```python
# # And the constraints
# model += 2 * X + Y <= 40, "Man Power"
# model += X + 3 * Y <= 45, "Machine Operating Time"
# model += X <=12, "Marketing"
# ```
# However, we can see that this is not the most scalable alternative there is, since we will need to add a new line of code every constraint. There is another approach, to put our data in iterable objects and use list comprehension and for loops to define the constraints:
# +
# Constraint data. Matrix A holds the technological coefficients, one row per
# constraint...
A = [[2, 1],  # Man Power
     [1, 3],  # Machine Operating Time
     [1, 0]]  # Marketing
# ...and vector b holds the corresponding right-hand-side limits.
b = [40, 45, 12]
# We also need names for the constraints, used when reporting the solution.
constraint_names = ['Man Power', 'Machine Operating Time', 'Marketing']
# Constraints are added as `model += expression, name`. zip walks the rows of
# A, the limits in b and the names in lockstep; lpSum builds each left-hand
# side. Note that every constraint here is of type less-or-equal.
for row, limit, label in zip(A, b, constraint_names):
    model += pulp.lpSum(
        [coef * variables[name] for coef, name in zip(row, variable_names)]
    ) <= limit, label
# -
# Now that we have created our model, we can get the solution just by calling the method **solve()**. The status of the solution can be read in the LpStatus attribute:
# Solve the problem with the default solver and report the status
# ('Optimal', 'Infeasible', 'Unbounded', ...).
model.solve()
pulp.LpStatus[model.status]
# Now, let us display the solution in a nice table using Pandas. We are going to first display the solution value using markdown and then we will use Pandas to create a table with the results.
# +
# Objective value at the optimum.
max_z = pulp.value(model.objective)
# We use display and Markdown to show the value as rendered markdown.
display(Markdown("The value of the objective function is **%.2f**"%max_z))
# Print our decision variable values.
display(Markdown("The following tables show the values obtained: "))
# Build a dataframe from the variables dictionary so the variable names become
# the index and the solver outputs become columns.
var_df = pd.DataFrame.from_dict(variables, orient="index",
                                columns = ["Variables"])
# Optimal value of each variable, formatted to two decimals.
var_df["Solution"] = var_df["Variables"].apply(lambda item: "{:.2f}".format(float(item.varValue)))
# Reduced cost of each variable (attribute `dj` on a pulp variable).
var_df["Reduced cost"] = var_df["Variables"].apply(lambda item: "{:.2f}".format(float(item.dj)))
display(var_df)
# Dictionary of constraint name -> pulp constraint object.
const_dict = dict(model.constraints)
# Records from the dictionary; the Expression column is excluded for a more
# compact table.
con_df = pd.DataFrame.from_records(list(const_dict.items()), exclude=["Expression"], columns=["Constraint", "Expression"])
# Right-hand side (pulp stores it negated in `constant`), slack and shadow price.
con_df["Right Hand Side"] = con_df["Constraint"].apply(lambda item: "{:.2f}".format(-const_dict[item].constant))
con_df["Slack"] = con_df["Constraint"].apply(lambda item: "{:.2f}".format(const_dict[item].slack))
con_df["Shadow Price"] = con_df["Constraint"].apply(lambda item: "{:.2f}".format(const_dict[item].pi))
display(con_df)
# -
|
docs/source/CLP/libraries/Python PuLP Tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creacion Panel con data de Cortes de Apelaciones
# +
import pandas as pd
import numpy as np
import warnings
# Bokeh Library
from bokeh.io import show, output_notebook, output_file
from bokeh.models import ColumnDataSource, FactorRange, CheckboxButtonGroup, CustomJS, DataTable, DateFormatter, TableColumn
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.models.widgets import Tabs, Panel
# -
# Render the Bokeh output to a standalone HTML file.
output_file('ingresos_terminos.html',
            title='Ingresos y Terminos por Cortes de Apelaciones - 2015 al 2019')
path_processed = "../data/processed/pjud"
# Load the consolidated case data...
df_causas = pd.read_feather(f"{path_processed}/consolidated_Fulldata_Causa.feather")
# ...and the list of courts/tribunals.
df_cortes = pd.read_feather(f"{path_processed}/processes_ListadoTribunalesyCortes.feather")
# Unique appellate court names.
cortes = df_cortes['CORTE'].unique().tolist()
# +
# Yearly case intake per appellate court: count distinct tribunal_rit values
# within every (corte, año_ingreso) group.
ingresos_por_cortes = [
    [corte, año, len(grupo['tribunal_rit'].unique())]
    for (corte, año), grupo in df_causas.groupby(by=['corte','año_ingreso'])
]
df_ingresos_cortes = pd.DataFrame(ingresos_por_cortes, columns=['corte','año_ingreso','total_causas'])
# +
# Same aggregation for case terminations, grouped by año_termino instead.
terminos_por_cortes = [
    [corte, año, len(grupo['tribunal_rit'].unique())]
    for (corte, año), grupo in df_causas.groupby(by=['corte','año_termino'])
]
df_terminos_cortes = pd.DataFrame(terminos_por_cortes, columns=['corte','año_termino','total_causas'])
# +
# Build the data dictionaries used for plotting: one list of per-court totals
# per year, for intakes and terminations.
cortes = df_causas.corte.unique().tolist()
data_ingresos ={}
data_terminos ={}
data_ingresos['cortes'] = cortes
data_terminos['cortes'] = cortes
years = []
años = range(2015,2020)
for año in años:
    ingreso_año = []
    termino_año = []
    years.append(str(año))
    for corte in cortes:
        # Query strings select one (court, year) cell of the aggregate tables.
        criterio_ingresos = f"corte == '{corte}' and año_ingreso == {año}"
        criterio_terminos = f"corte == '{corte}' and año_termino == {año}"
        # NOTE(review): .query(...).total_causas returns a pandas Series (possibly
        # empty), not a scalar — presumably Bokeh tolerates this downstream, but
        # confirm; .iloc[0] or .sum() may have been intended.
        total_ingreso = df_ingresos_cortes.query(criterio_ingresos).total_causas
        total_termino = df_terminos_cortes.query(criterio_terminos).total_causas
        ingreso_año.append(total_ingreso)
        termino_año.append(total_termino)
    data_ingresos[f'{año}'] = ingreso_año
    data_terminos[f'{año}'] = termino_año
# One bar colour per year (5 years -> 5 colours).
palette = ["#c9d9d3", "#718dbf", "#e84d60", "#CC8dbf", "#AB4d60"]
# +
#output_file("CORTES DE APELACIONES_INGRESOS_CAUSAS_PENALES_2015_AL_2019.html") Si deseo guardar el grafo debo d
# Intake chart: grouped vbar with a (court, year) categorical x-axis.
x = [ (corte.replace("C.A. DE ",""), year) for corte in cortes for year in years ]
# Interleave the yearly lists court-by-court to match the x factors.
counts = sum(zip(data_ingresos['2015'], data_ingresos['2016'], data_ingresos['2017'], data_ingresos['2018'], data_ingresos['2019']), ()) # like an hstack
source = ColumnDataSource(data=dict(x=x, counts=counts))
plot_ingreso = figure(x_range=FactorRange(*x), plot_height=800, plot_width=1600, title="CORTES DE APELACIONES - INGRESOS CAUSAS PENALES - 2015 AL 2019",
           toolbar_location=None, tools="hover", tooltips="INGRESOS C.A. @x: @counts", y_axis_type="linear", y_range=(10000,150000))
# Colour bars by year (the second element of each x factor).
plot_ingreso.vbar(x='x', top='counts', width=0.7, source=source, line_color="white",
       fill_color=factor_cmap('x', palette=palette, factors=years, start=1, end=2))
plot_ingreso.y_range.start = 0
plot_ingreso.x_range.range_padding = 0
plot_ingreso.xaxis.major_label_orientation = 1
plot_ingreso.xgrid.grid_line_color = None
# Termination chart: identical layout built from the termination dictionary.
x = [ (corte.replace("C.A. DE ",""), year) for corte in cortes for year in years ]
counts = sum(zip(data_terminos['2015'], data_terminos['2016'], data_terminos['2017'], data_terminos['2018'], data_terminos['2019']), ()) # like an hstack
source = ColumnDataSource(data=dict(x=x, counts=counts))
plot_termino = figure(x_range=FactorRange(*x), plot_height=800, plot_width=1600, title="CORTES DE APELACIONES - TERMINOS CAUSAS PENALES - 2015 AL 2019",
           toolbar_location=None, tools="hover", tooltips="TÉRMINOS C.A. @x: @counts", y_axis_type="linear", y_range=(10000,150000))
plot_termino.vbar(x='x', top='counts', width=0.7, source=source, line_color="white",
       fill_color=factor_cmap('x', palette=palette, factors=years, start=1, end=2))
plot_termino.y_range.start = 0
plot_termino.x_range.range_padding = 0
plot_termino.xaxis.major_label_orientation = 1
plot_termino.xgrid.grid_line_color = None
# -
# ## Creacion TABLA
# +
# Column sources for the two data tables (intakes and terminations).
data_ingreso = dict(
    corte = df_ingresos_cortes['corte'],
    año = df_ingresos_cortes['año_ingreso'],
    total = df_ingresos_cortes['total_causas']
)
data_termino = dict(
    corte = df_terminos_cortes['corte'],
    año = df_terminos_cortes['año_termino'],
    total = df_terminos_cortes['total_causas']
)
source_ingreso = ColumnDataSource(data_ingreso)
source_termino = ColumnDataSource(data_termino)
# Table column definitions; field names match the dict keys above.
columns_ingreso = [
    TableColumn(field="corte", title="Corte"),
    TableColumn(field="año", title="Año Ingreso"),
    TableColumn(field="total", title="Total Causas"),
]
columns_termino = [
    TableColumn(field="corte", title="Corte"),
    TableColumn(field="año", title="Año Termino"),
    TableColumn(field="total", title="Total Causas"),
]
data_table_ingreso = DataTable(source=source_ingreso, columns=columns_ingreso, width=600, height=2800)
data_table_termino = DataTable(source=source_termino, columns=columns_termino, width=600, height=2800)
# +
# Wrap each figure and table in its own Panel so they can share a tabbed layout.
ingreso_panel_grafo = Panel(child=plot_ingreso, title='GRÁFICO INGRESOS CAUSAS')
termino_panel_grafo = Panel(child=plot_termino, title='GRÁFICO TERMINOS CAUSAS')
ingreso_panel_table = Panel(child=data_table_ingreso, title='NUMEROS INGRESOS CAUSAS')
termino_panel_table = Panel(child=data_table_termino, title='NUMEROS TERMINOS CAUSAS')
# Collect the four panels into a Tabs widget and render the tabbed layout.
panel_list = [ingreso_panel_grafo, termino_panel_grafo,
              ingreso_panel_table, termino_panel_table]
tabs = Tabs(tabs=panel_list)
show(tabs)
# -
|
reports/01_reports.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Urban morphometrics
#
# Morphometric assessment measures a wide range of characters of urban form to derive a complex description of built-up patterns composed of enclosed tessellation, buildings and street network.
#
# All algorithms used within this notebook are part of `momepy` Python toolkit and can be used from there. We have extracted them from `momepy`, adapted for `dask` and `pygeos` and used in raw form tailored directly to our use case. The algorithms which were enhanced are pushed back to momepy and will be part of `momepy` 0.4.0.
#
# All steps within this notebook are parallelised using `dask`. The first part, which measures aspects of individual elements (does not require to know the context) uses pre-release of `dask-geopandas`. The rest uses `dask` to manage parallel iteration over geo-chunks with single-core algorithms.
#
# Some functions are imported from a `momepy_utils.py` file stored wihtin this directory. Those are either helper functions taken directly from momepy or their enhanced versions, all which will be included in the next release of momepy:
#
# - `get_edge_ratios` is implemented in momepy 0.4.0 as `get_network_ratio`
# - `get_nodes` is included in `get_node_id`
# - remaining functions have been used to refactor existing momepy classes.
#
#
# ## Individual elements
#
# Note: Requires dask-geopandas and current master of geopandas to support dask version.
# +
# # !pip install git+git://github.com/jsignell/dask-geopandas.git
# # !pip install git+git://github.com/geopandas/geopandas.git
# +
import time
import warnings
from time import time
import dask.dataframe as dd
import dask_geopandas as dask_geopandas
import geopandas
import libpysal
import momepy
import networkx as nx
import numpy as np
import pandas as pd
import pygeos
import scipy
from dask.distributed import Client, LocalCluster, as_completed
from libpysal.weights import Queen
from momepy_utils import (
_circle_radius,
centroid_corner,
elongation,
get_corners,
get_edge_ratios,
get_nodes,
solar_orientation_poly,
squareness,
)
# -
# We are using a single machine within this notebook with 14 cores, so we start a local dask cluster with 14 workers.
client = Client(LocalCluster(n_workers=14))
client
# `dask-geopandas` is still under development and raises a few warnings at the moment, all of which can be ignored.
warnings.filterwarnings('ignore', message='.*initial implementation of Parquet.*')
warnings.filterwarnings('ignore', message='.*Assigning CRS to a GeoDataFrame without a geometry*')
# ### Measuring buildings and enclosed cells
#
# In the first step, we iterate over geo-chunks, merge enclosed tessellation and buildings to a single `geopandas.GeoDataFrame` and convert it to `dask.GeoDataFrame`. The rest of the code is mostly an extraction from momepy source code adapted for dask.
# Per-chunk morphometric characters for buildings and enclosed tessellation.
# For each of the 103 geo-chunks: load buildings + tessellation, merge on uID,
# compute the characters below lazily with dask, materialise, and save.
# NOTE(review): the original iterated `tqdm(range(103), total=103)` but tqdm is
# never imported in this notebook (NameError); a plain range with a progress
# print is used instead. Likewise `time.sleep(5)` failed because the
# file-level `from time import time` shadows the `time` module, so `sleep` is
# imported directly here.
from time import sleep

for chunk_id in range(103):
    print(f"chunk {chunk_id + 1}/103", end="\r")
    # Load data and merge them together
    blg = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/buildings/blg_{chunk_id}.pq")
    tess = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/tessellation/tess_{chunk_id}.pq")
    blg = blg.rename_geometry('buildings')
    tess = tess.rename_geometry('tessellation')
    df = tess.merge(blg, on='uID', how='left')

    # Convert to dask.GeoDataFrame
    ddf = dask_geopandas.from_geopandas(df, npartitions=14)

    ## Measure morphometric characters
    # Building area
    ddf['sdbAre'] = ddf.buildings.area
    # Building perimeter
    ddf['sdbPer'] = ddf.buildings.length
    # Courtyard area: exterior-ring area minus footprint area
    exterior_area = ddf.buildings.map_partitions(lambda series: pygeos.area(pygeos.polygons(series.exterior.values.data)), meta='float')
    ddf['sdbCoA'] = exterior_area - ddf['sdbAre']
    # Circular compactness (area over area of the enclosing circle)
    hull = ddf.buildings.convex_hull.exterior
    radius = hull.apply(lambda g: _circle_radius(list(g.coords)) if g is not None else None, meta='float')
    ddf['ssbCCo'] = ddf['sdbAre'] / (np.pi * radius ** 2)
    # Corners
    ddf['ssbCor'] = ddf.buildings.apply(lambda g: get_corners(g), meta='float')
    # Squareness
    ddf['ssbSqu'] = ddf.buildings.apply(lambda g: squareness(g), meta='float')
    # Equivalent rectangular index
    bbox = ddf.buildings.apply(lambda g: g.minimum_rotated_rectangle if g is not None else None, meta=geopandas.GeoSeries())
    ddf['ssbERI'] = (ddf['sdbAre'] / bbox.area).pow(1./2) * (bbox.length / ddf['sdbPer'])
    # Elongation
    ddf['ssbElo'] = bbox.map_partitions(lambda s: elongation(s), meta='float')

    # Centroid corner mean distance and deviation (two columns at once)
    def _centroid_corner(series):
        ccd = series.apply(lambda g: centroid_corner(g))
        return pd.DataFrame(ccd.to_list(), index=series.index)

    ddf[['ssbCCM', 'ssbCCD']] = ddf.buildings.map_partitions(_centroid_corner, meta=pd.DataFrame({0: [0.1], 1: [1.1]}))

    # Solar orientation
    ddf['stbOri'] = bbox.apply(lambda g: solar_orientation_poly(g), meta='float')
    # Tessellation longest axis length (diameter of the enclosing circle)
    hull = ddf.tessellation.convex_hull.exterior
    ddf['sdcLAL'] = hull.apply(lambda g: _circle_radius(list(g.coords)), meta='float') * 2
    # Tessellation area
    ddf['sdcAre'] = ddf.tessellation.area
    # Circular compactness
    radius = hull.apply(lambda g: _circle_radius(list(g.coords)), meta='float')
    ddf['sscCCo'] = ddf['sdcAre'] / (np.pi * radius ** 2)
    # Equivalent rectangular index
    bbox = ddf.tessellation.apply(lambda g: g.minimum_rotated_rectangle, meta=geopandas.GeoSeries())
    ddf['sscERI'] = (ddf['sdcAre'] / bbox.area).pow(1./2) * (bbox.length / ddf.tessellation.length)
    # Solar orientation
    ddf['stcOri'] = bbox.apply(lambda g: solar_orientation_poly(g), meta='float')
    # Covered area ratio
    ddf['sicCAR'] = ddf['sdbAre'] / ddf['sdcAre']
    # Building-cell alignment
    ddf['stbCeA'] = (ddf['stbOri'] - ddf['stcOri']).abs()

    # Compute all characters using dask
    df = ddf.compute()

    # Save to parquet file
    df.to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")

    # Restart workers between chunks to release memory; the short pause lets
    # the cluster settle. (Indentation was lost in the source; restarting per
    # chunk matches the memory-management intent — TODO confirm against the
    # original notebook.)
    client.restart()
    sleep(5)
# ### Measuring enclosures
#
# All enclosures are loaded as a single dask.GeoDataFrame and measured at once.
# +
# %%time
# Load all enclosures lazily as a single dask GeoDataFrame.
encl = dask_geopandas.read_parquet("../../urbangrammar_samba/spatial_signatures/enclosures/encl_*.pq")
# Area
encl['ldeAre'] = encl.geometry.area
# Perimeter
encl['ldePer'] = encl.geometry.length
# Circular compactness (area over area of the enclosing circle)
hull = encl.geometry.convex_hull.exterior
radius = hull.apply(lambda g: _circle_radius(list(g.coords)) if g is not None else None, meta='float')
encl['lseCCo'] = encl['ldeAre'] / (np.pi * radius ** 2)
# Equivalent rectangular index
bbox = encl.geometry.apply(lambda g: g.minimum_rotated_rectangle if g is not None else None, meta=geopandas.GeoSeries())
encl['lseERI'] = (encl['ldeAre'] / bbox.area).pow(1./2) * (bbox.length / encl['ldePer'])
# Compactness-weighted axis
longest_axis = hull.apply(lambda g: _circle_radius(list(g.coords)), meta='float') * 2
encl['lseCWA'] = longest_axis * ((4 / np.pi) - (16 * encl['ldeAre']) / ((encl['ldePer']) ** 2))
# Solar orientation
encl['lteOri'] = bbox.apply(lambda g: solar_orientation_poly(g), meta='float')
# Materialise the lazy computation into a geopandas.GeoDataFrame.
encl_df = encl.compute()
# Weighted number of neighbours: count of intersecting enclosures (minus self)
# normalised by perimeter.
inp, res = encl_df.sindex.query_bulk(encl_df.geometry, predicate='intersects')
indices, counts = np.unique(inp, return_counts=True)
encl_df['neighbors'] = counts - 1
encl_df['lteWNB'] = encl_df['neighbors'] / encl_df['ldePer']
# Load the complete enclosed tessellation as a dask DataFrame.
tess = dd.read_parquet("../../urbangrammar_samba/spatial_signatures/tessellation/tess_*.pq")
# Weighted number of cells per enclosure: cell count (via the geometry column
# of the per-enclosure counts) normalised by enclosure area.
encl_counts = tess.groupby('enclosureID').count().compute()
merged = encl_df[['enclosureID', 'ldeAre']].merge(encl_counts[['geometry']], how='left', on='enclosureID')
encl_df['lieWCe'] = merged['geometry'] / merged['ldeAre']
# Save the characters (geometry dropped) to parquet.
encl_df.drop(columns='geometry').to_parquet("../../urbangrammar_samba/spatial_signatures/morphometrics/enclosures.pq")
# -
# We can now close dask client.
client.close()
# ## Generate spatial weights (W)
#
# Subsequent steps will require understanding of the context of each tessellation cell in a form of spatial weights matrices (Queen contiguity and Queen contiguty of inclusive 3rd order). We generate them beforehand and store as `npz` files representing sparse matrix.
#
# Each geo-chunk is loaded together with relevant cross-chunk tessellation cells (to avoid edge effect). We use dask to parallelise the iteration. Number of workers is smaller now to ensure enough memory for each chunk.
# Fewer workers than cores: each chunk plus its neighbours must fit in memory.
workers = 8
client = Client(LocalCluster(n_workers=workers, threads_per_worker=1))
client
# First we have to specify a function doing the processing itself, where the only attribure is the `chunk_id`.
def generate_w(chunk_id: int) -> str:
    """Build and persist Queen contiguity matrices for one geo-chunk.

    Loads the chunk's tessellation cells plus the neighbouring cells from
    adjacent chunks (to avoid edge effects at chunk borders), builds the
    first-order Queen contiguity and its inclusive 3rd-order variant, and
    saves both as sparse ``.npz`` files.

    NOTE(review): relies on the module-level ``cross_chunk`` dataframe,
    which is only defined later in this file — confirm the notebook cells
    are executed in the order this function requires.
    """
    # load cells of a chunk
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    # add neighbouring cells from other chunks
    cross_chunk_cells = []
    for chunk, inds in cross_chunk.loc[chunk_id].indices.iteritems():
        add_cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk}.pq").iloc[inds]
        cross_chunk_cells.append(add_cells)
    df = cells.append(pd.concat(cross_chunk_cells, ignore_index=True), ignore_index=True)
    # first-order Queen contiguity on the tessellation geometry
    w = libpysal.weights.Queen.from_dataframe(df, geom_col='tessellation')
    # inclusive contiguity up to 3 topological steps
    w3 = momepy.sw_high(k=3, weights=w)
    scipy.sparse.save_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w_{chunk_id}.npz", w.sparse)
    scipy.sparse.save_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w3_{chunk_id}.npz", w3.sparse)
    return f"Chunk {chunk_id} processed sucessfully."
# Then we use dask to iterate over all 103 chunks. The following script sends first 8 chunks to dask together and then submits a new chunk as soon as any of previous finishes (courtesy of <NAME>). That way we process only 8 chunks at once ensuring that we the cluster will not run out of memory.
# %%time
# Bounded-concurrency scheduling: submit `workers` chunks up front, then top
# up with one new chunk each time a running one finishes, so at most `workers`
# chunks are in flight at any moment.
inputs = iter(range(103))
futures = [client.submit(generate_w, next(inputs)) for i in range(workers)]
ac = as_completed(futures)
for finished_future in ac:
    # submit new future
    try:
        new_future = client.submit(generate_w, next(inputs))
        ac.add(new_future)
    except StopIteration:
        # all 103 chunks submitted; just drain the remaining futures
        pass
    print(finished_future.result())
client.close()
# ## Spatial distribution and network analysis
#
# To measure spatial distribution of we use single-core algorithm and parallelise iteration.
# Fresh local cluster for the spatial-distribution / network stage.
workers = 8
client = Client(LocalCluster(n_workers=workers, threads_per_worker=1))
client
# We will need to load street network data from PostGIS datatabase, so we establish a connection which will be used within the loop.
# +
# Chunk cross-reference and chunk geometries used by the processing loop below.
cross_chunk = pd.read_parquet('../../urbangrammar_samba/spatial_signatures/cross-chunk_indices.pq')
chunks = geopandas.read_parquet('../../urbangrammar_samba/spatial_signatures/local_auth_chunks.pq')

# Database credentials come from the environment so they are never hard-coded.
# NOTE(review): `os` is not imported in this notebook's import cell, so it is
# imported here to keep the cell self-contained. The original password line
# was corrupted (`os.<PASSWORD>`); restored to match the sibling variables.
import os

user = os.environ.get('DB_USER')
pwd = os.environ.get('DB_PWD')
host = os.environ.get('DB_HOST')
port = os.environ.get('DB_PORT')
db_connection_url = f"postgres+psycopg2://{user}:{pwd}@{host}:{port}/built_env"
# -
# Within the same function below we measure spatial distribution of elements and network-based characters.
def measure(chunk_id):
    """Measure spatial-distribution and street-network morphometric characters
    for one chunk of enclosed tessellation cells.

    Parameters
    ----------
    chunk_id : int
        Index of the chunk (0-102) to process.

    Returns
    -------
    str
        Status message; results are written to parquet files as a side effect
        (cells, edges and nodes parquet files for this chunk).

    Notes
    -----
    Relies on module-level objects: ``cross_chunk``, ``chunks``,
    ``db_connection_url`` and ``street_profile``.
    NOTE(review): ``blg`` (used in the street-profile call below) is not
    defined in this part of the file — confirm it exists at call time.
    """
    # load cells of a chunk
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    cells['keep'] = True
    # add neighbouring cells from other chunks
    # (cross-chunk neighbours are needed so spatial lags at chunk borders are
    # correct; they are flagged keep=False and dropped before saving)
    cross_chunk_cells = []
    for chunk, inds in cross_chunk.loc[chunk_id].indices.iteritems():
        add_cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk}.pq").iloc[inds]
        add_cells['keep'] = False
        cross_chunk_cells.append(add_cells)
    df = cells.append(pd.concat(cross_chunk_cells, ignore_index=True), ignore_index=True)
    # read W (first-order contiguity weights precomputed for the extended chunk)
    w = libpysal.weights.WSP(scipy.sparse.load_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w_{chunk_id}.npz")).to_W()
    # alignment: mean absolute deviation of orientation from neighbours
    def alignment(x, orientation='stbOri'):
        orientations = df[orientation].iloc[w.neighbors[x]]
        return abs(orientations - df[orientation].iloc[x]).mean()
    df['mtbAli'] = [alignment(x) for x in range(len(df))]
    # mean neighbour distance (NaN where the cell has no building)
    def neighbor_distance(x):
        geom = df.buildings.iloc[x]
        if geom is None:
            return np.nan
        return df.buildings.iloc[w.neighbors[x]].distance(df.buildings.iloc[x]).mean()
    df['mtbNDi'] = [neighbor_distance(x) for x in range(len(df))]
    # weighted neighbours (neighbour count normalised by tessellation perimeter)
    df['mtcWNe'] = pd.Series([w.cardinalities[x] for x in range(len(df))], index=df.index) / df.tessellation.length
    # area covered by neighbours (cell itself included)
    def area_covered(x, area='sdcAre'):
        neighbours = [x]
        neighbours += w.neighbors[x]
        return df[area].iloc[neighbours].sum()
    df['mdcAre'] = [area_covered(x) for x in range(len(df))]
    # read W3 (inclusive third-order contiguity weights)
    w3 = libpysal.weights.WSP(scipy.sparse.load_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w3_{chunk_id}.npz")).to_W()
    # weighted reached enclosures: unique enclosures per unit of reached area
    def weighted_reached_enclosures(x, area='sdcAre', enclosure_id='enclosureID'):
        neighbours = [x]
        neighbours += w3.neighbors[x]
        vicinity = df[[area, enclosure_id]].iloc[neighbours]
        return vicinity[enclosure_id].unique().shape[0] / vicinity[area].sum()
    df['ltcWRE'] = [weighted_reached_enclosures(x) for x in range(len(df))]
    # mean interbuilding distance
    # define adjacency list from lipysal
    adj_list = w.to_adjlist(remove_symmetric=False)
    adj_list["weight"] = (
        df.buildings.iloc[adj_list.focal]
        .reset_index(drop=True)
        .distance(df.buildings.iloc[adj_list.neighbor].reset_index(drop=True)).values
    )
    G = nx.from_pandas_edgelist(
        adj_list, source="focal", target="neighbor", edge_attr="weight"
    )
    ibd = []
    for i in range(len(df)):
        # mean edge weight within the 3-step ego graph; NaN for isolated nodes
        try:
            sub = nx.ego_graph(G, i, radius=3)
            ibd.append(np.nanmean([x[-1] for x in list(sub.edges.data('weight'))]))
        except:
            ibd.append(np.nan)
    df['ltbIBD'] = ibd
    # Reached neighbors and area on 3 topological steps on tessellation
    df['ltcRea'] = [w3.cardinalities[i] for i in range(len(df))]
    df['ltcAre'] = [df.sdcAre.iloc[w3.neighbors[i]].sum() for i in range(len(df))]
    # Save cells to parquet keeping only within-chunk data not the additional neighboring
    df[df['keep']].drop(columns=['keep']).to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    # Load street network for an extended chunk area
    chunk_area = chunks.geometry.iloc[chunk_id].buffer(5000)  # we extend the area by 5km to minimise edge effect
    engine = create_engine(db_connection_url)
    sql = f"SELECT * FROM openroads_200803_topological WHERE ST_Intersects(geometry, ST_GeomFromText('{chunk_area.wkt}',27700))"
    streets = geopandas.read_postgis(sql, engine, geom_col='geometry')
    # Street profile (measures width, width deviation and openness)
    sp = street_profile(streets, blg)
    streets['sdsSPW'] = sp[0]
    streets['sdsSWD'] = sp[1]
    streets['sdsSPO'] = sp[2]
    # Street segment length
    streets['sdsLen'] = streets.length
    # Street segment linearity
    streets['sssLin'] = momepy.Linearity(streets).series
    # Convert geopadnas.GeoDataFrame to networkx.Graph for network analysis
    G = momepy.gdf_to_nx(streets)
    # Node degree
    G = momepy.node_degree(G)
    # Subgraph analysis (meshedness, proportion of 0, 3 and 4 way intersections, local closeness)
    G = momepy.subgraph(
        G,
        radius=5,
        meshedness=True,
        cds_length=False,
        mode="sum",
        degree="degree",
        length="mm_len",
        mean_node_degree=False,
        proportion={0: True, 3: True, 4: True},
        cyclomatic=False,
        edge_node_ratio=False,
        gamma=False,
        local_closeness=True,
        closeness_weight="mm_len",
        verbose=False
    )
    # Cul-de-sac length
    G = momepy.cds_length(G, radius=3, name="ldsCDL", verbose=False)
    # Square clustering
    G = momepy.clustering(G, name="xcnSCl")
    # Mean node distance
    G = momepy.mean_node_dist(G, name="mtdMDi", verbose=False)
    # Convert networkx.Graph back to GeoDataFrames and W (denoting relationships between nodes)
    nodes, edges, sw = momepy.nx_to_gdf(G, spatial_weights=True)
    # Generate inclusive higher order weights
    edges_w3 = momepy.sw_high(k=3, gdf=edges)
    # Mean segment length
    edges["ldsMSL"] = momepy.SegmentsLength(edges, spatial_weights=edges_w3, mean=True, verbose=False).series
    # Generate inclusive higher order weights
    nodes_w5 = momepy.sw_high(k=5, weights=sw)
    # Node density
    nodes["lddNDe"] = momepy.NodeDensity(nodes, edges, nodes_w5, verbose=False).series
    # Weighter node density
    nodes["linWID"] = momepy.NodeDensity(nodes, edges, nodes_w5, weighted=True, node_degree="degree", verbose=False).series
    # Save to parquets
    edges.to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/edges/edges_{chunk_id}.pq")
    nodes.to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/nodes/nodes_{chunk_id}.pq")
    return f"Chunk {chunk_id} processed sucessfully."
# Again we use dask to iterate over all 103 chunks. The following script sends first 8 chunks to dask together and then submits a new chunk as soon as any of previous finishes. That way we process only 8 chunks at once ensuring that we the cluster will not run out of memory.
# Feed chunk ids to the dask cluster, keeping at most `workers` tasks in
# flight: a new chunk is submitted each time a previous one completes.
# NOTE(review): `workers` and `client` must be defined earlier in the
# notebook (outside this excerpt) — confirm.
inputs = iter(range(103))
futures = [client.submit(measure, next(inputs)) for i in range(workers)]
ac = as_completed(futures)
for finished_future in ac:
    # submit new future
    try:
        new_future = client.submit(measure, next(inputs))
        ac.add(new_future)
    except StopIteration:
        # all 103 chunks submitted; just drain the remaining futures
        pass
    print(finished_future.result())
client.close()
# ## Link elements together
#
# For the further analysis, we need to link data measured on individual elements together. We link cells to edges based on the proportion of overlap (if a cell intersects more than one edge) and nodes based on proximity (with a restriction - node has to be on linked edge). Enclosures are linked based on enclosure ID.
#
# As above, we define a single-core function and use dask to manage parallel iteration.
def link(chunk_id):
    """Attach edge- and node-based characters to the cells of one chunk.

    Cells are linked to edges by proportion of overlap (``edgeID`` is a dict
    mapping edge index -> weight) and to nodes via the helper ``get_nodes``.
    Edge characters are transferred as overlap-weighted sums; node characters
    via a left merge on ``nodeID``.  The updated cells are written back to the
    chunk's parquet file.

    Parameters
    ----------
    chunk_id : int
        Index of the chunk (0-102) to process.

    Returns
    -------
    str
        Status message including elapsed time.

    Notes
    -----
    NOTE(review): ``get_edge_ratios`` and ``get_nodes`` are project helpers
    defined outside this excerpt — confirm their availability on workers.
    """
    s = time()
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    edges = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/edges/edges_{chunk_id}.pq")
    nodes = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/nodes/nodes_{chunk_id}.pq")
    cells['edgeID'] = get_edge_ratios(cells, edges)
    cells['nodeID'] = get_nodes(cells, nodes, edges, 'nodeID', 'edgeID', 'node_start', 'node_end')
    characters = ['sdsSPW', 'sdsSWD', 'sdsSPO', 'sdsLen', 'sssLin', 'ldsMSL']
    l = []
    for d in cells.edgeID:
        # weighted sum of edge characters, weights = overlap proportions
        l.append((edges.iloc[list(d.keys())][characters].multiply(list(d.values()), axis='rows')).sum(axis=0))
    cells[characters] = pd.DataFrame(l, index=cells.index)
    cells = cells.merge(nodes.drop(columns=['geometry']), on='nodeID', how='left')
    cells = cells.rename({'degree': 'mtdDeg', 'meshedness': 'lcdMes', 'proportion_3': 'linP3W', 'proportion_4': 'linP4W',
                          'proportion_0': 'linPDE', 'local_closeness': 'lcnClo'}, axis='columns')
    # dicts do not serialise to parquet; split them into parallel key/value lists
    cells['edgeID_keys'] = cells.edgeID.apply(lambda d: list(d.keys()))
    cells['edgeID_values'] = cells.edgeID.apply(lambda d: list(d.values()))
    cells.drop(columns='edgeID').to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    return f"Chunk {chunk_id} processed sucessfully in {time() - s} seconds."
# Start a fresh local dask cluster (linking is lighter than measuring, so
# more workers fit in memory) and stream the 103 chunks through `link`.
workers = 14
client = Client(LocalCluster(n_workers=workers, threads_per_worker=1))
client
# %%time
inputs = iter(range(103))
futures = [client.submit(link, next(inputs)) for i in range(workers)]
ac = as_completed(futures)
for finished_future in ac:
    # submit new future
    try:
        new_future = client.submit(link, next(inputs))
        ac.add(new_future)
    except StopIteration:
        # all chunks submitted; drain the rest
        pass
    print(finished_future.result())
client.close()
# Enclosures are linked via simple attribute join and since the operation is does not require any computation, it is done as a simple loop.
# Enclosure characters are linked by a plain attribute join on enclosureID;
# cheap enough to run sequentially, chunk by chunk.
enclosures = pd.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/enclosures.pq")
for chunk_id in range(103):
    s = time()
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    cells = cells.merge(enclosures.drop(columns=['neighbors']), on='enclosureID', how='left')
    cells.to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    print(f"Chunk {chunk_id} processed sucessfully in {time() - s} seconds.")
# ## Inter-element characters
#
# The remaining morphometric characters are based on a relations between multiple elements. The implementation mirrors the approach above.
# Smaller cluster for the memory-heavier inter-element measurements.
workers = 8
client = Client(LocalCluster(n_workers=workers, threads_per_worker=1))
client
def measure(chunk_id):
    """Measure inter-element morphometric characters for one chunk.

    Computes characters that relate cells, edges and nodes to each other
    (street alignment, area per edge, buildings per metre, reached cells and
    areas at 1 and 3 topological steps) and writes the enriched cells back
    to the chunk's parquet file.

    Parameters
    ----------
    chunk_id : int
        Index of the chunk (0-102) to process.

    Returns
    -------
    str
        Status message including elapsed time.
    """
    s = time()
    # Load data
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    edges = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/edges/edges_{chunk_id}.pq")
    nodes = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/nodes/nodes_{chunk_id}.pq")
    # Street Alignment (each cell is aligned to its dominant edge, i.e. the
    # edge with the largest overlap proportion)
    edges['orient'] = momepy.Orientation(edges, verbose=False).series
    edges['edgeID'] = range(len(edges))
    keys = cells.edgeID_values.apply(lambda a: np.argmax(a))
    cells['edgeID_primary'] = [inds[i] for inds, i in zip(cells.edgeID_keys, keys)]
    cells['stbSAl'] = momepy.StreetAlignment(cells,
                                             edges,
                                             'stbOri',
                                             left_network_id='edgeID_primary',
                                             right_network_id='edgeID').series
    # Area Covered by each edge (vals: edge index -> list of cell positions)
    vals = {x:[] for x in range(len(edges))}
    for i, keys in enumerate(cells.edgeID_keys):
        for k in keys:
            vals[k].append(i)
    area_sums = []
    for inds in vals.values():
        area_sums.append(cells.sdcAre.iloc[inds].sum())
    edges['sdsAre'] = area_sums
    # Building per meter
    bpm = []
    for inds, l in zip(vals.values(), edges.sdsLen):
        bpm.append(cells.buildings.iloc[inds].notna().sum() / l if len(inds) > 0 else 0)
    edges['sisBpM'] = bpm
    # Cell area attached to each node
    nodes['sddAre'] = nodes.nodeID.apply(lambda nid: cells[cells.nodeID == nid].sdcAre.sum())
    # Area covered by neighboring edges + count of reached cells
    edges_W = Queen.from_dataframe(edges)
    areas = []
    reached_cells = []
    for i in range(len(edges)):
        neighbors = [i] + edges_W.neighbors[i]
        # areas
        areas.append(edges.sdsAre.iloc[neighbors].sum())
        # reached cells
        ids = []
        for n in neighbors:
            ids += vals[n]
        reached_cells.append(len(set(ids)))
    edges['misCel'] = reached_cells
    edges['mdsAre'] = areas
    # Area covered by neighboring (3 steps) edges + count of reached cells
    edges_W3 = momepy.sw_high(k=3, weights=edges_W)
    areas = []
    reached_cells = []
    for i in range(len(edges)):
        neighbors = [i] + edges_W3.neighbors[i]
        # areas
        areas.append(edges.sdsAre.iloc[neighbors].sum())
        # reached cells
        ids = []
        for n in neighbors:
            ids += vals[n]
        reached_cells.append(len(set(ids)))
    edges['lisCel'] = reached_cells
    edges['ldsAre'] = areas
    # Link together
    e_to_link = ['sdsAre', 'sisBpM', 'misCel', 'mdsAre', 'lisCel', 'ldsAre']
    n_to_link = 'sddAre'
    cells = cells.merge(nodes[['nodeID', 'sddAre']], on='nodeID', how='left')
    l = []
    for keys, values in zip(cells.edgeID_keys, cells.edgeID_values):
        l.append((edges.iloc[keys][e_to_link].multiply(values, axis='rows')).sum(axis=0)) # weighted by the proportion
    cells[e_to_link] = pd.DataFrame(l, index=cells.index)
    # Reached neighbors and area on 3 topological steps on tessellation
    cells['keep'] = True
    # add neighbouring cells from other chunks
    cross_chunk_cells = []
    for chunk, inds in cross_chunk.loc[chunk_id].indices.iteritems():
        add_cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk}.pq").iloc[inds]
        add_cells['keep'] = False
        cross_chunk_cells.append(add_cells)
    df = cells.append(pd.concat(cross_chunk_cells, ignore_index=True), ignore_index=True)
    w3 = libpysal.weights.WSP(scipy.sparse.load_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w3_{chunk_id}.npz")).to_W()
    # Reached cells in 3 topological steps
    df['ltcRea'] = [w3.cardinalities[i] for i in range(len(df))]
    # Reached area in 3 topological steps
    df['ltcAre'] = [df.sdcAre.iloc[w3.neighbors[i]].sum() for i in range(len(df))]
    # Save
    df[df['keep']].drop(columns=['keep']).to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    return f"Chunk {chunk_id} processed sucessfully in {time() - s} seconds."
# %%time
# Stream all 103 chunks through the inter-element `measure`, max `workers`
# concurrent tasks.
inputs = iter(range(103))
futures = [client.submit(measure, next(inputs)) for i in range(workers)]
ac = as_completed(futures)
for finished_future in ac:
    # submit new future
    try:
        new_future = client.submit(measure, next(inputs))
        ac.add(new_future)
    except StopIteration:
        # all chunks submitted; drain the rest
        pass
    print(finished_future.result())
client.close()
# At this point, all primary morphometric characters are measured and stored in a chunked parquet.
#
# ## Convolution
#
# Morphometric variables are an input of cluster analysis, which should result in delineation of spatial signatures. However, primary morphometric characters can't be used directly. We have to understand them in context. For that reason, we introduce a convolution step. Each of the characters above will be expressed as first, second (median) and third quartile within 3 topological steps on enclosed tessellation. Resulting convolutional data will be then used as an input of cluster analysis.
def convolute(chunk_id):
    """Express each primary character as Q1/median/Q3 within 3 topological
    steps on the tessellation (spatial convolution) for one chunk.

    Parameters
    ----------
    chunk_id : int
        Index of the chunk (0-102) to process.

    Returns
    -------
    str
        Status message; convolved values are saved to
        ``convolutions/conv_{chunk_id}.pq`` as a side effect.
    """
    s = time()
    cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk_id}.pq")
    cells['keep'] = True
    # add neighbouring cells from other chunks
    cross_chunk_cells = []
    for chunk, inds in cross_chunk.loc[chunk_id].indices.iteritems():
        add_cells = geopandas.read_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/cells/cells_{chunk}.pq").iloc[inds]
        add_cells['keep'] = False
        cross_chunk_cells.append(add_cells)
    df = cells.append(pd.concat(cross_chunk_cells, ignore_index=True), ignore_index=True)
    # read W (inclusive third-order weights)
    w = libpysal.weights.WSP(scipy.sparse.load_npz(f"../../urbangrammar_samba/spatial_signatures/weights/w3_{chunk_id}.npz")).to_W()
    # list characters (all morphometric columns follow the 6-letter naming scheme)
    characters = [x for x in df.columns if len(x) == 6]
    # prepare dictionary to store results
    convolutions = {}
    for c in characters:
        convolutions[c] = []
    # measure convolutions
    for i in range(len(df)):
        neighbours = [i]
        neighbours += w.neighbors[i]
        vicinity = df.iloc[neighbours]
        for c in characters:
            convolutions[c].append(np.nanpercentile(vicinity[c], [25, 50, 75], interpolation='midpoint'))
    # save convolutions to parquet file
    conv = pd.DataFrame(convolutions)
    exploded = pd.concat([pd.DataFrame(conv[c].to_list(), columns=[c + '_q1', c + '_q2',c + '_q3']) for c in characters], axis=1)
    exploded[df.keep].to_parquet(f"../../urbangrammar_samba/spatial_signatures/morphometrics/convolutions/conv_{chunk_id}.pq")
    return f"Chunk {chunk_id} processed sucessfully in {time() - s} seconds."
# Fresh cluster for the convolution step, then stream the 103 chunks through.
workers = 8
client = Client(LocalCluster(n_workers=workers, threads_per_worker=1))
client
# %%time
inputs = iter(range(103))
futures = [client.submit(convolute, next(inputs)) for i in range(workers)]
ac = as_completed(futures)
for finished_future in ac:
    # submit new future
    try:
        new_future = client.submit(convolute, next(inputs))
        ac.add(new_future)
    except StopIteration:
        # all chunks submitted; drain the rest
        pass
    print(finished_future.result())
|
docs/_sources/measuring/morphometrics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PAN - Atribuição Autoral - 2018 - Modelo Char
#
#
# %matplotlib inline
# +
#python basic libs
import os;
from os.path import join as pathjoin;
import zipfile;
import re;
import random;
import json;
from collections import defaultdict, Counter;
from pprint import pprint
from time import time
# +
#data analysis libs
import numpy as np;
import pandas as pd;
from pandas.plotting import scatter_matrix;
import matplotlib.pyplot as plt;
import seaborn as sns;
# +
#machine learning libs
#feature extraction
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import feature_selection;
#preprocessing and transformation
from sklearn import preprocessing;
from sklearn.preprocessing import MaxAbsScaler;
from sklearn.decomposition import PCA;
from sklearn.base import BaseEstimator, ClassifierMixin
#classifiers
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
#
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
#model valuation
from sklearn.model_selection import train_test_split;
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score;
# -
# NLP Libs
# +
# import spacy;
# import nltk;
# import gensim
# import logging
# from gensim.models import KeyedVectors
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.CRITICAL)
# -
from IPython.display import Markdown, display, HTML
# libraries configurations
# Silence sklearn's UndefinedMetricWarning (macro-F1 on classes with no
# predictions) and FutureWarnings; set compact numeric display defaults.
import warnings
from sklearn.exceptions import UndefinedMetricWarning
warnings.simplefilter(action='ignore', category=UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
np.set_printoptions(precision=4)
pd.options.display.float_format = '{:,.4f}'.format
sns.set(color_codes=True);
# +
import platform;
import sklearn;
import scipy;
Markdown(
"|%-15s|%-65s|\n"%("PACK","VERSION") +
"|%-15s|%-65s|\n"%('-'*15,'-'*65) +
('\n'.join(
"|%-15s|%-45s| " % (pack, version.strip())
for pack, version in
zip(['SO','NumPy','SciPy','Scikit-Learn','Seaborn'],
[platform.platform(), np.__version__, scipy.__version__,
sklearn.__version__, sns.__version__])
)))
# -
# NOTE(review): this cell repeats configuration already applied above
# (seaborn setup, print options) and prints library versions — harmless
# duplication left over from the notebook's exploratory history.
import seaborn as sns;
sns.set(color_codes=True);
import platform; print(platform.platform())
print("NumPy", np.__version__)
import scipy; print("SciPy", scipy.__version__)
import sklearn; print("Scikit-Learn", sklearn.__version__)
print("seaborn", sns.__version__)
np.set_printoptions(precision=4)
pd.options.display.float_format = '{:,.4f}'.format
from sklearnExtensions import DenseTransformer
# ### paths configuration
# +
# Corpus/path configuration: three corpora are available; `currentCorpus`
# selects which one this run uses.  The output directory is created lazily.
baseDir = '../';
corpusTraining =  'pan18-cross-domain-authorship-attribution-training-dataset-2017-12-02';
corpusEvaluation = 'pan18-cross-domain-authorship-attribution-test-dataset2-2018-04-20';
corpusEach1 = 'Lyrics_AA_PT';
currentCorpus = corpusTraining;
inputDir= pathjoin(baseDir,currentCorpus);
outputDir= pathjoin(baseDir,'out');
if not os.path.exists(outputDir):
    os.mkdir(outputDir);
# -
# ## loading the dataset
import pan  # project-local module with PAN-2018 dataset readers
# Each problem holds candidate (training) and unknown (test) documents.
problems = pan.readCollectionsOfProblemsFromZip(inputDir + '.zip');
problems[0]['problem']
pd.DataFrame(problems)[['problem','language','authorCount','candidates','unknown']]
# ### examinando o parametro min_df isoladamente
def runML(problem):
    """Grid-search the TF-IDF `min_df` parameter for one attribution problem.

    Builds a char n-gram TF-IDF -> PCA -> multinomial logistic-regression
    pipeline, tunes only ``vect__min_df`` with 3-fold CV (macro-F1), then
    evaluates the best estimator on the problem's unknown documents.

    Parameters
    ----------
    problem : dict
        One PAN problem with 'problem', 'language', 'candidates' and
        'unknown' entries (document/label/filename triples).

    Returns
    -------
    tuple
        (summary metrics dict, GridSearchCV.cv_results_, best parameters).

    Notes
    -----
    NOTE(review): ``iid=False`` was deprecated in sklearn 0.22 and removed
    in 0.24 — this code targets an older sklearn; confirm the pinned version.
    """
    print ("\nProblem: %s, language: %s, " %(problem['problem'],problem['language']), end=' ')
    train_docs, train_labels, _ = zip(*problem['candidates'])
    test_docs, test_labels, test_filename = zip(*problem['unknown'])
    pipeline = Pipeline([
        ('vect',   TfidfVectorizer(analyzer='char',
                                   min_df=0.05,
                                   max_df=1.0,
                                   norm='l1',
                                   ngram_range=(3,5),
                                   sublinear_tf=True,
                                   smooth_idf=True,
                                   lowercase =False)),
        ('dense',  DenseTransformer()),
        ('scaler', MaxAbsScaler()),
        ('transf', PCA(0.999)),
        ('clf',    LogisticRegression(random_state=0,multi_class='multinomial', solver='newton-cg')),
    ])
    # uncommenting more parameters will give better exploring power but will
    # increase processing time in a combinatorial way
    parameters = {
        'vect__min_df':(2,0.01,0.05,0.1)
    }
    grid_search = GridSearchCV(pipeline,
                               parameters,
                               cv=3,
                               iid=False,
                               verbose=False,
                               scoring='f1_macro'
                              )
    t0 = time()
    grid_search.fit(train_docs, train_labels)
    print("GridSearch: %0.3fs" % (time() - t0), end=' ')
    print("Best score: %0.3f" % grid_search.best_score_, end=' ')
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
    train_pred=grid_search.predict(train_docs);
    test_pred=grid_search.predict(test_docs);
    f1 = f1_score(test_labels,test_pred, average='macro')
    precision = precision_score(test_labels,test_pred, average='macro')
    recall = recall_score(test_labels,test_pred, average='macro')
    accuracy = accuracy_score(test_labels,test_pred)
    return {
        'problem'  : problem['problem'],
        "language" : problem['language'],
        'AuthorCount' : len(set(train_labels)),
        "train_doc_size": len(train_docs),
        "train_caract_per_doc": sum([len(l) for l in train_docs])/len(train_docs),
        "test_doc_size" : len(test_docs),
        "test_caract_per_doc": sum([len(l) for l in test_docs])/len(test_docs),
        'f1'       : round(f1,3),
        'precision': round(precision,3),
        'recall'   : round(recall,3),
        'accuracy' : round(accuracy,3),
    }, grid_search.cv_results_, best_parameters;
# Run the min_df grid search for every problem, collecting per-problem
# metrics, full CV tables and best parameter sets.
result = [];
cv_result = [];
best_parameters = [];
for problem in problems:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore");
        r, c, b = runML(problem);
        result.append(r);
        cv_result.append(c);
        b['problem'] = problem['problem'];
        best_parameters.append(b);
# +
# Flatten the CV tables, keep only the top-ranked rows, and inspect which
# min_df value won per problem.
for c,p in zip(cv_result, problems):
    c['problem'] = p['problem']
df = pd.concat([
    pd.DataFrame(c) for c in cv_result
])
df = df[df.rank_test_score == 1]
df = df[['problem', 'mean_test_score','std_test_score','param_vect__min_df']]
df.sort_values(['problem','mean_test_score','param_vect__min_df'])
# -
# ### analisando os demais parametros
def runML(problem):
    """Grid-search vectorizer and PCA parameters for one attribution problem.

    Builds a char n-gram TF-IDF -> MaxAbs scaling -> PCA -> multinomial
    logistic-regression pipeline and tunes n-gram range, idf/tf options,
    norm and the retained PCA variance with 3-fold CV (macro-F1), then
    evaluates the best estimator on the problem's unknown documents.

    Parameters
    ----------
    problem : dict
        One PAN problem with 'problem', 'language', 'candidates' and
        'unknown' entries (document/label/filename triples).

    Returns
    -------
    tuple
        (summary metrics dict, GridSearchCV.cv_results_, best parameters,
        fitted best estimator).

    Notes
    -----
    NOTE(review): ``iid=False`` was deprecated in sklearn 0.22 and removed
    in 0.24 — this code targets an older sklearn; confirm the pinned version.
    """
    print ("\nProblem: %s, language: %s, " %(problem['problem'],problem['language']), end=' ')
    train_docs, train_labels, _ = zip(*problem['candidates'])
    test_docs, test_labels, test_filename = zip(*problem['unknown'])
    pipeline = Pipeline([
        ('vect',   TfidfVectorizer(analyzer='char',
                                   min_df=0.01,
                                   max_df=1.0,
                                   norm='l1',
                                   lowercase =False)),
        ('scaler', MaxAbsScaler()),
        ('dense',  DenseTransformer()),
        ('transf', PCA()),
        ('clf', LogisticRegression(random_state=0,multi_class='multinomial', solver='newton-cg')),
    ])
    # uncommenting more parameters will give better exploring power but will
    # increase processing time in a combinatorial way
    parameters = {
        'vect__ngram_range' :((2,3),(2,4),(2,5)),
        'vect__use_idf'     :(True, False),
        'vect__sublinear_tf':(True, False),
        'vect__smooth_idf'  :(True, False),
        'vect__norm':('l1','l2'),
        'transf__n_components':(0.95,0.999),
    }
    grid_search = GridSearchCV(pipeline,
                               parameters,
                               cv=3,
                               iid=False,
                               n_jobs=-1,
                               verbose=False,
                               scoring='f1_macro')
    t0 = time()
    grid_search.fit(train_docs, train_labels)
    print("Gridsearh %0.3fs" % (time() - t0), end=' ')
    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    print("'"+problem['language']+"':{")
    for param_name in sorted(parameters.keys()):
        print("\t'%s': %r," % (param_name, best_parameters[param_name]))
    print("},")
    # fixed: the original predicted train/test twice in a row (copy-paste
    # duplication) — predicting once is sufficient and halves this cost
    train_pred=grid_search.predict(train_docs);
    test_pred=grid_search.predict(test_docs);
    f1        = f1_score       (test_labels,test_pred, average='macro');
    precision = precision_score(test_labels,test_pred, average='macro');
    recall    = recall_score   (test_labels,test_pred, average='macro');
    accuracy  = accuracy_score (test_labels,test_pred);
    def mean(a):
        # arithmetic mean of an iterable of counts
        return sum(a)/len(a);
    return {
        'problem'  : problem['problem'],
        "language" : problem['language'],
        'authorCount' : len(set(train_labels)),
        "train_doc_size": len(train_docs),
        "train_doc_per_author": mean(Counter(train_labels).values()) ,
        "train_caract_per_doc": sum([len(l) for l in train_docs])/len(train_docs),
        "test_doc_size" : len(test_docs),
        "test_doc_per_author": mean(Counter(test_labels).values()) ,
        "test_caract_per_doc": sum([len(l) for l in test_docs])/len(test_docs),
        'f1'       : round(f1,3),
        'precision': round(precision,3),
        'recall'   : round(recall,3),
        'accuracy' : round(accuracy,3),
    }, grid_search.cv_results_,best_parameters, grid_search.best_estimator_;
# Run the full grid search per problem; also keep each fitted best estimator
# for later inspection/pickling.
result = [];
cv_result = [];
best_parameters = [];
estimators = [];
for problem in problems:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore");
        r, c, b, e = runML(problem);
        result.append(r);
        cv_result.append(c);
        estimators.append(e);
        b['problem'] = problem['problem'];
        best_parameters.append(b);
# Per-problem summary table and overall mean F1.
df=pd.DataFrame(result)[['problem',
         "language",
         'authorCount',
         "train_doc_size",
         "test_doc_size",
         'f1','precision','recall' ,'accuracy']]
df
df[['f1']].mean()
# Mapping from PAN language codes to their Portuguese adjectives
# (used when labelling plots and tables).
languages = dict(
    zip(
        ['en', 'sp', 'it', 'pl', 'fr'],
        ['inglesa', 'espanhola', 'italiana', 'polonesa', 'francesa'],
    )
)
# +
# Assemble one tidy CV table across problems, rename the verbose sklearn
# parameter columns, and normalise the problem id / n_components values.
for c,p in zip(cv_result, problems):
    c['problem'] = p['problem']
    c['authorCount'] = p ['authorCount']
    c['language'] = p ['language']
dfCV = pd.concat([
    pd.DataFrame(c) for c in cv_result
])
params = {
 'param_vect__ngram_range':'ngram_range',
 'param_vect__sublinear_tf':'sublinear_tf',
 'param_vect__use_idf':'use_idf',
 'param_vect__smooth_idf':'smooth_idf',
 'param_vect__norm':'norm',
 'param_transf__n_components':'n_components'}
dfCV = dfCV[['problem','language', 'authorCount','rank_test_score', 'mean_test_score','std_test_score'] + list(params.keys())].rename(columns=params)
dfCV.problem = dfCV.problem.apply(lambda x: re.sub(r'\D','',x))
dfCV.n_components = dfCV.n_components.apply(lambda x: round(x*100,1) if x is not None else 100)
dfCV.sort_values(['problem','mean_test_score'],ascending=[True,False], inplace=True)
dfCV[dfCV.rank_test_score == 1]
# -
# -
# ## Saving the model
# Persist the CV table (zip-compressed CSV) and the fitted estimators (pickle).
dfCV.to_csv('PANAA2018_CHAR.csv', index=False,compression='zip')
dfCV = pd.read_csv('PANAA2018_CHAR.csv', na_values='',compression='zip')
import pickle;
with open("PAN_AA_2018_CHAR.pkl","wb") as f:
    pickle.dump(estimators,f)
def pandas_df_to_markdown_table(df):
    """Display *df* as a Markdown table in the notebook.

    A '---' separator row is prepended so the pipe-separated CSV dump is
    interpreted as a Markdown table header + body.
    """
    from IPython.display import Markdown, display
    separator_row = pd.DataFrame([['---'] * len(df.columns)], columns=df.columns)
    markdown_text = pd.concat([separator_row, df]).to_csv(sep="|", index=False)
    display(Markdown(markdown_text))
# +
def explainEstimator(est, top=10):
    """Summarise which char n-grams drive a fitted pipeline's decisions.

    Projects the classifier coefficients back through the PCA components to
    per-feature relevancies, then renders (a) the share of total relevancy
    per n-gram length and (b) the `top` most negative / most positive
    n-grams per class, both as Markdown tables.

    Parameters
    ----------
    est : sklearn Pipeline
        Fitted pipeline with 'vect', 'transf' (PCA) and 'clf' steps.
    top : int
        Number of extreme features to show at each end, per class.
    """
    #idf  = est.named_steps['vect'].idf_;
    vect   = {i:v for v,i in  est.named_steps['vect'].vocabulary_.items()};
    ngrams = {i:len(v) for v,i in  est.named_steps['vect'].vocabulary_.items()};
    print(est.named_steps['vect'].ngram_range)
    pca  = est.named_steps['transf'].components_;
    clf  = est.named_steps['clf'].coef_;
    classes = est.named_steps['clf'].classes_;
    # coefficient x component product maps class weights back to raw features;
    # each row is normalised to its own max absolute value
    relevancy = clf.dot(pca);
    relevancy = relevancy / np.abs(relevancy).max(axis=1).reshape(-1,1)
    #for v,c in zip(['pca','clf','rel'],[pca,clf,relevancy]):
    #    print(v,type(c),c.shape);
    nrels =[];
    bestFeatures = [];
    for _class, rel in zip(classes,relevancy):
        order = np.argsort(rel)
        order = np.hstack([order[0:top],order[-top:]]);
        bestFeatures.append([vect[i] for i in order]);
        nrel = [np.sum([abs(r) for i,r in enumerate(rel) if ngrams[i]==n]) for n in sorted(list(set(ngrams.values())))]
        nrels.append(nrel)
    nrels = np.array(nrels);
    nrels = nrels /nrels.sum(axis=1).reshape(-1,1);
    nrels = np.round(nrels*100,2);
    pandas_df_to_markdown_table(
        pd.DataFrame(nrels,index=classes,columns=sorted(list(set(ngrams.values())))).T.reset_index()
    )
    pandas_df_to_markdown_table(
        pd.DataFrame(np.array(bestFeatures).T, columns=classes).T.reset_index()
    )
# Inspect the second problem's fitted estimator as an example.
explainEstimator(estimators[1])
# -
# ## understanding the model with reports
# Podemos ver que para um mesmo problema mais de uma configuração é possível
# Heat-map of mean CV F1 across all parameter combinations, per problem.
with pd.option_context('display.precision', 4):
    html = (dfCV.pivot_table(
        index=['problem','language'],
        columns=['ngram_range','n_components','sublinear_tf','use_idf','smooth_idf','norm'],
        values='mean_test_score',aggfunc='mean'
    ).T.style.background_gradient(axis=0, cmap=plt.cm.Blues))
html
# +
# Emit the same pivot as a LaTeX table for the paper.
pd.options.display.precision = 3
print(u"\\begin{table}[h]\n\\centering\n\\caption{Medida F1 para os parâmetros }")
print(re.sub(r'[ ]{2,}',' ',dfCV.pivot_table(
       index=['problem','language'],
        columns=['ngram_range','n_components','sublinear_tf','smooth_idf','norm'],
        values='mean_test_score'
    ).to_latex()))
print ("\label{tab:modelocaracter}")
print(r"\end{table}")
# -
dfCV.ngram_range[0]
# +
# Prepare a plotting frame: Portuguese column labels, n-gram upper bound as
# an int, and std_test_score rescaled to serve as marker size.
d = dfCV.copy()
d = d.rename(columns={'language':u'Língua', 'sublinear_tf':'TF Sublinear'})
d = d [ d.norm.isna() == False]
d.problem = d.apply(lambda x: x[u'Língua'] +" "+ str(x[u'problem']), axis=1)
d.ngram_range = d.ngram_range.apply(lambda x: int(re.sub(r'(.*,)|\D',r'',x)))
#d.ngram_range = d.apply(lambda x: str(x[u'ngram_range'][0]) +" "+ str(x[u'ngram_range'][1]), axis=1)
d.std_test_score =d.std_test_score / d.std_test_score.quantile(0.95) *500;
d.std_test_score +=1;
d.std_test_score = d.std_test_score.astype(np.int64)
g = sns.FacetGrid(d, col='Língua', hue='TF Sublinear', row="norm", height=3,palette="Set1")
g.map(plt.scatter, "ngram_range", "mean_test_score",alpha=0.5, s=d.std_test_score.values/2).add_legend();
#sns.pairplot(d, hue="TF Sublinear", vars=["autorNumber", "mean_test_score"])
# -
g = sns.FacetGrid(d, row='authorCount', hue='TF Sublinear', col=u"Língua", height=3,palette="Set1")
g.map(plt.scatter, "ngram_range", "mean_test_score", alpha=0.5, s=d.std_test_score.values/2).add_legend();
sns.distplot(dfCV.std_test_score, bins=50);
import statsmodels.api as sm
# +
# Encode the CV results as numeric regressors and fit an OLS model of the
# mean F1 on the pipeline hyper-parameters and language dummies.
d = dfCV[dfCV.smooth_idf == False]
d = d[['mean_test_score','problem', 'language','sublinear_tf','norm','ngram_range','n_components','use_idf']].copy();
d.sublinear_tf=d.sublinear_tf.apply(lambda x: 1 if x else 0)
d.n_components=d.n_components.apply(lambda x: 0 if x == 95 else 1)
d.use_idf     =d.use_idf.apply(lambda x: 1 if x else 0)
d.norm        =d.norm.apply(lambda x: 1 if x == 'l1' else 0)
d.norm.fillna(value='None', inplace=True);
d.ngram_range = d.ngram_range.apply(lambda x: int(re.sub(r'(.*,)|\D',r'',x))).astype(np.uint8)
d.drop(columns=['problem'], inplace=True)
d['intercept'] = 1;
#d.mean_test_score = np.log(d.mean_test_score/(1-d.mean_test_score))
d=pd.get_dummies(d, columns=['language'])
# -
d.describe()
mod = sm.OLS( d.iloc[:,0], d.iloc[:,1:])
res = mod.fit()
res.summary()
# Residual distribution and predicted-vs-actual F1 diagnostic plots.
sns.distplot(res.predict()-d.iloc[:,0].values, bins=25);
sns.jointplot(x='F1',y='F1-estimated',data=pd.DataFrame({'F1':d.iloc[:,0].values, 'F1-estimated':res.predict()}));
|
2019/PAN_AA_2018-char.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import bayes_mvs as bayesest
import os
from szsimulator import Szsimulator
# %matplotlib inline
# -
# --- Simulation parameters -------------------------------------------------
mean_size = 3        # target mean cell size (micron)
doubling_time = 18   # population doubling time (min)
tmax = 180           # total simulated time (min)
sample_time = 2      # sampling period (min)
div_steps = 10       # number of division steps
ncells = 1000        # number of simulated cells

# Growth rate implied by the doubling time, and the per-unit-size rate of
# completing a division step.
gr = np.log(2) / doubling_time
kd = gr * div_steps / mean_size
# +
#EXAMPLE USING A GILLESPIE ALGORITHM.
# Simulates RNA and protein dynamics per cell with a Gillespie algorithm,
# delegating cell-size growth/division to the project's Szsimulator.
# Fills rarray/parray (concentrations), szarray (sizes) and tarray (times).
ncells = 2000
sampling_time = sample_time
rprom = 10 # RNA mean concentration
pprom = 1000 # prot mean concentration
gammar = 5*gr # RNA Active degradation rate
kr = rprom*(gr+gammar) # RNA transcription rate
kp = pprom*gr/rprom # Protein translation rate
pop = np.zeros([ncells,3])
# fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use the
# builtin int instead
indexes = int(tmax/sampling_time)
rarray = np.zeros([ncells,indexes])
parray = np.zeros([ncells,indexes])
tarray = np.zeros([indexes])
szarray = np.zeros([ncells,indexes])
cellindex = 0
indexref = 0
for cell in pop:
    if ncells > 100:
        # coarse progress indicator, printed every 10%
        if cellindex/ncells > indexref:
            print(str(int(100*cellindex/ncells))+"%")  # fixed: np.int -> int
            indexref += 0.1
    #Initialize the simulator
    sim = Szsimulator(tmax = tmax, sample_time = sample_time, ncells=1, gr = gr, k = kd, steps = div_steps)
    #_______________
    #Example of a Gillepie simulation
    cell[0] = mean_size #Initial size
    cell[1] = mean_size*rprom #Initial RNA number
    cell[2] = mean_size*pprom #Initial Protein number
    t=0
    nextt = 0
    index = 0
    ndiv = 0
    while t<tmax: #iterating over time
        nr = cell[1]
        nprot = cell[2]
        sz = cell[0]
        reactions=[[0,1,0],[0,-1,0],[0,0,1]] #Reactions (RNA creation, RNA active degradation, Protein creation)
        Farray = [kr*sz, gammar*nr, kp*nr] # Reaction rates
        Ftot=np.sum(Farray) #suming rates
        Fnorm=np.array(Farray)/Ftot # array of relative probabilities
        ran1=np.random.rand() # Random number
        tau=-np.log(ran1)/Ftot # Gillespie time
        ran2=np.random.rand() # Random number
        temp = 0
        for m in range(len(Farray)):
            temp+=Fnorm[m]
            if ran2<temp:
                cell += np.array(reactions)[m] #checking where the random number is
                break
        t += tau # Adding time step
        sim.simulate(tmax=tau,export = False) #Simulate size dynamics for that given time
        cell[0] = sim.get_sz(0) #Taking he cell size after that simulation
        if sim.get_ndiv(0) > ndiv: #Check if cell got divided
            cell[1] = np.random.binomial(nr,0.5) # RNA segregated binomially
            cell[2] = np.random.binomial(nprot,0.5) # Protein segregated binomially
            ndiv += 1 # New number of divisions
        nr = cell[1] #Refreshing RNA number
        nprot = cell[2] #Refreshing Protein number
        sz = cell[0] #Refreshing size number
        if t > nextt and index<len(tarray): #storing data
            rarray[cellindex,index] = nr/sz # RNA concentration
            parray[cellindex,index] = nprot/sz # Protein concentration
            szarray[cellindex,index] = sz # Cell size
            tarray[index] = t # Time
            index += 1
            nextt += sampling_time
    cellindex += 1
# +
# Cross-cell statistics of the size trajectories: after transposing, columns
# are cells ("mom<i>") and rows are sampling times; a 95% Bayesian credible
# interval is estimated per time point.
data = pd.DataFrame(np.array(szarray).T)
data.columns = ["mom" + str(i) for i in range(len(data.columns))]
mnszarray = []  # posterior mean size per time point
cvszarray = []  # squared coefficient of variation of size
errcv2sz = []   # propagated error of CV^2
errmnsz = []    # credible-interval half-width of the mean
for row in range(len(data)):
    sample = data.loc[row, :].values.tolist()
    mean_cntr, var_cntr, std_cntr = bayesest(sample, alpha=0.95)
    mu = mean_cntr[0]
    err_mu = mean_cntr[1][1] - mu
    err_var = var_cntr[1][1] - var_cntr[0]
    mnszarray.append(mu)
    errmnsz.append(err_mu)
    cvszarray.append(var_cntr[0] / mu**2)
    # First-order error propagation for var/mean^2.
    errcv2sz.append(err_var / mu**2 + 2 * err_mu * var_cntr[0] / mu**3)
data['time'] = tarray
data['Mean_sz'] = mnszarray
data['Error_mean'] = errmnsz
data['sz_CV2'] = cvszarray
data['Error_CV2'] = errcv2sz
if not os.path.exists('./data/gillespie'):
    os.makedirs('./data/gillespie')
data.to_csv("./data/gillespie/szsim.csv")
# +
# Numerical integration of the master equation for the division-step process:
# u[n][m] = P(n divisions completed, currently at division step m); cell size
# after n divisions is v0*exp(a*t)/2^n. Forward Euler in time.
tmax=9*doubling_time
dt=0.0001*doubling_time
lamb=1 # size-scaling exponent of the division-step rate
a=gr
nsteps=div_steps
k=kd
v0=mean_size
#psz1=[]
ndivs=10 # truncation of the number of divisions tracked
t=0
bigdeltat=0.1
steps=int(np.floor(tmax/dt))
u=np.zeros([ndivs,nsteps])#(DIVS,STEPS)
u[0]=np.zeros(nsteps)
u[0][0]=1#P_00 — all probability starts at zero divisions, step zero
allmeandivs4=[]#average divisions along the time
allvardiv4=[] # variance of pn along the time
allmeansz4=[] # mean size along the time
allvarsz4=[] # size variance along the time
time4=[]#time array
yenvol=[]
xenvol=[]
start=0
count=int(np.floor(tmax/(dt*1000)))-1 # primed so the first step records a snapshot
count2=0
for l in range(steps):
    # NOTE(review): utemp aliases u (no copy), so updates within one sweep see
    # partially-updated values — presumably tolerated because dt is tiny;
    # confirm before refactoring to u.copy().
    utemp=u
    for n in range(len(utemp)):#n=divs,
        for m in range(len(utemp[n])):#m=steps
            if (m==0):#m=steps
                if(n==0):#n=divs
                    # Loss from the initial state only.
                    dun=-k*v0**lamb*np.exp(lamb*a*t)*(utemp[0][0])
                    u[n][m]+=dun*dt
                else:
                    # Gain from the last step of the previous division (x2^lamb
                    # size factor), minus loss to the next step.
                    arg=lamb*(a*t-n*np.log(2))
                    dun=k*v0**lamb*np.exp(arg)*((2**lamb)*utemp[n-1][len(utemp[n])-1]-utemp[n][0])
                    u[n][m]+=dun*dt
            elif(m==len(utemp[n])-1):
                if(n==len(utemp)-1):
                    # Absorbing corner: last tracked division, last step.
                    arg=lamb*(a*t-n*np.log(2))
                    dun=k*v0**lamb*np.exp(arg)*(utemp[n][len(utemp[n])-2])
                    u[n][m]+=dun*dt
                else:
                    arg=lamb*(a*t-n*np.log(2))
                    dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
                    u[n][m]+=dun*dt
            else:
                # Interior step: flow in from step m-1, out to step m+1.
                arg=lamb*(a*t-n*np.log(2))
                dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
                u[n][m]+=dun*dt
    t+=dt
    count=count+1
    if count==int(np.floor(tmax/(dt*1000))): # record ~1000 snapshots total
        time4.append(t/doubling_time)
        mean=0
        for n in range(len(utemp)):
            pnval=np.sum(u[n]) # marginal probability of n divisions
            mean+=n*pnval
        allmeandivs4.append(mean/mean_size)
        var=0
        for n in range(len(utemp)):#divs
            pnval=np.sum(u[n])
            var+=(n-mean)**2*pnval
        allvardiv4.append(np.sqrt(var))
        pn=np.zeros(ndivs)
        sizen=np.zeros(ndivs)
        meansz=0
        for ll in range(len(utemp)):
            pnltemp=np.sum(u[ll])#prob of n divs
            pn[ll]=pnltemp#
            sizen[ll]=np.exp(a*t)/2**ll#
            meansz+=pnltemp*v0*np.exp(a*t)/2**ll
        allmeansz4.append(meansz)
        varsz=0
        for ll in range(len(utemp)):
            pnltemp=np.sum(u[ll])
            varsz+=(v0*np.exp(a*t)/2**ll-meansz)**2*pnltemp
        allvarsz4.append(varsz)
        count=0
    count2+=1
    if(count2==100):
        # Progress indicator every 100 Euler steps.
        print(str(int(100*t/tmax))+"%")
        count2=0
# -
# Total probability — should remain ~1 if the ndivs truncation is adequate.
np.sum(u)
# +
# Compare SSA size statistics (shaded credible band) against the numerical
# master-equation solution (solid line): mean size and CV^2 vs time.
fig, ax = plt.subplots(1,2, figsize=(12,4))
#ax[0].plot(tarray,mnszarray)
ax[0].fill_between(np.array(tarray)/doubling_time,np.array(mnszarray)-np.array(errmnsz),np.array(mnszarray)+np.array(errmnsz),
    alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label='SSA')
#ax[1].plot(tarray,cvszarray)
ax[1].fill_between(np.array(tarray)/doubling_time,np.array(cvszarray)-np.array(errcv2sz),np.array(cvszarray)+np.array(errcv2sz),
    alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0].plot(np.array(time4),np.array(allmeansz4),lw=2,c='#006599',label="Numerical")
ax[1].plot(np.array(time4),np.array(allvarsz4)/np.array(allmeansz4)**2,lw=2,c='#006599')
ax[0].set_ylabel("$s$ ($\mu$m)",size=20)
ax[1].set_ylabel("$C_V^2(s)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].set_ylim([1,1.2*np.max(mnszarray)])
ax[1].set_ylim([0,1.2*np.max(cvszarray)])
for l in [0,1]:
    # x axis expressed in units of the doubling time.
    ax[l].set_xlim([0,tmax/doubling_time])
    taqui=np.arange(0,(tmax+1)/doubling_time,step=1)
    ax[l].set_xticks(np.array(taqui))
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    # Keep only the bottom/left spines (classic two-spine style).
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
taqui=np.arange(0,0.15,step=0.02)
ax[1].set_yticks(np.array(taqui))
ax[0].legend(fontsize=15)
if not os.path.exists('./figures/gillespie'):
    os.makedirs('./figures/gillespie')
plt.savefig('./figures/gillespie/size_statistics.svg',bbox_inches='tight')
plt.savefig('./figures/gillespie/size_statistics.png',bbox_inches='tight')
# +
# Cross-cell RNA-concentration statistics per sampling time: columns are
# cells ("mom<i>"), rows are time points; 95% Bayesian credible intervals.
data = pd.DataFrame(np.array(rarray).T)
data.columns = ["mom" + str(i) for i in range(len(data.columns))]
mnrnaarray = []  # posterior mean RNA concentration per time point
cvrnaarray = []  # squared coefficient of variation
errcv2rna = []   # propagated error of CV^2
errmnrna = []    # credible-interval half-width of the mean
for row in range(len(data)):
    sample = data.loc[row, :].values.tolist()
    mean_cntr, var_cntr, std_cntr = bayesest(sample, alpha=0.95)
    mu = mean_cntr[0]
    err_mu = mean_cntr[1][1] - mu
    err_var = var_cntr[1][1] - var_cntr[0]
    mnrnaarray.append(mu)
    errmnrna.append(err_mu)
    cvrnaarray.append(var_cntr[0] / mu**2)
    # First-order error propagation for var/mean^2.
    errcv2rna.append(err_var / mu**2 + 2 * err_mu * var_cntr[0] / mu**3)
data['time'] = tarray
data['Mean_RNA'] = mnrnaarray
data['Error_mean'] = errmnrna
data['RNA_CV2'] = cvrnaarray
data['Error_CV2'] = errcv2rna
if not os.path.exists('./data/gillespie'):
    os.makedirs('./data/gillespie')
data.to_csv("./data/gillespie/RNAsim.csv")
# +
# RNA statistics vs time (mean and CV^2 with credible bands).
fig, ax = plt.subplots(1,2, figsize=(12,4))
ax[0].plot(np.array(tarray)/doubling_time,mnrnaarray,c="#BD0025")
ax[0].fill_between(np.array(tarray)/doubling_time,np.array(mnrnaarray)-np.array(errmnrna),np.array(mnrnaarray)+np.array(errmnrna),
    alpha=1, edgecolor='#FF3333', facecolor='#FF3333',linewidth=0)
ax[1].plot(np.array(tarray)/doubling_time,cvrnaarray,c="#BD0025")
ax[1].fill_between(np.array(tarray)/doubling_time,np.array(cvrnaarray)-np.array(errcv2rna),np.array(cvrnaarray)+np.array(errcv2rna),
    alpha=1, edgecolor='#FF3333', facecolor='#FF3333',linewidth=0)
ax[0].set_ylabel("RNA",size=20)
ax[1].set_ylabel("$C_V^2(r)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].set_ylim([0,1.2*np.max(mnrnaarray)])
ax[1].set_ylim([0,1.2*np.max(cvrnaarray)])
for l in [0,1]:
    # BUG FIX: the x data is t/doubling_time, but the original set the limits
    # and ticks using the raw tmax in minutes, pushing all data into the far
    # left of the axis. Express limits/ticks in doubling-time units, matching
    # the size figure above.
    ax[l].set_xlim([0,tmax/doubling_time])
    taqui=np.arange(0,(tmax+1)/doubling_time,step=1)
    ax[l].set_xticks(np.array(taqui))
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
taqui=np.arange(0,1.2*np.max(cvrnaarray),step=np.round(.2*np.max(cvrnaarray),2))
ax[1].set_yticks(np.array(taqui))
if not os.path.exists('./figures/gillespie'):
    os.makedirs('./figures/gillespie')
plt.savefig('./figures/gillespie/rna_statistics.svg',bbox_inches='tight')
plt.savefig('./figures/gillespie/rna_statistics.png',bbox_inches='tight')
# +
# Cross-cell protein-concentration statistics per sampling time, written to
# ./data/gillespie/protsim.csv.
data=pd.DataFrame(np.transpose(np.array(parray)))
ind=0
newcol=[]
for name in data.columns:
    newcol.append("mom"+str(ind))
    ind+=1
data.columns=newcol
mnprotarray=[]   # posterior mean protein concentration per time point
cvprotarray=[]   # squared coefficient of variation
errcv2prot=[]    # propagated error of CV^2
errmnprot=[]     # credible-interval half-width of the mean
for m in range(len(data)):
    prots=data.loc[m, :].values.tolist()
    mean_cntr, var_cntr, std_cntr = bayesest(prots,alpha=0.95)
    mnprotarray.append(mean_cntr[0])
    errmnprot.append(mean_cntr[1][1]-mean_cntr[0])
    cvprotarray.append(var_cntr[0]/mean_cntr[0]**2)
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2prot.append(errv)
data['time'] = tarray
# BUG FIX: the original cell wrote the RNA statistics (mnrnaarray, errmnrna,
# cvrnaarray, errcv2rna) into the protein columns; store the protein
# statistics computed just above instead.
data['Mean_prot'] = mnprotarray
data['Error_mean'] = errmnprot
data['prot_CV2'] = cvprotarray
data['Error_CV2'] = errcv2prot
if not os.path.exists('./data/gillespie'):
    os.makedirs('./data/gillespie')
data.to_csv("./data/gillespie/protsim.csv")
# +
# Protein statistics vs time (mean and CV^2 with credible bands), x axis in
# doubling-time units.
fig, ax = plt.subplots(1,2, figsize=(12,4))
ax[0].plot(np.array(tarray)/doubling_time,mnprotarray,c="#3BB000")
ax[0].fill_between(np.array(tarray)/doubling_time,np.array(mnprotarray)-np.array(errmnprot),np.array(mnprotarray)+np.array(errmnprot),
    alpha=1, edgecolor='#4BE000', facecolor='#4BE000',linewidth=0)
ax[1].plot(np.array(tarray)/doubling_time,cvprotarray,c="#3BB000")
ax[1].fill_between(np.array(tarray)/doubling_time,np.array(cvprotarray)-np.array(errcv2prot),np.array(cvprotarray)+np.array(errcv2prot),
    alpha=1, edgecolor='#4BE000', facecolor='#4BE000',linewidth=0)
ax[0].set_ylabel("Protein",size=20)
ax[1].set_ylabel("$C_V^2(p)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].set_ylim([0,1.2*np.max(mnprotarray)])
ax[1].set_ylim([0,1.2*np.max(cvprotarray)])
for l in [0,1]:
    # x axis hard-coded to 7 doubling times here (author's choice).
    ax[l].set_xlim([0,7])
    taqui=np.arange(0,8,step=1)
    ax[l].set_xticks(np.array(taqui))
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.5)
taqui=np.arange(0,1.2*np.max(cvprotarray),step=np.round(.2*np.max(cvprotarray),4))
ax[1].set_yticks(np.array(taqui))
if not os.path.exists('./figures'):
    os.makedirs('./figures')
if not os.path.exists('./figures/gillespie'):
    os.makedirs('./figures/gillespie')
plt.savefig('./figures/gillespie/prot_statistics.svg',bbox_inches='tight')
plt.savefig('./figures/gillespie/prot_statistics.png',bbox_inches='tight')
# -
|
.ipynb_checkpoints/Gene_expression_Gillespie-checkpoint.ipynb
|
# # The framework and why do we need it
#
# In the previous notebooks, we introduce some concepts regarding the
# evaluation of predictive models. While this section could be slightly
# redundant, we intend to go into details into the cross-validation framework.
#
# Before we dive in, let's linger on the reasons for always having training and
# testing sets. Let's first look at the limitation of using a dataset without
# keeping any samples out.
#
# To illustrate the different concepts, we will use the California housing
# dataset.
# +
from sklearn.datasets import fetch_california_housing
# Load the California housing dataset as pandas objects (as_frame=True).
housing = fetch_california_housing(as_frame=True)
data, target = housing.data, housing.target
# -
# In this dataset, the aim is to predict the median value of houses in an area
# in California. The features collected are based on general real-estate and
# geographical information.
#
# Therefore, the task to solve is different from the one shown in the previous
# notebook. The target to be predicted is a continuous variable and not anymore
# discrete. This task is called regression.
#
# Thus, we will use a predictive model specific to regression and not to
# classification.
print(housing.DESCR)
data.head()
# To simplify future visualization, let's transform the prices from the
# 100 (k\\$) range to the thousand dollars (k\\$) range.
# In-place scaling: `target` is now expressed in k$.
target *= 100
target.head()
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# ## Training error vs testing error
#
# To solve this regression task, we will use a decision tree regressor.
# +
from sklearn.tree import DecisionTreeRegressor
# Fully-grown tree (no depth limit): it will memorize the training data.
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(data, target)
# -
# After training the regressor, we would like to know its potential generalization
# performance once deployed in production. For this purpose, we use the mean
# absolute error, which gives us an error in the native unit, i.e. k\\$.
# +
from sklearn.metrics import mean_absolute_error
# Evaluating on the SAME data used for fitting yields the training error.
target_predicted = regressor.predict(data)
score = mean_absolute_error(target, target_predicted)
print(f"On average, our regressor makes an error of {score:.2f} k$")
# -
# We get perfect prediction with no error. It is too optimistic and almost
# always revealing a methodological problem when doing machine learning.
#
# Indeed, we trained and predicted on the same dataset. Since our decision tree
# was fully grown, every sample in the dataset is stored in a leaf node.
# Therefore, our decision tree fully memorized the dataset given during `fit`
# and therefore made no error when predicting.
#
# This error computed above is called the **empirical error** or **training
# error**.
#
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">In this MOOC, we will consistently use the term "training error".</p>
# </div>
#
# We trained a predictive model to minimize the training error but our aim is
# to minimize the error on data that has not been seen during training.
#
# This error is also called the **generalization error** or the "true"
# **testing error**.
#
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">In this MOOC, we will consistently use the term "testing error".</p>
# </div>
#
# Thus, the most basic evaluation involves:
#
# * splitting our dataset into two subsets: a training set and a testing set;
# * fitting the model on the training set;
# * estimating the training error on the training set;
# * estimating the testing error on the testing set.
#
# So let's split our dataset.
# +
from sklearn.model_selection import train_test_split
# Default 75%/25% train/test split; random_state fixed for reproducibility.
data_train, data_test, target_train, target_test = train_test_split(
    data, target, random_state=0)
# -
# Then, let's train our model.
regressor.fit(data_train, target_train)
# Finally, we estimate the different types of errors. Let's start by computing
# the training error.
target_predicted = regressor.predict(data_train)
score = mean_absolute_error(target_train, target_predicted)
print(f"The training error of our model is {score:.2f} k$")
# We observe the same phenomena as in the previous experiment: our model
# memorized the training set. However, we now compute the testing error.
target_predicted = regressor.predict(data_test)
score = mean_absolute_error(target_test, target_predicted)
print(f"The testing error of our model is {score:.2f} k$")
# This testing error is actually about what we would expect from our model if
# it was used in a production environment.
# ## Stability of the cross-validation estimates
#
# When doing a single train-test split we don't give any indication regarding
# the robustness of the evaluation of our predictive model: in particular, if
# the test set is small, this estimate of the testing error will be
# unstable and wouldn't reflect the "true error rate" we would have observed
# with the same model on an unlimited amount of test data.
#
# For instance, we could have been lucky when we did our random split of our
# limited dataset and isolated some of the easiest cases to predict in the
# testing set just by chance: the estimation of the testing error would be
# overly optimistic, in this case.
#
# **Cross-validation** allows estimating the robustness of a predictive model
# by repeating the splitting procedure. It will give several training and
# testing errors and thus some **estimate of the variability of the
# model generalization performance**.
#
# There are different cross-validation strategies, for now we are going to
# focus on one called "shuffle-split". At each iteration of this strategy we:
#
# - randomly shuffle the order of the samples of a copy of the full dataset;
# - split the shuffled dataset into a train and a test set;
# - train a new model on the train set;
# - evaluate the testing error on the test set.
#
# We repeat this procedure `n_splits` times. Keep in mind that the computational
# cost increases with `n_splits`.
#
# 
#
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">This figure shows the particular case of <strong>shuffle-split</strong> cross-validation
# strategy using <tt class="docutils literal">n_splits=5</tt>.
# For each cross-validation split, the procedure trains a model on all the red
# samples and evaluate the score of the model on the blue samples.</p>
# </div>
#
# In this case we will set `n_splits=40`, meaning that we
# will train 40 models in total and all of them will be discarded: we just
# record their generalization performance on each variant of the test set.
#
# To evaluate the generalization performance of our regressor, we can use
# [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html)
# with a
# [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html)
# object:
# +
from sklearn.model_selection import cross_validate
from sklearn.model_selection import ShuffleSplit
# 40 random 70%/30% splits; the scoring string negates the MAE so that
# "higher is better" (scikit-learn score convention).
cv = ShuffleSplit(n_splits=40, test_size=0.3, random_state=0)
cv_results = cross_validate(
    regressor, data, target, cv=cv, scoring="neg_mean_absolute_error")
# -
# The results `cv_results` are stored into a Python dictionary. We will convert
# it into a pandas dataframe to ease visualization and manipulation.
# +
import pandas as pd
cv_results = pd.DataFrame(cv_results)
cv_results.head()
# -
# -
# <div class="admonition tip alert alert-warning">
# <p class="first admonition-title" style="font-weight: bold;">Tip</p>
# <p>A score is a metric for which higher values mean better results. On the
# contrary, an error is a metric for which lower values mean better results.
# The parameter <tt class="docutils literal">scoring</tt> in <tt class="docutils literal">cross_validate</tt> always expect a function that is
# a score.</p>
# <p class="last">To make it easy, all error metrics in scikit-learn, like
# <tt class="docutils literal">mean_absolute_error</tt>, can be transformed into a score to be used in
# <tt class="docutils literal">cross_validate</tt>. To do so, you need to pass a string of the error metric
# with an additional <tt class="docutils literal">neg_</tt> string at the front to the parameter <tt class="docutils literal">scoring</tt>;
# for instance <tt class="docutils literal"><span class="pre">scoring="neg_mean_absolute_error"</span></tt>. In this case, the negative
# of the mean absolute error will be computed which would be equivalent to a
# score.</p>
# </div>
#
# Let us revert the negation to get the actual error:
cv_results["test_error"] = -cv_results["test_score"]
# Let's check the results reported by the cross-validation.
cv_results.head(10)
# We get timing information to fit and predict at each cross-validation
# iteration. Also, we get the test score, which corresponds to the testing
# error on each of the splits.
len(cv_results)
# We get 40 entries in our resulting dataframe because we performed 40
# splits. Therefore, we can show the testing error distribution and thus, have
# an estimate of its variability.
# +
import matplotlib.pyplot as plt
cv_results["test_error"].plot.hist(bins=10, edgecolor="black", density=True)
plt.xlabel("Mean absolute error (k$)")
_ = plt.title("Test error distribution")
# -
# We observe that the testing error is clustered around 47 k\\$ and
# ranges from 43 k\\$ to 50 k\\$.
# Summary statistics of the cross-validated testing error.
print(f"The mean cross-validated testing error is: "
    f"{cv_results['test_error'].mean():.2f} k$")
print(f"The standard deviation of the testing error is: "
    f"{cv_results['test_error'].std():.2f} k$")
# Note that the standard deviation is much smaller than the mean: we could
# summarize that our cross-validation estimate of the testing error is
# 46.36 +/- 1.17 k\\$.
#
# If we were to train a single model on the full dataset (without
# cross-validation) and then later had access to an unlimited amount of test
# data, we would expect its true testing error to fall close to that
# region.
#
# While this information is interesting in itself, it should be contrasted to
# the scale of the natural variability of the vector `target` in our dataset.
#
# Let us plot the distribution of the target variable:
target.plot.hist(bins=20, edgecolor="black", density=True)
plt.xlabel("Median House Value (k$)")
_ = plt.title("Target distribution")
print(f"The standard deviation of the target is: {target.std():.2f} k$")
# The target variable ranges from close to 0 k\\$ up to 500 k\\$ and, with a
# standard deviation around 115 k\\$.
#
# We notice that the mean estimate of the testing error obtained by
# cross-validation is a bit smaller than the natural scale of variation of the
# target variable. Furthermore, the standard deviation of the cross validation
# estimate of the testing error is even smaller.
#
# This is a good start, but not necessarily enough to decide whether the
# generalization performance is good enough to make our prediction useful in
# practice.
#
# We recall that our model makes, on average, an error around 47 k\\$. With this
# information and looking at the target distribution, such an error might be
# acceptable when predicting houses with a 500 k\\$. However, it would be an
# issue with a house with a value of 50 k\\$. Thus, this indicates that our
# metric (Mean Absolute Error) is not ideal.
#
# We might instead choose a metric relative to the target value to predict: the
# mean absolute percentage error would have been a much better choice.
#
# But in all cases, an error of 47 k\\$ might be too large to automatically use
# our model to tag house values without expert supervision.
#
# ## More detail regarding `cross_validate`
#
# During cross-validation, many models are trained and evaluated. Indeed, the
# number of elements in each array of the output of `cross_validate` is a
# result from one of these `fit`/`score` procedures. To make it explicit, it is
# possible to retrieve these fitted models for each of the splits/folds by
# passing the option `return_estimator=True` in `cross_validate`.
# return_estimator=True keeps each fold's fitted estimator (default 5-fold CV).
cv_results = cross_validate(regressor, data, target, return_estimator=True)
cv_results
cv_results["estimator"]
# The five decision tree regressors corresponds to the five fitted decision
# trees on the different folds. Having access to these regressors is handy
# because it allows to inspect the internal fitted parameters of these
# regressors.
#
# In the case where you only are interested in the test score, scikit-learn
# provide a `cross_val_score` function. It is identical to calling the
# `cross_validate` function and to select the `test_score` only (as we
# extensively did in the previous notebooks).
# +
from sklearn.model_selection import cross_val_score
# Shortcut for cross_validate(...)["test_score"].
scores = cross_val_score(regressor, data, target)
scores
# -
# -
# ## Summary
#
# In this notebook, we saw:
#
# * the necessity of splitting the data into a train and test set;
# * the meaning of the training and testing errors;
# * the overall cross-validation framework with the possibility to study
# generalization performance variations.
|
notebooks/cross_validation_train_test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from ewstools import core
import itertools
# set your path
# NOTE: machine-specific absolute paths — input directory (filtered mobility
# time series) and output directory (results) for New York.
path_from = "/Users/liqingchun/Google Drive/Dissertation/Paper/Journal/Early Warning/Evelyn_results/Covid-Early-Warning/Filtered-Time-Series-Data/NewYork/"
path_written_to = "/Users/liqingchun/Google Drive/Dissertation/Paper/Journal/Early Warning/Evelyn_results/Covid-Early-Warning/results/New York/"
cityname = "New York"
# Per-month input CSV file names.
input_jan = "newyork-time-series-Jan-new.csv"
input_feb = "newyork-time-series-Feb-new.csv"
input_mar = "newyork-time-series-Mar-new.csv"
# +
# first case, death, shelter in place
first_case = "Mar. 1st"
first_death = "Mar. 11th"
shelter = "Mar. 20th"
# start from zero, since the graph has zero
# 0-based day indices into the Jan-1-anchored daily series.
# NOTE(review): "31 + 29" assumes 31 January days plus 29 February days of
# data precede March — confirm against the input files' actual date range.
case_num = 31 + 29 - 1
death_num = 31 + 29 + 11 - 1
shelter_num = 31 + 29 + 20 - 1
# +
# set the starting value, ending value and step for bandwidth and rolling window
# Grid-search ranges: Gaussian detrending band-width (bw_*) and rolling
# window expressed as a fraction of the series length (rw_*).
bw_start = 20
bw_end = 90
bw_step = 2
bw_number = 36
rw_start = 0.2
rw_end = 0.5
rw_step = 0.01
rw_number = 31
# move forward
span = 0.25 # smoothing span passed to ews_compute
lags = [1] # autocorrelation lag(s) of interest
ews = ['sd','ac'] # early-warning indicators: standard deviation and autocorrelation
# -
# Work inside the input directory.
os.chdir(path_from)
os.getcwd()
# read
df1 = pd.read_csv(input_jan)
print(df1.columns)
# drop the index column
df1 = df1.drop('Unnamed: 0',axis=1)
# schema of the data frame
df1
df2 = pd.read_csv(input_feb)
df2 = df2.drop('Unnamed: 0',axis=1)
df3 = pd.read_csv(input_mar)
df3 = df3.drop('Unnamed: 0',axis=1)
print(df2.shape)
print(df3.shape)
# combine
# Concatenate Jan/Feb/Mar into one continuous daily series.
cityWhole = pd.concat([df1,df2,df3],ignore_index=True)
cityWhole
# +
# Baseline: average weekly Essential/NonEssential visits over rows 16-36
# (pre-outbreak weeks), then the relative drop of weekly averages for rows
# 37-85. reshape(-1, 7, 2) groups days into weeks x 2 columns.
baseline = cityWhole.loc[16:36,:].set_index('Date').loc[:,'Essential':'NonEssential']
temp_avr = baseline.values.reshape(-1,7,2).mean(axis=1) # weekly means per column
temp_avr = temp_avr.mean(axis=0) # overall baseline per column
#temp_avr
perc_change = cityWhole.loc[37:85,:].set_index('Date').loc[:,'Essential':'NonEssential']
perc_change_mean = perc_change.values.reshape(-1,7,2).mean(axis=1)
#perc_change_mean
temp_result = (temp_avr - perc_change_mean)/temp_avr # fractional reduction vs baseline
#temp_result
print(temp_result[3])
print(temp_result[-1])
# -
cityWhole.loc[31]
total_days = len(cityWhole)
print(total_days)
cityWhole.index
# Candidate rolling-window fractions and Gaussian band-widths for the grid
# search, as plain lists so itertools can combine them.
rw = list(np.linspace(rw_start, rw_end, rw_number, endpoint=True))
bw = list(np.linspace(bw_start, bw_end, bw_number, endpoint=True))
list1 = [rw, bw]
bw
# All (rolling-window, band-width) combinations as a list of tuples.
prod = itertools.product(*list1)
TupList = [pair for pair in prod]
TupList[0]
# create 2 new lists to store all values of std and all values of lag-1AC
storage_std = np.array([])
storage_lag = np.array([])
# For each (rolling-window, bandwidth) pair, compute the EWS dataframe and
# record the Kendall tau of the SD and lag-1 AC indicators.
for i in range(len(TupList)):
    ews_dic = core.ews_compute(cityWhole["Essential"], roll_window = TupList[i][0],smooth ='Gaussian',band_width= TupList[i][1],span = span,lag_times = [1], ews = ews,upto = 'Full')
    # Skip i == 0: its Kendall tau entries are NoneType, which would force the
    # whole array to dtype=object. Consequently storage_* are offset by one
    # relative to TupList (compensated with "+1" below).
    if i != 0:
        storage_std = np.append(storage_std,ews_dic['Kendall tau']["Standard deviation"][0]) # pandas series with 1 element, so index 0
        storage_lag = np.append(storage_lag, ews_dic['Kendall tau']["Lag-1 AC"][0]) # pandas series with 1 element, so index 0
# print out the max kendall tau for std and lagAC for comparison
max_std = np.amax(storage_std)
max_lag = np.amax(storage_lag)
print(max_std)
print(max_lag)
print(len(TupList))
print(len(storage_std))
print(len(storage_lag))
# ##### Add one when locating the tuple in TupList since the first item is "None Type" in tupList and we did not store that in storage_std and storage_lag
# Find the index of the maximum SD Kendall tau and map it back to the
# corresponding (rolling-window, bandwidth) tuple (offset by one, see above).
print("*** max std located in pair ***")
max_std_index = np.where(storage_std == max_std)
print("index at : " + str(max_std_index[0]+1) + "in TupList")
# store the tuple for comparison
target_pair_std = TupList[max_std_index[0][0]+1]
print("tuple (rowing-windows,bandwidth): " + str(target_pair_std))
print("largest std kendall tau: " + str(max_std))
# Same mechanism for the maximum lag-1 AC Kendall tau.
print("*** max lag located in pair ***")
max_lag_index = np.where(storage_lag == max_lag)
print("index at : " + str(max_lag_index[0]+1) + "in TupList")
# store the tuple for comparison
target_pair_lag = TupList[max_lag_index[0][0]+1]
print("tuple (rowing-windows,bandwidth): " + str(target_pair_lag))
print("largest lag kendall tau: " + str(max_lag))
# create an empty tuple, the tuple that contains larger Kendall Tau value will be selected
# select either AC max tuple or Standard Deviation max tuple automatically
selected_pair = ()
if (max_lag > max_std):
    selected_pair = target_pair_lag
    print("lagAC larger, select " + str(target_pair_lag))
elif(max_lag < max_std):
    selected_pair = target_pair_std
    print("std larger, select " + str(target_pair_std))
else:
    # Exact tie: fall back to the SD-optimal pair.
    print("rare case!")
    selected_pair = target_pair_std
# +
# double check, print out
#setRW = selected_pair[0]
#setBW = selected_pair[1]
# EWS recomputed at the SD-optimal (rolling-window, band-width) pair.
setRW = target_pair_std[0]
setBW = target_pair_std[1]
print("setRW : " + str(setRW))
print("setBW : " + str(setBW))
ews_dic_std = core.ews_compute(cityWhole["Essential"],
    roll_window = setRW,
    smooth ='Gaussian',
    band_width= setBW,
    span = span,
    lag_times = [1],
    ews = ews,
    upto = 'Full')
# +
# double check, print out
#setRW = selected_pair[0]
#setBW = selected_pair[1]
# EWS recomputed at the lag-1-AC-optimal pair.
setRW = target_pair_lag[0]
setBW = target_pair_lag[1]
print("setRW : " + str(setRW))
print("setBW : " + str(setBW))
ews_dic_lag = core.ews_compute(cityWhole["Essential"],
    roll_window = setRW,
    smooth ='Gaussian',
    band_width= setBW,
    span = span,
    lag_times = [1],
    ews = ews,
    upto = 'Full')
# +
# double check, print out
# EWS recomputed at the overall selected pair (larger Kendall tau of the two).
setRW = selected_pair[0]
setBW = selected_pair[1]
print("setRW : " + str(setRW))
print("setBW : " + str(setBW))
ews_dic = core.ews_compute(cityWhole["Essential"],
    roll_window = setRW,
    smooth ='Gaussian',
    band_width= setBW,
    span = span,
    lag_times = [1],
    ews = ews,
    upto = 'Full')
# +
# dfs
# Unpack the EWS metric time series and Kendall tau tables from each run.
df_ews_std = ews_dic_std['EWS metrics']
df_ktau_std = ews_dic_std['Kendall tau']
df_ews_lag = ews_dic_lag['EWS metrics']
df_ktau_lag = ews_dic_lag['Kendall tau']
df_ews = ews_dic['EWS metrics']
# +
# different cities needs adjustment of the location of annotation since total number varies
# set to mean, so location set automatically
# location of annotation for the 1st graph
annotation_location_1 = np.mean(df_ews[["State variable"]])
annotation_location_2 = np.mean(df_ews[["Residuals"]])
# location of annotations for the 2nd graph
annotation_location_3 = np.mean(df_ews_std["Standard deviation"])
# location of annotations for the 3rd graph
annotation_location_4 = np.mean(df_ews_lag["Lag-1 AC"])
# +
# 2 parameter: you may want to change these two
LegendTF = False # whether to draw legends on the trajectory panel
generateNow = True
# set the early warning (gray)band using the variables below
# Day indices delimiting the shaded early-warning band in each panel.
# Action Required: the start and the end of band in first graph
bar_start_1 = 61 # need modification
bar_end_1 = 63 # need modification
# Action Required: the start and the end of band in 2nd graph
bar_start_2 = 72 # need modification
bar_end_2 = 74 # need modification
# Action Required: the start and the end of band in 3rd graph
bar_start_3 = 61 # need modification
bar_end_3 = 63 # need modification
# -
cityWhole.loc[61]
# +
# Three-panel early-warning-signals figure for Essential visits:
#   top: raw series + Gaussian smoothing + residuals,
#   middle: rolling standard deviation, bottom: lag-1 autocorrelation.
# the dates and string of first case, first death , shelter in place order are set at the beginning in the notebook
# parameter needs to be set are in the previous cell
# constant, you don't need to change them, but if you want to you can change ALPHA OR DIST
ALPHA = 0.3
DIST = 5
ZERO = 0
NROWS = 3
NCOLS = 1
# x-position of the dashed "window size" marker: window fraction * series length
windown_position_std = int(target_pair_std[0]*total_days-1)
windown_position_lag = int(target_pair_lag[0]*total_days-1)
# title = ("Essential Early warning signals for city " + cityname)
# No action require below
plt.style.use('classic')
fig1, axes = plt.subplots(nrows = NROWS, ncols = NCOLS, figsize=(7,5), sharex=True)
df_ews[['State variable']].plot(ax=axes[0],
                color='black', xlim = (-DIST,total_days + DIST),legend = LegendTF,
                marker='o', markersize=3) # frame auto set
df_ews[['Smoothing']].plot(ax=axes[0], color='red', xlim = (-DIST,total_days + DIST),legend = LegendTF)
df_ews[['Residuals']].plot(ax=axes[0], color='blue', xlim = (-DIST,total_days + DIST),legend = LegendTF,
                marker='o', markersize=3, linestyle='dashed') # frame auto set
#axes[0].axvline(x = case_num, color='red', label='First case')
#axes[0].axvline(x = death_num, color='yellow')
#axes[0].axvline(x = shelter_num, color='yellow', label='Shelter in place')
#axes[0].annotate('First Case' +'\n'+ first_case, xy=((case_num + DIST), annotation_location_2)) # location auto set
#axes[0].annotate('First Death'+'\n'+ first_death, xy=((death_num - DIST), annotation_location_2)) # location auto set
#axes[0].annotate('Shelter in Place'+'\n'+ shelter, xy=((shelter_num + DIST), annotation_location_1)) # location auto set
#axes[0].axvspan(bar_start_1, bar_end_1, alpha=ALPHA, color='gray')
#axes[0].legend(loc=3, ncol=3)
axes[0].set_ylabel('Visits', fontsize=14)
axes[0].ticklabel_format(axis='y', style='sci', scilimits=(0,0))
#axes[0].annotate('(a)', xy=(1 - DIST, 0.8*np.max(df_ews[["State variable"]])))
# Middle panel: rolling standard deviation with first-case / shelter markers.
df_ews_std['Standard deviation'].plot(ax=axes[1],legend=False, xlim = (-DIST,total_days+DIST),
                color="black", marker='o', markersize=3, ls='--', markerfacecolor='none') # frame auto set
axes[1].axvline(x = case_num, color='red')
#axes[1].axvline(x = death_num, color='purple')
axes[1].axvline(x = shelter_num, color='blue')
axes[1].annotate("Kendall tau:" + str(round(df_ktau_std["Standard deviation"][0],4)), xy=(ZERO, 0.9*annotation_location_3)) # location auto set
#axes[1].annotate('First Case' +'\n'+ first_case, xy=((case_num + DIST), annotation_location_3)) # location auto set
#axes[1].annotate('First Death'+'\n'+ first_death, xy=((death_num - DIST), annotation_location_3)) # location auto set
#axes[1].annotate('Shelter in Place'+'\n'+ shelter, xy=((shelter_num + DIST), annotation_location_3)) # location auto set
axes[1].axvspan(bar_start_2, bar_end_2, alpha = ALPHA, color='gray', label='Early Warning Period')
axes[1].axvline(x = windown_position_std, color='black', ls='--')
axes[1].annotate("", xy=(0, 1.1*annotation_location_3), xytext=(windown_position_std, 1.1*annotation_location_3),
            arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", ls='--',color='black'))
axes[1].text(windown_position_std*0.5, 1.1*annotation_location_3, 'Window size',
         {'color': 'black', 'ha': 'center', 'va': 'bottom'})
#axes[1].legend(loc=2, ncol=1)
axes[1].set_ylabel('Std', fontsize=14)
#axes[1].annotate('(b)', xy=(1 - DIST, 0.95*np.max(df_ews_std[["Standard deviation"]])))
# Bottom panel: lag-1 autocorrelation, same annotations as the Std panel.
df_ews_lag[['Lag-1 AC']].plot(ax=axes[2],legend=False, xlim = (-DIST,total_days+DIST),
                color="black", marker='s', markersize=3, ls='--', markerfacecolor='none') # frame auto set
axes[2].axvline(x = case_num, color='red')
#axes[2].axvline(x = death_num, color='purple')
axes[2].axvline(x = shelter_num, color='blue')
axes[2].annotate("Kendall tau: " + str(round(df_ktau_lag["Lag-1 AC"][0],4)),xy=(ZERO, 0.5*annotation_location_4)) # location auto set
#axes[2].annotate('First Case' +'\n'+ first_case, xy=((case_num + DIST), annotation_location_4)) # location auto set
#axes[2].annotate('First Death'+'\n'+ first_death, xy=((death_num - DIST), annotation_location_4)) # location auto set
#axes[2].annotate('Shelter in Place'+'\n'+ shelter, xy=((shelter_num + DIST), annotation_location_4)) # location auto set
axes[2].axvspan(bar_start_3, bar_end_3, alpha = ALPHA, color='gray', label='Early Warning Period')
axes[2].axvline(x = windown_position_lag, color='black', ls='--')
axes[2].annotate("", xy=(0, 1.1*annotation_location_4), xytext=(windown_position_lag, 1.1*annotation_location_4),
            arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", ls='--', color='black'))
axes[2].text(windown_position_lag*0.5, 1.1*annotation_location_4, 'Window size',
         {'color': 'black', 'ha': 'center', 'va': 'bottom'})
#axes[2].legend(loc=2)
axes[2].set_ylabel('AC(1)')
#axes[2].annotate('(c)', xy=(1-DIST, 0.95*np.max(df_ews_lag[["Lag-1 AC"]])))
# NOTE(review): these labels simply relabel whatever ticks matplotlib chose —
# confirm that the tick positions actually line up with these dates.
x_tick = [1, 'Jan-1', 'Jan-21', 'Feb-10', 'Mar-1', 'Mar-21']
axes[2].set_xticklabels(x_tick)
foo_fig = plt.gcf()
# set "generateNow" in previous cell
if (generateNow):
    os.chdir(path_written_to)
    foo_fig.savefig((cityname + "-Essential-new.png"))
# -
np.max(df_ktau_lag[["Lag-1 AC"]])
# +
# Kendall-tau surface over the (rolling window, bandwidth) parameter grid for
# Essential visits, drawn with triangulated contours.
fig, ax = plt.subplots(figsize=(4.5,5))
plt.style.use('classic')
roll_windows, bandwidths = zip(*TupList)
grid_x = roll_windows[1:]  # index 0 of TupList is a placeholder pair
grid_y = bandwidths[1:]
# Plot whichever indicator achieved the larger Kendall tau overall.
grid_z = storage_std if np.max(storage_std) > np.max(storage_lag) else storage_lag
ax.tricontour(grid_x, grid_y, grid_z, levels=14, linewidths=0.5, colors='k')
filled = ax.tricontourf(grid_x, grid_y, grid_z, levels=14, cmap="RdBu_r")
fig.colorbar(filled, ax=ax)
ax.set(xlim=(0.2, 0.5), ylim=(20, 90))
ax.set_ylabel('Smoothing bandwith', fontsize=12)
ax.set_xlabel('Rolling Window Size', fontsize=12)
current_fig = plt.gcf()  # 'get current figure'
# set "generateNow" in previous cell
if generateNow:
    os.chdir(path_written_to)
    current_fig.savefig(cityname + "-Essential_contour-new.png")
# +
# Histogram of Kendall-tau values across the parameter sweep, for whichever
# indicator (Std or lag-1 AC) produced the larger maximum.
plt.style.use('classic')
fig = plt.figure(figsize=(3,5))
std_wins = np.max(storage_std) > np.max(storage_lag)
samples = storage_std if std_wins else storage_lag
label = 'Std' if std_wins else 'AC(1)'
hist, bins = np.histogram(samples, bins=40)
plt.hist(samples, bins=bins, color='grey')
plt.xlabel('Kendall tau (' + label + ')')
current_fig = plt.gcf()  # 'get current figure'
# set "generateNow" in previous cell
if generateNow:
    os.chdir(path_written_to)
    current_fig.savefig(cityname + "-Essential_histogram-new.png")
# -
# ## Here starts Non-essential
# +
# Parameter sweep for the NonEssential series: compute EWS metrics for every
# (rolling window, bandwidth) pair and collect the two Kendall-tau indicators.
# reset storage
storage_std = np.array([])
storage_lag = np.array([])
print("should print out two empty brackets below")
print(storage_std)
print(storage_lag)
# Accumulate in Python lists: np.append copies the whole array on every call,
# making the original loop quadratic in the number of parameter pairs.
_std_vals = []
_lag_vals = []
for i, (roll_window, band_width) in enumerate(TupList):
    ews_dic = core.ews_compute(cityWhole["NonEssential"], roll_window = roll_window,smooth ='Gaussian',band_width= band_width,span = span,lag_times = [1], ews = ews,upto = 'Full')
    if i != 0:  # index 0 is a placeholder pair; its result is discarded
        _std_vals.append(ews_dic['Kendall tau']["Standard deviation"][0])
        _lag_vals.append(ews_dic['Kendall tau']["Lag-1 AC"][0])
storage_std = np.array(_std_vals)
storage_lag = np.array(_lag_vals)
# -
# ### I know they can be overriden, but just in case we reset all variables
# Sentinel resets, immediately replaced by the true maxima over the sweep.
max_std = -100000000
max_lag = -100000000
max_std = np.amax(storage_std)
max_lag = np.amax(storage_lag)
print(max_std)
print(max_lag)
# TupList carries one extra (placeholder) entry, so it is 1 longer than the storages.
print(len(TupList))
print(len(storage_std))
print(len(storage_lag))
# PLUS 1 SINCE LENGTH IS DIFFERENT
# Locate the (rolling window, bandwidth) pair that maximised the Std Kendall tau.
# reset
max_std_index = -10000000
target_pair_std = (-10000000,-100000000)
# compute
print("*** max std located in pair ***")
max_std_index = np.where(storage_std == max_std) # find the largest element index
print("index at : " + str(max_std_index[0] + 1))
# +1 shifts from storage indexing to TupList indexing (TupList[0] is the placeholder)
target_pair_std = TupList[max_std_index[0][0] + 1]
print("tuple (rowing-windows,bandwidth): " + str(target_pair_std))
print("largest std kendall tau: " + str(max_std))
# Same search for the lag-1 autocorrelation indicator.
# reset
max_lag_index = -1000000000
target_pair_lag = (-1000000000,-1000000000)
# compute
print("*** max lag located in pair ***")
max_lag_index = np.where(storage_lag == max_lag)
print("index at : " + str(max_lag_index[0] + 1)) # set to the index zero when multiple results return because we try to have smaller rowing window
target_pair_lag = TupList[max_lag_index[0][0] + 1]
print("tuple (rowing-windows,bandwidth): " + str(target_pair_lag))
print("largest lag kendall tau: " + str(max_lag))
# Choose whichever indicator produced the larger Kendall tau; ties fall back
# to the lag-1 AC pair.
selected_pair = ()
if (max_lag > max_std):
    selected_pair = target_pair_lag
    print("lagAC larger, select " + str(target_pair_lag))
elif(max_lag < max_std):
    selected_pair = target_pair_std
    print("std larger, select " + str(target_pair_std))
else:
    print("rare case!")
    selected_pair = target_pair_lag
# Sentinel resets before unpacking the winning (rolling window, bandwidth) pair.
setRW = -10000000
setBW = -10000000
setRW = round(selected_pair[0],3)
setBW = selected_pair[1]
print("setRW : " + str(setRW))
print("setBW : " + str(setBW))
# Recompute the full EWS metrics with the selected parameters.
ews_dic = core.ews_compute(cityWhole["NonEssential"],
                    roll_window = setRW,
                    smooth ='Gaussian',
                    band_width=setBW,
                    span = span,
                    lag_times = [1],
                    ews = ews,
                    upto = 'Full')
# +
# Recompute EWS with the Std-optimal pair (used for the Std panel).
setRW = target_pair_std[0]
setBW = target_pair_std[1]
print("setRW : " + str(setRW))
print("setBW : " + str(setBW))
ews_dic_std = core.ews_compute(cityWhole["NonEssential"],
                    roll_window = setRW,
                    smooth ='Gaussian',
                    band_width= setBW,
                    span = span,
                    lag_times = [1],
                    ews = ews,
                    upto = 'Full')
# +
# Recompute EWS with the lag-1-AC-optimal pair (used for the AC(1) panel).
setRW = target_pair_lag[0]
setBW = target_pair_lag[1]
print("setRW : " + str(setRW))
print("setBW : " + str(setBW))
ews_dic_lag = core.ews_compute(cityWhole["NonEssential"],
                    roll_window = setRW,
                    smooth ='Gaussian',
                    band_width= setBW,
                    span = span,
                    lag_times = [1],
                    ews = ews,
                    upto = 'Full')
# +
# dfs
# Unpack the result dictionaries into the DataFrames used by the plotting cell.
df_ews_std = ews_dic_std['EWS metrics']
df_ktau_std = ews_dic_std['Kendall tau']
df_ews_lag = ews_dic_lag['EWS metrics']
df_ktau_lag = ews_dic_lag['Kendall tau']
df_ews = ews_dic['EWS metrics']
# -
df_ktau_std  # display the Kendall-tau table for the Std-optimal run
# +
# reset
# Sentinel resets; real annotation positions are derived from series means below.
annotation_location_1 = -10000000
annotation_location_2 = -10000000
annotation_location_3 = -10000000
annotation_location_4 = -10000000
# location of annotation for the 1st graph
annotation_location_1 = np.mean(df_ews[["State variable"]])
annotation_location_2 = np.mean(df_ews[["Residuals"]])
# location of annotations for the 2nd graph
annotation_location_3 = np.mean(df_ews_std["Standard deviation"])
# location of annotations for the 3rd graph
annotation_location_4 = np.mean(df_ews_lag["Lag-1 AC"])
# +
# 2 parameter: you may want to change these two
LegendTF = False
generateNow = True
# set the early warning (gray)band using the variables below
# Action Required: the start and the end of band in first graph
bar_start_1 = 62 # need modification
bar_end_1 = 64 # need modification
# Action Required: the start and the end of band in 2nd graph
bar_start_2 = 73 # need modification
bar_end_2 = 75 # need modification
# Action Required: the start and the end of band in 3rd graph
bar_start_3 = 62 # need modification
bar_end_3 = 64 # need modification
# -
# Sanity-check the row at the chosen band start.
cityWhole.loc[62]
# +
# the dates and string of first case, first death, shelter in place order are set at the beginning of the notebook
# parameters that need to be set are in the previous cell
# Plot constants — you normally don't need to change them, though ALPHA (band
# opacity) and DIST (x-axis margin) can be tuned.
# The original cell assigned ALPHA = 0.7 / DIST / ZERO / NROWS / NCOLS and then
# immediately reassigned all five; only the final values below ever took effect,
# so the dead first group has been removed.
ALPHA = 0.3
DIST = 5
ZERO = 0
NROWS = 3
NCOLS = 1
# Three-panel early-warning-signals figure for NonEssential visits (same layout
# as the Essential figure above: raw+smoothing+residuals, Std, lag-1 AC).
# x-position of the dashed "window size" marker: window fraction * series length
windown_position_std = int(target_pair_std[0]*total_days-1)
windown_position_lag = int(target_pair_lag[0]*total_days-1)
# title = ("Essential Early warning signals for city " + cityname)
# No action require below
plt.style.use('classic')
fig1, axes = plt.subplots(nrows = NROWS, ncols = NCOLS, figsize=(7,5), sharex=True)
df_ews[['State variable']].plot(ax=axes[0],
                color='black', xlim = (-DIST,total_days + DIST),legend = LegendTF,
                marker='o', markersize=3) # frame auto set
df_ews[['Smoothing']].plot(ax=axes[0], color='red', xlim = (-DIST,total_days + DIST),legend = LegendTF)
df_ews[['Residuals']].plot(ax=axes[0], color='blue', xlim = (-DIST,total_days + DIST),legend = LegendTF,
                marker='o', markersize=3, linestyle='dashed') # frame auto set
#axes[0].axvline(x = case_num, color='red', label='First case')
#axes[0].axvline(x = death_num, color='yellow')
#axes[0].axvline(x = shelter_num, color='yellow', label='Shelter in place')
#axes[0].annotate('First Case' +'\n'+ first_case, xy=((case_num + DIST), annotation_location_2)) # location auto set
#axes[0].annotate('First Death'+'\n'+ first_death, xy=((death_num - DIST), annotation_location_2)) # location auto set
#axes[0].annotate('Shelter in Place'+'\n'+ shelter, xy=((shelter_num + DIST), annotation_location_1)) # location auto set
#axes[0].axvspan(bar_start_1, bar_end_1, alpha=ALPHA, color='gray')
#axes[0].legend(loc=3, ncol=3)
axes[0].set_ylabel('Visits', fontsize=14)
axes[0].ticklabel_format(axis='y', style='sci', scilimits=(0,0))
#axes[0].annotate('(d)', xy=(1 - DIST, 0.8*np.max(df_ews[["State variable"]])))
# Middle panel: rolling standard deviation with event markers.
df_ews_std['Standard deviation'].plot(ax=axes[1],legend=False, xlim = (-DIST,total_days+DIST),
                color="black", marker='o', markersize=3, ls='--', markerfacecolor='none') # frame auto set
axes[1].axvline(x = case_num, color='red')
#axes[1].axvline(x = death_num, color='purple')
axes[1].axvline(x = shelter_num, color='blue')
axes[1].annotate("Kendall tau:" + str(round(df_ktau_std["Standard deviation"][0],4)), xy=(ZERO, 0.8*annotation_location_3)) # location auto set
#axes[1].annotate('First Case' +'\n'+ first_case, xy=((case_num + DIST), annotation_location_3)) # location auto set
#axes[1].annotate('First Death'+'\n'+ first_death, xy=((death_num - DIST), annotation_location_3)) # location auto set
#axes[1].annotate('Shelter in Place'+'\n'+ shelter, xy=((shelter_num + DIST), annotation_location_3)) # location auto set
axes[1].axvspan(bar_start_2, bar_end_2, alpha = ALPHA, color='gray', label='Early Warning Period')
axes[1].axvline(x = windown_position_std, color='black', ls='--')
axes[1].annotate("", xy=(0, 1.1*annotation_location_3), xytext=(windown_position_std, 1.1*annotation_location_3),
            arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", ls='--',color='black'))
axes[1].text(windown_position_std*0.5, 1.1*annotation_location_3, 'Window size',
         {'color': 'black', 'ha': 'center', 'va': 'bottom'})
#axes[1].legend(loc=2, ncol=1)
axes[1].set_ylabel('Std', fontsize=14)
#axes[1].annotate('(e)', xy=(1 - DIST, 0.9*np.max(df_ews_std[["Standard deviation"]])))
# Bottom panel: lag-1 autocorrelation, same annotations as the Std panel.
df_ews_lag[['Lag-1 AC']].plot(ax=axes[2],legend=False, xlim = (-DIST,total_days+DIST),
                color="black", marker='s', markersize=3, ls='--', markerfacecolor='none') # frame auto set
axes[2].axvline(x = case_num, color='red')
#axes[2].axvline(x = death_num, color='purple')
axes[2].axvline(x = shelter_num, color='blue')
axes[2].annotate("Kendall tau: " + str(round(df_ktau_lag["Lag-1 AC"][0],4)),xy=(ZERO, annotation_location_4*0.65)) # location auto set
#axes[2].annotate('First Case' +'\n'+ first_case, xy=((case_num + DIST), annotation_location_4)) # location auto set
#axes[2].annotate('First Death'+'\n'+ first_death, xy=((death_num - DIST), annotation_location_4)) # location auto set
#axes[2].annotate('Shelter in Place'+'\n'+ shelter, xy=((shelter_num + DIST), annotation_location_4)) # location auto set
axes[2].axvspan(bar_start_3, bar_end_3, alpha = ALPHA, color='gray', label='Early Warning Period')
axes[2].axvline(x = windown_position_lag, color='black', ls='--')
axes[2].annotate("", xy=(0, 1.1*annotation_location_4), xytext=(windown_position_lag, 1.1*annotation_location_4),
            arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", ls='--', color='black'))
axes[2].text(windown_position_lag*0.5, 1.1*annotation_location_4, 'Window size',
         {'color': 'black', 'ha': 'center', 'va': 'bottom'})
#axes[2].legend(loc=2)
axes[2].set_ylabel('AC(1)')
#axes[2].annotate('(f)', xy=(1-DIST, 0.85*np.max(df_ews_lag[["Lag-1 AC"]])))
# NOTE(review): hard-coded labels relabel whatever ticks matplotlib chose —
# confirm the tick positions line up with these dates.
x_tick = [1, 'Jan-1', 'Jan-21', 'Feb-10', 'Mar-1', 'Mar-21']
axes[2].set_xticklabels(x_tick)
foo_fig = plt.gcf() # 'get current figure'
# set "generateNow" in previous cell
if (generateNow):
    os.chdir(path_written_to)
    foo_fig.savefig((cityname + "-NonEssential-new.png"))
# +
# Kendall-tau surface over the (rolling window, bandwidth) grid for NonEssential visits.
fig, ax = plt.subplots(figsize=(4.5,5))
#fig = plt.figure(figsize=(10,8))
plt.style.use('classic')
#z = list(storage_lag)
#z.insert(0, np.min(storage_lag))
#ax.contour(bw, rw, np.array(z).reshape(30, 38), cmap="RdBu_r")
window, bandwith = zip(*TupList)
x = window[1:]  # index 0 of TupList is a placeholder pair
y = bandwith[1:]
# Plot whichever indicator achieved the larger Kendall tau overall.
if np.max(storage_std)>np.max(storage_lag):
    z = storage_std
else:
    z= storage_lag
ax.tricontour(x, y, z, levels=14, linewidths=0.5, colors='k')
cntr = ax.tricontourf(x, y, z, levels=14, cmap="RdBu_r")
fig.colorbar(cntr, ax=ax)
ax.set(xlim=(0.2, 0.5), ylim=(20, 90))
ax.set_ylabel('Smoothing bandwith', fontsize=12)
ax.set_xlabel('Rolling Window Size', fontsize=12)
foo_fig = plt.gcf() # 'get current figure'
# set "generateNow" in previous cell
if (generateNow):
    os.chdir(path_written_to)
    foo_fig.savefig((cityname + "-NonEssential_contour-new.png"))
# +
# Histogram of Kendall-tau values across the NonEssential sweep, for whichever
# indicator produced the larger maximum.
plt.style.use('classic')
fig = plt.figure(figsize=(3,5))
#z = list(storage_lag)
#z.insert(0, np.min(storage_lag))
#ax.contour(bw, rw, np.array(z).reshape(30, 38), cmap="RdBu_r")
if np.max(storage_std)>np.max(storage_lag):
    a = storage_std
    label = 'Std'
else:
    a= storage_lag
    label = 'AC(1)'
hist,bins = np.histogram(a,bins=40)
plt.hist(a, bins = bins, color='grey')
plt.xlabel('Kendall tau (' + label + ')')
foo_fig = plt.gcf() # 'get current figure'
# set "generateNow" in previous cell
if (generateNow):
    os.chdir(path_written_to)
    foo_fig.savefig((cityname + "-NonEssential_histogram-new.png"))
# -
|
17-Analysis-Notebooks/New_York_Early_Warning_v4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Grouping based on Geo Data**
# 37명 4개씩 상점 분할하기
# - **Total** : 151 개
# - 1 개의 상호가 2개의 점포를 소유중 **(1개로 포함하여 작업)**
# ## **1 데이터 내용 살펴보기**
import pandas as pd
# Store locations and metadata for the neighbourhood shops.
table = pd.read_csv("./data/ourtown_store_geo2.csv")
table.tail(3)
# Distinct values of the Korean metadata columns.
# NOTE(review): column names translate roughly as '업종' = business type,
# '비고' = remarks, '기타' = misc (contains NaN, hence fillna) — verify.
", ".join(sorted(set(table['업종'].to_list())))
", ".join(sorted(set(table['비고'].to_list())))
", ".join(sorted(set(table['기타'].fillna('').to_list())))
# ## **2 Folium Install**
# - **[지도 위 데이터 표시하기](https://rk1993.tistory.com/entry/Python%EC%A7%80%EB%8F%84-%EB%8D%B0%EC%9D%B4%ED%84%B0-%EC%8B%9C%EA%B0%81%ED%99%94-Folium-%EA%B8%B0%EC%B4%88-%EC%8B%A4%EC%8A%B5%ED%95%98%EA%B8%B0)**
# - **[Folium Tutorial](https://python-visualization.github.io/folium/quickstart.html#Markers)**
# - **[Module 설치하기](https://somjang.tistory.com/entry/Python-Folium-%ED%95%9C%EA%B8%80-%EA%B9%A8%EC%A7%90-%ED%98%84%EC%83%81-%ED%95%B4%EA%B2%B0%ED%95%98%EA%B8%B0)**
# ```r
# # ! pip install folium
# # ! pip install git+https://github.com/python-visualization/branca.git@master
# ```
# Build the working frame: longitude, latitude and shop name.
# (Renamed columns are Korean: '경도' = longitude, '위도' = latitude, '구분' = label.)
example = table.loc[:,['lon','lat','상호명']]
example.columns = ['경도','위도','구분']
example.head(3)
# ## **3 Folium Map by Store Category**
# - **[Folium Tutorial](https://python-visualization.github.io/folium/quickstart.html#Markers)**
# - 건설업, 공연장, 기타, 기타업종, 기타판매, 인쇄업, 학원
# - 미용업, 피부미용, 카페, 떡집, 반찬판매, 음식점, 정육점, 제과점, 주점,
# - 도소매, 문구점, 부동산, 빨래방, 사진관, 서비스(녹음실)
# - 서점, 슈퍼, 스터디카페, 실내스포츠업, 안경점, 약국, 의류판매,
# +
# Base map centred on the mean coordinate of all shops.
import folium
lat, long = example['위도'].mean(), example['경도'].mean() # center of the map
instance_map = folium.Map([lat,long],zoom_start=14)
# # Marking on the Map
# for _ in range(len(example)):
# sub_lat, sub_long, title = example.loc[_,'위도'], example.loc[_,'경도'], example.loc[_,'구분']
# folium.Marker([ sub_lat, sub_long ], tooltip = title,\
# icon = folium.Icon(icon='film', color="darkpurple")).add_to(instance_map)
# instance_map.save('./Web/foliumap.html')
# # instance_map
# -
# Open a fresh map and draw one circle per shop, coloured by category.
import folium
instance_map = folium.Map([lat,long], zoom_start=14)
for i in example.index:
    sub_lat, sub_long, title = example.loc[i,'위도'], example.loc[i,'경도'], example.loc[i,'구분']
    color = 'blue' # default colour; shops in the '기타' (misc) category are drawn green below
    if example.loc[i,'구분'] == '기타':
        color = "green"
    folium.CircleMarker([sub_lat, sub_long], color=color,\
                        radius = 7, tooltip = title).add_to(instance_map)
instance_map # instance_map.save('example.html')
# +
# instance_map.save("test.svg")
# +
# import io
# from PIL import Image
# img_data = instance_map._to_png(5)
# img = Image.open(io.BytesIO(img_data))
# img.save('image.png')
# -
# ! pip list | grep selenium
# +
# instance_map._to_png("test.png")
# -
# <br/>
#
# # **Arterior 작가 작업구역 선정**
# - **원칙 :** 거리가 가까운 업종끼리 묶는다 (종류는 무관)
# - **예외1 :** 거리가 있으면 유사한 업종끼리 묶어서 난이도를 낮춘다
# - **K Means 알고리즘** 은 **비지도학습** 으로, label 없이 자동으로 분류 기준을 세워서 동작 된다
# - **KNN 알고리즘** 은 **지도학습** 으로, Labeling 되지 않은 데이터를 분류하는 알고리즘
# - 작업 Process
# 1. **K Means** 알고리즘을 활용하여 37개의 분류 데이터 생성
# 1. 생성된 데이터를 기준으로 4개, 5개씩 임의로 분류
# 1. 애매한 부분을 **KNN** 을 활용하여 Label 최종적 마무리 진행
#
# ## **1 Import Data to Visualization**
# - [Matplotlib Font Setting](http://corazzon.github.io/matplotlib_font_setting)
# - [Naver Map API Examples](https://navermaps.github.io/maps.js.ncp/docs/tutorial-3-drawing-restore.example.html)
# +
# %matplotlib inline
from matplotlib import rcParams
from matplotlib import pyplot as plt
import matplotlib
import string
# Use a Korean-capable font so Hangul shop names render correctly in the plot.
matplotlib.rc('font', family='NanumGothic')
plt.rcParams['axes.unicode_minus'] = False
fig, ax = plt.subplots(figsize=(15, 15))
example.plot.scatter(x='경도', y='위도', ax=ax)
# Label every point with its shop name.
for _ in example.index:
    ax.annotate(example.loc[_]['구분'], (example.loc[_]['경도'], example.loc[_]['위도']))
# -
# # **K Means Classifier**
# - **[Clustering GPS Coordinates and Forming Regions with Python](https://levelup.gitconnected.com/clustering-gps-co-ordinates-forming-regions-4f50caa7e4a1)**
# - **[Weighted K-Means Clustering of GPS Coordinates](https://medium.com/datadriveninvestor/weighted-k-means-clustering-of-gps-coordinates-python-7c6270846163)**
#
# ## **1 K Means Cluster 갯수 비교하기**
# 몇개를 기준으로 Clustering 할 것인가?
# +
from sklearn.cluster import KMeans
# Score candidate cluster counts to pick k (elbow method).
# Observed result: a smooth curve with no sharp elbow, so no clearly best k.
K_clusters = range(1,50)
kmeans = [ KMeans(n_clusters=i) for i in K_clusters ]
Y_axis = example[['경도']]
X_axis = example[['위도']]
# NOTE(review): only the longitude column is fitted/scored; X_axis is unused —
# verify whether both coordinates were meant to be clustered here.
score = [ kmeans[i].fit(Y_axis).score(Y_axis) for i in range(len(kmeans)) ]
# Visualize
plt.plot(K_clusters, score); plt.xlabel('Number of Clusters')
plt.ylabel('Score'); plt.title('Elbow Curve'); plt.show()
# -
# ## **2 Unsupervised K-Means with 37 clusters**
# Partition the shops into 37 clusters (one group per worker).
from collections import Counter
# Compute k-means clustering.
# NOTE(review): no random_state is set, so cluster labels differ between runs —
# consider fixing a seed for reproducibility.
kmeans = KMeans(n_clusters = 37, init ='k-means++')
# fit_predict fits the model and returns each row's cluster label in one pass;
# the original fitted twice (fit() immediately followed by fit_predict()).
# The vectorized "+ 1" shifts labels from 0..36 to 1..37.
example['label'] = kmeans.fit_predict(example[example.columns[0:2]]) + 1
# centers = kmeans.cluster_centers_ # Coordinates of cluster centers.
# labels = kmeans.predict(example[example.columns[0:2]]) # Labels of each point
result_count = Counter(example['label'].to_list())
result_count = sorted([f"Id{k:02d}:{v:02d}" for k,v in result_count.items()])
", ".join(result_count)
example.head(3)
# ## **3 Visualization**
# Visualize the 37 clusters: colour each point by its cluster id and label it.
# Adding the Color names
import matplotlib.colors as mcolors
color_names = list(mcolors.cnames.keys())
# Map each cluster label to a named matplotlib colour.
example['color'] = [color_names[_] for _ in example["label"]]
example.head(3)
fig, ax = plt.subplots(figsize=(15, 15))
example.plot(kind = 'scatter',
             x = '경도',
             y = '위도',
             s = 50, # marker size
             c = example['color'],
             ax = ax) # marker color by group
for _ in example.index:
    ax.annotate(example.loc[_]['구분'],
                (example.loc[_]['경도'], example.loc[_]['위도']),
                fontsize=10)
# NOTE(review): title/axis names look copied from an iris example and do not
# match this longitude/latitude data.
plt.title('Scatter Plot of iris by pandas', fontsize=20)
plt.xlabel('Petal Length', fontsize=14)
plt.ylabel('Petal Width', fontsize=14)
plt.show()
# Second scatter: annotate with the numeric cluster label instead of the name.
fig, ax = plt.subplots(figsize=(15, 15))
example.plot.scatter(x='경도', y='위도', ax=ax)
for _ in example.index:
    ax.annotate(example.loc[_]['label'], (example.loc[_]['경도'], example.loc[_]['위도']), color='red')
# ## **2 K Means Clustering**
# - [Matplotlib Font Setting](http://corazzon.github.io/matplotlib_font_setting)
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
# BUG FIX: KNeighborsClassifier.fit() requires both features X and targets y;
# the original call passed only the coordinates and would raise a TypeError.
# The K-Means cluster labels computed above are the natural targets here.
classifier = KNeighborsClassifier(n_neighbors = 4).fit(example[['경도','위도']], example['label'])
# NOTE(review): kmeans.labels_ are the raw 0-based labels, while the 'label'
# column was shifted to start at 1 — confirm which numbering later cells expect.
labels = kmeans.labels_
centers = kmeans.cluster_centers_
example['clustering'] = labels
example.head(3)
from collections import Counter
Counter(example['clustering'].to_list())
|
jupyters/at3_folium01.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Language Detection
# Semesterarbeit CAS **Practical Machine Learning** der Berner Fachhochschule. <br />
# Q3/2020, <NAME>
# ## Initialisierung
#
# + pycharm={"name": "#%%\n"}
# Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
from IPython.display import HTML
# Reports
# Base names of the CSV reports read from outcome/ and of the PNGs written to graphic/.
data_balance = "ReportDataBalance"
score = "ReportScore"
score_per_language = "ReportScorePerLanguage"
score_per_word = "ReportScorePerWord"
score_prob = "ReportScoreProbability"
score_prob_hits = score_prob + "Hits"
score_prob_probs = score_prob + "Probabilities"
system_performance = "ReportSystemPerformance"
# languages
# ISO codes of the languages covered by the classifier reports.
languages = ["de", "fr", "it", "en"]
# Load reports
def load(report):
    """Read the CSV for *report* from the outcome/ directory into a DataFrame."""
    return pd.read_csv(f"outcome/{report}.csv")
# Display as HTML
def display_title(title):
    """Render *title* as an <h2> heading in the notebook output."""
    display(HTML('<h2>' + title + '</h2>'))
def display_data(df, sort_by='Model', show_index=False):
    """Render *df*, sorted by *sort_by*, as an HTML table followed by a spacer."""
    display(HTML(df.sort_values(by=sort_by).to_html(index=show_index)))
    display(HTML('<br />'))
# Save graphics
def save(report, layout_rect=(0, 0, 1, 1)):
    """Tighten the current figure's layout and write it to graphic/<report>.png at 300 dpi."""
    plt.tight_layout(rect=layout_rect)
    plt.savefig(f"graphic/{report}.png", dpi=300)
# ## ReportDataBalance
#
# + pycharm={"name": "#%%\n"}
# Bar charts of average characters/words per article for each language's
# 100k-article test file.
df = load(data_balance)
# Keep only the per-language 100k test files.
# (Renamed from `filter`, which shadowed the builtin of the same name.)
wanted_files = [f"articles_{language}_100k.csv" for language in languages]
df = df.loc[df['File'].isin(wanted_files)]
display_title("Testdata balance")
display_data(df, sort_by='File')
ax = df.plot(title='Data balance per language',
             kind='barh', x='File', y=['AvgChars','AvgWords'], legend=None,
             width=0.7, figsize=(10,10),
             subplots=True, sharex=False )
ax[0].set_xlabel("Number of characters")
ax[1].set_xlabel("Number of words")
ax[0].set_ylabel("")
ax[1].set_ylabel("")
save(data_balance, layout_rect=(0,0,1,0.95))
# -
# ## ReportScore
#
# + pycharm={"name": "#%%\n"}
# Overall accuracy bar chart: correct predictions / total samples, per model.
df = load(score)
df['Accuracy'] = df['Score'] / df['Total']
display_title("Accuracy per model")
display_data(df)
ax = df.plot(title='Accuracy per model',
             kind='barh', x='Model', y='Accuracy', legend=None,
             xticks=np.arange(0, 1.01, step=0.01), xlim=(0.95,1.0),
             width=0.7, figsize=(10,4) )
ax.set_xlabel("Accuracy")
ax.set_ylabel("")
save(score)
# -
# ## ReportScorePerLanguage
#
# + pycharm={"name": "#%%\n"}
# One horizontal-bar subplot per language, showing each model's accuracy.
df = load(score_per_language)
display_title("Accuracy per language")
display_data(df)
ax = df.plot(title='Accuracy per language',
             kind='barh', x='Model', y=['de','fr','it','en'], legend=None,
             xticks=np.arange(0, 1.01, step=0.01), xlim=(0.90,1.0),
             width=0.7, figsize=(10,10),
             subplots=True, sharex=True )
for i in range(0,4):  # one axis per language, in the y-column order above
    ax[i].set_xlabel("Accuracy")
    ax[i].set_ylabel("")
save(score_per_language, layout_rect=(0,0,1,0.95))
# -
# ## ReportScorePerWord
#
# + pycharm={"name": "#%%\n"}
# Line chart: accuracy as a function of the number of words in the input.
df = load(score_per_word)
models = df['Model'].tolist()
dft = df.T
data = dft[1:]  # drop the transposed 'Model' row, keeping per-word-count values
display_title("Accuracy per word")
display_data(df)
ax = data.plot(title='Accuracy per word',
               kind='line', grid=True,
               xticks=np.arange(0,20, step=1.0),
               yticks=np.arange(0.3, 1.01, step=0.05),
               figsize=(20,10)
               )
ax.set_xlabel("Number of words")
ax.set_ylabel("Accuracy")
ax.legend(models, title="Models", bbox_to_anchor=(0.573, 0.765), fontsize=11)
save(score_per_word)
# -
# ## ReportScoreProbability Overview
# + pycharm={"name": "#%%\n"}
# Aggregate, per model, how many samples were hits vs fails and the average
# predicted probability within each group.
df_probs = load(score_prob_probs)
df_hits = load(score_prob_hits)
df = pd.DataFrame(columns=['Model', 'Hits', 'Fails', 'AvgHits', 'AvgFails'])
# NOTE(review): 5 models and 1000 samples are hard-coded; keep in sync with the CSVs.
for i in range(0,5):
    model = df_probs.iat[i,0]
    hits = 0
    fails = 0
    sum_hits = 0
    sum_fails = 0
    for j in range(1, 1001):
        is_hit = df_hits.iat[i,j]
        value = df_probs.iat[i,j]
        if is_hit:
            hits = hits + 1
            sum_hits = sum_hits + value
        else:
            fails = fails + 1
            sum_fails = sum_fails + value
    # NOTE(review): divides by zero if a model has no hits or no fails at all.
    avg_hits = sum_hits/hits
    avg_fails= sum_fails/fails
    df.loc[i] = [model, hits, fails, avg_hits, avg_fails]
display_title("Score Probabilities Overview")
display_data(df)
# Hits and Fails
ax = df.plot(title='Hits and Fails',
             kind='barh', x='Model', y=['Hits','Fails'],
             width=0.7, figsize=(10,4), stacked=True )
ax.set_xlabel("Samples")
ax.set_ylabel("")
ax.legend(['Hits','Fails'], title="Legend", bbox_to_anchor=(1.15, 1), fontsize=11)
save(score_prob + "HitsAndFails")
# Average Probabilities
ax = df.plot(title='Average Probabilities',
             kind='barh', x='Model', y=['AvgHits','AvgFails'],
             width=0.7, figsize=(10,4) )
ax.set_xlabel("Average Probability")
ax.set_ylabel("")
ax.legend(['Hits','Fails'], title="Legend", bbox_to_anchor=(1.15, 1), fontsize=11)
save(score_prob + "AverageProbability")
# -
# ## ReportScoreProbability Lineplot
# + pycharm={"name": "#%%\n"}
# Per-model line of the predicted probability for each of the 1000 samples.
df = load(score_prob_probs)
display_title("Score Probabilities Lineplot")
display_data(df)
models = df['Model'].tolist()
data = df.T[1:]  # transpose so rows are samples; drop the 'Model' row
data.columns = models
ax = data.plot(title='Probabilities per model',
               kind='line', grid=True,
               xticks=np.arange(0,1001, step=100),
               yticks=np.arange(0.1, 1.01, step=0.1),
               figsize=(20,15),
               subplots=True )
for i in range(0,5):  # one subplot per model
    ax[i].set_xlabel("Sample")
    ax[i].set_ylabel("Probability")
save(score_prob + "Lineplot", layout_rect=(0,0,1,0.95))
# -
# ## ReportScoreProbability Boxplot
# + pycharm={"name": "#%%\n"}
# One box per model, summarising the distribution of its prediction probabilities.
df = load(score_prob_probs)
display_title("Score Probabilities Boxplot")
display_data(df)
models = df['Model'].tolist()
probability_rows = df.drop(['Model'], axis=1)
# Re-shape: one column per model, rows holding that model's per-sample probabilities.
data = pd.DataFrame(
    {name: probability_rows.iloc[row] for row, name in enumerate(models)},
    columns=models,
)
ax = data.boxplot(rot=45)
ax.set_title("Probabilities per model")
ax.set_ylabel("Probability")
save(score_prob + "Boxplot")
# -
# ## ReportSystemPerformance
# + pycharm={"name": "#%%\n"}
# Runtime metrics per model: response time, CPU usage and memory peak.
df = load(system_performance)
df['Memory Peak MB'] = df['Memory Peak'] / (10**6)  # bytes -> megabytes
display_title("System Performance")
display_data(df)
# Response time per call
ax = df.plot(title='Response time per call',
             kind='barh', x='Model', y='Time', legend=None,
             width=0.7, figsize=(10,4) )
ax.set_xlabel("Time [ms]")
ax.set_ylabel("")
save(system_performance + "Time")
# CPU usage during the 1k-prediction run
ax = df.plot(title='CPU usage during 1k predictions',
             kind='barh', x='Model', y='CPU', legend=None,
             xticks=np.arange(0,18, step=1.0),
             width=0.7, figsize=(10,4) )
ax.set_xlabel("Percent [%]")
ax.set_ylabel("")
save(system_performance + "CPU")
# Memory peak during the 1k-prediction run
ax = df.plot(title='Memory peak during 1k predictions',
             kind='barh', x='Model', y='Memory Peak MB', legend=None,
             width=0.7, figsize=(10,4) )
ax.set_xlabel("Memory [MB]")
ax.set_ylabel("")
save(system_performance + "Memory")
# + [markdown] pycharm={"name": "#%% md\n"}
# _The end._
|
reports/Visualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Print the given sequence of numbers using a for loop.
values = [10, 20, 30, 40, 50]
for value in values:
    print(value)
# Print multiples of 10 for numbers in a given range.
# range(1, 5) starts at 1, which replaces the original "skip zero" guard.
for n in range(1, 5):
    print(n * 10)
# Print the first 5 natural numbers using a while loop.
counter = 1
while counter <= 5:
    print(counter)
    counter += 1
# Interactively accumulate numbers until the user enters a negative value.
entry = 0
sum1 = 0
print("Enter numbers to find their sum,negative numbers ends the loop")
while True:
    #int() typecasts string to integer
    entry = int(input())
    if (entry < 0):
        break  # a negative entry terminates the loop without being added
    sum1 += entry
print("Sum =", sum1)
# Demonstrate `continue`: the value 3 is skipped, everything else is printed.
num = 0
for step in range(1, 7):
    num = step
    if num == 3:
        continue
    print(f'Num has value {num}')
print('End of loop')
# Demonstrate nested loops: the inner loop runs fully on every outer iteration.
for outer in range(3):
    print(f"Iteration {outer + 1} of outer loop")
    for inner in range(2):  # nested loop
        print(inner + 1)
    print("Out of inner loop")
print("Out of outer loop")
# # `Exercise 1`
#The output pattern to be generated is
#1
#1 2
#1 2 3
#1 2 3 4
#1 2 3 4 5
# Read n from the user and print an n-row triangle: row i holds the numbers 1..i.
num = int(input("Enter a number to generate its pattern = "))
for i in range(1,num + 1):
    for j in range(1,i + 1):
        print(j,end = " ")  # stay on the same row, separating numbers with spaces
    print()
# +
# Find and print the prime numbers between 2 and 49 using trial division.
def is_prime(n):
    """Return True when integer n is prime (n < 2 is never prime)."""
    if n < 2:
        return False
    j = 2
    while j * j <= n:  # trial division only needs to reach sqrt(n)
        if n % j == 0:  # factor found -> composite
            return False
        j += 1
    return True

for i in range(2, 50):
    if is_prime(i):
        print(i, "is a prime number")  # fixed typo: "aprime" -> "a prime"
print("Bye Bye!!")
# -
|
implementation strategies.ipynb
|
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .scala
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: scala-2.13
//     language: scala
//     name: scala-2.13
// ---

// # The PolyMath Computer proof
//
// We generate a proof similar to that generated during the PolyMath 14 project.

// +
// Load the project jar and bring the free-group machinery into scope.
import $cp.bin.`polymath.jar`
import freegroups._, Word._, ProofFinder._, LinearNormProofs._
import scala.util._

// Register an extra pretty-printer so free-group words render nicely in the REPL.
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      wordHandler
    }
  )
}
// -

// Kick off the (asynchronous) proof search; results are read off the Future below.
val proofFuture = getProofOutputFuture

// ## Viewing the proof
//
// * The proof below is formatted in unicode.
Try(Display.markdown(proofFuture.value.get.get._1))

// To ensure correctness, a proof with arbitrary precision rational numbers is also generated.
Try(Display.markdown(proofFuture.value.get.get._2))

// ## Computing Watson-Crick lengths and bounds
//
// * We can compute these recursively.
// * As the properties used in computation are those of a normalized conjugacy length, we get a bound.
val w1 = Word("aaba!b!")
val w2 = w1 * w1
wcLength(w1)
wcLength(w2)

// ## From bounds to proofs
//
// The same recursive process that gives lengths gives proofs of bounds for _all_ normalized conjugacy invariant lengths.
val pf1 = getProof(w1)
val pf2 = getProof(w2)
pf1.word
pf1.bound
pf2.word
pf2.bound
proofOut(pf1)
proofOut(pf2)

// ## Using homogeneity
// +
import LinNormBound._

// Deduce a bound for w1 from the bound for its square (homogeneity of the norm).
val pf3 = PowerBound(w1, 2, pf2)
// -
pf3.word
pf3.bound

// pf2 bounds w1^2, not w1^3, so this call is expected to fail; Try captures the error.
Try(PowerBound(w1, 3, pf2))
getProofFuture
|
polymath.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
# Data-quality cleanup of a messy "actors" CSV: fix a mis-delimited row,
# reconcile the name columns, normalise a binary column, and fix dtypes.
import numpy as np
import pandas as pd
table = pd.read_csv('/home/sofya/Documents/UNI/ML/ml2020/project3/actor_err.csv')
# -

# First look at the raw table; row 24 turns out to use ':' as its separator.
table

# Re-split row 24 on ':' and spread the pieces back across the columns.
table.at[24] = table.loc[24].Actor.split(':')

# Two similar column groups exist: Actor vs Firstname + Lastname, and some
# last names are missing. If nobody uses a pseudonym, 'Actor' is redundant.
table

# No pseudonyms found — only misspellings or capitalisation differences.
table[table.Actor != table.Firstname+ ' '+ table.Lastname]

# Rebuild Firstname/Lastname from the (authoritative) Actor column for the
# rows where they disagree.
names_index = table[table.Actor != table.Firstname+ ' '+ table.Lastname].index
table.loc[names_index]
table.at[names_index, ['Firstname']] = table.loc[names_index].Actor.str.split(' ').str[0]
table.at[names_index, ['Lastname']] = table.loc[names_index].Actor.str.split(' ').str[1]
table
#
# Row 18 has its values enclosed in quotes; strip them.
table.columns
table.at[18] = table.loc[18].str.replace("'", '')

# Price has mixed notations and NaN; since it is binary, map it to yes/no
# (NaN, and anything not ending in 's', counts as 'no').
table['Price'] = ['no' if pd.isnull(i) or i[-1]!= 's' else('yes') for i in table.Price]

# Reorder columns to start with name/surname, drop the redundant Actor
# column, and check for duplicated rows.
table = table[['Firstname','Lastname', 'Total Gross', 'Number of Movies', 'Average per Movie','#1 Movie', 'Gross', 'Price']]
set(table.duplicated().to_list())  # no duplications
table  # what 'Gross' measures is unclear from the data alone

# Cast the numeric columns from strings to floats.
table.dtypes
table['Total Gross'] = table['Total Gross'].astype('float')
table['Number of Movies'] = table['Number of Movies'].astype('float')
table['Average per Movie'] = table['Average per Movie'].astype('float')
table['Gross'] = table['Gross'].astype('float')

# Per the available hint: Gross is always positive, so take absolute values.
table.sum()
table['Gross'] = abs(table['Gross'])
table
print(table.sum())
table.Price.value_counts()
|
Machine-Learning-2021/project4/data_quality.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import tree
from sklearn import datasets

# --- Dataset discovery -----------------------------------------------------
# Find the 'iris' loader among the sklearn dataset helpers.
for i in dir(datasets):
    if 'iris' in i:
        print(i)

# Another way of listing the dataset loaders via comprehensions.
data=[i for i in dir(datasets)]
y=[i for i in dir(datasets) if 'iris' in i]
y

# Now exploring iris
dir(y)

# Load the iris Bunch object (data, target and metadata).
iris=datasets.load_iris()
iris
# +
# or
# from sklearn.datasets import load_iris
# -
dir(iris)
feature_attribute=iris.feature_names
feature_attribute
iris.target_names

# Feature matrix: one row of 4 measurements per flower.
features=iris.data
features
features.shape
type(features)

# Class labels (0, 1, 2 — one integer per species).
label=iris.target
label
feature=iris.data
label=iris.target
iris.target.shape

# Individual feature columns: sepal length/width, petal length/width.
sl=features[:,0]
sl.shape
sw=features[:,1]
sw.shape
pl=features[:,2]
pw=features[:,3]
# +
# Plotting the data with matplotlib.
import matplotlib.pyplot as plt
plt.scatter(sw,sl)
# -
plt.scatter(sl,sw,label='SWL')
plt.scatter(pl,pw,label='PWL')
plt.scatter(sw,pl,label='swpl')
plt.legend()
#plt.show()

# --- Train/test split and classification -----------------------------------
from sklearn.model_selection import train_test_split
# test_size=0.2 means 20% test data and 80% training data.
data_split=train_test_split(feature,label,test_size=0.2)
train_fea,test_fea,train_label,test_label=data_split

# Fit a decision-tree classifier on the training split.
from sklearn import tree
clf=tree.DecisionTreeClassifier()
trained=clf.fit(train_fea,train_label)

# Predict on the held-out features and compare with the true labels.
predicted=trained.predict(test_fea)
predicted
test_label  # actual answer

# Fraction of correct predictions on the test split.
from sklearn.metrics import accuracy_score
accuracy_score(test_label,predicted)
# +
# data -> category -> 1.train 2.test 3.train_ans 4.test_ans
# -
iris
|
Supervised machine learning/iris_decision.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Rebuild the Entities sheet from the union of relationship endpoints,
# write everything back to Excel and JSON, then connect to Google Sheets.
import pandas
reldf=pandas.read_excel("./RawData/Relationships.xlsx", sheet_name="Relationships")
typedf=pandas.read_excel("./RawData/Relationships.xlsx", sheet_name="Entities")
# Entity names = every unique source or target appearing in a relationship.
# NOTE(review): this assumes the Entities sheet already has exactly as many
# rows as there are unique names — pandas raises otherwise; confirm.
typedf.name=pandas.array(list(set(list(reldf.source.unique())+list(reldf.target.unique()))))

# Write to Excel (overwrites both sheets of the same workbook)
with pandas.ExcelWriter("./RawData/Relationships.xlsx") as writer:
    typedf.to_excel(writer, sheet_name="Entities", index=False)
    reldf.to_excel(writer, sheet_name="Relationships", index=False)

# +
# Write to JSON (one record per row)
with open("./RawData/reldata.json","w") as f:
    f.write(reldf.to_json(orient="records"))
with open("./RawData/typedata.json","w") as f:
    f.write(typedf.to_json(orient="records"))
# -

# Load xetrapal configuration and obtain a Google Drive/Sheets driver.
import xetrapal
from xetrapal import gdastras
a=xetrapal.karma.load_xpal_smriti("/opt/xpal-data//avxpal.json")
avxpal=xetrapal.Xetrapal(a)
config=xetrapal.karma.load_config_json(a.configfile)
pygsheetsconfig = xetrapal.karma.load_config_json(config['Pygsheets']['avdrive'])
gd = gdastras.gd_get_googledriver(pygsheetsconfig)
# +
gd.spreadsheet_titles()
# -
gd.open()
|
SheetGraph.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Deep continuous-bag-of-words (CBOW) text classifier in DyNet:
# sum word embeddings, pass through a small MLP, softmax over tags.
from collections import defaultdict
import time
import random
import dynet as dy
import numpy as np

# Functions to read in the corpus.
# w2i / t2i hand out a fresh integer id for every unseen word / tag.
w2i = defaultdict(lambda: len(w2i))
t2i = defaultdict(lambda: len(t2i))
UNK = w2i["<unk>"]

def read_dataset(filename):
    """Yield (list of word ids, tag id) pairs from a 'tag ||| words' file."""
    with open(filename, "r") as f:
        for line in f:
            tag, words = line.lower().strip().split(" ||| ")
            yield ([w2i[x] for x in words.split(" ")], t2i[tag])

# Read in the data. After the training set is read, freeze w2i so that
# unseen dev words map to UNK instead of growing the vocabulary.
train = list(read_dataset("../data/classes/train.txt"))
w2i = defaultdict(lambda: UNK, w2i)
dev = list(read_dataset("../data/classes/test.txt"))
nwords = len(w2i)
ntags = len(t2i)

# Start DyNet and define trainer
model = dy.Model()
trainer = dy.AdamTrainer(model)

# Define the model: embedding size, hidden size, number of hidden layers.
EMB_SIZE = 64
HID_SIZE = 64
HID_LAY = 2
W_emb = model.add_lookup_parameters((nwords, EMB_SIZE)) # Word embeddings
W_h = [model.add_parameters((HID_SIZE, EMB_SIZE if lay == 0 else HID_SIZE)) for lay in range(HID_LAY)]
b_h = [model.add_parameters((HID_SIZE)) for lay in range(HID_LAY)]
W_sm = model.add_parameters((ntags, HID_SIZE)) # Softmax weights
b_sm = model.add_parameters((ntags)) # Softmax bias

# A function to calculate scores for one value
def calc_scores(words):
    """Return unnormalised tag scores for one sentence (list of word ids)."""
    dy.renew_cg()
    # Bag of words: elementwise sum of the word embeddings.
    h = dy.esum([dy.lookup(W_emb, x) for x in words])
    for W_h_i, b_h_i in zip(W_h, b_h):
        h = dy.tanh( dy.parameter(W_h_i) * h + dy.parameter(b_h_i) )
    return dy.parameter(W_sm) * h + dy.parameter(b_sm)

for ITER in range(100):
    # Perform training: one pass over a shuffled training set per iteration.
    random.shuffle(train)
    train_loss = 0.0
    start = time.time()
    for words, tag in train:
        my_loss = dy.pickneglogsoftmax(calc_scores(words), tag)
        train_loss += my_loss.value()
        my_loss.backward()
        trainer.update()
    print("iter %r: train loss/sent=%.4f, time=%.2fs" % (ITER, train_loss/len(train), time.time()-start))
    # Perform testing: accuracy on the dev set after each training pass.
    test_correct = 0.0
    for words, tag in dev:
        scores = calc_scores(words).npvalue()
        predict = np.argmax(scores)
        if predict == tag:
            test_correct += 1
    print("iter %r: test acc=%.4f" % (ITER, test_correct/len(dev)))
|
01-intro/deep-cbow.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Regularized Regression: L$_1$ and L$_2$
# [](https://github.com/eabarnes1010/course_objective_analysis/tree/main/code)
# [](https://colab.research.google.com/github/eabarnes1010/course_objective_analysis/blob/main/code/regularization_techniques_l1_l2.ipynb)
#
#
#
# Avoiding overfitting with regularization.
# Detect whether this notebook is running on Google Colab.
try:
    import google.colab  # only importable inside the Colab runtime
    IN_COLAB = True
except ImportError:  # was a bare `except:`, which would hide unrelated errors
    IN_COLAB = False
print('IN_COLAB = ' + str(IN_COLAB))
# +
if IN_COLAB:
# !apt-get install libproj-dev proj-data proj-bin
# !apt-get install libgeos-dev
# !pip install cython
# !pip install cartopy
# !apt-get -qq install python-cartopy python3-cartopy
# !pip uninstall -y shapely
# !pip install shapely --no-binary shapely
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import scipy.io as io
import copy as copy
import csv
import datetime
import pandas as pd
import xarray as xr
import matplotlib as mpl
import cartopy as ct
# set figure defaults
mpl.rcParams['figure.dpi'] = 150
plt.rcParams['figure.figsize'] = (12.0/2, 8.0/2)
# -
# ## Example: when $L_1$ makes sense
# $L_1$ regularization (or LASSO) adds a term to the loss function that is proportional to the sum of the absolute value of the regression coefficients:
#
# $\lambda \sum_i^N |\beta_i|$
#
# This term acts to force small coefficients to *exactly* zero, and allowing only a few to stay large. $\lambda$ (or $\alpha$ as it is specified below), sets how important this term is.
#
# Let's look at example of when this may be useful by doing some regression with hourly data from Christmas Field.
# +
# Load hourly Christman Field weather data (via wget on Colab, locally
# otherwise), select a subset of variables, convert precip to mm.
if IN_COLAB:
    # !pip install wget
    import wget
    filename = wget.download("https://raw.githubusercontent.com/eabarnes1010/course_objective_analysis/main/data/christman_2016.csv")
else:
    filename = '../data/christman_2016.csv'

data_input = np.genfromtxt(filename, delimiter = ',')

# grab the variables I want (columns of the raw csv)
# grab_indices = [2,3,5,9,10,11]
grab_indices = [4,2,5,9,10,11]
data = data_input[:,grab_indices]
names_input = ['date','time','temp (F)', 'RH (%)', 'DewPt (F)','Wind (mph)', 'Dir (deg.)', 'Gust (mph)', 'Gust Dir (deg.)','Pres (mb)', 'Solar (W/m^2)','Precip (in)']
names = [names_input[i] for i in grab_indices]

# convert precip from inches to mm in place, and rename the column to match
data[:,[i for i, s in enumerate(names) if 'Precip' in s]] = data[:,[i for i, s in enumerate(names) if 'Precip' in s]]*25.4
names[names.index('Precip (in)')] = 'Precip (mm)'

print(np.shape(data))
print(names)
# -
# Since all of the units are different, we will standardize everything here.
# +
# standardize the data (zero mean, unit variance for every column)
data_std = preprocessing.scale(data)
print(data_std.mean(axis=0))
print(data_std.std(axis=0))
# -
# Next we set our predictors and predictands.
# +
# predict Dewpoint (column 0 after the reorder above)
y = data_std[:,0]
# define predictors (all 5 other variables)
x = data_std[:,1:]
# -
# Okay - let's fit the line and look how different forms of regularization impact the solution. Remember that $\lambda$ determines how important the regularization term is when computing the loss function. Make it big, and regularization becomes very important. Make $\lambda=0$ and you are back to standard OLS regression.
#
# *Note that in sklearn they use $\alpha$ instead of $\lambda$.
# +
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42, shuffle=True)
print(np.shape(X_train))
print(np.shape(X_test))
# +
#--------------------------------------
# Regression Fitting: OLS vs L1 (LASSO) vs L2 (Ridge) on the same split
regOLS = linear_model.LinearRegression(fit_intercept=True)
regL1 = linear_model.Lasso(alpha=.15, fit_intercept=True)
regL2 = linear_model.Ridge(alpha=2000, fit_intercept=True)

regOLS.fit(X_train,y_train)
regL1.fit(X_train,y_train)
regL2.fit(X_train,y_train)

predictOLS = regOLS.predict(X_test)
predictL1 = regL1.predict(X_test)
predictL2 = regL2.predict(X_test)
#--------------------------------------

# Plot the fitted coefficient per predictor for each model
MS = 4
plt.figure(figsize=(7,3))
plt.plot(regOLS.coef_,'.-', markersize=9, label='OLS')
plt.plot(regL1.coef_,'.--', markersize=15, label='L1 (LASSO)')
# plt.plot(regL2.coef_,'.--', markersize=9, label='L2 (Ridge)')
plt.xticks(np.arange(0,5), labels=names[1:], fontsize=8)
plt.xlabel('predictor')
plt.ylabel('regression coefficient')
plt.axhline(y=0,color='gray', linewidth=1)
plt.legend()
plt.xlim(-.5,4.5)
plt.ylim(-.45,1.05)
plt.title('predicting ' + names[0])
plt.show()

# Predicted vs actual dew point on the test set
plt.figure(figsize=(7/1.5,4/1.5))
plt.title('predicted vs actual value...')
plt.plot(y_test,predictOLS, '.', markersize=MS, label='OLS')
plt.plot(y_test,predictL1, '.', markersize=MS, label='L1')
# plt.plot(y_test,predictL2, '.', markersize=MS, label='L2')
plt.plot((-3,3),(-3,3),'-k')
plt.ylabel('predicted value')
plt.xlabel('actual value')
plt.legend()
plt.show()
# -
# -
# Fun! LASSO regularization helps us determine which variables are the *most* important, and helps us to avoid overfitting. It sets small coefficients to zero.
# ## Example: when Ridge ($L_2$) makes sense
# While LASSO helps set small coefficients to zero, Ridge regression (or $L_2$ regularization) helps spread/share the weights across all of the regression coefficients. The term added to the loss function is:
#
# $\lambda \sum_i^N \beta_i^2$
#
# $\lambda$ (or $\alpha$ as it is specified below), sets how important this term is.
#
# Let's look at example of when this may be useful by doing some regression with daily Z500 data across the globe.
#
# +
# Load daily Z500 (500-hPa geopotential height) fields, via wget on Colab
# or from the local data directory otherwise.
if IN_COLAB:
    # !pip install wget
    import wget
    filename = wget.download('https://eabarnes-data.atmos.colostate.edu/course_objective_analysis/z500_daily.mat')
else:
    filename = '../data/z500_daily.mat'

DATA = io.loadmat(filename)
Xall = np.array(DATA['X'])
LAT = np.array(DATA['LAT'])[0,:]
LON = np.array(DATA['LONG'])[0,:]
TIME = np.array(DATA['TIME'])
print('data is loaded')

# Subsample every 4th grid point so there is less data to deal with
X = copy.deepcopy(Xall[:,::4,::4])
LAT = LAT[::4]
LON = LON[::4]
del Xall
# +
# get time vector in order for xarray (year/month/day columns of TIME)
dateList = []
for i, junk in enumerate(TIME[:,0]):
    dateList.append(datetime.datetime(int(TIME[i,1]),int(TIME[i,2]),int(TIME[i,3])))

# create xarray DataArray with labelled time/lat/lon coordinates
da = xr.DataArray(X,dims=('time','latitude','longitude'),coords={'latitude': LAT, 'longitude': LON, 'time': dateList}, name='z500')
# +
# Standardize against the monthly climatology, then keep only DJF days.
climatology_mean = da.groupby("time.month").mean("time")
climatology_std = da.groupby("time.month").std("time")
anomalies = xr.apply_ufunc(
    lambda x, m, s: (x - m) / s,
    da.groupby("time.month"),
    climatology_mean,
    climatology_std,
)
X = da.where(anomalies['time.season'] == 'DJF',drop=True)
print(np.shape(X))
# +
# Define the predictand point (a single grid cell's time series)
predictLatIndex = 6
predictLonIndex = 20
y = np.squeeze(X[:,predictLatIndex,predictLonIndex])
da['longitude'].values[predictLonIndex],da['latitude'].values[predictLatIndex]
# -
# Define the predictors
# set the northern hemisphere to zero so that only the southern hemisphere is used for predicting our point
x = copy.deepcopy(X)
x[:,0:8,:] = 0.
# flatten each day's lat-lon field into one predictor vector
x = np.reshape(x.values,(np.shape(x)[0],np.shape(x)[1]*np.shape(x)[2]))
print(x.shape)
# The above code sets the values near the predictand (y) to zero so that they cannot be used in the regression. That is, anything close to the same longitude as the predictand cannot be used. You will notice this when you plot the regression coefficients on a map, and they are colored white.
# +
#--------------------------------------
# Regression Fitting: Ridge with a large alpha spreads the weight across
# the many correlated gridpoint predictors.
# reg = linear_model.LinearRegression()
# reg = linear_model.Lasso(alpha=200.)
reg = linear_model.Ridge(alpha=1e8)
reg.fit(x,y)
#--------------------------------------

# reshape the flat coefficient vector back onto the lat-lon grid for plotting
data = np.reshape(reg.coef_,(len(da['latitude'].values),len(da['longitude'].values)))

# get rid of line at long = 0.0 by appending a cyclic longitude column
data_cyc = np.append(data,data[:,0:1],axis=1)
lons_cyc = np.append(da['longitude'].values,360.)

# plot the figure of regression coefficients on a world map
data_crs = ct.crs.PlateCarree()
plt.figure(figsize=(11,3))

ax = plt.subplot(1,2,1,projection=ct.crs.PlateCarree())
ax.set_global()
ax.coastlines(linewidth = .75)
maxval = np.nanmax(np.abs(reg.coef_[:]))
image = ax.pcolor(lons_cyc, da['latitude'].values, data_cyc, transform=data_crs, cmap='RdBu_r', vmin=-maxval, vmax = maxval)
image.set_clim(-maxval,maxval)
cb = plt.colorbar(image, shrink=.5, orientation="horizontal", pad=.05)
cb.set_label('regression coefficient', fontsize=12)
# mark the predictand location with an 'x'
plt.plot(da['longitude'].values[predictLonIndex],da['latitude'].values[predictLatIndex],'x',markersize=7, linewidth=10, color = 'fuchsia', transform=data_crs)

# histogram of coefficient magnitudes (log frequency axis)
plt.subplot(1,2,2)
plt.hist(reg.coef_[:],20)
plt.yscale('log')
plt.xlabel('coefficient value')
plt.ylabel('frequency')
plt.show()
# -
# I love ridge regression since I do a lot of research with *maps*. $L_2$ regularization helps the weight get spread across predictors - thus often taking into account the correlations across predictors I know are there.
|
code/regularization_techniques_l1_l2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit ('env')
# metadata:
# interpreter:
# hash: c292faa64a06b85ee29012afc185ba9a6fb4d4e01057ca1d6e17bce0ce64ff1f
# name: python3
# ---
# +
# Build train/dev/test CSV splits: K=1 reproduces the provided train/dev
# split, then 5- and 10-fold stratified cross-validation splits.
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import collections
import os
# +
train = pd.read_json('../data/raw/train.jsonl', lines=True)
dev = pd.read_json('../data/raw/dev_unseen.jsonl', lines=True)
test = pd.read_json('../data/raw/test_unseen.jsonl', lines=True)
vision = pd.read_csv('../data/processed/vision2.csv').drop('id',axis=1)
# -

# Attach the per-image vision features to each split (left join on img).
train = pd.merge(train,vision,how='left',on='img')
dev = pd.merge(dev,vision,how='left',on='img')
test = pd.merge(test,vision,how='left',on='img')

# ### Just one fold (normal train/dev)
# +
# K = 1 normal dev
K = 1
i = 0
path = '../data/folds/'+str(K)
# NOTE(review): makedirs raises if the folder already exists, so rerunning
# the notebook fails here; consider exist_ok=True.
os.makedirs(path)

y_t = train['label']
X_t = train.drop('label',axis=1,inplace=False)
y_d = dev['label']
X_d = dev.drop('label',axis=1,inplace=False)

X_t.to_csv(path+'/X_train_split_'+str(i+1)+'.csv',index=False)
X_d.to_csv(path+'/X_dev_split_'+str(i+1)+'.csv',index=False)
y_t.to_csv(path+'/y_train_split_'+str(i+1)+'.csv',index=False)
y_d.to_csv(path+'/y_dev_split_'+str(i+1)+'.csv',index=False)
test.to_csv(path+'/X_test.csv',index=False)
# -

# ### KFOLD
# For cross-validation, pool train+dev before splitting into folds.
train = pd.concat([train,dev],axis=0)
# +
K = [5,10]
y = train['label']
X = train.drop('label',axis=1,inplace=False)
for k in K:
    print('K = ',k)
    path = '../data/folds/'+str(k)
    os.makedirs(path)
    # Stratified folds keep the label ratio consistent across folds.
    sss = StratifiedKFold(n_splits=k, random_state=420,shuffle=True)
    for i, (train_index, dev_index) in enumerate(sss.split(X,y)):
        print('--------FOLD ',i)
        X_t, X_d = X.iloc[train_index], X.iloc[dev_index]
        y_t, y_d = y.iloc[train_index], y.iloc[dev_index]
        X_t.to_csv(path+'/X_train_split_'+str(i+1)+'.csv',index=False)
        X_d.to_csv(path+'/X_dev_split_'+str(i+1)+'.csv',index=False)
        y_t.to_csv(path+'/y_train_split_'+str(i+1)+'.csv',index=False)
        y_d.to_csv(path+'/y_dev_split_'+str(i+1)+'.csv',index=False)
    test.to_csv(path+'/X_test.csv',index=False)
# -
|
notebooks/folds.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>
# Minimal ipywidgets tour: sliders, display, and interact().
import ipywidgets as widgets  # import the ipywidget library
from IPython.display import display  # this will display the widget

# Learn all about the widget types [here](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Basics.html).

w = widgets.IntSlider()  # Create an integer slider object
display(w)  # display your slider object

# Displaying the same widget again creates a second, synced view of it.
display(w)

# A fully-configured slider, spelling out every keyword argument.
w = widgets.IntSlider(
    value=7,
    min=0,
    max=10,
    step=1,
    description='Test:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)
display(w)

# The current slider position (updates live as the user drags it).
w.value
# +
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact

def LinearPlotWidgetFcn(slope, y_intercept):
    """Plot y = slope*x + y_intercept for x in [0, 100] on fixed axes."""
    x = np.arange(101)  # an x-vector from 0 to 100
    y = x*slope + y_intercept
    plt.plot(x, y, lw = 2)
    plt.xlim(0., 150.)
    plt.ylim(0., 150.)
    plt.ylabel('y-axis (a.u.)')
    plt.xlabel('x-axis (a.u.)')
    plt.show()

# Wire the plotting function to two sliders; the plot redraws on change.
interact(
    LinearPlotWidgetFcn,
    slope = widgets.IntSlider( min=-10, max=10, step=1, value=0, description='Slope:' ),
    y_intercept = widgets.IntSlider( min=0, max=100, step=5, value=50, description='Y-intercept:' )
);
# -
|
dev/widget_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 线性回归的简洁实现
# :label:`sec_linear_concise`
#
# 在过去的几年里,出于对深度学习强烈的兴趣,
# 许多公司、学者和业余爱好者开发了各种成熟的开源框架。
# 这些框架可以自动化基于梯度的学习算法中重复性的工作。
# 在 :numref:`sec_linear_scratch`中,我们只运用了:
# (1)通过张量来进行数据存储和线性代数;
# (2)通过自动微分来计算梯度。
# 实际上,由于数据迭代器、损失函数、优化器和神经网络层很常用,
# 现代深度学习库也为我们实现了这些组件。
#
# 在本节中,我们将介绍如何(**通过使用深度学习框架来简洁地实现**)
# :numref:`sec_linear_scratch`中的(**线性回归模型**)。
#
# ## 生成数据集
#
# 与 :numref:`sec_linear_scratch`中类似,我们首先[**生成数据集**]。
#
# + origin_pos=3 tab=["tensorflow"]
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l
# + origin_pos=4 tab=["tensorflow"]
true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
# + [markdown] origin_pos=5
# ## 读取数据集
#
# 我们可以[**调用框架中现有的API来读取数据**]。
# 我们将`features`和`labels`作为API的参数传递,并通过数据迭代器指定`batch_size`。
# 此外,布尔值`is_train`表示是否希望数据迭代器对象在每个迭代周期内打乱数据。
#
# + origin_pos=8 tab=["tensorflow"]
def load_array(data_arrays, batch_size, is_train=True): #@save
    """Construct a TensorFlow data iterator over (features, labels) arrays."""
    dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
    if is_train:
        # Shuffle within a 1000-sample buffer each epoch (training only).
        dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    return dataset
# + origin_pos=9 tab=["tensorflow"]
batch_size = 10
data_iter = load_array((features, labels), batch_size)
# + [markdown] origin_pos=10
# 使用`data_iter`的方式与我们在 :numref:`sec_linear_scratch`中使用`data_iter`函数的方式相同。为了验证是否正常工作,让我们读取并打印第一个小批量样本。
# 与 :numref:`sec_linear_scratch`不同,这里我们使用`iter`构造Python迭代器,并使用`next`从迭代器中获取第一项。
#
# + origin_pos=11 tab=["tensorflow"]
next(iter(data_iter))
# + [markdown] origin_pos=12
# ## 定义模型
#
# 当我们在 :numref:`sec_linear_scratch`中实现线性回归时,
# 我们明确定义了模型参数变量,并编写了计算的代码,这样通过基本的线性代数运算得到输出。
# 但是,如果模型变得更加复杂,且当你几乎每天都需要实现模型时,你会想简化这个过程。
# 这种情况类似于为自己的博客从零开始编写网页。
# 做一两次是有益的,但如果每个新博客你就花一个月的时间重新开始编写网页,那并不高效。
#
# 对于标准深度学习模型,我们可以[**使用框架的预定义好的层**]。这使我们只需关注使用哪些层来构造模型,而不必关注层的实现细节。
# 我们首先定义一个模型变量`net`,它是一个`Sequential`类的实例。
# `Sequential`类将多个层串联在一起。
# 当给定输入数据时,`Sequential`实例将数据传入到第一层,
# 然后将第一层的输出作为第二层的输入,以此类推。
# 在下面的例子中,我们的模型只包含一个层,因此实际上不需要`Sequential`。
# 但是由于以后几乎所有的模型都是多层的,在这里使用`Sequential`会让你熟悉“标准的流水线”。
#
# 回顾 :numref:`fig_single_neuron`中的单层网络架构,
# 这一单层被称为*全连接层*(fully-connected layer),
# 因为它的每一个输入都通过矩阵-向量乘法得到它的每个输出。
#
# + [markdown] origin_pos=15 tab=["tensorflow"]
# 在Keras中,全连接层在`Dense`类中定义。
# 由于我们只想得到一个标量输出,所以我们将该数字设置为1。
#
# 值得注意的是,为了方便使用,Keras不要求我们为每个层指定输入形状。
# 所以在这里,我们不需要告诉Keras有多少输入进入这一层。
# 当我们第一次尝试通过我们的模型传递数据时,例如,当后面执行`net(X)`时,
# Keras会自动推断每个层输入的形状。
# 我们稍后将详细介绍这种工作机制。
#
# + origin_pos=18 tab=["tensorflow"]
# keras是TensorFlow的高级API
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1))
# + [markdown] origin_pos=19
# ## (**初始化模型参数**)
#
# 在使用`net`之前,我们需要初始化模型参数。
# 如在线性回归模型中的权重和偏置。
# 深度学习框架通常有预定义的方法来初始化参数。
# 在这里,我们指定每个权重参数应该从均值为0、标准差为0.01的正态分布中随机采样,
# 偏置参数将初始化为零。
#
# + [markdown] origin_pos=22 tab=["tensorflow"]
# TensorFlow中的`initializers`模块提供了多种模型参数初始化方法。
# 在Keras中最简单的指定初始化方法是在创建层时指定`kernel_initializer`。
# 在这里,我们重新创建了`net`。
#
# + origin_pos=25 tab=["tensorflow"]
initializer = tf.initializers.RandomNormal(stddev=0.01)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))
# + [markdown] origin_pos=28 tab=["tensorflow"]
# 上面的代码可能看起来很简单,但是你应该注意到这里的一个细节:
# 我们正在为网络初始化参数,而Keras还不知道输入将有多少维!
# 网络的输入可能有2维,也可能有2000维。
# Keras让我们避免了这个问题,在后端执行时,初始化实际上是*推迟*(deferred)执行的。
# 只有在我们第一次尝试通过网络传递数据时才会进行真正的初始化。
# 请注意,因为参数还没有初始化,所以我们不能访问或操作它们。
#
# + [markdown] origin_pos=29
# ## 定义损失函数
#
# + [markdown] origin_pos=32 tab=["tensorflow"]
# 计算均方误差使用的是`MeanSquaredError`类,也称为平方$L_2$范数。
# 默认情况下,它返回所有样本损失的平均值。
#
# + origin_pos=35 tab=["tensorflow"]
loss = tf.keras.losses.MeanSquaredError()
# + [markdown] origin_pos=36
# ## 定义优化算法
#
# + [markdown] origin_pos=39 tab=["tensorflow"]
# 小批量随机梯度下降算法是一种优化神经网络的标准工具,
# Keras在`optimizers`模块中实现了该算法的许多变种。
# 小批量随机梯度下降只需要设置`learning_rate`值,这里设置为0.03。
#
# + origin_pos=42 tab=["tensorflow"]
trainer = tf.keras.optimizers.SGD(learning_rate=0.03)
# + [markdown] origin_pos=43
# ## 训练
#
# 通过深度学习框架的高级API来实现我们的模型只需要相对较少的代码。
# 我们不必单独分配参数、不必定义我们的损失函数,也不必手动实现小批量随机梯度下降。
# 当我们需要更复杂的模型时,高级API的优势将大大增加。
# 当我们有了所有的基本组件,[**训练过程代码与我们从零开始实现时所做的非常相似**]。
#
# 回顾一下:在每个迭代周期里,我们将完整遍历一次数据集(`train_data`),
# 不停地从中获取一个小批量的输入和相应的标签。
# 对于每一个小批量,我们会进行以下步骤:
#
# * 通过调用`net(X)`生成预测并计算损失`l`(前向传播)。
# * 通过进行反向传播来计算梯度。
# * 通过调用优化器来更新模型参数。
#
# 为了更好的衡量训练效果,我们计算每个迭代周期后的损失,并打印它来监控训练过程。
#
# + origin_pos=46 tab=["tensorflow"]
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        # Record the forward pass so gradients can be taken with respect to
        # the trainable variables, then apply one SGD step per minibatch.
        with tf.GradientTape() as tape:
            l = loss(net(X, training=True), y)
        grads = tape.gradient(l, net.trainable_variables)
        trainer.apply_gradients(zip(grads, net.trainable_variables))
    # Loss over the full dataset at the end of each epoch, for monitoring.
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
# + [markdown] origin_pos=47
# 下面我们[**比较生成数据集的真实参数和通过有限数据训练获得的模型参数**]。
# 要访问参数,我们首先从`net`访问所需的层,然后读取该层的权重和偏置。
# 正如在从零开始实现中一样,我们估计得到的参数与生成数据的真实参数非常接近。
#
# + origin_pos=50 tab=["tensorflow"]
w = net.get_weights()[0]
print('w的估计误差:', true_w - tf.reshape(w, true_w.shape))
b = net.get_weights()[1]
print('b的估计误差:', true_b - b)
# + [markdown] origin_pos=51
# ## 小结
#
# + [markdown] origin_pos=54 tab=["tensorflow"]
# * 我们可以使用TensorFlow的高级API更简洁地实现模型。
# * 在TensorFlow中,`data`模块提供了数据处理工具,`keras`模块定义了大量神经网络层和常见损耗函数。
# * TensorFlow的`initializers`模块提供了多种模型参数初始化方法。
# * 维度和存储可以自动推断,但注意不要在初始化参数之前尝试访问参数。
#
# + [markdown] origin_pos=55
# ## 练习
#
# 1. 如果将小批量的总损失替换为小批量损失的平均值,你需要如何更改学习率?
# 1. 查看深度学习框架文档,它们提供了哪些损失函数和初始化方法?用Huber损失代替原损失,即
# $$l(y,y') = \begin{cases}|y-y'| -\frac{\sigma}{2} & \text{ if } |y-y'| > \sigma \\ \frac{1}{2 \sigma} (y-y')^2 & \text{ 其它情况}\end{cases}$$
# 1. 你如何访问线性回归的梯度?
#
# + [markdown] origin_pos=58 tab=["tensorflow"]
# [Discussions](https://discuss.d2l.ai/t/1780)
#
|
d2l/tensorflow/chapter_linear-networks/linear-regression-concise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/", "height": 340} colab_type="code" id="iGDkPYZyU0gP" outputId="42d89930-1b15-41f2-ced0-cd883440233a"
# # !pip install tensorflow-gpu==2.0.0-alpha0
# + colab={} colab_type="code" id="Tq37pAWleXue"
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
# %matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="DH7JbWuGV9ss" outputId="a87ae2ed-c5aa-4e50-ddac-a16439520a30"
print(tf.__version__)
# + colab={} colab_type="code" id="020V8mftKq8z"
(x_train, y_train), (x_val, y_val) = keras.datasets.fashion_mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/", "height": 1445} colab_type="code" id="9DhUxaCAQHfo" outputId="fd8a9e25-7fc3-4acd-b96c-8b13337e6c71"
x_train[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="aIuA7mFJK9Ly" outputId="7942c1e6-20ee-4c7b-ec9d-d9012e1b137c"
x_train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="_WeZL-GTPDhR" outputId="5d80b42c-db7b-4c86-fb72-e9357ad9b8ef"
x_val.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 487} colab_type="code" id="MsvCl_toPFS-" outputId="e234e098-ef6d-4e39-c429-4a54738e50fe"
plt.imshow(x_train[0])
plt.grid(False)
# + colab={} colab_type="code" id="60DfGDhKMrRk"
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + colab={"base_uri": "https://localhost:8080/", "height": 594} colab_type="code" id="AcKKsJQ0PbkB" outputId="b8759f86-4651-4444-96c4-8a4492e7918c"
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.imshow(x_train[i], cmap=plt.cm.binary)
plt.xlabel(class_names[y_train[i]])
# + colab={} colab_type="code" id="w12lscLuV_f7"
def preprocess(x, y):
    """Scale pixel values to [0, 1] floats and cast labels to int64."""
    x = tf.cast(x, tf.float32) / 255.0
    y = tf.cast(y, tf.int64)
    return x, y

def create_dataset(xs, ys, n_classes=10):
    """Build a shuffled, batched tf.data pipeline of (image, one-hot label)."""
    ys = tf.one_hot(ys, depth=n_classes)
    return tf.data.Dataset.from_tensor_slices((xs, ys)) \
        .map(preprocess) \
        .shuffle(len(ys)) \
        .batch(128)
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" id="jGtfbje0WNp9" outputId="91402c22-f3be-4bb8-fe94-bac9c5453404"
train_dataset = create_dataset(x_train, y_train)
val_dataset = create_dataset(x_val, y_val)

# A simple MLP classifier over flattened 28x28 Fashion-MNIST images.
model = keras.Sequential([
    keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
    keras.layers.Dense(units=256, activation='relu'),
    keras.layers.Dense(units=192, activation='relu'),
    keras.layers.Dense(units=128, activation='relu'),
    keras.layers.Dense(units=10, activation='softmax')
])

# BUGFIX: the final layer already applies softmax, so the loss receives
# probabilities, not logits. The original from_logits=True mismatched the
# model output and miscomputed the cross-entropy.
model.compile(optimizer='adam',
              loss=tf.losses.CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

history = model.fit(
    train_dataset.repeat(),
    epochs=10,
    steps_per_epoch=500,
    validation_data=val_dataset.repeat(),
    validation_steps=2
)
# + colab={"base_uri": "https://localhost:8080/", "height": 525} colab_type="code" id="0Q4NeEOYWaav" outputId="70861629-66a9-4500-bfe4-9fdc863497a9"
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.ylim((0, 1)) # Uncomment this when showing you model for pay raise
plt.legend(['train', 'test'], loc='upper left');
# + colab={"base_uri": "https://localhost:8080/", "height": 525} colab_type="code" id="7Ksi_0s3eyAW" outputId="8cf8dbe1-5b56-46ee-a5eb-6085893175ca"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.ylim((1.5, 2))
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + colab={} colab_type="code" id="nqTjtkNye9F_"
predictions = model.predict(val_dataset)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="KDObxsHihv13" outputId="86bab1b9-b663-460c-a5a2-959226d38cd1"
predictions[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="e7JxvqQahwa1" outputId="a2e4ed04-32ce-40b1-8533-4bb25c37e14b"
np.argmax(predictions[0])
# + colab={} colab_type="code" id="FZH39Ml5hys2"
def plot_image(i, predictions_array, true_label, img):
    """Show sample *i* with its predicted vs. true class in the axis label.

    The label is drawn green when the top prediction matches the true
    class and red otherwise; the confidence of the top prediction is
    included as a percentage. Relies on the module-level ``class_names``.
    """
    probs = predictions_array[i]
    actual = true_label[i]
    sample = img[i]

    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(sample, cmap=plt.cm.binary)

    guess = np.argmax(probs)
    color = 'green' if guess == actual else 'red'
    plt.xlabel("Predicted: {} {:2.0f}% (True: {})".format(class_names[guess],
                                                          100*np.max(probs),
                                                          class_names[actual]),
               color=color)
# + colab={"base_uri": "https://localhost:8080/", "height": 485} colab_type="code" id="YsaWeJEHQS_y" outputId="900fb8db-5641-4700-cbf7-e3cb0d3ad0b2"
i = 0
plot_image(i, predictions, y_val, x_val)
# -
|
01.neural_network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Rizwan-Ahmed-Surhio/Accountants/blob/main/11_Dictionaries.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="R619N_V-7o9N"
# ### Dictionaries
# + id="WhYqPnFQ7hAi"
yearly_Sales = [40000, 42000, 47000, 52000, 39000]
# + id="LFMurkV575TP"
years = [2016,2019,2017,2020,2018]
# + [markdown] id="-B9Bydr39Ysh"
# #### Access List element using Integer
# + id="TkUfZwIu9WSX"
yearly_Sales[1]
# + [markdown] id="XEmnwm0U9fy6"
# #### Accessing Dictionary Element Using Key
# + id="b4xnth-I8FHy"
sales = {2016: 40000,
2017: 42000,
2018: 47000,
2019: 52000,
2020: 39000}
# + id="EIRGqDKq8lrN"
type(sales)
# + id="vekpa7Wf8nF3"
sales[2018]
# + [markdown] id="9Xvf9aJi86KY"
# ##### Dictionary can have any type as key not just the integer in case of Lists
# + id="gLb9EGje8wu7"
# + [markdown] id="qJJyL2tX-OoF"
# ## Create a Dictionary of Apple Inc. financial data
# + id="K-nRKHi1-cCc"
apple = {"date": "2020-09-26",
"symbol": "AAPL",
"ebitda": 81020000000,
"ebitdaratio": 0.295138699160337,
"operatingIncome": 66288000000,
"operatingIncomeRatio": 0.244398302460703,
"eps": 3.36,
"epsdiluted": 3.36,
"BasedInUS": True
}
# + id="6g7CjaLISHdv"
type(apple)
# + [markdown] id="GFbdAkzPSYTw"
# #### Find Apple EPS and Basedin In US data type
# + id="V0cB1BG1WBrm"
apple['eps']
# + id="VdL5fQOfSOQZ"
apple['BasedInUS']
# + id="H4ReODOWSTn0"
# + [markdown] id="-AaMQQxro-7z"
# ### Create a dictionary for Arrow company financial data
# + id="bJgi4UZSpBs6"
arrow = {'Revenue': 1000000,
'cost_of_sales': 600000,
'admin_cost': 150000,
'marketing_cost': 50000,
'interest': 2000,
'is_profitable': True}
# + id="y5jqqEOKenPW"
arrow
# + id="JyyNn2ZfgfC9"
arrow['Revenue']
# + id="8oeBAkKMgi0a"
arrow['admin_cost']
# + [markdown] id="bfVje0ONzzxi"
# ### Adding a new key value pair in existing dictionary
#
# + id="p6-oqagwz6We"
arrow['profit_amount'] = 198000
# + id="g72jOC-TkSRC"
arrow
# + [markdown] id="GzyBL10Q0ZdL"
# ### Change existing value of a key
# + id="Wnh5uRKd0dwh"
arrow['Revenue'] = 1200000
# + id="IFjPGIzZnVF_"
arrow
# + [markdown] id="pdBziJRu6dKJ"
# ### Nested dictionary structures , List in a Dictionary
# + id="urhVMz2j6nLC"
employees = {
'accounting' : 'Ahmed',
'marketing' : 'Richard',
'HR' : 'John',
'IT' : ['Sofi', 'Irfan', 'Vikas', 'Sam']
}
# + id="qn3dkZxt8TjX"
employees['IT']
# + id="BKD6zuvb8Vzg"
type(employees['IT'])
# + id="AynfuOyb8_w-"
# + [markdown] id="Lnwsne2L-QGz"
# ### get() Method
# + id="5vVo8qWK-SGT"
apple['EBITDA']
# + id="5X4aGhPg-hcM"
print(apple.get("EBITDA"))
# + id="lBU7u2if_fbX"
print(apple.get("eps"))
# + id="D1g1kk1ez2zM"
print(apple.get("population_Growth_in_US", "Sorry value not found"))
|
11_Dictionaries.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## HTML post-process
#
# `Typora` generates an HTML file with inline CSS styling, with the selected theme at the moment of the export.
# The cascade is as follows:
#
# 1. Base CSS from Typora, extracted to its own CSS file: `typora.base.css`
# 1. Selected base theme, in our case the files `monospace-dark.css` or `monospace.css`
# 1. Custom common CSS for Typora, called `base.user.css` in the Typora themes folder.
# 1. Custom CSS for each theme, using files with ".user" postfix: `monospace-dark.user.css` or `monospace.user.css`
#
# By using media queries we can implement both themes to be in sync with the viewer local config,
# and force any of the 2 themes for `@media print`.
#
# The expected html in `<head>` should contain these CSS:
#
# ```html
# <link href='typora.base.css' rel='stylesheet' type='text/css' />
# <link href='monospace-dark.css' rel='stylesheet' type='text/css' media="screen and (prefers-color-scheme: dark), screen and (prefers-color-scheme: no-preference), print"/>
# <link href='monospace.css' rel='stylesheet' type='text/css' media="screen and (prefers-color-scheme: light)"/>
# <link href='base.user.css' rel='stylesheet' type='text/css' />
# <link href='monospace-dark.user.css' rel='stylesheet' type='text/css' media="screen and (prefers-color-scheme: dark), screen and (prefers-color-scheme: no-preference), print"/>
# <link href='monospace.user.css' rel='stylesheet' type='text/css' media="screen and (prefers-color-scheme: light)" />
# ```
#
# A manual switch (hidden for print) is also added to toggle between themes, by just inserting 2 lines:
#
# ```html
# <!-- on head -->
# <script type="module" src="https://googlechromelabs.github.io/dark-mode-toggle/src/dark-mode-toggle.mjs"></script>
# ```
#
# ```html
# <!-- at the end of body -->
# <aside>
# <dark-mode-toggle id="dark-mode-toggle-1" appearance="toggle" legend="dark mode" dark="on" light="off" remember="remember"></dark-mode-toggle>
# </aside>
# ```
#
# ### Jinja2 template render
#
# As the `head` gets more complex (favicons, open graph meta tags, etc), **it is easier to maintain** it with a jinja2 template, where we fill some basic data for the static site, and insert the content from the extracted `body` in the Typora-generated html.
# +
"""
Script to post-process the Typora-generated web page.
* Replace head with:
- referenced local CSS files,
- referenced local badges,
- Open Graph meta tags,
- favicon meta tags,
* Add dark/light theme switcher
"""
import re
from pathlib import Path
from jinja2 import Template
# paths
#p_base = Path(__file__).parents[1]
p_base = Path("../")
p_index = p_base / "index.html"
p_typora = p_base / "resumes" / "resume-eugenio-panadero.html"
p_templ = p_base / "notebooks" / "layout.html"
rel_path_rsc = "resources/"
# title and short description
title = "<NAME>'s resume"
description = (
"Python Senior Developer (12+ years of experience) interested in web apps, "
"data analysis & ML, IoT, home automation, energy and electrical industries"
)
# Load original html
html_orig = p_typora.read_text()
p_typora.write_text(html_orig.replace("README", title))
body = re.findall(
r"<body class='typora-export' >(.*)</body>", html_orig, flags=re.DOTALL
)[0]
rg_ref_badge = re.compile(r" src='(https://img.shields.io/badge/(\w+)-.+?)' ")
body_local_badges = rg_ref_badge.sub(r" src='resources/badge_\2.svg' ", body)
# Collect data for template
data = {
"title": title,
"short_desc": description,
"rel_path_rsc": rel_path_rsc,
"base_url": "https://azogue.github.io",
"site_name": "profile: <NAME>",
"image_width": 868,
"image_height": 1380,
"typora_body": body_local_badges,
}
# Render jinja2 template into index.html
p_index.write_text(
Template(p_templ.read_text()).render(**data)
)
|
notebooks/html-post-process-css.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Examples
import sys
sys.path.append("../")
from datetime import time
import pandas as pd
import pandas_market_calendars as mcal
# ## Setup new exchange calendar
nyse = mcal.get_calendar('NYSE')
# Get the time zone
nyse.tz.zone
# Get the AbstractHolidayCalendar object
holidays = nyse.holidays()
holidays.holidays[-5:]
# ## Exchange open valid business days
# Get the valid open exchange business dates between a start and end date.
# Note that Dec 26 (Christmas), Jan 2 (New Years) and all weekends are missing
nyse.valid_days(start_date='2016-12-20', end_date='2017-01-10')
# ## Schedule
schedule = nyse.schedule(start_date='2016-12-30', end_date='2017-01-10')
schedule
# with early closes
early = nyse.schedule(start_date='2012-07-01', end_date='2012-07-10')
early
# ## Get early closes
nyse.early_closes(schedule=early)
# ## Open at time
# Test to see if a given timestamp is during market open hours
nyse.open_at_time(early, pd.Timestamp('2012-07-03 12:00', tz='America/New_York'))
nyse.open_at_time(early, pd.Timestamp('2012-07-03 16:00', tz='America/New_York'))
# ## Date Range
# This function will take a schedule DataFrame and return a DatetimeIndex with all timestamps at the frequency given
# for all of the exchange open dates and times.
mcal.date_range(early, frequency='1D')
mcal.date_range(early, frequency='1H')
# ## Custom open and close times
# If you want to override the market open and close times enter these at construction
cal = mcal.get_calendar('NYSE', open_time=time(10, 0), close_time=time(14, 30))
print('open, close: %s, %s' % (cal.open_time, cal.close_time))
# ## Merge schedules
# NYSE Calendar
nyse = mcal.get_calendar('NYSE')
schedule_nyse = nyse.schedule('2015-12-20', '2016-01-06')
schedule_nyse
# LSE Calendar
lse = mcal.get_calendar('LSE')
schedule_lse = lse.schedule('2015-12-20', '2016-01-06')
schedule_lse
# ### Inner merge
# This will find the dates where both the NYSE and LSE are open.
# Notice that Dec 28th is open for NYSE but not LSE.
# Also note that some days have a close prior to the open. This function does not currently check for that.
mcal.merge_schedules(schedules=[schedule_nyse, schedule_lse], how='inner')
# ### Outer merge
# This will return the dates and times where either the NYSE or the LSE are open
mcal.merge_schedules(schedules=[schedule_nyse, schedule_lse], how='outer')
# ## Use holidays in numpy
# This will use your exchange calendar in numpy to add business days
import numpy as np
cme = mcal.get_calendar("CME")
np.busday_offset(dates="2020-05-22", holidays=cme.holidays().holidays, offsets=1)
# ## Trading Breaks
# Some markets have breaks in the day, like the CME Equity Futures markets which are closed from 4:15 - 4:35 (NY) daily. These calendars will have additional columns in the schedule() DataFrame
#
cme = mcal.get_calendar('CME_Equity')
schedule = cme.schedule('2020-01-01', '2020-01-04')
schedule
# The date_range() properly accounts for the breaks
mcal.date_range(schedule, '5min')
|
examples/usage.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/daniel-sjkdm/ConsumingAPIs/blob/master/CountriesAPI.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="qwJLFhNt6jBr"
# # Countries API
#
# The following notebook is an analysis of the countries API using:
#
# # + pandas
# # + seaborn
# # + matplotlib
# # + requests
#
# This is for practicing the data analysis skills! And also how it relates to get
# data from different sources. Here the source of data is a free REST API that doesn't need authentication in order to be used.
#
# ## Source
#
# [Countries API](https://restcountries.eu/)
# + id="75LnWY8_63l1"
import pandas as pd
import requests
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import json
from pprint import pprint
# + id="SXl0C5diDljl"
sns.set_theme()
matplotlib.rc('figure', figsize=(10, 5))
# + id="ECJhfvUc6_jw"
base_url = "https://restcountries.eu/rest/v2"
# + id="a7zfpOMx7V0s"
# Get all available information of the countries
r = requests.get(f"{base_url}/all")
if r.ok:
data = r.json()
else:
raise Exception("There was an error in the API server")
# + id="3FHYJQUV7ZHV" outputId="bf85e6b5-6718-4c0c-83c7-7f3b90c2ea1e" colab={"base_uri": "https://localhost:8080/"}
# Which are the fields of the json object?
columns = list(data[0].keys())
pprint(columns)
# + id="vE0QTBKx7bPd"
# Since I'm only interested in a subset of columns, I'll filter the ones
# I don't want to appear
columns_i_dont_want = ["alpha2Code", "alpha3Code", "callingCodes", "altSpellings",
"timezones", "borders", "nativeName", "numericCode",
"translations", "cioc"]
# + id="g0AmbLF_7kuc" outputId="561b5cba-de57-4a0d-d97d-9a25fc70ba9f" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Since the response data is a list of json objects it must be parsed into
# something useful and easy to read. That's why in here I'll
# extract with a map function only the fields of interest of the dict objects
new_data = []
for item in data:
new_data.append(
dict(
name=item["name"],
capital=item["capital"],
region=item["region"],
subregion=item["subregion"],
population=item["population"],
latlng=item["latlng"],
area=item["area"],
gini=item["gini"],
currencies="".join(list(map(lambda field: str(field["name"]), item["currencies"]))),
languages=list(map(lambda field: field["name"], item["languages"])),
regionalBlocs=list(map(lambda field: field["name"], item["regionalBlocs"]))
)
)
# This is what the data looks like
df = pd.DataFrame(new_data)
df.head()
# + id="ZRfa5P-PEGmy" outputId="7e18cdd2-9f3c-4413-b8da-ff97e6c08899" colab={"base_uri": "https://localhost:8080/"}
# Basic dataframe information
print(f"--- Shape = {df.shape}")
print(f"\n--- Datatypes = {df.dtypes}")
print("\n--- Are there null items?\n")
print(df.isnull().any())
# + id="XW8Ba1Y1FlD8" outputId="3338d5d1-5565-4df4-b9b1-736a511e0a1d" colab={"base_uri": "https://localhost:8080/", "height": 224}
# Some fields have invalid (null) values; drop those rows
# so they don't distort the analysis of the data
# Some rows get dropped (98)
df.dropna(inplace=True, how="any")
df.reset_index(inplace=True, drop=True)
print(f"Dataframe shape = {df.shape}")
df.head()
# + id="QuCWPkl__mAi" outputId="65ad5c3f-ae1a-4941-c733-bb57a34d7f10" colab={"base_uri": "https://localhost:8080/", "height": 300}
df.describe()
# + id="ugQCgcN_MpYS" outputId="0db20b5f-c831-4264-ca1c-60e176a1cbac" colab={"base_uri": "https://localhost:8080/", "height": 238}
# Group the dataframe by region and sum their values
# The region with the most population is Asia
df.groupby("region").sum().sort_values(by="population", ascending=False)
# + id="_LNup9K8XZrV" outputId="cd33b821-f0d7-4b9a-b0c6-90b6fb6a4d65" colab={"base_uri": "https://localhost:8080/", "height": 455}
# Group by country, sum the numeric values of each group and then
# sort by population.
# This is how we can see China is the country with the highest
# population and Seychelles is the country with the least
df.groupby("name").sum().sort_values(by="population", ascending=False)
# + id="94sc6kced1Te" outputId="a60aaaf5-c284-4663-f0c8-d55c89d2f133" colab={"base_uri": "https://localhost:8080/", "height": 238}
# Group by gini and get the row with the highest value
# The gini index is a measure of the wealth distribution
# in a country. The lowest the value the better.
# Here we can see that the Seychelles country in Africa is
# the worst one in terms of this index
df_gini = df.groupby("gini").max().sort_values(by="gini", ascending=False)
df_gini.head()
# + id="lNu_8FJrj2iD" outputId="86335872-03d4-46fb-b1d8-425fc41d859b" colab={"base_uri": "https://localhost:8080/", "height": 614}
# Group by currencies and display the ones that uses Euro
df.groupby("currencies").get_group("Euro").reset_index(drop=True)
# + id="42DudZzAEUp_" outputId="4ae0ae34-2d57-42c2-aa89-72f78befd52c" colab={"base_uri": "https://localhost:8080/", "height": 614}
# This is the same as above
df[df["currencies"] == "Euro"].reset_index()
# + id="wwE8_XdhN8o8" outputId="38f0f7c2-cc04-4571-ccf2-c857c28e1fdb" colab={"base_uri": "https://localhost:8080/"}
# Display the frequency of the currencies as a percentage
# The Euro is the most used as national currency by 11.84
# percent of the countries (listed in the dataset)
df["currencies"].value_counts(normalize=True).sort_values(ascending=False)
# + id="pyzJN-v7NkrI" outputId="060e8f2b-9cad-452f-da6b-6e4c9d119ddb" colab={"base_uri": "https://localhost:8080/", "height": 81}
# Get information about Mexico!
df_mexico = df[df["name"] == "Mexico"]
df_mexico
# + [markdown] id="5xn0pWTueI_N"
# ## Interesting groups
#
# Groups subsets are created here to be analyzed by:
#
# # + population
# # + gini index
# + id="H93w_VI2bVnD"
region_group = df.groupby("region")
subregion_group = df.groupby("subregion")
population_group = df.groupby("population")
currencies_group = df.groupby("currencies")
# + [markdown] id="adV5xzCudlS5"
# ### Population statistics
# + id="yHaM9vB5doQD" outputId="fcce2b47-ad9a-445c-ae63-3e8007a4318e" colab={"base_uri": "https://localhost:8080/"}
# Average population per region
region_group["population"].mean().sort_values(ascending=True)
# + id="SPLyOEoweneP" outputId="bb9b85ab-c578-4660-f9fa-5653bd92d05d" colab={"base_uri": "https://localhost:8080/"}
# Average population per subregion
# In average, Eastern Asia is the most populated region of Asia
# In America, the most populated subregion in average is Northern America
subregion_group["population"].mean().sort_values(ascending=False)
# + id="6TY2kbsJfIrl" outputId="e11c824a-7a46-47fe-d324-929b84feb901" colab={"base_uri": "https://localhost:8080/"}
# Here's the average population for currencies
currencies_group["population"].mean().sort_values(ascending=False).head()
# + [markdown] id="ll-iLpepblRM"
# ### Gini index statistics
# + id="AOn_cIIGbt_-" outputId="77ea7efa-409c-47f3-f776-a7948c712987" colab={"base_uri": "https://localhost:8080/"}
# By region, in average Europe has the best gini index
region_group["gini"].mean().sort_values(ascending=False)
# + id="5xGL0JH3bqRy" outputId="354f4f95-72a3-476f-db70-da289fc4c33c" colab={"base_uri": "https://localhost:8080/"}
# By population, in average the best gini index is in a population
# of 5717014 persons
population_group["gini"].mean().sort_values(ascending=False).tail()
# + id="-lb761JjcXSd" outputId="9ab87463-067c-4f0e-979a-8f2dd8b06529" colab={"base_uri": "https://localhost:8080/"}
# By currency, the best gini index is hold by the Europeans countries
# that uses Euro
currencies_group["gini"].mean().sort_values(ascending=False).tail()
# + [markdown] id="8LjQ9WDFIyMP"
# ## Plotting
#
# Since all the countries are listed, to display some plots I'm only using a subset of them to make it visually clear.
# + id="rLHZu0e-bBgn"
height = 8
aspect = 1.5
# + id="v8LTh9euDA3F" outputId="5baa80d8-4031-4bf3-c825-0b894bffa514" colab={"base_uri": "https://localhost:8080/", "height": 598}
# Let's plot the population in countries whose names begin with the letter M
sns.displot(data=df.loc[df["name"].str.startswith("M", na=False)],
x="population", hue="name", multiple="stack",
height=height, aspect=aspect)
# + id="x8ss6dJIMSpr" outputId="b9aa5fb5-f96d-43ac-fc8b-da3bca390c5a" colab={"base_uri": "https://localhost:8080/", "height": 598}
# Plot the area of the countries starting with M
sns.displot(data=df[df["name"].str.startswith("M", na=False)], x="area",
hue="name", multiple="stack", height=8, aspect=aspect)
# + id="6kOeOAl2jC2Y" outputId="ce0121bb-1056-43fe-97ea-ff3998694227" colab={"base_uri": "https://localhost:8080/", "height": 598}
# Plot the frequency distribution of the currencies
sns.displot(data=df, x="currencies", height=height, aspect=aspect,
multiple="stack")
# + id="duixZ-JSZquu" outputId="470ff950-ae2a-4636-a6cc-7f89075a4efe" colab={"base_uri": "https://localhost:8080/", "height": 602}
# Plot the relationship between population and area
# Most of the population are centered in smaller areas
sns.relplot(data=df, x="area", y="population", height=height, aspect=aspect)
# + id="B_jZ_iA1hlyQ" outputId="9b1644b2-f064-4096-f994-e6dde918fd86" colab={"base_uri": "https://localhost:8080/", "height": 602}
# Relationship between gini index and population
# Theres no correlation between the index and population
# since in smaller populations there are both high and low
# gini index values
sns.relplot(data=df, x="population", y="gini", height=height, aspect=aspect)
# + id="vaXR2hJqi7za" outputId="e5bfe37a-b176-4dd6-af52-f3714e50f6a6" colab={"base_uri": "https://localhost:8080/", "height": 602}
sns.relplot(data=df, x="area", y="gini", height=height, aspect=aspect)
# + id="G319WV5AEjeW" outputId="4b2a05a6-ec3c-418a-fb94-73faa7f634a1" colab={"base_uri": "https://localhost:8080/"}
currencies_group = df.groupby("currencies")
currencies_group["population"].value_counts().head(10)
# + id="64CF1eBfUqEa" outputId="9e7cd2d4-bfb5-40ef-e061-47181ee90f1a" colab={"base_uri": "https://localhost:8080/", "height": 614}
currencies_group.get_group("Euro")
# + id="61b7FJx6V8HN" outputId="2c378f42-f0b0-4629-9fcb-3bcb9629bfac" colab={"base_uri": "https://localhost:8080/"}
# Get gini index by each region
region_group = df.groupby("region")
region_group["population"].value_counts().loc["Africa"]
# region_group.get_group("Africa")
# + id="V3_n9SBjUQrr" outputId="52a1bb4b-361b-4299-fbfd-7e4285e4c605" colab={"base_uri": "https://localhost:8080/"}
currencies_group["population"].value_counts()
# + id="pyLrfQQgS9wq" outputId="01fdee24-4543-478b-b762-da4dded117b5" colab={"base_uri": "https://localhost:8080/", "height": 143}
filter = df["currencies"] == "United States dollar"
df.loc[filter]
|
CountriesAPI.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.9 64-bit
# name: python3
# ---
# ## Setup
# !git clone https://github.com/sithu31296/semantic-segmentation
# %cd semantic-segmentation
# %pip install -r requirements.txt
# +
import torch
from torchvision import io
from torchvision import transforms as T
from PIL import Image
def show_image(image):
    """Convert a torch image tensor to a PIL image for inline display.

    NOTE(review): the shape test is a heuristic — if the last dimension is
    not 3, the tensor is assumed to be channels-first (C, H, W) and is
    permuted to channels-last for PIL. A (H, W, 3) image whose width
    happens to be 3, or a grayscale image, would be mishandled — confirm
    callers only pass RGB tensors.
    """
    if image.shape[2] != 3: image = image.permute(1, 2, 0)
    image = Image.fromarray(image.numpy())
    return image
# -
# ## Show Available Pretrained Models
# +
from semseg import show_models
show_models()
# -
# ## Load a Pretrained Model
#
# Download a pretrained model's weights from the result table (ADE20K, CityScapes, ...) and put it in `checkpoints/pretrained/model_name/`.
# %pip install gdown
# +
# import gdown
from pathlib import Path
ckpt = Path('./checkpoints/pretrained/segformer')
ckpt.mkdir(exist_ok=True, parents=True)
url = 'https://drive.google.com/uc?id=1-OmW3xRD3WAbJTzktPC-VMOF5WMsN8XT'
output = './checkpoints/pretrained/segformer/segformer.b3.ade.pth'
# gdown.download(url, output, quiet=False)
# +
from semseg.models import *
model = eval('SegFormer')(
backbone='MiT-B3',
num_classes=150
)
try:
model.load_state_dict(torch.load('checkpoints/pretrained/segformer/segformer.b3.ade.pth', map_location='cpu'))
except:
print("Download a pretrained model's weights from the result table.")
model.eval()
print('Loaded Model')
# -
# ## Simple Image Inference
#
# ### Load Image
image_path = 'assests/ade/ADE_val_00000049.jpg'
image = io.read_image(image_path)
print(image.shape)
show_image(image)
# ### Preprocess
# resize
image = T.CenterCrop((512, 512))(image)
# scale to [0.0, 1.0]
image = image.float() / 255
# normalize
image = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))(image)
# add batch size
image = image.unsqueeze(0)
image.shape
# ### Model Forward
with torch.no_grad():
seg = model(image)
seg.shape
# ### Postprocess
seg = seg.softmax(1).argmax(1).to(int)
seg.unique()
# +
from semseg.datasets import *
palette = eval('ADE20K').PALETTE
# -
seg_map = palette[seg].squeeze().to(torch.uint8)
show_image(seg_map)
# ## Show Available Backbones
# +
from semseg import show_backbones
show_backbones()
# -
# ## Show Available Heads
# +
from semseg import show_heads
show_heads()
# -
# ## Show Available Datasets
# +
from semseg import show_datasets
show_datasets()
# -
# ## Construct a Custom Model
#
# ### Choose a Backbone
# +
from semseg.models.backbones import ResNet
backbone = ResNet('18')
# -
# init random input batch
x = torch.randn(2, 3, 224, 224)
# get features from the backbone
features = backbone(x)
for out in features:
print(out.shape)
# ### Choose a Head
# +
from semseg.models.heads import UPerHead
head = UPerHead(backbone.channels, 128, num_classes=10)
# -
seg = head(features)
seg.shape
from torch.nn import functional as F
# upsample the output
seg = F.interpolate(seg, size=x.shape[-2:], mode='bilinear', align_corners=False)
seg.shape
# Check `semseg/models/custom_cnn.py` and `semseg/models/custom_vit.py` for a complete construction for custom model.
|
tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python385jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score, make_scorer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.inspection import permutation_importance
import multiprocessing
from mlxtend.classifier import EnsembleVoteClassifier
from sklearn.ensemble import BaggingClassifier
from xgboost import XGBClassifier
labels = pd.read_csv('../../csv/train_labels.csv')
labels.head()
values = pd.read_csv('../../csv/train_values.csv')
values.T
to_be_categorized = ["land_surface_condition", "foundation_type", "roof_type",\
"position", "ground_floor_type", "other_floor_type",\
"plan_configuration", "legal_ownership_status"]
for row in to_be_categorized:
values[row] = values[row].astype("category")
values.info()
# Snapshot of the original dtypes so the loop decides from the state
# before any casting happens.
datatypes = dict(values.dtypes)
# Downcast every signed-integer column to the smallest dtype that can
# hold its full value range. pd.to_numeric(downcast="integer") checks
# both the minimum and the maximum, which fixes two bugs in the previous
# max-only if/elif chain:
#   * a column max above 2**31 fell through to the int16 branch and
#     silently overflowed;
#   * negative minima were never inspected, so a column with max <= 127
#     but min < -128 was wrongly cast to int8.
for row in values.columns:
    if str(datatypes[row]) not in ("int64", "int32", "int16", "int8"):
        continue
    values[row] = pd.to_numeric(values[row], downcast="integer")
labels["building_id"] = labels["building_id"].astype(np.int32)
labels["damage_grade"] = labels["damage_grade"].astype(np.int8)
labels.info()
values['age_is_leq_than_100'] = (values['age'] <= 100).astype(np.int8)
# values['age_is_betw_100_and_200'] = ((values['age'] > 100) & (values['age'] <= 200)).astype(np.int8)
values['age_is_greater_than_200'] = (values['age'] > 200).astype(np.int8)
values[values['age'] >= 100]
# # Feature Engineering para XGBoost
important_values = values\
.merge(labels, on="building_id")
important_values.drop(columns=["building_id"], inplace = True)
important_values["geo_level_1_id"] = important_values["geo_level_1_id"].astype("category")
important_values
# +
X_train, X_test, y_train, y_test = train_test_split(important_values.drop(columns = 'damage_grade'),
important_values['damage_grade'], test_size = 0.075, random_state = 123)
# +
#OneHotEncoding
def encode_and_bind(original_dataframe, feature_to_encode):
    """One-hot encode *feature_to_encode* and replace it in the frame.

    Returns a new DataFrame in which the original column is dropped and
    the dummy columns produced by ``pd.get_dummies`` (named
    ``<feature>_<value>``) are appended on the right. The input frame is
    not mutated.
    """
    dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
    res = pd.concat([original_dataframe, dummies], axis=1)
    # drop(columns=...) is the idiomatic spelling of drop([...], axis=1);
    # also drop the redundant parentheses around the return value.
    return res.drop(columns=[feature_to_encode])
features_to_encode = ["geo_level_1_id", "land_surface_condition", "foundation_type", "roof_type",\
"position", "ground_floor_type", "other_floor_type",\
"plan_configuration", "legal_ownership_status"]
for feature in features_to_encode:
X_train = encode_and_bind(X_train, feature)
X_test = encode_and_bind(X_test, feature)
# -
X_train
import lightgbm as lgb
lgbm_model_1 = lgb.LGBMClassifier(boosting_type='gbdt',
colsample_bytree=1.0,
importance_type='split',
learning_rate=0.15,
max_depth=None,
n_estimators=1600,
n_jobs=-1,
objective=None,
subsample=1.0,
subsample_for_bin=200000,
subsample_freq=0)
lgbm_model_1.fit(X_train, y_train)
# +
# bc_lgbm_model_1 = BaggingClassifier(base_estimator = lgbm_model_1,
# n_estimators = 30,
# oob_score = True,
# bootstrap_features = True,
# random_state = 0,
# verbose = 3)
# bc_lgbm_model_1.fit(X_train, y_train)
# + slideshow={"slide_type": "-"}
# eclf_model = EnsembleVoteClassifier(clfs=[bc_xgb_model_1, #~74.53
# rf_model_1, #~73.92
# bc_lgbm_model_1, #~74.56
# cb_model_1, #~74.92
# gb_model_1], #~74.07
# weights=[3,1.75,3,6.75,1.875],
# fit_base_estimators=False,
# voting='soft')
# -
y_preds = eclf_model.predict(X_test)
f1_score(y_test, y_preds, average='micro')
|
src/EnsembleVoteClassifier/.ipynb_checkpoints/lgbm-graph-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import socket
import dask
import dask_cudf
import distributed
import dask_xgboost as dxgb
# +
print("- setting dask settings")
dask.config.set({'distributed.scheduler.work-stealing': False})
dask.config.set({'distributed.scheduler.bandwidth': 1})
print("-- Changes to dask settings")
print("--- Setting work-stealing to ", dask.config.get('distributed.scheduler.work-stealing'))
print("--- Setting scheduler bandwidth to ", dask.config.get('distributed.scheduler.bandwidth'))
print("-- Settings updates complete")
# -
ip = socket.gethostbyname(socket.gethostname())
scheduler = "tcp://" + ip + ":8786"
client = distributed.Client(scheduler)
client.restart()
client
# update this path to reflect the datastore from Tracked Metrics if you downloaded the NYC Taxi Trip dataset
datastore = "/path/to/azure/datastore"
# +
# list of column names that need to be re-mapped
remap = {}
remap['tpep_pickup_datetime'] = 'pickup_datetime'
remap['tpep_dropoff_datetime'] = 'dropoff_datetime'
remap['ratecodeid'] = 'rate_code'
#create a list of columns & dtypes the df must have
must_haves = {
'pickup_datetime': 'datetime64[ms]',
'dropoff_datetime': 'datetime64[ms]',
'passenger_count': 'int32',
'trip_distance': 'float32',
'pickup_longitude': 'float32',
'pickup_latitude': 'float32',
'rate_code': 'int32',
'dropoff_longitude': 'float32',
'dropoff_latitude': 'float32',
'fare_amount': 'float32'
}
# -
# helper function which takes a DataFrame partition
def clean(df_part, remap, must_haves):
# some col-names include pre-pended spaces remove & lowercase column names
tmp = {col:col.strip().lower() for col in list(df_part.columns)}
df_part = df_part.rename(tmp)
# rename using the supplied mapping
df_part = df_part.rename(remap)
# iterate through columns in this df partition
for col in df_part.columns:
# drop anything not in our expected list
if col not in must_haves:
df_part = df_part.drop(col)
continue
if df_part[col].dtype == 'object' and col in ['pickup_datetime', 'dropoff_datetime']:
df_part[col] = df_part[col].astype('datetime64[ms]')
continue
# if column was read as a string, recast as float
if df_part[col].dtype == 'object':
df_part[col] = df_part[col].str.fillna('-1')
df_part[col] = df_part[col].astype('float32')
else:
# downcast from 64bit to 32bit types
# Tesla T4 are faster on 32bit ops
if 'int' in str(df_part[col].dtype):
df_part[col] = df_part[col].astype('int32')
if 'float' in str(df_part[col].dtype):
df_part[col] = df_part[col].astype('float32')
df_part[col] = df_part[col].fillna(-1)
return df_part
# Toggle which years of NYC taxi data this workload loads; set a year's
# value to True to include it in the training DataFrame below.
is_valid_years = {
    "2014": False,
    "2015": False,
    "2016": True
}
# +
data_path = os.path.join(datastore, "data/nyctaxi")

dfs = []

if not os.path.exists(data_path):
    # Local datastore copy missing: fall back to the public GCS bucket
    # (data is downloaded and processed in-situ, which is slower).
    print("WARNING: the NYC Taxi Trip Data was not found in the Azure datastore")
    print("WARNING: updating the data path to use a public datastore")
    print("WARNING: data will be downloaded and processed in-situ")
    print("WARNING: this degrades performance")
    print("WARNING: to avoid this performance degradation, use the `--download_nyctaxi_data=True` option when using start_azureml.py")
    data_path = "gcs://anaconda-public-data/nyc-taxi/csv/"

    if is_valid_years["2014"]:
        taxi_df_2014 = dask_cudf.read_csv(os.path.join(data_path, "2014/yellow_*.csv"))
        taxi_df_2014 = taxi_df_2014.map_partitions(clean, remap, must_haves)
        dfs.append(taxi_df_2014)

    if is_valid_years["2015"]:
        taxi_df_2015 = dask_cudf.read_csv(os.path.join(data_path, "2015/yellow_*.csv"))
        taxi_df_2015 = taxi_df_2015.map_partitions(clean, remap, must_haves)
        dfs.append(taxi_df_2015)

    if is_valid_years["2016"]:
        # only the first six months of 2016 are used by this workload
        valid_months_2016 = [str(x).rjust(2, '0') for x in range(1, 7)]
        valid_files_2016 = [os.path.join(data_path, "2016/yellow_tripdata_2016-{}.csv".format(month)) for month in valid_months_2016]
        taxi_df_2016 = dask_cudf.read_csv(valid_files_2016)
        taxi_df_2016 = taxi_df_2016.map_partitions(clean, remap, must_haves)
        dfs.append(taxi_df_2016)
else:
    # BUG FIX: the existence checks for 2015 and 2016 previously tested the
    # "2014" subdirectory (copy-paste error); each year now checks its own.
    if is_valid_years["2014"] and os.path.exists(os.path.join(data_path, "2014")):
        taxi_df_2014 = dask_cudf.read_csv(os.path.join(data_path, "2014/yellow_*.csv"))
        taxi_df_2014 = taxi_df_2014.map_partitions(clean, remap, must_haves)
        dfs.append(taxi_df_2014)

    if is_valid_years["2015"] and os.path.exists(os.path.join(data_path, "2015")):
        taxi_df_2015 = dask_cudf.read_csv(os.path.join(data_path, "2015/yellow_*.csv"))
        taxi_df_2015 = taxi_df_2015.map_partitions(clean, remap, must_haves)
        dfs.append(taxi_df_2015)

    if is_valid_years["2016"] and os.path.exists(os.path.join(data_path, "2016")):
        taxi_df_2016 = dask_cudf.read_csv(os.path.join(data_path, "2016/yellow_*.csv"))
        taxi_df_2016 = taxi_df_2016.map_partitions(clean, remap, must_haves)
        dfs.append(taxi_df_2016)

# single dask_cudf DataFrame spanning every requested year
taxi_df = dask.dataframe.multi.concat(dfs)
# -
print("Column names are as follows:")
for column in taxi_df.columns:
print(column)
# +
# apply a list of filter conditions to throw out records with missing or outlier values
query_frags = [
'fare_amount > 0 and fare_amount < 500',
'passenger_count > 0 and passenger_count < 6',
'pickup_longitude > -75 and pickup_longitude < -73',
'dropoff_longitude > -75 and dropoff_longitude < -73',
'pickup_latitude > 40 and pickup_latitude < 42',
'dropoff_latitude > 40 and dropoff_latitude < 42'
]
taxi_df = taxi_df.query(' and '.join(query_frags))
# inspect the results of cleaning
taxi_df.head().to_pandas()
# +
import math
from math import cos, sin, asin, sqrt, pi
import numpy as np
def haversine_distance_kernel(pickup_latitude, pickup_longitude, dropoff_latitude, dropoff_longitude, h_distance):
    """Row-wise haversine (great-circle) distance between pickup and
    dropoff coordinates, written into ``h_distance`` in kilometers.

    Written as a loop over parallel arrays so it stays compatible with
    cuDF ``apply_rows`` compilation.
    """
    deg2rad = pi / 180
    for row, (lat_a, lon_a, lat_b, lon_b) in enumerate(
            zip(pickup_latitude, pickup_longitude, dropoff_latitude, dropoff_longitude)):
        phi_a = deg2rad * lat_a
        lam_a = deg2rad * lon_a
        phi_b = deg2rad * lat_b
        lam_b = deg2rad * lon_b
        half_dphi = (phi_b - phi_a) / 2
        half_dlam = (lam_b - lam_a) / 2
        # haversine of the central angle
        chord = sin(half_dphi) ** 2 + cos(phi_a) * cos(phi_b) * sin(half_dlam) ** 2
        # 6371 km = mean Earth radius
        h_distance[row] = 6371 * (2 * asin(sqrt(chord)))
def day_of_the_week_kernel(day, month, year, day_of_week):
    """Compute a 0-6 day-of-week code for each (day, month, year) row and
    write it into ``day_of_week``.

    Uses a Zeller-style congruence in which January/February are folded
    into the previous year. NOTE(review): the month offset applied for
    Jan/Feb is nonstandard for Zeller's formula — behavior is preserved
    exactly as in the original; verify against a reference date before
    relying on absolute weekday labels (downstream only uses `< 2` as a
    weekend flag).
    """
    for i in range(len(day)):
        jan_or_feb = month[i] < 3
        offset = month[i] if jan_or_feb else 0
        # years are expressed relative to 2000, century fixed at 20
        short_year = (year[i] - (1 if jan_or_feb else 0)) - 2000
        century = 20
        shifted_month = month[i] + offset + 1
        day_of_week[i] = (day[i] + math.floor(shifted_month * 2.6) + short_year
                          + (short_year // 4) + (century // 4) - 2 * century) % 7
def add_features(df):
    """Derive model features on one cuDF partition of cleaned taxi data.

    Adds calendar fields from the pickup timestamp, trip duration, rounded
    coordinates (0.01-degree bins), haversine distance, day-of-week, and a
    weekend flag; drops the raw timestamp columns.

    NOTE(review): `drop(col)` is called positionally and `apply_rows` is
    a cuDF-only API — this function cannot run on plain pandas frames.

    Parameters
    ----------
    df : cudf.DataFrame
        Partition with the columns produced by ``clean``.

    Returns
    -------
    cudf.DataFrame
        The partition with feature columns appended.
    """
    # calendar components of the pickup timestamp
    df['hour'] = df['pickup_datetime'].dt.hour
    df['year'] = df['pickup_datetime'].dt.year
    df['month'] = df['pickup_datetime'].dt.month
    df['day'] = df['pickup_datetime'].dt.day
    # trip duration as the difference of the integer timestamp encodings
    df['diff'] = df['dropoff_datetime'].astype('int32') - df['pickup_datetime'].astype('int32')

    # coordinates rounded down to 0.01-degree grid cells
    df['pickup_latitude_r'] = df['pickup_latitude'] // .01 * .01
    df['pickup_longitude_r'] = df['pickup_longitude'] // .01 * .01
    df['dropoff_latitude_r'] = df['dropoff_latitude'] // .01 * .01
    df['dropoff_longitude_r'] = df['dropoff_longitude'] // .01 * .01

    # raw timestamps are no longer needed once features are extracted
    df = df.drop('pickup_datetime')
    df = df.drop('dropoff_datetime')

    # GPU-compiled row kernels: great-circle trip distance ...
    df = df.apply_rows(haversine_distance_kernel,
                       incols=['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'],
                       outcols=dict(h_distance=np.float32),
                       kwargs=dict())

    # ... and day-of-week code (0-6)
    df = df.apply_rows(day_of_the_week_kernel,
                       incols=['day', 'month', 'year'],
                       outcols=dict(day_of_week=np.float32),
                       kwargs=dict())

    # codes 0 and 1 are treated as the weekend
    df['is_weekend'] = (df['day_of_week']<2).astype("int32")
    return df
# +
# %%time
# actually add the features
taxi_df = taxi_df.map_partitions(add_features).persist()
done = distributed.wait(taxi_df)
# inspect the result
# taxi_df.head().to_pandas()
# -
# %matplotlib inline
taxi_df.groupby('hour').fare_amount.mean().compute().to_pandas().sort_index().plot(legend=True);
# +
# %%time
X_train = taxi_df.query('day < 25').persist()
# create a Y_train ddf with just the target variable
Y_train = X_train[['fare_amount']].persist()
# drop the target variable from the training ddf
X_train = X_train[X_train.columns.difference(['fare_amount'])]
# this wont return until all data is in GPU memory
done = distributed.wait([X_train, Y_train])
# +
# %%time
params = {
'learning_rate' : 0.3,
'max_depth' : 8,
'objective' : 'reg:squarederror',
'subsample' : 0.6,
'gamma' : 1,
'silent' : True,
'verbose_eval' : True,
'tree_method' :'gpu_hist'
}
trained_model = dxgb.train(client, params, X_train, Y_train, num_boost_round=100)
# -
def drop_empty_partitions(df):
    """Return *df* restricted to its non-empty partitions.

    Computing per-partition lengths forces one pass over the data; the
    resulting boolean mask selects only partitions with at least one row.
    """
    partition_lengths = df.map_partitions(len).compute()
    keep_mask = [size > 0 for size in partition_lengths]
    return df.partitions[keep_mask]
# +
X_test = taxi_df.query('day >= 25').persist()
X_test = drop_empty_partitions(X_test)
# Create Y_test with just the fare amount
Y_test = X_test[['fare_amount']]
# Drop the fare amount from X_test
X_test = X_test[X_test.columns.difference(['fare_amount'])]
# display test set size
# len(X_test)
# +
# generate predictions on the test set
Y_test['prediction'] = dxgb.predict(client, trained_model, X_test)
# +
Y_test['squared_error'] = (Y_test['prediction'] - Y_test['fare_amount'])**2
# inspect the results to make sure our calculation looks right
Y_test.head().to_pandas()
# -
# compute the actual RMSE over the full test set
RMSE = Y_test.squared_error.mean().compute()
math.sqrt(RMSE)
|
the_archive/archived_rapids_blog_notebooks/azureml/rapids/nyctaxi_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b> <font size = 5> Intersection count and distance to highway data from Open Street Maps </b> </font>
# In this iPython notebook, the Overpass API from Open Street Maps is used to determine the location of all traffic signals within a given bounding box. The Overpy library is used to send the request to the API and this call returns the latitude and longitude of all traffic signals. Next, the distance between each traffic intersection and each point in the monitoring data is measured. A traffic score is calculated as the 'Number of traffic intersections within a 1,000 ft buffer' to each point in the monitoring data
#
# The second section of this notebook uses the Overpass API to get the latitude and longitude of all points within a bounding box classified as a highway. Next, the distance from each monitoring location to the closest highway is determined.
# +
#Import python packages including overpy
import overpy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import descartes
import geopandas as gpd
from shapely.geometry import Point, Polygon
from shapely.ops import nearest_points
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
import time
from scipy.stats import boxcox
from matplotlib import cm
import matplotlib.lines as mlines
sns.set(style = 'whitegrid')
sns.set_palette('bright')
# %matplotlib inline
# -
# # <b> <font size = 5> Fetch all nodes using the API Query. Here the node is specified as 'Highway=traffic_signals' </b> </font>
#Call overpass API and pass bounding box.
api = overpy.Overpass()
result = api.query("""
node(37.68,-122.36,37.8712,-122.03) ["highway"="traffic_signals"];
(._;>;);
out body;
""")
traffic_lat = []
traffic_lon = []
for node in result.nodes:
traffic_lat.append(node.lat)
traffic_lon.append(node.lon)
#Write Latitude and Longitude data to a dataframe
traffic_df = pd.DataFrame(list(zip(traffic_lat, traffic_lon)), columns = ['Latitude', 'Longitude'])
# ##### Write to csv
# traffic_df.to_csv("Data/Raw-data/all_traffic_intersections.csv")
# # <b> <font size = 5> Load traffic intersection data</b> </font>
traffic_df = pd.read_csv("Data/Raw-data/all_traffic_intersections.csv")
#Drop the first column
traffic_df.drop(columns = ['Unnamed: 0'], inplace=True)
## Rename index and intersection number
traffic_df.rename(columns = {'index':'Intersection'}, inplace=True)
### Add an empty column for distance
traffic_df['dist'] = 0
traffic_df['dist'].astype(float)
# ## <b> <font size = 4> Convert traffic dataset into a column format to calculate distance </b> </font>
# Create individual dataframes
traffic_lat = traffic_df[['Intersection', 'Latitude']]
traffic_long = traffic_df[['Intersection', 'Longitude']]
traffic_dist = traffic_df[['Intersection', 'dist']]
# Transpose all the dataframes
traffic_lat = traffic_lat.T
traffic_long = traffic_long.T
traffic_dist = traffic_dist.T
## Make the header as the first row in each transposed dataframe
traffic_lat = traffic_lat.rename(columns=traffic_lat.iloc[0].astype(int)).drop(traffic_lat.index[0])
traffic_long = traffic_long.rename(columns=traffic_long.iloc[0].astype(int)).drop(traffic_long.index[0])
traffic_dist = traffic_dist.rename(columns=traffic_dist.iloc[0].astype(int)).drop(traffic_dist.index[0])
## Add suffix to column header based on the dataframe type
traffic_lat.columns = [str(col) + '_latitude' for col in traffic_lat.columns]
traffic_long.columns = [str(col) + '_longitude' for col in traffic_long.columns]
traffic_dist.columns = [str(col) + '_distance' for col in traffic_dist.columns]
## Remove index for each dataframe
traffic_lat.reset_index(drop=True, inplace=True)
traffic_long.reset_index(drop=True, inplace=True)
traffic_dist.reset_index(drop=True, inplace=True)
### Combine individual dataframes into one
traffic_combined = traffic_lat.join(traffic_long).join(traffic_dist)
### Sort based on column names
traffic_combined = traffic_combined.reindex(columns=sorted(traffic_combined.columns))
#Update dataframe to contain 21488 rows
traffic_combined = traffic_combined.loc[traffic_combined.index.repeat(21488)].reset_index(drop=True)
# # <b> <font size = 5> Load Air Pollution Monitoring Data </b> </font>
df = pd.read_csv('EDF_Data.csv', header = 1)
df.tail()
BC_df = df[['Longitude', 'Latitude', 'BC Value']]
NO2_df = df[['Longitude', 'Latitude', 'NO2 Value']]
# ## <b> <font size = 4> Combine BC and NO2 datasets with traffic data </b> </font>
combined_BC_traffic = BC_df.join(traffic_combined)
combined_NO2_traffic = NO2_df.join(traffic_combined)
combined_BC_traffic.head()
# ## <b> <font size = 4> Calculate distance between monitoring location and each traffic intersection </b> </font>
# **We only calculate the distance from each monitoring location in the BC dataset with traffic intersections since the location of measurements are the same for NO2 and BC**
# +
# Convert distance or emissions distance column to float type
for idx, col in enumerate(combined_BC_traffic.columns):
if "_dist" in col:
combined_BC_traffic[col] = pd.to_numeric(combined_BC_traffic[col], downcast="float")
# -
### Defining a function to calculate the distance between two GPS coordinates (latitude and longitude)
def distance(origin, destination):
    """Great-circle (haversine) distance in kilometers between two
    (latitude, longitude) pairs given in degrees.
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    earth_radius_km = 6371

    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)

    # haversine of the central angle between the two points
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_km * central_angle
# +
time1 = time.time()
for index, row in combined_BC_traffic.iterrows():
for idx, col in enumerate(combined_BC_traffic.columns):
if "_dist" in col:
combined_BC_traffic.at[index,col] = float(distance((row.iloc[1], row.iloc[0]), (row.iloc[idx+1], row.iloc[idx+2])))*3280.84
#BC_Facility.at[index,col] = float(row.iloc[idx])
time2 = time.time()
print(time2 - time1)
# -
combined_BC_traffic.head()
# ##### Write the entire dataset to a csv file
# combined_BC_traffic.to_csv("Data/Unused-data/BC_traffic_full.csv")
# # <b> <font size = 5> Read Traffic Distance Data </b> </font>
#Read dataset
combined_BC_traffic = pd.read_csv("Data/Unused-data/BC_traffic_full.csv")
#Drop the latitude column
combined_BC_traffic = combined_BC_traffic[combined_BC_traffic.columns.drop(list(combined_BC_traffic.filter(regex='_latitude')))]
#Drop the longitude column
combined_BC_traffic = combined_BC_traffic[combined_BC_traffic.columns.drop(list(combined_BC_traffic.filter(regex='_longitude')))]
#Drop BC value
combined_BC_traffic = combined_BC_traffic[combined_BC_traffic.columns.drop(list(combined_BC_traffic.filter(regex='BC Value')))]
#Clean-up the columns
combined_BC_traffic.drop(columns = ['Unnamed: 0'], inplace=True)
#Write to a new csv file
combined_BC_traffic.to_csv("Data/Unused-data/BC_traffic_distance.csv")
# ## <b> <font size = 4> Count the number of intersections with distance <1,000 feet </b> </font>
#Read csv file
combined_BC_traffic = pd.read_csv("Data/Unused-data/BC_traffic_distance.csv")
#Create an empty column for number of intersection
combined_BC_traffic['number_intersections'] = 0
# ## <b> <font size = 4> Define function using range that returns True or False if a value is between 0 - 1,000 feet. </b> </font>
# +
def count_values_in_range(series, range_min, range_max):
    """Count how many values of *series* lie within [range_min, range_max].

    ``Series.between`` is inclusive on both ends and treats NA as False,
    so missing values never count as in-range.
    """
    in_range = series.between(left=range_min, right=range_max)
    return in_range.sum()
range_min, range_max = 0, 1000
combined_BC_traffic['number_intersections'] = combined_BC_traffic.apply(
func=lambda row: count_values_in_range(row, range_min, range_max), axis=1)
# -
#Get only the rows that returned true
BC_traffic_score = combined_BC_traffic[['Latitude','Longitude','number_intersections']]
# #### Write to a csv file
# BC_traffic_score.to_csv("Data/Traffic_score_2000.csv")
# # <b> <font size = 5> Calculate Distance to Closest Highway </b> </font>
# <font size = 5> <b> Fetch all nodes using the API Query. Here the node is specified as 'Highway=motorway' </b> </font>
# +
api = overpy.Overpass()
# fetch all ways and nodes
result = api.query("""
way(37.68,-122.36,37.752,-122.130) ["highway" = "motorway"];
(._;>;);
out body;
""")
highway_lat = []
highway_lon = []
for node in result.nodes:
highway_lat.append(node.lat)
highway_lon.append(node.lon)
# -
highway_df = pd.DataFrame(list(zip(highway_lat, highway_lon)), columns = ['Latitude', 'Longitude'])
highway_df.reset_index(inplace=True)
highway_df.rename(columns = {'index':'Location_id'}, inplace=True)
# ##### Write to csv
# highway_df.to_csv("Data/highway_locations.csv")
# ## <b> <font size = 4> Find location of closest highway to each point and measure distance </b> </font>
geometry_BC = [Point(xy) for xy in zip(BC_df['Longitude'], BC_df['Latitude'])]
geometry_NO2 = [Point(xy) for xy in zip(NO2_df['Longitude'], NO2_df['Latitude'])]
geometry_highway = [Point(xy) for xy in zip(highway_df['Longitude'], highway_df['Latitude'])]
crs = {'init': 'epsg:4326'}
# Create a geopandas dataframe with the coordinate reference system as epsg4326
geo_df_BC = gpd.GeoDataFrame(BC_df, crs = crs, geometry = geometry_BC)
geo_df_NO2 =gpd.GeoDataFrame(NO2_df, crs = crs, geometry = geometry_NO2)
geo_df_highway =gpd.GeoDataFrame(highway_df, crs = crs, geometry = geometry_highway)
# **Use geopandas nearest function to get the location of the nearest highway from each monitoring location**
# +
# Union of every highway point geometry into one MultiPoint so that
# shapely's nearest_points can search all of them in a single call.
pts = geo_df_highway.geometry.unary_union
def near(point, pts=pts):
    """Return the ``Location_id`` of the highway point nearest to *point*.

    The default argument deliberately binds the module-level ``pts`` at
    definition time. Recovering the matching row relies on exact geometry
    equality — assumes highway point geometries are unique; verify if
    duplicate coordinates are possible.
    """
    # find the nearest point and return the corresponding Location
    nearest = geo_df_highway.geometry == nearest_points(point, pts)[1]
    return geo_df_highway[nearest]['Location_id'].to_numpy()[0]
geo_df_BC['Nearest_Highway'] = geo_df_BC.apply(lambda row: near(row.geometry), axis=1)
# +
# Union of every highway point geometry into one MultiPoint so that
# shapely's nearest_points can search all of them in a single call.
# (Duplicate of the definition used for the BC dataframe above; kept
# as-is to preserve the notebook's cell structure.)
pts = geo_df_highway.geometry.unary_union
def near(point, pts=pts):
    """Return the ``Location_id`` of the highway point nearest to *point*.

    The default argument deliberately binds the module-level ``pts`` at
    definition time. Recovering the matching row relies on exact geometry
    equality — assumes highway point geometries are unique; verify if
    duplicate coordinates are possible.
    """
    # find the nearest point and return the corresponding Location
    nearest = geo_df_highway.geometry == nearest_points(point, pts)[1]
    return geo_df_highway[nearest]['Location_id'].to_numpy()[0]
geo_df_NO2['Nearest_Highway'] = geo_df_NO2.apply(lambda row: near(row.geometry), axis=1)
# -
BC_df_highway = BC_df.merge(highway_df, left_on=['Nearest_Highway'], right_on = ['Location_id'], suffixes = ['_BC','_highway'])
BC_df_highway.head()
BC_df_highway.drop(columns = ['Location_id', 'geometry_BC','geometry_highway', 'Nearest_Highway'], inplace=True)
### Add an empty column for distance
BC_df_highway['dist'] = 0
BC_df_highway['dist'].astype(float)
#Convert all distance columns to type float
BC_df_highway['dist'] = pd.to_numeric(BC_df_highway['dist'], downcast="float")
BC_df_highway['Latitude_highway'] = pd.to_numeric(BC_df_highway['Latitude_highway'], downcast="float")
BC_df_highway['Longitude_highway'] = pd.to_numeric(BC_df_highway['Longitude_highway'], downcast="float")
BC_df_highway.head()
BC_df_highway['Latitude_highway'].describe()
# **Apply the distance function previously defined to calculate the distance between the latitude and longitude of monitoring location, and latitude and longitude of closest highway**
BC_df_highway['Dist'] = BC_df_highway.apply(lambda row : distance((row['Latitude_BC'], row['Longitude_BC']),
(row['Latitude_highway'], row['Longitude_highway'])), axis = 1)
BC_df_highway['Dist'].describe()
# ##### Write to a csv
# BC_df_highway.to_csv("Data/BC_dist_highway.csv")
NO2_df_highway = NO2_df.merge(highway_df, left_on=['Nearest_Highway'], right_on = ['Location_id'], suffixes = ['_NO2','_highway'])
NO2_df_highway.drop(columns = ['Location_id', 'geometry_NO2','geometry_highway', 'Nearest_Highway'], inplace=True)
NO2_df_highway['Latitude_highway'] = pd.to_numeric(NO2_df_highway['Latitude_highway'], downcast="float")
NO2_df_highway['Longitude_highway'] = pd.to_numeric(NO2_df_highway['Longitude_highway'], downcast="float")
# **Apply the distance function previously defined to calculate the distance between the latitude and longitude of monitoring location, and latitude and longitude of closest highway**
NO2_df_highway['Dist'] = NO2_df_highway.apply(lambda row : distance((row['Latitude_NO2'], row['Longitude_NO2']),
(row['Latitude_highway'], row['Longitude_highway'])), axis = 1)
NO2_df_highway['Dist'].describe()
# ##### Write to csv
# NO2_df_highway.to_csv("Data/NO2_dist_highway.csv")
# # Reference
# 1. Fetch location of traffic signals: <a> "https://python-overpy.readthedocs.io/en/latest/introduction.html"> </a>
|
Notebooks/OSM-Traffic-Data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# -
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # Python API Examples
#
# This notebook walks through the basics of the Riva Speech and Language AI Services.
#
# ## Overview
#
# NVIDIA Riva is a platform for building and deploying AI applications that fuse vision, speech and other sensors. It offers a complete workflow to build, train and deploy AI systems that can use visual cues such as gestures and gaze along with speech in context. With the Riva platform, you can:
#
# - Build speech and visual AI applications using pretrained NVIDIA Neural Modules ([NeMo](https://github.com/NVIDIA/NeMo)) available at NVIDIA GPU Cloud ([NGC](https://ngc.nvidia.com/catalog/models?orderBy=modifiedDESC&query=%20label%3A%22NeMo%2FPyTorch%22&quickFilter=models&filters=)).
#
# - Transfer learning: re-train your model on domain-specific data, with NVIDIA [NeMo](https://github.com/NVIDIA/NeMo). NeMo is a toolkit and platform that enables researchers to define and build new state-of-the-art speech and natural language processing models.
#
# - Optimize neural network performance and latency using NVIDIA TensorRT
#
# - Deploy AI applications with TensorRT Inference Server:
# - Support multiple network formats: ONNX, TensorRT plans, PyTorch TorchScript models.
#     - Deployment on multiple platforms: from datacenter to edge servers, via Helm to K8s cluster, on NVIDIA Volta/Turing GPUs or Jetson Xavier platforms.
#
# See the below video for a demo of Riva capabilities.
# +
from IPython.display import IFrame
# Riva Youtube demo video
IFrame("https://www.youtube.com/embed/r264lBi1nMU?rel=0&controls=0&showinfo=0", width="560", height="315", frameborder="0", allowfullscreen=True)
# -
# For more detailed information on Riva, please refer to the [Riva developer documentation](https://developer.nvidia.com/).
#
# ## Introduction to the Riva Speech and Natural Language services
#
# Riva offers a rich set of speech and natural language understanding services such as:
#
# - Automated speech recognition (ASR)
# - Text-to-Speech synthesis (TTS)
# - A collection of natural language understanding services such as named entity recognition (NER), punctuation, intent classification.
# ## Learning objectives
#
# - Understand how to interact with Riva Speech and Natural Language APIs, services and use cases
#
# ## Requirements and setup
#
# To execute this notebook, please follow the setup steps in [README](./README.md).
#
# We first import some required libraries.
# +
import io
import librosa
from time import time
import numpy as np
import IPython.display as ipd
import grpc
import requests
# NLP proto
import riva_api.riva_nlp_pb2 as rnlp
import riva_api.riva_nlp_pb2_grpc as rnlp_srv
# ASR proto
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
# TTS proto
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
import riva_api.riva_audio_pb2 as ra
# -
# ### Create Riva clients and connect to Riva Speech API server
#
# The below URI assumes a local deployment of the Riva Speech API server on the default port. In case the server deployment is on a different host or via Helm chart on Kubernetes, the user should use an appropriate URI.
# +
channel = grpc.insecure_channel('localhost:50051')
riva_asr = rasr_srv.RivaSpeechRecognitionStub(channel)
riva_nlp = rnlp_srv.RivaLanguageUnderstandingStub(channel)
riva_tts = rtts_srv.RivaSpeechSynthesisStub(channel)
# -
# ## Content
# 1. [Offline ASR Example](#1)
# 1. [Core NLP Service Examples](#2)
# 1. [TTS Service Example](#3)
# 1. [Riva NLP Service Examples](#4)
#
# <a id="1"></a>
#
# ## 1. Offline ASR Example
#
# Riva Speech API supports `.wav` files in PCM format, `.alaw`, `.mulaw` and `.flac` formats with single channel in this release.
# This example uses a .wav file with LINEAR_PCM encoding.
# read in an audio file from local disk
path = "/work/wav/sample.wav"
audio, sr = librosa.core.load(path, sr=None)
with io.open(path, 'rb') as fh:
content = fh.read()
ipd.Audio(path)
# +
# Set up an offline/batch recognition request
req = rasr.RecognizeRequest()
req.audio = content # raw bytes
req.config.encoding = ra.AudioEncoding.LINEAR_PCM # Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings
req.config.sample_rate_hertz = sr # Audio will be resampled if necessary
req.config.language_code = "en-US" # Ignored, will route to correct model in future release
req.config.max_alternatives = 1 # How many top-N hypotheses to return
req.config.enable_automatic_punctuation = True # Add punctuation when end of VAD detected
req.config.audio_channel_count = 1 # Mono channel
response = riva_asr.Recognize(req)
asr_best_transcript = response.results[0].alternatives[0].transcript
print("ASR Transcript:", asr_best_transcript)
print("\n\nFull Response Message:")
print(response)
# -
# <a id="2"></a>
#
# ## 2. Core NLP Service Examples
#
# All of the Core NLP Services support batched requests. The maximum batch size,
# if any, of the underlying models is hidden from the end user and automatically
# batched by the Riva and TRTIS servers.
#
# The Core NLP API provides three methods currently:
#
# 1. TransformText - map an input string to an output string
#
# 2. ClassifyText - return a single label for the input string
#
# 3. ClassifyTokens - return a label per input token
# +
# Use the TextTransform API to run the punctuation model
req = rnlp.TextTransformRequest()
req.model.model_name = "riva_punctuation"
req.text.append("add punctuation to this sentence")
req.text.append("do you have any red nvidia shirts")
req.text.append("i need one cpu four gpus and lots of memory "
"for my new computer it's going to be very cool")
nlp_resp = riva_nlp.TransformText(req)
print("TransformText Output:")
print("\n".join([f" {x}" for x in nlp_resp.text]))
# +
# Use the TokenClassification API to run a Named Entity Recognition (NER) model
# Note: the model configuration of the NER model indicates that the labels are
# in IOB format. Riva, subsequently, knows to:
# a) ignore 'O' labels
# b) Remove B- and I- prefixes from labels
# c) Collapse sequences of B- I- ... I- tokens into a single token
req = rnlp.TokenClassRequest()
req.model.model_name = "riva_ner" # If you have deployed a custom model with the domain_name
# parameter in ServiceMaker's `riva-build` command then you should use
# "riva_ner_<your_input_domain_name>" where <your_input_domain_name>
# is the name you provided to the domain_name parameter.
req.text.append("<NAME> is the CEO of NVIDIA Corporation, "
"located in Santa Clara, California")
resp = riva_nlp.ClassifyTokens(req)
print("Named Entities:")
for result in resp.results[0].results:
print(f" {result.token} ({result.label[0].class_name})")
# +
# Submit a TextClassRequest for text classification.
# Riva NLP comes with a default text_classification domain called "domain_misty" which consists of
# 4 classes: meteorology, personality, weather and nomatch
request = rnlp.TextClassRequest()
request.model.model_name = "riva_text_classification_domain" # If you have deployed a custom model
# with the `--domain_name` parameter in ServiceMaker's `riva-build` command
# then you should use "riva_text_classification_<your_input_domain_name>"
# where <your_input_domain_name> is the name you provided to the
# domain_name parameter. In this case the domain_name is "domain"
request.text.append("Is it going to snow in Burlington, Vermont tomorrow night?")
request.text.append("What causes rain?")
request.text.append("What is your favorite season?")
ct_response = riva_nlp.ClassifyText(request)
print(ct_response)
# -
# <a id="3"></a>
#
# ## 3. TTS Service Example
#
# Subsequent releases will include added features, including model registration to support multiple languages/voices with the same API. Support for resampling to alternative sampling rates will also be added.
# +
req = rtts.SynthesizeSpeechRequest()
req.text = "Is it recognize speech or wreck a nice beach?"
req.language_code = "en-US" # currently required to be "en-US"
req.encoding = ra.AudioEncoding.LINEAR_PCM # Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings
req.sample_rate_hz = 22050 # ignored, audio returned will be 22.05KHz
req.voice_name = "ljspeech" # ignored
resp = riva_tts.Synthesize(req)
audio_samples = np.frombuffer(resp.audio, dtype=np.float32)
ipd.Audio(audio_samples, rate=22050)
# -
# <a id="4"></a>
#
# ## 4. Riva NLP Service Examples
#
# The NLP Service contains higher-level/more application-specific NLP APIs. This
# guide demonstrates how the AnalyzeIntent API can be used for queries across
# both known and unknown domains.
# +
# The AnalyzeIntent API can be used to query a Intent Slot classifier. The API can leverage a
# text classification model to classify the domain of the input query and then route to the
# appropriate intent slot model.
# Lets first see an example where the domain is known. This skips execution of the domain classifier
# and proceeds directly to the intent/slot model for the requested domain.
req = rnlp.AnalyzeIntentRequest()
req.query = "How is the humidity in San Francisco?"
req.options.domain = "weather" # The <domain_name> is appended to "riva_intent_" to look for a
# model "riva_intent_<domain_name>". So in this e.g., the model "riva_intent_weather"
# needs to be preloaded in riva server. If you would like to deploy your
# custom Joint Intent and Slot model use the `--domain_name` parameter in
# ServiceMaker's `riva-build intent_slot` command.
resp = riva_nlp.AnalyzeIntent(req)
print(resp)
# +
# Below is an example where the input domain is not provided.
req = rnlp.AnalyzeIntentRequest()
req.query = "Is it going to rain tomorrow?"
# The input query is first routed to the a text classification model called "riva_text_classification_domain"
# The output class label of "riva_text_classification_domain" is appended to "riva_intent_"
# to get the appropriate Intent Slot model to execute for the input query.
# Note: The model "riva_text_classification_domain" needs to be loaded into Riva server and have the appropriate
# class labels that would invoke the corresponding intent slot model.
resp = riva_nlp.AnalyzeIntent(req)
print(resp)
# -
# Some weather Intent queries
queries = [
"Is it currently cloudy in Tokyo?",
"What is the annual rainfall in Pune?",
"What is the humidity going to be tomorrow?"
]
for q in queries:
req = rnlp.AnalyzeIntentRequest()
req.query = q
start = time()
resp = riva_nlp.AnalyzeIntent(req)
print(f"[{resp.intent.class_name}]\t{req.query}")
# +
# Demonstrate latency by calling repeatedly.
# NOTE: this is a synchronous API call, so request #N will not be sent until
# response #N-1 is returned. This means latency and throughput will be negatively
# impacted by long-distance & VPN connections
req = rnlp.TextTransformRequest()
req.text.append("i need one cpu four gpus and lots of memory for my new computer it's going to be very cool")
iterations = 10
# Demonstrate synchronous performance
start_time = time()
for _ in range(iterations):
nlp_resp = riva_nlp.PunctuateText(req)
end_time = time()
print(f"Time to complete {iterations} synchronous requests: {end_time-start_time}")
# Demonstrate async performance
start_time = time()
futures = []
for _ in range(iterations):
futures.append(riva_nlp.PunctuateText.future(req))
for f in futures:
f.result()
end_time = time()
print(f"Time to complete {iterations} asynchronous requests: {end_time-start_time}\n")
# -
# <a id="5"></a>
#
# ## 5. Go deeper into Riva capabilities
#
# Now that you have a basic introduction to the Riva APIs, you may like to try out:
#
# ### 1. Sample apps:
#
# Riva comes with various sample apps as a demonstration for how to use the APIs to build interesting applications such as a [chatbot](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/weather.html), a domain specific speech recognition or [keyword (entity) recognition system](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/callcenter.html), or simply how Riva allows scaling out for handling massive amount of requests at the same time. ([SpeechSquad)](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/speechsquad.html)
# Have a look at the Sample Application section in the [Riva developer documentation](https://developer.nvidia.com/) for all the sample apps.
#
#
# ### 2. Finetune your own domain specific Speech or NLP model and deploy into Riva.
#
# Train the latest state-of-the-art speech and natural language processing models on your own data using [NeMo](https://github.com/NVIDIA/NeMo) or [Transfer Learning ToolKit](https://developer.nvidia.com/transfer-learning-toolkit) and deploy them on Riva using the [Riva ServiceMaker tool](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/model-servicemaker.html).
#
#
# ### 3. Further resources:
#
# Explore the details of each of the APIs and their functionalities in the [docs](https://docs.nvidia.com/deeplearning/jarvis/user-guide/docs/protobuf-api/protobuf-api-root.html).
|
nlp_demo_riva/riva/nb_demo_speech_api.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mask R-CNN - Inspect Balloon Training Data
#
# Inspect and visualize data loading and pre-processing code.
# +
import os
import sys
import itertools
import math
import logging
import json
import re
import random
from collections import OrderedDict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
from samples.balloon import balloon
# %matplotlib inline
# -
# ## Configurations
#
# Configurations are defined in balloon.py
# Training configuration for the balloon dataset (hyperparameters defined in balloon.py).
config = balloon.BalloonConfig()
# Root directory of the balloon dataset (download from the Mask R-CNN releases page).
BALLOON_DIR = os.path.join(ROOT_DIR, "datasets/balloon")
# ## Dataset
# +
# Load dataset
# Get the dataset from the releases page
# https://github.com/matterport/Mask_RCNN/releases
# Load the "train" subset of the balloon dataset.
dataset = balloon.BalloonDataset()
dataset.load_balloon(BALLOON_DIR, "train")
# Must call before using the dataset: builds the internal image/class lookup tables.
dataset.prepare()
# Summarize what was loaded: image count and the class list.
print("Image Count: {}".format(len(dataset.image_ids)))
print("Class Count: {}".format(dataset.num_classes))
for i, info in enumerate(dataset.class_info):
    print("{:3}. {:50}".format(i, info['name']))
# -
# ## Display Samples
#
# Load and display images and masks.
# Load and display random samples:
# pick 4 random image ids and show each image with its per-class instance masks.
image_ids = np.random.choice(dataset.image_ids, 4)
for image_id in image_ids:
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset.class_names)
# ## Bounding Boxes
#
# Rather than using bounding box coordinates provided by the source datasets, we compute the bounding boxes from masks instead. This allows us to handle bounding boxes consistently regardless of the source dataset, and it also makes it easier to resize, rotate, or crop images because we simply generate the bounding boxes from the updated masks rather than computing bounding box transformation for each type of image transformation.
# +
# Load random image and mask.
image_id = random.choice(dataset.image_ids)
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
# Compute Bounding box — derived from the masks, not taken from the source annotations.
bbox = utils.extract_bboxes(mask)
# Display image and additional stats
print("image_id ", image_id, dataset.image_reference(image_id))
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
# -
# ## Resize Images
#
# To support multiple images per batch, images are resized to one size (1024x1024). Aspect ratio is preserved, though. If an image is not square, then zero padding is added at the top/bottom or right/left.
# +
# Load random image and mask.
image_id = np.random.choice(dataset.image_ids, 1)[0]
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
# Resize to the model's square input size; aspect ratio is preserved via zero padding.
image, window, scale, padding, _ = utils.resize_image(
    image,
    min_dim=config.IMAGE_MIN_DIM,
    max_dim=config.IMAGE_MAX_DIM,
    mode=config.IMAGE_RESIZE_MODE)
# Apply the same scale/padding to the masks so they stay aligned with the image.
mask = utils.resize_mask(mask, scale, padding)
# Compute Bounding box — recomputed from the resized masks.
bbox = utils.extract_bboxes(mask)
# Display image and additional stats
print("image_id: ", image_id, dataset.image_reference(image_id))
print("Original shape: ", original_shape)
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
# -
# ## Mini Masks
#
# Instance binary masks can get large when training with high resolution images. For example, if training with 1024x1024 image then the mask of a single instance requires 1MB of memory (Numpy uses bytes for boolean values). If an image has 100 instances then that's 100MB for the masks alone.
#
# To improve training speed, we optimize masks by:
# * We store mask pixels that are inside the object bounding box, rather than a mask of the full image. Most objects are small compared to the image size, so we save space by not storing a lot of zeros around the object.
# * We resize the mask to a smaller size (e.g. 56x56). For objects that are larger than the selected size we lose a bit of accuracy. But most object annotations are not very accurate to begin with, so this loss is negligible for most practical purposes. The size of the mini_mask can be set in the config class.
#
# To visualize the effect of mask resizing, and to verify the code correctness, we visualize some examples.
# +
# Load an image with its ground truth (meta, class ids, boxes, masks) as used in training.
image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id)
log("image", image)
log("image_meta", image_meta)
log("class_ids", class_ids)
log("bbox", bbox)
log("mask", mask)
# Show the image followed by up to 7 instance masks.
display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])
# +
# visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
# -
# Add augmentation and mask resizing.
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id)
log("mask", mask)
display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])
# Expand the (mini) masks back to full image size for display.
mask = utils.expand_mask(bbox, mask, image.shape)
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
# ## Anchors
#
# The order of anchors is important. Use the same order in training and prediction phases. And it must match the order of the convolution execution.
#
# For an FPN network, the anchors must be ordered in a way that makes it easy to match anchors to the output of the convolution layers that predict anchor scores and shifts.
#
# * Sort by pyramid level first. All anchors of the first level, then all of the second and so on. This makes it easier to separate anchors by level.
# * Within each level, sort anchors by feature map processing sequence. Typically, a convolution layer processes a feature map starting from top-left and moving right row by row.
# * For each feature map cell, pick any sorting order for the anchors of different ratios. Here we match the order of ratios passed to the function.
#
# **Anchor Stride:**
# In the FPN architecture, feature maps at the first few layers are high resolution. For example, if the input image is 1024x1024 then the feature map of the first layer is 256x256, which generates about 200K anchors (256*256*3). These anchors are 32x32 pixels and their stride relative to image pixels is 4 pixels, so there is a lot of overlap. We can reduce the load significantly if we generate anchors for every other cell in the feature map. A stride of 2 will cut the number of anchors by 4, for example.
#
# In this implementation we use an anchor stride of 2, which is different from the paper.
# +
# Generate Anchors for every FPN level, ordered level-first, then row-by-row per feature map.
backbone_shapes = modellib.compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                         config.RPN_ANCHOR_RATIOS,
                                         backbone_shapes,
                                         config.BACKBONE_STRIDES,
                                         config.RPN_ANCHOR_STRIDE)
# Print summary of anchors
num_levels = len(backbone_shapes)
anchors_per_cell = len(config.RPN_ANCHOR_RATIOS)
print("Count: ", anchors.shape[0])
print("Scales: ", config.RPN_ANCHOR_SCALES)
print("ratios: ", config.RPN_ANCHOR_RATIOS)
print("Anchors per Cell: ", anchors_per_cell)
print("Levels: ", num_levels)
anchors_per_level = []
for l in range(num_levels):
    # Anchors in a level = cells * anchors-per-cell, reduced by the anchor stride squared.
    num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]
    anchors_per_level.append(anchors_per_cell * num_cells // config.RPN_ANCHOR_STRIDE**2)
    print("Anchors in Level {}: {}".format(l, anchors_per_level[l]))
# -
# Visualize anchors of one cell at the center of the feature map of a specific level.
# +
## Visualize anchors of one cell at the center of the feature map of a specific level
# Load and draw random image
image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, _, _, _ = modellib.load_image_gt(dataset, config, image_id)
fig, ax = plt.subplots(1, figsize=(10, 10))
ax.imshow(image)
levels = len(backbone_shapes)
for level in range(levels):
    # NOTE(review): random_colors() is re-drawn every iteration, so each level is
    # colored from a fresh random palette rather than a fixed one — confirm intended.
    colors = visualize.random_colors(levels)
    # Compute the index of the anchors at the center of the image
    level_start = sum(anchors_per_level[:level]) # sum of anchors of previous levels
    level_anchors = anchors[level_start:level_start+anchors_per_level[level]]
    print("Level {}. Anchors: {:6} Feature map Shape: {}".format(level, level_anchors.shape[0],
                                                                 backbone_shapes[level]))
    center_cell = backbone_shapes[level] // 2
    center_cell_index = (center_cell[0] * backbone_shapes[level][1] + center_cell[1])
    # NOTE(review): this assignment is dead code — level_center is overwritten below
    # by the stride-aware computation.
    level_center = center_cell_index * anchors_per_cell
    center_anchor = anchors_per_cell * (
        (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE**2) \
        + center_cell[1] / config.RPN_ANCHOR_STRIDE)
    level_center = int(center_anchor)
    # Draw anchors. Brightness show the order in the array, dark to bright.
    for i, rect in enumerate(level_anchors[level_center:level_center+anchors_per_cell]):
        y1, x1, y2, x2 = rect
        p = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=2, facecolor='none',
                              edgecolor=(i+1)*np.array(colors[level]) / anchors_per_cell)
        ax.add_patch(p)
# -
# ## Data Generator
#
# Create data generator yielding training batches (images, RPN targets, ROIs, head targets).
random_rois = 2000
g = modellib.DataGenerator(
    dataset, config, shuffle=True, random_rois=random_rois,
    detection_targets=True)
# +
# Uncomment to run the generator through a lot of images
# to catch rare errors
# for i in range(1000):
# print(i)
# _, _ = next(g)
# +
# Get Next Image from the generator. With random_rois set, the batch also carries
# ROI proposals and detection-head targets.
if random_rois:
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks, rpn_rois, rois], \
    [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = g[0]
    log("rois", rois)
    log("mrcnn_class_ids", mrcnn_class_ids)
    log("mrcnn_bbox", mrcnn_bbox)
    log("mrcnn_mask", mrcnn_mask)
else:
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_boxes, gt_masks], _ = g[0]
# NOTE(review): if random_rois were 0, gt_class_ids is never unpacked in the else
# branch above, so the log() call below would raise NameError — confirm this cell
# is only ever run with random_rois > 0.
log("gt_class_ids", gt_class_ids)
log("gt_boxes", gt_boxes)
log("gt_masks", gt_masks)
log("rpn_match", rpn_match, )
log("rpn_bbox", rpn_bbox)
image_id = modellib.parse_image_meta(image_meta)["image_id"][0]
print("image_id: ", image_id, dataset.image_reference(image_id))
# Remove the last dim in mrcnn_class_ids. It's only added
# to satisfy Keras restriction on target shape.
mrcnn_class_ids = mrcnn_class_ids[:,:,0]
# +
b = 0  # index of the image within the batch to inspect
# Restore original image (reverse normalization)
sample_image = modellib.unmold_image(normalized_images[b], config)
# Compute anchor shifts: apply the RPN regression deltas to the matched anchors.
indices = np.where(rpn_match[b] == 1)[0]
refined_anchors = utils.apply_box_deltas(anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV)
log("anchors", anchors)
log("refined_anchors", refined_anchors)
# Get list of positive anchors (rpn_match: 1 = positive, -1 = negative, 0 = neutral)
positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
print("Positive anchors: {}".format(len(positive_anchor_ids)))
negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
print("Negative anchors: {}".format(len(negative_anchor_ids)))
neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
print("Neutral anchors: {}".format(len(neutral_anchor_ids)))
# ROI breakdown by class
for c, n in zip(dataset.class_names, np.bincount(mrcnn_class_ids[b].flatten())):
    if n:
        print("{:23}: {}".format(c[:20], n))
# Show positive anchors
fig, ax = plt.subplots(1, figsize=(16, 16))
visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                     refined_boxes=refined_anchors, ax=ax)
# -
# Show negative anchors
visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])
# Show neutral anchors (a random sample of 100). They don't contribute to training.
visualize.draw_boxes(sample_image, boxes=anchors[np.random.choice(neutral_anchor_ids, 100)])
# ## ROIs
if random_rois:
    # Class aware bboxes: pick each ROI's deltas for its assigned class.
    bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]), mrcnn_class_ids[b], :]
    # Refined ROIs: apply the class-specific deltas to the proposals.
    refined_rois = utils.apply_box_deltas(rois[b].astype(np.float32), bbox_specific[:,:4] * config.BBOX_STD_DEV)
    # Class aware masks: pick each ROI's mask for its assigned class.
    mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :, mrcnn_class_ids[b]]
    visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific, mrcnn_class_ids[b], dataset.class_names)
    # Any repeated ROIs? View each box row as one opaque value so np.unique dedupes rows.
    rows = np.ascontiguousarray(rois[b]).view(np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
    _, idx = np.unique(rows, return_index=True)
    print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))
if random_rois:
    # Display ROIs and corresponding masks and bounding boxes:
    # red = proposal box, green = refined box, plus the class-specific mask.
    ids = random.sample(range(rois.shape[1]), 8)
    images = []
    titles = []
    for i in ids:
        image = visualize.draw_box(sample_image.copy(), rois[b,i,:4].astype(np.int32), [255, 0, 0])
        image = visualize.draw_box(image, refined_rois[i].astype(np.int64), [0, 255, 0])
        images.append(image)
        titles.append("ROI {}".format(i))
        images.append(mask_specific[i] * 255)
        titles.append(dataset.class_names[mrcnn_class_ids[b,i]][:20])
    display_images(images, titles, cols=4, cmap="Blues", interpolation="none")
# Check ratio of positive ROIs in a set of images.
if random_rois:
    limit = 10  # number of batches to sample
    temp_g = modellib.DataGenerator(
        dataset, config, shuffle=True, random_rois=10000,
        detection_targets=True)
    total = 0
    for i in range(limit):
        # ids are the target class ids; a positive ROI has class id > 0.
        _, [ids, _, _] = temp_g[i]
        positive_rois = np.sum(ids[0] > 0)
        total += positive_rois
        print("{:5} {:5.2f}".format(positive_rois, positive_rois/ids.shape[1]))
    print("Average percent: {:.2f}".format(total/(limit*ids.shape[1])))
# + jupyter={"outputs_hidden": true}
|
samples/balloon/inspect_balloon_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dungdung23/100-Days-Of-ML-Code/blob/master/Spotify_Music_Recommendation_with_LightGCN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="5ym3rJHdIrGU"
# # Spotify Music Recommendation with LightGCN
# + [markdown] id="8rRq8Qw8vzBP"
# By <NAME>, <NAME>, <NAME>
# + [markdown] id="QHPGmshaJp9k"
# This is the Colab notebook associated with our Medium post! (We will update this with the link once the article is published)
# + [markdown] id="aLIg99Y6mYkN"
# Note: please ensure that you have GPU runtime enabled! Go to Runtime -> Change runtime type. Then set the Hardware accelerator to GPU if it isn't already set.
# + [markdown] id="6iqg8GCJ8OjR"
# This Colab will:
# 1. Teach you how to use PyTorch Geometric to write and train your own GNNs from scratch, by showing how to implement LightGCN for music recommendation
# 2. Evaluate the performance by recommending songs for a bunch of playlists from the Spotify Million Playlist Dataset
# 3. Produce some animations and visualizations of the learned playlist/node embeddings. Stick around until the end to see some interesting discoveries about country music playlists, Drake songs, and Taylor Swift songs!
# + [markdown] id="FbwOF8ZSIv16"
# ## Install and import necessary packages
# + [markdown] id="GFtIDibbf8U3"
# First, we need to install a newer version of matplotlib for our visualizations later on.
#
# PLEASE FOLLOW THE MESSAGE THAT POPS UP AND RESTART THE RUNTIME AFTER RUNNING THIS CELL!
# + id="6penBwOSf0FO" colab={"base_uri": "https://localhost:8080/", "height": 603} outputId="b1c16b42-3364-4422-d2fe-d24249164558"
# !pip install matplotlib==3.5.0
# + [markdown] id="cnI75_wGf-Qj"
# Again, please restart the runtime after running the cell above!! Click the button that comes up.
#
# Here are some more installations:
# + colab={"base_uri": "https://localhost:8080/"} id="o51SFMJOqHlS" outputId="8c6741bd-7da5-431d-f5eb-c539e72e92d7"
# !pip install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+cu111.html
# !pip install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+cu111.html
# We are installing torch-geometric from a specific GitHub commit instead of from pip wheels/conda. This is because there is currently a bug in
# RandomLinkSplit that has been fixed in the repo, but not yet in an official release. Once release 2.0.3 is available, we can go back
# to installing from pip wheels. See here for more info about the bug: https://github.com/pyg-team/pytorch_geometric/issues/3440
# !pip install git+https://github.com/pyg-team/pytorch_geometric.git@<PASSWORD>8f79f1035742
# + [markdown] id="EbdPGbmdgIwk"
# Import necessary packages:
# + id="FQ7EnP6BqPmE"
import json
import numpy as np
import os
import torch
from torch_geometric import seed_everything
from torch_geometric.data import Data, Dataset
from torch_geometric.loader import DataLoader
from torch_geometric.nn import MessagePassing
from torch_geometric.transforms import RandomLinkSplit
from torch_geometric.utils import degree
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from sklearn.decomposition import PCA
from IPython.display import HTML
# Fix the RNG state so results are reproducible across runs.
seed_everything(5) # set random seed
# + id="rF9EWvBZNLNc"
import matplotlib
matplotlib.__version__
# Guards against the runtime not being restarted after the matplotlib 3.5.0 install above.
assert matplotlib.__version__ == '3.5.0' # if this fails, makes sure you ran the matplotlib installation above AND restarted your runtime
# + [markdown] id="nMUj2ceMI_Mp"
# ## Load data + data pre-processing
# + [markdown] id="seWqXFF4JB70"
# The full Spotify Million Playlist Dataset is available at https://www.aicrowd.com/challenges/spotify-million-playlist-dataset-challenge.
#
# We have already gone ahead and pre-processed the data, rather than importing the entire dataset here, for two reasons:
# 1. You need to make an account to download the Spotify dataset, so we don't think we should post it online
# 2. The full dataset is around 33GB, which is very large!
#
# Of course, feel free to download it yourself and play around with it.
#
# Our pre-processing code is available in our GitHub repo at https://github.com/ba305/LightGCN-Spotify. There is also an explanation of the key parts of the pre-processing code in our Medium article.
#
# For now, we will import the already-preprocessed data, which is hosted at our GitHub repo.
# + colab={"base_uri": "https://localhost:8080/"} id="94A_2r-wJBJb" outputId="7f92cccc-523d-45c7-b506-05eacb9f8e74"
# ! wget https://raw.githubusercontent.com/ba305/LightGCN-Spotify/main/data/dataset_large/data_object.pt
# ! wget https://raw.githubusercontent.com/ba305/LightGCN-Spotify/main/data/dataset_large/dataset_stats.json
# ! wget https://raw.githubusercontent.com/ba305/LightGCN-Spotify/main/data/dataset_large/song_info.json
# ! wget https://raw.githubusercontent.com/ba305/LightGCN-Spotify/main/data/dataset_large/playlist_info.json
# + [markdown] id="KF8I3QTz2gJw"
# Now that we downloaded our preprocessed dataset, we can load it in.
# + id="EpE7dFJge1Wq"
# Load data: the preprocessed PyG graph object plus dataset statistics (JSON).
base_dir = "."
data = torch.load(os.path.join(base_dir, "data_object.pt"))
with open(os.path.join(base_dir, "dataset_stats.json"), 'r') as f:
    stats = json.load(f)
# Node ids 0..num_playlists-1 are playlists; the rest up to num_nodes-1 are songs.
num_playlists, num_nodes = stats["num_playlists"], stats["num_nodes"]
# + [markdown] id="3gxBd579e8Ox"
# ### Train/validation/test split
# + [markdown] id="9au4M0HB2kM6"
# The train/validation/test split is very important! We will use a 70%-15%-15% split.
#
# For graph ML problems, this splitting is quite complex (see our article for more details). Luckily, the RandomLinkSplit function from PyG can help us if we use it correctly.
#
# We need to specify is_undirected=True so that it knows to avoid data leakage from reverse edges (e.g., [4,5] and [5,4] should stay in the same split since they are basically the same edge). We also set add_negative_train_samples=False and neg_sampling_ratio=0 since we have our own negative sampling implementation.
# + id="j0YoXGWHe7uO"
# Train/val/test split (70/15/15). is_undirected=True keeps reverse edges in the same
# split; negative sampling is disabled here because we sample negatives ourselves.
transform = RandomLinkSplit(is_undirected=True, add_negative_train_samples=False, neg_sampling_ratio=0,
                            num_val=0.15, num_test=0.15)
train_split, val_split, test_split = transform(data)
# Confirm that every node appears in every set above
assert train_split.num_nodes == val_split.num_nodes and train_split.num_nodes == test_split.num_nodes
# + [markdown] id="vYOFvmPz2-Vg"
# We can now take a quick look at the splits. Note that message passing edges are stored in the .edge_index attribute, and supervision/evaluation edges are stored in the .edge_label_index attribute. Keep in mind that the message passing edges are directed, while the evaluation edges are undirected.
# + id="2MKNz9_3fDyq" colab={"base_uri": "https://localhost:8080/"} outputId="b2136469-f32f-440e-c1e3-4539d2277a62"
# Inspect the splits: message-passing edges live in .edge_index,
# supervision/evaluation edges in .edge_label_index.
print(train_split)
print(val_split)
print(test_split)
# + [markdown] id="Fq6-jodC3LH2"
# We will want to load our evaluation edges in batches using a DataLoader. Therefore, we first must create a PyG Dataset object which specifies how to load in each batch. See the documentation for more information: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_dataset.html
# + id="-cNSEJdH13SV"
class PlainData(Data):
    """PyG ``Data`` subclass whose batch-collation increment is always zero.

    The stock ``Data.__inc__`` offsets index tensors by the node count when the
    DataLoader collates a batch, which we do not want for our edge batches; by
    always returning 0 this class makes batching behave like the plain PyTorch
    DataLoader.
    See: https://pytorch-geometric.readthedocs.io/en/latest/notes/batching.html
    """
    def __inc__(self, key, value, *args, **kwargs):
        # Never shift indices during collation.
        return 0
class SpotifyDataset(Dataset):
    """Dataset of supervision/evaluation edges, grouped by playlist.

    Item ``idx`` is *every* outgoing edge of playlist ``idx``. Keeping all of a
    playlist's positive edges together in one item lets per-playlist metrics
    such as recall@k be computed when the DataLoader draws a batch.
    """
    def __init__(self, root, edge_index, transform=None, pre_transform=None):
        self.edge_index = edge_index
        # Row 0 holds the playlist indices (RandomLinkSplit sorts them there).
        self.unique_idxs = torch.unique(edge_index[0,:]).tolist()
        self.num_nodes = len(self.unique_idxs)
        super().__init__(root, transform, pre_transform)

    def len(self):
        # One dataset item per playlist.
        return self.num_nodes

    def get(self, idx):
        # Select the edge columns whose source node is playlist `idx`.
        playlist_mask = self.edge_index[0,:] == idx
        return PlainData(edge_index=self.edge_index[:, playlist_mask])
# + [markdown] id="OrPYS7003qDj"
# For each split, we have a set of message passing edges (for GNN propagation/getting final multi-scale node embeddings), and also a set of evaluation edges (used to calculate loss/performance metrics). For message passing edges, we can just store them in a PyG Data object. For evaluation edges, we put them in a SpotifyDataset object so we can load them in in batches with a DataLoader.
# + id="S6V2A5u5fD05"
# Evaluation ("_ev") edges go into SpotifyDataset objects so a DataLoader can batch
# them per playlist; message-passing ("_mp") edges go into plain Data objects.
train_ev = SpotifyDataset('temp', edge_index=train_split.edge_label_index)
train_mp = Data(edge_index=train_split.edge_index)
val_ev = SpotifyDataset('temp', edge_index=val_split.edge_label_index)
val_mp = Data(edge_index=val_split.edge_index)
test_ev = SpotifyDataset('temp', edge_index=test_split.edge_label_index)
test_mp = Data(edge_index=test_split.edge_index)
# + [markdown] id="bXPJd59_Ju5x"
# # Creating the Model
# + [markdown] id="v20pZ_VA3ziA"
# Now it's time to create the full LightGCN model! Please see the documentation for more details on how to create message passing networks: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
# + [markdown] id="jGUib63137gM"
# We need to create two classes. First is a class that defines how a single LightGCN message passing layer should work. For this, we need to specify the aggregation function (in the constructor), and also define the message() and forward() functions. Please see our article for a more in-depth description of what each function does.
# + id="eLVK-3f74HHU"
class LightGCN(MessagePassing):
    """A single LightGCN propagation layer.

    Performs sum aggregation of symmetric-degree-normalized neighbor embeddings,
    with no feature transformation or nonlinearity (per the LightGCN paper).
    """
    def __init__(self):
        # 'add': a node's updated embedding is the sum of its incoming messages.
        super(LightGCN, self).__init__(aggr='add')

    def message(self, x_j, norm):
        """Build the message from each neighbor j to its target node.

        args:
            x_j: embeddings of the source/neighbor node of each edge (shape: [E, emb_dim])
            norm: per-edge normalization coefficients computed in forward()
        returns:
            normalized neighbor embeddings, one row per edge
        """
        # Broadcast the per-edge coefficient across the embedding dimension.
        return norm.view(-1, 1) * x_j

    def forward(self, x, edge_index):
        """Run one round of LightGCN message passing/aggregation.

        args:
            x: current node embeddings (shape: [N, emb_dim])
            edge_index: message passing edges (shape: [2, E])
        returns:
            updated node embeddings
        """
        # Symmetric normalization 1/sqrt(deg_i * deg_j), as in the LightGCN paper.
        src, dst = edge_index
        inv_sqrt_deg = degree(dst).pow(-0.5)
        coeff = inv_sqrt_deg[src] * inv_sqrt_deg[dst]
        # propagate() runs message() on every edge, then aggregates per target node.
        # The keyword name `norm` must match the message() parameter name.
        return self.propagate(edge_index, x=x, norm=coeff)
# + [markdown] id="1u0qwh_U4TbI"
# Secondly, we need another class to define the full GNN, which consists of multiple LightGCN layers. This model has learnable user/item (i.e., playlist/song) embeddings.
# + id="U7w0A_cBqRo4"
class GNN(torch.nn.Module):
    """
    Overall graph neural network. Consists of learnable user/item (i.e., playlist/song) embeddings
    and LightGCN layers.
    """
    def __init__(self, embedding_dim, num_nodes, num_playlists, num_layers):
        super(GNN, self).__init__()

        self.embedding_dim = embedding_dim
        self.num_nodes = num_nodes # total number of nodes (songs + playlists) in dataset
        self.num_playlists = num_playlists # total number of playlists in dataset
        self.num_layers = num_layers

        # Initialize embeddings for all playlists and songs. Playlists will have indices from 0...num_playlists-1,
        # songs will have indices from num_playlists...num_nodes-1
        self.embeddings = torch.nn.Embedding(num_embeddings=self.num_nodes, embedding_dim=self.embedding_dim)
        torch.nn.init.normal_(self.embeddings.weight, std=0.1)

        self.layers = torch.nn.ModuleList() # LightGCN layers
        for _ in range(self.num_layers):
            self.layers.append(LightGCN())

        # Alias so scoring/loss code below reads cleanly.
        self.sigmoid = torch.sigmoid

    def forward(self):
        # Deliberately unusable: callers must use calc_loss() / evaluation() instead.
        raise NotImplementedError("forward() has not been implemented for the GNN class. Do not use")

    def gnn_propagation(self, edge_index_mp):
        """
        Performs the linear embedding propagation (using the LightGCN layers) and calculates final (multi-scale) embeddings
        for each user/item, which are calculated as a weighted sum of that user/item's embeddings at each layer (from
        0 to self.num_layers). Technically, the weighted sum here is the average, which is what the LightGCN authors recommend.

        args:
          edge_index_mp: a tensor of all (undirected) edges in the graph, which is used for message passing/propagation and
              calculating the multi-scale embeddings. (In contrast to the evaluation/supervision edges, which are distinct
              from the message passing edges and will be used for calculating loss/performance metrics).
        returns:
          final multi-scale embeddings for all users/items
        """
        x = self.embeddings.weight # layer-0 embeddings
        x_at_each_layer = [x] # stores embeddings from each layer. Start with layer-0 embeddings
        for i in range(self.num_layers): # now performing the GNN propagation
            x = self.layers[i](x, edge_index_mp)
            x_at_each_layer.append(x)
        final_embs = torch.stack(x_at_each_layer, dim=0).mean(dim=0) # take average to calculate multi-scale embeddings
        return final_embs

    def predict_scores(self, edge_index, embs):
        """
        Calculates predicted scores for each playlist/song pair in the list of edges. Uses dot product of their embeddings.

        args:
          edge_index: tensor of edges (between playlists and songs) whose scores we will calculate.
          embs: node embeddings for calculating predicted scores (typically the multi-scale embeddings from gnn_propagation())
        returns:
          predicted scores for each playlist/song pair in edge_index
        """
        scores = embs[edge_index[0,:], :] * embs[edge_index[1,:], :] # taking dot product for each playlist/song pair
        scores = scores.sum(dim=1)
        scores = self.sigmoid(scores)
        return scores

    def calc_loss(self, data_mp, data_pos, data_neg):
        """
        The main training step. Performs GNN propagation on message passing edges, to get multi-scale embeddings.
        Then predicts scores for each training example, and calculates Bayesian Personalized Ranking (BPR) loss.

        args:
          data_mp: tensor of edges used for message passing / calculating multi-scale embeddings
          data_pos: set of positive edges that will be used during loss calculation
          data_neg: set of negative edges that will be used during loss calculation
        returns:
          loss calculated on the positive/negative training edges
        """
        # Perform GNN propagation on message passing edges to get final embeddings
        final_embs = self.gnn_propagation(data_mp.edge_index)

        # Get edge prediction scores for all positive and negative evaluation edges
        pos_scores = self.predict_scores(data_pos.edge_index, final_embs)
        neg_scores = self.predict_scores(data_neg.edge_index, final_embs)

        # # Calculate loss (binary cross-entropy). Commenting out, but can use instead of BPR if desired.
        # all_scores = torch.cat([pos_scores, neg_scores], dim=0)
        # all_labels = torch.cat([torch.ones(pos_scores.shape[0]), torch.zeros(neg_scores.shape[0])], dim=0)
        # loss_fn = torch.nn.BCELoss()
        # loss = loss_fn(all_scores, all_labels)

        # Calculate loss (using variation of Bayesian Personalized Ranking loss, similar to the one used in official
        # LightGCN implementation at https://github.com/gusye1234/LightGCN-PyTorch/blob/master/code/model.py#L202)
        loss = -torch.log(self.sigmoid(pos_scores - neg_scores)).mean()
        return loss

    def evaluation(self, data_mp, data_pos, k):
        """
        Performs evaluation on validation or test set. Calculates recall@k.

        args:
          data_mp: message passing edges to use for propagation/calculating multi-scale embeddings
          data_pos: positive edges to use for scoring metrics. Should be no overlap between these edges and data_mp's edges
          k: value of k to use for recall@k
        returns:
          dictionary mapping playlist ID -> recall@k on that playlist
        """
        # Run propagation on the message-passing edges to get multi-scale embeddings
        final_embs = self.gnn_propagation(data_mp.edge_index)

        # Get embeddings of all unique playlists in the batch of evaluation edges
        unique_playlists = torch.unique_consecutive(data_pos.edge_index[0,:])
        playlist_emb = final_embs[unique_playlists, :] # has shape [number of playlists in batch, 64]

        # Get embeddings of ALL songs in dataset
        song_emb = final_embs[self.num_playlists:, :] # has shape [total number of songs in dataset, 64]

        # All ratings for each playlist in batch to each song in entire dataset (using dot product as the scoring function)
        # shape: [# playlists in batch, # songs in dataset], where entry i,j is rating of song j for playlist i
        ratings = self.sigmoid(torch.matmul(playlist_emb, song_emb.t()))

        # Calculate recall@k
        result = recall_at_k(ratings.cpu(), k, self.num_playlists, data_pos.edge_index.cpu(),
                             unique_playlists.cpu(), data_mp.edge_index.cpu())
        return result
# + [markdown] id="aRM_SMGB4iZ_"
# # A few important helper functions
# + [markdown] id="UIkI6Wga4k6h"
# During evaluation, our metric of choice is recall@k. Let's write a helper function that will calculate recall@k for us on a single batch of data.
# + id="0PQY4beKqSIw"
def recall_at_k(all_ratings, k, num_playlists, ground_truth, unique_playlists, data_mp):
    """
    Calculates recall@k during validation/testing for a single batch.
    args:
        all_ratings: array of shape [number of playlists in batch, number of songs in whole dataset]
        k: the value of k to use for recall@k
        num_playlists: the number of playlists in the dataset
        ground_truth: array of shape [2, X] where each column is a pair of (playlist idx, positive song idx). This is the
            batch that we are calculating metrics on.
        unique_playlists: 1D vector of length [number of playlists in batch], which specifies which playlist corresponds
            to each row of all_ratings
        data_mp: an array of shape [2, Y]. This is all of the known message-passing edges. We will use this to make sure we
            don't recommend songs that are already known to be in the playlist.
    returns:
        Dictionary mapping playlist ID (a plain int) -> recall@k on that playlist
    """
    # We don't want to recommend songs that are already known to be in the playlist!
    # Set those to a very low rating so they won't show up in the top k.
    # Keep only the playlist->song direction of the undirected message-passing edges (removes duplicate
    # edges, and guarantees that for each column the playlist idx is in row 0 and the song idx in row 1).
    known_edges = data_mp[:, data_mp[0, :] < num_playlists]
    playlist_to_idx_in_batch = {playlist: i for i, playlist in enumerate(unique_playlists.tolist())}
    exclude_playlists, exclude_songs = [], []  # already-known playlist/song links. Don't recommend these again
    for i in range(known_edges.shape[1]):  # looping over all known edges
        pl, song = known_edges[:, i].tolist()
        if pl in playlist_to_idx_in_batch:  # skip edges from playlists that are not in this batch
            exclude_playlists.append(playlist_to_idx_in_batch[pl])
            exclude_songs.append(song - num_playlists)  # shift by num_playlists to index into all_ratings correctly
    all_ratings[exclude_playlists, exclude_songs] = -10000  # very low score, so they won't be recommended

    # Get top k recommendations for each playlist
    _, top_k = torch.topk(all_ratings, k=k, dim=1)
    top_k += num_playlists  # topk returned indices into the song dimension only; shift up by
    # num_playlists to recover the actual song node indices

    # Calculate recall@k for each playlist in the batch.
    # BUGFIX: key the result dict by plain Python ints instead of 0-d tensors. Tensors hash by object
    # identity, so tensor keys would silently defeat membership checks such as the caller's
    # `assert playlist_idx not in all_recalls` duplicate detection.
    ret = {}
    for i, playlist in enumerate(unique_playlists.tolist()):
        pos_songs = ground_truth[1, ground_truth[0, :] == playlist]
        k_recs = top_k[i, :]  # top k recommendations for this playlist
        recall = len(np.intersect1d(pos_songs, k_recs)) / len(pos_songs)
        ret[playlist] = recall
    return ret
# + [markdown] id="GNSxfBFw4wP9"
# Also, during training, to calculate the Bayesian Personalized Ranking (BPR) loss, we will need to sample negative edges. Here is a function that does that.
#
# Note that to sample negatives, we randomly select songs for each playlist without checking if they are true negatives, since that is computationally expensive. This is fine in our case, because we will never be sampling more than ~100 songs for a playlist (out of thousands of songs), so although we will accidentally sample some positive songs occasionally, it will be an acceptably small number. However, if that is not the case for your dataset, please consider modifying this function to sample true negatives only.
# + id="NlZa_WZuqSK_"
def sample_negative_edges(batch, num_playlists, num_nodes):
    """
    Randomly samples one negative (playlist, song) edge for every positive edge in the batch,
    giving a balanced 1:1 ratio of positive to negative edges. Sampled songs are not verified
    to be true negatives (acceptable here since the catalogue is much larger than any playlist).
    """
    playlist_ids = batch.edge_index[0, :]
    sampled_songs = []
    for playlist_id in playlist_ids:  # one negative song per positive edge
        assert playlist_id < num_playlists  # row 0 should only ever contain playlist nodes
        # Song nodes occupy indices [num_playlists, num_nodes)
        sampled_songs.append(torch.randint(num_playlists, num_nodes, (1,)).item())
    neg_edge_index = torch.row_stack([playlist_ids, torch.LongTensor(sampled_songs)])
    return Data(edge_index=neg_edge_index)
# + [markdown] id="U1J8uPsGJ3X3"
# # Write the main training and test loops
# + [markdown] id="FR9CXHNz5InE"
# We are almost ready to train! Let's define the main training function. This performs GNN propagation on the message passing edges to get the final multi-scale embeddings, and then calculates the loss and performs back-propagation to update our learned embeddings.
# + id="CHP1iV95qSNe"
def train(model, data_mp, loader, opt, num_playlists, num_nodes, device):
    """
    Main training loop.
    args:
        model: the GNN model
        data_mp: message passing edges to use for performing propagation/calculating multi-scale embeddings
        loader: DataLoader that loads in batches of supervision/evaluation edges
        opt: the optimizer
        num_playlists: the number of playlists in the entire dataset
        num_nodes: the number of nodes (playlists + songs) in the entire dataset
        device: whether to run on CPU or GPU
    returns:
        the training loss for this epoch (averaged over edges)
    """
    model.train()
    running_loss, running_examples = 0, 0
    for batch in loader:
        # Drop DataLoader bookkeeping attributes that we don't need
        del batch.batch
        del batch.ptr

        opt.zero_grad()
        # One negative edge per positive edge, for the BPR-style loss
        negs = sample_negative_edges(batch, num_playlists, num_nodes)

        data_mp = data_mp.to(device)
        batch = batch.to(device)
        negs = negs.to(device)

        loss = model.calc_loss(data_mp, batch, negs)
        loss.backward()
        opt.step()

        # Weight each batch's loss by its number of edges when averaging
        batch_examples = batch.edge_index.shape[1]
        running_loss += loss.item() * batch_examples
        running_examples += batch_examples
    return running_loss / running_examples
# + [markdown] id="FX9jDBLm5UlO"
# Similarly, we can define the testing/evaluation loop. This is similar to training, except we calculate recall@k instead of BPR loss, and we do not perform back-propagation.
# + id="_wYSKg-nqlcu"
def test(model, data_mp, loader, k, device, save_dir, epoch):
    """
    Evaluation loop for validation/testing.
    args:
        model: the GNN model
        data_mp: message passing edges to use for propagation/calculating multi-scale embeddings
        loader: DataLoader that loads in batches of evaluation (i.e., validation or test) edges
        k: value of k to use for recall@k
        device: whether to use CPU or GPU
        save_dir: directory to save multi-scale embeddings for later analysis. If None, doesn't save any embeddings.
        epoch: the number of the current epoch
    returns:
        average recall@k over all playlists for this epoch
    """
    model.eval()
    all_recalls = {}
    with torch.no_grad():
        data_mp = data_mp.to(device)
        # Save multi-scale embeddings if save_dir is not None.
        # BUGFIX: use the `model` argument rather than the global `gnn` variable, so this
        # function works for any model instance passed in.
        if save_dir is not None:
            embs_to_save = model.gnn_propagation(data_mp.edge_index)
            torch.save(embs_to_save, os.path.join(save_dir, f"embeddings_epoch_{epoch}.pt"))
        # Run evaluation on each batch of evaluation edges
        for batch in loader:
            del batch.batch; del batch.ptr  # delete unwanted DataLoader bookkeeping attributes
            batch = batch.to(device)
            recalls = model.evaluation(data_mp, batch, k)
            for playlist_idx in recalls:
                assert playlist_idx not in all_recalls  # each playlist should appear in exactly one batch
            all_recalls.update(recalls)
    # Average recall@k across all playlists. (Local renamed from `recall_at_k` to avoid
    # shadowing the module-level recall_at_k() function.)
    mean_recall = np.mean(list(all_recalls.values()))
    return mean_recall
# + [markdown] id="D13mFO4ZJ76i"
# # Begin training!
# + [markdown] id="IJVGLO1X5cvZ"
# We are now ready to train! First, let's set a few important hyper-parameters. For this Colab, we will train for 30 epochs so that it finishes in a relatively short amount of time. However, in reality, you should increase that number and just monitor the validation recall@k to ensure that it is not overfitting!
#
# Additionally, make sure that your batch_size is large enough so that the training process is more computationally efficient.
#
# We also need to set the value of k for recall@k. Please ensure that you select a reasonable value. Here, we select 300 since there are approximately 5,700 songs, so if a song is suggested in the top 300 recommendations, that means it is in the top 5% of recommendations, which seems like a good cutoff point. Feel free to play around with this value, or even calculate it at multiple k values and take the average.
# + id="juX417Uc6L1I" colab={"base_uri": "https://localhost:8080/"} outputId="a45c9cdf-ed13-4060-c3fb-d5dbcbadd91b"
# Number of songs = total nodes minus playlist nodes
num_songs = num_nodes - num_playlists
print(f"There are {num_songs} unique songs in the dataset")
# Fraction of the catalogue that a top-300 recommendation list covers (~top 5%)
print(300 / num_songs)
# + id="Zpcck1gPqljH"
# Training hyperparameters
epochs = 30  # number of training epochs (we are keeping it relatively low so that this Colab runs fast)
k = 300  # value of k for recall@k. It is important to set this to a reasonable value! (~top 5% of all songs here)
num_layers = 3  # number of LightGCN layers (i.e., number of hops to consider during propagation)
batch_size = 2048  # batch size. refers to the # of playlists in the batch (each will come with all of its edges)
embedding_dim = 64  # dimension to use for the playlist/song embeddings
save_emb_dir = 'embeddings'  # path to save multi-scale embeddings during test(). If None, will not save any embeddings

# Use GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# + id="BtPVcfDkN4zR"
# Make directory to save embeddings.
# Use makedirs with exist_ok=True so re-running this cell (or the whole notebook)
# doesn't raise FileExistsError like os.mkdir would.
if save_emb_dir is not None:
    os.makedirs(save_emb_dir, exist_ok=True)
# + [markdown] id="ysyyEdE56ZPy"
# Here we create the DataLoaders for the supervision/evaluation edges (one for each of the train/val/test sets)
# + id="BAFoPa1mq7Wv"
# One DataLoader per split. Each batch holds `batch_size` playlists together with all of
# their supervision/evaluation edges. Only the training split is shuffled.
train_loader = DataLoader(train_ev, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_ev, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_ev, batch_size=batch_size, shuffle=False)
# + [markdown] id="QJNFn67p6hDG"
# Now we initialize our GNN model and optimizer:
# + id="wVLJ7DZs1ca6"
# Initialize GNN model (embeddings over the full playlist + song node set)
gnn = GNN(embedding_dim=embedding_dim, num_nodes=data.num_nodes, num_playlists=num_playlists, num_layers=num_layers).to(device)

opt = torch.optim.Adam(gnn.parameters(), lr=1e-3)  # using Adam optimizer
# + [markdown] id="pet-nDSW6jfN"
# Time to train!
#
# Note: this cell will take approximately 15 minutes to run.
# + id="_Z0cVWFTq7ZX" colab={"base_uri": "https://localhost:8080/"} outputId="88be5b99-44a9-463a-db95-3bbf962a71ab"
all_train_losses = []  # list of (epoch, training loss)
all_val_recalls = []   # list of (epoch, validation recall@k)

# Main training loop
for epoch in range(epochs):
    train_loss = train(gnn, train_mp, train_loader, opt, num_playlists, num_nodes, device)
    all_train_losses.append((epoch, train_loss))

    if epoch in range(11) or epoch % 5 == 0:  # perform validation for the first ~10 epochs, then every 5 epochs after that
        val_recall = test(gnn, val_mp, val_loader, k, device, save_emb_dir, epoch)
        all_val_recalls.append((epoch, val_recall))
        print(f"Epoch {epoch}: train loss={train_loss}, val_recall={val_recall}")
    else:
        print(f"Epoch {epoch}: train loss={train_loss}")

print()

# Print best validation recall@k value (and the epoch it occurred at)
best_val_recall = max(all_val_recalls, key = lambda x: x[1])
print(f"Best validation recall@k: {best_val_recall[1]} at epoch {best_val_recall[0]}")

# Print final recall@k on test set (no embeddings saved here: save_dir=None)
test_recall = test(gnn, test_mp, test_loader, k, device, None, None)
print(f"Test set recall@k: {test_recall}")
# + [markdown] id="cWHKxIFG6nXa"
# Let's plot the loss curve:
# + id="ebxqrIOCq7bs" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="1bfb2a0a-73a7-4ac6-9797-cc076655da47"
# Training loss per epoch
plt.plot([x[0] for x in all_train_losses], [x[1] for x in all_train_losses])
plt.xlabel("Epoch")
plt.ylabel("Training loss")
plt.show()

# + [markdown] id="aotsVQdY6pDJ"
# And also plot the validation recall@k over time:

# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="aufUYQbL8rok" outputId="c271ba66-fdfc-49a8-f0b2-14a7d5785450"
# Validation recall@k at the epochs where validation was run
plt.plot([x[0] for x in all_val_recalls], [x[1] for x in all_val_recalls])
plt.xlabel("Epoch")
plt.ylabel("Validation recall@k")
plt.show()
# + [markdown] id="pIvtdBz-BZ7B"
# # Visualize embeddings
# + [markdown] id="L1h-S22oGWJg"
# Now let's visualize our learned multi-scale playlist/song embeddings!
#
# We will make an animated video of the embeddings over the first 10 epochs (which is when the embeddings/loss are changing the most).
#
# To visualize it, we will use Principal Components Analysis (PCA) to project each 64-dimensional vector down into 2-dimensions so we can visualize it in a plot.
# + [markdown] id="ZYKbFdXeG0Or"
# #### Projecting down to 2 dimensions
# + id="k88S_mgOBcAO"
# Project the saved 64-dimensional multi-scale embeddings down to 2D with PCA, one set per epoch
pca_embs = []
for i in range(11):  # just visualizing epochs 0-10
    # Build the path with a proper os.path.join + f-string (was a no-op single-argument
    # join around string concatenation). Filename matches the format saved in test().
    embs = torch.load(os.path.join('embeddings', f'embeddings_epoch_{i}.pt'), map_location=torch.device('cpu'))
    pca = PCA(n_components=2).fit_transform(embs)
    pca_embs.append(pca)
# + id="GdgB0MBkBnqB"
# Load playlist/song details (titles, artist URIs, etc.) saved during preprocessing
with open('playlist_info.json', 'r') as f:
    playlists = json.load(f)
playlists = {int(k): v for k, v in playlists.items()}  # JSON keys are strings; convert back to int IDs

with open('song_info.json', 'r') as f:
    songs = json.load(f)
songs = {int(k): v for k, v in songs.items()}  # JSON keys are strings; convert back to int IDs
# + [markdown] id="SxgLcsTWG3uU"
# # Visualizing country music playlists
# + [markdown] id="kKXiwmNyG6dr"
# During our inspection of the results, we noticed that there was a large cluster of playlists that stood out in particular. By manually inspecting those playlists, we noticed that they were primarily country music playlists.
#
# To show this programmatically, we will find all the playlists that have the word "country" in the title. This is not perfect, but it will serve as a good estimate of which playlists in the dataset are mainly country music.
# + id="FuftwZrBBrrP"
# Find all playlists with "country" in the title
country = []         # IDs of playlists whose title contains "country"
country_colors = []  # color index per playlist for plotting (0 = country, 1 = other)
for id in playlists:
    name = playlists[id]['name'].lower()
    if 'country' in name:
        country.append(id)  # BUGFIX: append the playlist ID (the original appended the list to itself)
        country_colors.append(0)
    else:
        country_colors.append(1)

# + colab={"base_uri": "https://localhost:8080/"} id="i4Vud5vnHPLu" outputId="ef075c76-9255-4682-951a-99941ccf3734"
print(f"There are {len(country)} country playlists out of {len(playlists)} total playlists")
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="3IffldfhDBCo" outputId="ab6345cb-240b-47ab-aef8-135f445780a0"
# First set up the figure, the axis, and the plot element we want to animate
fig, ax = plt.subplots()
ax.set_xlim(( -0.5, 0.5))
ax.set_ylim((-0.5, 0.5))

colors = ['red', 'skyblue']  # red = country playlists, skyblue = all other playlists
from matplotlib.colors import ListedColormap
cmap = ListedColormap(colors)

# Empty scatter plot that the animation below fills in frame by frame
scat = ax.scatter([], [], c=[], s=3, alpha=0.3, cmap=cmap)

# NOTE: this plot below will be empty! Just ignore it and go to the next cell

# + id="_JkcqhfaDBeB"
def init():
    # Animation initializer: start with no points on screen
    scat.set_offsets([])
    return scat,

# + id="5z5MY8-EDBgY"
def animate(i):
    # Draw frame i: scatter the epoch-i PCA embeddings of all playlists
    embs = pca_embs[i]
    x, y = embs[:num_playlists, 0], embs[:num_playlists, 1]  # only plotting playlists, not songs
    data = np.hstack((x[:,np.newaxis], y[:, np.newaxis]))
    scat.set_offsets(data)
    scat.set_array(country_colors)  # color points by country vs. non-country
    return (scat,)

# + id="XD8t53fHDk6H"
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=11, interval=1000, blit=True)

# + [markdown] id="pVa9NFHzY-s1"
# Now let's watch the animated video as the embeddings change from epochs 0-10! The country playlists are in red, and non-country playlists are in blue.

# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="RimWC1eUDsVb" outputId="0f6a6bce-cfec-4692-c6d6-727bd8decdb7"
HTML(anim.to_html5_video())

# + [markdown] id="hM8BuHDQZAsU"
# You can clearly see the separation as the country playlists diverge from the non-country playlists!

# + id="lTQAvdmjL2O9"
# f = "country_animation.gif"
# writergif = animation.PillowWriter(fps=1)
# anim.save(f, writer=writergif, dpi=160)
# + [markdown] id="lQujiuLjOCct"
# # Visualizing Drake songs
# + [markdown] id="-sl2Ogv2OE2t"
# Now let's see the animated evolution of embeddings for all of Drake's songs that appear in the dataset.
#
# We expect that they would slowly begin to cluster together over time.
# + id="vdp21WfdSNlJ"
drake_id = 'spotify:artist:3TVXtAsR1Inumwj472S9r4'  # Spotify artist URI for Drake
drake = []         # song IDs by Drake
drake_colors = []  # color index per song for plotting (0 = Drake, 1 = everyone else)
drake_sizes = []   # point sizes for plotting (Drake songs drawn larger)
for id in songs:
    if songs[id]['artist_uri'] == drake_id:
        drake.append(id)
        drake_colors.append(0)
        drake_sizes.append(15)
    else:
        drake_colors.append(1)
        drake_sizes.append(3)

# + colab={"base_uri": "https://localhost:8080/"} id="WOeE6KPYOVxt" outputId="ee9b179d-e646-4f21-9d86-40fd89b3d787"
print(f"There are {len(drake)} Drake songs in the dataset")

# + id="rSIXoEcXYvgo"
colors = ['red', 'skyblue']  # red = Drake songs, skyblue = all other songs
cmap_drake = ListedColormap(colors)
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="KhD5g6B3SUEb" outputId="99a29315-9c23-444e-f04f-cd7267a7c5b3"
# First set up the figure, the axis, and the plot element we want to animate
fig, ax = plt.subplots()
ax.set_xlim(( -0.8, 0.8))
ax.set_ylim((-0.8, 0.8))

# Empty scatter plot that the animation below fills in frame by frame
scat = ax.scatter([], [], c=[], s=[], alpha=0.3, cmap=cmap_drake)

# NOTE: this plot will be empty! Just ignore it and continue to the next cell

# + id="-34pe4BFSXOy"
def init():
    # Animation initializer: start with no points on screen
    scat.set_offsets([])
    return scat,

# + id="WL04-6KSSY73"
def animate(i):
    # Draw frame i: scatter the epoch-i PCA embeddings of all songs
    embs = pca_embs[i]
    x, y = embs[num_playlists:, 0], embs[num_playlists:, 1]  # only plotting songs, not playlists
    data = np.hstack((x[:,np.newaxis], y[:, np.newaxis]))
    scat.set_offsets(data)
    scat.set_array(drake_colors)  # color Drake songs differently from the rest
    scat.set_sizes(drake_sizes)   # draw Drake songs larger
    return (scat,)

# + id="wFOslhM1Sk0D"
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=11, interval=1000, blit=True)

# + [markdown] id="bHSvwnp5yfWj"
# Below is the animation from epochs 0-10. We see that the red dots (Drake songs) begin to cluster together!

# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="cBSzl2q8Sn7-" outputId="bb885b3f-2d9a-4338-ca5a-e4b111a3d71c"
HTML(anim.to_html5_video())

# + id="b8F5m1DFSpu5"
# f = "drake_animation.gif"
# writergif = animation.PillowWriter(fps=1)
# anim.save(f, writer=writergif, dpi=160)
# + [markdown] id="onEH2ObySFCF"
# # Visualizing Taylor Swift songs
# + [markdown] id="e-P3VBLAZKlG"
# Finally, let's see the songs of Taylor Swift.
#
# Stay tuned to the end, since we make an interesting discovery!
# + id="XoHm_e5iSyBP"
swift_id = 'spotify:artist:06HL4z0CvFAxyc27GXpf02'  # Spotify artist URI for Taylor Swift
swift = []         # song IDs by Taylor Swift
swift_colors = []  # color index per song for plotting (0 = Swift, 1 = everyone else)
swift_sizes = []   # point sizes for plotting (Swift songs drawn larger)
for id in songs:
    if songs[id]['artist_uri'] == swift_id:
        swift.append(id)
        swift_colors.append(0)
        swift_sizes.append(15)
    else:
        swift_colors.append(1)
        swift_sizes.append(3)

# + id="F9TyEIR0TdjQ"
colors = ['red', 'skyblue']  # red = Taylor Swift songs, skyblue = all other songs
cmap_swift = ListedColormap(colors)

# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="MWgt7Ob6Tdlf" outputId="466538ac-f0d9-44be-b278-67898297784c"
# First set up the figure, the axis, and the plot element we want to animate
fig, ax = plt.subplots()
ax.set_xlim(( -0.8, 0.8))
ax.set_ylim((-0.8, 0.8))
# Empty scatter plot that the animation below fills in frame by frame
scat = ax.scatter([], [], c=[], s=[], alpha=0.3, cmap=cmap_swift)

# NOTE: this plot will be empty! Just ignore it and continue to the next cell
# + id="aiFLt3uLTdng"
def init():
    # Animation initializer: start with no points on screen
    scat.set_offsets([])
    return scat,

# + id="wyFvmNaDTp68"
def animate(i):
    # Draw frame i: scatter the epoch-i PCA embeddings of all songs
    embs = pca_embs[i]
    x, y = embs[num_playlists:, 0], embs[num_playlists:, 1]  # only plotting songs, not playlists
    data = np.hstack((x[:,np.newaxis], y[:, np.newaxis]))
    scat.set_offsets(data)
    scat.set_array(swift_colors)  # color Taylor Swift songs differently from the rest
    scat.set_sizes(swift_sizes)   # draw her songs larger
    return (scat,)

# + id="xYivVQQtTp9K"
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=11, interval=1000, blit=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="hfVeSGSLTp_C" outputId="3cb1880f-d137-40c1-cc74-a8ce6c554000"
HTML(anim.to_html5_video())

# + id="0pJ0RUvrWWOE"
# f = "swift_animation.gif"
# writergif = animation.PillowWriter(fps=1)
# anim.save(f, writer=writergif, dpi=160)
# + [markdown] id="tOxJrVLYWZ2p"
# You may notice something very interesting!
#
# By the end of the GIF, Taylor Swift songs are right at the border between the country songs vs. non-country songs. This makes a lot of sense, since she started off as a country singer and later switched to pop music.
#
# To explore this further, we will manually classify each of the 21 songs as being from her country vs. pop phases. We are mainly going off of the classifications of her songs on Wikipedia. You can see these below:
# + id="MryDP1wNTqA6"
# Taylor Swift songs from her pop era (classification roughly follows Wikipedia)
swift_pop = ['Look What You Made Me Do',
             'Shake It Off',
             'Style',
             "...Ready For It?",
             "Wildest Dreams",
             "I Knew You Were Trouble.",
             "22",
             "We Are Never Ever Getting Back Together",
             "Bad Blood",
             "Blank Space"]

# Taylor Swift songs from her country era
swift_country = ['White Horse',
                 'Picture To Burn',
                 'Our Song',
                 'Everything Has Changed',
                 'You Belong With Me',
                 'Love Story',
                 'Red',
                 'Sparks Fly',
                 'Mean',
                 'Crazier',
                 'Safe & Sound - from The Hunger Games Soundtrack']
# + [markdown] id="9lc9UMZwWzgP"
# Now let's plot her country songs in blue, her pop songs in red, and all other songs in gray, using the embeddings from epoch 10.
# + id="MiZZdL4LPKwP"
# Color each song: red = Swift pop era, blue = Swift country era, gray = everyone else.
# Non-Swift songs are also drawn more transparent (alpha 0.2).
swift_colors = []
swift_alphas = []
for id in songs:
    if songs[id]['artist_uri'] == swift_id:
        swift_alphas.append(1)
        if songs[id]['track_name'] in swift_pop:
            swift_colors.append("red")
        else: # then it's in swift_country
            swift_colors.append("blue")
    else:
        swift_colors.append("gray")
        swift_alphas.append(0.2)

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="74AKRVCKPKwP" outputId="96b530e4-f054-45d2-d525-0960bbd64bf8"
# Scatter the epoch-10 embeddings of all songs (reuses swift_sizes from the animation cell above)
plt.scatter(pca_embs[10][num_playlists:,0], pca_embs[10][num_playlists:, 1], alpha=swift_alphas, color=swift_colors,
            s=swift_sizes)
# + [markdown] id="8ubRFhw-W4i5"
# It looks like we were correct! Her country songs (in blue) are much closer to the cluster of country songs, and her pop songs (red) are right in the middle of the non-country song area.
#
# It even makes sense that her country songs are not entirely in the country cluster, because she has a lot of crossover appeal, which means she probably has many fans that listen to her country songs even if they don't listen to much country music in general. This crossover appeal likely pulls her country songs a bit back more in the direction of the non-country songs.
#
# Anyway, this is an extremely interesting result. Our model appears to have learned to embed the different phases of Taylor Swift's career into two parts, completely by itself!
# + id="xffIdmHwWhxX"
# + id="mZuI0ue7YkMw"
|
Spotify_Music_Recommendation_with_LightGCN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <div class="contentcontainer med left" style="margin-left: -50px;">
# <dl class="dl-horizontal">
# <dt>Title</dt> <dd> Path Element</dd>
# <dt>Dependencies</dt> <dd>Matplotlib</dd>
# <dt>Backends</dt> <dd><a href='./Path.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/Path.ipynb'>Bokeh</a></dd>
# </dl>
# </div>
import numpy as np
import holoviews as hv
hv.extension('matplotlib')
# A ``Path`` object is actually a collection of lines, unlike ``Curve`` where the y-axis is the dependent variable, a ``Path`` consists of lines connecting arbitrary points in two-dimensional space. The individual subpaths should be supplied as a list and will be stored as NumPy arrays, DataFrames or dictionaries for each column, i.e. any of the formats accepted by columnar data formats.
#
# In this example we will create a Lissajous curve, which describe complex harmonic motion:
# +
# %%opts Path (color='black' linewidth=4)
lin = np.linspace(0, np.pi*2, 200)

def lissajous(t, a, b, delta):
    # x and y coordinates of the Lissajous figure, plus the time value for each sample
    return (np.sin(a * t + delta), np.sin(b * t), t)

hv.Path([lissajous(lin, 3, 5, np.pi/2)])
# -
#
# If you looked carefully the ``lissajous`` function actually returns three columns, respectively for the x, y columns and a third column describing the point in time. By declaring a value dimension for that third column we can also color the Path by time. Since the value is cyclical we will also use a cyclic colormap (``'hsv'``) to represent this variable:
# %%opts Path [color_index='time'] (linewidth=4 cmap='hsv')
hv.Path([lissajous(lin, 3, 5, np.pi/2)], vdims='time')
# If we do not provide a ``color_index``, overlaid ``Path`` elements will cycle colors just like other elements do. Unlike ``Curve``, a single ``Path`` element can contain multiple lines that are disconnected from each other. A ``Path`` can therefore often be useful for drawing arbitrary annotations on top of an existing plot.
#
# A ``Path`` Element accepts multiple formats for specifying the paths, the simplest of which is passing a list of ``Nx2`` arrays of the x- and y-coordinates, alternative we can pass lists of coordinates. In this example we will create some coordinates representing rectangles and ellipses annotating an ``RGB`` image:
# +
# %%opts Path (linewidth=4)
angle = np.linspace(0, 2*np.pi, 100)
baby = list(zip(0.15*np.sin(angle), 0.2*np.cos(angle)-0.2))
adultR = [(0.25, 0.45), (0.35,0.35), (0.25, 0.25), (0.15, 0.35), (0.25, 0.45)]
adultL = [(-0.3, 0.4), (-0.3, 0.3), (-0.2, 0.3), (-0.2, 0.4),(-0.3, 0.4)]
scene = hv.RGB.load_image('../assets/penguins.png')
scene * hv.Path([adultL, adultR, baby]) * hv.Path([baby])
# -
# A ``Path`` can also be used as a means to display a number of lines with the same sampling along the x-axis at once. If we initialize the ``Path`` with a tuple of x-coordinates and stacked y-coordinates, we can quickly view a number of lines at once. Here we will generate a number of random traces each slightly offset along the y-axis:
# %%opts Path [aspect=3 fig_size=300]
N, NLINES = 100, 10
hv.Path((np.arange(N), np.random.rand(N, NLINES) + np.arange(NLINES)[np.newaxis, :])) *\
hv.Path((np.arange(N), np.random.rand(N, NLINES) + np.arange(NLINES)[np.newaxis, :]))
# For full documentation and the available style and plot options, use ``hv.help(hv.Path).``
|
examples/reference/elements/matplotlib/Path.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Speeding up the solvers
# This notebook contains a collection of tips on how to speed up the solvers
# %pip install pybamm -q # install PyBaMM if it is not installed
import pybamm
import matplotlib.pyplot as plt
import numpy as np
# ## Choosing a solver
# Since it is very easy to switch which solver is used for the model, we recommend you try different solvers for your particular use case. In general, the `CasadiSolver` is the fastest.
#
# Once you have found a good solver, you can further improve performance by trying out different values for the `method`, `rtol`, and `atol` arguments. Further options are sometimes available, but are solver specific. See [solver API docs](https://pybamm.readthedocs.io/en/latest/source/solvers/index.html) for details.
# ## Choosing and optimizing CasadiSolver settings
# ### Fast mode vs safe mode
# The `CasadiSolver` comes with a `mode` argument which can be set to "fast" or "safe" (the third option, "safe without grid", is experimental and should not be used for now).
# The "fast" mode is faster but ignores any "events" (such as a voltage cut-off point), while the "safe" mode is slower but does stop at events (with manually implemented "step-and-check" under the hood). Therefore, "fast" mode should be used whenever events are not expected to be hit (for example, when simulating a drive cycle or a constant-current discharge where the time interval is such that the simulation will finish before reaching the voltage cut-off). Conversely, "safe" mode should be used whenever events are important: in particular, when using the `Experiment` class.
# To demonstrate the difference between safe mode and fast mode, consider the following example
# +
# Set up model
model = pybamm.lithium_ion.DFN()
param = model.default_parameter_values
cap = param["Nominal cell capacity [A.h]"]
param["Current function [A]"] = cap * pybamm.InputParameter("Crate")
sim = pybamm.Simulation(model, parameter_values=param)
# Set up solvers. Reduce max_num_steps for the fast solver, for faster errors
fast_solver = pybamm.CasadiSolver(mode="fast", extra_options_setup={"max_num_steps": 1000})
safe_solver = pybamm.CasadiSolver(mode="safe")
# -
# Both solvers can solve the model up to 3700 s, but the fast solver ignores the voltage cut-off around 3.1 V
# +
safe_sol = sim.solve([0,3700], solver=safe_solver, inputs={"Crate": 1})
fast_sol = sim.solve([0,3700], solver=fast_solver, inputs={"Crate": 1})
timer = pybamm.Timer()
print("Safe:", safe_sol.solve_time)
print("Fast:", fast_sol.solve_time)
cutoff = param["Lower voltage cut-off [V]"]
plt.plot(fast_sol["Time [h]"].data, fast_sol["Terminal voltage [V]"].data, "b-", label="Fast")
plt.plot(safe_sol["Time [h]"].data, safe_sol["Terminal voltage [V]"].data, "r-", label="Safe")
plt.plot(fast_sol["Time [h]"].data, cutoff * np.ones_like(fast_sol["Time [h]"].data), "k--", label="Voltage cut-off")
plt.legend();
# -
# If we increase the integration interval, the safe solver still stops at the same point, but the fast solver fails
# +
safe_sol = sim.solve([0,4500], solver=safe_solver, inputs={"Crate": 1})
print("Safe:", safe_sol.solve_time)
plt.plot(safe_sol["Time [h]"].data, safe_sol["Terminal voltage [V]"].data, "r-", label="Safe")
plt.plot(safe_sol["Time [h]"].data, cutoff * np.ones_like(safe_sol["Time [h]"].data), "k--", label="Voltage cut-off")
plt.legend();
try:
sim.solve([0,4500], solver=fast_solver, inputs={"Crate": 1})
except pybamm.SolverError as e:
print("Solving fast mode, error occured:", e.args[0])
# -
# It should be noted here that the time in the warning "At t = 0.179234, , mxstep steps taken before reaching tout" is dimensionless time, since this is the time that the casadi solver sees. This can be converted to dimensional time as follows:
print(f"Errored at {0.179234 * param.evaluate(model.timescale)} seconds")
# We can solve with fast mode up to close to this time to understand why the model is failing
fast_sol = sim.solve([0,4049], solver=fast_solver, inputs={"Crate": 1})
fast_sol.plot([
"Minimum negative particle surface concentration",
"Electrolyte concentration [mol.m-3]",
"Maximum positive particle surface concentration",
"Terminal voltage [V]",
], time_unit="seconds", figsize=(9,9));
# In this case, we can see that the reason the solver is failing is that the concentration at the surface of the particles in the positive electrode hit their maximum (dimensionless) value of 1. Since the exchange current density has a term `sqrt(1-c_s_surf)`, the square root of a negative number is complex, `c_s_surf` going above 1 will cause the solver to fail.
# As a final note, there are some cases where the "safe" mode prints some warnings. This is linked to how the solver looks for events (sometimes stepping too far), and can be safely ignored if the solution looks sensible.
safe_sol_160 = sim.solve([0,160], solver=safe_solver, inputs={"Crate": 10})
plt.plot(safe_sol_160["Time [h]"].data, safe_sol_160["Terminal voltage [V]"].data, "r-", label="Safe")
plt.plot(safe_sol_160["Time [h]"].data, cutoff * np.ones_like(safe_sol_160["Time [h]"].data), "k--", label="Voltage cut-off")
plt.legend();
# Reducing the time interval to [0, 150], we see that the solution is exactly the same, without the warnings
safe_sol_150 = sim.solve([0,150], solver=safe_solver, inputs={"Crate": 10})
plt.plot(safe_sol_150["Time [h]"].data, safe_sol_150["Terminal voltage [V]"].data, "r-", label="Safe [0,150]")
plt.plot(safe_sol_160["Time [h]"].data, safe_sol_160["Terminal voltage [V]"].data, "b.", label="Safe [0,160]")
plt.plot(safe_sol_150["Time [h]"].data, cutoff * np.ones_like(safe_sol_150["Time [h]"].data), "k--", label="Voltage cut-off")
plt.legend();
safe_solver_2 = pybamm.CasadiSolver(mode="safe", dt_max=30)
safe_sol_2 = sim.solve([0,160], solver=safe_solver_2, inputs={"Crate": 10})
# ### Choosing dt_max to speed up the safe mode
# The parameter `dt_max` controls how large the steps taken by the `CasadiSolver` with "safe" mode are when looking for events.
# +
for dt_max in [10,20,100,1000,3700]:
safe_sol = sim.solve(
[0,3600],
solver=pybamm.CasadiSolver(mode="safe", dt_max=dt_max),
inputs={"Crate": 1}
)
print(f"With dt_max={dt_max}, took {safe_sol.solve_time} "+
f"(integration time: {safe_sol.integration_time})")
fast_sol = sim.solve([0,3600], solver=fast_solver, inputs={"Crate": 1})
print(f"With 'fast' mode, took {fast_sol.solve_time} "+
f"(integration time: {fast_sol.integration_time})")
# -
# In general, a larger value of `dt_max` gives a faster solution, since fewer integrator creations and calls are required.
#
# Below the solution time interval of 36s, the value of `dt_max` does not affect the solve time, since steps must be at least 36s large.
# The discrepancy between the solve time and integration time is due to the extra operations recorded by "solve time", such as creating the integrator. The "fast" solver does not need to do this (it reuses the first one it had already created), so the solve time is much closer to the integration time.
# The example above was a case where no events are triggered, so the largest `dt_max` works well. If we step over events, then it is possible to makes `dt_max` too large, so that the solver will attempt (and fail) to take large steps past the event, iteratively reducing the step size until it works. For example:
for dt_max in [10,20,100,1000,3600]:
# Reduce max_num_steps to fail faster
safe_sol = sim.solve(
[0,4500],
solver=pybamm.CasadiSolver(mode="safe", dt_max=dt_max, extra_options_setup={"max_num_steps": 1000}),
inputs={"Crate": 1}
)
print(f"With dt_max={dt_max}, took {safe_sol.solve_time} "+
f"(integration time: {safe_sol.integration_time})")
# The integration time with `dt_max=3600` remains the fastest, but the solve time is the slowest due to all the failed steps.
# ### Choosing the period for faster experiments
# The "period" argument of the experiments also affects how long the simulations take, for a similar reason to `dt_max`. Therefore, this argument can be manually tuned to speed up how long an experiment takes to solve.
# We start with one cycle of CCCV
experiment = pybamm.Experiment(
[
"Discharge at C/10 for 10 hours or until 3.3 V",
"Rest for 1 hour",
"Charge at 1 A until 4.1 V",
"Hold at 4.1 V until 50 mA",
"Rest for 1 hour",
]
)
solver = pybamm.CasadiSolver(mode="safe", extra_options_setup={"max_num_steps": 1000})
sim = pybamm.Simulation(model, experiment=experiment, solver=solver)
sol = sim.solve()
print("Took ", sol.solve_time)
# This gives a nice, smooth voltage curve
plt.plot(sol["Time [s]"].data, sol["Terminal voltage [V]"].data);
# We can speed up the experiment by increasing the period, but tradeoff is that the resolution of the solution becomes worse
experiment = pybamm.Experiment(
[
"Discharge at C/10 for 10 hours or until 3.3 V",
"Rest for 1 hour",
"Charge at 1 A until 4.1 V",
"Hold at 4.1 V until 50 mA",
"Rest for 1 hour",
],
period="10 minutes",
)
sim = pybamm.Simulation(model, experiment=experiment, solver=solver)
sol = sim.solve()
print("Took ", sol.solve_time)
plt.plot(sol["Time [s]"].data, sol["Terminal voltage [V]"].data);
# If we increase the period too much, the experiment becomes slower as the solver takes more failing steps
experiment = pybamm.Experiment(
[
"Discharge at C/10 for 10 hours or until 3.3 V",
"Rest for 1 hour",
"Charge at 1 A until 4.1 V",
"Hold at 4.1 V until 50 mA",
"Rest for 1 hour",
],
period="30 minutes",
)
sim = pybamm.Simulation(model, experiment=experiment, solver=solver)
sol = sim.solve()
print("Took ", sol.solve_time)
plt.plot(sol["Time [s]"].data, sol["Terminal voltage [V]"].data);
# We can control the period of individual parts of the experiment to get the fastest solution (again, at the cost of resolution)
experiment = pybamm.Experiment(
[
"Discharge at C/10 for 10 hours or until 3.3 V (5 hour period)",
"Rest for 1 hour (30 minute period)",
"Charge at 1 C until 4.1 V (10 minute period)",
"Hold at 4.1 V until 50 mA (10 minute period)",
"Rest for 1 hour (30 minute period)",
],
)
solver = pybamm.CasadiSolver(mode="safe", extra_options_setup={"max_num_steps": 1000})
sim = pybamm.Simulation(model, experiment=experiment, solver=solver)
sol = sim.solve()
print("Took ", sol.solve_time)
plt.plot(sol["Time [s]"].data, sol["Terminal voltage [V]"].data);
# As you can see, this kind of optimization requires a lot of manual tuning. We are working on ways to make the experiment class more efficient in general.
# ### Changing the time interval
# Finally, in some cases, changing the time interval (either the step size or the final time) may affect whether or not the casadi solver can solve the system.
# Therefore, if the casadi solver is failing, it may be worth changing the time interval (usually, reducing step size or final time) to see if that allows the solver to solve the model.
# Unfortunately, we have not yet been able to isolate a minimum working example to demonstrate this effect.
# ## Handling instabilities
# If the solver is taking a lot of steps, possibly failing with a `max_steps` error, and the error persists with different solvers and options, this suggests a problem with the model itself. This can be due to a few things:
#
# - A singularity in the model (such as division by zero). Solve up to the time where the model fails, and plot some variables to see if they are going to infinity. You can then narrow down the source of the problem.
# - High model stiffness. The first thing to do to tackle this is to non-dimensionalize your model. If you really don't want to do this, or you do it and the problem persists, plot different variables to identify which variables or parameters may be causing problems. To reduce stiffness, all (dimensionless) parameter values should be as close to 1 as possible.
# - Non-differentiable functions (see [below](#Smooth-approximations-to-non-differentiable-functions))
#
# If none of these fixes work, we are interested in finding out why - please get in touch!
# ### Smooth approximations to non-differentiable functions
# Some functions, such as `minimum`, `maximum`, `heaviside`, and `abs`, are discontinuous and/or non-differentiable (their derivative is discontinuous). Adaptive solvers can deal with this discontinuity, but will take many more steps close to the discontinuity in order to resolve it. Therefore, using smooth approximations instead can reduce the number of steps taken by the solver, and hence the integration time. See [this post](https://discourse.julialang.org/t/handling-instability-when-solving-ode-problems/9019/5) for more details.
#
# Here is an example using the `maximum` function. The function `maximum(x,1)` is continuous but non-differentiable at `x=1`, where its derivative jumps from 0 to 1. However, we can approximate it using the [`softplus` function](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Softplus), which is smooth everywhere and is sometimes used in neural networks as a smooth approximation to the RELU activation function. The `softplus` function is given by
# $$
# s(x,y;k) = \frac{\log(\exp(kx)+\exp(ky))}{k},
# $$
# where `k` is a strictly positive smoothing (or sharpness) parameter. The larger the value of `k`, the better the approximation but the stiffer the term (exp blows up quickly!). Usually, a value of `k=10` is a good middle ground.
#
# In PyBaMM, you can either call the `softplus` function directly, or change `pybamm.settings.max_smoothing` to automatically replace all your calls to `pybamm.maximum` with `softplus`.
# +
x = pybamm.Variable("x")
y = pybamm.Variable("y")
# Normal maximum
print("Exact maximum:", pybamm.maximum(x,y))
# Softplus
print("Softplus (k=10):", pybamm.softplus(x,y,10))
# Changing the setting to call softplus automatically
pybamm.settings.max_smoothing = 20
print("Softplus (k=20):", pybamm.maximum(x,y))
# All smoothing parameters can be changed at once
pybamm.settings.set_smoothing_parameters(30)
print("Softplus (k=30):", pybamm.maximum(x,y))
# Change back
pybamm.settings.set_smoothing_parameters("exact")
print("Exact maximum:", pybamm.maximum(x,y))
# -
# Note that if both sides are constant then pybamm will use the exact value even if the setting is set to smoothing
a = pybamm.InputParameter("a")
pybamm.settings.max_smoothing = 20
# Both inputs are constant so uses exact maximum
print("Exact:", pybamm.maximum(0.999,1).evaluate())
# One input is not constant (InputParameter) so uses softplus
print("Softplus:", pybamm.maximum(a,1).evaluate(inputs={"a": 0.999}))
pybamm.settings.set_smoothing_parameters("exact")
# Here is the plot of softplus with different values of `k`
# +
pts = pybamm.linspace(0, 2, 100)
fig, ax = plt.subplots(figsize=(10,5))
ax.plot(pts.evaluate(), pybamm.maximum(pts,1).evaluate(), lw=2, label="exact")
ax.plot(pts.evaluate(), pybamm.softplus(pts,1,5).evaluate(), ":", lw=2, label="softplus (k=5)")
ax.plot(pts.evaluate(), pybamm.softplus(pts,1,10).evaluate(), ":", lw=2, label="softplus (k=10)")
ax.plot(pts.evaluate(), pybamm.softplus(pts,1,100).evaluate(), ":", lw=2, label="softplus (k=100)")
ax.legend()
# -
# Solving a model with the exact maximum, and smooth approximations, demonstrates a clear speed-up even for a very simple model
# +
model_exact = pybamm.BaseModel()
model_exact.rhs = {x: pybamm.maximum(x, 1)}
model_exact.initial_conditions = {x: 0.5}
model_exact.variables = {"x": x, "max(x,1)": pybamm.maximum(x, 1)}
model_smooth = pybamm.BaseModel()
k = pybamm.InputParameter("k")
model_smooth.rhs = {x: pybamm.softplus(x, 1, k)}
model_smooth.initial_conditions = {x: 0.5}
model_smooth.variables = {"x": x, "max(x,1)": pybamm.softplus(x, 1, k)}
solver = pybamm.CasadiSolver(mode="fast")
# Exact solution
timer = pybamm.Timer()
time = 0
for _ in range(100):
exact_sol = solver.solve(model_exact, [0, 2])
# Report integration time, which is the time spent actually doing the integration
time += exact_sol.integration_time
print("Exact:", time/100)
sols = [exact_sol]
ks = [5, 10, 100]
for k in ks:
time = 0
for _ in range(100):
sol = solver.solve(model_smooth, [0, 2], inputs={"k": k})
time += sol.integration_time
print(f"Smooth, k={k}:", time/100)
sols.append(sol)
pybamm.dynamic_plot(sols, ["x", "max(x,1)"], labels=["exact"] + [f"smooth (k={k})" for k in ks]);
# -
# #### Other smooth approximations
# Here are the other smooth approximations for the other non-smooth functions:
pybamm.settings.set_smoothing_parameters(10)
print("Smooth minimum (softminus):\t {!s}".format(pybamm.minimum(x,y)))
print("Smooth heaviside (sigmoid):\t {!s}".format(x < y))
print("Smooth absolute value: \t\t {!s}".format(abs(x)))
pybamm.settings.set_smoothing_parameters("exact")
# ## References
#
# The relevant papers for this notebook are:
pybamm.print_citations()
|
examples/notebooks/speed-up-solver.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df = pd.read_csv('./data/tesco.csv',header=None)
df
# +
from itertools import chain, combinations
min_support = 0.3
min_confidence = 0.5
freq_set = {} # would collect frequence of each items_set
combine_set = {} # would collect items_set with accept support
def subsets(arr):
""" Returns non empty subsets of arr"""
return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])
def get_support(item):
"""local function which Returns the support of an item"""
return float(freq_set[item])/len(transaction_list)
def join_set(item_set, length):
"""Join a set with itself and returns the n-element item_sets"""
return set([i.union(j) for i in item_set for j in item_set if len(i.union(j)) == length])
def items_min_support(item_set, transaction_list, min_support, freq_set):
"""calculates the support for items in the item_set and returns a subset
of the item_set each of whose elements satisfies the minimum support"""
_item_set = set()
localSet = {}
for item in item_set:
for transaction in transaction_list:
if item.issubset(transaction):
freq_set.setdefault(item,0)
localSet.setdefault(item,0)
freq_set[item] += 1
localSet[item] += 1
for item, count in list(localSet.items()):
support = float(count)/len(transaction_list)
if support >= min_support:
_item_set.add(item)
return _item_set
def get_item_set_transaction_list(df):
transaction_list = list()
item_set = set()
for i in range(len(df)):
transaction = frozenset(df.iloc[i,:].dropna().values)
transaction_list.append(transaction)
for item in transaction:
item_set.add(frozenset([item])) # Generate 1-item_sets
return item_set, transaction_list
def update_combine_set(last_set):
k = 2
while(last_set != set([])):
combine_set[k-1] = last_set
last_set = join_set(last_set, k)
current_set = items_min_support(last_set,
transaction_list,
min_support,
freq_set)
last_set = current_set
k = k + 1
def get_support_confidence(min_confidence):
items_support = []
items_set_confidence = []
for key, value in list(combine_set.items()):
items_support.extend([(tuple(item), get_support(item))
for item in value])
for key, value in list(combine_set.items())[1:]:
for item in value:
_subsets = list(map(frozenset, [x for x in subsets(item)]))
for element in _subsets:
remain = item.difference(element)
if len(remain) > 0:
confidence = get_support(item)/get_support(element)
if confidence >= min_confidence:
items_set_confidence.append(((tuple(element), tuple(remain)),confidence))
return items_support,items_set_confidence
item_set, transaction_list = get_item_set_transaction_list(df) # item_set => every item ; transaction_list=>items in each cart
strip_items_set = items_min_support(item_set, transaction_list, min_support,freq_set) # items with accepted support
update_combine_set(strip_items_set)
support_list, confidence_list = get_support_confidence(min_confidence)
print("\n------frequence of each items_set----- :\n")
print(freq_set)
print("\n------items_set with accept support----- :\n")
print(combine_set)
print("\n--------------support------------- :\n")
for item, support in sorted(support_list, key=lambda x: len(x)):
print(f"item: {str(item)} , {support}")
print("\n------------confidence------------ :\n")
for rule, confidence in sorted(confidence_list, key=lambda x: x[1]):
pre, post = rule
print(f"Rule: {str(pre)} ==> {str(post)} , {confidence}")
|
.ipynb_checkpoints/mod06-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:MongoEnv] *
# language: python
# name: conda-env-MongoEnv-py
# ---
# #### Import Dependencies
# Dependencies
#from splinter import Browser
from bs4 import BeautifulSoup as bs
import requests
#website2= Nasa
Website2="https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
response2=requests.get(Website2)
soup=bs(response2.text,'html.parser')
# #### The website returns news that are not the latest shown and list_date cannot be accessed
result = soup.find_all('div', class_="list_text")
result
results1 = soup.find('div', class_='content_title').text.strip()
results1
print(f'"The first first title for the latest news provided by the website is: " {results1}')
results2 = soup.find_all('div', class_="image_and_description_container")
FirstPar=results2[0].find('div', class_='rollover_description_inner').text.strip()
print(f'"The first paragraph for the latest news provided by the website is: " {FirstPar}')
|
Missions_to_Mars/.ipynb_checkpoints/mars2-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/application_model_zoo/Example%20-%20Document%20Layout%20Analysis%20(FasterRCNN).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Document Layout Analysis Using Faster-RCNN
# ## About the network:
# 1. Paper on Faster-RCNN: https://arxiv.org/abs/1506.01497
#
# 2. Blog-1 on Faster-RCNN: https://towardsdatascience.com/faster-r-cnn-for-object-detection-a-technical-summary-474c5b857b46
#
# 3. Blog-2 on Faster-RCNN: https://towardsdatascience.com/faster-rcnn-object-detection-f865e5ed7fc4
# # Table of Contents
#
# ### 1. Installation Instructions
# ### 2. Use trained Model for Document Layout Analysis
# ### 3. How to train using PRImA Layout Analysis Dataset
# # Installation
#
# - Run these commands
#
# - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
#
# - cd Monk_Object_Detection/3_mxrcnn/installation
#
# - Select the right requirements file and run
#
# - cat requirements_cuda10.1.txt | xargs -n 1 -L 1 pip install
# ! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
# +
# For colab use the command below
# #! cd Monk_Object_Detection/3_mxrcnn/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install
# For Local systems and cloud select the right CUDA version
# ! cd Monk_Object_Detection/3_mxrcnn/installation && cat requirements_cuda10.1.txt | xargs -n 1 -L 1 pip install
# -
# # Use Already Trained Model for Demo
import os
import sys
sys.path.append("Monk_Object_Detection/3_mxrcnn/lib/")
sys.path.append("Monk_Object_Detection/3_mxrcnn/lib/mx-rcnn")
from infer_base import *
# +
#Download trained model
# -
# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1TZQSBiMDBrGhcT75AknTbofirSFXprt8' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1TZQSBiMDBrGhcT75AknTbofirSFXprt8" -O obj_dla_faster_rcnn_trained.zip && rm -rf /tmp/cookies.txt
# ! unzip -qq obj_dla_faster_rcnn_trained.zip
class_file = set_class_list("dla_fasterRCNN/classes.txt");
set_model_params(model_name="vgg16", model_path="dla_fasterRCNN/dla_fasterRCNN-vgg16.params");
set_hyper_params(gpus="0", batch_size=1);
set_img_preproc_params(img_short_side=300, img_long_side=500, mean=(196.45086004329943, 199.09071480252155, 197.07683846968297), std=(0.25779948968052024, 0.2550292865960972, 0.2553027154941914));
initialize_rpn_params();
initialize_rcnn_params();
sym = set_network();
mod = load_model(sym);
set_output_params(vis_thresh=0.9, vis=True)
Infer("Test_Images/test1.jpg", mod);
set_output_params(vis_thresh=0.7, vis=True)
Infer("Test_Images/test2.jpg", mod);
set_output_params(vis_thresh=0.5, vis=True)
Infer("Test_Images/test3.jpg", mod);
# # Train Your Own Model
# ## Dataset Credits
# - https://www.primaresearch.org/datasets/Layout_Analysis
# +
#Download Dataset
# -
# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1iBfafT1WHAtKAW0a1ifLzvW5f0ytm2i_' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1iBfafT1WHAtKAW0a1ifLzvW5f0ytm2i_" -O PRImA_Layout_Analysis_Dataset.zip && rm -rf /tmp/cookies.txt
# ! unzip -qq PRImA_Layout_Analysis_Dataset.zip
# # Data Preprocessing
# ### Library for Data Augmentation
# Refer to https://github.com/albumentations-team/albumentations for more details
# ! pip install albumentations
import os
import sys
import cv2
import numpy as np
import pandas as pd
from PIL import Image
import albumentations as A
import glob
import matplotlib.pyplot as plt
import xmltodict
import json
from tqdm.notebook import tqdm
from pycocotools.coco import COCO
root_dir = "PRImA Layout Analysis Dataset/";
img_dir = "Images/";
anno_dir = "XML/";
final_root_dir="Document_Layout_Analysis/" #Directory for jpeg and augmented images
# +
if not os.path.exists(final_root_dir):
os.makedirs(final_root_dir)
if not os.path.exists(final_root_dir+img_dir):
os.makedirs(final_root_dir+img_dir)
# -
# ## TIFF Image Format to JPEG Image Format
for name in glob.glob(root_dir+img_dir+'*.tif'):
im = Image.open(name)
name = str(name).rstrip(".tif")
name = str(name).lstrip(root_dir)
name = str(name).lstrip(img_dir)
im.save(final_root_dir+ img_dir+ name + '.jpg', 'JPEG')
# # Format Conversion and Data Augmentation
# ## Given Format- VOC Format
#
# ### Dataset Directory Structure
#
# ./PRImA Layout Analysis Dataset/ (root_dir)
# |
# |-----------Images (img_dir)
# | |
# | |------------------img1.jpg
# | |------------------img2.jpg
# | |------------------.........(and so on)
# |
# |
# |-----------Annotations (anno_dir)
# | |
# | |------------------img1.xml
# | |------------------img2.xml
# | |------------------.........(and so on)
#
#
# ## Intermediatory Format- Monk Format
#
# ### Dataset Directory Structure
#
# ./Document_Layout_Analysis/ (final_root_dir)
# |
# |-----------Images (img_dir)
# | |
# | |------------------img1.jpg
# | |------------------img2.jpg
# | |------------------.........(and so on)
# |
# |
# |-----------train_labels.csv (anno_file)
#
#
# ### Annotation file format
#
# | Id | Labels |
# | img1.jpg | x1 y1 x2 y2 label1 x1 y1 x2 y2 label2 |
#
# - Labels: xmin ymin xmax ymax label
# - xmin, ymin - top left corner of bounding box
# - xmax, ymax - bottom right corner of bounding box
# ## Required Format- COCO Format
#
# ### Dataset Directory Structure
#
# ./ (root_dir)
# |
# |------Document_Layout_Analysis (coco_dir)
# | |
# | |---Images (img_dir)
# | |----|
# | |-------------------img1.jpg
# | |-------------------img2.jpg
# | |-------------------.........(and so on)
# |
# |
# | |---annotations (anno_dir)
# | |----|
# | |--------------------instances_Images.json
# | |--------------------classes.txt
#
#
# - instances_Train.json -> In proper COCO format
# - classes.txt -> A list of classes in alphabetical order
files = os.listdir(root_dir + anno_dir);
combined = [];
# ### Data Augmentation Function
def augmentData(fname, boxes):
image = cv2.imread(final_root_dir+img_dir+fname)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transform = A.Compose([
A.IAAPerspective(p=0.7),
A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=5, p=0.5),
A.IAAAdditiveGaussianNoise(),
A.ChannelShuffle(),
A.RandomBrightnessContrast(),
A.RGBShift(p=0.8),
A.HueSaturationValue(p=0.8)
], bbox_params=A.BboxParams(format='pascal_voc', min_visibility=0.2))
for i in range(1, 9):
label=""
transformed = transform(image=image, bboxes=boxes)
transformed_image = transformed['image']
transformed_bboxes = transformed['bboxes']
#print(transformed_bboxes)
flag=False
for box in transformed_bboxes:
x_min, y_min, x_max, y_max, class_name = box
if(xmax<=xmin or ymax<=ymin):
flag=True
break
label+= str(int(x_min))+' '+str(int(y_min))+' '+str(int(x_max))+' '+str(int(y_max))+' '+class_name+' '
if(flag):
continue
cv2.imwrite(final_root_dir+img_dir+str(i)+fname, transformed_image)
label=label[:-1]
combined.append([str(i) + fname, label])
# ## VOC to Monk Format Conversion
# Applying Data Augmentation only on those images which contain at least 1 minority class so as to reduce bias in the dataset
#label generation for csv
for i in tqdm(range(len(files))):
box=[];
augment=False;
annoFile = root_dir + anno_dir + files[i];
f = open(annoFile, 'r');
my_xml = f.read();
anno= dict(dict(dict(xmltodict.parse(my_xml))['PcGts'])['Page'])
fname=""
for j in range(len(files[i])):
if((files[i][j])>='0' and files[i][j]<='9'):
fname+=files[i][j];
fname+=".jpg"
image = cv2.imread(final_root_dir+img_dir+fname)
height, width = image.shape[:2]
label_str = ""
for key in anno.keys():
if(key=='@imageFilename' or key=='@imageWidth' or key=='@imageHeight'):
continue
if(key=="TextRegion"):
if(type(anno["TextRegion"]) == list):
for j in range(len(anno["TextRegion"])):
text=anno["TextRegion"][j]
xmin=width
ymin=height
xmax=0
ymax=0
if(text["Coords"]):
if(text["Coords"]["Point"]):
for k in range(len(text["Coords"]["Point"])):
coordinates=anno["TextRegion"][j]["Coords"]["Point"][k]
xmin= min(xmin, int(coordinates['@x']));
ymin= min(ymin, int(coordinates['@y']));
xmax= min(max(xmax, int(coordinates['@x'])), width);
ymax= min(max(ymax, int(coordinates['@y'])), height);
if('@type' in text.keys()):
label_str+= str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '+text['@type']+' '
if(xmax<=xmin or ymax<=ymin):
continue
tbox=[];
tbox.append(xmin)
tbox.append(ymin)
tbox.append(xmax)
tbox.append(ymax)
tbox.append(text['@type'])
box.append(tbox)
else:
text=anno["TextRegion"]
xmin=width
ymin=height
xmax=0
ymax=0
if(text["Coords"]):
if(text["Coords"]["Point"]):
for k in range(len(text["Coords"]["Point"])):
coordinates=anno["TextRegion"]["Coords"]["Point"][k]
xmin= min(xmin, int(coordinates['@x']));
ymin= min(ymin, int(coordinates['@y']));
xmax= min(max(xmax, int(coordinates['@x'])), width);
ymax= min(max(ymax, int(coordinates['@y'])), height);
if('@type' in text.keys()):
label_str+= str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '+text['@type']+' '
if(xmax<=xmin or ymax<=ymin):
continue
tbox=[];
tbox.append(xmin)
tbox.append(ymin)
tbox.append(xmax)
tbox.append(ymax)
tbox.append(text['@type'])
box.append(tbox)
else:
val=""
if(key=='GraphicRegion'):
val="graphics"
augment=True
elif(key=='ImageRegion'):
val="image"
elif(key=='NoiseRegion'):
val="noise"
augment=True
elif(key=='ChartRegion'):
val="chart"
augment=True
elif(key=='TableRegion'):
val="table"
augment=True
elif(key=='SeparatorRegion'):
val="separator"
elif(key=='MathsRegion'):
val="maths"
augment=True
elif(key=='LineDrawingRegion'):
val="linedrawing"
augment=True
else:
val="frame"
augment=True
if(type(anno[key]) == list):
for j in range(len(anno[key])):
text=anno[key][j]
xmin=width
ymin=height
xmax=0
ymax=0
if(text["Coords"]):
if(text["Coords"]["Point"]):
for k in range(len(text["Coords"]["Point"])):
coordinates=anno[key][j]["Coords"]["Point"][k]
xmin= min(xmin, int(coordinates['@x']));
ymin= min(ymin, int(coordinates['@y']));
xmax= min(max(xmax, int(coordinates['@x'])), width);
ymax= min(max(ymax, int(coordinates['@y'])), height);
label_str+= str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '+ val +' '
if(xmax<=xmin or ymax<=ymin):
continue
tbox=[];
tbox.append(xmin)
tbox.append(ymin)
tbox.append(xmax)
tbox.append(ymax)
tbox.append(val)
box.append(tbox)
else:
text=anno[key]
xmin=width
ymin=height
xmax=0
ymax=0
if(text["Coords"]):
if(text["Coords"]["Point"]):
for k in range(len(text["Coords"]["Point"])):
coordinates=anno[key]["Coords"]["Point"][k]
xmin= min(xmin, int(coordinates['@x']));
ymin= min(ymin, int(coordinates['@y']));
xmax= min(max(xmax, int(coordinates['@x'])), width);
ymax= min(max(ymax, int(coordinates['@y'])), height);
label_str+= str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '+val+' '
if(xmax<=xmin or ymax<=ymin):
continue
tbox=[];
tbox.append(xmin)
tbox.append(ymin)
tbox.append(xmax)
tbox.append(ymax)
tbox.append(val)
box.append(tbox)
label_str=label_str[:-1]
combined.append([fname, label_str])
if(augment):
augmentData(fname, box)
df = pd.DataFrame(combined, columns = ['ID', 'Label']);
df.to_csv(final_root_dir + "/train_labels.csv", index=False);
# ## Monk to COCO Format
import os
import numpy as np
import cv2
import dicttoxml
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from tqdm import tqdm
import shutil
import json
import pandas as pd
# Input layout for the Monk-format -> COCO conversion.
root = "Document_Layout_Analysis";
img_dir = "Images/";
anno_file = "train_labels.csv";
dataset_path = root;
images_folder = root + "/" + img_dir;
annotations_path = root + "/annotations/";
# +
# Create the COCO annotations directory if it does not exist yet.
if not os.path.isdir(annotations_path):
    os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root + "/" + anno_file;
# +
# Output locations: images stay in place; annotations are written to
# annotations/instances_<imgdir>.json plus a plain-text class list.
output_dataset_path = root;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
# -
if not os.path.isdir(output_annotation_folder):
    os.mkdir(output_annotation_folder);
# Monk CSV format: one row per image — an "ID" column and a space-delimited
# "Label" column of repeated (x1 y1 x2 y2 class) tuples.
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
# +
# Scan every label string in the CSV and collect the unique class names.
# A set is used for membership (O(1) per test instead of the original
# O(len(anno)) list scan); the final `anno` is the same sorted list.
list_dict = [];
anno = [];
label_set = set()
for i in range(len(df)):
    labels = df[columns[1]][i];
    tokens = labels.split(delimiter);
    # Each object occupies 5 space-separated tokens: x1 y1 x2 y2 class.
    for j in range(len(tokens)//5):
        label_set.add(tokens[j*5+4])
anno = sorted(label_set)
# One COCO category entry per class; ids are assigned in sorted-name order,
# matching the `anno.index(label)` lookup used when writing annotations.
for i in tqdm(range(len(anno))):
    tmp = {};
    tmp["supercategory"] = "master";
    tmp["id"] = i;
    tmp["name"] = anno[i];
    list_dict.append(tmp);
# Persist the class list so inference can map category ids back to names;
# `with` guarantees the file handle is closed even if a write fails.
with open(output_classes_file, 'w') as anno_f:
    for i in range(len(anno)):
        anno_f.write(anno[i] + "\n");
# +
# Assemble the COCO "instances" dictionary: images, annotations, categories.
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
    img_name = df[columns[0]][i];
    labels = df[columns[1]][i];
    tmp = labels.split(delimiter);
    image_in_path = input_images_folder + "/" + img_name;
    # Read the image only to record its true height/width in the COCO entry.
    # NOTE(review): cv2.imread returns None for unreadable files, which would
    # crash on .shape — this assumes every CSV row has a valid image on disk.
    img = cv2.imread(image_in_path, 1);
    h, w, c = img.shape;
    images_tmp = {};
    images_tmp["file_name"] = img_name;
    images_tmp["height"] = h;
    images_tmp["width"] = w;
    images_tmp["id"] = image_id;
    coco_data["images"].append(images_tmp);
    # Every object occupies 5 space-separated tokens: x1 y1 x2 y2 class.
    for j in range(len(tmp)//5):
        x1 = int(tmp[j*5+0]);
        y1 = int(tmp[j*5+1]);
        x2 = int(tmp[j*5+2]);
        y2 = int(tmp[j*5+3]);
        label = tmp[j*5+4];
        annotations_tmp = {};
        annotations_tmp["id"] = annotation_id;
        annotation_id += 1;
        annotations_tmp["image_id"] = image_id;
        annotations_tmp["segmentation"] = [];
        annotations_tmp["ignore"] = 0;
        annotations_tmp["area"] = (x2-x1)*(y2-y1);
        annotations_tmp["iscrowd"] = 0;
        # COCO bbox convention is [x, y, width, height].
        annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
        annotations_tmp["category_id"] = anno.index(label);
        coco_data["annotations"].append(annotations_tmp)
    image_id += 1;
# Write the result; `with` closes the handle even if serialization fails.
with open(output_annotation_file, 'w') as outfile:
    json_str = json.dumps(coco_data, indent=4);
    outfile.write(json_str);
# -
# ### Function to get mean and standard deviation of dataset
def normalize(file_list=None, base_dir=None, image_dir=None, reader=None):
    """Compute per-channel RGB mean and std over the dataset images.

    Images are loaded (BGR channel order, as OpenCV produces), scaled to
    [0, 1], and per-channel running sums / sums-of-squares are accumulated
    so the whole dataset never has to be held in memory at once.

    Args:
        file_list: image file names; defaults to the module-level ``files``.
        base_dir: dataset root; defaults to the module-level ``final_root_dir``.
        image_dir: image subfolder; defaults to the module-level ``img_dir``.
        reader: callable path -> HxWx3 array; defaults to ``cv2.imread``.
            (Injectable for testing; all defaults preserve the original
            zero-argument behavior.)

    Returns:
        (rgb_mean, rgb_std): two 3-element lists in RGB channel order,
        both in the normalized [0, 1] scale.
    """
    if file_list is None:
        file_list = files
    if base_dir is None:
        base_dir = final_root_dir
    if image_dir is None:
        image_dir = img_dir
    if reader is None:
        reader = cv2.imread
    channel_sum = np.zeros(3)
    channel_sum_squared = np.zeros(3)
    num_pixels = 0
    for fname in file_list:
        file_path = base_dir + image_dir + fname
        img = reader(file_path)
        img = img / 255.
        # img.size counts all H*W*3 values; divide by 3 for pixels per channel.
        num_pixels += (img.size / 3)
        channel_sum += np.sum(img, axis=(0, 1))
        channel_sum_squared += np.sum(np.square(img), axis=(0, 1))
    mean = channel_sum / num_pixels
    # Var = E[x^2] - (E[x])^2, computed per channel.
    std = np.sqrt((channel_sum_squared / num_pixels) - mean**2)
    # BGR -> RGB conversion (OpenCV loads channels in BGR order).
    rgb_mean = list(mean)[::-1]
    rgb_std = list(std)[::-1]
    return rgb_mean, rgb_std
img_dir = "Images/";
final_root_dir="Document_Layout_Analysis/"
files = os.listdir(final_root_dir + img_dir);
mean, std = normalize()
# Scale the mean back to the [0, 255] pixel range; the std is left in the
# normalized scale (this matches the values fed to set_img_preproc_params
# in the training notebook below — confirm against mx-rcnn's expectations).
mean=[x*255 for x in mean]
print(mean)
print(std)
# +
#[196.45086004329943, 199.09071480252155, 197.07683846968297]
#[0.25779948968052024, 0.2550292865960972, 0.2553027154941914]
# -
# -
# # Training
import os
import sys
sys.path.append("Monk_Object_Detection/3_mxrcnn/lib/")
sys.path.append("Monk_Object_Detection/3_mxrcnn/lib/mx-rcnn")
from train_base import *
# Dataset params
root_dir = "./";
coco_dir = "Document_Layout_Analysis";
img_dir = "Images";
set_dataset_params(root_dir=root_dir, coco_dir=coco_dir, imageset=img_dir);
# ### Available models
# vgg16
# resnet50
# resnet101
# Model Type
set_model_params(model_name="vgg16");
#Hyperparameters
set_hyper_params(gpus="0", lr=0.003, lr_decay_epoch='20', epochs=30, batch_size=8);
# log_interval: iterations between log lines; save_prefix: checkpoint name
# (the inference section loads "model_vgg16-0030.params", i.e. epoch 30).
set_output_params(log_interval=500, save_prefix="model_vgg16");
#Preprocessing image parameters(mean and std calculated in preprocessing notebook)
set_img_preproc_params(img_short_side=300, img_long_side=500, mean=(196.45086004329943, 199.09071480252155, 197.07683846968297), std=(0.25779948968052024, 0.2550292865960972, 0.2553027154941914));
#Initializing Parameters
initialize_rpn_params();
initialize_rcnn_params();
# +
#Removing cache if any
# -
# mx-rcnn caches the roidb under ./cache/; a stale cache from a previous
# dataset would otherwise be reused by set_dataset().
if os.path.isdir("./cache/"):
    os.system("rm -r ./cache/")
#loading dataset
roidb = set_dataset();
#loading model
sym = set_network();
train(sym, roidb);
# # Inference
import os
import sys
sys.path.append("Monk_Object_Detection/3_mxrcnn/lib/")
sys.path.append("Monk_Object_Detection/3_mxrcnn/lib/mx-rcnn")
from infer_base import *
# Class list written by the COCO-conversion step; maps category ids -> names.
class_file = set_class_list("./Document_Layout_Analysis/annotations/classes.txt");
#Model - Select the model as per number of iterations it has been trained for
set_model_params(model_name="vgg16", model_path="trained_model/model_vgg16-0030.params");
set_hyper_params(gpus="0", batch_size=1);
# Preprocessing must match the values used at training time exactly.
set_img_preproc_params(img_short_side=300, img_long_side=500, mean=(196.45086004329943, 199.09071480252155, 197.07683846968297), std=(0.25779948968052024, 0.2550292865960972, 0.2553027154941914));
initialize_rpn_params();
initialize_rcnn_params();
sym = set_network();
mod = load_model(sym);
# vis_thresh is the minimum detection score to draw; varied per test image.
set_output_params(vis_thresh=0.9, vis=True)
Infer("PRImA_Layout_Analysis_Dataset/Test_Images/test1.jpg", mod);
set_output_params(vis_thresh=0.7, vis=True)
Infer("PRImA_Layout_Analysis_Dataset/Test_Images/test2.jpg", mod);
set_output_params(vis_thresh=0.5, vis=True)
Infer("PRImA_Layout_Analysis_Dataset/Test_Images/test3.jpg", mod);
|
application_model_zoo/Example - Document Layout Analysis (FasterRCNN).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# `sklearn.feature_selection.variance_threshold` is a private module path
# (removed in modern scikit-learn); import from the public package instead.
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.cluster import KMeans
from pandas import read_excel
import warnings
warnings.filterwarnings('ignore')
# `urllib2` is Python 2 only; alias the Python 3 equivalent under the old
# name so any later cells referencing `urllib2` keep working.
import urllib.request as urllib2
# -
slo_df = read_excel('slo_dataset.xlsx')
slo_df.reset_index(drop=True, inplace=True)
# Keep only numeric columns for the statistical analysis.
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
stats_df = slo_df.select_dtypes(include=numerics)
# ### Removing identifiers
stats_df.drop([col for col in stats_df.columns if 'Id' in col], axis=1, inplace=True)
# ### Removing useless numeric columns
stats_df.drop(['season', 'champLevel'], axis=1, inplace=True)
# ### Normalizing the rest of the stats by time
# #### Transform game duration format into minutes
stats_df['gameDuration_in_minutes'] = stats_df.gameDuration / 60
# ### Exclude columns that aren't affected by time
# Columns containing '_at_' or 'tt' are point-in-time snapshots and are kept
# unnormalized; everything else becomes a per-minute rate.
stats_to_normalize = [col for col in stats_df.columns if '_at_' not in col and 'tt' not in col and 'gameDuration' not in col]
stats_normalized_df = stats_df[stats_to_normalize].apply(lambda x: x / stats_df.gameDuration_in_minutes)
not_time_affected_stats_df = stats_df[[col for col in stats_df.columns if '_at_' in col or 'tt' in col]]
# ### Clustering playstyles by position
positions = slo_df.position.unique().tolist()
positions
# For each lane/position: cluster the per-minute stats into 3 playstyles,
# then pick the 10 features most associated with winning per cluster.
stats_by_position = {}
for i, p in enumerate(positions):
    # Preprocessing: rows are interleaved by position, so every 5th row
    # starting at offset i belongs to position p. .copy() avoids mutating
    # (and the SettingWithCopy warning on) a slice of stats_normalized_df.
    stats = stats_normalized_df[i::5].copy()
    nan_cols = stats.iloc[:, stats.isnull().any().tolist()].columns
    stats.drop(nan_cols, axis=1, inplace=True)
    labels = slo_df[i::5].win
    # Clustering
    km = KMeans(n_clusters=3)
    clusters = km.fit_predict(X=stats)
    stats['clusters'] = clusters
    c0 = stats[stats.clusters == 0]
    c1 = stats[stats.clusters == 1]
    c2 = stats[stats.clusters == 2]
    clusters = [c0, c1, c2]
    stats_by_position[p] = {'X': stats, 'top_10_features_by_cluster': []}
    # Inner index renamed (was `i`, shadowing the outer position index).
    for ci, c in enumerate(clusters):
        # .loc replaces the long-removed DataFrame.ix indexer; the index
        # labels of cluster c select the matching win/loss labels.
        c_new = SelectKBest(chi2, k=10).fit(X=c, y=slo_df.loc[c.index].win)
        c_new_cols = c.iloc[:, c_new.get_support()].columns.tolist()
        stats_by_position[p]['top_10_features_by_cluster'].append(c_new_cols)
stats_by_position['SUPP']['X'].clusters.value_counts()
# NOTE(review): `top_c_2` and `top_stats` are not defined anywhere in this
# notebook — these cells raise NameError as written and look like leftovers
# from an earlier interactive session; verify before running.
vt = VarianceThreshold(threshold=.5)
vt.fit(X=top_c_2)
top_stats.iloc[:, vt.get_support()].columns
top_stats.iloc[:, vt.get_support()].columns
top_stats.fillna(top_stats.mean())
|
feature_selection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["pdf-title"]
# # Image features exercise
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.
#
# All of your work for this exercise will be done in this notebook.
# + tags=["pdf-ignore"]
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# + [markdown] tags=["pdf-ignore"]
# ## Load data
# Similar to previous exercises, we will load CIFAR-10 data from disk.
# + tags=["pdf-ignore"]
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
                     loader=None):
    """Load CIFAR-10 from disk and split it into train / val / test subsets.

    Args:
        num_training: number of training examples to keep (taken first).
        num_validation: validation examples, taken immediately after the
            training slice.
        num_test: test examples, taken from the front of the raw test split.
        loader: callable dir -> (X_train, y_train, X_test, y_test);
            defaults to ``load_CIFAR10`` (injectable for testing — the
            default preserves the original behavior).

    Returns:
        (X_train, y_train, X_val, y_val, X_test, y_test)
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    # Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
    # NOTE: inside a function these names are unbound locals, so the `del`
    # raises UnboundLocalError (a NameError subclass); catch only that
    # instead of the original bare `except:` that could hide real failures.
    try:
        del X_train, y_train
        del X_test, y_test
        print('Clear previously loaded data.')
    except NameError:
        pass
    if loader is None:
        loader = load_CIFAR10
    X_train, y_train, X_test, y_test = loader(cifar10_dir)
    # Subsample the data
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]
    return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
# + [markdown] tags=["pdf-ignore"]
# ## Extract Features
# For each image we will compute a Histogram of Oriented
# Gradients (HOG) as well as a color histogram using the hue channel in HSV
# color space. We form our final feature vector for each image by concatenating
# the HOG and color histogram feature vectors.
#
# Roughly speaking, HOG should capture the texture of the image while ignoring
# color information, and the color histogram represents the color of the input
# image while ignoring texture. As a result, we expect that using both together
# ought to work better than using either alone. Verifying this assumption would
# be a good thing to try for your own interest.
#
# The `hog_feature` and `color_histogram_hsv` functions both operate on a single
# image and return a feature vector for that image. The extract_features
# function takes a set of images and a list of feature functions and evaluates
# each feature function on each image, storing the results in a matrix where
# each column is the concatenation of all feature vectors for a single image.
# + tags=["pdf-ignore"]
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
# Each image's feature vector = HOG features ++ hue-channel color histogram.
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
# (statistics are computed on the training split only, then applied to all
# three splits, so no test information leaks into preprocessing)
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension (constant 1 appended as last column)
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# -
# ## Train SVM on features
# Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.
# + tags=["code"]
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]
# results maps (lr, reg) -> (train_accuracy, val_accuracy)
results = {}
best_val = -1
best_svm = None
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Grid search over (lr, reg); keep the model with the best validation accuracy.
for lr in learning_rates:
    for reg in regularization_strengths:
        model = LinearSVM()
        # NOTE(review): loss_hist is captured but never used afterwards.
        loss_hist = model.train(X_train_feats,y_train,learning_rate = lr,reg = reg,num_iters = 1500)
        at = np.mean(model.predict(X_train_feats) == y_train)  # train accuracy
        av = np.mean(model.predict(X_val_feats) == y_val)      # validation accuracy
        results[(lr,reg)] = (at,av)
        if av>best_val:
            best_val = av
            best_svm = model
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# -
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print(test_accuracy)
# +
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
    # False positives for this class: predicted `cls`, true label differs.
    idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
    idxs = np.random.choice(idxs, examples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        # Grid: examples_per_class rows x one column per class.
        plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
        plt.imshow(X_test[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls_name)
plt.show()
# + [markdown] tags=["pdf-inline"]
# ### Inline question 1:
# Describe the misclassification results that you see. Do they make sense?
#
#
# $\color{blue}{\textit Your Answer:}$
#
#
#
# +
# yes, svm is trained on general color distribution
# -
# ## Neural Network on image features
# Earlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels.
#
# For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.
# + tags=["pdf-ignore"]
# Preprocessing: Remove the bias dimension
# Make sure to run this cell only ONCE
# (the bias is only the last column the first time through; a second run
# would strip a real feature column)
print(X_train_feats.shape)
X_train_feats = X_train_feats[:, :-1]
X_val_feats = X_val_feats[:, :-1]
X_test_feats = X_test_feats[:, :-1]
print(X_train_feats.shape)
# + tags=["code"]
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
# hidden_dim = 500
num_classes = 10
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Grid search over learning rate, hidden size, and L2 regularization;
# the best model (by validation accuracy), its training stats, and its
# hyperparameters are kept in module-level variables for later cells.
best_val = -1
learning_rates = [1e-1,2e-1,3e-1]
hs = [500,400,300,200]
lamda = [1e-7,1e-6]
for lr in learning_rates:
    for hidden_dim in hs:
        for reg in lamda:
            model = TwoLayerNet(input_dim, hidden_dim, num_classes)
            stat = model.train(X_train_feats, y_train, X_val_feats, y_val,num_iters=2500,learning_rate=lr
                               ,reg=reg, verbose=False)
            val_acc = (model.predict(X_val_feats) == y_val).mean()
            if val_acc>best_val:
                best_val = val_acc
                best_net = model
                stats = stat        # training curves of the best run
                best_lr = lr
                best_reg = reg
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# -
best_val
(best_net.predict(X_train_feats) == y_train).mean()
# +
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
plt.legend()
plt.show()
|
assignment1/features.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import sys
sys.path.append('../src')
from collections import Counter
import localmodule
import functools
from joblib import Memory, Parallel, delayed
from librosa.display import specshow
import math
import music21 as m21
import numpy as np
import os
import scipy
# + deletable=true editable=true
composer_str = "Haydn"
track_str = "op71n2-04"
# Define constants.
J_tm = 9                 # temporal wavelet depth (2*J_tm-1 filters below)
N = 2**10                # piano-roll length in frames
n_octaves = 8
midi_octave_offset = 2   # lowest MIDI octave mapped to pitch row 0
quantization = 2.0       # frames per quarter note
xi = 0.4                 # Morlet center frequency
sigma = 0.16             # Morlet bandwidth
midis = []
# Parse Kern score with music21.
#data_dir = localmodule.get_data_dir()
data_dir = '/Users/vl238/nemisig2018/nemisig2018_data'
dataset_name = localmodule.get_dataset_name()
kern_name = "_".join([dataset_name, "kern"])
kern_dir = os.path.join(data_dir, kern_name)
composer_dir = os.path.join(kern_dir, composer_str)
track_name = track_str + ".krn"
track_path = os.path.join(composer_dir, track_name)
score = m21.converter.parse(track_path)
pianoroll_parts = []
n_parts = len(score.parts)
n_semitones = 12 * n_octaves
# Loop over parts to extract piano rolls.
for part_id in range(n_parts):
    part = score.parts[part_id]
    # One binary (pitch x time) matrix per part.
    pianoroll_part = np.zeros((n_semitones, N))
    # Get the measure offsets
    measure_offset = {}
    for el in part.recurse(classFilter=('Measure')):
        measure_offset[el.measureNumber] = el.offset
    # Loop over notes
    for note in part.recurse(classFilter=('Note')):
        # Start/end frame = (measure offset + in-measure offset) quarter
        # notes, scaled by `quantization` frames per quarter note.
        note_start = int(math.ceil(
            (measure_offset[note.measureNumber] +\
            note.offset) *\
            quantization))
        note_end = int(math.ceil((
            measure_offset[note.measureNumber] +\
            note.offset +\
            note.duration.quarterLength) *\
            quantization))
        # Mark the note active on its pitch row for its duration.
        pianoroll_part[
            note.midi - midi_octave_offset * 12,
            note_start:note_end] = 1
        midis.append(note.midi)
    pianoroll_parts.append(pianoroll_part)
# Stack parts into piano roll.
mtrack_pianoroll = np.stack(pianoroll_parts, 2)
pianoroll = mtrack_pianoroll.max(axis=2)
# Setup wavelet filter bank over time.
# Filters are stored in the Fourier domain; for each scale j we keep the
# Morlet psi and its conjugate (spectrum reversed via roll+flip).
wavelet_filterbank_ft = np.zeros((1, N, 2*J_tm-1))
for j in range(J_tm-1):
    # Dyadic scaling: center frequency and bandwidth halve at each scale.
    xi_j = xi * 2**(-j)
    sigma_j = sigma * 2**(-j)
    center = xi_j * N
    den = 2 * sigma_j * sigma_j * N * N
    psi_ft = localmodule.morlet(center, den, N, n_periods=4)
    conj_psi_ft = np.roll(psi_ft, -1)[::-1]
    wavelet_filterbank_ft[0, :, -1 - 2*j] = psi_ft
    wavelet_filterbank_ft[0, :, -1 - (2*j+1)] = conj_psi_ft
# Append scaling function phi (average).
wavelet_filterbank_ft[0, 0, 0] = 1
# Convolve pianoroll with filterbank: pointwise product in Fourier domain,
# then inverse transform back to time.
pianoroll_ft = scipy.fftpack.fft(pianoroll, axis=1)
pianoroll_ft = np.expand_dims(pianoroll_ft, axis=2)
wavelet_transform_ft = pianoroll_ft * wavelet_filterbank_ft
wavelet_transform = scipy.fftpack.ifft(wavelet_transform_ft, axis=1)
# +
# Reshape MIDI axis to chromagram: pitch index -> (chroma, octave).
chromagram = np.reshape(wavelet_transform,
    (12, -1, wavelet_transform.shape[1], wavelet_transform.shape[2]), 'F')
# Construct eigentriads: a discrete Fourier basis over the 3 triad notes.
cosine_basis = np.array([[np.cos(2*np.pi*omega*t/3) for omega in range(3)] for t in range(3)]).T
sine_basis = np.array([[np.sin(2*np.pi*omega*t/3) for omega in range(3)] for t in range(3)]).T
# 1j / builtin complex replace np.complex, which was removed in NumPy 1.24.
fourier_basis = cosine_basis + 1j * sine_basis
major_template = [0, 4, 7]
minor_template = [0, 3, 7]
major_eigentriads = np.zeros((12, 3), dtype=complex)
minor_eigentriads = np.zeros((12, 3), dtype=complex)
# Place each Fourier component on the chroma slots of the triad templates.
for omega in range(3):
    for t, p in enumerate(major_template):
        major_eigentriads[p, omega] = fourier_basis[t, omega]
    for t, p in enumerate(minor_template):
        minor_eigentriads[p, omega] = fourier_basis[t, omega]
eigentriads = np.stack(
    (major_eigentriads, minor_eigentriads), axis=1)
# Convolve chromagram with eigentriads (product in the chroma Fourier domain).
chromagram_ft = scipy.fftpack.fft(chromagram, axis=0)
chromagram_ft = chromagram_ft[:, np.newaxis, :, :, :, np.newaxis]
eigentriads_ft = scipy.fftpack.fft(eigentriads, axis=0)
eigentriads_ft = eigentriads_ft[:, :, np.newaxis,
    np.newaxis, np.newaxis, :]
eigentriad_transform_ft = chromagram_ft * eigentriads_ft
# NOTE(review): a forward fft is used to leave the Fourier domain here,
# whereas the other convolution steps use ifft — confirm this is intentional.
eigentriad_transform = scipy.fftpack.fft(
    eigentriad_transform_ft, axis=0)
# Apply modulus nonlinearity
eigentriad_transform_modulus = np.abs(eigentriad_transform)
# Convolve eigentriad transform with filterbank again.
# This is akin to a scattering transform.
# We remove the finest scale (last two coefficients).
eigentriad_transform_modulus_ft = scipy.fftpack.fft(eigentriad_transform_modulus, axis=3)
eigentriad_transform_modulus_ft = eigentriad_transform_modulus_ft[:, :, :, :, :, :, np.newaxis]
scattering_filterbank_ft = wavelet_filterbank_ft[:, np.newaxis, np.newaxis, :, np.newaxis, np.newaxis, :-2]
scattering_transform_ft = eigentriad_transform_modulus_ft * scattering_filterbank_ft
scattering_transform = scipy.fftpack.ifft(scattering_transform_ft, axis=3)
# REMOVE ME
scattering_transform = scattering_transform[:, :, :, :1, :, :, :]
print("SCATTERING TRANFORM RESTRICTED TO A SINGLE TIMESTAMP")
# -
scattering_transform.shape
# +
# Reshape chroma and quality into a chord axis
sc_shape = scattering_transform.shape
tonnetz_shape = (
    sc_shape[0]*sc_shape[1], sc_shape[2],
    sc_shape[3], sc_shape[4], sc_shape[5],
    sc_shape[6])
tonnetz = np.reshape(scattering_transform,
    tonnetz_shape, 'F')
# Build adjacency matrix for Tonnetz graph
# (1/3) Major to minor transitions.
major_edges = np.zeros((12,))
# Parallel minor (C major to C minor)
major_edges[0] = 1
# Relative minor (C major to A minor)
major_edges[9] = 1
# Leading tone minor (C major to E minor)
major_edges[4] = 1
# (2/3) Minor to major transitions
minor_edges = np.zeros((12,))
# Parallel major (C minor to C major)
minor_edges[0] = 1
# Relative major (C minor to Eb major)
minor_edges[3] = 1
# Leading tone major (C major to Ab minor)
minor_edges[8] = 1
# (2/3) Build full adjacency matrix by 4 blocks.
# Indices 0-11 are the 12 major triads, 12-23 the 12 minor triads; edges
# only connect major <-> minor, so the graph is bipartite (zero diagonal blocks).
major_adjacency = scipy.linalg.toeplitz(major_edges, minor_edges)
minor_adjacency = scipy.linalg.toeplitz(minor_edges, major_edges)
tonnetz_adjacency = np.zeros((24,24))
tonnetz_adjacency[:12, 12:] = minor_adjacency
tonnetz_adjacency[12:, :12] = major_adjacency
# Define Laplacian on the Tonnetz graph (each triad has degree 3).
tonnetz_laplacian = 3 * np.eye(24) - tonnetz_adjacency
# Compute eigenprogressions, i.e. eigenvectors of the Tonnetz Laplacian.
# (The original called np.linalg.eig twice, the first time unpacking the
# results in the wrong order — that dead, incorrect call is removed;
# np.linalg.eig returns (eigenvalues, eigenvectors) in that order.)
eigvals, eigvecs = np.linalg.eig(tonnetz_laplacian)
# Sort eigenpairs by ascending eigenvalue.
sorting_indices = np.argsort(eigvals)
eigvals = eigvals[sorting_indices]
eigvecs = eigvecs[:, sorting_indices]
# Key invariance
phi = eigvecs[:, 0]
# Tonic invariance with quality covariance
psi_quality = eigvecs[:, 23]
# C -> C# -> D ... simultaneously with Cm -> C#m -> ...
# Major third periodicity.
psi_chromatic = eigvecs[:, 1] + 1j * eigvecs[:, 2]
# Major keys: pentatonic pattern (C D F G A) moving up a minor third.
# Major keys: minor seventh pattern (B D E A) moving down a minor third.
psi_pentatonic_up = eigvecs[:, 3] + 1j * eigvecs[:, 4]
# Cm -> B -> Bm -> Bb -> Am -> ...
# Minor third periodicity
psi_Cm_B_Bm_Bb = eigvecs[:, 5] + 1j * eigvecs[:, 6]
# C -> Am -> A -> Cm -> C ...
# Relative (R) followed by parallel (P).
# Major third periodicity
# (builtin complex() replaces np.complex, removed in NumPy 1.24)
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_RP = eigvecs[:, 7] + j * eigvecs[:, 8] + jbar * eigvecs[:, 9]
# C -> Bm -> Bb -> Am -> Ab -> ...
psi_C_Bm_Bb_Am = eigvecs[:, 10] + 1j * eigvecs[:, 11]
# Upwards minor third. Qualities in phase opposition.
psi_minorthird_quality = eigvecs[:, 12] + 1j * eigvecs[:, 13]
# Ab is simultaneous with Am.
# Abstract notion of "third" degree with quality invariance?
# Tritone periodicity
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_third_tritone = eigvecs[:, 14] + j * eigvecs[:, 15] + jbar * eigvecs[:, 16]
# C -> C#m -> D -> D#m -> ...
# Minor third periodicity.
psi_C_Dbm_D_Ebm = eigvecs[:, 17] + 1j * eigvecs[:, 18]
# Major keys: pentatonic pattern (C D F G A) moving down a minor third.
# Major keys: minor seventh pattern (B D E A) moving up a minor third.
psi_pentatonic_down = eigvecs[:, 19] + 1j * eigvecs[:, 20]
# C is simultaneous with Dm.
# Abstract notion of minor key?
# Major third periodicity.
psi_minorkey = eigvecs[:, 21] + 1j * eigvecs[:, 22]
# Concatenate eigenprogressions.
# NOTE(review): psi_C_Bm_Bb_Am appears twice in this stack (13 channels
# total) — confirm whether the duplicate is intentional or a typo for
# another psi.
eigenprogressions = np.stack((
    phi,
    psi_quality,
    psi_chromatic,
    psi_pentatonic_up,
    psi_Cm_B_Bm_Bb,
    psi_RP,
    psi_C_Bm_Bb_Am,
    psi_C_Bm_Bb_Am,
    psi_minorthird_quality,
    psi_third_tritone,
    psi_C_Dbm_D_Ebm,
    psi_pentatonic_down,
    psi_minorkey), axis=-1)
# Split the 24-long chord axis back into (chroma=12, quality=2).
eigenprogressions = np.reshape(
    eigenprogressions, (12, 2, -1), 'F')
# Apply eigenprogression transform (convolution along the chroma axis).
scattering_transform_ft = scipy.fftpack.fft(scattering_transform, axis=0)
scattering_transform_ft = scattering_transform_ft[:, :, :, :, :, :, :, np.newaxis]
eigenprogressions_ft = scipy.fftpack.fft(eigenprogressions, axis=0)
eigenprogressions_ft = eigenprogressions_ft[
    :, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis, np.newaxis]
eigenprogression_transform_ft = scattering_transform_ft * eigenprogressions_ft
eigenprogression_transform = scipy.fftpack.ifft(eigenprogression_transform_ft, axis=0)
# Setup wavelet filter bank across octaves.
# This is comparable to a spiral scattering transform.
J_oct = 3
octave_filterbank_ft = np.zeros((n_octaves, 2*J_oct-1))
for j in range(J_oct-1):
    xi_j = xi * 2**(-j)
    sigma_j = sigma * 2**(-j)
    center = xi_j * n_octaves
    den = 2 * sigma_j * sigma_j * n_octaves * n_octaves
    psi_ft = localmodule.morlet(center, den, n_octaves, n_periods=4)
    conj_psi_ft = np.roll(psi_ft, -1)[::-1]
    octave_filterbank_ft[:, -1 - 2*j] = psi_ft
    octave_filterbank_ft[:, -1 - (2*j+1)] = conj_psi_ft
# Scaling function (average) in the first slot.
octave_filterbank_ft[0, 0] = 1
octave_filterbank_ft = octave_filterbank_ft[
    np.newaxis, np.newaxis, :,
    np.newaxis, np.newaxis,
    np.newaxis, np.newaxis, np.newaxis]
# Apply octave transform (convolution along the octave axis).
eigenprogression_transform_ft = scipy.fftpack.fft(
    eigenprogression_transform, axis=2)
eigenprogression_transform_ft = eigenprogression_transform_ft[
    :, :, :, :, :, :, :, :, np.newaxis]
octave_transform_ft =\
    eigenprogression_transform_ft * octave_filterbank_ft
# NOTE(review): a forward fft (not ifft) is used to come back from the
# Fourier domain here, unlike the earlier steps — confirm intent.
octave_transform = scipy.fftpack.fft(
    octave_transform_ft, axis=2)
# Apply second-order modulus nonlinearity.
U2 = np.abs(octave_transform)
# Average over time, chroma, and octave.
# (implemented as a sum, not a mean)
S2 = np.sum(U2, axis=(0, 2, 3))
# +
# OUTPUTS
# Progressively richer feature sets are carved out of S2 by fixing the
# remaining axes at index 0; the "Dim" comments give the flattened sizes.
# Qualities.
# Dim 2.
S0 = S2[:, 0, 0, 0, 0, 0]
# Wavelet transform modulus
# Dim 17*2 = 24.
S1 = S2[:, :, 0, 0, 0, 0]
# Major eigentriads.
# Dim 17*3 = 51.
S1_maj_eigentriads = S2[0, :, :, 0, 0, 0]
# Eigentriads.
# Dim 2*17*3 = 102.
S1_eigentriads = S2[:, :, :, 0, 0, 0]
# Scattering transform.
# Dim 2*17*3*15 = 1530.
S2_scattering = S2[:, :, :, :, 0, 0]
# Eigenprogressions.
# Dim 2*17*3*13 = 1326.
S2_eigenprogressions = S2[:, :, :, 0, :, 0]
# Scattered eigenprogressions.
# Dim 2*17*3*15*13 = 19890.
S2_scattered_eigenprogressions = S2[:, :, :, :, :, 0]
# Time-octave transform.
# Dim 2*17*3*15*5 = 7650.
S2_time_octave_transform = S2[:, :, :, :, 0, :]
# Spiral eigenprogression transform!
# Dim 2*17*3*15*13*5 = 99450.
S2_spiral_eigenprogression_transform = S2
# -
# Sanity check of the full feature dimensionality.
2*17*3*15*13*5
# + deletable=true editable=true
# Shape sanity checks for the intermediate tensors.
print(eigentriads_ft.shape)
print(chromagram_ft.shape)
print(eigenprogression_transform_ft.shape)
print(scattering_transform.shape)
# -
octave_transform_ft.shape
# +
from matplotlib import pyplot as plt
plt.plot(np.squeeze(octave_filterbank_ft))
# -
# Visualize one complex eigenprogression: real/imag parts, split into the
# major (first 12) and minor (last 12) chord halves.
psiplot = eigvecs[:, 21] + 1j * eigvecs[:, 22]
plt.figure(figsize=(10, 3))
plt.plot(np.stack((np.real(psiplot[:12]), np.imag(psiplot[:12]))).T, "o-");
plt.figure(figsize=(10, 3))
plt.plot(np.stack((np.real(psiplot[12:]), np.imag(psiplot[12:]))).T, "o-");
# + deletable=true editable=true
# NOTE(review): `Y` is not defined in this notebook — this cell raises
# NameError as written; it looks like a leftover from an earlier session.
plt.figure()
specshow(pianoroll[24:60, :512])
for j in range(3, 8):
    plt.figure()
    specshow(np.real(Y[24:60, :512, j]))
# + deletable=true editable=true
# %matplotlib inline
from matplotlib import pyplot as plt
plt.imshow(-tonnetz_laplacian)
# + deletable=true editable=true
# NOTE(review): `eigenarpeggios_ft` is also undefined here.
eigenarpeggios_ft.shape
# + deletable=true editable=true
x = [2, 3, 4]
x[-3]
# + deletable=true editable=true
minor_template
# + deletable=true editable=true
|
notebooks/.ipynb_checkpoints/fig1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
from nibabel.testing import data_path
import nibabel as nib
img = nib.load("/Users/pikachu/Desktop/NDD/temp/sub-NDARBN100LCD/func/sub-NDARBN100LCD_task-rest_bold.nii.gz")
img_data = img.get_fdata()
img_data.shape
# viz slices in the transversal plane
# (transpose reorders the three spatial axes; rot90 orients the slice for
# display; the 4th axis is time in this 4-D BOLD series)
transversal = np.transpose(img_data, [1, 0, 2, 3])
transversal = np.rot90(transversal, 2)
transversal.shape
# viz slices in the sagittal plane
sagittal = np.transpose(img_data, [1, 2, 0, 3])
sagittal = np.rot90(sagittal, 1)
sagittal.shape
# viz slices in the coronal plane
coronal = np.transpose(img_data, [0, 2, 1, 3])
coronal = np.rot90(coronal, 1)
coronal.shape
# %pylab inline
import matplotlib.pyplot as plt
# data = np.rot90(img_data)
# imgplot = plt.imshow(data[:, :, 128])
# plt.show()
fig, ax = plt.subplots(1, 6, figsize=[22, 3.5])
fig.text(0.095, 0.5, "Transversal (t=200)", va='center', rotation='vertical', color="r", size=11)
for i, slice_no in enumerate(range(6, 54, 9)):
ax[i].imshow(transversal[:, :, slice_no, 200])
# ax[i].axis('off')
ax[i].set_xlabel('x', size = 12)
ax[i].set_ylabel('z', size = 12)
ax[i].set_title('Slice number: {}'.format(slice_no), color='r')
fig.savefig('plots/transversal.png')
fig, ax = plt.subplots(1, 6, figsize=[25, 3])
fig.text(0.095, 0.5, "Sagittal (t=100)", va='center', rotation='vertical', color="r", size = 12)
for i, slice_no in enumerate(range(6, 78, 13)):
ax[i].imshow(sagittal[:, :, slice_no, 100])
# ax[i].axis('off')
ax[i].set_xlabel('z', size = 12)
ax[i].set_ylabel('y', size = 12)
ax[i].set_title('Slice number: {}'.format(slice_no), color='r')
fig.savefig('plots/sagittal.png')
fig, ax = plt.subplots(1, 6, figsize=[25, 3])
fig.text(0.095, 0.5, "Coronal (t=300)", va='center', rotation='vertical', color="r", size = 12)
for i, slice_no in enumerate(range(6, 78, 13)):
ax[i].imshow(coronal[:, :, slice_no, 300])
# ax[i].axis('off')
ax[i].set_xlabel('x', size = 12)
ax[i].set_ylabel('y', size = 12)
ax[i].set_title('Slice number: {}'.format(slice_no), color='r')
fig.savefig('plots/coronal.png')
# +
fig, ax = plt.subplots(1, 1, figsize=[18, 4])
# the timecourse of a random voxel
ax.plot(transversal[30, 65, 33, :], lw=3)
ax.set_xlim([0, transversal.shape[3]-1])
ax.set_xlabel('Time [secs]', fontsize=16)
ax.set_ylabel('Signal strength', fontsize=16)
ax.set_title('Voxel time course (at transversal(30, 65, 33)) ', fontsize=16)
ax.tick_params(labelsize=12)
fig.savefig('plots/voxel.png')
plt.show()
# +
# Read the connectome edge list: each line is "node_i node_j weight".
edge_list_file = open("/Users/pikachu/Desktop/NDD/temp/sub-NDARAA075AMK_task-rest_bold_JHU_res-2x2x2_measure-correlation.edgelist", "r")
adj_matrix = [line.split() for line in edge_list_file.readlines()]
edge_list_file.close()  # FIX: the handle was previously never closed
# print(adj_matrix)
# -
import networkx as nx
# +
G = nx.Graph()
# Unique node ids and the sorted set of distinct edge weights.
node_list = sorted(list(set([int(i) for i, j, w in adj_matrix])))
all_weights = sorted(list(set([float(w) for i, j, w in adj_matrix])))
import statistics
# Threshold: only edges weighing at least ~2x the mean weight are drawn.
mean_weight = statistics.mean(all_weights) * 1.99
# Label every node with its own id.
labels = dict()
for node_name in node_list:
    labels[node_name] = str(node_name)
for node in node_list:
    G.add_node(node)
pos = nx.fruchterman_reingold_layout(G)
nx.draw_networkx_nodes(G, pos, node_color='green', node_size=70)
nx.draw_networkx_labels(G, pos, labels, font_size=6)
for i, j, w in adj_matrix:
    G.add_edge(int(i), int(j), weight=float(w))
# for weight in all_weights:
#     weighted_edges = [(node1,node2) for (node1,node2,edge_attr) in G.edges(data=True) if edge_attr['weight']==weight]
#     width = weight*len(node_list)*3.0/sum(all_weights)
#     nx.draw_networkx_edges(G,pos,edgelist=weighted_edges,width=width)
# FIX: `width` was only assigned inside the commented-out loop above, so the
# draw call below raised NameError. Use an explicit constant line width.
width = 1.0
# Keep only the "strong" edges (weight at or above the threshold) and draw them.
large_edges = [(node1, node2) for (node1, node2, edge_attr) in G.edges(data=True) if edge_attr['weight'] >= mean_weight]
nx.draw_networkx_edges(G, pos, edgelist=large_edges, width=width, edge_color='red')
# small_edges = [(node1,node2) for (node1,node2,edge_attr) in G.edges(data=True) if edge_attr['weight']<mean_weight]
# nx.draw_networkx_edges(G,pos,edgelist=small_edges,width=width, edge_color='yellow', style='dashed')
# FIX: the old plt.figure(1, figsize=(1024, 1024)) resized the figure to 1024
# *inches* per side just before saving (a ~300k-pixel image at dpi=300);
# save the already-drawn figure at its current size instead.
plt.axis('off')
plt.savefig("Graph.png", format="PNG", dpi=300)
plt.show()
# -
|
week2 slides/assignment1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.5 64-bit (''JUST'': conda)'
# name: python375jvsc74a57bd0b01975ddddaf4db0ddd9e77ba558bc384051aec2d64c8688cf2fb07acba12100
# ---
# +
#### ALL NOTEBOOKS SHOULD HAVE SOME VERSION OF THIS ####################################
########################################################################################
# %load_ext autoreload
# %autoreload 2
import os
import sys
currentdir = os.getcwd()
# go to the repo root; adjust the number of os.path.dirname calls to match
# where currentdir actually sits relative to the root
parentdir = os.path.dirname(os.path.dirname(currentdir))
# check where I'm at: if we went too far up the tree, fall back to currentdir
if 'Protein-Purification-Model-Public' not in parentdir: parentdir = currentdir
if parentdir not in sys.path: sys.path.insert(0,parentdir)
########################################################################################
# -
import utils
import visualization.simple_data_vis as vis
import surrogate_models.dab_nn_defs as engine
import kerastuner as kt
import matplotlib.pyplot as plt
import tensorflow as tf
# +
# load data from just-private/data
filename = 'mol_res_scan_results_7.csv'
data = utils.load_data(parentdir, filename)
# data is one big dataframe; columns 0-1 and 4+ are the model inputs (x),
# columns 2-3 (purity and yield) are the targets (y)
x = [*data.columns[:2],*data.columns[4:]]
y = data.columns[2:4]
# -
# Hold out 20% for validation, then build CV train/test folds from the rest.
CV = 5  # number of cross-validation folds
data2split, validation = utils.chroma_train_test_split(data, test_size=0.20)
trains, tests = utils.data_pipeline([data2split,], x, y, cv = CV)
# + tags=[]
# Build one deterministic linear regressor and one probabilistic NN per fold.
models = []
for i in range(CV):
    dlr = engine.create_deterministic_linear_regressor(
        FEATURE_NAMES = x,
        TARGET_NAMES = y,
        name = 'DLR_'+str(i)+'_'+filename[:-4]
    )
    pnn = engine.create_probabilistic_nn(
        FEATURE_NAMES = x,
        TARGET_NAMES = y,
        hidden_units = [16,8,4,],
        name = 'PNN_'+str(i)+'_'+filename[:-4],
    )
    models.append([dlr, pnn])
# -
dlr.summary()
# +
# train all the models under the same conditions
learning_rate = 0.01
epochs = 100
optimizer = 'Adam' # change manually or come up with dictionary?
# MSE for the deterministic model, negative log-likelihood for the
# probabilistic one. NOTE(review): the *2 yields 4 entries, but zip() below
# truncates to the 2 models per fold — confirm this is intentional.
losses = ['mean_squared_error', engine.negative_loglikelihood]*2
# Weight each target's loss inversely to its mean (normalized to the largest
# mean) so both targets contribute comparably — TODO confirm against
# engine.run_experiment's expectations.
loss_weights = (1/trains[0][0][1].mean().div(trains[0][0][1].mean().max())).round(2).to_dict()
histories = {}
for i in range(CV):
    print('CV round '+str(i))
    for m,l in zip(models[i], losses):
        histories[utils.get_model_name(m,filename)] = engine.run_experiment(
            model = m,
            loss = {y[0]:l,y[1]:l},
            loss_weights = loss_weights,
            optimizer = tf.keras.optimizers.Adam,
            learning_rate = learning_rate,
            num_epochs = epochs,
            train_dataset = trains[0][i],
            test_dataset = tests[0][i],
            verbose = 0,
            log = 0
        )
# Record the shared training settings for later bookkeeping.
settings = {'learning_rate' : learning_rate,
            'epochs' : epochs,
            'optimizer': optimizer,
            'loss_weights': loss_weights,
            'dataset' : filename}
|
notebooks/development_notebooks/cv_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (reco_pyspark)
# language: python
# name: reco_pyspark
# ---
# <i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
#
# <i>Licensed under the MIT License.</i>
# # Running ALS on MovieLens (PySpark)
#
# Matrix factorization by [ALS](https://spark.apache.org/docs/latest/api/python/_modules/pyspark/ml/recommendation.html#ALS) (Alternating Least Squares) is a well known collaborative filtering algorithm.
#
# This notebook provides an example of how to utilize and evaluate ALS PySpark ML (DataFrame-based API) implementation, meant for large-scale distributed datasets. We use a smaller dataset in this example to run ALS efficiently on multiple cores of a [Data Science Virtual Machine](https://azure.microsoft.com/en-gb/services/virtual-machines/data-science-virtual-machines/).
# **Note**: This notebook requires a PySpark environment to run properly. Please follow the steps in [SETUP.md](../../SETUP.md) to install the PySpark environment.
# +
# set the environment path to find Recommenders
import sys
import pyspark
from pyspark.ml.recommendation import ALS
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import StringType, FloatType, IntegerType, LongType
from recommenders.utils.timer import Timer
from recommenders.datasets import movielens
from recommenders.utils.notebook_utils import is_jupyter
from recommenders.datasets.spark_splitters import spark_random_split
from recommenders.evaluation.spark_evaluation import SparkRatingEvaluation, SparkRankingEvaluation
from recommenders.utils.spark_utils import start_or_get_spark
print("System version: {}".format(sys.version))
print("Spark version: {}".format(pyspark.__version__))
# -
# Set the default parameters.
# + tags=["parameters"]
# top k items to recommend
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
# -
# ### 0. Set up Spark context
#
# The following settings work well for debugging locally on VM - change when running on a cluster. We set up a giant single executor with many threads and specify memory cap.
# the following settings work well for debugging locally on VM - change when running on a cluster
# set up a giant single executor with many threads and specify memory cap
spark = start_or_get_spark("ALS PySpark", memory="16g")
# ### 1. Download the MovieLens dataset
# +
# Note: The DataFrame-based API for ALS currently only supports integers for user and item ids.
schema = StructType(
    (
        StructField("UserId", IntegerType()),
        StructField("MovieId", IntegerType()),
        StructField("Rating", FloatType()),
        StructField("Timestamp", LongType()),
    )
)
data = movielens.load_spark_df(spark, size=MOVIELENS_DATA_SIZE, schema=schema)
data.show()
# -
# ### 2. Split the data using the Spark random splitter provided in utilities
train, test = spark_random_split(data, ratio=0.75, seed=123)
# cache() forces materialization so later stages do not re-read the source
print ("N train", train.cache().count())
print ("N test", test.cache().count())
# ### 3. Train the ALS model on the training data, and get the top-k recommendations for our testing data
#
# To predict movie ratings, we use the rating data in the training set as users' explicit feedback. The hyperparameters used in building the model are referenced from [here](http://mymedialite.net/examples/datasets.html). We do not constrain the latent factors (`nonnegative = False`) in order to allow for both positive and negative preferences towards movies.
# Timing will vary depending on the machine being used to train.
# +
# Column-name mapping shared by the model and the evaluators below.
header = {
    "userCol": "UserId",
    "itemCol": "MovieId",
    "ratingCol": "Rating",
}
als = ALS(
    rank=10,
    maxIter=15,
    implicitPrefs=False,
    regParam=0.05,
    coldStartStrategy='drop',
    nonnegative=False,
    seed=42,
    **header
)
# +
with Timer() as train_time:
    model = als.fit(train)
print("Took {} seconds for training.".format(train_time.interval))
# -
# In the movie recommendation use case, recommending movies that have already been rated by a user does not make sense. Therefore, the rated movies are removed from the recommended items.
#
# In order to achieve this, we recommend all movies to all users, and then remove the user-movie pairs that exist in the training dataset.
# +
with Timer() as test_time:
    # Get the cross join of all user-item pairs and score them.
    users = train.select('UserId').distinct()
    items = train.select('MovieId').distinct()
    user_item = users.crossJoin(items)
    dfs_pred = model.transform(user_item)
    # Remove seen items: outer-join against the training set and keep only the
    # rows with no training rating (i.e. pairs the user has not rated yet).
    dfs_pred_exclude_train = dfs_pred.alias("pred").join(
        train.alias("train"),
        (dfs_pred['UserId'] == train['UserId']) & (dfs_pred['MovieId'] == train['MovieId']),
        how='outer'
    )
    top_all = dfs_pred_exclude_train.filter(dfs_pred_exclude_train["train.Rating"].isNull()) \
        .select('pred.' + 'UserId', 'pred.' + 'MovieId', 'pred.' + "prediction")
    # In Spark, transformations are lazy evaluation
    # Use an action to force execute and measure the test time
    top_all.cache().count()
print("Took {} seconds for prediction.".format(test_time.interval))
# -
top_all.show()
# ### 4. Evaluate how well ALS performs
# Ranking metrics (MAP, NDCG, precision@k, recall@k) on the top-k lists.
rank_eval = SparkRankingEvaluation(test, top_all, k = TOP_K, col_user="UserId", col_item="MovieId",
                                   col_rating="Rating", col_prediction="prediction",
                                   relevancy_method="top_k")
print("Model:\tALS",
      "Top K:\t%d" % rank_eval.k,
      "MAP:\t%f" % rank_eval.map_at_k(),
      "NDCG:\t%f" % rank_eval.ndcg_at_k(),
      "Precision@K:\t%f" % rank_eval.precision_at_k(),
      "Recall@K:\t%f" % rank_eval.recall_at_k(), sep='\n')
# ### 5. Evaluate rating prediction
# Generate predicted ratings.
prediction = model.transform(test)
prediction.cache().show()
# +
# Rating-prediction metrics (RMSE, MAE, explained variance, R squared).
rating_eval = SparkRatingEvaluation(test, prediction, col_user="UserId", col_item="MovieId",
                                    col_rating="Rating", col_prediction="prediction")
print("Model:\tALS rating prediction",
      "RMSE:\t%f" % rating_eval.rmse(),
      "MAE:\t%f" % rating_eval.mae(),
      "Explained variance:\t%f" % rating_eval.exp_var(),
      "R squared:\t%f" % rating_eval.rsquared(), sep='\n')
# -
if is_jupyter():
    # Record results with papermill for tests
    import papermill as pm
    import scrapbook as sb
    sb.glue("map", rank_eval.map_at_k())
    sb.glue("ndcg", rank_eval.ndcg_at_k())
    sb.glue("precision", rank_eval.precision_at_k())
    sb.glue("recall", rank_eval.recall_at_k())
    sb.glue("rmse", rating_eval.rmse())
    sb.glue("mae", rating_eval.mae())
    sb.glue("exp_var", rating_eval.exp_var())
    sb.glue("rsquared", rating_eval.rsquared())
    sb.glue("train_time", train_time.interval)
    sb.glue("test_time", test_time.interval)
# cleanup spark instance
spark.stop()
|
examples/00_quick_start/als_movielens.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watch Me Code 1: Requests
#
import requests
# Fetch the Syracuse homepage and dump its HTML when the request succeeds.
url = 'http://www.syr.edu'
resp = requests.get(url)
if resp.ok:
    print(resp.text)
# Request a page that does not exist to demonstrate status-code checking.
url = 'http://www.syr.edu/mikeiscool'
resp = requests.get(url)
if not resp.ok:
    print("Error:", resp.status_code)
else:
    print(resp.text)
# Ask httpbin which IP address this request came from.
url2 = 'http://httpbin.org/ip'
resp = requests.get(url2)
if resp.ok:
    ip = resp.json()
    print(ip)
# Decode the JSON response body into a dictionary and drill into it.
url3 = 'http://httpbin.org/get'
resp = requests.get(url3)
data = resp.json()
data
data['headers']['User-Agent']
# +
# A dict of parameters is encoded into the URL's query string by requests.
parameters = {'name': 'mike', 'age': 45}
url4 = 'https://httpbin.org/get'
resp = requests.get(url4, params=parameters)
print(resp.url)
if resp.ok:
    data = resp.json()
    data
# -
|
lessons/10-http/WMC1-Requests.ipynb
|