code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split,KFold,RandomizedSearchCV
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
#import ER_multiclass as ER
#from sklearn.linear_model import LogisticRegression
#from sklearn.naive_bayes import GaussianNB
#from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
# %matplotlib inline
# -
np.random.seed(1)
X = np.loadtxt('../diabetes_X.txt')
y = np.loadtxt('../diabetes_y.txt')
def inference(X_train,y_train,X_test,y_test):
## Optimize hyper parameters by RandomizedSearchCV:
model = RandomForestClassifier(random_state = 1)
#model
# Number of trees in random forest:
n_estimators = [int(x) for x in np.linspace(start = 10, stop = 100, num = 10)]
# Number of features to consider at every split:
max_features = ['auto']
# Maximum number of levels in tree:
max_depth = [int(x) for x in np.linspace(1, 10, num = 10)]
# Minimum number of samples required to split a node:
min_samples_split = [5, 10, 15, 20]
# Minimum number of samples required at each leaf node:
min_samples_leaf = [int(x) for x in np.linspace(start = 1, stop = 5, num = 5)]
# Method of selecting samples for training each tree:
#bootstrap = [True, False]
bootstrap = [False]
# Create the random grid:
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
random_search = RandomizedSearchCV(estimator = model, param_distributions = random_grid, n_iter = 100,
cv = 3, verbose=2, random_state=1, n_jobs = -1)
random_search.fit(X_train, y_train)
# best hyper parameters:
print(random_search.best_params_)
y_pred = random_search.best_estimator_.predict(X_test)
accuracy = accuracy_score(y_test,y_pred)
return accuracy
def compare_inference(X,y,train_size):
npred = 10
accuracy = np.zeros((len(list_methods),npred))
precision = np.zeros((len(list_methods),npred))
recall = np.zeros((len(list_methods),npred))
accuracy_train = np.zeros((len(list_methods),npred))
for ipred in range(npred):
X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred)
idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False)
X_train,y_train = X_train0[idx_train],y_train0[idx_train]
for i,method in enumerate(list_methods):
accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test)
return accuracy.mean(axis=1),accuracy.std(axis=1)
list_train_size = [0.8,0.6,0.4,0.2]
#list_methods=['logistic_regression','naive_bayes','random_forest','expectation_reflection']
#list_train_size = [0.8,0.2]
list_methods=['random_forest']
acc = np.zeros((len(list_train_size),len(list_methods)))
acc_std = np.zeros((len(list_train_size),len(list_methods)))
for i,train_size in enumerate(list_train_size):
acc[i,:],acc_std[i,:] = compare_inference(X,y,train_size)
print(train_size,acc[i,:])
acc
acc_std
|
diabetes_RF.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Image classification by deep learning
#
# #### First we load a image dataset
import graphlab
graphlab.canvas.set_target('ipynb')
image_train=graphlab.SFrame('image_train_data')
image_test=graphlab.SFrame('image_test_data')
# ## Exploring the Image Data
image_train['image'].show()
# ## Method 1: Training the clasifier using the raw image pixels
raw_pixel_model=graphlab.logistic_classifier.create(image_train,target='label',
features=['image_array'])
# # Make a prediction with the above simple raw pixel model
# First 3 images
image_test[0:3]['image'].show()
image_test[0:3]['label']
# Testing our predictions on test data
raw_pixel_model.predict(image_test[0:3])
# # Evaluating raw pixel model on whole test data
raw_pixel_model.evaluate(image_test)
# # Improving the accuracy of our predictions using Deep Features (Deep learning + Transfer Learning)
len(image_train)
# +
#deep_learning_model=graphlab.load_model('http://s3.amazonaws.com/GraphLab-Datasets/deeplearning/imagenet_model_iter45')
#image_train['deep_features'] = deep_learning_model.extract_features(image_train)
# -
# # Given the deep features, let's train a classifier!
image_train.head()
deep_features_model = graphlab.logistic_classifier.create(image_train,
features=['deep_features'],
target='label')
# # Apply the deep features model to first few images of test set
image_test[0:3]['image'].show()
deep_features_model.predict(image_test[0:3])
# # Compute test_data accuracy of deep_features_model
#
# As we can see, deep features provide us with significantly better accuracy (about 78%)
deep_features_model.evaluate(image_test)
|
Image Classification using Deep Features.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/awchisholm/AZ-900T0x-MicrosoftAzureFundamentals/blob/master/Task2Code.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="bd2jgxfzVIVF"
#code for investmant calculator
#T-Level Sample Assessmnet Material (Task 2)
#Non working code
def opening ():
print('#####################################')
print('Welcome to the investment quote system')
print('')
print('Please enter your name')
opening.name = input()
print('Please enter your Address')
opening.address = input()
print ('Please eneter your telephone number')
opening.phone = input()
print ('How much would you like to invest per month (£)?')
opening.investSum = input()
def options ():
print('#####################################')
print('There are two types of investment available:')
print('Option 1 - Savings plan')
print('Option 2 - Managed stock investment')
print('Please select an option (press 1 or 2 followed by enter)')
print('#####################################')
option = input()
while option != '1' and option != '2':
print('Please select an option (press 1 or 2 followed by enter)')
option = input()
if option == '1':
savingsMain()
else:
stockMain()
def savingsMain():
savingsMain.monthlyInvest = opening.investSum
savingsMain.yearlyInvest = savingsMain.monthlyInvest * 12
while savingsMain.yearlyInvest > 20000:
print('The the initial monthly amount is too high for this type of plan' )
print ('How much would you like to invest per month (£)?')
savingsMain.monthlyInvest = float(input())
savingsMain.yearlyInvest = savingsMain.monthlyInvest * 12
savingsPrint()
def savingsMin():
predictReturns = 0.012
yearlyFees = 0.0025 * 12
savingsMin.total = savingsMain.yearlyInvest
print('#####################################')
print('Forecasted perfromance of this plan at the lowest rate of return:')
for i in range(10):
savingsMin.total = (savingsMin.total + (savingsMin.total * predictReturns)) - yearlyFees
if i == 1 or i == 5 or i == 10:
print('At the end of year', str(i))
print('Your investment will be worth:')
print('£', savingsMin.total)
print('')
print('Total fees paid in this period: £', yearlyFees * (i+1))
print('')
print('Total profit in this period: £', savingsMin.total - (yearlyFees * (i+1)))
print('')
print('#####################################')
print('')
def savingsMax():
predictReturns = 0.024
yearlyFees = 0.0025 * 12
savingsMax.total = savingsMain.yearlyInvest
print('#####################################')
print('Forecasted perfromance of this plan at the highest rate of return::')
for i in range(10):
savingsMax.total = (savingsMax.total + (savingsMax.total * predictReturns)) - yearlyFees
if i == 0 or i == 4 or i == 9:
print('At the end of year', str(i+1))
print('Your investment will be worth:')
print('£', savingsMax.total,2)
print('')
print('Total fees paid in this period: £', yearlyFees * (i+1))
print('')
print('Total profit in this period: £', savingsMax.total - (yearlyFees * (i+1)))
print('')
print('#####################################')
print('')
def stocksMain():
stocksMain.monthlyInvest = opening.investSum
stocksMain.yearlyInvest = stocksMain.monthlyInvest * 12
stocksPrint()
def stocksMin():
predictReturns = 0.04
yearlyFees = 0.13 * 12
stocksMin.total = stocksMain.yearlyInvest
print('#####################################')
print('Forecasted perfromance of this plan at the lowest rate of return:')
for i in range(10):
stocksMin.total = (stocksMin.total + (stocksMin.total * predictReturns)) - yearlyFees
if stocksMin.total >= 40000:
taxRate = 0.2
elif stocksMin.total >= 12000:
taxRate = 0.1
else:
taxRate = 0
taxPayable = stocksMin.total * taxRate
postTax = stocksMin.total - taxPayable
if i == 0 or i == 4 or i == 9:
print('At the end of year', str(i+1))
print('Your investment will be worth:')
print('£', postTax)
print('')
print('Total fees paid in this period: £', yearlyFees * (i+1) )
print('')
print('Total profit in this period: £', postTax - (yearlyFees * (i+1)))
print('')
print('Total tax due in this period: £', taxPayable)
print('')
print('#####################################')
print('')
def stocksMax():
predictReturns = 0.23
yearlyFees = 0.13 * 12
stocksMax.total = stocksMain.yearlyInvest
print('#####################################')
print('Forecasted perfromance of this plan at the higher rate of return:')
for i in range(10):
stocksMax.total = (stocksMax.total + (stocksMax.total * predictReturns)) - yearlyFees
if stocksMax.total >= 40000:
taxRate = 0.2
elif stocksMax.total >= 12000:
taxRate = 0.1
else:
taxRate = 0
taxPayable = stocksMin.total * taxRate
postTax = stocksMin.total - taxPayable
if i == 0 or i == 4 or i == 9:
print('At the end of year', str(i+1))
print('Your investment will be worth:')
print('£', postTax)
print('')
print('Total fees paid in this period: £', yearlyFees * (i+1))
print('')
print('Total profit in this period: £', postTax - (yearlyFees * (i+1)))
print('')
print('Total tax due in this period: £', taxPayable,2)
print('')
print('#####################################')
print('')
def savingsPrint ():
print('--------------------------------------------------------')
print('Personal Investment Quote for:')
print('Name: ', opening.name)
print('')
print('Telephone Number: ', opening.phone)
print('--------------------------------------------------------')
print('')
print('You selected a savings plan')
savingsMin()
savingsMax()
def stocksPrint ():
print('--------------------------------------------------------')
print('Personal Investment Quote for:')
print('Name: ', opening.name)
print('')
print('Telephone Number: ', opening.phone)
print('--------------------------------------------------------')
print('')
print('You chose a Managed Stock Investment plan')
stocksMin()
stocksMax()
# + id="DOjzzDphWI-f"
opening()
options()
|
Task2Code.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="K9X_RKB2CeXy" outputId="9475f7e7-bb9b-43c0-8d88-095d4607fd20"
# !nvidia-smi
# + colab={"base_uri": "https://localhost:8080/"} id="1txsp8NFCnTV" outputId="0af289cb-d991-45e7-82e7-965e6bec1388"
# !pip install git+https://github.com/alexus37/MasterThesisCode.git#egg=deepexplain
# + colab={"base_uri": "https://localhost:8080/"} id="OjLIk06MHmeV" outputId="d9c03f36-f950-4a5e-a948-73ec57ad56d0"
# !pip -q install mne
# + id="PsG2dqEmFlyV"
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Activation, Permute, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import SeparableConv2D, DepthwiseConv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import SpatialDropout2D
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.layers import Input, Flatten
from tensorflow.keras.constraints import max_norm
def EEGNet(nb_classes, Chans = 64, Samples = 128,
dropoutRate = 0.5, kernLength = 64, F1 = 8,
D = 2, F2 = 16, norm_rate = 0.25, dropoutType = 'Dropout'):
""" Keras Implementation of EEGNet
http://iopscience.iop.org/article/10.1088/1741-2552/aace8c/meta
Note that this implements the newest version of EEGNet and NOT the earlier
version (version v1 and v2 on arxiv). We strongly recommend using this
architecture as it performs much better and has nicer properties than
our earlier version. For example:
1. Depthwise Convolutions to learn spatial filters within a
temporal convolution. The use of the depth_multiplier option maps
exactly to the number of spatial filters learned within a temporal
filter. This matches the setup of algorithms like FBCSP which learn
spatial filters within each filter in a filter-bank. This also limits
the number of free parameters to fit when compared to a fully-connected
convolution.
2. Separable Convolutions to learn how to optimally combine spatial
filters across temporal bands. Separable Convolutions are Depthwise
Convolutions followed by (1x1) Pointwise Convolutions.
While the original paper used Dropout, we found that SpatialDropout2D
sometimes produced slightly better results for classification of ERP
signals. However, SpatialDropout2D significantly reduced performance
on the Oscillatory dataset (SMR, BCI-IV Dataset 2A). We recommend using
the default Dropout in most cases.
Assumes the input signal is sampled at 128Hz. If you want to use this model
for any other sampling rate you will need to modify the lengths of temporal
kernels and average pooling size in blocks 1 and 2 as needed (double the
kernel lengths for double the sampling rate, etc). Note that we haven't
tested the model performance with this rule so this may not work well.
The model with default parameters gives the EEGNet-8,2 model as discussed
in the paper. This model should do pretty well in general, although it is
advised to do some model searching to get optimal performance on your
particular dataset.
We set F2 = F1 * D (number of input filters = number of output filters) for
the SeparableConv2D layer. We haven't extensively tested other values of this
parameter (say, F2 < F1 * D for compressed learning, and F2 > F1 * D for
overcomplete). We believe the main parameters to focus on are F1 and D.
Inputs:
nb_classes : int, number of classes to classify
Chans, Samples : number of channels and time points in the EEG data
dropoutRate : dropout fraction
kernLength : length of temporal convolution in first layer. We found
that setting this to be half the sampling rate worked
well in practice. For the SMR dataset in particular
since the data was high-passed at 4Hz we used a kernel
length of 32.
F1, F2 : number of temporal filters (F1) and number of pointwise
filters (F2) to learn. Default: F1 = 8, F2 = F1 * D.
D : number of spatial filters to learn within each temporal
convolution. Default: D = 2
dropoutType : Either SpatialDropout2D or Dropout, passed as a string.
"""
if dropoutType == 'SpatialDropout2D':
dropoutType = SpatialDropout2D
elif dropoutType == 'Dropout':
dropoutType = Dropout
else:
raise ValueError('dropoutType must be one of SpatialDropout2D '
'or Dropout, passed as a string.')
input1 = Input(shape = (Chans, Samples, 1))
##################################################################
block1 = Conv2D(F1, (1, kernLength), padding = 'same',
input_shape = (Chans, Samples, 1),
use_bias = False)(input1)
block1 = BatchNormalization()(block1)
block1 = DepthwiseConv2D((Chans, 1), use_bias = False,
depth_multiplier = D,
depthwise_constraint = max_norm(1.))(block1)
block1 = BatchNormalization()(block1)
block1 = Activation('elu')(block1)
block1 = AveragePooling2D((1, 4))(block1)
block1 = dropoutType(dropoutRate)(block1)
block2 = SeparableConv2D(F2, (1, 16),
use_bias = False, padding = 'same')(block1)
block2 = BatchNormalization()(block2)
block2 = Activation('elu')(block2)
block2 = AveragePooling2D((1, 8))(block2)
block2 = dropoutType(dropoutRate)(block2)
flatten = Flatten(name = 'flatten')(block2)
dense = Dense(nb_classes, name = 'dense',
kernel_constraint = max_norm(norm_rate))(flatten)
softmax = Activation('softmax', name = 'softmax')(dense)
return Model(inputs=input1, outputs=softmax)
# + colab={"base_uri": "https://localhost:8080/", "height": 304, "referenced_widgets": ["74c8072a4a0b4cc0b69c7c320c7b6b20", "0e0efa06707648f48342809742ff7b1a", "7f7a6bff868742e880e8f852dd06682c", "9b6a00af9e1049d9b9769ff3ea1a409c", "1907af2f695d494195466c79cd1a603e", "f9555d9f2ace43cbb2a3ff24bbf471fa", "<KEY>", "ff0627d0a29b4718a7250802ba82f577", "24a02636a6b447b8b31f44c8e00f9069", "a0b17981e7994743a7ceea417cdabeef", "ddc421db20ab47579bb8cba81d1c0bda"]} id="naScmmLGFfbD" outputId="30bd69e9-b130-41d8-cb31-5fda33054ab6"
# import tensorflow and disable eager execution right up front
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
# mne imports
import mne
from mne import io
from mne.datasets import sample
from tensorflow.keras import utils as np_utils
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow.compat.v1.keras.backend as K
from tensorflow.keras.models import Model
from deepexplain.tf.v2_x import DeepExplain
# while the default tensorflow ordering is 'channels_last' we set it here
# to be explicit in case if the user has changed the default ordering
K.set_image_data_format('channels_last')
##################### Process, filter and epoch the data ######################
data_path = sample.data_path()
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0., 1
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True, verbose=False)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1]
# extract raw data. scale by 1000 due to scaling sensitivity in deep learning
X = epochs.get_data()*1000 # format is in (trials, channels, samples)
y = labels
kernels, chans, samples = 1, 60, 151
# take 50/25/25 percent of the data to train/validate/test
X_train = X[0:144,]
Y_train = y[0:144]
X_validate = X[144:216,]
Y_validate = y[144:216]
X_test = X[216:,]
Y_test = y[216:]
# convert labels to one-hot encodings.
Y_train = np_utils.to_categorical(Y_train-1)
Y_validate = np_utils.to_categorical(Y_validate-1)
Y_test = np_utils.to_categorical(Y_test-1)
# convert data to NHWC (trials, channels, samples, kernels) format. Data
# contains 60 channels and 151 time-points. Set the number of kernels to 1.
X_train = X_train.reshape(X_train.shape[0], chans, samples, kernels)
X_validate = X_validate.reshape(X_validate.shape[0], chans, samples, kernels)
X_test = X_test.reshape(X_test.shape[0], chans, samples, kernels)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# + colab={"base_uri": "https://localhost:8080/"} id="suI5eEbCGwOA" outputId="26a11756-78a7-48da-e794-711e7e0ce322"
# configure the EEGNet-8,2,16 model with kernel length of 32 samples (other
# model configurations may do better, but this is a good starting point)
model = EEGNet(nb_classes = 4, Chans = chans, Samples = samples,
dropoutRate = 0.5, kernLength = 32, F1 = 8, D = 2, F2 = 16,
dropoutType = 'Dropout')
# compile the model and set the optimizers
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics = ['accuracy'])
# count number of parameters in the model
numParams = model.count_params()
# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(filepath='/tmp/checkpoint.h5', verbose=1,
save_best_only=True)
###############################################################################
# if the classification task was imbalanced (significantly more trials in one
# class versus the others) you can assign a weight to each class during
# optimization to balance it out. This data is approximately balanced so we
# don't need to do this, but is shown here for illustration/completeness.
###############################################################################
# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
class_weights = {0:1, 1:1, 2:1, 3:1}
# + colab={"base_uri": "https://localhost:8080/"} id="rbZmAnfJG1Hm" outputId="a1856a01-8159-4107-89a3-d6f4ea0857ea"
fittedModel = model.fit(X_train, Y_train, batch_size = 16, epochs = 5,
verbose = 2, validation_data=(X_validate, Y_validate),
callbacks=[checkpointer], class_weight = class_weights)
# + colab={"base_uri": "https://localhost:8080/"} id="npnxmwZuOHzH" outputId="7cd6533b-358f-4a65-e27a-1f081ca974a3"
with DeepExplain(session = K.get_session()) as de:
input_tensor = model.layers[0].input
fModel = Model(inputs = input_tensor, outputs = model.layers[-2].output)
target_tensor = fModel(input_tensor)
# can use epsilon-LRP as well if you like.
attributions = de.explain('deeplift', target_tensor * Y_test, input_tensor, X_test)
# attributions = de.explain('elrp', target_tensor * Y_test, input_tensor, X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="TuMdLH6AQefM" outputId="13d8c3ac-9072-4026-c31a-ab07e44c5c9e"
import matplotlib.pyplot as plt
plt.imshow(attributions[0, :, :].squeeze())
plt.xlabel('Time (seconds)')
plt.ylabel('Channels')
plt.show()
# + id="pCie6i67Rv4j"
|
examples/deepexpalin_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Fork and Pull
#
# ### Different ways of collaborating
#
# We have just seen how we can work with others on GitHub: we add them as collaborators on our repositories and give them permissions to push changes.
#
# Let's talk now about some other type of collaboration.
#
# Imagine you are a user of an Open Source project like Numpy and find a bug in one of their methods.
#
# You can inspect and clone [Numpy's code in GitHub](https://github.com/numpy/numpy), play around a bit and find how to fix the bug.
#
# Numpy has done so much for you asking nothing in return, that you really want to contribute back by fixing the bug for them.
#
# You make all of the changes but you can't push it back to Numpy's repository because you don't have permissions.
#
# The right way to do this is __forking Numpy's repository__.
# ### Forking a repository on GitHub
#
# By forking a repository, all you do is make a copy of it in your GitHub account, where you will have write permissions as well.
#
# If you fork Numpy's repository, you will find a new repository in your GitHub account that is an exact copy of Numpy. You can then clone it to your computer, work locally on fixing the bug and push the changes to your _fork_ of Numpy.
#
# Once you are happy with with the changes, GitHub also offers you a way to notify Numpy's developers of this changes so that they can include them in the official Numpy repository via starting a __Pull Request__.
# ### Pull Request
#
# You can create a Pull Request and select those changes that you think can be useful for fixing Numpy's bug.
#
# Numpy's developers will review your code and make comments and suggestions on your fix. Then, you can commit more improvements in the pull request for them to review and so on.
#
# Once Numpy's developers are happy with your changes, they'll accept your Pull Request and merge the changes into their original repository, for everyone to use.
# ### Practical example - Team up!
#
# We will be working in the same repository with one of you being the leader and the other being the collaborator.
#
# Collaborators need to go to the leader's GitHub profile and find the repository we created for that lesson. Mine is in https://github.com/jamespjh/github-example
# #### 1. Fork repository
#
# You will see on the top right of the page a `Fork` button with an accompanying number indicating how many GitHub users have forked that repository.
#
# Collaborators need to navigate to the leader's repository and click the `Fork` button.
#
# Collaborators: note how GitHub has redirected you to your own GitHub page and you are now looking at an exact copy of the team leader's repository.
# #### 2. Clone your forked repo
#
# Collaborators: go to your terminal and clone the newly created fork.
#
# ```
# git clone git@github.com:jamespjh/github-example.git
# ```
# #### 3. Create a feature branch
#
# It's a good practice to create a new branch that'll contain the changes we want. We'll learn more about branches later on. For now, just think of this as a separate area where our changes will be kept not to interfere with other people's work.
#
# ```
# git checkout -b southwest
# ```
# #### 4. Make, commit and push changes to new branch
#
# For example, let's create a new file called `SouthWest.md` and edit it to add this text:
#
# ```
# * Exmoor
# * Dartmoor
# * Bodmin Moor
# ```
#
# Save it, and push this changes to your fork's new branch:
#
# ```
# git add SouthWest.md
# git commit -m "The South West is also hilly."
# git push origin southwest
# ```
# #### 5. Create Pull Request
#
# Go back to the collaborator's GitHub site and reload the fork. GitHub has noticed there is a new branch and is presenting us with a green button to `Compare & pull request`. Fantastic! Click that button.
#
# Fill in the form with additional information about your change, as you consider necesary to make the team leader understand what this is all about.
#
# Take some time to inspect the commits and the changes you are submitting for review. When you are ready, click on the `Create Pull Request` button.
#
# Now, the leader needs to go to their GitHub site. They have been notified there is a pull request in their repo awaiting revision.
# #### 6. Feedback from team leader
#
# Leaders can see the list of pull requests in the vertical menu of the repo, on the right hand side of the screen. Select the pull request the collaborator has done, and inspect the changes.
#
# There are three tabs: in one you can start a conversation with the collaborator about their changes, and in the others you can have a look at the commits and changes made.
#
# Go to the tab labeled as "Files Changed". When you hover over the changes, a small `+` button appears. Select one line you want to make a comment on. For example, the line that contains "Exmoor".
#
# GitHub allows you to add a comment about that specific part of the change. Your collaborator has forgotten to add a title at the beginning of the file right before "Exmoor", so tell them so in the form presented after clicking the `+` button.
# #### 7. Fixes by collaborator
#
# Collaborators will be notified of this comment by email and also in their profiles page. Click the link accompanying this notification to read the comment from the team leader.
#
# Go back to your local repository, make the changes suggested and push them to the new branch.
#
# Add this at the beginning of your file:
#
# ```
# Hills in the South West:
# =======================
#
# ```
#
# Then push the change to your fork:
#
# ```
# git add .
# git commit -m "Titles added as requested."
# git push origin southwest
# ```
#
# This change will automatically be added to the pull request you started.
# #### 8. Leader accepts pull request
# The team leader will be notified of the new changes that can be reviewed in the same fashion as earlier.
#
# Let's assume the team leader is now happy with the changes.
#
# Leaders can see in the "Conversation" tab of the pull request a green button labelled ```Merge pull request```. Click it and confirm the decision.
#
# The collaborator's pull request has been accepted and appears now in the original repository owned by the team leader.
#
# Fork and Pull Request done!
# ### Some Considerations
#
# * Fork and Pull Request are things happening only on the repository's server side (GitHub in our case). Consequently, you can't do things like `git fork` or `git pull-request` from the local copy of a repository.
#
# * You don't always need to fork repositories with the intention of contributing. You can fork a library you use, install it manually on your computer, and add more functionality or customise the existing one, so that it is more useful for you and your team.
#
# * Numpy's example is only illustrative. Normally, Open Source projects have in their documentation (sometimes in the form of a wiki) a set of instructions you need to follow if you want to contribute to their software.
#
# * Pull Requests can also be done for merging branches in a non-forked repository. It's typically used in teams to merge code from a branch into the master branch and ask team colleagues for code reviews before merging.
#
# * It's a good practice before starting a fork and a pull request to have a look at existing forks and pull requests. On GitHub, you can find the list of pull requests on the horizontal menu on the top of the page. Try to also find the network graph displaying all existing forks of a repo, e.g., [NumpyDoc repo's network graph](https://github.com/numpy/numpydoc/network).
|
ch02git/06ForkAndPull.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimizing App Offers With Starbucks
# ### Notebook 3: Predictions
#
# ## Data Preprocessing
# +
import pandas as pd
import numpy as np
import os
import io
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
import seaborn as sns
sns.set(rc={'figure.figsize':(12,8)})
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
from sagemaker.predictor import csv_serializer
import sklearn.model_selection
import warnings
warnings.filterwarnings("ignore")
# -
# Custom colors that we will use in graphs:
custom_colors = ['#006241', '#84233C', '#1E3932', '#9D5116', '#E44C2C']
# +
csv_file = 'data/clean_df.csv'
clean_df = pd.read_csv(csv_file)
clean_df.head()
# -
list(clean_df.columns)
# #### Drop the "unknown" customer data
#
# See Data Exploration notebook for more info about "unknown" customers.
clean_df = clean_df[clean_df.age != -1]
clean_df.age.describe()
# #### Drop the columns that will not be used for modeling
# +
columns_to_keep = ['person',
'gender',
'age',
'income',
'offer_type',
'event_offer_received',
'event_offer_viewed',
'event_offer_completed']
clean_df = clean_df[columns_to_keep]
clean_df.sample(3)
# -
# #### Prepare data labels - "no view", "no order", "possible order"
df = clean_df.groupby(['person', 'gender', 'age', 'income', 'offer_type'], as_index=False).sum()
df.sample(3)
# +
# First, compare how many times the offer was viewed vs. received:
df['label'] = np.where((df['event_offer_viewed'] >= df['event_offer_received']) &
(df['event_offer_viewed'] > 0), "no order", "no view")
# Next, compare how many times offer was completed vs. viewed
# We use 0.5 threshold as in at least half of offers has to be completed:
df['label'] = np.where(((df['event_offer_completed'] / df['event_offer_viewed']) >= 0.5) &
(df['event_offer_viewed'] > 0) &
(df['event_offer_completed'] > 0), "possible order", "no order")
# Finally, if there is no order and no view:
df['label'] = np.where((df['event_offer_viewed'] == 0) &
(df['event_offer_completed'] == 0), "no view", df['label'])
df.sample(10)
# -
# Drop the event columns, we don't need them anymore:
df = df.drop(['event_offer_received', 'event_offer_viewed', 'event_offer_completed'], axis=1)
# #### Convert labels to numeric
# +
df["output"] = np.where(df['label']=='no view', 0, np.nan)
df["output"] = np.where(df['label']=='no order', 1, df["output"])
df["output"] = np.where(df['label']=='possible order', 2, df["output"])
df.groupby(['label', 'output']).mean().index
# -
# #### Check for missing values
df.isnull().sum()
# #### Encode "gender" and "offer_type" using a one-hot encoding scheme
# +
df = df.join(pd.get_dummies(df['gender'])).drop('gender', axis=1) # drop the original column
for column in ['M', 'F', 'O', 'U']:
df = df.rename(columns={column: ("gender_" + column.replace(" ", "_"))}) # rename new columns
df = df.join(pd.get_dummies(df['offer_type'])).drop('offer_type', axis=1) # drop the original column
# -
# #### Drop the "person" and "label" columns
# +
df = df.drop(['person', 'label'], axis=1)
df.sample(3)
# -
df.shape
# #### Split the data to input features (X) and output labels (y)
# +
X_df = df.drop(['output'], axis=1)
y_df = df['output']
print("X:")
print(X_df.head())
print("\n\n\ny:")
print(y_df.head())
# -
# #### Split the rows up into train, test and validation sets
# +
# Split the dataset into 2/3 training and 1/3 testing sets:
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X_df, y_df, test_size=0.33)
# Next, split the TRAINING set further into 2/3 training and 1/3 validation sets:
X_train, X_val, y_train, y_val = sklearn.model_selection.train_test_split(X_train, y_train, test_size=0.33)
print("X training set:")
print(X_train.shape)
print("\nX valitation set:")
print(X_val.shape)
print("\nX test set:")
print(X_test.shape)
# -
# #### Save the data locally as CSV files
#
# Amazon SageMaker requires that a CSV file **doesn't have a header** record and that the **target variable is in the first column** (from https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html).
# +
data_dir = '../data'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# Save testing set:
X_test.to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)
# Save training and validation sets:
pd.concat([y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)
pd.concat([y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
# -
# #### Upload the data files to S3
# +
prefix = 'starbucks-xgboost'
session = sagemaker.Session()
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)
train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
# -
# ## Training the XGBoost model
# #### Construct the estimator object
# +
role = get_execution_role()
# Get the URI for new container:
container = get_image_uri(session.boto_region_name, 'xgboost', '0.90-1')
# Construct the estimator object:
xgb = sagemaker.estimator.Estimator(container,
role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
sagemaker_session=session)
# -
# #### Set model specific hyperparameter default values
#
# We will use "**multi:softprob**" objective, which outputs a vector of ndata * nclass, which can be further reshaped to ndata * nclass matrix. The result contains predicted probability of each data point belonging to each class (from https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst).
xgb.set_hyperparameters(num_round=500, # the number of rounds to run the training
num_class=3, # the number of output classes
objective='multi:softprob')
# #### Train model on default parameters
# +
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
# -
# #### Test default model performance
# +
# Build a transformer object from our fit model:
xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
# Begin a batch transform job using our trained model and applying it to the test data:
xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')
# -
xgb_transformer.wait()
# !aws s3 cp --recursive $xgb_transformer.output_path $data_dir
y_pred = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)
y_pred.head()
# +
# Fix the brackets and convert string to a float:
y_pred[0] = y_pred[0].str.strip('[').astype(float)
y_pred[2] = y_pred[2].str.strip(']').astype(float)
y_pred.head()
# +
# Choose the most likely output based on predicted probabilities:
y_max = pd.DataFrame()
y_max[0] = y_pred[[0, 1, 2]].idxmax(axis=1)
y_max.head()
# +
from sklearn.metrics import accuracy_score
print("Default model accuracy score: {:.4f}".format(accuracy_score(y_test, y_max)))
# -
# ## Model refinement
# #### Create the hyperparameter tuner
#
# Amazon SageMaker automatic model tuning, also known as hyperparameter tuning, finds the best version of a model by running many training jobs on your dataset using the algorithm and ranges of hyperparameters that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by a metric that you choose (from https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning.html).
#
# To compare the trained models, we will use **merror**: multiclass classification error rate. It is calculated as #(wrong cases)/#(all cases).
#
# We will use following hyperparameters to refine the initial model:
# (from https://xgboost.readthedocs.io/en/latest/parameter.html)
#
# - **eta**: Step size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative.
#
# - **gamma**: Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is, the more conservative the algorithm will be.
#
# - **max_depth**: Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit.
#
# - **subsample**: Subsample ratio of the training instances. Setting it to 0.5 means that XGBoost would randomly sample half of the training data prior to growing trees. and this will prevent overfitting. Subsampling will occur once in every boosting iteration
# +
from sagemaker.tuner import IntegerParameter, ContinuousParameter, HyperparameterTuner
xgb_hyperparameter_tuner = HyperparameterTuner(estimator = xgb, # the estimator object to use as the basis
objective_metric_name = 'validation:merror', # the metric used to compare trained models
objective_type = 'Minimize', # whether we wish to minimize or maximize the metric
max_jobs = 32, # the total number of models to train
max_parallel_jobs = 3, # the number of models to train in parallel
hyperparameter_ranges = {
'eta' : ContinuousParameter(0.05, 0.5),
'gamma': ContinuousParameter(0, 10),
'max_depth': IntegerParameter(2, 8),
'subsample': ContinuousParameter(0.5, 1),
})
# -
# #### Find the best version of a model by running many training jobs
xgb_hyperparameter_tuner.fit({'train': s3_input_train, 'validation': s3_input_validation})
xgb_hyperparameter_tuner.wait()
# #### Retrieve and set up the best performing model
xgb_hyperparameter_tuner.best_training_job()
# Set up a batch transform job to test the best model:
xgb_attached = sagemaker.estimator.Estimator.attach(xgb_hyperparameter_tuner.best_training_job())
# #### Test new model performance
# +
# Build a transformer object from our fit model:
xgb_transformer = xgb_attached.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
# Begin a batch transform job using our trained model and applying it to the test data:
xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')
# -
xgb_transformer.wait()
# !aws s3 cp --recursive $xgb_transformer.output_path $data_dir
# +
y_pred = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)
y_pred[0] = y_pred[0].str.strip('[').astype(float)
y_pred[2] = y_pred[2].str.strip(']').astype(float)
y_max = pd.DataFrame()
y_max[0] = y_pred[[0, 1, 2]].idxmax(axis=1)
print("Tuned model accuracy score: {:.4f}".format(accuracy_score(y_test, y_max)))
# +
from sklearn.metrics import classification_report
labels = ["no view", "no order", "possible order"]
print(classification_report(y_test, y_max, target_names=labels))
# +
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_max)
f, ax = plt.subplots(figsize = (8,8))
sns.heatmap(cm,
cmap=sns.cubehelix_palette(8, start=.5, rot=-.75),
square=True,
annot=True, annot_kws={"size": 12}, fmt='g',
xticklabels=labels,
yticklabels=labels)
plt.title('Confusion matrix of the XGBoost classifier\n\n\n', fontsize=16)
plt.xlabel("\npredicted label")
plt.ylabel("true label\n")
plt.show()
# -
# ## Deploy the XGBoost model
# #### Create a simulated data set with new customers
# The purpose of this model is to be able to use existing data to target **new customers** with offers. When new customer registers in the Starbucks Rewards® program, we can feed it's data to the deployed model and draw a conclusions about possible offer types for this customer.
#
# We don't have real data about new customers, so we will generate this data.
# +
new_customers = pd.DataFrame()
new_customers.colums = ['age', 'income', 'gender', 'bogo', 'discount', 'informational']
new_customers['age'] = np.random.randint(18, 85, 10) # random ages from 18 to 85
new_customers['income'] = np.random.randint(20000, 120000, 10) # random income from 20000 to 120000
new_customers['gender'] = np.random.choice(['F','M','O'], 10) # random gender
# One-hot encode gender:
new_customers = new_customers.join(pd.get_dummies(new_customers['gender'])).drop('gender', axis=1)
for column in ['M', 'F', 'O', 'U']:
new_customers = new_customers.rename(columns={column: ("gender_" + column.replace(" ", "_"))})
new_customers['bogo'] = 0
new_customers['discount'] = 0
new_customers['informational'] = 0
new_customers['person'] = new_customers.index
new_customers
# -
# #### Assign all three offer types to each customer
# +
df1, df2, df3 = new_customers.copy(), new_customers.copy(), new_customers.copy()
df1['bogo'] = 1
df2['discount'] = 1
df3['informational'] = 1
frames = [df1, df2, df3]
new_customers = pd.concat(frames, ignore_index=True)
new_customers = new_customers.sort_values(by=['age', 'bogo', 'discount'])
new_customers.head(6)
# -
# #### Deploy final XGBoost model into production
xgb_predictor = xgb_attached.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# #### Generate predictions for new customers
# +
X = new_customers.drop(['person'], axis=1)
X_id = new_customers['person']
# We need to tell the endpoint what format the data we are sending is in
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
# Predictions is currently a comma delimited string:
y_pred_raw = xgb_predictor.predict(X.values).decode('utf-8')
# Convert predictions to a dataframe:
y_pred = pd.DataFrame(y_pred_raw.split("],["))
y_pred = y_pred[0].str.split(',', expand=True)
# Fix the brackets and convert string to a float:
y_pred[0] = y_pred[0].str.strip('[').astype(float)
y_pred[1] = y_pred[0].astype(float)
y_pred[2] = y_pred[2].str.strip(']').astype(float)
y_pred.head()
# -
# #### Shut down the endpoint
xgb_predictor.delete_endpoint()
# ### Explore prediction results
#
# #### Combine new customer data with the predictions
df = pd.concat([new_customers, y_pred], axis=1, sort=False)
df.head()
df.shape
y_pred.iloc[3] # just to check if the merge was correct
# #### Improve data formatting
# +
# Revert one-hot encoded columns back to categorical:
df = df.rename(columns={"gender_F": "female",
"gender_M": "male",
"gender_O": "other"})
gender = df[['female','male','other']]
gender = pd.DataFrame(gender.idxmax(1))
offer = df[['bogo','discount','informational']]
offer = pd.DataFrame(offer.idxmax(1))
df['gender'] = gender[0]
df['offer'] = offer[0]
# Rename columns:
df = df.rename(columns={0: "no view",
1: "no order",
2: "possible order"})
# Keep only these columns:
columns_to_keep = ['person',
'age',
'income',
'gender',
'offer',
'no view',
'no order',
'possible order']
df = df[columns_to_keep]
df.sample(5)
# -
# #### Explore the prediction results
person = df[df.person == 0]
person
def plot_person_preds(df, i):
'''Plot predicted probability of belonging to each class'''
df[df.person == i][["offer",
"no view",
"no order",
"possible order"]].plot(x="offer", kind="barh", stacked=True,
color=[custom_colors[1],
custom_colors[4],
custom_colors[0]])
plt.legend(loc=(1.05,0.75))
plt.xlabel("predicted probability of belonging to each class")
plt.ylabel("offer type")
plt.show()
return None
for i in range (10):
income = df.at[i, "income"]
gender = df.at[i, "gender"]
age = df.at[i, "age"]
print("\nIncome: {} USD, {}, {} y/o:".format(income, gender, age))
plot_person_preds(df, i)
### FILE SAVING CHECKPOINT ###
filename = 'data/new_customer_preds.csv'
df.to_csv(filename)
# ___
# Please refer to [PROJECT REPORT](https://evanca.github.io/machine-learning_optimizing-app-offers-with-starbucks/) for a conclusions and a high-level overview of the full project.
|
part-3-predictions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
#Classes ID and names > for plotting #don't remove
CLASSES_ID = [0, 10, 11, 12, 20, 30, 40, 50, 60, 61, 62, 70, 71, 72, 80, 81, 82, 90, 100, 110, 120, 121, 122, 130, 140, 150,
151, 152, 153, 160, 170, 180, 190, 200, 201, 202, 210, 220]
CLASSES_NAMES = {
0: 'No data',
10: 'Cropland rainfed',
11: 'Cropland rainfed - Herbaceous cover',
12: 'Cropland rainfed - Tree or shrub cover',
20: 'Cropland irrigated or post-flooding',
30: 'Mosaic cropland (>50%) / natural vegetation (tree/shrub/herbaceous cover) (<50%)',
40: 'Mosaic natural vegetation (tree/shrub/herbaceous cover) (>50%) / cropland (<50%)',
50: 'Tree cover broadleaved evergreen closed to open (>15%)',
60: 'Tree cover broadleaved deciduous closed to open (>15%)',
61: 'Tree cover broadleaved deciduous closed (>40%)',
62: 'Tree cover broadleaved deciduous open (15-40%)',
70: 'Tree cover needleleaved evergreen closed to open (>15%)',
71: 'Tree cover needleleaved evergreen closed (>40%)',
72: 'Tree cover needleleaved evergreen open (15-40%)',
80: 'Tree cover needleleaved deciduous closed to open (>15%)',
81: 'Tree cover needleleaved deciduous closed (>40%)',
82: 'Tree cover needleleaved deciduous open (15-40%)',
90: 'Tree cover mixed leaf type (broadleaved and needleleaved)',
100: 'Mosaic tree and shrub (>50%) / herbaceous cover (<50%)',
110: 'Mosaic herbaceous cover (>50%) / tree and shrub (<50%)',
120: 'Shrubland',
121: 'Shrubland evergreen',
122: 'Shrubland deciduous',
130: 'Grassland',
140: 'Lichens and mosses',
150: 'Sparse vegetation (tree/shrub/herbaceous cover) (<15%)',
151: 'Sparse tree (<15%)',
152: 'Sparse shrub (<15%)',
153: 'Sparse herbaceous cover (<15%)',
160: 'Tree cover flooded fresh or brakish water',
170: 'Tree cover flooded saline water',
180: 'Shrub or herbaceous cover flooded fresh/saline/brakish water',
190: 'Urban areas',
200: 'Bare areas',
201: 'Consolidated bare areas',
202: 'Unconsolidated bare areas',
210: 'Water bodies',
220: 'Permanent snow and ice'
}
# -
import os
import pandas as pd
import numpy as np
# +
from sklearn.metrics import accuracy_score, cohen_kappa_score, classification_report, precision_score, recall_score, f1_score, precision_recall_fscore_support
def gather_accuracy_values_per_class(classes,targets,scores):
"""
Gather per class a variety of accuracy metrics from targets and scores
"""
y_pred = np.argmax(scores,axis=1)
y_true = np.argmax(targets,axis=1)
precision_, recall_, fscore_, support_ = precision_recall_fscore_support(y_true, y_pred, beta=0.5, average=None)
fscore = pd.Series(index=classes, data=fscore_, name="f-score")
precision = pd.Series(index=classes, data=precision_, name="precision")
recall = pd.Series(index=classes, data=recall_, name="recall")
support = pd.Series(index=classes, data=support_, name="support")
s = [fscore,precision,recall, support]
names = [el.name for el in s]
return pd.DataFrame(zip(*s), columns=names, index=recall.index).T
# +
import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, cohen_kappa_score, classification_report, precision_score, recall_score, f1_score, precision_recall_fscore_support
def gather_mean_accuracies(classes, scores, targets, average='weighted', label="label"):
"""
calculate a series for mean accuracy values for all, covered (class id < b) and fields (class id > b)
"""
metrics = []
y_pred = np.argmax(scores,axis=1)
y_true = np.argmax(targets,axis=1)
# class weighted average accuracy
w_all = np.ones(y_true.shape[0])
for idx, i in enumerate(np.bincount(y_true)):
w_all[y_true == idx] *= (i/float(y_true.shape[0]))
w_acc = accuracy_score(y_true, y_pred, sample_weight=w_all)
metrics.append(pd.Series(data=[w_acc], dtype=float, name="accuracy"))
# AUC
try:
# if AUC not possible skip
auc = roc_auc_score(targets, scores, average=average)
metrics.append(pd.Series(data=[auc], dtype=float, name="AUC"))
except:
print "no AUC calculated"
pass
# Kappa
kappa = cohen_kappa_score(y_true, y_pred)
metrics.append(pd.Series(data=[kappa], dtype=float, name="kappa"))
# Precision, Recall, F1, support
prec, rec, f1, support = precision_recall_fscore_support(y_true, y_pred, beta=1, average=average)
metrics.append(pd.Series(data=[prec], dtype=float, name="precision"))
metrics.append(pd.Series(data=[rec], dtype=float, name="recall"))
metrics.append(pd.Series(data=[f1], dtype=float, name="fscore"))
df_ = pd.DataFrame(metrics).T
if label is not None:
df_.index = [[label],["all"]]
else:
df_.index = ["all"]
return df_
# -
savedir = 'acocac'
best_runs = ['1l4r50d2f','1l2r50d2f','1l6r50d1f']
networks = ['lstm','rnn','cnn']
# +
from sklearn.metrics import confusion_matrix
obs_file = "eval_observations.npy"
probs_file = "eval_probabilities.npy"
targets_file = "eval_targets.npy"
conf_mat_file = "eval_confusion_matrix.npy"
networklabels = ["LSTM","RNN","CNN"]
acc=[]
mean = []
cms_targets = []
cms_scores = []
cms_classes = []
for best_run, network, label_ in zip(best_runs,networks,networklabels):
print network
path = os.path.join(savedir,network,best_run)
scores = np.load(os.path.join(path,probs_file))
targets = np.load(os.path.join(path,targets_file))
y_pred = np.argmax(scores,axis=1)
y_true = np.argmax(targets,axis=1)
cm = confusion_matrix(y_true,y_pred)
labels_idx = list(set(y_pred) | set(y_true))
mykeys = [CLASSES_ID[i] for i in labels_idx]
classes = [CLASSES_NAMES[x] for x in mykeys]
cms_classes.append(classes)
cms_targets.append(targets)
cms_scores.append(scores)
df_ = gather_mean_accuracies(classes, scores, targets, label=label_)
mean.append(df_)
mean_df = pd.concat(mean)
# -
mean_df
# performance by model by class
models = {'lstm': 0,'rnn':1, 'cnn':2}
model = 'lstm'
gather_accuracy_values_per_class(cms_classes[models[model]],cms_targets[models[model]],cms_scores[models[model]])
# +
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_confusion_matrix(confusion_matrix, classes, normalize_axis=None, figsize=(7, 7), colormap=None):
"""
Plots a confusion matrix using seaborn heatmap functionality
@param confusion_matrix: np array [n_classes, n_classes] with rows reference and cols predicted
@param classes: list of class labels
@param normalize_axis: 0 sum of rows, 1: sum of cols, None no normalization
@return matplotlib figure
"""
# Set up the matplotlib figure
plt.figure()
f, ax = plt.subplots(figsize=figsize)
# normalize
normalized_str = "" # add on at the title
if normalize_axis is not None:
with np.errstate(divide='ignore'): # ignore divide by zero and replace with 0
confusion_matrix = np.nan_to_num(
confusion_matrix.astype(float) / np.sum(confusion_matrix, axis=normalize_axis))
# Draw the heatmap with the mask and correct aspect ratio
g = sns.heatmap(confusion_matrix,
square=True,
linewidths=1,
cbar=False,
ax=ax,
cmap=colormap, vmin=0, vmax=1)
divider = make_axes_locatable(g)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = g.figure.colorbar(ax.collections[0],cax=cax)
if normalize_axis == 0:
cbar.set_label("precision")
if normalize_axis == 1:
cbar.set_label("recall")
n_classes = len(classes)
# if n_classes < threshold plot values in plot
cols = np.arange(0, n_classes)
rows = np.arange(n_classes - 1, -1, -1)
#g.set_title("Confusion Matrix")
g.set_xticklabels([])
g.set_yticklabels(classes[::-1], rotation=0)
g.set_xlabel("predicted")
g.set_ylabel("reference")
return f, g
# +
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
networks = ["lstm","rnn","cnn"]
def calc_confusion_matrix(path):
probs_file = "eval_probabilities.npy"
targets_file = "eval_targets.npy"
scores = np.load(os.path.join(path,probs_file))
targets = np.load(os.path.join(path,targets_file))
y_pred = np.argmax(scores,axis=1)
y_true = np.argmax(targets,axis=1)
labels = list(set(y_pred) | set(y_true))
cm = confusion_matrix(y_true,y_pred)
return cm, labels
cms_prec = []
cms_rec = []
cms_labels = []
for best_run, network in zip(best_runs, networks):
path = os.path.join(savedir, network, best_run)
cm, labels = calc_confusion_matrix(path)
cms_labels.append(labels)
cms_prec.append(cm.astype(float)/np.sum(cm,axis=0))
cms_rec.append(cm.astype(float)/np.sum(cm,axis=1))
models = {'lstm': 0,'rnn':1, 'cnn':2}
model = 'cnn'
cm = cms_prec[models[model]] #0: lstm, 1:rnn, 2:cnn
labels_idx = cms_labels[models[model]]
mykeys = [CLASSES_ID[i] for i in labels_idx]
labels = [CLASSES_NAMES[x] for x in mykeys]
# Generate a custom diverging colormap
cmap = sns.color_palette("Blues")
figsize=(6,6)
f,ax = plot_confusion_matrix(cm, labels[::-1], figsize=figsize, normalize_axis=0, colormap = "Blues")
# double checked at http://stackoverflow.com/questions/20927368/python-how-to-normalize-a-confusion-matrix
precision = cm/np.sum(cm,axis=0)
recall = cm/np.sum(cm,axis=1)
# -
# ## Influence of element in sequence
# +
networks = ['lstm','rnn','cnn']
lstm_network, _, _ = networks
lstm_best, _, _ = best_runs
path = os.path.join(savedir, lstm_network, lstm_best)
obs = np.load(os.path.join(path,obs_file))
scores = np.load(os.path.join(path,probs_file))
targets = np.load(os.path.join(path,targets_file))
# -
def get_obs_subset(targets,scores,obs,obs_idx, classes):
"""
This function calls the gather_mean_accuracies, which is used for the calculation of accuracy tables
on a subset of targets and scores filtered by obs_idx
"""
# select by observation
sc = scores[obs==obs_idx]
ta = targets[obs==obs_idx]
return gather_mean_accuracies(classes, sc, ta, average='weighted', label="label")
#a = get_obs_subset(targets,scores,3, b, classes)
sc = scores[obs==0]
ta = targets[obs==0]
gather_mean_accuracies(classes, sc, ta, average='weighted', label="label")
# +
# gather accuracy values for fields
#from util.db import conn
import sklearn
#t = pd.read_sql("select distinct doa, doy from products order by doa",conn)["doy"]
#t.to_pickle(os.path.join("loc","t.pkl"))
#t = pd.read_pickle(os.path.join("loc","t.pkl"))
def collect_data_per_obs(targets, scores, obs, classes, metric="accuracy", classcategory="all"):
"""
this function calculates `metric` based on scores and targets for each available observations `t` 0..25
This function takes a
- target matrix resembling ground thruth,
- scores as calculated probablities for each observation
- obs as indices of observation
"""
#oa = []
outlist=[]
for i in range(len(t)):
try:
per_class_ = get_obs_subset(targets,scores,obs,i, classes)
#per_class.append(per_class_.mean(axis=0))
# append the average <classcategory> <metric> at each time i
outlist.append(per_class_.loc["label"].loc[classcategory][metric])
except:
print "t{} could not calculate accuracy metrics".format(i)
outlist.append(None)
pass
#oa.append(oa_)
print "Collecting doy {} ({}/{})".format(t[i],i+1,len(t))
#oa_s = pd.Series(data=oa, name=over_accuracy_label, index=t)
return pd.DataFrame(outlist, index=t)
def collect_data_for_each_network(networks, best_runs, metric="kappa", classcategory="all"):
"""
This function calls collect_data_per_obs for each network.
First targets, scores and obs are loaded from file at the respective network's best run model
Then collect_data_per_obs is called.
"""
acc_dfs = []
for network, best in zip(networks, best_runs):
path = os.path.join(savedir, network, best)
obs = np.load(os.path.join(path,obs_file))
scores = np.load(os.path.join(path,probs_file))
targets = np.load(os.path.join(path,targets_file))
print
print network
# for every network append a dataframe of observations
observations_df_ = collect_data_per_obs(targets, scores, obs, classes, metric=metric, classcategory=classcategory)
acc_dfs.append(observations_df_.values.reshape(-1))
# create final DataFrame with proper column and indexes of all three networks
return pd.DataFrame(acc_dfs, index=networks,columns=t).T
# -
acc_df = collect_data_for_each_network(networks, best_runs, metric="accuracy", classcategory="all")
rec_df = collect_data_for_each_network(networks, best_runs, metric="recall", classcategory="all")
kappa_df = collect_data_for_each_network(networks, best_runs, metric="kappa", classcategory="all")
prec_df = collect_data_for_each_network(networks, best_runs, metric="precision", classcategory="all")
acc_df.T
# +
x = range(len(t))
def plot_acctime(x,acc_df,metric="measure"):
f,ax = plt.subplots()
#ax.plot(x,oa_s.values, label="overall accuracy")
for col in acc_df.columns:
ax.plot(x,acc_df[col].values, label=col)
plt.xticks(x,t, rotation='vertical')
ax.set_xlabel("day of year")
ax.set_ylabel(metric)
plt.legend()
# 0 lstm, 1 rnn, 2 cnn
#plot_acctime(x,prec_df)
# -
plot_acctime(x,acc_df,metric="accuracy")
plot_acctime(x,acc_df,metric="precision")
|
2_explore_evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# # Sentiment Analysis with Deep Learning using BERT
# + [markdown] Collapsed="true"
# ### Prerequisites
# + [markdown] Collapsed="false"
# - Intermediate-level knowledge of Python 3 (NumPy and Pandas preferably, but not required)
# - Exposure to PyTorch usage
# - Basic understanding of Deep Learning and Language Models (BERT specifically)
# + [markdown] Collapsed="false"
# ### Project Outline
# + [markdown] Collapsed="true"
# **Task 1**: Introduction (this section)
#
# **Task 2**: Exploratory Data Analysis and Preprocessing
#
# **Task 3**: Training/Validation Split
#
# **Task 4**: Loading Tokenizer and Encoding our Data
#
# **Task 5**: Setting up BERT Pretrained Model
#
# **Task 6**: Creating Data Loaders
#
# **Task 7**: Setting Up Optimizer and Scheduler
#
# **Task 8**: Defining our Performance Metrics
#
# **Task 9**: Creating our Training Loop
# + [markdown] Collapsed="false"
# ## Introduction
# + [markdown] Collapsed="true"
# ### What is BERT
#
# BERT is a large-scale transformer-based Language Model that can be finetuned for a variety of tasks.
#
# For more information, the original paper can be found [here](https://arxiv.org/abs/1810.04805).
#
# [HuggingFace documentation](https://huggingface.co/transformers/model_doc/bert.html)
#
# [Bert documentation](https://characters.fandom.com/wiki/Bert_(Sesame_Street) ;)
# + [markdown] Collapsed="false"
# <img src="Images/BERT_diagrams.pdf" width="1000">
# + [markdown] Collapsed="false"
# ## Exploratory Data Analysis and Preprocessing
# + [markdown] Collapsed="false"
# We will use the SMILE Twitter dataset.
#
# _<NAME>; <NAME>; <NAME>; <NAME>; <NAME>; <NAME> (2016): SMILE Twitter Emotion dataset. figshare. Dataset. https://doi.org/10.6084/m9.figshare.3187909.v2_
# + Collapsed="false"
import torch
import pandas as pd
from tqdm.notebook import tqdm
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
# + Collapsed="false"
df_train=pd.read_csv("/kaggle/input/janatahack-independence-day-2020-ml-hackathon/train.csv")
df_test=pd.read_csv("/kaggle/input/janatahack-independence-day-2020-ml-hackathon/test.csv")
df_train.set_index('ID', inplace=True)
# + Collapsed="false"
df_train.head()
# +
df_train["text"]=df_train["TITLE"]+df_train["ABSTRACT"]
df_test["text"]=df_test["TITLE"]+df_test["ABSTRACT"]
del df_train["TITLE"]
del df_train["ABSTRACT"]
#del df_train["ID"]
#main_test_ids=df_test["ID"]
main_test_title=df_test["TITLE"]
main_test_abstract=df_test["ABSTRACT"]
del df_test["TITLE"]
del df_test["ABSTRACT"]
#del df_test["ID"]
df_train.head()
# -
df_train["text"][1]
df_train_classes=df_train.drop("text",axis=1)
df_train_classes.head()
df_train.rename(columns={"Physics": "label"})
possible_labels = df_train.Physics.unique()
# + Collapsed="false"
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
# + Collapsed="false"
df_train['label'] = df_train.Physics.replace(label_dict)
# -
# + Collapsed="false"
df_train.head()
# + [markdown] Collapsed="false"
# ## Training/Validation Split
# + Collapsed="false"
from sklearn.model_selection import train_test_split
# + Collapsed="false"
X_train, X_val, y_train, y_val = train_test_split(df_train.index.values,
df_train.label.values,
test_size=0.15,
random_state=17,
stratify=df_train.label.values)
# + Collapsed="false"
df_train['data_type'] = ['not_set']*df_train.shape[0]
# + Collapsed="false"
df_train.loc[X_train, 'data_type'] = 'train'
df_train.loc[X_val, 'data_type'] = 'val'
# + [markdown] Collapsed="false"
# ## Loading Tokenizer and Encoding our Data
# + Collapsed="false"
from transformers import BertTokenizer
from torch.utils.data import TensorDataset
# + Collapsed="false"
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
do_lower_case=True)
# + Collapsed="false"
encoded_data_train = tokenizer.batch_encode_plus(
df_train[df_train.data_type=='train'].text.values,
add_special_tokens=True,
return_attention_mask=True,
pad_to_max_length=True,
max_length=256,
return_tensors='pt'
)
encoded_data_val = tokenizer.batch_encode_plus(
df_train[df_train.data_type=='val'].text.values,
add_special_tokens=True,
return_attention_mask=True,
pad_to_max_length=True,
max_length=256,
return_tensors='pt'
)
input_ids_train = encoded_data_train['input_ids']
attention_masks_train = encoded_data_train['attention_mask']
labels_train = torch.tensor(df_train[df_train.data_type=='train'].label.values)
input_ids_val = encoded_data_val['input_ids']
attention_masks_val = encoded_data_val['attention_mask']
labels_val = torch.tensor(df_train[df_train.data_type=='val'].label.values)
# + Collapsed="false"
dataset_train = TensorDataset(input_ids_train, attention_masks_train, labels_train)
dataset_val = TensorDataset(input_ids_val, attention_masks_val, labels_val)
# + Collapsed="false"
len(dataset_train)
# + Collapsed="false"
len(dataset_val)
# + [markdown] Collapsed="false"
# ## Setting up BERT Pretrained Model
# + Collapsed="false"
from transformers import BertForSequenceClassification
# + Collapsed="false"
model = BertForSequenceClassification.from_pretrained("bert-base-uncased",
num_labels=len(label_dict),
output_attentions=False,
output_hidden_states=False)
# + [markdown] Collapsed="false"
# ## Creating Data Loaders
# + Collapsed="false"
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
# + Collapsed="false"
batch_size = 32
dataloader_train = DataLoader(dataset_train,
sampler=RandomSampler(dataset_train),
batch_size=batch_size)
dataloader_validation = DataLoader(dataset_val,
sampler=SequentialSampler(dataset_val),
batch_size=batch_size)
# + [markdown] Collapsed="false"
# ## Setting Up Optimiser and Scheduler
# + Collapsed="false"
from transformers import AdamW, get_linear_schedule_with_warmup
# + Collapsed="false"
optimizer = AdamW(model.parameters(),
lr=1e-5,
eps=1e-8)
# + Collapsed="false"
epochs = 3
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=0,
num_training_steps=len(dataloader_train)*epochs)
# + [markdown] Collapsed="false"
# ## Defining our Performance Metrics
# + [markdown] Collapsed="false"
# Accuracy metric approach originally used in accuracy function in [this tutorial](https://mccormickml.com/2019/07/22/BERT-fine-tuning/#41-bertforsequenceclassification).
# + Collapsed="false"
import numpy as np
# + Collapsed="false"
from sklearn.metrics import f1_score
# + Collapsed="false"
def f1_score_func(preds, labels):
preds_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return f1_score(labels_flat, preds_flat, average='weighted')
# + Collapsed="false"
def accuracy_per_class(preds, labels):
label_dict_inverse = {v: k for k, v in label_dict.items()}
preds_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
for label in np.unique(labels_flat):
y_preds = preds_flat[labels_flat==label]
y_true = labels_flat[labels_flat==label]
print(f'Class: {label_dict_inverse[label]}')
print(f'Accuracy: {len(y_preds[y_preds==label])}/{len(y_true)}\n')
# + [markdown] Collapsed="false"
# ## Creating our Training Loop
# + [markdown] Collapsed="false"
# Approach adapted from an older version of HuggingFace's `run_glue.py` script. Accessible [here](https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128).
# + Collapsed="false"
import random
seed_val = 17
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# + Collapsed="false"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
print(device)
# + Collapsed="false"
def evaluate(dataloader_val):
model.eval()
loss_val_total = 0
predictions, true_vals = [], []
for batch in dataloader_val:
batch = tuple(b.to(device) for b in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[2],
}
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
logits = outputs[1]
loss_val_total += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = inputs['labels'].cpu().numpy()
predictions.append(logits)
true_vals.append(label_ids)
loss_val_avg = loss_val_total/len(dataloader_val)
predictions = np.concatenate(predictions, axis=0)
true_vals = np.concatenate(true_vals, axis=0)
return loss_val_avg, predictions, true_vals
# + Collapsed="false"
for epoch in tqdm(range(1, epochs+1)):
model.train()
loss_train_total = 0
progress_bar = tqdm(dataloader_train, desc='Epoch {:1d}'.format(epoch), leave=False, disable=False)
for batch in progress_bar:
model.zero_grad()
batch = tuple(b.to(device) for b in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[2],
}
outputs = model(**inputs)
loss = outputs[0]
loss_train_total += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
progress_bar.set_postfix({'training_loss': '{:.3f}'.format(loss.item()/len(batch))})
torch.save(model.state_dict(), f'finetuned_BERT_epoch_{epoch}.model')
tqdm.write(f'\nEpoch {epoch}')
loss_train_avg = loss_train_total/len(dataloader_train)
tqdm.write(f'Training loss: {loss_train_avg}')
val_loss, predictions, true_vals = evaluate(dataloader_validation)
val_f1 = f1_score_func(predictions, true_vals)
tqdm.write(f'Validation loss: {val_loss}')
tqdm.write(f'F1 Score (Weighted): {val_f1}')
# + Collapsed="false"
model = BertForSequenceClassification.from_pretrained("bert-base-uncased",
num_labels=len(label_dict),
output_attentions=False,
output_hidden_states=False)
model.to(device)
# + Collapsed="false"
model.load_state_dict(torch.load('./finetuned_BERT_epoch_2.model', map_location=torch.device('cpu')))
# + Collapsed="false"
_, predictions, true_vals = evaluate(dataloader_validation)
# + Collapsed="false"
accuracy_per_class(predictions, true_vals)
# + Collapsed="false"
Ypred = model.predict(test_data)
|
bert-2020.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (gpr)
# language: python
# name: venv-gpr
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import itertools
import pymc3 as pm
# %matplotlib inline
# +
#Seismic displacement in three-dimensions
#Defining the main parameters of the model
rho0 = 1800 #Density of the homogeneous medium in kg/m^3, as taken from Beker's thesis paper, pg. 84
alpha = 0.31 #Parameter used in Beker's paper to determine primary wave speed pg. 84
beta = 0.25 #Parameter used in Beker's paper to determine primary wave speed pg 84.
nu = 0.25 #Poisson ratio as taken from Beker's thesis paper pg. 84
#Calculating the wave speeds using above paramters
CP = ((rho0/1000) / alpha)**(1.0 / beta) #Calculating primary wave speed using equation from Beker's paper, pg. 84
CS = np.sqrt((1-2*nu)/(2-2*nu)) * CP #Calculating secondary wave speed using equation from Beker's paper, pg. 84
Root = np.roots([1, -8, 8 * ((2 - nu)/(1 - nu)), -8 / (1 - nu)]) #Calculating the the ratio of the R wave speed to the p wave speed squared using equation found in Harm's and Beker's paper, pg. 20 in Beker's paper
for i in Root:
if 0<i<1:
CR = np.sqrt(CS**2 *i) #calculating R wave speed
x2_list = np.linspace(0, 500, 24) #x-values to be evaluated
y2_list = np.linspace(0, 500, 24) #y-values to be evaluated
z2_list = np.array([0]) #z-values to be evaluated
t2_list = np.linspace(2, 4, 50) #time interval to be evaluated over
#Calculating seismic displacement using equation from Harm's paper "Terrestial Gravity Fluctuations", pg. 31
def xi_horiz(x, y, z, t, f, theta, phi):
omega = 2*np.pi*f #calculating the angular frequency
ke = omega / CR #Calculating horizontal wave number of the Rayleigh wave
ke_vec = [np.cos(theta) * ke, np.sin(theta) * ke] #Rayleigh wave vector in x-y plane
ks = omega / CS #Calculatin the secondary wave number of the Rayleigh wave
kp = omega / CP #Calculating the primary wave number of the Rayleigh wave
q_z_s = np.sqrt(ke**2 - ks**2) #Calculating wave parameter used in Harm's model, pg. 31
q_z_p = np.sqrt(ke**2 - kp**2) #Calculating wave parameter used in Harm's model, pg. 31
zeta = np.sqrt(q_z_p / q_z_s) #Calculating wave parameter used in Harm's model, pg. 32
return (ke * np.exp(q_z_p * z) - zeta * np.exp(q_z_s * z)) * np.sin(np.dot(ke_vec, [x,y]) - omega * t + phi)
def xi_vert(x, y, z, t, f, theta, phi):
omega = 2*np.pi*f #calculating the angular frequency
ke = omega / CR #Calculating horizontal wave number of the Rayleigh wave
ke_vec = [np.cos(theta) * ke, np.sin(theta) * ke] #Rayleigh wave vector in x-y plane
ks = omega / CS #Calculatin the secondary wave number of the Rayleigh wave
kp = omega / CP #Calculating the primary wave number of the Rayleigh wave
q_z_s = np.sqrt(ke**2 - ks**2) #Calculating wave parameter used in Harm's model, pg. 31
q_z_p = np.sqrt(ke**2 - kp**2) #Calculating wave parameter used in Harm's model, pg. 31
zeta = np.sqrt(q_z_p / q_z_s) #Calculating wave parameter used in Harm's model, pg. 32
return (q_z_p * np.exp(q_z_p * z) - zeta * ke * np.exp(q_z_s * z)) * np.cos(np.dot(ke_vec, [x,y]) - omega * t + phi)
#Defining displacement vectors to be used in the Newtonian Noise calculation, according to Harm's definition in "Terrestial Gravity Fluctuations" on pg.32
def xi_vect(x, y, z, t, f, theta, phi):
return np.array([np.cos(theta) * xi_horiz(x, y, z, t, f, theta, phi), np.sin(theta) * xi_horiz(x, y, z, t, f, theta, phi), xi_vert(x, y, z, t, f, theta, phi)])
def xi_horiz_vect(x, y, z, t, f, theta, phi):
return np.array([np.cos(theta) * xi_horiz(x, y, z, t, f, theta, phi), np.sin(theta) * xi_horiz(x, y, z, t, f, theta, phi)])
# +
#Newtonian noise from an impulse of a Rayleigh wave in three dimensions
#Defining constants
G = 6.67e-11 #Newton's constant of gravitation
rho0 = 1800 #Density of the medium
x2_list = np.linspace(0, 500, 31) #x-values to be evaluated
y2_list = np.linspace(0, 500, 31) #y-values to be evaluated
z2_list = np.array([0]) #z-values to be evaluated
t2_list = np.linspace(0, 1, 200) #time interval to be evaluated over
V = (500 * 500 * 3) / (len(x2_list) * len(y2_list) * len(z2_list))
#Calculating the Seismic Newtonian Noise contribution of a single point using equation (4.13) from Beker's thesis paper on pg. 92
def seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0):
r = [x - x0, y - y0, z-z0] #vector from point to test mass
r_mag = np.linalg.norm(r) #magnitude of r vector
r_hat = r / r_mag #unit vector in r direction
xi = xi_vect(x, y, z, t, f, theta, phi)
return G * rho0 * V * (1.0 / r_mag**3) * (xi - 3 * np.dot(r_hat, xi) * r_hat)
grid = itertools.product(x2_list, y2_list, z2_list) #Creating Cartesian product of the lists for each direction
gridlist = list(grid) #Creating list of the Cartesian products
points = len(x2_list) * len(y2_list) * len(z2_list) #Calculatin the number of points in the array
nnlist = np.zeros((1,1,points,3)) #Defining list to place the Newtonian Noise calculation at each point into
#Defining a spiral array to use for the Newtonian Noise calculation
pointlist = np.array([[[0,0,0], [np.pi * np.sqrt(3) / 12, np.pi / 12, 0], [np.pi * np.sqrt(2) / 8, np.pi * np.sqrt(2) / 8, 0], [-np.pi / 3, np.pi * np.sqrt(3) / 3, 0], [-np.pi, 0, 0], [- 5 * np.pi * np.sqrt(2) / 8, -5 * np.pi * np.sqrt(2) / 8, 0], [0, -3 * np.pi / 2, 0], [11 * np.pi * np.sqrt(3)/12, -11*np.pi/12,0], [2*np.pi, 0, 0], [7 * np.pi/6, 7*np.pi*np.sqrt(3)/6,0], [17*np.pi*np.sqrt(3)/12, 17*np.pi/2, 0]]])
#Defining function to calculate total Seismic Newtonian Noise at a single point in time
def seisnn3d_total(t, f, theta, phi, x0, y0, z0):
for i, p in enumerate(pointlist): #Using the points defined in the spiral array for the calculations
x = pointlist[0,i,0] #Referencing the x-coordinate of each Cartesian product in gridlist
y = pointlist[0,i,1] #Referencing the y-coordinate of each Cartesian product in gridlist
z = pointlist[0,i,2] #Referencing the z-coordinate of each Cartesian product in gridlist
if ((-1.0 / np.tan(theta)) * x + (CR * t - (CR / 2)) / np.cos(theta)) <= y <= ((-1.0 / np.tan(theta)) * x + (CR * t) / np.cos(theta)):
nnlist[0,0,i] = seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0)
else:
nnlist[0,0,i] = np.array([0,0,0])
nntotal = np.sum(nnlist, 2)
nntotal_mag = np.linalg.norm(nntotal)
return nntotal_mag
# +
T_list = np.zeros(len(t2_list))
for i, tn in enumerate(t2_list):
T_list[i] = seisnn3d_total(tn, 2, np.pi/4, 0, 250, 250, 1)
fig = plt.figure(figsize =(20,10))
ax = fig.add_subplot(111, xlabel = 'Time (Sec.)', ylabel = 'Total Seismic NN', title = 'Seismic NN due to Propagating Rayleigh Wave')
ax.plot(t2_list, T_list)
# +
#Newtonian noise from a Rayleigh wave in three dimensions
#Defining constants
G = 6.67e-11 #Newton's constant of gravitation
x2_list = np.linspace(0, 500, 201) #x-values to be evaluated
y2_list = np.linspace(0, 500, 201) #y-values to be evaluated
z2_list = np.array([0]) #z-values to be evaluated
t2_list = np.linspace(3, 5, 50) #time interval to be evaluated over
V = (500 * 500 * 100)/(len(x2_list) * len(y2_list) * len(z2_list))
#Calculating the Seismic Newtonian Noise contribution of a single point using equation (4.13) from Beker's thesis paper on pg. 92
def seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0):
r = [x - x0, y - y0, z - z0] #vector from point to test mass
r_mag = np.linalg.norm(r) #magnitude of r vector
r_hat = r / r_mag #unit vector in r direction
xi = xi_vect(x, y, z, t, f, theta, phi)
return G * rho0 * V * (1.0 / r_mag**3) * (xi - 3 * np.dot(r_hat, xi) * r_hat)
grid = itertools.product(x2_list, y2_list, z2_list) #Creating Cartesian product of the lists for each direction
gridlist = list(grid) #Creating list of the Cartesian products
points = len(x2_list) * len(y2_list) * len(z2_list) #Calculatin the number of points in the array
nnlist = np.zeros((1,1,points,3)) #Defining list to place the Newtonian Noise calculation at each point into
#Defining a spiral array to use for the Newtonian Noise calculation
pointlist = np.array([[[0,0,0], [np.pi * np.sqrt(3) / 12, np.pi / 12, 0], [np.pi * np.sqrt(2) / 8, np.pi * np.sqrt(2) / 8, 0], [-np.pi / 3, np.pi * np.sqrt(3) / 3, 0], [-np.pi, 0, 0], [- 5 * np.pi * np.sqrt(2) / 8, -5 * np.pi * np.sqrt(2) / 8, 0], [0, -3 * np.pi / 2, 0], [11 * np.pi * np.sqrt(3)/12, -11*np.pi/12,0], [2*np.pi, 0, 0], [7 * np.pi/6, 7*np.pi*np.sqrt(3)/6,0], [17*np.pi*np.sqrt(3)/12, 17*np.pi/2, 0]]])
#Defining function to calculate total Seismic Newtonian Noise at a single point in time
def seisnn3d_total(t, f, theta, phi, x0, y0, z0):
for i, p in enumerate(gridlist):
x = gridlist[i][0] #Referencing the x-coordinate of each Cartesian product in gridlist
y = gridlist[i][1] #Referencing the y-coordinate of each Cartesian product in gridlist
z = gridlist[i][2] #Referencing the z-coordinate of each Cartesian product in gridlist
nnlist[0,0,i] = seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0)
nntotal = np.sum(nnlist, 2)
nntotal_mag = np.linalg.norm(nntotal)
return nntotal_mag
# +
T_list = np.zeros(len(t2_list))
for i, tn in enumerate(t2_list):
T_list[i] = seisnn3d_total(tn, 2, np.pi/4, 0, 250,250,1)
fig = plt.figure(figsize =(20,10))
ax = fig.add_subplot(111, xlabel = 'Time (Sec.)', ylabel = 'Total Seismic NN', title = 'Seismic NN due to Propagating Rayleigh Wave')
ax.plot(t2_list, T_list)
# -
np.sum(T_list) / len(T_list)
np.sum(T_list) / len(T_list)
|
notebooks/Seismic Newtonian Noise 3D.ipynb
|
% -*- coding: utf-8 -*-
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Octave
% language: octave
% name: octave
% ---
% # SPM12 scripting in a Jupyter notebook with an Octave kernel
% This is a [Jupyter notebook](https://jupyter.org/) running an [Octave kernel](https://github.com/Calysto/octave_kernel) on [Binder](https://mybinder.org/), which allows cloud-based computation using SPM12 and Matlab scripts. It is therefore a reproducible environment for fMRI analysis using [SPM12](https://www.fil.ion.ucl.ac.uk/spm/software/download/). Anyone with internet access can run this same code on the same data and should get the same results. It is not only a useful tool for tutorials and sharing work with collaborators, but specifically also as a means to share the data and methods underlying your scientific results.
%
% For more information on how this service was set up, see [this blog post]().
%
% The code below is based on a tutorial I previously shared in a [blog post](https://jsheunis.github.io/2018-06-28-spm12-matlab-scripting-tutorial-3/), and then later extended to a [reproducible environment with Code Ocean](https://jsheunis.github.io/2018-10-31-reproducible-fmri-codeocean/). This notebook achieves more or less the same using Binder, which is free for use and open source.
%
% The instructions and code below will run through the process of performing several standard preprocessing and statistical analysis steps on a task-fMRI dataset using Matlab-based scripts and SPM12. We’re using an open and [freely available dataset](https://openneuro.org/datasets/ds000157/versions/00001) from [OpenNeuro](https://openneuro.org/), which includes functional and anatomical data for multiple subjects that took part in a “block design food and nonfood picture viewing task” (many thanks to the researchers for sharing their study data online!).
%
% **Disclaimer**: these scripts (excluding external libraries) are for illustrative/tutorial purposes only. They have not been rigorously peer-reviewed and could contain errors. They also do not necessaroly include all steps that would typically form part of a robust analysis pipeline. Please use your own discretion.
%
%
% ## Data and experimental design
%
% The data contains anatomical and functional images for multiple subjects from a task-based experiment about food temptation. More details can be found in the article (available through PubMed [here](https://www.ncbi.nlm.nih.gov/pubmed/23578759)), but the important thing for our purposes is that the experiment was a task-based design alternating between blocks of visual stimulus (photos of food and other objects shown to the subjects) and rest periods (where nothing was shown except for a small fixation cursor). Such a design allows us to apply statistical processes to the time series data (after appropriate preprocessing) in order to determine which voxels in the brain are more likely to be involved in visual processing. It is well known that the visual cortex is located at the posterior part of the human brain, so it will therefore be easy for us to know if our code fails miserably or whether it does what we expect it to do.
% ## STEP 1: Initialize directory structure for data inputs and outputs
% If you are reading this on Binder, the necessary data and scripts would already have been downloaded to your compute environment hosted in the cloud. This includes the `ds000157-download` directory downloaded from OpenNeuro, which contains a single subject's data. It also includes Matlab scripts and libraries used below, such as directories `scripts`, `spm12`, and `dicm2nii`.
%
% Run the code below in order to extract the required image files, move them to the correct locations, setup the desired directory structure, and initialize required variables.
% Preprocessed and analysed data will be in "results/" folder
data_dir = fullfile(getenv('HOME'),'ds000157-download');
spm_dir = fullfile(getenv('HOME'),'spm12');
results_dir = fullfile(getenv('HOME'),'results');
stats_dir = [results_dir filesep 'stats'];
processing_dir = [results_dir filesep 'preproc_data'];
if ~exist(stats_dir,'dir')
mkdir(stats_dir)
end
if ~exist(processing_dir,'dir')
mkdir(processing_dir)
end
% Initialize subject data; extract; copy to preprocessing folder
sub = '01';
s_raw_fn = fullfile(data_dir, ['sub-' sub], 'anat', ['sub-' sub '_T1w.nii.gz']);
if exist(s_raw_fn, 'file')
out_fns = gunzip(s_raw_fn);
s_fn = out_fns{1};
else
s_fn = strrep(s_raw_fn, '.gz', '');
end
f_raw_fn = fullfile(data_dir, ['sub-' sub], 'func', ['sub-' sub '_task-passiveimageviewing_bold.nii.gz']);
if exist(f_raw_fn, 'file')
out_fns = gunzip(f_raw_fn);
f_fn = out_fns{1};
else
f_fn = strrep(f_raw_fn, '.gz', '');
end
% Create preprocessing subfolders
anat_dir = fullfile(processing_dir, ['sub-' sub], 'anat');
func_dir = fullfile(processing_dir, ['sub-' sub], 'func');
if ~exist(anat_dir, 'dir')
mkdir(anat_dir)
end
if ~exist(func_dir, 'dir')
mkdir(func_dir)
end
% Copy extracted files to preprocessing subfolders
anat_fn = fullfile(processing_dir, ['sub-' sub], 'anat', ['sub-' sub '_T1w.nii']);
func_fn = fullfile(processing_dir, ['sub-' sub], 'func', ['sub-' sub '_task-passiveimageviewing_bold.nii']);
if ~exist(anat_fn, 'file')
copyfile(s_fn, anat_dir)
end
if ~exist(func_fn, 'file')
copyfile(f_fn, func_dir)
end
% Initialize processing variables
fwhm = 6; % mm
% ## STEP 2: Quick inspection of data
%
% Let's load the anatomical data (using a derived form of `nii_viewer` from `dicm2nii`) to display the brain in three dimensions (saggital, coronal, axial).
% + magic_args="QUICK INSPECTION OF ANATOMICAL DATA"
[p_anat, frm1, rg1, dim1] = fmrwhy_util_readNifti(anat_fn);
anat_3Dimg = p_anat.nii.img;
[Ni, Nj, Nk] = size(anat_3Dimg);
subplot(131); imagesc(rot90(squeeze(anat_3Dimg(round(Ni/2),:,:)))); colormap gray; axis image
subplot(132); imagesc(rot90(squeeze(anat_3Dimg(:,round(Nj/2),:)))); colormap gray; axis image
subplot(133); imagesc(rot90(squeeze(anat_3Dimg(:,:,round(Nk/2))))); colormap gray; axis image
% -
% Let's load the function data (using a derived form of `nii_viewer` from `dicm2nii`) to display the brain in three dimensions (saggital, coronal, axial).
% + pycharm={"name": "#%%\n"} magic_args="QUICK INSPECTION OF FUNCTIONAL DATA"
[p_func, frm1, rg1, dim1] = fmrwhy_util_readNifti(func_fn);
func_4Dimg = p_func.nii.img;
[Ni, Nj, Nk, Nt] = size(func_4Dimg);
subplot(131); imagesc(rot90(squeeze(func_4Dimg(round(Ni/2),:,:,1)))); colormap gray; axis image
subplot(132); imagesc(rot90(squeeze(func_4Dimg(:,round(Nj/2),:,1)))); colormap gray; axis image
subplot(133); imagesc(rot90(squeeze(func_4Dimg(:,:,round(Nk/2),1)))); colormap gray; axis image
% -
% ## STEP 3: Preprocessing
%
% Preprocessing the data starts by calling `spm_standardPreproc_jsh` (located in the `scripts` directory). This function includes steps for:
%
% 1. Realigning all functional image volumes to the first functional image volume in the timeseries.
% 2. Coregistering the anatomical image to the first functional image volume in the timeseries.
% 3. Segmenting the coregistered anatomical image into tissue types (grey matter, white matter, cerebrospinal fluid).
% 4. Reslicing the coregistered anatomical image and all segmented tissue type images to the same resolution as the first functional image volume in the timeseries.
% 5. Smoothing the realigned functional timeseries data with a specified Gaussian kernel size.
% 6. Returning all results to a preproc_data structure.
%
% This can take some time to run, perhaps around 15 minutes. Grab yourself a coffee...
% Preprocess structural and functional images (if not already)
% Calls function `spm_standardPreproc_jsh`
[d, f, e] = fileparts(anat_fn);
[d1, f1, e1] = fileparts(func_fn);
if exist([d filesep 'rc1' f e], 'file')
disp('...preproc already done, saving variables...')
preproc_data = struct;
% Structural filenames
preproc_data.forward_transformation = [d filesep 'y_' f e];
preproc_data.inverse_transformation = [d filesep 'iy_' f e];
preproc_data.gm_fn = [d filesep 'c1' f e];
preproc_data.wm_fn = [d filesep 'c2' f e];
preproc_data.csf_fn = [d filesep 'c3' f e];
preproc_data.bone_fn = [d filesep 'c4' f e];
preproc_data.soft_fn = [d filesep 'c5' f e];
preproc_data.air_fn = [d filesep 'c6' f e];
preproc_data.rstructural_fn = [d filesep 'r' f e];
preproc_data.rgm_fn = [d filesep 'rc1' f e];
preproc_data.rwm_fn = [d filesep 'rc2' f e];
preproc_data.rcsf_fn = [d filesep 'rc3' f e];
preproc_data.rbone_fn = [d filesep 'rc4' f e];
preproc_data.rsoft_fn = [d filesep 'rc5' f e];
preproc_data.rair_fn = [d filesep 'rc6' f e];
% Functional filenames
preproc_data.rfunctional_fn = [d1 filesep 'r' f1 e1];
preproc_data.srfunctional_fn = [d1 filesep 'sr' f1 e1];
preproc_data.mp_fn = [d1 filesep 'rp_' f1 '.txt'];
preproc_data.MP = load(preproc_data.mp_fn);
else
disp('...running preprocessing batch jobs...')
preproc_data = spm_standardPreproc_jsh(func_fn, anat_fn, fwhm, spm_dir);
end
% ## STEP 4: Create 1st level statistical design
% Here we create the first level statistical design to analyse the preprocessed data. This is done with `spm_specify1stlevel_jsh`.
%
% We set the timing units as seconds because that is how we want to specify the task stimulus onsets and durations (the other unit option is ‘scans’). The repetition time is set to 1.6 seconds, as specified in the `task-passiveimageviewing_bold.json` file supplied together with the dataset. The task stimulus onsets and durations are given in the `sub-01_task-passiveimageviewing_events.tsv` file supplied with the `sub-01` data. The `.tsv` file also gives the type of stimulus, either food or non-food pictures. For this exercise, we are not interested in the differences in brain responses between food and non-food picture viewing, rather we just want to see which voxels in the brain are likely involved in any type of picture viewing experience vs only looking at a black screen with a cursor. This is why we specified vectors of all onset times and durations (except for the break period) for the respective parameters. Finally we also supply the text file with the movement parameters (`preproc_data.mp_fn`; resulting from the 6 DOF rigid body transformations during the realignment step) to be included as regressors in the design matrix.
% Set up statistical design parameters, based on task data
sess_params = struct;
sess_params.timing_units = 'secs';
sess_params.timing_RT = 1.6;
sess_params.cond_name = 'Pictures';
sess_params.cond_onset = [0; 40.1; 77.2; 111.3; 143.3; 179.4; 218.5; 251.5; 299.6; 334.7; 374.8; 411.9; 445.9; 478.0; 514.1; 553.2];
sess_params.cond_duration = [24.1000; 24.06; 24.07; 24.06; 24.06; 24.07; 24.04; 24.06; 24.07; 24.10; 24.06; 24.06; 24.09; 24.09; 24.06; 24.07];
% Call script to set up design
spm_specify1stlevel_jsh(stats_dir, preproc_data.srfunctional_fn, preproc_data.mp_fn, sess_params)
% Display/explore design matrix
load([stats_dir filesep 'SPM.mat']);
spm_DesRep('fMRIDesMtx',SPM,1,1)
saveas(gcf, [processing_dir filesep 'des_mat.png'])
% ## STEP 5: Estimate the model
% Estimating the model fit, i.e. running the general linear model with the specified design. This is done with `spm_estimateModel_jsh`.
spm_estimateModel_jsh(stats_dir)
% ## STEP 6: Setup the task contrast
% Here we create the task contrast used as input for the statistical testing. This is done with `spm_setupTaskContrast_jsh`.
[Ntt, Nregr] = size(SPM.xX.X);
contrast_params = struct;
contrast_params.weights = zeros(1, Nregr);
contrast_params.weights(1) = 1;
contrast_params.name = 'Picture viewing';
spm_setupTaskContrast_jsh(stats_dir, contrast_params)
% ## STEP 7: Generate results
% Finally, we apply statistical tests and some correction factors to generate a 3D map of thresholded t-values (resulting from t-tests), which indicate voxels that are likely to be involved in the task. This is done with ``
% `spm_runResults_jsh`.
spm_runResults_jsh(stats_dir)
% ## STEP 8: Explore and visualise results
%
% This part is still to be expanded...
|
spm12_scripting_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Import Necessary Libraries
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import pickle
# #### Import Saved data
# +
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
# -
# #### Normalize data
X = X/255.0
# #### Build Model
#
# #### The basic CNN structure is as follows: Convolution -> Pooling -> Convolution -> Pooling -> Fully Connected Layer -> Output
# +
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# -
# #### Fit model, set batch size, set preffered number of epochs and split validation
model.fit(X, y, batch_size=32, epochs=3, validation_split=0.1)
# #### Higher epochs and lower validation split increases the accuracy of the result
|
ApplyingCNN_onpepperbell_documentation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/thimotyb/real-world-machine-learning/blob/python3/Forest%2C_Features_and_LightGBM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="MJAripWzDzMo"
# # Dimostrazione di valutazione dell'importanza delle feature selezionate nella foresta
# ## Confronto tra classificazione ad albero singolo, random forest e LightGBM
#
# + [markdown] colab_type="text" id="SjPAEh6tEjU4"
# Import SciKit Tree Library and LightGBM Library
# + colab={} colab_type="code" id="K-AAeVxODnbw"
from sklearn import tree
import pandas
import numpy as np
import matplotlib.pyplot as plt
import lightgbm as lgb # Microsoft lightGBM install with: conda install -c conda-forge lightgbm
# + [markdown] colab_type="text" id="ZiGYOJy_EoKI"
# My functions to do OHE and do feature engineering on Titanic
# + colab={} colab_type="code" id="PMGlN204Dv3l"
def cat_to_num(data):
categories = np.unique(data)
features = {}
for cat in categories:
binary = (data == cat)
features["%s=%s" % (data.name, cat)] = binary.astype("int")
return pandas.DataFrame(features)
def prepare_data(data):
"""Takes a dataframe of raw data and returns ML model features
"""
# Initially, we build a model only on the available numerical values
features = data.drop(["PassengerId", "Survived", "Fare", "Name", "Sex", "Ticket", "Cabin", "Embarked"], axis=1)
# Setting missing age values to -1
features["Age"] = data["Age"].fillna(-1)
# Adding the sqrt of the fare feature
features["sqrt_Fare"] = np.sqrt(data["Fare"])
# Adding gender categorical value
features = features.join( cat_to_num(data['Sex']) )
# Adding Embarked categorical value
features = features.join( cat_to_num(data['Embarked'].fillna("")) )
return features
# + [markdown] colab_type="text" id="ppqQdGe8EfmA"
# Load, split, prepare data
# + colab={} colab_type="code" id="E53EOtVxEGRD"
data = pandas.read_csv("https://raw.githubusercontent.com/thimotyb/real-world-machine-learning/master/data/titanic.csv")
data_train = data[:int(0.8*len(data))]
data_test = data[int(0.8*len(data)):]
features = prepare_data(data_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} colab_type="code" id="AFcvuajTyCl9" outputId="f1073fc9-3211-464c-bff2-2652198a0683"
features
# + [markdown] colab_type="text" id="2tPeUXNcEuBz"
# ## First Model: Using a Single Tree Classifier
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="7IfusBm2ELLH" outputId="849db573-8bea-4f9c-a232-2665e6c8f188"
model = tree.DecisionTreeClassifier(max_depth = 4)
model.fit(features, data_train["Survived"])
print(model.score(prepare_data(data_train), data_train["Survived"]))
model.score(prepare_data(data_test), data_test["Survived"])
# + [markdown] colab_type="text" id="54pptlrfE_eI"
# ## Second Model: Using a Random Forest
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="1AylbtlzE3Ct" outputId="7f3a6806-e21a-46bc-89b2-8b3968a91d5d"
from sklearn import ensemble # RF is an ensemble category of model
forest = ensemble.RandomForestClassifier(n_estimators=5000, bootstrap=True, max_features=0.5)
forest.fit(features, data_train["Survived"])
print(forest.score(prepare_data(data_train), data_train["Survived"]))
forest.score(prepare_data(data_test), data_test["Survived"])
# + [markdown] colab_type="text" id="xRz1Aql03JTM"
# Si possono anche richiedere le predizioni in formato probabilistico, per settare la soglia di conversione del falso/vero positivo
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" id="Nz4OlkU721Yt" outputId="d989d8c2-83a6-4932-9677-1fd8497abf60"
forest_predict_proba=forest.predict_proba(prepare_data(data_test))
forest_predict_proba[:20]
# + [markdown] colab_type="text" id="7BUefXwXGUL0"
# Feature importance in the Random Forest
# + colab={} colab_type="code" id="8Le6pdE7KbmA"
# Elenca e visualizza il ranking delle feature usate nelal foresta
def show_features(forest, X, y):
importances = forest.feature_importances_
# Calcola deviazione standard per plottare errore
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %s (%f)" % (indices[f], features.columns[indices[f]], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 485} colab_type="code" id="X2l-QRi8FZYA" outputId="c7feeb2c-664a-4bb1-8885-87f9052ec66d"
show_features(forest, features, data_train["Survived"])
# + [markdown] colab_type="text" id="SihiKhcJKLJ5"
# Mostra gli Stimatori che sono stati usati per comporre la foresta
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="cIawiZT8GvqT" outputId="e80f42d5-c83f-459f-f39e-277659d69f23"
len(forest.estimators_) # Questo coincide con il numero di stimatori prodotti per popolare la foresta
# + [markdown] colab_type="text" id="Z3LOY4g7LDU2"
# ## Classificazione con una foresta più grande
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="pkpLx3p5JUcT" outputId="3cd1ea71-1008-45b7-aa37-48bf69071f99"
extraforest = ensemble.ExtraTreesClassifier(n_estimators=100, bootstrap=True)
extraforest.fit(features, data_train["Survived"])
print(extraforest.score(prepare_data(data_train), data_train["Survived"]))
extraforest.score(prepare_data(data_test), data_test["Survived"])
# + colab={"base_uri": "https://localhost:8080/", "height": 485} colab_type="code" id="rbNh7MfOLUAZ" outputId="1d85c359-1684-4ec1-90ac-ef8ddfd5d754"
show_features(extraforest, features, data_train["Survived"])
# + [markdown] colab_type="text" id="b2iJj-PsLxxa"
# ## Classificazione con LightGBM
# + colab={} colab_type="code" id="nB4wkDzsLqnJ"
d_train = lgb.Dataset(features, label=data_train["Survived"])
params = {}
params['learning_rate'] = 0.003
params['boosting_type'] = 'gbdt'
params['objective'] = 'binary'
params['metric'] = 'binary_logloss'
params['sub_feature'] = 0.5
params['num_leaves'] = 10
params['min_data'] = 50
params['max_depth'] = 10
clf = lgb.train(params, d_train, 100)
# + colab={} colab_type="code" id="fYfMgukFyLcy"
#Prediction
y_pred=clf.predict(prepare_data(data_test))
pred_proba=clf.predict(prepare_data(data_test))
#convert into binary values
threshold = .45 # setting threshold
for i in range(0,len(y_pred)):
if y_pred[i]>=threshold:
y_pred[i]=1
else:
y_pred[i]=0
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="WFuQW6ORzOoG" outputId="0839f390-9683-42ab-bfad-098a3700e776"
data_test["Survived"]
y_pred
# + [markdown] colab_type="text" id="zSLF5XeV1iPU"
# By definition a confusion matrix is such that Ci,j is equal to the number of observations known to be in group i but predicted to be in group j.
#
# Thus in binary classification, the count of true negatives is C0,0 , false negatives is C1,0, true positives is C1,1 and false positives is C0,1.
# + colab={} colab_type="code" id="mlhQ7rnlykl1"
#Confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(data_test["Survived"], y_pred)
#Accuracy
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(y_pred,data_test["Survived"])
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="T7ODroB5y2iT" outputId="c37f0607-23aa-48e6-dc23-ab99776f418f"
print("Confusion Matrix:")
print(cm)
print("Model Accuracy: {0}".format(accuracy))
# + [markdown] colab_type="text" id="cekK0P1Bj8RG"
# ## SHAP Explainer per interpretare i dati dell'ensemble LightGBM
#
# [link text](https://github.com/slundberg/shap)
# + colab={"base_uri": "https://localhost:8080/", "height": 340} colab_type="code" id="KkEpaduDz75P" outputId="2b6a492b-6340-462a-cafc-2154132b43f4"
# !pip install --upgrade shap
import shap
# + colab={} colab_type="code" id="I0i4IiPolg7r"
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="vOCqvRQbkJ6p" outputId="af86f9ea-b6f4-4a20-8966-5fd3677981be"
explainer = shap.TreeExplainer(clf)
prepared_test = prepare_data(data_test)
shap_values = explainer.shap_values(prepared_test)
# + [markdown] colab_type="text" id="NorAayMdk3p8"
# ## Visualize a single prediction
#
# + colab={"base_uri": "https://localhost:8080/", "height": 482} colab_type="code" id="rFuRTP2Skg-9" outputId="19869dc6-15db-4ad5-dc71-d4e257a08d28"
# print the JS visualization code to the notebook
shap.initjs()
observation = 18 # Plotta il diagramma di forza dell'osservazione 0 del data set di training
print("Name: {0}".format(data_test.loc[data_test.index[observation],'Name']))
print("Survived: {0}".format(data_test.loc[data_test.index[observation],'Survived']))
print("Predicted: {0}".format(y_pred[observation]))
print("Predicted Proba: {0}".format(pred_proba[observation]))
print("SHAP Score: {0}".format(explainer.expected_value[1]))
print(prepared_test.iloc[observation,:])
shap.force_plot(explainer.expected_value[1], shap_values[1][observation,:], prepared_test.iloc[observation,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 412} colab_type="code" id="yoshowkzk5KQ" outputId="8f812b61-7a71-426a-a072-88f8ec4c1925"
# visualize the training set predictions
shap.initjs()
shap.force_plot(explainer.expected_value[1], shap_values[1][:1000,:], prepared_test.iloc[:1000,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 386} colab_type="code" id="vdfJCJEp_clZ" outputId="8d95f211-86e4-4689-eddc-bb003e61109c"
shap.summary_plot(shap_values, prepared_test)
# + colab={} colab_type="code" id="s6PhB6SjAH4o"
|
Forest,_Features_and_LightGBM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sensitive Data Detection with the Labeler
# In this example, we utilize the Labeler component of the Data Profiler to detect the sensitive information for both structured and unstructured data. In addition, we show how to train the Labeler on some specific dataset with different list of entities.
#
# First, let's dive into what the Labeler is.
# ## What is the Labeler
# The Labeler is a pipeline designed to make building, training, and predictions with ML models quick and easy. There are 3 major components to the Labeler: the preprocessor, the model, and the postprocessor.
# 
# Each component can be switched out individually to suit your needs. As you might expect, the preprocessor takes in raw data and prepares it for the model, the model performs the prediction or training, and the postprocessor takes prediction results and turns them into human-readable results.
#
# Now let's run some examples. Start by importing all the requirements.
import os
import sys
import json
import pandas as pd
sys.path.insert(0, '..')
import dataprofiler as dp
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# ## Structured Data Prediction
# We'll use the aws honeypot dataset in the test folder for this example. First, look at the data using the Data Reader class of the Data Profiler. This dataset is from the US department of educations, [found here!](https://data.ed.gov/dataset/college-scorecard-all-data-files-through-6-2020/resources?resource=823ac095-bdfc-41b0-b508-4e8fc3110082)
data = dp.Data("../dataprofiler/tests/data/csv/SchoolDataSmall.csv")
df_data = data.data
df_data.head()
# We can directly predict the labels of a structured dataset on the cell level.
# +
labeler = dp.DataLabeler(labeler_type='structured')
# print out the labels and label mapping
print("Labels: {}".format(labeler.labels))
print("\n")
print("Label Mapping: {}".format(labeler.label_mapping))
print("\n")
# make predictions and get labels for each cell going row by row
# predict options are model dependent and the default model can show prediction confidences
predictions = labeler.predict(data, predict_options={"show_confidences": True})
# display prediction results
print("Predictions: {}".format(predictions['pred']))
print("\n")
# display confidence results
print("Confidences: {}".format(predictions['conf']))
# -
# The profiler uses the Labeler to perform column by column predictions. The data contains 11 columns, each of which has data label. Next, we will use the Labeler of the Data Profiler to predict the label for each column in this tabular dataset. Since we are only going to demo the labeling functionality, other options of the Data Profiler are disabled to keep this quick.
# +
# set options to only run the labeler
profile_options = dp.ProfilerOptions()
profile_options.set({"structured_options.text.is_enabled": False,
"int.is_enabled": False,
"float.is_enabled": False,
"order.is_enabled": False,
"category.is_enabled": False,
"datetime.is_enabled": False,})
profile = dp.Profiler(data, options=profile_options)
# get the prediction from the data profiler
def get_structured_results(results):
columns = []
predictions = []
for col_report in results['data_stats']:
columns.append(col_report['column_name'])
predictions.append(col_report['data_label'])
df_results = pd.DataFrame({'Column': columns, 'Prediction': predictions})
return df_results
results = profile.report()
print(get_structured_results(results))
# -
# In this example, the results show that the Data Profiler is able to detect integers, URLs, address, and floats appropriately. Unknown is typically strings of text, which is appropriate for those columns.
# ## Unstructured Data Prediction
# Besides structured data, the Labeler detects the sensitive information on the unstructured text. We use a sample of spam email in Enron email dataset for this demo. As above, we start investigating the content of the given email sample.
# +
# load data
data = "Message-ID: <11111111.1111111111111.<EMAIL>>\n" + \
"Date: Fri, 10 Aug 2005 11:31:37 -0700 (PDT)\n" + \
"From: w..<EMAIL>\n" + \
"To: <EMAIL>\n" + \
"Subject: RE: ABC\n" + \
"Mime-Version: 1.0\n" + \
"Content-Type: text/plain; charset=us-ascii\n" + \
"Content-Transfer-Encoding: 7bit\n" + \
"X-From: Smith, <NAME>. </O=ENRON/OU=NA/CN=RECIPIENTS/CN=SSMITH>\n" + \
"X-To: <NAME> </O=ENRON/OU=NA/CN=RECIPIENTS/CN=JSMITH>\n" + \
"X-cc: \n" + \
"X-bcc: \n" + \
"X-Folder: \SSMITH (Non-Privileged)\Sent Items\n" + \
"X-Origin: Smith-S\n" + \
"X-FileName: SSMITH (Non-Privileged).pst\n\n" + \
"All I ever saw was the e-mail from the office.\n\n" + \
"Mary\n\n" + \
"-----Original Message-----\n" + \
"From: <NAME> \n" + \
"Sent: Friday, August 10, 2005 13:07 PM\n" + \
"To: Smith, <NAME>.\n" + \
"Subject: ABC\n\n" + \
"Have you heard any more regarding the ABC sale? I guess that means that " + \
"it's no big deal here, but you think they would have send something.\n\n\n" + \
"<NAME>\n" + \
"123-456-7890\n"
# convert string data to list to feed into the labeler
data = [data]
# -
# By default, the Labeler predicts the results at the character level for unstructured text.
# +
labeler = dp.DataLabeler(labeler_type='unstructured')
# make predictions and get labels per character
predictions = labeler.predict(data)
# display results
print(predictions['pred'])
# -
# In addition to the character-level result, the Labeler provides the results at the word level following the standard NER (Named Entity Recognition), e.g., utilized by spaCy.
# +
# convert prediction to word format and ner format
# Set the output to the NER format (start position, end position, label)
labeler.set_params(
{ 'postprocessor': { 'output_format':'ner', 'use_word_level_argmax':True } }
)
# make predictions and get labels per character
predictions = labeler.predict(data)
# display results
print('\n')
print('=======================Prediction======================\n')
for pred in predictions['pred'][0]:
print('{}: {}'.format(data[0][pred[0]: pred[1]], pred[2]))
print('--------------------------------------------------------')
# -
# Here, the Labeler is able to identify sensitive information such as datetime, email address, person names, and phone number in an email sample.
# ## Train the Labeler from Scratch
# The Labeler can be trained from scratch with a new list of labels. Below, we show an example of training the Labeler on a dataset with labels given as the columns of that dataset. For brevity's sake, let's only train a few epochs with a subset of a dataset.
# +
data = dp.Data("../dataprofiler/tests/data/csv/SchoolDataSmall.csv")
df = data.data[["OPEID6", "INSTURL", "SEARCH_STRING"]]
df.head()
# split data to training and test set
split_ratio = 0.2
df = df.sample(frac=1).reset_index(drop=True)
data_train = df[:int((1 - split_ratio) * len(df))]
data_test = df[int((1 - split_ratio) * len(df)):]
# train a new labeler with column names as labels
if not os.path.exists('data_labeler_saved'):
os.makedirs('data_labeler_saved')
labeler = dp.train_structured_labeler(
data=data_train,
save_dirpath="data_labeler_saved",
epochs=10,
default_label="OPEID6"
)
# -
# The trained Labeler is then used by the Data Profiler to provide the prediction on the new dataset.
# +
# predict with the labeler object
profile_options.set({'structured_options.data_labeler.data_labeler_object': labeler})
profile = dp.Profiler(data_test, options=profile_options)
# get the prediction from the data profiler
results = profile.report()
print(get_structured_results(results))
# -
# Another way to use the trained Labeler is through the directory path of the saved labeler.
# +
# predict with the labeler loaded from path
profile_options.set({'structured_options.data_labeler.data_labeler_dirpath': 'data_labeler_saved'})
profile = dp.Profiler(data_test, options=profile_options)
# get the prediction from the data profiler
results = profile.report()
print(get_structured_results(results))
# -
# ## Transfer Learning a Labeler
# Instead of training a model from scratch, we can also transfer learn to improve the model and/or extend the labels. Again for brevity's sake, let's only train a few epochs with a small dataset at the cost of accuracy.
# +
data = dp.Data("../dataprofiler/tests/data/csv/SchoolDataSmall.csv")
df_data = data.data[["OPEID6", "INSTURL", "SEARCH_STRING"]]
# prep data
df_data = df_data.reset_index(drop=True).melt()
df_data.columns = [1, 0] # labels=1, values=0 in that order
df_data = df_data.astype(str)
new_labels = df_data[1].unique().tolist()
# load structured Labeler w/ trainable set to True
labeler = dp.DataLabeler(labeler_type='structured', trainable=True)
# Reconstruct the model to add each new label
for label in new_labels:
labeler.add_label(label)
# this will use transfer learning to retrain the labeler on your new
# dataset and labels.
# Setting labels with a list of labels or label mapping will overwrite the existing labels with new ones
# Setting the reset_weights parameter to false allows transfer learning to occur
model_results = labeler.fit(x=df_data[0], y=df_data[1], validation_split=0.2,
epochs=10, labels=None, reset_weights=False)
# -
# Let's display the training results of the last epoch:
print("{:16s} Precision Recall F1-score Support".format(""))
for item in model_results[-1][2]:
print("{:16s} {:4.3f} {:4.3f} {:4.3f} {:7.0f}".format(item,
model_results[-1][2][item]["precision"],
model_results[-1][2][item]["recall"],
model_results[-1][2][item]["f1-score"],
model_results[-1][2][item]["support"]))
# It is now trained to detect additional labels! The model results here show all the labels training accuracy. Since only new labels existed in the dataset, only the new labels are given accuracy scores. Keep in mind this is a small dataset for brevity's sake and that real training would involve more samples and better results.
# ## Saving and Loading a Labeler
# The Labeler can easily be saved or loaded with one simple line.
# +
# Ensure save directory exists
if not os.path.exists('my_labeler'):
os.makedirs('my_labeler')
# Saving the labeler
labeler.save_to_disk("my_labeler")
# Loading the labeler
labeler = dp.DataLabeler(labeler_type='structured', dirpath="my_labeler")
# -
# ## Building a Labeler from the Ground Up
# As mentioned earlier, the labeler is comprised of three components, and each of the compenents can be created and interchanged in the the labeler pipeline.
# +
import random
from dataprofiler.labelers.character_level_cnn_model import \
CharacterLevelCnnModel
from dataprofiler.labelers.data_processing import \
StructCharPreprocessor, StructCharPostprocessor
model = CharacterLevelCnnModel({"PAD":0, "UNKNOWN":1, "Test_Label":2})
preprocessor = StructCharPreprocessor()
postprocessor = StructCharPostprocessor()
labeler = dp.DataLabeler(labeler_type='structured')
labeler.set_preprocessor(preprocessor)
labeler.set_model(model)
labeler.set_postprocessor(postprocessor)
# check for basic compatibility between the processors and the model
labeler.check_pipeline()
# Optionally set the parameters
parameters={
'preprocessor':{
'max_length': 100,
},
'model':{
'max_length': 100,
},
'postprocessor':{
'random_state': random.Random(1)
}
}
labeler.set_params(parameters)
labeler.help()
# -
# The components can each be created if you inherit the BaseModel and BaseProcessor for the model and processors, respectively. More info can be found about coding your own components in the Labeler section of the [documentation]( https://capitalone.github.io/dataprofiler). In summary, the Data Profiler open source library can be used to scan sensitive information in both structured and unstructured data with different file types. It supports multiple input formats and output formats at word and character levels. Users can also train the labeler on their own datasets.
|
examples/labeler.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Word sets by accents
#
# We make some classes of words, defined by the accents they contain, and save them as sets, to be used in queries.
import re
from tf.app import use
from tf.lib import writeSets
A = use("bhsa:clone", hoist=globals())
# We define the accents and create a regular expression out of them.
A_ACCENTS = set("04 24 33 63 70 71 72 73 74 93 94".split())
A_PAT = "|".join(A_ACCENTS)
A_RE = re.compile(f"(?:{A_PAT})")
A_RE
# We make two sets of words: words that contain one or more accents in `A_ACCENTS` and words that don't.
#
# The first set we call `word_a` and the other set `word_non_a`.
#
# We go through all words of the whole corpus.
# +
wordA = set()
wordNonA = set()
A.indent(reset=True)
A.info("Classifying words")
for w in F.otype.s("word"):
translit = F.g_word.v(w)
if A_RE.search(translit):
wordA.add(w)
else:
wordNonA.add(w)
A.info(f"word_a has {len(wordA):>6} members")
A.info(f"word_non_a has {len(wordNonA):>6} members")
# -
# Collect the sets in a dictionary that assigns names to them:
accents = dict(
word_a=wordA,
word_non_a=wordNonA,
)
# Test the set in a query:
query = """
book book=Genesis
word_a
g_cons~^(?![KL]$)
trailer~[^&]
"""
results = A.search(query, sets=accents)
A.table(results, end=5)
A.table(results, end=5, fmt="text-trans-full")
query = """
book book=Genesis
word_non_a
g_cons~^(?![KL]$)
trailer~[^&]
"""
results = A.search(query, sets=accents)
A.table(results, end=5)
A.table(results, end=5, fmt="text-trans-full")
# Now save the sets as a TF file in your Downloads folder (if you want it in an other place,
# tweak the variable `SET_DIR` below.
#
# We use the TF helper function
# [`writeSets`](https://annotation.github.io/text-fabric/tf/lib.html#tf.lib.writeSets)
# to do the work.
# +
SET_DIR = "~/Downloads"
writeSets(accents, f"{SET_DIR}/accents")
# -
# Check:
# !ls -l ~/Downloads/accents
# Now you can use this set in the text-fabric browser by saying:
#
# ```sh
# text-fabric bhsa --sets=~/Downloads/accents
# ```
# 
|
tutorial/cookbook/accents.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/siriMallika/3D-Object-Reconstruction-from-Multi-View-Monocular-RGB-images/blob/master/Day_5_python_by_Kru_Lookkaew.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="FAcomD0hew-c"
# https://www.coursehero.com/file/48638801/2110101-Midterm-Sample411115062170533624pdf/
# + [markdown] id="doifmu4cep25"
# 1. ถ้าให้คำสั่ง a = 123.4 ทำงาน ตามด้วยให้ a = "123.4" ทำงาน แล้วตัวแปร a เก็บข้อมูลประเภทใด
# + colab={"base_uri": "https://localhost:8080/"} id="bJMYxKMgYBZE" outputId="2a44f724-f25c-4bb9-fc05-aa1ab81c48f7"
a = 123.4
a = "123.4"
type(a)
# + [markdown] id="fhDfJDrVfLSg"
# 2. ชื่อตัวแปรในข้อใดผิดกฎในภาษา
# + id="hW4btDwQfPgX"
T^T
# + [markdown] id="vPg4yVyEfU82"
# 3. ถ้า a = 87634 ค าสั่ง a%1000//10%10 มีค่าเท่ากับข้อใด
#
#
# ---
#
#
# A) 7 B) 6 C) 3 D) 4 E) ไม่มีข้อใดถูก
#
# + colab={"base_uri": "https://localhost:8080/"} id="thqFWp27fSQO" outputId="c191fcaa-0468-46e6-daa1-cffd51bbc828"
a = 87634
a%1000//10%10
# + [markdown] id="kqjEmWatfy-s"
# 4. การคำนวณในข้อใดได้ผลต่างจากข้ออื่น
# + [markdown] id="oy3ia1mohYKT"
# */% +- --> ซ้ายไปขวา
# + [markdown] id="VUMsOEDUhjKO"
# ** ขวา -->ซ้าย
# + id="Xekd06Awgp4B"
import math
# + colab={"base_uri": "https://localhost:8080/"} id="nMkvCRCtgVC5" outputId="a3f0f28a-85b5-464a-b46a-8b2f51cee3d0"
9**1/2 #correct
# + colab={"base_uri": "https://localhost:8080/"} id="9z_y0V2og4v0" outputId="94ed46d4-855a-4374-f631-8483c60c70ae"
9**(1/2)
# + colab={"base_uri": "https://localhost:8080/"} id="Y_JTDPYagzkL" outputId="938fad4e-530a-4a65-ff64-a62abe0ed40a"
9**0.5
# + colab={"base_uri": "https://localhost:8080/"} id="4dVRtOAMgVsL" outputId="b941a433-0120-4225-a82f-b04094456580"
math.sqrt(9)
# + colab={"base_uri": "https://localhost:8080/"} id="2kSU93BkgYUW" outputId="1d6c75bc-c6b5-4cd1-e0e7-c9eb061a3507"
math.pow(9,1/2)
# + colab={"base_uri": "https://localhost:8080/"} id="uLnwwQtmgZ8T" outputId="118ae692-8928-4aee-9422-814b50003dce"
3/1
# + id="tWan--_zghlK"
เหมือนกันทุกข้อ
# + [markdown] id="yA5gginih2Mo"
# 5. เมื่อโปรแกรมทางขวานี้ท างาน แล้วป้อนเลข 12345 จะได้ a มีค่าเท่าใด
# + id="2foQh3Z5h8Dw"
n = int(input())
a = n%10
n //= 10; a = 10*a + n%10
n //= 10; a = 10*a + n%10
n //= 10; a = 10*a + n%10
n //= 10; a = 10*a + n%10
# + [markdown] id="Mcr0bKiuiA79"
# A) 10000 B) 50000 C) 12345 D) 54321
# + [markdown] id="WQVEv7EyiIev"
# 6. คำสั่ง 5+a/9*(c + 32) แทนการคำนวณสูตรใด
# + id="3H8tOXBeiPsu"
# + id="SX-ydYB8j07J"
# + [markdown] id="5lUk_4m2jbu4"
# 7. ให้ m เก็บจ านวนเต็ม ข้อใดไม่ใช่ค าสั่งที่ทดสอบว่า m เก็บเลขเดือนที่ถูกต้อง
# + id="Qcs8_lQflioL"
if m > 0 and m < 13 :
# + id="YEuNohrMl53r"
if not(m < 1 or m > 12) :
# + id="o-h23ovOl9Cw"
if 1 <= m <= 12 :
# + id="smT-vjLjmGxs"
if 0 < m < 13:
# + [markdown] id="K410ludoncZ7"
# 13.ถ้าให้โปรแกรมทางขวานี้ท างาน แล้วป้อนข้อมูลเป็น 7 1 4 6 อยากทราบว่าจะแสดงอะไร
# + id="9_KjClrNoSSe"
a,b,c,d = [int(e) for e in input().split()]
if a > b : a,b = b,a
if b > c : b,c = c,b
if c > d : c,d = d,c
if a > b : a,b = b,a
if b > c : b,c = c,b
if a > b : a,b = b,a
print(b)
# + id="jPxPsWO5ne9m"
A) 7
B) 1
C) 4
D) 6
# + [markdown] id="JyvrbOQCoy23"
# ใช้โปรแกรมข้างล่างนี้ ตอบค าถาม 3 ข้อต่อไปน 14-16
# + id="oynBgCc7oyDU"
a,b,c = [int(e) for e in input().split()]
if a > b :
a += b
elif a < b :
b += a
else:
a = b + c
print(a,b,c)
|
Day_5_python_by_Kru_Lookkaew.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="aQuWDmfm9YOi" colab_type="text"
# <a href="https://colab.research.google.com/github/smnahidemon/Debain-Colab-RDP/blob/main/Debain%20Colab%20RDP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="qYk44mBwJf6E"
# # **Colab RDP** : Remote Desktop to Colab Instance
#
# Google Colab can give you Instance with 12GB of RAM and GPU for 12 hours (Max.) for Free users. Anyone can use it to perform Heavy Tasks.
#
# To use other similiar Notebooks use my Repository **[Colab RDP](https://github.com/smnahidemon/Debain-Colab-RDP)**
# + id="NaFa7M-e9YOr" cellView="form"
#@title **CREATE USER**
#@markdown Enter Username and Password
import os
username = "NAHID" #@param {type:"string"}
password = "<PASSWORD>" #@param {type:"string"}
print("Creating User and Setting it up")
# Creation of user
os.system(f"useradd -m {username}")
# Add user to sudo group
os.system(f"adduser {username} sudo")
# Set password of user to '<PASSWORD>'
os.system(f"echo '{username}:{password}' | sudo chpasswd")
# Change default shell from sh to bash
os.system("sed -i 's/\/bin\/sh/\/bin\/bash/g' /etc/passwd")
print("User Created and Configured")
# + id="m6hF0emftx4h" cellView="form"
#@title **RDP**
#@markdown It takes 4-5 minutes for installation
import os
import subprocess
#@markdown Visit http://remotedesktop.google.com/headless and Copy the command after authentication
CRP = "" #@param {type:"string"}
#@markdown Enter a pin more or equal to 6 digits
Pin = 123456 #@param {type: "integer"}
class CRD:
def __init__(self):
os.system("apt update")
self.installCRD()
self.installDesktopEnvironment()
self.installGoogleChorme()
self.installExtra()
self.finish()
@staticmethod
def installCRD():
print("Installing Chrome Remote Desktop...")
subprocess.run(['wget', 'https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)
subprocess.run(['dpkg', '--install', 'chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)
@staticmethod
def installDesktopEnvironment():
print("Installing Desktop Environment...")
os.system("export DEBIAN_FRONTEND=noninteractive")
os.system("apt install --assume-yes xfce4 desktop-base xfce4-terminal")
os.system("bash -c 'echo \"exec /etc/X11/Xsession /usr/bin/xfce4-session\" > /etc/chrome-remote-desktop-session'")
os.system("apt remove --assume-yes gnome-terminal")
os.system("apt install --assume-yes xscreensaver")
os.system("systemctl disable lightdm.service")
@staticmethod
def installGoogleChorme():
print("Installing Google Chrome...")
subprocess.run(["wget", "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE)
subprocess.run(["dpkg", "--install", "google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)
@staticmethod
def installExtra():
print("Installing Extra Packed...")
subprocess.run(['apt', 'install', 'nautilus ', 'nano', 'y'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'y', 'install', 'obs-studio'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'y', 'install', 'firefox'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'y', 'install', 'qbittorrent'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', 'nload'], stdout=subprocess.PIPE)
@staticmethod
def finish():
print("Finalizing...")
os.system(f"adduser {username} chrome-remote-desktop")
command = f"{CRP} --pin={Pin}"
os.system(f"su - {username} -c '{command}'")
os.system("service chrome-remote-desktop start")
print("Finished Succesfully!")
try:
if username:
if CRP == "":
print("Please enter authcode from the given link")
elif len(str(Pin)) < 6:
print("Enter a pin more or equal to 6 digits")
else:
CRD()
except NameError as e:
print("username variable not found")
print("Create a User First")
|
Debain Colab RDP.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: local-venv
# language: python
# name: local-venv
# ---
import sys
import numpy as np
np.random.seed(42)
import gensim
import keras.backend as K
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Embedding, Lambda
from keras.utils import np_utils
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
# In case your sys.path does not contain the base repo, cd there.
print(sys.path)
# %cd 'PATH_OF_BASE_REPO' # In the solution it will be the path to my repo. This is such that python loads al the files from the top.
# + pycharm={"name": "#%%\n"}
# First let's load the dataset
path = 'dataset/docv2_train_queries.tsv'
queries = pd.read_csv(path, sep='\t', lineterminator='\r', names=['query_id', 'query'])
queries.head()
# -
corpus = [sentence for sentence in queries['query'].values if type(sentence) == str and len(sentence.split(' ')) >= 3]
# + [markdown] pycharm={"name": "#%%\n"}
# At this point we have a list (any iterable will do) of queries that are longer than 3 words. This is normal to filter random queries. Now we must use the `Tokenizer` object to `fit` on the corpus, in order to convert each wor to an ID, and later convert such corpus of list of words into their identifiers.
#
# +
tokenizer = Tokenizer()
# Use the fit_on_text method to fit the tokenizer
None # Fill
print(f'Before the tokenizer: {corpus[:1]}')
#Now use the same "trained" tokenizer to convert the corpus from words to IDs with the text_to_sequences method
corpus = None
print(f'After the tokenizer: {corpus[:1]}')
# -
nb_samples = sum(len(s) for s in corpus)
V = len(tokenizer.word_index) + 1 # Size of the vocabulary, adding the UNK word.
dim = 100 # size of the embedding to create
window_size = 3
epochs=50
batch_size = 1000 # Note that this is because the workstation doesn't have a GPU. In reallity we would like a batch size and events such that we passthrough the dataset a couple of times
BATCH = True
print(f'First 5 corpus items are {corpus[:5]}')
print(f'Length of corpus is {len(corpus)}')
# Now comes the core part, defining the model. Keras provides a convenient Sequential model class to just `add` layers of any type and they will just work. Let's add an `Embedding` layer (that will map the word ids into a vector of size 100), a `Lambda` to average the words out in a sentence, and a `Dense layer` to select the best word on the other end. This is classic CBOW.
# + pycharm={"name": "#%%\n"}
cbow = Sequential()
cbow.add() # Add an Embedding layer with input_dim V, output_dim to be 100, and the input_length to be twice our window
cbow.add() # Add a Lambda that takes a lambda function using the K.mean method to average the words. The output_shape should be (dim, ).
cbow.add(Dense(V, activation='softmax')) # We add a classic Dense layer to just select with a softmax the best word
# + pycharm={"name": "#%%\n"}
# Compile the model with a loss and optimizer of your liking.
cbow.compile()
cbow.summary()
# +
# This is the algorithmic part of batching the dataset and yielding the window of words and expected middle word for each bacth as a generator.
def generate_data(corpus, window_size, V, batch_size=batch_size):
number_of_batches = (len(corpus) // batch_size) + 1
for batch in range(number_of_batches):
lower_end = batch*batch_size
upper_end = (batch+1)*batch_size if batch+1 < number_of_batches else len(corpus)
mini_batch_size = upper_end - lower_end
maxlen = window_size*2
X = np.zeros((mini_batch_size, maxlen))
Y = np.zeros((mini_batch_size, V))
for query_id, words in enumerate(corpus[lower_end:upper_end]):
L = len(words)
for index, word in enumerate(words):
contexts = []
labels = []
s = index - window_size
e = index + window_size + 1
contexts.append([words[i] for i in range(s, e) if 0 <= i < L and i != index])
labels.append(word)
x = sequence.pad_sequences(contexts, maxlen=maxlen)
y = np_utils.to_categorical(labels, V)
X[query_id] = x
Y[query_id] = y
yield (X, Y)
# +
# If data is small, you can just generate the whole dataset and load it in memory to use the fit method
#
# def generate_data(corpus, window_size, V):
# maxlen = window_size*2
# X = np.zeros((len(corpus), maxlen))
# Y = np.zeros((len(corpus), V))
# for query_id, words in enumerate(corpus):
# L = len(words)
# for index, word in enumerate(words):
# contexts = []
# labels = []
# s = index - window_size
# e = index + window_size + 1
# contexts.append([words[i] for i in range(s, e) if 0 <= i < L and i != index])
# labels.append(word)
# x = sequence.pad_sequences(contexts, maxlen=maxlen)
# y = np_utils.to_categorical(labels, V)
# X[query_id] = x
# Y[query_id] = y
# return (X, Y)
# -
def fit_model():
if not BATCH:
X, Y = generate_data(corpus, window_size, V)
print(f'Size of X is {X.shape} and Y is {Y.shape}')
cbow.fit(X, Y, epochs = epochs)
else:
index = 1
for x, y in generate_data(corpus, window_size, V):
print(f'Training on Iteration: {index}')
index += 1
history = cbow.train_on_batch(x, y, reset_metrics=False, return_dict=True)
print(history)
if index > epochs:
break
fit_model()
# What happens with the losses? Try to plot them to see what trend the follow! Could it happen that some iteration gets a bigger loss than a previous iteration?
# For each word we save the weights in a standard format
with open('./1-synonyms/lab1/vectors.txt' ,'w') as f:
f.write('{} {}\n'.format(V-1, dim))
vectors = cbow.get_weights()[0]
for word, i in tokenizer.word_index.items():
str_vec = ' '.join(map(str, list(vectors[i, :])))
f.write('{} {}\n'.format(word, str_vec))
w2v = gensim.models.KeyedVectors.load_word2vec_format('../1-synonyms/lab1/vectors.txt', binary=False)
# Check if they make sense. You can play a bit.
w2v.most_similar(positive=['gasoline'])
w2v.most_similar(negative=['apple'])
|
1-synonyms/lab1/exercise/create_embedding.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # Create your first deep learning neural network
//
// ## Introduction
//
// This is the first of our [beginner tutorial series](https://github.com/awslabs/djl/tree/master/jupyter/tutorial) that will take you through creating, training, and running inference on a neural network. In this tutorial, you will learn how to use the built-in `Block` to create your first neural network. We will be building one of the simplest deep learning networks, a Multilayer Perceptron (MLP). For more information, see [Multilayer Perceptron](https://en.wikipedia.org/wiki/Multilayer_perceptron).
//
// ## Block API
//
// [Blocks](https://javadoc.djl.ai/api/0.2.1/index.html?ai/djl/nn/Block.html) serve a purpose similar to functions that convert an input `NDList` to an output `NDList`. They can represent single operations, parts of a neural network, and even the whole neural network. What makes blocks special is that they contain a number of parameters that are used in their function and are trained during deep learning. As these parameters are trained, the function represented by the blocks get more and more accurate.
//
// When building these block functions, the easiest way is to use composition. Similar to how functions are built by calling other functions, blocks can be built by combining other blocks. We refer to the containing block as the parent and the sub-blocks as the children.
//
//
// ## Step 1: Setup development environment
//
// ### Installation
//
// This tutorial requires the installation of the Java Jupyter Kernel. To install the kernel, see the [Jupyter README](https://github.com/awslabs/djl/blob/master/jupyter/README.md).
// +
// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
// %maven ai.djl:api:0.3.0-SNAPSHOT
// %maven org.slf4j:slf4j-api:1.7.26
// %maven org.slf4j:slf4j-simple:1.7.26
// -
import ai.djl.*;
import ai.djl.nn.*;
import ai.djl.nn.core.*;
import ai.djl.training.*;
// ## Step 2: Determine your input and output size
//
// The MLP model uses a one dimensional vector as the input and the output. You should determine the appropriate size of this vector based on your input data and what you will use the output of the model for. In a later tutorial, we will use a 28x28 image as the input and a 10 class classification as the output.
long inputSize = 28*28;
long outputSize = 10;
// ## Step 3: Create a **SequentialBlock**
//
// We provide several helpers to make it easy to build common block structures. For the MLP we will use the [SequentialBlock](https://javadoc.djl.ai/api/0.2.1/index.html?ai/djl/nn/SequentialBlock.html), a container block whose children form a chain of blocks with each child block feeding its output in sequence to the next.
//
SequentialBlock block = new SequentialBlock();
// ## Step 4: Add blocks to SequentialBlock
//
// An MLP is organized into several layers. Each layer is composed of a [Linear Block](https://javadoc.djl.ai/api/0.2.1/index.html?ai/djl/nn/core/Linear.html) and a non-linear activation function. We will use the popular [ReLU](https://javadoc.djl.ai/api/0.2.1/ai/djl/nn/Activation.html#reluBlock--) as our activation function.
//
// The first layer and last layers have fixed sizes depending on your desired input and output size. However, you are free to choose the number and sizes of the middle layers in the network. We will create a smaller MLP with two middle layers.
// +
block.add(Blocks.batchFlattenBlock(inputSize));
block.add(new Linear.Builder().setOutChannels(128).build());
block.add(Activation.reluBlock());
block.add(new Linear.Builder().setOutChannels(64).build());
block.add(Activation.reluBlock());
block.add(new Linear.Builder().setOutChannels(outputSize).build());
block
// -
// ## Summary
//
// Now that you've successfully created your first neural network, you can use this network to train your model.
//
// Next chapter: [Train your first model](train_your_first_model.ipynb)
//
// You can find the complete source code for this tutorial in the [model zoo](https://github.com/awslabs/djl/blob/master/model-zoo/src/main/java/ai/djl/basicmodelzoo/cv/classification/Mlp.java).
|
jupyter/tutorial/create_your_first_network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + code_folding=[0, 19]
def table_to_output(groupby_table):
temp = groupby_table.reset_index()
temp2 = temp[temp['True_Label']=="Phage"]
if len(temp2)==0:
False_Negative_Phage = Phage_Count
else:
Phage_Predicted = temp2.iloc[0][1]
False_Negative_Phage = Phage_Count - Phage_Predicted
temp3 = temp[temp['True_Label']=="Prophage"]
if len(temp3)==0:
False_Negative_Prophage = Prophage_Count
else:
Prophage_Predicted = temp3.iloc[0][1]
False_Negative_Prophage = Prophage_Count - Prophage_Predicted
return(False_Negative_Phage, False_Negative_Prophage)
def Score(FNP, FNPro, data, Prophage=True):
if Prophage==True:
y_true = [1 if x == "Phage" else 0 for x in data['True_Label']]
y_pred = [1]*len(data)
listofzeros = [0]*FNP
listofones = [1]*FNP
y_pred = y_pred + listofzeros
y_true = y_true + listofones
else:
data['True_Label'] = data['True_Label'].replace(regex='Prophage', value="Phage")
y_true = [1 if x == "Phage" else 0 for x in data['True_Label']]
y_pred = [1]*len(data)
listofzeros = [0]*FNP
listofones = [1]*FNP
y_pred = y_pred + listofzeros
y_true = y_true + listofones
listofzeros2 = [0]*FNPro
listofones2 = [1]*FNPro
y_pred = y_pred + listofzeros2
y_true = y_true + listofones2
p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
return(p, r, f1)
# +
import pandas as pd
from sklearn.metrics import precision_score, recall_score, f1_score
input_file = "../../Kraken_Output/ZHC.csv"
df = pd.read_csv(input_file)
simulation = 'Zero'
complexity = 'High'
df2 = pd.read_csv("../../Contig_Abundance_By_Class_Simulation.csv")
df1 = df2[(df2.Simulation == simulation) & (df2.Complexity == complexity)]
try:
Phage_Count = int(df1['Phage'])
except:
Phage_Count = 0
try:
Prophage_Count = int(df1['Prophage'])
except:
Prophage_Count = 0
df['True_Label'] = [x.split("_")[0] for x in df["ContigID"]]
# +
e = df.groupby("True_Label").count()
FNP, FNPro = table_to_output(e)
p, r, f1 = Score(FNP, FNPro, df, Prophage=True)
my_dict = {}
my_dict['Prophage_Bacteria'] = [p,r,f1]
p, r, f1 = Score(FNP, FNPro, df, Prophage=False)
my_dict['Prophage_Viruses'] = [p,r,f1]
w = pd.DataFrame(my_dict.items(), columns=["Prophage Category",2])
w = w.set_index("Prophage Category")
w['Precision'] = [x[0] for x in w[2]]
w['Recall'] = [x[1] for x in w[2]]
w['F1 Score'] = [x[2] for x in w[2]]
w['Simulation'] = "zero"
w['Complexity'] = "high"
w['Tool'] = "Kraken"
w['Parameter'] ="NA"
w = w.drop(2, axis=1)
w.to_csv("../../Kraken_Output/Kraken_Scores.csv", header=None, mode='a')
# -
# + code_folding=[1, 12]
## Remove Prophages
def table_to_output(groupby_table):
temp = groupby_table.reset_index()
temp2 = temp[temp['True_Label']=="Phage"]
if len(temp2)==0:
False_Negative_Phage = Phage_Count
else:
Phage_Predicted = temp2.iloc[0][1]
False_Negative_Phage = Phage_Count - Phage_Predicted
return(False_Negative_Phage)
def Score(FNP, data):
y_true = [1 if x == "Phage" else 0 for x in data['True_Label']]
y_pred = [1]*len(data)
listofzeros = [0]*FNP
listofones = [1]*FNP
y_pred = y_pred + listofzeros
y_true = y_true + listofones
p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
return(p, r, f1)
# +
import pandas as pd
from sklearn.metrics import precision_score, recall_score, f1_score
input_file = "../../Kraken_Output/ELC.csv"
df = pd.read_csv(input_file)
simulation = 'Exponential'
complexity = 'Low'
df2 = pd.read_csv("../../Contig_Abundance_By_Class_Simulation.csv")
df1 = df2[(df2.Simulation == simulation) & (df2.Complexity == complexity)]
try:
Phage_Count = int(df1['Phage'])
except:
Phage_Count = 0
df['True_Label'] = [x.split("_")[0] for x in df["ContigID"]]
# +
e = df.groupby("True_Label").count()
FNP = table_to_output(e)
my_dict = {}
p, r, f1 = Score(FNP, df)
my_dict['Prophage_Removed'] = [p,r,f1]
w = pd.DataFrame(my_dict.items(), columns=["Prophage Category",2])
w = w.set_index("Prophage Category")
w['Precision'] = [x[0] for x in w[2]]
w['Recall'] = [x[1] for x in w[2]]
w['F1 Score'] = [x[2] for x in w[2]]
w['Simulation'] = "exponential"
w['Complexity'] = "low"
w['Tool'] = "Kraken"
w['Parameter'] ="NA"
w = w.drop(2, axis=1)
w.to_csv("../../Kraken_Output/Kraken_Scores_Prophage_Removed.csv", header=None, mode='a')
# -
|
data/Tool_Performance/Tool_Predictions/PerformanceScripts/Kraken_Parser.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Interactive time series with time slice retrieval
#
# This notebook shows you how to use interactive plots to select time series for different locations and retrieve the imagery that corresponds with different points on a time series
# %pylab notebook
from __future__ import print_function
import datacube
import xarray as xr
from datacube.storage import masking
from datacube.storage.masking import mask_to_dict
from matplotlib import pyplot as plt
from IPython.display import display
import ipywidgets as widgets
dc = datacube.Datacube(app='Interactive time series analysis')
# +
#### DEFINE SPATIOTEMPORAL RANGE AND BANDS OF INTEREST
#Use this to manually define an upper left/lower right coords
#Define temporal range
start_of_epoch = '2013-01-01'
end_of_epoch = '2016-12-31'
#Define wavelengths/bands of interest, remove this kwarg to retrieve all bands
bands_of_interest = [#'blue',
'green',
#'red',
'nir',
'swir1',
#'swir2'
]
#Define sensors of interest
sensors = ['ls8']#, 'ls7', 'ls5']
query = {'time': (start_of_epoch, end_of_epoch)}
lat_max = -17.42
lat_min = -17.45
lon_max = 140.90522
lon_min = 140.8785
query['x'] = (lon_min, lon_max)
query['y'] = (lat_max, lat_min)
query['crs'] = 'EPSG:4326'
# -
print(query)
# ## retrieve the NBAR and PQ for the spatiotemporal range of interest
#
#Define which pixel quality artefacts you want removed from the results
mask_components = {'cloud_acca':'no_cloud',
'cloud_shadow_acca' :'no_cloud_shadow',
'cloud_shadow_fmask' : 'no_cloud_shadow',
'cloud_fmask' :'no_cloud',
'blue_saturated' : False,
'green_saturated' : False,
'red_saturated' : False,
'nir_saturated' : False,
'swir1_saturated' : False,
'swir2_saturated' : False,
'contiguous':True}
#Retrieve the NBAR and PQ data for sensor n
sensor_clean = {}
for sensor in sensors:
#Load the NBAR and corresponding PQ
sensor_nbar = dc.load(product= sensor+'_nbar_albers', group_by='solar_day', measurements = bands_of_interest, **query)
sensor_pq = dc.load(product= sensor+'_pq_albers', group_by='solar_day', **query)
#grab the projection info before masking/sorting
crs = sensor_nbar.crs
crswkt = sensor_nbar.crs.wkt
affine = sensor_nbar.affine
#This line is to make sure there's PQ to go with the NBAR
sensor_nbar = sensor_nbar.sel(time = sensor_pq.time)
#Apply the PQ masks to the NBAR
cloud_free = masking.make_mask(sensor_pq, **mask_components)
good_data = cloud_free.pixelquality.loc[start_of_epoch:end_of_epoch]
sensor_nbar = sensor_nbar.where(good_data)
sensor_clean[sensor] = sensor_nbar
# ## Plotting an image and select a location to retrieve a time series
#select time slice of interest - this is trial and error until you get a decent image
time_slice_i = 140
rgb = sensor_clean['ls8'].isel(time =time_slice_i).to_array(dim='color').sel(color=['swir1', 'nir', 'green']).transpose('y', 'x', 'color')
#rgb = nbar_clean.isel(time =time_slice).to_array(dim='color').sel(color=['swir1', 'nir', 'green']).transpose('y', 'x', 'color')
fake_saturation = 4500
clipped_visible = rgb.where(rgb<fake_saturation).fillna(fake_saturation)
max_val = clipped_visible.max(['y', 'x'])
scaled = (clipped_visible / max_val)
# +
#Click on this image to chose the location for time series extraction
w = widgets.HTML("Event information appears here when you click on the figure")
def callback(event):
global x, y
x, y = int(event.xdata + 0.5), int(event.ydata + 0.5)
w.value = 'X: {}, Y: {}'.format(x,y)
fig = plt.figure(figsize =(12,6))
#plt.scatter(x=trans.coords['x'], y=trans.coords['y'], c='r') #turn this on or off to show location of transect
plt.imshow(scaled, interpolation = 'nearest',
extent=[scaled.coords['x'].min(), scaled.coords['x'].max(),
scaled.coords['y'].min(), scaled.coords['y'].max()])
fig.canvas.mpl_connect('button_press_event', callback)
date_ = sensor_clean['ls8'].time[time_slice_i]
plt.title(date_.astype('datetime64[D]'))
plt.show()
display(w)
# -
#this converts the map x coordinate into image x coordinates
image_coords = ~affine * (x, y)
imagex = int(image_coords[0])
imagey = int(image_coords[1])
#retrieve the time series that corresponds with the location clicked, and drop the no data values
green_ls8 = sensor_clean['ls8'].green.isel(x=[imagex],y=[imagey]).dropna('time', how = 'any')
# ## Click on an interactive time series and pull back an image that corresponds with a point on the time seris
# +
#Use this plot to visualise a time series and select the image that corresponds with a point in the time series
def callback(event):
global time_int, devent
devent = event
time_int = event.xdata
#time_int_ = time_int.astype(datetime64[D])
w.value = 'time_int: {}'.format(time_int)
fig = plt.figure(figsize=(10,5))
fig.canvas.mpl_connect('button_press_event', callback)
plt.show()
display(w)
green_ls8.plot(linestyle= '--', c= 'b', marker = '8', mec = 'b', mfc ='r')
plt.grid()
# -
time_slice = matplotlib.dates.num2date(time_int).date()
rgb2 = sensor_clean['ls8'].sel(time =time_slice, method = 'nearest').to_array(dim='color').sel(color=['swir1', 'nir', 'green']).transpose('y', 'x', 'color')
fake_saturation = 6000
clipped_visible = rgb2.where(rgb2<fake_saturation).fillna(fake_saturation)
max_val = clipped_visible.max(['y', 'x'])
scaled2 = (clipped_visible / max_val)
#This image shows the time slice of choice and the location of the time series
fig = plt.figure(figsize =(12,6))
#plt.scatter(x=trans.coords['x'], y=trans.coords['y'], c='r')
plt.scatter(x = [x], y = [y], c= 'yellow', marker = 'D')
plt.imshow(scaled2, interpolation = 'nearest',
extent=[scaled.coords['x'].min(), scaled.coords['x'].max(),
scaled.coords['y'].min(), scaled.coords['y'].max()])
plt.title(time_slice)
plt.show()
|
notebooks/05_interactive_time_series_with_time_slice_retrieval.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Custom Models
# The goal of this notebook is to show the user how to build their own application-dependant model. To do so we will consider a univariate stochastic volatility model:
# $y_t = \epsilon_t \exp\big(\frac{h_t}{2}\big), \quad \epsilon_t \sim \mathcal{N}(0,1)$
#
# $h_{t+1} = \mu(1 − \phi) + \phi h_t + \sigma_\eta \eta_t, \quad \eta_t \sim \mathcal{N}(0,1)$, $t = 1,..., T,$
#
# $h_0 \sim \mathcal{N}\big(0, \frac{\sigma^2_η}{(1 − \phi^2)}\big)$
# ## Imports
# First thing first: let's import what we'll be needing.
import sys
sys.path.append("../..")
import matplotlib.pyplot as plt
import numpy as np
# tensorflow imports
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# filterflow model imports
from filterflow import SMC, State
from filterflow.observation import ObservationModelBase
from filterflow.proposal import BootstrapProposalModel
from filterflow.transition import RandomWalkModel
# ### The Model
# The transition model can be encoded by adding a location to the normal distribution:
# +
dx = 1
mu = tf.constant(-2.)
phi = tf.constant(0.2)
sigma_x = 0.1 * tf.ones(dx)
noise_rv = tfd.MultivariateNormalDiag(mu*(1-phi), sigma_x)
transition_matrix = phi * tf.eye(dx)
transition_model = RandomWalkModel(transition_matrix, noise_rv)
# -
# The Observation model however is a bit more complicated and you'll have to code it yourself, hopefully, this is done in a few lines of code:
# ObservationModelBase is a base class that only has a loglikelihood method
# +
# ObservationModelBase.loglikelihood?
# -
class StochVolObservationModel(ObservationModelBase):
def __init__(self, observation_error, name='StochVolObservationModel'):
super(StochVolObservationModel, self).__init__(name=name)
self._observation_error = observation_error
def _get_distribution(self, exp_particles):
scale = tfp.bijectors.Scale(exp_particles)
return tfd.TransformedDistribution(self._observation_error, scale)
def loglikelihood(self, state, observation):
# To simplify our work, let's simply use the tfp.bijectors utilities:
exp_particles = tf.exp(state.particles / 2.)
dist = self._get_distribution(exp_particles)
return dist.log_prob(observation)
# +
dy = 1
sigma_y = 0.1 * tf.ones(dx)
error_rv = tfd.MultivariateNormalDiag(scale_diag=sigma_y)
observation_model = StochVolObservationModel(error_rv)
# -
# ### Let's test it
# First get some data:
import quandl
API_KEY = ""
# You can visit https://www.quandl.com/ to get your own API key
start_date = "2019-06-01"
end_date = "2020-01-01"
quandl.ApiConfig.api_key = API_KEY
d = quandl.get("ECB/EURUSD", start_date=start_date, end_date=end_date)
d.head()
# Now take the log-returns:
logreturns = np.log(d).diff().dropna()
logreturns.plot()
y = logreturns.values.astype(np.float32)
# ### We can now build our filter:
# filterflow resampling imports
from filterflow.resampling import NeffCriterion, StratifiedResampler, RegularisedTransform
proposal_model = BootstrapProposalModel(transition_model)
criterion = NeffCriterion(0.5, is_relative=True)
resampler = StratifiedResampler()
regularized_resampler = RegularisedTransform(0.25)
smc = SMC(observation_model, transition_model, proposal_model, criterion, resampler)
regularized_smc = SMC(observation_model, transition_model, proposal_model, criterion, regularized_resampler)
batch_size = 1
n_particles = 100
initial_state = State(np.random.normal(mu, sigma_x ** 2 / (1 - phi ** 2), [batch_size, n_particles, dx]).astype(np.float32))
observations_dataset = tf.data.Dataset.from_tensor_slices(y)
filtered_states = smc(initial_state, observations_dataset, n_observations=len(y), return_final=False, seed=111)
filtered_states_regularized = regularized_smc(initial_state, observations_dataset, n_observations=len(y), return_final=False, seed=111)
# +
from filterflow.utils import mean, std
weighted_average = mean(filtered_states)
weighted_std = std(filtered_states, weighted_average)
weighted_average = weighted_average.numpy().squeeze()
weighted_std = weighted_std.numpy().squeeze()
weighted_average_regularized = mean(filtered_states_regularized)
weighted_std_regularized = std(filtered_states_regularized, weighted_average_regularized)
weighted_average_regularized = weighted_average_regularized.numpy().squeeze()
weighted_std_regularized = weighted_std_regularized.numpy().squeeze()
# -
linespace = np.arange(0, len(y))
plt.plot(linespace, weighted_average)
plt.fill_between(linespace, weighted_average - weighted_std, weighted_average + weighted_std, alpha = 0.5)
linespace = np.arange(0, len(y))
plt.plot(linespace, weighted_average_regularized)
plt.fill_between(linespace, weighted_average_regularized - weighted_std_regularized, weighted_average_regularized + weighted_std_regularized, alpha = 0.5)
|
notebooks/examples/CustomModel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Model inference
#
# In the previous notebook, we saw how to do model inference on the test set. Here, we show how to load an already trained/fine-tuned model and a dataset and then do model inference.
# +
# solve issue with autocomplete
# %config Completer.use_jedi = False
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
from mapreader import loader
from mapreader import classifier
from mapreader import load_patches
from mapreader import patchTorchDataset
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.interpolate import griddata
from torchvision import transforms
try:
import cartopy.crs as ccrs
ccrs_imported = True
except ImportError:
print(f"[WARNING] cartopy could not be imported!")
print(f"[WARNING] cartopy is used for plotting the results on maps.")
print(f"[WARNING] You can ignore this if you don't want to plot the results.")
ccrs_imported = False
# -
# ## Read patches (i.e., sliced images) and add metadata
#
# First, we need to load a set of images/pathces. We use a CV model to do inference on these images.
# +
mymaps = load_patches("./maps_tutorial/slice_50_50/*101168609*PNG",
parent_paths="./maps_tutorial/map_101168609.png")
path2metadata = "./maps_tutorial/metadata.csv"
mymaps.add_metadata(metadata=path2metadata)
# -
# Calculate coordinates and some pixel stats
mymaps.add_center_coord()
mymaps.calc_pixel_stats()
maps_pd, patches_pd = mymaps.convertImages(fmt="dataframe")
patches_pd.head()
# In `.add_metadata`:
#
# ```python
# # remove duplicates using "name" column
# if columns == None:
# columns = list(metadata_df.columns)
#
# if ("name" in columns) and ("image_id" in columns):
# print(f"Both 'name' and 'image_id' columns exist! Use 'name' to index.")
# image_id_col = "name"
# if "name" in columns:
# image_id_col = "name"
# elif "image_id" in columns:
# image_id_col = "image_id"
# else:
# raise ValueError("'name' or 'image_id' should be one of the columns.")
# ```
#
# The dataframe should have either `name` or `image_id` column, and that column should be the image ID (NOT the path to the image).
# Rename image_path to image_id
# This is needed later (see `.add_metadata`)
patches_pd = patches_pd.reset_index()
patches_pd.rename(columns={"index": "image_id"},
inplace=True)
patches_pd.head()
patches2infer = patches_pd[["image_path"]]
patches2infer
# +
# XXX TESTING
# patches2infer = patches2infer[:1000]
# -
# ## Add patches to `patchTorchDataset`
# +
# ------------------
# --- Transformation
# ------------------
# FOR INCEPTION
#resize2 = 299
# otherwise:
resize2 = 224
# mean and standard deviations of pixel intensities in
# all the patches in 6", second edition maps
normalize_mean = 1 - np.array([0.82860442, 0.82515008, 0.77019864])
normalize_std = 1 - np.array([0.1025585, 0.10527616, 0.10039222])
# other options:
# normalize_mean = [0.485, 0.456, 0.406]
# normalize_std = [0.229, 0.224, 0.225]
data_transforms = {
'val': transforms.Compose(
[transforms.Resize(resize2),
transforms.ToTensor(),
transforms.Normalize(normalize_mean, normalize_std)
]),
}
# -
patches2infer_dataset = patchTorchDataset(patches2infer,
transform=data_transforms["val"])
# ## Load a classifier (normally trained in notebook 003)
# +
myclassifier = classifier(device="default")
# HERE, you need to load a model stored in ./models_tutorial/
# e.g.,
# myclassifier.load("./models_tutorial/checkpoint_10.pkl")
myclassifier.load("./models_tutorial/INSERT_MODEL_NAME")
# -
# Add dataset to myclassifier
batch_size=64
myclassifier.add2dataloader(patches2infer_dataset,
set_name="infer_test",
batch_size=batch_size,
shuffle=False,
num_workers=0)
#
# ## Inference on `set_name`
myclassifier.inference(set_name="infer_test")
#
# ## Plot sample results
myclassifier.class_names
myclassifier.inference_sample_results(num_samples=8,
class_index=1,
set_name="infer_test",
min_conf=50,
max_conf=None)
# ## Add model inference outputs to `mymaps`
patches2infer['pred'] = myclassifier.pred_label
patches2infer['conf'] = np.max(np.array(myclassifier.pred_conf),
axis=1)
patches2infer
patches_pd = \
patches_pd.merge(patches2infer,
how="outer",
on="image_path",
validate="1:1")
patches_pd.head()
mymaps.add_metadata(patches_pd,
tree_level="child")
# ## Write outputs as CSVs, one file per map sheet
maps_pd, patches_pd = mymaps.convertImages(fmt="dataframe")
patches_pd.head()
output_dir = "./infer_output_tutorial"
os.makedirs(output_dir, exist_ok=True)
for one_map in list(maps_pd.index):
# --- paths
map_name = one_map.split(".")[0]
patch2write = os.path.join(output_dir, f"patch_{map_name}.csv")
sheet2write = os.path.join(output_dir, f"sheet_{map_name}.csv")
# --- write outputs
patches_pd[patches_pd["parent_id"] == one_map].to_csv(patch2write, index=False)
maps_pd[maps_pd.index == one_map].to_csv(sheet2write, index=False)
# ## Load outputs and plot
#
# Although we already have all the required dataframes/variables loaded, we re-load them here as this is a required step in most realistic applications.
# +
mymaps = load_patches("./maps_tutorial/slice_50_50/*101168609*PNG",
parent_paths="./maps_tutorial/*101168609*png")
# add metadata (using CSV files):
path2metadata = "./maps_tutorial/metadata.csv"
mymaps.add_metadata(metadata=path2metadata)
# +
# load the CSV files which contain predictions/confidence/...
path2patch = glob.glob("./infer_output_tutorial/patch*101168609*csv")
for path2metadata in path2patch:
print(path2metadata)
mymaps.add_metadata(metadata=path2metadata,
tree_level="child",
delimiter=",")
# or directly:
# mymaps.add_metadata(patches_pd, tree_level="child")
# -
# Other ways to read:
#
# - Load dataframes, add metadata:
#
# ```python
# mymaps_filt = loader()
#
# mymaps_filt.loadDataframe(parents=maps_pd,
# children_df=patches_filt)
#
# # add metadata (using CSV files):
# path2metadata = "./maps_tutorial/metadata.csv"
# mymaps_filt.add_metadata(metadata=path2metadata)
# ```
#
# - Load CSV files
#
# ```python
# from mapreader import loader
#
# mymaps = loader()
# mymaps.load_csv_file(parent_path="./infer_output_tutorial/sheet_map_101168609.csv",
# child_path="./infer_output_tutorial/patch_map_101168609.csv")
# ```
# +
# List of all parents
all_parents = mymaps.list_parents()
mymaps.show_par(all_parents[0],
value="pred",
border=True,
plot_parent=True,
vmin=0, vmax=1,
figsize=(15, 15),
alpha=0.5,
colorbar="inferno")
# -
maps_pd, patches_pd = mymaps.convertImages(fmt="dataframe")
print(len(patches_pd))
patches_pd.head()
# filter patches with NaNs
patches_filt = patches_pd[~patches_pd["pred"].isna()]
patches_filt = patches_pd[patches_pd["pred"] >= 0]
patches_filt["pred"].value_counts()
# ### other plots
patches_filt2plot = patches_filt[(patches_filt["mean_pixel_A"] > 0.01)]
plt.figure(figsize=(20, 10))
plt.scatter(patches_filt2plot["center_lon"].values,
patches_filt2plot["center_lat"].values,
c="k",
s=1)
plt.xlabel("Longitude", size=30)
plt.ylabel("Latitude", size=30)
plt.xticks(size=24)
plt.yticks(size=24)
plt.show()
plt.figure(figsize=(20, 10))
plt.scatter(patches_filt2plot["center_lon"].values,
patches_filt2plot["center_lat"].values,
c=patches_filt2plot["mean_pixel_RGB"].values,
vmin=0.6, vmax=0.9,
s=30)
plt.xlabel("Longitude", size=30)
plt.ylabel("Latitude", size=30)
plt.xticks(size=24)
plt.yticks(size=24)
plt.grid()
plt.show()
# +
# inputs
vmin = 0.6
vmax = 0.92
levels = 15
ngridx = 200
ngridy = 200
grouped = patches_filt2plot.groupby("parent_id")
plt.figure(figsize=(20, 10))
for name, group in grouped:
x = group["center_lon"].values
y = group["center_lat"].values
z = group["mean_pixel_RGB"].values
# Create grid values first.
xi = np.linspace(min(x), max(x), ngridx)
yi = np.linspace(min(y), max(y), ngridy)
zi = griddata((x, y), z,
(xi[None, :], yi[:, None]),
method='linear')
# plt.contour(xi, yi, zi,
# levels=levels,
# linewidths=0.5, colors='k',
# vmin=vmin, vmax=vmax)
plt.contourf(xi, yi, zi,
levels=levels,
cmap="RdBu_r",
vmin=vmin, vmax=vmax)
plt.colorbar()
plt.show()
# # Linearly interpolate the data (x, y) on a grid defined by (xi, yi).
# triang = tri.Triangulation(x, y)
# interpolator = tri.LinearTriInterpolator(triang, z)
# Xi, Yi = np.meshgrid(xi, yi)
# zi = interpolator(Xi, Yi)
# +
# inputs
vmin=0.6
vmax=0.92
levels=15
ngridx = 200
ngridy = 200
if ccrs_imported:
grouped = patches_filt2plot.groupby("parent_id")
fig = plt.figure(figsize=(20, 10))
ax = plt.axes(projection=ccrs.PlateCarree())
#extent = [-8.08999993, 1.81388127, 49.8338702, 60.95000002]
extent = [-0.45, 0.45, 51.3, 51.7] # extracted from metadata
ax.set_extent(extent)
ax.coastlines(resolution='10m', color='black', linewidth=1)
for name, group in grouped:
x = group["center_lon"].values
y = group["center_lat"].values
z = group["mean_pixel_RGB"].values
# Create grid values first.
xi = np.linspace(min(x), max(x), ngridx)
yi = np.linspace(min(y), max(y), ngridy)
zi = griddata((x, y), z,
(xi[None, :], yi[:, None]),
method='linear')
# plt.contour(xi, yi, zi,
# levels=levels,
# linewidths=0.5, colors='k',
# vmin=vmin, vmax=vmax,
# transform=ccrs.PlateCarree())
plt.contourf(xi, yi, zi,
levels=levels,
cmap="RdBu_r",
vmin=vmin, vmax=vmax,
transform=ccrs.PlateCarree())
ax.gridlines(draw_labels=True)#, xlocs=[150, 152, 154, 155])
plt.show()
else:
print(f"[WARNING] cartopy could not be imported!")
print(f"[WARNING] cartopy is used for plotting the results on maps.")
print(f"[WARNING] You can ignore this if you don't want to plot the results.")
# -
|
examples/geospatial/classification_one_inch_maps_001/004_inference.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={}
# # Project Euler: Problem 6
# + [markdown] nbgrader={}
# https://projecteuler.net/problem=6
#
# The sum of the squares of the first ten natural numbers is,
#
# $$1^2 + 2^2 + ... + 10^2 = 385$$
#
# The square of the sum of the first ten natural numbers is,
#
# $$(1 + 2 + ... + 10)^2 = 552 = 3025$$
#
# Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
x=[]
y=[]
for i in range(101):
x.append(i**2)
y.append(i)
f=sum(x)
g=sum(y)**2
print(g-f)
raise NotImplementedError()
# + deletable=false nbgrader={"checksum": "4a8ce9efca8c824de365eec816018842", "grade": true, "grade_id": "projecteuler6", "points": 10}
# This cell will be used for grading, leave it at the end of the notebook.
|
assignments/assignment02/ProjectEuler6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# **Name**: <NAME>
#
# **Date**: April 2017
# + [markdown] deletable=true editable=true
# # Test a Perceptual Phenomenon: The Stroop Effect
# + [markdown] deletable=true editable=true
# The Stroop dataset contains data from participants who were presented with a list of words, with each word displayed in a color of ink. The participant’s task was to say out loud the color of the ink in which the word was printed. The task had two conditions: a congruent words condition, and an incongruent words condition.
# - In the congruent words condition, the words being displayed are color words whose names match the colors in which they are printed.
# - In the incongruent words condition, the words displayed are color words whose names do not match the colors in which they are printed. In each case, the time it took to name the ink colors were measured in equally-sized lists.
# + deletable=true editable=true
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# %pylab inline
# + [markdown] deletable=true editable=true
# # Variables
# + deletable=true editable=true
stroop_data = pd.read_csv('./stroopdata.csv')
stroop_data.head()
# + [markdown] deletable=true editable=true
# **Independent variable**: Treatment condition consisting of congruent and incongruent words
#
# **Dependent variable**: Response time
# + [markdown] deletable=true editable=true
# # Hypothesis
# + [markdown] deletable=true editable=true
# $H_0 : \mu_C = \mu_I $ There is no difference in mean response time between the congruent and incongruent word conditions
#
# $H_a : \mu_C \neq \mu_I $ There is a difference in mean response time between the congruent and incongruent word conditions
#
# $\mu_C$ and $\mu_I$ denote the population means for the congruent and incongruent groups respectively.
#
# - Statistical test: Dependent t-test for paired samples is the statistical test that will be used.
#
# - This is a within-subject design, where the same subjects are being presented with two test conditions.
#
# The reasons for choosing this test are as follows:
#
# 1) The sample size is less than 30
#
# 2) The population standard deviation is unknown
#
# 3) It is assumed that the distributions are Gaussian
#
# + [markdown] deletable=true editable=true
# # Data Exploration and Visualization
# + deletable=true editable=true
stroop_data.describe()
# + deletable=true editable=true
print "Median:\n", stroop_data.median()
print "\nVariance:\n", stroop_data.var()
# + deletable=true editable=true
fig, axs = plt.subplots(figsize=(18, 5), ncols = 3, sharey=True)
plt.figure(figsize=(8, 6))
sns.set_palette("Set2")
# Fig 1 - Congruent Words - Response Time
sns.boxplot(y="Congruent", data=stroop_data,
ax=axs[0]).set_title("Fig 1: Congruent Words - Response Time (in seconds)")
# Fig 2 - Incongruent Words - Response Time
sns.boxplot(y="Incongruent", data=stroop_data, color="coral",
ax=axs[1]).set_title("Fig 2: Incongruent Words - Response Time (in seconds)")
# Fig 3 - Congruence vs. Incongruence
sns.regplot(x="Congruent", y="Incongruent", data=stroop_data, color="m", fit_reg=False,
ax=axs[2]).set_title("Fig 3: Congruence vs. Incongruence (in seconds)")
# + [markdown] deletable=true editable=true
# - The above visualizations clearly show that the response time for the congruent words condition is much lower in comparison to the incongruent words condition.
# - Even if the two outliers present in Fig 2 are ignored, it is evident that not just the mean (14 seconds vs. 22 seconds), but the lower and upper bounds for both conditions are markedly different as well.
# - Fig 3 shows a scatter plot of response times from both treatment conditions. The plot shows that for every x value (time taken for congruent words) plotted, the y value (time taken for incongruent words) is higher.
# + [markdown] deletable=true editable=true
# # Statistical Test
# + [markdown] deletable=true editable=true
# α: 0.05
#
# Confidence level: 95%
#
# t-critical value: 1.714
# + deletable=true editable=true
# Dependent t-test for paired samples
stats.ttest_rel(stroop_data["Congruent"], stroop_data["Incongruent"])
# + [markdown] deletable=true editable=true
# - We reject the null hypothesis since p-value < α level of 0.05
# - Hence it can be concluded that there is a difference in mean response time between the congruent and incongruent word conditions
# - The results match expectations because every one of the 24 samples in the dataset showed increased response time during the incongruent words condition.
# + [markdown] deletable=true editable=true
# **6. Optional: What do you think is responsible for the effects observed? Can you think of an alternative or similar task that would result in a similar effect? Some research about the problem will be helpful for thinking about these two questions!**
# + [markdown] deletable=true editable=true
# - When we are presented with words, we are trained to process the meaning. When we are asked to process the color of the word instead of the word meaning, we are trying to do the opposite of what we are so used to doing. This interference causes a delay in information processing, which is why the time it takes to process incongruent words is more.
# - A similar effect is produced in a "Directional Stroop Effect" experiment, where you are required to say the word location in a box, contrary to the actual direction the word states.
# + [markdown] deletable=true editable=true
# # References
# + [markdown] deletable=true editable=true
# https://en.wikipedia.org/wiki/Stroop_effect
#
# https://faculty.washington.edu/chudler/java/readyd.html
#
# https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.stats.ttest_rel.html
|
Inferential_Statistics/Inferential_Statistics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="7XitTDO_4Ejl" outputId="34fe0a5f-d671-4848-88ba-e17152690666"
pip install panda
# + colab={"base_uri": "https://localhost:8080/", "height": 320} id="b6t7BeQ54TEb" outputId="536e66fb-13ac-4012-d379-0a7fadcb25ab"
import pandas as pd
# save filepath to variable for easier access
melbourne_file_path = 'melb_data.csv'
# read the data and store data in DataFrame titled melbourne_data
melbourne_data = pd.read_csv(melbourne_file_path)
# print a summary of the data in Melbourne data
melbourne_data.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="9uLm1cJc4VGf" outputId="fdc06a76-e48b-4059-8f95-b5ce6dcc7798"
melbourne_data.columns
# + id="WdJMS3ZT4VPV"
# The Melbourne data has some missing values (some houses for which some variables weren't recorded.)
# We'll learn to handle missing values in a later tutorial.
# Your Iowa data doesn't have missing values in the columns you use.
# So we will take the simplest option for now, and drop houses from our data.
# Don't worry about this much for now, though the code is:
# dropna drops missing values (think of na as "not available")
melbourne_data = melbourne_data.dropna(axis=0)
# + id="pQQdlOq74VT6"
y = melbourne_data.Price
# + id="o1BtU6u34VWC"
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']
# + id="ZIn9FhR_4VYi"
X = melbourne_data[melbourne_features]
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="kJo844Qy4VaZ" outputId="e32d60ad-7443-42a9-9d06-998a752511c6"
X.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="Rwy4zzkB4Vcb" outputId="3e470b83-c264-4638-d9f3-103c63b9e38a"
X.head()
# + colab={"base_uri": "https://localhost:8080/"} id="jMt0uOxT4Vwm" outputId="7200e356-b309-4a00-e83f-678658500084"
from sklearn.tree import DecisionTreeRegressor
# Define model. Specify a number for random_state to ensure same results each run
melbourne_model = DecisionTreeRegressor(random_state=1)
# Fit model
melbourne_model.fit(X, y)
print("Making predictions for the following 5 houses:")
print(X.head())
print("The predictions are")
print(melbourne_model.predict(X.head()))
# + colab={"base_uri": "https://localhost:8080/"} id="h5Cty8Zo5NzN" outputId="a1aa6824-ba93-4579-cb36-9734b4c3e62d"
# Data Loading Code Hidden Here
import pandas as pd
# Filter rows with missing price values
filtered_melbourne_data = melbourne_data.dropna(axis=0)
# Choose target and features
y = filtered_melbourne_data.Price
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'BuildingArea',
'YearBuilt', 'Lattitude', 'Longtitude']
X = filtered_melbourne_data[melbourne_features]
from sklearn.tree import DecisionTreeRegressor
# Define model
melbourne_model = DecisionTreeRegressor()
# Fit model
melbourne_model.fit(X, y)
# + colab={"base_uri": "https://localhost:8080/"} id="g8wxmSm45OCa" outputId="00412f29-85f0-4b58-adc6-3fa480f2b47d"
from sklearn.metrics import mean_absolute_error
predicted_home_prices = melbourne_model.predict(X)
mean_absolute_error(y, predicted_home_prices)
# + colab={"base_uri": "https://localhost:8080/"} id="7DVuqOIE5OJj" outputId="73db1607-fe0b-44d1-e050-65179604b5c3"
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state = 0)
# Define model
melbourne_model = DecisionTreeRegressor()
# Fit model
melbourne_model.fit(train_X, train_y)
# get predicted prices on validation data
val_predictions = melbourne_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions))
# + id="DxZ0eg0V5mao"
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
model.fit(train_X, train_y)
preds_val = model.predict(val_X)
mae = mean_absolute_error(val_y, preds_val)
return(mae)
# + id="T0Dt4ikR5mm0"
# Data Loading Code Runs At This Point
import pandas as pd
# Filter rows with missing values
filtered_melbourne_data = melbourne_data.dropna(axis=0)
# Choose target and features
y = filtered_melbourne_data.Price
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'BuildingArea',
'YearBuilt', 'Lattitude', 'Longtitude']
X = filtered_melbourne_data[melbourne_features]
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
train_X, val_X, train_y, val_y = train_test_split(X, y,random_state = 0)
# + colab={"base_uri": "https://localhost:8080/"} id="YwgADCdd5mso" outputId="58b47b59-6411-4183-ff16-eaa119cd31bf"
# compare MAE with differing values of max_leaf_nodes
for max_leaf_nodes in [5, 50, 500, 5000]:
my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
print("Max leaf nodes: %d \t\t Mean Absolute Error: %d" %(max_leaf_nodes, my_mae))
# + id="enTd_3-g55-e"
import pandas as pd
# Filter rows with missing values
melbourne_data = melbourne_data.dropna(axis=0)
# Choose target and features
y = melbourne_data.Price
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'BuildingArea',
'YearBuilt', 'Lattitude', 'Longtitude']
X = melbourne_data[melbourne_features]
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X, val_X, train_y, val_y = train_test_split(X, y,random_state = 0)
# + colab={"base_uri": "https://localhost:8080/"} id="cSCjz5-Z56FU" outputId="899d05bd-f07a-4793-af6f-a7bed9791de9"
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor(random_state=1)
forest_model.fit(train_X, train_y)
melb_preds = forest_model.predict(val_X)
print(mean_absolute_error(val_y, melb_preds))
|
Homework_Intro_Machine_Learning_Kaggle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/geral98atehortua/Mujeres_Digitales/blob/main/Clase3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="a5D1-SWIK1f9"
# **ESTRUCTURA DE CONDICIONALES (Control)**
#
# ---
# al momento de construir una instrucion de tio condicional se debera comprender que dichas instrucciones estan diseñadas para ayudar en la toma de decisiones:
#
# Ejemplo:
#
#
# **Si** pedro va a la tienda por la izquierda llegara mas rapido, sino se demora más.
#
# Existen varios tipos de instrucciones algunos puedes ser simplesy otros multiples.
#
# Sin embargo solo se generan dos resultados.
#
# 1.verdadero
#
# 2.falso
#
# En Python tales resultados serian true o false.
#
#
#
#
# + [markdown] id="wb5dtx9pOSY4"
# par aaplicar los condicionales que veremos a continuacion debemos recordar los comandos de operadores matematicos vistos en la clase anterior:
#
# 1.igualdad(==) 2==7 // false
# 2.diferencia(!=) rosado!=verde // true
# 3.menor que(<) 12<11 // false
# 4.mayor que(>) >1.5 false
# 5.menor o igual(<=) 30<=30 // true
# 6.mayor o igual que(>=) 1>=2 // false
#
#
# + [markdown] id="RvG4ekajUYu_"
# En condiciones multiples podemos enlazar los operadores logicos
#
# 1.y(AND)
# 2.o(OR)
# 3.no(NOT)
#
# Ejemplos
#
# 1.3==8 and 8>2 // false
#
# 2.3==3 or 15<3 // true
#
# 3.not true // false
# + [markdown] id="SV8B00zAIs3p"
# Para aplicar los operadores matematicos y logicos tenemos en cuenta lo que llamamos **Diagrama de flujo**, esto nos permite mayor organización de las ideas para la toma de decisiones.
# + id="DmsnOm_GI832"
# + [markdown] id="eM2q7hhFh23K"
# **EL COMANDO IF**
#
# Este comando permite evaluar si una sentencia es verdadera o falsa. Es decir, se ejecuta una acción establecida mediante un comando de instruccio o varias instrucciones inmediatanmente en las lineas siguientes a dicha condición.
# + colab={"base_uri": "https://localhost:8080/"} id="Vm2OfA_sinkH" outputId="587e92d2-71bc-4f71-ed0e-c7620c1e595f"
num = int(input("Escribe un número cualquiera "))
if num == 200:
print("Escribiste 200")
# + [markdown] id="U_-vT8WWit6T"
# **EL COMANDO ELSE**
#
# Este comando permite relacionar las acciones que debería realizar en caso de que la condicion sea falsa
# + colab={"base_uri": "https://localhost:8080/"} id="LlHrNjPZi3G-" outputId="555c5a7b-5b00-44b3-fa14-24fe147987ad"
num = int(input("Escribe un número cualquiera "))
if num == 200:
print ("Escribiste 200")
else:
print("El número escrito no es 200")
# + [markdown] id="VWtquy3hi8Dq"
# **EL COMANDO ELIF**
#
# Significa "sino, si" y permite concatenar condicionales
# + colab={"base_uri": "https://localhost:8080/"} id="LKytSUSejAnq" outputId="8f0cfca4-7987-4848-cfaf-446d5d2f7e38"
num = int(input("Escribe un número cualquiera "))
if num == 200:
print ("Escribiste 200")
elif num > 200:
print("El número escrito es MAYOR a 200")
else:
print("El número escrito no es 200")
# + [markdown] id="B1JR2wxujHl9"
# **CONDICIONALES MÚLTIPLES**
#
# Cuando se presentan situaciones con más de una condición que depende unas a otras, estas se pueden tratar mediante las sentencias o comandos if, o mediante el manejo adecuado del comando elif. Sin embargo, en muchos casos cuando hay multiples condiciones, la programación necesita mayor cantidad de lineas de código
#
# En esos casos, es necesario, el uso de operadores lógicos como el AND y el OR
# + id="qsiV5dOFjLLn"
x = int(input("valor")) #condicional anidada
if 0 < x:
if x < 10:
print("x es un numero positivo ")
# + id="MEsTXD8jjNHX"
x = int(input("valor")) #Expresión booleana
if 0 < x and x < 10:
print("x es un numero de un solo digito")
# + id="acViAmn7m2eC"
x=int(input("valor"))#esta condicion es la misma expresion boleana compuesta y la misma expresion condicional anidada
if 0<x<10:
print ("x es un numero de un digito")
# + [markdown] id="K4dKDD9SjPzh"
# Esta condición es la misma expresión booleana compuesta y la misma expresión condicional anidada
# + [markdown] id="IvRQe_qfjgwz"
# **ESTRUCTURAS DE CONTROL ITERATIVO**
#
# Las variables son claves en las estructuras de control iterativas, puesto que son el medio entre la iteracion de la condicion uq se está utilizando
#
# **¿QUÉ ES ITERACIÓN?**
#
# Iteracion es la consecucion del codigo tantas veces requiere hasta que se cumplen las condiciones establecidad
#
# **Banderas** son las variables que toman un valor preferiblemente binario, booleano e indican un estado Ejemplo
# + colab={"base_uri": "https://localhost:8080/"} id="cEs9hRWSnTcN" outputId="387faac3-42a6-4597-9f53-dc9ca79ca59e"
suma = False
total =0
a=3
b=10
if (suma==False):
total = a + b
suma = True
if (suma == True):
print("el valor total de la suma es: ", total)
# + [markdown] id="7Yq4C4QWnXed"
# la variable suma en este caso del tipo booleano y su funcion es indicar cuando se ejecuto la suma, por lo tanto tiene un estado inicial false, pero luego de ejecutar la suma el valor true.
#
# cuando pasa esto escuchamos la frase "la bandera se levanto", quiere decir que una u otra accion hizo que el estado de la bandera cambiara.
#
# Ejemplo cualitativo: **caso contagio**
# + colab={"base_uri": "https://localhost:8080/"} id="9yndwJMQoECV" outputId="fed6527b-d5c3-430d-977e-2e69aec96467"
paciente=input("nombre del paciente: ")
contagio_v = input("te has realziado la prueba de contagio: ")
if(contagio_v=="No"):
print("la paciente ", paciente, "no se ha realizado la prueba para validar, por favor realizarse la prueba en el centro de salud mas cercano")
print("aplicarse la prueba")
if(contagio_v== "pendiente"):
print("la paciente", paciente, "por favor revisar el correo donde se adjunta el resultado de la prueba")
if(contagio_v=="Si"):
print(paciente, "dado que es positivo el resultado de su prueba mantener una distancia de 2 metros de las personas por lo menos 15 dias")
# + [markdown] id="RqHGhk1coYB_"
# **Ejercicio practico en clase**
#
# El Bootcam comenzo el 26 de julio del 2021, se les explico a las estudiantes que se les evaluaria de la siguiente forma:
#
# Tareas =10%
#
# Talleres=25%
#
# Asistencia=5%
#
# Participacion=15%
#
# Proyecto:35%
#
# teniendo en cuenta las ponderaciones de calificacion del Bootcamp las estudiantes deciden desarrollar un programa que les permita saber cuanto tendria que sacar en el proyecto para tener una definitiva de 3.7 minimo, si las notas que llevan hasta el momento son las siguientes:
#
# -Tareas: 4.5
#
# -Talleres: 4
#
# -Asistencia: 5
#
# -Participacion: 4
#
# ¿cuanto tendrian que sacar en el proyecto?
# + colab={"base_uri": "https://localhost:8080/"} id="LcjT1jj2oipa" outputId="13b2f157-8ecf-456e-98f0-3af2d405f8da"
tarea=0.10
pc_taller=0.25
pc_asis=0.05
pc_parti=0.15
pc_proye=0.45
nota_tarea=float(input("Escriba su nota de las tareas: "))
nota_taller=float(input("Escriba su nota de los talleres: "))
nota_asis=float(input("Escriba su nota de asistencia: "))
nota_parti=float(input("Escriba su nota de participación: "))
nota_mini=float(input("Escriba la definitiva minima que necesita: "))
nota_obli=((nota_mini-((nota_tarea*pc_tarea)+(nota_taller*pc_taller)+(nota_asis*pc_asis)+(nota_parti*pc_parti)))/pc_proye)
print("Usted debe sacar en el proyecto una nota minima de:", round(nota_obli,2))
# + id="ReTnt6ZF1SPC" outputId="ba421c71-0737-4969-ea96-06012a5001fe" colab={"base_uri": "https://localhost:8080/"}
tarea=4.5
taller=4
asis=5
parti=4
nota1=tarea*0.1
nota2=taller*0.25
nota3=asis*0.05
nota4=parti*0.15
notaParcial= nota1+nota2+nota3+nota4
nota5=(3.7-notaParcial)/0.45
print("Usted debe sacar en el proyecto una nota minima de:",round(nota5,2))
|
Clase3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
from fastai.tabular import *
# # Rossmann
# ## Data preparation
# To create the feature-engineered train_clean and test_clean from the Kaggle competition data, run `rossman_data_clean.ipynb`. One important step that deals with time series is this:
#
# ```python
# add_datepart(train, "Date", drop=False)
# add_datepart(test, "Date", drop=False)
# ```
path = Config().data_path()/'rossmann'
train_df = pd.read_pickle(path/'train_clean')
train_df.head().T
n = len(train_df); n
# ### Experimenting with a sample
idx = np.random.permutation(range(n))[:2000] # grab 2000 ids at random
idx.sort()
# grab 5 columns
small_train_df = train_df.iloc[idx[:1000]]
small_test_df = train_df.iloc[idx[1000:]]
small_cont_vars = ['CompetitionDistance', 'Mean_Humidity']
small_cat_vars = ['Store', 'DayOfWeek', 'PromoInterval']
small_train_df = small_train_df[small_cat_vars + small_cont_vars + ['Sales']]
small_test_df = small_test_df[small_cat_vars + small_cont_vars + ['Sales']]
small_train_df.head()
small_test_df.head()
categorify = Categorify(small_cat_vars, small_cont_vars)
categorify(small_train_df)
categorify(small_test_df, test=True)
small_test_df.head()
small_train_df.PromoInterval.cat.categories
small_train_df['PromoInterval'].cat.codes[:5]
fill_missing = FillMissing(small_cat_vars, small_cont_vars)
fill_missing(small_train_df)
fill_missing(small_test_df, test=True)
small_train_df[small_train_df['CompetitionDistance_na'] == True]
# ### Preparing full data set
train_df = pd.read_pickle(path/'train_clean')
test_df = pd.read_pickle(path/'test_clean')
len(train_df),len(test_df)
procs=[FillMissing, Categorify, Normalize]
# +
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',
'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',
'SchoolHoliday_fw', 'SchoolHoliday_bw']
cont_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
# -
dep_var = 'Sales'
df = train_df[cat_vars + cont_vars + [dep_var,'Date']].copy()
test_df['Date'].min(), test_df['Date'].max()
cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max()
cut
valid_idx = range(cut)
df[dep_var].head()
data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs,)
.split_by_idx(valid_idx)
.label_from_df(cols=dep_var, label_cls=FloatList, log=True)
.add_test(TabularList.from_df(test_df, path=path, cat_names=cat_vars, cont_names=cont_vars))
.databunch())
doc(FloatList)
# ## Model
max_log_y = np.log(np.max(train_df['Sales'])*1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04,
y_range=y_range, metrics=exp_rmspe)
learn.model
len(data.train_ds.cont_names)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, 1e-3, wd=0.2)
learn.save('1')
learn.recorder.plot_losses(last=-1)
learn.load('1');
learn.fit_one_cycle(5, 3e-4)
learn.fit_one_cycle(5, 3e-4)
# (10th place in the competition was 0.108)
test_preds=learn.get_preds(DatasetType.Test)
test_df["Sales"]=np.exp(test_preds[0].data).numpy().T[0]
test_df[["Id","Sales"]]=test_df[["Id","Sales"]].astype("int")
test_df[["Id","Sales"]].to_csv("rossmann_submission.csv",index=False)
|
nbs/dl1/lesson6-rossmann.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analyzing time series
#
# Although the Raven server by no means offer a complete suite of tools for time series analysis, we strive to provide basic algorithms that can reduce the volume of data to be downloaded and analyzed locally. To this end, Raven offers a couple of indicators based on stream flow series.
#
# Here. we'll test those indicators on a simple test file with around ten years of daily streamflow generated by a Raven simulation.
# +
# %matplotlib inline
from birdy import WPSClient
url = "http://localhost:9099/wps"
wps = WPSClient(url)#, processes='base_flow_index')
from example_data import TESTDATA
fn = str(TESTDATA['simfile_single'])
# -
# ## Base flow index
#
# The base flow index is the minimum 7-day average flow divided by the mean flow.
help(wps.base_flow_index)
# The base flow index needs as input arguments the link to a NetCDF file storing the stream flow time series, the name of the stream flow variable, and the frequency at which the index is computed (`YS`: yearly, `QS-DEC`: seasonally).
resp = wps.base_flow_index(fn, variable='q_sim')
out, log = resp.get(asobj=True)
out.base_flow_index.plot()
# To compute generic statistics of a time series, use the `ts_stats` process.
help(wps.ts_stats)
# Here we compute the annual summer (JJA) minimum
resp = wps.ts_stats(fn, variable='q_sim', op='min', season='JJA')
out, log = resp.get(asobj=True)
out.ts_stats.plot()
# ## Frequency analysis
#
# The process `freq_analysis` is similar to the previous stat sin the it fits a series of annual maxima or minima to a statistical distribution, and returns the values corresponding to different return periods.
help(wps.freq_analysis)
# For example, computing the Q27, the minimum 7-days streamflow of reccurrence two years, can be done using the following.
resp = wps.freq_analysis(fn, variable='q_sim', mode='min', t=2, dist='gumbel_r', window=7)
out, log = resp.get(asobj=True)
out.freq_analysis
# An array of return periods can be passed.
resp = wps.freq_analysis(fn, variable='q_sim', mode='max', t=(2, 5, 10, 25, 50, 100), dist='gumbel_r')
out, log = resp.get(asobj=True)
out.freq_analysis.plot()
|
docs/source/notebooks/Time series analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="LhWiLK0IEsqX"
# # Ungraded Lab: Class Activation Maps with Fashion MNIST
#
# In this lab, you will see how to implement a simple class activation map (CAM) of a model trained on the [Fashion MNIST dataset](https://github.com/zalandoresearch/fashion-mnist). This will show what parts of the image the model was paying attention to when deciding the class of the image. Let's begin!
# + [markdown] id="wOAlJAfRIY50"
# ## Imports
# + id="zSyMHuCVys-O"
import keras
from keras.datasets import fashion_mnist
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential,Model
from keras.layers import Dense, Conv2D, MaxPooling2D, GlobalAveragePooling2D
import scipy as sp
# + [markdown] id="uMIJYr6FIbZt"
# ## Download and Prepare the Data
# + id="01974419yy5W"
# load the Fashion MNIST dataset
(X_train,Y_train),(X_test,Y_test) = fashion_mnist.load_data()
# + id="VfVB6x6Oy1yF"
# Put an additional axis for the channels of the image.
# Fashion MNIST is grayscale so we place 1 at the end. Other datasets
# will need 3 if it's in RGB.
X_train = X_train.reshape(60000,28,28,1)
X_test = X_test.reshape(10000,28,28,1)
# Normalize the pixel values from 0 to 1
X_train = X_train/255
X_test = X_test/255
# Cast to float
X_train = X_train.astype('float')
X_test = X_test.astype('float')
# + id="SDx99oRCzfTr"
def show_img(img):
'''utility function for reshaping and displaying an image'''
# convert to float array if img is not yet preprocessed
img = np.array(img,dtype='float')
# remove channel dimension
img = img.reshape((28,28))
# display image
plt.imshow(img)
# + id="rAL6lD1PLlj2"
# test the function for the first train image. you can vary the index of X_train
# below to see other images
show_img(X_train[1])
# + [markdown] id="1xPggGPyOhm5"
# ## Build the Classifier
#
# Let's quickly recap how we can build a simple classifier with this dataset.
# + [markdown] id="Ds9n85HmJySy"
# ### Define the Model
#
# You can build the classifier with the model below. The image will go through 4 convolutions followed by pooling layers. The final Dense layer will output the probabilities for each class.
# + id="QyCoMd93zpc_"
# use the Sequential API
model = Sequential()
# notice the padding parameter to recover the lost border pixels when doing the convolution
model.add(Conv2D(16,input_shape=(28,28,1),kernel_size=(3,3),activation='relu',padding='same'))
# pooling layer with a stride of 2 will reduce the image dimensions by half
model.add(MaxPooling2D(pool_size=(2,2)))
# pass through more convolutions with increasing filters
model.add(Conv2D(32,kernel_size=(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64,kernel_size=(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128,kernel_size=(3,3),activation='relu',padding='same'))
# use global average pooling to take into account lesser intensity pixels
model.add(GlobalAveragePooling2D())
# output class probabilities
model.add(Dense(10,activation='softmax'))
model.summary()
# + [markdown] id="Fxsip1WuOk0Q"
# ### Train the Model
# + id="t8LswgblLvc8"
# configure the training
model.compile(loss='sparse_categorical_crossentropy',metrics=['accuracy'],optimizer='adam')
# train the model. just run a few epochs for this test run. you can adjust later.
model.fit(X_train,Y_train,batch_size=32, epochs=5, validation_split=0.1, shuffle=True)
# + [markdown] id="eriUpnzxQkus"
# ## Generate the Class Activation Map
#
# To generate the class activation map, we want to get the features detected in the last convolution layer and see which ones are most active when generating the output probabilities. In our model above, we are interested in the layers shown below.
# + id="UblFRpsK0Gd7"
# final convolution layer
print(model.layers[-3].name)
# global average pooling layer
print(model.layers[-2].name)
# output of the classifier
print(model.layers[-1].name)
# + [markdown] id="cXAYTZZVrZFB"
# You can now create your CAM model as shown below.
# + id="JSc42Fd4rZFB"
# same as previous model but with an additional output
cam_model = Model(inputs=model.input,outputs=(model.layers[-3].output,model.layers[-1].output))
cam_model.summary()
# + [markdown] id="MvWbe6sLrZFB"
# Use the CAM model to predict on the test set, so that it generates the features and the predicted probability for each class (`results`).
# + id="R8625UmNrxOU"
# get the features and results of the test images using the newly created model
features,results = cam_model.predict(X_test)
# shape of the features
print("features shape: ", features.shape)
print("results shape", results.shape)
# + [markdown] id="bfQMCOtmTaDX"
# You can generate the CAM by getting the dot product of the class activation features and the class activation weights.
#
# You will need the weights from the Global Average Pooling layer (GAP) to calculate the activations of each feature given a particular class.
# - Note that you'll get the weights from the dense layer that follows the global average pooling layer.
# - The last conv2D layer has (h,w,depth) of (3 x 3 x 128), so there are 128 features.
# - The global average pooling layer collapses the h,w,f (3 x 3 x 128) into a dense layer of 128 neurons (1 neuron per feature).
# - The activations from the global average pooling layer get passed to the last dense layer.
# - The last dense layer assigns weights to each of those 128 features (for each of the 10 classes),
# - So the weights of the last dense layer (which immmediately follows the global average pooling layer) are referred to in this context as the "weights of the global average pooling layer".
#
# For each of the 10 classes, there are 128 features, so there are 128 feature weights, one weight per feature.
# + id="UwfG1vyprZFC"
# these are the weights going into the softmax layer
last_dense_layer = model.layers[-1]
# get the weights list. index 0 contains the weights, index 1 contains the biases
gap_weights_l = last_dense_layer.get_weights()
print("gap_weights_l index 0 contains weights ", gap_weights_l[0].shape)
print("gap_weights_l index 1 contains biases ", gap_weights_l[1].shape)
# shows the number of features per class, and the total number of classes
# Store the weights
gap_weights = gap_weights_l[0]
print(f"There are {gap_weights.shape[0]} feature weights and {gap_weights.shape[1]} classes.")
# + [markdown] id="Y7GtR-4NrZFC"
# Now, get the features for a specific image, indexed between 0 and 999.
# + id="x2aA6_ZTrZFD"
# Get the features for the image at index 0
idx = 0
features_for_img = features[idx,:,:,:]
print(f"The features for image index {idx} has shape (height, width, num of feature channels) : ", features_for_img.shape)
# + [markdown] id="0OSiDiTnrZFD"
# The features have height and width of 3 by 3. Scale them up to the original image height and width, which is 28 by 28.
# + id="1sNUNDLDrZFD"
features_for_img_scaled = sp.ndimage.zoom(features_for_img, (28/3, 28/3,1), order=2)
# Check the shape after scaling up to 28 by 28 (still 128 feature channels)
print("features_for_img_scaled up to 28 by 28 height and width:", features_for_img_scaled.shape)
# + [markdown] id="AYj5w9NhrZFD"
# For a particular class (0...9), get the 128 weights.
#
# Take the dot product with the scaled features for this selected image with the weights.
#
# The shapes are:
# scaled features: (h,w,depth) of (28 x 28 x 128).
# weights for one class: 128
#
# The dot product produces the class activation map, with the shape equal to the height and width of the image: 28 x 28.
# + id="sNj4D8FprZFD"
# Select the weights that are used for a specific class (0...9)
class_id = 0
# take the dot product between the scaled image features and the weights for
gap_weights_for_one_class = gap_weights[:,class_id]
print("features_for_img_scaled has shape ", features_for_img_scaled.shape)
print("gap_weights_for_one_class has shape ", gap_weights_for_one_class.shape)
# take the dot product between the scaled features and the weights for one class
cam = np.dot(features_for_img_scaled, gap_weights_for_one_class)
print("class activation map shape ", cam.shape)
# + [markdown] id="Vag5enPdrZFE"
# ### Conceptual interpretation
# To think conceptually about what what you're doing and why:
# - In the 28 x 28 x 128 feature map, each of the 128 feature filters is tailored to look for a specific set of features (for example, a shoelace).
# - The actual features are learned, not selected by you directly.
# - Each of the 128 weights for a particular class decide how much weight to give to each of the 128 features, for that class.
# - For instance, for the "shoe" class, it may have a higher weight for the feature filters that look for shoelaces.
# - At each of the 28 by 28 pixels, you can take the vector of 128 features and compare them with the vector of 128 weights.
# - You can do this comparison with a dot product.
# - The dot product results in a scalar value at each pixel.
# - Apply this dot product across all of the 28 x 28 pixels.
# - The scalar result of the dot product will be larger when the image both has the particular feature (e.g. shoelace), and that feature is also weighted more heavily for the particular class (e.g shoe).
#
# So you've created a matrix with the same number of pixels as the image, where the value at each pixel is higher when that pixel is relevant to the prediction of a particular class.
# + [markdown] id="G-e9U5poVBis"
# Here is the function that implements the Class activation map calculations that you just saw.
# + id="YByJ8J1008Ms"
def show_cam(image_index):
'''displays the class activation map of a particular image'''
# takes the features of the chosen image
features_for_img = features[image_index,:,:,:]
# get the class with the highest output probability
prediction = np.argmax(results[image_index])
# get the gap weights at the predicted class
class_activation_weights = gap_weights[:,prediction]
# upsample the features to the image's original size (28 x 28)
class_activation_features = sp.ndimage.zoom(features_for_img, (28/3, 28/3, 1), order=2)
# compute the intensity of each feature in the CAM
cam_output = np.dot(class_activation_features,class_activation_weights)
print('Predicted Class = ' +str(prediction)+ ', Probability = ' + str(results[image_index][prediction]))
# show the upsampled image
plt.imshow(np.squeeze(X_test[image_index],-1), alpha=0.5)
# strongly classified (95% probability) images will be in green, else red
if results[image_index][prediction]>0.95:
cmap_str = 'Greens'
else:
cmap_str = 'Reds'
# overlay the cam output
plt.imshow(cam_output, cmap=cmap_str, alpha=0.5)
# display the image
plt.show()
# + [markdown] id="9fjIdEmlXbez"
# You can now test generating class activation maps. Let's use the utility function below.
# + id="8iQjMfg9U03_"
def show_maps(desired_class, num_maps):
'''
goes through the first 10,000 test images and generates CAMs
for the first `num_maps`(int) of the `desired_class`(int)
'''
counter = 0
if desired_class < 10:
print("please choose a class less than 10")
# go through the first 10000 images
for i in range(0,10000):
# break if we already displayed the specified number of maps
if counter == num_maps:
break
# images that match the class will be shown
if np.argmax(results[i]) == desired_class:
counter += 1
show_cam(i)
# + [markdown] id="ctjDEfzrjXa5"
# For class 8 (handbag), you'll notice that most of the images have dark spots in the middle and right side.
# - This means that these areas were given less importance when categorizing the image.
# - The other parts such as the outline or handle contribute more when deciding if an image is a handbag or not.
#
# Observe the other classes and see if there are also other common areas that the model uses more in determining the class of the image.
# + id="AkPMvVurezkb"
show_maps(desired_class=7, num_maps=20)
# + id="8hZNmPudI5wY"
|
Advanced Computer Vision with Tensorflow/Week4/C3_W4_Lab_1_FashionMNIST_CAM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="9frci5AAr9rZ" colab_type="code" outputId="41fb0152-3ed4-46b8-8464-f22e1ed4ba60" executionInfo={"status": "ok", "timestamp": 1543777057532, "user_tz": 120, "elapsed": 7840, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 451}
# !pip install git+https://github.com/albermax/innvestigate
# + id="2cglYO71bmQg" colab_type="code" colab={}
import warnings
warnings.simplefilter('ignore')
# + [markdown] id="IAQcWTF0bmQu" colab_type="text"
# https://github.com/albermax/innvestigate
# + id="552uVMQdsk8j" colab_type="code" outputId="61bedbe4-77ea-4bec-c2ed-e927cfb97707" executionInfo={"status": "ok", "timestamp": 1543777057542, "user_tz": 120, "elapsed": 7789, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/drive')
# + id="UzrBCc2KbmQw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="733d1c9a-129a-4fd3-a4bd-a9a2de30faa8" executionInfo={"status": "ok", "timestamp": 1543777058721, "user_tz": 120, "elapsed": 8952, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}}
import keras
# + id="yA5lbWTCbmQ7" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import cv2
import os
from glob import glob
import seaborn as sns
from PIL import Image
np.random.seed(123)
from sklearn.preprocessing import label_binarize
from sklearn.metrics import confusion_matrix
import itertools
import keras
from keras.utils.np_utils import to_categorical # used for converting labels to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras import backend as K
import itertools
from keras.layers.normalization import BatchNormalization
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from keras.applications import Xception
from keras import models, layers
import keras.backend
import innvestigate
import innvestigate.utils as iutils
# + id="_5wRCatWbmRE" colab_type="code" colab={}
X_test=np.load("/content/drive/My Drive/Colab Notebooks/imagens_teste.npy")
y_test=np.load("/content/drive/My Drive/Colab Notebooks/labels_teste.npy")
X_train=np.load("/content/drive/My Drive/Colab Notebooks/imagens_treino.npy")
y_train=np.load("/content/drive/My Drive/Colab Notebooks/labels_treino.npy")
# + id="mvfFcRWobmRJ" colab_type="code" colab={}
from keras.applications import DenseNet121
xc = DenseNet121(weights='imagenet',
include_top=False,
input_shape=(64, 64, 3))
# + id="XeacGmQ9bmRP" colab_type="code" outputId="dea6dd4c-fda5-46c5-9e34-8a298f677e61" executionInfo={"status": "ok", "timestamp": 1543777081022, "user_tz": 120, "elapsed": 31187, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 15977}
xc.summary()
# + id="XSBRMnf0bmRZ" colab_type="code" outputId="66391bb9-191c-4536-d1ec-d40aa26bf5ef" executionInfo={"status": "ok", "timestamp": 1543777081026, "user_tz": 120, "elapsed": 31165, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
from keras import Model
lname = "relu"
last_vgg_layer = xc.get_layer(lname)
x = Flatten()(last_vgg_layer.get_output_at(0))
x = Dense(1024, activation='relu')(x)
x=Dropout(0.5)(x)
x = Dense(18, activation='softmax')(x)
model = Model(inputs=xc.inputs, outputs=x)
len(model.layers)
# + id="0muxMDUQbmRf" colab_type="code" colab={}
model_vgg = models.Sequential()
model_vgg.add(xc)
model_vgg.add(layers.Flatten())
model_vgg.add(layers.Dense(1024, activation='relu'))
model_vgg.add(layers.Dropout(0.5))
model_vgg.add(layers.Dense(18, activation='softmax'))
# + id="8IIRSv9RbmRq" colab_type="code" colab={}
model_vgg.load_weights('/content/drive/My Drive/Colab Notebooks/weights_densenet121_simpsons.h5')
# + id="BZ8VncSMbmR4" colab_type="code" outputId="61cb9ca4-f887-469a-fd39-7e0c3dd08378" executionInfo={"status": "ok", "timestamp": 1543777099950, "user_tz": 120, "elapsed": 50034, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
weights=model_vgg.get_weights()
len(model_vgg.layers)
# + id="bAt4743wbmR_" colab_type="code" colab={}
last_weights=model_vgg.layers[4].get_weights()
ante_weights=model_vgg.layers[2].get_weights()
# + id="c_CAxjVIbmSF" colab_type="code" colab={}
model.layers[430].set_weights(last_weights)
model.layers[428].set_weights(ante_weights)
# + id="7_ZTOVtgbmSK" colab_type="code" outputId="f16c1f48-42d1-4013-d71a-9d619d68f3d9" executionInfo={"status": "ok", "timestamp": 1543777103398, "user_tz": 120, "elapsed": 53431, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
len(model.layers)
# + id="a5-KF8gebmSS" colab_type="code" outputId="36606d9c-6d3d-4305-fc7d-5776616deeaa" executionInfo={"status": "ok", "timestamp": 1543778738810, "user_tz": 120, "elapsed": 832, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 268}
img = cv2.cvtColor(X_test[6], cv2.COLOR_BGR2RGB)
X_test[6].shape[0]
imgplot = plt.imshow(img)
# + id="0NORXe41bmSZ" colab_type="code" colab={}
images=[(X_test[120],y_test[120]),(X_test[534],y_test[534]),(X_test[448],y_test[448]),(X_test[430],y_test[430]),(X_test[327],y_test[327]),
(X_test[786],y_test[786]),(X_test[221],y_test[221]),(X_test[835],y_test[835]),(X_test[728],y_test[728]),(X_test[561],y_test[561]),(X_test[6],y_test[6])]
# + id="i3wFOCJ6bmSg" colab_type="code" colab={}
def plot_image_grid(grid,
row_labels_left,
row_labels_right,
col_labels,
file_name=None,
figsize=None,
dpi=448):
n_rows = len(grid)
n_cols = len(grid[0])
if figsize is None:
figsize = (n_cols, n_rows+1)
plt.clf()
plt.rc("font", family="sans-serif")
plt.figure(figsize=figsize)
for r in range(n_rows):
for c in range(n_cols):
ax = plt.subplot2grid(shape=[n_rows+1, n_cols], loc=[r+1, c])
# TODO controlled color mapping wrt all grid entries,
# or individually. make input param
if grid[r][c] is not None:
ax.imshow(grid[r][c], interpolation='none')
else:
for spine in plt.gca().spines.values():
spine.set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
# column labels
if not r:
if col_labels != []:
ax.set_title(col_labels[c],
rotation=22.5,
horizontalalignment='left',
verticalalignment='bottom')
# row labels
if not c:
if row_labels_left != []:
txt_left = [l+'\n' for l in row_labels_left[r]]
ax.set_ylabel(
''.join(txt_left),
rotation=0,
verticalalignment='center',
horizontalalignment='right',
)
if c == n_cols-1:
if row_labels_right != []:
txt_right = [l+'\n' for l in row_labels_right[r]]
ax2 = ax.twinx()
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_ylabel(
''.join(txt_right),
rotation=0,
verticalalignment='center',
horizontalalignment='left'
)
if file_name is None:
plt.show()
else:
print('Saving figure to {}'.format(file_name))
plt.savefig(file_name, orientation='landscape', dpi=dpi, bbox_inches='tight', pad_inches=0.2)
# + id="kBSeqzAQbmSp" colab_type="code" colab={}
# Methods we use and some properties.
methods = [
# NAME OPT.PARAMS POSTPROC FXN TITLE
# Show input.
("input", {}, "Input"),
# Function
("gradient", {"postprocess": "abs"}, "Gradient"),
# Signal
("deconvnet", {}, "Deconvnet"),
("lrp.sequential_preset_a_flat",{"epsilon": 1}, "LRP-PresetAFlat"),
]
# + id="9lsTt-JZbmS2" colab_type="code" colab={}
# Stripping the softmax activation from the model
model_wo_sm = iutils.keras.graph.model_wo_softmax(model)
# Create analyzers.
analyzers = []
for method in methods:
try:
analyzer = innvestigate.create_analyzer(method[0], # analysis method identifier
model_wo_sm, # model without softmax output
**method[1]) # optional analysis parameters
except innvestigate.NotAnalyzeableModelException:
# Not all methods work with all models.
analyzer = None
analyzers.append(analyzer)
# + id="lji3oLi7V8Wz" colab_type="code" colab={}
def recons_ima(X):
im=[]
for g in range(X.shape[0]):
im.append([])
for f in range (X.shape[1]):
im[g].append([X[g][f][2],X[g][f][1],X[g][f][0]])
im=np.array(im)
return im
# + id="95REExCAbmS8" colab_type="code" colab={}
analysis = np.zeros([len(images), len(analyzers)]+[64,64]+[3])
text = []
map_characters = {0: 'abraham', 1: 'apu', 2: 'bart',
3: 'burns', 4: 'chief_wiggum', 5: 'comic_book_guy', 6: 'edna',
7: 'homer', 8: 'kent', 9: 'krusty', 10: 'lisa',
11: 'marge', 12: 'milhouse', 13: 'moe',
14: 'ned', 15: 'nelson', 16: 'skinner', 17: 'sideshow_bob'}
for i, (x, y) in enumerate(images):
# Add batch axis.
x_pp = x[None, :, :, :]
#x_pp = imgnetutils.preprocess(x, net)
# Predict final activations, probabilites, and label.
presm = model_wo_sm.predict_on_batch(x_pp)[0]
prob = model.predict_on_batch(x_pp)[0]
y_hat = prob.argmax()
y=y.argmax()
# Save prediction info:
text.append(("%s" % map_characters[y], # ground truth label
"%.2f" % presm.max(), # pre-softmax logits
"%.2f" % prob.max(), # probabilistic softmax output
"%s" % map_characters[y_hat] # predicted label
))
for aidx, analyzer in enumerate(analyzers):
if methods[aidx][0] == "input":
# Do not analyze, but keep not preprocessed input.
a = [x]
elif analyzer:
# Analyze.
a = analyzer.analyze(x_pp)
# Apply common postprocessing, e.g., re-ordering the channels for plotting.
#a = imgnetutils.postprocess(a, color_conversion, channels_first)
# Apply analysis postprocessing, e.g., creating a heatmap.
#a = methods[aidx][2](a)
else:
a = np.zeros_like(image)
# Store the analysis.
analysis[i, aidx] = recons_ima(a[0]-a[0].min())/(a[0].max()-a[0].min())
# + id="S1aVuP9jbmTE" colab_type="code" outputId="fa7ad074-8b07-4d39-9c55-bbb4ae2a7732" executionInfo={"status": "ok", "timestamp": 1543777258985, "user_tz": 120, "elapsed": 208910, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
analysis.shape
# + id="iCrY5Q4KbmTR" colab_type="code" outputId="0c60bb6f-5385-4d45-9171-8714be97026c" executionInfo={"status": "ok", "timestamp": 1543778173647, "user_tz": 120, "elapsed": 6209, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08905560025286693506"}} colab={"base_uri": "https://localhost:8080/", "height": 715}
# Prepare the grid as rectengular list
grid = [[analysis[i, j] for j in range(analysis.shape[1])]
for i in range(analysis.shape[0])]
# Prepare the labels
label, presm, prob, pred = zip(*text)
row_labels_left = [('label: {}'.format(label[i]),'pred: {}'.format(pred[i])) for i in range(len(label))]
row_labels_right = [('logit: {}'.format(presm[i]),'prob: {}'.format(prob[i])) for i in range(len(label))]
col_labels = [''.join(method[2]) for method in methods]
# Plot the analysis.
plot_image_grid(grid, row_labels_left, row_labels_right, col_labels,
file_name='/content/drive/My Drive/Colab Notebooks/fig_dense121')
# + id="88bbYAh0bmTf" colab_type="code" colab={}
|
experiment/train/Simpson_train_denseNet121.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Impedance Matching
# ## Introduction
# The general problem is illustrated by the figure below: a generator with an internal impedance $Z_S$ delivers a power to a passive load $Z_L$, through a 2-ports matching network. This problem is commonly named as "the double matching problem". Impedance matching is important for the following reasons:
#
# - maximizing the power transfer. Maximum power is delivered to the load when the generator _and_ the load are matched to the line and power loss in the line minimized
# - improving signal-to-noise ratio of the system
# - reducing amplitude and phase errors
# - reducing reflected power toward generator
#
# <img src="figures/Impedance_matching_general.svg">
#
# As long as the load impedance $Z_L$ has a real positive part, a matching network can always be found. Many choices are available and the examples below only describe a few. The examples are taken from the D.Pozar book "Microwave Engineering", 4th edition.
import numpy as np
import matplotlib.pyplot as plt
import skrf as rf
rf.stylely()
# ## Matching with Lumped Elements
# To begin, let's assume that the matching network is lossless and the feeding line characteristic impedance is $Z_0$:
#
# <img src="figures/Impedance_matching_lumped1.svg">
#
# The simplest type of matching network is the "L" network, which uses two reactive elements to match an arbitrary load impedance. Two possible configuration exist and are illustrated by the figures below. In either configurations, the reactive elements can be inductive of capacitive, depending on the load impedance.
#
# <img src="figures/Impedance_matching_lumped2.svg">
# <img src="figures/Impedance_matching_lumped3.svg">
# Let's assume the load is $Z_L = 200 - 100j \Omega$ for a line $Z_0=100\Omega$ at the frequency of 500 MHz.
Z_L = 200 - 100j
Z_0 = 100
f_0_str = '500MHz'
# Let's define the `Frequency` and load `Network`:
# frequency band centered on the frequency of interest
frequency = rf.Frequency(start=300, stop=700, npoints=401, unit='MHz')
# transmission line Media
line = rf.DefinedGammaZ0(frequency=frequency, z0=Z_0)
# load Network
load = line.load(rf.zl_2_Gamma0(Z_0, Z_L))
# We are searching for a L-C Network corresponding to the first configuration above:
# <img src="figures/Impedance_matching_lumped4.svg">
# +
def matching_network_LC_1(L, C):
' L and C in nH and pF'
return line.inductor(L*1e-9)**line.shunt_capacitor(C*1e-12)**load
def matching_network_LC_2(L, C):
' L and C in nH and pF'
return line.capacitor(C*1e-12)**line.shunt_inductor(L*1e-9)**load
# -
# Finding the set of inductance $L$ and the capacitance $C$ which matches the load is an optimization problem. The `scipy` package provides the necessary optimization function(s) for that:
# +
from scipy.optimize import minimize
# initial guess values
L0 = 10 # nH
C0 = 1 # pF
x0 = (L0, C0)
# bounds
L_minmax = (1, 100) #nH
C_minmax = (0.1, 10) # pF
# the objective functions minimize the return loss at the target frequency f_0
def optim_fun_1(x, f0=f_0_str):
_ntw = matching_network_LC_1(*x)
return np.abs(_ntw[f_0_str].s).ravel()
def optim_fun_2(x, f0=f_0_str):
_ntw = matching_network_LC_2(*x)
return np.abs(_ntw[f_0_str].s).ravel()
# -
res1 = minimize(optim_fun_1, x0, bounds=(L_minmax, C_minmax))
print(f'Optimum found for LC network 1: L={res1.x[0]} nH and C={res1.x[1]} pF')
res2 = minimize(optim_fun_2, x0, bounds=(L_minmax, C_minmax))
print(f'Optimum found for LC network 2: L={res2.x[0]} nH and C={res2.x[1]} pF')
ntw1 = matching_network_LC_1(*res1.x)
ntw2 = matching_network_LC_2(*res2.x)
ntw1.plot_s_mag(lw=2, label='LC network 1')
ntw2.plot_s_mag(lw=2, label='LC network 2')
plt.ylim(bottom=0)
# ## Single-Stub Matching
# Matching can be made with a piece of open-ended or shorted transmission line ( _stub_ ), connected either in parallel ( _shunt_ ) or in series. In the example below, a matching network is realized from a shorted transmission line of length ($\theta_{stub}$) connected in parallel, in association with a series transmission line ($\theta_{line}$). Let's assume a load impedance $Z_L=60 - 80j$ connected to a 50 Ohm transmission line.
#
# <img src="figures/Impedance_matching_stub1.svg">
#
# Let's match this load at 2 GHz:
Z_L = 60 - 80j
Z_0 = 50
f_0_str = '2GHz'
# Frequency, wavenumber and transmission line media
freq = rf.Frequency(start=1, stop=3, npoints=301, unit='GHz')
beta = freq.w/rf.c
line = rf.DefinedGammaZ0(freq, gamma=1j*beta, z0=Z_0)
def resulting_network(theta_delay, theta_stub):
'''
Return a loaded single stub matching network
NB: theta_delay and theta_stub lengths are in deg
'''
delay_load = line.delay_load(rf.zl_2_Gamma0(Z_0, Z_L), theta_delay)
shunted_stub = line.shunt_delay_short(theta_stub)
return shunted_stub ** delay_load
# Optimize the matching network variables `theta_delay` and `theta_stub` to match the resulting 1-port network ($|S|=0$)
# +
from scipy.optimize import minimize
def optim_fun(x):
return resulting_network(*x)[f_0_str].s_mag.ravel()
x0 = (50, 50)
bnd = (0, 180)
res = minimize(optim_fun, x0, bounds=(bnd, bnd))
print(f'Optimum found for: theta_delay={res.x[0]:.1f} deg and theta_stub={res.x[1]:.1f} deg')
# -
# Optimized network at f0
ntw = resulting_network(*res.x)
ntw.plot_s_db(lw=2)
|
doc/source/examples/matching/Impedance Matching.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="yaSd5pinrOyM"
# # Clustering methods
# We are going to use the following libraries and packages:
#
# * **numpy**: "NumPy is the fundamental package for scientific computing with Python." (http://www.numpy.org/)
# * **matplotlib**: "Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms." (https://matplotlib.org/)
# * **sklearn**: Scikit-learn is a machine learning library for Python programming language. (https://scikit-learn.org/stable/)
# * **pandas**: "Pandas provides easy-to-use data structures and data analysis tools for Python." (https://pandas.pydata.org/)
# + id="vWb5dngPrRp3"
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import offsetbox
import pandas as pd
# + [markdown] id="Bvd9DZ4us3Fa"
# ## Implementing clustering methods on synthetic data
#
#
# + [markdown] id="wZmz-k5VfA3F"
# ## Synthetic data
#
# Let's generate synthetic data as follows:
# 1) Points are scattered in 2 dimensional space as follows. There are N-2 other dimensions that all the points have same values in each dimension
# 2) We will reduce the dimensionality of the data to 2D
#
# + id="_418G2YbihU7" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="9ea8214c-32e0-48df-ef11-1f96ceb5e9f6"
group_1_X = np.repeat(2,90)+np.random.normal(loc=0, scale=1,size=90)
group_1_Y = np.repeat(2,90)+np.random.normal(loc=0, scale=1,size=90)
group_2_X = np.repeat(10,90)+np.random.normal(loc=0, scale=1,size=90)
group_2_Y = np.repeat(10,90)+np.random.normal(loc=0, scale=1,size=90)
plt.scatter(group_1_X,group_1_Y, c='blue')
plt.scatter(group_2_X,group_2_Y,c='green')
plt.xlabel('1st dimension')
plt.ylabel('2nd dimension')
# + id="1SkR8CzcjRXj" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="1409ef17-53c1-498d-c22f-2a15fd35b899"
####
combined = pd.DataFrame(np.column_stack((np.concatenate([group_1_X,group_2_X]),np.concatenate([group_1_Y,group_2_Y]))))
####
from sklearn import cluster
combined_AC = cluster.AgglomerativeClustering(n_clusters=2,affinity='euclidean', linkage='complete').fit(combined)
combined_KMeans = cluster.KMeans(n_clusters=2,max_iter=300).fit(combined)
combined_AP = cluster.AffinityPropagation( max_iter=200, convergence_iter=15).fit(combined)
combined_DB = cluster.DBSCAN(eps=0.5, min_samples=8).fit(combined)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
ax1.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_AC.labels_)
ax1.set_title('Agglomerative')
ax2.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_KMeans.labels_)
ax2.set_title('k-means')
ax3.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_AP.labels_)
ax3.set_title('Affinity Prop.')
ax4.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_DB.labels_)
ax4.set_title('DBSCAN')
# + [markdown] id="N90MHTM7iWhy"
# ### Let's change the structure of synthetic data
#
# Let's generate synthetic data as follows:
# + id="3rK3aNP2fa4b" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="ac8c18ef-5c6f-4626-d010-81d9185336a1"
group_1_X = np.arange(10,100)
group_1_Y = np.arange(10,100)+np.random.normal(loc=0, scale=0.3,size=90)-np.repeat(4,90)
group_2_X = np.arange(10,100)
group_2_Y = np.arange(10,100)+np.random.normal(loc=0, scale=0.3,size=90)+np.repeat(4,90)
plt.scatter(group_1_X,group_1_Y, c='blue')
plt.scatter(group_2_X,group_2_Y,c='green')
plt.xlabel('1st dimension')
plt.ylabel('2nd dimension')
# + id="fm8zMo1shEt2" colab={"base_uri": "https://localhost:8080/", "height": 351} outputId="2ba79b23-060b-45d0-bddd-1fb1c31c3b6f"
####
combined = pd.DataFrame(np.column_stack((np.concatenate([group_1_X,group_2_X]),np.concatenate([group_1_Y,group_2_Y]))))
####
from sklearn import cluster
combined_AC = cluster.AgglomerativeClustering(n_clusters=2,affinity='euclidean', linkage='complete').fit(combined)
combined_KMeans = cluster.KMeans(n_clusters=2,max_iter=300).fit(combined)
combined_AP = cluster.AffinityPropagation(damping=0.5, max_iter=200, convergence_iter=15).fit(combined)
combined_DB = cluster.DBSCAN(eps=0.02, min_samples=2).fit(combined)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
ax1.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_AC.labels_)
ax1.set_title('Agglomerative')
ax2.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_KMeans.labels_)
ax2.set_title('k-means')
ax3.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_AP.labels_)
ax3.set_title('Affinity Prop.')
ax4.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_DB.labels_)
ax4.set_title('DBSCAN')
# + [markdown] id="5GX7-HkEkSWN"
# ### Another synthetic data
#
# Let's generate synthetic data as follows:
#
# + id="RW3_YL01lHHP" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="1b184fa5-843e-47d7-b48a-e8164c6b853a"
group_1_X = np.arange(start=0,stop=1**2,step=0.001)
group_1_Y = np.sqrt(np.repeat(1**2,1000)-group_1_X**2)
group_2_X = np.arange(start=0,stop=1.5,step=0.001)
group_2_Y = np.sqrt(np.repeat(1.5**2,1500)-group_2_X**2)
plt.scatter(group_1_X,group_1_Y, c='blue', )
plt.scatter(group_2_X,group_2_Y,c='green')
plt.xlabel('1st dimension')
plt.ylabel('2nd dimension')
plt.xlim(0,2.5)
plt.ylim(0,2.5)
# + id="YzSp0OU_nZqS" colab={"base_uri": "https://localhost:8080/", "height": 351} outputId="8f9e95d5-fa82-4da6-8ce1-1e2a02687102"
combined = pd.DataFrame(np.column_stack((np.concatenate([group_1_X,group_2_X]),np.concatenate([group_1_Y,group_2_Y]))))
####
from sklearn import cluster
combined_AC = cluster.AgglomerativeClustering(n_clusters=2,affinity='euclidean', linkage='complete').fit(combined)
combined_KMeans = cluster.KMeans(n_clusters=2,max_iter=300).fit(combined)
combined_AP = cluster.AffinityPropagation( max_iter=200, convergence_iter=15).fit(combined)
combined_DB = cluster.DBSCAN(eps=0.1, min_samples=5).fit(combined)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
ax1.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_AC.labels_)
ax1.set_title('Agglomerative')
ax2.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_KMeans.labels_)
ax2.set_title('k-means')
ax3.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_AP.labels_)
ax3.set_title('Affinity Prop.')
ax4.scatter(combined.iloc[:,0], combined.iloc[:,1], c=combined_DB.labels_)
ax4.set_title('DBSCAN')
# + [markdown] id="4Vm4C26Cjm8Z"
# ### Conclusion:
# * There is no best model for all cases
# * Hypeparameters of the models are the major players in the quality of clustering.
# + [markdown] id="o7_8PcyXUOY1"
# ## UCI ML digit image data
#
# * load and return digit data set
# + id="pJNt6IVjOYir" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="9e584922-dca6-4d72-9432-046d694a68ca"
from sklearn import datasets
# Loading digit images
digits = datasets.load_digits()
X = pd.DataFrame(digits.data).iloc[0:100,]
y = digits.target[0:100]
n_samples, n_features = X.shape
print("number of samples (data points):", n_samples)
print("number of features:", n_features)
# + [markdown] id="5qLnPNWTo_-L"
# Pixels of images have values between 0 and 16.
# Chacking varaince of each feature across samples is informative.
# + [markdown] id="ZRoVRGKZhAVN"
# Let's write a function to use it for visualization of the results of all the dimension reduction methods.
# + [markdown] id="ddASf4sMsrVm"
# #### Let's visualize some of the images
# + id="T7k8dJm-pHhl" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="53d46d7b-86a2-4a63-aa0f-3af19f182fa5"
fig, ax_array = plt.subplots(1,10)
axes = ax_array.flatten()
for i, ax in enumerate(axes):
ax.imshow(digits.images[i])
plt.setp(axes, xticks=[], yticks=[])
plt.tight_layout(h_pad=0.5, w_pad=0.01)
# + [markdown] id="JvM_I_DG086G"
# Now that we understood how different clustering methods works, let's implement it on the UCI ML digit image data:
# + id="v40lj5CLsLV5"
import collections
from sklearn import metrics
UCIML_AC = cluster.AgglomerativeClustering(n_clusters=10,affinity='euclidean', linkage='single').fit(X)
UCIML_KMeans = cluster.KMeans(n_clusters=10,max_iter=1000).fit(X)
UCIML_AP = cluster.AffinityPropagation( max_iter=200, convergence_iter=15).fit(X)
UCIML_DB = cluster.DBSCAN(eps=0.5, min_samples=5).fit(X)
# + [markdown] id="eg10Awed-eI3"
# Let's check the clusters using proper visualiation:
# + id="bRm7czaP6Q61" colab={"base_uri": "https://localhost:8080/", "height": 450} outputId="1e52ee6f-04d6-49dd-b491-846276dc4d41"
conf_frame = pd.DataFrame(0,index=np.arange(0,10), columns=np.arange(0,10))
for i in y:
for j in UCIML_KMeans.labels_:
conf_frame.iloc[i,j] = conf_frame.iloc[i,j]+1
print(conf_frame)
import seaborn as sns
with sns.axes_style("white"):
ax = sns.heatmap(conf_frame, square=True, cmap="YlGnBu")
plt.show()
# + [markdown] id="aM2dq26cdRkT"
# ## Normalizing data before clustering
#
# It is a good idea usually to normalize the data so that the scale of values for different features would become similar.
# + id="9jV7KGPOdkUD"
from sklearn import preprocessing
X_norm = pd.DataFrame(preprocessing.scale(X))
UCIML_AC = cluster.AgglomerativeClustering(n_clusters=10,affinity='euclidean', linkage='complete').fit(X_norm)
UCIML_KMeans = cluster.KMeans(n_clusters=10,max_iter=1000).fit(X_norm)
UCIML_AP = cluster.AffinityPropagation( max_iter=200, convergence_iter=15).fit(X_norm)
UCIML_DB = cluster.DBSCAN(eps=0.01, min_samples=5).fit(X_norm)
# + id="fx1Jr31a8BIw" colab={"base_uri": "https://localhost:8080/", "height": 450} outputId="0ae02f5f-9cdf-4a15-95eb-713a50377cca"
conf_frame = pd.DataFrame(0,index=np.arange(0,10), columns=np.arange(0,10))
for i in y:
for j in UCIML_AC.labels_:
conf_frame.iloc[i,j] = conf_frame.iloc[i,j]+1
print(conf_frame)
import seaborn as sns
with sns.axes_style("white"):
ax = sns.heatmap(conf_frame, square=True, cmap="YlGnBu")
plt.show()
# + id="iFfY_ABvkSQn"
|
python/Session6_Clustering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (herschelhelp_internal)
# language: python
# name: helpint
# ---
# # HATLAS-SGP master catalogue
# ## Preparation of DES data
#
# Blanco DES catalogue: the catalogue comes from `dmu0_DES`.
#
# In the catalogue, we keep:
#
# - The identifier (it's unique in the catalogue);
# - The position;
# - The G band stellarity;
# - The magnitude for each band.
# - The auto/kron magnitudes/fluxes to be used as total magnitude.
# - The aperture magnitudes, which are used to compute a corrected 2 arcsec aperture magnitude.
#
# We don't know when the maps have been observed. We will take the final observation date as 2017.
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
import datetime
print("This notebook was executed on: \n{}".format(datetime.datetime.now()))
# +
# %matplotlib inline
# #%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from herschelhelp_internal.flagging import gaia_flag_column
from herschelhelp_internal.masterlist import nb_astcor_diag_plot, remove_duplicates, \
nb_plot_mag_ap_evol, nb_plot_mag_vs_apcor
from herschelhelp_internal.utils import astrometric_correction, mag_to_flux, flux_to_mag, aperture_correction
# +
OUT_DIR = os.environ.get('TMP_DIR', "./data_tmp")
try:
os.makedirs(OUT_DIR)
except FileExistsError:
pass
RA_COL = "des_ra"
DEC_COL = "des_dec"
# -
# Pristine HSC catalogue
orig_des = Table.read("../../dmu0/dmu0_DES/data/DES-DR1_HATLAS-SGP.fits")
# ## 1 - Aperture correction
#
# To compute aperture correction we need to dertermine two parametres: the target aperture and the range of magnitudes for the stars that will be used to compute the correction.
#
# Target aperture: To determine the target aperture, we simulate a curve of growth using the provided apertures and draw two figures:
#
# The evolution of the magnitudes of the objects by plotting on the same plot aperture number vs the mean magnitude.
# The mean gain (loss when negative) of magnitude is each aperture compared to the previous (except for the first of course).
# As target aperture, we should use the smallest (i.e. less noisy) aperture for which most of the flux is captures.
#
# Magnitude range: To know what limits in aperture to use when doing the aperture correction, we plot for each magnitude bin the correction that is computed and its RMS. We should then use the wide limits (to use more stars) where the correction is stable and with few dispersion.
# +
bands = ["g", "r", "i", "z", "y"]
apertures = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"] #Removed "40" and "235" because they lack errors
magnitudes = {}
stellarities = {}
for band in bands:
magnitudes[band] = np.array(
[orig_des["MAG_APER_{}_{}".format(aperture, band.upper())] for aperture in apertures]
)
stellarities[band] = np.array(orig_des["CLASS_STAR_{}".format(band.upper())])
# Some sources have an infinite magnitude
mask = np.isclose(magnitudes[band], 99.)
magnitudes[band][mask] = np.nan
mag_corr = {}
# -
# ### I.a - g band
# +
nb_plot_mag_ap_evol(magnitudes['g'], stellarities['g'], labels=apertures)
# -
# We will use aperture 10 as target.
nb_plot_mag_vs_apcor(orig_des['MAG_APER_4_G'], orig_des['MAG_APER_10_G'], stellarities['g'])
#
# We will use magnitudes between 15.0 and 16.0
# Aperture correction
mag_corr['g'], num, std = aperture_correction(
orig_des['MAG_APER_4_G'], orig_des['MAG_APER_10_G'],
stellarities['g'],
mag_min=15.0, mag_max=16.0)
print("Aperture correction for g band:")
print("Correction: {}".format(mag_corr['g']))
print("Number of source used: {}".format(num))
print("RMS: {}".format(std))
# ### I.b - r band
# +
nb_plot_mag_ap_evol(magnitudes['r'], stellarities['r'], labels=apertures)
# -
# We will use aperture 10 as target.
# +
nb_plot_mag_vs_apcor(orig_des['MAG_APER_4_R'], orig_des['MAG_APER_10_R'], stellarities['r'])
# -
# We use magnitudes between 15.0 and 16.0.
# Aperture correction
mag_corr['r'], num, std = aperture_correction(
orig_des['MAG_APER_4_R'], orig_des['MAG_APER_10_R'],
stellarities['r'],
mag_min=15.0, mag_max=16.0)
print("Aperture correction for r band:")
print("Correction: {}".format(mag_corr['r']))
print("Number of source used: {}".format(num))
print("RMS: {}".format(std))
# ### I.b - i band
nb_plot_mag_ap_evol(magnitudes['i'], stellarities['i'], labels=apertures)
# We will use aperture 10 as target.
nb_plot_mag_vs_apcor(orig_des['MAG_APER_4_I'], orig_des['MAG_APER_10_I'], stellarities['i'])
# We use magnitudes between 15.0 and 16.0.
# Aperture correction
mag_corr['i'], num, std = aperture_correction(
orig_des['MAG_APER_4_I'], orig_des['MAG_APER_10_I'],
stellarities['i'],
mag_min=15.0, mag_max=16.0)
print("Aperture correction for i band:")
print("Correction: {}".format(mag_corr['i']))
print("Number of source used: {}".format(num))
print("RMS: {}".format(std))
# ### I.b - z band
# +
nb_plot_mag_ap_evol(magnitudes['z'], stellarities['z'], labels=apertures)
# -
# We will use aperture 57 as target.
# +
nb_plot_mag_vs_apcor(orig_des['MAG_APER_4_Z'], orig_des['MAG_APER_10_Z'], stellarities['z'])
# -
# We use magnitudes between 15.0 and 16.0.
# Aperture correction
mag_corr['z'], num, std = aperture_correction(
orig_des['MAG_APER_4_Z'], orig_des['MAG_APER_10_Z'],
stellarities['z'],
mag_min=15.0, mag_max=16.0)
print("Aperture correction for z band:")
print("Correction: {}".format(mag_corr['z']))
print("Number of source used: {}".format(num))
print("RMS: {}".format(std))
# ### I.b - y band
nb_plot_mag_ap_evol(magnitudes['y'], stellarities['y'], labels=apertures)
# We will use aperture 10 as target.
# +
nb_plot_mag_vs_apcor(orig_des['MAG_APER_4_Y'], orig_des['MAG_APER_10_Y'], stellarities['y'])
# -
# We use magnitudes between 15.0 and 16.0.
# Aperture correction
mag_corr['y'], num, std = aperture_correction(
orig_des['MAG_APER_4_Y'], orig_des['MAG_APER_10_Y'],
stellarities['y'],
mag_min=15.0, mag_max=16.0)
print("Aperture correction for y band:")
print("Correction: {}".format(mag_corr['y']))
print("Number of source used: {}".format(num))
print("RMS: {}".format(std))
# ## 2 - Column selection
# +
imported_columns = OrderedDict({
'COADD_OBJECT_ID': "des_id",
'RA': "des_ra",
'DEC': "des_dec",
'CLASS_STAR_G': "des_stellarity",
'MAG_AUTO_G': "m_decam_g",
'MAGERR_AUTO_G': "merr_decam_g",
'MAG_APER_4_G': "m_ap_decam_g",
'MAGERR_APER_4_G': "merr_ap_decam_g",
'MAG_AUTO_R': "m_decam_r",
'MAGERR_AUTO_R': "merr_decam_r",
'MAG_APER_4_R': "m_ap_decam_r",
'MAGERR_APER_4_R': "merr_ap_decam_r",
'MAG_AUTO_I': "m_decam_i",
'MAGERR_AUTO_I': "merr_decam_i",
'MAG_APER_4_I': "m_ap_decam_i",
'MAGERR_APER_4_I': "merr_ap_decam_i",
'MAG_AUTO_Z': "m_decam_z",
'MAGERR_AUTO_Z': "merr_decam_z",
'MAG_APER_4_Z': "m_ap_decam_z",
'MAGERR_APER_4_Z': "merr_ap_decam_z",
'MAG_AUTO_Y': "m_decam_y",
'MAGERR_AUTO_Y': "merr_decam_y",
'MAG_APER_4_Y': "m_ap_decam_y",
'MAGERR_APER_4_Y': "merr_ap_decam_y",
})
catalogue = Table.read("../../dmu0/dmu0_DES/data/DES-DR1_HATLAS-SGP.fits")[list(imported_columns)]
for column in imported_columns:
catalogue[column].name = imported_columns[column]
epoch = 2017
# Clean table metadata
catalogue.meta = None
# -
# Aperture correction
for band in bands:
catalogue["m_ap_decam_{}".format(band)] += mag_corr[band]
# +
# Adding flux and band-flag columns
for col in catalogue.colnames:
if col.startswith('m_'):
errcol = "merr{}".format(col[1:])
# Some objects have -99.0 values
mask = (catalogue[col] > 50.) | (catalogue[col] < 0.)
catalogue[col][mask] = np.nan
catalogue[errcol][mask] = np.nan
flux, error = mag_to_flux(np.array(catalogue[col]), np.array(catalogue[errcol]))
# Fluxes are added in µJy
catalogue.add_column(Column(flux * 1.e6, name="f{}".format(col[1:])))
catalogue.add_column(Column(error * 1.e6, name="f{}".format(errcol[1:])))
# Band-flag column
if 'ap' not in col:
catalogue.add_column(Column(np.zeros(len(catalogue), dtype=bool), name="flag{}".format(col[1:])))
# TODO: Set to True the flag columns for fluxes that should not be used for SED fitting.
# -
catalogue[:10].show_in_notebook()
# ## II - Removal of duplicated sources
# We remove duplicated objects from the input catalogues.
# +
SORT_COLS = ['merr_ap_decam_g', 'merr_ap_decam_r','merr_ap_decam_i','merr_ap_decam_z','merr_ap_decam_y']
FLAG_NAME = 'des_flag_cleaned'
nb_orig_sources = len(catalogue)
catalogue = remove_duplicates(catalogue, RA_COL, DEC_COL, sort_col=SORT_COLS,flag_name=FLAG_NAME)
nb_sources = len(catalogue)
print("The initial catalogue had {} sources.".format(nb_orig_sources))
print("The cleaned catalogue has {} sources ({} removed).".format(nb_sources, nb_orig_sources - nb_sources))
print("The cleaned catalogue has {} sources flagged as having been cleaned".format(np.sum(catalogue[FLAG_NAME])))
# -
# ## III - Astrometry correction
#
# We match the astrometry to the Gaia one. We limit the Gaia catalogue to sources with a g band flux between the 30th and the 70th percentile. Some quick tests show that this give the lower dispersion in the results.
gaia = Table.read("../../dmu0/dmu0_GAIA/data/GAIA_SGP.fits")
gaia_coords = SkyCoord(gaia['ra'], gaia['dec'])
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
gaia_coords.ra, gaia_coords.dec, near_ra0=True)
# +
delta_ra, delta_dec = astrometric_correction(
SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]),
gaia_coords
)
print("RA correction: {}".format(delta_ra))
print("Dec correction: {}".format(delta_dec))
# -
catalogue[RA_COL] = catalogue[RA_COL] + delta_ra.to(u.deg)
catalogue[DEC_COL] = catalogue[DEC_COL] + delta_dec.to(u.deg)
catalogue[RA_COL].unit = u.deg
catalogue[DEC_COL].unit = u.deg
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
gaia_coords.ra, gaia_coords.dec, near_ra0=True)
# ## IV - Flagging Gaia objects
catalogue.add_column(
gaia_flag_column(SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]), epoch, gaia)
)
# +
GAIA_FLAG_NAME = "des_flag_gaia"
catalogue['flag_gaia'].name = GAIA_FLAG_NAME
print("{} sources flagged.".format(np.sum(catalogue[GAIA_FLAG_NAME] > 0)))
# -
# ## V - Flagging objects near bright stars
# # VI - Saving to disk
catalogue.write("{}/DES.fits".format(OUT_DIR), overwrite=True)
|
dmu1/dmu1_ml_SGP/1.5_DES.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Selenium
#
# Selenium Python API provides a tool for web testing and web scraping.
# ## Installation
#
# __Install Selenium__
#
# `pip install selenium`
#
# __Install Webdrivers__
#
# Selenium requires a driver to interface with the browser. Webdriver needs to be installed and be placed _in your PATH_.
#
# Remember, place it in _/usr/bin_ or _/usr/local/bin_
#
# [Chrome driver](https://sites.google.com/a/chromium.org/chromedriver/downloads)
#
# [Firefox driver](https://github.com/mozilla/geckodriver/releases)
# ## Initialization
#
# Initializing webdriver in headless mode (the program will not open a real brower window while running).
# +
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument("--headless")
driver = webdriver.Chrome(chrome_options=options)
# can also set window size
# driver.set_window_size(1024, 960)
# -
# ## Closing the webdriver
#
# The webdriver should be closed everytime you finished the task.
driver.close()
# ## Get webpage source
#
# Request and get the webpage source with webdriver.
url = "www.google.com"
driver.get(url)
# ## Locating Elements
#
# * find_element_by_id
# * find_element_by_name
# * find_element_by_xpath
# * find_element_by_link_text
# * find_element_by_partial_link_text
# * find_element_by_tag_name
# * find_element_by_class_name
# * find_element_by_css_selector
#
# To find multiple elements (these methods will return a list):
#
# * find_elements_by_name
# * find_elements_by_xpath
# * find_elements_by_link_text
# * find_elements_by_partial_link_text
# * find_elements_by_tag_name
# * find_elements_by_class_name
# * find_elements_by_css_selector
# ## Search for input field and put search string
#
# Search input field by its XPATH and put search word into the field
# find the input field
input_xpath = "//*[@id=\"tsf\"]/div[2]/div[1]/div[1]/div/div[2]/input"
field = driver.find_element_by_xpath(input_xpath)
# to be safe, clear any pre-populated text in the input field
field.clear()
# put search string into input field
field.send_keys("python selenium")
# ## Press enter or search button to search
#
# There are two ways to make the search.
#
# * Press enter at the input field, or
# * click on the search button
# +
# send the Enter key to the input field
from selenium.webdriver.common.keys import Keys
field.send_keys(Keys.RETURN)
# or
# find the send button and click it
send_btn = driver.find_element_by_xpath("//*[@id=\"tsf\"]/div[2]/div[1]/div[3]/center/input[1]")
send_btn.click()
|
Selenium/Selenium.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simple selections
#
# It's time to begin writing your own queries! In this first coding exercise, you will use `SELECT` statements to retrieve columns from a database table. You'll be working with the `eurovision table`, which contains data relating to individual country performance at the [Eurovision Song Contest](https://en.wikipedia.org/wiki/Eurovision_Song_Contest) from 1998 to 2012.
#
# After selecting columns, you'll also practice renaming columns, and limiting the number of rows returned.
#
# Instructions
#
# 1. `SELECT` the `country` column `FROM` the `eurovision` table.
# 2. Amend your query to return the `points` column instead of the `country` column.
# 3. Use `TOP` to change the existing query so that only the first `50` rows are returned.
# 4. Return a list of unique countries using `DISTINCT`. Give the results an alias of `unique_country`.
# +
-- SELECT the country column FROM the eurovision table
SELECT country
FROM eurovision;
# country
# Israel
# France
# Sweden
# Croatia
# Portugal
# ...
# +
-- Select the points column
SELECT points
FROM eurovision;
# points
# 53
# 107
# 33
# 45
# 57
# ...
# +
-- Limit the number of rows returned
SELECT TOP(50) points
FROM eurovision;
# points
# 53
# 107
# 33
# 45
# 57
# ...
# +
-- Return unique countries and use an alias
SELECT DISTINCT country AS unique_country
FROM eurovision;
# unique_country
# Albania
# Andorra
# Armenia
# Austria
# Azerbaijan
# ...
# -
# ## More selections
#
# Now that you've practiced how to select one column at a time, it's time to practice selecting more than one column. You'll continue working with the `eurovision` table.
#
# Instructions
#
# 1. `SELECT` the `country` and `event_year` columns from the `eurovision` table.
# 2. Use a shortcut to amend the current query, returning ALL rows from ALL columns in the table.
# 3. This time, restrict the rows to the first half using 'PERCENT', using the same shortcut as before to return all columns.
# +
-- Select country and event_year from eurovision
SELECT country,
event_year
FROM eurovision;
# country event_year
# Israel 2009
# France 2009
# Sweden 2009
# Croatia 2009
# Portugal 2009
# ...
# +
-- Amend the code to select all rows and columns
SELECT *
FROM eurovision;
# euro_id event_year country gender group_type place points host_country host_region is_final sf_number song_in_english
# 1 2009 Israel Female Group 16 53 Away Away 1 null 1
# 2 2009 France Female Solo 8 107 Away Away 1 null 0
# 3 2009 Sweden Female Solo 21 33 Away Away 1 null 1
# 4 2009 Croatia Both Group 18 45 Away Away 1 null 0
# 5 2009 Portugal Both Group 15 57 Away Away 1 null 0
# ...
# +
-- Return all columns, restricting the percent of rows returned
SELECT TOP (50) PERCENT *
FROM eurovision;
# euro_id event_year country gender group_type place points host_country host_region is_final sf_number song_in_english
# 1 2009 Israel Female Group 16 53 Away Away 1 null 1
# 2 2009 France Female Solo 8 107 Away Away 1 null 0
# 3 2009 Sweden Female Solo 21 33 Away Away 1 null 1
# 4 2009 Croatia Both Group 18 45 Away Away 1 null 0
# 5 2009 Portugal Both Group 15 57 Away Away 1 null 0
# ...
# -
# ## Order by
#
# In this exercise, you'll practice the use of `ORDER BY` using the `grid` dataset. It's loaded and waiting for you! It contains a subset of wider publicly available information on US power outages.
#
# Some of the main columns include:
#
# - `description`: The reason/ cause of the outage.
# - `nerc_region`: The North American Electricity Reliability Corporation was formed to ensure the reliability of the grid and comprises several regional entities).
# - `demand_loss_mw`: How much energy was not transmitted/consumed during the outage.
#
# Instructions
#
# 1. Select `description` and `event_date` from `grid`. Your query should return the first 5 rows, ordered by `event_date`.
# 2. Modify your code based on the comments provided on the right.
# +
-- Select the first 5 rows from the specified columns
SELECT TOP(5) description,
event_date
FROM grid
-- Order your results by the event_date column
ORDER BY event_date;
# description event_date
# Electrical Fault at Generator 2011-01-11
# Winter Storm 2011-01-12
# Firm System Load Shed 2011-01-13
# Vandalism 2011-01-18
# Vandalism 2011-01-23
# +
-- Select the top 20 rows from description, nerc_region and event_date
SELECT TOP (20) description,
nerc_region,
event_date
FROM grid
-- Order by nerc_region, affected_customers & event_date
-- Event_date should be in descending order
ORDER BY nerc_region,
affected_customers,
event_date DESC;
# description nerc_region event_date
# Suspected Physical Attack ERCOT 2014-06-12
# Fuel Supply Emergency Coal ERCOT 2014-06-06
# Physical Attack Vandalism ERCOT 2014-06-03
# Suspected Physical Attack FRCC 2013-03-18
# Load Shed of 100+ MW Under Emergency Operational Policy FRCC 2013-06-17
# ...
# -
# ## Where
#
# You won't usually want to retrieve _every_ row in your database. You'll have specific information you need in order to answer questions from your boss or colleagues.
#
# The `WHERE` clause is essential for selecting, updating (and deleting!) data from your tables. You'll continue working with the `grid` dataset for this exercise.
#
# Instructions
#
# 1. Select the `description` and `event_year` columns.
# 2. Return rows `WHERE` the description is `'Vandalism'`.
# +
-- Select description and event_year
SELECT description,
event_year
FROM grid
-- Filter the results
WHERE description = 'Vandalism';
# description event_year
# Vandalism 2014
# Vandalism 2013
# Vandalism 2013
# Vandalism 2013
# Vandalism 2013
# ...
# -
# ## Where again
#
# When filtering strings, you need to wrap your value in 'single quotes', as you did in the previous exercise. You don't need to do this for numeric values, but you DO need to use single quotes for date columns.
#
# In this course, dates are always represented in the `YYYY-MM-DD` format (Year-Month-Day), which is the default in Microsoft SQL Server.
#
# Instructions
#
# 1. Select the `nerc_region` and `demand_loss_mw` columns, limiting the results to those where `affected_customers` is greater than or equal to 500000 (500,000)
# 2. Update your code to select `description` and `affected_customers`, returning records where the `event_date` was the 22nd December, 2013.
# 3. Limit the results to those where the `affected_customers` is `BETWEEN` `50000` and `150000`, and order in descending order of `event_date`.
# +
-- Select nerc_region and demand_loss_mw
SELECT nerc_region,
demand_loss_mw
FROM grid
-- Retrieve rows where affected_customers is >= 500000 (500,000)
WHERE affected_customers >= 500000;
# nerc_region demand_loss_mw
# WECC 3900
# WECC 3300
# WECC 9750
# RFC null
# SERC 4545
# ...
# +
-- Select description and affected_customers
SELECT description,
affected_customers
FROM grid
-- Retrieve rows where the event_date was the 22nd December, 2013
WHERE event_date = '2013-12-22';
# description affected_customers
# Severe Weather IceSnow 59000
# Severe Weather IceSnow 50000
# Severe Weather IceSnow 140735
# +
-- Select description, affected_customers and event date
SELECT description,
affected_customers,
event_date
FROM grid
-- The affected_customers column should be >= 50000 and <=150000
WHERE affected_customers BETWEEN 50000 AND 150000
-- Define the order
ORDER BY event_date DESC;
# description affected_customers event_date
# Severe Weather Thunderstorms 127000 2014-06-30
# Severe Weather Thunderstorms 120000 2014-06-30
# Severe Weather Thunderstorms 138802 2014-06-18
# Severe Weather Thunderstorms 55951 2014-06-15
# Severe Weather Thunderstorms 66383 2014-06-10
# ...
# -
# ## Working with NULL values
#
# A NULL value could mean 'zero' - if something doesn't happen, it can't be logged in a table. However, NULL can also mean 'unknown' or 'missing'. So consider if it is appropriate to replace them in your results. NULL values provide feedback on data quality. If you have NULL values, and you didn't expect to have any, then you have an issue with either how data is captured or how it's entered in the database.
#
# In this exercise, you'll practice filtering for NULL values, excluding them from results, and replacing them with alternative values.
#
# Instructions
#
# 1. Use a shortcut to select all columns from `grid`. Then filter the results to only include rows where `demand_loss_mw` is unknown or missing.
# 2. Adapt your code to return rows where `demand_loss_mw` is not unknown or missing.
# +
-- Retrieve all columns
SELECT *
FROM grid
-- Return only rows where demand_loss_mw is missing or unknown
WHERE demand_loss_mw IS NULL;
# grid_id description event_year event_date restore_date nerc_region demand_loss_mw affected_customers
# 1 Severe Weather Thunderstorms 2014 2014-06-30 2014-07-01 RFC null 127000
# 3 Fuel Supply Emergency Coal 2014 2014-06-27 null MRO null null
# 4 Physical Attack Vandalism 2014 2014-06-24 2014-06-24 SERC null null
# 5 Physical Attack Vandalism 2014 2014-06-19 2014-06-19 SERC null null
# 6 Physical Attack Vandalism 2014 2014-06-18 2014-06-18 WECC null null
# ...
# +
-- Retrieve all columns
SELECT *
FROM grid
-- Return rows where demand_loss_mw is not missing or unknown
WHERE demand_loss_mw IS NOT NULL;
# grid_id description event_year event_date restore_date nerc_region demand_loss_mw affected_customers
# 2 Severe Weather Thunderstorms 2014 2014-06-30 2014-07-01 MRO 424 120000
# 14 Severe Weather Thunderstorms 2014 2014-06-07 2014-06-08 SERC 217 65000
# 16 Severe Weather Thunderstorms 2014 2014-06-05 2014-06-07 SERC 494 38500
# 18 Electrical System Islanding 2014 2014-06-03 2014-06-03 WECC 338 null
# 24 Public Appeal to Reduce Electricity Usage Wild Fires 2014 2014-05-16 2014-05-16 WECC 3900 1400000
# ...
# -
# ## Exploring classic rock songs
#
# It's time to rock and roll! In this set of exercises, you'll use the `songlist` table, which contains songs featured on the playlists of 25 classic rock radio stations.
#
# First, let's get familiar with the data.
#
# Instructions
#
# 1. Retrieve the `song`, `artist`, and `release_year` columns from the `songlist` table.
# 2. Make sure there are no `NULL` values in the `release_year` column.
# 3. Order the results by `artist` and `release_year`.
# +
-- Retrieve the song, artist and release_year columns
SELECT song,
artist,
release_year
FROM songlist
# song artist release_year
# Keep On Loving You REO Speedwagon 1980
# Keep Pushin 1977 REO Speedwagon null
# Like You Do REO Speedwagon null
# Ridin the Storm Out REO Speedwagon null
# Roll With the Changes REO Speedwagon null
# ...
# +
-- Retrieve the song, artist and release_year columns
SELECT song,
artist,
release_year
FROM songlist
-- Ensure there are no missing or unknown values in the release_year column
WHERE release_year IS NOT NULL
# song artist release_year
# Keep On Loving You REO Speedwagon 1980
# Take It on the Run REO Speedwagon 1981
# <NAME> <NAME> 1981
# Back Off Boogaloo Ringo Starr 1972
# Early 1970 [*] Ringo Starr 1971
# ...
# +
-- Retrieve the song, artist and release_year columns
SELECT song,
artist,
release_year
FROM songlist
-- Ensure there are no missing or unknown values in the release_year column
WHERE release_year IS NOT NULL
-- Arrange the results by the artist and release_year columns
ORDER BY artist,
release_year;
# song artist release_year
# Rockin Into the Night .38 Special 1980
# Hold On Loosely .38 Special 1981
# Caught Up in You .38 Special 1982
# Art For Arts Sake 10cc 1975
# Kryptonite 3 Doors Down 2000
# ...
# -
# ## Exploring classic rock songs - AND/OR
#
# Having familiarized yourself with the `songlist` table, you'll now extend your `WHERE` clause from the previous exercise.
#
# Instructions
#
# 1. Extend the `WHERE` clause so that the results are those with a `release_year` greater than or equal to `1980` and less than or equal to `1990`.
# 2. Update your query to use an `OR` instead of an `AND`.
# +
SELECT song,
artist,
release_year
FROM songlist
-- Retrieve records greater than and including 1980
WHERE release_year >= 1980 AND
-- Also retrieve records up to and including 1990
release_year <= 1990
ORDER BY artist,
release_year;
# song artist release_year
# Rockin Into the Night .38 Special 1980
# Hold On Loosely .38 Special 1981
# Caught Up in You .38 Special 1982
# Take On Me a-ha 1985
# Back In Black AC/DC 1980
# ...
# +
SELECT song,
artist,
release_year
FROM songlist
-- Retrieve records greater than and including 1980
WHERE release_year >= 1980 OR
-- Also retrieve records up to and including 1990
release_year <= 1990
ORDER BY artist,
release_year;
# song artist release_year
# Rockin Into the Night .38 Special 1980
# Hold On Loosely .38 Special 1981
# Caught Up in You .38 Special 1982
# Art For Arts Sake 10cc 1975
# Kryptonite 3 Doors Down 2000
# ...
# -
# ## Using parentheses in your queries
#
# You can use parentheses to make the intention of your code clearer. This becomes very important when using AND and OR clauses, to ensure your queries return the exact subsets you need.
#
# Instructions
#
# 1. Select all artists beginning with `B` who released tracks in `1986`, but also retrieve any records where the `release_year` is greater than `1990`.
# +
SELECT artist,
release_year,
song
FROM songlist
-- Choose the correct artist and specify the release year
WHERE (artist LIKE 'B%' AND release_year = 1986) OR
-- Or return all songs released after 1990
release_year > 1990
-- Order the results
ORDER BY release_year,
artist,
song;
# artist release_year song
# Beastie Boys 1986 (You Gotta) Fight for Your Right (To Party)
# Beastie Boys 1986 No Sleep Till Brooklyn
# Bon Jovi 1986 Livin On A Prayer
# Bon Jovi 1986 Wanted Dead or Alive
# Bon Jovi 1986 You Give Love A Bad Name
# ...
|
introduction_to_sql_server/1_selection_box.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Two dimensional Lagrange interpolation
#
# To extend to the two dimensional case, we start from two sets of distinct points,
# say $(n+1)$ points in the $x$ direction, and $m+1$ points in the $y$ direction,
# in the interval $[0,1]$.
#
# A two dimensional version of the the Lagrange interpolation is then used to constuct
# polynomial approximations of functions of two dimensions. A polynomial from
# $\Omega := [0,1]\times[0,1]$ to $R$ is defined as:
#
# $$
# \mathcal{P}^{n,m} : \text{span}\{p_i(x) p_j(y) \}_{i,j=0}^{n,m}
# $$
#
# and each *multi-index* $(i,j)$ represents a polynomial of order $i+j$, $i$ along
# $x$, and $j$ along $y$. For convenience, we define
#
# $$
# p_{i,j}(x,y) := p_i(x)p_j(y) \qquad i = 0,\ldots,n \qquad j=0,\ldots,m
# $$
#
# Alternatively we can construct a basis starting from the Lagrange polynomials:
#
#
# $$
# l_{i,j}(x,y) := l_i(x) l_j(y) \qquad i = 0,\ldots,n \qquad j=0,\ldots,m
# $$
#
# where we use the same symbol for the polynomials along the two directions for
# notational convenience, even though they are constructed from two different sets
# of points.
#
# We define the *Lagrange interpolation* operator $\mathcal{L}^{n,m}$ the operator
# $$
# \mathcal{L}^{n,m} : C^0([0,1]\times[0,1]) \mapsto \mathcal{P}^{n,m}
# $$
# which satisfies
# $$
# (\mathcal{L}^{n,m} f)(q_{i,j}) = f(q_{i,j}), \qquad i=0,\dots,n, \qquad q_{i,j} := (x_i, y_j)
# $$
#
# In order to prevent indices bugs, we define two different refinement spaces, and two
# different orders, to make sure that no confusion is done along the $x$ and $y$ directions.
#
# We try to be dimension independent, so we define everything starting from tuples of objects.
# The dimension of the tuple defines if we are working in 1, 2, or 3 dimensions.
# +
# %matplotlib inline
from numpy import *
from pylab import *
dim = 2
ref = (301, 311)
n = (4,5)
assert dim == len(ref) == len(n), 'Check your dimensions!'
x = [linspace(0,1,r) for r in ref]
q = [linspace(0,1,r+1) for r in n]
# -
# We start by constructing the one dimensional basis, for each dimension. Once this is done, we
# compute the product $l_i(x)l_j(y)$, for each x and y in the two dimensional list x,
# containing the x and y points. This product can be reshaped to obtain a matrix of the right
# dimension, provided that we did the right thing in broadcasting the dimensions...
# +
Ln = [zeros((n[i]+1, ref[i])) for i in xrange(dim)]
# Construct the lagrange basis in all directions
for d in xrange(dim):
for i in xrange(n[d]+1):
Ln[d][i] = product([ (x[d]-q[d][j])/(q[d][i]-q[d][j]) for j in xrange(n[d]+1) if j != i], axis=0)
# Now construct the product between each basis in
# each coordinate direction, to use for plotting and interpolation
if dim == 2:
L = einsum('ij,kl -> ikjl', Ln[1], Ln[0])
elif dim == 3:
L = einsum('ij,kl,mn -> ikmjln', Ln[2], Ln[1], Ln[0])
elif dim == 1:
L = Ln[0]
else:
raise
print(L.shape)
Lf = reshape(L, (prod(L.shape[:dim]), prod(L.shape[dim:])))
print(Lf.shape)
# +
from mpl_toolkits.mplot3d import Axes3D
X = meshgrid(x[0], x[1])
Q = meshgrid(q[0], q[1])
fig = figure(figsize=[10,10])
ax = fig.gca(projection='3d')
surf2 = ax.plot_surface(X[0], X[1], L[2,2], rstride=5, cstride=5, cmap=cm.coolwarm, alpha=0.5)
scatter = ax.scatter(Q[0], Q[1], zeros_like(Q[0]), c='r', marker='o')
# -
# Now we try to make an interpolation. First we need to evaluate the function at the interpolation
# points. This is done by expressing all possible combinations of the points by meshgrid on q:
# +
Q = meshgrid(q[0], q[1])
def f(x,y):
return 1/(1+100*((x-.5)**2+(y-.5)**2))
def my_plot(f):
fig = figure(figsize=[10,10])
ax = fig.gca(projection='3d')
surf2 = ax.plot_surface(X[0], X[1], f(X[0], X[1]), cmap=cm.coolwarm, alpha=0.8)
show()
fig = figure(figsize=[10,10])
ax = fig.gca(projection='3d')
scatter = ax.scatter(Q[0], Q[1], zeros_like(Q[0]), c='r', marker='o')
F = f(Q[0], Q[1])
interp = Lf.T.dot(F.reshape((-1,))).reshape(X[0].shape)
surf3 = ax.plot_surface(X[0], X[1], interp, alpha=0.4)
show()
my_plot(f)
|
notes/01a_interpolation-2d.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import psycopg2
import pandas as pd
# from sqlalchemy.types import Integer, Text, String, DateTime
import sqlalchemy as s
import matplotlib
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import json
with open("config.json") as config_file:
config = json.load(config_file)
database_connection_string = 'postgres+psycopg2://{}:{}@{}:{}/{}'.format(config['user'], config['password'], config['host'], config['port'], config['database'])
dbschema='augur_data'
engine = s.create_engine(
database_connection_string,
connect_args={'options': '-csearch_path={}'.format(dbschema)})
# -
repo_list = pd.DataFrame()
repo_list_query = f"""
SELECT repo_id, repo_name, repo_path from repo
WHERE repo_name = 'concourse' OR repo_name = 'postfacto' or repo_name = 'clarity' or repo_name = 'gpdb' or
repo_name = 'kpack' or repo_name = 'rabbitmq-server' or repo_name = 'sonobuoy'
or repo_name = 'spring-framework' or repo_name = 'gporca-pipeline-misc' or repo_name = 'photon';
"""
repo_list = pd.read_sql_query(repo_list_query, con=engine)
print(repo_list)
# +
## List of repository IDs for the report
repo_dict = {25432} #scidb
# +
#from datetime import date
import datetime
current = datetime.date.today()
today = "'" + str(current) + "'"
print(today)
first_current = current.replace(day=1)
last_month = first_current - datetime.timedelta(days=1)
end_date = "'" + str(last_month) + "'"
print(end_date)
print
start = last_month - datetime.timedelta(days=365)
year_ago = "'" + str(start) + "'"
print(year_ago)
# +
# Monthly PRs closed
pr_monthDF = pd.DataFrame()
for value in repo_dict:
pr_monthquery = f"""
SELECT
*
FROM
(
SELECT
date_part( 'year', month :: DATE ) AS YEAR,
date_part( 'month', month :: DATE ) AS month
FROM
( SELECT * FROM ( SELECT month :: DATE FROM generate_series ( TIMESTAMP {year_ago}, TIMESTAMP {end_date}, INTERVAL '1 month' ) month ) d ) x
) y
LEFT OUTER JOIN (
SELECT
repo_id,
repo_name,
repo_group,
date_part( 'year', pr_created_at :: DATE ) AS YEAR,
date_part( 'month', pr_created_at :: DATE ) AS month,
COUNT ( pr_src_id ) AS total_prs_open_closed
FROM
(
SELECT
repo.repo_id AS repo_id,
repo.repo_name AS repo_name,
repo_groups.rg_name AS repo_group,
pull_requests.pr_created_at AS pr_created_at,
pull_requests.pr_closed_at AS pr_closed_at,
pull_requests.pr_src_id AS pr_src_id
FROM
repo,
repo_groups,
pull_requests
WHERE
repo.repo_group_id = repo_groups.repo_group_id
AND repo.repo_id = pull_requests.repo_id
AND repo.repo_id = {value}
AND pull_requests.pr_src_state = 'closed'
) L
GROUP BY
L.repo_id,
L.repo_name,
L.repo_group,
YEAR,
month
ORDER BY
repo_id,
YEAR,
month
) T USING ( month, YEAR )
ORDER BY
YEAR,
month;
"""
pr_monthDFa = pd.read_sql_query(pr_monthquery, con=engine)
repo_id = value
pr_monthDFa[['repo_id']] = pr_monthDFa[['repo_id']].fillna(value=repo_id)
# Hack to fill in repo_name where there are nan's
get_repo_name_query = f"""
SELECT repo_name from repo where repo_id = {value};
"""
get_repo_name = pd.read_sql_query(get_repo_name_query, con=engine)
repo_name = get_repo_name.repo_name[0]
pr_monthDFa[['repo_name']] = pr_monthDFa[['repo_name']].fillna(value=repo_name)
if not pr_monthDF.empty:
pr_monthDF = pd.concat([pr_monthDF, pr_monthDFa])
else:
pr_monthDF = pr_monthDFa
pr_monthDF.set_index('repo_id', 'year', 'month')
pr_monthDF.set_index('repo_id', 'year', 'month')
pr_monthDF[['total_prs_open_closed']] = pr_monthDF[['total_prs_open_closed']].fillna(0)
pr_monthDF['year'] = pr_monthDF['year'].map(int)
pr_monthDF['month'] = pr_monthDF['month'].map(int)
pr_monthDF['month'] = pr_monthDF['month'].apply('{:0>2}'.format)
pr_monthDF['yearmonth'] = pr_monthDF['year'].map(str)+ pr_monthDF['month'].map(str)
# -
pr_monthDF
pr_sustainDF = pd.DataFrame()
pr_sustainDF['yearmonth'] = pr_monthDF['yearmonth']
pr_sustainDF['repo_name'] = pr_monthDF['repo_name']
pr_sustainDF['repo_id'] = pr_monthDF['repo_id']
pr_sustainDF['closed_total'] = pr_monthDF['total_prs_open_closed']
print(pr_sustainDF)
# +
# Monthly PRs ALL - removed where pull_requests.pr_src_state = 'closed'
pr_monthDF = pd.DataFrame()
for value in repo_dict:
pr_monthquery = f"""
SELECT
*
FROM
(
SELECT
date_part( 'year', month :: DATE ) AS YEAR,
date_part( 'month', month :: DATE ) AS month
FROM
( SELECT * FROM ( SELECT month :: DATE FROM generate_series ( TIMESTAMP {year_ago}, TIMESTAMP {end_date}, INTERVAL '1 month' ) month ) d ) x
) y
LEFT OUTER JOIN (
SELECT
repo_id,
repo_name,
repo_group,
date_part( 'year', pr_created_at :: DATE ) AS YEAR,
date_part( 'month', pr_created_at :: DATE ) AS month,
COUNT ( pr_src_id ) AS total_prs_open_closed
FROM
(
SELECT
repo.repo_id AS repo_id,
repo.repo_name AS repo_name,
repo_groups.rg_name AS repo_group,
pull_requests.pr_created_at AS pr_created_at,
pull_requests.pr_closed_at AS pr_closed_at,
pull_requests.pr_src_id AS pr_src_id
FROM
repo,
repo_groups,
pull_requests
WHERE
repo.repo_group_id = repo_groups.repo_group_id
AND repo.repo_id = pull_requests.repo_id
AND repo.repo_id = {value}
) L
GROUP BY
L.repo_id,
L.repo_name,
L.repo_group,
YEAR,
month
ORDER BY
repo_id,
YEAR,
month
) T USING ( month, YEAR )
ORDER BY
YEAR,
month;
"""
pr_monthDFa = pd.read_sql_query(pr_monthquery, con=engine)
repo_id = value
# pr_monthDFa[['wk_avg_hours_to_close', 'wk_avg_days_to_close', 'total_prs_open_closed' ]] = pr_monthDFa[['wk_avg_hours_to_close', 'wk_avg_days_to_close', 'total_prs_open_closed' ]].fillna(value=0)
pr_monthDFa[['repo_id']] = pr_monthDFa[['repo_id']].fillna(value=repo_id)
# Hack to fill in repo_name where there are nan's
get_repo_name_query = f"""
SELECT repo_name from repo where repo_id = {value};
"""
get_repo_name = pd.read_sql_query(get_repo_name_query, con=engine)
repo_name = get_repo_name.repo_name[0]
pr_monthDFa[['repo_name']] = pr_monthDFa[['repo_name']].fillna(value=repo_name)
if not pr_monthDF.empty:
pr_monthDF = pd.concat([pr_monthDF, pr_monthDFa])
else:
pr_monthDF = pr_monthDFa
pr_monthDF.set_index('repo_id', 'year', 'month')
pr_monthDF.set_index('repo_id', 'year', 'month')
pr_monthDF[['total_prs_open_closed']] = pr_monthDF[['total_prs_open_closed']].fillna(0)
# -
pr_monthDF['total_prs_open_closed'].sum()
pr_sustainDF['all_total'] = pr_monthDF['total_prs_open_closed']
pr_sustainDF['diff'] = pr_sustainDF['all_total'] - pr_sustainDF['closed_total']
pr_sustainDF['diff_per'] = pr_sustainDF['diff'] / pr_sustainDF['all_total']
print(pr_sustainDF,'\n')
#print((pr_sustainDF['diff']).describe())
today = datetime.date.today()
current_year_month = str(today.year) + '-' + '{:02d}'.format(today.month)
print(current_year_month)
# +
import warnings
warnings.simplefilter("ignore")
pr_sustainDF['repo_id'] = pr_sustainDF['repo_id'].map(int)
#pr_sustainDF['yearmonth'] = pr_sustainDF['yearmonth'].map(str)
pr_sustainDF.set_index('repo_id', 'yearmonth')
sns.set_style('ticks')
sns.set(style="whitegrid", font_scale=2)
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(24, 8)
risk_num = 0
h = 1
for diff_per in pr_sustainDF['diff_per']:
if (diff_per > 0.10 and h >=6):
risk_num+=1
h+=1
print(risk_num)
title = pr_sustainDF['repo_name'][0] + "\nSustains and Keeps up with Contributions Metric:"
if risk_num >= 1:
title += " AT RISK\n" + str(risk_num) + " month(s) with > 10% of total pull requests not closed"
title_color = 'firebrick'
else:
title += " Healthy\nMore than 90% of total pull requests are closed each month."
title_color = 'forestgreen'
plottermonth = sns.lineplot(x='yearmonth', y='all_total', data=pr_sustainDF, sort=False, color='black', label='Total', linewidth=2.5)
plottermonth = sns.lineplot(x='yearmonth', y='closed_total', data=pr_sustainDF, sort=False, color='green', label='Closed', linewidth=2.5, linestyle='dashed').set_title(title, fontsize=30, color=title_color)
plottermonthlabels = ax.set_xticklabels(pr_sustainDF['yearmonth'])
plottermonthlabels = ax.set_ylabel('Number of PRs')
plottermonthlabels = ax.set_xlabel('Year Month\n\nInterpretation: Healthy projects will have little or no gap. A large or increasing gap requires attention.')
today = datetime.date.today()
current_year_month = str(today.year) + '-' + '{:02d}'.format(today.month)
filename = 'output/sustains_pr_' + repo_name + "_" + current_year_month + '.png'
fig.savefig(filename, bbox_inches='tight')
# -
print(sum(pr_monthDF.total_prs_open_closed))
|
inquiry-notebooks/project_health.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assess predictions on multiclass wine data with a DNN model
# This notebook demonstrates the use of the `responsibleai` API to assess a DNN pytorch model trained on the multiclass wine dataset. It walks through the API calls necessary to create a widget with model analysis insights, then guides a visual analysis of the model.
# * [Launch Responsible AI Toolbox](#Launch-Responsible-AI-Toolbox)
# * [Train a DNN Model](#Train-a-DNN-Model)
# * [Create Model and Data Insights](#Create-Model-and-Data-Insights)
# * [Assess Your Model](#Assess-Your-Model)
# * [Aggregate Analysis](#Aggregate-Analysis)
# * [Individual Analysis](#Individual-Analysis)
# ## Launch Responsible AI Toolbox
# The following section examines the code necessary to create datasets and a model. It then generates insights using the `responsibleai` API that can be visually analyzed.
# ### Train a DNN Model
# *The following section can be skipped. It loads a dataset and trains a model for illustrative purposes.*
# +
import sklearn
import zipfile
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.datasets import load_wine
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
import pandas as pd
from lightgbm import LGBMClassifier
# -
# #### Load the wine data
wine = load_wine()
X = wine['data']
y = wine['target']
classes = wine['target_names']
feature_names = wine['feature_names']
# Split data into train and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
# #### Define a simple pytorch classification model.
# +
def pytorch_net(numCols, numClasses=3):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.norm = nn.LayerNorm(numCols)
self.fc1 = nn.Linear(numCols, 100)
self.fc2 = nn.Dropout(p=0.2)
self.fc3 = nn.Linear(100, numClasses)
self.output = nn.Softmax()
def forward(self, X):
X = self.norm(X)
X = F.relu(self.fc1(X))
X = self.fc2(X)
X = self.fc3(X)
return self.output(X)
return Net()
torch_X = torch.Tensor(X_train).float()
torch_y = torch.Tensor(y_train).long()
# Create network structure
net = pytorch_net(X_train.shape[1])
# -
# #### Train the pytorch DNN classifier on the training data.
# +
# Train the model
epochs = 12
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
for epoch in range(epochs):
optimizer.zero_grad()
out = net(torch_X)
loss = criterion(out, torch_y)
loss.backward()
optimizer.step()
print('epoch: ', epoch, ' loss: ', loss.data.item())
# -
# Wrap the model with scikit-learn style predict/predict_proba functions using the wrap_model function from https://github.com/microsoft/ml-wrappers to make it compatible with RAIInsights and the ResponsibleAIDashboard
from ml_wrappers import wrap_model, DatasetWrapper
model = wrap_model(net, DatasetWrapper(X_train), model_task='classification')
# ### Create Model and Data Insights
from raiwidgets import ResponsibleAIDashboard
from responsibleai import RAIInsights
# To use Responsible AI Toolbox, initialize a RAIInsights object upon which different components can be loaded.
#
# RAIInsights accepts the model, the full dataset, the test dataset, the target feature string, the task type string, and a list of strings of categorical feature names as its arguments.
# +
target_feature = 'wine'
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
X_train[target_feature] = y_train
X_test[target_feature] = y_test
rai_insights = RAIInsights(model, X_train, X_test, target_feature, 'classification')
# -
# Add the components of the toolbox that are focused on model assessment.
# Interpretability
rai_insights.explainer.add()
# Error Analysis
rai_insights.error_analysis.add()
# Once all the desired components have been loaded, compute insights on the test set.
rai_insights.compute()
# Finally, visualize and explore the model insights. Use the resulting widget or follow the link to view this in a new tab.
ResponsibleAIDashboard(rai_insights)
|
notebooks/responsibleaidashboard/responsibleaidashboard-multiclass-dnn-model-debugging.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Spam Classification for Text Messages</h1>
# <b><NAME></b><br>
# <h2>Table of Contents</h2>
# <ol>
# <a href = '#intro'><li><b>Introduction</b></li></a>
# <a href = '#lib'><li><b>Libraries</b></li></a>
# <a href = '#load'><li><b>Loading Data</b></li></a>
# <a href = '#wordcloud'><li><b>Word Cloud</b></li></a>
# <a href = '#model1'><li><b>CountVectorizer + Naive Bayes</b></li></a>
# <a href = '#model2'><li><b>Term Frequency - Inverse Document Frequency</b></li></a>
# <a href = '#characters'><li><b>Number of Characters</b></li></a>
# <a href = '#digits'><li><b>Number of Digits</b></li></a>
# <a href = '#nonwords'><li><b>Non-Words</b></li></a>
# <a href = '#model3'><li><b>Support Vector Machine + Number of Characters Feature</b></li></a>
# <a href = '#model4'><li><b>Logistic Regression + Number of Digits Feature</b></li></a>
# <a href = '#model5'><li><b>Logistic Regression + Non-Words Feature</b></li></a>
# <a href = '#conclusion'><li><b>Conclusion</b></li></a>
# </ol>
# <h2>1. Introduction</h2>
# <p>
# If you have an email account or a cell phone, I would say the probability that you have already received a spam message goes around 100%.
# </p>
# <p>
# Although it seems pretty easy for a human to tell if it's spam or not, it doesn't seem so easy for the algorithms. We all had already rescued important messages from the trash box and still had to deal with inbox spam.
# </p>
# <p>
# It happens because spam classification requires natural language processing: a natural task for humans, a complex task for algorithms.
# </p>
# <p>
# In this project, we are going to dive into a dataset containing several text messages previously classified as spam or not, and our job is to train a model capable of identifying the patterns with the highest accuracy.
# </p>
# <p>
# Some questions guiding us through this project:
# <li>Which would be the best approach to correctly classify spam: <b>bag-of-words</b> or evaluating <b>words' importance</b>?</li>
# <li>Are there significant differences between spam and not spam messages, considering the <b>length of the text</b>?</li>
# <li>And what about the <b>number of digits</b> in the messages?</li>
# <li>Are there differences between spam and not spam if we analyze the number of <b>non-words</b> characters?</li>
# <li>By using <b>n-grams</b>, can we improve our model's performance?</li>
# </p>
# <p>
# This dataset <i>spam.csv</i> was made available by the <a href = "https://umich.edu/">University of Michigan</a>, through its <a href = "https://www.coursera.org/learn/python-text-mining">Applied Text Mining in Python</a> course made available through the Coursera platform.
# </p>
# <h2>2. Libraries</h2>
# +
import pandas as pd
import numpy as np
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, plot_confusion_matrix
import re
import warnings
warnings.filterwarnings('ignore')
# -
# <h2>3. Loading Data</h2>
# +
# Reading the file:
spam_data = pd.read_csv('spam.csv')
# Changing label to 1-0 format (1 for spam, 0 for not spam):
spam_data['target'] = np.where(spam_data['target']=='spam',1,0)
spam_data.head(10)
# -
# Checking for number of observations:
print('This dataset contains {} labeled text messages.'.format(spam_data.shape[0]))
# Checking for percentage of spam observations:
spam_perc = (spam_data['target'].mean())*100
print('{}% of the text messages are labeled as spam.'.format(round(spam_perc, 1)))
# <p>
# Clearly, we have an unbalanced dataset (fewer spam observations in comparison to not spam text messages).
# </p>
# <p>
# Let's check some random text messages to get a sense of their content:
# </p>
# +
# Separating spam from not spam data:
not_spam_text = spam_data.text[spam_data['target'] == 0].reset_index(drop = True)
spam_text = spam_data.text[spam_data['target'] == 1].reset_index(drop = True)
# Random int values:
not_spam_rand = np.random.randint(low = 0, high = len(not_spam_text), size = 3)
spam_rand = np.random.randint(low = 0, high = len(spam_text), size = 3)
print('\nNOT SPAM SAMPLES:')
for i in not_spam_rand:
print(str(not_spam_text.iloc[i])+'\n')
print('SPAM SAMPLES:')
for i in spam_rand:
print(str(spam_text.iloc[i])+'\n')
# -
# <h2>4. Word Cloud</h2>
# <p>
# Let's try word cloud visualization for spam and not spam to see if we can indentify important differences between these two types of text messages.
# </p>
# <p>
# <b>Not Spam</b> word cloud:
# </p>
# +
# Joining text messages classified as not spam:
text_nspam = " ".join(text.lower() for text in not_spam_text)
# Creating and generating a word cloud image for not spam text messages:
stopwords = set(STOPWORDS)
wordcloud = WordCloud(stopwords=stopwords, background_color = 'white', colormap = 'winter').generate(text_nspam)
# Displaying the image:
plt.figure(figsize=[10,7])
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.title('Word Cloud - Not Spam Text Messages')
plt.show()
# -
# <p>
# <b>Spam</b> word cloud:
# </p>
# +
# Joining text messages classified as not spam:
text_spam = " ".join(text.lower() for text in spam_text)
# Creating and generating a word cloud image for not spam text messages:
stopwords = set(STOPWORDS)
wordcloud = WordCloud(stopwords=stopwords, background_color = 'white', colormap = 'autumn').generate(text_spam)
# Displaying the image:
plt.figure(figsize=[10,7])
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.title('Word Cloud - Spam Text Messages')
plt.show()
# -
# <p>
# We can see some significant differences between the two types of messages.
# </p>
# <p>
# While <b>not spam</b> messages tend to present some classic shortened forms of words like "u", "ur", "lt", "gt", and words like "love", "ok", and "will", <b>spam</b> messages tend to present a call for action like "call", "call now", "reply", offering some advantages as we can see through the outstanding of the word "free".
# </p>
# <h2>5. CountVectorizer + Naive Bayes</h2>
# <p>
# As a first approach, we will simply create a vector representing the occurrence of the words (bag-of-words), and then train and evaluate a Naive Bayes model for predicting whether a text message is spam or not.
# </p>
# +
# Splitting data into train and test sets:
X_train, X_test, y_train, y_test = train_test_split(spam_data['text'], spam_data['target'],
test_size = .25, random_state=0)
# Creating bag-of-words vector, considering only unigrams:
vect_1 = CountVectorizer(ngram_range = (1,1)).fit(X_train)
# Extracting tokens to check which ones are the longest:
X_train_tokens = vect_1.get_feature_names()
sorted_tokens = sorted(X_train_tokens, key = len, reverse = True)
print('Longest tokens in the bag-of-words:\n')
for i in range(5):
print(str(i+1)+'. '+str(sorted_tokens[i])+'\n')
# +
# Transforming X_train and X_test to vectorized matrix of words:
X_train_vect_1 = vect_1.transform(X_train)
X_test_vect_1 = vect_1.transform(X_test)
# Instatiating Naïve Bayes model:
multinom_nb = MultinomialNB(alpha = 0.1)
# Fitting model:
multinom_nb.fit(X_train_vect_1, y_train)
# Predictions for X_test:
y_pred_1 = multinom_nb.predict(X_test_vect_1)
# Evalutaing model on test set:
roc_score_1 = roc_auc_score(y_test, y_pred_1)
print('First Naive Bayes model achieved a Roc AUC score of {}.'.format(round(roc_score_1, 2)))
# -
# <p>
# Our first model achieved an excellent overall score over the test set.
# </p>
# <p>
# For this Multinomial Naive Bayes model, we defined an alpha parameter of 0.1 to smooth the probability of a word that hasn't happened yet.
# </p>
# <p>
# Let's create a confusion matrix to see how well our model did considering the different classes:
# </p>
# Creating confusion matrix:
class_names = ['not spam', 'spam']
disp = plot_confusion_matrix(multinom_nb, X_test_vect_1, y_test,
display_labels = class_names,
cmap=plt.cm.PuBu)
disp.ax_.set_title('Confusion Matrix for the MultinomialNB Model')
plt.show()
# <p>
# As we can see, the first model did well for both classes (spam and not spam) even with the unbalanced dataset.
# </p>
# <p>
# It did especially well when classifying <i>not spam</i> text messages (all the text instances were correctly classified), but we would still have some spam messages not being classified as so, even though the majority of spam messages were correctly identified.
# </p>
# <p>
# Next, we are going to try some different approaches and models to see if we can get better evaluation scores.
# </p>
# <h2>6. Term Frequency - Inverse Document Frequency</h2>
# <p>
# Istead of using the bag-of-words approach for the text's tokens, we are going to use the <b>Tf-idf</b> approach. It allows us to weight the terms based on how important they are to a document, instead of just counting occurrences.
# </p>
# <p>
# Features with low Tf-idf are either commonly used across all documents (text messages, in this case), or rarely used.
# </p>
# +
# Fitting Tfidf vector to X_train:
tfidf_vect = TfidfVectorizer().fit(X_train)
X_train_tfidf = tfidf_vect.transform(X_train)
# Getting list of feature names:
feat_names = tfidf_vect.get_feature_names()
# Getting list of tf-idf values:
tfidf_list = X_train_tfidf.max(0).toarray()[0]
# Creating a list for features and tf-idf scores:
tfidf_feat_scores = list()
for i in range(len(tfidf_list)):
tfidf_feat_scores.append((tfidf_list[i], feat_names[i]))
tfidf_asc = sorted(tfidf_feat_scores)
tfidf_desc = sorted(tfidf_feat_scores, reverse = True)
print('Most important words:\n')
for i in range(5):
print(str(i+1)+'. '+str(tfidf_desc[i][1])+'\n')
print('\nLess important words:\n')
for i in range(5):
print(str(i+1)+'. '+str(tfidf_asc[i][1])+'\n')
# -
# <p>
# Next step, we are going to train another Naive Bayes model, this time with the tf-idf score, instead of word occurences.
# </p>
# <p>
# Also, we are going to ignore terms with document frequency lower than 3:
# </p>
# +
# Creating tf-idf vector for X_train with min_df = 3:
tfidf_vec_2 = TfidfVectorizer(min_df = 3).fit(X_train)
# Transforming X_train and X_test to tf-idf matrix:
X_train_tfidf_2 = tfidf_vec_2.transform(X_train)
X_test_tfidf_2 = tfidf_vec_2.transform(X_test)
# Instatiating Naïve Bayes model:
multinom_nb_2 = MultinomialNB(alpha = 0.1)
# Fitting the model:
multinom_nb_2.fit(X_train_tfidf_2, y_train)
# Making predictions for X_train:
y_pred_2 = multinom_nb_2.predict(X_test_tfidf_2)
# Evaluating the model on train data:
roc_score_2 = roc_auc_score(y_test, y_pred_2)
print('Second Naive Bayes model achieved a Roc AUC score of {}.'.format(round(roc_score_2, 2)))
# -
# Creating confusion matrix:
disp = plot_confusion_matrix(multinom_nb_2, X_test_tfidf_2, y_test,
display_labels = class_names,
cmap=plt.cm.PuBu)
disp.ax_.set_title('Confusion Matrix for the 2nd MultinomialNB Model')
plt.show()
# <p>
# We can see that this second approach didn't come up with better results in comparison to the first: more spam text messages were missclassified.
# </p>
# <p>
# Next, we are going to check if there are significant differences between spam and not spam messages, considering <b>number of characters</b> and <b>digits</b> and <b>non-words</b>.
# </p>
# <h2>7. Number of Characters</h2>
# <p>
# We are going to compute the number of characters (document length) for each document and check for differences between the average length for spam and not spam messages:
# </p>
# +
# Separating spam_data into spam and not spam df:
df_list = [spam_data.text[spam_data['target'] == 0].reset_index(drop = True),
spam_data.text[spam_data['target'] == 1].reset_index(drop = True)]
# Creating a list for avg text length:
avg_text_len = list()
# Calculating average text length for spam and not spam texts:
for df in df_list:
text_len = list()
for i in range(0, len(df)):
text_len.append(len(df.iloc[i]))
avg_text_len.append(np.mean(text_len))
print('Not Spam messages have an average length of {}.\n'.format(round(avg_text_len[0], 2)))
print('Spam messages have an average length of {}.'.format(round(avg_text_len[1], 2)))
# -
# <p>
# We can see that the average length of text messages classified as spam is approximately twice the average length for not spam messages.
# </p>
# <p>
# It makes sense, considering that text messages are commonly used for quick information exchange, while spam messages intend to convince people to buy some product, or maybe to offer a service, and that could take more characters to be done.
# </p>
# <p>
# With that in mind, we are going to add the text messages' length as a feature in order to improve the model score.
# </p>
# <h2>8. Number of Digits</h2>
# <p>
# The same logic used before for number of characters:
# </p>
# +
# Creating a list for avg text length:
avg_num_dig = list()
# Calculating average number of digits for spam and not spam texts:
for df in df_list:
digit_count = 0
for i in range(0, len(df)):
for c in df.iloc[i]:
if c.isdigit():
digit_count += 1
avg_digit = digit_count / len(df)
avg_num_dig.append(avg_digit)
print('Not Spam messages have an average number of digits of {}.\n'.format(round(avg_num_dig[0], 2)))
print('Spam messages have an average number of digits of {}.'.format(round(avg_num_dig[1], 2)))
# -
# <p>
# Again, we can see a significant difference between spam and not spam messages, considering the average number of digits. We are also adding number of digits as a feature, and see if we can improve the model score.
# </p>
# <p>
# Next, we are going to check for differences by observing the number of non-word characters - anything different from alphanumeric characters.
# </p>
# <h2>9. Non-Words</h2>
# +
# Creating a list for avg non-word:
avg_non_word = list()
# Calculating average number of non-words for spam and not spam texts:
for df in df_list:
non_word_list = list()
for i in range(0, len(df)):
non_word_len = len(re.findall(r'\W', df.iloc[i]))
non_word_list.append(non_word_len)
avg_non_word_char = np.mean(non_word_list)
avg_non_word.append(avg_non_word_char)
print('Not Spam messages have an average non-word characters of {}.\n'.format(round(avg_non_word[0], 2)))
print('Spam messages have an average non-word characters of {}.'.format(round(avg_non_word[1], 2)))
# -
# <p>
# Although the difference doesn't seem to be as significative as the number of characters and digits, the average number of non-word characters still presents a significant difference for spam and not spam messages.
# </p>
# <p>
# We are adding this information as a new feature in order to improve our model performance.
# </p>
# <p>
# We are creating the function <b>add_feature()</b> to help us adding the new features together with the word matrix:
# </p>
def add_feature(X, feature_to_add):
"""
Returns sparse feature matrix with added feature.
feature_to_add can also be a list of features.
"""
from scipy.sparse import csr_matrix, hstack
return hstack([X, csr_matrix(feature_to_add).T], 'csr')
# <h2>10. Support Vector Machine + Number of Characters Feature</h2>
# <p>
# We are not going to add the new features at once. Let's try different models and features to see what we can get.
# </p>
# <p>
# First, we are adding the number of characters as a new feature, and training a SVC model. From now on, we are setting the minimum of 5 occurences for not ignoring the term:
# </p>
# +
# Creating tf-idf vector for X_train with min_df = 5:
tfidf_vec_3 = TfidfVectorizer(min_df = 5). fit(X_train)
# Transforming X_train and X_test to tf-idf matrix:
X_train_tfidf_3 = tfidf_vec_3.transform(X_train)
X_test_tfidf_3 = tfidf_vec_3.transform(X_test)
# Creating a list of documents length both for train and set sets:
# train:
train_text_len = list()
for i in range(0, len(X_train)):
train_text_len.append(len(X_train.iloc[i]))
# test:
test_text_len = list()
for i in range(0, len(X_test)):
test_text_len.append(len(X_test.iloc[i]))
# Adding length of document as a feature:
X_train_3 = add_feature(X_train_tfidf_3, train_text_len)
X_test_3 = add_feature(X_test_tfidf_3, test_text_len)
# Instatiating SVC model:
svc_model_3 = SVC(C = 10000)
# Fitting model to train data:
svc_model_3.fit(X_train_3, y_train)
# Predicting for test set:
y_pred_3 = svc_model_3.predict(X_test_3)
# Evaluating on test set:
roc_score_3 = roc_auc_score(y_test, y_pred_3)
print('Support Vector Machine model achieved a Roc AUC score of {}.'.format(round(roc_score_3, 2)))
# Confusion matrix:
disp = plot_confusion_matrix(svc_model_3, X_test_3, y_test,
display_labels = class_names,
cmap=plt.cm.PuBu)
disp.ax_.set_title('Confusion Matrix for SVC Model')
plt.show()
# -
# <p>
# We achieved a good perfomance, but still not better than our first one. For the first time we had some not spam messages missclassified.
# </p>
# <h2>11. Logistic Regression + Number of Digits Feature</h2>
# <p>
# Besides the <i>min_df</i> parameter set to 5, we are setting word <i>n-grams</i> from 1 to 3.
# </p>
# <p>
# We are now training a Logistic Regression Model with regularization set to 100, including the number of digits feature:
# </p>
# +
# Creating tf-idf vector for X_train with min_df = 5 and 1-3 n-grams:
tfidf_vec_4 = TfidfVectorizer(min_df = 5, ngram_range = (1,3)). fit(X_train)
# Transforming X_train and X_test to tf-idf matrix:
X_train_tfidf_4 = tfidf_vec_4.transform(X_train)
X_test_tfidf_4 = tfidf_vec_4.transform(X_test)
# Creating a list of number of digits per document for train and set sets:
# train:
digit_count_train = list()
for i in range(0, len(X_train)):
doc_digit_count = 0
for c in X_train.iloc[i]:
if c.isdigit():
doc_digit_count += 1
digit_count_train.append(doc_digit_count)
# test:
digit_count_test = list()
for i in range(0, len(X_test)):
doc_digit_count = 0
for c in X_test.iloc[i]:
if c.isdigit():
doc_digit_count += 1
digit_count_test.append(doc_digit_count)
# Adding length of document as a feature:
X_train_4 = add_feature(X_train_tfidf_4, train_text_len)
X_test_4 = add_feature(X_test_tfidf_4, test_text_len)
# Adding number of digits as a feature:
X_train_4 = add_feature(X_train_4, digit_count_train)
X_test_4 = add_feature(X_test_4, digit_count_test)
# Instatiating Logistic Regression model:
lr_model_4 = LogisticRegression(C = 100)
# Fitting model to train data:
lr_model_4.fit(X_train_4, y_train)
# Predicting for test set:
y_pred_4 = lr_model_4.predict(X_test_4)
# Evaluating on test set:
roc_score_4 = roc_auc_score(y_test, y_pred_4)
print('First Logistic Regression model achieved a Roc AUC score of {}.'.format(round(roc_score_4, 2)))
# Confusion matrix:
disp = plot_confusion_matrix(lr_model_4, X_test_4, y_test,
display_labels = class_names,
cmap=plt.cm.PuBu)
disp.ax_.set_title('Confusion Matrix for First Logistic Regression Model')
plt.show()
# -
# <p>
# Finally, we achieved some improvement while classifying spam messages. In the other hand, we still have some mistakes when classifying not spam messages, which didn't happen in our first Naive Bayes model.
# </p>
# <h2>12. Logistic Regression + Non-Words Feature</h2>
# <p>
# For our last attempt, we are going back to the <i>CountVectorizer</i> approach, since it gave us better performance given the first model we have tried.
# </p>
# <p>
# We are changing <i>min_df</i> parameter to 3, <i>n-grams</i> will be kept from 1 to 3, and we are are also including the non-word feature:
# </p>
# +
# Creating vector for X_train with min_df = 3 and 1-3 n-grams:
vect_5 = CountVectorizer(min_df = 3, ngram_range = (1,3)). fit(X_train)
# Transforming X_train and X_test to tf-idf matrix:
X_train_vect_5 = vect_5.transform(X_train)
X_test_vect_5 = vect_5.transform(X_test)
# Creating a list of number of non-word characters per document for train and set sets:
# train:
non_word_train = list()
for i in range(0, len(X_train)):
non_word_len = len(re.findall(r'\W', X_train.iloc[i]))
non_word_train.append(non_word_len)
# test:
non_word_test = list()
for i in range(0, len(X_test)):
non_word_len = len(re.findall(r'\W', X_test.iloc[i]))
non_word_test.append(non_word_len)
# Adding length of document as a feature:
X_train_5 = add_feature(X_train_vect_5, train_text_len)
X_test_5 = add_feature(X_test_vect_5, test_text_len)
# Adding number of digits as a feature:
X_train_5 = add_feature(X_train_5, digit_count_train)
X_test_5 = add_feature(X_test_5, digit_count_test)
# Adding number of non-word characteres as a feature:
X_train_5 = add_feature(X_train_5, non_word_train)
X_test_5 = add_feature(X_test_5, non_word_test)
# Instatiating Logistic Regression model:
lr_model_5 = LogisticRegression(C = 100)
# Fitting model to train data:
lr_model_5.fit(X_train_5, y_train)
# Predicting for test set:
y_pred_5 = lr_model_5.predict(X_test_5)
# Evaluating on test set:
roc_score_5 = roc_auc_score(y_test, y_pred_5)
print('Second Logistic Regression model achieved a Roc AUC score of {}.'.format(round(roc_score_5, 2)))
# Confusion matrix:
disp = plot_confusion_matrix(lr_model_5, X_test_5, y_test,
display_labels = class_names,
cmap=plt.cm.PuBu)
disp.ax_.set_title('Confusion Matrix for Second Logistic Regression Model')
plt.show()
# -
# <p>
# We could improve our first Logistic Regression model by reducing in one its mistakes while classifying not spam messages.
# </p>
# <h2>13. Conclusion</h2>
# <p>
# Overall, the last Logistic Regression model would be our best model, since the mistakes were not concentrated over one single label. However, since the model would be responsible for filtering messages according to its classification, our first Naive Bayes model would be considered as the most appropriate for the job.
# </p>
# <p>
# If, in one hand, some spam messages would still achieve its recipient, in the other hand, not spam messages would not be blocked, and the reciever would not take the risk of missing important regular messages.
# </p>
# <p>
# Text can be worked in several ways, by setting values for n-grams, creating new features, counting occurences or using word importance scores, besides the different approaches when applying different machine learning algorithms. In this project, we could see that the simplest approach ended up achieving the best result.
# </p>
|
Spam-Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Batch Processing Example
#
# In this example, we use the `micasense.imageset` class to load a set of directories of images into a list of `micasense.capture` objects, and we iterate over that list saving out each image as an aligned stack of images as separate bands in a single tiff file each. Next, we use the metadata from the original captures to write out a log file of the captures and their locations. Finally, we use `exiftool` from the command line to inject that metadata into the processed images, allowing us to stitch those images using commercial software such as Pix4D or Agisoft.
#
# Note: for this example to work, the images must have a valid RigRelatives tag. This requires RedEdge version of at least 3.4.0 or any version of Altum. If your images don't meet that spec, you can also follow this support ticket to add the RigRelatives tag to them: https://support.micasense.com/hc/en-us/articles/360006368574-Modifying-older-collections-for-Pix4Dfields-support
# %load_ext autoreload
# %autoreload 2
# ## Load Images into ImageSet
# +
from ipywidgets import FloatProgress, Layout
from IPython.display import display
import micasense.imageset as imageset
import micasense.capture as capture
import os, glob
import multiprocessing
panelNames = None
useDLS = True
imagePath = os.path.expanduser(os.path.join('~','Downloads','RedEdgeImageSet','0000SET'))
panelNames = glob.glob(os.path.join(imagePath,'000','IMG_0000_*.tif'))
panelCap = capture.Capture.from_filelist(panelNames)
outputPath = os.path.join(imagePath,'..','stacks')
thumbnailPath = os.path.join(outputPath, '..', 'thumbnails')
overwrite = False # usefult to set to false to continue interrupted processing
generateThumbnails = True
# Allow this code to align both radiance and reflectance images; bu excluding
# a definition for panelNames above, radiance images will be used
# For panel images, efforts will be made to automatically extract the panel information
# but if the panel/firmware is before Altum 1.3.5, RedEdge 5.1.7 the panel reflectance
# will need to be set in the panel_reflectance_by_band variable.
# Note: radiance images will not be used to properly create NDVI/NDRE images below.
if panelNames is not None:
panelCap = capture.Capture.from_filelist(panelNames)
else:
panelCap = None
if panelCap is not None:
if panelCap.panel_albedo() is not None:
panel_reflectance_by_band = panelCap.panel_albedo()
else:
panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] #RedEdge band_index order
panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
img_type = "reflectance"
else:
if useDLS:
img_type='reflectance'
else:
img_type = "radiance"
# +
## This progress widget is used for display of the long-running process
f = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description="Loading")
display(f)
def update_f(val):
if (val - f.value) > 0.005 or val == 1: #reduces cpu usage from updating the progressbar by 10x
f.value=val
# %time imgset = imageset.ImageSet.from_directory(imagePath, progress_callback=update_f)
update_f(1.0)
# +
import math
import numpy as np
from mapboxgl.viz import *
from mapboxgl.utils import df_to_geojson, create_radius_stops, scale_between
from mapboxgl.utils import create_color_stops
import pandas as pd
data, columns = imgset.as_nested_lists()
df = pd.DataFrame.from_records(data, index='timestamp', columns=columns)
#Insert your mapbox token here
token = '<KEY>'
color_property = 'dls-yaw'
num_color_classes = 8
min_val = df[color_property].min()
max_val = df[color_property].max()
import jenkspy
breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)
color_stops = create_color_stops(breaks,colors='YlOrRd')
geojson_data = df_to_geojson(df,columns[3:],lat='latitude',lon='longitude')
viz = CircleViz(geojson_data, access_token=token, color_property=color_property,
color_stops=color_stops,
center=[df['longitude'].median(),df['latitude'].median()],
zoom=16, height='600px',
style='mapbox://styles/mapbox/satellite-streets-v9')
viz.show()
# -
# ## Define which warp method to use
# For newer data sets with RigRelatives tags (images captured with RedEdge version 3.4.0 or greater with a valid calibration load, see https://support.micasense.com/hc/en-us/articles/360005428953-Updating-RedEdge-for-Pix4Dfields), we can use the RigRelatives for a simple alignment.
#
# For sets without those tags, or sets that require a RigRelatives optimization, we can go through the Alignment.ipynb notebook and get a set of `warp_matrices` that we can use here to align.
# +
from numpy import array
from numpy import float32
# Set warp_matrices to none to align using RigRelatives
# Or
# Use the warp_matrices derived from the Alignment Tutorial for this RedEdge set without RigRelatives
warp_matrices = [array([[ 1.0022864e+00, -2.5218755e-03, -7.8898020e+00],
[ 2.3614739e-03, 1.0036649e+00, -1.3134377e+01],
[-1.7785899e-06, 1.1343118e-06, 1.0000000e+00]], dtype=float32), array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=float32), array([[ 9.9724638e-01, -1.5535230e-03, 1.2301294e+00],
[ 8.6745428e-04, 9.9738181e-01, -1.6499169e+00],
[-8.2816513e-07, -3.4488804e-07, 1.0000000e+00]], dtype=float32), array([[ 1.0007139e+00, -8.4427800e-03, 1.6312805e+01],
[ 6.2834378e-03, 9.9977130e-01, -1.6011697e+00],
[-1.9520389e-06, -6.3762940e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9284178e-01, 9.2155562e-04, 1.6069822e+01],
[-3.2895457e-03, 9.9262553e-01, -5.0333548e-01],
[-1.5845577e-06, -1.7680986e-06, 1.0000000e+00]], dtype=float32)]
# -
# ## Align images and save each capture to a layered tiff file
# +
import exiftool
import datetime
## This progress widget is used for display of the long-running process
f2 = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description="Saving")
display(f2)
def update_f2(val):
f2.value=val
if not os.path.exists(outputPath):
os.makedirs(outputPath)
if generateThumbnails and not os.path.exists(thumbnailPath):
os.makedirs(thumbnailPath)
# Save out geojson data so we can open the image capture locations in our GIS
with open(os.path.join(outputPath,'imageSet.json'),'w') as f:
f.write(str(geojson_data))
try:
irradiance = panel_irradiance+[0]
except NameError:
irradiance = None
start = datetime.datetime.now()
for i,capture in enumerate(imgset.captures):
outputFilename = capture.uuid+'.tif'
thumbnailFilename = capture.uuid+'.jpg'
fullOutputPath = os.path.join(outputPath, outputFilename)
fullThumbnailPath= os.path.join(thumbnailPath, thumbnailFilename)
if (not os.path.exists(fullOutputPath)) or overwrite:
if(len(capture.images) == len(imgset.captures[0].images)):
capture.create_aligned_capture(irradiance_list=irradiance, warp_matrices=warp_matrices)
capture.save_capture_as_stack(fullOutputPath)
if generateThumbnails:
capture.save_capture_as_rgb(fullThumbnailPath)
capture.clear_image_data()
update_f2(float(i)/float(len(imgset.captures)))
update_f2(1.0)
end = datetime.datetime.now()
print("Saving time: {}".format(end-start))
print("Alignment+Saving rate: {:.2f} images per second".format(float(len(imgset.captures))/float((end-start).total_seconds())))
# -
# ## Extract Metadata from Captures list and save to log.csv
# +
def decdeg2dms(dd):
is_positive = dd >= 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
degrees = degrees if is_positive else -degrees
return (degrees,minutes,seconds)
header = "SourceFile,\
GPSDateStamp,GPSTimeStamp,\
GPSLatitude,GpsLatitudeRef,\
GPSLongitude,GPSLongitudeRef,\
GPSAltitude,GPSAltitudeRef,\
FocalLength,\
XResolution,YResolution,ResolutionUnits\n"
lines = [header]
for capture in imgset.captures:
#get lat,lon,alt,time
outputFilename = capture.uuid+'.tif'
fullOutputPath = os.path.join(outputPath, outputFilename)
lat,lon,alt = capture.location()
#write to csv in format:
# IMG_0199_1.tif,"33 deg 32' 9.73"" N","111 deg 51' 1.41"" W",526 m Above Sea Level
latdeg, latmin, latsec = decdeg2dms(lat)
londeg, lonmin, lonsec = decdeg2dms(lon)
latdir = 'North'
if latdeg < 0:
latdeg = -latdeg
latdir = 'South'
londir = 'East'
if londeg < 0:
londeg = -londeg
londir = 'West'
resolution = capture.images[0].focal_plane_resolution_px_per_mm
linestr = '"{}",'.format(fullOutputPath)
linestr += capture.utc_time().strftime("%Y:%m:%d,%H:%M:%S,")
linestr += '"{:d} deg {:d}\' {:.2f}"" {}",{},'.format(int(latdeg),int(latmin),latsec,latdir[0],latdir)
linestr += '"{:d} deg {:d}\' {:.2f}"" {}",{},{:.1f} m Above Sea Level,Above Sea Level,'.format(int(londeg),int(lonmin),lonsec,londir[0],londir,alt)
linestr += '{}'.format(capture.images[0].focal_length)
linestr += '{},{},mm'.format(resolution,resolution)
linestr += '\n' # when writing in text mode, the write command will convert to os.linesep
lines.append(linestr)
fullCsvPath = os.path.join(outputPath,'log.csv')
with open(fullCsvPath, 'w') as csvfile: #create CSV
csvfile.writelines(lines)
# -
# ## Use Exiftool from the command line to write metadata to images
# +
import subprocess
old_dir = os.getcwd()
os.chdir(outputPath)
cmd = 'exiftool -csv="{}" -overwrite_original .'.format(fullCsvPath)
print(cmd)
try:
subprocess.check_call(cmd)
finally:
os.chdir(old_dir)
|
Batch Processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import calmap
import numpy as np
import pandas as pd
covid = pd.read_csv('covid_19_world.csv')
covid.drop('SNo',axis=1,inplace=True)
covid['ObservationDate'] = pd.to_datetime(covid['ObservationDate'])
covid = covid.set_index('ObservationDate')
datewise_covid = covid.groupby(['ObservationDate']).agg({'Confirmed': 'sum','Deaths':'sum','Recovered':'sum'})
datewise_covid
datewise_covid['Confirmed']
calmap.yearplot(datewise_covid['Confirmed'], year=2020)
calmap.calendarplot(datewise_covid['Confirmed'], monthticks=True, daylabels='MTWTFSS',
dayticks=[0, 2, 4, 6], cmap='YlGn',
fillcolor='grey', linewidth=0,
fig_kws=dict(figsize=(12, 8)))
calmap.calendarplot(datewise_covid['Confirmed'], daylabels='MTWTFSS',
cmap='YlGn',
fillcolor='grey', linewidth=0,
fig_kws=dict(figsize=(12, 8)))
|
Online Certificate Course in Data Science and Machine Learning rearranged/10 DateTimeseries/Calendar heatmaps from Pandas time series data-5-10-2020.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# +
#Load data.
train_features = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/train_features.csv')
train_labels = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/train_labels.csv')
test_features = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/test_features.csv')
sample_submission = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/sample_submission.csv')
# +
#Let's take a look at our data.
#This is our core training data.
train_features.head()
# -
train_labels.head()
test_features.head()
# +
#Merge train_features and train_labels into one dataframe.
training_data = pd.merge(train_features, train_labels)
# +
#Split training data into a train and validation set.
from sklearn.model_selection import train_test_split
train, val = train_test_split(training_data, train_size=0.80, test_size=0.20,
stratify=training_data['status_group'], random_state=42)
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
X = X.drop(columns=['recorded_by', 'id'])
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test_features)
# -
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# +
#What if we used ordinal encoding on high cardinality categoricals?
#Arrange our X features matrix and y target vector so that X includes all features except
#the target as we now want all features inclusive of high cardinality categoricals.
target = 'status_group'
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
pipeline = make_pipeline(ce.OrdinalEncoder(), SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1))
# Fit on train, score on val, predict on test.
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_test)
#Save submission.
#submission = sample_submission.copy()
#submission['status_group'] = y_pred
#submission.to_csv('Submission-04.csv', index=False)
# +
#What was the shape of X_train before encoding?
print('Before encoding:', X_train.shape)
#What was the shape after?
encoder = pipeline.named_steps['ordinalencoder']
print('After encoding:', encoder.transform(X_train).shape)
#We observe that the same number columns didn't change after applying ordinal encoding. This increases the computational
#speed of the model.
|
module2-random-forests/Unit 2_Sprint 2_Module 2_Kaggle Competition Day 3 Notebook Continued.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # JIT Engine: Scalar x Tensor
#
# This example will go over how to compile MLIR code for multiplying a scalar by a tensor.
#
# Previous tutorials have gone over how to broadcast vectors. For the simple task of multiplying a each tensor's elements by a scalar, broadcasting may be unwarranted or unnecessary. We'll go over how to implement this in a much simpler and more straightforward fashion.
#
# Let’s first import some necessary modules and generate an instance of our JIT engine.
# +
import mlir_graphblas
import numpy as np
engine = mlir_graphblas.MlirJitEngine()
# -
# Here's the MLIR code we'll use.
mlir_text = """
#trait_add = {
indexing_maps = [
affine_map<(i, j) -> (i, j)>,
affine_map<(i, j) -> (i, j)>
],
iterator_types = ["parallel", "parallel"]
}
func @scale(%arg_tensor: tensor<2x3xf32>, %arg_scalar: f32) -> tensor<2x3xf32> {
%output_storage = constant dense<0.0> : tensor<2x3xf32>
%answer = linalg.generic #trait_add
ins(%arg_tensor: tensor<2x3xf32>)
outs(%arg_tensor: tensor<2x3xf32>) {
^bb(%a: f32, %s: f32):
%scaled = mulf %a, %arg_scalar : f32
linalg.yield %scaled : f32
} -> tensor<2x3xf32>
return %answer : tensor<2x3xf32>
}
"""
# These are the passes we'll utilize.
passes = [
"--linalg-bufferize",
"--func-bufferize",
"--tensor-bufferize",
"--tensor-constant-bufferize",
"--convert-linalg-to-loops",
"--finalizing-bufferize",
"--convert-scf-to-std",
"--convert-std-to-llvm",
]
# Let's compile our MLIR code.
engine.add(mlir_text, passes)
# Let's try out our compiled function.
# +
# grab our callable
scale = engine.scale
# generate inputs
a = np.arange(6, dtype=np.float32).reshape([2,3])
# generate output
result = scale(a, 100)
# -
result
# Let's verify that our function works as expected.
np.all(result == a*100)
|
docs/tools/engine/scalar_times_tensor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#1.program to find whether a number is divisible by 5 and 11
a=int(input('enter the number'))
if(a%5==0) and (a%11==0):
print('The number is divisible by both 5 and 11')
else:
print('The number is not divisible by both 5 and 11')
#2.program to check a character is a alphabet or not
ai=input('enter the character')
if((ord(ai)>=65)and(ord(ai)<=90))or ((ord(ai)>=97) and (ord(ai)<=122)):
print('The given character is alphabet')
else:
print('The given character is not alphabet')
#3.program to check whether a number is even or odd
b=int(input('enter the number'))
if((b%2)==0 or (b==0)):
print('The number is even')
else:
print('The number is odd')
#4.Program to check whether a character is uppercase or lower case alphabet
c=input('enter the character')
if((ord(c)>=65) or (ord(c)<=90)):
print("The character is lower case")
else:
print("The character is upper case")
#5.Program to check whether the alphabet is vowel or consonant
dh=input('enter the character')
if (dh=='a'or dh=='e'or dh=='i'or dh=='o'or dh=='u'):
print('The character is vowel')
else:
print('The character is consonant ')
# +
#6.program to input week day number and print week day
s=int(input('enter the week day number'))
if(s==0):
print("Monday")
elif(s==1):
print("Tuesday")
elif(s==2):
print("Wednesday")
elif(s==3):
print("Thrusday")
elif(s==4):
print("Friday")
elif(s==5):
print("Saturday")
elif(s==6):
print("Sunday")
else:
print("Enter the weeday number between 0 to 6")
# -
#7.program to input month number and print number of days in month
h=int(input('enter the month number'))
if(h==1)or (h==3)or (h==5) or(h==7)or(h==8)or(h==10)or(H==12):
print("The number of days in this month is 31 days")
elif(h==4)or(h==6)or(h==9)or(h==11):
print("The number of days in this month is 30 days")
elif(h==2):
print("The number of days in this month is 28 days")
else:
print("enter the month number between 1-12")
# +
#8.Program to input angles of a triangle and check whether triangle is valid or not
a1=int(input("enter the first angle"))
b1=int(input("enter the second angle"))
c1=int(input("enter the third angle"))
#sum of angles in triangle is equal to 180
sum=a1+b1+c1
if(sum==180):
print("The triangle is valid")
else:
print("The triangle is not valid")
# -
#9.To find the roots of quadratic equation
from math import sqrt
x=float(input('enter the value a of quadratic equation'))
y=float(input('enter the value b of a quadratic equation'))
z=float(input('enter the value c of a quadratic equation'))
p=(y*y)-4*x*z
if(p>0):
root1=(-y+sqrt(p))/(2*x)
root2=(-y-sqrt(p))/(2*x)
print("The roots are:",root1,root2)
elif(p==0):
root1=root2=(-y)/(2*x)
print("The roots are:",root1,root2)
else:
print("no roots")
|
Harshitha/Assignment on conditional statements.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Joint Embedding
#
# by <NAME>, reviewed by <NAME>
#
# ## Setting
#
# ### Task
#
# Given $\left\{G_i\right\}_{i=1}^N$, a collection of graphs with adjacency matrix $A_i \in \mathbb{R}^{v\times v}$ where each graph contains the same vertex set of length $v$. We desire to embed each $G_i$ into $\mathbb{R}^d$ by a vector $\lambda_i \in \mathbb{R}^d$, where $\mathbb{R}^d$ is a $d$ dimensional space.
#
#
# Notation:
#
# \begin{align*}
# H &= \begin{bmatrix}
# h_1^{(1)} & ...&h_d^{(1)} \\
# h_1^{(2)} & ...& h_d^{(2)} \\
# \vdots & \ddots & \vdots \\
# h_1^{(v)} & ... & h_d^{(v)}
# \end{bmatrix}
# \end{align*}
#
# where each $H \in \mathbb{R}^{v \times d}, h_i \in \mathbb{R}^v$. Here, each $h_i$ is a norm-1 vector. Then we can use the Multiple Eigen Graphs (MREG) model to form a probability matrix. First, we define the probability of each edge as a linear combination of our $1$-norms that define our $d$-dimensional space, and our coefficients $\lambda_i$:
#
# \begin{align*}
# A_i[s, t] \sim Bern\left(p = \sum_{k=1}^d\lambda_i^{(k)} h_k^{(s)}h_k^{(t)} \right)
# \end{align*}
#
# Then we can define the probability of each edge as:
#
# \begin{align*}
# P_i &= \sum_{k=1}^d \lambda_i^{(k)} h_k h_k^T
# \end{align*}
#
# And we can say that our graphs are simulated from the MREG model with coefficients $\Lambda$ and $H$:
#
# \begin{align*}
# \left\{A_i \right\}_{i=1}^n \sim MREG(\lambda, H)
# \end{align*}
#
# that is, each graph $A_i$ is somewhere on the space spanned by $H$ given by $\lambda_i$.
#
# ### Loss Function
#
# \begin{align*}
# L(\lambda, H | A) = \sum_{i=1}^n \left|\left|A_i - \sum_{k=1}^d \lambda_i^{(k)} h_kh_k^T\right|\right|^2
# \end{align*}
#
# ### Statistical Goal
#
# The statistical goal is to find the norm-1 vectors $h_k$ that define the subspace, and the coefficients $\lambda_i$ that define the unique combinations of $h_k^Th_k$ for each $A_i$ that maximize our fit to $A_i$. Stated as an optimization problem, we have:
#
# \begin{align*}
# \left(\hat{\Lambda}, \hat{H}\right) &= \textrm{argmin}_{\lambda_i, h_k:||h_k||=1} \sum_{i=1}^n \left|\left|A_i - \sum_{k=1}^d \lambda_i^{(k)} h_kh_k^T\right|\right|^2
# \end{align*}
#
# Another problem (addressed in other papers referenced by Shangsi) is how to choose $d$, the dimensionality of the subspace we are embedding to. In experimental settings, it is fine to arbitrarily set $d$, however, in real world settings, $d$ can be chosen by computing the algorithm under a range of $d = 1,2,...D$ for some sufficiently large $D$, and then plotting the objective function given the optimal parameters selected by each $d$ and selecting $d$ where the objective roughly begins to flatten out. From a theoretical perspective, this amounts to fitting continuously larger $d$ until adding more dimensions to our subspace provides a substantially small enough improvement in the objective that we can assume that we have found optimal $d$.
#
# ### Desiderata
#
# It is clear that the hypothesis class is going to be heavily non-convex. this means that any algorithms is going to have to make concessions somewhere to arrive at a robust, reliable solution in any sort of computationally-efficient time period. So our desiderata are:
#
# 1. works well in theory in certain settings
# 2. epirically performs well in simulation settings
# 3. empirically performs well in real data scenarios
# 4. is relatively fast in that it doesn't traverse the entire hypothesis set, as the hypothesis set is incredibly large.
#
# ## Approach
#
# ### Algorithm
#
# For the algorithm, the hypothesis set is incredibly large. We are attempting to maximize two high-dimensional quantities, $\Lambda$ and $H$, simultaneously as we pass over each of the graphs. To accomplish this, Shangsi chooses the Alternating Descent approach; that is, maximize $\lambda_k$ by holding $h_k$ fixed using a gradient-descent approach, and then maximize $H$ using least-squares, in an alternating fashion. We repeat this until convergence for each $k=1:d$ individually, the dimensions we are attempting to embed our graphs on. Our algorithm is as follows:
#
# 1. Set residuals $R_i^{(1)} = A_i$ // initialize our error as the graphs themselves
# 2. for $k=1:d$
# a. Initialize $h_k, \lambda_k$
# b. while not converged:
# * Fix $\lambda_k$, and update $h_k$ using gradient descent on $L(\Lambda | A, H)$ by minimizing wrt $\lambda_k$
# * Renormalize $h_k$
# * Fix $h_k$, and update $\lambda_k$ using gradient descent on $L(H | A, \Lambda)$ by minimizing wrt $h_k$
# * Recompute $L(\Lambda, H | A)$
#
# c. endwhile
# d. Update residuals $R_i^{(k+1)} = R_i^{(k)} - \Lambda_{i}^{(k)}h_k^Th_k$
# 3. endfor
# 4. Output $\Lambda, H$
#
# ### Evaluation Methods
#
# 1. Generate a simple example with a $1$ dimensional subspace and vary the number of graphs to see how effectively we can find the joint embedding by checking the mean difference between our single prediction vector and the true norm-$1$ vector.
# 2. Generate pairs of related graphs and find a classifier with minimal classification error from a $2$ dimensional subspace, measuring accuracy as the correct-classification rate.
# 3. Predict the composite creativity index from samples of DTI
#
# ## Results Overview
#
# ### Simulations
#
# #### Basic Simulation
#
# For this experiment, we generate $\lambda_i \sim Unif(1, 2)$ and $h_1$, our single norm vector that our graphs embed on. Then we simulate graphs $\left\{G_i\right\}_{i=1}^n \sim MREG(\left\{\lambda_i\right\}_{i=1}^n, h_1)$ using the $MREG$ model described above.
#
# We run the joint-embedding algorithm 100 times, and vary the number of graphs simulated from $n=2^j$ for $j=4...16$. We compute the quantity $||\hat{h}_1^m - h_1||$, or the model bias after convergence on iteration $m$. This is simply a measure of how close we are to the expected $h_1$. As we can see in the plots below, the more graph examples we have, the more accurately we are able to predict $\hat{h}_1$. Additionally in this simulation, we look at the model convergence $||\hat{h}_1^n - \hat{h}_1^{n/2}||$. This compares our $\hat{h}_1^n$ on $n$ graphs to previous estimate $\hat{h}_1^{n/2}$ on $n/2$ graphs, and verifies that our model converges towards a solution as we increase the number of graphs.
#
# <img="joint embedding simple graph", src="./images/week_25/JE_simple.png">
#
# As we can see, our estimation gets closer to the correct estimation (blue line), and our solution converges as we increase the number of iterations.
#
# #### Classification Simulation
#
# For this experiment, we generate $m$ pairs $\left\{(A_i, Y_i)\right\}_{i=1}^m$ of binarized graphs with a binarized label, $Y_i \in \left\{0, 1\right\}$, with our embedding space defined by two vectors $h_1$ and $h_2$. To classify graphs, $m=200$ graphs are sampled and analyzed using the Joint-Embedding algorithm. A $1-$NN rule is used to classify the graphs, and prediction accuracy is measured from $m=4:200$ graphs. As can be seen below, Joint-Embedding (JE) vastly outperforms the similar algorithm Laplacian Eigenmap (LE) in terms of the classification.
#
# <img="JE vs LE", src="./images/week_25/JE_vs_LE.png">
#
# ### Real Data
#
# For this study, Shangsi investigates the usage of the Joint-Embedding algorithm on predicting individual composite creativity index (CCI) given sets of diffusion-tensor imaging (DTI)-derived connectomes. Predictions are made by embedding 113 graphs with $d=10$, and fitting a linear model by performing:
#
# \begin{align*}
# CCI_i \sim \beta_0 + \hat{\lambda}^T_i \beta + \epsilon_i
# \end{align*}
#
# Or a linear regression of the $CCI_i$ onto $\lambda_i$. The model performs significantly better than the null model with a p-value of 0.0018 after performing an $F$-test. We compare this to exclusively looking at the graphs themselves, which produces a p-value of 0.0039. The first test reveals particularly that having more connections across hemispheres in particular results in greater creativity, while the experiment performed on the raw graphs only indicates that more connectivity itself is responsible for greater creativity.
#
#
# <img="Real Data", src="./images/week_25/JE_real.png">
#
|
reveal/pdfs/joint_embedding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zB3S1L1Mne1l" colab_type="text"
# # **Question-1**
#
# Bank Account
# + id="81nHrMYNnohl" colab_type="code" outputId="fa6221b4-ec6b-404e-f288-9418180a1941" colab={"base_uri": "https://localhost:8080/", "height": 697}
class bankAccount:
owner='Arman'
balance=0.0
def deposit(self,bal):
bankAccount.balance+= bal
print("Money Deposited in",self.owner,"\n Amount in Account : $",self.balance)
def withdraw(self,bal):
if self.balance<bal:
print("Insufficient Money")
else:
bankAccount.balance-= bal
print("Money Withdrawn from",self.owner,"\n Amount left : $",self.balance)
print("*****Welcome to bank*****\n")
while True:
print("\n1. Deposit ")
print("\n2. Withdraw \n")
ch = input("Enter your choice:\n")
obj=bankAccount()
if ch in ['Deposit','deposit'] or ch=='1':
bal= float(input("Enter Amount to be Deposited:\n"))
obj.deposit(bal)
elif ch in ['Withdraw','withdraw'] or ch=='2':
bal= float(input("Enter Amount to be Withdrawn:\n"))
obj.withdraw(bal)
else:
print("Invalid Choice")
cont=input("\nDo you want to continue banking: Y/N\n")
if cont.lower()=='n':
break
print("\nThanks for banking",getattr(obj,'owner'))
# + [markdown] id="jjHewrgs8Gut" colab_type="text"
# # **Question-2**
#
# Cone ka class
# + id="nJDNKz16AhFT" colab_type="code" outputId="60810abf-dea4-46b2-ecd0-5e9945338aa6" colab={"base_uri": "https://localhost:8080/", "height": 204}
import math as m
class cone:
radius=0.0
height=0.0
def __init__(self,r,h):
self.radius=r
self.height=h
def volume(self):
print("\nVolume of cone = ",(1/3)*3.14*self.height*(m.pow(self.radius,2)))
def surfaceArea(self):
print("\nSurface Area of cone = ",(3.14*(m.pow(self.radius,2)))+(3.14*self.radius*(m.sqrt(m.pow(self.radius,2) + m.pow(self.height,2)))))
print("****Cone****\n")
r=float(input("Enter Radius :\n"))
h=float(input("\nEnter Height :\n"))
obj1 = cone(r,h)
obj1.volume()
obj1.surfaceArea()
|
OOPs(classes and objects).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Tweepy
#
# > Searching tweets using tweepy
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [twitter, tweepy, Python]
# 
# ## Resources
#
# #### A. Getting started with twitter API - Twitter Developer Platform
# https://developer.twitter.com/en/docs/twitter-api/getting-started/about-twitter-api
#
#
# #### B. Tweepy Documentation
#
# https://docs.tweepy.org/en/stable/index.html
#
#
# #### C. A comprehensive guide for using the Twitter API v2 with Tweepy in Python - <NAME>
#
# https://dev.to/twitterdev/a-comprehensive-guide-for-using-the-twitter-api-v2-using-tweepy-in-python-15d9
#
#
# #### D. Making queries to Twitter API on tweepy - <NAME>
#
# https://medium.com/@robguilarr/making-queries-to-twitter-api-on-tweepy-66afeb7184a4
#
#
# #### E. Accessing the Twitter API with Python - <NAME>
#
# https://stackabuse.com/accessing-the-twitter-api-with-python/
#
#
# #### F. How to Apply for a Twitter Developer Account - jean-christophe-chouinard
#
# https://www.jcchouinard.com/apply-for-a-twitter-developer-account/
#
#
# #### G. How to get Twitter API Credentials (API Keys) - jean-christophe-chouinard
#
# https://www.jcchouinard.com/twitter-api-credentials/
#
#
# #### H. Twitter API with Python (Complete Guide) - jean-christophe-chouinard
#
# https://www.jcchouinard.com/twitter-api/
#
# ## Setting up twitter API
#
# ### 1. Apply for a developer account
#
# Before using the Twitter API, you first need a Twitter account, and to have obtained some credentials. The process of getting credentials could change with time, but currently it is as follows:
#
# Visit the Application Management page at https://apps.twitter.com/, and sign in with your Twitter account
# Click on the "Create New App" button, fill in the details and agree the Terms of Service
# Navigate to "Keys and Access Tokens" section and take a note of your Consumer Key and Secret
# In the same section click on "Create my access token" button
# Take note of your Access Token and Access Token Secret
# And that's all. The consumer key/secret is used to authenticate the app that is using the Twitter API, while the access token/secret authenticates the user. All of these parameters should be treated as passwords, and should not be included in your code in plain text. One suitable way is to store them in a JSON file "twitter_credentials.json" and load these values from your code when needed.
#
# Source [E. Accessing the Twitter API with Python - <NAME>](https://stackabuse.com/accessing-the-twitter-api-with-python/)
#
# Twitter API access levels and versions
#
# While the Twitter API v2 is the primary Twitter API, the platform currently supports previous versions (v1.1, Gnip 2.0) as well. We recommend that all users start with v2 as this is where all future innovation will happen.
#
# The Twitter API v2 includes a few access levels to help you scale your usage on the platform. In general, new accounts can quickly sign up for free, Essential access. Should you want additional access, you may choose to apply for free Elevated access and beyond.
#
# Source [A. Getting started with twitter API - Twitter Developer Platform](https://developer.twitter.com/en/docs/twitter-api/getting-started/about-twitter-api)
#
# 
#
#
#
# I applied for elevated access. This involved filling in several questions about what I would user twitter API for and a follow up email.
#
# For more info on how to apply for a twitter development account see [F. How to Apply for a Twitter Developer Account - jean-christophe-chouinard](https://www.jcchouinard.com/apply-for-a-twitter-developer-account/)
#
#
# ### 2. Create a project/app
#
# To use the twitter API you need to create a twitter App. From this you can then get the security IDs, bearer_token, API_key etc.
#
# For more details on this see [G. How to get Twitter API Credentials (API Keys) - jean-christophe-chouinard](https://www.jcchouinard.com/twitter-api-credentials/)
#
#
# ### 3. Set up an environment
#
# Twitter API bestows us several endpoints at the moment we request our App access. From which 3 of them are for searching methods, those that bring samples of the tweets we want according to specific criteria.
# api.search_30_day()
#
# Premium Search for tweets from the last 30 days.
# Monthly limitation of 4500 tweets per minute, without exceeding 25K tweets per month — Sandbox
#
# api.search_full_archive()
#
# Premium Search for tweets from March of 2006.
# Monthly limitation of 3000 tweets per minute, without exceeding 5K tweets per month — Sandbox
#
# api.search_tweets()
#
# Regular Search for tweets from the last 6–9 days as maximum.
# Monthly limitation of 3000 tweets per minute — Sandbox, this is the one that we gonna use in this case.
#
# From [D. Making queries to Twitter API on tweepy - <NAME>](https://medium.com/@robguilarr/making-queries-to-twitter-api-on-tweepy-66afeb7184a4)
#
#
# These environments are found at https://developer.twitter.com/en/account/environments.
#
# And the name of the environment is included in the particular search (as shown below `label=XXX`.
#
# ### 4. Install tweepy
#
# `pip install tweepy`
# ## Tweepy code
import os
os.environ["BEARER_TOKEN"]=" insert here "
os.environ["API_key"]='insert here'
os.environ["API_secret"]=' insert here'
os.environ["access_token"] = " insert here "
os.environ["access_token_secret"]=" insert here "
# ### Import and check it is authenticated
# +
import tweepy
# API keys that yous saved earlier
api_key = os.environ.get("API_KEY")
api_secrets = os.environ.get("API_secret")
access_token = os.environ.get("access_token")
access_secret = os.environ.get("access_token_secret")
# Authenticate to Twitter
auth = tweepy.OAuthHandler(api_key,api_secrets)
auth.set_access_token(access_token,access_secret)
api = tweepy.API(auth)
try:
api.verify_credentials()
print('Successful Authentication')
except:
print('Failed authentication')
# -
# ### Search tweets
#
# Two main ones:
#
# For last 30 days:
#
# `outa = api.search_30_day(label, query, *, tag, fromDate, toDate, maxResults,next)`
#
# And for any times:
#
# `outa = api.search_full_archive(label, query, *, tag, fromDate, toDate, maxResults, next)`
#
# #### Query
#
# from the python script:
#
# The equivalent of one premium rule/filter, with up to 1,024 characters (256 with Sandbox dev environments).
#
# This parameter should include ALL portions of the rule/filter,including all operators, and portions of the rule should not be separated into other parameters of the query.
#
# Following adapted from [D. Making queries to Twitter API on tweepy - <NAME>](https://medium.com/@robguilarr/making-queries-to-twitter-api-on-tweepy-66afeb7184a4)
#
# - Search for a term
#
# `query = 'holiday'`
# - Containing two words
#
# `query = 'holiday jet2'`
# - Containing exact words
#
# `query='"jet2 braces" "holiday"'`
# - Contain one word OR another
#
# `query='jet2 OR "tuiuk"'`
# - Hashtags and mentions
#
# `query = '@jet2tweets #holiday'`
# - Exclude words
#
# `query='@jet2tweets -travel'`
# - Who the tweet is sent from and to
#
# `query='holiday from:jet2tweets'`
#
# `query='holiday to:jet2tweets'`
#
#
# #### Dates
#
# Dates are in the format YYYYMMDDHHmm
#
# So 0:00 21st March 2021 = '202103210000'
# #hide
# #### Tag
#
# from script:
#
# Tags can be used to segregate rules and their matching data into
# different logical groups. If a rule tag is provided, the rule tag
# is included in the 'matching_rules' attribute.
#
# It is recommended to assign rule-specific UUIDs to rule tags and
# maintain desired mappings on the client side.
#
# Following adapted from [D. Making queries to Twitter API on tweepy - <NAME>](https://medium.com/@robguilarr/making-queries-to-twitter-api-on-tweepy-66afeb7184a4)
#
# - Retweets
#
# `tag='-filter:retweets'` without retweets
#
# `tag='filter:retweets'` and just RTs
#
# - Has links
#
# `tag='-filter:links'`
#
# - Has media
#
# `tag='-filter:media'`
#
# - Trusted
#
# `tag='-filter:trusted'`
#
# More filters can be seen in the [Docs](https://developer.twitter.com/en/docs/twitter-api/tweets/search/migrate)
# ### The results file
#
# `outa=api.search_full_archive(label=label,
# query=query,toDate=end_time,fromDate=start_time)`
#
# The information from outa is accessed for each element using _json, i.e.
#
# `outa[0]._json` gives the first search result
xx=outa[0]._json
for ii,x in enumerate(xx):
print(ii,x)
# 
#
# If the tweet is longer than 140 char the `text` property doesn't display all the text of the tweet.
#
# This will normally then be in the `extended_tweet` property but also sometimes in the `retweeted_status` property.
#
# So to get around this put in exceptions.
# +
textAll=[]
for numa in range(0,10):
xx=outa1[numa]._json
try:
textAll.append(numa,'1', xx['created_at'],
xx['retweeted_status']['extended_tweet']['full_text'] )
except:
try:
textAll.append(numa,'2',xx['created_at'],
xx['extended_tweet']['full_text'])
except:
textAll.append(numa,'3',xx['created_at'],xx['text'])
|
_notebooks/2022-04-08-Tweepy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import PIL.Image
import numpy as np
# +
# infile = 'original/yeah.jpeg'
# outfile = 'yeah.png'
# -
d = PIL.Image.open(infile)
w, h = d.size
targetsize = min(w, h)
if h>=w:
hn = int(targetsize/w*h)
wn = targetsize
else:
hn = targetsize
wn = int(targetsize/h*w)
dn = d.resize([wn, hn], PIL.Image.BICUBIC).save(outfile)
d.size
|
Layerwise/data/exemplar/resize.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Vehicle steering
# <NAME> and <NAME>
# 23 Jul 2019
#
# This notebook contains the computations for the vehicle steering running example in *Feedback Systems*.
# RMM comments to Karl, 27 Jun 2019
# * I'm using this notebook to walk through all of the vehicle steering examples and make sure that all of the parameters, conditions, and maximum steering angles are consitent and reasonable.
# * Please feel free to send me comments on the contents as well as the bulletted notes, in whatever form is most convenient.
# * Once we have sorted out all of the settings we want to use, I'll copy over the changes into the MATLAB files that we use for creating the figures in the book.
# * These notes will be removed from the notebook once we have finalized everything.
import numpy as np
import matplotlib.pyplot as plt
import control as ct
ct.use_fbs_defaults()
ct.use_numpy_matrix(False)
# ## Vehicle steering dynamics (Example 3.11)
#
# The vehicle dynamics are given by a simple bicycle model. We take the state of the system as $(x, y, \theta)$ where $(x, y)$ is the position of the reference point of the vehicle in the plane and $\theta$ is the angle of the vehicle with respect to horizontal. The vehicle input is given by $(v, \delta)$ where $v$ is the forward velocity of the vehicle and $\delta$ is the angle of the steering wheel. We take as parameters the wheelbase $b$ and the offset $a$ between the rear wheels and the reference point. The model includes saturation of the vehicle steering angle (`maxsteer`).
#
# * System state: `x`, `y`, `theta`
# * System input: `v`, `delta`
# * System output: `x`, `y`
# * System parameters: `wheelbase`, `refoffset`, `maxsteer`
#
# Assuming no slipping of the wheels, the motion of the vehicle is given by a rotation around a point O that depends on the steering angle $\delta$. To compute the angle $\alpha$ of the velocity of the reference point with respect to the axis of the vehicle, we let the distance from the center of rotation O to the contact point of the rear wheel be $r_\text{r}$ and it the follows from Figure 3.17 in FBS that $b = r_\text{r} \tan \delta$ and $a = r_\text{r} \tan \alpha$, which implies that $\tan \alpha = (a/b) \tan \delta$.
#
# Reasonable limits for the steering angle depend on the speed. The physical limit is given in our model as 0.5 radians (about 30 degrees). However, this limit is rarely possible when the car is driving since it would cause the tires to slide on the pavement. We us a limit of 0.1 radians (about 6 degrees) at 10 m/s ($\approx$ 35 kph) and 0.05 radians (about 3 degrees) at 30 m/s ($\approx$ 110 kph). Note that a steering angle of 0.05 rad gives a cross acceleration of $(v^2/b) \tan \delta \approx (100/3) 0.05 = 1.7$ $\text{m/s}^2$ at 10 m/s and 15 $\text{m/s}^2$ at 30 m/s ($\approx$ 1.5 times the force of gravity).
# +
def vehicle_update(t, x, u, params):
# Get the parameters for the model
a = params.get('refoffset', 1.5) # offset to vehicle reference point
b = params.get('wheelbase', 3.) # vehicle wheelbase
maxsteer = params.get('maxsteer', 0.5) # max steering angle (rad)
# Saturate the steering input
delta = np.clip(u[1], -maxsteer, maxsteer)
alpha = np.arctan2(a * np.tan(delta), b)
# Return the derivative of the state
return np.array([
u[0] * np.cos(x[2] + alpha), # xdot = cos(theta + alpha) v
u[0] * np.sin(x[2] + alpha), # ydot = sin(theta + alpha) v
(u[0] / b) * np.tan(delta) # thdot = v/l tan(phi)
])
def vehicle_output(t, x, u, params):
return x[0:2]
# Default vehicle parameters (including nominal velocity)
vehicle_params={'refoffset': 1.5, 'wheelbase': 3, 'velocity': 15,
'maxsteer': 0.5}
# Define the vehicle steering dynamics as an input/output system
vehicle = ct.NonlinearIOSystem(
vehicle_update, vehicle_output, states=3, name='vehicle',
inputs=('v', 'delta'), outputs=('x', 'y'), params=vehicle_params)
# -
# ## Vehicle driving on a curvy road (Figure 8.6a)
#
# To illustrate the dynamics of the system, we create an input that correspond to driving down a curvy road. This trajectory will be used in future simulations as a reference trajectory for estimation and control.
# RMM notes, 27 Jun 2019:
# * The figure below appears in Chapter 8 (output feedback) as Example 8.3, but I've put it here in the notebook since it is a good way to demonstrate the dynamics of the vehicle.
# * In the book, this figure is created for the linear model and in a manner that I can't quite understand, since the linear model that is used is only for the lateral dynamics. The original file is `OutputFeedback/figures/steering_obs.m`.
# * To create the figure here, I set the initial vehicle angle to be $\theta(0) = 0.75$ rad and then used an input that gives a figure approximating Example 8.3 To create the lateral offset, I think subtracted the trajectory from the averaged straight line trajectory, shown as a dashed line in the $xy$ figure below.
# * I find the approach that we used in the MATLAB version to be confusing, but I also think the method of creating the lateral error here is a hart to follow. We might instead consider choosing a trajectory that goes mainly vertically, with the 2D dynamics being the $x$, $\theta$ dynamics instead of the $y$, $\theta$ dynamics.
#
# KJA comments, 1 Jul 2019:
#
# 0. I think we should point out that the reference point is typically the projection of the center of mass of the whole vehicle.
#
# 1. The heading angle $\theta$ must be marked in Figure 3.17b.
#
# 2. I think it is useful to start with a curvy road that you have done here but then to specialized to a trajectory that is essentially horizontal, where $y$ is the deviation from the nominal horizontal $x$ axis. Assuming that $\alpha$ and $\theta$ are small we get the natural linearization of (3.26) $\dot x = v$ and $\dot y =v(\alpha + \theta)$
#
# RMM response, 16 Jul 2019:
# * I've changed the trajectory to be about the horizontal axis, but I am ploting things vertically for better figure layout. This corresponds to what is done in Example 9.10 in the text, which I think looks OK.
#
# KJA response, 20 Jul 2019: Fig 8.6a is fine
# +
# System parameters
wheelbase = vehicle_params['wheelbase']
v0 = vehicle_params['velocity']
# Control inputs
T_curvy = np.linspace(0, 7, 500)
v_curvy = v0*np.ones(T_curvy.shape)
delta_curvy = 0.1*np.sin(T_curvy)*np.cos(4*T_curvy) + 0.0025*np.sin(T_curvy*np.pi/7)
u_curvy = [v_curvy, delta_curvy]
X0_curvy = [0, 0.8, 0]
# Simulate the system + estimator
t_curvy, y_curvy, x_curvy = ct.input_output_response(
vehicle, T_curvy, u_curvy, X0_curvy, params=vehicle_params, return_x=True)
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the resulting trajectory (and some road boundaries)
plt.subplot(1, 4, 2)
plt.plot(y_curvy[1], y_curvy[0])
plt.plot(y_curvy[1] - 9/np.cos(x_curvy[2]), y_curvy[0], 'k-', linewidth=1)
plt.plot(y_curvy[1] - 3/np.cos(x_curvy[2]), y_curvy[0], 'k--', linewidth=1)
plt.plot(y_curvy[1] + 3/np.cos(x_curvy[2]), y_curvy[0], 'k-', linewidth=1)
plt.xlabel('y [m]')
plt.ylabel('x [m]');
plt.axis('Equal')
# Plot the lateral position
plt.subplot(2, 2, 2)
plt.plot(t_curvy, y_curvy[1])
plt.ylabel('Lateral position $y$ [m]')
# Plot the steering angle
plt.subplot(2, 2, 4)
plt.plot(t_curvy, delta_curvy)
plt.ylabel('Steering angle $\\delta$ [rad]')
plt.xlabel('Time t [sec]')
plt.tight_layout()
# -
# ## Linearization of lateral steering dynamics (Example 6.13)
#
# We are interested in the motion of the vehicle about a straight-line path ($\theta = \theta_0$) with constant velocity $v_0 \neq 0$. To find the relevant equilibrium point, we first set $\dot\theta = 0$ and we see that we must have $\delta = 0$, corresponding to the steering wheel being straight. The motion in the xy plane is by definition not at equilibrium and so we focus on lateral deviation of the vehicle from a straight line. For simplicity, we let $\theta_\text{e} = 0$, which corresponds to driving along the $x$ axis. We can then focus on the equations of motion in the $y$ and $\theta$ directions with input $u = \delta$.
# +
# Define the lateral dynamics as a subset of the full vehicle steering dynamics
lateral = ct.NonlinearIOSystem(
lambda t, x, u, params: vehicle_update(
t, [0., x[0], x[1]], [params.get('velocity', 1), u[0]], params)[1:],
lambda t, x, u, params: vehicle_output(
t, [0., x[0], x[1]], [params.get('velocity', 1), u[0]], params)[1:],
states=2, name='lateral', inputs=('phi'), outputs=('y')
)
# Compute the linearization at velocity v0 = 15 m/sec
lateral_linearized = ct.linearize(lateral, [0, 0], [0], params=vehicle_params)
# Normalize dynamics using state [x1/b, x2] and timescale v0 t / b
b = vehicle_params['wheelbase']
v0 = vehicle_params['velocity']
lateral_transformed = ct.similarity_transform(
lateral_linearized, [[1/b, 0], [0, 1]], timescale=v0/b)
# Set the output to be the normalized state x1/b
lateral_normalized = lateral_transformed * (1/b)
print("Linearized system dynamics:\n")
print(lateral_normalized)
# Save the system matrices for later use
A = lateral_normalized.A
B = lateral_normalized.B
C = lateral_normalized.C
# -
# ## Eigenvalue placement controller design (Example 7.4)
#
# We want to design a controller that stabilizes the dynamics of the vehicle and tracks a given reference value $r$ of the lateral position of the vehicle. We use feedback to design the dynamics of the system to have the characteristic polynomial
# $p(s) = s^2 + 2 \zeta_\text{c} \omega_\text{c} + \omega_\text{c}^2$.
#
# To find reasonable values of $\omega_\text{c}$ we observe that the initial response of the steering angle to a unit step change in the steering command is $\omega_\text{c}^2 r$, where $r$ is the commanded lateral transition. Recall that the model is normalized so that the length unit is the wheelbase $b$ and the time unit is the time $b/v_0$ to travel one wheelbase. A typical car has a wheelbase of about 3 m and, assuming a speed of 30 m/s, a normalized time unit corresponds to 0.1 s. To determine a reasonable steering angle when making a gentle lane change, we assume that the turning radius is $R$ = 600 m. For a wheelbase of 3 m this corresponds to a steering angle $\delta \approx 3/600 = 0.005$ rad and a lateral acceleration of $v^2/R$ = 302/600 = 1.5 m/s$^2$. Assuming that a lane change corresponds to a translation of one wheelbase we find $\omega_\text{c} = \sqrt{0.005}$ = 0.07 rad/s.
#
# The unit step responses for the closed loop system for different values of the design parameters are shown below. The effect of $\omega_c$ is shown on the left, which shows that the response speed increases with increasing $\omega_\text{c}$. All responses have overshoot less than 5% (15 cm), as indicated by the dashed lines. The settling times range from 30 to 60 normalized time units, which corresponds to about 3–6 s, and are limited by the acceptable lateral acceleration of the vehicle. The effect of $\zeta_\text{c}$ is shown on the right. The response speed and the overshoot increase with decreasing damping. Using these plots, we conclude that a reasonable design choice is $\omega_\text{c} = 0.07$ and $\zeta_\text{c} = 0.7$.
# RMM note, 27 Jun 2019:
# * The design guidelines are for $v_0$ = 30 m/s (highway speeds) but most of the examples below are done at lower speed (typically 10 m/s). Also, the eigenvalue locations above are not the same ones that we use in the output feedback example below. We should probably make things more consistent.
#
# KJA comment, 1 Jul 2019:
# * I am all for maikng it consist and choosing e.g. v0 = 30 m/s
#
# RMM comment, 17 Jul 2019:
# * I've updated the examples below to use v0 = 30 m/s for everything except the forward/reverse example. This corresponds to ~105 kph (freeway speeds) and a reasonable bound for the steering angle to avoid slipping is 0.05 rad.
# +
# Utility function to place poles for the normalized vehicle steering system
def normalized_place(wc, zc):
# Get the dynamics and input matrices, for later use
A, B = lateral_normalized.A, lateral_normalized.B
# Compute the eigenvalues from the characteristic polynomial
eigs = np.roots([1, 2*zc*wc, wc**2])
# Compute the feedback gain using eigenvalue placement
K = ct.place_varga(A, B, eigs)
# Create a new system representing the closed loop response
clsys = ct.StateSpace(A - B @ K, B, lateral_normalized.C, 0)
# Compute the feedforward gain based on the zero frequency gain of the closed loop
kf = np.real(1/clsys.evalfr(0))
# Scale the input by the feedforward gain
clsys *= kf
# Return gains and closed loop system dynamics
return K, kf, clsys
# Utility function to plot simulation results for normalized vehicle steering system
def normalized_plot(t, y, u, inpfig, outfig):
plt.sca(outfig)
plt.plot(t, y)
plt.sca(inpfig)
plt.plot(t, u[0])
# Utility function to label plots of normalized vehicle steering system
def normalized_label(inpfig, outfig):
plt.sca(inpfig)
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Steering angle $\delta$ [rad]')
plt.sca(outfig)
plt.ylabel('Lateral position $y/b$')
plt.plot([0, 20], [0.95, 0.95], 'k--')
plt.plot([0, 20], [1.05, 1.05], 'k--')
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Explore range of values for omega_c, with zeta_c = 0.7
outfig = plt.subplot(2, 2, 1)
inpfig = plt.subplot(2, 2, 3)
zc = 0.7
for wc in [0.5, 0.7, 1]:
# Place the poles of the system
K, kf, clsys = normalized_place(wc, zc)
# Compute the step response
t, y, x = ct.step_response(clsys, np.linspace(0, 20, 100), return_x=True)
# Compute the input used to generate the control response
u = -K @ x + kf * 1
# Plot the results
normalized_plot(t, y, u, inpfig, outfig)
# Add labels to the figure
normalized_label(inpfig, outfig)
plt.legend(('$\omega_c = 0.5$', '$\omega_c = 0.7$', '$\omega_c = 0.1$'))
# Explore range of values for zeta_c, with omega_c = 0.07
outfig = plt.subplot(2, 2, 2)
inpfig = plt.subplot(2, 2, 4)
wc = 0.7
for zc in [0.5, 0.7, 1]:
# Place the poles of the system
K, kf, clsys = normalized_place(wc, zc)
# Compute the step response
t, y, x = ct.step_response(clsys, np.linspace(0, 20, 100), return_x=True)
# Compute the input used to generate the control response
u = -K @ x + kf * 1
# Plot the results
normalized_plot(t, y, u, inpfig, outfig)
# Add labels to the figure
normalized_label(inpfig, outfig)
plt.legend(('$\zeta_c = 0.5$', '$\zeta_c = 0.7$', '$\zeta_c = 1$'))
plt.tight_layout()
# -
# RMM notes, 17 Jul 2019
# * These step responses are *very* slow. Note that the steering wheel angles are about 10X less than a resonable bound (0.05 rad at 30 m/s). A consequence of these low gains is that the tracking controller in Example 8.4 has to use a different set of gains. We could update, but the gains listed here have a rationale that we would have to update as well.
# * Based on the discussion below, I think we should make $\omega_\text{c}$ range from 0.5 to 1 (10X faster).
#
# KJA response, 20 Jul 2019: Makes a lot of sense to make $\omega_\text{c}$ range from 0.5 to 1 (10X faster). The plots were still in the range 0.05 to 0.1 in the note you sent me.
#
# RMM response: 23 Jul 2019: Updated $\omega_\text{c}$ to 10X faster. Note that this makes size of the inputs for the step response quite large, but that is in part because a unit step in the desired position produces an (instantaneous) error of $b = 3$ m $\implies$ quite a large error. A lateral error of 10 cm with $\omega_c = 0.7$ would produce an (initial) input of 0.015 rad.
# ## Eigenvalue placement observer design (Example 8.3)
#
# We construct an estimator for the (normalized) lateral dynamics by assigning the eigenvalues of the estimator dynamics to desired value, specifified in terms of the second order characteristic equation for the estimator dynamics.
# +
# Find the eigenvalue from the characteristic polynomial
wo = 1 # bandwidth for the observer
zo = 0.7 # damping ratio for the observer
eigs = np.roots([1, 2*zo*wo, wo**2])
# Compute the estimator gain using eigenvalue placement
L = np.transpose(
ct.place(np.transpose(A), np.transpose(C), eigs))
print("L = ", L)
# Create a linear model of the lateral dynamics driving the estimator
est = ct.StateSpace(A - L @ C, np.block([[B, L]]), np.eye(2), np.zeros((2,2)))
# -
# ### Linear observer applied to nonlinear system output
#
# A simulation of the observer for a vehicle driving on a curvy road is shown below. The first figure shows the trajectory of the vehicle on the road, as viewed from above. The response of the observer is shown on the right, where time is normalized to the vehicle length. We see that the observer error settles in about 4 vehicle lengths.
# RMM note, 27 Jun 2019:
# * As an alternative, we can attempt to estimate the state of the full nonlinear system using a linear estimator. This system does not necessarily converge to zero since there will be errors in the nominal dynamics of the system for the linear estimator.
# * The limits on the $x$ axis for the time plots are different to show the error over the entire trajectory.
# * We should decide whether we want to keep the figure above or the one below for the text.
#
# KJA comment, 1 Jul 2019:
# * I very much like your observation about the nonlinear system. I think it is a very good idea to use your new simulation
#
# RMM comment, 17 Jul 2019: plan to use this version in the text.
#
# KJA comment, 20 Jul 2019: I think this is a big improvement we show that an observer based on a linearized model works on a nonlinear simulation, If possible we could add a line telling why the linear model works and that this is standard procedure in control engineering.
# +
# Convert the curvy trajectory into normalized coordinates
x_ref = x_curvy[0] / wheelbase
y_ref = x_curvy[1] / wheelbase
theta_ref = x_curvy[2]
tau = v0 * T_curvy / b
# Simulate the estimator, with a small initial error in y position
t, y_est, x_est = ct.forced_response(est, tau, [delta_curvy, y_ref], [0.5, 0])
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the actual and estimated states
ax = plt.subplot(2, 2, 1)
plt.plot(t, y_ref)
plt.plot(t, x_est[0])
ax.set(xlim=[0, 10])
plt.legend(['actual', 'estimated'])
plt.ylabel('Lateral position $y/b$')
ax = plt.subplot(2, 2, 2)
plt.plot(t, x_est[0] - y_ref)
ax.set(xlim=[0, 10])
plt.ylabel('Lateral error')
ax = plt.subplot(2, 2, 3)
plt.plot(t, theta_ref)
plt.plot(t, x_est[1])
ax.set(xlim=[0, 10])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Vehicle angle $\\theta$')
ax = plt.subplot(2, 2, 4)
plt.plot(t, x_est[1] - theta_ref)
ax.set(xlim=[0, 10])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Angle error')
plt.tight_layout()
# -
# ## Output Feedback Controller (Example 8.4)
# RMM note, 27 Jun 2019
# * The feedback gains for the controller below are different that those computed in the eigenvalue placement example (from Ch 7), where an argument was given for the choice of the closed loop eigenvalues. Should we choose a single, consistent set of gains in both places?
# * This plot does not quite match Example 8.4 because a different reference is being used for the laterial position.
# * The transient in $\delta$ is quiet large. This appears to be due to the error in $\theta(0)$, which is initialized to zero intead of to `theta_curvy`.
#
# KJA comment, 1 Jul 2019:
# 1. The large initial errors dominate the plots.
#
# 2. There is somehing funny happening at the end of the simulation, may be due to the small curvature at the end of the path?
#
# RMM comment, 17 Jul 2019:
# * Updated to use the new trajectory
# * We will have the issue that the gains here are different than the gains that we used in Chapter 7. I think that what we need to do is update the gains in Ch 7 (they are too sluggish, as noted above).
# * Note that unlike the original example in the book, the errors do not converge to zero. This is because we are using pure state feedback (no feedforward) => the controller doesn't apply any input until there is an error.
#
# KJA comment, 20 Jul 2019: We may add that state feedback is a proportional controller which does not guarantee that the error goes to zero for example by changing the line "The tracking error ..." to "The tracking error can be improved by adding integral action (Section7.4), later in this chapter "Disturbance Modeling" or feedforward (Section 8,5). Should we do an exercises?
# +
# Compute the feedback gains
# K, kf, clsys = normalized_place(1, 0.707) # Gains from MATLAB
# K, kf, clsys = normalized_place(0.07, 0.707) # Original gains
K, kf, clsys = normalized_place(0.7, 0.707) # Final gains
# Print out the gains
print("K = ", K)
print("kf = ", kf)
# Construct an output-based controller for the system
clsys = ct.StateSpace(
np.block([[A, -B@K], [L@C, A - B@K - L@C]]),
np.block([[B], [B]]) * kf,
np.block([[C, np.zeros(C.shape)], [np.zeros(C.shape), C]]),
np.zeros((2,1)))
# Simulate the system
t, y, x = ct.forced_response(clsys, tau, y_ref, [0.4, 0, 0.0, 0])
# Calcaluate the input used to generate the control response
u_sfb = kf * y_ref - K @ x[0:2]
u_ofb = kf * y_ref - K @ x[2:4]
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the actual and estimated states
ax = plt.subplot(1, 2, 1)
plt.plot(t, x[0])
plt.plot(t, x[2])
plt.plot(t, y_ref, 'k-.')
ax.set(xlim=[0, 30])
plt.legend(['state feedback', 'output feedback', 'reference'])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Lateral position $y/b$')
ax = plt.subplot(2, 2, 2)
plt.plot(t, x[1])
plt.plot(t, x[3])
plt.plot(t, theta_ref, 'k-.')
ax.set(xlim=[0, 15])
plt.ylabel('Vehicle angle $\\theta$')
ax = plt.subplot(2, 2, 4)
plt.plot(t, u_sfb[0])
plt.plot(t, u_ofb[0])
plt.plot(t, delta_curvy, 'k-.')
ax.set(xlim=[0, 15])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Steering angle $\\delta$')
plt.tight_layout()
# -
# ## Trajectory Generation (Example 8.8)
#
# To illustrate how we can use a two degree-of-freedom design to improve the performance of the system, consider the problem of steering a car to change lanes on a road. We use the non-normalized form of the dynamics, which were derived in Example 3.11.
# KJA comment, 1 Jul 2019:
# 1. I think the reference trajectory is too much curved in the end compare with Example 3.11
#
# In summary I think it is OK to change the reference trajectories but we should make sure that the curvature is less than $\rho=600 m$ not to have too high acceleratarion.
#
# RMM response, 16 Jul 2019:
# * Not sure if the comment about the trajectory being too curved is referring to this example. The steering angles (and hence radius of curvature/acceleration) are quite low. ??
#
# KJA response, 20 Jul 2019: You are right the curvature is not too small. We could add the sentence "The small deviations can be eliminated by adding feedback."
#
# RMM response, 23 Jul 2019: I think the small deviation you are referring to is in the velocity trace. This occurs because I gave a fixed endpoint in time and so the velocity had to be adjusted to hit that exact point at that time. This doesn't show up in the book, so it won't be a problem ($\implies$ no additional explanation required).
# +
import control.flatsys as fs
# Function to take states, inputs and return the flat flag
def vehicle_flat_forward(x, u, params={}):
# Get the parameter values
b = params.get('wheelbase', 3.)
# Create a list of arrays to store the flat output and its derivatives
zflag = [np.zeros(3), np.zeros(3)]
# Flat output is the x, y position of the rear wheels
zflag[0][0] = x[0]
zflag[1][0] = x[1]
# First derivatives of the flat output
zflag[0][1] = u[0] * np.cos(x[2]) # dx/dt
zflag[1][1] = u[0] * np.sin(x[2]) # dy/dt
# First derivative of the angle
thdot = (u[0]/b) * np.tan(u[1])
# Second derivatives of the flat output (setting vdot = 0)
zflag[0][2] = -u[0] * thdot * np.sin(x[2])
zflag[1][2] = u[0] * thdot * np.cos(x[2])
return zflag
# Function to take the flat flag and return states, inputs
def vehicle_flat_reverse(zflag, params={}):
# Get the parameter values
b = params.get('wheelbase', 3.)
# Create a vector to store the state and inputs
x = np.zeros(3)
u = np.zeros(2)
# Given the flat variables, solve for the state
x[0] = zflag[0][0] # x position
x[1] = zflag[1][0] # y position
x[2] = np.arctan2(zflag[1][1], zflag[0][1]) # tan(theta) = ydot/xdot
# And next solve for the inputs
u[0] = zflag[0][1] * np.cos(x[2]) + zflag[1][1] * np.sin(x[2])
thdot_v = zflag[1][2] * np.cos(x[2]) - zflag[0][2] * np.sin(x[2])
u[1] = np.arctan2(thdot_v, u[0]**2 / b)
return x, u
vehicle_flat = fs.FlatSystem(vehicle_flat_forward, vehicle_flat_reverse, inputs=2, states=3)
# -
# To find a trajectory from an initial state $x_0$ to a final state $x_\text{f}$ in time $T_\text{f}$ we solve a point-to-point trajectory generation problem. We also set the initial and final inputs, which sets the vehicle velocity $v$ and steering wheel angle $\delta$ at the endpoints.
# +
# Define the endpoints of the trajectory
x0 = [0., 2., 0.]; u0 = [15, 0.]
xf = [75, -2., 0.]; uf = [15, 0.]
Tf = xf[0] / uf[0]
# Define a set of basis functions to use for the trajectories
poly = fs.PolyFamily(6)
# Find a trajectory between the initial condition and the final condition
traj = fs.point_to_point(vehicle_flat, x0, u0, xf, uf, Tf, basis=poly)
# Create the trajectory
t = np.linspace(0, Tf, 100)
x, u = traj.eval(t)
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the trajectory in xy coordinate
plt.subplot(1, 4, 2)
plt.plot(x[1], x[0])
plt.xlabel('y [m]')
plt.ylabel('x [m]')
# Add lane lines and scale the axis
plt.plot([-4, -4], [0, x[0, -1]], 'k-', linewidth=1)
plt.plot([0, 0], [0, x[0, -1]], 'k--', linewidth=1)
plt.plot([4, 4], [0, x[0, -1]], 'k-', linewidth=1)
plt.axis([-10, 10, -5, x[0, -1] + 5])
# Time traces of the state and input
plt.subplot(2, 4, 3)
plt.plot(t, x[1])
plt.ylabel('y [m]')
plt.subplot(2, 4, 4)
plt.plot(t, x[2])
plt.ylabel('theta [rad]')
plt.subplot(2, 4, 7)
plt.plot(t, u[0])
plt.xlabel('Time t [sec]')
plt.ylabel('v [m/s]')
plt.axis([0, Tf, u0[0] - 1, uf[0] +1])
plt.subplot(2, 4, 8)
plt.plot(t, u[1]);
plt.xlabel('Time t [sec]')
plt.ylabel('$\delta$ [rad]')
plt.tight_layout()
# -
# ## Vehicle transfer functions for forward and reverse driving (Example 10.11)
#
# The vehicle steering model has different properties depending on whether we are driving forward or in reverse. The figures below show step responses from steering angle to lateral translation for a the linearized model when driving forward (dashed) and reverse (solid). In this simulation we have added an extra pole with the time constant $T=0.1$ to approximately account for the dynamics in the steering system.
#
# With rear-wheel steering the center of mass first moves in the wrong direction and the overall response with rear-wheel steering is significantly delayed compared with that for front-wheel steering. (b) Frequency response for driving forward (dashed) and reverse (solid). Notice that the gain curves are identical, but the phase curve for driving in reverse has non-minimum phase.
# RMM note, 27 Jun 2019:
# * I cannot recreate the figures in Example 10.11. Since we are looking at the lateral *velocity*, there is a differentiator in the output and this takes the step function and creates an offset at $t = 0$ (intead of a smooth curve).
# * The transfer functions are also different, and I don't quite understand why. Need to spend a bit more time on this one.
#
# KJA comment, 1 Jul 2019: The reason why you cannot recreate figures i Example 10.11 is because the caption in figure is wrong, sorry my fault, the y-axis should be lateral position not lateral velocity. The approximate expression for the transfer functions
#
# $$
# G_{y\delta}=\frac{av_0s+v_0^2}{bs} = \frac{1.5 s + 1}{3s^2}=\frac{0.5s + 0.33}{s}
# $$
#
# are quite close to the values that you get numerically
#
# In this case I think it is useful to have v=1 m/s because we do not drive to fast backwards.
#
# RMM response, 17 Jul 2019
# * Updated figures below use the same parameters as the running example (the current text uses different parameters)
# * Following the material in the text, a pole is added at s = -1 to approximate the dynamics of the steering system. This is not strictly needed, so we could decide to take it out (and update the text)
#
# KJA comment, 20 Jul 2019: I have been oscillating a bit about this example. Of course it does not make sense to drive in reverse in 30 m/s but it seems a bit silly to change parameters just in this case (if we do we have to motivate it). On the other hand what we are doing is essentially based on transfer functions and a RHP zero. My current view which has changed a few times is to keep the standard parameters. In any case we should eliminate the extra time constant. A small detail, I could not see the time response in the file you sent, do not resend it!, I will look at the final version.
#
# RMM comment, 23 Jul 2019: I think it is OK to have the speed be different and just talk about this in the text. I have removed the extra time constant in the current version.
# +
# Magnitude of the steering input (half maximum)
Msteer = vehicle_params['maxsteer'] / 2
# Create a linearized model of the system going forward at 2 m/s
forward_lateral = ct.linearize(lateral, [0, 0], [0], params={'velocity': 2})
forward_tf = ct.ss2tf(forward_lateral)[0, 0]
print("Forward TF = ", forward_tf)
# Create a linearized model of the system going in reverise at 1 m/s
reverse_lateral = ct.linearize(lateral, [0, 0], [0], params={'velocity': -2})
reverse_tf = ct.ss2tf(reverse_lateral)[0, 0]
print("Reverse TF = ", reverse_tf)
# +
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure()
# Forward motion
t, y = ct.step_response(forward_tf * Msteer, np.linspace(0, 4, 500))
plt.plot(t, y, 'b--')
# Reverse motion
t, y = ct.step_response(reverse_tf * Msteer, np.linspace(0, 4, 500))
plt.plot(t, y, 'b-')
# Add labels and reference lines
plt.axis([0, 4, -0.5, 2.5])
plt.legend(['forward', 'reverse'], loc='upper left')
plt.xlabel('Time $t$ [s]')
plt.ylabel('Lateral position [m]')
plt.plot([0, 4], [0, 0], 'k-', linewidth=1)
# Plot the Bode plots
plt.figure()
plt.subplot(1, 2, 2)
ct.bode_plot(forward_tf[0, 0], np.logspace(-1, 1, 100), color='b', linestyle='--')
ct.bode_plot(reverse_tf[0, 0], np.logspace(-1, 1, 100), color='b', linestyle='-')
plt.legend(('forward', 'reverse'));
# -
# ## Feedforward Compensation (Example 12.6)
#
# For a lane transfer system we would like to have a nice response without overshoot, and we therefore consider the use of feedforward compensation to provide a reference trajectory for the closed loop system. We choose the desired response as $F_\text{m}(s) = a^22/(s + a)^2$, where the response speed or aggressiveness of the steering is governed by the parameter $a$.
# RMM note, 27 Jun 2019:
# * $a$ was used in the original description of the dynamics as the reference offset. Perhaps choose a different symbol here?
# * In current version of Ch 12, the $y$ axis is labeled in absolute units, but it should actually be in normalized units, I think.
# * The steering angle input for this example is quite high. Compare to Example 8.8, above. Also, we should probably make the size of the "lane change" from this example match whatever we use in Example 8.8
#
# KJA comments, 1 Jul 2019: Chosen parameters look good to me
#
# RMM response, 17 Jul 2019
# * I changed the time constant for the feedforward model to give something that is more reasonable in terms of turning angle at the speed of $v_0 = 30$ m/s. Note that this takes about 30 body lengths to change lanes (= 9 seconds at 105 kph).
# * The time to change lanes is about 2X what it is using the differentially flat trajectory above. This is mainly because the feedback controller applies a large pulse at the beginning of the trajectory (based on the input error), whereas the differentially flat trajectory spreads the turn over a longer interval. Since are living the steering angle, we have to limit the size of the pulse => slow down the time constant for the reference model.
#
# KJA response, 20 Jul 2019: I think the time for lane change is too long, which may depend on the small steering angles used. The largest steering angle is about 0.03 rad, but we have admitted larger values in previous examples. I suggest that we change the design so that the largest sterring angel is closer to 0.05, see the remark from Bjorn O a lane change could take about 5 s at 30m/s.
#
# RMM response, 23 Jul 2019: I reset the time constant to 0.2, which gives something closer to what we had for trajectory generation. It is still slower, but this is to be expected since it is a linear controller. We now finish the trajectory in 20 body lengths, which is about 6 seconds.
# +
# Define the desired response of the system
a = 0.2
P = ct.ss2tf(lateral_normalized)
Fm = ct.TransferFunction([a**2], [1, 2*a, a**2])
Fr = Fm / P
# Compute the step response of the feedforward components
t, y_ffwd = ct.step_response(Fm, np.linspace(0, 25, 100))
t, delta_ffwd = ct.step_response(Fr, np.linspace(0, 25, 100))
# Scale and shift to correspond to lane change (-2 to +2)
y_ffwd = 0.5 - 1 * y_ffwd
delta_ffwd *= 1
# Overhead view
plt.subplot(1, 2, 1)
plt.plot(y_ffwd, t)
plt.plot(-1*np.ones(t.shape), t, 'k-', linewidth=1)
plt.plot(0*np.ones(t.shape), t, 'k--', linewidth=1)
plt.plot(1*np.ones(t.shape), t, 'k-', linewidth=1)
plt.axis([-5, 5, -2, 27])
# Plot the response
plt.subplot(2, 2, 2)
plt.plot(t, y_ffwd)
# plt.axis([0, 10, -5, 5])
plt.ylabel('Normalized position y/b')
plt.subplot(2, 2, 4)
plt.plot(t, delta_ffwd)
# plt.axis([0, 10, -1, 1])
plt.ylabel('$\\delta$ [rad]')
plt.xlabel('Normalized time $v_0 t / b$');
plt.tight_layout()
# -
# ## Fundamental Limits (Example 14.13)
#
# Consider a controller based on state feedback combined with an observer where we want a faster closed loop system and choose $\omega_\text{c} = 10$, $\zeta_\text{c} = 0.707$, $\omega_\text{o} = 20$, and $\zeta_\text{o} = 0.707$.
# KJA comment, 20 Jul 2019: This is a really troublesome case. If we keep it as a vehicle steering problem we must have an order of magnitude lower valuer for $\omega_c$ and $\omega_o$ and then the zero will not be slow. My recommendation is to keep it as a general system with the transfer function. $P(s)=(s+1)/s^2$. The text then has to be reworded.
#
# RMM response, 23 Jul 2019: I think the way we have it is OK. Our current value for the controller and observer is $\omega_\text{c} = 0.7$ and $\omega_\text{o} = 1$. Here we way we want something faster and so we got to $\omega_\text{c} = 7$ (10X) and $\omega_\text{o} = 10$ (10X).
# +
# Compute the feedback gain using eigenvalue placement
wc = 10
zc = 0.707
eigs = np.roots([1, 2*zc*wc, wc**2])
K = ct.place(A, B, eigs)
kr = np.real(1/clsys.evalfr(0))
print("K = ", np.squeeze(K))
# Compute the estimator gain using eigenvalue placement
wo = 20
zo = 0.707
eigs = np.roots([1, 2*zo*wo, wo**2])
L = np.transpose(
ct.place(np.transpose(A), np.transpose(C), eigs))
print("L = ", np.squeeze(L))
# Construct an output-based controller for the system
C1 = ct.ss2tf(ct.StateSpace(A - B@K - L@C, L, K, 0))
print("C(s) = ", C1)
# Compute the loop transfer function and plot Nyquist, Bode
L1 = P * C1
plt.figure(); ct.nyquist_plot(L1, np.logspace(0.5, 3, 500))
plt.figure(); ct.bode_plot(L1, np.logspace(-1, 3, 500));
# +
# Modified control law
wc = 10
zc = 2.6
eigs = np.roots([1, 2*zc*wc, wc**2])
K = ct.place(A, B, eigs)
kr = np.real(1/clsys.evalfr(0))
print("K = ", np.squeeze(K))
# Construct an output-based controller for the system
C2 = ct.ss2tf(ct.StateSpace(A - B@K - L@C, L, K, 0))
print("C(s) = ", C2)
# -
# Plot the gang of four for the two designs
ct.gangof4(P, C1, np.logspace(-1, 3, 100))
ct.gangof4(P, C2, np.logspace(-1, 3, 100))
|
examples/steering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Rotation matrix and mahalanobis distance
#
# It is possible to rotate N dimensional dataset into a space. Since mahalanobis distance uses this background information, then this space will have an impact to distance calculations.
#
# Firstly, we shall clear away old variables.
rm(list=ls())
# Load additional libraries for to generate random data from standard distribution. This package is missing from base jupyter-datascience docker image, so make sure to build this into your container.
#
# We will create two datasets, one to represent local area and second as global.
# +
library(mvtnorm)
sigma<-matrix(c(4,-4,-4,5),ncol=2)
mean <- c(0,0)
local <- rmvnorm(n=800, mean=mean, sigma = sigma)
global <- rnorm(200, mean=mean, 5)
global <- matrix(global, ncol=2, nrow=100)
data <- rbind(local, global)
# -
# Then, lets define a function that rotates a data matrix N degrees. To achieve this, we have to multiply our dataset with a newly created rotation matrix. Note that $\sin$ and $\cos$ functions in R use radians instead of degrees, we we have to convert this into degree form with $\pi$ formula. This skews our results because of floating point issues, but result is good enough.
rotate <- function(deg, data) {
rad <- (pi / 180)*deg
form <- c(cos(rad), sin(rad), -sin(rad), cos(rad))
rotMatr <- matrix(form,ncol=2, nrow=2)
data2 <- matrix(,ncol=ncol(data),nrow=nrow(data))
for(i in seq_along(1:nrow(data))) {
pp <- rotMatr%*%data[i,]
data2[i,] <- pp
}
return(data2)
}
# Remember mahalanobis distance from [distance notebook](distances.ipynb).
myMahal <- function(A, B, invCOV) {
diff = A - B
dist = sqrt( t(diff) %*% invCOV %*% diff )
return(dist)
}
# Now we will rotate our generated dataset while measuring distance between two points as background changes. The two arbitrarily chosen points remain stationary.
# +
DEBUG <- TRUE
point1 <- c(-4,-4)
point2 <- c(4,4)
distances_l <- c()
distances_g <- c()
max_deg <- 360
print_on <- 30
for( deg in seq_along(1:max_deg)) {
rotated_l <- rotate(deg, local)
rotated_g <- rotate(deg, data)
covar_l <- solve(cov(rotated_l))
covar_g <- solve(cov(rotated_g))
distances_l[deg] <- myMahal(point1, point2, covar_l)
distances_g[deg] <- myMahal(point1, point2, covar_g)
if(DEBUG==TRUE) {
if (deg %% print_on == 0) {
plot(rotated_g, xlim=c(-20,20), ylim=c(-20,20))
points(point1[1], point1[2], col = "red")
points(point2[1], point2[2], col = "blue")
}
#Sys.sleep(0.1*1)
}
}
dev.off()
# -
# Finally, we can plot the measured distances. Note how the distance between points changes as our dataset expands and contracts during rotation. Effects are more severe in local dataset as global set is more spread out and thus its properties change less.
plot(0, xlim=c(1,max_deg), ylim=c(1,max(distances_l)), xlab = "Angle", ylab = "Distance", type="n")
lines(distances_l, type= "l", col="blue")
lines(distances_g, type= "l", col="red")
legend("bottomleft", lty= c(1,1), legend=c("local", "global"), col=c("blue", "red"))
|
SDM/R-jupyter/004-rotate.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ml-project
# language: python
# name: ml-project
# ---
# +
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from utility import *
# +
def load_cup():
ml_cup = np.delete(np.genfromtxt('../../cup/ML-CUP20-TR.csv',
delimiter=','), obj=0, axis=1)
return ml_cup[:, :-2], ml_cup[:, -2:]
def load_cup_blind():
return np.delete(np.genfromtxt('../../cup/ML-CUP20-TS.csv',
delimiter=','), obj=0, axis=1)
def mean_euclidean_error(y_true, y_pred):
assert y_true.shape == y_pred.shape
return np.mean(np.linalg.norm(y_pred - y_true, axis=1))
# -
neg_mean_euclidean_error = make_scorer(mean_euclidean_error, greater_is_better=False)
# +
X, y = load_cup()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
# -
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
from sklearn.multioutput import MultiOutputRegressor
from sklearn.metrics import make_scorer
import matplotlib.pyplot as plt
finite = []
for i in range(0,3):
svr = MultiOutputRegressor(SVR(epsilon=0.9, C=14, gamma=0.08, kernel='rbf'))
svr.fit(X_train, y_train)
out = svr.predict(X_test)
m = mean_euclidean_error(out, y_test)
finite.append(m)
np.mean(finite)
from sklearn.model_selection import learning_curve
# Ploting Learning Curve
# Creating CV training and test scores forvarious training set sizes
train_sizes, train_scores, test_scores = learning_curve(svr,
X, y, scoring=neg_mean_euclidean_error, n_jobs=-1)
train_sizes
train_scores = train_scores * (-1)
test_scores = test_scores * (-1)
# +
# Creating means and standard deviations of training set scores
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
# Creating means and standard deviations of test set scores
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Drawing lines
# plt.subplots(1, figsize=(10,10))
plt.plot(train_sizes, train_mean, '--', color="blue", label="Training score")
plt.plot(train_sizes, test_mean, color="orange", label="Cross-validation score")
# Drawing bands
plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, color="#DDDDDD")
plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color="#DDDDDD")
# Creating plot
plt.title("Learning Curve")
plt.xlabel("Training Set Size"), plt.ylabel("Loss Score"), plt.legend(loc="best")
plt.tight_layout(); plt.show()
# -
|
src/scikit/SVR Test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Fine-Tuning a BERT Model and Create a Text Classifier
#
# We have already performed the Feature Engineering to create BERT embeddings from the `reviews_body` text using the pre-trained BERT model, and split the dataset into train, validation and test files. To optimize for Tensorflow training, we saved the files in TFRecord format.
#
# Now, let’s fine-tune the BERT model to our Customer Reviews Dataset and add a new classification layer to predict the `star_rating` for a given `review_body`.
#
# 
#
# As mentioned earlier, BERT’s attention mechanism is called a Transformer. This is, not coincidentally, the name of the popular BERT Python library, “Transformers,” maintained by a company called [HuggingFace](https://github.com/huggingface/transformers). We will use a variant of BERT called [DistilBert](https://arxiv.org/pdf/1910.01108.pdf) which requires less memory and compute, but maintains very good accuracy on our dataset.
# # DEMO 2:
#
# # Run Model Training on Amazon Elastic Kubernetes Service (Amazon EKS)
#
# Amazon EKS is a managed service that makes it easy for you to run Kubernetes on AWS without needing to install and operate your own Kubernetes control plane or worker nodes.
# ## Amazon FSx For Lustre
#
# Amazon FSx for Lustre is a fully managed service that provides cost-effective, high-performance storage for compute workloads. Many workloads such as machine learning, high performance computing (HPC), video rendering, and financial simulations depend on compute instances accessing the same set of data through high-performance shared storage.
#
# Powered by Lustre, the world's most popular high-performance file system, FSx for Lustre offers sub-millisecond latencies, up to hundreds of gigabytes per second of throughput, and millions of IOPS. It provides multiple deployment options and storage types to optimize cost and performance for your workload requirements.
#
# FSx for Lustre file systems can also be linked to Amazon S3 buckets, allowing you to access and process data concurrently from both a high-performance file system and from the S3 API.
# ## Using Amazon FSx for Lustre Container Storage Interface (CSI)
#
# The Amazon FSx for Lustre Container Storage Interface (CSI) driver provides a CSI interface that allows Amazon EKS clusters to manage the lifecycle of Amazon FSx for Lustre file systems.
#
# * https://docs.aws.amazon.com/eks/latest/userguide/fsx-csi.html
# * https://github.com/kubernetes-sigs/aws-fsx-csi-driver
#
# ```
# code/
# train.py
#
# input/
# data/
# test/
# *.tfrecord
# train/
# *.tfrecord
# validation/
# *.tfrecord
#
# ```
# ## List FSx Files
# !pip install -q awscli==1.18.183 boto3==1.16.23
# !aws s3 ls --recursive s3://fsx-antje/
# ## Model Training Code `train.py`
# !pygmentize code/train.py
# ## Write `train.yaml`
# !pygmentize ./train.yaml
# !aws s3 cp code/train.py s3://fsx-antje/code/train.py
# ## Create Kubernetes Training Job
# !kubectl get nodes
# !kubectl delete -f train.yaml
# !kubectl create -f train.yaml
# ## Describe Training Job
# !kubectl get pods
# !kubectl get pod bert-model-training
# !kubectl describe pod bert-model-training
# ## Review Training Job Logs
# +
# %%time
# !kubectl logs -f bert-model-training
|
10_pipeline/kubeflow/wip/dlc/02_Run_ML_Training_EKS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import dc_stat_think as dcst
# dcst.pearson_r?
# -
x, y = dcst.ecdf(array)
# + active=""
# The p-value only makes sense if the below are clearly defined:
# null hypothesis
# test statistic
# meaning of "at least as extreme as"
# + active=""
# So, the pipeline for doing a hypothesis test is to:
#
# clearly state the null hypothesis and the test statistic.
#
# Then you *simulate* production of the data as if the null hypothesis were true.
#
# For each of these simulated data sets, compute the test statistic.
#
# The p-value is then the fraction of your simulated data sets
# for which the test statistic is at least as extreme as for the real data.
# -
permutation test (use from dc_stat_think)
plt.semilogy
|
notebooks/machine_learning_algorithms/3D-Statistical-Thinking.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/passionlee428/my-repo/blob/master/Alex_net.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="rqJ7s0608Pe9" colab_type="code" outputId="0b0e013c-4291-487c-c4de-35c96bf8008b" colab={"base_uri": "https://localhost:8080/", "height": 332}
# !nvidia-smi
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.functional as F
print(torch.__version__)
# + id="QvpNLMNw81-8" colab_type="code" colab={}
# Number of classes
num_classes = 10
# Loss function & Optimizers
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR
# Hyperparameters
num_epochs = 300
batch_size = 128
learning_rate = 0.001
weight_decay = 1e-4
lr_min = 0.1 * learning_rate
class MyAlexNet(nn.Module):
def __init__(self):
super(MyAlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 96, 3, stride=2, padding=1),
nn.ReLU(),
nn.MaxPool2d(3, stride=2),
nn.Conv2d(96, 256, 3, padding=1),
nn.ReLU(),
nn.MaxPool2d(3, stride=2),
nn.Conv2d(256, 384, 3, padding=1),
nn.ReLU(),
nn.Conv2d(384, 384, 3, padding=1),
nn.ReLU(),
nn.Conv2d(384, 384, 3, padding=1),
nn.ReLU(),
nn.MaxPool2d(3, stride=2),
)
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Dropout(),
nn.Linear(384, 4096),
nn.ReLU(),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Linear(4096, num_classes)
)
def forward(self, inputs):
hidden = self.features(inputs)
outputs = self.classifier(hidden)
return outputs
# Prepare optimizers
model = MyAlexNet().cuda()
optimizer = optimizer(model.parameters(),
lr=learning_rate,
weight_decay=weight_decay)
# + id="qSpdX52J82KU" colab_type="code" outputId="0008586f-a608-4636-b64e-2567054a7836" colab={"base_uri": "https://localhost:8080/", "height": 101, "referenced_widgets": ["23ad11fd84de412fb85187dd67c41dbe", "dfd8f5cf97ac4b5383885e694120f7e9", "3dadb5ce21b2465b86f869f06f2a746e", "2f4ee29506ef4a4ba56d88d09d6c01b1", "<KEY>", "1b3f0d7820e14f139fc7945e7c030af8", "5f0c360ecf894fafaa432b741f5f0063", "dbfa3b780d0a4bbd978496380f768f8c"]}
import sys
sys.path.insert(0, "..")
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
# CIFAR preprocessing
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# DataLoader for CIFAR-10
cifar_train = torchvision.datasets.CIFAR10(root="./",
train=True,
download=True,
transform=transform_train
)
cifar_test = torchvision.datasets.CIFAR10(root="./",
train=False,
download=True,
transform=transform_test)
train_loader = DataLoader(cifar_train,
batch_size=batch_size,
shuffle=True,
num_workers=4)
test_loader = DataLoader(cifar_test,
batch_size=batch_size,
shuffle=False,
num_workers=4)
# + id="NmaMhfcMoUK7" colab_type="code" colab={}
accuracy_graph = {'train':[], 'test':[], 'epoch': []}
loss_graph = {'train':[], 'test':[], 'epoch': []}
class accumulator():
def __init__(self):
self.cnt = 0
self.obs = 0
self.loss = 0
def add(self, predict, output):
assert len(predict) == len(output)
self.cnt += self.count(predict, output).cpu().numpy()
self.obs += len(predict)
def count(self, predict, output):
correct = torch.sum(torch.argmax(predict, dim=1) == output)
return correct
def loss_update(self, loss, batch_size):
self.loss += loss * batch_size
def running_stat(self):
if self.obs != 0:
run_acc = 100 * self.cnt / self.obs
run_loss = self.loss / self.obs
else:
run_acc = 0
rum_loss = 0
return run_acc, run_loss
def reset(self):
self.__init__()
# + id="Amsq_9b4oZui" colab_type="code" outputId="e987c284-381e-4ef8-bd6f-a08789f16164" colab={"base_uri": "https://localhost:8080/", "height": 332}
# Template for running epoch
def run_epoch(data_loader, metric_accumulator, train=False):
if train:
model.train()
scheduler = lr_scheduler(optimizer,
len(data_loader),
eta_min=lr_min)
else:
model.eval()
for inputs, targets in data_loader:
# Upload data to GPU
inputs, targets = inputs.cuda(), targets.cuda()
if train:
predict = model(inputs)
else:
with torch.autograd.no_grad():
predict = model(inputs)
# Compute loss
loss = criterion(predict, targets)
# Backpropagation
if train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
metric_accumulator.add(predict, targets)
metric_accumulator.loss_update(loss, batch_size)
# Log initiate
with open("log_alexnet.txt", "w") as log:
init_log = "Epoch: {}, Batch size: {}, Optimizer: {}".format(num_epochs,
batch_size,
optimizer)
log.write(init_log)
# Experiment
for epoch in range(num_epochs):
train_accuracy = accumulator()
test_accuracy = accumulator()
# Training
run_epoch(train_loader, train_accuracy, train=True)
# Evaluation
if epoch % 10 ==0:
run_epoch(test_loader, test_accuracy, train=False)
train_acc, train_loss = train_accuracy.running_stat()
test_acc, test_loss = test_accuracy.running_stat()
accuracy_graph['train'].append(train_acc)
accuracy_graph['test'].append(test_acc)
accuracy_graph['epoch'].append(epoch)
loss_graph['train'].append(train_loss)
loss_graph['test'].append(test_loss)
loss_graph['epoch'].append(epoch)
# Upodate log
log_accuracy = "\ntrain accuracy: {:.2f}%, test accuracy: {:.2f}%".format(train_acc,
test_acc)
log_epoch = "\nepoch:{}, train_loss: {:.4f}, test_loss: {:.4f}".format(epoch+1,
train_loss,
test_loss)
with open("log_alexnet.txt", "a") as log:
log.write(log_accuracy)
log.write(log_epoch)
print(log_accuracy, log_epoch)
train_accuracy.reset(), test_accuracy.reset()
# Model save
torch.save(model.state_dict(), "./alexnet.pt")
|
Alex_net.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-nlp]
# language: python
# name: conda-env-.conda-nlp-py
# ---
# +
import pandas as pd
import numpy as np
# Reading CSV from link
def read_csv_from_link(url):
path = 'https://drive.google.com/uc?export=download&id='+url.split('/')[-2]
df = pd.read_csv(path,delimiter="\t",error_bad_lines=False, header=None)
return df
# Loading All Data
malayalam_train = read_csv_from_link('https://drive.google.com/file/d/13JCCr-IjZK7uhbLXeufptr_AxvsKinVl/view?usp=sharing')
malayalam_dev = read_csv_from_link('https://drive.google.com/file/d/1J0msLpLoM6gmXkjC6DFeQ8CG_rrLvjnM/view?usp=sharing')
malayalam_test = read_csv_from_link('https://drive.google.com/file/d/1zx1wCC9A-Pp80mzbqixb52WlWQQ7ATyJ/view?usp=sharing')
# +
# malayalam Preprocess
malayalam_train = malayalam_train.iloc[:, 0:2]
malayalam_train = malayalam_train.rename(columns={0: "text", 1: "label"})
malayalam_dev = malayalam_dev.iloc[:, 0:2]
malayalam_dev = malayalam_dev.rename(columns={0: "text", 1: "label"})
# Stats
malayalam_train['label'] = pd.Categorical(malayalam_train.label)
malayalam_dev['label'] = pd.Categorical(malayalam_dev.label)
# +
# Change Device - CPU/GPU-0/GPU-1
torch.cuda.set_device(0)
device = 'cuda'
device = device if torch.cuda.is_available() else 'cpu'
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import os
from sklearn.metrics import classification_report, f1_score
from torch.utils.data import Dataset
# Dataset
class malayalam_Offensive_Dataset(Dataset):
def __init__(self, encodings, labels, bpe = False):
self.encodings = encodings
self.labels = labels
self.is_bpe_tokenized = bpe
def __getitem__(self, idx):
if not self.is_bpe_tokenized:
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
else:
item = {
'input_ids': torch.LongTensor(self.encodings[idx].ids),
'attention_mask': torch.LongTensor(self.encodings[idx].attention_mask)
}
item['labels'] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.labels)
# -
# list of random seeds to try
r_seeds = [5,10,15,23,45,52,100,150,210,500]
# +
from transformers import BertTokenizer, BertForSequenceClassification
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
model = BertForSequenceClassification.from_pretrained('bert-base-multilingual-cased', num_labels=6)
model_name = 'Mbert_base_cased_kannada'
from transformers import AdamW
optimizer = AdamW(model.parameters(), lr=1e-5)
label_mapping = {
'Not_offensive': 0,
'not-Malayalam': 1,
'Offensive_Targeted_Insult_Other': 2,
'Offensive_Targeted_Insult_Group': 3,
'Offensive_Untargetede': 4
}
# Collecting Text and Labels
train_batch_sentences = list(malayalam_train['text'])
train_batch_labels = [label_mapping[x] for x in malayalam_train['label']]
dev_batch_sentences = list(malayalam_dev['text'])
dev_batch_labels = [label_mapping[x] for x in malayalam_dev['label']]
# Defining Datasets
train_dataset = malayalam_Offensive_Dataset(train_encodings, train_labels, bpe = False)
dev_dataset = malayalam_Offensive_Dataset(dev_encodings, dev_labels, bpe = False)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
best_val_f1 = 0
count = 0
# Dataloaders
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
dev_loader = DataLoader(dev_dataset, batch_size=16, shuffle=False)
# -
for i in r_seeds:
random.seed(i)
np.random.seed(i)
torch.manual_seed(i)
for epoch in range(100):
train_preds = []
train_labels = []
total_train_loss = 0
model.train()
print("==========================================================")
print("Epoch {}".format(epoch))
print("Train")
for batch in tqdm(train_loader):
optimizer.zero_grad()
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
labels = batch['labels'].to(device)
outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
if loss_weighted:
loss = loss_function(outputs[1], labels)
else:
loss = outputs[0]
loss.backward()
optimizer.step()
for logits in outputs[1].detach().cpu().numpy():
train_preds.append(np.argmax(logits))
for logits in labels.cpu().numpy():
train_labels.append(logits)
total_train_loss += loss.item()/len(train_loader)
print("Dev")
dev_preds = []
model.eval()
total_val_loss = 0
with torch.set_grad_enabled(False):
for batch in tqdm(dev_loader):
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
labels = batch['labels'].to(device)
outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
if loss_weighted:
loss = loss_function(outputs[1], labels)
else:
loss = outputs[0]
total_val_loss += loss.item()/len(dev_loader)
for logits in outputs[1].cpu().numpy():
dev_preds.append(np.argmax(logits))
y_true = dev_batch_labels
y_pred = dev_preds
target_names = label_mapping.keys()
train_report = classification_report(train_labels, train_preds, target_names=target_names)
report = classification_report(y_true, y_pred, target_names=target_names)
val_f1 = f1_score(y_true, y_pred, average='macro')
# Save Best Model
if val_f1 > best_val_f1:
PATH = '../../finetuned_models/' + model_name +'_seed_' + i + '.pth'
torch.save(model.state_dict(), PATH)
model.save_pretrained(os.path.join('../../finetuned_berts/', (model_name+'_seed_' + i)))
best_val_f1 = val_f1
count = 0
else:
count += 1
print(train_report)
print(report)
print("Epoch {}, Train Loss = {}, Val Loss = {}, Val F1 = {}, Best Val f1 = {}, stagnant = {}".format(epoch, total_train_loss, total_val_loss, val_f1, best_val_f1, count))
if count == 5:
print("No increase for 5 epochs, Stopping ...")
break
|
Random_seed_ensemble/RandomSeed_Ensemble_malayalam.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# This is just an extract of the bookings data set
data = pd.read_csv('testdata.csv', sep=',', header=0)
len(data)
data.columns
data.head()
# sort the data so we can efficiently compare line n with line n+1
data.sort_values(by=['Customer ID', 'Booking date'], axis=0, ascending=True, inplace=True)
# Is this the customers 1st second, third, ... booking?
data['Booking Sequence'] = data.groupby(['Customer ID']).cumcount()+1;
data.tail(20)
data['Booking Sequence'].describe()
data.reset_index(inplace=True)
len(data)
# +
## unperformant solution, but describes what we want to do: On which channel was the customer aquired last time?
#for i in range(1, len(data)):
# if (groupdata.loc[i, 'Customer ID'] == groupdata.loc[i-1, 'Customer ID']):
# #print(str(data.loc[i, 'Customer ID']) + '==' + str(data.loc[i-1, 'Customer ID']))
# data.loc[i, 'Previous Channel_Name'] = data.loc[i-1, 'Channel_Name']
# data.loc[i, 'Previous Channel'] = data.loc[i-1, 'Channel']
# data.loc[i, 'Previous Product ID'] = data.loc[i-1, 'Product ID']
# data.loc[i, 'Previous Landingpage'] = data.loc[i-1, 'Landingpage']
# -
# performant solution: shift the index by 1 and join again to original df
#Customer ID Product ID Channel Landingpage Channel_Name Booking Sequence
data_shifted = data[['Customer ID', 'Product ID', 'Channel',
'Channel_Name', 'Landingpage', 'Booking Sequence', 'Booking date']].copy(deep=True)
data_shifted.index = data_shifted.index+1
data = data.join(data_shifted, how='left', rsuffix='_shifted')
data.columns
data[(data['Customer ID'] == data['Customer ID_shifted'])].head(20)
# +
# 'Previous Booking date', 'Previous Product ID','Previous Channel', 'Previous Landingpage', 'Previous Channel_Name',
data['Previous Booking date'] = pd.NaT
data.loc[data['Customer ID'] == data['Customer ID_shifted'],'Previous Booking date']= data['Booking date_shifted']
# -
data['Previous Product ID'] = None
data.loc[data['Customer ID'] == data['Customer ID_shifted'],'Previous Product ID']= data['Product ID_shifted']
data['Previous Channel'] = None
data.loc[data['Customer ID'] == data['Customer ID_shifted'],'Previous Channel']= data['Channel_shifted']
data['Previous Channel_Name'] = None
data.loc[data['Customer ID'] == data['Customer ID_shifted'],'Previous Channel_Name']= data['Channel_Name_shifted']
data['Previous Landingpage'] = None
data.loc[data['Customer ID'] == data['Customer ID_shifted'],'Previous Landingpage']= data['Landingpage_shifted']
data_save= data.copy(deep=True)
data=data[['Booking date', 'Quantity', 'Customer ID', 'Product ID',
'Channel', 'Landingpage', 'Channel_Name', 'Booking Sequence',
'Previous Booking date', 'Previous Product ID', 'Previous Channel',
'Previous Channel_Name', 'Previous Landingpage']]
data.head(20)
data.dtypes
data.to_csv('ChannelMigration.csv')
|
Channel_Migration_Book.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# - nb45の編集
# - fc 特徴量を作成する
# # Import everything I need :)
import warnings
import time
import multiprocessing
import glob
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
import lightgbm as lgb
from fastprogress import progress_bar
# # Preparation
nb = 47
isSmallSet = False
length = 20000
pd.set_option('display.max_columns', 200)
warnings.filterwarnings('ignore')
# use atomic numbers to recode atomic names
ATOMIC_NUMBERS = {
'H': 1,
'C': 6,
'N': 7,
'O': 8,
'F': 9
}
file_path = '../input/champs-scalar-coupling/'
glob.glob(file_path + '*')
# train
path = file_path + 'train.csv'
if isSmallSet:
train = pd.read_csv(path) [:length]
else:
train = pd.read_csv(path)
# test
path = file_path + 'test.csv'
if isSmallSet:
test = pd.read_csv(path)[:length]
else:
test = pd.read_csv(path)
# structure
path = file_path + 'structures.csv'
structures = pd.read_csv(path)
# + active=""
# # fc_test
# path = file_path + 'nb29_fc_test_feature.csv'
# if isSmallSet:
# fc_test = pd.read_csv(path)[:length]
# else:
# fc_test = pd.read_csv(path)
# -
# scalar_coupling_contributions
path = file_path + 'scalar_coupling_contributions.csv'
if isSmallSet:
fc = pd.read_csv(path)['fc'][:length]
else:
fc = pd.read_csv(path)['fc']
# train dist-interact
path = file_path + 'nb33_train_dist-interaction.csv'
if isSmallSet:
dist_interact_train = pd.read_csv(path)[:length]
else:
dist_interact_train = pd.read_csv(path)
# test dist-interact
path = file_path + 'nb33_test_dist-interaction.csv'
if isSmallSet:
dist_interact_test = pd.read_csv(path)[:length]
else:
dist_interact_test = pd.read_csv(path)
# ob charge train
path = file_path + 'train_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'
if isSmallSet:
ob_charge_train = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)
else:
ob_charge_train = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)
# ob charge test
path = file_path + 'test_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'
if isSmallSet:
ob_charge_test = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)
else:
ob_charge_test = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)
# +
if isSmallSet:
print('using SmallSet !!')
print('-------------------')
print(f'There are {train.shape[0]} rows in train data.')
print(f'There are {test.shape[0]} rows in test data.')
print(f"There are {train['molecule_name'].nunique()} distinct molecules in train data.")
print(f"There are {test['molecule_name'].nunique()} distinct molecules in test data.")
print(f"There are {train['atom_index_0'].nunique()} unique atoms.")
print(f"There are {train['type'].nunique()} unique types.")
# -
# ---
# ## myFunc
# **metrics**
def kaggle_metric(df, preds):
df["prediction"] = preds
maes = []
for t in df.type.unique():
y_true = df[df.type==t].scalar_coupling_constant.values
y_pred = df[df.type==t].prediction.values
mae = np.log(mean_absolute_error(y_true, y_pred))
maes.append(mae)
return np.mean(maes)
# ---
# **momory**
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
c_prec = df[col].apply(lambda x: np.finfo(x).precision).max()
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max and c_prec == np.finfo(np.float16).precision:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max and c_prec == np.finfo(np.float32).precision:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
# # Feature Engineering
# Build Distance Dataset
# +
def build_type_dataframes(base, structures, coupling_type):
base = base[base['type'] == coupling_type].drop('type', axis=1).copy()
base = base.reset_index()
base['id'] = base['id'].astype('int32')
structures = structures[structures['molecule_name'].isin(base['molecule_name'])]
return base, structures
# a,b = build_type_dataframes(train, structures, '1JHN')
# -
def add_coordinates(base, structures, index):
df = pd.merge(base, structures, how='inner',
left_on=['molecule_name', f'atom_index_{index}'],
right_on=['molecule_name', 'atom_index']).drop(['atom_index'], axis=1)
df = df.rename(columns={
'atom': f'atom_{index}',
'x': f'x_{index}',
'y': f'y_{index}',
'z': f'z_{index}'
})
return df
def add_atoms(base, atoms):
df = pd.merge(base, atoms, how='inner',
on=['molecule_name', 'atom_index_0', 'atom_index_1'])
return df
def merge_all_atoms(base, structures):
df = pd.merge(base, structures, how='left',
left_on=['molecule_name'],
right_on=['molecule_name'])
df = df[(df.atom_index_0 != df.atom_index) & (df.atom_index_1 != df.atom_index)]
return df
# +
def add_center(df):
df['x_c'] = ((df['x_1'] + df['x_0']) * np.float32(0.5))
df['y_c'] = ((df['y_1'] + df['y_0']) * np.float32(0.5))
df['z_c'] = ((df['z_1'] + df['z_0']) * np.float32(0.5))
def add_distance_to_center(df):
df['d_c'] = ((
(df['x_c'] - df['x'])**np.float32(2) +
(df['y_c'] - df['y'])**np.float32(2) +
(df['z_c'] - df['z'])**np.float32(2)
)**np.float32(0.5))
def add_distance_between(df, suffix1, suffix2):
df[f'd_{suffix1}_{suffix2}'] = ((
(df[f'x_{suffix1}'] - df[f'x_{suffix2}'])**np.float32(2) +
(df[f'y_{suffix1}'] - df[f'y_{suffix2}'])**np.float32(2) +
(df[f'z_{suffix1}'] - df[f'z_{suffix2}'])**np.float32(2)
)**np.float32(0.5))
# -
def add_distances(df):
n_atoms = 1 + max([int(c.split('_')[1]) for c in df.columns if c.startswith('x_')])
for i in range(1, n_atoms):
for vi in range(min(4, i)):
add_distance_between(df, i, vi)
def add_n_atoms(base, structures):
dfs = structures['molecule_name'].value_counts().rename('n_atoms').to_frame()
return pd.merge(base, dfs, left_on='molecule_name', right_index=True)
# +
def build_couple_dataframe(some_csv, structures_csv, coupling_type, n_atoms=10):
base, structures = build_type_dataframes(some_csv, structures_csv, coupling_type)
base = add_coordinates(base, structures, 0)
base = add_coordinates(base, structures, 1)
base = base.drop(['atom_0', 'atom_1'], axis=1)
atoms = base.drop('id', axis=1).copy()
if 'scalar_coupling_constant' in some_csv:
atoms = atoms.drop(['scalar_coupling_constant'], axis=1)
add_center(atoms)
atoms = atoms.drop(['x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1'], axis=1)
atoms = merge_all_atoms(atoms, structures)
add_distance_to_center(atoms)
atoms = atoms.drop(['x_c', 'y_c', 'z_c', 'atom_index'], axis=1)
atoms.sort_values(['molecule_name', 'atom_index_0', 'atom_index_1', 'd_c'], inplace=True)
atom_groups = atoms.groupby(['molecule_name', 'atom_index_0', 'atom_index_1'])
atoms['num'] = atom_groups.cumcount() + 2
atoms = atoms.drop(['d_c'], axis=1)
atoms = atoms[atoms['num'] < n_atoms]
atoms = atoms.set_index(['molecule_name', 'atom_index_0', 'atom_index_1', 'num']).unstack()
atoms.columns = [f'{col[0]}_{col[1]}' for col in atoms.columns]
atoms = atoms.reset_index()
# # downcast back to int8
for col in atoms.columns:
if col.startswith('atom_'):
atoms[col] = atoms[col].fillna(0).astype('int8')
# atoms['molecule_name'] = atoms['molecule_name'].astype('int32')
full = add_atoms(base, atoms)
add_distances(full)
full.sort_values('id', inplace=True)
return full
# -
def take_n_atoms(df, n_atoms, four_start=4):
labels = ['id', 'molecule_name', 'atom_index_1', 'atom_index_0']
for i in range(2, n_atoms):
label = f'atom_{i}'
labels.append(label)
for i in range(n_atoms):
num = min(i, 4) if i < four_start else 4
for j in range(num):
labels.append(f'd_{i}_{j}')
if 'scalar_coupling_constant' in df:
labels.append('scalar_coupling_constant')
return df[labels]
# +
atoms = structures['atom'].values
types_train = train['type'].values
types_test = test['type'].values
structures['atom'] = structures['atom'].replace(ATOMIC_NUMBERS).astype('int8')
fulls_train = []
fulls_test = []
for type_ in progress_bar(train['type'].unique()):
full_train = build_couple_dataframe(train, structures, type_, n_atoms=10)
full_test = build_couple_dataframe(test, structures, type_, n_atoms=10)
full_train = take_n_atoms(full_train, 10)
full_test = take_n_atoms(full_test, 10)
fulls_train.append(full_train)
fulls_test.append(full_test)
structures['atom'] = atoms
train = pd.concat(fulls_train).sort_values(by=['id']) #, axis=0)
test = pd.concat(fulls_test).sort_values(by=['id']) #, axis=0)
train['type'] = types_train
test['type'] = types_test
train = train.fillna(0)
test = test.fillna(0)
# -
# <br>
# <br>
# dist-interact
train['dist_interact'] = dist_interact_train.values
test['dist_interact'] = dist_interact_test.values
# <br>
# <br>
# basic
# +
def map_atom_info(df_1,df_2, atom_idx):
df = pd.merge(df_1, df_2, how = 'left',
left_on = ['molecule_name', f'atom_index_{atom_idx}'],
right_on = ['molecule_name', 'atom_index'])
df = df.drop('atom_index', axis=1)
return df
# structure and ob_charges
ob_charge = pd.concat([ob_charge_train, ob_charge_test])
merge = pd.merge(ob_charge, structures, how='left',
left_on = ['molecule_name', 'atom_index'],
right_on = ['molecule_name', 'atom_index'])
for atom_idx in [0,1]:
train = map_atom_info(train, merge, atom_idx)
test = map_atom_info(test, merge, atom_idx)
train = train.rename(columns={
'atom': f'atom_{atom_idx}',
'x': f'x_{atom_idx}',
'y': f'y_{atom_idx}',
'z': f'z_{atom_idx}',
'eem': f'eem_{atom_idx}',
'mmff94': f'mmff94_{atom_idx}',
'gasteiger': f'gasteiger_{atom_idx}',
'qeq': f'qeq_{atom_idx}',
'qtpie': f'qtpie_{atom_idx}',
'eem2015ha': f'eem2015ha_{atom_idx}',
'eem2015hm': f'eem2015hm_{atom_idx}',
'eem2015hn': f'eem2015hn_{atom_idx}',
'eem2015ba': f'eem2015ba_{atom_idx}',
'eem2015bm': f'eem2015bm_{atom_idx}',
'eem2015bn': f'eem2015bn_{atom_idx}',})
test = test.rename(columns={
'atom': f'atom_{atom_idx}',
'x': f'x_{atom_idx}',
'y': f'y_{atom_idx}',
'z': f'z_{atom_idx}',
'eem': f'eem_{atom_idx}',
'mmff94': f'mmff94_{atom_idx}',
'gasteiger': f'gasteiger_{atom_idx}',
'qeq': f'qeq_{atom_idx}',
'qtpie': f'qtpie_{atom_idx}',
'eem2015ha': f'eem2015ha_{atom_idx}',
'eem2015hm': f'eem2015hm_{atom_idx}',
'eem2015hn': f'eem2015hn_{atom_idx}',
'eem2015ba': f'eem2015ba_{atom_idx}',
'eem2015bm': f'eem2015bm_{atom_idx}',
'eem2015bn': f'eem2015bn_{atom_idx}'})
# test = test.rename(columns={'atom': f'atom_{atom_idx}',
# 'x': f'x_{atom_idx}',
# 'y': f'y_{atom_idx}',
# 'z': f'z_{atom_idx}'})
# ob_charges
# train = map_atom_info(train, ob_charge_train, 0)
# test = map_atom_info(test, ob_charge_test, 0)
# train = map_atom_info(train, ob_charge_train, 1)
# test = map_atom_info(test, ob_charge_test, 1)
# -
# <br>
# <br>
# type0
def create_type0(df):
df['type_0'] = df['type'].apply(lambda x : x[0])
return df
# train['type_0'] = train['type'].apply(lambda x: x[0])
# test['type_0'] = test['type'].apply(lambda x: x[0])
# <br>
# <br>
# distances
# +
def distances(df):
df_p_0 = df[['x_0', 'y_0', 'z_0']].values
df_p_1 = df[['x_1', 'y_1', 'z_1']].values
df['dist'] = np.linalg.norm(df_p_0 - df_p_1, axis=1)
df['dist_x'] = (df['x_0'] - df['x_1']) ** 2
df['dist_y'] = (df['y_0'] - df['y_1']) ** 2
df['dist_z'] = (df['z_0'] - df['z_1']) ** 2
return df
# train = distances(train)
# test = distances(test)
# -
# <br>
# <br>
# 統計量
def create_features(df):
df['molecule_couples'] = df.groupby('molecule_name')['id'].transform('count')
df['molecule_dist_mean'] = df.groupby('molecule_name')['dist'].transform('mean')
df['molecule_dist_min'] = df.groupby('molecule_name')['dist'].transform('min')
df['molecule_dist_max'] = df.groupby('molecule_name')['dist'].transform('max')
df['atom_0_couples_count'] = df.groupby(['molecule_name', 'atom_index_0'])['id'].transform('count')
df['atom_1_couples_count'] = df.groupby(['molecule_name', 'atom_index_1'])['id'].transform('count')
df[f'molecule_atom_index_0_x_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['x_1'].transform('std')
df[f'molecule_atom_index_0_y_1_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('mean')
df[f'molecule_atom_index_0_y_1_mean_diff'] = df[f'molecule_atom_index_0_y_1_mean'] - df['y_1']
df[f'molecule_atom_index_0_y_1_mean_div'] = df[f'molecule_atom_index_0_y_1_mean'] / df['y_1']
df[f'molecule_atom_index_0_y_1_max'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('max')
df[f'molecule_atom_index_0_y_1_max_diff'] = df[f'molecule_atom_index_0_y_1_max'] - df['y_1']
df[f'molecule_atom_index_0_y_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('std')
df[f'molecule_atom_index_0_z_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['z_1'].transform('std')
df[f'molecule_atom_index_0_dist_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('mean')
df[f'molecule_atom_index_0_dist_mean_diff'] = df[f'molecule_atom_index_0_dist_mean'] - df['dist']
df[f'molecule_atom_index_0_dist_mean_div'] = df[f'molecule_atom_index_0_dist_mean'] / df['dist']
df[f'molecule_atom_index_0_dist_max'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('max')
df[f'molecule_atom_index_0_dist_max_diff'] = df[f'molecule_atom_index_0_dist_max'] - df['dist']
df[f'molecule_atom_index_0_dist_max_div'] = df[f'molecule_atom_index_0_dist_max'] / df['dist']
df[f'molecule_atom_index_0_dist_min'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')
df[f'molecule_atom_index_0_dist_min_diff'] = df[f'molecule_atom_index_0_dist_min'] - df['dist']
df[f'molecule_atom_index_0_dist_min_div'] = df[f'molecule_atom_index_0_dist_min'] / df['dist']
df[f'molecule_atom_index_0_dist_std'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('std')
df[f'molecule_atom_index_0_dist_std_diff'] = df[f'molecule_atom_index_0_dist_std'] - df['dist']
df[f'molecule_atom_index_0_dist_std_div'] = df[f'molecule_atom_index_0_dist_std'] / df['dist']
df[f'molecule_atom_index_1_dist_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('mean')
df[f'molecule_atom_index_1_dist_mean_diff'] = df[f'molecule_atom_index_1_dist_mean'] - df['dist']
df[f'molecule_atom_index_1_dist_mean_div'] = df[f'molecule_atom_index_1_dist_mean'] / df['dist']
df[f'molecule_atom_index_1_dist_max'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('max')
df[f'molecule_atom_index_1_dist_max_diff'] = df[f'molecule_atom_index_1_dist_max'] - df['dist']
df[f'molecule_atom_index_1_dist_max_div'] = df[f'molecule_atom_index_1_dist_max'] / df['dist']
df[f'molecule_atom_index_1_dist_min'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('min')
df[f'molecule_atom_index_1_dist_min_diff'] = df[f'molecule_atom_index_1_dist_min'] - df['dist']
df[f'molecule_atom_index_1_dist_min_div'] = df[f'molecule_atom_index_1_dist_min'] / df['dist']
df[f'molecule_atom_index_1_dist_std'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('std')
df[f'molecule_atom_index_1_dist_std_diff'] = df[f'molecule_atom_index_1_dist_std'] - df['dist']
df[f'molecule_atom_index_1_dist_std_div'] = df[f'molecule_atom_index_1_dist_std'] / df['dist']
df[f'molecule_atom_1_dist_mean'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('mean')
df[f'molecule_atom_1_dist_min'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('min')
df[f'molecule_atom_1_dist_min_diff'] = df[f'molecule_atom_1_dist_min'] - df['dist']
df[f'molecule_atom_1_dist_min_div'] = df[f'molecule_atom_1_dist_min'] / df['dist']
df[f'molecule_atom_1_dist_std'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('std')
df[f'molecule_atom_1_dist_std_diff'] = df[f'molecule_atom_1_dist_std'] - df['dist']
df[f'molecule_type_0_dist_std'] = df.groupby(['molecule_name', 'type_0'])['dist'].transform('std')
df[f'molecule_type_0_dist_std_diff'] = df[f'molecule_type_0_dist_std'] - df['dist']
df[f'molecule_type_dist_mean'] = df.groupby(['molecule_name', 'type'])['dist'].transform('mean')
df[f'molecule_type_dist_mean_diff'] = df[f'molecule_type_dist_mean'] - df['dist']
df[f'molecule_type_dist_mean_div'] = df[f'molecule_type_dist_mean'] / df['dist']
df[f'molecule_type_dist_max'] = df.groupby(['molecule_name', 'type'])['dist'].transform('max')
df[f'molecule_type_dist_min'] = df.groupby(['molecule_name', 'type'])['dist'].transform('min')
df[f'molecule_type_dist_std'] = df.groupby(['molecule_name', 'type'])['dist'].transform('std')
df[f'molecule_type_dist_std_diff'] = df[f'molecule_type_dist_std'] - df['dist']
# fc
# df[f'molecule_type_fc_max'] = df.groupby(['molecule_name', 'type'])['fc'].transform('max')
# df[f'molecule_type_fc_min'] = df.groupby(['molecule_name', 'type'])['fc'].transform('min')
# df[f'molecule_type_fc_std'] = df.groupby(['molecule_name', 'type'])['fc'].transform('std')
# df[f'molecule_type_fc_std_diff'] = df[f'molecule_type_fc_std'] - df['fc']
# dist-interact
df[f'molecule_atom_index_0_dist_interact_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['dist_interact'].transform('mean')
df[f'molecule_atom_index_0_dist_interact_mean_diff'] = df[f'molecule_atom_index_0_dist_interact_mean'] - df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_mean_div'] = df[f'molecule_atom_index_0_dist_interact_mean'] / df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_max'] = df.groupby(['molecule_name', 'atom_index_0'])['dist_interact'].transform('max')
df[f'molecule_atom_index_0_dist_interact_max_diff'] = df[f'molecule_atom_index_0_dist_interact_max'] - df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_max_div'] = df[f'molecule_atom_index_0_dist_interact_max'] / df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_min'] = df.groupby(['molecule_name', 'atom_index_0'])['dist_interact'].transform('min')
df[f'molecule_atom_index_0_dist_interact_min_diff'] = df[f'molecule_atom_index_0_dist_interact_min'] - df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_min_div'] = df[f'molecule_atom_index_0_dist_interact_min'] / df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_std'] = df.groupby(['molecule_name', 'atom_index_0'])['dist_interact'].transform('std')
df[f'molecule_atom_index_0_dist_interact_std_diff'] = df[f'molecule_atom_index_0_dist_interact_std'] - df['dist_interact']
df[f'molecule_atom_index_0_dist_interact_std_div'] = df[f'molecule_atom_index_0_dist_interact_std'] / df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['dist_interact'].transform('mean')
df[f'molecule_atom_index_1_dist_interact_mean_diff'] = df[f'molecule_atom_index_1_dist_interact_mean'] - df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_mean_div'] = df[f'molecule_atom_index_1_dist_interact_mean'] / df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_max'] = df.groupby(['molecule_name', 'atom_index_1'])['dist_interact'].transform('max')
df[f'molecule_atom_index_1_dist_interact_max_diff'] = df[f'molecule_atom_index_1_dist_interact_max'] - df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_max_div'] = df[f'molecule_atom_index_1_dist_interact_max'] / df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_min'] = df.groupby(['molecule_name', 'atom_index_1'])['dist_interact'].transform('min')
df[f'molecule_atom_index_1_dist_interact_min_diff'] = df[f'molecule_atom_index_1_dist_interact_min'] - df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_min_div'] = df[f'molecule_atom_index_1_dist_interact_min'] / df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_std'] = df.groupby(['molecule_name', 'atom_index_1'])['dist_interact'].transform('std')
df[f'molecule_atom_index_1_dist_interact_std_diff'] = df[f'molecule_atom_index_1_dist_interact_std'] - df['dist_interact']
df[f'molecule_atom_index_1_dist_interact_std_div'] = df[f'molecule_atom_index_1_dist_interact_std'] / df['dist_interact']
df[f'molecule_type_dist_interact_max'] = df.groupby(['molecule_name', 'type'])['dist_interact'].transform('max')
df[f'molecule_type_dist_interact_min'] = df.groupby(['molecule_name', 'type'])['dist_interact'].transform('min')
df[f'molecule_type_dist_interact_std'] = df.groupby(['molecule_name', 'type'])['dist_interact'].transform('std')
df[f'molecule_type_dist_interact_std_diff'] = df[f'molecule_type_dist_interact_std'] - df['dist_interact']
return df
# <br>
# <br>
# angle features
# +
def map_atom_info(df_1,df_2, atom_idx):
df = pd.merge(df_1, df_2, how = 'left',
left_on = ['molecule_name', f'atom_index_{atom_idx}'],
right_on = ['molecule_name', 'atom_index'])
df = df.drop('atom_index', axis=1)
return df
def create_closest(df):
df_temp=df.loc[:,["molecule_name","atom_index_0","atom_index_1","dist","x_0","y_0","z_0","x_1","y_1","z_1"]].copy()
df_temp_=df_temp.copy()
df_temp_= df_temp_.rename(columns={'atom_index_0': 'atom_index_1',
'atom_index_1': 'atom_index_0',
'x_0': 'x_1',
'y_0': 'y_1',
'z_0': 'z_1',
'x_1': 'x_0',
'y_1': 'y_0',
'z_1': 'z_0'})
df_temp=pd.concat(objs=[df_temp,df_temp_],axis=0)
df_temp["min_distance"]=df_temp.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')
df_temp= df_temp[df_temp["min_distance"]==df_temp["dist"]]
df_temp=df_temp.drop(['x_0','y_0','z_0','min_distance', 'dist'], axis=1)
df_temp= df_temp.rename(columns={'atom_index_0': 'atom_index',
'atom_index_1': 'atom_index_closest',
'distance': 'distance_closest',
'x_1': 'x_closest',
'y_1': 'y_closest',
'z_1': 'z_closest'})
for atom_idx in [0,1]:
df = map_atom_info(df,df_temp, atom_idx)
df = df.rename(columns={'atom_index_closest': f'atom_index_closest_{atom_idx}',
'distance_closest': f'distance_closest_{atom_idx}',
'x_closest': f'x_closest_{atom_idx}',
'y_closest': f'y_closest_{atom_idx}',
'z_closest': f'z_closest_{atom_idx}'})
return df
def add_cos_features(df):
df["distance_0"]=((df['x_0']-df['x_closest_0'])**2+(df['y_0']-df['y_closest_0'])**2+(df['z_0']-df['z_closest_0'])**2)**(1/2)
df["distance_1"]=((df['x_1']-df['x_closest_1'])**2+(df['y_1']-df['y_closest_1'])**2+(df['z_1']-df['z_closest_1'])**2)**(1/2)
df["vec_0_x"]=(df['x_0']-df['x_closest_0'])/df["distance_0"]
df["vec_0_y"]=(df['y_0']-df['y_closest_0'])/df["distance_0"]
df["vec_0_z"]=(df['z_0']-df['z_closest_0'])/df["distance_0"]
df["vec_1_x"]=(df['x_1']-df['x_closest_1'])/df["distance_1"]
df["vec_1_y"]=(df['y_1']-df['y_closest_1'])/df["distance_1"]
df["vec_1_z"]=(df['z_1']-df['z_closest_1'])/df["distance_1"]
df["vec_x"]=(df['x_1']-df['x_0'])/df["dist"]
df["vec_y"]=(df['y_1']-df['y_0'])/df["dist"]
df["vec_z"]=(df['z_1']-df['z_0'])/df["dist"]
df["cos_0_1"]=df["vec_0_x"]*df["vec_1_x"]+df["vec_0_y"]*df["vec_1_y"]+df["vec_0_z"]*df["vec_1_z"]
df["cos_0"]=df["vec_0_x"]*df["vec_x"]+df["vec_0_y"]*df["vec_y"]+df["vec_0_z"]*df["vec_z"]
df["cos_1"]=df["vec_1_x"]*df["vec_x"]+df["vec_1_y"]*df["vec_y"]+df["vec_1_z"]*df["vec_z"]
df=df.drop(['vec_0_x','vec_0_y','vec_0_z','vec_1_x','vec_1_y','vec_1_z','vec_x','vec_y','vec_z'], axis=1)
return df
# +
# %%time
print('type0')
print(len(train), len(test))
train = create_type0(train)
test = create_type0(test)
print('distances')
print(len(train), len(test))
train = distances(train)
test = distances(test)
print('create_featueres')
print(len(train), len(test))
train = create_features(train)
test = create_features(test)
print('create_closest')
print(len(train), len(test))
train = create_closest(train)
test = create_closest(test)
train.drop_duplicates(inplace=True, subset=['id']) # なぜかtrainの行数が増えるバグが発生
print('add_cos_features')
print(len(train), len(test))
train = add_cos_features(train)
test = add_cos_features(test)
# -
# ---
# <br>
# <br>
# LabelEncode
#
# - `atom_1` = {H, C, N}
# - `type_0` = {1, 2, 3}
# - `type` = {2JHC, ...}
for f in ['atom_1', 'type_0', 'type']:
if f in train.columns:
lbl = LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
# ---
# **show features**
train.head(2)
print(train.columns)
# # create train, test data
# +
y = fc
train = train.drop(['id', 'molecule_name', 'atom_0', 'scalar_coupling_constant'], axis=1)
test = test.drop(['id', 'molecule_name', 'atom_0'], axis=1)
train = reduce_mem_usage(train)
test = reduce_mem_usage(test)
X = train.copy()
X_test = test.copy()
assert len(X.columns) == len(X_test.columns), f'X と X_test のサイズが違います X: {len(X.columns)}, X_test: {len(X_test.columns)}'
# -
del train, test, full_train, full_test
gc.collect()
# # Training model
# **params**
# +
# Configuration
TARGET = 'scalar_coupling_constant'
CAT_FEATS = ['type']
N_ESTIMATORS = 4000
VERBOSE = 400
EARLY_STOPPING_ROUNDS = 200
RANDOM_STATE = 529
METRIC = mean_absolute_error
N_JOBS = multiprocessing.cpu_count() -10
# lightgbm params
lgb_params = {'num_leaves': 128,
'min_child_samples': 79,
'objective': 'regression',
'max_depth': 9,
'learning_rate': 0.2,
"boosting_type": "gbdt",
"subsample_freq": 1,
"subsample": 0.9,
"bagging_seed": 11,
"metric": 'mae',
"verbosity": -1,
'reg_alpha': 0.1,
'reg_lambda': 0.3,
'colsample_bytree': 1.0
}
# +
# init
def train_lgb(X, X_test, y, lgb_params, folds,
verbose, early_stopping_rounds, n_estimators, categorical_feature=None):
result_dict = {}
oof = np.zeros(len(X))
prediction = np.zeros(len(X_test))
scores = []
models = []
feature_importance = pd.DataFrame()
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X)):
print('------------------')
print(f'- fold{fold_n + 1}' )
print(f'Fold {fold_n + 1} started at {time.ctime()}')
X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx]
y_train, y_valid = y[train_idx], y[valid_idx]
# from IPython.core.debugger import Pdb; Pdb().set_trace()
# Train the model
model = lgb.LGBMRegressor(**lgb_params, n_estimators=n_estimators, n_jobs=N_JOBS)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=verbose,
early_stopping_rounds=early_stopping_rounds,
categorical_feature=CAT_FEATS)
# predict
y_valid_pred = model.predict(X_valid, num_iteration=model.best_iteration_)
y_test_pred = model.predict(X_test)
oof[valid_idx] = y_valid_pred.reshape(-1,) # oof: out of folds
scores.append(mean_absolute_error(y_valid, y_valid_pred))
prediction += y_test_pred
# feature_importance
fold_importance = pd.DataFrame()
fold_importance['feature'] = X.columns
fold_importance['importance'] = model.feature_importances_
fold_importance['fold'] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
# result
prediction /= folds.n_splits
feature_importance["importance"] /= folds.n_splits
result_dict['oof'] = oof
result_dict['prediction'] = prediction
result_dict['scores'] = scores
result_dict['feature_importance'] = feature_importance
print('------------------')
print('====== finish ======')
print('score list:', scores)
X['scalar_coupling_constant'] = y
cv_score = kaggle_metric(X, oof)
# X = X.drop(['scalar_coupling_constant', 'prediction'], axis=1)
print('CV mean score(group log mae): {0:.4f}'.format(cv_score))
return result_dict, cv_score
# -
n_folds = 5
folds = KFold(n_splits=n_folds, shuffle=True)
X.shape, y.shape, X_test.shape
X.columns[np.isnan(X).any()]
# +
# %%time
# type ごとの学習
feature_importance = pd.DataFrame()
X_short = pd.DataFrame({'ind': list(X.index), 'type': X['type'].values, 'oof': [0] * len(X), 'target': y.values})
X_short_test = pd.DataFrame({'ind': list(X_test.index), 'type': X_test['type'].values, 'prediction': [0] * len(X_test)})
for t in X['type'].unique():
print('')
print('*'*100)
print(f'Training of type {lbl.inverse_transform([t])[0]}({t})')
print('*'*100)
X_t = X.loc[X['type'] == t]
X_test_t = X_test.loc[X_test['type'] == t]
y_t = X_short.loc[X_short['type'] == t, 'target'].values
# from IPython.core.debugger import Pdb; Pdb().set_trace()
# result_dict_lgb3 = train_model_regression(X=X_t, X_test=X_test_t, y=y_t, params=params, folds=folds, model_type='lgb', eval_metric='group_mae', plot_feature_importance=False,
# verbose=500, early_stopping_rounds=200, n_estimators=3000)
result_dict, cv_score = train_lgb(X=X_t, X_test=X_test_t, y=y_t, folds=folds, lgb_params=lgb_params,
verbose=VERBOSE, early_stopping_rounds=EARLY_STOPPING_ROUNDS,
n_estimators=N_ESTIMATORS,
categorical_feature=CAT_FEATS)
X_short.loc[X_short['type'] == t, 'oof'] = result_dict['oof']
X_short_test.loc[X_short_test['type'] == t, 'prediction'] = result_dict['prediction']
feature_importance = pd.concat([feature_importance, result_dict['feature_importance']], axis=0)
print('')
print('===== finish =====')
X['scalar_coupling_constant'] = fc.values
metric = kaggle_metric(X, X_short['oof'].values)
X = X.drop(['scalar_coupling_constant', 'prediction'], axis=1)
print('CV mean score(group log mae): {0:.4f}'.format(metric))
fc_pred_test = X_short_test['prediction'].values
fc_pred_train = X_short['oof'].values
# X = X.drop(['scalar_coupling_constant', 'prediction'], axis=1)
# sub['scalar_coupling_constant'] = X_short_test['prediction']
# sub.to_csv('submission_t.csv', index=False)
# sub.head()
# -
print('CV mean score(group log mae): {0:.4f}'.format(metric))
# ## plot feature importance
# +
# top 50 features
cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values(
by="importance", ascending=False)[:50].index
best_features = feature_importance.loc[feature_importance.feature.isin(cols)]
plt.figure(figsize=(16, 12));
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False));
plt.title('LGB Features (avg over folds)');
# -
# # Save
# test
# path_submittion = './output/' + 'nb{}_submission_lgb_{}.csv'.format(nb, metric)
path = f'../input/champs-scalar-coupling/nb{nb}_fc_test.csv'
print(f'save pash: {path}')
_ = pd.DataFrame({'fc': fc_pred_test})
if isSmallSet:
print('using small datasets')
else:
_.to_csv(path, index=False)
# <br>
# <br>
# train
# path_submittion = './output/' + 'nb{}_submission_lgb_{}.csv'.format(nb, metric)
path = f'../input/champs-scalar-coupling/nb{nb}_fc_train.csv'
print(f'save pash: {path}')
_ = pd.DataFrame({'fc': fc_pred_train})
if isSmallSet:
print('using small datasets')
else:
_.to_csv(path, index=False)
# # analysis
# +
plot_data = pd.DataFrame(y)
plot_data.index.name = 'id'
plot_data['yhat'] = X_short['oof']
plot_data['type'] = lbl.inverse_transform(X['type'])
def plot_oof_preds(ctype, llim, ulim):
plt.figure(figsize=(6,6))
sns.scatterplot(x='fc',y='yhat',
data=plot_data.loc[plot_data['type']==ctype,
['fc', 'yhat']]);
plt.xlim((llim, ulim))
plt.ylim((llim, ulim))
plt.plot([llim, ulim], [llim, ulim])
plt.xlabel('fc')
plt.ylabel('predicted')
plt.title(f'{ctype}', fontsize=18)
plt.show()
plot_oof_preds('1JHC', 0, 250)
plot_oof_preds('1JHN', 0, 100)
plot_oof_preds('2JHC', -30, 50)
plot_oof_preds('2JHH', -40, 30)
plot_oof_preds('2JHN', -10, 25)
plot_oof_preds('3JHC', -25, 100)
plot_oof_preds('3JHH', -5, 20)
plot_oof_preds('3JHN', -10, 15)
# -
|
src/47_Create_oof_fc_feature_02.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Please run notebook locally (if you have all the dependencies and a GPU).
Technically you can run this notebook on Google Colab but you need to set up microphone for Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
5. Set up microphone for Colab
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
# !pip install wget
# !apt-get install sox libsndfile1 ffmpeg portaudio19-dev
# !pip install unidecode
# !pip install pyaudio
# ## Install NeMo
BRANCH = 'v1.0.0'
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
# !pip install torchaudio>=0.6.0 -f https://download.pytorch.org/whl/torch_stable.html
# -
#
# This notebook demonstrates offline and online (from a microphone's stream in NeMo) speech commands recognition
#
# It is **not a recommended** way to do inference in production workflows. If you are interested in
# production-level inference using NeMo ASR models, please sign-up to Jarvis early access program: https://developer.nvidia.com/nvidia-jarvis
# The notebook requires PyAudio library to get a signal from an audio device.
# For Ubuntu, please run the following commands to install it:
# ```
# sudo apt-get install -y portaudio19-dev
# pip install pyaudio
# ```
# This notebook requires the `torchaudio` library to be installed for MatchboxNet. Please follow the instructions available at the [torchaudio Github page](https://github.com/pytorch/audio#installation) to install the appropriate version of torchaudio.
#
# If you would like to install the latest version, please run the following command to install it:
#
# ```
# conda install -c pytorch torchaudio
# ```
# +
import numpy as np
import pyaudio as pa
import os, time
import librosa
import IPython.display as ipd
import matplotlib.pyplot as plt
# %matplotlib inline
import nemo
import nemo.collections.asr as nemo_asr
# -
# sample rate, Hz
SAMPLE_RATE = 16000
# ## Restore the model from NGC
mbn_model = nemo_asr.models.EncDecClassificationModel.from_pretrained("commandrecognition_en_matchboxnet3x1x64_v2")
# Since speech commands model MatchBoxNet doesn't consider non-speech scenario,
# here we use a Voice Activity Detection (VAD) model to help reduce false alarm for background noise/silence. When there is speech activity detected, the speech command inference will be activated.
#
# **Please note the VAD model is not perfect for various microphone input and you might need to finetune on your input and play with different parameters.**
vad_model = nemo_asr.models.EncDecClassificationModel.from_pretrained('vad_marblenet')
# ## Observing the config of the model
from omegaconf import OmegaConf
import copy
# Preserve a copy of the full config
vad_cfg = copy.deepcopy(vad_model._cfg)
mbn_cfg = copy.deepcopy(mbn_model._cfg)
print(OmegaConf.to_yaml(mbn_cfg))
# ## What classes can this model recognize?
#
# Before we begin inference on the actual audio stream, let's look at what are the classes this model was trained to recognize.
#
# **MatchBoxNet model is not designed to recognize words out of vocabulary (OOV).**
labels = mbn_cfg.labels
for i in range(len(labels)):
print('%-10s' % (labels[i]), end=' ')
# ## Setup preprocessor with these settings
# Set model to inference mode
mbn_model.eval();
vad_model.eval();
# ## Setting up data for Streaming Inference
from nemo.core.classes import IterableDataset
from nemo.core.neural_types import NeuralType, AudioSignal, LengthsType
import torch
from torch.utils.data import DataLoader
# simple data layer to pass audio signal
class AudioDataLayer(IterableDataset):
@property
def output_types(self):
return {
'audio_signal': NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
}
def __init__(self, sample_rate):
super().__init__()
self._sample_rate = sample_rate
self.output = True
def __iter__(self):
return self
def __next__(self):
if not self.output:
raise StopIteration
self.output = False
return torch.as_tensor(self.signal, dtype=torch.float32), \
torch.as_tensor(self.signal_shape, dtype=torch.int64)
def set_signal(self, signal):
self.signal = signal.astype(np.float32)/32768.
self.signal_shape = self.signal.size
self.output = True
def __len__(self):
return 1
data_layer = AudioDataLayer(sample_rate=mbn_cfg.train_ds.sample_rate)
data_loader = DataLoader(data_layer, batch_size=1, collate_fn=data_layer.collate_fn)
# ## inference method for audio signal (single instance)
def infer_signal(model, signal):
data_layer.set_signal(signal)
batch = next(iter(data_loader))
audio_signal, audio_signal_len = batch
audio_signal, audio_signal_len = audio_signal.to(model.device), audio_signal_len.to(model.device)
logits = model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
return logits
# we don't include postprocessing techniques here.
# class for streaming frame-based ASR
# 1) use reset() method to reset FrameASR's state
# 2) call transcribe(frame) to do ASR on
# contiguous signal's frames
class FrameASR:
def __init__(self, model_definition,
frame_len=2, frame_overlap=2.5,
offset=0):
'''
Args:
frame_len (seconds): Frame's duration
frame_overlap (seconds): Duration of overlaps before and after current frame.
offset: Number of symbols to drop for smooth streaming.
'''
self.task = model_definition['task']
self.vocab = list(model_definition['labels'])
self.sr = model_definition['sample_rate']
self.frame_len = frame_len
self.n_frame_len = int(frame_len * self.sr)
self.frame_overlap = frame_overlap
self.n_frame_overlap = int(frame_overlap * self.sr)
timestep_duration = model_definition['AudioToMFCCPreprocessor']['window_stride']
for block in model_definition['JasperEncoder']['jasper']:
timestep_duration *= block['stride'][0] ** block['repeat']
self.buffer = np.zeros(shape=2*self.n_frame_overlap + self.n_frame_len,
dtype=np.float32)
self.offset = offset
self.reset()
@torch.no_grad()
def _decode(self, frame, offset=0):
assert len(frame)==self.n_frame_len
self.buffer[:-self.n_frame_len] = self.buffer[self.n_frame_len:]
self.buffer[-self.n_frame_len:] = frame
if self.task == 'mbn':
logits = infer_signal(mbn_model, self.buffer).to('cpu').numpy()[0]
decoded = self._mbn_greedy_decoder(logits, self.vocab)
elif self.task == 'vad':
logits = infer_signal(vad_model, self.buffer).to('cpu').numpy()[0]
decoded = self._vad_greedy_decoder(logits, self.vocab)
else:
raise("Task should either be of mbn or vad!")
return decoded[:len(decoded)-offset]
def transcribe(self, frame=None,merge=False):
if frame is None:
frame = np.zeros(shape=self.n_frame_len, dtype=np.float32)
if len(frame) < self.n_frame_len:
frame = np.pad(frame, [0, self.n_frame_len - len(frame)], 'constant')
unmerged = self._decode(frame, self.offset)
return unmerged
def reset(self):
'''
Reset frame_history and decoder's state
'''
self.buffer=np.zeros(shape=self.buffer.shape, dtype=np.float32)
self.mbn_s = []
self.vad_s = []
@staticmethod
def _mbn_greedy_decoder(logits, vocab):
mbn_s = []
if logits.shape[0]:
class_idx = np.argmax(logits)
class_label = vocab[class_idx]
mbn_s.append(class_label)
return mbn_s
@staticmethod
def _vad_greedy_decoder(logits, vocab):
vad_s = []
if logits.shape[0]:
probs = torch.softmax(torch.as_tensor(logits), dim=-1)
probas, preds = torch.max(probs, dim=-1)
vad_s = [preds.item(), str(vocab[preds]), probs[0].item(), probs[1].item(), str(logits)]
return vad_s
# # Streaming Inference
# ## offline inference
# Here we show an example of offline streaming inference. you can use your file or download the provided demo audio file.
#
# Streaming inference depends on a few factors, such as the frame length (STEP) and buffer size (WINDOW SIZE). Experiment with a few values to see their effects in the below cells.
STEP = 0.25
WINDOW_SIZE = 1.28 # input segment length for NN we used for training
# +
import wave
def offline_inference(wave_file, STEP = 0.25, WINDOW_SIZE = 0.31):
"""
Arg:
wav_file: wave file to be performed inference on.
STEP: infer every STEP seconds
WINDOW_SIZE : lenght of audio to be sent to NN.
"""
FRAME_LEN = STEP
CHANNELS = 1 # number of audio channels (expect mono signal)
RATE = SAMPLE_RATE # sample rate, 16000 Hz
CHUNK_SIZE = int(FRAME_LEN * SAMPLE_RATE)
mbn = FrameASR(model_definition = {
'task': 'mbn',
'sample_rate': SAMPLE_RATE,
'AudioToMFCCPreprocessor': mbn_cfg.preprocessor,
'JasperEncoder': mbn_cfg.encoder,
'labels': mbn_cfg.labels
},
frame_len=FRAME_LEN, frame_overlap = (WINDOW_SIZE - FRAME_LEN)/2,
offset=0)
wf = wave.open(wave_file, 'rb')
data = wf.readframes(CHUNK_SIZE)
while len(data) > 0:
data = wf.readframes(CHUNK_SIZE)
signal = np.frombuffer(data, dtype=np.int16)
mbn_result = mbn.transcribe(signal)
if len(mbn_result):
print(mbn_result)
mbn.reset()
# -
demo_wave = 'SpeechCommands_demo.wav'
if not os.path.exists(demo_wave):
# !wget "https://dldata-public.s3.us-east-2.amazonaws.com/SpeechCommands_demo.wav"
# +
wave_file = demo_wave
CHANNELS = 1
audio, sample_rate = librosa.load(wave_file, sr=SAMPLE_RATE)
dur = librosa.get_duration(audio)
print(dur)
# -
ipd.Audio(audio, rate=sample_rate)
# Ground-truth is Yes No
offline_inference(wave_file, STEP, WINDOW_SIZE)
# ## Online inference through microphone
# Please note MatchBoxNet and VAD model are not perfect for various microphone input and you might need to finetune on your input and play with different parameter. \
# **We also recommend to use a headphone.**
# +
vad_threshold = 0.8
STEP = 0.1
WINDOW_SIZE = 0.15
mbn_WINDOW_SIZE = 1
CHANNELS = 1
RATE = SAMPLE_RATE
FRAME_LEN = STEP # use step of vad inference as frame len
CHUNK_SIZE = int(STEP * RATE)
vad = FrameASR(model_definition = {
'task': 'vad',
'sample_rate': SAMPLE_RATE,
'AudioToMFCCPreprocessor': vad_cfg.preprocessor,
'JasperEncoder': vad_cfg.encoder,
'labels': vad_cfg.labels
},
frame_len=FRAME_LEN, frame_overlap=(WINDOW_SIZE - FRAME_LEN) / 2,
offset=0)
mbn = FrameASR(model_definition = {
'task': 'mbn',
'sample_rate': SAMPLE_RATE,
'AudioToMFCCPreprocessor': mbn_cfg.preprocessor,
'JasperEncoder': mbn_cfg.encoder,
'labels': mbn_cfg.labels
},
frame_len=FRAME_LEN, frame_overlap = (mbn_WINDOW_SIZE-FRAME_LEN)/2,
offset=0)
# +
vad.reset()
mbn.reset()
# Setup input device
p = pa.PyAudio()
print('Available audio input devices:')
input_devices = []
for i in range(p.get_device_count()):
dev = p.get_device_info_by_index(i)
if dev.get('maxInputChannels'):
input_devices.append(i)
print(i, dev.get('name'))
if len(input_devices):
dev_idx = -2
while dev_idx not in input_devices:
print('Please type input device ID:')
dev_idx = int(input())
def callback(in_data, frame_count, time_info, status):
"""
callback function for streaming audio and performing inference
"""
signal = np.frombuffer(in_data, dtype=np.int16)
vad_result = vad.transcribe(signal)
mbn_result = mbn.transcribe(signal)
if len(vad_result):
# if speech prob is higher than threshold, we decide it contains speech utterance
# and activate MatchBoxNet
if vad_result[3] >= vad_threshold:
print(mbn_result) # print mbn result when speech present
else:
print("no-speech")
return (in_data, pa.paContinue)
# streaming
stream = p.open(format=pa.paInt16,
channels=CHANNELS,
rate=SAMPLE_RATE,
input=True,
input_device_index=dev_idx,
stream_callback=callback,
frames_per_buffer=CHUNK_SIZE)
print('Listening...')
stream.start_stream()
# Interrupt kernel and then speak for a few more words to exit the pyaudio loop !
try:
while stream.is_active():
time.sleep(0.1)
finally:
stream.stop_stream()
stream.close()
p.terminate()
print()
print("PyAudio stopped")
else:
print('ERROR: No audio input device found.')
# + [markdown] pycharm={"name": "#%% md\n"}
# ## ONNX Deployment
# You can also export the model to ONNX file and deploy it to TensorRT or MS ONNX Runtime inference engines. If you don't have one installed yet, please run:
# -
# !mkdir -p ort
# %cd ort
# !git clone --depth 1 --branch v1.5.1 https://github.com/microsoft/onnxruntime.git .
# !./build.sh --skip_tests --config Release --build_shared_lib --parallel --use_cuda --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu --build_wheel
# !pip install ./build/Linux/Release/dist/onnxruntime*.whl
# %cd ..
# Then just replace `infer_signal` implementation with this code:
# + pycharm={"name": "#%%\n"}
import onnxruntime
mbn_model.export('mbn.onnx')
ort_session = onnxruntime.InferenceSession('mbn.onnx')
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
def infer_signal(signal):
data_layer.set_signal(signal)
batch = next(iter(data_loader))
audio_signal, audio_signal_len = batch
audio_signal, audio_signal_len = audio_signal.to(mbn_model.device), audio_signal_len.to(mbn_model.device)
processed_signal, processed_signal_len = mbn_model.preprocessor(
input_signal=audio_signal, length=audio_signal_len,
)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(processed_signal), }
ologits = ort_session.run(None, ort_inputs)
alogits = np.asarray(ologits)
logits = torch.from_numpy(alogits[0])
return logits
|
tutorials/asr/04_Online_Offline_Speech_Commands_Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import imutils
import cv2
import matplotlib.pyplot as plt
from model import AnimeFaceDetectionModel
# os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
# -
model = AnimeFaceDetectionModel(margin=0)
# +
path = './imgs/Image_1.png'
image = cv2.imread(path)
plt.imshow(faces[0])
# -
|
models/face_detection/eval_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import mixture
green_df = pd.read_csv("./clean_data/clean_green.csv")
green_df.head(4)
green_df = pd.get_dummies(green_df)
green_df.head(4)
X = green_df = green_df.drop(columns=['experts::0','experts::1','experts::2','experts::3','experts::4','experts::5','consensus'], axis=1)
print(type(X))
X = X.as_matrix()
print(type(X))
print(X)
X.shape
print(X[1,:])
print(X[:4,:])
print(X[0:2,0:2])
print(X[0:2,:])
from sklearn.cluster import KMeans
# +
cluster_range = range( 1, 20 )
cluster_errors = []
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform( X )
for num_clusters in cluster_range:
clusters = KMeans( num_clusters )
clusters.fit( X_scaled )
cluster_errors.append( clusters.inertia_ )
# -
clusters_df = pd.DataFrame( { "num_clusters":cluster_range, "cluster_errors": cluster_errors } )
plt.figure(figsize=(12,6))
plt.plot( clusters_df.num_clusters, clusters_df.cluster_errors, marker = "o" )
kmeans = KMeans(n_clusters=4, random_state=0).fit(X)
kmeans.labels_
plt.scatter(X[:, 1], X[:, 2], c=kmeans.labels_, s=40, cmap='viridis');
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=[0, 1])
data_rescaled = scaler.fit_transform(X)
#Fitting the PCA algorithm with our Data
pca = PCA().fit(data_rescaled)
#Plotting the Cumulative Summation of the Explained Variance
plt.figure()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Pulsar Dataset Explained Variance')
plt.show()
pca = PCA(n_components=25)
dataset = pca.fit_transform(data_rescaled)
# +
cluster_range = range( 1, 20 )
cluster_errors = []
for num_clusters in cluster_range:
clusters = KMeans( num_clusters )
clusters.fit( dataset )
cluster_errors.append( clusters.inertia_ )
# +
from scipy.spatial.distance import cdist
def plot_kmeans(kmeans, X, n_clusters=4, rseed=0, ax=None):
labels = kmeans.fit_predict(X)
# plot the input data
ax = ax or plt.gca()
ax.axis('equal')
ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)
# plot the representation of the KMeans model
centers = kmeans.cluster_centers_
radii = [cdist(X[labels == i], [center]).max()
for i, center in enumerate(centers)]
for c, r in zip(centers, radii):
ax.add_patch(plt.Circle(c, r, fc='#CCCCCC', lw=3, alpha=0.5, zorder=1))
# -
clusters_df_pca = pd.DataFrame( { "num_clusters":cluster_range, "cluster_errors": cluster_errors } )
# +
# Create plots with pre-defined labels.
fig, ax = plt.subplots(figsize=(12,6))
ax.plot( clusters_df_pca.num_clusters, clusters_df_pca.cluster_errors, marker = "o" , color= "red", label= 'Kmeans after PCA')
ax.plot( clusters_df.num_clusters, clusters_df.cluster_errors, marker = "o",color= "blue",label='Kmeans')
legend = ax.legend(loc='upper right', shadow=True, fontsize='x-large')
# Put a nicer background color on the legend.
#legend.get_frame().set_facecolor('C0')
plt.show()
# -
kmeans = KMeans(n_clusters=4, random_state=0).fit(dataset)
plt.scatter(X[:, 1], X[:, 2], c=kmeans.labels_, s=40, cmap='viridis');
plot_kmeans(kmeans, X)
rng = np.random.RandomState(13)
X_stretched = np.dot(X, rng.randn(62,2))
plot_kmeans(kmeans, X_stretched)
gmm = mixture.GaussianMixture(n_components=4).fit(X)
labels = gmm.predict(X)
plt.scatter(X[:, 1], X[:, 2], c=labels, s=40, cmap='viridis');
probs = gmm.predict_proba(X)
print(probs[:5].round(3))
print(probs[5:10].round(3))
print(probs[150:155].round(3))
print(probs[75:80].round(3))
size = 50 * probs.max(1)
plt.scatter(X[:, 1], X[:, 2], c=labels, s=40, cmap='viridis');
gmm = mixture.GaussianMixture(n_components=4, covariance_type='full', random_state=42)
plt.scatter(X[:, 1], X[:, 2], c=labels, s=40, cmap='viridis');
# +
from matplotlib.patches import Ellipse
def draw_ellipse(position, covariance, ax=None, **kwargs):
"""Draw an ellipse with a given position and covariance"""
ax = ax or plt.gca()
# Convert covariance to principal axes
if covariance.shape == (2, 2):
U, s, Vt = np.linalg.svd(covariance)
angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
width, height = 2 * np.sqrt(s)
else:
angle = 0
width, height = 2 * np.sqrt(covariance)
# Draw the Ellipse
for nsig in range(1, 4):
ax.add_patch(Ellipse(position, nsig * width, nsig * height,
angle, **kwargs))
def plot_gmm(gmm, X, label=True, ax=None):
ax = ax or plt.gca()
labels = gmm.fit(X).predict(X)
if label:
ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)
else:
ax.scatter(X[:, 0], X[:, 1], s=40, zorder=2)
ax.axis('equal')
w_factor = 0.2 / gmm.weights_.max()
for pos, covar, w in zip(gmm.means_, gmm.covars_, gmm.weights_):
draw_ellipse(pos, covar, alpha=w * w_factor)
# -
plot_gmm(gmm, X_stretched)
plot_gmm(gmm, X, label=False)
plot_gmm(gmm, X_stretched, label=False)
gmm16 = mixture.GaussianMixture(n_components=16, covariance_type='full', random_state=0)
plot_gmm(gmm16, X_stretched, label=False)
# +
n_components = np.arange(1, 21)
models = [mixture.GaussianMixture(n, covariance_type='full', random_state=0).fit(X)
for n in n_components]
plt.plot(n_components, [m.bic(X) for m in models], label='BIC')
plt.plot(n_components, [m.aic(X) for m in models], label='AIC')
plt.legend(loc='best')
plt.xlabel('Number of components');
# -
# ### Modelos de mistura para as ALP
|
Clustering_GreenData.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
# 31-Dec-2016
import numpy as np
import scipy as sp
import os, re, json, csv
import networkx as nx
import matplotlib.pyplot as plt
# %matplotlib inline
from pylab import rcParams
rcParams['figure.figsize'] = 7,4 # Bigger figures
rcParams['lines.linewidth'] = 2.0
def NestWhileList(func, arg, stopTestQ, nmax = 1000):
# stopTestQ takes full FP list to calc stop flag. stop when True
tmp_lst = [func(arg)]
tmp_lst = tmp_lst + [func(tmp_lst[-1])]
while ( not(stopTestQ(tmp_lst)) & (len(tmp_lst) < nmax) ):
tmp_lst = tmp_lst + [func(tmp_lst[-1])]
return([list(t) for t in tmp_lst])
def stopcritQ(res_lst):
res_lst = [str(t) for t in res_lst]
return( len(set(res_lst))!=len(res_lst) )
def Clamp(actvn, mask):
assert(len(actvn) == len(mask))
clamped = actvn
clamped[ [j for j in range(len(mask)) if mask[j]==1] ] = 1
clamped[ [j for j in range(len(mask)) if mask[j]<0] ] = 0
return clamped
class FCM:
def __init__(self, title):
self.title = title
self.graph = nx.DiGraph()
self.ActivationFunction = lambda x: 1*(x>=0.5) #S fxn; use member fxn to update
def add_edges(self, edge_lst):
self.graph.add_weighted_edges_from(edge_lst)
def label_edges(self, label_dict):
self.graph = nx.relabel_nodes(self.graph, label_dict, copy=False)
def set_activation(self, actvn):
self.ActivationFunction = actvn
def get_FCM_Matrix(self):
return(nx.adjacency_matrix(self.graph).todense())
def VizFCM(self):
nx.draw(self.graph,
with_labels=True, node_size=700,
nodecolor='g', edge_color='b')
plt.title(self.title)
def EvolveOnce(self, inp, mask):
assert(len(inp) == len(mask))
return Clamp( self.ActivationFunction(
np.asarray(np.matmul(Clamp(inp, mask),
nx.adjacency_matrix(self.graph).todense())).ravel()
), mask)
def EvolveToLimit(self, inp, mask, nmax = 1000):
assert(len(inp) == len(mask))
seq = NestWhileList(
lambda inp_vec: self.EvolveOnce(inp_vec, mask),
inp, stopcritQ, nmax
)
seq = [inp] + seq
return(np.matrix(seq))
# def VizFCMEvol(self):
# +
clotlist = [(1, 1, 1), (1, 2, 0.4), (1, 3, 1), (1, 4, 1),
(2, 3, 0.5), (2, 6, 0.45), (3, 2, 0.4), (3, 4, 0.75),
(3, 6, 0.4), (4, 6, 0.4), (5, 6, 0.45), (6, 2, 0.7),
(7, 5, -0.6), (8, 6, 0.95), (9, 10, -0.9), (10, 6, 1),
(11, 8, 0.95), (12, 11, -0.6)]
mapping = {1:"HCP", 2:"stas", 3:"inju",
4:"HCF", 5:"ADP", 6:"PAgg",
7:"clop", 8:"A2", 9:"war",
10:"K", 11:"cox", 12:"aspi"}
tstfcm = FCM("Testing with Clot FCM")
tstfcm.add_edges(clotlist)
tstfcm.label_edges(mapping)
tstfcm.VizFCM()
vec = np.random.randint(2, size=len(tstfcm.graph.nodes()) )
print tstfcm.EvolveOnce(vec, mask = np.zeros(len(vec)))
print tstfcm.EvolveToLimit(vec, mask = np.zeros(len(vec)))
# -
|
FCM-Class-Test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="5GSzHEgpuo0G" executionInfo={"status": "ok", "timestamp": 1638912687570, "user_tz": 300, "elapsed": 245, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghrkm05RoKPUOEy_8fBWgAXP-_ELu51T_jQRs3xPg=s64", "userId": "16382698357113713759"}} outputId="eab0bfce-c03a-4874-c501-e66afd9759ce"
# ------------------------- #
# SET - UP #
# ------------------------- #
# ---- Requirements ----- #
import sys
from google.colab import drive
import pandas as pd
import numpy as np
import random
# ----- Mounting Google Drive ----- #
drive.mount('/content/drive')
sys.path.append('/content/drive/MyDrive/CIS6930_final')
# ----- Reading in the Data ----- #
pegasus = pd.read_csv('/content/drive/MyDrive/CIS6930_final/summaries/pegasus_test_summaries2.csv')
bart = pd.read_csv('/content/drive/MyDrive/CIS6930_final/summaries/bart_test_summaries2.csv')
t5 = pd.read_csv('/content/drive/MyDrive/CIS6930_final/summaries/t5_test_summaries.csv')
models = {"pegasus": pegasus,
"bart": bart,
"t5": t5}
subframes_list = []
for key,value in models.items():
value = value.iloc[: , 1:]
value['model_name'] = key
indices = random.sample(range(0, len(value)), 3)
subframes_list.append(value.iloc[indices])
sampled_rows = pd.concat(subframes_list)
# ----- Get rid of non-decoded padding?
# Re-ran the models to fix this ; I forgot to set skip-special-tokens
# to true when I was doing the predictions outside of the trainer fuunction
# Oops! :D Incidentally, the special tokens did not have a noticable impact on
# ROUGE performance.
# EXTRA NOTES:
# ----- Commented out code needed to do this but retained for documentation
# ----- purposes
#import re
#def cleaner(text):
# return re.sub("<.*?>", "", text)
#sampled_rows['candidate'] = sampled_rows['candidate'].apply(cleaner)
#sampled_rows['reference'] = sampled_rows['reference'].apply(cleaner)
#sampled_rows.to_csv('/content/drive/MyDrive/CIS6930_final/hev_examples.csv')
# + [markdown] id="j5oZPltEzsaZ"
# # DOWNLAOD FILES
# Because Google Drive will no longer let me save files (I am unsure why - the error message is uninformative in the sense that it provides no detail beyond "Google Drive Error" :'( ), I have to download them and then re-upload them to the drive.
# + id="y-iRWcUK2I9O" colab={"base_uri": "https://localhost:8080/", "height": 17} executionInfo={"status": "ok", "timestamp": 1638912734228, "user_tz": 300, "elapsed": 237, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/<KEY>", "userId": "16382698357113713759"}} outputId="db55d200-742a-42da-ac75-8095e6b8fa31"
from google.colab import files
sampled_rows.to_csv('hev_examples.csv')
files.download("hev_examples.csv")
|
human evaluation/selecting_summaries_for_HEv.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling and Simulation in Python
#
# Chapter 8
#
# Copyright 2017 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
#
# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
# -
# ### Functions from the previous chapter
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end-1):
results[t+1] = update_func(results[t], t, system)
return results
# ### Reading the data
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
# +
un = table2.un / 1e9
census = table2.census / 1e9
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
decorate(xlabel='Year',
ylabel='World population (billion)',
title='Estimated world population')
# -
# ### Running the quadratic model
# Here's the update function for the quadratic growth model with parameters `alpha` and `beta`.
def update_func_quad(pop, t, system):
"""Update population based on a quadratic model.
pop: current population in billions
t: what year it is
system: system object with model parameters
"""
net_growth = system.alpha * pop + system.beta * pop**2
return pop + net_growth
# Extract the starting time and population.
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = get_first_value(census)
# Initialize the system object.
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
alpha=0.025,
beta=-0.0018)
# Run the model and plot results.
results = run_simulation(system, update_func_quad)
plot_results(census, un, results, 'Quadratic model')
# ### Generating projections
# To generate projections, all we have to do is change `t_end`
system.t_end = 2250
results = run_simulation(system, update_func_quad)
plot_results(census, un, results, 'World population projection')
savefig('chap04-fig01.pdf')
# The population in the model converges on the equilibrium population, `-alpha/beta`
results[system.t_end]
-system.alpha / system.beta
# **Exercise:** What happens if we start with an initial population above the carrying capacity, like 20 billion? Run the model with initial populations between 1 and 20 billion, and plot the results on the same axes.
# +
# Solution
p0_array = linspace(1, 25, 11)
for system.p_0 in p0_array:
results = run_simulation(system, update_func_quad)
plot(results)
# -
# ### Comparing projections
# We can compare the projection from our model with projections produced by people who know what they are doing.
table3 = tables[3]
table3.head()
# `NaN` is a special value that represents missing data, in this case because some agencies did not publish projections for some years.
table3.columns = ['census', 'prb', 'un']
# This function plots projections from the UN DESA and U.S. Census. It uses `dropna` to remove the `NaN` values from each series before plotting it.
def plot_projections(table):
"""Plot world population projections.
table: DataFrame with columns 'un' and 'census'
"""
census_proj = table.census / 1e9
un_proj = table.un / 1e9
plot(census_proj.dropna(), 'b:', label='US Census')
plot(un_proj.dropna(), 'g--', label='UN DESA')
# Run the model until 2100, which is as far as the other projections go.
system = System(t_0=t_0,
t_end=2100,
p_0=p_0,
alpha=0.025,
beta=-0.0018)
# +
results = run_simulation(system, update_func_quad)
plot_results(census, un, results, 'World population projections')
plot_projections(table3)
savefig('chap04-fig02.pdf')
# -
# People who know what they are doing expect the growth rate to decline more sharply than our model projects.
# ## Exercises
#
# **Optional exercise:** The net growth rate of world population has been declining for several decades. That observation suggests one more way to generate projections, by extrapolating observed changes in growth rate.
#
# The `modsim` library provides a function, `compute_rel_diff`, that computes relative differences of the elements in a sequence. It is a wrapper for the NumPy function `ediff1d`:
source_code(compute_rel_diff)
# Here's how we can use it to compute the relative differences in the `census` and `un` estimates:
# +
alpha_census = compute_rel_diff(census)
plot(alpha_census)
alpha_un = compute_rel_diff(un)
plot(alpha_un)
decorate(xlabel='Year', label='Net growth rate')
# -
# Other than a bump around 1990, net growth rate has been declining roughly linearly since 1965. As an exercise, you can use this data to make a projection of world population until 2100.
#
# 1. Define a function, `alpha_func`, that takes `t` as a parameter and returns an estimate of the net growth rate at time `t`, based on a linear function `alpha = intercept + slope * t`. Choose values of `slope` and `intercept` to fit the observed net growth rates since 1965.
#
# 2. Call your function with a range of `ts` from 1960 to 2020 and plot the results.
#
# 3. Create a `System` object that includes `alpha_func` as a system variable.
#
# 4. Define an update function that uses `alpha_func` to compute the net growth rate at the given time `t`.
#
# 5. Test your update function with `t_0 = 1960` and `p_0 = census[t_0]`.
#
# 6. Run a simulation from 1960 to 2100 with your update function, and plot the results.
#
# 7. Compare your projections with those from the US Census and UN.
# +
# Solution
def alpha_func(t):
intercept = 0.02
slope = -0.00021
return intercept + slope * (t - 1970)
# +
# Solution
ts = linrange(1960, 2020)
alpha_model = TimeSeries(alpha_func(ts), ts)
plot(alpha_model, color='gray', label='model')
plot(alpha_census)
plot(alpha_un)
decorate(xlabel='Year', label='Net growth rate')
# +
# Solution
t_0 = 1960
t_end = 2100
p_0 = census[t_0]
# +
# Solution
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
alpha_func=alpha_func)
# +
# Solution
def update_func_alpha(pop, t, system):
"""Update population based on a quadratic model.
pop: current population in billions
t: what year it is
system: system object with model parameters
"""
net_growth = system.alpha_func(t) * pop
return pop + net_growth
# +
# Solution
update_func_alpha(p_0, t_0, system)
# +
# Solution
results = run_simulation(system, update_func_alpha);
# +
# Solution
plot_results(census, un, results, 'World population projections')
plot_projections(table3)
# -
|
code/soln/chap08soln.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Warning: API calls
import json
from binance.client import Client
client = Client("", "")
symbol = "NEOUSDT"
start = "1 Jan, 2018"
end = "1 Jan, 2019"
interval = Client.KLINE_INTERVAL_1DAY
klines = client.get_historical_klines(symbol, interval, start, end)
# +
#filename_json = "Binance_{}_{}_{}-{}.json".format(symbol, interval, start, end)
filename_json = "NEOUSDT_2018_1DAY_Binance.json"
with open(filename_json,'w') as f:
f.write(json.dumps(klines))
# +
# 0) Open time
# 1) Open
# 2) High
# 3) Low
# 4) Close
# 5) Volume
# 6) Close time
# 7) Quote asset volume
# 8) Number of trades
# 9) Taker buy base asset volume
# 10) Taker buy quote asset volume
# 11) Ignore
# +
import pandas as pd
df = pd.read_json(filename_json)
df.drop([6, 7, 9, 10, 11], inplace=True, axis=1) #drop inconsiderable data
df.columns = ["open time", "open", "high", "low", "close", "volume", "number of trades"]
df["volume"] = df["volume"].apply(lambda x : round(x, 1))
#converting milliseconds to datetime
df["open time"] = df["open time"].apply(lambda x: pd.to_datetime(x, unit='ms').to_pydatetime())
df.head()
# +
# .csv if needed
filename_csv = filename_json[:-4] + "csv"
df.to_csv(filename_csv, sep='\t', encoding='utf-8', index=False)
data = pd.read_csv(filename_csv, sep='\t')
#print((df.shape, (31+28)*24))
data.head()
|
data/collector.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plagiarism Detector
# In this notebook, I examine text files to perform data classification. Each file is labeled as either plagiarized or not.
# The notebook was created to be used in **AWS Sagemaker** environment.
# ## Download data and save locally
#
# Source for database:
#
# <NAME>. and <NAME>. Developing A Corpus of Plagiarised Short Answers, Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, In Press. [Download]
# !wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip
# !unzip data
# import libraries
import pandas as pd
import numpy as np
import os
# This plagiarism dataset is made of multiple text files; each of these files has characteristics that are is summarized in a .csv file named file_information.csv, which we can read in using pandas.
# +
csv_file = 'data/file_information.csv'
plagiarism_df = pd.read_csv(csv_file)
# print out the first few rows of data info
plagiarism_df.head(10)
# -
# (Text extracted from original source)
# ### Five task types, A-E
# Each text file contains an answer to one short question; these questions are labeled as tasks A-E.
#
# - Each task, A-E, is about a topic that might be included in the Computer Science curriculum that was created by the authors of this dataset.
# - For example, Task A asks the question: "What is inheritance in object oriented programming?"
#
# Four categories of plagiarism
# Each text file has an associated plagiarism label/category:
#
# - `cut`: An answer is plagiarized; it is copy-pasted directly from the relevant Wikipedia source text.
# - `light`: An answer is plagiarized; it is based on the Wikipedia source text and includes some copying and paraphrasing.
# - `heavy`: An answer is plagiarized; it is based on the Wikipedia source text but expressed using different words and structure. Since this doesn't copy directly from a source text, this will likely be the most challenging kind of plagiarism to detect.
# - `non`: An answer is not plagiarized; the Wikipedia source text is not used to create this answer.
# - `orig`: This is a specific category for the original, Wikipedia source text. Files for comparison purposes only.
# ### Data visualization and analysis
# print out some stats about the data
print('Number of files: ', plagiarism_df.shape[0]) # .shape[0] gives the rows
# .unique() gives unique items in a specified column
print('Number of unique tasks/question types (A-E): ', (len(plagiarism_df['Task'].unique())))
print('Unique plagiarism categories: ', (plagiarism_df['Category'].unique()))
# +
# Show counts by different tasks and amounts of plagiarism
# group and count by task
counts_per_task = plagiarism_df.groupby(['Task']).size().reset_index(name="Counts")
print("\nTask:")
display(counts_per_task)
# group by plagiarism level
counts_per_category = plagiarism_df.groupby(['Category']).size().reset_index(name="Counts")
print("\nPlagiarism Levels:")
display(counts_per_category)
# group by task AND plagiarism level
counts_task_and_plagiarism = plagiarism_df.groupby(['Task', 'Category']).size().reset_index(name="Counts")
print("\nTask & Plagiarism Level Combos :")
display(counts_task_and_plagiarism)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# counts
group = ['Task', 'Category']
counts = plagiarism_df.groupby(group).size().reset_index(name="Counts")
plt.figure(figsize=(8,5))
plt.bar(range(len(counts)), counts['Counts'], color = 'blue')
# -
# ## Feature Engineering
# Tasks:
# - Clean and pre-process the data.
# - Define features for comparing the similarity of an answer text and a source text, and extract similarity features.
# - Select "good" features, by analyzing the correlations between different features.
# - Create train/test .csv files that hold the relevant features and class labels for train/test data points
# import extra library
from sklearn.feature_extraction.text import CountVectorizer
# ### Convert categorical to numerical data
#
# Two columns will be created to provide a numerical value for each of the samples.
# They are:
# - `Category`: labels to numerical labels according to the following rules (a higher value indicates a higher degree of plagiarism):
# * 0 = non;
# * 1 = heavy;
# * 2 = light;
# * 3 = cut;
# * -1 = orig, this is a special value that indicates an original file.
# - `Class`: Any answer text that is not plagiarized (non) should have the class label 0. Any plagiarized answer texts should have the class label 1.
# And any orig texts will have a special label -1.
# Read in a csv file and return a transformed dataframe
def numerical_dataframe(csv_file='data/file_information.csv'):
'''Reads in a csv file which is assumed to have `File`, `Category` and `Task` columns.
This function does two things:
1) converts `Category` column values to numerical values
2) Adds a new, numerical `Class` label column.
The `Class` column will label plagiarized answers as 1 and non-plagiarized as 0.
Source texts have a special label, -1.
:param csv_file: The directory for the file_information.csv file
:return: A dataframe with numerical categories and a new `Class` label column'''
# your code here
df = pd.read_csv(csv_file)
category_conversion = {
'non': 0,
'heavy': 1,
'light': 2,
'cut': 3,
'orig': -1
}
df["Category"] = df["Category"].apply(lambda x: category_conversion[x])
df["Class"] = df["Category"].apply(lambda x: 1 if x > 0 else (-1 if x < 0 else 0))
return df
# +
# informal testing, print out the results of a called function
# create new `transformed_df`
transformed_df = numerical_dataframe(csv_file ='data/file_information.csv')
# check work
# check that all categories of plagiarism have a class label = 1
transformed_df.head()
# -
# ### Text processing and data spliting
#
# Two new columns will be created:
# * A Text column; this holds all the lowercase text for a File, with extraneous punctuation removed.
# * A Datatype column; this is a string value train, test, or orig that labels a data point as part of our train or test set
import re
# helper function for pre-processing text given a file
def process_file(file):
# put text in all lower case letters
all_text = file.read().lower()
# remove all non-alphanumeric chars
all_text = re.sub(r"[^a-zA-Z0-9]", " ", all_text)
# remove newlines/tabs, etc. so it's easier to match phrases, later
all_text = re.sub(r"\t", " ", all_text)
all_text = re.sub(r"\n", " ", all_text)
all_text = re.sub(" ", " ", all_text)
all_text = re.sub(" ", " ", all_text)
return all_text
# +
# create a text column
def create_text_column(df, file_directory='data/'):
'''Reads in the files, listed in a df and returns that df with an additional column, `Text`.
:param df: A dataframe of file information including a column for `File`
:param file_directory: the main directory where files are stored
:return: A dataframe with processed text '''
# create copy to modify
text_df = df.copy()
# store processed text
text = []
# for each file (row) in the df, read in the file
for row_i in df.index:
filename = df.iloc[row_i]['File']
#print(filename)
file_path = file_directory + filename
with open(file_path, 'r', encoding='utf-8', errors='ignore') as file:
# standardize text using helper function
file_text = process_file(file)
# append processed text to list
text.append(file_text)
# add column to the copied dataframe
text_df['Text'] = text
return text_df
text_df = create_text_column(transformed_df)
text_df.head()
# -
# Use function to label datatype for training 1 or test 2
def create_datatype(df, train_value, test_value, datatype_var, compare_dfcolumn, operator_of_compare, value_of_compare,
sampling_number, sampling_seed):
# Subsets dataframe by condition relating to statement built from:
# 'compare_dfcolumn' 'operator_of_compare' 'value_of_compare'
df_subset = df[operator_of_compare(df[compare_dfcolumn], value_of_compare)]
df_subset = df_subset.drop(columns = [datatype_var])
# Prints counts by task and compare_dfcolumn for subset df
#print("\nCounts by Task & " + compare_dfcolumn + ":\n", df_subset.groupby(['Task', compare_dfcolumn]).size().reset_index(name="Counts") )
# Sets all datatype to value for training for df_subset
df_subset.loc[:, datatype_var] = train_value
# Performs stratified random sample of subset dataframe to create new df with subset values
df_sampled = df_subset.groupby(['Task', compare_dfcolumn], group_keys=False).apply(lambda x: x.sample(min(len(x), sampling_number), random_state = sampling_seed))
df_sampled = df_sampled.drop(columns = [datatype_var])
# Sets all datatype to value for test_value for df_sampled
df_sampled.loc[:, datatype_var] = test_value
# Prints counts by compare_dfcolumn for selected sample
#print("\nCounts by "+ compare_dfcolumn + ":\n", df_sampled.groupby([compare_dfcolumn]).size().reset_index(name="Counts") )
#print("\nSampled DF:\n",df_sampled)
# Labels all datatype_var column as train_value which will be overwritten to
# test_value in next for loop for all test cases chosen with stratified sample
for index in df_sampled.index:
# Labels all datatype_var columns with test_value for straified test sample
df_subset.loc[index, datatype_var] = test_value
#print("\nSubset DF:\n",df_subset)
# Adds test_value and train_value for all relevant data in main dataframe
for index in df_subset.index:
# Labels all datatype_var columns in df with train_value/test_value based upon
# stratified test sample and subset of df
df.loc[index, datatype_var] = df_subset.loc[index, datatype_var]
# returns nothing because dataframe df already altered
# +
import operator
# create new df with Datatype (train, test, orig) column
def train_test_dataframe(clean_df, random_seed=100):
new_df = clean_df.copy()
# Initialize datatype as 0 initially for all records - after function 0 will remain only for original wiki answers
new_df.loc[:,'Datatype'] = 0
# Creates test & training datatypes for plagiarized answers (1,2,3)
create_datatype(new_df, 1, 2, 'Datatype', 'Category', operator.gt, 0, 1, random_seed)
# Creates test & training datatypes for NON-plagiarized answers (0)
create_datatype(new_df, 1, 2, 'Datatype', 'Category', operator.eq, 0, 2, random_seed)
# creating a dictionary of categorical:numerical mappings for plagiarsm categories
mapping = {0:'orig', 1:'train', 2:'test'}
# traversing through dataframe and replacing categorical data
new_df.Datatype = [mapping[item] for item in new_df.Datatype]
return new_df
random_seed = 1 # can change; set for reproducibility
# pass in `text_df` from above to create a complete dataframe, with all the information you need
complete_df = train_test_dataframe(text_df, random_seed=random_seed)
# check results
complete_df.head(20)
# -
# ### Similarity features
# One of the ways we might go about detecting plagiarism, is by computing similarity features that measure how similar a given answer text is as compared to the original wikipedia source text (for a specific task, a-e). The similarity features are informed by [this paper on plagiarism detection](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf).
# In this paper, researchers created features called __containment__ and __longest common subsequence__.
# #### Containment calculation
#
# The general steps to complete this function are as follows:
#
# 1. From all of the text files in a given df, create an array of n-gram counts; it is suggested that you use a CountVectorizer for this purpose.
# 2. Get the processed answer and source texts for the given answer_filename.
# 3. Calculate the containment between an answer and source text according to the following equation.
#
# $$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$
#
# 4. Return that containment value.
#
# Calculate the ngram containment for one answer file/source file pair in a df
def calculate_containment(df, n, answer_filename):
'''Calculates the containment between a given answer text and its associated source text.
This function creates a count of ngrams (of a size, n) for each text file in our data.
Then calculates the containment by finding the ngram count for a given answer text,
and its associated source text, and calculating the normalized intersection of those counts.
:param df: A dataframe with columns,
'File', 'Task', 'Category', 'Class', 'Text', and 'Datatype'
:param n: An integer that defines the ngram size
:param answer_filename: A filename for an answer text in the df, ex. 'g0pB_taskd.txt'
:return: A single containment value that represents the similarity
between an answer text and its source text.
'''
source_filename = 'orig_' + answer_filename.split('_')[1]
answer_text = df[df['File'] == answer_filename].iloc[0]['Text']
source_text = df[df['File'] == source_filename].iloc[0]['Text']
cv = CountVectorizer(ngram_range=(n,n))
matrix = cv.fit_transform([answer_text, source_text]).toarray()
intersection = np.min(matrix, 0)
return sum(intersection)/sum(matrix[0])
# #### Test cells
# +
# select a value for n
n = 3
# indices for first few files
test_indices = range(5)
# iterate through files and calculate containment
category_vals = []
containment_vals = []
for i in test_indices:
# get level of plagiarism for a given file index
category_vals.append(complete_df.loc[i, 'Category'])
# calculate containment for given file and n
filename = complete_df.loc[i, 'File']
c = calculate_containment(complete_df, n, filename)
containment_vals.append(c)
# print out result, does it make sense?
print('Original category values: \n', category_vals)
print()
print(str(n)+'-gram containment values: \n', containment_vals)
# -
# #### Longest Common Subsequence
#
# It may be helpful to think of this in a concrete example. A Longest Common Subsequence (LCS) problem may look as follows:
#
# * Given two texts: text A (answer text) of length n, and string S (original source text) of length m. Our goal is to produce their longest common subsequence of words: the longest sequence of words that appear left-to-right in both texts (though the words don't have to be in continuous order).
# * Consider:
#
# * A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents"
# * S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"
# - In this case, we can see that the start of each sentence of fairly similar, having overlap in the sequence of words, "pagerank is a link analysis algorithm used by" before diverging slightly. Then we continue moving left -to-right along both texts until we see the next common sequence; in this case it is only one word, "google". Next we find "that" and "a" and finally the same ending "to each element of a hyperlinked set of documents".
# Compute the normalized LCS given an answer text and a source text
def lcs_norm_word(answer_text, source_text):
'''Computes the longest common subsequence of words in two texts; returns a normalized value.
:param answer_text: The pre-processed text for an answer text
:param source_text: The pre-processed text for an answer's associated source text
:return: A normalized LCS value'''
answer_words = [''] + answer_text.split()
source_words = [''] + source_text.split()
# Prepare matrix for Dynamic Programmaing
matrix = np.zeros((len(answer_words), len(source_words)))
for i in range(1, len(answer_words)):
for j in range(1, len(source_words)):
matrix[i][j] = (matrix[i-1][j-1] + 1) if (source_words[j] == answer_words[i]) else max(matrix[i-1][j], matrix[i][j-1])
return matrix[-1,-1] / (len(answer_words) - 1)
# #### Test cells
# +
# Run the test scenario from above
# does your function return the expected value?
A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents"
S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"
# calculate LCS
lcs = lcs_norm_word(A, S)
print('LCS = ', lcs)
# expected value test
assert lcs==20/27., "Incorrect LCS value, expected about 0.7408, got "+str(lcs)
print('Test passed!')
# +
# test on your own
test_indices = range(5) # look at first few files
category_vals = []
lcs_norm_vals = []
# iterate through first few docs and calculate LCS
for i in test_indices:
category_vals.append(complete_df.loc[i, 'Category'])
# get texts to compare
answer_text = complete_df.loc[i, 'Text']
task = complete_df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = complete_df[(complete_df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate lcs
lcs_val = lcs_norm_word(answer_text, source_text)
lcs_norm_vals.append(lcs_val)
# print out result, does it make sense?
print('Original category values: \n', category_vals)
print()
print('Normalized LCS values: \n', lcs_norm_vals)
# -
# ## Create all features
# ### Multiple containment features
# This function returns a list of containment features, calculated for a given n and for all files in a df (assumed to the the complete_df).
# Function returns a list of containment features, calculated for a given n
# Should return a list of length 100 for all files in a complete_df
def create_containment_features(df, n, column_name=None):
containment_values = []
if(column_name==None):
column_name = 'c_'+str(n) # c_1, c_2, .. c_n
# iterates through dataframe rows
for i in df.index:
file = df.loc[i, 'File']
# Computes features using calculate_containment function
if df.loc[i,'Category'] > -1:
c = calculate_containment(df, n, file)
containment_values.append(c)
# Sets value to -1 for original tasks
else:
containment_values.append(-1)
print(str(n)+'-gram containment features created!')
return containment_values
# ### LCS features
#
# Function creates lcs feature and add it to the dataframe
def create_lcs_features(df, column_name='lcs_word'):
lcs_values = []
# iterate through files in dataframe
for i in df.index:
# Computes LCS_norm words feature using function above for answer tasks
if df.loc[i,'Category'] > -1:
# get texts to compare
answer_text = df.loc[i, 'Text']
task = df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = df[(df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate lcs
lcs = lcs_norm_word(answer_text, source_text)
lcs_values.append(lcs)
# Sets to -1 for original tasks
else:
lcs_values.append(-1)
print('LCS features created!')
return lcs_values
# In the below cell I define an n-gram range; these will be the n's I use to create n-gram containment features.
# +
# Define an ngram range
ngram_range = range(1,15)
features_list = []
# Create features in a features_df
all_features = np.zeros((len(ngram_range)+1, len(complete_df)))
# Calculate features for containment for ngrams in range
i=0
for n in ngram_range:
column_name = 'c_'+str(n)
features_list.append(column_name)
# create containment features
all_features[i]=np.squeeze(create_containment_features(complete_df, n))
i+=1
# Calculate features for LCS_Norm Words
features_list.append('lcs_word')
all_features[i]= np.squeeze(create_lcs_features(complete_df))
# create a features dataframe
features_df = pd.DataFrame(np.transpose(all_features), columns=features_list)
# Print all features/columns
print()
print('Features: ', features_list)
print()
# -
# print some results
features_df
# ## Correlated features
# Some features are too highly-correlated. We have to extract only some features that present a lower correlation to avoid overfitting.
# +
# Create correlation matrix for just Features to determine different models to test
corr_matrix = features_df.corr().abs().round(2)
# display shows all of a dataframe
display(corr_matrix)
# -
# The function below takes in dataframes and a list of selected features (column names) and returns (train_x, train_y), (test_x, test_y)
def train_test_data(complete_df, features_df, selected_features):
'''Gets selected training and test features from given dataframes, and
returns tuples for training and test features and their corresponding class labels.
:param complete_df: A dataframe with all of our processed text data, datatypes, and labels
:param features_df: A dataframe of all computed, similarity features
:param selected_features: An array of selected features that correspond to certain columns in `features_df`
:return: training and test features and labels: (train_x, train_y), (test_x, test_y)'''
# get the training features
train_x = features_df[complete_df['Datatype'] == 'train'][selected_features].to_numpy()
# And training class labels (0 or 1)
train_y = complete_df[complete_df['Datatype'] == 'train']['Category'].to_numpy()
# get the test features and labels
test_x = features_df[complete_df['Datatype'] == 'test'][selected_features].to_numpy()
test_y = complete_df[complete_df['Datatype'] == 'test']['Category'].to_numpy()
return (train_x, train_y), (test_x, test_y)
# ## Select features
# Select two of the features that are not that correlated
# +
# Select your list of features, this should be column names from features_df
# ex. ['c_1', 'lcs_word']
selected_features = ['c_1', 'c_5']
(train_x, train_y), (test_x, test_y) = train_test_data(complete_df, features_df, selected_features)
# check that division of samples seems correct
# these should add up to 95 (100 - 5 original files)
print('Training size: ', len(train_x))
print('Test size: ', len(test_x))
print()
print('Training df sample: \n', train_x[:10])
# -
# ## Creating final data files
#
# In this project, SageMaker will expect the following format for train/test data:
#
# - Training and test data should be saved in one .csv file each, ex train.csv and test.csv
# - These files should have class labels in the first column and features in the rest of the columns
# #### Creating csv files
def make_csv(x, y, filename, data_dir):
'''Merges features and labels and converts them into one csv file with labels in the first column.
:param x: Data features
:param y: Data labels
:param file_name: Name of csv file, ex. 'train.csv'
:param data_dir: The directory where files will be saved
'''
# make data dir, if it does not exist
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# your code here
pd.concat([pd.DataFrame(y), pd.DataFrame(x)], axis=1).dropna().to_csv(data_dir+'/'+filename, index=False, header=False)
# nothing is returned, but a print statement indicates that the function has run
print('Path created: '+str(data_dir)+'/'+str(filename))
# #### Test
# +
fake_x = [ [0.39814815, 0.0001, 0.19178082],
[0.86936937, 0.44954128, 0.84649123],
[0.44086022, 0., 0.22395833] ]
fake_y = [0, 1, 1]
make_csv(fake_x, fake_y, filename='to_delete.csv', data_dir='test_csv')
# read in and test dimensions
fake_df = pd.read_csv('test_csv/to_delete.csv', header=None)
# check shape
assert fake_df.shape==(3, 4), \
'The file should have as many rows as data_points and as many columns as features+1 (for indices).'
# check that first column = labels
assert np.all(fake_df.iloc[:,0].values==fake_y), 'First column is not equal to the labels, fake_y.'
print('Tests passed!')
# -
# delete the test csv file, generated above
# ! rm -rf test_csv
# +
# create train.csv and test.csv files in a directory
# to be specified when uploading data to S3
data_dir = 'plagiarism_data'
make_csv(train_x, train_y, filename='train.csv', data_dir=data_dir)
make_csv(test_x, test_y, filename='test.csv', data_dir=data_dir)
# -
# # Training a model
# import libraries
import boto3
import sagemaker
# ## Load data to S3
# +
# session and role
session = boto3.Session()
sagemaker_session = sagemaker.Session(session)
try:
role = os.environ['AWS_INSTANCE_ROLE']
except:
role = sagemaker.get_execution_role()
# create an S3 bucket
try:
bucket = os.environ['AWS_PROJECT_BUCKET']
except:
bucket = sagemaker_session.default_bucket()
# name of directory created to save features data
data_dir = 'plagiarism_data'
# set prefix, a descriptive name for a directory
prefix = 'plagiarism_project'
# folder in S3 to save data to
output_path = f's3://{bucket}/{prefix}/output'
code_path = f's3://{bucket}/{prefix}/code'
# -
# upload all data to S3
s3_path = sagemaker_session.upload_data(key_prefix=prefix, bucket=bucket, path=data_dir)
s3_path
# #### Test cell
# +
# confirm that data is in S3 bucket
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
empty_check.append(obj.key)
print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
# -
# ### Modeling
# Here I'm going to use SKLearn from Sagemaker module in order to define an estimator. The `train.py` script is used as source for the training routine.
from sagemaker.sklearn.estimator import SKLearn
# your import and estimator code, here
estimator = SKLearn(entry_point='train.py',
source_dir='source_sklearn',
output_path=output_path,
code_path=code_path,
role=role,
instance_count=1,
instance_type='ml.c4.xlarge',
framework_version='0.23-1',
sagemaker_session=sagemaker_session,
hyperparameters= {
'neighbors': 10
})
# ### Train
#
# +
# %%time
# Train your estimator on S3 training data
estimator.fit({'train': s3_path})
# -
# ### Deploy endpoint
# +
# %%time
# deploy your model to create a predictor
predictor = estimator.deploy(instance_type='ml.t2.medium', initial_instance_count=1)
# -
# ### Evaluate model
#
# +
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
test_x = test_data.iloc[:,1:]
# +
# First: generate predicted, class labels
test_y_preds = predictor.predict(test_x)
# test that model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')
# +
# Second: calculate the test accuracy
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(test_y, test_y_preds)
print(accuracy)
## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
# +
prediction_matrix = pd.DataFrame(np.zeros((4,4), int))
for real, pred in zip(test_y, test_y_preds):
prediction_matrix.iloc[real, pred] += 1
# Row: true label,
# Column: predicted label
prediction_matrix
# -
# ## Clean up resources
predictor.delete_endpoint()
bucket_to_delete = boto3.resource('s3').Bucket(bucket)
bucket_to_delete.objects.all().delete()
|
PlagiarismDetection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
dr = 'D:/Projects/GitHub/TSAI_EMLO1.0/Session09_AWSSagemakerAndLargeScaleModelTraining'
os.environ['SM_OUTPUT_DATA_DIR'] = os.path.join(dr,'output')
os.environ['SM_MODEL_DIR'] = os.path.join(dr,'model')
os.environ['SM_CHANNEL'] = os.path.join(dr,'data')
# -
# %cd D:/Projects/GitHub/TSAI_EMLO1.0/Session09_AWSSagemakerAndLargeScaleModelTraining
# %run -i train
|
Session09_AWSSagemakerAndLargeScaleModelTraining/.ipynb_checkpoints/CIFAR100ResNet34ClassifierOnSpotInstance-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # 循环神经网络语言模型
# ## 自然语言编码
# 神经网络无法直接处理汉字,需要将汉字编号。下面这段代码就是利用Python的字典,对一句话中的每个字进行编号。
#
def encode_sentence(s,chars):
sid=[0]
for c in s:
if not c in chars:
chars[c]=len(chars)
sid.append(chars[c])
sid.append(1)
return sid
chars={'<BOS>':0,'<EOS>':1,'<UNK>':2}
sen="巴黎是法国的首都及最大都市,同时是法兰西岛大区首府,为法国的政治与文化中心,隶属法兰西岛大区之下的巴黎省"
encode_sentence(sen,chars)
# ## 读取数据
# 读取的同时将汉字处理成上述的编号,同时要记录汉字和编号的对应表
# +
import os
import json
import pickle
def prepare_data(dir_):
chars={'<BOS>':0,'<EOS>':1,'<UNK>':2}
sentences=[]
sids=[]
files=os.listdir(dir_)
for file_ in files:
al=os.path.join(dir_,file_)
print al
with open(al,'r') as f:
lines=f.readlines()
for line in lines:
data=json.loads(line)
text=data['text']
sen=text.split('\n')
for s in sen:
if len(s.strip())>0:
sentences.append(s)
sid=encode_sentence(s,chars)
sids.append(sid)
n_char=len(chars)
print 'vocabulary_size=%d data_size=%d'%(n_char,len(sids))
pickle.dump(chars,open('chars.pkl','wb'))
return sentences,sids,chars
# -
sentences,sids,chars=prepare_data("corpus")
# ## 训练神经网络
# 首先设置一些超参数
class Args(object):
max_length=256
n_emb=80
vocab_size=12000
n_hidden=512
batch_size=16
# ## 开始训练流程
# +
from lstm import LSTMLM
import numpy as np
import copy
def train(sids):
args=Args()
lstm=LSTMLM(args)
lstm.build_model()
for i in range(40000):
batch_sen=np.random.choice(sids,size=args.batch_size)
batch_sen=[copy.copy(s) for s in batch_sen]
loss=lstm.train(batch_sen)
if i%10==0:
print 'step=%d, loss=%.3f'%(i,loss)
if i%1000==0 and i!=0:
lstm.save_model('model')
train(sids)
# -
# ## 测试和使用
# 语言模型可以判断任意字符串是自然语言的概率,有非常多的用处。
# ### 判断几句话中哪句更通顺
# +
from lstm import LSTMLM
import numpy as np
def get_prob(sen):
sen=sen.decode('utf-8')
args=Args()
lstm=LSTMLM(args)
lstm.build_model()
lstm.load_model('model')
chars=pickle.load(open('chars.pkl','rb'))
prob=0.
segments=[c for c in sen]
segments.insert(0,'<BOS>')
segments.append('<EOS>')
sid=[(chars[c] if c in chars else 2)for c in segments]
eprob=0
for i in range(1,len(sid)):
dist=lstm.next_char([sid[:i]])[0]
eprob+=np.log(sid[i])
epp=-eprob/i
print sen[:i].encode('utf-8'),epp,eprob
return epp
# -
sen1="分哈啊词腌可"
print get_prob(sen1)
sen2="数学是一门历史悠久的学科。"
print get_prob(sen2)
# ### 将句子补齐
# +
from lstm import LSTMLM
import numpy as np
import copy
def maximum_generate(prefix):
prefix=prefix.decode('utf-8')
args=Args()
lstm=LSTMLM(args)
lstm.build_model()
lstm.load_model('model')
chars=pickle.load(open('chars.pkl','rb'))
rchars={chars[c]:c for c in chars}
segments=[c for c in prefix]
segments.insert(0,'<BOS>')
sid=[(chars[c] if c in chars else 2)for c in segments]
str_=prefix
while sid[-1]!=1 and len(sid)<64:
dist=lstm.next_char([copy.copy(sid)])[0]
nxt=np.random.choice(range(args.vocab_size),p=dist)
sid.append(nxt)
c2=rchars[nxt]
str_+=c2
print str_.encode('utf-8')
return str_
# -
print maximum_generate("数学").encode('utf-8')
|
rnnlm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gaussian Process Modeling of Light Curves
#
# In this notebook we exemplify the modeling of the light curves using a Gaussian process (GP).
#
# #### Index<a name="index"></a>
# 1. [Import Packages](#imports)
# 2. [Load the Original Dataset](#loadData)
# 3. [Fit Gaussian Processes](#gps)
# 1. [Set Path to Save GP Files](#saveGps)
# 2. [Compute GP Fits](#makeGps)
# 4. [Light Curve Visualization](#see)
#
# ## 1. Import Packages<a name="imports"></a>
import collections
import os
import pickle
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from snmachine import gps, sndata
from utils.plasticc_pipeline import create_folder_structure, get_directories, load_dataset
# %config Completer.use_jedi = False # enable autocomplete
# #### Aestetic settings
# +
# %matplotlib inline
sns.set(font_scale=1.3, style="ticks")
# -
# ## 2. Load Dataset<a name="loadData"></a>
#
# First, **write** the path to the folder that contains the dataset we want to use, `folder_path`.
folder_path = '../snmachine/example_data'
# Then, **write** in `data_file_name` the name of the file where your dataset is saved.
#
# In this notebook we use the dataset saved in [2_preprocess_data](2_preprocess_data.ipynb).
data_file_name = 'example_dataset_gapless50.pckl'
# Load the dataset.
data_path = os.path.join(folder_path, data_file_name)
dataset = load_dataset(data_path)
# ## 3. Fit Gaussian Processes<a name="gps"></a>
#
# ### 3.1. Set Path to Save GP Files<a name="saveGps"></a>
#
# We can now generate a folder structure to neatly save the files. Otherwise, you can directly write the path to the folder to save the GP files in `saved_gps_path`.
#
# **<font color=Orange>A)</font>** Generate the folder structure.
#
# **Write** the name of the folder you want in `analysis_name`.
analysis_name = data_file_name[:-5]
analysis_name
# Create the folder structure.
create_folder_structure(folder_path, analysis_name)
# See the folder structure.
directories = get_directories(folder_path, analysis_name)
directories
# Set the path to the folder to save the GP files.
path_saved_gps = directories['intermediate_files_directory']
# **<font color=Orange>B)</font>** Directly choose where to save the GP files.
#
# **Write** the path to the folder to save the GP files in `saved_gps_path`.
# ```python
# saved_gps_path = os.path.join(folder_path, data_file_name[:-5])
# ```
# ### 3.2. Compute GP Fits<a name="makeGps"></a>
#
# **Choose**:
# - `t_min`: minimim time to evaluate the Gaussian Process Regression at.
# - `t_max`: maximum time to evaluate the Gaussian Process Regression at.
# - `gp_dim`: dimension of the Gaussian Process Regression. If `gp_dim` is 1, the filters are fitted independently. If `gp_dim` is 2, the Matern kernel is used to fit light curves both in time and wavelength.
# - `number_gp`: number of points to evaluate the Gaussian Process Regression at.
# - `number_processes`: number of processors to use for parallelisation (**<font color=green>optional</font>**).
# +
t_min = 0
t_max = 278
gp_dim = 2
number_gp = 276
number_processes = 1
# -
gps.compute_gps(dataset, number_gp=number_gp, t_min=t_min, t_max=t_max,
gp_dim=gp_dim, output_root=path_saved_gps,
number_processes=number_processes)
# [Go back to top.](#index)
#
# ## 4. Light Curve Visualization<a name="see"></a>
#
# Here we show the light curve of an event and the Gaussian process used to fit it.
obj_show = '7033'
sndata.PlasticcData.plot_obj_and_model(dataset.data[obj_show],
dataset.models[obj_show])
# [Go back to top.](#index)
#
# *Previous notebook:* [2_preprocess_data](2_preprocess_data.ipynb)
#
# **Next notebook:** [4_augment_data](4_augment_data.ipynb)
|
examples/3_model_lightcurves.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install opencv-contrib-python
import os
files = []
for i in os.listdir('C:/Users/rashi.budati/Desktop/images_faces/subset'):
if i.endswith('.png'):
files.append(i)
files[0]
imagePath = 'C:/Users/rashi.budati/Desktop/images_faces/subset'
for i,val in enumerate(files):
name = str(val)
final_path = os.path.join(imagePath + "/" + name)
print(final_path)
# # for i,val in enumerate(files):
# # print(str(val))
# image = cv2.imread(final_path)
# print(image)
# +
import cv2
import sys
from pathlib import Path
import glob
imagePath = 'C:/Users/rashi.budati/Desktop/images_faces/subset'
result_path = 'C:/Users/rashi.budati/Desktop/result_face_detector'
dsize = (224,224)
folderLen = len(imagePath)
for img in glob.glob(imagePath + "/*.png"):
image = cv2.imread(img)
# output = cv2.resize(image,dsize, interpolation = cv2.INTER_AREA)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(60, 60),flags=cv2.CASCADE_SCALE_IMAGE
)
print("[INFO] Found {0} Faces!".format(len(faces)))
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
roi_color = image[y:y + h, x:x + w]
print("[INFO] Object found. Saving locally.")
cv2.imwrite('C:/Users/rashi.budati/Desktop/result_face_detector/extracted_faces' + str(w) + str(h) + '_faces.jpg', roi_color)
status = cv2.imwrite(result_path+img[folderLen:], image)
print("[INFO] Image faces_detected.jpg written to filesystem: ", status)
# -
|
face_detector.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# First, let's load the JSON file which describes the human pose task. This is in COCO format, it is the category descriptor pulled from the annotations file. We modify the COCO category slightly, to add a neck keypoint. We will use this task description JSON to create a topology tensor, which is an intermediate data structure that describes the part linkages, as well as which channels in the part affinity field each linkage corresponds to.
# +
import json
import trt_pose.coco
with open('human_pose.json', 'r') as f:
human_pose = json.load(f)
topology = trt_pose.coco.coco_category_to_topology(human_pose)
# -
# Next, we'll load our model. Each model takes at least two parameters, *cmap_channels* and *paf_channels* corresponding to the number of heatmap channels
# and part affinity field channels. The number of part affinity field channels is 2x the number of links, because each link has a channel corresponding to the
# x and y direction of the vector field for each link.
# +
import trt_pose.models
num_parts = len(human_pose['keypoints'])
num_links = len(human_pose['skeleton'])
model = trt_pose.models.resnet18_baseline_att(num_parts, 2 * num_links).cuda().eval()
# -
# Next, let's load the model weights. You will need to download these according to the table in the README.
# +
import torch
MODEL_WEIGHTS = 'resnet18_baseline_att_224x224_A_epoch_249.pth'
model.load_state_dict(torch.load(MODEL_WEIGHTS))
# -
# In order to optimize with TensorRT using the python library *torch2trt* we'll also need to create some example data. The dimensions
# of this data should match the dimensions that the network was trained with. Since we're using the resnet18 variant that was trained on
# an input resolution of 224x224, we set the width and height to these dimensions.
# +
WIDTH = 224
HEIGHT = 224
data = torch.zeros((1, 3, HEIGHT, WIDTH)).cuda()
# -
# Next, we'll use [torch2trt](https://github.com/NVIDIA-AI-IOT/torch2trt) to optimize the model. We'll enable fp16_mode to allow optimizations to use reduced half precision.
# +
import torch2trt
model_trt = torch2trt.torch2trt(model, [data], fp16_mode=True, max_workspace_size=1<<25)
# -
# The optimized model may be saved so that we do not need to perform optimization again, we can just load the model. Please note that TensorRT has device specific optimizations, so you can only use an optimized model on similar platforms.
# +
OPTIMIZED_MODEL = 'resnet18_baseline_att_224x224_A_epoch_249_trt.pth'
torch.save(model_trt.state_dict(), OPTIMIZED_MODEL)
# -
# We could then load the saved model using *torch2trt* as follows.
# +
from torch2trt import TRTModule
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(OPTIMIZED_MODEL))
# -
# We can benchmark the model in FPS with the following code
# +
import time
t0 = time.time()
torch.cuda.current_stream().synchronize()
for i in range(50):
y = model_trt(data)
torch.cuda.current_stream().synchronize()
t1 = time.time()
print(50.0 / (t1 - t0))
# -
# Next, let's define a function that will preprocess the image, which is originally in BGR8 / HWC format.
# +
import cv2
import torchvision.transforms as transforms
import PIL.Image
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
device = torch.device('cuda')
def preprocess(image):
global device
device = torch.device('cuda')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(image)
image = transforms.functional.to_tensor(image).to(device)
image.sub_(mean[:, None, None]).div_(std[:, None, None])
return image[None, ...]
# -
# Next, we'll define two callable classes that will be used to parse the objects from the neural network, as well as draw the parsed objects on an image.
# +
from trt_pose.draw_objects import DrawObjects
from trt_pose.parse_objects import ParseObjects
parse_objects = ParseObjects(topology)
draw_objects = DrawObjects(topology)
# -
# Assuming you're using NVIDIA Jetson, you can use the [jetcam](https://github.com/NVIDIA-AI-IOT/jetcam) package to create an easy to use camera that will produce images in BGR8/HWC format.
#
# If you're not on Jetson, you may need to adapt the code below.
# +
# from jetcam.usb_camera import USBCamera
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg
# camera = USBCamera(width=WIDTH, height=HEIGHT, capture_fps=30)
camera = CSICamera(width=WIDTH, height=HEIGHT, capture_fps=30)
camera.running = True
# -
# Next, we'll create a widget which will be used to display the camera feed with visualizations.
# +
import ipywidgets
from IPython.display import display
image_w = ipywidgets.Image(format='jpeg')
display(image_w)
# -
# Finally, we'll define the main execution loop. This will perform the following steps
#
# 1. Preprocess the camera image
# 2. Execute the neural network
# 3. Parse the objects from the neural network output
# 4. Draw the objects onto the camera image
# 5. Convert the image to JPEG format and stream to the display widget
def execute(change):
image = change['new']
data = preprocess(image)
cmap, paf = model_trt(data)
cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
counts, objects, peaks = parse_objects(cmap, paf)#, cmap_threshold=0.15, link_threshold=0.15)
draw_objects(image, counts, objects, peaks)
image_w.value = bgr8_to_jpeg(image[:, ::-1, :])
# If we call the cell below it will execute the function once on the current camera frame.
execute({'new': camera.value})
# Call the cell below to attach the execution function to the camera's internal value. This will cause the execute function to be called whenever a new camera frame is received.
camera.observe(execute, names='value')
# Call the cell below to unattach the camera frame callbacks.
camera.unobserve_all()
|
tasks/human_pose/live_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (tensorflow)
# language: python
# name: tensorflow
# ---
import numpy as np, matplotlib.pyplot as plt, pandas as pd
dataset = pd.read_csv('Churn_Modelling.csv')
dataset.head()
X = dataset.iloc[:,3:13].values # Taking only relavent inputs
Y = dataset.iloc[:,13].values
X
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
#handling categorical data like geography and gender
ct = ColumnTransformer([("Geography", OneHotEncoder(),[1])], remainder="passthrough") # The last arg ([0]) is the list of columns you want to transform in this step
X = ct.fit_transform(X)
X = X[:,1:]
ct_2 = ColumnTransformer([("Gender", OneHotEncoder(),[3])], remainder="passthrough") # The last arg ([0]) is the list of columns you want to transform in this step
X = ct_2.fit_transform(X)
X = X[:,1:]
X[0:1,:]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
# +
#unit is number of nodes in hidden layer you choose it accroding to you or average of input and output dimension
# kernal_initializer is assigning initial weights and uniform means uniform number close to zero
#activation is activation function used in this hidden layer
#input_dim is number of input or number of independent variable this is must argue for first hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
# -
# 2nd hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# ouput layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
#optimizer is algo you want to use for handling weights we using stochastic gradient
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.fit(X_train, Y_train, batch_size = 10, epochs = 70)
# +
y_pred = classifier.predict(X_test) # This will give probabilities in y_pred
y_pred = (y_pred > 0.5) # This is will conver these prob in true and false so we can compare
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)
cm
# -
|
Deep Learning/Artificial Neural Network (ANN)/ANN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
d = pd.read_csv(r"C:\Users\SANJAY\Downloads\csv\dia.csv")
y=d['Class']
x=d.iloc[:,0:7]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.2,random_state=52)
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
x_train=sc_x.fit_transform(x_train)
x_test=sc_x.transform(x_test)
from sklearn.neighbors import KNeighborsClassifier
model=KNeighborsClassifier(n_neighbors=7)
model.fit(x_train, y_train)
y_pred=model.predict(x_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test, y_pred)
cm
from sklearn.metrics import accuracy_score
cm=accuracy_score(y_test, y_pred)
cm
|
KNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question Answering System
# In this example we will be going over the code used to build a question answering system. This example uses a modified BERT model to extract features from questions and Milvus to search for similar questions and answers.
# ## Data
# This example uses the [InsuranceQA Corpus](https://github.com/shuzi/insuranceQA) dataset, which contains 27,413 answers with the 3,065,492 running words of answers.
#
# Download location: https://github.com/chatopera/insuranceqa-corpus-zh/tree/release/corpus/pairs
#
# In this example, we use a small subset of the dataset that contains 100 pairs of quesiton-answers, it can be found under the **data** directory.
# ## Requirements
#
#
# | Packages | Servers |
# |- | - |
# | pymilvus | milvus-1.1.0 |
# | sentence_transformers | postgres |
# | psycopg2 |
# | pandas |
# | numpy |
#
# We have included a `requirements.txt` file in order to easily satisfy the required packages.
#
# ## Up and Running
# ### Installing Packages
# Install the required python packages with `requirements.txt`.
pip install -r requirements.txt
# ### Starting Milvus Server
#
# This demo uses Milvus 1.1.0, please refer to the [Install Milvus](https://milvus.io/docs/v1.1.0/install_milvus.md) guide to learn how to use this docker container. For this example we wont be mapping any local volumes.
# ! docker run --name milvus_cpu_1.1.0 -d \
# -p 19530:19530 \
# -p 19121:19121 \
# milvusdb/milvus:1.1.0-cpu-d050721-5e559c
# ### Starting Postgres Server
# For now, Milvus doesn't support storing string data. Thus, we need a relational database to store questions and answers. In this example, we use [PostgreSQL](https://www.postgresql.org/).
# ! docker run --name postgres -d -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres
# ### Confirm Running Servers
# ! docker logs milvus_cpu_1.1.0
# ! docker logs postgres --tail 6
# ## Code Overview
# ### Connecting to Servers
# We first start off by connecting to the servers. In this case the docker containers are running on localhost and the ports are the default ports.
# +
#Connectings to Milvus, BERT and Postgresql
import milvus
import psycopg2
milv = milvus.Milvus(host='localhost', port='19530')
conn = psycopg2.connect(host='localhost', port='5432', user='postgres', password='<PASSWORD>')
cursor = conn.cursor()
# -
# ### Creating Collection and Setting Index
# #### 1. Creating the Collection
# A collection in Milvus is similar to a table in a relational database, and is used for storing all the vectors.
# The required parameters for creating a collection are as follows:
# - `collection_name`: the name of a collection.
# - `dimension`: BERT generates 728-dimensional vectors.
# - `index_file_size`: how large each data segment will be within the collection.
# - `metric_type`: the distance formula being used to calculate similarity. In this example we are using Inner product (IP).
# +
TABLE_NAME = 'question_answering'
#Deleting previouslny stored table for clean run
milv.drop_collection(TABLE_NAME)
collection_param = {
'collection_name': TABLE_NAME,
'dimension': 768,
'index_file_size': 1024,
'metric_type': milvus.MetricType.IP
}
status = milv.create_collection(collection_param)
print(status)
# -
# #### 2. Setting an Index
# After creating the collection we want to assign it an index type. This can be done before or after inserting the data. When done before, indexes will be made as data comes in and fills the data segments. In this example we are using IVF_FLAT which requires the 'nlist' parameter. Each index types carries its own parameters. More info about this param can be found [here](https://milvus.io/docs/v1.1.0/index.md#CPU).
param = {'nlist': 40}
status = milv.create_index(TABLE_NAME, milvus.IndexType.IVF_FLAT, param)
print(status)
# ### Creating Table in Postgres
# PostgresSQL will be used to store the Milvus ID and its corresponding question-answer combo.
# +
#Deleting previouslny stored table for clean run
drop_table = "DROP TABLE IF EXISTS " + TABLE_NAME
cursor.execute(drop_table)
conn.commit()
try:
sql = "CREATE TABLE if not exists " + TABLE_NAME + " (ids bigint, question text, answer text);"
cursor.execute(sql)
conn.commit()
print("create postgres table successfully!")
except Exception as e:
print("can't create a postgres table: ", e)
# -
# ### Processing and Storing QA Dataset
# #### 1. Generating Embeddings
# In this example we are using the sentence_transformer library to encode the sentence into vectors. This library uses a modified BERT model to generate the embeddings, and in this example we are using a model pretrained using Microsoft's `mpnet`. More info can be found [here](https://www.sbert.net/docs/pretrained_models.html#sentence-embedding-models).
# +
from sentence_transformers import SentenceTransformer
import pandas as pd
from sklearn.preprocessing import normalize
model = SentenceTransformer('paraphrase-mpnet-base-v2')
# Get questions and answers.
data = pd.read_csv('data/example.csv')
question_data = data['question'].tolist()
answer_data = data['answer'].tolist()
sentence_embeddings = model.encode(question_data)
sentence_embeddings = normalize(sentence_embeddings)
# -
# #### 2. Inserting Vectors into Milvus
# Since this example dataset contains only 100 vectors, we are inserting all of them as one batch insert.
status, ids = milv.insert(collection_name=TABLE_NAME, records=sentence_embeddings)
print(status)
# #### 3. Inserting IDs and Questions-answer Combos into PostgreSQL
# In order to transfer the data into Postgres, we are creating a new file that combines all the data into a readable format. Once created, we pass this file into the Postgress server through STDIN due to the Postgres container not having access to the file locally.
# +
import os
def record_temp_csv(fname, ids, answer, question):
with open(fname,'w') as f:
for i in range(len(ids)):
line = str(ids[i]) + "|" + question[i] + "|" + answer[i] + "\n"
f.write(line)
def copy_data_to_pg(table_name, fname, conn, cur):
fname = os.path.join(os.getcwd(),fname)
try:
sql = "COPY " + table_name + " FROM STDIN DELIMITER '|' CSV HEADER"
cursor.copy_expert(sql, open(fname, "r"))
conn.commit()
print("Inserted into Postgress Sucessfully!")
except Exception as e:
print("Copy Data into Postgress failed: ", e)
DATA_WITH_IDS = 'data/test.csv'
record_temp_csv(DATA_WITH_IDS, ids, answer_data, question_data)
copy_data_to_pg(TABLE_NAME, DATA_WITH_IDS, conn, cursor)
# -
# ### Search
# #### 1. Processing Query
# When searching for a question, we first put the question through the same model to generate an embedding. Then with that embedding vector we can search for similar embeddings in Milvus.
#
# +
SEARCH_PARAM = {'nprobe': 40}
query_vec = []
question = "What is AAA?"
query_embeddings = []
embed = model.encode(question)
embed = embed.reshape(1,-1)
embed = normalize(embed)
query_embeddings = embed.tolist()
status, results = milv.search(collection_name=TABLE_NAME, query_records=query_embeddings, top_k=5, params=SEARCH_PARAM)
# -
# #### 2. Getting the Similar Questions
# There may not have questions that are similar to the given one. So we can set a threshold value, here we use 0.5, and when the most similar distance retrieved is less than this value, a hint that the system doesn't include the relevant question is returned. We then use the result ID's to pull out the similar questions from the Postgres server and print them with their corresponding similarity score.
# +
similar_questions = []
if results[0][0].distance < 0.5:
print("There are no similar questions in the database, here are the closest matches:")
else:
print("There are similar questions in the database, here are the closest matches: ")
for result in results[0]:
sql = "select question from " + TABLE_NAME + " where ids = " + str(result.id) + ";"
cursor.execute(sql)
rows=cursor.fetchall()
if len(rows):
similar_questions.append((rows[0][0], result.distance))
print((rows[0][0], result.distance))
# -
# #### 3. Get the answer
# After getting a list of similar questions, choose the one that you feel is closest to yours. Then you can use that question to find the corresponding answer in Postgres.
sql = "select answer from " + TABLE_NAME + " where question = '" + similar_questions[0][0] + "';"
cursor.execute(sql)
rows=cursor.fetchall()
print("Question:")
print(question)
print("Answer:")
print(rows[0][0])
|
solutions/question_answering_system/question_answering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Método k-fold de Validación Cruzada
# ## Defino la función kFold
#
# ### Parámetros
#
# **Datos:** Path al archivos train.csv.<br>
# **cantFolds:** Es la cantidad de folds. Se ejecuta kNN por cada fold, así que no conviene poner más de 5 para que no demore tanto la ejecución.<br>
# **k:** Parámetro k de kNN.<br>
# **alfa:** Parámetro alfa de PCA. Si ponen alfa=0 se ejecuta kNN sin PCA.<br>
# **semilla:** Es un parámetro opcional para que los resultados sean reproducibles, porque los datos de entrada se distribuyen en los folds de manera aleatoria.
#
# Devuelve una lista con el accuracy de cada ejecución de kNN
# +
import pandas as pd
import numpy as np
import metnum
from tqdm import tqdm
from sklearn.metrics import accuracy_score
def kFold(datos, cantFolds, k, alfa, semilla=None):
# Cargo los datos
df_train = pd.read_csv(datos)
# Desordeno los datos
df_train = df_train.sample(frac=1, random_state=semilla)
# Separo los datos en pixeles y etiquetas y los guardo en arrays de numpy
X = df_train[df_train.columns[1:]].values
y = df_train["label"].values.reshape(-1, 1)
# Aplico PCA
if (alfa > 0):
pca = metnum.PCA(alfa)
X = pca.transform(X)
# Particiono el conjunto de datos en k folds del mismo tamaño
folds = []
limites = [0]
for i in range(1,cantFolds+1):
datosPorFold = len(df_train)//cantFolds
limites.append(i*datosPorFold)
for i in range(len(limites)-1):
folds.append([X[limites[i]:limites[i+1]], y[limites[i]:limites[i+1]]])
# Aplico kNN
clf = metnum.KNNClassifier(k)
acc = []
for i in tqdm(range(cantFolds)):
# Creo una lista para separar el índice del fold que voy a usar para validar
indices = [[],i]
for j in range(cantFolds):
if (j != i):
indices[0].append(j)
X_val = folds[indices[1]][0]
y_val = folds[indices[1]][1]
# Uno todos los folds que voy a usar para entrenar en un único array de numpy
X_train = folds[indices[0][0]][0]
y_train = folds[indices[0][0]][1]
indices[0].pop()
for j in indices[0]:
X_train = np.concatenate([X_train, folds[j][0]])
y_train = np.concatenate([y_train, folds[j][1]])
# Aplico kNN
clf.fit(X_train, y_train)
y_pred = clf.predict(X_val)
acc.append(accuracy_score(y_val, y_pred))
return acc
# -
# ### Fijo los parámetros k de kNN, alfa de PCA y la cantidad de folds, y pruebo desordenar el dataset con varias semillas diferentes.
datos = "../data/train.csv"
semillas = [10,20,50,100,200,300,400,500,5000,6000,7000,8000]
resultados1 = []
alfa = 15
k = 10
cantFolds = 5
for s in tqdm(semillas):
resultados1.append(kFold(datos,cantFolds,k,alfa,s))
resultados1
resultados1 = [[0.9488095238095238,
0.9521428571428572,
0.9544047619047619,
0.9486904761904762,
0.9453571428571429],
[0.9517857142857142,
0.9501190476190476,
0.9479761904761905,
0.9486904761904762,
0.9513095238095238],
[0.9536904761904762,
0.949047619047619,
0.9502380952380952,
0.9473809523809524,
0.949047619047619],
[0.9514285714285714,
0.9523809523809523,
0.9523809523809523,
0.950952380952381,
0.9471428571428572],
[0.9513095238095238,
0.9508333333333333,
0.9492857142857143,
0.9492857142857143,
0.9528571428571428],
[0.9513095238095238,
0.9494047619047619,
0.9511904761904761,
0.9520238095238095,
0.9533333333333334],
[0.9471428571428572,
0.9502380952380952,
0.9508333333333333,
0.9513095238095238,
0.9539285714285715],
[0.9519047619047619,
0.9471428571428572,
0.9482142857142857,
0.9503571428571429,
0.9534523809523809],
[0.9525,
0.950595238095238,
0.9529761904761904,
0.9508333333333333,
0.9472619047619047],
[0.9501190476190476,
0.9527380952380953,
0.9514285714285714,
0.9465476190476191,
0.9489285714285715],
[0.9444047619047619,
0.9527380952380953,
0.9520238095238095,
0.9544047619047619,
0.9463095238095238],
[0.9528571428571428,
0.9478571428571428,
0.9489285714285715,
0.9466666666666667,
0.9525]]
plt.boxplot(resultados1)
plt.rcParams["figure.figsize"] = [15,15]
plt.xlabel('Semilla', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
plt.xticks(range(13), ["",10,20,50,100,200,300,400,500,5000,6000,7000,8000])
plt.savefig("kfold_resultados1.png", bbox_inches='tight')
# ### Ahora me fijo las diferencias entre los resultados promedio y los resultados de las iteraciones que dejan al último fold para validar
# +
promedios1 = []
for r in resultados1:
promedios1.append(np.mean(r))
diferencias1 = []
for r in resultados1:
diferencias1.append(abs(np.mean(r) - r[4]))
# -
promedios1
diferencias1
plt.boxplot(diferencias1)
plt.rcParams["figure.figsize"] = [10,15]
plt.xlabel('Error', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
plt.xticks([1],[""])
plt.savefig("kfold_diferencias1.png", bbox_inches='tight')
# ### Fijo una semilla y pruebo variando la cantidad de folds
# +
s = 100
cantFolds = [2,3,4,5,6,7,8,9,10,11,12,13,14,15]
resultados2 = []
for cf in cantFolds:
resultados2.append(kFold(datos,cf,k,alfa,s))
# -
resultados2
resultados2 = [[0.9473809523809524, 0.946952380952381],
[0.9442857142857143, 0.9452142857142857, 0.9439285714285715],
[0.9497142857142857, 0.9496190476190476, 0.9482857142857143, 0.946],
[0.9514285714285714,
0.9523809523809523,
0.9523809523809523,
0.950952380952381,
0.9471428571428572],
[0.9525714285714286,
0.9508571428571428,
0.9504285714285714,
0.9514285714285714,
0.9527142857142857,
0.9478571428571428],
[0.9516666666666667,
0.9525,
0.9523333333333334,
0.9521666666666667,
0.953,
0.951,
0.9486666666666667],
[0.952,
0.9527619047619048,
0.9544761904761905,
0.9542857142857143,
0.952952380952381,
0.9527619047619048,
0.9514285714285714,
0.9499047619047619],
[0.9541363051864552,
0.953493356193742,
0.9549935705100728,
0.9554222031718816,
0.9502786112301758,
0.9564937848264038,
0.9502786112301758,
0.9513501928846978,
0.9504929275610802],
[0.9538095238095238,
0.954047619047619,
0.9554761904761905,
0.9523809523809523,
0.9566666666666667,
0.9521428571428572,
0.9564285714285714,
0.9530952380952381,
0.9516666666666667,
0.9495238095238095],
[0.9531168150864326,
0.9588789942378209,
0.9518072289156626,
0.9539025667888947,
0.9596647459402828,
0.9499738082765846,
0.9583551597695128,
0.9531168150864326,
0.9544264012572027,
0.9494499738082766,
0.9489261393399686],
[0.952,
0.9582857142857143,
0.9522857142857143,
0.9551428571428572,
0.9542857142857143,
0.956,
0.9534285714285714,
0.958,
0.9517142857142857,
0.9554285714285714,
0.9497142857142857,
0.9497142857142857],
[0.9538699690402477,
0.958204334365325,
0.9523219814241486,
0.9523219814241486,
0.9538699690402477,
0.9588235294117647,
0.9489164086687306,
0.9578947368421052,
0.9575851393188854,
0.9541795665634675,
0.9575851393188854,
0.9492260061919504,
0.9486068111455108],
[0.9553333333333334,
0.9553333333333334,
0.954,
0.957,
0.953,
0.9566666666666667,
0.957,
0.9523333333333334,
0.9606666666666667,
0.954,
0.9536666666666667,
0.954,
0.9523333333333334,
0.9483333333333334],
[0.955,
0.9560714285714286,
0.9532142857142857,
0.9557142857142857,
0.9557142857142857,
0.9539285714285715,
0.9589285714285715,
0.9485714285714286,
0.96,
0.9575,
0.9532142857142857,
0.9542857142857143,
0.9517857142857142,
0.9539285714285715,
0.9471428571428572]]
# +
promedios2 = []
for r in resultados2:
promedios2.append(np.mean(r))
diferencias2 = []
for r in resultados2:
diferencias2.append(abs(np.mean(r) - r[len(r)-1]))
# -
promedios2
diferencias2
plt.boxplot(resultados2)
plt.rcParams["figure.figsize"] = [20,15]
plt.xticks(range(16),["",2,3,4,5,6,7,8,9,10,11,12,13,14,15])
plt.xlabel('Cantidad de folds', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
plt.savefig("kfold_resultados2.png", bbox_inches='tight')
|
tp2/notebooks/kfold.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="UT3GnHJaEziz" outputId="1899439d-e215-47f7-e1e6-a742aeb7fc1b"
# !pip install tensorflow -t ./
# + colab={} colab_type="code" id="PFppAGwlE9QO"
import numpy as np
import pandas as pd
import os
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="Hj_lRwSVfll2" outputId="687291df-febd-4934-fc80-3dcb78ef56a7"
# For Pip Installation
# !pip install --upgrade pip
# !pip install tqdm
# !pip install numpy==1.17.4
np.__version__
# + [markdown] colab_type="text" id="5o0x2LsYHnVr"
# ## Test the file directory of google drive with dummy csv
# not used since this is the sagemaker version
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="EvopVDVaGga3" outputId="9c137595-0ffd-4597-e2da-917b979db0b2"
os.getcwd()
# + colab={} colab_type="code" id="rfMFntMpGafu"
#a = pd.read_csv("./gdrive/My Drive/datasets/dummy.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="BM2aUBUxGpfL" outputId="a138e73f-213c-4d48-b0d7-15c7321000e2"
#a.head(5)
# + [markdown] colab_type="text" id="k9y4D5IfHuVI"
# ## Actual CV Task
# + colab={} colab_type="code" id="RouIPsTCHwma"
import os
import PIL
import json
import pickle
import numpy as np
from math import log, exp
from tqdm import tqdm
from random import shuffle
from PIL import ImageEnhance, ImageFont, ImageDraw
from IPython.display import Image, display
from multiprocessing import Pool
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras.utils.data_utils import Sequence
# + colab={} colab_type="code" id="7-rrTqzDITkg"
cat_list = ['tops', 'trousers', 'outerwear', 'dresses', 'skirts']
input_shape = (224,224,3)
wt_decay = 5e-4
dims_list = [(7,7),(14,14)]
aspect_ratios = [(1,1), (1,2), (2,1)]
# + colab={} colab_type="code" id="Q01wn6aNIV5b"
# Set up directory
data_folder = '/datasets/'
submission_folder = os.path.join(data_folder, 'submissions/')
train_imgs_folder = os.path.join(data_folder,'train/', 'train/')
train_annotations = os.path.join(data_folder,'train.json')
val_imgs_folder = os.path.join(data_folder,'val/','val/')
val_annotations = os.path.join(data_folder,'val.json')
train_pickle = os.path.join( data_folder, 'train.p/','train.p')
val_pickle = os.path.join( data_folder, 'val.p/','val.p')
save_model_folder = submission_folder
load_model_folder = submission_folder
# + colab={"base_uri": "https://localhost:8080/", "height": 185} colab_type="code" id="nBuDUwC5IdYn" outputId="e88a6195-0239-4372-8a89-5ff6c56bcaa8"
# Check if the directories are correct
print(train_imgs_folder)
print(train_annotations)
print(val_imgs_folder)
print(val_annotations)
print(train_pickle)
print(val_pickle)
print(submission_folder)
print(save_model_folder)
print(load_model_folder)
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="U87MX6fsIgC-" outputId="77b74624-7264-4d9d-a8bb-36a6e57a7494"
tf.__version__
# + [markdown] colab_type="text" id="FwhKCBvlIyu_"
# ## Data Augmentation Methods
# + colab={} colab_type="code" id="gEB7brBFIjBK"
# Helper methods: Computes the boundary of the image that includes all bboxes
def compute_reasonable_boundary(labels):
bounds = [ (x-w/2, x+w/2, y-h/2, y+h/2) for _,x,y,w,h in labels]
xmin = min([bb[0] for bb in bounds])
xmax = max([bb[1] for bb in bounds])
ymin = min([bb[2] for bb in bounds])
ymax = max([bb[3] for bb in bounds])
return xmin, xmax, ymin, ymax
def aug_horizontal_flip(img, labels):
flipped_labels = []
for c,x,y,w,h in labels:
flipped_labels.append( (c,1-x,y,w,h) )
return img.transpose(PIL.Image.FLIP_LEFT_RIGHT), np.array(flipped_labels)
def aug_crop(img, labels):
# Compute bounds such that no boxes are cut out
xmin, xmax, ymin, ymax = compute_reasonable_boundary(labels)
# Choose crop_xmin from [0, xmin]
crop_xmin = max( np.random.uniform() * xmin, 0 )
# Choose crop_xmax from [xmax, 1]
crop_xmax = min( xmax + (np.random.uniform() * (1-xmax)), 1 )
# Choose crop_ymin from [0, ymin]
crop_ymin = max( np.random.uniform() * ymin, 0 )
# Choose crop_ymax from [ymax, 1]
crop_ymax = min( ymax + (np.random.uniform() * (1-ymax)), 1 )
# Compute the "new" width and height of the cropped image
crop_w = crop_xmax - crop_xmin
crop_h = crop_ymax - crop_ymin
cropped_labels = []
for c,x,y,w,h in labels:
c_x = (x - crop_xmin) / crop_w
c_y = (y - crop_ymin) / crop_h
c_w = w / crop_w
c_h = h / crop_h
cropped_labels.append( (c,c_x,c_y,c_w,c_h) )
W,H = img.size
# Compute the pixel coordinates and perform the crop
impix_xmin = int(W * crop_xmin)
impix_xmax = int(W * crop_xmax)
impix_ymin = int(H * crop_ymin)
impix_ymax = int(H * crop_ymax)
return img.crop( (impix_xmin, impix_ymin, impix_xmax, impix_ymax) ), np.array( cropped_labels )
def aug_translate(img, labels):
# Compute bounds such that no boxes are cut out
xmin, xmax, ymin, ymax = compute_reasonable_boundary(labels)
trans_range_x = [-xmin, 1 - xmax]
tx = trans_range_x[0] + (np.random.uniform() * (trans_range_x[1] - trans_range_x[0]))
trans_range_y = [-ymin, 1 - ymax]
ty = trans_range_y[0] + (np.random.uniform() * (trans_range_y[1] - trans_range_y[0]))
trans_labels = []
for c,x,y,w,h in labels:
trans_labels.append((c,x+tx,y+ty,w,h))
W,H = img.size
tx_pix = int(W * tx)
ty_pix = int(H * ty)
return img.rotate(0, translate=(tx_pix, ty_pix)), np.array( trans_labels )
def aug_colorbalance(img, labels, color_factors=[0.2,2.0]):
factor = color_factors[0] + np.random.uniform() * (color_factors[1] - color_factors[0])
enhancer = ImageEnhance.Color(img)
return enhancer.enhance(factor), labels
def aug_contrast(img, labels, contrast_factors=[0.2,2.0]):
factor = contrast_factors[0] + np.random.uniform() * (contrast_factors[1] - contrast_factors[0])
enhancer = ImageEnhance.Contrast(img)
return enhancer.enhance(factor), labels
def aug_brightness(img, labels, brightness_factors=[0.2,2.0]):
factor = brightness_factors[0] + np.random.uniform() * (brightness_factors[1] - brightness_factors[0])
enhancer = ImageEnhance.Brightness(img)
return enhancer.enhance(factor), labels
def aug_sharpness(img, labels, sharpness_factors=[0.2,10.0]):
factor = sharpness_factors[0] + np.random.uniform() * (sharpness_factors[1] - sharpness_factors[0])
enhancer = ImageEnhance.Sharpness(img)
return enhancer.enhance(factor), labels
# Performs no augmentations and returns the original image and bbox. Used for the validation images.
def aug_identity(pil_img, label_arr):
return np.array(pil_img), label_arr
# This is the default augmentation scheme that we will use for each training image.
def aug_default(img, labels, p={'flip':0.5, 'crop':0.5, 'translate':0.5, 'color':0.2, 'contrast':0.2, 'brightness':0.2, 'sharpness':0.2}):
if p['color'] > np.random.uniform():
img, labels = aug_colorbalance(img, labels)
if p['contrast'] > np.random.uniform():
img, labels = aug_contrast(img, labels)
if p['brightness'] > np.random.uniform():
img, labels = aug_brightness(img, labels)
if p['sharpness'] > np.random.uniform():
img, labels = aug_sharpness(img, labels)
if p['flip'] > np.random.uniform():
img, labels = aug_horizontal_flip(img, labels)
if p['crop'] > np.random.uniform():
img, labels = aug_crop(img, labels)
if p['translate'] > np.random.uniform():
img, labels = aug_translate(img, labels)
return np.array(img), labels
# + [markdown] colab_type="text" id="DD-rtvHTI0pS"
# ## Custom Loss Function
# + colab={} colab_type="code" id="1srd_S6OIlNk"
# Shape of ypred: ( batch, i, j, aspect_ratios, 7 ). For a batch,i,j, we get #aspect_ratios vectors of length 7.
# Shape of ytrue: ( batch, i, j, aspect_ratios, 9 ). For a batch,i,j, we get #aspect_ratios vectors of length 9 (two more for objectness and cat/loc indicators)
def custom_loss(ytrue, ypred):
obj_loss_weight = 1.0
cat_loss_weight = 1.0
loc_loss_weight = 1.0
end_cat = len(cat_list) + 1
objloss_indicators = ytrue[:,:,:,:,-2:-1]
catlocloss_indicators = ytrue[:,:,:,:,-1:]
ytrue_obj, ypred_obj = ytrue[:,:,:,:,:1], ypred[:,:,:,:,:1]
ytrue_obj = tf.where( objloss_indicators != 0, ytrue_obj, 0 )
ypred_obj = tf.where( objloss_indicators != 0, ypred_obj, 0 )
objectness_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)( ytrue_obj, ypred_obj )
ytrue_cat, ypred_cat = ytrue[:,:,:,:,1:end_cat], ypred[:,:,:,:,1:end_cat]
ytrue_cat = tf.where( catlocloss_indicators != 0, ytrue_cat, 0 )
ypred_cat = tf.where( catlocloss_indicators != 0, ypred_cat, 0 )
categorical_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True) ( ytrue_cat, ypred_cat )
# Remember that ytrue is longer than ypred, so we will need to stop at index -2, which is where the indicators are stored
ytrue_loc, ypred_loc = ytrue[:,:,:,:,end_cat:-2], ypred[:,:,:,:,end_cat:]
ytrue_loc = tf.where( catlocloss_indicators != 0, ytrue_loc, 0 )
ypred_loc = tf.where( catlocloss_indicators != 0, ypred_loc, 0 )
localisation_loss = tf.keras.losses.Huber() ( ytrue_loc, ypred_loc )
return obj_loss_weight*objectness_loss + cat_loss_weight*categorical_loss + loc_loss_weight*localisation_loss
# + [markdown] colab_type="text" id="P22FJ9JNI6Lv"
# ## IOU
# + colab={} colab_type="code" id="yJNl2i-SIn7D"
# Computes the intersection-over-union (IoU) of two bounding boxes
def iou(bb1, bb2):
x1,y1,w1,h1 = bb1
xmin1 = x1 - w1/2
xmax1 = x1 + w1/2
ymin1 = y1 - h1/2
ymax1 = y1 + h1/2
x2,y2,w2,h2 = bb2
xmin2 = x2 - w2/2
xmax2 = x2 + w2/2
ymin2 = y2 - h2/2
ymax2 = y2 + h2/2
area1 = w1*h1
area2 = w2*h2
# Compute the boundary of the intersection
xmin_int = max(xmin1, xmin2)
xmax_int = min(xmax1, xmax2)
ymin_int = max(ymin1, ymin2)
ymax_int = min(ymax1, ymax2)
intersection = max(xmax_int - xmin_int, 0) * max(ymax_int - ymin_int, 0)
# Remove the double counted region
union = area1+area2-intersection
return intersection / union
# + [markdown] colab_type="text" id="SCf8Z3IzJAeZ"
# ## Sampling Schemes
# + colab={} colab_type="code" id="98QIHd94IrKL"
# Sampling schemes
def yolo_posneg_sampling(iou_scores_dict, label_tensor, gtclass, cat_list, iou_threshold=0.5):
iou_scores = []
for _, scores in iou_scores_dict.items():
iou_scores.extend(scores)
iou_scores.sort( key=lambda x: x[0], reverse=True )
top_iou_score = iou_scores.pop(0)
_, key, i, j, k, dx, dy, dw, dh = top_iou_score
zeros = [0] * len(cat_list)
payload = [1, *zeros, dx,dy,dw,dh]
payload[gtclass + 1] = 1
# Train objectness, class and loc for the positive
label_tensor[key][i,j,k,-2:] = 1
label_tensor[key][i,j,k,:len(payload)] = payload
# Train objectness only for the negatives
low_iou_scores = [iou_score for iou_score in iou_scores if iou_score[0] < iou_threshold]
for _, key, i, j, k, _, _, _, _ in low_iou_scores:
label_tensor[key][i,j,k,-2] = 1
def modified_yolo_posneg_sampling(iou_scores_dict, label_tensor, gtclass, cat_list, iou_threshold=0.5):
iou_scores = []
zeros = [0] * len(cat_list)
for _, scores in iou_scores_dict.items():
iou_scores.extend(scores)
iou_scores.sort( key=lambda x: x[0], reverse=True )
top_iou_score = iou_scores.pop(0)
_, key, i, j, k, dx, dy, dw, dh = top_iou_score
payload = [1, *zeros, dx,dy,dw,dh]
payload[gtclass + 1] = 1
# Train objectness, class and loc for the positive
label_tensor[key][i,j,k,-2:] = 1
label_tensor[key][i,j,k,:len(payload)] = payload
# Train objectness only for the negatives
low_iou_scores = [iou_score for iou_score in iou_scores if iou_score[0] < iou_threshold]
for _, key, i, j, k, _, _, _, _ in low_iou_scores:
label_tensor[key][i,j,k,-2] = 1
# Train cat/loc only for the in-betweens - those with high IoU but not positive
high_iou_scores = [iou_score for iou_score in iou_scores if iou_score[0] >= iou_threshold]
for _, key, i, j, k, dx, dy, dw, dh in high_iou_scores:
label_tensor[key][i,j,k,-1] = 1
payload = [0,*zeros,dx,dy,dw,dh]
payload[gtclass + 1] = 1
label_tensor[key][i,j,k,:len(payload)] = payload
def top_ratio_sampling(iou_scores_dict, label_tensor, gtclass, cat_list, positive_ratio=0.25):
iou_scores = []
# Let all tensors learn objectness score
for v in label_tensor.values():
v[:,:,:,-2] = 1
for _, iou_score_list in iou_scores_dict.items():
iou_score_list.sort( key=lambda x: x[0], reverse=True )
top_percentile_iou_scores = iou_score_list[:round(len(iou_score_list) * positive_ratio)]
# Include the rest that cross the IoU threshold
iou_score_list = top_percentile_iou_scores + [iou_score for iou_score in iou_score_list[len(top_percentile_iou_scores):] if iou_score[0] >= self.iou_threshold]
iou_scores.extend( iou_score_list )
for iou_score in iou_scores:
IoU, key, i, j, k, dx, dy, dw, dh = iou_score
zeros = [0] * len(cat_list)
payload = [IoU, *zeros, dx,dy,dw,dh]
payload[gtclass + 1] = 1
label_tensor[key][i,j,k,:len(payload)] = payload
# Set the classification/localisation indicator at this location to positive
label_tensor[key][i,j,k,-1] = 1
# + [markdown] colab_type="text" id="j48ueWUTJJI2"
# ## Encoding labels/ Decoding model output
# + colab={} colab_type="code" id="eBO__QLLJKWA"
'''
Encoder: label -> tensor
label_arr: np array like:
[[class_idx x y w h]]: num_labels x 5
...
Used to figure out for each label line, which tensor entry to shove it into.
If the box corresponding to the tensor entry overlaps the ground truth by at least a predefined threshold, then we shove it in.
'''
def encode_label(label_arr, dims_list, aspect_ratios, iou_fn, sampling_fn, cat_list):
num_entries = 7 + len(cat_list) # objectness, ... len(cat_list) ..., dx, dy, dw, dh, obj_indicator, catloc_indicator
np_labels = {}
for dims in dims_list:
dimkey = '{}x{}'.format(*dims)
np_labels[dimkey] = np.zeros( (*dims, len(aspect_ratios), num_entries ) )
for label in label_arr:
gtclass, gtx, gty, gtw, gth = label
gtclass = int(gtclass)
gt_bbox = [gtx, gty, gtw, gth]
iou_scores_dict = {}
for dims in dims_list:
key = '{}x{}'.format(*dims)
kx,ky = dims
gapx = 1.0 / kx
gapy = 1.0 / ky
# There are kx x ky tiles.
# For now, all have the same w,h of gapx,gapy.
# For the (i,j)-th tile, x = 0.5*gapx + i*gapx = (0.5+i)*gapx | y = (0.5+j)*gapy
for i in range(kx):
for j in range(ky):
for k in range( len(aspect_ratios) ):
dims_aspect_key = (*dims, k) # a 3-tuple: (dim1,dim2,ar)
if dims_aspect_key not in iou_scores_dict:
iou_scores_dict[dims_aspect_key] = []
x = (0.5+i)*gapx
y = (0.5+j)*gapy
# Different aspect ratios alter the anchor box default dimensions
w = gapx * aspect_ratios[k][0]
h = gapy * aspect_ratios[k][1]
cand_bbox = [x,y,w,h]
# SSD formulation
dx = (gtx - x) / w
dy = (gty - y) / h
dw = log( gtw / w )
dh = log( gth / h )
int_over_union = iou_fn( cand_bbox, gt_bbox )
iou_scores_dict[dims_aspect_key].append( (int_over_union, key, i, j, k, dx, dy, dw, dh) )
sampling_fn( iou_scores_dict, np_labels, gtclass, cat_list )
return np_labels
def decode_tensor(pred_dict, aspect_ratios):
results = []
for dim_str, pred_tensor in pred_dict.items():
pred_tensor = pred_tensor[0] # remove the batch
kx, ky = [int(g) for g in dim_str.split('x')]
gapx = 1. / kx
gapy = 1. / ky
# We trained without activations, so we need to process the logits into probabilities/scores
pred_arr = np.array(pred_tensor)
obj_logits = pred_arr[:,:,:,0]
obj_scores = 1. / (1 + np.exp(-obj_logits))
pred_arr[:,:,:,0] = obj_scores
cls_logits = pred_arr[:,:,:,1:-4]
cls_scores = np.exp(cls_logits)
cls_scores = cls_scores / cls_scores.sum(axis=-1)[...,np.newaxis]
pred_arr[:,:,:,1:-4] = cls_scores
for k, ar in enumerate(aspect_ratios):
for i in range(kx):
for j in range(ky):
cx = (0.5+i)*gapx
cy = (0.5+j)*gapy
w = gapx * ar[0]
h = gapy * ar[1]
payload = pred_arr[i,j,k]
obj_score = payload[0]
dx, dy, dw, dh = payload[-4:]
cls_probs = payload[1:-4]
predx = (dx * w) + cx
predy = (dy * h) + cy
predw = w * exp( dw )
predh = h * exp( dh )
max_cls_idx = np.argmax( cls_probs )
max_cls_prob = cls_probs[max_cls_idx]
category_id = max_cls_idx + 1
det_score = obj_score * max_cls_prob
results.append( (det_score, category_id, predx, predy, predw, predh) )
return results
# + [markdown] colab_type="text" id="IhMy4TlYJW4j"
# ## Data Generators
# + colab={} colab_type="code" id="CZ_snVQlJROm"
class TILSequence(Sequence):
def __init__(self, img_folder, json_annotation_file, batch_size, augment_fn, input_size, label_encoder, preprocess_fn, testmode=False):
self._prepare_data(img_folder, json_annotation_file)
self.batch_size = batch_size
self.augment_fn = augment_fn
self.input_wh = (*input_size[:2][::-1],input_size[2])
self.label_encoder = label_encoder
self.preprocess_fn = preprocess_fn
self.testmode = testmode
def _prepare_data(self, img_folder, json_annotation_file):
imgs_dict = {im.split('.')[0]:im for im in os.listdir(img_folder) if im.endswith('.jpg')}
data_dict = {}
with open(json_annotation_file, 'r') as f:
annotations_dict = json.load(f)
annotations_list = annotations_dict['annotations']
for annotation in annotations_list:
img_id = str(annotation['image_id'])
c = annotation['category_id'] - 1 # TODO: make sure that category ids start from 1, not 0
boxleft,boxtop,boxwidth,boxheight = annotation['bbox']
if img_id in imgs_dict:
img_fp = os.path.join(img_folder, imgs_dict[img_id])
imwidth,imheight = PIL.Image.open(img_fp).size
if img_id not in data_dict:
data_dict[img_id] = []
box_cenx = boxleft + boxwidth/2.
box_ceny = boxtop + boxheight/2.
x,y,w,h = box_cenx/imwidth, box_ceny/imheight, boxwidth/imwidth, boxheight/imheight
data_dict[img_id].append( [c,x,y,w,h] )
self.x, self.y, self.ids = [], [], []
for img_id, labels in data_dict.items():
self.x.append( os.path.join(img_folder, imgs_dict[img_id]) )
self.y.append( np.array(labels) )
self.ids.append( img_id )
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
return self.get_batch_test(idx) if self.testmode else self.get_batch(idx)
def get_batch_test(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_ids = self.ids[idx * self.batch_size:(idx + 1) * self.batch_size]
x_acc, y_acc = [], {}
original_img_dims = []
with Pool(self.batch_size) as p:
# Read in the PIL objects from filepaths
batch_x = p.map(load_img, batch_x)
for x,y in zip( batch_x, batch_y ):
W,H = x.size
original_img_dims.append( (W,H) )
x_aug, y_aug = self.augment_fn( x, y )
if x_aug.size != self.input_wh[:2]:
x_aug.resize( self.input_wh )
x_acc.append( np.array(x_aug) )
y_dict = self.label_encoder( y_aug )
for dimkey, label in y_dict.items():
if dimkey not in y_acc:
y_acc[dimkey] = []
y_acc[dimkey].append( label )
return batch_ids, original_img_dims, self.preprocess_fn( np.array( x_acc ) ), { dimkey: np.array( gt_tensor ) for dimkey, gt_tensor in y_acc.items() }
def get_batch(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
x_acc, y_acc = [], {}
with Pool(self.batch_size) as p:
# Read in the PIL objects from filepaths
batch_x = p.map(load_img, batch_x)
for x,y in zip( batch_x, batch_y ):
x_aug, y_aug = self.augment_fn( x, y )
if x_aug.size != self.input_wh[:2]:
x_aug.resize( self.input_wh )
x_acc.append( np.array(x_aug) )
y_dict = self.label_encoder( y_aug )
for dimkey, label in y_dict.items():
if dimkey not in y_acc:
y_acc[dimkey] = []
y_acc[dimkey].append( label )
return self.preprocess_fn( np.array( x_acc ) ), { dimkey: np.array( gt_tensor ) for dimkey, gt_tensor in y_acc.items() }
# + colab={} colab_type="code" id="7_s5AA9mJZui"
class TILPickle(Sequence):
def __init__(self, pickle_file, batch_size, augment_fn, input_size, label_encoder, preprocess_fn, testmode=False):
with open(pickle_file, 'rb') as p:
self.ids, self.x, self.y = pickle.load(p)
self.batch_size = batch_size
self.augment_fn = augment_fn
self.input_wh = (*input_size[:2][::-1],input_size[2])
self.label_encoder = label_encoder
self.preprocess_fn = preprocess_fn
self.testmode = testmode
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_ids = self.ids[idx * self.batch_size:(idx + 1) * self.batch_size]
x_acc, y_acc = [], {}
for x,y in zip( batch_x, batch_y ):
x_aug, y_aug = self.augment_fn( x, y )
if x_aug.size != self.input_wh[:2]:
x_aug.resize( self.input_wh )
x_acc.append( np.array(x_aug) )
y_dict = self.label_encoder( y_aug )
for dimkey, label in y_dict.items():
if dimkey not in y_acc:
y_acc[dimkey] = []
y_acc[dimkey].append(label)
if self.testmode:
return batch_ids, self.preprocess_fn( np.array( x_acc ) ), { dimkey: np.array( gt_tensor ) for dimkey, gt_tensor in y_acc.items() }
return self.preprocess_fn( np.array( x_acc ) ), { dimkey: np.array( gt_tensor ) for dimkey, gt_tensor in y_acc.items() }
# + [markdown] colab_type="text" id="6wxIQJBBJ-0u"
# ## Constructing Models
# + colab={} colab_type="code" id="fM0J24l-JcAg"
def transfer_model(backbone_model, input_shape, dims_list, num_aspect_ratios, wt_decay, model_name='transfer-objdet-model'):
inputs = keras.Input(shape=input_shape)
backbone_output = backbone_model(inputs) #7
x = layers.Conv2D(512, 1, padding='same', kernel_regularizer=l2(wt_decay))(backbone_output) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 3, padding='valid', kernel_regularizer=l2(wt_decay))(x) #5
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(256, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #5
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 3, padding='valid', kernel_regularizer=l2(wt_decay))(x) #3
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(256, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #3
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 3, padding='same', kernel_regularizer=l2(wt_decay))(x) #3
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
# You can accumulate more scales via shortcut. Imagine each (n,m) is a grid super-imposed on the original image.
# See the next cell for an example for more scales.
dim_tensor_map = {'3x3': x}
# For each dimension, construct a predictions tensor. Accumulate them into a dictionary for keras to understand multiple labels.
preds_dict = {}
for dims in dims_list:
dimkey = '{}x{}'.format(*dims)
tens = dim_tensor_map[dimkey]
ar_preds = []
for _ in range(num_aspect_ratios):
objectness_preds = layers.Conv2D(1, 1, kernel_regularizer=l2(wt_decay))( tens )
class_preds = layers.Conv2D(len(cat_list), 1, kernel_regularizer=l2(wt_decay))( tens )
bbox_preds = layers.Conv2D(4, 1, kernel_regularizer=l2(wt_decay))( tens )
ar_preds.append( layers.Concatenate()([objectness_preds, class_preds, bbox_preds]) )
if num_aspect_ratios > 1:
predictions = layers.Concatenate()(ar_preds)
elif num_aspect_ratios == 1:
predictions = ar_preds[0]
predictions = layers.Reshape( (*dims, num_aspect_ratios, 5+len(cat_list)), name=dimkey )(predictions)
preds_dict[dimkey] = predictions
model = keras.Model(inputs, preds_dict, name=model_name)
model.compile( optimizer=tf.keras.optimizers.Adam(1e-5),
loss=custom_loss )
return model
# + colab={} colab_type="code" id="cbA9GtEgJ_c6"
def transfer_model_7x7_14x14(backbone_model, input_shape, dims_list, num_aspect_ratios, wt_decay, model_name='transfer-objdet-model-7x7-14x14'):
inputs = keras.Input(shape=input_shape)
intermediate_layer_model = keras.Model(inputs=backbone_model.input,
outputs=backbone_model.get_layer('conv4_block6_out').output)
intermediate_output = intermediate_layer_model(inputs) #14
backbone_output = backbone_model(inputs) #7
x = layers.Conv2D(512, 1, padding='same', kernel_regularizer=l2(wt_decay))(backbone_output) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(1024, 3, padding='same', kernel_regularizer=l2(wt_decay))(x) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(1024, 3, padding='same', kernel_regularizer=l2(wt_decay))(x) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #7
x = layers.BatchNormalization()(x)
upsample = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(2048, 3, padding='same', kernel_regularizer=l2(wt_decay))(upsample) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
tens_7x7 = layers.Add()([x,backbone_output])
x = layers.Conv2D(256, 1, padding='same', kernel_regularizer=l2(wt_decay))(upsample) #7
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2DTranspose(512, 5, strides=(2, 2), padding='same')(x) #14
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Concatenate()([x,intermediate_output])
x = layers.Conv2D(256, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #14
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 3, padding='same', kernel_regularizer=l2(wt_decay))(x) #14
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(256, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #14
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 3, padding='same', kernel_regularizer=l2(wt_decay))(x) #14
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(256, 1, padding='same', kernel_regularizer=l2(wt_decay))(x) #14
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.01)(x)
x = layers.Conv2D(512, 3, padding='same', kernel_regularizer=l2(wt_decay))(x) #14
x = layers.BatchNormalization()(x)
tens_14x14 = layers.LeakyReLU(0.01)(x)
dim_tensor_map = {'7x7': tens_7x7, '14x14': tens_14x14}
# For each dimension, construct a predictions tensor. Accumulate them into a dictionary for keras to understand multiple labels.
preds_dict = {}
for dims in dims_list:
dimkey = '{}x{}'.format(*dims)
tens = dim_tensor_map[dimkey]
ar_preds = []
for _ in range(num_aspect_ratios):
objectness_preds = layers.Conv2D(1, 1, kernel_regularizer=l2(wt_decay))( tens )
class_preds = layers.Conv2D(len(cat_list), 1, kernel_regularizer=l2(wt_decay))( tens )
bbox_preds = layers.Conv2D(4, 1, kernel_regularizer=l2(wt_decay))( tens )
ar_preds.append( layers.Concatenate()([objectness_preds, class_preds, bbox_preds]) )
if num_aspect_ratios > 1:
predictions = layers.Concatenate()(ar_preds)
elif num_aspect_ratios == 1:
predictions = ar_preds[0]
predictions = layers.Reshape( (*dims, num_aspect_ratios, 5+len(cat_list)), name=dimkey )(predictions)
preds_dict[dimkey] = predictions
model = keras.Model(inputs, preds_dict, name=model_name)
model.compile( optimizer=tf.keras.optimizers.Adam(1e-5),
loss=custom_loss )
return model
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="sut7VnL2KEVK" outputId="b14ffeca-b32c-4d5f-c39d-56a1aadbcd22"
# Choose whether to start a new model
# or load a previously trained one
model_context = 'model-7x7-14x14-3aspect-modyoloposneg-wd{}'.format(wt_decay)
#load_model_path = os.path.join( load_model_folder, '{}-best_val_loss.h5'.format(model_context) )
load_model_path = None
if load_model_path is None:
backbone_model = tf.keras.applications.ResNet50(input_shape=input_shape, include_top=False)
model = transfer_model_7x7_14x14(backbone_model, input_shape=input_shape, dims_list=dims_list, num_aspect_ratios=len(aspect_ratios), wt_decay=wt_decay, model_name=model_context+'-res50')
else:
model = tf.keras.models.load_model(load_model_path, custom_objects={'custom_loss':custom_loss})
model.summary()
# + [markdown] colab_type="text" id="B-8zEFSUKWhq"
# ## Training/Transfer Learning of Model
# + colab={} colab_type="code" id="4W9qNyKBKIFQ"
'''
- There is overfitting now that I set top 25% (of each dim-ar combination) as positives. How?
- Larger image size - maybe 448
- Transfer learning
- Change weights of losses?
# Also add more callbacks, such as tensorboard
dataset, batch_size, augment_fn, input_size, label_encoder, preprocess_fn
encode_label(label_arr, dims_list, aspect_ratios, iou_fn, sampling_fn, cat_list)
img_folder, json_annotation_file, batch_size, augment_fn, input_size, label_encoder, preprocess_fn
'''
bs=16
n_epochs_warmup = 30
n_epochs_after = 70
label_encoder = lambda y: encode_label(y, dims_list, aspect_ratios, iou, modified_yolo_posneg_sampling, cat_list)
preproc_fn = lambda x: x / 255.
print('Creating training sequence...')
train_sequence = TILSequence(train_imgs_folder, train_annotations, bs, aug_default, input_shape, label_encoder, preproc_fn)
train_sequence = TILPickle(train_pickle, bs, aug_default, input_shape, label_encoder, preproc_fn)
print('Creating validation sequence...')
val_sequence = TILSequence(val_imgs_folder, val_annotations, bs, aug_identity, input_shape, label_encoder, preproc_fn)
val_sequence = TILPickle(val_pickle, bs, aug_identity, input_shape, label_encoder, preproc_fn)
# + colab={} colab_type="code" id="SRBSZy-iKXmh"
save_model_path = os.path.join(save_model_folder, '{}-best_val_loss.h5'.format(model_context))
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=save_model_path,
save_weights_only=False,
monitor='val_loss',
mode='auto',
save_best_only=True)
earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-8)
for layer in backbone_model.layers:
layer.trainable = False
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="7ykhJC9EKi9R" outputId="9ce78464-8ed6-43a0-94d7-b2f8339347cc"
print('Warming up the model...')
model.fit(x=train_sequence,
epochs=n_epochs_warmup,
validation_data=val_sequence,
callbacks=[model_checkpoint_callback, earlystopping, reduce_lr])
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="v8R3xiNAKlSX" outputId="3563781b-5443-4bb2-c45c-63ccdb2e47b9"
# Fine tuning
print('Model warmed. Loading best val version of model...')
load_model_path = os.path.join( load_model_folder, '{}-best_val_loss.h5'.format(model_context) )
del model
model = tf.keras.models.load_model(load_model_path, custom_objects={'custom_loss':custom_loss})
for layer in model.get_layer('resnet50').layers:
layer.trainable = True
model.compile(optimizer=tf.keras.optimizers.Adam(1e-5), loss=custom_loss)
model_context = 'ft-' + model_context
save_model_path = os.path.join( save_model_folder, '{}-best_val_loss.h5'.format(model_context) )
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=save_model_path,
save_weights_only=False,
monitor='val_loss',
mode='auto',
save_best_only=True)
earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-8)
model.fit(x=train_sequence,
epochs=n_epochs_after,
validation_data=val_sequence,
callbacks=[model_checkpoint_callback, earlystopping, reduce_lr])
# Final save
model.save(os.path.join(save_model_folder, '{}-final.h5'.format(model_context)))
# + [markdown] colab_type="text" id="VtzLZ4MruJVM"
# ## Non-max suppression
# + colab={} colab_type="code" id="Rz2w3a_il9ba"
# To fix multiple, we introduce non-maximum suppression, or NMS for short
def nms(detections, iou_thresh=0.):
dets_by_class = {}
final_result = []
for det in detections:
cls = det[1]
if cls not in dets_by_class:
dets_by_class[cls] = []
dets_by_class[cls].append( det )
for _, dets in dets_by_class.items():
candidates = list(dets)
candidates.sort( key=lambda x:x[0], reverse=True )
while len(candidates) > 0:
candidate = candidates.pop(0)
_,_,cx,cy,cw,ch = candidate
copy = list(candidates)
for other in candidates:
# Compute the IoU. If it exceeds thresh, we remove it
_,_,ox,oy,ow,oh = other
if iou( (cx,cy,cw,ch), (ox,oy,ow,oh) ) > iou_thresh:
copy.remove(other)
candidates = list(copy)
final_result.append(candidate)
return final_result
# + [markdown] colab_type="text" id="DG7zDN-PuTo6"
# ## Load a pre-trained model
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="NBvbAWSVuRDs" outputId="cb71795d-4e66-4fd9-d062-94a418d05de4"
# load the model
load_model_path = os.path.join( load_model_folder, 'model-7x7-14x14-3aspect-modyoloposneg-wd0.0005-best_val_loss.h5' )
model = tf.keras.models.load_model(load_model_path, custom_objects={'custom_loss':custom_loss})
# + colab={} colab_type="code" id="85U566AJuZZJ"
# load the test data
label_encoder = lambda y: encode_label(y, dims_list, aspect_ratios, iou, modified_yolo_posneg_sampling, cat_list)
preproc_fn = lambda x: x / 255.
test_sequence_pickle = TILPickle(val_pickle, 1, aug_identity, input_shape, label_encoder, preproc_fn)
# + colab={} colab_type="code" id="y9ZYrvJyWNwO"
test_sequence = TILSequence(val_imgs_folder, val_annotations, 1, aug_identity, input_shape, label_encoder, preproc_fn, testmode=True)
# + [markdown] colab_type="text" id="V_42ECKouX4y"
# ## Visualize Model Output
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="v93eWhNZud3P" outputId="854d9867-85c8-4702-f305-ff26d615bfe5"
# Run this to visualize
rank_colors = ['cyan', 'magenta', 'pink']
det_threshold=0.
top_dets=3
start=0
end=20
for k in range(start,end):
img_arr, label_cxywh = test_sequence_pickle[k]
img_arr = img_arr[0]
pil_img = PIL.Image.fromarray( (img_arr * 255.).astype(np.uint8) )
W,H = pil_img.size
pred_dict = model(np.array([img_arr]))
preds = decode_tensor( pred_dict, aspect_ratios )
# Post-processing
preds.sort( key=lambda x:x[0], reverse=True )
preds = [pred for pred in preds if pred[0] >= det_threshold]
preds = preds[:top_dets]
preds = nms(preds, iou_thresh=0.5)
draw_img = pil_img.copy()
draw = ImageDraw.Draw(draw_img)
for i, pred in enumerate(preds):
conf,cls,x,y,w,h = pred
bb_x = int(x * W)
bb_y = int(y * H)
bb_w = int(w * W)
bb_h = int(h * H)
left = int(bb_x - bb_w / 2)
top = int(bb_y - bb_h / 2)
right = int(bb_x + bb_w / 2)
bot = int(bb_y + bb_h / 2)
cls_str = cat_list[cls-1]
draw.rectangle(((left, top), (right, bot)), outline=rank_colors[i])
draw.text((bb_x, bb_y), cls_str, fill=rank_colors[i])
draw.text( ( int(left + bb_w*.1), int(top + bb_h*.1) ), '{:.2f}'.format(conf), fill=rank_colors[i] )
display(draw_img)
# + [markdown] colab_type="text" id="3IWZgZULSJWV"
# ## Generating detections
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="6Xu57IkMR1jK" outputId="7df1b201-8df8-4895-c096-4a3a601b3fc3"
# Generating detections on the folder of validation images
detections = []
det_threshold=0.
for i in tqdm(range(len(test_sequence))):
img_id, dims, input_arr, _ = test_sequence[i]
img_id = int(img_id[0])
W,H = dims[0]
# Here, I'm inferencing one-by-one, but you can batch it if you want it faster
pred_dict = model(input_arr)
preds = decode_tensor( pred_dict, aspect_ratios )
# Post-processing
preds = [pred for pred in preds if pred[0] >= det_threshold]
preds.sort( key=lambda x:x[0], reverse=True )
preds = preds[:100] # we only evaluate you on 100 detections per image
for i, pred in enumerate(preds):
conf,cat_id,x,y,w,h = pred
left = W * (x - w/2.)
left = round(left,1)
top = H * (y - h/2.)
top = round(top,1)
width = W*w
width = round(width,1)
height = H*h
height = round(height,1)
conf = float(conf)
cat_id = int(cat_id)
detections.append( {'image_id':img_id, 'category_id':cat_id, 'bbox':[left, top, width, height], 'score':conf} )
with open('detections-7x7-14x14-top100-nonms.json', 'w') as f:
json.dump(detections, f)
# + colab={"base_uri": "https://localhost:8080/", "height": 372} colab_type="code" id="iMOqbqfkSJKO" outputId="4f6b1757-ccef-488a-97dc-a9d6de0cdedb"
#This installation is a modified version of the original to suit this competition
# ! pip install git+https://github.com/jinmingteo/cocoapi.git#subdirectory=PythonAPI
# + colab={} colab_type="code" id="FnEFeuvUdiKK"
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# + colab={"base_uri": "https://localhost:8080/", "height": 482} colab_type="code" id="amv4RY1Ndo2H" outputId="8997b833-cabc-46cb-958f-d32ac4d2bb0a"
# Get evaluation score against validation set
coco_gt = COCO(val_annotations)
coco_dt = coco_gt.loadRes('./gdrive/My Drive/datasets/detections-7x7-14x14-top100-nonms.json')
cocoEval = COCOeval(cocoGt=coco_gt, cocoDt=coco_dt, iouType='bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
# + colab={} colab_type="code" id="IczmnrU8hF3s"
# + colab={} colab_type="code" id="UJ2xmDV6drOQ"
|
CV-sagemaker.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Generating C Code to implement Method of Lines Timestepping for Explicit Runge Kutta Methods
#
# ## Authors: <NAME> & <NAME>
#
# ## This tutorial notebook generates three blocks of C Code in order to perform Method of Lines timestepping.
#
# **Notebook Status:** <font color='green'><b> Validated </b></font>
#
# **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). All Runge-Kutta Butcher tables were validated using truncated Taylor series in [a separate module](Tutorial-RK_Butcher_Table_Validation.ipynb). Finally, C-code implementation of RK4 was validated against a trusted version. C-code implementations of other RK methods seem to work as expected in the context of solving the scalar wave equation in Cartesian coordinates.
#
# ### NRPy+ Source Code for this module:
# * [MoLtimestepping/C_Code_Generation.py](../edit/MoLtimestepping/C_Code_Generation.py)
# * [MoLtimestepping/RK_Butcher_Table_Dictionary.py](../edit/MoLtimestepping/RK_Butcher_Table_Dictionary.py) ([**Tutorial**](Tutorial-RK_Butcher_Table_Dictionary.ipynb)) Stores the Butcher tables for the explicit Runge Kutta methods
#
# ## Introduction:
#
# When numerically solving a partial differential equation initial-value problem, subject to suitable boundary conditions, we implement Method of Lines to "integrate" the solution forward in time.
#
#
# ### The Method of Lines:
#
# Once we have the initial data for a PDE, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle
# 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and
# 2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs), like Runge Kutta methods** so long as the initial value problem PDE can be written in the first-order-in-time form
# $$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$
# where $\mathbf{M}$ is an $N\times N$ matrix containing only *spatial* differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$.
#
# You may find the next module [Tutorial-ScalarWave](Tutorial-ScalarWave.ipynb) extremely helpful as an example for implementing the Method of Lines for solving the Scalar Wave equation in Cartesian coordinates.
#
# ### Generating the C code:
# This module describes how three C code blocks are written to implement Method of Lines timestepping for a specified RK method. The first block is dedicated to allocating memory for the appropriate number of grid function lists needed for the given RK method. The second block will implement the Runge Kutta numerical scheme based on the corresponding Butcher table. The third block will free up the previously allocated memory after the Method of Lines run is complete. These blocks of code are stored within the following three header files respectively
#
# 1. `MoLtimestepping/RK_Allocate_Memory.h`
# 1. `MoLtimestepping/RK_MoL.h`
# 1. `MoLtimestepping/RK_Free_Memory.h`
#
# The generated code is then included in future Start-to-Finish example tutorial notebooks when solving PDEs numerically.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 1. [Step 1](#initializenrpy): Initialize needed Python/NRPy+ modules
# 1. [Step 2](#diagonal): Checking if Butcher Table is Diagonal
# 1. [Step 3](#ccode): Generating the C Code
# 1. [Step 3.a](#allocate): Allocating Memory, `MoLtimestepping/RK_Allocate_Memory.h`
# 1. [Step 3.b](#rkmol): Implementing the Runge Kutta Scheme for Method of Lines Timestepping, `MoLtimestepping/RK_MoL.h`
# 1. [Step 3.c](#free): Freeing Allocated Memory, `MoLtimestepping/RK_Free_Memory.h`
# 1. [Step 4](#code_validation): Code Validation against `MoLtimestepping.RK_Butcher_Table_Generating_C_Code` NRPy+ module
# 1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='initializenrpy'></a>
#
# # Step 1: Initialize needed Python/NRPy+ modules [Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# Let's start by importing all the needed modules from Python/NRPy+:
import sympy as sp
import NRPy_param_funcs as par
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
# <a id='diagonal'></a>
#
# # Step 2: Checking if a Butcher table is Diagonal [Back to [top](#toc)\]
# $$\label{diagonal}$$
#
# A diagonal Butcher table takes the form
#
# $$\begin{array}{c|cccccc}
# 0 & \\
# a_1 & a_1 & \\
# a_2 & 0 & a_2 & \\
# a_3 & 0 & 0 & a_3 & \\
# \vdots & \vdots & \ddots & \ddots & \ddots \\
# a_s & 0 & 0 & 0 & \cdots & a_s \\ \hline
# & b_1 & b_2 & b_3 & \cdots & b_{s-1} & b_s
# \end{array}$$
#
# where $s$ is the number of required predictor-corrector steps for a given RK method (see [<NAME>. (2008)](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470753767)). One known diagonal RK method is the classic RK4 represented in Butcher table form as:
#
# $$\begin{array}{c|cccc}
# 0 & \\
# 1/2 & 1/2 & \\
# 1/2 & 0 & 1/2 & \\
# 1 & 0 & 0 & 1 & \\ \hline
# & 1/6 & 1/3 & 1/3 & 1/6
# \end{array} $$
#
# Diagonal Butcher tables are nice when it comes to saving required memory space. Each new step for a diagonal RK method, when computing the new $k_i$, does not depend on the previous calculation, and so there are ways to save memory. Signifcantly so in large three-dimensional spatial grid spaces.
# +
def diagonal(key):
diagonal = True # Start with the Butcher table is diagonal
Butcher = Butcher_dict[key][0]
L = len(Butcher)-1 # Establish the number of rows to check for diagonal trait, all bust last row
row_idx = 0 # Initialize the Butcher table row index
for i in range(L): # Check all the desired rows
for j in range(1,row_idx): # Check each element before the diagonal element in a row
if Butcher[i][j] != sp.sympify(0): # If any element is non-zero, then the table is not diagonal
diagonal = False
break
row_idx += 1 # Update to check the next row
return diagonal
# State whether each Butcher table is diagonal or not
for key, value in Butcher_dict.items():
if diagonal(key) == True:
print("The RK method "+str(key)+" is diagonal!")
else:
print("The RK method "+str(key)+" is NOT diagonal!")
# -
# <a id='ccode'></a>
#
# # Step 3: Generating the C Code [Back to [top](#toc)\]
# $$\label{ccode}$$
#
# The following sections build up the C code for implementing the Method of Lines timestepping algorithm for solving PDEs. To see what the C code looks like for a particular method, simply change the `RK_method` below, otherwise it will default to `"RK4"`.
# <a id='allocate'></a>
#
# ## Step 3.a: Allocating Memory, `MoLtimestepping/RK_Allocate_Memory.h` [Back to [top](#toc)\]
# $$\label{allocate}$$
#
# We define the function `RK_Allocate()` which generates the C code for allocating the memory for the appropriate number of grid function lists given a Runge Kutta method. The function writes the C code to the header file `MoLtimestepping/RK_Allocate_Memory.h`.
# Choose a method to see the C code print out for
RK_method = "RK3 Ralston"
# +
def RK_Allocate(RK_method="RK4"):
with open("MoLtimestepping/RK_Allocate_Memory"+str(RK_method).replace(" ", "_")+".h", "w") as file:
file.write("// Code snippet allocating gridfunction memory for \""+str(RK_method)+"\" method:\n")
# No matter the method we define gridfunctions "y_n_gfs" to store the initial data
file.write("REAL *restrict y_n_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);\n")
if diagonal(RK_method) == True and "RK3" in RK_method:
file.write("""REAL *restrict k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
REAL *restrict k2_or_y_nplus_a32_k2_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
REAL *restrict diagnostic_output_gfs = k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs;""")
else:
if diagonal(RK_method) == False: # Allocate memory for non-diagonal Butcher tables
# Determine the number of k_i steps based on length of Butcher Table
num_k = len(Butcher_dict[RK_method][0])-1
# For non-diagonal tables an intermediate gridfunction "next_y_input" is needed for rhs evaluations
file.write("REAL *restrict next_y_input_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);\n")
for i in range(num_k): # Need to allocate all k_i steps for a given method
file.write("REAL *restrict k"+str(i+1)+"_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);\n")
file.write("REAL *restrict diagnostic_output_gfs = k1_gfs;\n")
else: # Allocate memory for diagonal Butcher tables, which use a "y_nplus1_running_total gridfunction"
file.write("REAL *restrict y_nplus1_running_total_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);\n")
if RK_method != 'Euler': # Allocate memory for diagonal Butcher tables that aren't Euler
# Need k_odd for k_1,3,5... and k_even for k_2,4,6...
file.write("REAL *restrict k_odd_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);\n")
file.write("REAL *restrict k_even_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);\n")
file.write("REAL *restrict diagnostic_output_gfs = y_nplus1_running_total_gfs;\n")
RK_Allocate(RK_method)
print("This is the memory allocation C code for the "+str(RK_method)+" method: \n")
with open("MoLtimestepping/RK_Allocate_Memory"+str(RK_method).replace(" ", "_")+".h", "r") as file:
print(file.read())
# -
# <a id='rkmol'></a>
#
# ## Step 3.b: Implementing the Runge Kutta Scheme for Method of Lines Timestepping, `MoLtimestepping/RK_MoL.h` [Back to [top](#toc)\]
# $$\label{rkmol}$$
#
# We define the function `RK_MoL()` which generates the C code for implementing Method of Lines using a specified Runge Kutta scheme. The function writes the C code to the header file `MoLtimestepping/RK_MoL.h`.
# +
def RK_MoL(RK_method,RHS_string, post_RHS_string):
Butcher = Butcher_dict[RK_method][0] # Get the desired Butcher table from the dictionary
num_steps = len(Butcher)-1 # Specify the number of required steps to update solution
indent = " "
with open("MoLtimestepping/RK_MoL"+str(RK_method).replace(" ", "_")+".h", "w") as file:
file.write("// Code snippet implementing "+RK_method+" algorithm for Method of Lines timestepping\n")
# Diagonal RK3 only!!!
if diagonal(RK_method) == True and "RK3" in RK_method:
# In a diagonal RK3 method, only 3 gridfunctions need be defined. Below implements this approach.
file.write("""
// In a diagonal RK3 method like this one, only 3 gridfunctions need be defined. Below implements this approach.
// Using y_n_gfs as input, compute k1 and apply boundary conditions
"""+RHS_string.replace("RK_INPUT_GFS" ,"y_n_gfs").
replace("RK_OUTPUT_GFS","k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs")+"""
LOOP_ALL_GFS_GPS(i) {
// Store k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs now as
// the update for the next rhs evaluation y_n + a21*k1*dt:
k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i] = ("""+sp.ccode(Butcher[1][1]).replace("L","")+""")*k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i]*dt + y_n_gfs[i];
}
// Apply boundary conditions to y_n + a21*k1*dt:
"""+post_RHS_string.replace("RK_OUTPUT_GFS","k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs")+"""
// Compute k2 using yn + a21*k1*dt
"""+RHS_string.replace("RK_INPUT_GFS" ,"k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs").
replace("RK_OUTPUT_GFS","k2_or_y_nplus_a32_k2_gfs")+"""
LOOP_ALL_GFS_GPS(i) {
// Reassign k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs to be
// the running total y_{n+1}
k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i] = ("""+sp.ccode(Butcher[3][1]).replace("L","")+""")*(k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i] - y_n_gfs[i])/("""+sp.ccode(Butcher[1][1]).replace("L","")+""") + y_n_gfs[i];
// Add a32*k2*dt to the running total
k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i]+= ("""+sp.ccode(Butcher[3][2]).replace("L","")+""")*k2_or_y_nplus_a32_k2_gfs[i]*dt;
// Store k2_or_y_nplus_a32_k2_gfs now as y_n + a32*k2*dt
k2_or_y_nplus_a32_k2_gfs[i] = ("""+sp.ccode(Butcher[2][2]).replace("L","")+""")*k2_or_y_nplus_a32_k2_gfs[i]*dt + y_n_gfs[i];
}
// Apply boundary conditions to both y_n + a32*k2 (stored in k2_or_y_nplus_a32_k2_gfs)
// ... and the y_{n+1} running total, as they have not been applied yet to k2-related gridfunctions:
"""+post_RHS_string.replace("RK_OUTPUT_GFS","k2_or_y_nplus_a32_k2_gfs")+"""
"""+post_RHS_string.replace("RK_OUTPUT_GFS","k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs")+"""
// Compute k3
"""+RHS_string.replace("RK_INPUT_GFS" ,"k2_or_y_nplus_a32_k2_gfs").
replace("RK_OUTPUT_GFS","y_n_gfs")+"""
LOOP_ALL_GFS_GPS(i) {
// Add k3 to the running total and save to y_n
y_n_gfs[i] = k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs[i] + ("""+sp.ccode(Butcher[3][3]).replace("L","")+""")*y_n_gfs[i]*dt;
}
// Apply boundary conditions to the running total
"""+post_RHS_string.replace("RK_OUTPUT_GFS","y_n_gfs")+"\n")
else:
y_n = "y_n_gfs"
if diagonal(RK_method) == False:
for s in range(num_steps):
next_y_input = "next_y_input_gfs"
# If we're on the first step (s=0), we use y_n gridfunction as input.
# Otherwise next_y_input is input. Output is just the reverse.
if s==0: # If on first step:
file.write(RHS_string.replace("RK_INPUT_GFS",y_n).replace("RK_OUTPUT_GFS","k"+str(s+1)+"_gfs")+"\n")
else: # If on second step or later:
file.write(RHS_string.replace("RK_INPUT_GFS",next_y_input).replace("RK_OUTPUT_GFS","k"+str(s+1)+"_gfs")+"\n")
file.write("LOOP_ALL_GFS_GPS(i) {\n")
RK_update_string = ""
if s == num_steps-1: # If on final step:
RK_update_string += indent + y_n+"[i] += dt*("
else: # If on anything but the final step:
RK_update_string += indent + next_y_input+"[i] = "+y_n+"[i] + dt*("
for m in range(s+1):
if Butcher[s+1][m+1] != 0:
if Butcher[s+1][m+1] != 1:
RK_update_string += " + k"+str(m+1)+"_gfs[i]*("+sp.ccode(Butcher[s+1][m+1]).replace("L","")+")"
else:
RK_update_string += " + k"+str(m+1)+"_gfs[i]"
RK_update_string += " );\n}\n"
file.write(RK_update_string)
if s == num_steps-1: # If on final step:
file.write(post_RHS_string.replace("RK_OUTPUT_GFS",y_n)+"\n")
else: # If on anything but the final step:
file.write(post_RHS_string.replace("RK_OUTPUT_GFS",next_y_input)+"\n")
else:
y_nplus1_running_total = "y_nplus1_running_total_gfs"
if RK_method == 'Euler': # Euler's method doesn't require any k_i, and gets its own unique algorithm
file.write(RHS_string.replace("RK_INPUT_GFS",y_n).replace("RK_OUTPUT_GFS",y_nplus1_running_total)+"\n")
file.write("LOOP_ALL_GFS_GPS(i) {\n")
file.write(indent + y_n+"[i] += "+y_nplus1_running_total+"[i]*dt;\n")
file.write("}\n")
file.write(post_RHS_string.replace("RK_OUTPUT_GFS",y_n)+"\n")
else:
for s in range(num_steps):
# If we're on the first step (s=0), we use y_n gridfunction as input.
# and k_odd as output.
if s == 0:
rhs_input = "y_n_gfs"
rhs_output = "k_odd_gfs"
# For the remaining steps the inputs and ouputs alternate between k_odd and k_even
elif s%2 == 0:
rhs_input = "k_even_gfs"
rhs_output = "k_odd_gfs"
else:
rhs_input = "k_odd_gfs"
rhs_output = "k_even_gfs"
file.write(RHS_string.replace("RK_INPUT_GFS",rhs_input).replace("RK_OUTPUT_GFS",rhs_output)+"\n")
file.write("LOOP_ALL_GFS_GPS(i) {\n")
if s == num_steps-1: # If on the final step
if Butcher[num_steps][s+1] !=0:
if Butcher[num_steps][s+1] !=1:
file.write(indent+y_n+"[i] += "+y_nplus1_running_total+"[i] + "+rhs_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+");\n")
else:
file.write(indent+y_n+"[i] += "+y_nplus1_running_total+"[i] + "+rhs_output+"[i]*dt;\n")
file.write("}\n")
file.write(post_RHS_string.replace("RK_OUTPUT_GFS",y_n)+"\n")
else: # For anything besides the final step
if s == 0:
file.write(indent+y_nplus1_running_total+"[i] = "+rhs_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+");\n")
file.write(indent+rhs_output+"[i] = "+y_n+"[i] + "+rhs_output+"[i]*dt*("+sp.ccode(Butcher[s+1][s+1]).replace("L","")+");\n")
else:
if Butcher[num_steps][s+1] !=0:
if Butcher[num_steps][s+1] !=1:
file.write(indent+y_nplus1_running_total+"[i] += "+rhs_output+"[i]*dt*("+sp.ccode(Butcher[num_steps][s+1]).replace("L","")+");\n")
else:
file.write(indent+y_nplus1_running_total+"[i] += "+rhs_output+"[i]*dt;\n")
if Butcher[s+1][s+1] !=0:
if Butcher[s+1][s+1] !=1:
file.write(indent+rhs_output+"[i] = "+y_n+"[i] + "+rhs_output+"[i]*dt*("+sp.ccode(Butcher[s+1][s+1]).replace("L","")+");\n")
else:
file.write(indent+rhs_output+"[i] = "+y_n+"[i] + "+rhs_output+"[i]*dt;\n")
file.write("}\n")
file.write(post_RHS_string.replace("RK_OUTPUT_GFS",rhs_output)+"\n")
RK_MoL(RK_method,"rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, RK_INPUT_GFS, RK_OUTPUT_GFS);",
"")
print("This is the MoL timestepping RK scheme C code for the "+str(RK_method)+" method: \n")
with open("MoLtimestepping/RK_MoL"+str(RK_method).replace(" ", "_")+".h", "r") as file:
print(file.read())
# -
# <a id='free'></a>
#
# ## Step 3.c: Freeing Allocated Memory, `MoLtimestepping/RK_Free_Memory.h` [Back to [top](#toc)\]
# $$\label{free}$$
#
# We define the function `RK_free()` which generates the C code for freeing the memory that was being occupied by the grid functions lists that had been allocated. The function writes the C code to the header file `MoLtimestepping/RK_Free_Memory.h`
# +
def RK_free(RK_method):
L = len(Butcher_dict[RK_method][0])-1 # Useful when freeing k_i gridfunctions
with open("MoLtimestepping/RK_Free_Memory"+str(RK_method).replace(" ", "_")+".h", "w") as file:
file.write("// CODE SNIPPET FOR FREEING ALL ALLOCATED MEMORY FOR "+str(RK_method)+" METHOD:\n")
if diagonal(RK_method) == True and "RK3" in RK_method:
file.write("""
free(k1_or_y_nplus_a21_k1_or_y_nplus1_running_total_gfs);
free(k2_or_y_nplus_a32_k2_gfs);
free(y_n_gfs);""")
else:
file.write("free(y_n_gfs);\n")
if diagonal(RK_method) == False: # Free memory for allocations made for non-diagonal cases
file.write("free(next_y_input_gfs);\n")
for i in range(L):
file.write("free(k"+str(i+1)+"_gfs);\n")
else: # Free memory for allocations made for diagonal cases
file.write("free(y_nplus1_running_total_gfs);\n")
if RK_method != 'Euler':
file.write("free(k_odd_gfs);\n")
file.write("free(k_even_gfs);\n")
RK_free(RK_method)
print("This is the freeing allocated memory C code for the "+str(RK_method)+" method: \n")
with open("MoLtimestepping/RK_Free_Memory"+str(RK_method).replace(" ", "_")+".h", "r") as file:
print(file.read())
# -
# <a id='code_validation'></a>
#
# # Step 4: Code Validation against `MoLtimestepping.RK_Butcher_Table_Generating_C_Code` NRPy+ module [Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# As a code validation check, we verify agreement in the dictionary of Butcher tables between
#
# 1. this tutorial and
# 2. the NRPy+ [MoLtimestepping.RK_Butcher_Table_Generating_C_Code](../edit/MoLtimestepping/RK_Butcher_Table_Generating_C_Code.py) module.
#
# We generate the header files for each RK method and check for agreement with the NRPY+ module.
# +
import sys
import MoLtimestepping.C_Code_Generation as MoLC
print("\n\n ### BEGIN VALIDATION TESTS ###")
import filecmp
fileprefix1 = "MoLtimestepping/RK_Allocate_Memory"
fileprefix2 = "MoLtimestepping/RK_MoL"
fileprefix3 = "MoLtimestepping/RK_Free_Memory"
for key, value in Butcher_dict.items():
MoLC.MoL_C_Code_Generation(key,
"rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, RK_INPUT_GFS, RK_OUTPUT_GFS);",
"apply_bcs(Nxx,Nxx_plus_2NGHOSTS, RK_OUTPUT_GFS);")
RK_Allocate(key)
RK_MoL(key,
"rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, RK_INPUT_GFS, RK_OUTPUT_GFS);",
"apply_bcs(Nxx,Nxx_plus_2NGHOSTS, RK_OUTPUT_GFS);")
RK_free(key)
if filecmp.cmp(fileprefix1+str(key).replace(" ", "_")+".h" , fileprefix1+".h") == False:
print("VALIDATION TEST FAILED ON files: "+fileprefix1+str(key).replace(" ", "_")+".h and "+ fileprefix1+".h")
sys.exit(1)
elif filecmp.cmp(fileprefix2+str(key).replace(" ", "_")+".h" , fileprefix2+".h") == False:
print("VALIDATION TEST FAILED ON files: "+fileprefix2+str(key).replace(" ", "_")+".h and "+ fileprefix2+".h")
sys.exit(1)
elif filecmp.cmp(fileprefix3+str(key).replace(" ", "_")+".h" , fileprefix3+".h") == False:
print("VALIDATION TEST FAILED ON files: "+fileprefix3+str(key).replace(" ", "_")+".h and "+ fileprefix3+".h")
sys.exit(1)
else:
print("VALIDATION TEST PASSED on all files from "+str(key)+" method")
print("### END VALIDATION TESTS ###")
# -
# <a id='latex_pdf_output'></a>
#
# # Step 5: Output this notebook to $\LaTeX$-formatted PDF \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-RK_Butcher_Table_Generating_C_Code.pdf](Tutorial-RK_Butcher_Table_Generating_C_Code.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-Method_of_Lines-C_Code_Generation.ipynb
# !pdflatex -interaction=batchmode Tutorial-Method_of_Lines-C_Code_Generation.tex
# !pdflatex -interaction=batchmode Tutorial-Method_of_Lines-C_Code_Generation.tex
# !pdflatex -interaction=batchmode Tutorial-Method_of_Lines-C_Code_Generation.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
|
Tutorial-Method_of_Lines-C_Code_Generation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from robust_smc.data import ConstantVelocityModel
from robust_smc.kalman import Kalman
from robust_smc.sampler import LinearGaussianBPF, RobustifiedLinearGaussianBPF
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Matplotlib settings
from cycler import cycler
import matplotlib as mpl
palette = cycler(color=mpl.cm.Set1.colors)
mpl.rc('axes', prop_cycle=palette)
mpl.rc('lines', lw=2)
mpl.rc('axes', lw=1.2, titlesize='x-large', labelsize='x-large')
mpl.rc('legend', fontsize='x-large')
# -
plot_titles = [
r'Displacement in $x$ direction',
r'Displacement in $y$ direction',
r'Velocity in $x$ direction',
r'Velocity in $y$ direction'
]
observation_cov = 1.0 * np.eye(2)
contamination_probability = 0.1
simulator = ConstantVelocityModel(100, time_step=0.1, observation_cov=observation_cov,
explosion_scale=100.0, contamination_probability=contamination_probability)
kalman = Kalman(
data=simulator.Y,
transition_matrix=simulator.transition_matrix,
observation_matrix=simulator.observation_matrix,
transition_cov=simulator.process_cov,
observation_cov=simulator.observation_cov,
m_0=np.zeros((4, 1)),
P_0=simulator.initial_cov
)
kalman.filter()
filter_means = np.stack(kalman.filter_means)
filter_covs = np.stack(kalman.filter_covs)
filter_vars = np.diagonal(filter_covs, axis1=1, axis2=2)
X_init = simulator.initial_state[None, ...] + np.linalg.cholesky(simulator.initial_cov) @ np.random.randn(1000, 4, 1)
X_init = X_init.squeeze()
vanilla_bpf = LinearGaussianBPF(
data=simulator.Y,
transition_matrix=simulator.transition_matrix,
observation_model=lambda x :(simulator.observation_matrix @ x[:, :, None]).squeeze(),
transition_cov=simulator.process_cov,
observation_cov=np.diag(simulator.observation_cov),
X_init=X_init,
num_samples=1000
)
vanilla_bpf.sample()
robust_bpf = RobustifiedLinearGaussianBPF(
data=simulator.Y,
beta=0.01,
transition_matrix=simulator.transition_matrix,
observation_model=lambda x :(simulator.observation_matrix @ x[:, :, None]).squeeze(),
transition_cov=simulator.process_cov,
observation_cov=np.diag(simulator.observation_cov),
X_init=X_init,
num_samples=1000
)
robust_bpf.sample()
def get_means_and_quantiles(sampler, state):
trajectories = np.stack(sampler.X_trajectories)
means = trajectories[:, :, state].mean(axis=1)
quantiles = np.quantile(trajectories[:, :, state], q=[0.05, 0.95], axis=1)
return means, quantiles
# +
def plot_state(state, figsize, zoom=False, save_file=None):
X = np.arange(0, 100, 0.1)
plt.figure(figsize=figsize)
plt.plot(X, simulator.X[:, state], label='Ground Truth', color='k', alpha=0.8)
# Kalman
means = filter_means[:, state].squeeze()
uncertainties = np.sqrt(filter_vars[:, state]).squeeze()
plt.plot(X, means, color='C2', ls=':', label='Kalman Filter')
plt.fill_between(X, means - 1.64 * uncertainties, means + 1.64 * uncertainties, color='C2', alpha=0.6)
# BPF
means, quantiles = get_means_and_quantiles(vanilla_bpf, state)
plt.plot(X, means, color='C1', ls='--', label='BPF')
plt.fill_between(X, quantiles[0, :], quantiles[1, :], color='C1', alpha=0.6)
# Robust BPF
means, quantiles = get_means_and_quantiles(robust_bpf, state)
plt.plot(X, means, color='C0', ls='-.', label=r'$\beta$-BPF')
plt.fill_between(X, quantiles[0, :], quantiles[1, :], color='C0', alpha=0.6)
if zoom:
y_min = quantiles[0, :].min()
y_max = quantiles[1, :].max()
y_min = 1.2 * y_min if y_min < 0 else 0.8 * y_min
y_max = 1.2 * y_max if y_max > 0 else 0.8 * y_max
plt.ylim((y_min, y_max))
if state < 2:
plt.ylabel(r'metres')
else:
plt.ylabel(r'metres per second')
# plt.xlabel('time')
plt.title(plot_titles[state], fontsize='x-large')
plt.legend(loc='lower center', frameon=False, bbox_to_anchor=(0.5, -0.4), ncol=4, fontsize='large')
plt.savefig(f'./figures/paper_figures/constant-velocity/latent_variable_plots_{state}.pdf',
bbox_inches='tight')
# -
for state in range(4):
plot_state(state, figsize=(20, 2), zoom=True)
# +
vanilla_bpf_ess = [vanilla_bpf.effective_sample_size(logw) for logw in vanilla_bpf.logw]
robust_bpf_ess = [robust_bpf.effective_sample_size(logw) for logw in robust_bpf.logw]
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(16, 4), dpi=150, sharex=True)
ax[0].bar(range(len(vanilla_bpf_ess)), vanilla_bpf_ess, width=0.4)
ax[0].set_title('BPF')
ax[0].set_ylabel('Effective sample size')
ax[1].bar(range(len(robust_bpf_ess)), robust_bpf_ess, width=0.4)
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Effective sample size')
ax[1].set_title('Robustified BPF')
# +
vanilla_bpf_ess = [vanilla_bpf.effective_sample_size(logw) for logw in vanilla_bpf.logw]
robust_bpf_ess = [robust_bpf.effective_sample_size(logw) for logw in robust_bpf.logw]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 2), sharex=True)
X = np.arange(0, 100, 0.1)
ax.plot(X, vanilla_bpf_ess, label='BPF', c='C1')
ax.plot(X, robust_bpf_ess, label=r'$\beta$-BPF', c='C0')
ax.grid(axis='y')
# ax.set_xlabel('time')
ax.set_title('Effective sample size with time', fontsize='x-large')
ax.legend(loc='lower center', bbox_to_anchor=(0.5, -0.5), ncol=2, fontsize='xx-large', frameon=False);
plt.savefig('./figures/paper_figures/constant-velocity/effecive_sample_size.pdf', bbox_inches='tight')
# -
|
notebooks/constant_velocity_model_pretty_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp data.external
# -
#export
from fastai2.torch_basics import *
# # External data
# > Helper functions to download the fastai datasets
# A complete list of datasets that are available by default isnide the library are:
#
# **Main datasets**:
# 1. **ADULT_SAMPLE**: A small of the [adults dataset](https://archive.ics.uci.edu/ml/datasets/Adult) to predict whether income exceeds $50K/yr based on census data.
# - **BIWI_SAMPLE**: A [BIWI kinect headpose database](https://www.kaggle.com/kmader/biwi-kinect-head-pose-database). The dataset contains over 15K images of 20 people (6 females and 14 males - 4 people were recorded twice). For each frame, a depth image, the corresponding rgb image (both 640x480 pixels), and the annotation is provided. The head pose range covers about +-75 degrees yaw and +-60 degrees pitch.
# 1. **CIFAR**: The famous [cifar-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset which consists of 60000 32x32 colour images in 10 classes, with 6000 images per class.
# 1. **COCO_SAMPLE**: A sample of the [coco dataset](http://cocodataset.org/#home) for object detection.
# 1. **COCO_TINY**: A tiny version of the [coco dataset](http://cocodataset.org/#home) for object detection.
# - **HUMAN_NUMBERS**: A synthetic dataset consisting of human number counts in text such as one, two, three, four.. Useful for experimenting with Language Models.
# - **IMDB**: The full [IMDB sentiment analysis dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
#
# - **IMDB_SAMPLE**: A sample of the full [IMDB sentiment analysis dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
# - **ML_SAMPLE**: A movielens sample dataset for recommendation engines to recommend movies to users.
# - **ML_100k**: The movielens 100k dataset for recommendation engines to recommend movies to users.
# - **MNIST_SAMPLE**: A sample of the famous [MNIST dataset](http://yann.lecun.com/exdb/mnist/) consisting of handwritten digits.
# - **MNIST_TINY**: A tiny version of the famous [MNIST dataset](http://yann.lecun.com/exdb/mnist/) consisting of handwritten digits.
# - **MNIST_VAR_SIZE_TINY**:
# - **PLANET_SAMPLE**: A sample of the planets dataset from the Kaggle competition [Planet: Understanding the Amazon from Space](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space).
# - **PLANET_TINY**: A tiny version of the planets dataset from the Kaggle competition [Planet: Understanding the Amazon from Space](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space) for faster experimentation and prototyping.
# - **IMAGENETTE**: A smaller version of the [imagenet dataset](http://www.image-net.org/) pronounced just like 'Imagenet', except with a corny inauthentic French accent.
# - **IMAGENETTE_160**: The 160px version of the Imagenette dataset.
# - **IMAGENETTE_320**: The 320px version of the Imagenette dataset.
# - **IMAGEWOOF**: Imagewoof is a subset of 10 classes from Imagenet that aren't so easy to classify, since they're all dog breeds.
# - **IMAGEWOOF_160**: 160px version of the ImageWoof dataset.
# - **IMAGEWOOF_320**: 320px version of the ImageWoof dataset.
# - **IMAGEWANG**: Imagewang contains Imagenette and Imagewoof combined, but with some twists that make it into a tricky semi-supervised unbalanced classification problem
# - **IMAGEWANG_160**: 160px version of Imagewang.
# - **IMAGEWANG_320**: 320px version of Imagewang.
#
# **Kaggle competition datasets**:
# 1. **DOGS**: Image dataset consisting of dogs and cats images from [Dogs vs Cats kaggle competition](https://www.kaggle.com/c/dogs-vs-cats).
#
# **Image Classification datasets**:
# 1. **CALTECH_101**: Pictures of objects belonging to 101 categories. About 40 to 800 images per category. Most categories have about 50 images. Collected in September 2003 by <NAME>, <NAME>, and <NAME>.
# 1. CARS: The [Cars dataset](https://ai.stanford.edu/~jkrause/cars/car_dataset.html) contains 16,185 images of 196 classes of cars.
# 1. **CIFAR_100**: The CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with 600 images per class.
# 1. **CUB_200_2011**: Caltech-UCSD Birds-200-2011 (CUB-200-2011) is an extended version of the CUB-200 dataset, with roughly double the number of images per class and new part location annotations
# 1. **FLOWERS**: 17 category [flower dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/) by gathering images from various websites.
# 1. **FOOD**:
# 1. **MNIST**: [MNIST dataset](http://yann.lecun.com/exdb/mnist/) consisting of handwritten digits.
# 1. **PETS**: A 37 category [pet dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/) with roughly 200 images for each class.
#
# **NLP datasets**:
# 1. **AG_NEWS**: The AG News corpus consists of news articles from the AG’s corpus of news articles on the web pertaining to the 4 largest classes. The dataset contains 30,000 training and 1,900 testing examples for each class.
# 1. **AMAZON_REVIEWS**: This dataset contains product reviews and metadata from Amazon, including 142.8 million reviews spanning May 1996 - July 2014.
# 1. **AMAZON_REVIEWS_POLARITY**: Amazon reviews dataset for sentiment analysis.
# 1. **DBPEDIA**: The DBpedia ontology dataset contains 560,000 training samples and 70,000 testing samples for each of 14 nonoverlapping classes from DBpedia.
# 1. **MT_ENG_FRA**: Machine translation dataset from English to French.
# 1. **SOGOU_NEWS**: [The Sogou-SRR](http://www.thuir.cn/data-srr/) (Search Result Relevance) dataset was constructed to support researches on search engine relevance estimation and ranking tasks.
# 1. **WIKITEXT**: The [WikiText language modeling dataset](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) is a collection of over 100 million tokens extracted from the set of verified Good and Featured articles on Wikipedia.
# 1. **WIKITEXT_TINY**: A tiny version of the WIKITEXT dataset.
# 1. **YAHOO_ANSWERS**: YAHOO's question answers dataset.
# 1. **YELP_REVIEWS**: The [Yelp dataset](https://www.yelp.com/dataset) is a subset of YELP businesses, reviews, and user data for use in personal, educational, and academic purposes
# 1. **YELP_REVIEWS_POLARITY**: For sentiment classification on YELP reviews.
#
#
# **Image localization datasets**:
# 1. **BIWI_HEAD_POSE**: A [BIWI kinect headpose database](https://www.kaggle.com/kmader/biwi-kinect-head-pose-database). The dataset contains over 15K images of 20 people (6 females and 14 males - 4 people were recorded twice). For each frame, a depth image, the corresponding rgb image (both 640x480 pixels), and the annotation is provided. The head pose range covers about +-75 degrees yaw and +-60 degrees pitch.
# 1. **CAMVID**: Consists of driving labelled dataset for segmentation type models.
# 1. **CAMVID_TINY**: A tiny camvid dataset for segmentation type models.
# 1. **LSUN_BEDROOMS**: [Large-scale Image Dataset](https://arxiv.org/abs/1506.03365) using Deep Learning with Humans in the Loop
# 1. **PASCAL_2007**: [Pascal 2007 dataset](http://host.robots.ox.ac.uk/pascal/VOC/voc2007/) to recognize objects from a number of visual object classes in realistic scenes.
# 1. **PASCAL_2012**: [Pascal 2012 dataset](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/) to recognize objects from a number of visual object classes in realistic scenes.
#
# **Audio classification**:
# 1. **MACAQUES**: [7285 macaque coo calls](https://datadryad.org/stash/dataset/doi:10.5061/dryad.7f4p9) across 8 individuals from [Distributed acoustic cues for caller identity in macaque vocalization](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4806230).
# 2. **ZEBRA_FINCH**: [3405 zebra finch calls](https://ndownloader.figshare.com/articles/11905533/versions/1) classified [across 11 call types](https://link.springer.com/article/10.1007/s10071-015-0933-6). Additonal labels include name of individual making the vocalization and its age.
#
# **Medical Imaging datasets**:
# 1. **SIIM_SMALL**: A smaller version of the [SIIM dataset](https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/overview) where the objective is to classify pneumothorax from a set of chest radiographic images.
#
# **Pretrained models**:
# 1. **OPENAI_TRANSFORMER**: The GPT2 Transformer pretrained weights.
# 1. **WT103_FWD**: The WikiText-103 forward language model weights.
# 1. **WT103_BWD**: The WikiText-103 backward language model weights.
# To download any of the datasets or pretrained weights, simply run `untar_data` by passing any dataset name mentioned above like so:
#
# ```python
# path = untar_data(URLs.PETS)
# path.ls()
#
# >> (#7393) [Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/keeshond_34.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/Siamese_178.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/german_shorthaired_94.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/Abyssinian_92.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/basset_hound_111.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/Russian_Blue_194.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/staffordshire_bull_terrier_91.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/Persian_69.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/english_setter_33.jpg'),Path('/home/ubuntu/.fastai/data/oxford-iiit-pet/images/Russian_Blue_155.jpg')...]
# ```
#
# To download model pretrained weights:
# ```python
# path = untar_data(URLs.PETS)
# path.ls()
#
# >> (#2) [Path('/home/ubuntu/.fastai/data/wt103-bwd/itos_wt103.pkl'),Path('/home/ubuntu/.fastai/data/wt103-bwd/lstm_bwd.pth')]
# ```
# ## Config -
# export
class Config:
"Setup config at `~/.fastai` unless it exists already."
config_path = Path(os.getenv('FASTAI_HOME', '~/.fastai')).expanduser()
config_file = config_path/'config.yml'
def __init__(self):
self.config_path.mkdir(parents=True, exist_ok=True)
if not self.config_file.exists(): self.create_config()
self.d = self.load_config()
def __getitem__(self,k):
k = k.lower()
if k not in self.d: k = k+'_path'
return Path(self.d[k])
def __getattr__(self,k):
if k=='d': raise AttributeError
return self[k]
def __setitem__(self,k,v): self.d[k] = str(v)
def __contains__(self,k): return k in self.d
def load_config(self):
"load and return config if version equals 2 in existing, else create new config."
with open(self.config_file, 'r') as f:
config = yaml.safe_load(f)
if 'version' in config and config['version'] == 2: return config
elif 'version' in config: self.create_config(config)
else: self.create_config()
return self.load_config()
def create_config(self, cfg=None):
"create new config with default paths and set `version` to 2."
config = {'data_path': str(self.config_path/'data'),
'archive_path': str(self.config_path/'archive'),
'storage_path': str(self.config_path/'data'),
'model_path': str(self.config_path/'models'),
'version': 2}
if cfg is not None:
cfg['version'] = 2
config = merge(config, cfg)
self.save_file(config)
def save(self): self.save_file(self.d)
def save_file(self, config):
"save config file at default config location `~/.fastai/config.yml`."
with self.config_file.open('w') as f: yaml.dump(config, f, default_flow_style=False)
# If a config file doesn't exist already, it is always created at `~/.fastai/config.yml` location by default whenever an instance of the `Config` class is created. Here is a quick example to explain:
# +
config_file = Path("~/.fastai/config.yml").expanduser()
if config_file.exists(): os.remove(config_file)
assert not config_file.exists()
config = Config()
assert config_file.exists()
# -
# The config is now available as `config.d`:
config.d
# As can be seen, this is a basic config file that consists of `data_path`, `model_path`, `storage_path` and `archive_path`.
# All future downloads occur at the paths defined in the config file based on the type of download. For example, all future fastai datasets are downloaded to the `data_path` while all pretrained model weights are download to `model_path` unless the default download location is updated.
#hide
config = Config()
config_path = config.config_path
config_file,config_bak = config_path/'config.yml',config_path/'config.yml.bak'
config_file,config_bak
#hide
#This cell is just to make the config file compatible with current fastai
# TODO: make this a method that auto-runs as needed
if 'data_archive_path' not in config:
config['data_archive_path'] = config.data_path
config.save()
# Please note that it is possible to update the default path locations in the config file. Let's first create a backup of the config file, then update the config to show the changes and re update the new config with the backup file.
if config_file.exists(): shutil.move(config_file, config_bak)
config['archive_path'] = Path(".")
config.save()
config = Config()
config.d
# The `archive_path` has been updated to `"."`. Now let's remove any updates we made to Config file that we made for the purpose of this example.
if config_bak.exists(): shutil.move(config_bak, config_file)
config = Config()
config.d
# ## URLs -
#export
class URLs():
"Global constants for dataset and model URLs."
LOCAL_PATH = Path.cwd()
URL = 'http://files.fast.ai/data/examples/'
MDL = 'http://files.fast.ai/models/'
S3 = 'https://s3.amazonaws.com/fast-ai-'
S3_IMAGE = f'{S3}imageclas/'
S3_IMAGELOC = f'{S3}imagelocal/'
S3_AUDI = f'{S3}audio/'
S3_NLP = f'{S3}nlp/'
S3_COCO = f'{S3}coco/'
S3_MODEL = f'{S3}modelzoo/'
# main datasets
ADULT_SAMPLE = f'{URL}adult_sample.tgz'
BIWI_SAMPLE = f'{URL}biwi_sample.tgz'
CIFAR = f'{URL}cifar10.tgz'
COCO_SAMPLE = f'{S3_COCO}coco_sample.tgz'
COCO_TINY = f'{URL}coco_tiny.tgz'
HUMAN_NUMBERS = f'{URL}human_numbers.tgz'
IMDB = f'{S3_NLP}imdb.tgz'
IMDB_SAMPLE = f'{URL}imdb_sample.tgz'
ML_SAMPLE = f'{URL}movie_lens_sample.tgz'
ML_100k = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
MNIST_SAMPLE = f'{URL}mnist_sample.tgz'
MNIST_TINY = f'{URL}mnist_tiny.tgz'
MNIST_VAR_SIZE_TINY = f'{S3_IMAGE}mnist_var_size_tiny.tgz'
PLANET_SAMPLE = f'{URL}planet_sample.tgz'
PLANET_TINY = f'{URL}planet_tiny.tgz'
IMAGENETTE = f'{S3_IMAGE}imagenette2.tgz'
IMAGENETTE_160 = f'{S3_IMAGE}imagenette2-160.tgz'
IMAGENETTE_320 = f'{S3_IMAGE}imagenette2-320.tgz'
IMAGEWOOF = f'{S3_IMAGE}imagewoof2.tgz'
IMAGEWOOF_160 = f'{S3_IMAGE}imagewoof2-160.tgz'
IMAGEWOOF_320 = f'{S3_IMAGE}imagewoof2-320.tgz'
IMAGEWANG = f'{S3_IMAGE}imagewang.tgz'
IMAGEWANG_160 = f'{S3_IMAGE}imagewang-160.tgz'
IMAGEWANG_320 = f'{S3_IMAGE}imagewang-320.tgz'
# kaggle competitions download dogs-vs-cats -p {DOGS.absolute()}
DOGS = f'{URL}dogscats.tgz'
# image classification datasets
CALTECH_101 = f'{S3_IMAGE}caltech_101.tgz'
CARS = f'{S3_IMAGE}stanford-cars.tgz'
CIFAR_100 = f'{S3_IMAGE}cifar100.tgz'
CUB_200_2011 = f'{S3_IMAGE}CUB_200_2011.tgz'
FLOWERS = f'{S3_IMAGE}oxford-102-flowers.tgz'
FOOD = f'{S3_IMAGE}food-101.tgz'
MNIST = f'{S3_IMAGE}mnist_png.tgz'
PETS = f'{S3_IMAGE}oxford-iiit-pet.tgz'
# NLP datasets
AG_NEWS = f'{S3_NLP}ag_news_csv.tgz'
AMAZON_REVIEWS = f'{S3_NLP}amazon_review_full_csv.tgz'
AMAZON_REVIEWS_POLARITY = f'{S3_NLP}amazon_review_polarity_csv.tgz'
DBPEDIA = f'{S3_NLP}dbpedia_csv.tgz'
MT_ENG_FRA = f'{S3_NLP}giga-fren.tgz'
SOGOU_NEWS = f'{S3_NLP}sogou_news_csv.tgz'
WIKITEXT = f'{S3_NLP}wikitext-103.tgz'
WIKITEXT_TINY = f'{S3_NLP}wikitext-2.tgz'
YAHOO_ANSWERS = f'{S3_NLP}yahoo_answers_csv.tgz'
YELP_REVIEWS = f'{S3_NLP}yelp_review_full_csv.tgz'
YELP_REVIEWS_POLARITY = f'{S3_NLP}yelp_review_polarity_csv.tgz'
# Image localization datasets
BIWI_HEAD_POSE = f"{S3_IMAGELOC}biwi_head_pose.tgz"
CAMVID = f'{S3_IMAGELOC}camvid.tgz'
CAMVID_TINY = f'{URL}camvid_tiny.tgz'
LSUN_BEDROOMS = f'{S3_IMAGE}bedroom.tgz'
PASCAL_2007 = f'{S3_IMAGELOC}pascal_2007.tgz'
PASCAL_2012 = f'{S3_IMAGELOC}pascal_2012.tgz'
# Audio classification datasets
MACAQUES = 'https://storage.googleapis.com/ml-animal-sounds-datasets/macaques.zip'
ZEBRA_FINCH = 'https://storage.googleapis.com/ml-animal-sounds-datasets/zebra_finch.zip'
# Medical Imaging datasets
#SKIN_LESION = f'{S3_IMAGELOC}skin_lesion.tgz'
SIIM_SMALL = f'{S3_IMAGELOC}siim_small.tgz'
#Pretrained models
OPENAI_TRANSFORMER = f'{S3_MODEL}transformer.tgz'
WT103_FWD = f'{S3_MODEL}wt103-fwd.tgz'
WT103_BWD = f'{S3_MODEL}wt103-bwd.tgz'
def path(url='.', c_key='archive'):
"Return local path where to download based on `c_key`"
fname = url.split('/')[-1]
local_path = URLs.LOCAL_PATH/('models' if c_key=='models' else 'data')/fname
if local_path.exists(): return local_path
return Config()[c_key]/fname
# The default local path is at `~/.fastai/archive/` but this can be updated by passing a different `c_key`. Note: `c_key` should be one of `'archive_path', 'data_archive_path', 'data_path', 'model_path', 'storage_path'`.
url = URLs.PETS
local_path = URLs.path(url)
test_eq(local_path.parent, Config()['archive']);
local_path
local_path = URLs.path(url, c_key='model')
test_eq(local_path.parent, Config()['model'])
local_path
# ## Downloading
# export
def download_url(url, dest, overwrite=False, pbar=None, show_progress=True, chunk_size=1024*1024,
timeout=4, retries=5):
"Download `url` to `dest` unless it exists and not `overwrite`"
if os.path.exists(dest) and not overwrite: return
s = requests.Session()
s.mount('http://',requests.adapters.HTTPAdapter(max_retries=retries))
# additional line to identify as a firefox browser, see fastai/#2438
s.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'})
u = s.get(url, stream=True, timeout=timeout)
try: file_size = int(u.headers["Content-Length"])
except: show_progress = False
with open(dest, 'wb') as f:
nbytes = 0
if show_progress: pbar = progress_bar(range(file_size), leave=False, parent=pbar)
try:
if show_progress: pbar.update(0)
for chunk in u.iter_content(chunk_size=chunk_size):
nbytes += len(chunk)
if show_progress: pbar.update(nbytes)
f.write(chunk)
except requests.exceptions.ConnectionError as e:
fname = url.split('/')[-1]
data_dir = dest.parent
print(f'\n Download of {url} has failed after {retries} retries\n'
f' Fix the download manually:\n'
f'$ mkdir -p {data_dir}\n'
f'$ cd {data_dir}\n'
f'$ wget -c {url}\n'
f'$ tar xf {fname}\n'
f' And re-run your code once the download is successful\n')
# The `download_url` is a very handy function inside fastai! This function can be used to download any file from the internet to a location passed by `dest` argument of the function. It should not be confused, that this function can only be used to download fastai-files. That couldn't be further away from the truth. As an example, let's download the pets dataset from the actual source file:
fname = Path("./dog.jpg")
if fname.exists(): os.remove(fname)
url = "https://i.insider.com/569fdd9ac08a80bd448b7138?width=1100&format=jpeg&auto=webp"
download_url(url, fname)
assert fname.exists()
# Let's confirm that the file was indeed downloaded correctly.
from PIL import Image
im = Image.open(fname)
plt.imshow(im);
# As can be seen, the file has been downloaded to the local path provided in `dest` argument. Calling the function again doesn't trigger a download since the file is already there. This can be confirmed by checking that the last modified time of the file that is downloaded doesn't get updated.
if fname.exists(): last_modified_time = os.path.getmtime(fname)
download_url(url, fname)
test_eq(os.path.getmtime(fname), last_modified_time)
if fname.exists(): os.remove(fname)
# We can also use the `download_url` function to download the pet's dataset straight from the source by simply passing `https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz` in `url`.
# export
def download_data(url, fname=None, c_key='archive', force_download=False):
"Download `url` to `fname`."
fname = Path(fname or URLs.path(url, c_key=c_key))
fname.parent.mkdir(parents=True, exist_ok=True)
if not fname.exists() or force_download: download_url(url, fname, overwrite=force_download)
return fname
# The `download_data` is a convenience function and a wrapper outside `download_url` to download fastai files to the appropriate local path based on the `c_key`.
# If `fname` is None, it will default to the archive folder you have in your config file (or data, model if you specify a different `c_key`) followed by the last part of the url: for instance `URLs.MNIST_SAMPLE` is `http://files.fast.ai/data/examples/mnist_sample.tgz` and the default value for `fname` will be `~/.fastai/archive/mnist_sample.tgz`.
#
# If `force_download=True`, the file is alwayd downloaded. Otherwise, it's only when the file doesn't exists that the download is triggered.
# +
#hide
try:
test_eq(download_data(URLs.MNIST_SAMPLE), config.archive/'mnist_sample.tgz')
test_eq(download_data(URLs.MNIST_TINY, fname=Path('mnist.tgz')), Path('mnist.tgz'))
finally: Path('mnist.tgz').unlink()
try:
tst_model = config.model/'mnist_tiny.tgz'
test_eq(download_data(URLs.MNIST_TINY, c_key='model'), tst_model)
os.remove(tst_model)
finally:
if tst_model.exists(): tst_model.unlink()
# -
# ### Check datasets -
#hide
from nbdev.imports import Config as NbdevConfig
__file__ = NbdevConfig().lib_path/'data'/'external.py'
# +
#export
def _get_check(url):
"internal function to get the hash of the file at `url`."
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
return checks.get(url, None)
def _check_file(fname):
"internal function to get the hash of the local file at `fname`."
size = os.path.getsize(fname)
with open(fname, "rb") as f: hash_nb = hashlib.md5(f.read(2**20)).hexdigest()
return [size,hash_nb]
# -
#hide
test_eq(_get_check(URLs.MNIST_SAMPLE), _check_file(URLs.path(URLs.MNIST_SAMPLE)))
_get_check(URLs.MNIST_SAMPLE), _check_file(URLs.path(URLs.MNIST_SAMPLE))
#export
def _add_check(url, fname):
"Internal function to update the internal check file with `url` and check on `fname`."
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
checks[url] = _check_file(fname)
json.dump(checks, open(Path(__file__).parent/'checks.txt', 'w'), indent=2)
# ### Extract
#export
def file_extract(fname, dest=None):
"Extract `fname` to `dest` using `tarfile` or `zipfile`."
if dest is None: dest = Path(fname).parent
fname = str(fname)
if fname.endswith('gz'): tarfile.open(fname, 'r:gz').extractall(dest)
elif fname.endswith('zip'): zipfile.ZipFile(fname ).extractall(dest)
else: raise Exception(f'Unrecognized archive: {fname}')
# `file_extract` is used by default in `untar_data` to decompress the downloaded file.
#export
def _try_from_storage(dest, storage):
"an internal function to create symbolic links for files from `storage` to `dest` if `storage` exists"
if not storage.exists(): return
os.makedirs(dest, exist_ok=True)
for f in storage.glob('*'): os.symlink(f, dest/f.name, target_is_directory=f.is_dir())
#hide
with tempfile.TemporaryDirectory() as d:
d = Path(d)
for k in ['a', 'b', 'c']: os.makedirs(d/k)
for k in ['d', 'e', 'f']: (d/k).touch()
dest = d.parent/'tmp'
if dest.exists(): shutil.rmtree(dest)
_try_from_storage(dest, d)
assert dest.exists()
for k in ['a', 'b', 'c']:
assert (dest/k).exists()
assert (dest/k).is_dir()
for k in ['d', 'e', 'f']:
assert (dest/k).exists()
assert (dest/k).is_file()
#export
def newest_folder(path):
"Return newest folder on path"
list_of_paths = path.glob('*')
return max(list_of_paths, key=lambda p: p.stat().st_ctime)
#export
def rename_extracted(dest):
"Rename file if different from dest"
extracted = newest_folder(dest.parent)
if not (extracted.name == dest.name): extracted.rename(dest)
# let's rename the untar/unzip data if dest name is different from fname
#export
def untar_data(url, fname=None, dest=None, c_key='data', force_download=False, extract_func=file_extract):
"Download `url` to `fname` if `dest` doesn't exist, and un-tgz or unzip to folder `dest`."
default_dest = URLs.path(url, c_key=c_key).with_suffix('')
dest = default_dest if dest is None else Path(dest)/default_dest.name
fname = Path(fname or URLs.path(url))
if fname.exists() and _get_check(url) and _check_file(fname) != _get_check(url):
print("A new version of this dataset is available, downloading...")
force_download = True
if force_download:
if fname.exists(): os.remove(fname)
if dest.exists(): shutil.rmtree(dest)
if not dest.exists(): _try_from_storage(dest, URLs.path(url, c_key='storage').with_suffix(''))
if not dest.exists():
fname = download_data(url, fname=fname, c_key=c_key)
if _get_check(url) and _check_file(fname) != _get_check(url):
print(f"File downloaded is broken. Remove {fname} and try again.")
extract_func(fname, dest.parent)
rename_extracted(dest)
return dest
# `untar_data` is a very powerful convenience function to download files from `url` to `dest`. The `url` can be a default `url` from the `URLs` class or a custom url. If `dest` is not passed, files are downloaded at the `default_dest` which defaults to `~/.fastai/data/`.
#
# This convenience function extracts the downloaded files to `dest` by default. In order, to simply download the files without extracting, pass the `noop` function as `extract_func`.
#
# Note, it is also possible to pass a custom `extract_func` to `untar_data` if the filetype doesn't end with `.tgz` or `.zip`. The `gzip` and `zip` files are supported by default and there is no need to pass custom `extract_func` for these type of files.
#
# Internally, if files are not available at `fname` location already which defaults to `~/.fastai/archive/`, the files get downloaded at `~/.fastai/archive` and are then extracted at `dest` location. If no `dest` is passed the `default_dest` to download the files is `~/.fastai/data`. If files are already available at the `fname` location but not available then a symbolic link is created for each file from `fname` location to `dest`.
#
# Also, if `force_download` is set to `True`, files are re downloaded even if they exist.
# +
test_eq(untar_data(URLs.MNIST_SAMPLE), config.data/'mnist_sample')
#Test specific fname
untar_data(URLs.MNIST_TINY, fname='mnist_tiny.tgz', force_download=True)
p = Path('mnist_tiny.tgz')
assert p.exists()
p.unlink()
#Test specific dest
test_eq(untar_data(URLs.MNIST_TINY, dest='.'), Path('mnist_tiny'))
assert Path('mnist_tiny').exists()
shutil.rmtree(Path('mnist_tiny'))
#Test c_key
tst_model = config.model/'mnist_sample'
test_eq(untar_data(URLs.MNIST_SAMPLE, c_key='model'), tst_model)
assert not tst_model.with_suffix('.tgz').exists() #Archive wasn't downloaded in the models path
assert (config.archive/'mnist_sample.tgz').exists() #Archive was downloaded there
shutil.rmtree(tst_model)
# -
# Sometimes the extracted folder does not have the same name as the downloaded file.
#test fname!=dest
untar_data(URLs.MNIST_TINY, fname='mnist_tiny.tgz', force_download=True)
Path('mnist_tiny.tgz').rename('nims_tini.tgz')
p = Path('nims_tini.tgz')
dest = Path('nims_tini')
assert p.exists()
file_extract(p, dest.parent)
rename_extracted(dest)
p.unlink()
shutil.rmtree(dest)
#hide
#Check all URLs are in the checks.txt file and match for downloaded archives
_whitelist = "MDL LOCAL_PATH URL WT103_BWD WT103_FWD".split()
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
for d in dir(URLs):
if d.upper() == d and not d.startswith("S3") and not d in _whitelist:
url = getattr(URLs, d)
assert url in checks,f"""{d} is not in the check file for all URLs.
To fix this, you need to run the following code in this notebook before making a PR (there is a commented cell for this below):
url = URLs.{d}
untar_data(url, force_download=True)
_add_check(url, URLs.path(url))
"""
f = URLs.path(url)
if f.exists():
assert checks[url] == _check_file(f),f"""The log we have for {d} in checks does not match the actual archive.
To fix this, you need to run the following code in this notebook before making a PR (there is a commented cell for this below):
url = URLs.{d}
_add_check(url, URLs.path(url))
"""
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
|
nbs/04_data.external.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import display
from pydub import AudioSegment
from pydub.generators import SignalGenerator
import random
audio = AudioSegment.from_mp3("data_unseen/1.wav")
display(audio)
class WhiteNoise2(SignalGenerator):
def __init__(self,level):
self.level = level
super(WhiteNoise2, self).__init__()
def generate(self):
while True:
yield ((random.random() * 2) - 1.0)/self.level
noise = WhiteNoise2(10).to_audio_segment(duration=len(audio))
audio2 = audio.overlay(noise)
display(audio2)
# -
|
Noise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/Peischlili/ComputerVision_WebScraper/blob/main/Yolo3_detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="P20VsZ3H8rN0"
# **Import dependences**
# + id="lWn4YND_yHTP"
import os
import struct
import numpy as np
from keras.layers import Conv2D
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import LeakyReLU
from keras.layers import ZeroPadding2D
from keras.layers import UpSampling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
from numpy import loadtxt, expand_dims
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
from matplotlib import pyplot
from matplotlib.patches import Rectangle
# + [markdown] id="aUl4rfRc89DJ"
# **Functions used to create layers, model and load weight**
# + id="DHCL4o32z60m"
# tutorial from https://machinelearningmastery.com/how-to-perform-object-detection-with-yolov3-in-keras/
# Helper function to create blocks of layers
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefer left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefer left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
# + id="6vfUBP2Rzeai"
# create the model with custom number of filters
# 1 category
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82 :change 255 filters to 18 to fit 1 class
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 18, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94 : change 255 filters to 18 to fit 1 class
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 18, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106 : change 255 filters to 18 to fit 1 class
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 18, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
# + id="miuKLsLd5W5g"
class WeightReader:
def __init__(self, weight_file):
with open(weight_file, 'rb') as w_f:
major, = struct.unpack('i', w_f.read(4))
minor, = struct.unpack('i', w_f.read(4))
revision, = struct.unpack('i', w_f.read(4))
if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
w_f.read(8)
else:
w_f.read(4)
transpose = (major > 1000) or (minor > 1000)
binary = w_f.read()
self.offset = 0
self.all_weights = np.frombuffer(binary, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset-size:self.offset]
def load_weights(self, model):
for i in range(106):
try:
conv_layer = model.get_layer('conv_' + str(i))
print("loading weights of convolution #" + str(i))
if i not in [81, 93, 105]:
norm_layer = model.get_layer('bnorm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = self.read_bytes(size) # bias
gamma = self.read_bytes(size) # scale
mean = self.read_bytes(size) # mean
var = self.read_bytes(size) # variance
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = self.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel, bias])
else:
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel])
except ValueError:
print("no convolution #" + str(i))
def reset(self):
self.offset = 0
# + [markdown] id="ODdbOAhz9ImR"
# **Create folder to store pretrained weights and updated weights**
# + colab={"base_uri": "https://localhost:8080/"} id="8jCCwmMi1UQA" outputId="a1fb21b3-c3ad-4e58-c339-5fa6bff9191f"
# define the model
model = make_yolov3_model()
# load the model weights : here we used our trained Yolo3 weights
weight_reader = WeightReader('/Users/peisch/code/WebScraper/weights/yolo-obj_29000_16694img.weights')
# set the model weights into the model
weight_reader.load_weights(model)
# save the model to file
model.save('/Users/peisch/code/WebScraper/test_scrap/model.h5')
# -
model.summary()
# + [markdown] id="vZWvJydqLyfw"
# **Class and functions to add bounding boxes**
# + id="mh_NpdaEJIDW"
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
if(objectness.all() <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
new_w, new_h = net_w, net_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
# load and prepare an image
def load_image_pixels(filename, shape):
# load the image to get its shape
image = load_img(filename)
width, height = image.size
# load the image with the required size
image = load_img(filename, target_size=shape)
# convert to numpy array
image = img_to_array(image)
# scale pixel values to [0, 1]
image = image.astype('float32')
image /= 255.0
# add a dimension so that we have one sample
image = expand_dims(image, 0)
return image, width, height
# get all of the results above a threshold
def get_boxes(boxes, labels, thresh):
v_boxes, v_labels, v_scores = list(), list(), list()
# enumerate all boxes
for box in boxes:
# enumerate all possible labels
for i in range(len(labels)):
# check if the threshold for this label is high enough
if box.classes[i] > thresh:
v_boxes.append(box)
v_labels.append(labels[i])
v_scores.append(box.classes[i]*100)
# don't break, many labels may trigger for one box
return v_boxes, v_labels, v_scores
# draw all results
def draw_boxes(filename, v_boxes, v_labels, v_scores):
# load the image
data = pyplot.imread(filename)
# plot the image
pyplot.imshow(data)
# get the context for drawing boxes
ax = pyplot.gca()
# plot each box
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='green')
# draw the box
ax.add_patch(rect)
# draw text and score in top left corner
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
pyplot.text(x1, y1, label, color='green')
# show the plot
pyplot.show()
# used to calculate bounding box and annotation IoU stand-alone
# the arguments should be tuples
def find_iou(tuple_box, tuple_annot):
# box.ymin, box.xmin, box.ymax, box.xmax: the structure of the tuple
intersect_w = _interval_overlap([tuple_box[1], tuple_box[3]], [tuple_annot[1], tuple_annot[3]])
intersect_h = _interval_overlap([tuple_box[0], tuple_box[2]], [tuple_annot[0], tuple_annot[2]])
intersect = intersect_w * intersect_h
w1, h1 = tuple_box[3] - tuple_box[1], tuple_box[2] - tuple_box[0]
w2, h2 = tuple_annot[3] - tuple_annot[1], tuple_annot[2] - tuple_annot[0]
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
# draw all annotations and predicted boxes
# this function is adhoc case and should be rewrite to fit multiple annotations
def draw_both(filename, annot_txt, v_boxes, v_labels, v_scores):
# load the image
data = pyplot.imread(filename)
# plot the image
pyplot.imshow(data)
# get the context for drawing boxes
ax = pyplot.gca()
# plot each box
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='red')
# draw the box
ax.add_patch(rect)
# draw text and score in top left corner
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
pyplot.text(x1, y1, label, color='red')
# tuple_1 for single box
tuple_1 = v_boxes[0].ymin, v_boxes[0].xmin, v_boxes[0].ymax, v_boxes[0].xmax
# load annotation txt file and read content
with open(annot_txt, 'r') as f:
content = f.read()
# read content and save to a list
content_l = content.split(" ")
# create tuples with content
ymin, xmin, ymax, xmax = float(content_l[2]), float(content_l[1]), float(content_l[4]), float(content_l[3])
f.close
# tuple of annotation coordinates
tuple_2 = ymin, xmin, ymax, xmax
y_1, x_1, y_2, x_2 = ymin, xmin, ymax, xmax
# w-prime, w-prime are those of annots
width_p, height_p = x_2 - x_1, y_2 - y_1
rect_annot = Rectangle((x_1, y_1), width_p, height_p, fill=False, color='green')
# draw annotation rectangle
ax.add_patch(rect_annot)
# calculate the IoU
iou = find_iou(tuple_1, tuple_2)
# show IoU
label_iou = "%s (%.3f)" % ("IoU", iou)
pyplot.text(x2, y2, label_iou, color='red')
print(f"iou: {iou}")
# show the plot
pyplot.show()
# + [markdown] id="KDNfjmBkF407"
# **Call created model, load test photo, and do prediction**
# + colab={"base_uri": "https://localhost:8080/", "height": 339} id="uDm8i2EYMHAz" outputId="85896de0-d44f-4987-f212-793cbb8f46cc"
# load yolov3 model
model = load_model('/Users/peisch/code/WebScraper/test_scrap/model.h5')
# define the expected input shape for the model
input_w, input_h = 512, 512
# define our new photo
photo_filename = '/Users/peisch/code/WebScraper/iou_dataset/images/25.jpg'
# define photo annotation txt file (if there is any)
annot_filename = "/Users/peisch/code/WebScraper/iou_dataset/labels_yolo3/25.txt"
# load and prepare image
image, image_w, image_h = load_image_pixels(photo_filename, (input_w, input_h))
print(image_w, image_h)
# make prediction
yhat = model.predict(image)
# summarize the shape of the list of arrays
print([a.shape for a in yhat])
# define the anchors
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
# define the probability threshold for detected objects
class_threshold = 0.8
boxes = list()
for i in range(len(yhat)):
# decode the output of the network
boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, input_h, input_w)
# correct the sizes of the bounding boxes for the shape of the image
correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
# suppress non-maximal boxes
do_nms(boxes, 0.5)
# define the labels
labels = ["Dress"]
# get the details of the detected objects
v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
# summarize what we found
for i in range(len(v_boxes)):
print(v_labels[i], v_scores[i])
# this line should be commented in prediction without annotation case: just for adhoc case
print(f"Bounding box xmin, xmax, ymin, ymax: {v_boxes[i].xmin}, {v_boxes[i].xmax}, {v_boxes[i].ymin}, {v_boxes[i].ymax}")
# draw what we found: in prediction case
#draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
# draw both annotation and prediction box (in test case)
draw_both(photo_filename, annot_filename, v_boxes, v_labels, v_scores)
|
Yolo3_training/Yolo3_detection.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # Integer Programming
# In the last session, you learned about the JuMP ecosystem and solved simple optimization problems.
# You've probably worked with JuMP to solve even more complicated LPs in 15.081.
# In this session, we'll look into other types of problems, namely, **integer and nonlinear optimization problems**.
#
# In the first half of the session, we'll focus on (Mixed) Integer Programming, which studies optimization problems in which some or all of the variables are restricted to be integers. Integer programs model situations where we need to make discrete decisions, which are frequently encountered in Operations Research.
#
# **REMARK:** The simplest case of IP, namely, Binary Integer Linear Programming is NP-complete. So shouldn't we just go home?
#
#
# ## I. IP Basics
#
# ### I.1. Ryan's Unbounded Knapsack
#
# Every morning, Ryan goes to the coffee shop and gets as much coffee as possible to be productive during the day.
# There are $N$ types of coffee he can choose from, each with different caffeine content $v_i$ and price $w_i$ (you may assume that the coffee shop has an infinite supply of each coffee type).
# Apparently Ryan doesn't want to go bankrupt, so he won't spend more that $C$ dollars.
# How does he choose what coffees to buy to maximize his caffeine intake and hence his productivity?
#
# We can model Ryan's situation as a (pure) integer optimization problem:
#
# \begin{align*}
# \max& \sum_{i=1}^N v_i x_i \\
# \text{s.t.}& \sum_{i=1}^N w_i x_i \leq C \\
# & x_i \in \mathbb{Z}_{\geq 0} \quad \forall i = 1,\ldots,N
# \end{align*}
#
# Variable $x_i$ expresses the number of coffees of type $i$ Ryan will buy. (Ryan's favorite coffee shop only sells one-sized coffee, so all variables are constrained to be integer.)
#
# #### A Toy Example
#
# In particular, let's look into the following toy problem:
#
# \begin{align*}
# \max\:& x + y + 1.5 z \\
# \text{s.t.}\:& x + 2y + 3z \leq 5.5 \\
# & x, y, z \in \mathbb{Z}_{\geq 0}
# \end{align*}
#
# How would you solve this?
# +
using JuMP, Gurobi, LinearAlgebra
# Small toy problem from above
values = [1,1,1.5]
weights = [1,2,3]
C = 5.5
# Another small problem (from JuMP documentation)
# values = [5, 3, 2, 7, 4]
# weights = [2, 8, 4, 2, 5]
# C = 10
function solve_knapsack(values, weights, C)
N = length(values)
knapsackModel=Model(Gurobi.Optimizer)
@variable(knapsackModel, x[1:N]>=0, Int)
@constraint(knapsackModel, capacity, dot(x, weights) <= C)
@objective(knapsackModel, Max, dot(x, values))
print(knapsackModel)
optimize!(knapsackModel)
return value.(x), objective_value(knapsackModel), knapsackModel
end
x_opt, val_opt, model = solve_knapsack(values, weights, C)
println("Optimal solution = $x_opt \nOptimal value = $val_opt")
# -
# #### Modifying the Problem
# During happy hour, the coffee shop sells coffee of type z with a 50% discount. Thankfully, Ryan has already computed the optimal solution before the discount, so he hopes that he can slightly modify his existing model and resolve it, taking advantage of any knowledge he already has.
#
# In the latest versions of JuMP, we can modify and delete constraints as follows:
# +
println("\nModel before modification:")
print(model)
println(" --> Objective value = $(objective_value(model))")
# Now let's modify the model
z = all_variables(model)[3]
con = constraint_by_name(model,"capacity")
set_normalized_coefficient(con, z, 1.5)
println("\nModel after modification:")
print(model)
println()
optimize!(model)
println(" --> Objective value = $(objective_value(model))")
# -
# ### Ι.2. Branch and Bound Tree
#
# Although IP solvers are often viewed as black boxes, in what follows, we'll next try to "open" the box.
#
# For simplicity, we first consider a pure binary optimization problem over two variables (there are only two types of coffee, and Ryan can get at most one cup of each):
#
# \begin{align*}
# \max& \quad v_x x + v_y y\\
# \text{s.t.}& \quad w_x x + w_y y \leq C \\
# & \quad x,y \in \{0,1\}
# \end{align*}
#
# The simple way is just to consider each possible value for $x$ and $y$ and compare the cost.
#
# 
#
# In the general case, this would lead to $2^N$ possible collections of items. After Ryan has examined all of them, he just chooses the best set among the ones he can afford.
#
# Let's visualize this approach as a search tree:
#
# 
#
# It's rooted at what we call the **relaxation**: none of variables have integrality enforced. As we go down leaves of the tree, we pick a variable to **branch** on, and create two descended nodes that fix that variable to one of its possible values. If we follow the tree all the way to the bottom, we reach our enumeration from before.
#
# As we go down the arcs of the tree we restrict our problem more and more, we must have that:
#
# >If node ``q`` is descended from node ``p``, we must have that the optimal cost of subproblem ``q`` is no more than that for node ``p``
#
# This leads us to a powerful tool in solving these enumeration problems:
#
# >If I can show you that the optimal cost for subproblem ``q`` is _less_ than the optimal cost for the original problem, the same is true for any descendent of ``q``.
#
# That is, we can **prune** the tree and safely discard some nodes, kind of like this:
#
# 
# #### Back to our Toy Example
#
# Hopefully we're now familiar with how the branch and bound tree works for IP's with binary variables.
# Let's turn back to our toy example that contains nonnegative integer variables and see how the branch and bound tree would be built.
#
# \begin{align*}
# \max\:& x + y + 1.5 z \\
# \text{s.t.}\:& x + 2y + 3z \leq 5.5 \\
# & x, y, z \in \mathbb{Z}_{\geq 0}
# \end{align*}
#
# * First, we solve the LP relaxation and get $(x^*,y^*,z^*) = (5.5,0,0)$.
# * This isn't integer feasible, so we branch on $x$, which is the only non-integer variable. We construct two subproblems:
# - Subproblem 1 is:
# \begin{align*}
# \max\:& x + y + 1.5 z \\
# \text{s.t.}\:& x + 2y + 3z \leq 5 \\
# & x \leq 5
# & x, y, z \in \mathbb{Z}_{\geq 0}
# \end{align*}
# The optimal solution to this subproblem is obtained for $(x^*,y^*,z^*) = (5,0,0)$ and is integer feasible with an optimal cost of $5.$ This is the best solution we've found so far, so we update our lower bound.
# - Subproblem 2 is:
# \begin{align*}
# \max\:& x + y + 1.5 z \\
# \text{s.t.}\:& x + 2y + 3z \leq 5 \\
# & x \geq 6
# & x, y, z \in \mathbb{Z}_{\geq 0}
# \end{align*}
# This is infeasible.
# * We've exhausted the tree, so we have our optimal solution!
#
# The branch-and-bound scheme can end up solving many subproblems, so for it to work well, we need to *prune* large portions of the tree.
# ### Ι.3. Branch and Bound Algorithm
# We'll keep track of a global _lower bound_ $LB$ for our problem. Each node ``q`` will have an upper bound $UB_q$ that it inherents from its parent. If we get to the point where we have solved all subproblems (or, ideally, pruned off a great deal of them), we know that we're optimal. To do this we'll also keep track of a list $L$ of subproblems left to solve; initially, it's just the relaxation. The procedure is:
#
# While $L$ is not empty, pick a subproblem ``q`` out of our list $L$ and solve it.
# 1. ``if`` ``q`` is infeasible, ``continue``
# 2. ``if`` the solution is integer feasible, update the lower bound $LB$ if the cost is higher than what we had before
# 3. ``if`` the relaxation value is less than our global $LB$ ``continue``
# 4. ``else`` pick a non-integer variable $i$ and _branch_ by adding two subproblems to $L$:
# * One with $x_i = 0$
# * Another with $x_i = 1$
#
# Branch-and-bound is sometimes called an _implicit enumeration_ scheme because of step 3: we avoid solving any subproblems that we can prove won't produce the optimal solution.
# ### I.4. Ιmplementation of the Branch and Bound Algorithm in Gurobi
#
# The "magic" of modern MIP solvers largely comes down to pruning massive portions of the tree. Some of this is essentially beyond your control, but there are certain things which you can do. This is the topic of Part II of this IP crash course.
#
# In what follows, we focus on **Gurobi**, a commercial solver that solves Mixed Integer LPs/QPs/QCQPs. (You can get the full picture of what solvers JuMP supports and what types of problems you can solve with each of them by visiting http://www.juliaopt.org/JuMP.jl/latest/installation/ and scrolling a bit down.)
#
# What are the ingredients of Gurobi's branch and bound implementation?
# - **Presolve**: reduce problem size via removal of redundant constraints and variable substitutions.
# - **Sophisticated Implementations of Continuous Optimization Methods**: simplex-based, barrier-based.
# - **Cutting Planes**: over the course of the solution process, add cuts that tighten the model and remove potential undesired fractional solution. Here is an example:
# - Consider the constraint $6 x_1 + 5 x_2 + 7 x_3 + 4 x_4 + 5 x_5 \leq 15$, where $x_1$ through $x_5$ are restricted to be binary.
# - Suppose in addition that we have just solved an LP relaxation and that these variables take the following values in this LP relaxation: $x_1 = 0, x_2 = 1, x_3 = x_4 = x_5 = \frac{3}{4}$.
# - This undesirable solution can be excluded with the following observation: since $7 + 4 + 5 = 16 > 15$, it is not possible that $x_3 = x_4 = x_5 = 1$, and hence that the following new inequality is a valid addition to the given MIP: $x_3 + x_4 + x_5 \leq 2$. Since $\frac{3}{4} + \frac{3}{4} + \frac{3}{4} = \frac{9}{4} > 2$, the new inequality cuts off the current (fractional and therefore infeasible) solution, but importantly does not cut off any feasible integer solutions.
# - Consequently, the relaxations solved from this point are of higher quality.
# - **Heuristics**: e.g., randomized rounding.
# - **Branch Variable Selection**
# ### Ι.5. Understanding Gurobi's Output
# First, it solves the LP relaxation and reports back:
# ```
# Root relaxation: objective 4.014179e+00, 18 iterations, 0.00 seconds
# ```
# Now it explores the branch-and-bound tree, and updates us as it goes along. Let's look at just the first line:
# ```
# Nodes | Current Node | Objective Bounds | Work
# Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time
#
# 0 0 4.01418 0 7 2.35937 4.01418 70.1% - 0s
# ```
# We see that the information is broken down into four main columns:
#
# 1. ``Nodes``: Global node information
# * how many nodes have we looked at
# * how many do we have in our queue
# 2. ``Current Node``
# * objective
# * depth in the tree
# * number of noninteger variables in the solution
# 3. ``Objective Bounds``
# * Best incumbent (lower bound)
# * node upper bound
# * the gap between the two
# 4. ``Work``
# * average simplex iterations per node
# * total elapsed time
#
# Finally, we get a neat summary of the cutting planes Gurobi found useful:
# ```
# Cutting planes:
# Gomory: 3
# Cover: 2
# MIR: 5
# ```
# All told, we explored 190 nodes, much less than the $2^{100}$ we were worried about. All this only took 698 simplex iterations and 0.21 seconds.
#
# Now what about those ``H``s that appear? That tells us that Gurobi ran a heuristic and found a new best solution. You can see for yourself, as the incumbent value increases while the bound remains the same:
# ```
# Nodes | Current Node | Objective Bounds | Work
# Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time
#
# 0 0 4.01418 0 7 2.35937 4.01418 70.1% - 0s
# H 0 0 3.3780464 4.01418 18.8% - 0s
# ```
# You'll also sometimes see a ``*`` instead of the ``H``, which says that the feasible solution came from branching instead of heuristics.
# ## ΙΙ. Advanced IP
#
# Now that we've mastered the basics, we'll look into more advanced stuff; we'll try to interact with the solver and intervene in the solving process.
#
# ### II.1. Lazy Constraints in Ryan's Unbounded Knapsack
#
# Ryan is willing to sacrifice some caffeine intake to switch between espresso, cold brew, and flat white.
# In particular, he wants the maximum difference between the selected quantities of any two types of coffee to be no more that $\mu$.
# This requirement leads to $2 {n \choose 2}$ constraints of the form: $x_i - x_j \leq \mu \ \forall i\neq j$.
# Instead of enumerating all of them and adding them a priori to the model, we may use a technique known as **lazy constraints**.
#
# In particular, every time our solver reaches a new solution, for example with a heuristic or by solving a problem at a node in the branch and bound tree, it will give the user the chance to provide constraint(s) that would make the current solution infeasible.
#
#
# #### Reasons to Use Lazy Constraints
#
# - The model involves a large number of constraints, many of which will most likely be redundant or non-binding near an optimal solution. In many cases, it can even be intractable to generate all the constraints.
# - In some cases, we may be unable to identify all constraints at the time the model is specified. The feasibility and optimality cuts generated during Benders decomposition fall into this category; we discover them by solving one or more subproblems at certain points in the search for the solution to the master problem.
#
#
# #### Implementing Lazy Constraints
#
# MIP solvers implement lazy constraints via a technique known as **solver callback**.
# JuMP currently supports **solver-independent callbacks** for CPLEX, GLPK, and Gurobi.
#
# **REMARK:** Part of the major changes JuMP underwent between versions 0.18 and 0.19 was the removal of solver-independent callbacks. Support for solver-independent callbacks was restored later.
#
# There are three important steps to providing a lazy constraint callback in JuMP.
#
# - **Callback function**: a function that will analyze the current solution. This function takes as argument a reference to the callback management code inside JuMP. Currently, the only thing we may query in a callback is the primal value of the variables using the function "callback_value". If we need any other information, we may use a **solver-dependent** callback instead (for an example, look here https://discourse.julialang.org/t/solver-dependent-callbacks-in-jump-how-to-do-it-right/32130).
#
# - **Lazy constraint**: after analyzing the current solution, we generate a new constraint using the
#
# "con = @build_constraint(...)"
# macro and submit it to the model via the MOI interface
#
# "MOI.submit(model, MOI.LazyConstraint(cb), con)."
#
# - **Lazy constraint callback**: we again use the MOI interface to tell the solver which function should be used for lazy constraint generation
#
# "MOI.set(model, MOI.LazyConstraintCallback(), my_callback)."
# +
function solve_fair_knapsack(values, weights, C, max_diff)
N = length(values)
fairKnapsackModel=Model(Gurobi.Optimizer)
@variable(fairKnapsackModel, x[1:N]>=0, Int)
@constraint(fairKnapsackModel, dot(x, weights) <= C)
@objective(fairKnapsackModel, Max, dot(x, values))
lazy_called = false
function my_callback(cb) # what is cb? what data can we access during callback?
lazy_called = true
x_vals = callback_value.(Ref(cb), x)
# First, let's find a violated constraint!
i_max, i_min = argmax(x_vals), argmin(x_vals)
con = @build_constraint(x[i_max] - x[i_min] <= max_diff)
MOI.submit(fairKnapsackModel, MOI.LazyConstraint(cb), con)
end
MOI.set(fairKnapsackModel, MOI.LazyConstraintCallback(), my_callback)
print(fairKnapsackModel)
println("\n*** Callback called? $lazy_called\n\n")
optimize!(fairKnapsackModel)
println("\n*** Callback called? $lazy_called\n\n")
return value.(x), objective_value(fairKnapsackModel), fairKnapsackModel
end
max_diff = 2
xf_opt, valf_opt, model = solve_fair_knapsack(values, weights, C, max_diff)
println("Optimal solution = $xf_opt \nOptimal value = $valf_opt")
# -
# ### II.2. Callback Types
#
# JuMP 0.21 supports three types of callbacks:
#
# - **Lazy constraints**: See previous section.
#
#
# - **User cuts**: User cuts provide a way for the user to tighten the LP relaxation using problem-specific knowledge that the solver cannot infer from the model and hence cannot utilize when generating cuts like the ones we saw earlier (Gurobi's cutting planes component).
#
# MOI.submit(model, MOI.UserCut(cb), con)
#
# MOI.set(model, MOI.UserCutCallback(), my_callback_function)
#
# - Importantly, user cuts **should not change the set of integer feasible solutions** and can only remove fractional solutions; if we add a cut that removes an integer solution, the solver may return an incorrect solution. **That's the main difference between user cuts and lazy constraints.**
#
# - Just like with lazy constraints, when a MIP solver reaches a new node in the branch-and-bound tree, it will give the user the chance to provide cuts to make the current relaxed (fractional) solution infeasible in the hopes of obtaining an integer solution.
#
# - Generally speaking, solvers can add general purpose cuts (e.g., CG, split, MIR) and structure specific cuts (e.g., knapsack cover, clique) better than we can. However, we are better at adding problem specific cuts. Therefore, when trying to improve bound quality, a good place to start is identifying problem structure which a solver hasn't found, and exploiting this problem structure.
#
#
#
# - **Heuristic solutions**: By heuristic solution we refer to the method that the solver applies during the solution process to find integer solutions quicker than plain branch-and-bound would and tighten the bound, allowing us to fathom nodes quicker and to tighten the integrality gap.
#
# status = MOI.submit(model, MOI.HeuristicSolution(cb), [x], [floor(Int, x_val)]) # accept/reject/unknown
#
# MOI.set(model, MOI.HeuristicCallback(), my_callback_function)
#
# - Solvers' heuristics are based on neighborhood search (e.g., flipping binary variables, fix some variables and solve a smaller MILP), rounding or "polishing" existing solutions.
#
# - This callback enables us to add heuristics of our own if we have some special insight into the problem structure that the solver is not aware of. For instance, if we're solving a knapsack problem, one simple heuristic is to add a **greedy solution** where we iteratively add the best available item to the sack until you run out of room. This will often be a very good solution, and is a simple example of a problem-specific heuristic scheme.
#
#
# - Previous versions of JuMP also supported **informational callbacks**, which were used to track solver progress without actually changing the algorithm by adding cuts or heuristic solutions.
#
#
#
# # Credit + References
# This material is adapted from previous versions of this course, which have been designed by numerous ORC students.
#
# Some of the sources used to create this year's version include:
# - JuMP documentation
# - Gurobi documentation
# - https://orinanobworld.blogspot.com/2012/08/user-cuts-versus-lazy-constraints.html
|
6+7_julia_and_jump/Session7-1-Integer_complete.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from nustar_gen.radial_profile import find_source, make_radial_profile, optimize_radius_snr
from nustar_gen.wrappers import make_image
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
import numpy as np
# +
from nustar_gen import info
obs = info.Observation(path='../data/', seqid='30001143002')
obs.exposure_report()
obs.science_files
for mod in ['A', 'B']:
for file in obs.science_files[mod]:
hdr = fits.getheader(file)
print(f"{file}, exposure: {1e-3*hdr['EXPOSURE']:20.4} ks")
print()
# -
mod='B'
infile = obs.science_files[mod][0]
full_range = make_image(infile, elow = 3, ehigh = 80, clobber=True)
coordinates = find_source(full_range, show_image = True, filt_range=3)
# +
# Get the WCS header and convert the pixel coordinates into an RA/Dec object
hdu = fits.open(full_range, uint=True)[0]
wcs = WCS(hdu.header)
# The "flip" is necessary to go to [X, Y] ordering from native [Y, X] ordering, which wcs seems to require
world = wcs.all_pix2world(np.flip(coordinates), 0)
ra = world[0][0]
dec = world[0][1]
target = SkyCoord(ra, dec, unit='deg', frame='fk5')
print(target)
obj_j2000 = SkyCoord(hdu.header['RA_OBJ'], hdu.header['DEC_OBJ'], unit = 'deg', frame ='fk5')
# How far are we from the J2000 coordinates? If <15 arcsec, all is okay
sep = target.separation(obj_j2000)
print(sep)
# +
# Now the radial image parts.
# Make the radial image for the full energy range (or whatever is the best SNR)
full_range = make_image(infile, elow = 3, ehigh = 80, clobber=True)
rind, rad_profile, radial_err, psf_profile = make_radial_profile(full_range, show_image=False,
coordinates = coordinates)
# +
# Pick energy ranges that you want to check.
# Note that this formalism breaks down when the source isn't detected, so use your best judgement here.
# Below should be used as a "best guess" when choosing a radius for spectral extraction.
# For the 3-20 keV case, the source dominates out the edge of the FoV (and the assumptoons about the PSF
# start to break down in the fit).
# This a soft source (LMC X-1), so for 20-30 keV we already see that we need to restrict the radius that we
# use so that we're not just adding noise to the spectrum.
pairs = [[3, 20], [20, 30], [30, 40], [40, 50], [50, 80]]
coordinates = find_source(full_range, show_image = False)
for pair in pairs:
test_file = make_image(infile, elow = pair[0], ehigh = pair[1], clobber=True)
rind, rad_profile, radial_err, psf_profile = make_radial_profile(test_file, show_image=False,
coordinates = coordinates)
rlimit = optimize_radius_snr(rind, rad_profile, radial_err, psf_profile, show=True)
print('Radius of peak SNR for {} to {} keV: {}'.format(
pair[0], pair[1], rlimit))
# -
import regions
import astropy.units as u
source_reg = [regions.CircleSkyRegion(center=target, radius=60*u.arcsec)]
outfile = obs._evdir+f'/src{mod}01.reg'
regions.write_ds9(source_reg, outfile, radunit='arcsec')
|
notebooks/OptimalRadius_30001143002B01.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:working]
# language: python
# name: conda-env-working-py
# ---
import numpy as np
import pandas as pd
import pandas_profiling
#import pymysql need this if I actaully use the code to talk to a DB
import configparser
tripData1 = pd.read_csv('/dltraining/datasets/Trip Data/trip_data_1.csv',
nrows=1000)
tripData1.head()
# +
tripFare1 = pd.read_csv('/dltraining/datasets/Trip Fare/trip_fare_1.csv',
nrows=1000)
#remove the leading spaces
tripFare1.columns = [
'medallion', 'hack_license', 'vendor_id', 'pickup_datetime',
'payment_type', 'fare_amount', 'surcharge', 'mta_tax', 'tip_amount',
'tolls_amount', 'total_amount'
]
# -
tripFare1.head()
# # Database Connection
config = configparser.ConfigParser()
config
config.read("/dltraining/New-York-Taxi-Analysis/Secrets/passwords.ini")
config.sections()
config.options('Database_NYTaxi')
# +
## Database no longer exists, this is a demo only
# Assigning values to variable
host = config.get('Database_NYTaxi', 'host')
port = config.get('Database_NYTaxi', 'port')
dbname = config.get('Database_NYTaxi', 'dbname')
user = config.get('Database_NYTaxi', 'user')
password = config.get('Database_NYTaxi', 'securitytoken')
print(
"The Database %s is found at %s on port %s. It has a user of %s, with a password of %s"
% (dbname, host, port, user, password))
# -
config['Database_NYLocations']['securitytoken']
# +
# A different take on accessing values in the same way that python does for values [section][option]
host = config['Database_NYLocations']['host']
port = config['Database_NYLocations']['port']
dbname = config['Database_NYLocations']['dbname']
user = config['Database_NYLocations']['user']
password = config['Database_NYLocations']['securitytoken']
print(
"The Database \"%s\" is found at \"%s\" on port %s. \n\n"
"It has a user of \"%s\" with a password of -> %s \n\n"
% (dbname, host, port, user, password))
# -
# create a connection to the database
conn = pymysql.connect(host, user=user, port=port, passwd=password, db=dbname)
# create object to talk to the db
cursorObject = conn.cursor()
# check to see if there are any tables
sqlQuery = "show tables"
cursorObject.execute(sqlQuery)
for x in cursorObject:
print(x)
sqlQuery = "CREATE TABLE tripData(id INT AUTO_INCREMENT PRIMARY KEY, medallion char(32), hack_license char(32), vendor_id varchar(32), rate_code int, store_and_fwd_flag char(5), pickup_datetime datetime, dropoff_datetime datetime , passenger_count int, trip_time_in_secs int, trip_distance float(10,5), pickup_longitude float(12, 7), pickup_latitude float(12, 7), dropoff_longitude float(12, 7), dropoff_latitude float(12, 7))"
cursorObject.execute(sqlQuery)
sqlQuery = "CREATE TABLE fareData(id INT AUTO_INCREMENT PRIMARY KEY, medallion char(32), hack_license char(32), vendor_id varchar(32), pickup_datetime datetime, payment_type varchar(32), fare_amount float(10,3), surcharge float(10,3), mta_tax float(10,3), tip_amount float(10,3), tolls_amount float(10,3), total_amount float(10,3))"
cursorObject.execute(sqlQuery)
sqlQuery = "show tables"
cursorObject.execute(sqlQuery)
for x in cursorObject:
print(x)
tripFare1.to_sql(con=conn, name='fareData_1', if_exists='append', index=False)
sqlQuery = "select count(*) from fareData"
cursorObject.execute(sqlQuery)
for x in cursorObject:
print(x)
sqlQuery = "ALTER TABLE NYCTaxiDB.fareData AUTO_INCREMENT =1"
cursorObject.execute(sqlQuery)
for x in cursorObject:
print(x)
sqlQuery = "SELECT name FROM sqlite_master WHERE type='table' AND name='fareData_1'"
cursorObject.execute(sqlQuery)
for x in cursorObject:
print(x)
# close the connection
conn.close()
|
Working Code/02 Read Data from a SQL instance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.rcParams['figure.figsize'] = [20, 15]
## Build geno dict for text replacement
df = pd.read_excel('geno-database.xlsx')
# converting df to series, and then to dict
geno_dict = df.set_index('ID')['Shortname']
df1 = pd.read_csv('monthly_data_0.txt', sep='\t')
df2 = pd.read_csv('mut_arrival.txt', sep='\t')
df2['time'] /= 365 # scaling down time to in unit of year
# Text replacement
df2['to'] = df2['to'].replace(geno_dict)
df2['from'] = df2['from'].replace(geno_dict)
df2
# Selecting rows with regex
filtered = df2[df2['to'].str.contains('.....Y2.')]
# +
#df1['year']=2000
# +
scale_x = 365
ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x/scale_x))
fig1 = plt.figure(figsize=(20,10))
ax1 = fig1.add_subplot(111)
ax1.plot(df1['current_time'], df1['blood_slide_prev'])
ax1.xaxis.set_major_locator(ticker.MultipleLocator(1825))
ax1.xaxis.set_major_formatter(ticks_x)
# -
filtered.to_csv('filtered.txt', sep='\t', encoding='utf-8')
# # %matplotlib notebook
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(filtered['time'], filtered['to']) # filtering the double-res type
#ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(df2['time'], df2['to']) # filtering the double-res type
#ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
|
Archives/190724-new-reporter/mut-arrival-report.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import seaborn as sns
from glob import glob
import pathlib
from IPython.core.display import display, HTML
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
import itertools
# -
DATA_DIR = '../data/raw'
df = {}
test_df = None
for fpath in glob(DATA_DIR + '/*.csv'):
p = pathlib.PurePath(fpath)
fname = p.parts[-1]
if fname == 'test.csv': # Create test DataFrame separately
test_df = pd.read_csv(fpath)
elif fname == 'sample_submission.csv': # Ignore the sample_submission.csv
pass
else:
df[p.parts[-1]] = pd.read_csv(fpath)
NUM_TOPICS = len(df)
# ## Data structure
# What do the files look like? (Yes, this is more or less explained on the Kaggle data page for the competition, but good habit to have)
df['biology.csv'].head(5)
display(HTML('<blockquote>' + df['biology.csv'].content[0] + '</blockquote>'))
# Content looks to be just the stackoverflow question. This could be be confirmed more careful analysis but once again the Kaggle data description page clarifies it.
# ## Tag Data Exploration
post_tag_counts = {k: df[k]['tags'].apply(lambda x: len(x.split(' '))) for k in df.keys()}
ax = None
for k, v in post_tag_counts.items():
sns.distplot(v, label=k, kde=False)
plt.title('tags/post Distribution');
plt.gca().legend(loc='upper right');
# +
tags = {k: pd.Series(e for r in df[k]['tags'] for e in r.split(' ')).drop_duplicates() for k in df.keys()}
tag_tokens = {k: pd.Series(e for r in tags[k] for e in r.split('-')).drop_duplicates() for k in df.keys()}
sns.barplot(
x='count',
y='topic',
hue='type',
data=pd.DataFrame({'type': t, 'topic': k, 'count': len(d[k])}
for t, d in zip(('tag', 'tag-token'), (tags, tag_tokens))
for k in d.keys())
);
# +
tfidf_content_vectorizer = {k: TfidfVectorizer() for k in df.keys()}
tfidf_content_matrix = {}
for k, vectorizer in tfidf_content_vectorizer.items():
tfidf_content_matrix[k] = vectorizer.fit_transform(df[k]['content'])
tag_content_tfidf_value = {
k: pd.DataFrame([
{'token': t, 'value': v} for t, v in tfidf_content_vectorizer[k].vocabulary_.items()
]) for k in df.keys()
}
plt.figure(figsize=(12, 2.25 * NUM_TOPICS))
for i, k in enumerate(df.keys()):
ax = plt.subplot(NUM_TOPICS, 3, 3 * i + 1)
plt.title('%s - Content tfidf value comparison' % k);
sns.distplot(tag_content_tfidf_value[k]['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tokens');
plt.xticks(rotation='vertical');
plt.subplot(NUM_TOPICS, 3, 3 * i + 2, sharex=ax)
sns.distplot(pd.merge(tags[k].to_frame(name='token'), tag_content_tfidf_value[k], on='token')['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tags');
plt.xticks(rotation='vertical');
plt.subplot(NUM_TOPICS, 3, 3 * i + 3, sharex=ax)
sns.distplot(pd.merge(tag_tokens[k].to_frame(name='token'), tag_content_tfidf_value[k], on='token')['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tag tokens');
plt.xticks(rotation='vertical');
# +
tfidf_title_vectorizer = {k: TfidfVectorizer() for k in df.keys()}
tfidf_title_matrix = {}
for k, vectorizer in tfidf_title_vectorizer.items():
tfidf_title_matrix[k] = vectorizer.fit_transform(df[k]['title'])
tag_title_tfidf_value = {
k: pd.DataFrame([
{'token': t, 'value': v} for t, v in tfidf_title_vectorizer[k].vocabulary_.items()
]) for k in df.keys()
}
plt.figure(figsize=(12, 2.25 * NUM_TOPICS))
for i, k in enumerate(df.keys()):
ax = plt.subplot(NUM_TOPICS, 3, 3 * i + 1)
plt.title('%s - Title tfidf value comparison' % k);
sns.distplot(tag_title_tfidf_value[k]['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tokens');
plt.xticks(rotation='vertical');
plt.subplot(NUM_TOPICS, 3, 3 * i + 2, sharex=ax)
sns.distplot(pd.merge(tags[k].to_frame(name='token'), tag_title_tfidf_value[k], on='token')['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tags');
plt.xticks(rotation='vertical');
plt.subplot(NUM_TOPICS, 3, 3 * i + 3, sharex=ax)
sns.distplot(pd.merge(tag_tokens[k].to_frame(name='token'), tag_title_tfidf_value[k], on='token')['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tag tokens');
plt.xticks(rotation='vertical');
# -
all_tfidf_vectorizer = TfidfVectorizer().fit(
pd.concat([v['content'] for v in df.values()] + [v['title'] for v in df.values()])
)
# +
tag_all_tfidf_value = pd.DataFrame([
{'token': t, 'value': v} for t, v in all_tfidf_vectorizer.vocabulary_.items()
])
plt.figure(figsize=(12, 2.25 * NUM_TOPICS))
for i, k in enumerate(df.keys()):
ax = plt.subplot(NUM_TOPICS, 3, 3 * i + 1)
plt.title('%s - All tfidf value comparison' % k);
sns.distplot(tag_all_tfidf_value['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tokens');
plt.xticks(rotation='vertical');
plt.subplot(NUM_TOPICS, 3, 3 * i + 2, sharex=ax)
sns.distplot(pd.merge(tags[k].to_frame(name='token'), tag_all_tfidf_value, on='token')['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tags');
plt.xticks(rotation='vertical');
plt.subplot(NUM_TOPICS, 3, 3 * i + 3, sharex=ax)
sns.distplot(pd.merge(tag_tokens[k].to_frame(name='token'), tag_all_tfidf_value, on='token')['value'], kde=False)
plt.tight_layout()
plt.ylabel('# of tag tokens');
plt.xticks(rotation='vertical');
# -
tfidf_types = ((tfidf_content_vectorizer, 'content'),
(tfidf_title_vectorizer, 'title'),
(all_tfidf_vectorizer, 'all topics'))
tmp_data = [
e
for k in df.keys()
for e in [{
'topic': k + ' (tags)',
'count': len(tags[k]),
'type': 'total',
},{
'topic': k + ' (tag-tokens)',
'count': len(tag_tokens[k]),
'type': 'total',
}]
]
for k, tfidf_type in itertools.product(df.keys(), tfidf_types):
tfidf = tfidf_type[0]
corpus_type = tfidf_type[1]
# There are more efficient ways to do this
try:
vocab = pd.DataFrame([
{'token': t, 'value': v} for t, v in tfidf[k].vocabulary_.items()
])
except TypeError:
vocab = pd.DataFrame([
{'token': t, 'value': v} for t, v in tfidf.vocabulary_.items()
])
tmp_data.extend([{
'topic': k + ' (tags)',
'count': len(pd.merge(tags[k].to_frame(name='token'), vocab, on='token')),
'type': corpus_type,
},{
'topic': k + ' (tag-tokens)',
'count': len(pd.merge(tag_tokens[k].to_frame(name='token'), vocab, on='token')),
'type': corpus_type,
}])
sns.barplot(y='topic', x='count', hue='type', data=pd.DataFrame(tmp_data));
plt.tight_layout();
|
notebooks/Data Exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module 1 Required Coding Activity
# Introduction to Python Unit 1
#
# This is an activity from the Jupyter Notebook **`Practice_MOD01_1-2_IntroPy.ipynb`** which you may have already completed.
#
# > **NOTE:** This program requires print output and code syntax used in module 1
#
# | Some Assignment Requirements |
# |:-------------------------------|
# | **NOTE:** This program requires `print` output and using code syntax used in module 1 such as variable assignment, `input`, `in` keyword, `.lower()` or `.upper()` method |
#
#
# ## Program: Allergy Check
#
# 1. **[ ]** get user **`input`** for categories of food eaten in the last 24 hours
# save in a variable called **input_test**
# *example input*
# [ ](https://1drv.ms/i/s!Am_KPRosgtaij65qzFD5CGvv95-ijg)
#
# 2. **[ ]** print **`True`** if "dairy" is in the **input_test** string
# **[ ]** Test the code so far
#
# 3. **[ ]** modify the print statement to output similar to below
# *example output*
# [ ](https://1drv.ms/i/s!Am_KPRosgtaij65rET-wmlpCdMX7CQ)
# Test the code so far trying input including the string "dairy" and without
#
#
# 4. **[ ]** repeat the process checking the input for "nuts", **challenge** add "Seafood" and "chocolate"
# **[ ]** Test your code
#
#
# 5. **[ ] challenge:** make your code work for input regardless of case, e.g. - print **`True`** for "Nuts", "NuTs", "NUTS" or "nuts"
#
# +
# Create Allergy check code
# [ ] get input for input_test variable
input_test = input("Enter food eaten in the past 24 hours: ")
# [ ] print "True" message if "dairy" is in the input or False message if not
print("It is ", "dairy" in input_test.lower(), "that", input_test.lower(), "contains 'dairy'")
# [ ] print True message if "nuts" is in the input or False if not
print("It is ", "nuts" in input_test.lower(), "that", input_test.lower(), "contains 'nuts'")
# [ ] Challenge: Check if "seafood" is in the input - print message
print("It is ", "nuts" in input_test.lower(), "that", input_test.lower(), "contains 'nuts'")
# [ ] Challenge: Check if "chocolate" is in the input - print message
print("It is ", "chocolate" in input_test.lower(), "that", input_test.lower(), "contains 'chocolate'")
# -
# Submit this by creating a python file (.py) and submitting it in D2L. Be sure to test that it works.
|
Python Absolute Beginner/Module_1_Required_Code_IntroPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Calibrating Particle Number for Space Charge in Synergia
#
# I am resurrecting a previous callibration that was done to test various bunch constructors in Synergia. This notebook will repeat as similar exercise in order to validate the new spectral space charge solver we have implemented in Python. The problem remains simulating a beam expanding in a drift.
#
# **Procedure**
#
# 1. Construct a drifting beam of known current and simulate it with space charge
# 2. Look at the envelope evolution and compare with theory
# 3. Repeat for 2.5D solver, frozen space charge (Bassetti-Erskine), and 3D open solver
#
# **Particle number convention**
#
# 1. The current, I, of interest is that of the beam (not average current)
# 2. The beam consists of N particles with charge $Q = N e$ (Z = 1 for protons/electrons)
# 2. Assume beam has length l, velocity $\beta$c, and particles are uniformly distributed
# 3. The current is then: $I = \frac{Q \beta c}{l}$, and the corresponding number needed is $N = \frac{I l}{\beta c e}$
# 4. The number of particles per unit length is $\frac{N}{l} = \frac{I}{\beta c e}$
#
# **Calculated values and comparison**
#
# With this method, I calculated a proton number scaling of $2.85769 \times 10^8 \frac{p}{m \cdot mA}$, which at 14 mA equates to $4.000763434 \times 10^{9}$ protons per meter.
# This corresponds to $5.01 \times 10^{10}$ protons for a completely filled ring (at 4.3 mA), in good agreement with the numbers provided to me by Sasha.
# ## Imports
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy
import tables
from mpi4py import MPI
# +
try:
import rssynergia
except ImportError:
# !pip -q install git+git://github.com/radiasoft/rssynergia
from rssynergia.base_diagnostics import read_bunch
from rssynergia.base_diagnostics import workflow
from rssynergia.base_diagnostics import lfplot
from rssynergia.base_diagnostics import latticework
from rssynergia.base_diagnostics import basic_calcs
from rssynergia.base_diagnostics import pltbunch
from rssynergia.base_diagnostics import elliptic_sp
from rssynergia.base_diagnostics import singleparticle
from rssynergia.base_diagnostics import options
from rssynergia.base_diagnostics import diagplot
from rssynergia.base_diagnostics import utils
from rssynergia.elliptic import elliptic_beam6d
from rssynergia.standard import standard_beam6d
import synergia
import synergia_workflow
# -
# ## Test problem - Expanding beam in a drift
#
# Benchmarking Parameters:
#
# - 4 m drift (define actively - no need for MADX lattice)
#
# Beam Parameters:
#
# - 14 mA, 2.5 MeV proton beam
# - RMS x and y = 1 mm, emittance = 0.3 mm-mrad normalized
# - 1 million macro particles
# - 64x64 mesh (2D explicit)
# - 1 cm step sizes (perhaps do a 2 cm drift with 2 slice per and do 200 turns)
# - Bunch length should be a few mm - 5 mm perhaps
# +
# Create and populate a Synergia options object
# File I/O
opts = synergia_workflow.Options("zc_drift")
opts.add("output_dir","sc_drift", "Directory for output files", str)
opts.relpath = opts.output_dir
workflow.make_path(opts.output_dir)
opts.add("verbosity", 1, "Verbosity of propagation", int)
opts.add("bunch_file","myBunch.txt","txt file for bunch particles", str)
# Define reference particle to be a proton at 2.5 MeV
total_energy = synergia.foundation.pconstants.proton_mass + 2.5e-3 # [GeV]
four_momentum = synergia.foundation.Four_momentum(synergia.foundation.pconstants.proton_mass, total_energy)
reference_particle = synergia.foundation.Reference_particle(synergia.foundation.pconstants.proton_charge,four_momentum)
opts.gamma = reference_particle.get_gamma()
opts.beta = reference_particle.get_beta()
# beam (physical)
opts.add("emit",9.74e-6, "H0 value corresponding to real sigma horizontal emittance of 0.3 mm-mrad", float)
opts.add("dpop", 0.0, "Delta-p/p spread", float)
opts.add("real_particles", 1.0e11, "Number of real particles", float)
opts.emit_n = 0.3*1.e-6 # 0.3 mm-mrad normalized emittance
opts.emits = [basic_calcs.calc_geometric_emittance(opts.emit_n,opts.beta,opts.gamma)]
dpop = 0.0
# beam (numerical)
opts.add("seed", 1234, "Seed value", int)
opts.add("macro_particles", 50000, "Number of macro particles", int)
opts.add("spacecharge", True, "whether space charge is on", bool)
#opts.add("solver", "2dbassetti-erskine", "other solvers are available", str)
opts.add("solver", "2dopen-hockney", "solver to use, '2dopen-hockney','3dopen-hockney', '2dbassetti-erskine'", str)
# Lattice
opts.add("steps_per_element",5,"Number of steps per element", int)
opts.add("turns",30,"Number of turns", int)
opts.add("checkpointperiod", 15, "Interval for creating checkpoints", int)
opts.add("radius", 0.5, "aperture radius [m]", float)
opts.add("stepper", "splitoperator", "Simulation stepper, either 'independent','elements','splitoperator','soelements'", str)
# +
comm = synergia.utils.Commxx()
myrank = comm.get_rank()
mpisize = comm.get_size()
verbose = opts.verbosity>0
#Construct the lattice
ol = 0.02 #2cm drift
steps_per_element = 2 #2 steps per drift
o = synergia.lattice.Lattice_element("drift", "o")
o.set_double_attribute("l", ol)
lattice = synergia.lattice.Lattice("test", synergia.lattice.Mad8_adaptor_map())
# Add copies of the lattice elements to the fodo lattice
lattice.append(o)
# Define reference particle to be a proton at 2.5 MeV
total_energy = synergia.foundation.pconstants.proton_mass + 2.5e-3 # 2.5 MeV KE
four_momentum = synergia.foundation.Four_momentum(synergia.foundation.pconstants.proton_mass, total_energy)
reference_particle = synergia.foundation.Reference_particle(synergia.foundation.pconstants.proton_charge,
four_momentum)
opts.gamma = reference_particle.get_gamma()
opts.beta = reference_particle.get_beta()
lattice.set_reference_particle(reference_particle)
#force these for test run
gridx = 32
gridy = 32
gridz = 1
grid = [gridx, gridy, gridz]
opts.gridx = gridx
opts.gridy = gridy
opts.gridz = gridz
n_ppc = 100 #n_ppc particles per transverse cell
n_macro = n_ppc*opts.gridx*opts.gridy
opts.macro_particles = n_macro
outputdir = 'SC_drift_test-2'
opts.output_dir = outputdir
opts.relpath = opts.output_dir
workflow.make_path(outputdir)
opts.comm_divide = 4
if opts.comm_divide:
sc_comm = synergia.utils.Commxx_divider(opts.comm_divide, False)
else:
sc_comm = synergia.utils.Commxx(True)
#sc_comm = synergia.utils.Commxx(True)
if opts.solver == "2dopen-hockney":
coll_operator = synergia.collective.Space_charge_2d_open_hockney(sc_comm, grid)
#
map_order = 1
nsteps_per_element = 2
opts.steps_per_element = nsteps_per_element
stepper = synergia.simulation.Split_operator_stepper_elements(lattice, map_order, coll_operator, opts.steps_per_element)
lattice_simulator = stepper.get_lattice_simulator()
opts.lattice = lattice
opts.lattice_simulator = lattice_simulator
# -
# ### Construct a KV bunch with uniform longitudinal density
#
# We can't use Synergia's functions because there is no closed orbit for our drift "lattice." Instead, I must generate my own KV distribution. The KV distribution (in 4D) is defined by two particular properties:
#
# 1. All particles have the same value of J (e.g. the same Courant Synder invariant)
# 2. Particles are evenly distributed in an ellipse in phase space.
#
# Define the envelope of the beam to be $a$, containing th entire bunch distribution in the x(or y) plane for a circular KV bunch. Some other properties of KV beams:
#
# 1. $< x^2 > = \frac{1}{4}a^2$, $< x'^2 > = \frac{1}{4}\frac{\epsilon_x^2}{a^2}$, $< x x'> = 0$
# 2. (Correlary to 1.) $x_{rms} = \frac{1}{2}a$, $\epsilon_{rms,x} = \frac{1}{4}\epsilon_x$
#
# The KV distribution is unique in generating linear space-charge forces, which as a result preserve emittance and allow the beam size to be balanced by external fields.
#
# *Note:* Given the above we expect for our beam the following properties:
#
# 1. Given $\epsilon_{rms,n} = 0.3 \times 10^{-7}$ m-rad $\rightarrow 4.10 \times 10^7$ m-rad geometric emittance, we expect a total emittance of $1.62 \times 10^{-5}$ m-rad for the entire beam.
#
# 2. Assuming we fix $x_{rms} = 1$mm, we expect a beam envelope of $a = 2 x_{rms} = 2$mm
#
# 3. We then expect the mean of in $x'^2$ to follow as $< x'^2 > = \frac{\epsilon_{rms,x}^2}{a^2} = 4.22 \times 10^{-6}$
# +
current = 120.e-3 #mA of current
rp_perlength = current/(opts.beta*scipy.constants.c*scipy.constants.e)
bunch_length = 2*1.e-3 #effective bunch length 2 mm
real_particles = rp_perlength*bunch_length
opts.emit_n = 0.3*1.e-6 #We want 0.3 mm-mrad normalized emittance
opts.emits = [basic_calcs.calc_geometric_emittance(opts.emit_n,opts.beta,opts.gamma)]
dpop = 0.0
opts.real_particles = rp_perlength*bunch_length
opts.betae = 1.0 #statically fix beta
opts.alphae = 0.0
opts.stdz = 0.05
opts.macro_particles = n_macro
particles = standard_beam6d.toyKVbeam6D(opts)
bunch = particles[0]
bunch[:,4] = bunch_length*(np.random.random(len(bunch)) -0.5) #center at 0
bunch[:,5] = opts.dpop*np.random.randn(1,len(bunch)) #set dp/p
#Particle ID 4 is showing odd behavior, so fix that specifically
bunch[4] = bunch[100]
bunch[4,6] = 4.0
np.savetxt('myKVBunch.txt',bunch) #write the bunch to a text file
# -
emit = np.sqrt(np.average(bunch[:,0]**2) * np.average(bunch[:,1]**2) - np.average(bunch[:,0]*bunch[:,1])**2)
print "geometric emittance: %s \nnormalized emittance: %s" % (emit, emit * (opts.beta * opts.gamma))
# +
#read in the bunch
particles_file = 'myKVBunch.txt'
bucket_length = bunch_length #set equal
comm = synergia.utils.Commxx(True) #define a communicator
myBunch = read_bunch.read_bunch(particles_file, reference_particle, opts.real_particles, bucket_length, comm)
# generated longitudinal coordinate is z position (beta*c*dt) but Synergia uses
# c*dt. Divide by beta to get c*dt.
local_particles = myBunch.get_local_particles()
local_particles[:,4] /= opts.beta
# -
# #### Quickly plot and verify the bunch
#
# We should see a uniform bunch, distributed uniformly longitudinally over 2 mm, and with a delta-function profile in the Courant Snyder Invariant.
pltbunch.plot_bunch(myBunch)
pltbunch.plot_long(myBunch)
#Look at z distribution
part = myBunch.get_local_particles()
part[:,4]
zvals = part[:,4]
utils.plot_distribution(zvals, 100)
#Plot the distribution in H
hArray, iArray = elliptic_sp.toy_calc_bunch_H(myBunch,opts,elliptic=False)
#hA, iA = elliptic_sp.calc_H_and_ID(myBunch,opts,elliptic=False)
utils.plot_distribution(hArray*1.e6,10)
#print out some bunch properties
basic_calcs.calc_properties(myBunch,reference_particle)
# ## Run the simulation
# +
bunch_simulator = synergia.simulation.Bunch_simulator(myBunch)
#basic diagnostics - PER STEP
basicdiag = synergia.bunch.Diagnostics_basic("basic.h5", opts.output_dir)
bunch_simulator.add_per_step(basicdiag)
#include full diagnostics
fulldiag = synergia.bunch.Diagnostics_full2("full.h5", opts.output_dir)
bunch_simulator.add_per_turn(fulldiag)
#particle diagnostics - PER TURN
opts.turnsPerDiag = 1
particlediag = synergia.bunch.Diagnostics_particles("particles.h5",0,0,opts.output_dir)
bunch_simulator.add_per_turn(particlediag, opts.turnsPerDiag)
# +
opts.turns = 200
opts.checkpointperiod = 50
opts.maxturns = opts.turns+1
propagator = synergia.simulation.Propagator(stepper)
propagator.set_checkpoint_period(opts.checkpointperiod)
propagator.propagate(bunch_simulator,opts.turns, opts.maxturns,opts.verbosity)
workflow.cleanup(opts.output_dir)
# -
# ## Diagnostics
#
# We now want to plot the beam envelope. There are two simple ways to do this using the diagnostics implement for this Synergia run:
#
# 1. Use the per-step `Diagnostics_basic` to plot the RMS envelope in x/y every 1 cm.
# 2. Use the per-turn `Diagnostics_particle` to extract the particle coordinates and plot the RMS or full envelope every 2 cm.
#
# We will demonstrate both methods and show their agreement.
# #### 1. Plot the RMS evelope using basic diagnostics
# +
opts.inputfile = opts.output_dir + '/basic.h5'
opts.plots = ['x_std', 'y_std']
plotVals = diagplot.getPlotVals(opts.inputfile, opts.plots)
#define specific value arrays
xmaster = plotVals['s']
xstd = plotVals['x_std']
ystd = plotVals['y_std']
fig = plt.figure(figsize=(8,6))
ax = plt.gca()
ax.plot(xmaster,xstd*1.e3,'b-', alpha=0.7, label=r'$\sigma_x$') #plot x
ax.plot(xmaster,ystd*1.e3,'g-', alpha=0.7, label = r'$\sigma_y$') #plot y
axtitle = "RMS envelope evolution over 3 m - 14.1 mA"
ax.set_title(axtitle, y = 1.02, fontsize = 18)
ax.set_xlabel("s [m]",fontsize=14)
ax.set_ylabel("rms beam size $\sigma_x$ [mm]",fontsize=14)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
ax.set_xlim([0,3.0])
ax.legend()
sv_title = 'SC_test_envelope.pdf'
fig.tight_layout()
#fig.savefig(sv_title,bbox_inches='tight')
# -
# #### 2.Plot the RMS envelop using the turn-by-turn particles diagnostic
# +
opts.relpath = opts.output_dir
#opts.relpath = 'SC_drift_test-2'
files = elliptic_sp.get_file_list(opts)
twiss = elliptic_sp.get_toy_twiss(opts)
lost = elliptic_sp.get_lost_particle_list(opts)
if len(lost) > 0:
#we have lost particles
opts.lost = lost #store these in opts.lost
lost = True #make lost a simple flag
xrms_vals = []
#loop through all files and grab particles - calculate the RMS x value at each turn
for outfile in files:
if lost:
header, particles, lost_particles = elliptic_sp.get_particles(outfile, lost,opts.lost)
else:
header, particles = elliptic_sp.get_particles(outfile, lost)
xrms = np.std(particles[:,0])
xrms_vals.append(xrms)
zvals = (4./200)*np.asarray(list(range(201))) #construct s value parameters
xrms_vals = np.asarray(xrms_vals)*1.e3
# -
fig = plt.figure()
ax = fig.gca()
ax.plot(xmaster,xstd*1.e3, label = 'basic diag')
ax.plot(zvals,xrms_vals, label = 'particles diag')
ax.legend(loc=2)
pltbunch.plot_bunch(myBunch)
basic_calcs.calc_properties(myBunch,reference_particle)
#Look at z distribution
part = myBunch.get_local_particles()
part[:,4]
zvals = part[:,4]
utils.plot_distribution(zvals, 100)
# ## Calculate the expansion from basic theory
# +
def calc_perveance(I,ref,cn=0):
'''Calculate the perveance for a proton beam of a given current and particle energy.
Arguments
- I - current in A
- ref - the reference particle for extracting beta and gamma
- (optional) charge neutralization factor - default 0
'''
I0 = 3.13e7 #characteristic current
beta = ref.get_beta()
gamma = ref.get_gamma()
return (I/I0)*(2/beta**3)*(1/gamma**3)
def calc_characteristic_current():
'''Return characteristics current for proton beam'''
return 4*np.pi*scipy.constants.epsilon_0*scipy.constants.m_p*(scipy.constants.c**3)/scipy.constants.e
# +
#Introduce numerical integrators
#2nd Order RK - Ralston Method
def Ralston(r,z,h,f):
k1 = h*f(r)
return 0.25*k1 + 0.75*h*f(r+(2/3)*k1)
#4th Order Runge-Kutta
def RungeKutta4(r,z,h,f):
k1 = f(r)
k2 = f(r + (h/2)*k1)
k3 = f(r + (h/2)*k2)
k4 = f(r + h*k3)
return h/6*(k1 + 2*k2 +2*k3 + k4)
#function here, which is a function of r and z
def rprime(K,emit,r0,rp0,rm):
'''
Returns the slope of the beam envelope (dr/dz) for a given value of emittance,rm, K, and initial conditions.
This equation follows from Reisier.
Arguments:
- r - beam radius (or RMS)
- K - perveance
- emit - geometric emittance
- r0 - initial envelope radius (or RMS)
- rp0 - initial slope of envelope (or RMS)
'''
first = rp0**2 #first term
second = (emit**2)*((1./r0**2)-(1./rm**2)) #second term
third = 2*K* np.log(rm/r0) / 4
return np.sqrt(first + second + third)
# +
import math
from __future__ import division
def calculate_expansion(current, reference_paricle,r0,rp0,emit=emit,N=1000,zf=opts.turns * lattice.get_length()):
'''Evaluate the expansion of a KV beam envelope in a drift along z-axis, begining at z = 0.
Arguments:
- current - beam current in A
- reference_particle - synergia object for bunch/lattice reference particle
- r0 - initial envelope value (provide RMS for RMS expansion, a for envelope expansion, etc.)
- rp0 - initial slope of envelope (must be non-zero, but calculation is not sensitive to small values)
- (optional) emit - geometric emittance of beam - default 2.05721258396*1.e-6 (for 0.3 mm-mrad KV beam)
- (optional) N - number of steps for integration - default 1000
- (optional) zf - final z value (e.g. length of expansion) - default 50.0
'''
z0 = 0.0 #start
ss = (zf-z0)/N #step size
zpoints = np.linspace(0.0, zf, num=N) #define z values
rpoints = [] #empty array for r values
#calculate perveance
Kp = calc_perveance(current, reference_particle)
#x is r
#z is t (what we step up)
#f is our function describing the relationship between r and z
f = lambda r: rprime(Kp,emit,r0,rprime0,r)
r,z,dz = r0,z0,ss
points = []
while z < zf:
points.append((z,r))
z, r = z+dz, r + Ralston(r,z,dz,f) #incremement
return points
# +
#define the original bunch
particles_file = 'myKVBunch.txt'
bucket_length = bunch_length #set equal
comm = synergia.utils.Commxx(True) #define a communicator
myOrigBunch = read_bunch.read_bunch(particles_file, reference_particle, opts.real_particles, bucket_length, comm)
# generated longitudinal coordinate is z position (beta*c*dt) but Synergia uses
# c*dt. Divide by beta to get c*dt.
local_particles = myOrigBunch.get_local_particles()
local_particles[:,4] /= opts.beta
#calculate orignal x emittance
gemit_x = basic_calcs.get_emittance('x',myOrigBunch)
# -
#Calculate current - 14 mA
current14 = 14*1.e-3
rprime0 = 1.0*(xstd[1]-xstd[0])/(xmaster[1]-xmaster[0])
r0 = xstd[0] #1.0*1.e-3 #initial envelope value
#emit = 4.10849449506e-06 #not used (hard coded into calculate_expansion) #gemit_x #rms geometric emittance
#Run the tests
points14 = calculate_expansion(current14, reference_particle, r0,rprime0)
points14_zemit = calculate_expansion(current14, reference_particle, r0,0, emit = 0)
points0 = calculate_expansion(0.0, reference_particle, r0,rprime0)
# ### Compare zero emittance to emittance run
#Compare the results
fig = plt.figure(figsize=(8,6))
ax = plt.gca()
ax.plot(zvals,xrms_vals,'b-', alpha=0.7, label = 'simulation - 14mA') #plot x
ax.plot([p[0] for p in points14], [p[1]*1.e3 for p in points14],'g--',alpha=0.7, label = 'emittance - 14 mA')
ax.plot([p[0] for p in points14_zemit], [p[1]*1.e3 for p in points14_zemit],'k--',alpha=0.7, label = 'zero emittance- 14 mA')
axtitle = "RMS envelope over %s m - theory" % (opts.turns * lattice.get_length())
ax.set_title(axtitle, y = 1.02, fontsize = 18)
ax.set_xlabel("s [m]",fontsize=14)
ax.set_ylabel("rms beam size $\sigma_x$ [mm]",fontsize=14)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
ax.legend(loc = 2)
fig.tight_layout()
#fig.savefig('non_SC_dominated_envelope.png')
# #### Compare zero current to 14 mA
#Compare the results - zero current to 14 mA
fig = plt.figure(figsize=(8,6))
ax = plt.gca()
ax.plot(zvals,xrms_vals,'b-', alpha=0.7, label = 'simulation - 14mA') #plot x
ax.plot([p[0] for p in points14], [p[1]*1.e3 for p in points14],'g--',alpha=0.7, label = 'theory - 14 mA')
ax.plot([p[0] for p in points0], [p[1]*1.e3 for p in points0],'k--',alpha=0.7, label = 'theory - zero current')
axtitle = "RMS envelope over %s m - theory vs simulation" % (opts.turns * lattice.get_length())
ax.set_title(axtitle, y = 1.02, fontsize = 18)
ax.set_xlabel("s [m]",fontsize=14)
ax.set_ylabel("rms beam size $\sigma_x$ [mm]",fontsize=14)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
ax.legend(loc = 2)
#fig.tight_layout()
|
development/expansion_benchmarks/SC_drift_expansion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import crystalball as cb
import pandas as pd
relative_directory = '../Brainwahve CSV'
ball = cb.CrystalBall.run(relative_directory)
# ## I am interested in searching all the csvs for "names" in order to establish identity.
ball.featureSearch(['name', 'Name'])
#
# ## I see that "firstName" is a promising feature that may tell me more about identity. I need to find the table that contains that column.
#
#
t1 = ball.tableSearch(['firstName'])
t1
# ## I can now read the table, and analyze the contents.
users = ball.openTable(t1[0])
users.head(0)
# ## I am interested in the following features: id, firstName, zip, city
users_subtable = ball.subTable(users, ['id'], ['firstName', 'zip', 'city'])
users_subtable.head(0)
# ## I have now established identity, as observable from the "firstName" column. Futhermore, I see that the column "id" looks promising, as it might lead me to more relevant information about each person.
# educated guesses as to what I am looking for
ball.contains(['ID', 'Id', 'id', 'name', 'Name', 'firstName'])
# find columns that contain the given substrings
ball.featureSearch(['id', 'Id'])
# I am interested in the following Ids: userId, accountId, employeeId, applicantId, applicant_id, placementSpecialistId, companyId
# Let's investigate the userIds
t2 = ball.tableSearch(['userId'])
t2
# I am interested in applicant_profiles.csv
applicant_profiles = ball.openTable(t2[1])
applicant_profiles.head(0)
# +
# I am interested in both id and userId.
# user.csv: has an "id" feature
# applicant_profiles.csv: also has an "id", as well as a "userId"
# PROBLEM: which ids match, and which don't?
# isolate the features of interest:
f1 = pd.Series(users_subtable.index)
f2 = applicant_profiles.id
f3 = applicant_profiles.userId
# to_analyze = {'user.csv': f1, 'applicant_profiles.csv': f2, 'applicant_profiles.csv': f3}
to_analyze = [['user.csv', f1], ['applicant_profiles.csv', f2], ['applicant_profiles.csv', f3]]
ball.analyzeRelationships(to_analyze)
# -
ball.compareRelationship(['user.csv', f1], ['applicant_profiles.csv', f3])
# ### from the boxplots and the analysis tables, it is apparent that user.csv's "id" key corresponds to applicant_profiles.csv's "userId" feature. Now we can create a subtable from applicant_profiles.csv.
# Get birds eye view of table
applicant_profiles.iloc[:,20:30].head(0)
# select the columns you want to include in your subtable
applicant_profiles_subtable = ball.subTable(applicant_profiles, ['userId'], ['totalYearsExperience', 'industryYearsExperience', 'workedLastInIndustry'])
applicant_profiles_subtable.head(0)
# ### now that you have generated two subtables (user_subtable and applicant_profiles_subtable) that have indexes that relate to one another, you can now merge them.
WA_MasterDB = ball.mergeTables([users_subtable, applicant_profiles_subtable])
WA_MasterDB.head(0)
# now you can export your merged table into a new table!!
ball.export(WA_MasterDB, "./test.csv")
test = ball.openTable("test.csv")
# ### we can continue this process now to dynamically add more and more columns of interest to WA_MasterDB!
a1 = ball.featureSearch(['work'])
a1
a2 = ball.tableSearch(['applicantId', 'id'], mode='INTERSECTION')
a2
applicant_education = ball.openTable(a2[1])
applicant_education.head(0)
# +
g1 = applicant_education.id
g2 = applicant_education.applicantId
g3 = users.id
ball.analyzeRelationships([['applicant_education.csv', g1], ['applicant_education.csv', g2], ['WA_MasterDB', g3]])
# -
applicant_education_subtable = ball.subTable(applicant_education, ['applicantId'], ['institutionName', 'subjectOrMajor', 'dateAttendedStart', 'dateAttendedEnd', 'degreeTypeOther'])
applicant_education_subtable.head(0)
# search for tables that contain both "applicant" and "user" information
a3 = ball.tableSearch(['applicant', 'user'], mode='INTERSECTION')
a3
# +
# we've already checked out applican_profiles.csv, so we'll directly merge applicant_education_subtable to WA_MasterDB
WA_MasterDB2 = ball.mergeTables([WA_MasterDB, applicant_education_subtable])
WA_MasterDB2 = WA_MasterDB2.reset_index().set_index(['id','firstName'])
WA_MasterDB2.head(0)
# -
# you can always use pandas groupby aggregate function to combine rows
# use applymap to apply a function to every string. in this case, applymap joins contents of list into a string
WA_MasterDB2_agg = WA_MasterDB2.reset_index().groupby('id').aggregate(lambda x: list(set(x))).applymap(lambda x: ', '.join(map(str, x)))
WA_MasterDB2_agg.head(0)
|
demo/wahve_demo/WAHVE Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python SDF
# language: python
# name: sdf
# ---
# +
### Frame sampling
# -
# !cd
# +
import cv2
cap = cv2.VideoCapture('.\\images\\cctv31.mp4')
# Image resolution, image width & height frame cpunts & frame rates
print('Frame width: {0:03d}'.format(round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))))
print('Frame height: {0:03d}'.format(round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
print('Frame count: {0:03d}'.format(round(cap.get(cv2.CAP_PROP_FRAME_COUNT))))
fps = round(cap.get(cv2.CAP_PROP_FPS))
print('FPS: {0:03d}'.format(fps))
# +
# Sampling from video
# +
import cv2
import time
#video = cv2.VideoCapture(0)
video = cv2.VideoCapture('.\\images\\cctv31.mp4')
# For outpu image save to create cv2.VideoWriter
#w = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#h = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#fps = cap.get(cv2.CAP_PROP_FPS)
#fourcc = cv2.VideoWriter_fourcc(*'DIVX') # *'DIVX' == 'D','I','V','X'
#out = cv2.VideoWriter('.\\images\\output0.avi', fourcc, fps, (w, h))
prev_time = 0
FPS = 1 # ! frame per a second
while True:
ret, frame = video.read()
if not ret:
break
frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25) # Resize
current_time = time.time() - prev_time
if (ret is True) and (current_time > 1./ FPS):
prev_time = time.time()
cv2.imshow('VideoCapture', frame)
cv2.imwrite('beam{0}.jpg'.format(current_time), frame)
if cv2.waitKey(1) > 0 :
break
video.release()
cv2.destroyAllWindows()
# -
|
frame_sampling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX)
#
# The <a href="https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average">ARIMA</a> model is a generalisation of an ARMA model that can be applied to non-stationary time series.
#
# The ARIMAX model is an extended version of ARIMA that includes independent predictor variables.
# +
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from time import time
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
# -
matplotlib.rcParams['figure.figsize'] = (16, 9)
pd.options.display.max_columns = 999
# ## Load Dataset
df = pd.read_csv('../datasets/household-electricity.csv', parse_dates=[0], index_col='DateTime')
print(df.shape)
df.head()
# ## Define Parameters
#
# Make predictions for 24-hour period using a training period of four weeks.
dataset_name = 'Household Electricity Consumption'
dataset_abbr = 'HEC'
model_name = 'ARIMAX'
context_length = 24*7*4 # Four weeks
prediction_length = 24
# ## Define Error Metric
#
# The seasonal variant of the mean absolute scaled error (MASE) will be used to evaluate the forecasts.
def calc_sMASE(training_series, testing_series, prediction_series, seasonality=prediction_length):
a = training_series.iloc[seasonality:].values
b = training_series.iloc[:-seasonality].values
d = np.sum(np.abs(a-b)) / len(a)
errors = np.abs(testing_series - prediction_series)
return np.mean(errors) / d
# ## Example ARIMAX Model
#
# Exploration of how ARIMAX models work using a single example time series.
# +
ts_ex = 'ts10'
df_ex = df.loc[:, ts_ex]
# Plot data from first five days
df_ex.iloc[:24*5].plot();
# -
# ### Time Series Decomposition
#
# Decompose the example time series into trend, seasonal, and residual components.
fig = seasonal_decompose(df_ex.iloc[-500:], model='additive').plot()
# There doesn't appear to be a consistent trend. A Dicky-Fuller test can be used to confirm the stationarity.
dftest = adfuller(df_ex.iloc[-500:], autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
dfoutput
# The very low p-value confirms that the data is stationary. There is daily seasonality, which will be captured as exogenous variables in our ARIMAX model. As there is no trend and seasonality is not being considered directly, no differencing will be applied to the time series.
# ### Plot ACF and PACF
#
# The <a href="https://en.wikipedia.org/wiki/Autocorrelation">Autocorrelation Function</a> (ACF) is the correlation of a signal with a delayed copy of itself as a function of delay.
#
# The <a href="https://en.wikipedia.org/wiki/Partial_autocorrelation_function">Partial Autocorrelation Function</a> (PACF) is the partial correlation of a signal with a delayed copy of itself, controlling for the values of the time series at all shorter delays, as a function of delay.
fig, ax = plt.subplots(2)
ax[0] = sm.graphics.tsa.plot_acf(df_ex, lags=50, ax=ax[0])
ax[1] = sm.graphics.tsa.plot_pacf(df_ex, lags=50, ax=ax[1])
# There is clearly daily seasonality.
# ### Prepare Data
# +
df_ex = pd.DataFrame(df_ex)
days = df_ex.index.dayofweek
dummy_days = pd.get_dummies(days)
dummy_days.columns = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
dummy_days.index = df_ex.index
df_ex = pd.concat([df_ex, dummy_days], axis=1)
df_ex.head()
# -
# ### Build Model
#
# Grid search will be implemented to identify optimal parameters for the ARIMAX(p, d, q) model, using the following possible values:
# +
from itertools import product
ps = range(0, 5) # Up to 4 AR terms
ds = range(0, 2) # Either no or first differencing
qs = range(0, 5) # Up to 4 MA terms
params = product(ps, ds, qs)
params_list = list(params)
print("Number of parameter combinations for grid search: {}".format(len(params_list)))
# -
def optimiseARIMAX(time_series, params_list=params_list, test_length=prediction_length, train_length=context_length):
ts = time_series.iloc[-(test_length+train_length):]
ts_train = ts.iloc[:-test_length]
ts_test = ts.iloc[-test_length:]
# Select the best model using a holdout validation period
val_length = test_length
ts_train_val = ts.iloc[:-(test_length+val_length)]
ts_val = ts.iloc[-(test_length+val_length):-test_length]
results = []
for params in params_list:
p = params[0]
d = params[1]
q = params[2]
# try/except loop in case model fails to converge for given parameters
# Use SARIMAX model but disregard the seasonal parameters
try:
arimax = sm.tsa.SARIMAX(endog=ts_train_val.iloc[:, 0],
exog=ts_train_val.iloc[:, 1:],
order=(p, d, q)).fit()
except:
continue
# Make predictions for validation holdout set and update best model if necessary
val_pred = arimax.predict(start=ts_val.index[0],
end=ts_val.index[-1],
exog=ts_val.iloc[:, 1:],
dynamic=True)
sMASE = calc_sMASE(ts_train.iloc[:, 0], ts_val.iloc[:, 0], val_pred)
results.append([params, sMASE])
df_results = pd.DataFrame(results)
df_results.columns = ['parameters', 'sMASE']
df_results = df_results.sort_values(by='sMASE', ascending=True).reset_index(drop=True)
# Retrain model with best parameters using all training data and generate test forecast
# Use loop to fall back to next best model in case training fails using full dataset
trained = False
model_rank = 1
while not trained:
train_params = df_results.iloc[model_rank-1, 0]
try:
arimax = sm.tsa.SARIMAX(endog=ts_train.iloc[:, 0],
exog=ts_train.iloc[:, 1:],
order=train_params).fit()
trained = True
except:
model_rank += 1
summary = arimax.summary()
# Start index must be greater than q. Fill missing initial entries with zeroes
fcst = arimax.predict(start=ts_train.index[train_params[2]+1],
end=ts_test.index[-1],
exog=ts_test.iloc[:, 1:])
fcst = np.concatenate([np.array([0 for i in range(train_params[2]+1)]), fcst])
fcst = pd.DataFrame(data=fcst, index=ts.index, columns=['pred%s' % ts.columns[0][2:]])
return fcst, train_params, summary
import warnings
warnings.filterwarnings('ignore')
# %%time
fcst, train_params, summary = optimiseARIMAX(df_ex)
df_ex = pd.concat([df_ex, fcst], axis=1)
print("Best model: ARIMAX{}".format(train_params))
print(summary)
# +
# Example forecast
fcst0 = df_ex.copy()
fcst0['pred%s' % ts_ex[2:]][fcst0['pred%s' % ts_ex[2:]] < 0] = 0
fcst0.iloc[-4*prediction_length:, 0].plot(label='Actual', c='k', alpha=0.5)
fcst0.iloc[-4*prediction_length:, -1].plot(label='ARIMAX%s' % str(train_params), c='b', alpha=0.5)
plt.axvline(x=fcst0.index[-prediction_length], linestyle=':', linewidth=2, color='r', label='Start of test data')
plt.legend()
plt.title(ts_ex);
# -
# ## Evaluating ARIMAX
#
# To evaluate ARIMAX, forecasts will be generated for each time series using the grid search methodology shown above (with subsequent zeroing of the negative values). sMASE will be calculated for each individual time series, and the mean of all these scores will be used as the overall accuracy metric for ARIMAX on this dataset.
# +
parameters = []
results = df.iloc[-(prediction_length+context_length):].copy()
tic = time()
for i, col in enumerate(df.columns):
if i % 10 == 0:
toc = time()
print("Running predictions for {}. Cumulative time: {:.1f} minutes.".format(col, (toc-tic)/60))
# Prepare DataFrame for selected column
dft = df.loc[:, col]
dft = pd.DataFrame(dft)
days = dft.index.dayofweek
dummy_days = pd.get_dummies(days)
dummy_days.columns = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
dummy_days.index = dft.index
dft = pd.concat([dft, dummy_days], axis=1)
# Find best model
fcst, train_params, summary = optimiseARIMAX(dft)
# Add predictions to results DataFrame
results['pred%s' % col[2:]] = fcst.values
# Store model parameteres for reference
parameters.append(train_params)
toc = time()
print("Finished! Total run time: {:.1f} minutes.".format((toc-tic)/60))
# -
results0 = results.copy()
results0[results0 < 0] = 0
results0.head()
sMASEs = []
for i, col in enumerate(df.columns):
sMASEs.append(calc_sMASE(results0[col].iloc[-(context_length + prediction_length):-prediction_length],
results0[col].iloc[-prediction_length:],
results0['pred%s' % str(i+1)].iloc[-prediction_length:]))
fig, ax = plt.subplots()
ax.hist(sMASEs, bins=20)
ax.set_title('Distributions of sMASEs for {} dataset'.format(dataset_name))
ax.set_xlabel('sMASE')
ax.set_ylabel('Count');
sMASE = np.mean(sMASEs)
print("Overall sMASE: {:.4f}".format(sMASE))
# Show some example forecasts.
# +
fig, ax = plt.subplots(5, 2, sharex=True)
ax = ax.ravel()
for col in range(1, 11):
ax[col-1].plot(results0.index[-prediction_length:], results0['ts%s' % col].iloc[-prediction_length:],
label='Actual', c='k', linestyle='--', linewidth=1)
ax[col-1].plot(results0.index[-prediction_length:], results0['pred%s' % col].iloc[-prediction_length:],
label='ARIMAX%s' % str(parameters[col-1]), c='b')
ax[col-1].legend()
fig.suptitle('{} Predictions'.format(dataset_name));
# -
# Store the predictions and accuracy score for the ARIMAX models.
# +
import pickle
with open('{}-sMASE.pkl'.format(dataset_abbr), 'wb') as f:
pickle.dump(sMASE, f)
with open('../_results/{}/{}-results.pkl'.format(model_name, dataset_abbr), 'wb') as f:
pickle.dump(results.iloc[-prediction_length:], f)
# -
|
ARIMAX/household-electricity-consumption.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="6yk3PMfBuZhS" colab_type="text"
# Make sure GPU is enabled
# Runtime -> Change Runtime Type -> Hardware Accelerator -> GPU
# + id="qhunyJSod_UT" colab_type="code" colab={}
# !git clone https://github.com/vlomme/Multi-Tacotron-Voice-Cloning.git
# + id="pE6btDZWeFV0" colab_type="code" colab={}
# cd Multi-Tacotron-Voice-Cloning/
# + id="0AVd9vLKeKm6" colab_type="code" colab={}
# !pip install -r requirements.txt
# + id="2920fqapzRoz" colab_type="code" colab={}
# !apt-get install libportaudio2
# + id="VuwgOQlPeN8a" colab_type="code" colab={}
# !gdown https://drive.google.com/uc?id=1aQBmpflbX_ePUdXTSNE4CfEL9hdG2-O8
# + id="vKLpYfRkfyjX" colab_type="code" colab={}
# !unzip pretrained.zip
# + id="YOiGYfpAf2qR" colab_type="code" colab={}
# !python demo_cli.py -p "ex.wav" -t "Hello my friends. Я многоязычный синтез построенный на tacotron. Шла саша по шоссе и сосала сушку" --no_sound
# + id="PyLdbUfks2lv" colab_type="code" colab={}
import IPython.display as ipd
print("original(оригинал)")
ipd.display(ipd.Audio('ex.wav'))#filepath to original voices (Путь до оригинального файла)
print("cloned(клонированный)")
ipd.display(ipd.Audio('demo_output_00.wav'))#filepath to synthesized voices (Путь до синтезированного файла)
|
Multi_Tacotron_Voice_Cloning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # Google Analytics - GoogleAnalytics Get stats per country
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Google%20Analytics/GoogleAnalytics_Get_stats_per_country.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+<KEY>
# + [markdown] papermill={} tags=[]
# **Tags:** #googleanalytics #statspercountry
# + [markdown] papermill={} tags=[]
# Pre-requisite: Create your own <a href="">Google API JSON credential</a>
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Import library
# + papermill={} tags=[]
import pycountry
import plotly.graph_objects as go
import plotly.express as px
from naas_drivers import googleanalytics
# + [markdown] papermill={} tags=[]
# ### Get your credential from Google Cloud Platform
# + papermill={} tags=[]
json_path = '/Users/charlesdemontigny/Desktop/naas-335023-90c733ba64dd.json'
# + [markdown] papermill={} tags=[]
# ### Get view id from google analytics
# + papermill={} tags=[]
view_id = "236707574"
# + [markdown] papermill={} tags=[]
# ## Model
# + [markdown] papermill={} tags=[]
# ### Report Website - Google Analytics performance
# + papermill={} tags=[]
googleanalytics.connect(json_path=json_path)
# + [markdown] papermill={} tags=[]
# ### Visitor's country of origin
# + papermill={} tags=[]
country = googleanalytics.views.get_data(
view_id,
metrics="ga:sessions",
pivots_dimensions="ga:country",
dimensions="ga:month",
start_date=None,
end_date=None,
format_type="pivot",
)
# + papermill={} tags=[]
sessions_per_country = googleanalytics.views.get_country(view_id) # default: metrics="ga:sessions"
# + papermill={} tags=[]
sessions_per_country
# + papermill={} tags=[]
users_per_country = googleanalytics.views.get_country(view_id, metrics="ga:users")
# + [markdown] papermill={} tags=[]
# ## Output
# + [markdown] papermill={} tags=[]
# ### Display result
# + papermill={} tags=[]
sessions_per_country.head()
# + papermill={} tags=[]
users_per_country.head()
# + papermill={} tags=[]
sessions_per_country = sessions_per_country.reset_index().rename(columns={"index": "Country"})
mapping = {country.name: country.alpha_3 for country in pycountry.countries}
sessions_per_country['iso_alpha'] = sessions_per_country['Country'].apply(lambda x: mapping.get(x))
# + papermill={} tags=[]
sessions_per_country
# + papermill={} tags=[]
fig = px.choropleth(sessions_per_country, locations="iso_alpha",
color="Sessions",
hover_name="Country",
color_continuous_scale="Greens")
fig.show()
# + papermill={} tags=[]
|
Google Analytics/GoogleAnalytics_Get_stats_per_country.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
#
# Return a deep copy of the list.
# ### Thought
# Time complexity: O(2N)
# +
# Definition for singly-linked list with a random pointer.
# class RandomListNode(object):
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution(object):
def copyRandomList(self, head):
dic = dict()
m = n = head
while m:
dic[m] = RandomListNode(m.label)
m = m.next
while n:
dic[n].next = dic.get(n.next)
dic[n].random = dic.get(n.random)
n = n.next
return dic.get(head)
|
138. Copy List with Random Pointer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a href="https://cocl.us/corsera_da0101en_notebook_top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/TopAd.png" width="750" align="center">
# </a>
# </div>
# <a href="https://www.bigdatauniversity.com"><img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/CCLog.png" width = 300, align = "center"></a>
#
# <h1 align=center><font size=5>Data Analysis with Python</font></h1>
# <h1>Data Wrangling</h1>
# <h3>Welcome!</h3>
#
# By the end of this notebook, you will have learned the basics of Data Wrangling!
# <h2>Table of content</h2>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li><a href="#identify_handle_missing_values">Identify and handle missing values</a>
# <ul>
# <li><a href="#identify_missing_values">Identify missing values</a></li>
# <li><a href="#deal_missing_values">Deal with missing values</a></li>
# <li><a href="#correct_data_format">Correct data format</a></li>
# </ul>
# </li>
# <li><a href="#data_standardization">Data standardization</a></li>
# <li><a href="#data_normalization">Data Normalization (centering/scaling)</a></li>
# <li><a href="#binning">Binning</a></li>
# <li><a href="#indicator">Indicator variable</a></li>
# </ul>
#
# Estimated Time Needed: <strong>30 min</strong>
# </div>
#
# <hr>
# <h2>What is the purpose of Data Wrangling?</h2>
# Data Wrangling is the process of converting data from the initial format to a format that may be better for analysis.
# <h3>What is the fuel consumption (L/100k) rate for the diesel car?</h3>
# <h3>Import data</h3>
# <p>
# You can find the "Automobile Data Set" from the following link: <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a>.
# We will be using this data set throughout this course.
# </p>
# <h4>Import pandas</h4>
import pandas as pd
import matplotlib.pylab as plt
# <h2>Reading the data set from the URL and adding the related headers.</h2>
# URL of the dataset
# This dataset was hosted on IBM Cloud object click <a href="https://cocl.us/corsera_da0101en_notebook_bottom">HERE</a> for free storage
filename = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv"
# Python list <b>headers</b> containing name of headers
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
"drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
"num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
"peak-rpm","city-mpg","highway-mpg","price"]
# Use the Pandas method <b>read_csv()</b> to load the data from the web address. Set the parameter "names" equal to the Python list "headers".
# + jupyter={"outputs_hidden": false}
df = pd.read_csv(filename, names = headers)
# -
# Use the method <b>head()</b> to display the first five rows of the dataframe.
# + jupyter={"outputs_hidden": false}
# To see what the data set looks like, we'll use the head() method.
df.head()
# -
# As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis.
# <div>So, how do we identify all those missing values and deal with them?</div>
#
#
# <b>How to work with missing data?</b>
#
# Steps for working with missing data:
# <ol>
# <li>dentify missing data</li>
# <li>deal with missing data</li>
# <li>correct data format</li>
# </ol>
# <h2 id="identify_handle_missing_values">Identify and handle missing values</h2>
#
#
# <h3 id="identify_missing_values">Identify missing values</h3>
# <h4>Convert "?" to NaN</h4>
# In the car dataset, missing data comes with the question mark "?".
# We replace "?" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function:
# <pre>.replace(A, B, inplace = True) </pre>
# to replace A by B
# + jupyter={"outputs_hidden": false}
import numpy as np
# replace "?" to NaN
df.replace("?", np.nan, inplace = True)
df.head(5)
# -
# dentify_missing_values
#
# <h4>Evaluating for Missing Data</h4>
#
# The missing values are converted to Python's default. We use Python's built-in functions to identify these missing values. There are two methods to detect missing data:
# <ol>
# <li><b>.isnull()</b></li>
# <li><b>.notnull()</b></li>
# </ol>
# The output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.
# + jupyter={"outputs_hidden": false}
missing_data = df.isnull()
missing_data.head(5)
# -
# "True" stands for missing value, while "False" stands for not missing value.
# <h4>Count missing values in each column</h4>
# <p>
# Using a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, "True" represents a missing value, "False" means the value is present in the dataset. In the body of the for loop the method ".value_counts()" counts the number of "True" values.
# </p>
# + jupyter={"outputs_hidden": false}
for column in missing_data.columns.values.tolist():
print(column)
print (missing_data[column].value_counts())
print("")
# -
# Based on the summary above, each column has 205 rows of data, seven columns containing missing data:
# <ol>
# <li>"normalized-losses": 41 missing data</li>
# <li>"num-of-doors": 2 missing data</li>
# <li>"bore": 4 missing data</li>
# <li>"stroke" : 4 missing data</li>
# <li>"horsepower": 2 missing data</li>
# <li>"peak-rpm": 2 missing data</li>
# <li>"price": 4 missing data</li>
# </ol>
# <h3 id="deal_missing_values">Deal with missing data</h3>
# <b>How to deal with missing data?</b>
#
# <ol>
# <li>drop data<br>
# a. drop the whole row<br>
# b. drop the whole column
# </li>
# <li>replace data<br>
# a. replace it by mean<br>
# b. replace it by frequency<br>
# c. replace it based on other functions
# </li>
# </ol>
# Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely.
# We have some freedom in choosing which method to replace data; however, some methods may seem more reasonable than others. We will apply each method to many different columns:
#
# <b>Replace by mean:</b>
# <ul>
# <li>"normalized-losses": 41 missing data, replace them with mean</li>
# <li>"stroke": 4 missing data, replace them with mean</li>
# <li>"bore": 4 missing data, replace them with mean</li>
# <li>"horsepower": 2 missing data, replace them with mean</li>
# <li>"peak-rpm": 2 missing data, replace them with mean</li>
# </ul>
#
# <b>Replace by frequency:</b>
# <ul>
# <li>"num-of-doors": 2 missing data, replace them with "four".
# <ul>
# <li>Reason: 84% sedans is four doors. Since four doors is most frequent, it is most likely to occur</li>
# </ul>
# </li>
# </ul>
#
# <b>Drop the whole row:</b>
# <ul>
# <li>"price": 4 missing data, simply delete the whole row
# <ul>
# <li>Reason: price is what we want to predict. Any data entry without price data cannot be used for prediction; therefore any row now without price data is not useful to us</li>
# </ul>
# </li>
# </ul>
# <h4>Calculate the average of the column </h4>
# + jupyter={"outputs_hidden": false}
avg_norm_loss = df["normalized-losses"].astype("float").mean(axis=0)
print("Average of normalized-losses:", avg_norm_loss)
# -
# <h4>Replace "NaN" by mean value in "normalized-losses" column</h4>
df["normalized-losses"].replace(np.nan, avg_norm_loss, inplace=True)
# <h4>Calculate the mean value for 'bore' column</h4>
avg_bore=df['bore'].astype('float').mean(axis=0)
print("Average of bore:", avg_bore)
# <h4>Replace NaN by mean value</h4>
df["bore"].replace(np.nan, avg_bore, inplace=True)
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #1: </h1>
#
# <b>According to the example above, replace NaN in "stroke" column by mean.</b>
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# # calculate the mean vaule for "stroke" column
# avg_stroke = df["stroke"].astype("float").mean(axis = 0)
# print("Average of stroke:", avg_stroke)
#
# # replace NaN by mean value in "stroke" column
# df["stroke"].replace(np.nan, avg_stroke, inplace = True)
#
# -->
#
# <h4>Calculate the mean value for the 'horsepower' column:</h4>
avg_horsepower = df['horsepower'].astype('float').mean(axis=0)
print("Average horsepower:", avg_horsepower)
# <h4>Replace "NaN" by mean value:</h4>
df['horsepower'].replace(np.nan, avg_horsepower, inplace=True)
# <h4>Calculate the mean value for 'peak-rpm' column:</h4>
avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0)
print("Average peak rpm:", avg_peakrpm)
# <h4>Replace NaN by mean value:</h4>
df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True)
# To see which values are present in a particular column, we can use the ".value_counts()" method:
# + jupyter={"outputs_hidden": false}
df['num-of-doors'].value_counts()
# -
# We can see that four doors are the most common type. We can also use the ".idxmax()" method to calculate for us the most common type automatically:
# + jupyter={"outputs_hidden": false}
df['num-of-doors'].value_counts().idxmax()
# -
# The replacement procedure is very similar to what we have seen previously
# + jupyter={"outputs_hidden": false}
#replace the missing 'num-of-doors' values by the most frequent
df["num-of-doors"].replace(np.nan, "four", inplace=True)
# -
# Finally, let's drop all rows that do not have price data:
# +
# simply drop whole row with NaN in "price" column
df.dropna(subset=["price"], axis=0, inplace=True)
# reset index, because we droped two rows
df.reset_index(drop=True, inplace=True)
# + jupyter={"outputs_hidden": false}
df.head()
# -
# <b>Good!</b> Now, we obtain the dataset with no missing values.
# <h3 id="correct_data_format">Correct data format</h3>
# <b>We are almost there!</b>
# <p>The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other).</p>
#
# In Pandas, we use
# <p><b>.dtype()</b> to check the data type</p>
# <p><b>.astype()</b> to change the data type</p>
# <h4>Lets list the data types for each column</h4>
# + jupyter={"outputs_hidden": false}
df.dtypes
# -
# <p>As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the "astype()" method.</p>
# <h4>Convert data types to proper format</h4>
# + jupyter={"outputs_hidden": false}
df[["bore", "stroke"]] = df[["bore", "stroke"]].astype("float")
df[["normalized-losses"]] = df[["normalized-losses"]].astype("int")
df[["price"]] = df[["price"]].astype("float")
df[["peak-rpm"]] = df[["peak-rpm"]].astype("float")
# -
# <h4>Let us list the columns after the conversion</h4>
# + jupyter={"outputs_hidden": false}
df.dtypes
# -
# <b>Wonderful!</b>
#
# Now, we finally obtain the cleaned dataset with no missing values and all data in its proper format.
# <h2 id="data_standardization">Data Standardization</h2>
# <p>
# Data is usually collected from different agencies with different formats.
# (Data Standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation)
# </p>
#
# <b>What is Standardization?</b>
# <p>Standardization is the process of transforming data into a common format which allows the researcher to make the meaningful comparison.
# </p>
#
# <b>Example</b>
# <p>Transform mpg to L/100km:</p>
# <p>In our dataset, the fuel consumption columns "city-mpg" and "highway-mpg" are represented by mpg (miles per gallon) unit. Assume we are developing an application in a country that accept the fuel consumption with L/100km standard</p>
# <p>We will need to apply <b>data transformation</b> to transform mpg into L/100km?</p>
#
# <p>The formula for unit conversion is<p>
# L/100km = 235 / mpg
# <p>We can do many mathematical operations directly in Pandas.</p>
# + jupyter={"outputs_hidden": false}
df.head()
# + jupyter={"outputs_hidden": false}
# Convert mpg to L/100km by mathematical operation (235 divided by mpg)
df['city-L/100km'] = 235/df["city-mpg"]
# check your transformed data
df.head()
# -
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #2: </h1>
#
# <b>According to the example above, transform mpg to L/100km in the column of "highway-mpg", and change the name of column to "highway-L/100km".</b>
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# # transform mpg to L/100km by mathematical operation (235 divided by mpg)
# df["highway-mpg"] = 235/df["highway-mpg"]
#
# # rename column name from "highway-mpg" to "highway-L/100km"
# df.rename(columns={'"highway-mpg"':'highway-L/100km'}, inplace=True)
#
# # check your transformed data
# df.head()
#
# -->
#
# <h2 id="data_normalization">Data Normalization</h2>
#
# <b>Why normalization?</b>
# <p>Normalization is the process of transforming values of several variables into a similar range. Typical normalizations include scaling the variable so the variable average is 0, scaling the variable so the variance is 1, or scaling variable so the variable values range from 0 to 1
# </p>
#
# <b>Example</b>
# <p>To demonstrate normalization, let's say we want to scale the columns "length", "width" and "height" </p>
# <p><b>Target:</b>would like to Normalize those variables so their value ranges from 0 to 1.</p>
# <p><b>Approach:</b> replace original value by (original value)/(maximum value)</p>
# + jupyter={"outputs_hidden": false}
# replace (original value) by (original value)/(maximum value)
df['length'] = df['length']/df['length'].max()
df['width'] = df['width']/df['width'].max()
# -
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Questiont #3: </h1>
#
# <b>According to the example above, normalize the column "height".</b>
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# df['height'] = df['height']/df['height'].max()
# # show the scaled columns
# df[["length","width","height"]].head()
#
# -->
# Here we can see, we've normalized "length", "width" and "height" in the range of [0,1].
# <h2 id="binning">Binning</h2>
# <b>Why binning?</b>
# <p>
# Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis.
# </p>
#
# <b>Example: </b>
# <p>In our dataset, "horsepower" is a real valued variable ranging from 48 to 288, it has 57 unique values. What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three ‘bins' to simplify analysis? </p>
#
# <p>We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins </p>
#
#
# <h3>Example of Binning Data In Pandas</h3>
# Convert data to correct format
# + jupyter={"outputs_hidden": false}
df["horsepower"]=df["horsepower"].astype(int, copy=True)
# -
# Lets plot the histogram of horspower, to see what the distribution of horsepower looks like.
# +
# %matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
plt.pyplot.hist(df["horsepower"])
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
# -
# <p>We would like 3 bins of equal size bandwidth so we use numpy's <code>linspace(start_value, end_value, numbers_generated</code> function.</p>
# <p>Since we want to include the minimum value of horsepower we want to set start_value=min(df["horsepower"]).</p>
# <p>Since we want to include the maximum value of horsepower we want to set end_value=max(df["horsepower"]).</p>
# <p>Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated=4.</p>
# We build a bin array, with a minimum value to a maximum value, with bandwidth calculated above. The bins will be values used to determine when one bin ends and another begins.
# + jupyter={"outputs_hidden": false}
bins = np.linspace(min(df["horsepower"]), max(df["horsepower"]), 4)
bins
# -
# We set group names:
group_names = ['Low', 'Medium', 'High']
# We apply the function "cut" the determine what each value of "df['horsepower']" belongs to.
# + jupyter={"outputs_hidden": false}
df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True )
df[['horsepower','horsepower-binned']].head(20)
# -
# Lets see the number of vehicles in each bin.
df["horsepower-binned"].value_counts()
# Lets plot the distribution of each bin.
# +
# %matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
pyplot.bar(group_names, df["horsepower-binned"].value_counts())
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
# -
# <p>
# Check the dataframe above carefully, you will find the last column provides the bins for "horsepower" with 3 categories ("Low","Medium" and "High").
# </p>
# <p>
# We successfully narrow the intervals from 57 to 3!
# </p>
# <h3>Bins visualization</h3>
# Normally, a histogram is used to visualize the distribution of bins we created above.
# + jupyter={"outputs_hidden": false}
# %matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
a = (0,1,2)
# draw historgram of attribute "horsepower" with bins = 3
plt.pyplot.hist(df["horsepower"], bins = 3)
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
# -
# The plot above shows the binning result for attribute "horsepower".
# <h2 id="indicator">Indicator variable (or dummy variable)</h2>
# <b>What is an indicator variable?</b>
# <p>
# An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning.
# </p>
#
# <b>Why we use indicator variables?</b>
# <p>
# So we can use categorical variables for regression analysis in the later modules.
# </p>
# <b>Example</b>
# <p>
# We see the column "fuel-type" has two unique values, "gas" or "diesel". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert "fuel-type" into indicator variables.
# </p>
#
# <p>
# We will use the panda's method 'get_dummies' to assign numerical values to different categories of fuel type.
# </p>
# + jupyter={"outputs_hidden": false}
df.columns
# -
# get indicator variables and assign it to data frame "dummy_variable_1"
# + jupyter={"outputs_hidden": false}
dummy_variable_1 = pd.get_dummies(df["fuel-type"])
dummy_variable_1.head()
# -
# change column names for clarity
# + jupyter={"outputs_hidden": false}
dummy_variable_1.rename(columns={'fuel-type-diesel':'gas', 'fuel-type-diesel':'diesel'}, inplace=True)
dummy_variable_1.head()
# -
# We now have the value 0 to represent "gas" and 1 to represent "diesel" in the column "fuel-type". We will now insert this column back into our original dataset.
# +
# merge data frame "df" and "dummy_variable_1"
df = pd.concat([df, dummy_variable_1], axis=1)
# drop original column "fuel-type" from "df"
df.drop("fuel-type", axis = 1, inplace=True)
# + jupyter={"outputs_hidden": false}
df.head()
# -
# The last two columns are now the indicator variable representation of the fuel-type variable. It's all 0s and 1s now.
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4: </h1>
#
# <b>As above, create indicator variable to the column of "aspiration": "std" to 0, while "turbo" to 1.</b>
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# # get indicator variables of aspiration and assign it to data frame "dummy_variable_2"
# dummy_variable_2 = pd.get_dummies(df['aspiration'])
#
# # change column names for clarity
# dummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True)
#
# # show first 5 instances of data frame "dummy_variable_1"
# dummy_variable_2.head()
#
# -->
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #5: </h1>
#
# <b>Merge the new dataframe to the original dataframe then drop the column 'aspiration'</b>
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# #merge the new dataframe to the original datafram
# df = pd.concat([df, dummy_variable_2], axis=1)
#
# # drop original column "aspiration" from "df"
# df.drop('aspiration', axis = 1, inplace=True)
#
# -->
# save the new csv
df.to_csv('clean_df.csv')
# <h1>Thank you for completing this notebook</h1>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#
# <p><a href="https://cocl.us/corsera_da0101en_notebook_bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png" width="750" align="center"></a></p>
# </div>
# <h3>About the Authors:</h3>
#
# This notebook was written by <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank"><NAME> PhD</a>, <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a>, <NAME>, <NAME>, <NAME>, Parizad, <NAME> and <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank"><NAME></a> and <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" >Yi Yao</a>.
#
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
|
Coursera/Data Analysis with Python/week 2/Labs/data-wrangling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from urllib.request import urlopen
import urllib.request
# Downloading Data for a particular interval of time from a private Thingspeak channel
channel = "1234567" # Enter your channel ID
start_date = "2021-09-08" # Enter start date
end_date = "2021-09-08" # Enter end date
start_time = "2012:00:00" # Enter start time
end_time = "2016:00:00" # Enter end time
file_name= channel+start_date +".csv" # Can customize your output file name
# For Time Zone
continent= Asia
city = Kolkata
api_key = <KEY> # PUT YOUR PRVIATE CHANNEL READ API HERE
URL = "https://api.thingspeak.com/channels/" + channel + "/feeds.csv?api_key=" +api_key +"&start=" +start_date +"%20" +start_time + "&end=" +end_date +"%20"+ end_time +"&timezone="+continent+"%2F"+city
urllib.request.urlretrieve(URL, file_name)
# -
|
Thingspeak _Private_Channel_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # EDA : <NAME>, 2009
# ## Extracting data
# +
import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup
from tabulate import tabulate
def inr_to_int(text):
try:
text = text.split()
text = text[1].replace(",", "")
except:
return 0
return int(text)
def main():
header = ["Candidate", "Constituency", "Party", "Criminal Cases", "Education", "Total Assets", "Liabilities"]
data = []
constituencies = 0
try:
url = "https://myneta.info/ls2009/index.php?action=show_winners&sort=default"
html = urlopen(url)
soup = BeautifulSoup(html,'html.parser')
title = soup.title.text
title = title.split()
title = title[4:-3]
title = " ".join(title)
title = title.title()
title = title.split(":")
constituency = "+".join(title)
title = ",".join(title)
table = soup.find('table',attrs={'style':'padding: 0px;'})
rows = table.find_all('tr')
for i in range(2, len(rows)):
col_data = []
cols = rows[i].find_all('td')
for i in range(1, 6):
col_data.append(cols[i].text)
for i in range(6, 8):
col_data.append(inr_to_int(cols[i].text))
data.append(col_data)
print("Saving", col_data[1], "=>", col_data[0], "("+col_data[2]+")")
constituencies += 1
except:
print('ERROR')
else:
print('SUCCESS')
df = pd.DataFrame(data = data, columns = header)
print('Saved =>', constituencies, 'constituencies')
df.to_csv('2009.csv', index=False)
main()
# -
# ## Visualizing data
# +
import pandas as pd
import numpy as np
import matplotlib
# %matplotlib inline
matplotlib.rc('figure', figsize=(10, 5))
matplotlib.rc('font', weight='bold', size=12)
# -
data = pd.read_csv('2009.csv')
data
data.describe()
data.info()
# Total number of Parties
np.size(data["Party"].unique()) - 1
# Reducing by one for independent candidates
candidates = np.size(data["Candidate"])
candidates
candidates - np.size(data["Candidate"].unique())
candidates - data.groupby(['Candidate', 'Party']).ngroups
candidates - data.groupby(['Candidate', 'Constituency']).ngroups
data.groupby(['Candidate', 'Education'])['Total Assets'].sum().sort_values(ascending=False).head(25)
data.groupby(['Candidate', 'Education'])['Liabilities'].sum().sort_values(ascending=False).head(25)
party_data = data[data["Party"].isin(["AAP","SAD","BSP","BJP","INC","IND","Republican Party of India (A)", "Punjab Lok Congress Party"])]
party_data.reset_index()
party_data
party_sorted = party_data[["Party","Candidate"]].groupby(["Party"]).aggregate(lambda x: len(x.unique())).sort_values(["Candidate"], ascending = False )
party_sorted
party_sorted.plot(kind="bar")
ed_data = pd.pivot_table(data,values=["Candidate"],aggfunc=lambda x: len(x.unique()), index=["Education"]).sort_values(["Candidate"], ascending = False )
ed_data.reset_index()
ed_data.plot(kind="bar")
ed_party_data = pd.pivot_table(party_data, values=["Candidate"],aggfunc=lambda x: len(x.unique()), index=["Education"], columns=["Party"])
ed_party_data.fillna(0)
criminal_data_asc = data[["Candidate","Constituency","Party", "Education","Criminal Cases"]].sort_values('Criminal Cases', ascending=False).head(25)
criminal_data_asc = criminal_data_asc.reset_index(drop=True)
criminal_data_asc
criminal_data_dsc = data[["Candidate","Constituency","Party", "Education","Criminal Cases"]].sort_values('Criminal Cases', ascending=True).head(25)
criminal_data_dsc = criminal_data_dsc.reset_index(drop=True)
criminal_data_dsc
criminal_data_asc.groupby("Party").aggregate(np.sum).sort_values("Criminal Cases",ascending=False).head(25).plot(kind='bar')
asset_data = data[["Candidate","Constituency","Party", "Total Assets"]].sort_values('Total Assets', ascending=False).head(25)
asset_data = asset_data.reset_index(drop=True)
asset_data
asset_data.groupby("Party").aggregate(np.sum).sort_values("Total Assets",ascending=False).head(25).plot(kind='bar')
|
winners/2009.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: iq
# language: python
# name: iq
# ---
import os
import h5py
import numpy as np
# -- astrologs --
from astrologs.astrologs import Astrologs
# -- galpopfm --
from galpopfm.catalogs import Catalog
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dat_dir = os.environ['GALPOPFM_DIR']
# Lets read in data from SDSS, SIMBA, and TNG
sdss = Astrologs("tinkergroup", mlim='9.7')
zmax = sdss.data['redshift'].max()
zmin = sdss.data['redshift'].min()
print('%f < z_sdss < %f' % (zmin, zmax))
from astropy.cosmology import Planck13 as cosmo
vol_sdss = (cosmo.comoving_volume(zmax).value - cosmo.comoving_volume(zmin).value) * (7818.28/41253.) * cosmo.h**3 # (Mpc/h)^3
print('comoving volume = %.2f (Mpc/h)^3' % vol_sdss)
# +
fig = plt.figure(figsize=(14,4))
sub = fig.add_subplot(131)
sub.scatter(sdss.data['redshift'], sdss.data['log.M_star'], s=1)
sub.set_xlabel(r'$z$', fontsize=25)
sub.set_xlim(0., 0.04)
sub.set_ylabel(r'$\log M_*$', fontsize=25)
sub.set_ylim(9.5, 13.)
sub = fig.add_subplot(132)
sub.scatter(sdss.data['redshift'], sdss.data['M_r'], s=1)
sub.set_xlabel(r'$z$', fontsize=25)
sub.set_xlim(0., 0.04)
sub.set_ylabel(r'$M_r$', fontsize=25)
sub.set_ylim(-17., -23)
sub = fig.add_subplot(133)
sub.scatter(sdss.data['log.M_star'], sdss.data['M_r'], s=1)
sub.set_xlabel(r'$\log M_*$', fontsize=25)
sub.set_xlim(9.5, 12.)
sub.set_ylabel(r'$M_r$', fontsize=25)
sub.set_ylim(-17., -23)
fig.subplots_adjust(wspace=0.4)
# -
simba = Catalog('simba')
tng = Catalog('tng')
vol_simba = 100.**3 # (Mpc/h)^3
vol_tng = 75.**3 # (Mpc/h)^3
logms_bin = np.linspace(8., 13., 26)
dlogms = logms_bin[1:] - logms_bin[:-1]
# Now calculate the SMFs
Ngal_sdss, _ = np.histogram(sdss.data['log.M_star'], bins=logms_bin)
Ngal_simba, _ = np.histogram(simba.data['logmstar'], bins=logms_bin)
Ngal_tng, _ = np.histogram(tng.data['logmstar'], bins=logms_bin)
phi_sdss = Ngal_sdss.astype(float) / vol_sdss / dlogms
phi_simba = Ngal_simba.astype(float) / vol_simba / dlogms
phi_tng = Ngal_tng.astype(float) / vol_tng / dlogms
fig = plt.figure()
sub = fig.add_subplot(111)
sub.plot(0.5*(logms_bin[1:] + logms_bin[:-1]), phi_sdss, label='SDSS volume-limited')
sub.plot(0.5*(logms_bin[1:] + logms_bin[:-1]), phi_simba, label='SIMBA')
sub.plot(0.5*(logms_bin[1:] + logms_bin[:-1]), phi_tng, label='TNG')
sub.legend(loc='lower left', frameon=True, handletextpad=0.2, fontsize=15)
sub.set_xlabel(r'$\log M_*$', fontsize=25)
sub.set_xlim(8.5, 13.)
sub.set_ylabel(r'central galaxy stellar mass function $\Phi^{\rm cen}_{M_*}$', fontsize=25)
sub.set_yscale("log")
ylim = sub.get_ylim()
sub.plot([9.7, 9.7], ylim, c='k', ls='--')
sub.set_ylim(ylim)
# Now lets calculate jackknife errors for the SDSS SMF
# +
ra_sdss = sdss.data['ra'] * (180./np.pi)
dec_sdss = sdss.data['dec'] * (180./np.pi)
plt.scatter(ra_sdss, dec_sdss, c='k', s=1)
plt.xlabel('RA', fontsize=20)
plt.xlim(0., 360)
plt.ylabel('Dec', fontsize=20)
plt.ylim(-30, 90)
# +
jk_fields = []
jk_fields.append(ra_sdss < 100)
ra_bins = np.linspace(110, 270, 4)
dec_bins = np.linspace(-11., 70., 4)
for i in range(len(ra_bins)-1):
for j in range(len(dec_bins)-1):
infield = ((ra_sdss > ra_bins[i]) & (ra_sdss < ra_bins[i+1]) &
(dec_sdss > dec_bins[j]) & (dec_sdss < dec_bins[j+1]))
jk_fields.append(infield)
jk_fields.append(ra_sdss > 300)
for i, field in enumerate(jk_fields):
print('field %i: %i' % (i, np.sum(field)))
# -
plt.scatter(ra_sdss, dec_sdss, c='k', s=1)
for field in jk_fields:
plt.scatter(ra_sdss[field], dec_sdss[field], s=1)
plt.xlabel('RA', fontsize=20)
plt.xlim(0., 360)
plt.ylabel('Dec', fontsize=20)
plt.ylim(-30, 90)
# +
jk_phis = []
for field in jk_fields:
_Ngal, _ = np.histogram(sdss.data['log.M_star'][~field], bins=logms_bin)
_phi = _Ngal.astype(float) / vol_sdss / dlogms
jk_phis.append(_phi)
n_jk = len(jk_phis)
jk_avg_phi = np.sum(np.array(jk_phis), axis=0)/float(n_jk)
phi_err_jk = np.sqrt(float(n_jk-1)/float(n_jk)*np.sum(np.array([(_phi - jk_avg_phi)**2 for _phi in jk_phis]), axis=0))
# -
fig = plt.figure()
sub = fig.add_subplot(111)
sub.errorbar(0.5*(logms_bin[1:] + logms_bin[:-1]), phi_sdss, yerr=phi_err_jk, fmt='.k', label='SDSS volume-limited')
sub.plot(0.5*(logms_bin[1:] + logms_bin[:-1]), phi_simba, label='SIMBA')
sub.plot(0.5*(logms_bin[1:] + logms_bin[:-1]), phi_tng, label='TNG')
sub.legend(loc='lower left', frameon=True, handletextpad=0.2, fontsize=15)
sub.set_xlabel(r'$\log M_*$', fontsize=25)
sub.set_xlim(8.5, 13.)
sub.set_ylabel(r'central galaxy stellar mass function $\Phi^{\rm cen}_{M_*}$', fontsize=25)
sub.set_yscale("log")
ylim = sub.get_ylim()
sub.plot([9.7, 9.7], ylim, c='k', ls='--')
sub.set_ylim(ylim)
f_phi = os.path.join(dat_dir, 'obs', 'tinker_SDSS_M9.7.phi_logMstar.dat')
np.savetxt(f_phi, np.array([logms_bin[:-1], logms_bin[1:], phi_sdss, phi_err_jk]).T)
# Lets see what the luminosity function looks like. First lets calculate the luminosities of SIMBA and TNG with **no dust attenuation**
from galpopfm import measure_obs as measureObs
R_mag_simba = measureObs.AbsMag_sed(simba['wave'][...], simba['sed_neb'][...], band='r_sdss')
R_mag_tng = measureObs.AbsMag_sed(tng['wave'][...], tng['sed_neb'][...], band='r_sdss')
mr_bin = np.linspace(-25., -17., 17)
dmr = mr_bin[1:] - mr_bin[:-1]
# +
_Ngal_sdss, _ = np.histogram(sdss['mr_tinker'][...], bins=mr_bin)
_Ngal_simba, _ = np.histogram(R_mag_simba, bins=mr_bin)
_Ngal_tng, _ = np.histogram(R_mag_tng, bins=mr_bin)
_phi_sdss = _Ngal_sdss.astype(float) / vol_sdss / dmr
_phi_simba = _Ngal_simba.astype(float) / vol_simba / dmr
_phi_tng = _Ngal_tng.astype(float) / vol_tng / dmr
# +
fig = plt.figure()
sub = fig.add_subplot(111)
sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), _phi_sdss)
sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), _phi_simba)
sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), _phi_tng)
sub.set_xlabel(r'$M_r$', fontsize=25)
sub.set_xlim(-19., -25.)
sub.set_ylabel(r'central galaxy luminosity function $\Phi^{\rm cen}_{M_r}$', fontsize=25)
sub.set_yscale("log")
ylim = sub.get_ylim()
sub.plot([-20., -20.], ylim, c='k', ls='--')
sub.set_ylim(ylim)
# -
mr_complete = (sdss['mr_tinker'][...] < -20.)
# +
fig = plt.figure(figsize=(14,4))
sub = fig.add_subplot(131)
sub.scatter(z_sdss, logm_sdss, s=1)
sub.scatter(z_sdss[mr_complete], logm_sdss[mr_complete], s=1)
sub.set_xlabel(r'$z$', fontsize=25)
sub.set_xlim(0., 0.04)
sub.set_ylabel(r'$\log M_*$', fontsize=25)
sub.set_ylim(9.5, 13.)
sub = fig.add_subplot(132)
sub.scatter(z_sdss, sdss['mr_tinker'][...], s=1)
sub.scatter(z_sdss[mr_complete], sdss['mr_tinker'][...][mr_complete], s=1)
sub.set_xlabel(r'$z$', fontsize=25)
sub.set_xlim(0., 0.04)
sub.set_ylabel(r'$M_r$', fontsize=25)
sub.set_ylim(-17., -23)
sub = fig.add_subplot(133)
sub.scatter(logm_sdss, sdss['mr_tinker'][...], s=1)
sub.scatter(logm_sdss[mr_complete], sdss['mr_tinker'][...][mr_complete], s=1)
sub.set_xlabel(r'$\log M_*$', fontsize=25)
sub.set_xlim(9.5, 12.)
sub.set_ylabel(r'$M_r$', fontsize=25)
sub.set_ylim(-17., -23)
fig.subplots_adjust(wspace=0.4)
# -
# Now lets calculate jack-knife error bars for the luminosity function
mr_sdssmr = sdss['mr_tinker'][...][mr_complete]
ra_sdssmr = sdss['RA'][...][mr_complete]
dec_sdssmr = sdss['DEC'][...][mr_complete]
plt.scatter(ra_sdssmr, dec_sdssmr, c='k', s=1)
plt.xlabel('RA', fontsize=20)
plt.xlim(0., 360)
plt.ylabel('Dec', fontsize=20)
plt.ylim(-30, 90)
# +
jk_fields = []
jk_fields.append(ra_sdssmr < 100)
ra_bins = np.linspace(110, 270, 4)
dec_bins = np.linspace(-11., 70., 4)
for i in range(len(ra_bins)-1):
for j in range(len(dec_bins)-1):
infield = ((ra_sdssmr > ra_bins[i]) & (ra_sdssmr < ra_bins[i+1]) &
(dec_sdssmr > dec_bins[j]) & (dec_sdssmr < dec_bins[j+1]))
jk_fields.append(infield)
jk_fields.append(ra_sdssmr > 300)
# -
for i, field in enumerate(jk_fields):
print('field %i: %i' % (i, np.sum(field)))
plt.scatter(ra_sdssmr, dec_sdssmr, c='k', s=1)
for field in jk_fields:
plt.scatter(ra_sdssmr[field], dec_sdssmr[field], s=1)
plt.xlabel('RA', fontsize=20)
plt.xlim(0., 360)
plt.ylabel('Dec', fontsize=20)
plt.ylim(-30, 90)
jk_phis = []
for field in jk_fields:
_Ngal, _ = np.histogram(mr_sdssmr[~field], bins=mr_bin)
_phi = _Ngal.astype(float) / vol_sdss / dmr
jk_phis.append(_phi)
n_jk = len(jk_phis)
jk_avg_phi = np.sum(np.array(jk_phis), axis=0)/float(n_jk)
phi_err_jk = np.sqrt(float(n_jk-1)/float(n_jk)*np.sum(np.array([(_phi - jk_avg_phi)**2 for _phi in jk_phis]), axis=0))
# +
fig = plt.figure()
sub = fig.add_subplot(111)
sub.errorbar(0.5*(mr_bin[1:] + mr_bin[:-1]), _phi_sdss, yerr=phi_err_jk, fmt='.k')
sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), _phi_simba)
sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), _phi_tng)
sub.set_xlabel(r'$M_r$', fontsize=25)
sub.set_xlim(-19., -25.)
sub.set_ylabel(r'central galaxy luminosity function $\Phi^{\rm cen}_{M_r}$', fontsize=25)
sub.set_yscale("log")
ylim = sub.get_ylim()
sub.plot([-20., -20.], ylim, c='k', ls='--')
sub.set_ylim(ylim)
# -
f_phi = os.path.join(dat_dir, 'obs', 'tinker_SDSS_centrals_M9.7.phi_Mr.dat')
np.savetxt(f_phi, np.array([mr_bin[:-1], mr_bin[1:], _phi_sdss, phi_err_jk]).T)
|
nb/_sdss_smf_lf.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Copyright Netherlands eScience Center <br>
# ** Function : Test ConvLSTM module ** <br>
# ** Author : <NAME> ** <br>
# ** First Built : 2020.03.02 ** <br>
# ** Last Update : 2020.03.02 ** <br>
# ** Library : Pytorth, os, dlacs. **<br>
# Description : This notebook serves to unittest the ConvLSTM module in DLACs. <br>
# <br>
# Return Values : Time series and figures <br>
# +
# %matplotlib inline
import sys
# for data loading
import os
#import scipy
import torch
import torch.nn.functional
#sys.path.append(os.path.join('C:','Users','nosta','ML4Climate','Scripts','DLACs'))
#sys.path.append("C:\\Users\\nosta\\ML4Climate\\Scripts\\DLACs")
sys.path.append("../../DLACs")
import dlacs
import dlacs.ConvLSTM
import dlacs.preprocess
import dlacs.function
# for visualization
import dlacs.visual
# -
# # Test ConvLSTM module <br>
# The testing device is Dell Inspirion 5680 with Intel Core i7-8700 x64 CPU and Nvidia GTX 1060 6GB GPU.<br>
# Here is a benchmark about cpu v.s. gtx 1060 <br>
# https://www.analyticsindiamag.com/deep-learning-tensorflow-benchmark-intel-i5-4210u-vs-geforce-nvidia-1060-6gb/
if __name__=="__main__":
print ('******************* create basic dimensions for tensor and network *********************')
# specifications of neural network
input_channels = 3
hidden_channels = [3, 2, 1] # number of channels & hidden layers, the channels of last layer is the channels of output, too
#hidden_channels = [3, 3, 3, 3, 2]
#hidden_channels = [2]
kernel_size = 3
# here we input a sequence and predict the next step only
#step = 1 # how many steps to predict ahead
#effective_step = [0] # step to output
batch_size = 1
#num_layers = 1
learning_rate = 0.005
num_epochs = 1500
print ('******************* check the environment *********************')
print ("Pytorch version {}".format(torch.__version__))
# check if CUDA is available
use_cuda = torch.cuda.is_available()
print("Is CUDA available? {}".format(use_cuda))
# CUDA settings torch.__version__ must > 0.4
# !!! This is important for the model!!! The first option is gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print ('******************* pretest model *********************')
model = dlacs.ConvLSTM.ConvLSTM(input_channels, hidden_channels, kernel_size).to(device)
for name, param in model.named_parameters():
if param.requires_grad:
print (name)
print (param.data)
print (param.size())
print ("=========================")
print('##############################################################')
print('############# preview model parameters matrix ###############')
print('##############################################################')
print('Number of parameter matrices: ', len(list(model.parameters())))
for i in range(len(list(model.parameters()))):
print(list(model.parameters())[i].size())
|
tests/unittest_model_ConvLSTM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### <p style="text-align: right;"> ✅ <NAME>.</p>
# # Homework Assignment #5 (Individual)
#
# ## Using SVMs and PCA with familiar data: The Iris Dataset
#
# ### Goals for this homework assignment
#
# By the end of this assignment, you should be able to:
# * Use `git` to track your work and turn in your assignment
# * Read in data and prepare it for modeling
# * Build, fit, and evaluate an SVC model of data
# * Use PCA to reduce the number of important features
# * Build, fit, and evaluate an SVC model of PCA-transformed data
# * Systematically investigate the effects of the number of PCA components on an SVC model of data
#
# ### Assignment instructions:
#
# Work through the following assignment, making sure to follow all of the directions and answer all of the questions.
#
# There are **44 points (+2 bonus points)** possible on this assignment. Point values for each part are included in the section headers.
#
# This assignment is **due at 11:59 pm on Friday, April 16. It should be pushed to your repo (see Part 1) and submitted to D2L**.
#
# #### Imports
#
# It's useful to put all of the imports you need for this assignment in one place. Read through the assignment to figure out which imports you'll need or add them here as you go.
# +
# Put all necessary imports here
from collections import Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA
# -
# ---
# ## 1. Add to your Git repository to track your progress on your assignment (4 points)
#
# As usual, for this assignment, you're going to add it to the `cmse202-s21-turnin` repository you created in class so that you can track your progress on the assignment and preserve the final version that you turn in. In order to do this you need to
#
# **✅ Do the following**:
#
# 1. Navigate to your `cmse202-s21-turnin` repository and create a new directory called `hw-05`.
# 2. Move this notebook into that **new directory** in your repository, then **add it and commit it to your repository**.
# 1. Finally, to test that everything is working, "git push" the file so that it ends up in your GitHub repository.
#
# **Important**: Make sure you've added your Professor and your TA as collaborators to your "turnin" respository with "Read" access so that we can see your assignment (you should have done this in the previous homework assignment)
#
# **Also important**: Make sure that the version of this notebook that you are working on is the same one that you just added to your repository! If you are working on a different copy of the noteobok, **none of your changes will be tracked**!
#
# If everything went as intended, the file should now show up on your GitHub account in the "`cmse202-s21-turnin`" repository inside the `hw-05` directory that you just created. Periodically, **you'll be asked to commit your changes to the repository and push them to the remote GitHub location**. Of course, you can always commit your changes more often than that, if you wish. It can be good to get into a habit of committing your changes any time you make a significant modification, or when you stop working on the project for a bit.
#
# ✅ **Do this**: Before you move on, put the command that your instructor should run to clone your repository in the markdown cell below.
# ``` bash
# # Put the command for cloning your repository here!
#
# ```cd hw-05
# git pull
# git checkout -b NAME-OF-BRANCH
#
# # cd hw-05
#
# # mkdir hw-05
# Then go into the new directory:
#
# # cd hw-05
# Create a README.md or index.md in directory:
#
# touch README.md
# nano README.md
#
#
# git status
#
# git add CHANGES
#
#
# git status
#
#
# git commit -m "DESCRIBE COMMIT IN A FEW WORDS"
#
#
# git push origin NAME-OF-BRANCH
# ---
# <a id="loading"></a>
# ## 2. Loading a familiar dataset: The iris data (6 points)
#
# We've the seen the iris dataset a number of times in the course so far, and since the goal for this assignment is to practice using the SVM and PCA tools we've covered in class, we'll stick with using this simple dataset and avoid any complicated data wrangling headaches. As a reminder: you can find details about the dataset <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set">here</a>.
#
# #### The Iris data
#
# As we've seen, the iris data set is pretty straight forward. Rather than working with a perfectly curated data set though, we'll use the same version of the data that we first looked at during the **Day 6 in-class activity**.
#
# **✅ Do This:** To get started, **you'll need to download the following two files** (or located them from when you used them previously):
#
# `https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/iris.data`
#
# `https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/iris.names`
#
# Once you've downloaded the data or copied it over from where your saved it previously, you should have access to the following : `iris.data` and `iris.names`.
#
# **Open the files using a text browser or other tool on your computer and confirm that they match your expectations and contain the data that we've worked with before.**
# Download data.
# !wget https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/iris.data
# ### 2.1 Load the data
#
# **✅ Task 2.1 (2 points):** Read the ```iris.data``` file into your notebook **with appropriate column headers**. Since we are planning on classifying the data, you should label the fifth column `class`, which should have the iris species class labels:
# * "Iris-setosa"
# * "Iris-versicolor"
# * "Iris-virginica"
#
# Display the DataFrame to make sure it looks reasonable. You should have **5 columns** and **150 rows**.
# +
# Put your code here
data = pd.read_csv('iris.data', sep='\s+',header=None,names=['sepal_l', 'sepal_w', 'petal_l', 'petal_w','class'],skiprows=0)
data
# -
# ### 2.2 Relabeling the classes
#
# To simplify the process of modeling the iris data, we should convert the class labels from strings to integers. For example, rather than `Iris-setosa`, we can consider this to be class "`0`".
#
# **✅ Task 2.2 (2 points):** Replace all of the strings in your "class" column with integers based on the following:
#
# | original label | replaced label |
# | -------- | -------- |
# | Iris-setosa | 0 |
# | Iris-versicolor | 1 |
# | Iris-virginica | 2 |
#
# Once you've replaced the labels, display your DataFrame and confirm that it looks correct.
# +
# Put your code here
data['class'].replace({'Iris-setosa':0,'Iris-versicolor':1,'Iris-virginica':2},inplace=True)
data
# -
# ### 2.3 Separating the "features" from the "labels"
#
# As we've seen when working with `sklearn` it can be much easier to work with the data if we have separate variables that store the features and the labels.
#
# **✅ Task 2.3 (1 point):** Split your DataFrame so that you have two separate DataFrames, one called `features`, which contains all of the iris features, and one called `labels`, which contains all of the *new* iris integer labels you just created.
# +
# Put your code here
features = data[["sepal_l", "sepal_w", "petal_l", "petal_w"]]
labels = data[["class"]]
print("labels count: ", Counter(labels.values[:, 0]))
# -
# ✅ **Question 2.1 (1 point):** How balanced is your set of iris classes? Does it matter for the set of classes to be balanced? Why or why not?
# - The iris classes is balanced because each class have 50 samples.
# - Class balancing does not matter since we are using SVM which is insensitive to class imbalance.
# ---
# ### 🛑 STOP
# **Pause to commit your changes to your Git repository!**
#
# Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing Part 2", and push the changes to GitHub.
#
# ---
# ---
# ## 3. Building an SVC model (4 points)
#
# Now, to tackle this classification problem, we will use a support vector machine just like we've done previously (e.g. in the **Day 19 and Day 20 assignments**). Of course, we could easily replace this with any `sklearn` classifier we choose, but for now we will just use an SVC with a linear kernel.
#
# ### 3.1 Splitting the data
#
# But first, we need to split our data into training and testing data!
#
# **✅ Task 3.1 (1 point):** Split your data into a training and testing set with a training set representing 75% of your data. For reproducibility , set the `random_state` argument to `314159`. Print the lengths to show you have the right number of entries.
# +
# Put your code here
feature_train, feature_test, label_train, label_test = train_test_split(features, labels, train_size=0.75, random_state=314159)
# Convert to numpy array.
feature_train = feature_train.values
feature_test = feature_test.values
label_train = label_train.values
label_test = label_test.values
# -
# ### 3.2 Modeling the data and evaluating the fit
#
# As you have done this a number of times at this point, we ask you to do most of the analysis for this problem in one cell.
#
# **✅ Task 3.2 (2 points):** Build a linear SVC model with `C=0.01`, fit it to the training set, and use the test features to predict the outcomes. Evaluate the fit using the **confusion matrix** and **classification report**.
#
# **Note:** Double-check the documentation on the confusion matrix because the way `sklearn` outputs false positives and false negatives may be different from what most images on the web indicate.
# +
# Put your code here
from sklearn.svm import SVC
# Train.
linear_svc = SVC(C=0.01, kernel="linear")
linear_svc.fit(feature_train, label_train[:, 0])
# Test
pred_test = linear_svc.predict(feature_test)
# Print with confusion matrix.
print("Confusion matrix: ")
print(confusion_matrix(label_test[:, 0], pred_test))
print()
# Print classification report.
print("Classification report: ")
print(classification_report(label_test[:, 0], pred_test))
# -
# ✅ **Question 3.1 (1 point):** How accurate is your model? What evidence are you using to determine that? How many false positives and false negatives does it predict?
# - From classification, the F1-score of the model is 0.89, which is quite good.
# - We can find that from confusion matrix and classification report.
# - there are 3 false positives and 1 false negatives.
# ---
# ### 🛑 STOP
# **Pause to commit your changes to your Git repository!**
#
# Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing Part 3", and push the changes to GitHub.
#
# ---
# ---
# ## 4. Finding and using the best hyperparameters (8 points)
#
# At this point, we have fit one model and determined it's performance, but is it the best model? We can use `GridSearchCV` to find the best model (given our choices of parameters). Once we do that, we will use that best model going forward. This is similar to what we did when working with the "digits" data and the "faces" data in the **Day 20 and Day 21 assignments**.
#
# **Note:** you would typically rerun this grid search in a production environment to continue to verify the best model, but we are not for the sake of speed.
#
# ### 4.1 Performing a grid search
#
# **✅ Task 4.1 (4 points):** Using the following parameters (`C` = `1e-3`, `0.01`, `0.1`, `1`, `10`, `100` and `gamma` = `1e-6`, `1e-5`, `1e-4`, `1e-3`, `0.01`, `0.1`) for both a `linear` and `rbf` kernel use `GridSearchCV` with the `SVC()` model to find the best fit parameters. Once, you're run the grid search, print the "best estimators".
# +
# Put your code here
parameters = {
'kernel':['linear', 'rbf'],
'gamma': [1e-6, 1e-5, 1e-4, 1e-3, 0.01, 0.1],
'C':[1e-3, 0.01, 0.1, 1, 10, 100]
}
svm = SVC()
optimal_clf = GridSearchCV(svm, parameters)
optimal_clf.fit(feature_train, label_train[:, 0])
print("Best estimators: ")
print(optimal_clf.best_estimator_)
# -
# ✅ **Question 4.1 (1 point):** How do the "best estimator" results of the grid search compare to what you used in Part 3? Did the hyper parameter(s) change? What kernel did the grid search determine was the best option?
# - The best estimator's parameters are `C=10, gamma=1.0e-6, kernel="linear"`.
# - The model in Part3 has parameters with `C=0.01, gamma=scale, kernel="linear"`
# - The hyperparameters is changed.
# - the best option will also use linear kernel from grid search.
# ### 4.2 Evaluating the best fit model
#
# Now that we have found the "best estimators", let's determine how good the fit is.
#
# **✅ Task 4.2 (2 points):** Use the test features to predict the outcomes for the best model. Evaluate the fit using the **confusion matrix** and **classification report**.
#
# **Note:** Double-check the documentation on the confusion matrix because the way `sklearn` outputs false positives and false negatives may be different from what most images on the web indicate.
# +
# Put your code here
# Test
pred_test = optimal_clf.best_estimator_.predict(feature_test)
# Print with confusion matrix.
print("Confusion matrix: ")
print(confusion_matrix(label_test[:, 0], pred_test))
print()
# Print classification report.
print("Classification report: ")
print(classification_report(label_test[:, 0], pred_test))
# -
# ✅ **Question 4.2 (1 point):** How accurate is this best model? What evidence are you using to determine that? How many false positives and false negatives does it predict?
# - From classification, the F1-score of the model is 0.95, which is quite improved from part3.
# - We can find that from confusion matrix and classification report.
# - there are 0 false positives and 2 false negatives.
# ---
# ### 🛑 STOP
# **Pause to commit your changes to your Git repository!**
#
# Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing Part 4", and push the changes to GitHub.
#
# ---
# ---
# ## 5. Using Principal Components (10 points)
#
# The full model uses all 4 iris features to predict the results and you likely found that the model is pretty accurate using all 4 features. But in some cases, we might have significantly more features (which means much more computational time!), and we might not need nearly the level of accuracy we can achieve with the full data set or we might not have enough computational resources to use **all** of the features.
#
# In such situations, we might need to see how close we can get with fewer features. But instead of simply removing features, we will use a PCA to determine the features that contribute the most the model (through their accounted variance) and use those to build our SVC model. We did this to improve our classification with the "faces" dataset in the **Day 21 assignment**.
#
# ### 5.1 Running a Principle Component Analysis (PCA)
#
# Since we only have 4 total features to start with, let's see how well we can do if we try to cut this aggressively reduce the feature count and use only **1** principle component. We'll see how well we can predict the classes of the iris dataset with just these two!
#
# **✅ Task 5.1 (3 points):** Using `PCA()` and the associated `fit()` method, run a principle component analysis to your training features using only 1 components. Transform both the test and training features using the result of your PCA. Print the `explained_variance_ratio_`.
# +
# Put your code here
pca = PCA(n_components=1)
# Fit with train features.
pca.fit(feature_train)
# Transform.
pca_train = pca.transform(feature_train)
pca_test = pca.transform(feature_test)
print(pca.explained_variance_ratio_)
# -
# ✅ **Question 5.1 (1 point):** What is the total explained variance ratio captured by this simple 1-component PCA? (just quote the number) How well do you think a model with this many feature will perform? Why?
# - The explained variance ratio is 0.923.
# - Such ratio should be very explainable and will lead a good model.
# ### 5.2 Fit and Evaluate an SVC model
#
# Using the pca transformed features, we will train and test an SVC model using the "best estimators" you found previously.
#
# **✅ Task 5.2 (2 points):** Using the PCA transformed training data, build and train an SVC model using the best estimate values from before. Predict the classes using the PCA transformed test data. Evaluate the model using the classfication report, and the confusion matrix.
# +
# Put your code here
# Build SVC with the previous optimal classifier paramter.
pca_svm = SVC(**optimal_clf.best_params_)
# Train
pca_svm.fit(pca_train, label_train[:, 0])
# Test
pred_test = pca_svm.predict(pca_test)
# Print with confusion matrix.
print("Confusion matrix: ")
print(confusion_matrix(label_test[:, 0], pred_test))
print()
# Print classification report.
print("Classification report: ")
print(classification_report(label_test[:, 0], pred_test))
# -
# ✅ **Question 5.2 (1 point):** How accurate is this model? What evidence are you using to determine that? How many false positives and false negatives does it predict? How does it compare to the full feature model?
# - From classification, the F1-score of the model is 0.92, which is quite good.
# - We can find that from confusion matrix and classification report.
# - there are 0 false positives and 3 false negatives.
# - Compared with full feature model, the accuracy is down.
# ### 5.3 Repeat your analysis with more components
#
# You probably found that the model with just 1 features didn't actually do too bad, which is pretty impressive. That said, can we do better?
#
# What if we increase the number of principle components to **2**? What happens now?
#
# **✅ Task 5.3 (2 points):** Repeat your analysis from 5.1 and 5.2 using **2 components** instead. As part of your analysis, **print the total explained variance ratio for both components as well as the sum of these values**.
# +
# Put your code here
pca = PCA(n_components=2)
# Fit with train features.
pca.fit(feature_train)
# Transform.
pca_train = pca.transform(feature_train)
pca_test = pca.transform(feature_test)
print("Two components: ", pca.explained_variance_ratio_)
print("Sum of the two components: ", np.sum(pca.explained_variance_ratio_))
print()
# Build SVC with the previous optimal clafeassifier paramter.
pca_svm = SVC(**optimal_clf.best_params_)
# Train
pca_svm.fit(pca_train, label_train[:, 0])
# Test
pred_test = pca_svm.predict(pca_test)
# Print with confusion matrix.
print("Confusion matrix: ")
print(confusion_matrix(label_test[:, 0], pred_test))
print()
# Print classification report.
print("Classification report: ")
print(classification_report(label_test[:, 0], pred_test))
# -
# ✅ **Question 5.3 (1 point):** What is the total explained variance ratio captured by this PCA? How accurate is this model? What evidence are you using to determine that? How many false positives and false negatives does it predict? How does it compare to the 1 PCA component model? To the full feature model?
# - The explained variance ratio is 0.978.
# - From classification, the F1-score of the model is 0.97, which is quite good.
# - We can find that from confusion matrix and classification report.
# - there are 0 false positives and 1 false negatives.
# - Compared with 1 PCA model, the performance is improved, with F1-score improved from 0.92 to 0.97.
# - Compared with full feature model, the performance is also improved, with F1-score improved from 0.95 to 0.97.
# ---
# ### 🛑 STOP
# **Pause to commit your changes to your Git repository!**
#
# Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing Part 5", and push the changes to GitHub.
#
# ---
# ---
# ## 6. How well does PCA work? (12 points)
#
# Clearly, the number of components we use in our PCA matters. Let's investigate how they matter by systematically building a model for any number of selected components. While this might seem a bit unnecessary for such a simple dataset, **this can be very useful for more complex datasets and models!**
#
# ### 6.1 Accuracy vs. Components
#
# To systematically explore how well PCA improves our classification model, we will do this by writing a function that creates the PCA, the SVC model, fits the training data, predict the labels using test data, and returns the accuracy scores and the explained variance ratio. So your function will take as input:
# * the number of request PCA components
# * the training feature data
# * the testing feature data
# * the training data labels
# * the test data labels
# and it will return the accuracy score for an SVC model fit to pca transformed features and the **total** explained variance ratio.
#
# **✅ Task 6.1 (4 points):** Create this function, which you will use in the next section.
# Put your code here
def func(n_component, xtrain, xtest, ytrain, ytest):
"""Whole function for PCA + SVC."""
pca = PCA(n_components=n_component)
# Fit with train features.
pca.fit(xtrain)
total_ratio = np.sum(pca.explained_variance_ratio_)
# Transform.
pca_trained = pca.transform(xtrain)
pca_tested = pca.transform(xtest)
# Build SVC with the previous optimal classifier paramter.
clf = SVC(C=10, gamma=1e-06, kernel= 'linear')
# Train
clf.fit(pca_trained, ytrain)
# Test
acc = clf.score(pca_tested, ytest)
return acc, total_ratio
# ### 6.2 Compute accuracies
#
# Now that you have created a function that returns the accuracy for a given number of components, we will use that to plot the how the accuracy of your SVC model changes when we increase the number of components used in the PCA.
#
# **✅ Task 6.2 (2 points):** For 1 through 4 components, use your function above to compute and store (as a list) the accuracy of your models and the total explained variance ratio of your models.
# +
# Put your code here
n_components = [1, 2, 3, 4]
acc_list, ratio_list = [], []
for n_component in n_components:
acc, total_ratio = func(n_component, feature_train, feature_test, label_train[:, 0], label_test[:, 0])
acc_list.append(acc)
ratio_list.append(total_ratio)
# -
# ### 6.3 Plot accuracy vs number of components
#
# Now that we have those numbers, it makes sense to look at the accuracy vs # of components.
#
# **✅ Task 6.3 (2 points):** Plot the accuracy vs # of components.
## your code here
plt.plot(n_components, acc_list)
plt.xlabel("Number of PCA components")
plt.ylabel("Model accuracy")
# **✅ Question 6.1 (1 point):** Where does it seem like we have diminishing returns? That is, at what point is there no major increase in accuracy (or perhaps the accuracy is decreased) as we add additional components to the PCA?
# - The `n_components=2` leads to the best accuracy.
# - When `n_component>2`, the accuracy will be decreased.
# ### 6.4 Plot total explained variance vs number of components
#
# What if we look at total explained variance as a function of # of components?
#
# **✅ Task 6.4 (2 points):** Plot the total explained variance ratio vs # of components.
# Put your code here
plt.plot(n_components, ratio_list)
plt.xlabel("Number of PCA components")
plt.ylabel("Total ratio")
# **✅ Question 6.2 (1 points):** Where does it seem like we have diminishing returns, that is, no major increase in explained variance as we add additional components to the PCA? How does that number of components compare to the diminishing returns for accuracy?
# - The `n_components=4` leads to the best accuracy.
# - The more the componets are, the larger the total explained variances are.
# - There is no optimal components numebr for the total explained variances.
# ---
# ### 🛑 STOP
# **Pause to commit your changes to your Git repository!**
#
# Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing Part 6", and push the changes to GitHub.
#
# ---
# ---
# ## 7. Bonus exercise: visualizing the decision boundaries for a portion of the feature space (2 *bonus* points)
#
# As you might imagine, visualizing decision boundaries with for a multidimensional feature space can be a challenge! That said, when trying to build some intuition about how these classifiers work, visualing 2D decisions boundaries can be useful.
#
# To earn some _extra points_ on this assignment try using the [following example](https://scikit-learn.org/stable/auto_examples/svm/plot_iris_svc.html) as a guide to visualize the decision boundary for your "best estimator" parameters using your **2 PCA components** as your training features. **To be clear, you should be using your PCA component data and your best fit parameters, you should not just be running the example!** You should be able to get a plot that looks something like this:
#
# <img src="https://i.ibb.co/wL4xHGb/pca-boundaries.png" alt="pca-boundaries" border="0">
#
# Since we didn't explicitly cover this in class, **you do not have to complete this part of the assignment unless you would like the extra credit points**.
#
# **✅ Task 7.1 (2 *extra* points):** Try to create a plot of the decision boundaries for the 2 principle components using your "best estimator" parameters.
# +
# Put your code here
# Build up the meshgrid.
X0, X1 = pca_train[:, 0], pca_train[:, 1]
x_min, x_max = X0.min() - 1, X0.max() + 1
y_min, y_max = X1.min() - 1, X1.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
# Predict with the meshgrid.
Z = pca_svm.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the decision boundary.
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
# Plot the labels.
plt.scatter(X0, X1, c=label_train[:, 0], cmap=plt.cm.coolwarm, s=20, edgecolors='k')
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
# -
# ---
# ## Assignment wrap-up¶
# Please fill out the form that appears when you run the code below. **You must completely fill this out in order to receive credit for the assignment!**
from IPython.display import HTML
HTML(
"""
<iframe
src="https://forms.office.com/Pages/ResponsePage.aspx?id=MHEXIi9k2UGSEXQjetVofddd5T-Pwn1DlT6_yoCyuCFUNFFCRjgzN0JOTUFJQVNLR0VMQUZNNlVCTy4u"
width="800px"
height="600px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
# ### Congratulations, you're done!
# Submit this assignment by uploading it to the course Desire2Learn web page. Go to the "Homework Assignments" folder, find the submission folder for Homework #5, and upload your notebook.
|
HW-05_SVMandPCA-STUDENT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import json
import numpy as np
import pandas as pd
from sklearn. model_selection import cross_val_score
from sklearn.impute import KNNImputer , SimpleImputer
# -
# Reading the dataset
Train = pd.read_csv('./dataset/train.csv', index_col=0)
Test = pd.read_csv('./dataset/test.csv', index_col=0)
Train.head()
# # Pre-Processing
features = ['Ticket', 'Cabin']
Train = Train.drop(features, axis=1)
Test = Test.drop(features, axis=1)
Test_id = Test.index
Train['Name'] = Train.Name.map(lambda x:re.findall('([A-Za-z]+\.)' ,x)[0])
Test['Name'] = Test.Name.map(lambda x:re.findall('([A-Za-z]+\.)' ,x)[0])
def group_titles(titles):
for i, each in enumerate(titles):
if any(each == ele for ele in ['Mr.', 'Miss.', 'Mrs.', 'Master.']):
continue
elif any(each == ele for ele in ['Sir.', 'Ms.', 'Mme.', 'Mlle.', 'Lady.', 'Countess.']):
titles[i] = 'grp1'
else:
titles[i] = 'grp2'
group_titles(Train.Name.values)
group_titles(Test.Name.values)
# +
for attr in ['Age']: #fillna for real valued features with mean
fill = Train[attr].mean()
Train[attr].fillna(fill, inplace=True)
Test[attr].fillna(fill, inplace=True)
# as Fare has skewed distribution using median as central tendancy
for attr in ['Fare']: #fillna for real valued features with median
fill = Train[attr].median()
Train[attr].fillna(fill, inplace=True)
Test[attr].fillna(fill, inplace=True)
for attr in ['Embarked']: #fillna for categorical features with mode
fill = Train[attr].mode()[0]
Train[attr].fillna(fill, inplace=True)
Test[attr].fillna(fill, inplace=True)
# -
train = pd.get_dummies(Train)
test = pd.get_dummies(Test)
# splitting into features (xTrain) and labels (yTrain)
xTrain = train.drop('Survived', axis=1)
yTrain = train['Survived']
xTest = test
from sklearn.preprocessing import StandardScaler
scaller = StandardScaler()
scaller.fit(xTrain[['Age', 'Fare']])
xTrain[['Age', 'Fare']] = scaller.transform(xTrain[['Age', 'Fare']])
xTest[['Age', 'Fare']] = scaller.transform(xTest[['Age', 'Fare']])
xTrain.head()
# # Different Classifiers and Parameter Tuning
def parameterTune(estimator, param_grid):
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(
estimator = estimator,
param_grid = param_grid,
n_jobs = 11,
cv = 5,
)
grid.fit(xTrain, yTrain)
return grid.best_score_, grid.best_params_
# function to generate submission file
def test_eval(estimator, params):
clf = estimator(**params)
clf.fit(xTrain, yTrain)
yPred = clf.predict(xTest)
df = pd.DataFrame({'PassengerId':Test_id, 'Survived':yPred})
return df
# ## Gaussian Naive Bayes
# +
from sklearn.naive_bayes import GaussianNB
estimator = GaussianNB()
param_grid = {}
gnb_best_score_, gnb_best_params_ = parameterTune(estimator, param_grid)
gnb_df = test_eval(GaussianNB, gnb_best_params_)
# -
print('best_score_:',gnb_best_score_,'\nbest_params_:',gnb_best_params_)
# ## Logistic Regression
# +
from sklearn.linear_model import LogisticRegression
estimator = LogisticRegression(tol=1e-4, solver='liblinear', random_state=1)
param_grid = {
'max_iter' : [1000, 2000, 3000],
'penalty' : ['l1', 'l2'],
'solver' : ['liblinear']
}
lrc_best_score_, lrc_best_params_ = parameterTune(estimator, param_grid)
lrc_df = test_eval(LogisticRegression, lrc_best_params_)
# -
print('best_score_:',lrc_best_score_,'\nbest_params_:',lrc_best_params_)
# ## KNN
# +
from sklearn.neighbors import KNeighborsClassifier
estimator = KNeighborsClassifier()
param_grid = {
'n_neighbors' : [3, 5, 7, 10],
'weights' : ['uniform', 'distance'],
'p' : [1, 2]
}
knn_best_score_, knn_best_params_ = parameterTune(estimator, param_grid)
knn_df = test_eval(KNeighborsClassifier, knn_best_params_)
# -
print('best_score_:',knn_best_score_,'\nbest_params_:',knn_best_params_)
# ## Support Vector Classifier
# +
# instantiating Support Vector Classifier
from sklearn.svm import SVC
estimator = SVC()
param_grid = [
{ 'kernel' : ['linear'],
'C' : [0.1, 1, 10, 100]},
{ 'kernel' : ['rbf'],
'C' : [0.1, 1, 10, 100],
'gamma' : ['scale', 'auto', 1e-1, 1e-2, 1e-3, 1e-4],},
]
svc_best_score_, svc_best_params_ = parameterTune(estimator, param_grid)
svc_df = test_eval(SVC, svc_best_params_)
# -
print('best_score_:',svc_best_score_,'\nbest_params_:',svc_best_params_)
# ## Random Forest Classifier
# +
# instantiating RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier()
param_grid = {
'n_estimators' : [50, 100, 250, 500, 750, 1000],
'criterion' : ["gini", "entropy"],
'max_features' : ["auto", 2, 5, 7, 10],
}
rfc_best_score_, rfc_best_params_ = parameterTune(estimator, param_grid)
rfc_df = test_eval(RandomForestClassifier, rfc_best_params_)
# -
print('best_score_:',rfc_best_score_,'\nbest_params_:',rfc_best_params_)
# # Submission File
pd.DataFrame({
'GaussianNB' : gnb_best_score_,
'LogisticRegression' : lrc_best_score_,
'KNeighborsClassifier' : knn_best_score_,
'SVC' : svc_best_score_,
'RandomForestClassifier' : rfc_best_score_
}, index=['Accuracy'])
# +
best_params = {
'GaussianNB' : gnb_best_params_,
'LogisticRegression' : lrc_best_params_,
'KNeighborsClassifier' : knn_best_params_,
'SVC' : svc_best_params_,
'RandomForestClassifier' : rfc_best_params_
}
with open("./results/04_.json", 'w') as file:
json.dump(best_params, file)
# +
#svc_df.to_csv('./results/04_01_svc.csv', index=None) #0.77990
#rfc_df.to_csv('./results/04_02_rfc.csv', index=None) #0.74401
|
01-Titanic_Machine_Learning_from_Disaster/04_feature_engineering.ipynb
|